diff --git a/Makefile b/Makefile
index 10fd25fc3923f7d7e7934da0b07ea8b6da3db696..63dff67f55859711146c7a20191301fb8e6418fa 100644
--- a/Makefile
+++ b/Makefile
@@ -501,13 +501,13 @@ data-fetch-dbsnapshot: FORCE
 
 data-import-dbsnapshot: FORCE
 	@echo "\n$(GREEN)*** Importing latest production database dump ***$(NC)\n"
-	@sudo -u postgres pg_restore --verbose --format=d --dbname=mentat_main /vagrant/data/devsnapshot/main
-	@sudo -u postgres psql mentat_main -c "COPY reports_events ($(cat /vagrant/data/devsnapshot/main/reports_events.cols)) FROM stdin;" < /vagrant/data/devsnapshot/chunks/reports_events.dat
-	@sudo -u postgres psql mentat_main -c "COPY statistics_events ($(cat /vagrant/data/devsnapshot/main/statistics_events.cols)) FROM stdin;" < /vagrant/data/devsnapshot/chunks/statistics_events.dat
-	@sudo -u postgres psql mentat_main -c "COPY changelogs_items ($(cat /vagrant/data/devsnapshot/main/changelogs_items.cols)) FROM stdin;" < /vagrant/data/devsnapshot/chunks/changelogs_items.dat
+	@sudo -u postgres pg_restore --verbose --format=d --dbname=mentat_main ./data/devsnapshot/main
+	@sudo -u postgres psql mentat_main -c "COPY reports_events ($(cat /vagrant/data/devsnapshot/main/reports_events.cols)) FROM stdin;" < ./data/devsnapshot/chunks/reports_events.dat
+	@sudo -u postgres psql mentat_main -c "COPY statistics_events ($(cat /vagrant/data/devsnapshot/main/statistics_events.cols)) FROM stdin;" < ./data/devsnapshot/chunks/statistics_events.dat
+	@sudo -u postgres psql mentat_main -c "COPY changelogs_items ($(cat /vagrant/data/devsnapshot/main/changelogs_items.cols)) FROM stdin;" < ./data/devsnapshot/chunks/changelogs_items.dat
 
-	@sudo -u postgres psql mentat_events -c "COPY events ($(cat /vagrant/data/devsnapshot/events/events.cols)) FROM stdin;" < /vagrant/data/devsnapshot/events/events.dat
-	@sudo -u postgres psql mentat_events -c "COPY events_json ($(cat /vagrant/data/devsnapshot/events/events_json.cols)) FROM stdin;" < /vagrant/data/devsnapshot/events/events_json.dat
+	@sudo -u postgres psql mentat_events -c "COPY events ($(cat /vagrant/data/devsnapshot/events/events.cols)) FROM stdin;" < ./data/devsnapshot/events/events.dat
+	@sudo -u postgres psql mentat_events -c "COPY events_json ($(cat /vagrant/data/devsnapshot/events/events_json.cols)) FROM stdin;" < ./data/devsnapshot/events/events_json.dat
 	@echo ""
 	@sudo -u postgres psql mentat_main -e -c "select count(*) as user_count, min(createtime) as from_date, max(createtime) as to_date from users;"
@@ -518,8 +518,8 @@ data-import-dbsnapshot: FORCE
 	@sudo -u postgres psql mentat_main -e -c "select count(*) as statistics_count, min(createtime) as from_date, max(createtime) as to_date from statistics_events;"
 	@sudo -u postgres psql mentat_main -e -c "select count(*) as changelog_count, min(createtime) as from_date, max(createtime) as to_date from changelogs_items;"
 
-	@sudo -u postgres psql mentat_events -e -c "select count(*) as event_count, min(createtime) as from_date, max(storagetime) as to_date from events;"
-	@sudo -u postgres psql mentat_events -e -c "select count(*) as event_json_count, min(createtime) as from_date, max(storagetime) as to_date from events_json;"
+	@sudo -u postgres psql mentat_events -e -c "select count(*) as event_count, min(storagetime) as from_date, max(storagetime) as to_date from events;"
+	@sudo -u postgres psql mentat_events -e -c "select count(*) as event_json_count, min(storagetime) as from_date, max(storagetime) as to_date from events_json;"
 	@echo ""
 
 #-------------------------------------------------------------------------------
diff --git a/conf/scripts/devsnapshot.sh b/conf/scripts/devsnapshot.sh
index 3dbe66a966fa6734dcbf91513b909b2a273fa8ef..e174deaccf5f6b2d509e51db4e62740dd850436c 100755
--- a/conf/scripts/devsnapshot.sh
+++ b/conf/scripts/devsnapshot.sh
@@ -28,7 +28,7 @@ for table_name in reports_events statistics_events changelogs_items
 do
     print_subtitle "Dumping chunk of table ${table_name} from metadata database"
     sudo -u mentat psql mentat_main -c "COPY (SELECT * FROM ${table_name} WHERE createtime >= '${BACKUP_DATE_FROM}'::timestamptz) TO stdout" > ${BACKUP_DIR}/${table_name}.dat
-    sudo -u mentat psql mentat_main -c "COPY (SELECT string_agg(column_name, ',') FROM information_schema.columns WHERE table_name = '${table_name}' ORDER BY ordinal_position) TO stdout" > ${BACKUP_DIR}/${table_name}.cols
+    sudo -u mentat psql mentat_main -c "COPY (SELECT string_agg(data.column_name, ',') FROM (SELECT column_name FROM information_schema.columns WHERE table_name = '${table_name}' ORDER BY ordinal_position) AS data) TO stdout" > ${BACKUP_DIR}/${table_name}.cols
 done
 
 BACKUP_DIR=/var/mentat/devsnapshots/devsnapshot/events
@@ -36,13 +36,15 @@ mkdir -p $BACKUP_DIR
 chown mentat:mentat $BACKUP_DIR
 cd $BACKUP_DIR
 
-print_subtitle "Dumping chunk of table events from event database"
-sudo -u mentat psql mentat_events -c "COPY (SELECT * FROM events WHERE storagetime >= '${BACKUP_DATE_FROM}'::timestamptz) TO stdout" > ${BACKUP_DIR}/events.dat
-sudo -u mentat psql mentat_events -c "COPY (SELECT string_agg(column_name, ',') FROM information_schema.columns WHERE table_name = 'events' ORDER BY ordinal_position) TO stdout" > ${BACKUP_DIR}/events.cols
+table_name='events'
+print_subtitle "Dumping chunk of table ${table_name} from event database"
+sudo -u mentat psql mentat_events -c "COPY (SELECT * FROM ${table_name} WHERE storagetime >= '${BACKUP_DATE_FROM}'::timestamptz) TO stdout" > ${BACKUP_DIR}/${table_name}.dat
+sudo -u mentat psql mentat_events -c "COPY (SELECT string_agg(data.column_name, ',') FROM (SELECT column_name FROM information_schema.columns WHERE table_name = '${table_name}' ORDER BY ordinal_position) AS data) TO stdout" > ${BACKUP_DIR}/${table_name}.cols
 
-print_subtitle "Dumping chunk of table events_json from event database"
-sudo -u mentat psql mentat_events -c "COPY (SELECT events_json.* FROM events_json JOIN events ON events_json.id = events.id WHERE events.storagetime >= '${BACKUP_DATE_FROM}'::timestamptz) TO stdout" > ${BACKUP_DIR}/events_json.dat
-sudo -u mentat psql mentat_events -c "COPY (SELECT string_agg(column_name, ',') FROM information_schema.columns WHERE table_name = 'events_json' ORDER BY ordinal_position) TO stdout" > ${BACKUP_DIR}/events_json.cols
+table_name='events_json'
+print_subtitle "Dumping chunk of table ${table_name} from event database"
+sudo -u mentat psql mentat_events -c "COPY (SELECT events_json.* FROM events_json JOIN events ON events_json.id = events.id WHERE events.storagetime >= '${BACKUP_DATE_FROM}'::timestamptz) TO stdout" > ${BACKUP_DIR}/${table_name}.dat
+sudo -u mentat psql mentat_events -c "COPY (SELECT string_agg(data.column_name, ',') FROM (SELECT column_name FROM information_schema.columns WHERE table_name = '${table_name}' ORDER BY ordinal_position) AS data) TO stdout" > ${BACKUP_DIR}/${table_name}.cols
 
 print_subtitle "Size before packaging:"
 du -csh /var/mentat/devsnapshots/devsnapshot
@@ -61,9 +63,3 @@ ls -alh ${SNAPSHOT_FILE_NAME}
 find /var/mentat/devsnapshots/ -type f -mtime +7 -name '*.tar.gz' -delete
 
 print_title '<DONE> CREATING DEVELOPMENT SNAPSHOT'
-
-
-sudo -u mentat psql mentat_main -c "COPY (SELECT string_agg(column_name, ',') FROM information_schema.columns WHERE table_name = '${table_name}' ORDER BY ordinal_position) TO stdout" > ${BACKUP_DIR}/${table_name}.cols
-sudo -u mentat psql mentat_main -c "COPY (SELECT * FROM ${table_name} WHERE createtime >= '${BACKUP_DATE_FROM}'::timestamptz) TO stdout" > ${BACKUP_DIR}/${table_name}.dat
-
-sudo -u postgres psql mentat_main -c "COPY ${table_name} ($(cat ${BACKUP_DIR}/${table_name}.cols)) FROM stdin;" < ${BACKUP_DIR}/${table_name}.dat
diff --git a/doc/sphinx/_doclib/development.rst b/doc/sphinx/_doclib/development.rst
index b2242bd298fde330e2bead9d23bff9565adeaab9..83888c4c4e4fd1abe4621bf6c1e1c951d869ee34 100644
--- a/doc/sphinx/_doclib/development.rst
+++ b/doc/sphinx/_doclib/development.rst
@@ -654,19 +654,35 @@ combinations of Vagrant/VirtualBox versions:
      - ?
      - Y
 
-Next you must fetch yourself configuration file for GeoIP update utility, otherwise the
-provisioning script will fail:
+Prerequisites
+````````````````````````````````````````````````````````````````````````````````
+
+To make sure the provisioning is performed correctly, you must first obtain a
+configuration file for the GeoIP update utility, otherwise the provisioning will fail.
+
+Core developers with access to CESNET's ``mentat-alt.cesnet.cz`` development server
+can just execute the following command and be done:
 
 .. code-block:: shell
 
     make data-fetch-config
 
-Next is standard Vagrant stuff to initialize and boot-up your development box and connect to it
-via SSH:
+Other developers must create a `MaxMind <https://dev.maxmind.com/geoip/geolite2-free-geolocation-data?lang=en>`__
+account for the **GeoLite2** databases GeoLite2-ASN, GeoLite2-City and GeoLite2-Country.
+After the registration copy the template configuration file and enter your account ID
+and license key:
 
 .. code-block:: shell
 
-    vagrant up
+    cp ./vagrantenv/GeoIP.conf ./data/
+    vim ./data/GeoIP.conf  # enter your credentials
+
+At this point you are all set to perform the standard Vagrant steps to initialize
+and boot up your development box and connect to it via SSH:
+
+.. code-block:: shell
+
+    time vagrant up
     vagrant ssh
 
 After connecting to development machine you will be greeted with banner and some
@@ -688,26 +704,34 @@ and be ready to work:
 
 At this point your development machine is ready, however the database is completely empty. There
 are no user accounts, no abuse groups, nothing. You may run Mentat`s backend services, the user interface
-will however not be usable. For normal development workflow you will need a snapshot of production
-database. First fetch the snapshot from ``mentat-alt.cesnet.cz`` server to your host machine:
+will however not be usable. The first option is to populate the database with some basic fixtures:
 
 .. code-block:: shell
 
-    make data-fetch-dbsnapshot
+    (venv) !DEV! mentat@mentat-devel /vagrant $ mentat-dbmngr.py --command fixtures-add
 
-When that is done execute following command from within the development box:
+The second option is to use the following command to create the first administrator account
+and start completely from scratch:
 
 .. code-block:: shell
 
-    (venv) !DEV! mentat@mentat-devel /vagrant $ make data-fetch-dbsnapshot
+    (venv) !DEV! mentat@mentat-devel /vagrant $ mentat-dbmngr.py --command user-add login=superman "fullname=Clark Kent" email=kent@dailyplanet.com "organization=Daily Planet, inc." roles=user,admin
 
-Another option is instead of importing the snapshot to load basic data fixtures:
+Core developers with access to CESNET's ``mentat-alt.cesnet.cz`` development server
+can fetch and import a database snapshot to work with production-like data. First
+fetch the snapshot by executing the following command on your host machine:
 
 .. code-block:: shell
 
-    (venv) !DEV! mentat@mentat-devel /vagrant $ mentat-dbmngr.py --command fixtures-add
+    make data-fetch-dbsnapshot
+
+When that is done, execute the following command from within the development Vagrant guest box:
+
+.. code-block:: shell
+
+    (venv) !DEV! mentat@mentat-devel /vagrant $ make data-fetch-dbsnapshot
 
-When working in Vagrat box please note and keep in mind following:
+That is about it. When working in the Vagrant box, please note and keep in mind the following:
 
  * By default you will be connected as user ``mentat`` with passwordless sudo privileges.
  * Default built-in user ``vagrant`` comes also with passwordless sudo privileges.
diff --git a/vagrantenv/GeoIP.conf b/vagrantenv/GeoIP.conf
new file mode 100644
index 0000000000000000000000000000000000000000..b766cd2f7b93724dc87fa005a83366acfdc68db2
--- /dev/null
+++ b/vagrantenv/GeoIP.conf
@@ -0,0 +1,39 @@
+# GeoIP.conf file for `geoipupdate` program, for versions >= 3.1.1.
+# Used to update GeoIP databases from https://www.maxmind.com.
+# For more information about this config file, visit the docs at
+# https://dev.maxmind.com/geoip/geoipupdate/.
+
+# `AccountID` is from your MaxMind account.
+AccountID your-account-id-here
+
+# `LicenseKey` is from your MaxMind account.
+LicenseKey your-license-key-here
+
+# `EditionIDs` is from your MaxMind account.
+EditionIDs GeoLite2-ASN GeoLite2-City GeoLite2-Country
+
+# The remaining settings are OPTIONAL.
+
+# The directory to store the database files. Defaults to /usr/share/GeoIP
+# DatabaseDirectory /usr/share/GeoIP
+
+# The server to use. Defaults to "updates.maxmind.com".
+# Host updates.maxmind.com
+
+# The proxy host name or IP address. You may optionally specify a
+# port number, e.g., 127.0.0.1:8888. If no port number is specified, 1080
+# will be used.
+# Proxy 127.0.0.1:8888
+
+# The user name and password to use with your proxy server.
+# ProxyUserPassword username:password
+
+# Whether to preserve modification times of files downloaded from the server.
+# Defaults to "0".
+# PreserveFileTimes 0
+
+# The lock file to use. This ensures only one geoipupdate process can run at a
+# time.
+# Note: Once created, this lockfile is not removed from the filesystem.
+# Defaults to ".geoipupdate.lock" under the DatabaseDirectory.
+# LockFile /usr/share/GeoIP/.geoipupdate.lock
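
For reference, the GeoIP.conf template added above is consumed by the geoipupdate utility during provisioning. Below is a minimal sketch of a manual database refresh, assuming the filled-in file was copied to ./data/GeoIP.conf as described in the documentation and that the databases should land in the default /usr/share/GeoIP directory; the exact invocation used by the provisioning scripts may differ:

    # Refresh the GeoLite2 databases using the filled-in configuration file.
    # -f selects the configuration file, -d the database directory, -v enables verbose output.
    sudo geoipupdate -f ./data/GeoIP.conf -d /usr/share/GeoIP -v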