Commit 6bd5d38a authored by Jan Mach

Fixes in the scripts for creating and importing the development snapshot for Vagrant boxes.

(Redmine issue: #7041)
parent 3c8a6daf
@@ -501,13 +501,13 @@ data-fetch-dbsnapshot: FORCE
 data-import-dbsnapshot: FORCE
 	@echo "\n$(GREEN)*** Importing latest production database dump ***$(NC)\n"
-	@sudo -u postgres pg_restore --verbose --format=d --dbname=mentat_main /vagrant/data/devsnapshot/main
-	@sudo -u postgres psql mentat_main -c "COPY reports_events ($(cat /vagrant/data/devsnapshot/main/reports_events.cols)) FROM stdin;" < /vagrant/data/devsnapshot/chunks/reports_events.dat
-	@sudo -u postgres psql mentat_main -c "COPY statistics_events ($(cat /vagrant/data/devsnapshot/main/statistics_events.cols)) FROM stdin;" < /vagrant/data/devsnapshot/chunks/statistics_events.dat
-	@sudo -u postgres psql mentat_main -c "COPY changelogs_items ($(cat /vagrant/data/devsnapshot/main/changelogs_items.cols)) FROM stdin;" < /vagrant/data/devsnapshot/chunks/changelogs_items.dat
-	@sudo -u postgres psql mentat_events -c "COPY events ($(cat /vagrant/data/devsnapshot/events/events.cols)) FROM stdin;" < /vagrant/data/devsnapshot/events/events.dat
-	@sudo -u postgres psql mentat_events -c "COPY events_json ($(cat /vagrant/data/devsnapshot/events/events_json.cols)) FROM stdin;" < /vagrant/data/devsnapshot/events/events_json.dat
+	@sudo -u postgres pg_restore --verbose --format=d --dbname=mentat_main ./data/devsnapshot/main
+	@sudo -u postgres psql mentat_main -c "COPY reports_events ($(cat /vagrant/data/devsnapshot/main/reports_events.cols)) FROM stdin;" < ./data/devsnapshot/chunks/reports_events.dat
+	@sudo -u postgres psql mentat_main -c "COPY statistics_events ($(cat /vagrant/data/devsnapshot/main/statistics_events.cols)) FROM stdin;" < ./data/devsnapshot/chunks/statistics_events.dat
+	@sudo -u postgres psql mentat_main -c "COPY changelogs_items ($(cat /vagrant/data/devsnapshot/main/changelogs_items.cols)) FROM stdin;" < ./data/devsnapshot/chunks/changelogs_items.dat
+	@sudo -u postgres psql mentat_events -c "COPY events ($(cat /vagrant/data/devsnapshot/events/events.cols)) FROM stdin;" < ./data/devsnapshot/events/events.dat
+	@sudo -u postgres psql mentat_events -c "COPY events_json ($(cat /vagrant/data/devsnapshot/events/events_json.cols)) FROM stdin;" < ./data/devsnapshot/events/events_json.dat
 	@echo ""
 	@sudo -u postgres psql mentat_main -e -c "select count(*) as user_count, min(createtime) as from_date, max(createtime) as to_date from users;"
@@ -518,8 +518,8 @@ data-import-dbsnapshot: FORCE
 	@sudo -u postgres psql mentat_main -e -c "select count(*) as statistics_count, min(createtime) as from_date, max(createtime) as to_date from statistics_events;"
 	@sudo -u postgres psql mentat_main -e -c "select count(*) as changelog_count, min(createtime) as from_date, max(createtime) as to_date from changelogs_items;"
-	@sudo -u postgres psql mentat_events -e -c "select count(*) as event_count, min(createtime) as from_date, max(storagetime) as to_date from events;"
-	@sudo -u postgres psql mentat_events -e -c "select count(*) as event_json_count, min(createtime) as from_date, max(storagetime) as to_date from events_json;"
+	@sudo -u postgres psql mentat_events -e -c "select count(*) as event_count, min(storagetime) as from_date, max(storagetime) as to_date from events;"
+	@sudo -u postgres psql mentat_events -e -c "select count(*) as event_json_count, min(storagetime) as from_date, max(storagetime) as to_date from events_json;"
 	@echo ""
 #-------------------------------------------------------------------------------
...
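The import recipe above consumes a ``.cols``/``.dat`` file pair produced by the snapshot script below: the ``.dat`` file holds raw ``COPY`` output and the ``.cols`` file holds the comma-separated column list, so the import can name the target columns explicitly instead of relying on matching column order. A minimal sketch of that round-trip for a hypothetical table ``t``, written as plain shell (inside a Makefile recipe the command substitution would have to be escaped as ``$$(cat ...)``):

.. code-block:: shell

    # Dump: the raw rows plus the ordered column list of table "t".
    sudo -u postgres psql mentat_main -c "COPY (SELECT * FROM t) TO stdout" > t.dat
    sudo -u postgres psql mentat_main -c "COPY (SELECT string_agg(data.column_name, ',') FROM (SELECT column_name FROM information_schema.columns WHERE table_name = 't' ORDER BY ordinal_position) AS data) TO stdout" > t.cols

    # Restore: feed the saved column list into COPY so each row maps to the right column.
    sudo -u postgres psql mentat_main -c "COPY t ($(cat t.cols)) FROM stdin;" < t.dat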
@@ -28,7 +28,7 @@ for table_name in reports_events statistics_events changelogs_items
 do
     print_subtitle "Dumping chunk of table ${table_name} from metadata database"
     sudo -u mentat psql mentat_main -c "COPY (SELECT * FROM ${table_name} WHERE createtime >= '${BACKUP_DATE_FROM}'::timestamptz) TO stdout" > ${BACKUP_DIR}/${table_name}.dat
-    sudo -u mentat psql mentat_main -c "COPY (SELECT string_agg(column_name, ',') FROM information_schema.columns WHERE table_name = '${table_name}' ORDER BY ordinal_position) TO stdout" > ${BACKUP_DIR}/${table_name}.cols
+    sudo -u mentat psql mentat_main -c "COPY (SELECT string_agg(data.column_name, ',') FROM (SELECT column_name FROM information_schema.columns WHERE table_name = '${table_name}' ORDER BY ordinal_position) AS data) TO stdout" > ${BACKUP_DIR}/${table_name}.cols
 done

 BACKUP_DIR=/var/mentat/devsnapshots/devsnapshot/events
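The ``string_agg`` change above is a genuine bug fix, not a cosmetic one: in the original query the ``ORDER BY ordinal_position`` belonged to the outer aggregate query, where PostgreSQL rejects ordering by a column that is neither grouped nor aggregated, so the ``.cols`` file was not produced correctly. The fix orders the rows in a subquery before aggregating. For illustration only, PostgreSQL also accepts an equivalent form with the ``ORDER BY`` inside the aggregate call itself (not what this commit uses):

.. code-block:: shell

    # Equivalent alternative: order the aggregation input inside string_agg().
    sudo -u mentat psql mentat_main -c "COPY (SELECT string_agg(column_name, ',' ORDER BY ordinal_position) FROM information_schema.columns WHERE table_name = '${table_name}') TO stdout" > ${BACKUP_DIR}/${table_name}.cols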
@@ -36,13 +36,15 @@ mkdir -p $BACKUP_DIR
 chown mentat:mentat $BACKUP_DIR
 cd $BACKUP_DIR
-print_subtitle "Dumping chunk of table events from event database"
-sudo -u mentat psql mentat_events -c "COPY (SELECT * FROM events WHERE storagetime >= '${BACKUP_DATE_FROM}'::timestamptz) TO stdout" > ${BACKUP_DIR}/events.dat
-sudo -u mentat psql mentat_events -c "COPY (SELECT string_agg(column_name, ',') FROM information_schema.columns WHERE table_name = 'events' ORDER BY ordinal_position) TO stdout" > ${BACKUP_DIR}/events.cols
-print_subtitle "Dumping chunk of table events_json from event database"
-sudo -u mentat psql mentat_events -c "COPY (SELECT events_json.* FROM events_json JOIN events ON events_json.id = events.id WHERE events.storagetime >= '${BACKUP_DATE_FROM}'::timestamptz) TO stdout" > ${BACKUP_DIR}/events_json.dat
-sudo -u mentat psql mentat_events -c "COPY (SELECT string_agg(column_name, ',') FROM information_schema.columns WHERE table_name = 'events_json' ORDER BY ordinal_position) TO stdout" > ${BACKUP_DIR}/events_json.cols
+table_name='events'
+print_subtitle "Dumping chunk of table ${table_name} from event database"
+sudo -u mentat psql mentat_events -c "COPY (SELECT * FROM ${table_name} WHERE storagetime >= '${BACKUP_DATE_FROM}'::timestamptz) TO stdout" > ${BACKUP_DIR}/${table_name}.dat
+sudo -u mentat psql mentat_events -c "COPY (SELECT string_agg(data.column_name, ',') FROM (SELECT column_name FROM information_schema.columns WHERE table_name = '${table_name}' ORDER BY ordinal_position) AS data) TO stdout" > ${BACKUP_DIR}/${table_name}.cols
+table_name='events_json'
+print_subtitle "Dumping chunk of table ${table_name} from event database"
+sudo -u mentat psql mentat_events -c "COPY (SELECT events_json.* FROM events_json JOIN events ON events_json.id = events.id WHERE events.storagetime >= '${BACKUP_DATE_FROM}'::timestamptz) TO stdout" > ${BACKUP_DIR}/${table_name}.dat
+sudo -u mentat psql mentat_events -c "COPY (SELECT string_agg(data.column_name, ',') FROM (SELECT column_name FROM information_schema.columns WHERE table_name = '${table_name}' ORDER BY ordinal_position) AS data) TO stdout" > ${BACKUP_DIR}/${table_name}.cols
 print_subtitle "Size before packaging:"
 du -csh /var/mentat/devsnapshots/devsnapshot
@@ -61,9 +63,3 @@ ls -alh ${SNAPSHOT_FILE_NAME}
 find /var/mentat/devsnapshots/ -type f -mtime +7 -name '*.tar.gz' -delete
 print_title '<DONE> CREATING DEVELOPMENT SNAPSHOT'
-sudo -u mentat psql mentat_main -c "COPY (SELECT string_agg(column_name, ',') FROM information_schema.columns WHERE table_name = '${table_name}' ORDER BY ordinal_position) TO stdout" > ${BACKUP_DIR}/${table_name}.cols
-sudo -u mentat psql mentat_main -c "COPY (SELECT * FROM ${table_name} WHERE createtime >= '${BACKUP_DATE_FROM}'::timestamptz) TO stdout" > ${BACKUP_DIR}/${table_name}.dat
-sudo -u postgres psql mentat_main -c "COPY ${table_name} ($(cat ${BACKUP_DIR}/${table_name}.cols)) FROM stdin;" < ${BACKUP_DIR}/${table_name}.dat
@@ -654,19 +654,35 @@ combinations of Vagrant/VirtualBox versions:
    - ?
    - Y
-Next you must fetch yourself configuration file for GeoIP update utility, otherwise the
-provisioning script will fail:
+Prerequisites
+````````````````````````````````````````````````````````````````````````````````
+
+To make sure the provisioning is performed correctly, you must ensure the following
+prerequisites are satisfied.
+
+Core developers with access to CESNET's ``mentat-alt.cesnet.cz`` development server
+can simply execute the following command and be done:

 .. code-block:: shell

     make data-fetch-config

-Next is standard Vagrant stuff to initialize and boot-up your development box and connect to it
-via SSH:
+Other developers must create a `MaxMind <https://dev.maxmind.com/geoip/geolite2-free-geolocation-data?lang=en>`__
+account for the **GeoLite2** databases GeoLite2-ASN, GeoLite2-City and GeoLite2-Country. After
+registering, copy the template configuration file and enter your account ID and
+license key:

 .. code-block:: shell

-    vagrant up
+    cp ./vagrantenv/GeoIP.conf ./data/
+    vim ./data/GeoIP.conf  # and enter your credentials
+
+At this point you are all set to perform the standard Vagrant steps to initialize and
+boot up your development box and connect to it via SSH:
+
+.. code-block:: shell
+
+    time vagrant up
     vagrant ssh

 After connecting to the development machine you will be greeted with a banner and some
@@ -688,26 +704,34 @@ and be ready to work:

 At this point your development machine is ready, however the database is completely empty. There are
 no user accounts, no abuse groups, nothing. You may run Mentat's backend services, the user interface
-will however not be usable. For normal development workflow you will need a snapshot of production
-database. First fetch the snapshot from ``mentat-alt.cesnet.cz`` server to your host machine:
+will however not be usable. The first option is to populate the database with some basic fixtures:

 .. code-block:: shell

-    make data-fetch-dbsnapshot
+    (venv) !DEV! mentat@mentat-devel /vagrant $ mentat-dbmngr.py --command fixtures-add

-When that is done execute following command from within the development box:
+The second option is to start completely from scratch and use the following command to create
+the first administrator account:

 .. code-block:: shell

-    (venv) !DEV! mentat@mentat-devel /vagrant $ make data-fetch-dbsnapshot
+    (venv) !DEV! mentat@mentat-devel /vagrant $ mentat-dbmngr.py --command user-add login=superman "fullname=Clark Kent" email=kent@dailyplanet.com "organization=Daily Planet, inc." roles=user,admin

-Another option is instead of importing the snapshot to load basic data fixtures:
+Core developers with access to CESNET's ``mentat-alt.cesnet.cz`` development server
+can fetch and import a database snapshot to work with production-like data. First
+fetch the snapshot by executing the following command on your host machine:

 .. code-block:: shell

-    (venv) !DEV! mentat@mentat-devel /vagrant $ mentat-dbmngr.py --command fixtures-add
+    make data-fetch-dbsnapshot
+
+When that is done, execute the following command from within the development Vagrant guest box:
+
+.. code-block:: shell
+
+    (venv) !DEV! mentat@mentat-devel /vagrant $ make data-fetch-dbsnapshot

-When working in Vagrat box please note and keep in mind following:
+That is about it. When working in the Vagrant box please note and keep in mind the following:

 * By default you will be connected as user ``mentat`` with passwordless sudo privileges.
 * Default built-in user ``vagrant`` comes also with passwordless sudo privileges.
...
New file ``vagrantenv/GeoIP.conf`` (the template copied above):

# GeoIP.conf file for `geoipupdate` program, for versions >= 3.1.1.
# Used to update GeoIP databases from https://www.maxmind.com.
# For more information about this config file, visit the docs at
# https://dev.maxmind.com/geoip/geoipupdate/.
# `AccountID` is from your MaxMind account.
AccountID your-account-id-here
# `LicenseKey` is from your MaxMind account
LicenseKey your-license-key-here
# `EditionIDs` is from your MaxMind account.
EditionIDs GeoLite2-ASN GeoLite2-City GeoLite2-Country
# The remaining settings are OPTIONAL.
# The directory to store the database files. Defaults to /usr/share/GeoIP
# DatabaseDirectory /usr/share/GeoIP
# The server to use. Defaults to "updates.maxmind.com".
# Host updates.maxmind.com
# The proxy host name or IP address. You may optionally specify a
# port number, e.g., 127.0.0.1:8888. If no port number is specified, 1080
# will be used.
# Proxy 127.0.0.1:8888
# The user name and password to use with your proxy server.
# ProxyUserPassword username:password
# Whether to preserve modification times of files downloaded from the server.
# Defaults to "0".
# PreserveFileTimes 0
# The lock file to use. This ensures only one geoipupdate process can run at a
# time.
# Note: Once created, this lockfile is not removed from the filesystem.
# Defaults to ".geoipupdate.lock" under the DatabaseDirectory.
# LockFile /usr/share/GeoIP/.geoipupdate.lock
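Once the credentials are filled in, the databases can be refreshed with the ``geoipupdate`` utility. A minimal sketch of a manual run, assuming the defaults documented in the template above (the exact invocation used by the provisioning scripts is not part of this commit):

.. code-block:: shell

    # -f selects the config file, -d the target database directory, -v prints progress.
    sudo geoipupdate -f ./data/GeoIP.conf -d /usr/share/GeoIP -v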