From 482f9655c4e28fe5a24f0122986fd7c209709997 Mon Sep 17 00:00:00 2001
From: Ivaylo Novakov
Date: Thu, 1 Apr 2021 15:30:06 +0200
Subject: [PATCH 1/3] Merge branch 'ivo/db_backups' into ivo/db_backups_new

---
 scripts/crdb_backup.sh   |  21 --------
 scripts/crdb_restore.sh  |  25 ----------
 scripts/db_backup.sh     |  70 ++++++++++++++++++++++++++
 scripts/db_restore.sh    | 104 +++++++++++++++++++++++++++++++++++++++
 scripts/mongo_backup.sh  |  32 ------------
 scripts/mongo_restore.sh |  40 ---------------
 setup-scripts/README.md  |   2 +
 7 files changed, 176 insertions(+), 118 deletions(-)
 delete mode 100644 scripts/crdb_backup.sh
 delete mode 100644 scripts/crdb_restore.sh
 create mode 100755 scripts/db_backup.sh
 create mode 100755 scripts/db_restore.sh
 delete mode 100644 scripts/mongo_backup.sh
 delete mode 100644 scripts/mongo_restore.sh

diff --git a/scripts/crdb_backup.sh b/scripts/crdb_backup.sh
deleted file mode 100644
index a216d9db..00000000
--- a/scripts/crdb_backup.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-# Get current working directory (pwd doesn't cut it)
-cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
-# Set the environment:
-set -o allexport
-source $cwd/../.env
-set +o allexport
-# Check for AWS credentials:
-if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
-  echo "Missing AWS credentials!"
-  exit 1
-fi
-# Take the current datetime:
-DT=`date +%Y-%m-%d`
-# Create the backup:
-docker exec cockroach \
-  cockroach sql \
-  --host cockroach:26257 \
-  --certs-dir=/certs \
-  --execute="BACKUP TO 's3://skynet-crdb-backups/backups/cockroach/$DT?AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID&AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY';"
diff --git a/scripts/crdb_restore.sh b/scripts/crdb_restore.sh
deleted file mode 100644
index a316984f..00000000
--- a/scripts/crdb_restore.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-BACKUP=$1
-if [[ $BACKUP == "" ]]; then
-  echo "No backup name given. It should look like '2020-01-29'."
-  exit 1
-fi
-
-# Get current working directory (pwd doesn't cut it)
-cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
-# Set the environment:
-set -o allexport
-source $cwd/../.env
-set +o allexport
-# Check for AWS credentials:
-if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
-  echo "Missing AWS credentials!"
-  exit 1
-fi
-# Restore the backup:
-docker exec cockroach \
-  cockroach sql \
-  --host cockroach:26257 \
-  --certs-dir=/certs \
-  --execute="RESTORE DATABASE defaultdb FROM 's3://skynet-crdb-backups/backups/cockroach/$DT?AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID&AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY';"
diff --git a/scripts/db_backup.sh b/scripts/db_backup.sh
new file mode 100755
index 00000000..7ec42741
--- /dev/null
+++ b/scripts/db_backup.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+# Get current working directory (pwd doesn't cut it)
+cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
+# Set the environment. We only grab the entries we need because otherwise we
+# need to deal with the edge cases presented by problematic values.
+set -o allexport
+cat $cwd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
+source .tmpenv
+rm .tmpenv
+set +o allexport
+# Check for AWS credentials:
+if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
+  echo "Missing AWS credentials!"
+  exit 1
+fi
+# Check for backup path:
+if [[ $S3_BACKUP_PATH == "" ]]; then
+  echo "Missing S3_BACKUP_PATH!"
+  exit 1
+fi
+# Take the current datetime:
+DT=$(date +%Y-%m-%d)
+
+### COCKROACH DB ###
+echo "Creating a backup of CockroachDB:"
+# Check if a backup already exists:
+totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$DT --recursive --summarize | grep "cockroach" | wc -l)
+if [ "$totalFoundObjects" -ge "1" ]; then
+  echo "Backup already exists for today. Skipping."
+else
+  # Create a cockroachdb backup:
+  docker exec cockroach \
+    cockroach sql \
+    --host cockroach:26257 \
+    --certs-dir=/certs \
+    --execute="BACKUP TO '$S3_BACKUP_PATH/$DT/cockroach/?AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID&AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY';"
+  if [[ $? -gt 0 ]]; then
+    echo "Creating a CockroachDB backup failed. Skipping."
+  else
+    echo "Successfully backed up CockroachDB."
+  fi
+fi
+
+### MONGO DB ###
+echo "Creating a backup of MongoDB:"
+# Check if a backup already exists:
+totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$DT --recursive --summarize | grep "mongo" | wc -l)
+if [ "$totalFoundObjects" -ge "1" ]; then
+  echo "Backup already exists for today. Skipping."
+else
+  # Create the backup:
+  docker exec mongo \
+    mongodump \
+    -o /data/db/backups/$DT \
+    mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT
+  # Capture mongodump's exit status before the chmod below overwrites $?:
+  dumpStatus=$?
+  docker exec mongo chmod o+rw /data/db/backups/
+  if [[ $dumpStatus -gt 0 ]]; then
+    echo "Creating a MongoDB backup failed. Skipping."
+  else
+    # Compress the backup:
+    cd $cwd/../docker/data/mongo/db/backups/ && tar -czf mongo.tgz $DT && cd -
+    # Upload the backup to S3:
+    aws s3 cp $cwd/../docker/data/mongo/db/backups/mongo.tgz $S3_BACKUP_PATH/$DT/mongo.tgz
+    # Clean up
+    rm -rf $cwd/../docker/data/mongo/db/backups/mongo.tgz
+    echo "Finished MongoDB backup."
+  fi
+  docker exec mongo rm -rf /data/db/backups/$DT
+fi
diff --git a/scripts/db_restore.sh b/scripts/db_restore.sh
new file mode 100755
index 00000000..ebda2a54
--- /dev/null
+++ b/scripts/db_restore.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+BACKUP=$1
+if [[ $BACKUP == "" ]]; then
+  echo "No backup name given. It should look like '2020-01-29'."
+  exit 1
+fi
+
+# Get current working directory (pwd doesn't cut it)
+cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
+# Set the environment:
+set -o allexport
+cat $cwd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
+source .tmpenv
+rm .tmpenv
+set +o allexport
+# Check for AWS credentials:
+if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
+  echo "Missing AWS credentials!"
+  exit 1
+fi
+# Check for backup path:
+if [[ $S3_BACKUP_PATH == "" ]]; then
+  echo "Missing S3_BACKUP_PATH!"
+  exit 1
+fi
+
+### COCKROACH DB ###
+echo "Restoring CockroachDB."
+# Check if the backup exists:
+totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$BACKUP --recursive --summarize | grep "cockroach" | wc -l)
+if [ "$totalFoundObjects" -eq "0" ]; then
+  echo "This backup doesn't exist!"
+  exit 1
+fi
+# Restore the backup:
+docker exec cockroach \
+  cockroach sql \
+  --host cockroach:26257 \
+  --certs-dir=/certs \
+  --execute="ALTER DATABASE defaultdb RENAME TO defaultdb_backup;"
+if [[ $? -gt 0 ]]; then
+  echo "Failed to rename existing CockroachDB database. Exiting."
+  exit 1
+fi
+docker exec cockroach \
+  cockroach sql \
+  --host cockroach:26257 \
+  --certs-dir=/certs \
+  --execute="RESTORE DATABASE defaultdb FROM '$S3_BACKUP_PATH/$BACKUP/cockroach?AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID&AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY';"
+if [[ $? == 0 ]]; then
+  # Restoration succeeded, drop the backup.
+  docker exec cockroach \
+    cockroach sql \
+    --host cockroach:26257 \
+    --certs-dir=/certs \
+    --execute="DROP DATABASE defaultdb_backup;"
+  echo "CockroachDB restoration succeeded."
+else
+  # Restoration failed, drop the new DB and put back the old one.
+  echo "CockroachDB restoration failed, rolling back."
+  docker exec cockroach \
+    cockroach sql \
+    --host cockroach:26257 \
+    --certs-dir=/certs \
+    --execute="DROP DATABASE defaultdb;"
+  docker exec cockroach \
+    cockroach sql \
+    --host cockroach:26257 \
+    --certs-dir=/certs \
+    --execute="ALTER DATABASE defaultdb_backup RENAME TO defaultdb;"
+  if [[ $? -gt 0 ]]; then
+    echo "ERROR: Rollback failed! Inspect manually!"
+    exit 1
+  else
+    echo "Rollback successful. Restoration cancelled. Exiting."
+    exit 0
+  fi
+fi
+
+### MONGO DB ###
+echo "Restoring MongoDB."
+# Check if the backup exists:
+totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$BACKUP --recursive --summarize | grep "mongo.tgz" | wc -l)
+if [ "$totalFoundObjects" -eq "0" ]; then
+  echo "This backup doesn't exist!"
+  exit 1
+fi
+# Get the backup from S3:
+aws s3 cp $S3_BACKUP_PATH/$BACKUP/mongo.tgz mongo.tgz
+# Prepare a clean `to_restore` dir:
+rm -rf $cwd/../docker/data/mongo/db/backups/to_restore
+mkdir -p $cwd/../docker/data/mongo/db/backups/to_restore
+# Decompress the backup:
+tar -xzf mongo.tgz -C $cwd/../docker/data/mongo/db/backups/to_restore
+rm mongo.tgz
+# Restore the backup:
+# The directory we restore from is named after the backup date ($BACKUP)
+# rather than `mongo`, due to the way the backup is created.
+docker exec mongo \
+  mongorestore \
+  mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT \
+  /data/db/backups/to_restore/$BACKUP
+# Clean up:
+rm -rf $cwd/../docker/data/mongo/db/backups/to_restore
diff --git a/scripts/mongo_backup.sh b/scripts/mongo_backup.sh
deleted file mode 100644
index 9867bbc3..00000000
--- a/scripts/mongo_backup.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-# Get current working directory (pwd doesn't cut it)
-cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
-# Set the environment:
-set -o allexport
-source $cwd/../.env
-set +o allexport
-# Check for AWS credentials:
-if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
-  echo "Missing AWS credentials!"
-  exit 1
-fi
-# Take the current datetime:
-DT=`date +%Y-%m-%d`
-# Check if a backup already exists:
-totalFoundObjects=$(aws s3 ls s3://skynet-crdb-backups/backups/mongo/ --recursive --summarize | grep "$DT.tgz" | wc -l)
-if [ "$totalFoundObjects" -eq "1" ]; then
-  echo "Backup already exists for today. Exiting."
-  exit 0
-fi
-# Create the backup:
-docker exec mongo \
-  mongodump \
-  -o /data/db/backups/$DT \
-  mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT
-# Compress the backup:
-cd $cwd/../docker/data/mongo/db/backups/ && tar -czf $DT.tgz $DT && cd -
-# Upload the backup to S3:
-aws s3 cp $DT.tgz s3://skynet-crdb-backups/backups/mongo/
-# Clean up
-rm -rf $DT.tgz $cwd/../docker/data/mongo/db/backups/$DT
diff --git a/scripts/mongo_restore.sh b/scripts/mongo_restore.sh
deleted file mode 100644
index f9f4396c..00000000
--- a/scripts/mongo_restore.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-BACKUP=$1
-if [[ $BACKUP == "" ]]; then
-  echo "No backup name given. It should look like '2020-01-29'."
-  exit 1
-fi
-
-# Get current working directory (pwd doesn't cut it)
-cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
-# Set the environment:
-set -o allexport
-source $cwd/../.env
-set +o allexport
-# Check for AWS credentials:
-if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
-  echo "Missing AWS credentials!"
-  exit 1
-fi
-# Check if the backup exists:
-totalFoundObjects=$(aws s3 ls s3://skynet-crdb-backups/backups/mongo/ --recursive --summarize | grep "$DT.tgz" | wc -l)
-if [ "$totalFoundObjects" -eq "0" ]; then
-  echo "This backup doesn't exist!"
-  exit 1
-fi
-# Get the backup from S3:
-aws s3 cp s3://skynet-crdb-backups/backups/mongo/$BACKUP.tgz $BACKUP.tgz
-# Prepare a clean `to_restore` dir:
-rm -rf $cwd/../docker/data/mongo/db/backups/to_restore
-mkdir -p $cwd/../docker/data/mongo/db/backups/to_restore
-# Decompress the backup:
-tar -xzf $BACKUP.tgz -C $cwd/../docker/data/mongo/db/backups/to_restore
-rm $BACKUP.tgz
-# Restore the backup:
-docker exec mongo \
-  mongorestore \
-  mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT \
-  /data/db/backups/to_restore/$BACKUP
-# Clean up:
-rm -rf $cwd/../docker/data/mongo/db/backups/to_restore
diff --git a/setup-scripts/README.md b/setup-scripts/README.md
index c6407c61..0637fa2b 100644
--- a/setup-scripts/README.md
+++ b/setup-scripts/README.md
@@ -94,6 +94,8 @@ At this point we have almost everything running, we just need to set up your wal
 - `COOKIE_DOMAIN` (optional) if using `accounts` this is the domain to which your cookies will be issued
 - `COOKIE_HASH_KEY` (optional) if using `accounts` hashing secret, at least 32 bytes
 - `COOKIE_ENC_KEY` (optional) if using `accounts` encryption key, at least 32 bytes
+- `S3_BACKUP_PATH` (optional) if using `accounts` and backing up the databases to S3. This should be an S3 path,
+  i.e. a bucket plus the location within that bucket where the daily backups should be stored.
 1. if you have a custom domain and you configured it in `DOMAIN_NAME`, edit `/home/user/skynet-webportal/docker/caddy/Caddyfile` and uncomment `import custom.domain`
 1. only for siasky.net domain instances: edit `/home/user/skynet-webportal/docker/caddy/Caddyfile`, uncomment `import siasky.net`
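
A note on PATCH 1/3, outside the patch itself: db_backup.sh and db_restore.sh source only the variables named in the grep filter above from the portal's `.env`. A minimal sketch of what those entries might look like; every value below is a placeholder for illustration, not something the patch prescribes:

    # Illustrative .env entries consumed by db_backup.sh / db_restore.sh:
    AWS_ACCESS_KEY_ID=AKIAXXXXXXXXXXXXXXXX          # placeholder
    AWS_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxx     # placeholder
    S3_BACKUP_PATH=s3://my-portal-backups/backups  # assumed bucket + prefix
    SKYNET_DB_USER=admin                           # placeholder
    SKYNET_DB_PASS=changeme                        # placeholder
    SKYNET_DB_HOST=mongo                           # assumed container hostname
    SKYNET_DB_PORT=27017                           # MongoDB's default port

The scripts append /$DT (or /$BACKUP when restoring) to S3_BACKUP_PATH, so a value without a trailing slash keeps the resulting object keys clean.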
From 6971b3938b570e3c4bdcb19b390f8be9bf69f16c Mon Sep 17 00:00:00 2001
From: Ivaylo Novakov
Date: Tue, 6 Apr 2021 18:12:24 +0200
Subject: [PATCH 2/3] Update the READMEs after setting up a couple of portals.

---
 README.md               | 4 ++--
 setup-scripts/README.md | 3 +++
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 59c2e5df..c75abf5a 100644
--- a/README.md
+++ b/README.md
@@ -32,11 +32,11 @@ Mongo needs a couple of extra steps in order to start a secure cluster.
 
 - Open port 27017 on all nodes that will take part in the cluster. Ideally, you would only open the port for the other
   nodes in the cluster.
-- Manually run an initialisation `docker run` with extra environment variables that will initialise the admin user with
-  a password (example below).
 - Manually add a `mgkey` file under `./docker/data/mongo` with the respective secret (
   see [Mongo's keyfile access control](https://docs.mongodb.com/manual/tutorial/enforce-keyfile-access-control-in-existing-replica-set/)
   for details).
+- Manually run an initialisation `docker run` with extra environment variables that will initialise the admin user with
+  a password (example below).
 - During the initialisation run mentioned above, we need to take two extra steps within the container:
   - Change the ownership of `mgkey` to `mongodb:mongodb`
   - Change its permissions to 400
diff --git a/setup-scripts/README.md b/setup-scripts/README.md
index 0637fa2b..a8b42ba8 100644
--- a/setup-scripts/README.md
+++ b/setup-scripts/README.md
@@ -33,6 +33,8 @@ You may want to fork this repository and replace ssh keys in
 1. `apt-get update && apt-get install sudo -y` to make sure `sudo` is available
 1. `adduser user` to create user called `user` (creates `/home/user` directory)
 1. `usermod -aG sudo user` to add this new user to sudo group
+1. `sudo groupadd docker` to create a group for docker (it might already exist)
+1. `sudo usermod -aG docker user` to add your user to that group
 1. Quit the ssh session with `exit` command
 
 You can now ssh into your machine as the user `user`.
@@ -47,6 +49,7 @@
 1. `sudo apt-get install git -y` to install git
 1. `git clone https://github.com/NebulousLabs/skynet-webportal`
+1. `cd skynet-webportal`
 1. run setup scripts in the exact order and provide sudo password when asked (if one of them fails, you can retry just this one before proceeding further)
    1. `/home/user/skynet-webportal/setup-scripts/setup-server.sh`
    1. `/home/user/skynet-webportal/setup-scripts/setup-docker-services.sh`
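
The "(example below)" that the moved README bullet points to lies outside this hunk and is not shown here. Purely as an illustration, an initialisation run along the lines the README describes might look roughly like the following; the container name, mount, and credentials are assumptions, while `MONGO_INITDB_ROOT_USERNAME` and `MONGO_INITDB_ROOT_PASSWORD` are the official mongo Docker image's documented variables for creating the admin user:

    # Hypothetical one-off initialisation run (names, paths and values assumed):
    docker run -d --name mongo \
        -v /home/user/skynet-webportal/docker/data/mongo/db:/data/db \
        -e MONGO_INITDB_ROOT_USERNAME=admin \
        -e MONGO_INITDB_ROOT_PASSWORD=changeme \
        mongo
    # The two extra steps from the README, run inside that container;
    # the in-container location of mgkey is likewise an assumption:
    docker exec mongo chown mongodb:mongodb /data/db/mgkey
    docker exec mongo chmod 400 /data/db/mgkey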
From 0f18d062d9bc0cd2d34ab60698db16f7ef7eef95 Mon Sep 17 00:00:00 2001
From: Ivaylo Novakov
Date: Wed, 7 Apr 2021 14:04:46 +0200
Subject: [PATCH 3/3] Better custom command name.

---
 scripts/db_backup.sh  | 12 ++++++------
 scripts/db_restore.sh | 14 +++++++-------
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/scripts/db_backup.sh b/scripts/db_backup.sh
index 7ec42741..ba1c8e96 100755
--- a/scripts/db_backup.sh
+++ b/scripts/db_backup.sh
@@ -1,11 +1,11 @@
 #!/bin/bash
 
-# Get current working directory (pwd doesn't cut it)
-cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
+# Get current script directory (pwd doesn't cut it)
+csd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
 # Set the environment. We only grab the entries we need because otherwise we
 # need to deal with the edge cases presented by problematic values.
 set -o allexport
-cat $cwd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
+cat $csd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
 source .tmpenv
 rm .tmpenv
 set +o allexport
@@ -59,11 +59,11 @@ else
     echo "Creating a MongoDB backup failed. Skipping."
   else
     # Compress the backup:
-    cd $cwd/../docker/data/mongo/db/backups/ && tar -czf mongo.tgz $DT && cd -
+    cd $csd/../docker/data/mongo/db/backups/ && tar -czf mongo.tgz $DT && cd -
     # Upload the backup to S3:
-    aws s3 cp $cwd/../docker/data/mongo/db/backups/mongo.tgz $S3_BACKUP_PATH/$DT/mongo.tgz
+    aws s3 cp $csd/../docker/data/mongo/db/backups/mongo.tgz $S3_BACKUP_PATH/$DT/mongo.tgz
     # Clean up
-    rm -rf $cwd/../docker/data/mongo/db/backups/mongo.tgz
+    rm -rf $csd/../docker/data/mongo/db/backups/mongo.tgz
     echo "Finished MongoDB backup."
   fi
   docker exec mongo rm -rf /data/db/backups/$DT
diff --git a/scripts/db_restore.sh b/scripts/db_restore.sh
index ebda2a54..79e6e0f7 100755
--- a/scripts/db_restore.sh
+++ b/scripts/db_restore.sh
@@ -6,11 +6,11 @@ if [[ $BACKUP == "" ]]; then
   exit 1
 fi
 
-# Get current working directory (pwd doesn't cut it)
-cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
+# Get current script directory (pwd doesn't cut it)
+csd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
 # Set the environment:
 set -o allexport
-cat $cwd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
+cat $csd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
 source .tmpenv
 rm .tmpenv
 set +o allexport
@@ -88,10 +88,10 @@
 # Get the backup from S3:
 aws s3 cp $S3_BACKUP_PATH/$BACKUP/mongo.tgz mongo.tgz
 # Prepare a clean `to_restore` dir:
-rm -rf $cwd/../docker/data/mongo/db/backups/to_restore
-mkdir -p $cwd/../docker/data/mongo/db/backups/to_restore
+rm -rf $csd/../docker/data/mongo/db/backups/to_restore
+mkdir -p $csd/../docker/data/mongo/db/backups/to_restore
 # Decompress the backup:
-tar -xzf mongo.tgz -C $cwd/../docker/data/mongo/db/backups/to_restore
+tar -xzf mongo.tgz -C $csd/../docker/data/mongo/db/backups/to_restore
 rm mongo.tgz
 # Restore the backup:
 # The directory we restore from is named after the backup date ($BACKUP)
 # rather than `mongo`, due to the way the backup is created.
@@ -101,4 +101,4 @@ docker exec mongo \
   mongorestore \
   mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT \
   /data/db/backups/to_restore/$BACKUP
 # Clean up:
-rm -rf $cwd/../docker/data/mongo/db/backups/to_restore
+rm -rf $csd/../docker/data/mongo/db/backups/to_restore
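
A closing usage note, outside the patch series itself: because db_backup.sh skips its work when a backup for the current date already exists in S3, it is safe to run from a daily scheduler. A sketch assuming the /home/user/skynet-webportal layout used throughout the READMEs; the schedule and log location are assumptions:

    # Hypothetical crontab entry taking a backup every day at 04:30:
    30 4 * * * /home/user/skynet-webportal/scripts/db_backup.sh >>/home/user/db_backup.log 2>&1

    # Restoring a specific day's backup by name, in the format the scripts
    # expect (e.g. '2020-01-29'):
    /home/user/skynet-webportal/scripts/db_restore.sh 2020-01-29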