Merge pull request #662 from NebulousLabs/ivo/db_backups_new

DB Backups.
Ivaylo Novakov 2021-04-07 14:05:20 +02:00 committed by GitHub
commit dd12262dc5
8 changed files with 181 additions and 120 deletions


@@ -32,11 +32,11 @@ Mongo needs a couple of extra steps in order to start a secure cluster.
- Open port 27017 on all nodes that will take part in the cluster. Ideally, you would only open the port for the other
nodes in the cluster.
- Manually add a `mgkey` file under `./docker/data/mongo` with the respective secret (
see [Mongo's keyfile access control](https://docs.mongodb.com/manual/tutorial/enforce-keyfile-access-control-in-existing-replica-set/)
for details).
- Manually run an initialisation `docker run` with extra environment variables that will initialise the admin user with
a password (example below).
- During the initialisation run mentioned above, we need to perform two extra steps within the container:
- Change the ownership of `mgkey` to `mongodb:mongodb`
- Change its permissions to 400
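The exact commands depend on the deployment, but a minimal sketch of the initialisation run and the keyfile fixes could look like the following. It assumes the stock `mongo` image and its standard `MONGO_INITDB_ROOT_USERNAME` / `MONGO_INITDB_ROOT_PASSWORD` variables; the container name, image tag, credentials and mounted path are placeholders, not this portal's actual configuration.

```sh
# Placeholder one-off initialisation run (adjust image tag, credentials and paths).
# Note: the official image only creates the admin user if the data directory is empty.
docker run --rm -d --name mongo-init \
  -v "$(pwd)/docker/data/mongo:/data/db" \
  -e MONGO_INITDB_ROOT_USERNAME=admin \
  -e MONGO_INITDB_ROOT_PASSWORD='change-me' \
  mongo:4.4

# The two extra steps from the list above, performed inside the container:
docker exec mongo-init chown mongodb:mongodb /data/db/mgkey
docker exec mongo-init chmod 400 /data/db/mgkey
docker stop mongo-init
```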


@@ -1,21 +0,0 @@
#!/bin/bash
# Get current working directory (pwd doesn't cut it)
cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment:
set -o allexport
source $cwd/../.env
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
echo "Missing AWS credentials!"
exit 1
fi
# Take the current datetime:
DT=`date +%Y-%m-%d`
# Create the backup:
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="BACKUP TO 's3://skynet-crdb-backups/backups/cockroach/$DT?AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID&AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY';"


@@ -1,25 +0,0 @@
#!/bin/bash
BACKUP=$1
if [[ $BACKUP == "" ]]; then
echo "No backup name given. It should look like '2020-01-29'."
exit 1
fi
# Get current working directory (pwd doesn't cut it)
cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment:
set -o allexport
source $cwd/../.env
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
echo "Missing AWS credentials!"
exit 1
fi
# Restore the backup:
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="RESTORE DATABASE defaultdb FROM 's3://skynet-crdb-backups/backups/cockroach/$DT?AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID&AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY';"

scripts/db_backup.sh (new executable file, 70 lines added)

@@ -0,0 +1,70 @@
#!/bin/bash
# Get current script directory (pwd doesn't cut it)
csd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment. We only grab the entries we need because otherwise we
# need to deal with the edge cases presented by problematic values.
set -o allexport
cat $csd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
source .tmpenv
rm .tmpenv
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
echo "Missing AWS credentials!"
exit 1
fi
# Check for backup path:
if [[ $S3_BACKUP_PATH == "" ]]; then
echo "Missing S3_BACKUP_PATH!"
exit 1
fi
# Take the current datetime:
DT=$(date +%Y-%m-%d)
### COCKROACH DB ###
echo "Creating a backup of CockroachDB:"
# Check if a backup already exists:
totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$DT --recursive --summarize | grep "cockroach" | wc -l)
if [ "$totalFoundObjects" -ge "1" ]; then
echo "Backup already exists for today. Skipping."
else
# Create a cockroachdb backup:
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="BACKUP TO '$S3_BACKUP_PATH/$DT/cockroach/?AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID&AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY';"
if [[ $? > 0 ]]; then
echo "Creating a CockroachDB backup failed. Skipping."
else
echo "Successfully backed up CockroachDB."
fi
fi
### MONGO DB ###
echo "Creating a backup of MongoDB:"
# Check if a backup already exists:
totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$DT --recursive --summarize | grep "mongo" | wc -l)
if [ "$totalFoundObjects" -ge "1" ]; then
echo "Backup already exists for today. Skipping."
else
# Create the backup:
docker exec mongo \
mongodump \
-o /data/db/backups/$DT \
mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT
# Capture mongodump's exit status before the chmod below overwrites it.
dump_status=$?
docker exec mongo chmod o+rw /data/db/backups/
if [[ $dump_status -gt 0 ]]; then
echo "Creating a MongoDB backup failed. Skipping."
else
# Compress the backup:
cd $csd/../docker/data/mongo/db/backups/ && ls -l && tar -czf mongo.tgz $DT && cd -
# Upload the backup to S3:
aws s3 cp $csd/../docker/data/mongo/db/backups/mongo.tgz $S3_BACKUP_PATH/$DT/mongo.tgz
# Clean up
rm -rf $csd/../docker/data/mongo/db/backups/mongo.tgz
echo "Finished MongoDB backup."
fi
docker exec mongo rm -rf /data/db/backups/$DT
fi
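Since the script is self-contained, one way to take the daily backup is a cron entry on the portal host. The schedule and paths below are only an illustration and assume the repository lives at `/home/user/skynet-webportal`.

```sh
# Illustrative crontab entry: run the backup every day at 03:30 and keep a log.
# Assumes the repo is checked out at /home/user/skynet-webportal and that
# cron's PATH includes the aws and docker binaries.
30 3 * * * /home/user/skynet-webportal/scripts/db_backup.sh >> /home/user/db_backup.log 2>&1
```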

scripts/db_restore.sh (new executable file, 104 lines added)

@@ -0,0 +1,104 @@
#!/bin/bash
BACKUP=$1
if [[ $BACKUP == "" ]]; then
echo "No backup name given. It should look like '2020-01-29'."
exit 1
fi
# Get current script directory (pwd doesn't cut it)
csd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment:
set -o allexport
cat $csd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
source .tmpenv
rm .tmpenv
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
echo "Missing AWS credentials!"
exit 1
fi
# Check for backup path:
if [[ $S3_BACKUP_PATH == "" ]]; then
echo "Missing S3_BACKUP_PATH!"
exit 1
fi
### COCKROACH DB ###
echo "Restoring CockroachDB."
# Check if the backup exists:
totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$BACKUP --recursive --summarize | grep "cockroach" | wc -l)
if [ "$totalFoundObjects" -eq "0" ]; then
echo "This backup doesn't exist!"
exit 1
fi
# Restore the backup:
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="ALTER DATABASE defaultdb RENAME TO defaultdb_backup;"
if [[ $? > 0 ]]; then
echo "Failed to rename existing CockroachDB database. Exiting."
exit 1
fi
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="RESTORE DATABASE defaultdb FROM '$S3_BACKUP_PATH/$BACKUP/cockroach?AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID&AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY';"
if [[ $? == 0 ]]; then
# Restoration succeeded, drop the backup.
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="DROP DATABASE defaultdb_backup;"
echo "CockroachDB restoration succeeded."
else
# Restoration failed, drop the new DB and put back the old one.
echo "CockroachDB restoration failed, rolling back."
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="DROP DATABASE defaultdb;"
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="ALTER DATABASE defaultdb_backup RENAME TO defaultdb;"
if [[ $? > 0 ]]; then
echo "ERROR: Rollback failed! Inspect manually!"
exit 1
else
echo "Rollback successful. Restoration cancelled. Exiting."
exit 0
fi
fi
### MONGO DB ###
# Check if the backup exists:
totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$BACKUP --recursive --summarize | grep "mongo.tgz" | wc -l)
if [ "$totalFoundObjects" -eq "0" ]; then
echo "This backup doesn't exist!"
exit 1
fi
# Get the backup from S3:
aws s3 cp $S3_BACKUP_PATH/$BACKUP/mongo.tgz mongo.tgz
# Prepare a clean `to_restore` dir:
rm -rf $csd/../docker/data/mongo/db/backups/to_restore
mkdir -p $csd/../docker/data/mongo/db/backups/to_restore
# Decompress the backup:
tar -xzf mongo.tgz -C $csd/../docker/data/mongo/db/backups/to_restore
rm mongo.tgz
# Restore the backup:
# The directory inside the archive is named after the backup date ($BACKUP),
# not `mongo`, due to the way db_backup.sh creates it.
docker exec mongo \
mongorestore \
mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT \
/data/db/backups/to_restore/$BACKUP
# Clean up:
rm -rf $csd/../docker/data/mongo/db/backups/to_restore
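For reference, the restore script takes the backup's date string as its only argument, matching the dated folders that db_backup.sh writes to `$S3_BACKUP_PATH`; the date below is just an example.

```sh
# Example invocation, run from the repository root (placeholder date):
./scripts/db_restore.sh 2021-04-01
```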


@@ -1,32 +0,0 @@
#!/bin/bash
# Get current working directory (pwd doesn't cut it)
cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment:
set -o allexport
source $cwd/../.env
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
echo "Missing AWS credentials!"
exit 1
fi
# Take the current datetime:
DT=`date +%Y-%m-%d`
# Check if a backup already exists:
totalFoundObjects=$(aws s3 ls s3://skynet-crdb-backups/backups/mongo/ --recursive --summarize | grep "$DT.tgz" | wc -l)
if [ "$totalFoundObjects" -eq "1" ]; then
echo "Backup already exists for today. Exiting."
exit 0
fi
# Create the backup:
docker exec mongo \
mongodump \
-o /data/db/backups/$DT \
mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT
# Compress the backup:
cd $cwd/../docker/data/mongo/db/backups/ && tar -czf $DT.tgz $DT && cd -
# Upload the backup to S3:
aws s3 cp $cwd/../docker/data/mongo/db/backups/$DT.tgz s3://skynet-crdb-backups/backups/mongo/
# Clean up
rm -rf $cwd/../docker/data/mongo/db/backups/$DT.tgz $cwd/../docker/data/mongo/db/backups/$DT


@@ -1,40 +0,0 @@
#!/bin/bash
BACKUP=$1
if [[ $BACKUP == "" ]]; then
echo "No backup name given. It should look like '2020-01-29'."
exit 1
fi
# Get current working directory (pwd doesn't cut it)
cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment:
set -o allexport
source $cwd/../.env
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
echo "Missing AWS credentials!"
exit 1
fi
# Check if the backup exists:
totalFoundObjects=$(aws s3 ls s3://skynet-crdb-backups/backups/mongo/ --recursive --summarize | grep "$BACKUP.tgz" | wc -l)
if [ "$totalFoundObjects" -eq "0" ]; then
echo "This backup doesn't exist!"
exit 1
fi
# Get the backup from S3:
aws s3 cp s3://skynet-crdb-backups/backups/mongo/$BACKUP.tgz $BACKUP.tgz
# Prepare a clean `to_restore` dir:
rm -rf $cwd/../docker/data/mongo/db/backups/to_restore
mkdir -p $cwd/../docker/data/mongo/db/backups/to_restore
# Decompress the backup:
tar -xzf $BACKUP.tgz -C $cwd/../docker/data/mongo/db/backups/to_restore
rm $BACKUP.tgz
# Restore the backup:
docker exec mongo \
mongorestore \
mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT \
/data/db/backups/to_restore/$BACKUP
# Clean up:
rm -rf $cwd/../docker/data/mongo/db/backups/to_restore


@@ -33,6 +33,8 @@ You may want to fork this repository and replace ssh keys in
1. `apt-get update && apt-get install sudo -y` to make sure `sudo` is available
1. `adduser user` to create user called `user` (creates `/home/user` directory)
1. `usermod -aG sudo user` to add this new user to sudo group
1. `sudo groupadd docker` to create a group for docker (it might already exist)
1. `sudo usermod -aG docker user` to add your user to that group
1. Quit the ssh session with `exit` command
You can now ssh into your machine as the user `user`.
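If you want to double-check the group changes before moving on, a quick look at the user's groups is enough; this check is purely illustrative and not part of the setup scripts.

```sh
# Should list both `sudo` and `docker` among the user's groups.
groups user
```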
@@ -47,6 +49,7 @@ You can now ssh into your machine as the user `user`.
1. `sudo apt-get install git -y` to install git
1. `git clone https://github.com/NebulousLabs/skynet-webportal`
1. `cd skynet-webportal`
1. run the setup scripts below in the exact order listed and provide the sudo password when asked (if one of them fails, you can retry just that one before proceeding further)
1. `/home/user/skynet-webportal/setup-scripts/setup-server.sh`
1. `/home/user/skynet-webportal/setup-scripts/setup-docker-services.sh`
@@ -94,6 +97,8 @@ At this point we have almost everything running, we just need to set up your wallet
- `COOKIE_DOMAIN` (optional) if using `accounts` this is the domain to which your cookies will be issued
- `COOKIE_HASH_KEY` (optional) if using `accounts` hashing secret, at least 32 bytes
- `COOKIE_ENC_KEY` (optional) if using `accounts` encryption key, at least 32 bytes
- `S3_BACKUP_PATH` (optional) if using `accounts` and backing up the databases to S3. This should be an S3 bucket,
together with the path inside the bucket where the daily backups will be stored (see the example below).
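For reference, the backup-related `.env` entries might look like the snippet below; the bucket name and prefix are placeholders, and the AWS credentials are whatever key pair has write access to that bucket.

```sh
# Example .env excerpt (placeholder values).
AWS_ACCESS_KEY_ID=<your-aws-key-id>
AWS_SECRET_ACCESS_KEY=<your-aws-secret>
S3_BACKUP_PATH=s3://my-skynet-backups/backups
```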
1. if you have a custom domain and you configured it in `DOMAIN_NAME`, edit `/home/user/skynet-webportal/docker/caddy/Caddyfile` and uncomment `import custom.domain`
1. only for siasky.net domain instances: edit `/home/user/skynet-webportal/docker/caddy/Caddyfile`, uncomment `import siasky.net`