Merge branch 'ivo/db_backups' into ivo/db_backups_new

Ivaylo Novakov 2021-04-01 15:30:06 +02:00
parent fede204c6b
commit 482f9655c4
7 changed files with 176 additions and 118 deletions

@@ -1,21 +0,0 @@
#!/bin/bash
# Get current working directory (pwd doesn't cut it)
cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment:
set -o allexport
source $cwd/../.env
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
echo "Missing AWS credentials!"
exit 1
fi
# Take the current datetime:
DT=`date +%Y-%m-%d`
# Create the backup:
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="BACKUP TO 's3://skynet-crdb-backups/backups/cockroach/$DT?AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID&AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY';"

@@ -1,25 +0,0 @@
#!/bin/bash
BACKUP=$1
if [[ $BACKUP == "" ]]; then
echo "No backup name given. It should look like '2020-01-29'."
exit 1
fi
# Get current working directory (pwd doesn't cut it)
cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment:
set -o allexport
source $cwd/../.env
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
echo "Missing AWS credentials!"
exit 1
fi
# Restore the backup:
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="RESTORE DATABASE defaultdb FROM 's3://skynet-crdb-backups/backups/cockroach/$DT?AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID&AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY';"

scripts/db_backup.sh (new executable file)

@@ -0,0 +1,70 @@
#!/bin/bash
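# Back up CockroachDB and MongoDB to S3. Meant to run unattended once a day;
# it skips any database that already has a backup for the current date.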
# Get the directory this script lives in (plain pwd returns the caller's directory):
cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment. We load only the variables we need because sourcing the
# whole .env file would mean handling edge cases caused by problematic values.
set -o allexport
grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" $cwd/../.env >.tmpenv
source .tmpenv
rm .tmpenv
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
echo "Missing AWS credentials!"
exit 1
fi
# Check for backup path:
if [[ $S3_BACKUP_PATH == "" ]]; then
echo "Missing S3_BACKUP_PATH!"
exit 1
fi
# Take the current datetime:
DT=$(date +%Y-%m-%d)
### COCKROACH DB ###
echo "Creating a backup of CockroachDB:"
# Check if a backup already exists:
totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$DT --recursive --summarize | grep "cockroach" | wc -l)
if [ "$totalFoundObjects" -ge "1" ]; then
echo "Backup already exists for today. Skipping."
else
# Create a cockroachdb backup:
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="BACKUP TO '$S3_BACKUP_PATH/$DT/cockroach/?AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID&AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY';"
if [[ $? -gt 0 ]]; then
echo "Creating a CockroachDB backup failed. Skipping."
else
echo "Successfully backed up CockroachDB."
fi
fi
### MONGO DB ###
echo "Creating a backup of MongoDB:"
# Check if a backup already exists:
totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$DT --recursive --summarize | grep "mongo" | wc -l)
if [ "$totalFoundObjects" -ge "1" ]; then
echo "Backup already exists for today. Skipping."
else
# Create the backup:
docker exec mongo \
mongodump \
-o /data/db/backups/$DT \
mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT
# Capture mongodump's exit status before chmod overwrites $?:
dumpStatus=$?
docker exec mongo chmod o+rw /data/db/backups/
if [[ $dumpStatus -gt 0 ]]; then
echo "Creating a MongoDB backup failed. Skipping."
else
# Compress the backup:
cd $cwd/../docker/data/mongo/db/backups/ && ls -l && tar -czf mongo.tgz $DT && cd -
# Upload the backup to S3:
aws s3 cp $cwd/../docker/data/mongo/db/backups/mongo.tgz $S3_BACKUP_PATH/$DT/mongo.tgz
# Clean up the local archive:
rm -rf $cwd/../docker/data/mongo/db/backups/mongo.tgz
echo "Finished MongoDB backup."
fi
docker exec mongo rm -rf /data/db/backups/$DT
fi
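# A hypothetical scheduling example: running this script from a daily cron job
# (install path assumed from the portal setup docs) keeps one backup per day:
#   0 3 * * * /home/user/skynet-webportal/scripts/db_backup.sh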

scripts/db_restore.sh (new executable file)

@@ -0,0 +1,104 @@
#!/bin/bash
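# Restore the CockroachDB and MongoDB backups created by db_backup.sh.
# Usage: db_restore.sh <backup-name>, e.g. db_restore.sh 2020-01-29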
BACKUP=$1
if [[ $BACKUP == "" ]]; then
echo "No backup name given. It should look like '2020-01-29'."
exit 1
fi
# Get the directory this script lives in (plain pwd returns the caller's directory):
cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment:
set -o allexport
grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" $cwd/../.env >.tmpenv
source .tmpenv
rm .tmpenv
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
echo "Missing AWS credentials!"
exit 1
fi
# Check for backup path:
if [[ $S3_BACKUP_PATH == "" ]]; then
echo "Missing S3_BACKUP_PATH!"
exit 1
fi
### COCKROACH DB ###
echo "Restoring CockroachDB."
# Check if the backup exists:
totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$BACKUP --recursive --summarize | grep "cockroach" | wc -l)
if [ "$totalFoundObjects" -eq "0" ]; then
echo "This backup doesn't exist!"
exit 1
fi
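# Restore strategy: rename the live database out of the way, restore the backup
# in its place, then drop the renamed copy on success or swap it back on failure.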
# Restore the backup:
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="ALTER DATABASE defaultdb RENAME TO defaultdb_backup;"
if [[ $? -gt 0 ]]; then
echo "Failed to rename existing CockroachDB database. Exiting."
exit 1
fi
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="RESTORE DATABASE defaultdb FROM '$S3_BACKUP_PATH/$BACKUP/cockroach?AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID&AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY';"
if [[ $? == 0 ]]; then
# Restoration succeeded, drop the backup.
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="DROP DATABASE defaultdb_backup;"
echo "CockroachDB restoration succeeded."
else
# Restoration failed, drop the new DB and put back the old one.
echo "CockroachDB restoration failed, rolling back."
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="DROP DATABASE defaultdb;"
docker exec cockroach \
cockroach sql \
--host cockroach:26257 \
--certs-dir=/certs \
--execute="ALTER DATABASE defaultdb_backup RENAME TO defaultdb;"
if [[ $? -gt 0 ]]; then
echo "ERROR: Rollback failed! Inspect manually!"
exit 1
else
echo "Rollback successful. Restoration cancelled. Exiting."
exit 0
fi
fi
### MONGO DB ###
# Check if the backup exists:
totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$BACKUP --recursive --summarize | grep "mongo.tgz" | wc -l)
if [ "$totalFoundObjects" -eq "0" ]; then
echo "This backup doesn't exist!"
exit 1
fi
# Get the backup from S3:
aws s3 cp $S3_BACKUP_PATH/$BACKUP/mongo.tgz mongo.tgz
# Prepare a clean `to_restore` dir:
rm -rf $cwd/../docker/data/mongo/db/backups/to_restore
mkdir -p $cwd/../docker/data/mongo/db/backups/to_restore
# Decompress the backup:
tar -xzf mongo.tgz -C $cwd/../docker/data/mongo/db/backups/to_restore
rm mongo.tgz
# Restore the backup:
# The directory inside the archive is named after the backup date ($BACKUP),
# not `mongo`, because that's how db_backup.sh creates it.
docker exec mongo \
mongorestore \
mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT \
/data/db/backups/to_restore/$BACKUP
# Clean up:
rm -rf $cwd/../docker/data/mongo/db/backups/to_restore

@@ -1,32 +0,0 @@
#!/bin/bash
# Get current working directory (pwd doesn't cut it)
cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment:
set -o allexport
source $cwd/../.env
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
echo "Missing AWS credentials!"
exit 1
fi
# Take the current datetime:
DT=`date +%Y-%m-%d`
# Check if a backup already exists:
totalFoundObjects=$(aws s3 ls s3://skynet-crdb-backups/backups/mongo/ --recursive --summarize | grep "$DT.tgz" | wc -l)
if [ "$totalFoundObjects" -eq "1" ]; then
echo "Backup already exists for today. Exiting."
exit 0
fi
# Create the backup:
docker exec mongo \
mongodump \
-o /data/db/backups/$DT \
mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT
# Compress the backup:
cd $cwd/../docker/data/mongo/db/backups/ && tar -czf $DT.tgz $DT && cd -
# Upload the backup to S3:
aws s3 cp $DT.tgz s3://skynet-crdb-backups/backups/mongo/
# Clean up
rm -rf $DT.tgz $cwd/../docker/data/mongo/db/backups/$DT

@@ -1,40 +0,0 @@
#!/bin/bash
BACKUP=$1
if [[ $BACKUP == "" ]]; then
echo "No backup name given. It should look like '2020-01-29'."
exit 1
fi
# Get current working directory (pwd doesn't cut it)
cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment:
set -o allexport
source $cwd/../.env
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
echo "Missing AWS credentials!"
exit 1
fi
# Check if the backup exists:
totalFoundObjects=$(aws s3 ls s3://skynet-crdb-backups/backups/mongo/ --recursive --summarize | grep "$BACKUP.tgz" | wc -l)
if [ "$totalFoundObjects" -eq "0" ]; then
echo "This backup doesn't exist!"
exit 1
fi
# Get the backup from S3:
aws s3 cp s3://skynet-crdb-backups/backups/mongo/$BACKUP.tgz $BACKUP.tgz
# Prepare a clean `to_restore` dir:
rm -rf $cwd/../docker/data/mongo/db/backups/to_restore
mkdir -p $cwd/../docker/data/mongo/db/backups/to_restore
# Decompress the backup:
tar -xzf $BACKUP.tgz -C $cwd/../docker/data/mongo/db/backups/to_restore
rm $BACKUP.tgz
# Restore the backup:
docker exec mongo \
mongorestore \
mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT \
/data/db/backups/to_restore/$BACKUP
# Clean up:
rm -rf $cwd/../docker/data/mongo/db/backups/to_restore

@@ -94,6 +94,8 @@ At this point we have almost everything running, we just need to set up your wal
- `COOKIE_DOMAIN` (optional) if using `accounts` this is the domain to which your cookies will be issued
- `COOKIE_HASH_KEY` (optional) if using `accounts` hashing secret, at least 32 bytes
- `COOKIE_ENC_KEY` (optional) if using `accounts` encryption key, at least 32 bytes
- `S3_BACKUP_PATH` (optional) if using `accounts` and backing up the databases to S3. This should be an S3 bucket,
plus the path within it under which the daily backups will be stored (see the example below).
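
A minimal `.env` entry could look like this (the bucket name is illustrative; use your own):

```
S3_BACKUP_PATH=s3://skynet-crdb-backups/backups
```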
1. if you have a custom domain and you configured it in `DOMAIN_NAME`, edit `/home/user/skynet-webportal/docker/caddy/Caddyfile` and uncomment `import custom.domain`
1. only for siasky.net domain instances: edit `/home/user/skynet-webportal/docker/caddy/Caddyfile`, uncomment `import siasky.net`