Better custom command name.
parent 6971b3938b
commit 0f18d062d9
@@ -1,11 +1,11 @@
 #!/bin/bash
 
-# Get current working directory (pwd doesn't cut it)
-cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
+# Get current script directory (pwd doesn't cut it)
+csd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
 # Set the environment. We only grab the entries we need because otherwise we
 # need to deal with the edge cases presented by problematic values.
 set -o allexport
-cat $cwd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
+cat $csd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
 source .tmpenv
 rm .tmpenv
 set +o allexport
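For reference, the pattern the renamed variable belongs to, shown in isolation: the script resolves its own directory (hence `csd`, the script directory, as opposed to the caller's working directory) and then exports only a whitelisted subset of the repo's `.env`. A minimal sketch assuming the same `../.env` layout as in the hunk above, with the variable list shortened for brevity:

    # Resolve the directory this script lives in, following symlinks.
    csd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
    # Export only the entries we need; the full .env may contain values that
    # are awkward to source directly.
    set -o allexport
    grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY" "$csd/../.env" >.tmpenv
    source .tmpenv
    rm .tmpenv
    set +o allexport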
@@ -59,11 +59,11 @@ else
 	echo "Creating a MongoDB backup failed. Skipping."
 else
 	# Compress the backup:
-	cd $cwd/../docker/data/mongo/db/backups/ && ls -l && tar -czf mongo.tgz $DT && cd -
+	cd $csd/../docker/data/mongo/db/backups/ && ls -l && tar -czf mongo.tgz $DT && cd -
 	# Upload the backup to S3:
-	aws s3 cp $cwd/../docker/data/mongo/db/backups/mongo.tgz $S3_BACKUP_PATH/$DT/mongo.tgz
+	aws s3 cp $csd/../docker/data/mongo/db/backups/mongo.tgz $S3_BACKUP_PATH/$DT/mongo.tgz
 	# Clean up
-	rm -rf $DT.tgz $cwd/../docker/data/mongo/db/backups/mongo.tgz
+	rm -rf $DT.tgz $csd/../docker/data/mongo/db/backups/mongo.tgz
 	echo "Finished MongoDB backup."
 fi
 docker exec mongo rm -rf /data/db/backups/$DT
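The upload above can be spot-checked after a run. A hypothetical check, not part of the script, assuming the AWS CLI is configured with the same credentials and the same $S3_BACKUP_PATH/$DT layout:

    # List the day's prefix to confirm the archive landed (hypothetical check).
    aws s3 ls "$S3_BACKUP_PATH/$DT/"
    # Stream the archive back and list its contents without writing it to disk.
    aws s3 cp "$S3_BACKUP_PATH/$DT/mongo.tgz" - | tar -tzf - | head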
@@ -6,11 +6,11 @@ if [[ $BACKUP == "" ]]; then
 	exit 1
 fi
 
-# Get current working directory (pwd doesn't cut it)
-cwd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
+# Get current script directory (pwd doesn't cut it)
+csd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
 # Set the environment:
 set -o allexport
-cat $cwd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
+cat $csd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
 source .tmpenv
 rm .tmpenv
 set +o allexport
@@ -88,10 +88,10 @@ fi
 # Get the backup from S3:
 aws s3 cp $S3_BACKUP_PATH/$BACKUP/mongo.tgz mongo.tgz
 # Prepare a clean `to_restore` dir:
-rm -rf $cwd/../docker/data/mongo/db/backups/to_restore
-mkdir -p $cwd/../docker/data/mongo/db/backups/to_restore
+rm -rf $csd/../docker/data/mongo/db/backups/to_restore
+mkdir -p $csd/../docker/data/mongo/db/backups/to_restore
 # Decompress the backup:
-tar -xzf mongo.tgz -C $cwd/../docker/data/mongo/db/backups/to_restore
+tar -xzf mongo.tgz -C $csd/../docker/data/mongo/db/backups/to_restore
 rm mongo.tgz
 # Restore the backup:
 # The name of the backup is not `mongo` due to the way we're creating it,
@@ -101,4 +101,4 @@ docker exec mongo \
 	mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT \
 	/data/db/backups/to_restore/$BACKUP
 # Clean up:
-rm -rf $cwd/../docker/data/mongo/db/backups/to_restore
+rm -rf $csd/../docker/data/mongo/db/backups/to_restore
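The restore command itself is only partly visible in this last hunk; its opening lines sit outside the diff context. A sketch of an invocation consistent with the visible arguments, where the mongorestore call is an assumption and only the connection string and backup path come from the hunk above:

    # Hypothetical reconstruction of the elided command; mongorestore and its
    # argument order are assumed, not taken from the script.
    docker exec mongo \
    	mongorestore \
    	mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT \
    	/data/db/backups/to_restore/$BACKUP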