Mirror of https://github.com/tiredofit/docker-db-backup.git (synced 2025-12-21 13:23:12 +01:00)
Reduce size of temporary files
- Changed the way backups are performed to reduce temporary files
- Removed RethinkDB support
- Reworked MongoDB compression
- Removed the "function" prefix from function declarations
- Renamed variables to lower case for easier reading
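The theme running through the diff: every dump used to land uncompressed in the temporary directory and get compressed in place afterwards, which briefly required disk space for both copies. The new flow calls compression() first to select a filter, then pipes the dump straight through it. A minimal sketch of the before/after pattern, assuming COMPRESSION=GZ at level 3 (values illustrative):

    # old: two passes, the uncompressed dump hits the disk first
    mysqldump ... > ${tmpdir}/${target}
    gzip -3 ${tmpdir}/${target}

    # new: one pass, only the compressed stream is written
    compression                     # appends .gz to $target and sets dumpoutput="gzip -3 "
    mysqldump ... | $dumpoutput > ${tmpdir}/${target}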
@@ -24,16 +24,16 @@ COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
 DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
 DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
 DB_DUMP_TARGET=${DB_DUMP_TARGET:-/backup}
-DBHOST=${DB_HOST}
-DBNAME=${DB_NAME}
-DBPASS=${DB_PASS}
-DBTYPE=${DB_TYPE}
-DBUSER=${DB_USER}
+dbhost=${DB_HOST}
+dbname=${DB_NAME}
+dbpass=${DB_PASS}
+dbtype=${DB_TYPE}
+dbuser=${DB_USER}
 MD5=${MD5:-TRUE}
 PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
 SIZE_VALUE=${SIZE_VALUE:-"bytes"}
 SPLIT_DB=${SPLIT_DB:-FALSE}
-TMPDIR=/tmp/backups
+tmpdir=/tmp/backups

if [ "BACKUP_TYPE" = "S3" ] || [ "BACKUP_TYPE" = "s3" ] || [ "BACKUP_TYPE" = "MINIO" ] || [ "BACKUP_TYPE" = "minio" ] ; then
|
||||
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
|
||||
@@ -54,138 +54,140 @@ fi

 ### Set Compression Options
 if var_true $PARALLEL_COMPRESSION ; then
-    BZIP="pbzip2 -${COMPRESSION_LEVEL}"
-    GZIP="pigz -${COMPRESSION_LEVEL}"
-    XZIP="pixz -${COMPRESSION_LEVEL}"
-    ZSTD="zstd --rm -${COMPRESSION_LEVEL}"
+    bzip="pbzip2 -${COMPRESSION_LEVEL}"
+    gzip="pigz -${COMPRESSION_LEVEL}"
+    xzip="pixz -${COMPRESSION_LEVEL}"
+    zstd="zstd --rm -${COMPRESSION_LEVEL}"
 else
-    BZIP="bzip2 -${COMPRESSION_LEVEL}"
-    GZIP="gzip -${COMPRESSION_LEVEL}"
-    XZIP="xz -${COMPRESSION_LEVEL}"
-    ZSTD="zstd --rm -${COMPRESSION_LEVEL}"
+    bzip="bzip2 -${COMPRESSION_LEVEL}"
+    gzip="gzip -${COMPRESSION_LEVEL}"
+    xzip="xz -${COMPRESSION_LEVEL}"
+    zstd="zstd --rm -${COMPRESSION_LEVEL}"
 fi

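var_true comes from the base image's shared function library rather than this script. A minimal stand-in with the truthy semantics the script relies on (an assumption; the real helper may accept more spellings):

    var_true() {
        # returns 0 (true) only for TRUE-like values
        case "$1" in
            TRUE|true|True) return 0 ;;
            *) return 1 ;;
        esac
    }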
 ### Set the Database Type
-case "$DBTYPE" in
+case "$dbtype" in
     "couch" | "couchdb" | "COUCH" | "COUCHDB" )
-        DBTYPE=couch
-        DBPORT=${DB_PORT:-5984}
+        dbtype=couch
+        dbport=${DB_PORT:-5984}
     ;;
     "influx" | "influxdb" | "INFLUX" | "INFLUXDB" )
-        DBTYPE=influx
-        DBPORT=${DB_PORT:-8088}
+        dbtype=influx
+        dbport=${DB_PORT:-8088}
     ;;
     "mongo" | "mongodb" | "MONGO" | "MONGODB" )
-        DBTYPE=mongo
-        DBPORT=${DB_PORT:-27017}
-        [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DBUSER}"
-        [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DBPASS}"
-        [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DBNAME}"
+        dbtype=mongo
+        dbport=${DB_PORT:-27017}
+        [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
+        [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
+        [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
     ;;
     "mysql" | "MYSQL" | "mariadb" | "MARIADB")
-        DBTYPE=mysql
-        DBPORT=${DB_PORT:-3306}
-        [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DBPASS}
+        dbtype=mysql
+        dbport=${DB_PORT:-3306}
+        [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
     ;;
     "postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" )
-        DBTYPE=pgsql
-        DBPORT=${DB_PORT:-5432}
-        [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DBPASS}"
+        dbtype=pgsql
+        dbport=${DB_PORT:-5432}
+        [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
     ;;
     "redis" | "REDIS" )
-        DBTYPE=redis
-        DBPORT=${DB_PORT:-6379}
-        [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DBPASS}"
-    ;;
-    "rethink" | "RETHINK" )
-        DBTYPE=rethink
-        DBPORT=${DB_PORT:-28015}
-        [[ ( -n "${DB_PASS}" ) ]] && echo $DB_PASS>/tmp/.rethink.auth; RETHINK_PASS_STR=" --password-file /tmp/.rethink.auth"
-        [[ ( -n "${DB_NAME}" ) ]] && RETHINK_DB_STR=" -e ${DBNAME}"
+        dbtype=redis
+        dbport=${DB_PORT:-6379}
+        [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
     ;;
 esac

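A small but load-bearing detail: each MONGO_*_STR is defined with a leading space because backup_mongo() concatenates them with no separator. Unset strings expand to nothing, and populated ones bring their own spacing:

    # from backup_mongo() further down:
    mongodump ... ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}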
 ### Functions
-function backup_couch() {
-    TARGET=couch_${DBNAME}_${DBHOST}_${now}.txt
-    curl -X GET http://${DBHOST}:${DBPORT}/${DBNAME}/_all_docs?include_docs=true > ${TMPDIR}/${TARGET}
-    generate_md5
+backup_couch() {
+    target=couch_${dbname}_${dbhost}_${now}.txt
+    compression
+    curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true | $dumpoutput > ${tmpdir}/${target}
+    generate_md5
     move_backup
 }

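The CouchDB "dump" is simply the _all_docs view with full documents included, now streamed through the selected compressor. Spelled out with hypothetical values (host couchdb, database db1, gzip):

    curl -X GET http://couchdb:5984/db1/_all_docs?include_docs=true \
        | gzip -3 > /tmp/backups/couch_db1_couchdb_20250101-120000.txt.gz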
-function backup_mysql() {
+backup_mysql() {
     if var_true $SPLIT_DB ; then
-        DATABASES=`mysql -h ${DBHOST} -P $DBPORT -u$DBUSER --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema`
+        DATABASES=`mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema`

         for db in $DATABASES; do
             if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
-                echo "** [db-backup] Dumping database: $db"
-                TARGET=mysql_${db}_${DBHOST}_${now}.sql
-                mysqldump --max-allowed-packet=512M -h $DBHOST -P $DBPORT -u$DBUSER ${EXTRA_OPTS} --databases $db > ${TMPDIR}/${TARGET}
-                generate_md5
+                print_notice "Dumping MariaDB database: $db"
+                target=mysql_${db}_${dbhost}_${now}.sql
+                compression
+                mysqldump --max-allowed-packet=512M -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} --databases $db | $dumpoutput > ${tmpdir}/${target}
+                generate_md5
                 move_backup
             fi
         done
     else
-        mysqldump --max-allowed-packet=512M -A -h $DBHOST -P $DBPORT -u$DBUSER ${EXTRA_OPTS} > ${TMPDIR}/${TARGET}
-        generate_md5
+        compression
+        mysqldump --max-allowed-packet=512M -A -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
+        generate_md5
         move_backup
     fi
 }

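With SPLIT_DB=TRUE the loop emits one dump (plus checksum) per schema instead of a single all-databases file. A sketch of the resulting target directory, assuming gzip and hypothetical names:

    /backup/mysql_app_db1_20250101-120000.sql.gz
    /backup/mysql_app_db1_20250101-120000.sql.gz.md5
    /backup/mysql_blog_db1_20250101-120000.sql.gz
    /backup/mysql_blog_db1_20250101-120000.sql.gz.md5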
-function backup_influx() {
+backup_influx() {
+    if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
+        :
+    else
+        print_notice "Compressing InfluxDB backup with gzip"
+        influx_compression="-portable"
+    fi
     for DB in $DB_NAME; do
-        influxd backup -database $DB -host ${DBHOST}:${DBPORT} ${TMPDIR}/${TARGET}
+        influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${tmpdir}/${target}
         generate_md5
         compression
         move_backup
     done
 }

-function backup_mongo() {
-    mongodump --out ${TMPDIR}/${TARGET} --host ${DBHOST} --port ${DBPORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
-    cd ${TMPDIR}
-    tar cf ${TARGET}.tar ${TARGET}/*
-    TARGET=${TARGET}.tar
+backup_mongo() {
+    if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
+        target=${dbtype}_${dbname}_${dbhost}_${now}.archive
+    else
+        print_notice "Compressing MongoDB backup with gzip"
+        target=${dbtype}_${dbname}_${dbhost}_${now}.archivegz
+        mongo_compression="--gzip"
+    fi
+    mongodump --archive=${tmpdir}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+    cd ${tmpdir}
     generate_md5
     compression
     move_backup
 }

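The MongoDB rework swaps the dump-directory-plus-tar approach for mongodump's native archive mode, so no uncompressed BSON tree ever lands on disk. Roughly equivalent to (host and paths hypothetical):

    # old: full BSON directory on disk, then a tar pass over it
    mongodump --out /tmp/backups/dump --host mongo --port 27017
    tar cf dump.tar dump/*

    # new: one gzipped archive, written once
    mongodump --archive=/tmp/backups/mongo_db1.archivegz --gzip --host mongo --port 27017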
-function backup_pgsql() {
+backup_pgsql() {
     if var_true $SPLIT_DB ; then
-        export PGPASSWORD=${DBPASS}
-        DATABASES=`psql -h $DBHOST -U $DBUSER -p ${DBPORT} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' `
+        export PGPASSWORD=${dbpass}
+        DATABASES=`psql -h $dbhost -U $dbuser -p ${dbport} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' `
         for db in $DATABASES; do
             print_info "Dumping database: $db"
-            TARGET=pgsql_${db}_${DBHOST}_${now}.sql
-            pg_dump -h ${DBHOST} -p ${DBPORT} -U ${DBUSER} $db ${EXTRA_OPTS} > ${TMPDIR}/${TARGET}
-            generate_md5
+            target=pgsql_${db}_${dbhost}_${now}.sql
+            compression
+            pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
+            generate_md5
             move_backup
         done
     else
-        export PGPASSWORD=${DBPASS}
-        pg_dump -h ${DBHOST} -U ${DBUSER} -p ${DBPORT} ${DBNAME} ${EXTRA_OPTS} > ${TMPDIR}/${TARGET}
-        generate_md5
+        export PGPASSWORD=${dbpass}
+        compression
+        pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
+        generate_md5
         move_backup
     fi
 }

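The database list for SPLIT_DB comes from a COPY ... TO STDOUT query, which prints bare datname values with no header or footer, so the for loop can consume it directly; e.g. against a hypothetical server:

    psql -h db1 -U postgres -p 5432 -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;'
    # postgres
    # app
    # blog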
-function backup_redis() {
-    TARGET=redis_${db}_${DBHOST}_${now}.rdb
-    echo bgsave | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} --rdb ${TMPDIR}/${TARGET} ${EXTRA_OPTS}
+backup_redis() {
+    target=redis_${db}_${dbhost}_${now}.rdb
+    echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${tmpdir}/${target} ${EXTRA_OPTS}
     print_info "Dumping Redis - Flushing Redis Cache First"
     sleep 10
     try=5
     while [ $try -gt 0 ] ; do
-        saved=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
-        ok=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
+        saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
+        ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
         if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
             print_info "Redis Backup Complete"
         fi
@@ -198,120 +200,111 @@ function backup_redis() {
     move_backup
 }

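The redis branch triggers a BGSAVE and then polls INFO Persistence until the background save has finished and succeeded; the two awk probes boil down to these checks (hypothetical host):

    redis-cli -h redis -p 6379 info Persistence | grep rdb_bgsave_in_progress
    # rdb_bgsave_in_progress:0    -> no save currently running
    redis-cli -h redis -p 6379 info Persistence | grep rdb_last_bgsave_status
    # rdb_last_bgsave_status:ok   -> and the last one succeeded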
-function backup_rethink() {
-    TARGET=rethink_${db}_${DBHOST}_${now}.tar.gz
-    print_info "Dumping rethink Database: $db"
-    rethinkdb dump -f ${TMPDIR}/${TARGET} -c ${DBHOST}:${DBPORT} ${RETHINK_PASS_STR} ${RETHINK_DB_STR} ${EXTRA_OPTS}
-    move_backup
-}

-function check_availability() {
+check_availability() {
     ### Set the Database Type
-    case "$DBTYPE" in
+    case "$dbtype" in
         "couch" )
             COUNTER=0
-            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
+            while ! (nc -z ${dbhost} ${dbport}) ; do
                 sleep 5
                 let COUNTER+=5
-                print_warn "CouchDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+                print_warn "CouchDB Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
             done
         ;;
         "influx" )
             COUNTER=0
-            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
+            while ! (nc -z ${dbhost} ${dbport}) ; do
                 sleep 5
                 let COUNTER+=5
-                print_warn "InfluxDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+                print_warn "InfluxDB Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
             done
         ;;
         "mongo" )
             COUNTER=0
-            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
+            while ! (nc -z ${dbhost} ${dbport}) ; do
                 sleep 5
                 let COUNTER+=5
-                print_warn "Mongo Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+                print_warn "Mongo Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
            done
         ;;
         "mysql" )
             COUNTER=0
             while true; do
-                mysqlcmd='mysql -u'${DBUSER}' -P '${DBPORT}' -h '${DBHOST}' -p'${DBPASS}
+                mysqlcmd='mysql -u'${dbuser}' -P '${dbport}' -h '${dbhost}' -p'${dbpass}
                 out="`$mysqlcmd -e "SELECT COUNT(*) FROM information_schema.FILES;" 2>&1`"
                 echo "$out" | grep -E "COUNT|Enter" 2>&1 > /dev/null
                 if [ $? -eq 0 ]; then
                     :
                     break
                 fi
-                print_warn "MySQL/MariaDB Server "$DBHOST" is not accessible, retrying.. ($COUNTER seconds so far)"
+                print_warn "MySQL/MariaDB Server "$dbhost" is not accessible, retrying.. ($COUNTER seconds so far)"
                 sleep 5
                 let COUNTER+=5
             done
         ;;
         "pgsql" )
             # Wait until postgres reports that it's ready (or timeout after 60s)
             COUNTER=0
-            export PGPASSWORD=${DBPASS}
-            until pg_isready --dbname=${DBNAME} --host=${DBHOST} --port=${DBPORT} --username=${DBUSER} -q
+            export PGPASSWORD=${dbpass}
+            until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
             do
                 sleep 5
                 let COUNTER+=5
-                print_warn "Postgres Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+                print_warn "Postgres Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
             done
         ;;
         "redis" )
             COUNTER=0
-            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
+            while ! (nc -z ${dbhost} ${dbport}) ; do
                 sleep 5
                 let COUNTER+=5
-                print_warn "Redis Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
-            done
-        ;;
-        "rethink" )
-            COUNTER=0
-            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
-                sleep 5
-                let COUNTER+=5
-                print_warn "RethinkDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+                print_warn "Redis Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
             done
         ;;
     esac
 }

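All of the nc -z probes only verify that the TCP port accepts a connection; the pgsql branch is the exception, using pg_isready for a protocol-level check. For example (hypothetical hosts):

    nc -z db1 3306 && echo "port open"
    pg_isready --host=db1 --port=5432 --username=postgres -q && echo "accepting connections"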
-function compression() {
+compression() {
     case "$COMPRESSION" in
         "GZ" | "gz" | "gzip" | "GZIP")
-            $GZIP ${TMPDIR}/${TARGET}
-            TARGET=${TARGET}.gz
+            print_notice "Compressing backup with gzip"
+            target=${target}.gz
+            dumpoutput="$gzip "
         ;;
         "BZ" | "bz" | "bzip2" | "BZIP2" | "bzip" | "BZIP" | "bz2" | "BZ2")
-            $BZIP ${TMPDIR}/${TARGET}
-            TARGET=${TARGET}.bz2
+            print_notice "Compressing backup with bzip2"
+            target=${target}.bz2
+            dumpoutput="$bzip "
         ;;
         "XZ" | "xz" | "XZIP" | "xzip" )
-            $XZIP ${TMPDIR}/${TARGET}
-            TARGET=${TARGET}.xz
+            print_notice "Compressing backup with xzip"
+            target=${target}.xz
+            dumpoutput="$xzip "
         ;;
         "ZSTD" | "zstd" | "ZST" | "zst" )
-            $ZSTD ${TMPDIR}/${TARGET}
-            TARGET=${TARGET}.zst
+            print_notice "Compressing backup with zstd"
+            target=${target}.zst
+            dumpoutput="$zstd "
         ;;
+        "NONE" | "none" | "FALSE" | "false")
+            dumpoutput="cat "
+        ;;
     esac
 }

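Note the ordering contract this creates: compression() must run before the dump command, since it both appends the suffix to the pending $target and sets $dumpoutput; generate_md5 and move_backup then operate on the final name. Traced for COMPRESSION=XZ (values illustrative):

    target=pgsql_app_db1_20250101-120000.sql
    compression      # target=pgsql_app_db1_20250101-120000.sql.xz, dumpoutput="xz -3 "
    pg_dump ... | $dumpoutput > ${tmpdir}/${target}
    generate_md5     # writes ${target}.md5 beside the dump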
-function generate_md5() {
+generate_md5() {
     if var_true $MD5 ; then
-        cd $TMPDIR
-        md5sum ${TARGET} > ${TARGET}.md5
-        MD5VALUE=$(md5sum ${TARGET} | awk '{ print $1}')
+        print_notice "Generating MD5 for ${target}"
+        cd $tmpdir
+        md5sum ${target} > ${target}.md5
+        MD5VALUE=$(md5sum ${target} | awk '{ print $1}')
     fi
 }

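Because generate_md5 runs after the compressed file exists, the checksum covers the compressed artifact, and both files travel together through move_backup. Verifying a delivered backup might look like (hypothetical name):

    cd /backup
    md5sum -c mysql_app_db1_20250101-120000.sql.gz.md5
    # mysql_app_db1_20250101-120000.sql.gz: OK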
-function move_backup() {
+move_backup() {
     case "$SIZE_VALUE" in
         "b" | "bytes" )
             SIZE_VALUE=1
         ;;
         "[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
             SIZE_VALUE="-h"
@@ -321,16 +314,18 @@ function move_backup() {
         ;;
     esac
     if [ "$SIZE_VALUE" = "1" ] ; then
-        FILESIZE=$(stat -c%s "${DB_DUMP_TARGET}/${TARGET}")
+        FILESIZE=$(stat -c%s "${tmpdir}/${target}")
+        print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
     else
-        FILESIZE=$(du -h "${DB_DUMP_TARGET}/${TARGET}" | awk '{ print $1}')
+        FILESIZE=$(du -h "${tmpdir}/${target}" | awk '{ print $1}')
+        print_notice "Backup of ${target} created with the size of ${FILESIZE}"
     fi

     case "${BACKUP_LOCATION}" in
         "FILE" | "file" | "filesystem" | "FILESYSTEM" )
             mkdir -p ${DB_DUMP_TARGET}
-            mv ${TMPDIR}/*.md5 ${DB_DUMP_TARGET}/
-            mv ${TMPDIR}/${TARGET} ${DB_DUMP_TARGET}/${TARGET}
+            mv ${tmpdir}/*.md5 ${DB_DUMP_TARGET}/
+            mv ${tmpdir}/${target} ${DB_DUMP_TARGET}/${target}
         ;;
         "S3" | "s3" | "MINIO" | "minio" )
             s3_content_type="application/octet-stream"
@@ -342,10 +337,10 @@ function move_backup() {

             if var_true $MD5 ; then
                 s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
-                s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}.md5" | base64)"
-                sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
-                print_debug "Uploading ${TARGET}.md5 to S3"
-                curl -T "${TMPDIR}/${TARGET}.md5" ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET}.md5 \
+                s3_md5="$(libressl md5 -binary < "${tmpdir}/${target}.md5" | base64)"
+                sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${target}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
+                print_debug "Uploading ${target}.md5 to S3"
+                curl -T "${tmpdir}/${target}.md5" ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${target}.md5 \
                     -H "Date: $s3_date" \
                     -H "Authorization: AWS ${S3_KEY_ID}:$sig" \
                     -H "Content-Type: ${s3_content_type}" \
@@ -353,24 +348,24 @@ function move_backup() {
             fi

             s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
-            s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}" | base64)"
-            sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
-            print_debug "Uploading ${TARGET} to S3"
-            curl -T ${TMPDIR}/${TARGET} ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET} \
+            s3_md5="$(libressl md5 -binary < "${tmpdir}/${target}" | base64)"
+            sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${target}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
+            print_debug "Uploading ${target} to S3"
+            curl -T ${tmpdir}/${target} ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${target} \
                 -H "Date: $s3_date" \
                 -H "Authorization: AWS ${S3_KEY_ID}:$sig" \
                 -H "Content-Type: ${s3_content_type}" \
                 -H "Content-MD5: ${s3_md5}"

-            rm -rf ${TMPDIR}/*.md5
-            rm -rf ${TMPDIR}/${TARGET}
+            rm -rf ${tmpdir}/*.md5
+            rm -rf ${tmpdir}/${target}
         ;;
     esac
 }

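The upload path signs each PUT by hand with the legacy AWS Signature Version 2 scheme: an HMAC-SHA1 over "VERB\nContent-MD5\nContent-Type\nDate\n/bucket/key". Condensed from the code above, with a hypothetical bucket, key, and endpoint:

    s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
    s3_md5="$(libressl md5 -binary < backup.sql.gz | base64)"
    sig="$(printf "PUT\n$s3_md5\napplication/octet-stream\n$s3_date\n/mybucket/backups/backup.sql.gz" \
        | libressl sha1 -binary -hmac "$S3_KEY_SECRET" | base64)"
    curl -T backup.sql.gz https://s3.example.com/backups/backup.sql.gz \
        -H "Date: $s3_date" \
        -H "Authorization: AWS $S3_KEY_ID:$sig" \
        -H "Content-Type: application/octet-stream" \
        -H "Content-MD5: $s3_md5"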
 ### Container Startup
-print_info "Initialized on `date`"
+print_info "Backup routines Initialized on `date`"

 ### Wait for Next time to start backup
 current_time=$(date +"%s")
@@ -392,65 +387,63 @@ print_info "Initialized on `date`"
 ### Commence Backup
 while true; do
     # make sure the directory exists
-    mkdir -p $TMPDIR
+    mkdir -p $tmpdir

     ### Define Target name
     now=$(date +"%Y%m%d-%H%M%S")
     now_time=$(date +"%H:%M:%S")
     now_date=$(date +"%Y-%m-%d")
-    TARGET=${DBTYPE}_${DBNAME}_${DBHOST}_${now}.sql
+    target=${dbtype}_${dbname}_${dbhost}_${now}.sql

     ### Take a Dump
-    case "$DBTYPE" in
+    case "$dbtype" in
         "couch" )
             check_availability
             backup_couch
         ;;
         "influx" )
             check_availability
             backup_influx
         ;;
         "mysql" )
             check_availability
             backup_mysql
         ;;
         "mongo" )
             check_availability
             backup_mongo
         ;;
         "pgsql" )
             check_availability
             backup_pgsql
         ;;
         "redis" )
             check_availability
             backup_redis
         ;;
-        "rethink" )
-            check_availability
-            backup_rethink
-        ;;
     esac

     ### Zabbix
     if var_true $ENABLE_ZABBIX ; then
-        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${TARGET}`
-        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${TARGET} +'%s'`
+        print_notice "Sending Backup Statistics to Zabbix"
+        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${target}`
+        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${target} +'%s'`
     fi

     ### Automatic Cleanup
     if [[ -n "$DB_CLEANUP_TIME" ]]; then
+        print_notice "Cleaning up old backups"
         find $DB_DUMP_TARGET/ -mmin +$DB_CLEANUP_TIME -iname "*" -exec rm {} \;
     fi

     ### Post Backup Custom Script Support
     if [ -d /assets/custom-scripts/ ] ; then
-        print_info "Found Custom Scripts to Execute"
+        print_notice "Found Custom Scripts to Execute"
         for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
-            print_info "Running Script ${f}"
+            print_notice "Running Script ${f}"
             ## script DB_TYPE DB_HOST DB_NAME DATE BACKUP_FILENAME FILESIZE MD5_VALUE
             chmod +x ${f}
-            ${f} "${DBTYPE}" "${DBHOST}" "${DBNAME}" "${now_date}" "${now_time}" "${TARGET}" "${FILESIZE}" "${MD5VALUE}"
+            ${f} "${dbtype}" "${dbhost}" "${dbname}" "${now_date}" "${now_time}" "${target}" "${FILESIZE}" "${MD5VALUE}"
         done
     fi
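Custom scripts receive the run's metadata as positional arguments in the order of the call above. A minimal example hook (path and logic hypothetical):

    #!/bin/bash
    # /assets/custom-scripts/notify.sh
    # $1 dbtype  $2 dbhost  $3 dbname  $4 date  $5 time  $6 filename  $7 filesize  $8 md5
    echo "$4 $5 backup of $3 on $2 finished: $6 ($7, md5 $8)" >> /var/log/backup-hooks.log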