Reduce size of temporary files

Changed the way backups are performed to reduce temporary files (see the pipeline sketch below)
Removed RethinkDB support
Reworked MongoDB compression
Removed the 'function' keyword prefix from function definitions
Renamed variables to lowercase for easier reading
Dave Conroy
2020-06-17 07:57:49 -07:00
parent 955a08a21b
commit 310edda88c

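The size reduction comes from streaming each dump straight through the configured compressor instead of writing an uncompressed dump to the temporary directory and compressing it in a second pass; `compression()` now only selects the filter command (`dumpoutput`) and the file extension. A minimal sketch of the pattern, assuming placeholder host and credentials and the gzip case (the `dumpoutput`, `tmpdir`, and `target` names follow the script in the diff below; everything else here is illustrative, not part of the commit):

```bash
#!/usr/bin/env bash
# Hypothetical stand-ins for the container's environment variables
dbhost=db.example.com dbport=3306 dbuser=backup tmpdir=/tmp/backups

# Old flow: dump to disk, then compress the file in a second pass
# mysqldump -h "$dbhost" -P "$dbport" -u"$dbuser" -A > "$tmpdir/dump.sql" && gzip -3 "$tmpdir/dump.sql"

# New flow: pick the filter and extension up front, then pipe the dump
# through it so only the compressed file ever touches disk
dumpoutput="gzip -3"                     # would be "cat " when COMPRESSION=NONE
target="mysql_all_${dbhost}_$(date +%Y%m%d-%H%M%S).sql.gz"
mkdir -p "$tmpdir"
mysqldump -h "$dbhost" -P "$dbport" -u"$dbuser" -A | $dumpoutput > "${tmpdir}/${target}"
```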

@@ -24,16 +24,16 @@ COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
 DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
 DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
 DB_DUMP_TARGET=${DB_DUMP_TARGET:-/backup}
-DBHOST=${DB_HOST}
-DBNAME=${DB_NAME}
-DBPASS=${DB_PASS}
-DBTYPE=${DB_TYPE}
-DBUSER=${DB_USER}
+dbhost=${DB_HOST}
+dbname=${DB_NAME}
+dbpass=${DB_PASS}
+dbtype=${DB_TYPE}
+dbuser=${DB_USER}
 MD5=${MD5:-TRUE}
 PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
 SIZE_VALUE=${SIZE_VALUE:-"bytes"}
 SPLIT_DB=${SPLIT_DB:-FALSE}
-TMPDIR=/tmp/backups
+tmpdir=/tmp/backups
 if [ "BACKUP_TYPE" = "S3" ] || [ "BACKUP_TYPE" = "s3" ] || [ "BACKUP_TYPE" = "MINIO" ] || [ "BACKUP_TYPE" = "minio" ] ; then
 S3_PROTOCOL=${S3_PROTOCOL:-"https"}
@@ -54,138 +54,140 @@ fi
 ### Set Compression Options
 if var_true $PARALLEL_COMPRESSION ; then
-BZIP="pbzip2 -${COMPRESSION_LEVEL}"
-GZIP="pigz -${COMPRESSION_LEVEL}"
-XZIP="pixz -${COMPRESSION_LEVEL}"
-ZSTD="zstd --rm -${COMPRESSION_LEVEL}"
+bzip="pbzip2 -${COMPRESSION_LEVEL}"
+gzip="pigz -${COMPRESSION_LEVEL}"
+xzip="pixz -${COMPRESSION_LEVEL}"
+zstd="zstd --rm -${COMPRESSION_LEVEL}"
 else
-BZIP="bzip2 -${COMPRESSION_LEVEL}"
-GZIP="gzip -${COMPRESSION_LEVEL}"
-XZIP="xz -${COMPRESSION_LEVEL} "
-ZSTD="zstd --rm -${COMPRESSION_LEVEL}"
+bzip="bzip2 -${COMPRESSION_LEVEL}"
+gzip="gzip -${COMPRESSION_LEVEL}"
+xzip="xz -${COMPRESSION_LEVEL} "
+zstd="zstd --rm -${COMPRESSION_LEVEL}"
 fi
 ### Set the Database Type
-case "$DBTYPE" in
+case "$dbtype" in
 "couch" | "couchdb" | "COUCH" | "COUCHDB" )
-DBTYPE=couch
-DBPORT=${DB_PORT:-5984}
+dbtype=couch
+dbport=${DB_PORT:-5984}
 ;;
 "influx" | "influxdb" | "INFLUX" | "INFLUXDB" )
-DBTYPE=influx
-DBPORT=${DB_PORT:-8088}
+dbtype=influx
+dbport=${DB_PORT:-8088}
 ;;
 "mongo" | "mongodb" | "MONGO" | "MONGODB" )
-DBTYPE=mongo
-DBPORT=${DB_PORT:-27017}
-[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DBUSER}"
-[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DBPASS}"
-[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DBNAME}"
+dbtype=mongo
+dbport=${DB_PORT:-27017}
+[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
+[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
+[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
 ;;
 "mysql" | "MYSQL" | "mariadb" | "MARIADB")
-DBTYPE=mysql
-DBPORT=${DB_PORT:-3306}
-[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DBPASS}
+dbtype=mysql
+dbport=${DB_PORT:-3306}
+[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
 ;;
 "postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" )
-DBTYPE=pgsql
-DBPORT=${DB_PORT:-5432}
-[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DBPASS}"
+dbtype=pgsql
+dbport=${DB_PORT:-5432}
+[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
 ;;
 "redis" | "REDIS" )
-DBTYPE=redis
-DBPORT=${DB_PORT:-6379}
+dbtype=redis
+dbport=${DB_PORT:-6379}
-[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DBPASS}"
-;;
-"rethink" | "RETHINK" )
-DBTYPE=rethink
-DBPORT=${DB_PORT:-28015}
-[[ ( -n "${DB_PASS}" ) ]] && echo $DB_PASS>/tmp/.rethink.auth; RETHINK_PASS_STR=" --password-file /tmp/.rethink.auth"
-[[ ( -n "${DB_NAME}" ) ]] && RETHINK_DB_STR=" -e ${DBNAME}"
+[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
 ;;
 esac
 ### Functions
-function backup_couch() {
-TARGET=couch_${DBNAME}_${DBHOST}_${now}.txt
-curl -X GET http://${DBHOST}:${DBPORT}/${DBNAME}/_all_docs?include_docs=true >${TMPDIR}/${TARGET}
-generate_md5
+backup_couch() {
+target=couch_${dbname}_${dbhost}_${now}.txt
 compression
+curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true | $dumpoutput > ${tmpdir}/${target}
+generate_md5
 move_backup
 }
-function backup_mysql() {
+backup_mysql() {
 if var_true $SPLIT_DB ; then
-DATABASES=`mysql -h ${DBHOST} -P $DBPORT -u$DBUSER --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema`
+DATABASES=`mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema`
 for db in $DATABASES; do
 if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
-echo "** [db-backup] Dumping database: $db"
-TARGET=mysql_${db}_${DBHOST}_${now}.sql
-mysqldump --max-allowed-packet=512M -h $DBHOST -P $DBPORT -u$DBUSER ${EXTRA_OPTS} --databases $db > ${TMPDIR}/${TARGET}
-generate_md5
+print_notice "Dumping MariaDB database: $db"
+target=mysql_${db}_${dbhost}_${now}.sql
 compression
+mysqldump --max-allowed-packet=512M -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} --databases $db | $dumpoutput > ${tmpdir}/${target}
+generate_md5
 move_backup
 fi
 done
 else
-mysqldump --max-allowed-packet=512M -A -h $DBHOST -P $DBPORT -u$DBUSER ${EXTRA_OPTS} > ${TMPDIR}/${TARGET}
-generate_md5
 compression
+mysqldump --max-allowed-packet=512M -A -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
+generate_md5
 move_backup
 fi
 }
-function backup_influx() {
+backup_influx() {
+if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
+:
+else
+print_notice "Compressing InfluxDB backup with gzip"
+influx_compression="-portable"
+fi
 for DB in $DB_NAME; do
-influxd backup -database $DB -host ${DBHOST}:${DBPORT} ${TMPDIR}/${TARGET}
+influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${tmpdir}/${target}
 generate_md5
-compression
 move_backup
 done
 }
-function backup_mongo() {
-mongodump --out ${TMPDIR}/${TARGET} --host ${DBHOST} --port ${DBPORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
-cd ${TMPDIR}
-tar cf ${TARGET}.tar ${TARGET}/*
-TARGET=${TARGET}.tar
+backup_mongo() {
+if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
+target=${dbtype}_${dbname}_${dbhost}_${now}.archive
+else
+print_notice "Compressing MongoDB backup with gzip"
+target=${dbtype}_${dbname}_${dbhost}_${now}.archivegz
+mongo_compression="--gzip"
+fi
+mongodump --archive=${tmpdir}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+cd ${tmpdir}
 generate_md5
-compression
 move_backup
 }
-function backup_pgsql() {
+backup_pgsql() {
 if var_true $SPLIT_DB ; then
-export PGPASSWORD=${DBPASS}
-DATABASES=`psql -h $DBHOST -U $DBUSER -p ${DBPORT} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' `
+export PGPASSWORD=${dbpass}
+DATABASES=`psql -h $dbhost -U $dbuser -p ${dbport} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' `
 for db in $DATABASES; do
 print_info "Dumping database: $db"
-TARGET=pgsql_${db}_${DBHOST}_${now}.sql
-pg_dump -h ${DBHOST} -p ${DBPORT} -U ${DBUSER} $db ${EXTRA_OPTS}> ${TMPDIR}/${TARGET}
-generate_md5
+target=pgsql_${db}_${dbhost}_${now}.sql
 compression
+pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
+generate_md5
 move_backup
 done
 else
-export PGPASSWORD=${DBPASS}
-pg_dump -h ${DBHOST} -U ${DBUSER} -p ${DBPORT} ${DBNAME} ${EXTRA_OPTS}> ${TMPDIR}/${TARGET}
-generate_md5
+export PGPASSWORD=${dbpass}
 compression
+pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
+generate_md5
 move_backup
 fi
 }
-function backup_redis() {
-TARGET=redis_${db}_${DBHOST}_${now}.rdb
-echo bgsave | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} --rdb ${TMPDIR}/${TARGET} ${EXTRA_OPTS}
+backup_redis() {
+target=redis_${db}_${dbhost}_${now}.rdb
+echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${tmpdir}/${target} ${EXTRA_OPTS}
 print_info "Dumping Redis - Flushing Redis Cache First"
 sleep 10
 try=5
 while [ $try -gt 0 ] ; do
-saved=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
-ok=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
+saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
+ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
 if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
 print_info "Redis Backup Complete"
 fi
@@ -198,120 +200,111 @@ function backup_redis() {
 move_backup
 }
-function backup_rethink() {
-TARGET=rethink_${db}_${DBHOST}_${now}.tar.gz
-print_info "Dumping rethink Database: $db"
-rethinkdb dump -f ${TMPDIR}/${TARGET} -c ${DBHOST}:${DBPORT} ${RETHINK_PASS_STR} ${RETHINK_DB_STR} ${EXTRA_OPTS}
-move_backup
-}
-function check_availability() {
+check_availability() {
 ### Set the Database Type
-case "$DBTYPE" in
+case "$dbtype" in
 "couch" )
 COUNTER=0
-while ! (nc -z ${DBHOST} ${DBPORT}) ; do
+while ! (nc -z ${dbhost} ${dbport}) ; do
 sleep 5
 let COUNTER+=5
-print_warn "CouchDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+print_warn "CouchDB Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
 done
 ;;
 "influx" )
 COUNTER=0
-while ! (nc -z ${DBHOST} ${DBPORT}) ; do
+while ! (nc -z ${dbhost} ${dbport}) ; do
 sleep 5
 let COUNTER+=5
-print_warn "InfluxDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+print_warn "InfluxDB Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
 done
 ;;
 "mongo" )
 COUNTER=0
-while ! (nc -z ${DBHOST} ${DBPORT}) ; do
+while ! (nc -z ${dbhost} ${dbport}) ; do
 sleep 5
 let COUNTER+=5
-print_warn "Mongo Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+print_warn "Mongo Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
 done
 ;;
 "mysql" )
 COUNTER=0
 while true; do
-mysqlcmd='mysql -u'${DBUSER}' -P '${DBPORT}' -h '${DBHOST}' -p'${DBPASS}
+mysqlcmd='mysql -u'${dbuser}' -P '${dbport}' -h '${dbhost}' -p'${dbpass}
 out="`$mysqlcmd -e "SELECT COUNT(*) FROM information_schema.FILES;" 2>&1`"
 echo "$out" | grep -E "COUNT|Enter" 2>&1 > /dev/null
 if [ $? -eq 0 ]; then
 :
 break
 fi
-print_warn "MySQL/MariaDB Server "$DBHOST" is not accessible, retrying.. ($COUNTER seconds so far)"
+print_warn "MySQL/MariaDB Server "$dbhost" is not accessible, retrying.. ($COUNTER seconds so far)"
 sleep 5
 let COUNTER+=5
 done
 ;;
 "pgsql" )
-# Wait until mongo logs that it's ready (or timeout after 60s)
 COUNTER=0
-export PGPASSWORD=${DBPASS}
-until pg_isready --dbname=${DBNAME} --host=${DBHOST} --port=${DBPORT} --username=${DBUSER} -q
+export PGPASSWORD=${dbpass}
+until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
 do
 sleep 5
 let COUNTER+=5
-print_warn "Postgres Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+print_warn "Postgres Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
 done
 ;;
 "redis" )
 COUNTER=0
-while ! (nc -z ${DBHOST} ${DBPORT}) ; do
+while ! (nc -z ${dbhost} ${dbport}) ; do
 sleep 5
 let COUNTER+=5
-print_warn "Redis Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
-done
-;;
-"rethink" )
-COUNTER=0
-while ! (nc -z ${DBHOST} ${DBPORT}) ; do
-sleep 5
-let COUNTER+=5
-print_warn "RethinkDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+print_warn "Redis Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
 done
 ;;
 esac
 }
-function compression() {
+compression() {
 case "$COMPRESSION" in
 "GZ" | "gz" | "gzip" | "GZIP")
-$GZIP ${TMPDIR}/${TARGET}
-TARGET=${TARGET}.gz
+print_notice "Compressing backup with gzip"
+target=${target}.gz
+dumpoutput="$gzip "
 ;;
 "BZ" | "bz" | "bzip2" | "BZIP2" | "bzip" | "BZIP" | "bz2" | "BZ2")
-$BZIP ${TMPDIR}/${TARGET}
-TARGET=${TARGET}.bz2
+print_notice "Compressing backup with bzip2"
+target=${target}.bz2
+dumpoutput="$bzip "
 ;;
 "XZ" | "xz" | "XZIP" | "xzip" )
-$XZIP ${TMPDIR}/${TARGET}
-TARGET=${TARGET}.xz
+print_notice "Compressing backup with xzip"
+target=${target}.xz
+dumpoutput="$xzip "
 ;;
 "ZSTD" | "zstd" | "ZST" | "zst" )
-$ZSTD ${TMPDIR}/${TARGET}
-TARGET=${TARGET}.zst
+print_notice "Compressing backup with zstd"
+target=${target}.zst
+dumpoutput="$zstd "
 ;;
 "NONE" | "none" | "FALSE" | "false")
+dumpoutput="cat "
 ;;
 esac
 }
-function generate_md5() {
+generate_md5() {
 if var_true $MD5 ; then
-cd $TMPDIR
-md5sum ${TARGET} > ${TARGET}.md5
-MD5VALUE=$(md5sum ${TARGET} | awk '{ print $1}')
+print_notice "Generating MD5 for ${target}"
+cd $tmpdir
+md5sum ${target} > ${target}.md5
+MD5VALUE=$(md5sum ${target} | awk '{ print $1}')
 fi
 }
-function move_backup() {
+move_backup() {
 case "$SIZE_VALUE" in
 "b" | "bytes" )
 SIZE_VALUE=1
 ;;
 "[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
 SIZE_VALUE="-h"
@@ -321,16 +314,18 @@ function move_backup() {
 ;;
 esac
 if [ "$SIZE_VALUE" = "1" ] ; then
-FILESIZE=$(stat -c%s "${DB_DUMP_TARGET}/${TARGET}")
+FILESIZE=$(stat -c%s "${tmpdir}/${target}")
+print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
 else
-FILESIZE=$(du -h "${DB_DUMP_TARGET}/${TARGET}" | awk '{ print $1}')
+FILESIZE=$(du -h "${tmpdir}/${target}" | awk '{ print $1}')
+print_notice "Backup of ${target} created with the size of ${FILESIZE}"
 fi
 case "${BACKUP_LOCATION}" in
 "FILE" | "file" | "filesystem" | "FILESYSTEM" )
 mkdir -p ${DB_DUMP_TARGET}
-mv ${TMPDIR}/*.md5 ${DB_DUMP_TARGET}/
-mv ${TMPDIR}/${TARGET} ${DB_DUMP_TARGET}/${TARGET}
+mv ${tmpdir}/*.md5 ${DB_DUMP_TARGET}/
+mv ${tmpdir}/${target} ${DB_DUMP_TARGET}/${target}
 ;;
 "S3" | "s3" | "MINIO" | "minio" )
 s3_content_type="application/octet-stream"
@@ -342,10 +337,10 @@ function move_backup() {
 if var_true $MD5 ; then
 s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
-s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}.md5" | base64)"
-sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
-print_debug "Uploading ${TARGET}.md5 to S3"
-curl -T "${TMPDIR}/${TARGET}.md5" ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET}.md5 \
+s3_md5="$(libressl md5 -binary < "${tmpdir}/${target}.md5" | base64)"
+sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${target}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
+print_debug "Uploading ${target}.md5 to S3"
+curl -T "${tmpdir}/${target}.md5" ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${target}.md5 \
 -H "Date: $date" \
 -H "Authorization: AWS ${S3_KEY_ID}:$sig" \
 -H "Content-Type: ${s3_content_type}" \
@@ -353,24 +348,24 @@ function move_backup() {
 fi
 s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
-s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}" | base64)"
-sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
-print_debug "Uploading ${TARGET} to S3"
-curl -T ${TMPDIR}/${TARGET} ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET} \
+s3_md5="$(libressl md5 -binary < "${tmpdir}/${target}" | base64)"
+sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${target}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
+print_debug "Uploading ${target} to S3"
+curl -T ${tmpdir}/${target} ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${target} \
 -H "Date: $s3_date" \
 -H "Authorization: AWS ${S3_KEY_ID}:$sig" \
 -H "Content-Type: ${s3_content_type}" \
 -H "Content-MD5: ${s3_md5}"
-rm -rf ${TMPDIR}/*.md5
-rm -rf ${TMPDIR}/${TARGET}
+rm -rf ${tmpdir}/*.md5
+rm -rf ${tmpdir}/${target}
 ;;
 esac
 }
 ### Container Startup
-print_info "Initialized on `date`"
+print_info "Backup routines Initialized on `date`"
 ### Wait for Next time to start backup
 current_time=$(date +"%s")
@@ -392,65 +387,63 @@ print_info "Initialized on `date`"
 ### Commence Backup
 while true; do
 # make sure the directory exists
-mkdir -p $TMPDIR
+mkdir -p $tmpdir
 ### Define Target name
 now=$(date +"%Y%m%d-%H%M%S")
 now_time=$(date +"%H:%M:%S")
 now_date=$(date +"%Y-%m-%d")
-TARGET=${DBTYPE}_${DBNAME}_${DBHOST}_${now}.sql
+target=${dbtype}_${dbname}_${dbhost}_${now}.sql
 ### Take a Dump
-case "$DBTYPE" in
+case "$dbtype" in
 "couch" )
 check_availability
 backup_couch
 ;;
 "influx" )
 check_availability
 backup_influx
 ;;
 "mysql" )
 check_availability
 backup_mysql
 ;;
 "mongo" )
 check_availability
 backup_mongo
 ;;
 "pgsql" )
 check_availability
 backup_pgsql
 ;;
 "redis" )
 check_availability
 backup_redis
-;;
-"rethink" )
-check_availability
-backup_rethink
 ;;
 esac
 ### Zabbix
 if var_true $ENABLE_ZABBIX ; then
-silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${TARGET}`
-silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${TARGET} +'%s'`
+print_notice "Sending Backup Statistics to Zabbix"
+silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${target}`
+silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${target} +'%s'`
 fi
 ### Automatic Cleanup
 if [[ -n "$DB_CLEANUP_TIME" ]]; then
-find $DB_DUMP_TARGET/ -mmin +$DB_CLEANUP_TIME -iname "*" -exec rm {} \;
+print_notice "Cleaning up old backups"
+find $DB_DUMP_TARGET/ -mmin +$DB_CLEANUP_TIME -iname "*" -exec rm {} \;
 fi
 ### Post Backup Custom Script Support
 if [ -d /assets/custom-scripts/ ] ; then
-print_info "Found Custom Scripts to Execute"
+print_notice "Found Custom Scripts to Execute"
 for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
-print_info "Running Script ${f}"
+print_notice "Running Script ${f}"
 ## script DB_TYPE DB_HOST DB_NAME DATE BACKUP_FILENAME FILESIZE MD5_VALUE
 chmod +x ${f}
-${f} "${DBTYPE}" "${DBHOST}" "${DBNAME}" "${now_date}" "${now_time}" "${TARGET}" "${FILESIZE}" "${MD5VALUE}"
+${f} "${dbtype}" "${dbhost}" "${dbname}" "${now_date}" "${now_time}" "${target}" "${FILESIZE}" "${MD5VALUE}"
 done
 fi