Mirror of https://github.com/tiredofit/docker-db-backup.git
Release 1.21.2 - See CHANGELOG.md
New executable file: install/etc/services.available/10-db-backup/run (458 lines)
#!/usr/bin/with-contenv bash

source /assets/functions/00-container

PROCESS_NAME="db-backup"

date >/dev/null

if [ "$1" != "NOW" ]; then
    sleep 10
fi

### Sanity Test
sanity_var DB_TYPE "Database Type"
sanity_var DB_HOST "Database Host"
file_env 'DB_USER'
file_env 'DB_PASS'

### Set Defaults
BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
COMPRESSION=${COMPRESSION:-GZ}
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
DB_DUMP_TARGET=${DB_DUMP_TARGET:-/backup}
DBHOST=${DB_HOST}
DBNAME=${DB_NAME}
DBPASS=${DB_PASS}
DBTYPE=${DB_TYPE}
DBUSER=${DB_USER}
MD5=${MD5:-TRUE}
PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SPLIT_DB=${SPLIT_DB:-FALSE}
TMPDIR=/tmp/backups
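
# Usage sketch (illustrative, not from this file): these values normally come
# from the container environment, e.g.
#   docker run -d -e DB_TYPE=mysql -e DB_HOST=db -e DB_USER=root \
#     -e DB_PASS=secret -e DB_DUMP_FREQ=1440 -v ./backups:/backup <image>
# DB_DUMP_FREQ is the number of minutes between dumps; DB_DUMP_BEGIN accepts
# +MM (minutes after start) or HHMM (absolute time), parsed near the end of
# this script.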
if [ "BACKUP_TYPE" = "S3" ] || [ "BACKUP_TYPE" = "s3" ] || [ "BACKUP_TYPE" = "MINIO" ] || [ "BACKUP_TYPE" = "minio" ] ; then
|
||||
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
|
||||
sanity_var S3_HOST "S3 Host"
|
||||
sanity_var S3_BUCKET "S3 Bucket"
|
||||
sanity_var S3_KEY_ID "S3 Key ID"
|
||||
sanity_var S3_KEY_SECRET "S3 Key Secret"
|
||||
sanity_var S3_URI_STYLE "S3 URI Style (Virtualhost or Path)"
|
||||
sanity_var S3_PATH "S3 Path"
|
||||
file_env 'S3_KEY_ID'
|
||||
file_env 'S3_KEY_SECRET'
|
||||
|
||||
fi
|
||||
|
||||
if [ "$1" = "NOW" ]; then
|
||||
DB_DUMP_BEGIN=+0
|
||||
MANUAL=TRUE
|
||||
fi
|
||||
|
||||
### Set Compression Options
if var_true $PARALLEL_COMPRESSION ; then
    BZIP="pbzip2"
    GZIP="pigz"
    XZIP="pixz"
else
    BZIP="bzip2"
    GZIP="gzip"
    XZIP="xz"
fi
### Set the Database Type
case "$DBTYPE" in
    "couch" | "couchdb" | "COUCH" | "COUCHDB" )
        DBTYPE=couch
        DBPORT=${DB_PORT:-5984}
    ;;
    "influx" | "influxdb" | "INFLUX" | "INFLUXDB" )
        DBTYPE=influx
        DBPORT=${DB_PORT:-8088}
    ;;
    "mongo" | "mongodb" | "MONGO" | "MONGODB" )
        DBTYPE=mongo
        DBPORT=${DB_PORT:-27017}
        [[ -n "${DB_USER}" ]] && MONGO_USER_STR=" --username ${DBUSER}"
        [[ -n "${DB_PASS}" ]] && MONGO_PASS_STR=" --password ${DBPASS}"
        [[ -n "${DB_NAME}" ]] && MONGO_DB_STR=" --db ${DBNAME}"
    ;;
    "mysql" | "MYSQL" | "mariadb" | "MARIADB")
        DBTYPE=mysql
        DBPORT=${DB_PORT:-3306}
        [[ -n "${DB_PASS}" ]] && export MYSQL_PWD=${DBPASS}
    ;;
    "postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" )
        DBTYPE=pgsql
        DBPORT=${DB_PORT:-5432}
        [[ -n "${DB_PASS}" ]] && POSTGRES_PASS_STR="PGPASSWORD=${DBPASS}"
    ;;
    "redis" | "REDIS" )
        DBTYPE=redis
        DBPORT=${DB_PORT:-6379}
        [[ -n "${DB_PASS}" ]] && REDIS_PASS_STR=" -a ${DBPASS}"
    ;;
    "rethink" | "RETHINK" )
        DBTYPE=rethink
        DBPORT=${DB_PORT:-28015}
        if [ -n "${DB_PASS}" ]; then
            # Write the password to a file so it is not exposed on the command line
            echo "${DB_PASS}" > /tmp/.rethink.auth
            RETHINK_PASS_STR=" --password-file /tmp/.rethink.auth"
        fi
        [[ -n "${DB_NAME}" ]] && RETHINK_DB_STR=" -e ${DBNAME}"
    ;;
esac
### Functions
function backup_couch() {
    TARGET=couch_${DBNAME}_${DBHOST}_${now}.txt
    curl -X GET http://${DBHOST}:${DBPORT}/${DBNAME}/_all_docs?include_docs=true > ${TMPDIR}/${TARGET}
    generate_md5
    compression
    move_backup
}
function backup_mysql() {
    if var_true $SPLIT_DB ; then
        DATABASES=$(mysql -h ${DBHOST} -P $DBPORT -u$DBUSER --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema)

        for db in $DATABASES; do
            if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
                print_info "Dumping database: $db"
                TARGET=mysql_${db}_${DBHOST}_${now}.sql
                mysqldump --max-allowed-packet=512M -h $DBHOST -P $DBPORT -u$DBUSER --databases $db > ${TMPDIR}/${TARGET}
                generate_md5
                compression
                move_backup
            fi
        done
    else
        mysqldump --max-allowed-packet=512M -A -h $DBHOST -P $DBPORT -u$DBUSER > ${TMPDIR}/${TARGET}
        generate_md5
        compression
        move_backup
    fi
}
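
# Illustrative output of backup_mysql with SPLIT_DB=TRUE, COMPRESSION=GZ and
# MD5=TRUE (hypothetical host "db" and database "app"):
#   /backup/mysql_app_db_20190101-120000.sql.gz
#   /backup/mysql_app_db_20190101-120000.sql.md5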
function backup_influx() {
    for DB in $DB_NAME; do
        # Give each database its own target name instead of reusing the generic .sql default
        TARGET=influx_${DB}_${DBHOST}_${now}
        influxd backup -database $DB -host ${DBHOST}:${DBPORT} ${TMPDIR}/${TARGET}
        generate_md5
        compression
        move_backup
    done
}
function backup_mongo() {
    mongodump --out ${TMPDIR}/${TARGET} --host ${DBHOST} --port ${DBPORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
    cd ${TMPDIR}
    tar cf ${TARGET}.tar ${TARGET}/*
    TARGET=${TARGET}.tar
    generate_md5
    compression
    move_backup
}
function backup_pgsql() {
    if var_true $SPLIT_DB ; then
        export PGPASSWORD=${DBPASS}
        DATABASES=$(psql -h $DBHOST -U $DBUSER -p ${DBPORT} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;')
        for db in $DATABASES; do
            print_info "Dumping database: $db"
            TARGET=pgsql_${db}_${DBHOST}_${now}.sql
            pg_dump -h ${DBHOST} -p ${DBPORT} -U ${DBUSER} $db > ${TMPDIR}/${TARGET}
            generate_md5
            compression
            move_backup
        done
    else
        export PGPASSWORD=${DBPASS}
        pg_dump -h ${DBHOST} -U ${DBUSER} -p ${DBPORT} ${DBNAME} > ${TMPDIR}/${TARGET}
        generate_md5
        compression
        move_backup
    fi
}
function backup_redis() {
    TARGET=redis_${DBNAME}_${DBHOST}_${now}.rdb
    print_info "Dumping Redis - Flushing Redis Cache First"
    echo bgsave | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} --rdb ${TMPDIR}/${TARGET}
    sleep 10
    try=5
    while [ $try -gt 0 ] ; do
        # Poll the server until the background save has finished successfully
        saved=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
        ok=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
        if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
            print_info "Redis Backup Complete"
            break
        fi
        try=$((try - 1))
        print_warn "Redis Busy - Waiting and retrying in 5 seconds"
        sleep 5
    done
    generate_md5
    compression
    move_backup
}
function backup_rethink() {
    TARGET=rethink_${DBNAME}_${DBHOST}_${now}.tar.gz
    print_info "Dumping RethinkDB Database: ${DBNAME}"
    rethinkdb dump -f ${TMPDIR}/${TARGET} -c ${DBHOST}:${DBPORT} ${RETHINK_PASS_STR} ${RETHINK_DB_STR}
    move_backup
}
function check_availability() {
    ### Verify that the chosen database type is reachable before dumping
    case "$DBTYPE" in
        "couch" )
            COUNTER=0
            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
                sleep 5
                let COUNTER+=5
                print_warn "CouchDB Host '${DBHOST}' is not accessible, retrying.. ($COUNTER seconds so far)"
            done
        ;;
        "influx" )
            COUNTER=0
            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
                sleep 5
                let COUNTER+=5
                print_warn "InfluxDB Host '${DBHOST}' is not accessible, retrying.. ($COUNTER seconds so far)"
            done
        ;;
        "mongo" )
            COUNTER=0
            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
                sleep 5
                let COUNTER+=5
                print_warn "Mongo Host '${DBHOST}' is not accessible, retrying.. ($COUNTER seconds so far)"
            done
        ;;
        "mysql" )
            COUNTER=0
            while true; do
                mysqlcmd='mysql -u'${DBUSER}' -P '${DBPORT}' -h '${DBHOST}' -p'${DBPASS}
                out="$($mysqlcmd -e "SELECT COUNT(*) FROM information_schema.FILES;" 2>&1)"
                echo "$out" | grep -E "COUNT|Enter" > /dev/null 2>&1
                if [ $? -eq 0 ]; then
                    break
                fi
                print_warn "MySQL/MariaDB Server '${DBHOST}' is not accessible, retrying.. ($COUNTER seconds so far)"
                sleep 5
                let COUNTER+=5
            done
        ;;
        "pgsql" )
            # Wait until Postgres accepts connections (pg_isready), retrying every 5 seconds
            COUNTER=0
            export PGPASSWORD=${DBPASS}
            until pg_isready --dbname=${DBNAME} --host=${DBHOST} --port=${DBPORT} --username=${DBUSER} -q
            do
                sleep 5
                let COUNTER+=5
                print_warn "Postgres Host '${DBHOST}' is not accessible, retrying.. ($COUNTER seconds so far)"
            done
        ;;
        "redis" )
            COUNTER=0
            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
                sleep 5
                let COUNTER+=5
                print_warn "Redis Host '${DBHOST}' is not accessible, retrying.. ($COUNTER seconds so far)"
            done
        ;;
        "rethink" )
            COUNTER=0
            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
                sleep 5
                let COUNTER+=5
                print_warn "RethinkDB Host '${DBHOST}' is not accessible, retrying.. ($COUNTER seconds so far)"
            done
        ;;
    esac
}
function compression() {
    case "$COMPRESSION" in
        "GZ" | "gz" | "gzip" | "GZIP")
            $GZIP ${TMPDIR}/${TARGET}
            TARGET=${TARGET}.gz
        ;;
        "BZ" | "bz" | "bzip2" | "BZIP2" | "bzip" | "BZIP" | "bz2" | "BZ2")
            $BZIP ${TMPDIR}/${TARGET}
            TARGET=${TARGET}.bz2
        ;;
        "XZ" | "xz" | "XZIP" | "xzip" )
            $XZIP ${TMPDIR}/${TARGET}
            TARGET=${TARGET}.xz
        ;;
        "NONE" | "none" | "FALSE" | "false")
        ;;
    esac
}
function generate_md5() {
    if var_true $MD5 ; then
        cd $TMPDIR
        md5sum ${TARGET} > ${TARGET}.md5
        MD5VALUE=$(md5sum ${TARGET} | awk '{ print $1 }')
    fi
}
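
# Note: generate_md5 runs before compression(), so the .md5 file refers to the
# uncompressed dump. A verification sketch (assuming COMPRESSION=GZ, with
# <target> standing in for the actual backup name):
#   gunzip <target>.sql.gz && md5sum -c <target>.sql.md5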
function move_backup() {
    case "$SIZE_VALUE" in
        "b" | "bytes" )
            SIZE_VALUE=1
        ;;
        [kK] | [kK][bB] | "kilobytes" | [mM] | [mM][bB] | "megabytes" )
            SIZE_VALUE="-h"
        ;;
        *)
            SIZE_VALUE=1
        ;;
    esac
    # Measure the file in the temporary directory, before it is moved or uploaded
    if [ "$SIZE_VALUE" = "1" ] ; then
        FILESIZE=$(stat -c%s "${TMPDIR}/${TARGET}")
    else
        FILESIZE=$(du -h "${TMPDIR}/${TARGET}" | awk '{ print $1 }')
    fi

    case "${BACKUP_LOCATION}" in
        "FILE" | "file" | "filesystem" | "FILESYSTEM" )
            mkdir -p ${DB_DUMP_TARGET}
            if var_true $MD5 ; then
                mv ${TMPDIR}/*.md5 ${DB_DUMP_TARGET}/
            fi
            mv ${TMPDIR}/${TARGET} ${DB_DUMP_TARGET}/${TARGET}
        ;;
        "S3" | "s3" | "MINIO" | "minio" )
            s3_content_type="application/octet-stream"
            if [ "$S3_URI_STYLE" = "VIRTUALHOST" ] || [ "$S3_URI_STYLE" = "VHOST" ] || [ "$S3_URI_STYLE" = "virtualhost" ] || [ "$S3_URI_STYLE" = "vhost" ] ; then
                s3_url="${S3_BUCKET}.${S3_HOST}"
            else
                s3_url="${S3_HOST}/${S3_BUCKET}"
            fi

            if var_true $MD5 ; then
                s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
                s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}.md5" | base64)"
                sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
                print_debug "Uploading ${TARGET}.md5 to S3"
                curl -T "${TMPDIR}/${TARGET}.md5" ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET}.md5 \
                     -H "Date: $s3_date" \
                     -H "Authorization: AWS ${S3_KEY_ID}:$sig" \
                     -H "Content-Type: ${s3_content_type}" \
                     -H "Content-MD5: ${s3_md5}"
            fi

            s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
            s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}" | base64)"
            sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
            print_debug "Uploading ${TARGET} to S3"
            curl -T ${TMPDIR}/${TARGET} ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET} \
                 -H "Date: $s3_date" \
                 -H "Authorization: AWS ${S3_KEY_ID}:$sig" \
                 -H "Content-Type: ${s3_content_type}" \
                 -H "Content-MD5: ${s3_md5}"

            rm -rf ${TMPDIR}/*.md5
            rm -rf ${TMPDIR}/${TARGET}
        ;;
    esac
}
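
# The S3/Minio uploads above implement AWS Signature Version 2: the signed
# string is "VERB\nContent-MD5\nContent-Type\nDate\n/bucket/path/object",
# HMAC-SHA1'd with the secret key and base64-encoded. A stand-alone sketch of
# the same signing step (assumes libressl is present, as used above):
#   string_to_sign="PUT\n${s3_md5}\n${s3_content_type}\n${s3_date}\n/${S3_BUCKET}/${S3_PATH}/${TARGET}"
#   sig="$(printf "$string_to_sign" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"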
### Container Startup
print_info "Initialized on $(date)"

### Wait until the next scheduled backup time
current_time=$(date +"%s")
today=$(date +"%Y%m%d")

if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
    waittime=$(( ${BASH_REMATCH[1]} * 60 ))
else
    target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
    if [[ "$target_time" -lt "$current_time" ]]; then
        target_time=$(($target_time + 24*60*60))
    fi
    waittime=$(($target_time - $current_time))
fi

sleep $waittime
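
# Examples of the wait calculation above (illustrative): DB_DUMP_BEGIN=+30
# waits 30*60=1800 seconds after startup; DB_DUMP_BEGIN=2330 targets 23:30
# today, rolling forward 24 hours if that time has already passed.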
### Commence Backup
while true; do
    # Make sure the temporary directory exists
    mkdir -p $TMPDIR

    ### Define Target name
    now=$(date +"%Y%m%d-%H%M%S")
    now_time=$(date +"%H:%M:%S")
    now_date=$(date +"%Y-%m-%d")
    TARGET=${DBTYPE}_${DBNAME}_${DBHOST}_${now}.sql

    ### Take a Dump
    case "$DBTYPE" in
        "couch" )
            check_availability
            backup_couch
        ;;
        "influx" )
            check_availability
            backup_influx
        ;;
        "mysql" )
            check_availability
            backup_mysql
        ;;
        "mongo" )
            check_availability
            backup_mongo
        ;;
        "pgsql" )
            check_availability
            backup_pgsql
        ;;
        "redis" )
            check_availability
            backup_redis
        ;;
        "rethink" )
            check_availability
            backup_rethink
        ;;
    esac

    ### Zabbix
    if var_true $ENABLE_ZABBIX ; then
        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o $(stat -c%s ${DB_DUMP_TARGET}/${TARGET})
        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o $(date -r ${DB_DUMP_TARGET}/${TARGET} +'%s')
    fi

    ### Automatic Cleanup
    if [[ -n "$DB_CLEANUP_TIME" ]]; then
        find $DB_DUMP_TARGET/ -mmin +$DB_CLEANUP_TIME -iname "*" -exec rm {} \;
    fi

    ### Post Backup Custom Script Support
    if [ -d /assets/custom-scripts/ ] ; then
        print_info "Found Custom Scripts to Execute"
        for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
            print_info "Running Script ${f}"
            ## script DB_TYPE DB_HOST DB_NAME DATE TIME BACKUP_FILENAME FILESIZE MD5_VALUE
            chmod +x ${f}
            ${f} "${DBTYPE}" "${DBHOST}" "${DBNAME}" "${now_date}" "${now_time}" "${TARGET}" "${FILESIZE}" "${MD5VALUE}"
        done
    fi
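
    # A hypothetical custom script honouring the argument order above, e.g.
    # saved as /assets/custom-scripts/99-notify.sh (sketch only):
    #   #!/bin/bash
    #   # $1=DB_TYPE $2=DB_HOST $3=DB_NAME $4=DATE $5=TIME $6=FILENAME $7=FILESIZE $8=MD5
    #   echo "$(date) backed up $3@$2 to $6 ($7 bytes, md5 $8)" >> /backup/backup.log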
    ### Go back to sleep until the next backup time
    if var_true $MANUAL ; then
        exit 1;
    else
        sleep $(($DB_DUMP_FREQ*60))
    fi

done