Compare commits

...

5 Commits

Author SHA1 Message Date
Dave Conroy
1323966e22 Update README.md 2020-06-17 08:21:12 -07:00
Dave Conroy
310edda88c Reduce size of temporary files
Changed the way backups are performed to reduce temporary files
Removed Rethink Support
Rework MongoDB compression
Remove 'function' keyword prefix from function definitions
Lower-case variable names for easier reading
2020-06-17 08:15:34 -07:00
Dave Conroy
955a08a21b Release 1.23.0 - See CHANGELOG.md 2020-06-15 09:44:07 -07:00
Dave Conroy
bf97c3ab97 Update README.md 2020-06-10 05:48:03 -07:00
Dave Conroy
11969da1ea Release 1.22.0 - See CHANGELOG.md 2020-06-10 05:45:49 -07:00
4 changed files with 194 additions and 162 deletions

View File

@@ -1,3 +1,29 @@
## 2.0.0 2020-06-17 <dave at tiredofit dot ca>
### Added
- Reworked compression routines to remove dependency on temporary files
- Changed the way that MongoDB compression works - only supports GZ going forward
### Changed
- Code cleanup (removed function prefixes, added verbosity)
### Reverted
- Removed Rethink Support
## 1.23.0 2020-06-15 <dave at tiredofit dot ca>
### Added
- Add zstd compression support
- Add choice of compression level
## 1.22.0 2020-06-10 <dave at tiredofit dot ca>
### Added
- Added EXTRA_OPTS variable to all backup commands to pass extra arguments
## 1.21.3 2020-06-10 <dave at tiredofit dot ca>
### Changed
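The 2.0.0 rework above replaces the old dump-then-compress flow with a single pipeline. A minimal sketch of the difference (illustrative shell, not the project's exact code):

# Before 2.0.0: write an uncompressed temporary file, then compress it in place
mysqldump -h "$DB_HOST" mydb > /tmp/backups/backup.sql
gzip -3 /tmp/backups/backup.sql
# From 2.0.0: stream the dump straight through the compressor,
# so no uncompressed temporary copy ever touches the disk
mysqldump -h "$DB_HOST" mydb | gzip -3 > /tmp/backups/backup.sql.gz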

View File

@@ -4,7 +4,7 @@ LABEL maintainer="Dave Conroy (dave at tiredofit dot ca)"
### Set Environment Variables
ENV ENABLE_CRON=FALSE \
ENABLE_SMTP=FALSE \
ENABLE_ZABBIX=FALSE \
ENABLE_ZABBIX=TRUE \
ZABBIX_HOSTNAME=db-backup
### Dependencies
@@ -30,12 +30,13 @@ RUN set -ex && \
postgresql-client \
redis \
xz \
zstd \
&& \
\
apk add \
pixz@testing \
&& \
\
\
mkdir -p /usr/src/pbzip2 && \
curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
cd /usr/src/pbzip2 && \
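pixz is pulled from Alpine's testing repository via the @testing tag. A sketch of the apk mechanics this relies on (an assumption; the tagged repository is declared elsewhere in the base image, not in this hunk):

# An @tag suffix tells apk to use a tagged repository, which must be
# declared in /etc/apk/repositories first, e.g.:
echo "@testing http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories
apk add pixz@testing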

View File

@@ -17,7 +17,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis, Rethink s
* backup all databases
* choose to have an MD5 sum after backup for verification
* delete old backups after a specific amount of time
* choose compression type (none, gz, bz, xz)
* choose compression type (none, gz, bz, xz, zstd) - see the run example after this list
* connect to any container running on the same system
* select how often to run a dump
* select when to start the first dump, whether time of day or relative to container start time
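A hedged run example for the feature list above (the image name and host paths are assumptions based on this repository, not taken from its docs):

docker run -d --name db-backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_USER=root \
  -e DB_PASS=password \
  -e COMPRESSION=ZSTD \
  -v $(pwd)/backups:/backup \
  tiredofit/db-backup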
@@ -83,13 +83,16 @@ The following directories are used for configuration and can be mapped for persi
## Environment Variables
*If you are trying to back up a database that doesn't have a user or a password (you should have one!) make sure you set `CONTAINER_ENABLE_DOCKER_SECRETS=FALSE`*
Along with the Environment Variables from the [Base image](https://hub.docker.com/r/tiredofit/alpine), below is the complete list of available options that can be used to customize your installation.
| Parameter | Description |
|-----------|-------------|
| `BACKUP_LOCATION` | Back up to `FILESYSTEM` or to `S3`-compatible services like S3, Minio, Wasabi - Default `FILESYSTEM`
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, or none `NONE` - Default `GZ`
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD`, or none `NONE` - Default `GZ`
| `COMPRESSION_LEVEL` | Numerical compression level to use; most allow `1` to `9`, except `ZSTD`, which allows `1` to `19` - Default `3` (see the example after this table) |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `rethink`
| `DB_HOST` | Server Hostname e.g. `mariadb`
| `DB_NAME` | Schema Name e.g. `database`
@@ -102,10 +105,12 @@ Along with the Environment Variables from the [Base image](https://hub.docker.co
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` (in an hour and a half)
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when the dump frequency fires). `1440` would delete anything older than 1 day. You don't need to set this variable if you want to keep everything.
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed.
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. "--extra-command"
| `MD5` | Generate MD5 Sum in Directory, `TRUE` or `FALSE` - Default `TRUE`
| `PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` - Default `TRUE` |
| `SPLIT_DB` | If using root as the username with multiple DBs on the system, set to `TRUE` to create separate DB backups instead of all in one - Default `FALSE` |
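To make `COMPRESSION` and `COMPRESSION_LEVEL` concrete, these are the pipelines the script builds from them (taken from the entrypoint changes below; the dump command is a placeholder):

# COMPRESSION=GZ,   COMPRESSION_LEVEL=3  ->  <dump> | pigz -3       > backup.sql.gz
# COMPRESSION=ZSTD, COMPRESSION_LEVEL=19 ->  <dump> | zstd --rm -19 > backup.sql.zst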
**Backing Up to S3 Compatible Services**
If `BACKUP_LOCATION` = `S3` then the following options are used.

View File

@@ -13,25 +13,27 @@ fi
### Sanity Test
sanity_var DB_TYPE "Database Type"
sanity_var DB_HOST "Database Host"
file_env 'DB_USER'
file_env 'DB_PASS'
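`file_env` comes from the base image and is not shown in this diff. A minimal sketch of the usual Docker-secrets pattern it implements (an assumption, not the actual implementation):

file_env() {
    # If e.g. DB_PASS_FILE is set, read DB_PASS from that file;
    # otherwise leave a directly-set DB_PASS untouched.
    local var="$1"
    local file_var="${var}_FILE"
    if [ -n "${!file_var:-}" ] && [ -f "${!file_var}" ]; then
        export "$var"="$(cat "${!file_var}")"
    fi
}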
### Set Defaults
BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
COMPRESSION=${COMPRESSION:-GZ}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
DB_DUMP_TARGET=${DB_DUMP_TARGET:-/backup}
DBHOST=${DB_HOST}
DBNAME=${DB_NAME}
DBPASS=${DB_PASS}
DBTYPE=${DB_TYPE}
DBUSER=${DB_USER}
dbhost=${DB_HOST}
dbname=${DB_NAME}
dbpass=${DB_PASS}
dbtype=${DB_TYPE}
dbuser=${DB_USER}
MD5=${MD5:-TRUE}
PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SPLIT_DB=${SPLIT_DB:-FALSE}
TMPDIR=/tmp/backups
tmpdir=/tmp/backups
if [ "BACKUP_TYPE" = "S3" ] || [ "BACKUP_TYPE" = "s3" ] || [ "BACKUP_TYPE" = "MINIO" ] || [ "BACKUP_TYPE" = "minio" ] ; then
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
@@ -43,7 +45,6 @@ if [ "$BACKUP_LOCATION" = "S3" ] || [ "$BACKUP_LOCATION" = "s3" ] || [ "$BACKUP_LOCATION" = "MI
sanity_var S3_PATH "S3 Path"
file_env 'S3_KEY_ID'
file_env 'S3_KEY_SECRET'
fi
if [ "$1" = "NOW" ]; then
@@ -53,136 +54,140 @@ fi
### Set Compression Options
if var_true $PARALLEL_COMPRESSION ; then
BZIP="pbzip2"
GZIP="pigz"
XZIP="pixz"
bzip="pbzip2 -${COMPRESSION_LEVEL}"
gzip="pigz -${COMPRESSION_LEVEL}"
xzip="pixz -${COMPRESSION_LEVEL}"
zstd="zstd --rm -${COMPRESSION_LEVEL}"
else
BZIP="bzip2"
GZIP="gzip"
XZIP="xz"
bzip="bzip2 -${COMPRESSION_LEVEL}"
gzip="gzip -${COMPRESSION_LEVEL}"
xzip="xz -${COMPRESSION_LEVEL} "
zstd="zstd --rm -${COMPRESSION_LEVEL}"
fi
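The parallel tools are drop-in replacements for their single-threaded counterparts in a pipe. A quick way to convince yourself (illustrative; run anywhere the tools are installed):

printf 'round trip\n' | pigz -3   | gzip -d    # pigz output is valid gzip
printf 'round trip\n' | pbzip2 -3 | bzip2 -d   # pbzip2 output is valid bzip2
printf 'round trip\n' | zstd -3   | zstd -d    # zstd decompresses its own output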
### Set the Database Type
case "$DBTYPE" in
case "$dbtype" in
"couch" | "couchdb" | "COUCH" | "COUCHDB" )
DBTYPE=couch
DBPORT=${DB_PORT:-5984}
dbtype=couch
dbport=${DB_PORT:-5984}
;;
"influx" | "influxdb" | "INFLUX" | "INFLUXDB" )
DBTYPE=influx
DBPORT=${DB_PORT:-8088}
dbtype=influx
dbport=${DB_PORT:-8088}
;;
"mongo" | "mongodb" | "MONGO" | "MONGODB" )
DBTYPE=mongo
DBPORT=${DB_PORT:-27017}
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DBUSER}"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DBPASS}"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DBNAME}"
dbtype=mongo
dbport=${DB_PORT:-27017}
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
;;
"mysql" | "MYSQL" | "mariadb" | "MARIADB")
DBTYPE=mysql
DBPORT=${DB_PORT:-3306}
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DBPASS}
dbtype=mysql
dbport=${DB_PORT:-3306}
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
;;
"postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" )
DBTYPE=pgsql
DBPORT=${DB_PORT:-5432}
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DBPASS}"
dbtype=pgsql
dbport=${DB_PORT:-5432}
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
;;
"redis" | "REDIS" )
DBTYPE=redis
DBPORT=${DB_PORT:-6379}
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DBPASS}"
;;
"rethink" | "RETHINK" )
DBTYPE=rethink
DBPORT=${DB_PORT:-28015}
[[ ( -n "${DB_PASS}" ) ]] && echo $DB_PASS>/tmp/.rethink.auth; RETHINK_PASS_STR=" --password-file /tmp/.rethink.auth"
[[ ( -n "${DB_NAME}" ) ]] && RETHINK_DB_STR=" -e ${DBNAME}"
dbtype=redis
dbport=${DB_PORT:-6379}
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
;;
esac
### Functions
function backup_couch() {
TARGET=couch_${DBNAME}_${DBHOST}_${now}.txt
curl -X GET http://${DBHOST}:${DBPORT}/${DBNAME}/_all_docs?include_docs=true >${TMPDIR}/${TARGET}
generate_md5
backup_couch() {
target=couch_${dbname}_${dbhost}_${now}.txt
compression
curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true | $dumpoutput > ${tmpdir}/${target}
generate_md5
move_backup
}
function backup_mysql() {
backup_mysql() {
if var_true $SPLIT_DB ; then
DATABASES=`mysql -h ${DBHOST} -P $DBPORT -u$DBUSER --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema`
DATABASES=`mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema`
for db in $DATABASES; do
if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
echo "** [db-backup] Dumping database: $db"
TARGET=mysql_${db}_${DBHOST}_${now}.sql
mysqldump --max-allowed-packet=512M -h $DBHOST -P $DBPORT -u$DBUSER --databases $db > ${TMPDIR}/${TARGET}
generate_md5
print_notice "Dumping MariaDB database: $db"
target=mysql_${db}_${dbhost}_${now}.sql
compression
mysqldump --max-allowed-packet=512M -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} --databases $db | $dumpoutput > ${tmpdir}/${target}
generate_md5
move_backup
fi
done
else
mysqldump --max-allowed-packet=512M -A -h $DBHOST -P $DBPORT -u$DBUSER > ${TMPDIR}/${TARGET}
generate_md5
compression
mysqldump --max-allowed-packet=512M -A -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
generate_md5
move_backup
fi
}
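For reference, a dump produced this way restores with the mirror-image pipeline (illustrative; host, user, and filename are placeholders):

zcat /backup/mysql_mydb_mariadb_20200617-081512.sql.gz | mysql -h mariadb -uroot -p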
function backup_influx() {
backup_influx() {
if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
:
else
print_notice "Compressing InfluxDB backup with gzip"
influx_compression="-portable"
fi
for DB in $DB_NAME; do
influxd backup -database $DB -host ${DBHOST}:${DBPORT} ${TMPDIR}/${TARGET}
influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${tmpdir}/${target}
generate_md5
compression
move_backup
done
}
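The matching restore for the portable backup format (a sketch; the path is a placeholder):

influxd restore -portable -db mydb /backup/influx_mydb_influxdb_20200617-081512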
function backup_mongo() {
mongodump --out ${TMPDIR}/${TARGET} --host ${DBHOST} --port ${DBPORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
cd ${TMPDIR}
tar cf ${TARGET}.tar ${TARGET}/*
TARGET=${TARGET}.tar
backup_mongo() {
if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
target=${dbtype}_${dbname}_${dbhost}_${now}.archive
else
print_notice "Compressing MongoDB backup with gzip"
target=${dbtype}_${dbname}_${dbhost}_${now}.archive.gz
mongo_compression="--gzip"
fi
mongodump --archive=${tmpdir}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
cd ${tmpdir}
generate_md5
compression
move_backup
}
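The archive format pairs with mongorestore's matching flags (illustrative; host and filename are placeholders):

mongorestore --host mongo --port 27017 --gzip --archive=/backup/mongo_mydb_mongo_20200617-081512.archive.gz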
function backup_pgsql() {
backup_pgsql() {
if var_true $SPLIT_DB ; then
export PGPASSWORD=${DBPASS}
DATABASES=`psql -h $DBHOST -U $DBUSER -p ${DBPORT} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' `
export PGPASSWORD=${dbpass}
DATABASES=`psql -h $dbhost -U $dbuser -p ${dbport} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' `
for db in $DATABASES; do
print_info "Dumping database: $db"
TARGET=pgsql_${db}_${DBHOST}_${now}.sql
pg_dump -h ${DBHOST} -p ${DBPORT} -U ${DBUSER} $db > ${TMPDIR}/${TARGET}
generate_md5
target=pgsql_${db}_${dbhost}_${now}.sql
compression
pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
generate_md5
move_backup
done
else
export PGPASSWORD=${DBPASS}
pg_dump -h ${DBHOST} -U ${DBUSER} -p ${DBPORT} ${DBNAME} > ${TMPDIR}/${TARGET}
generate_md5
export PGPASSWORD=${dbpass}
compression
pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
generate_md5
move_backup
fi
}
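And the Postgres counterpart, here assuming a zstd-compressed dump (illustrative placeholders):

zstd -dc /backup/pgsql_mydb_postgres_20200617-081512.sql.zst | psql -h postgres -U postgres mydb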
function backup_redis() {
TARGET=redis_${db}_${DBHOST}_${now}.rdb
echo bgsave | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} --rdb ${TMPDIR}/${TARGET}
backup_redis() {
target=redis_${dbname}_${dbhost}_${now}.rdb
echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${tmpdir}/${target} ${EXTRA_OPTS}
print_info "Dumping Redis - Flushing Redis Cache First"
sleep 10
try=5
while [ $try -gt 0 ] ; do
saved=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
print_info "Redis Backup Complete"
fi
@@ -195,116 +200,111 @@ function backup_redis() {
move_backup
}
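The loop above polls the same INFO fields you can inspect by hand (illustrative host and port):

redis-cli -h redis -p 6379 info Persistence | grep -E 'rdb_bgsave_in_progress|rdb_last_bgsave_status'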
function backup_rethink() {
TARGET=rethink_${db}_${DBHOST}_${now}.tar.gz
print_info "Dumping rethink Database: $db"
rethinkdb dump -f ${TMPDIR}/${TARGET} -c ${DBHOST}:${DBPORT} ${RETHINK_PASS_STR} ${RETHINK_DB_STR}
move_backup
}
function check_availability() {
check_availability() {
### Set the Database Type
case "$DBTYPE" in
case "$dbtype" in
"couch" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
let COUNTER+=5
print_warn "CouchDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
print_warn "CouchDB Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"influx" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
let COUNTER+=5
print_warn "InfluxDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
print_warn "InfluxDB Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"mongo" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
let COUNTER+=5
print_warn "Mongo Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
print_warn "Mongo Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"mysql" )
COUNTER=0
while true; do
mysqlcmd='mysql -u'${DBUSER}' -P '${DBPORT}' -h '${DBHOST}' -p'${DBPASS}
mysqlcmd='mysql -u'${dbuser}' -P '${dbport}' -h '${dbhost}' -p'${dbpass}
out="`$mysqlcmd -e "SELECT COUNT(*) FROM information_schema.FILES;" 2>&1`"
echo "$out" | grep -E "COUNT|Enter" 2>&1 > /dev/null
if [ $? -eq 0 ]; then
:
break
fi
print_warn "MySQL/MariaDB Server "$DBHOST" is not accessible, retrying.. ($COUNTER seconds so far)"
print_warn "MySQL/MariaDB Server "$dbhost" is not accessible, retrying.. ($COUNTER seconds so far)"
sleep 5
let COUNTER+=5
done
;;
"pgsql" )
# Wait until postgres reports ready (retrying every 5s)
COUNTER=0
export PGPASSWORD=${DBPASS}
until pg_isready --dbname=${DBNAME} --host=${DBHOST} --port=${DBPORT} --username=${DBUSER} -q
export PGPASSWORD=${dbpass}
until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
do
sleep 5
let COUNTER+=5
print_warn "Postgres Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
print_warn "Postgres Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"redis" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
let COUNTER+=5
print_warn "Redis Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"rethink" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
sleep 5
let COUNTER+=5
print_warn "RethinkDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
print_warn "Redis Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
esac
}
function compression() {
compression() {
case "$COMPRESSION" in
"GZ" | "gz" | "gzip" | "GZIP")
$GZIP ${TMPDIR}/${TARGET}
TARGET=${TARGET}.gz
print_notice "Compressing backup with gzip"
target=${target}.gz
dumpoutput="$gzip "
;;
"BZ" | "bz" | "bzip2" | "BZIP2" | "bzip" | "BZIP" | "bz2" | "BZ2")
$BZIP ${TMPDIR}/${TARGET}
TARGET=${TARGET}.bz2
print_notice "Compressing backup with bzip2"
target=${target}.bz2
dumpoutput="$bzip "
;;
"XZ" | "xz" | "XZIP" | "xzip" )
$XZIP ${TMPDIR}/${TARGET}
TARGET=${TARGET}.xz
print_notice "Compressing backup with xzip"
target=${target}.xz
dumpoutput="$xzip "
;;
"ZSTD" | "zstd" | "ZST" | "zst" )
print_notice "Compressing backup with zstd"
target=${target}.zst
dumpoutput="$zstd "
;;
"NONE" | "none" | "FALSE" | "false")
dumpoutput="cat "
;;
esac
}
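compression() only picks the filename suffix and the filter command; each backup function then wires them together. The same pipeline, standalone (illustrative input):

# e.g. COMPRESSION=GZ, COMPRESSION_LEVEL=6 yields dumpoutput="pigz -6", so:
printf 'SELECT 1;\n' | pigz -6 > /tmp/example.sql.gz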
function generate_md5() {
generate_md5() {
if var_true $MD5 ; then
cd $TMPDIR
md5sum ${TARGET} > ${TARGET}.md5
MD5VALUE=$(md5sum ${TARGET} | awk '{ print $1}')
print_notice "Generating MD5 for ${target}"
cd $tmpdir
md5sum ${target} > ${target}.md5
MD5VALUE=$(md5sum ${target} | awk '{ print $1}')
fi
}
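The .md5 file written next to each backup can be verified later with the standard tool (illustrative filename):

cd /backup && md5sum -c mysql_mydb_mariadb_20200617-081512.sql.gz.md5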
function move_backup() {
move_backup() {
case "$SIZE_VALUE" in
"b" | "bytes" )
SIZE_VALUE=1
;;
"[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
SIZE_VALUE="-h"
@@ -314,16 +314,18 @@ function move_backup() {
;;
esac
if [ "$SIZE_VALUE" = "1" ] ; then
FILESIZE=$(stat -c%s "${DB_DUMP_TARGET}/${TARGET}")
FILESIZE=$(stat -c%s "${tmpdir}/${target}")
print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
else
FILESIZE=$(du -h "${DB_DUMP_TARGET}/${TARGET}" | awk '{ print $1}')
FILESIZE=$(du -h "${tmpdir}/${target}" | awk '{ print $1}')
print_notice "Backup of ${target} created with the size of ${FILESIZE}"
fi
case "${BACKUP_LOCATION}" in
"FILE" | "file" | "filesystem" | "FILESYSTEM" )
mkdir -p ${DB_DUMP_TARGET}
mv ${TMPDIR}/*.md5 ${DB_DUMP_TARGET}/
mv ${TMPDIR}/${TARGET} ${DB_DUMP_TARGET}/${TARGET}
mv ${tmpdir}/*.md5 ${DB_DUMP_TARGET}/
mv ${tmpdir}/${target} ${DB_DUMP_TARGET}/${target}
;;
"S3" | "s3" | "MINIO" | "minio" )
s3_content_type="application/octet-stream"
@@ -335,10 +337,10 @@ function move_backup() {
if var_true $MD5 ; then
s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}.md5" | base64)"
sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
print_debug "Uploading ${TARGET}.md5 to S3"
curl -T "${TMPDIR}/${TARGET}.md5" ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET}.md5 \
s3_md5="$(libressl md5 -binary < "${tmpdir}/${target}.md5" | base64)"
sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${target}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
print_debug "Uploading ${target}.md5 to S3"
curl -T "${tmpdir}/${target}.md5" ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${target}.md5 \
-H "Date: $date" \
-H "Authorization: AWS ${S3_KEY_ID}:$sig" \
-H "Content-Type: ${s3_content_type}" \
@@ -346,24 +348,24 @@ function move_backup() {
fi
s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}" | base64)"
sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
print_debug "Uploading ${TARGET} to S3"
curl -T ${TMPDIR}/${TARGET} ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET} \
s3_md5="$(libressl md5 -binary < "${tmpdir}/${target}" | base64)"
sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${target}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
print_debug "Uploading ${target} to S3"
curl -T ${tmpdir}/${target} ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${target} \
-H "Date: $s3_date" \
-H "Authorization: AWS ${S3_KEY_ID}:$sig" \
-H "Content-Type: ${s3_content_type}" \
-H "Content-MD5: ${s3_md5}"
rm -rf ${TMPDIR}/*.md5
rm -rf ${TMPDIR}/${TARGET}
rm -rf ${tmpdir}/*.md5
rm -rf ${tmpdir}/${target}
;;
esac
}
### Container Startup
print_info "Initialized on `date`"
print_info "Backup routines Initialized on `date`"
### Wait for Next time to start backup
current_time=$(date +"%s")
@@ -385,65 +387,63 @@ print_info "Initialized on `date`"
### Commence Backup
while true; do
# make sure the directory exists
mkdir -p $TMPDIR
mkdir -p $tmpdir
### Define Target name
now=$(date +"%Y%m%d-%H%M%S")
now_time=$(date +"%H:%M:%S")
now_date=$(date +"%Y-%m-%d")
TARGET=${DBTYPE}_${DBNAME}_${DBHOST}_${now}.sql
target=${dbtype}_${dbname}_${dbhost}_${now}.sql
### Take a Dump
case "$DBTYPE" in
case "$dbtype" in
"couch" )
check_availability
backup_couch
;;
"influx" )
check_availability
backup_influx
;;
"mysql" )
check_availability
backup_mysql
;;
"mongo" )
check_availability
backup_mongo
;;
"pgsql" )
check_availability
backup_pgsql
;;
"redis" )
check_availability
backup_redis
;;
"rethink" )
check_availability
backup_rethink
;;
esac
### Zabbix
if var_true $ENABLE_ZABBIX ; then
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${TARGET}`
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${TARGET} +'%s'`
print_notice "Sending Backup Statistics to Zabbix"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${target}`
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${target} +'%s'`
fi
### Automatic Cleanup
if [[ -n "$DB_CLEANUP_TIME" ]]; then
print_notice "Cleaning up old backups"
find $DB_DUMP_TARGET/ -mmin +$DB_CLEANUP_TIME -iname "*" -exec rm {} \;
fi
### Post Backup Custom Script Support
if [ -d /assets/custom-scripts/ ] ; then
print_info "Found Custom Scripts to Execute"
print_notice "Found Custom Scripts to Execute"
for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
print_info "Running Script ${f}"
print_notice "Running Script ${f}"
## script DB_TYPE DB_HOST DB_NAME DATE TIME BACKUP_FILENAME FILESIZE MD5_VALUE
chmod +x ${f}
${f} "${DBTYPE}" "${DBHOST}" "${DBNAME}" "${now_date}" "${now_time}" "${TARGET}" "${FILESIZE}" "${MD5VALUE}"
${f} "${dbtype}" "${dbhost}" "${dbname}" "${now_date}" "${now_time}" "${target}" "${FILESIZE}" "${MD5VALUE}"
done
fi
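A minimal post-backup hook matching the argument order above (a sketch; the filename and behaviour are hypothetical):

#!/usr/bin/env bash
# /assets/custom-scripts/notify.sh (hypothetical) - log every completed backup
# $1 DB_TYPE  $2 DB_HOST  $3 DB_NAME  $4 DATE  $5 TIME  $6 BACKUP_FILENAME  $7 FILESIZE  $8 MD5_VALUE
echo "$(date) backup of $3@$2 ($1) -> $6 (size $7, md5 $8)" >> /backup/backup.log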