From 88218915e18006c91ed01f58789c2520ed5addc0 Mon Sep 17 00:00:00 2001
From: Dave Conroy
Date: Thu, 20 Jan 2022 09:23:06 -0800
Subject: [PATCH] Release 2.11.0 - See CHANGELOG.md

---
 CHANGELOG.md                                |  10 ++
 LICENSE                                     |   2 +-
 README.md                                   | 105 ++++++++----------
 .../etc/services.available/10-db-backup/run |  71 ++++++------
 4 files changed, 95 insertions(+), 93 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0ed7413..1802be2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,13 @@
+## 2.11.0 2022-01-20
+
+### Added
+- Add capability to select a `TEMP_LOCATION` where the initial backup and compression are performed before the backup completes, to avoid filling system memory
+
+### Changed
+- Cleanup of the MariaDB/MySQL readiness-check routines that only partially worked in 2.10.3
+- Code cleanup
+
+
 ## 2.10.3 2022-01-07
 
 ### Changed
diff --git a/LICENSE b/LICENSE
index 81d2163..5dd7a07 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright (c) 2021 Dave Conroy
+Copyright (c) 2022 Dave Conroy
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index 02172e6..945df14 100644
--- a/README.md
+++ b/README.md
@@ -31,40 +31,29 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 
 ## Table of Contents
 
-- [github.com/tiredofit/docker-db-backup](#githubcomtiredofitdocker-db-backup)
-  - [About](#about)
-  - [Maintainer](#maintainer)
-  - [Table of Contents](#table-of-contents)
-  - [Prerequisites and Assumptions](#prerequisites-and-assumptions)
-  - [Installation](#installation)
-    - [Build from Source](#build-from-source)
-    - [Prebuilt Images](#prebuilt-images)
-  - [Configuration](#configuration)
-    - [Quick Start](#quick-start)
-    - [Persistent Storage](#persistent-storage)
-    - [Environment Variables](#environment-variables)
-      - [Base Images used](#base-images-used)
-      - [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
-  - [Maintenance](#maintenance)
-    - [Shell Access](#shell-access)
-  - [Manual Backups](#manual-backups)
-  - [Custom Scripts](#custom-scripts)
- [#### Example Post Script](#-example-post-script)
- [#### $1=EXIT_CODE (After running backup routine)](#-1exit_code-after-running-backup-routine)
- [#### $2=DB_TYPE (Type of Backup)](#-2db_type-type-of-backup)
- [#### $3=DB_HOST (Backup Host)](#-3db_host-backup-host)
- [#### #4=DB_NAME (Name of Database backed up](#-4db_name-name-of-database-backed-up)
- [#### $5=DATE (Date of Backup)](#-5date-date-of-backup)
- [#### $6=TIME (Time of Backup)](#--6time-time-of-backup)
- [#### $7=BACKUP_FILENAME (Filename of Backup)](#--7backup_filename-filename-of-backup)
- [#### $8=FILESIZE (Filesize of backup)](#--8filesize-filesize-of-backup)
- [#### $9=MD5_RESULT (MD5Sum if enabled)](#--9md5_result-md5sum-if-enabled)
-  - [Support](#support)
-    - [Usage](#usage)
-    - [Bugfixes](#bugfixes)
-    - [Feature Requests](#feature-requests)
-    - [Updates](#updates)
-  - [License](#license)
+- [About](#about)
+- [Maintainer](#maintainer)
+- [Table of Contents](#table-of-contents)
+- [Prerequisites and Assumptions](#prerequisites-and-assumptions)
+- [Installation](#installation)
+  - [Build from Source](#build-from-source)
+  - [Prebuilt Images](#prebuilt-images)
+- [Configuration](#configuration)
+  - [Quick Start](#quick-start)
+  - [Persistent Storage](#persistent-storage)
+  - [Environment Variables](#environment-variables)
+  - [Base Images used](#base-images-used)
+  - [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
+- [Maintenance](#maintenance)
+  - [Shell Access](#shell-access)
+  - [Manual Backups](#manual-backups)
+  - [Custom Scripts](#custom-scripts)
+- [Support](#support)
+  - [Usage](#usage)
+  - [Bugfixes](#bugfixes)
+  - [Feature Requests](#feature-requests)
+  - [Updates](#updates)
+- [License](#license)
 
 ## Prerequisites and Assumptions
 
@@ -120,29 +109,31 @@ Be sure to view the following repositories to understand all the customizable options:
 
 | [OS Base](https://github.com/tiredofit/docker-alpine/) | Customized Image based on Alpine Linux |
 
-| Parameter              | Description |
-| ---------------------- | ----------- |
-| `BACKUP_LOCATION`      | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi - Default `FILESYSTEM` |
-| `COMPRESSION`          | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` - Default `GZ` |
-| `COMPRESSION_LEVEL`    | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - Default `3` |
-| `DB_AUTH`              | (Mongo Only - Optional) Authentication Database |
-| `DB_TYPE`              | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` |
-| `DB_HOST`              | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` |
-| `DB_NAME`              | Schema Name e.g. `database` |
-| `DB_USER`              | username for the database - use `root` to backup all MySQL of them. |
-| `DB_PASS`              | (optional if DB doesn't require it) password for the database |
-| `DB_PORT`              | (optional) Set port to connect to DB_HOST. Defaults are provided |
-| `DB_DUMP_FREQ`         | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. |
-| `DB_DUMP_BEGIN`        | What time to do the first dump. Defaults to immediate. Must be in one of two formats |
-|                        | Absolute HHMM, e.g. `2330` or `0415` |
-|                        | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half |
-| `DB_CLEANUP_TIME`      | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. |
-| `DEBUG_MODE`           | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. |
-| `EXTRA_OPTS`           | If you need to pass extra arguments to the backup command, add them here e.g. "--extra-command" |
-| `MD5`                  | Generate MD5 Sum in Directory, `TRUE` or `FALSE` - Default `TRUE` |
-| `PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` - Default `TRUE` |
-| `POST_SCRIPT`          | Fill this variable in with a command to execute post the script backing up | |
-| `SPLIT_DB`             | If using root as username and multiple DBs on system, set to TRUE to create Seperate DB Backups instead of all in one. - Default `FALSE` |
+| Parameter                  | Description                                                                                                                                                                     | Default         |
+| -------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------- |
+| `BACKUP_LOCATION`          | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi                                                                                                       | `FILESYSTEM`    |
+| `COMPRESSION`              | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE`                                                                                                         | `GZ`            |
+| `COMPRESSION_LEVEL`        | Numerical compression level; most methods allow `1` to `9`, except `ZSTD`, which allows `1` to `19`                                                                             | `3`             |
+| `DB_AUTH`                  | (Mongo Only - Optional) Authentication Database                                                                                                                                 |                 |
+| `DB_TYPE`                  | Type of DB Server to backup: `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3`                                                                                         |                 |
+| `DB_HOST`                  | Server hostname e.g. `mariadb`. For `sqlite3`, full path to the DB file e.g. `/backup/db.sqlite3`                                                                               |                 |
+| `DB_NAME`                  | Schema name e.g. `database`                                                                                                                                                     |                 |
+| `DB_USER`                  | Username for the database. Use `root` to back up all MySQL databases.                                                                                                           |                 |
+| `DB_PASS`                  | Password for the database (optional if the DB doesn't require one)                                                                                                              |                 |
+| `DB_PORT`                  | (optional) Port to connect to on `DB_HOST`. Sensible defaults are provided                                                                                                      | varies          |
+| `DB_DUMP_FREQ`             | How often to perform a dump, in minutes. `1440` dumps once per day.                                                                                                             | `1440`          |
+| `DB_DUMP_BEGIN`            | What time to do the first dump. Defaults to immediate. Must be in one of two formats                                                                                            |                 |
+|                            | Absolute HHMM, e.g. `2330` or `0415`                                                                                                                                            |                 |
+|                            | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` (in an hour and a half)                                |                 |
+| `DB_CLEANUP_TIME`          | Age in minutes past which old backups are deleted (checked whenever the dump frequency fires). `1440` would delete anything older than 1 day. Leave unset to keep all backups.  | `FALSE`         |
+| `DEBUG_MODE`               | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed.                                                          |                 |
+| `EXTRA_OPTS`               | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command`                                                                                 |                 |
+| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet when backing up MySQL / MariaDB                                                                                                                              | `512M`          |
+| `MD5`                      | Generate an MD5 sum of each backup in the target directory, `TRUE` or `FALSE`                                                                                                   | `TRUE`          |
+| `PARALLEL_COMPRESSION`     | Use multiple cores when compressing backups, `TRUE` or `FALSE`                                                                                                                  | `TRUE`          |
+| `POST_SCRIPT`              | Command to execute after the backup routine completes                                                                                                                           |                 |
+| `SPLIT_DB`                 | If using `root` as the username with multiple DBs on the system, set to `TRUE` to create separate backups per database instead of one combined dump                             | `FALSE`         |
+| `TEMP_LOCATION`            | Perform backups and compression in this temporary directory                                                                                                                     | `/tmp/backups/` |
 
 When using compression with MongoDB, only `GZ` compression is possible.
 
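The headline change in this release is the new `TEMP_LOCATION` variable. A minimal sketch of how it might be used, assuming the published `tiredofit/db-backup` image name and placeholder host paths (neither is part of this patch):

```bash
# Hypothetical invocation: point TEMP_LOCATION at the disk-backed /backup
# volume so large dumps are staged and compressed on disk rather than in
# /tmp, which may be memory-backed. Host path and image tag are placeholders.
docker run -d --name db-backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=database \
  -e DB_USER=root \
  -e DB_PASS=password \
  -e COMPRESSION=GZ \
  -e TEMP_LOCATION=/backup/tmp \
  -v /srv/backups:/backup \
  tiredofit/db-backup
```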
diff --git a/install/etc/services.available/10-db-backup/run b/install/etc/services.available/10-db-backup/run
index 7955207..801f769 100755
--- a/install/etc/services.available/10-db-backup/run
+++ b/install/etc/services.available/10-db-backup/run
@@ -39,7 +39,8 @@ case "$dbtype" in
         dbtype=mysql
         dbport=${DB_PORT:-3306}
         [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
-    ;;
+        MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
+    ;;
     "mssql" | "MSSQL" | "microsoftsql" | "MICROSOFTSQL")
         apkArch="$(apk --print-arch)"; \
         case "$apkArch" in
@@ -66,20 +67,20 @@ esac
 
 ### Set Defaults
 BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
-COMPRESSION=${COMPRESSION:-GZ}
+COMPRESSION=${COMPRESSION:-"GZ"}
 COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
 DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
 DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
-DB_DUMP_TARGET=${DB_DUMP_TARGET:-/backup}
+DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
 dbhost=${DB_HOST}
 dbname=${DB_NAME}
 dbpass=${DB_PASS}
 dbuser=${DB_USER}
 MD5=${MD5:-TRUE}
-PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
+PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-"TRUE"}
 SIZE_VALUE=${SIZE_VALUE:-"bytes"}
-SPLIT_DB=${SPLIT_DB:-FALSE}
-tmpdir=/tmp/backups
+SPLIT_DB=${SPLIT_DB:-"FALSE"}
+TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
 
 if [ "$BACKUP_TYPE" = "S3" ] || [ "$BACKUP_TYPE" = "s3" ] || [ "$BACKUP_TYPE" = "MINIO" ] || [ "$BACKUP_TYPE" = "minio" ] ; then
     S3_PROTOCOL=${S3_PROTOCOL:-"https"}
@@ -100,7 +101,7 @@ if [ "$1" = "NOW" ]; then
 fi
 
 ### Set Compression Options
-if var_true "$PARALLEL_COMPRESSION" ; then
+if var_true "${PARALLEL_COMPRESSION}" ; then
     bzip="pbzip2 -${COMPRESSION_LEVEL}"
     gzip="pigz -${COMPRESSION_LEVEL}"
     xzip="pixz -${COMPRESSION_LEVEL}"
@@ -135,7 +136,7 @@ esac
 backup_couch() {
     target=couch_${dbname}_${dbhost}_${now}.txt
     compression
-    curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${dumpoutput} | $dumpoutput > ${tmpdir}/${target}
+    curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true | $dumpoutput > ${TEMP_LOCATION}/${target}
     exit_code=$?
     generate_md5
     move_backup
@@ -148,9 +149,9 @@ backup_influx() {
         print_notice "Compressing InfluxDB backup with gzip"
         influx_compression="-portable"
     fi
-    for DB in $DB_NAME; do
+    for DB in ${DB_NAME}; do
         target=influx_${DB}_${dbhost}_${now}
-        influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${tmpdir}/${target}
+        influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${TEMP_LOCATION}/${target}
         exit_code=$?
         generate_md5
         move_backup
@@ -165,28 +166,28 @@
 backup_mongo() {
         target=${dbtype}_${dbname}_${dbhost}_${now}.archivegz
         mongo_compression="--gzip"
     fi
-    mongodump --archive=${tmpdir}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+    mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
     exit_code=$?
-    cd ${tmpdir}
+    cd ${TEMP_LOCATION}
     generate_md5
     move_backup
 }
 
 backup_mssql() {
     target=mssql_${dbname}_${dbhost}_${now}.bak
-    /opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} –Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${tmpdir}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+    /opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} -Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
 }
 
 backup_mysql() {
-    if var_true "$SPLIT_DB" ; then
-        DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema)
+    if var_true "${SPLIT_DB}" ; then
+        DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema)
         for db in $DATABASES; do
             if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
                 print_notice "Dumping MariaDB database: $db"
                 target=mysql_${db}_${dbhost}_${now}.sql
                 compression
-                mysqldump --max-allowed-packet=512M -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} --databases $db | $dumpoutput > ${tmpdir}/${target}
+                mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} --databases $db | $dumpoutput > ${TEMP_LOCATION}/${target}
                 exit_code=$?
                 generate_md5
                 move_backup
@@ -194,7 +195,7 @@ backup_mysql() {
         done
     else
         compression
-        mysqldump --max-allowed-packet=512M -A -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
+        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -A -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
         exit_code=$?
         generate_md5
         move_backup
@@ -202,7 +203,7 @@
 }
 
 backup_pgsql() {
-    if var_true $SPLIT_DB ; then
+    if var_true "${SPLIT_DB}" ; then
         export PGPASSWORD=${dbpass}
         authdb=${DB_USER}
         [ -n "${DB_NAME}" ] && authdb=${DB_NAME}
@@ -211,7 +212,7 @@
             print_info "Dumping database: $db"
             target=pgsql_${db}_${dbhost}_${now}.sql
             compression
-            pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
+            pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
             exit_code=$?
             generate_md5
             move_backup
@@ -219,7 +220,7 @@
     else
         export PGPASSWORD=${dbpass}
         compression
-        pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
+        pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
         exit_code=$?
         generate_md5
         move_backup
@@ -228,7 +229,7 @@
 
 backup_redis() {
     target=redis_${db}_${dbhost}_${now}.rdb
-    echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${tmpdir}/${target} ${EXTRA_OPTS}
+    echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
     print_info "Dumping Redis - Flushing Redis Cache First"
     sleep 10
     try=5
@@ -245,7 +246,7 @@
     done
     target_original=${target}
     compression
-    $dumpoutput "${tmpdir}/${target_original}"
+    $dumpoutput "${TEMP_LOCATION}/${target_original}"
     generate_md5
     move_backup
 }
@@ -257,10 +258,10 @@
     compression
 
     print_info "Dumping sqlite3 database: ${dbhost}"
-    sqlite3 "${dbhost}" ".backup '${tmpdir}/backup.sqlite3'"
+    sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
     exit_code=$?
 
-    cat "${tmpdir}/backup.sqlite3" | $dumpoutput > "${tmpdir}/${target}"
+    cat "${TEMP_LOCATION}/backup.sqlite3" | $dumpoutput > "${TEMP_LOCATION}/${target}"
 
     generate_md5
     move_backup
@@ -296,10 +297,10 @@ check_availability() {
         "mysql" )
             COUNTER=0
             export MYSQL_PWD=${dbpass}
-            while ! (mysql -u"${dbuser}" -P"${dbport}" -h"${dbhost}" -e "SHOW GRANTS FOR CURRENT_USER;" 2>&1 | grep -E "GRANT ALL PRIVILEGES ON \`${DB_NAME}\`" > /dev/null 2>&1) ; do
+            while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do
                 sleep 5
                 (( COUNTER+=5 ))
-                print_warn "MySQL/MariaDB Server '${maria_host}' is not accessible, retrying.. ($counter seconds so far)"
+                print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)"
             done
         ;;
         "mssql" )
@@ -377,7 +378,7 @@ compression() {
 generate_md5() {
     if var_true "$MD5" ; then
         print_notice "Generating MD5 for ${target}"
-        cd $tmpdir
+        cd ${TEMP_LOCATION}
         md5sum "${target}" > "${target}".md5
         MD5VALUE=$(md5sum "${target}" | awk '{ print $1}')
     fi
@@ -397,18 +398,18 @@ move_backup() {
         ;;
     esac
     if [ "$SIZE_VALUE" = "1" ] ; then
-        FILESIZE=$(stat -c%s "${tmpdir}/${target}")
+        FILESIZE=$(stat -c%s "${TEMP_LOCATION}/${target}")
         print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
     else
-        FILESIZE=$(du -h "${tmpdir}/${target}" | awk '{ print $1}')
+        FILESIZE=$(du -h "${TEMP_LOCATION}/${target}" | awk '{ print $1}')
         print_notice "Backup of ${target} created with the size of ${FILESIZE}"
     fi
 
     case "${BACKUP_LOCATION}" in
         "FILE" | "file" | "filesystem" | "FILESYSTEM" )
             mkdir -p "${DB_DUMP_TARGET}"
-            mv ${tmpdir}/*.md5 "${DB_DUMP_TARGET}"/
-            mv ${tmpdir}/"${target}" "${DB_DUMP_TARGET}"/"${target}"
+            mv ${TEMP_LOCATION}/*.md5 "${DB_DUMP_TARGET}"/
+            mv ${TEMP_LOCATION}/"${target}" "${DB_DUMP_TARGET}"/"${target}"
         ;;
         "S3" | "s3" | "MINIO" | "minio" )
             export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
@@ -417,10 +418,10 @@ move_backup() {
 
             [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
 
-            aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${tmpdir}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target}
+            aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target}
 
-            rm -rf ${tmpdir}/*.md5
-            rm -rf ${tmpdir}/"${target}"
+            rm -rf ${TEMP_LOCATION}/*.md5
+            rm -rf ${TEMP_LOCATION}/"${target}"
         ;;
     esac
 }
@@ -453,7 +454,7 @@ print_debug "Backup routines Initialized on $(date)"
### Commence Backup
 while true; do
     # make sure the directory exists
-    mkdir -p $tmpdir
+    mkdir -p "${TEMP_LOCATION}"
 
     ### Define Target name
     now=$(date +"%Y%m%d-%H%M%S")
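
For reference, the `POST_SCRIPT` hook documented above receives nine positional arguments, per the Custom Scripts section of the README (`$1=EXIT_CODE`, `$2=DB_TYPE`, `$3=DB_HOST`, `$4=DB_NAME`, `$5=DATE`, `$6=TIME`, `$7=BACKUP_FILENAME`, `$8=FILESIZE`, `$9=MD5_RESULT`). A minimal sketch of such a script, assuming that argument order; the log path is a placeholder:

```bash
#!/bin/bash
# Hypothetical POST_SCRIPT hook. Argument order follows the README's
# Custom Scripts section; /backup/backup.log is a placeholder path.
exit_code="$1"    # result of the backup routine
db_type="$2"      # type of backup (mysql, pgsql, mongo, ...)
db_host="$3"      # host that was backed up
db_name="$4"      # name of the database backed up
date_stamp="$5"   # date of the backup
time_stamp="$6"   # time of the backup
filename="$7"     # backup filename
filesize="$8"     # size of the backup
md5_result="$9"   # MD5 sum, when MD5=TRUE

if [ "${exit_code}" -ne 0 ]; then
    echo "Backup of '${db_name}' on '${db_host}' failed with exit code ${exit_code}" >&2
    exit 1
fi

echo "${date_stamp} ${time_stamp} ${db_type} ${db_host} ${db_name} ${filename} ${filesize} ${md5_result}" >> /backup/backup.log
```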