diff --git a/CHANGELOG.md b/CHANGELOG.md
index a638b7a..d237df8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,20 @@
+## 3.1.0 2022-03-23
+
+   ### Added
+   - Backup multiple databases by separating them with commas e.g. db1,db2
+   - Backup ALL databases by setting DB_NAME to ALL
+   - Exclude databases from being backed up (comma-separated) when DB_NAME is ALL e.g. DB_NAME_EXCLUDE=db3,db4
+   - Backup timers execute per database, not once for the whole script run
+   - Post scripts run after each database backup
+   - Checksums are not generated when a database backup fails
+   - Database cleanup does not occur when any database backup fails during the session
+   - MongoDB now supported with 'restore' script - Credit to piemonkey@github
+   - Lots of reshuffling and optimization of the script due to the botched 3.0 release
+
+   ### Changed
+   - ZSTD replaces GZ as the default compression type
+   - Output is cleaner while backups are occurring
+
 ## 3.0.16 2022-03-23
 
    ### Changed
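As a quick illustration of the new database-selection behaviour listed above (the variable names come from the README changes below; the database names and values are hypothetical):

```bash
# Back up two named databases, each to its own archive (SPLIT_DB now defaults to TRUE)
DB_TYPE=mysql
DB_HOST=mariadb
DB_NAME=db1,db2

# Or back up every database the user can access, except two
DB_NAME=ALL
DB_NAME_EXCLUDE=db3,db4
```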
diff --git a/README.md b/README.md
index 0aaba40..ca2146d 100644
--- a/README.md
+++ b/README.md
@@ -127,24 +127,25 @@ Be sure to view the following repositories to understand all the customizable op
 | `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` |
 | `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
 | `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
-| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
 | `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
 | `POST_SCRIPT` | Fill this variable in with a command to execute after the backup completes | |
-| `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create Seperate DB Backups instead of all in one. | `FALSE` |
+| `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and PostgreSQL Only) | `TRUE` |
 
 ### Database Specific Options
-| Parameter | Description | Default |
-| --------- | --------------------------------------------------------------------------------------------- | ------- |
-| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
-| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
-| `DB_NAME` | Schema Name e.g. `database` | |
-| `DB_USER` | username for the database - use `root` to backup all MySQL of them. | |
-| `DB_PASS` | (optional if DB doesn't require it) password for the database | |
-| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
+| Parameter | Description | Default |
+| ----------------- | ----------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
+| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
+| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
+| `DB_NAME` | Schema Name e.g. `database`, or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas e.g. `db1,db2` | |
+| `DB_NAME_EXCLUDE` | If using `ALL`, exclude these databases (separated by commas) from being backed up | |
+| `DB_USER` | username for the database(s) - Can use `root` for MySQL | |
+| `DB_PASS` | (optional if DB doesn't require it) password for the database | |
+| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
 
 ### Scheduling Options
 | Parameter | Description | Default |
 | ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ------- |
-| `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. | `1440` |
+| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
 | `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
 | | Absolute HHMM, e.g. `2330` or `0415` | |
 | | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
@@ -154,7 +155,7 @@
 ### Backup Options
 | Parameter | Description | Default |
 | ------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | -------------- |
-| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `GZ` |
+| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
 | `COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` | `3` |
 | `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
 | `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
@@ -187,7 +188,6 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.
 
 ## Maintenance
-
 ### Shell Access
 
 For debugging and maintenance purposes you may want to access the container's shell.
 
@@ -201,7 +201,7 @@ Manual Backups can be performed by entering the container and typing `backup-now`
 
 - Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE`
 - You would also want to disable a few features from the upstream base images, specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup, and then exit cleanly. An alternative way to run the script is to execute `/etc/services.available/10-db-backup/run` (a minimal environment sketch follows these README changes).
 
 ### Restoring Databases
-Entering in the container and executing `restore` will execute a menu based script to restore your backups.
+Entering the container and executing `restore` will launch a menu-based script to restore your backups - MariaDB, Postgres, and Mongo supported.
 
 You will be presented with a series of menus allowing you to choose:
 - What file to restore
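For the Kubernetes cron-style setup described in the Maintenance notes above, a minimal sketch of the relevant environment (assuming the upstream base-image variables behave as documented there):

```bash
# One backup per container invocation; scheduling is left to an external cron/CronJob
MODE=MANUAL
MANUAL_RUN_FOREVER=FALSE
CONTAINER_ENABLE_SCHEDULING=FALSE
CONTAINER_ENABLE_MONITORING=FALSE
```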
diff --git a/install/assets/defaults/10-db-backup b/install/assets/defaults/10-db-backup
index 880021b..74ff610 100755
--- a/install/assets/defaults/10-db-backup
+++ b/install/assets/defaults/10-db-backup
@@ -2,7 +2,7 @@
 
 BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
 CHECKSUM=${CHECKSUM:-"MD5"}
-COMPRESSION=${COMPRESSION:-"GZ"}
+COMPRESSION=${COMPRESSION:-"ZSTD"}
 COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
 DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
 DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
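With the compiled-in default now ZSTD, the compression() function in the functions file below assembles the compression command from these values. A rough sketch of what that amounts to (the thread count is autodetected, so `4` here is hypothetical):

```bash
# COMPRESSION=ZSTD, COMPRESSION_LEVEL=3, 4 threads detected:
# compress_cmd becomes roughly "zstd --rm -3 -T4", and dumps are piped through it:
mysqldump mydb | zstd --rm -3 -T4 > /tmp/backups/mysql_mydb_mariadb_20220323-000000.sql.zst
```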
diff --git a/install/assets/functions/10-db-backup b/install/assets/functions/10-db-backup
index 775fb1f..d216685 100755
--- a/install/assets/functions/10-db-backup
+++ b/install/assets/functions/10-db-backup
@@ -24,6 +24,7 @@ bootstrap_variables() {
             dbtype=mysql
             dbport=${DB_PORT:-3306}
             [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+            sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
         ;;
         "mssql" | "microsoftsql" )
             apkArch="$(apk --print-arch)"; \
@@ -38,6 +39,7 @@ bootstrap_variables() {
             dbtype=pgsql
             dbport=${DB_PORT:-5432}
             [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+            sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
         ;;
         "redis" )
             dbtype=redis
@@ -80,9 +82,11 @@ backup_couch() {
     print_notice "Dumping CouchDB database: '${dbname}'"
     curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > ${TEMP_LOCATION}/${target}
     exit_code=$?
-    check_exit_code
+    check_exit_code $target
     generate_checksum
-    move_backup
+    move_dbbackup
+    post_dbbackup_hooks
+    send_statistics
 }
 
 backup_influx() {
         print_notice "Compressing InfluxDB backup with gzip"
         influx_compression="-portable"
     fi
-    for DB in ${DB_NAME}; do
-        print_notice "Dumping Influx database: '${DB}'"
-        target=influx_${DB}_${dbhost}_${now}
-        influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${TEMP_LOCATION}/${target}
+    for db in ${DB_NAME}; do
+        print_notice "Dumping Influx database: '${db}'"
+        target=influx_${db}_${dbhost}_${now}
+        influxd backup ${influx_compression} -database $db -host ${dbhost}:${dbport} ${TEMP_LOCATION}/${target}
         exit_code=$?
-        check_exit_code
+        check_exit_code $target
         generate_checksum
-        move_backup
+        move_dbbackup
+        send_statistics
+        post_dbbackup_hooks
     done
 }
 
 backup_mongo() {
     print_notice "Dumping MongoDB database: '${DB_NAME}'"
     mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
     exit_code=$?
-    check_exit_code
+    check_exit_code $target
     cd "${TEMP_LOCATION}"
     generate_checksum
-    move_backup
+    move_dbbackup
+    send_statistics
+    post_dbbackup_hooks
 }
 
 backup_mssql() {
     print_notice "Dumping MSSQL database: '${dbname}'"
     /opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} -Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
     exit_code=$?
-    check_exit_code
+    check_exit_code $target
     generate_checksum
-    move_backup
+    move_dbbackup
+    send_statistics
+    post_dbbackup_hooks
 }
 
 backup_mysql() {
@@ -137,65 +147,111 @@
     if var_true "${MYSQL_STORED_PROCEDURES}" ; then
         stored_procedures="--routines"
     fi
+
+    if [ "${dbname,,}" = "all" ] ; then
+        print_debug "Preparing to back up everything except for information_schema and _* prefixes"
+        db_names=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
+        if [ -n "${DB_NAME_EXCLUDE}" ] ; then
+            db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
+            for db_exclude in ${db_names_exclusions} ; do
+                print_debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
+                db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
+            done
+        fi
+    else
+        db_names=$(echo "${dbname}" | tr ',' '\n')
+    fi
+
+    print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
+
     if var_true "${SPLIT_DB}" ; then
-        DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema)
-        print_debug "Backing up everything except for information_schema and _* prefixes"
-        print_debug "Databases Found: ${DATABASES}"
-        for db in ${DATABASES} ; do
-            if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
-                print_notice "Dumping MySQL/MariaDB database: '${db}'"
-                target=mysql_${db}_${dbhost}_${now}.sql
-                compression
-                mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
-                exit_code=$?
-                check_exit_code
-                generate_checksum
-                move_backup
-            fi
+        for db in ${db_names} ; do
+            pre_dbbackup
+            target=mysql_${db}_${dbhost}_${now}.sql
+            compression
+            print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
+            mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+            exit_code=$?
+            check_exit_code $target
+            generate_checksum
+            move_dbbackup
+            post_dbbackup
         done
     else
+        print_debug "Not splitting database dumps into their own files"
+        pre_dbbackup
+        target=mysql_all_${dbhost}_${now}.sql
         compression
-        print_notice "Dumping MySQL/MariaDB database: '${DB_NAME}'"
-        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -A -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+        print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
+        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
         exit_code=$?
-        check_exit_code
+        check_exit_code $target
         generate_checksum
-        move_backup
+        move_dbbackup
+        post_dbbackup
     fi
 }
 
 backup_pgsql() {
     export PGPASSWORD=${dbpass}
-    if var_true "${SPLIT_DB}" ; then
-        authdb=${DB_USER}
-        [ -n "${DB_NAME}" ] && authdb=${DB_NAME}
-        DATABASES=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
-        print_debug "Databases Found: ${DATABASES}"
-        for db in $DATABASES ; do
-            print_notice "Dumping Postgresql database: $db"
+    authdb=${DB_USER}
+    if [ "${dbname,,}" = "all" ] ; then
+        print_debug "Preparing to back up all databases"
+        db_names=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+        if [ -n "${DB_NAME_EXCLUDE}" ] ; then
+            db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
+            for db_exclude in ${db_names_exclusions} ; do
+                print_debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
+                db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
+            done
+        fi
+    else
+        db_names=$(echo "${dbname}" | tr ',' '\n')
+    fi
+
+    print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
+
+    if var_true "${SPLIT_DB}" ; then
+        for db in ${db_names} ; do
+            pre_dbbackup
             target=pgsql_${db}_${dbhost}_${now}.sql
             compression
+            print_notice "Dumping PostgreSQL database: '${db}' ${compression_string}"
             pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
             exit_code=$?
-            check_exit_code
+            check_exit_code $target
             generate_checksum
-            move_backup
-        done
-    else
-        compression
-        print_notice "Dumping PostgreSQL database: '${DB_NAME}'"
-        pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
-        exit_code=$?
-        check_exit_code
-        generate_checksum
-        move_backup
-    fi
+            move_dbbackup
+            post_dbbackup
+        done
+    else
+        print_debug "Not splitting database dumps into their own files"
+        pre_dbbackup
+        target=pgsql_all_${dbhost}_${now}.sql
+        compression
+        print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
+        tmp_db_names=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+        for r_db_name in $(echo $db_names | xargs); do
+            tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
+        done
+        sleep 5
+        for x_db_name in ${tmp_db_names} ; do
+            pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
+        done
+        pg_dumpall -h ${dbhost} -U ${dbuser} -p ${dbport} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+        exit_code=$?
+        check_exit_code $target
+        generate_checksum
+        move_dbbackup
+        post_dbbackup
+    fi
 }
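The non-split PostgreSQL path above inverts the selection: pg_dumpall has no per-database include option, so every database not requested is turned into an --exclude-database argument. A standalone sketch of that inversion, using hypothetical database names:

```bash
#!/bin/bash
# Server reports: app metrics staging postgres; DB_NAME=app,metrics was requested
db_names="app metrics"
tmp_db_names="app metrics staging postgres"

# Remove each requested database from the full server list...
for r_db_name in ${db_names}; do
    tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g")
done

# ...and turn whatever is left into exclusion flags for pg_dumpall
for x_db_name in ${tmp_db_names}; do
    pgexclude_arg="${pgexclude_arg} --exclude-database=${x_db_name}"
done

echo "${pgexclude_arg}"    # --exclude-database=staging --exclude-database=postgres
```

Note that the `sed "s|name||g"` removal is a plain substring substitution, so overlapping names (e.g. `app` and `app2`) would both be affected.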
 
 backup_redis() {
+    pre_dbbackup
+    print_notice "Dumping Redis - Flushing Redis Cache First"
     target=redis_${db}_${dbhost}_${now}.rdb
     echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
-    print_notice "Dumping Redis - Flushing Redis Cache First"
     sleep 10
     try=5
     while [ $try -gt 0 ] ; do
@@ -213,22 +269,24 @@
     compression
     $compress_cmd "${TEMP_LOCATION}/${target_original}"
     generate_checksum
-    move_backup
+    move_dbbackup
+    post_dbbackup
 }
 
 backup_sqlite3() {
+    pre_dbbackup
     db=$(basename "$dbhost")
     db="${db%.*}"
     target=sqlite3_${db}_${now}.sqlite3
     compression
-
-    print_notice "Dumping sqlite3 database: '${dbhost}'"
+    print_notice "Dumping sqlite3 database: '${dbhost}' ${compression_string}"
     sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
     exit_code=$?
-    check_exit_code
+    check_exit_code $target
     cat "${TEMP_LOCATION}"/backup.sqlite3 | $compress_cmd > "${TEMP_LOCATION}/${target}"
     generate_checksum
-    move_backup
+    move_dbbackup
+    post_dbbackup
 }
 
 check_availability() {
@@ -312,17 +370,31 @@
 }
 
 check_exit_code() {
-    print_debug "Exit Code is ${exit_code}"
+    print_debug "DB Backup Exit Code is ${exit_code}"
     case "${exit_code}" in
         0 )
-            print_info "Backup completed successfully"
+            print_info "DB Backup of '${1}' completed successfully"
         ;;
         * )
-            print_error "Backup reported errors"
+            print_error "DB Backup of '${1}' reported errors"
+            master_exit_code=1
         ;;
     esac
 }
 
+cleanup_old_data() {
+    if [ -n "${DB_CLEANUP_TIME}" ]; then
+        if [ "${master_exit_code}" != 1 ]; then
+            print_info "Cleaning up old backups"
+            mkdir -p "${DB_DUMP_TARGET}"
+            find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+        else
+            print_info "Skipping cleaning up old backups because there were errors in backing up"
+        fi
+    fi
+}
+
 compression() {
     if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
         PARALLEL_COMPRESSION_THREADS=1
@@ -330,58 +402,73 @@
     case "${COMPRESSION,,}" in
         gz* )
-            print_notice "Compressing backup with gzip"
-            print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
-            target=${target}.gz
             compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
+            compression_type="gzip"
+            target=${target}.gz
         ;;
         bz* )
-            print_notice "Compressing backup with bzip2"
-            print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
-            target=${target}.bz2
             compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
+            compression_type="bzip2"
+            target=${target}.bz2
         ;;
         xz* )
-            print_notice "Compressing backup with xzip"
-            print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
-            target=${target}.xz
             compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
+            compression_type="xzip"
+            target=${target}.xz
         ;;
         zst* )
-            print_notice "Compressing backup with zstd"
-            print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
-            target=${target}.zst
             compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
+            compression_type="zstd"
+            target=${target}.zst
         ;;
         "none" | "false")
-            print_notice "Not compressing backups"
             compress_cmd="cat "
+            compression_type="none"
+        ;;
+    esac
+
+    case "${CONTAINER_LOG_LEVEL,,}" in
+        "debug" )
+            if [ "${compression_type}" = "none" ] ; then
+                compression_string="with '${PARALLEL_COMPRESSION_THREADS}' threads"
+            else
+                compression_string="and compressing with '${compression_type}:${COMPRESSION_LEVEL}' with '${PARALLEL_COMPRESSION_THREADS}' threads"
+            fi
+        ;;
+        * )
+            if [ "${compression_type}" != "none" ] ; then
+                compression_string="and compressing with '${compression_type}'"
+            fi
         ;;
     esac
 }
 
 generate_checksum() {
-    if var_true "${ENABLE_CHECKSUM}" ; then
-        case "${CHECKSUM,,}" in
-            "md5" )
-                checksum_command="md5sum"
-                checksum_extension="md5"
-            ;;
-            "sha1" )
-                checksum_command="sha1sum"
-                checksum_extension="sha1"
-            ;;
-        esac
+    if var_true "${ENABLE_CHECKSUM}" ; then
+        if [ "${exit_code}" = "0" ] ; then
+            case "${CHECKSUM,,}" in
+                "md5" )
+                    checksum_command="md5sum"
+                    checksum_extension="md5"
+                ;;
+                "sha1" )
+                    checksum_command="sha1sum"
+                    checksum_extension="sha1"
+                ;;
+            esac
 
-        print_notice "Generating ${checksum_extension^^} for '${target}'"
-        cd "${TEMP_LOCATION}"
-        ${checksum_command} "${target}" > "${target}"."${checksum_extension}"
-        checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
-        print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
+            print_notice "Generating ${checksum_extension^^} for '${target}'"
+            cd "${TEMP_LOCATION}"
+            ${checksum_command} "${target}" > "${target}"."${checksum_extension}"
+            checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
+            print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
+        else
+            print_warn "Skipping checksum creation because the backup did not complete successfully"
+        fi
     fi
 }
 
-move_backup() {
+move_dbbackup() {
     case "$SIZE_VALUE" in
         "b" | "bytes" )
             SIZE_VALUE=1
@@ -433,12 +520,60 @@
     esac
 }
 
+pre_dbbackup() {
+    dbbackup_start_time=$(date +"%s")
+    now=$(date +"%Y%m%d-%H%M%S")
+    now_time=$(date +"%H:%M:%S")
+    now_date=$(date +"%Y-%m-%d")
+    target=${dbtype}_${dbname}_${dbhost}_${now}.sql
+}
+
+post_dbbackup() {
+    dbbackup_finish_time=$(date +"%s")
+    dbbackup_total_time=$(echo $((dbbackup_finish_time-dbbackup_start_time)))
+
+    if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
+        print_notice "Sending Backup Statistics to Zabbix"
+        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
+        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
+        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
+        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
+    fi
+
+    ### Post Script Support
+    if [ -n "${POST_SCRIPT}" ] ; then
+        print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}'"
+        eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+    fi
+
+    ### Post Backup Custom Script Support
+    if [ -d "/assets/custom-scripts/" ] ; then
+        print_notice "Found Post Backup Custom Script to execute"
+        for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
+            print_notice "Running Script: '${f}'"
+            ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
+            ${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+        done
+    fi
+
+    print_notice "DB Backup for '${db}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
+}
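The ten positional arguments passed to POST_SCRIPT and to any /assets/custom-scripts/*.sh are the ones documented in the comment above. A minimal sketch of a custom script consuming them (the file name and the notification output are hypothetical):

```bash
#!/bin/bash
# /assets/custom-scripts/99-notify.sh (example name)
# Args: EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
exit_code="${1}"
db_type="${2}"
db_host="${3}"
db_name="${4}"
duration="${7}"
backup_file="${8}"

if [ "${exit_code}" != "0" ]; then
    echo "[db-backup] ${db_type} backup of '${db_name}' on ${db_host} FAILED" >&2
else
    echo "[db-backup] '${db_name}' backed up to '${backup_file}' in ${duration}s"
fi
```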
+
 sanity_test() {
     sanity_var DB_TYPE "Database Type"
     sanity_var DB_HOST "Database Host"
 
     file_env 'DB_USER'
     file_env 'DB_PASS'
 
+    case "${dbtype,,}" in
+        "mysql" | "mariadb" )
+            sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
+        ;;
+        postgres* | "pgsql" )
+            sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
+        ;;
+    esac
+
     if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
         sanity_var S3_BUCKET "S3 Bucket"
         sanity_var S3_PATH "S3 Path"
@@ -480,3 +615,4 @@ EOF
         fi
     fi
 }
+
diff --git a/install/etc/services.available/10-db-backup/run b/install/etc/services.available/10-db-backup/run
index 4f5f16c..45eb54d 100755
--- a/install/etc/services.available/10-db-backup/run
+++ b/install/etc/services.available/10-db-backup/run
@@ -8,62 +8,34 @@ CONTAINER_LOG_LEVEL=DEBUG
 
 bootstrap_variables
 
-if [ "${MODE,,}" = "manual" ] ; then
+if [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
     DB_DUMP_BEGIN=+0
     manual=TRUE
     print_debug "Detected Manual Mode"
+else
+    sleep 5
+    current_time=$(date +"%s")
+    today=$(date +"%Y%m%d")
+
+    if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
+        waittime=$(( ${BASH_REMATCH[1]} * 60 ))
+        target_time=$(($current_time + $waittime))
+    else
+        target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
+        if [[ "$target_time" < "$current_time" ]]; then
+            target_time=$(($target_time + 24*60*60))
+        fi
+        waittime=$(($target_time - $current_time))
+    fi
+    print_debug "Wait Time: ${waittime} Target time: ${target_time} Current Time: ${current_time}"
+    print_info "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
+    sleep $waittime
 fi
 
-case "${1,,}" in
-    "now" | "manual" )
-        DB_DUMP_BEGIN=+0
-        manual=TRUE
-    ;;
-    * )
-        sleep 5
-    ;;
-esac
-
-### Container Startup
-print_debug "Backup routines Initialized on $(date +'%Y-%m-%d %H:%M:%S')"
-
-### Wait for Next time to start backup
-case "${1,,}" in
-    "now" | "manual" )
-        :
-    ;;
-    * )
-        if [ "${manual,,}" != "true" ]; then
-            current_time=$(date +"%s")
-            today=$(date +"%Y%m%d")
-
-            if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
-                waittime=$(( ${BASH_REMATCH[1]} * 60 ))
-                target_time=$(($current_time + $waittime))
-            else
-                target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
-                if [[ "$target_time" < "$current_time" ]]; then
-                    target_time=$(($target_time + 24*60*60))
-                fi
-                waittime=$(($target_time - $current_time))
-            fi
-            print_debug "Wait Time: ${waittime} Target time: ${target_time} Current Time: ${current_time}"
-            print_info "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
-            sleep $waittime
-        fi
-    ;;
-esac
-
-### Commence Backup
 while true; do
     mkdir -p "${TEMP_LOCATION}"
     backup_start_time=$(date +"%s")
-    now=$(date +"%Y%m%d-%H%M%S")
-    now_time=$(date +"%H:%M:%S")
-    now_date=$(date +"%Y-%m-%d")
-    target=${dbtype}_${dbname}_${dbhost}_${now}.sql
-
-    ### Take a Dump
+    print_debug "Backup routines started time: $(date +'%Y-%m-%d %T %Z')"
     case "${dbtype,,}" in
         "couch" )
             check_availability
@@ -101,47 +73,16 @@
 
     backup_finish_time=$(date +"%s")
     backup_total_time=$(echo $((backup_finish_time-backup_start_time)))
+    if [ -z "$master_exit_code" ] ; then master_exit_code="0" ; fi
+    print_info "Backup routines finish time: $(date -d @${backup_finish_time} +"%Y-%m-%d %T %Z") with overall exit code ${master_exit_code}"
+    print_notice "Backup routines time taken: $(echo ${backup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
 
-    print_info "Backup finish time: $(date -d @${backup_finish_time} +"%Y-%m-%d %T %Z") with exit code ${exit_code}"
-    print_notice "Backup time elapsed: $(echo ${backup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
-
-    ### Zabbix / Monitoring stats
-    if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
-        print_notice "Sending Backup Statistics to Zabbix"
-        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
-        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
-        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
-        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((backup_finish_time-backup_start_time)))"
-    fi
-
-    ### Automatic Cleanup
-    if [ -n "${DB_CLEANUP_TIME}" ]; then
-        print_info "Cleaning up old backups"
-        mkdir -p "${DB_DUMP_TARGET}"
-        find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
-    fi
-
-    ### Post Script Support
-    if [ -n "${POST_SCRIPT}" ] ; then
-        print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
-        eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
-    fi
-
-    ### Post Backup Custom Script Support
-    if [ -d "/assets/custom-scripts/" ] ; then
-        print_notice "Found Post Backup Custom Script to execute"
-        for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
-            print_notice "Running Script: '${f}'"
-            ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
-            ${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
-        done
-    fi
+    cleanup_old_data
 
     if var_true "${manual}" ; then
         print_debug "Exiting due to manual mode"
-        exit ${exit_code};
+        exit ${master_exit_code};
     else
-        ### Go back to sleep until next backup time
         print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "
         sleep $(($DB_DUMP_FREQ*60-backup_total_time))
     fi
diff --git a/install/usr/local/bin/restore b/install/usr/local/bin/restore
index dbee688..1138f9f 100755
--- a/install/usr/local/bin/restore
+++ b/install/usr/local/bin/restore
@@ -263,6 +263,10 @@ get_dbtype() {
     p_dbtype=$(basename -- "${r_filename}" | cut -d _ -f 1)
 
     case "${p_dbtype}" in
+        mongo* )
+            parsed_type=true
+            print_debug "Parsed DBType: MongoDB"
+        ;;
         mariadb | mysql )
             parsed_type=true
             print_debug "Parsed DBType: MariaDB/MySQL"
@@ -320,7 +324,9 @@ EOF
 What Database Type are you looking to restore?
 
 ${q_dbtype_menu}
+
     M ) MySQL / MariaDB
+    O ) MongoDB
     P ) Postgresql
 
     Q ) Quit
@@ -335,6 +341,10 @@ EOF
             r_dbtype=mysql
             break
         ;;
+        o* )
+            r_dbtype=mongo
+            break
+        ;;
         p* )
             r_dbtype=postgresql
             break
@@ -358,6 +368,10 @@ EOF
             r_dbtype=mysql
             break
         ;;
+        o* )
+            r_dbtype=mongo
+            break
+        ;;
         p* )
             r_dbtype=postgresql
             break
@@ -381,6 +395,10 @@ EOF
             r_dbtype=mysql
             break
         ;;
+        o* )
+            r_dbtype=mongo
+            break
+        ;;
         p* )
             r_dbtype=postgresql
             break
@@ -735,7 +753,7 @@ EOF
 q_dbpass_menu=$(cat <