Compare commits


3 Commits

Author       SHA1        Message                            Date
Dave Conroy  eb2a18672b  Release 3.0.15 - See CHANGELOG.md  2022-03-22 18:27:57 -07:00
Dave Conroy  5f784ed156  Tweak Example                      2022-03-22 09:57:28 -07:00
Dave Conroy  d9a4690ea2  Release 3.0.14 - See CHANGELOG.md  2022-03-22 07:52:15 -07:00
4 changed files with 38 additions and 36 deletions

View File

@@ -1,3 +1,16 @@
## 3.0.15 2022-03-22 <dave at tiredofit dot ca>
### Changed
- Rework compression function
- Fix for Bzip compression failing
## 3.0.14 2022-03-22 <dave at tiredofit dot ca>
### Changed
- Rearrange Notice stating when next backup is going to start
## 3.0.13 2022-03-21 <dave at tiredofit dot ca>
### Added

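For reference, the "Rework compression function" entry corresponds to the compression() changes further down: the compressor command is now assembled directly into compress_cmd from COMPRESSION, COMPRESSION_LEVEL and PARALLEL_COMPRESSION_THREADS. Taken from that case statement, the mapping is:

COMPRESSION=GZ          -> pigz   (target gets .gz)
COMPRESSION=BZ          -> pbzip2 (target gets .bz2)
COMPRESSION=XZ          -> pixz   (target gets .xz)
COMPRESSION=ZSTD        -> zstd   (target gets .zst)
COMPRESSION=NONE/FALSE  -> cat    (no compression, no extension added)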
View File

@@ -20,7 +20,7 @@ services:
- example-db
volumes:
- ./backups:/backup
- ./post-script.sh:/assets/custom-scripts/post-script.sh
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- DB_TYPE=mariadb
- DB_HOST=example-db
@@ -30,8 +30,8 @@ services:
- DB_DUMP_FREQ=1440
- DB_DUMP_BEGIN=0000
- DB_CLEANUP_TIME=8640
- CHECKSUM=MD5
- COMPRESSION=XZ
- CHECKSUM=SHA1
- COMPRESSION=ZSTD
- SPLIT_DB=FALSE
restart: always
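With the example switched to CHECKSUM=SHA1 and COMPRESSION=ZSTD, backups land in ./backups as zstd-compressed dumps plus SHA1 sums. A minimal verification/restore sketch; the *.sha1 naming and the dump file name pattern are assumptions here, so adjust to whatever the container actually writes:

cd ./backups
sha1sum -c ./*.sha1                             # verify dumps against their SHA1 sums (assumed *.sha1 naming)
latest=$(ls -1t mysql_*.sql.zst | head -n 1)    # newest zstd-compressed MySQL/MariaDB dump (hypothetical name pattern)
zstd -dc "${latest}" | mysql -h example-db -P 3306 -u root -p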

View File

@@ -1,22 +1,5 @@
#!/command/with-contenv bash
bootstrap_compression() {
### Set Compression Options
if var_true "${ENABLE_PARALLEL_COMPRESSION}" ; then
print_debug "Utilizing '${PARALLEL_COMPRESSION_THREADS}' compression threads"
bzip="pbzip2 -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS}"
gzip="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS}"
xzip="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS}"
zstd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
else
print_debug "Utilizing single compression thread"
bzip="pbzip2 -${COMPRESSION_LEVEL} -p 1"
gzip="pigz -${COMPRESSION_LEVEL} -p 1"
xzip="pixz -${COMPRESSION_LEVEL} -p 1"
zstd="zstd --rm -${COMPRESSION_LEVEL} -T1"
fi
}
bootstrap_variables() {
case "${dbtype,,}" in
couch* )
@@ -95,7 +78,7 @@ backup_couch() {
target=couch_${dbname}_${dbhost}_${now}.txt
compression
print_notice "Dumping CouchDB database: '${dbname}'"
curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${dumpoutput} | $dumpoutput > ${TEMP_LOCATION}/${target}
curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code
generate_checksum
@@ -162,8 +145,10 @@ backup_mysql() {
print_notice "Dumping MySQL/MariaDB database: '${db}'"
target=mysql_${db}_${dbhost}_${now}.sql
compression
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $dumpoutput > ${TEMP_LOCATION}/${target}
set -x
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
set +x
check_exit_code
generate_checksum
move_backup
@@ -172,7 +157,7 @@ backup_mysql() {
else
compression
print_notice "Dumping MySQL/MariaDB database: '${DB_NAME}'"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -A -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -A -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code
generate_checksum
@@ -191,7 +176,7 @@ backup_pgsql() {
print_notice "Dumping Postgresql database: $db"
target=pgsql_${db}_${dbhost}_${now}.sql
compression
pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code
generate_checksum
@@ -200,7 +185,7 @@ backup_pgsql() {
else
compression
print_notice "Dumping PostgreSQL: '${DB_NAME}'"
pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code
generate_checksum
@@ -227,7 +212,7 @@ backup_redis() {
done
target_original=${target}
compression
$dumpoutput "${TEMP_LOCATION}/${target_original}"
$compress_cmd "${TEMP_LOCATION}/${target_original}"
generate_checksum
move_backup
}
@@ -242,7 +227,7 @@ backup_sqlite3() {
sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
exit_code=$?
check_exit_code
cat "${TEMP_LOCATION}"/backup.sqlite3 | $dumpoutput > "${TEMP_LOCATION}/${target}"
cat "${TEMP_LOCATION}"/backup.sqlite3 | $compress_cmd > "${TEMP_LOCATION}/${target}"
generate_checksum
move_backup
}
@@ -340,34 +325,38 @@ check_exit_code() {
}
compression() {
if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
PARALLEL_COMPRESSION_THREADS=1
fi
case "${COMPRESSION,,}" in
gz* )
print_notice "Compressing backup with gzip"
print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
target=${target}.gz
dumpoutput="$gzip "
compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
;;
bz* )
print_notice "Compressing backup with bzip2"
print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
target=${target}.bz2
dumpoutput="$bzip "
compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
;;
xz* )
print_notice "Compressing backup with xzip"
print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
target=${target}.xz
dumpoutput="$xzip "
compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
;;
zst* )
print_notice "Compressing backup with zstd"
print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
target=${target}.zst
dumpoutput="$zstd "
compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
;;
"none" | "false")
print_notice "Not compressing backups"
dumpoutput="cat "
compress_cmd="cat "
;;
esac
}
@@ -491,4 +480,4 @@ EOF
fi
fi
fi
}
}
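To summarize the rework in this file: bootstrap_compression and the per-tool $gzip/$bzip/$xzip/$zstd variables are gone, and compression() now builds a single compress_cmd on the spot, dropping to one thread when parallel compression is disabled. The bzip2 thread count also changed from "-p N" to "-pN", which appears to be the "Fix for Bzip compression failing" noted in the changelog. Below is a minimal standalone sketch of that selection, assuming pigz, pbzip2, pixz and zstd are available as they are in the image; it is an illustration, not the image's exact code:

#!/usr/bin/env bash
# Sketch of the reworked compressor selection; variable names mirror compression() above.
COMPRESSION=${COMPRESSION:-ZSTD}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-3}
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-TRUE}
PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-4}

if [ "${ENABLE_PARALLEL_COMPRESSION,,}" = "false" ]; then
    PARALLEL_COMPRESSION_THREADS=1
fi

case "${COMPRESSION,,}" in
    gz* )  compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS}" ; ext=.gz  ;;
    bz* )  compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS}" ; ext=.bz2 ;;  # pbzip2 takes -pN with no space
    xz* )  compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS}" ; ext=.xz  ;;
    zst* ) compress_cmd="zstd -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}" ; ext=.zst ;;  # the image also passes --rm when compressing an existing file
    none | false ) compress_cmd="cat" ; ext= ;;
esac

# Pipe a stand-in "dump" through the chosen compressor, as the backup functions do.
echo "-- dump placeholder" | ${compress_cmd} > "/tmp/demo.sql${ext}"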

View File

@@ -4,8 +4,8 @@ source /assets/functions/00-container
source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
PROCESS_NAME="db-backup"
CONTAINER_LOG_LEVEL=DEBUG
bootstrap_compression
bootstrap_variables
if [ "${MODE,,}" = "manual" ] ; then
@@ -25,7 +25,7 @@ case "${1,,}" in
esac
### Container Startup
print_debug "Backup routines Initialized on $(date)"
print_debug "Backup routines Initialized on $(date +'%Y-%m-%d %H:%M:%S')"
### Wait for Next time to start backup
case "${1,,}" in
@@ -142,7 +142,7 @@ while true; do
exit ${exit_code};
else
### Go back to sleep until next backup time
sleep $(($DB_DUMP_FREQ*60-backup_total_time))
print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "
sleep $(($DB_DUMP_FREQ*60-backup_total_time))
fi
done
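The new notice simply spells out the sleep arithmetic the loop was already doing: with DB_DUMP_FREQ in minutes and backup_total_time the seconds the run just took, the container sleeps DB_DUMP_FREQ*60 - backup_total_time seconds and reports the wake-up time via GNU date. A small illustration with made-up numbers:

DB_DUMP_FREQ=1440        # minutes between dumps, as in the example compose file
backup_total_time=75     # seconds the last run took (hypothetical value)
sleep_secs=$(( DB_DUMP_FREQ * 60 - backup_total_time ))   # 86325
echo "Sleeping for another ${sleep_secs} seconds. Waking up at $(date -d@"$(( $(date +%s) + sleep_secs ))" +"%Y-%m-%d %T %Z")"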