Compare commits


8 Commits

Author SHA1 Message Date
Dave Conroy fbe9dde4a1 Release 3.0.16 - See CHANGELOG.md 2022-03-23 07:57:28 -07:00
Dave Conroy eb2a18672b Release 3.0.15 - See CHANGELOG.md 2022-03-22 18:27:57 -07:00
Dave Conroy 5f784ed156 Tweak Example 2022-03-22 09:57:28 -07:00
Dave Conroy d9a4690ea2 Release 3.0.14 - See CHANGELOG.md 2022-03-22 07:52:15 -07:00
Dave Conroy baba842373 Release 3.0.13 - See CHANGELOG.md 2022-03-21 16:26:45 -07:00
Dave Conroy 108938c17a Release 3.0.12 - See CHANGELOG.md 2022-03-21 13:51:01 -07:00
Dave Conroy b0b39fa8c1 Release 3.0.11 - See CHANGELOG.md 2022-03-21 12:34:33 -07:00
Dave Conroy fa8f43132c Release 3.0.10 - See CHANGELOG.md 2022-03-21 11:19:17 -07:00
7 changed files with 120 additions and 71 deletions

View File

@@ -1,3 +1,46 @@
## 3.0.16 2022-03-23 <dave at tiredofit dot ca>
### Changed
- Fix for SPLIT_DB not looping through all database names properly
## 3.0.15 2022-03-22 <dave at tiredofit dot ca>
### Changed
- Rework compression function
- Fix for Bzip compression failing
## 3.0.14 2022-03-22 <dave at tiredofit dot ca>
### Changed
- Rearrange Notice stating when next backup is going to start
## 3.0.13 2022-03-21 <dave at tiredofit dot ca>
### Added
- Add compression levels to debug mode
## 3.0.12 2022-03-21 <dave at tiredofit dot ca>
### Added
- Throw Errors for MANUAL mode when certain other CONTAINER_* services are enabled
## 3.0.11 2022-03-21 <dave at tiredofit dot ca>
### Changed
- Fix for Parallel Compression
## 3.0.10 2022-03-21 <dave at tiredofit dot ca>
### Changed
- Fix for restore script not taking "custom" usernames or passwords
## 3.0.9 2022-03-21 <dave at tiredofit dot ca>
### Changed

View File

@@ -122,7 +122,7 @@ Be sure to view the following repositories to understand all the customizable op
#### Container Options
| Parameter | Description | Default |
| ----------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| -------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3`-compatible services such as S3, Minio, Wasabi | `FILESYSTEM` |
| `MODE` | `AUTO` to use the internal scheduling routines, or `MANUAL` to run backups only when executed by your own means | `AUTO` |
| `MANUAL_RUN_FOREVER` | `TRUE` to keep the container running after a manual backup, or `FALSE` to let it exit once the backup completes | `TRUE` |
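
As a rough illustration of how these container options can be combined, a one-off manual backup might be launched as below. The image name and mounted path are placeholders, not taken from this comparison; only the environment variables come from the tables and compose example shown here.

```bash
#!/usr/bin/env bash
# Sketch only: "tiredofit/db-backup" is an assumed image name for illustration.
docker run --rm \
  -e BACKUP_LOCATION=FILESYSTEM \
  -e MODE=MANUAL \
  -e MANUAL_RUN_FOREVER=FALSE \
  -e DB_TYPE=mariadb \
  -e DB_HOST=example-db \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```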
@@ -153,7 +153,7 @@ Be sure to view the following repositories to understand all the customizable op
- You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to parse properly. There have been reports of values that start with a `0` being converted into a different format, which prevents the timer from starting at the correct time.
### Backup Options
| Parameter | Description | Default |
| ----------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | ------- |
| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | -------------- |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `GZ` |
| `COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` | `3` |
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
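
A hedged sketch of how the compression-related options (and the `DB_DUMP_BEGIN` quoting note above) might look in an environment file; the values are illustrative, not recommendations:

```bash
# Illustrative values only; the defaults in the table above apply when unset.
COMPRESSION=ZSTD                  # GZ, BZ, XZ, ZSTD or NONE
COMPRESSION_LEVEL=19              # ZSTD accepts 1-19; most others allow 1-9
ENABLE_PARALLEL_COMPRESSION=TRUE  # use multiple cores when compressing
DB_DUMP_BEGIN="0000"              # quoted so the leading zero is not reinterpreted
```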

View File

@@ -20,7 +20,7 @@ services:
- example-db
volumes:
- ./backups:/backup
- ./post-script.sh:/assets/custom-scripts/post-script.sh
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- DB_TYPE=mariadb
- DB_HOST=example-db
@@ -30,8 +30,8 @@ services:
- DB_DUMP_FREQ=1440
- DB_DUMP_BEGIN=0000
- DB_CLEANUP_TIME=8640
- CHECKSUM=MD5
- COMPRESSION=XZ
- CHECKSUM=SHA1
- COMPRESSION=ZSTD
- SPLIT_DB=FALSE
restart: always

View File

@@ -8,7 +8,7 @@ DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION}:-"TRUE"}
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
MODE=${MODE:-"AUTO"}
MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
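
The corrected line above fixes a misplaced brace in the `${VAR:-default}` expansion; a minimal sketch of what changes:

```bash
#!/usr/bin/env bash
# Correct form: fall back to TRUE when the variable is unset or empty.
unset ENABLE_PARALLEL_COMPRESSION
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
echo "$ENABLE_PARALLEL_COMPRESSION"   # -> TRUE

# Broken form from the old line: the brace closes too early,
# so ':-"TRUE"}' is appended as literal text instead of acting as a fallback.
unset BROKEN
BROKEN=${BROKEN}:-"TRUE"}
echo "$BROKEN"                        # -> :-TRUE}
```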

View File

@@ -1,22 +1,5 @@
#!/command/with-contenv bash
bootstrap_compression() {
### Set Compression Options
if var_true "${ENABLE_PARALLEL_COMPRESSION}" ; then
print_debug "Utilizing '${PARALLEL_COMPRESSION_THREADS}' compression threads"
bzip="pbzip2 -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS}"
gzip="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS}"
xzip="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS}"
zstd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
else
print_debug "Utilizing single compression thread"
bzip="pbzip2 -${COMPRESSION_LEVEL} -p 1"
gzip="pigz -${COMPRESSION_LEVEL} -p 1"
xzip="pixz -${COMPRESSION_LEVEL} -p 1"
zstd="zstd --rm -${COMPRESSION_LEVEL} -T1"
fi
}
bootstrap_variables() {
case "${dbtype,,}" in
couch* )
@@ -95,7 +78,7 @@ backup_couch() {
target=couch_${dbname}_${dbhost}_${now}.txt
compression
print_notice "Dumping CouchDB database: '${dbname}'"
curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${dumpoutput} | $dumpoutput > ${TEMP_LOCATION}/${target}
curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code
generate_checksum
@@ -156,13 +139,14 @@ backup_mysql() {
fi
if var_true "${SPLIT_DB}" ; then
DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema)
for db in "${DATABASES}" ; do
if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
print_debug "Backing up everything except for information_schema and _* prefixes"
print_debug "Databases Found: ${DATABASES}"
for db in ${DATABASES} ; do
if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
print_notice "Dumping MySQL/MariaDB database: '${db}'"
target=mysql_${db}_${dbhost}_${now}.sql
compression
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $dumpoutput > ${TEMP_LOCATION}/${target}
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code
generate_checksum
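
The loop change above is the SPLIT_DB fix noted in the 3.0.16 changelog entry: quoting `"${DATABASES}"` made the loop run once over the whole list, while the unquoted form word-splits and iterates each database name. A minimal reproduction:

```bash
#!/usr/bin/env bash
# Stand-in for the "SHOW DATABASES" output captured into $DATABASES.
DATABASES=$'app\nwordpress\nnextcloud'

for db in "${DATABASES}"; do
  echo "quoted:   [$db]"    # runs once, with the whole newline-separated blob
done

for db in ${DATABASES}; do
  echo "unquoted: [$db]"    # runs once per database name
done
```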
@@ -172,7 +156,7 @@ backup_mysql() {
else
compression
print_notice "Dumping MySQL/MariaDB database: '${DB_NAME}'"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -A -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -A -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code
generate_checksum
@@ -183,15 +167,15 @@ backup_mysql() {
backup_pgsql() {
export PGPASSWORD=${dbpass}
if var_true "${SPLIT_DB}" ; then
authdb=${DB_USER}
[ -n "${DB_NAME}" ] && authdb=${DB_NAME}
DATABASES=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
for db in "${DATABASES}"; do
print_debug "Databases Found: ${DATABASES}"
for db in $DATABASES ; do
print_notice "Dumping Postgresql database: $db"
target=pgsql_${db}_${dbhost}_${now}.sql
compression
pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code
generate_checksum
@@ -199,8 +183,8 @@ backup_pgsql() {
done
else
compression
print_notice "Dumping PostgreSQL: '${DB_NAME}'"
pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
print_notice "Dumping PostgreSQL database: '${DB_NAME}'"
pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code
generate_checksum
@@ -227,7 +211,7 @@ backup_redis() {
done
target_original=${target}
compression
$dumpoutput "${TEMP_LOCATION}/${target_original}"
$compress_cmd "${TEMP_LOCATION}/${target_original}"
generate_checksum
move_backup
}
@@ -242,7 +226,7 @@ backup_sqlite3() {
sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
exit_code=$?
check_exit_code
cat "${TEMP_LOCATION}"/backup.sqlite3 | $dumpoutput > "${TEMP_LOCATION}/${target}"
cat "${TEMP_LOCATION}"/backup.sqlite3 | $compress_cmd > "${TEMP_LOCATION}/${target}"
generate_checksum
move_backup
}
@@ -340,30 +324,38 @@ check_exit_code() {
}
compression() {
if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
PARALLEL_COMPRESSION_THREADS=1
fi
case "${COMPRESSION,,}" in
gz* )
print_notice "Compressing backup with gzip"
print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
target=${target}.gz
dumpoutput="$gzip "
compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
;;
bz* )
print_notice "Compressing backup with bzip2"
print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
target=${target}.bz2
dumpoutput="$bzip "
compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
;;
xz* )
print_notice "Compressing backup with xzip"
print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
target=${target}.xz
dumpoutput="$xzip "
compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
;;
zst* )
print_notice "Compressing backup with zstd"
print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
target=${target}.zst
dumpoutput="$zstd "
compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
;;
"none" | "false")
print_notice "Not compressing backups"
dumpoutput="cat "
compress_cmd="cat "
;;
esac
}
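
With this rework, `compress_cmd` is built on demand inside `compression()` rather than in the removed `bootstrap_compression()` helper. A stripped-down sketch of the same selection pattern, assuming the compressors shown above (`pigz`, `pbzip2`, `pixz`, `zstd`) are on the PATH; `--rm` is omitted here since nothing is read from a source file when streaming:

```bash
#!/usr/bin/env bash
COMPRESSION=${COMPRESSION:-GZ}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-3}
PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-2}

case "${COMPRESSION,,}" in
  gz* )  compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS}" ; ext=.gz  ;;
  bz* )  compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS}" ; ext=.bz2 ;;
  xz* )  compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS}" ; ext=.xz  ;;
  zst* ) compress_cmd="zstd -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}" ; ext=.zst ;;
  * )    compress_cmd="cat" ; ext="" ;;
esac

# The database dump is streamed through whichever compressor was selected.
echo "example dump data" | ${compress_cmd} > "backup.sql${ext}"
```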
@@ -460,7 +452,7 @@ setup_mode() {
if [ "${MODE,,}" = "auto" ] || [ ${MODE,,} = "default" ] ; then
print_debug "Running in Auto / Default Mode - Letting Image control scheduling"
else
print_info "Running in Manual mode - Execute 'backup_now' to run a manual backup"
print_info "Running in Manual mode - Execute 'backup_now' or '/etc/services.available/10-db-backup/run' to perform a manual backup"
service_stop 10-db-backup
if var_true "${MANUAL_RUN_FOREVER}" ; then
mkdir -p /etc/services.d/99-run_forever
@@ -472,6 +464,19 @@ do
done
EOF
chmod +x /etc/services.d/99-run_forever/run
else
if var_true "${CONTAINER_ENABLE_SCHEDULING}" ; then
print_error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_SCHEDULING=TRUE'"
exit 1
fi
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_MONITORING=TRUE'"
exit 1
fi
if var_true "${CONTAINER_ENABLE_LOGSHIPPING}" ; then
print_error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_LOGSHIPPING=TRUE'"
exit 1
fi
fi
fi
}
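
Putting the guards above together, a manual / exit-after-execution setup implies an environment roughly like the following sketch; the `docker exec` container name is a placeholder:

```bash
# Manual-only container that exits after each backup; per the guards above,
# the scheduling/monitoring/log-shipping features must stay disabled.
MODE=MANUAL
MANUAL_RUN_FOREVER=FALSE
CONTAINER_ENABLE_SCHEDULING=FALSE
CONTAINER_ENABLE_MONITORING=FALSE
CONTAINER_ENABLE_LOGSHIPPING=FALSE

# Trigger a backup inside a running container ("db-backup" is a placeholder name):
docker exec db-backup backup_now
```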

View File

@@ -4,13 +4,14 @@ source /assets/functions/00-container
source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
PROCESS_NAME="db-backup"
CONTAINER_LOG_LEVEL=DEBUG
bootstrap_compression
bootstrap_variables
if [ "${MODE,,}" = "manual" ] ; then
DB_DUMP_BEGIN=+0
manual=TRUE
print_debug "Detected Manual Mode"
fi
case "${1,,}" in
@@ -24,7 +25,7 @@ case "${1,,}" in
esac
### Container Startup
print_debug "Backup routines Initialized on $(date)"
print_debug "Backup routines Initialized on $(date +'%Y-%m-%d %H:%M:%S')"
### Wait for Next time to start backup
case "${1,,}" in
@@ -32,7 +33,7 @@ case "${1,,}" in
:
;;
* )
if [ "${manual,,}" = "true" ]; then
if [ "${manual,,}" != "true" ]; then
current_time=$(date +"%s")
today=$(date +"%Y%m%d")
@@ -141,7 +142,7 @@ while true; do
exit ${exit_code};
else
### Go back to sleep until next backup time
sleep $(($DB_DUMP_FREQ*60-backup_total_time))
print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "
sleep $(($DB_DUMP_FREQ*60-backup_total_time))
fi
done
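
The added notice reports the wake-up time computed from the same arithmetic as the `sleep`; with the defaults used elsewhere in this comparison the numbers work out as in this small sketch (GNU-style `date -d` assumed, matching the script above):

```bash
#!/usr/bin/env bash
DB_DUMP_FREQ=1440        # minutes between backups (default)
backup_total_time=120    # seconds the previous backup took (illustrative)

sleep_seconds=$(( DB_DUMP_FREQ * 60 - backup_total_time ))    # 86400 - 120 = 86280
wake_at=$(date -d "@$(( $(date +%s) + sleep_seconds ))" +"%Y-%m-%d %T %Z")
echo "Sleeping for another ${sleep_seconds} seconds. Waking up at ${wake_at}"
```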

View File

@@ -697,9 +697,9 @@ EOF
c* )
counter=1
q_dbuser=" "
while [[ $q_dbname = *" "* ]]; do
while [[ $q_dbuser = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Usernames can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB User do you wish to use:\ ${coff})" q_dbname
read -e -p "$(echo -e ${clg}** ${cdgy}What DB User do you wish to use:\ ${coff})" q_dbuser
(( counter+=1 ))
done
r_dbuser=${q_dbuser}
@@ -766,9 +766,9 @@ EOF
c* )
counter=1
q_dbpass=" "
while [[ $q_dbname = *" "* ]]; do
while [[ $q_dbpass = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbname
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
(( counter+=1 ))
done
r_dbpass=${q_dbpass}
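
Both hunks above fix the same copy-and-paste slip: the `while` condition and `read` kept testing `q_dbname` instead of the value just entered. The corrected re-prompt pattern, reduced to its core:

```bash
#!/usr/bin/env bash
# Re-prompt until the entered value contains no spaces.
counter=1
q_dbuser=" "    # seeded with a space so the loop body runs at least once
while [[ $q_dbuser = *" "* ]]; do
  if [ $counter -gt 1 ]; then echo "DB Usernames can't have spaces in them, please re-enter."; fi
  read -e -r -p "What DB User do you wish to use: " q_dbuser
  (( counter += 1 ))
done
echo "Using DB user: ${q_dbuser}"
```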