Compare commits

...

9 Commits

Author SHA1 Message Date
dave@tiredofit.ca
fb9fe8a032 Release 4.0.26 - See CHANGELOG.md 2023-11-30 08:55:34 -08:00
Dave Conroy
b705982ae1 Restore missing _SPLIT_DB environment variable information for MySQL/Postgres 2023-11-30 08:54:49 -08:00
dave@tiredofit.ca
f031d787ae Release 4.0.25 - See CHANGELOG.md 2023-11-29 10:43:25 -08:00
Dave Conroy
3eed5fc8a0 Switch BLOBXFER_STORAGE_KEY to BLOBXFER_STORAGE_ACCOUNT_KEY 2023-11-29 10:39:58 -08:00
dave@tiredofit.ca
be619fb707 Release 4.0.24 - See CHANGELOG.md 2023-11-28 15:06:50 -08:00
dave@tiredofit.ca
cccc088b35 Release 4.0.23 - See CHANGELOG.md 2023-11-28 08:05:11 -08:00
dave@tiredofit.ca
4579f4057c Release 4.0.22 - See CHANGELOG.md 2023-11-25 08:50:25 -08:00
dave@tiredofit.ca
cd683648d0 Release 4.0.21 - See CHANGELOG.md 2023-11-22 15:40:38 -08:00
dave@tiredofit.ca
11f55f3d82 Release 4.0.20 - See CHANGELOG.md 2023-11-21 15:18:22 -08:00
5 changed files with 81 additions and 23 deletions

View File

@@ -1,3 +1,46 @@
## 4.0.26 2023-11-30 <dave at tiredofit dot ca>
### Added
- AWS CLI 1.31.4
## 4.0.25 2023-11-29 <dave at tiredofit dot ca>
### Changed
- Fix #297 - Add parameters to blobxfer to restore functionality
## 4.0.24 2023-11-28 <dave at tiredofit dot ca>
### Changed
- Fix issue with cron parsing and 0 being a value getting clobbered by sort command
## 4.0.23 2023-11-28 <dave at tiredofit dot ca>
### Changed
- Resolve issue with custom notification scripts not executing
## 4.0.22 2023-11-25 <dave at tiredofit dot ca>
### Changed
- Move cleanup_old_data routines to happen within backup_ function to properly accommodate for globals, and ALL DB_NAME use cases
## 4.0.21 2023-11-22 <dave at tiredofit dot ca>
### Changed
- Fix for SQLite backups not being cleaned up properly due to a malformed base
## 4.0.20 2023-11-21 <dave at tiredofit dot ca>
### Changed
- Update base image to support S6 Overlay 3.1.6.2 to solve shutdown issues specifically with MODE=MANUAL and MANUAL_RUN_FOREVER=TRUE
- Add some safety nets for Manual scheduling
## 4.0.19 2023-11-20 <dave at tiredofit dot ca> ## 4.0.19 2023-11-20 <dave at tiredofit dot ca>
### Changed ### Changed

View File

@@ -9,7 +9,7 @@ ENV INFLUX1_CLIENT_VERSION=1.8.0 \
INFLUX2_CLIENT_VERSION=2.7.3 \ INFLUX2_CLIENT_VERSION=2.7.3 \
MSODBC_VERSION=18.3.2.1-1 \ MSODBC_VERSION=18.3.2.1-1 \
MSSQL_VERSION=18.3.1.1-1 \ MSSQL_VERSION=18.3.1.1-1 \
AWS_CLI_VERSION=1.29.78 \ AWS_CLI_VERSION=1.31.4 \
CONTAINER_ENABLE_MESSAGING=TRUE \ CONTAINER_ENABLE_MESSAGING=TRUE \
CONTAINER_ENABLE_MONITORING=TRUE \ CONTAINER_ENABLE_MONITORING=TRUE \
IMAGE_NAME="tiredofit/db-backup" \ IMAGE_NAME="tiredofit/db-backup" \

View File

@@ -536,6 +536,7 @@ Encryption will occur after compression and the resulting filename will have a `
| `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | | | `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
| | Backup multiple by separating with commas eg `db1,db2` | | x | | | Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB01_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases separated via commas from being backed up | | x | | `DB01_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases separated via commas from being backed up | | x |
| `DB01_SPLIT_DB` | If using `ALL` - use this to split each database into its own file as opposed to one singular file | `FALSE` | |
| `DB01_PORT` | MySQL / MariaDB Port | `3306` | x | | `DB01_PORT` | MySQL / MariaDB Port | `3306` | x |
| `DB01_MYSQL_EVENTS` | Backup Events for | `TRUE` | | | `DB01_MYSQL_EVENTS` | Backup Events for | `TRUE` | |
| `DB01_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | | | `DB01_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
@@ -577,6 +578,7 @@ Encryption will occur after compression and the resulting filename will have a `
| `DB01_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | | | `DB01_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | | | `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
| | Backup multiple by separating with commas eg `db1,db2` | | x | | | Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB01_SPLIT_DB` | If using `ALL` - use this to split each database into its own file as opposed to one singular file | `FALSE` | |
| `DB01_PORT` | PostgreSQL Port | `5432` | x | | `DB01_PORT` | PostgreSQL Port | `5432` | x |
###### Redis ###### Redis

View File

@@ -16,7 +16,7 @@ fi
trap ctrl_c INT trap ctrl_c INT
if [[ "${MODE,,}" =~ "standalone" ]] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then if [[ "${MODE,,}" =~ "standalone" ]] || [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
print_debug "Detected Manual Mode" print_debug "Detected Manual Mode"
persist=false persist=false
backup_job_backup_begin=+0 backup_job_backup_begin=+0
@@ -90,7 +90,6 @@ while true; do
fi fi
symlink_log symlink_log
cleanup_old_data
if var_false "${persist}" ; then if var_false "${persist}" ; then
print_debug "Exiting due to manual mode" print_debug "Exiting due to manual mode"

View File

@@ -94,7 +94,7 @@ bootstrap_variables() {
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \ DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \
DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \ DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \
BLOBXFER_STORAGE_ACCOUNT \ BLOBXFER_STORAGE_ACCOUNT \
BLOBXFER_STORAGE_KEY \ BLOBXFER_STORAGE_ACCOUNT_KEY \
DB_HOST \ DB_HOST \
DB_NAME \ DB_NAME \
DB_PORT \ DB_PORT \
@@ -188,7 +188,7 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" BLACKOUT_END backup_job_snapshot_blackout_finish transform_backup_instance_variable "${backup_instance_number}" BLACKOUT_END backup_job_snapshot_blackout_finish
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_KEY backup_job_blobxfer_storage_key transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key
transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum
transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time
transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression
@@ -482,6 +482,7 @@ backup_couch() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename}" check_exit_code move "${backup_job_filename}"
post_dbbackup ${backup_job_db_name} post_dbbackup ${backup_job_db_name}
cleanup_old_data
} }
backup_influx() { backup_influx() {
@@ -522,6 +523,7 @@ backup_influx() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename_dir}" check_exit_code move "${backup_job_filename_dir}"
post_dbbackup "${db}" post_dbbackup "${db}"
cleanup_old_data
done done
;; ;;
2 ) 2 )
@@ -550,6 +552,7 @@ backup_influx() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename_dir}" check_exit_code move "${backup_job_filename_dir}"
post_dbbackup "${db}" post_dbbackup "${db}"
cleanup_old_data
done done
;; ;;
esac esac
@@ -586,6 +589,7 @@ backup_mongo() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename}" check_exit_code move "${backup_job_filename}"
post_dbbackup "${backup_job_db_name}" post_dbbackup "${backup_job_db_name}"
cleanup_old_data
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
} }
@@ -614,6 +618,7 @@ backup_mssql() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename}" check_exit_code move "${backup_job_filename}"
post_dbbackup "${backup_job_db_name}" post_dbbackup "${backup_job_db_name}"
cleanup_old_data
;; ;;
trn|transaction ) trn|transaction )
prepare_dbbackup prepare_dbbackup
@@ -636,6 +641,7 @@ backup_mssql() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename}" check_exit_code move "${backup_job_filename}"
post_dbbackup "${backup_job_db_name}" post_dbbackup "${backup_job_db_name}"
cleanup_old_data
;; ;;
esac esac
} }
@@ -687,6 +693,7 @@ backup_mysql() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename}" check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}" post_dbbackup "${db}"
cleanup_old_data
done done
else else
write_log debug "Not splitting database dumps into their own files" write_log debug "Not splitting database dumps into their own files"
@@ -707,6 +714,7 @@ backup_mysql() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename}" check_exit_code move "${backup_job_filename}"
post_dbbackup all post_dbbackup all
cleanup_old_data
fi fi
} }
@@ -728,6 +736,7 @@ backup_pgsql() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename}" check_exit_code move "${backup_job_filename}"
post_dbbackup "globals" post_dbbackup "globals"
cleanup_old_data
} }
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
@@ -776,6 +785,7 @@ backup_pgsql() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename}" check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}" post_dbbackup "${db}"
cleanup_old_data
done done
if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi
else else
@@ -805,6 +815,7 @@ backup_pgsql() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename}" check_exit_code move "${backup_job_filename}"
post_dbbackup all post_dbbackup all
cleanup_old_data
if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi
fi fi
} }
@@ -844,6 +855,7 @@ backup_redis() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename}" check_exit_code move "${backup_job_filename}"
post_dbbackup all post_dbbackup all
cleanup_old_data
} }
backup_sqlite3() { backup_sqlite3() {
@@ -851,7 +863,7 @@ backup_sqlite3() {
db=$(basename "${backup_job_db_host}") db=$(basename "${backup_job_db_host}")
db="${db%.*}" db="${db%.*}"
backup_job_filename=sqlite3_${db}_${now}.sqlite3 backup_job_filename=sqlite3_${db}_${now}.sqlite3
backup_job_filename_base=sqlite3_${db}.sqlite3 backup_job_filename_base=sqlite3_${db}
pre_dbbackup "${db}" pre_dbbackup "${db}"
write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}" write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}"
if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug on; fi if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug on; fi
@@ -872,6 +884,7 @@ backup_sqlite3() {
move_dbbackup move_dbbackup
check_exit_code move "${backup_job_filename}" check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}" post_dbbackup "${db}"
cleanup_old_data
} }
check_availability() { check_availability() {
@@ -1031,7 +1044,7 @@ cleanup_old_data() {
run_as_user mkdir -p "${backup_job_filesystem_path}" run_as_user mkdir -p "${backup_job_filesystem_path}"
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \; find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
write_log info "Syncing changes via blobxfer" write_log info "Syncing changes via blobxfer"
silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path} --delete --delete-only silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
;; ;;
"file" | "filesystem" ) "file" | "filesystem" )
write_log info "Cleaning up old backups on filesystem" write_log info "Cleaning up old backups on filesystem"
@@ -1160,13 +1173,14 @@ create_schedulers() {
instance=$(printf "%02d" $instance) instance=$(printf "%02d" $instance)
cp -R /assets/dbbackup/template-dbbackup /etc/services.available/dbbackup-"${instance}" cp -R /assets/dbbackup/template-dbbackup /etc/services.available/dbbackup-"${instance}"
sed -i "s|{{BACKUP_NUMBER}}|${instance}|g" /etc/services.available/dbbackup-"${instance}"/run sed -i "s|{{BACKUP_NUMBER}}|${instance}|g" /etc/services.available/dbbackup-"${instance}"/run
if [ "${MODE,,}" = "manual" ] ; then service_stop dbbackup-"${instance}" ; fi
cat <<EOF >> /usr/bin/backup"${instance}"-now cat <<EOF >> /usr/bin/backup"${instance}"-now
#!/bin/bash #!/bin/bash
source /assets/functions/00-container source /assets/functions/00-container
PROCESS_NAME=db-backup${instance} PROCESS_NAME=db-backup${instance}
print_info "Starting Manual Backup for db-backup${instance}" print_info "Starting Manual Backup for db-backup${instance}"
/var/run/s6/legacy-services/dbbackup-${instance}/run now #/var/run/s6/legacy-services/dbbackup-${instance}/run now
/etc/services.available/dbbackup-${instance}/run now
EOF EOF
chmod +x /usr/bin/backup"${instance}"-now chmod +x /usr/bin/backup"${instance}"-now
@@ -1176,11 +1190,11 @@ EOF
cat <<EOF > /usr/bin/backup-now cat <<EOF > /usr/bin/backup-now
#!/bin/bash #!/bin/bash
/usr/bin/backup${instance}-now /usr/bin/backup${instance}-now now
EOF EOF
else else
echo "/usr/bin/backup${instance}-now" >> /usr/bin/backup-now echo "/usr/bin/backup${instance}-now now" >> /usr/bin/backup-now
fi fi
instance=$(echo "${instance} +1" | bc) instance=$(echo "${instance} +1" | bc)
@@ -1337,20 +1351,20 @@ EOF
notify() { notify() {
if var_true "${DEBUG_NOTIFY}" ; then debug on; fi if var_true "${DEBUG_NOTIFY}" ; then debug on; fi
notification_custom() { notification_custom() {
if [ -n "${NOTIFICATION_SCRIPT}" ] ; then if [ -n "${NOTIFICATION_CUSTOM_SCRIPT}" ] ; then
if var_true "${NOTIFICATION_SCRIPT_SKIP_X_VERIFY}" ; then if var_true "${NOTIFICATION_CUSTOM_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${NOTIFICATION_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}" eval "${NOTIFICATION_CUSTOM_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
else else
if [ -x "${NOTIFICATION_SCRIPT}" ] ; then if [ -x "${NOTIFICATION_CUSTOM_SCRIPT}" ] ; then
write_log notice "Found NOTIFICATION_SCRIPT environment variable. Executing '${NOTIFICATION_SCRIPT}" write_log notice "Found NOTIFICATION_CUSTOM_SCRIPT environment variable. Executing '${NOTIFICATION_CUSTOM_SCRIPT}"
# script timestamp logfile errorcode subject body # script timestamp logfile errorcode subject body
eval "${NOTIFICATION_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}" eval "${NOTIFICATION_CUSTOM_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
else else
write_log error "Can't execute NOTIFICATION_SCRIPT environment variable '${NOTIFICATION_SCRIPT}' as its filesystem bit is not executible!" write_log error "Can't execute NOTIFICATION_CUSTOM_SCRIPT environment variable '${NOTIFICATION_CUSTOM_SCRIPT}' as its filesystem bit is not executible!"
fi fi
fi fi
else else
print_error "[notifications] No NOTIFICATION_SCRIPT variable set - Skipping sending Custom notifications" print_error "[notifications] No NOTIFICATION_CUSTOM_SCRIPT variable set - Skipping sending Custom notifications"
fi fi
} }
@@ -1452,7 +1466,7 @@ EOF
# $4 body # $4 body
if var_true "${ENABLE_NOTIFICATIONS}" ; then if var_true "${ENABLE_NOTIFICATIONS}" ; then
notification_types=$(echo "${NOTIIFICATION_TYPE}" | tr "," "\n") notification_types=$(echo "${NOTIFICATION_TYPE}" | tr "," "\n")
for notification_type in $notification_types ; do for notification_type in $notification_types ; do
case "${notification_type,,}" in case "${notification_type,,}" in
"custom" ) "custom" )
@@ -1585,7 +1599,7 @@ EOF
;; ;;
"blobxfer" ) "blobxfer" )
write_log info "Synchronize local storage from S3 Bucket with blobxfer" write_log info "Synchronize local storage from S3 Bucket with blobxfer"
${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path} --delete ${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete
write_log info "Moving backup to external storage with blobxfer" write_log info "Moving backup to external storage with blobxfer"
mkdir -p "${backup_job_filesystem_path}" mkdir -p "${backup_job_filesystem_path}"
@@ -1593,7 +1607,7 @@ EOF
run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}" run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path} silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
move_exit_code=$? move_exit_code=$?
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"."${checksum_extension}" ; fi if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"."${checksum_extension}" ; fi
@@ -1851,7 +1865,7 @@ timer() {
fi fi
done done
validate_all=$(echo "${validate_all}" | tr ' ' '\n' | sort -n -u | tr '\n' ' ') validate_all=$(echo "${validate_all}" | tr ' ' '\n' | sort -g -u | tr '\n' ' ')
for entry in $validate_all; do for entry in $validate_all; do
if [ ${entry} -ge ${3} ]; then if [ ${entry} -ge ${3} ]; then
echo "${entry}" echo "${entry}"