Compare commits

...

5 Commits

Author SHA1 Message Date
dave@tiredofit.ca
fb9fe8a032 Release 4.0.26 - See CHANGELOG.md 2023-11-30 08:55:34 -08:00
Dave Conroy
b705982ae1 Restore missing _SPLIT_DB environment variable information for MySQL/Postgres 2023-11-30 08:54:49 -08:00
dave@tiredofit.ca
f031d787ae Release 4.0.25 - See CHANGELOG.md 2023-11-29 10:43:25 -08:00
Dave Conroy
3eed5fc8a0 Switch BLOBXFER_STORAGE_KEY to BLOBXFER_STORAGE_ACCOUNT_KEY 2023-11-29 10:39:58 -08:00
dave@tiredofit.ca
be619fb707 Release 4.0.24 - See CHANGELOG.md 2023-11-28 15:06:50 -08:00
4 changed files with 27 additions and 7 deletions

View File

@@ -1,3 +1,21 @@
## 4.0.26 2023-11-30 <dave at tiredofit dot ca>
### Added
- AWS CLI 1.31.4
## 4.0.25 2023-11-29 <dave at tiredofit dot ca>
### Changed
- Fix #297 - Add parameters to blobxfer to restore functionality
## 4.0.24 2023-11-28 <dave at tiredofit dot ca>
### Changed
- Fix issue with cron parsing and 0 being a value getting clobbered by sort command
## 4.0.23 2023-11-28 <dave at tiredofit dot ca>
### Changed

View File

@@ -9,7 +9,7 @@ ENV INFLUX1_CLIENT_VERSION=1.8.0 \
INFLUX2_CLIENT_VERSION=2.7.3 \
MSODBC_VERSION=18.3.2.1-1 \
MSSQL_VERSION=18.3.1.1-1 \
AWS_CLI_VERSION=1.29.78 \
AWS_CLI_VERSION=1.31.4 \
CONTAINER_ENABLE_MESSAGING=TRUE \
CONTAINER_ENABLE_MONITORING=TRUE \
IMAGE_NAME="tiredofit/db-backup" \

View File

@@ -536,6 +536,7 @@ Encryption will occur after compression and the resulting filename will have a `
| `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
| | Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB01_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases separated via commas from being backed up | | x |
| `DB01_SPLIT_DB` | If using `ALL` - use this to split each database into its own file as opposed to one singular file | `FALSE` | |
| `DB01_PORT` | MySQL / MariaDB Port | `3306` | x |
| `DB01_MYSQL_EVENTS` | Backup Events for MySQL | `TRUE` | |
| `DB01_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
@@ -577,6 +578,7 @@ Encryption will occur after compression and the resulting filename will have a `
| `DB01_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
| | Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB01_SPLIT_DB` | If using `ALL` - use this to split each database into its own file as opposed to one singular file | `FALSE` | |
| `DB01_PORT` | PostgreSQL Port | `5432` | x |
###### Redis

View File

@@ -94,7 +94,7 @@ bootstrap_variables() {
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \
DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \
BLOBXFER_STORAGE_ACCOUNT \
BLOBXFER_STORAGE_KEY \
BLOBXFER_STORAGE_ACCOUNT_KEY \
DB_HOST \
DB_NAME \
DB_PORT \
@@ -188,7 +188,7 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" BLACKOUT_END backup_job_snapshot_blackout_finish
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_KEY backup_job_blobxfer_storage_key
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key
transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum
transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time
transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression
@@ -1044,7 +1044,7 @@ cleanup_old_data() {
run_as_user mkdir -p "${backup_job_filesystem_path}"
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
write_log info "Syncing changes via blobxfer"
silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path} --delete --delete-only
silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
;;
"file" | "filesystem" )
write_log info "Cleaning up old backups on filesystem"
@@ -1599,7 +1599,7 @@ EOF
;;
"blobxfer" )
write_log info "Synchronize local storage from Azure Blob Storage with blobxfer"
${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path} --delete
${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete
write_log info "Moving backup to external storage with blobxfer"
mkdir -p "${backup_job_filesystem_path}"
@@ -1607,7 +1607,7 @@ EOF
run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path}
silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
move_exit_code=$?
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"."${checksum_extension}" ; fi
@@ -1865,7 +1865,7 @@ timer() {
fi
done
validate_all=$(echo "${validate_all}" | tr ' ' '\n' | sort -n -u | tr '\n' ' ')
validate_all=$(echo "${validate_all}" | tr ' ' '\n' | sort -g -u | tr '\n' ' ')
for entry in $validate_all; do
if [ ${entry} -ge ${3} ]; then
echo "${entry}"