Compare commits

...

19 Commits

Author SHA1 Message Date
dave@tiredofit.ca
c37de5778d Release 4.0.31 - See CHANGELOG.md 2023-12-12 19:25:40 -08:00
dave@tiredofit.ca
eeeafd6ab8 Release 4.0.30 - See CHANGELOG.md 2023-12-11 15:21:01 -08:00
dave@tiredofit.ca
17daf26084 Release 4.0.29 - See CHANGELOG.md 2023-12-04 11:29:14 -08:00
Dave Conroy
b53cda99f7 Don't execute blobxfer functions if both key and secret are not set 2023-12-04 11:09:43 -08:00
Dave Conroy
2cf3e2ae70 Show proper DB Name when backing up Mongo or MSSQL 2023-12-04 08:06:57 -08:00
dave@tiredofit.ca
c7ee94aec2 Release 4.0.28 - See CHANGELOG.md 2023-12-04 07:04:08 -08:00
Dave Conroy
f44233e51a AWS CLI 1.31.5 2023-12-04 07:02:40 -08:00
dave@tiredofit.ca
ccda858b18 Release 4.0.27 - See CHANGELOG.md 2023-12-04 07:00:39 -08:00
Dave Conroy
d58b27d5ef Use alternate cron 2023-12-03 22:04:12 -08:00
dave@tiredofit.ca
fb9fe8a032 Release 4.0.26 - See CHANGELOG.md 2023-11-30 08:55:34 -08:00
Dave Conroy
b705982ae1 Restore missing _SPLIT_DB environment variable information for MySQL/Postgres 2023-11-30 08:54:49 -08:00
dave@tiredofit.ca
f031d787ae Release 4.0.25 - See CHANGELOG.md 2023-11-29 10:43:25 -08:00
Dave Conroy
3eed5fc8a0 Switch BLOBXFER_STORAGE_KEY to BLOBXFER_STORAGE_ACCOUNT_KEY 2023-11-29 10:39:58 -08:00
dave@tiredofit.ca
be619fb707 Release 4.0.24 - See CHANGELOG.md 2023-11-28 15:06:50 -08:00
dave@tiredofit.ca
cccc088b35 Release 4.0.23 - See CHANGELOG.md 2023-11-28 08:05:11 -08:00
dave@tiredofit.ca
4579f4057c Release 4.0.22 - See CHANGELOG.md 2023-11-25 08:50:25 -08:00
dave@tiredofit.ca
cd683648d0 Release 4.0.21 - See CHANGELOG.md 2023-11-22 15:40:38 -08:00
dave@tiredofit.ca
11f55f3d82 Release 4.0.20 - See CHANGELOG.md 2023-11-21 15:18:22 -08:00
dave@tiredofit.ca
674a98fcd8 Release 4.0.19 - See CHANGELOG.md 2023-11-20 15:26:21 -08:00
7 changed files with 239 additions and 109 deletions

View File

@@ -1,3 +1,83 @@
## 4.0.31 2023-12-12 <dave at tiredofit dot ca>
### Changed
- Support backing up databases with spaces in them
## 4.0.30 2023-12-11 <dave at tiredofit dot ca>
### Added
- Separate each job with its own temporary folder for isolation and to better clean up jobs that backup as a directory instead of a flat file
## 4.0.29 2023-12-04 <dave at tiredofit dot ca>
### Changed
- Skip blobxfer if either account or key is not present
## 4.0.28 2023-12-04 <dave at tiredofit dot ca>
### Changed
- AWS Cli 1.31.5
- Switch to using PIP for installing AWS-Cli to remove deprecation warnings
## 4.0.27 2023-12-04 <dave at tiredofit dot ca>
### Changed
- Switch to using actual crontab for cron expressions
## 4.0.26 2023-11-30 <dave at tiredofit dot ca>
### Added
- AWS CLI 1.31.4
## 4.0.25 2023-11-29 <dave at tiredofit dot ca>
### Changed
- Fix #297 - Add parameters to blobxfer to restore functionality
## 4.0.24 2023-11-28 <dave at tiredofit dot ca>
### Changed
- Fix issue with cron parsing and 0 being a value getting clobbered by sort command
## 4.0.23 2023-11-28 <dave at tiredofit dot ca>
### Changed
- Resolve issue with custom notification scripts not executing
## 4.0.22 2023-11-25 <dave at tiredofit dot ca>
### Changed
- Move cleanup_old_data routines to happen within backup_ function to properly accommodate for globals, and ALL DB_NAME use cases
## 4.0.21 2023-11-22 <dave at tiredofit dot ca>
### Changed
- Fix for SQLite backups not being cleaned up properly due to a malformed base
## 4.0.20 2023-11-21 <dave at tiredofit dot ca>
### Changed
- Update base image to support S6 Overlay 3.1.6.2 to solve shutdown issues specifically with MODE=MANUAL and MANUAL_RUN_FOREVER=TRUE
- Add some safety nets for Manual scheduling
## 4.0.19 2023-11-20 <dave at tiredofit dot ca>
### Changed
- Make adjustments to cron scheduling feature to be able to handle whitespace properly
## 4.0.18 2023-11-18 <joergmschulz@github>
### Changed

View File

@@ -1,5 +1,5 @@
ARG DISTRO=alpine
ARG DISTRO_VARIANT=edge
ARG DISTRO_VARIANT=3.19
FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
@@ -9,7 +9,7 @@ ENV INFLUX1_CLIENT_VERSION=1.8.0 \
INFLUX2_CLIENT_VERSION=2.7.3 \
MSODBC_VERSION=18.3.2.1-1 \
MSSQL_VERSION=18.3.1.1-1 \
AWS_CLI_VERSION=1.29.78 \
AWS_CLI_VERSION=1.31.5 \
CONTAINER_ENABLE_MESSAGING=TRUE \
CONTAINER_ENABLE_MONITORING=TRUE \
IMAGE_NAME="tiredofit/db-backup" \
@@ -76,10 +76,8 @@ RUN source /assets/functions/00-container && \
*) sleep 0.1 ;; \
esac; \
\
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; ls -l ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
clone_git_repo https://github.com/aws/aws-cli "${AWS_CLI_VERSION}" && \
python3 setup.py install --prefix=/usr && \
clone_git_repo https://github.com/influxdata/influxdb "${INFLUX1_CLIENT_VERSION}" && \
go build -o /usr/sbin/influxd ./cmd/influxd && \
strip /usr/sbin/influxd && \
@@ -99,6 +97,7 @@ RUN source /assets/functions/00-container && \
make && \
make install && \
\
pip3 install --break-system-packages awscli==${AWS_CLI_VERSION} && \
pip3 install --break-system-packages blobxfer && \
\
package remove .db-backup-build-deps && \

View File

@@ -536,6 +536,7 @@ Encryption will occur after compression and the resulting filename will have a `
| `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
| | Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB01_NAME_EXCLUDE` | If using `ALL` - use this to exclude databases, separated by commas, from being backed up | | x |
| `DB01_SPLIT_DB` | If using `ALL` - use this to split each database into its own file as opposed to one singular file | `FALSE` | |
| `DB01_PORT` | MySQL / MariaDB Port | `3306` | x |
| `DB01_MYSQL_EVENTS` | Backup Events for | `TRUE` | |
| `DB01_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
@@ -577,6 +578,7 @@ Encryption will occur after compression and the resulting filename will have a `
| `DB01_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
| | Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB01_SPLIT_DB` | If using `ALL` - use this to split each database into its own file as opposed to one singular file | `FALSE` | |
| `DB01_PORT` | PostgreSQL Port | `5432` | x |
###### Redis

View File

@@ -57,7 +57,7 @@ services:
# Add here azure storage account
- DB01_BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name}
# Add here azure storage account key
- SB01_BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
- DB01_BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
- DB01_BLOBXFER_REMOTE_PATH=docker-db-backup
restart: always
networks:

View File

@@ -16,7 +16,7 @@ fi
trap ctrl_c INT
if [[ "${MODE,,}" =~ "standalone" ]] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
if [[ "${MODE,,}" =~ "standalone" ]] || [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
print_debug "Detected Manual Mode"
persist=false
backup_job_backup_begin=+0
@@ -24,7 +24,6 @@ else
silent sleep {{BACKUP_NUMBER}}
time_last_run=0
time_current=$(date +'%s')
if [[ "${backup_job_backup_begin}" =~ ^\+(.*)$ ]]; then
print_debug "BACKUP_BEGIN is a jump of minute starting with +"
timer plusvalue
@@ -34,10 +33,21 @@ else
elif [[ "${backup_job_backup_begin}" =~ ([0-9]{4})-([0-9]{2})-([0-9]{2})[[:space:]]([0-9]{2}):([0-9]{2}):([0-9]{2}) ]]; then
print_debug "BACKUP_BEGIN is a full date timestamp"
timer datetime
#elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(.*((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then # Allow slashes, yet not supporting advanced cron yet
elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then
print_debug "BACKUP_BEGIN is a cron expression"
time_last_run=$(date +"%s")
timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
if var_false "${CRON_ALTERNATE}"; then
time_last_run=$(date +"%s")
backup_job_backup_begin=${backup_job_backup_begin//\"/}
backup_job_backup_begin=${backup_job_backup_begin//\'/}
timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
else
echo "${backup_job_backup_begin} /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}/run now" > /tmp/.container/cron/{{BACKUP_NUMBER}}-backup
crontab -l | { cat; echo "${backup_job_backup_begin} /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}/run now"; } | crontab -
s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
exit 0
fi
else
print_error "_BACKUP_BEGIN is invalid - Unable to perform scheduling"
cat <<EOF
@@ -87,7 +97,6 @@ while true; do
fi
symlink_log
cleanup_old_data
if var_false "${persist}" ; then
print_debug "Exiting due to manual mode"
@@ -98,8 +107,8 @@ while true; do
s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
else
if [ ! "${time_cron}" = "true" ]; then
print_notice "Sleeping for another $(($backup_job_backup_interval*60-backup_job_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($backup_job_backup_interval*60-backup_job_total_time))))" +'%Y-%m-%d %T %Z') "
silent sleep $(($backup_job_backup_interval*60-backup_job_total_time))
print_notice "Sleeping for another $((backup_job_backup_interval*60-backup_job_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$((backup_job_backup_interval*60-backup_job_total_time))))" +'%Y-%m-%d %T %Z') "
silent sleep $((backup_job_backup_interval*60-backup_job_total_time))
else
time_last_run=$(date +"%s")
timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"

View File

@@ -1,6 +1,7 @@
#!/command/with-contenv bash
BACKUP_JOB_CONCURRENCY=${BACKUP_JOB_CONCURRENCY:-"1"}
CRON_ALTERNATE=${CRON_ALTERNATE:-"TRUE"}
DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}

View File

@@ -94,7 +94,7 @@ bootstrap_variables() {
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \
DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \
BLOBXFER_STORAGE_ACCOUNT \
BLOBXFER_STORAGE_KEY \
BLOBXFER_STORAGE_ACCOUNT_KEY \
DB_HOST \
DB_NAME \
DB_PORT \
@@ -153,6 +153,11 @@ bootstrap_variables() {
fi
##
if grep -qo ".*_NAME='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _NAME variable with quotes"
sed -i "s|_NAME='\(.*\)'|_NAME=\1|g" "${backup_instance_vars}"
fi
if grep -qo ".*_PASS='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _PASS variable with quotes"
sed -i "s|_PASS='\(.*\)'|_PASS=\1|g" "${backup_instance_vars}"
@@ -188,7 +193,7 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" BLACKOUT_END backup_job_snapshot_blackout_finish
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_KEY backup_job_blobxfer_storage_key
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key
transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum
transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time
transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression
@@ -482,10 +487,11 @@ backup_couch() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup ${backup_job_db_name}
cleanup_old_data
}
backup_influx() {
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${backup_job_db_name,,}" = "all" ] ; then
write_log debug "[backup_influx] Preparing to back up everything"
db_names=justbackupeverything
@@ -508,20 +514,19 @@ backup_influx() {
pre_dbbackup "${db}"
write_log notice "Dumping Influx database: '${db}'"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} "${TEMP_PATH}"/"${backup_job_filename_dir}"
run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} "${temporary_directory}"/"${backup_job_filename}"
exit_code=$?
check_exit_code backup "${backup_job_filename_dir}"
write_log notice "Creating archive file of '${backup_job_filename_dir}' with tar ${compression_string}"
run_as_user tar cf - "${TEMP_PATH}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
check_exit_code backup "${backup_job_filename}"
create_archive
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename_dir}"
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
done
;;
2 )
@@ -537,19 +542,19 @@ backup_influx() {
pre_dbbackup "${db}"
write_log notice "Dumping Influx2 database: '${db}'"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --compression none "${TEMP_PATH}"/"${backup_job_filename_dir}"
run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --compression none "${temporary_directory}"/"${backup_job_filename}"
exit_code=$?
check_exit_code backup "${backup_job_filename_dir}"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
create_archive
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename_dir}"
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
done
;;
esac
@@ -574,9 +579,9 @@ backup_mongo() {
fi
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
pre_dbbackup "${backup_job_db_name}"
write_log notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
write_log notice "Dumping MongoDB database: '${backup_job_db_name}' ${compression_string}"
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi
silent run_as_user ${play_fair} mongodump --archive=${TEMP_PATH}/${backup_job_filename} ${mongo_compression} ${mongo_backup_parameter}
silent run_as_user ${play_fair} mongodump --archive=${temporary_directory}/${backup_job_filename} ${mongo_compression} ${mongo_backup_parameter}
exit_code=$?
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
@@ -586,6 +591,7 @@ backup_mongo() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${backup_job_db_name}"
cleanup_old_data
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
}
@@ -596,16 +602,16 @@ backup_mssql() {
backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.bak
backup_job_filename_base=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}
pre_dbbackup "${backup_job_db_name}"
write_log notice "Dumping MSSQL database: '${DB_NAME}'"
write_log notice "Dumping MSSQL database: '${backup_job_db_name}'"
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP DATABASE [${backup_job_db_name}] TO DISK = N'${TEMP_PATH}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP DATABASE [${backup_job_db_name}] TO DISK = N'${temporary_directory}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
backup_job_filename_original=${backup_job_filename}
compression
pre_dbbackup all
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
check_exit_code backup "${backup_job_filename}"
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
timer backup finish
@@ -614,28 +620,30 @@ backup_mssql() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${backup_job_db_name}"
cleanup_old_data
;;
trn|transaction )
prepare_dbbackup
backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.trn
backup_job_filename_base=mssql_${backup_job_db_name,,}_trn_${backup_job_db_host,,}
pre_dbbackup "${backup_job_db_name}"
write_log notice "Dumping MSSQL database: '${DB_NAME}'"
write_log notice "Dumping MSSQL database: '${backup_job_db_name}'"
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP LOG [${backup_job_db_name}] TO DISK = N'${TEMP_PATH}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-log', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP LOG [${backup_job_db_name}] TO DISK = N'${temporary_directory}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-log', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
backup_job_filename_original=${backup_job_filename}
compression
pre_dbbackup all
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
check_exit_code backup "${backup_job_filename}"
run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
file_encryption
timer backup finish
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${backup_job_db_name}"
cleanup_old_data
;;
esac
}
@@ -677,7 +685,7 @@ backup_mysql() {
pre_dbbackup "${db}"
write_log notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} $db | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
@@ -687,6 +695,7 @@ backup_mysql() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
done
else
write_log debug "Not splitting database dumps into their own files"
@@ -697,7 +706,7 @@ backup_mysql() {
pre_dbbackup all
write_log notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
@@ -707,6 +716,7 @@ backup_mysql() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup all
cleanup_old_data
fi
}
@@ -718,7 +728,7 @@ backup_pgsql() {
pre_dbbackup "globals"
print_notice "Dumping PostgresSQL globals: with 'pg_dumpall -g' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code "${backup_job_filename}"
@@ -728,6 +738,7 @@ backup_pgsql() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "globals"
cleanup_old_data
}
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
@@ -766,7 +777,7 @@ backup_pgsql() {
pre_dbbackup "${db}"
write_log notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
@@ -776,6 +787,7 @@ backup_pgsql() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
done
if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi
else
@@ -795,7 +807,7 @@ backup_pgsql() {
for x_db_name in ${tmp_db_names} ; do
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
@@ -805,6 +817,7 @@ backup_pgsql() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup all
cleanup_old_data
if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi
fi
}
@@ -815,7 +828,7 @@ backup_redis() {
backup_job_filename=redis_all_${backup_job_db_host,,}_${now}.rdb
backup_job_filename_base=redis_${backup_job_db_host,,}
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${TEMP_PATH}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}
echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${temporary_directory}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}
sleep 10
try=5
while [ $try -gt 0 ] ; do
@@ -835,7 +848,7 @@ backup_redis() {
compression
pre_dbbackup all
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi
timer backup finish
check_exit_code backup "${backup_job_filename}"
@@ -844,6 +857,7 @@ backup_redis() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup all
cleanup_old_data
}
backup_sqlite3() {
@@ -851,20 +865,20 @@ backup_sqlite3() {
db=$(basename "${backup_job_db_host}")
db="${db%.*}"
backup_job_filename=sqlite3_${db}_${now}.sqlite3
backup_job_filename_base=sqlite3_${db}.sqlite3
backup_job_filename_base=sqlite3_${db}
pre_dbbackup "${db}"
write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}"
if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug on; fi
silent ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${TEMP_PATH}/backup_${now}.sqlite3'"
silent ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${temporary_directory}/backup_${now}.sqlite3'"
exit_code=$?
check_exit_code backup "${backup_job_filename}"
if [ ! -f "${TEMP_PATH}"/backup_${now}.sqlite3 ] ; then
if [ ! -f "${temporary_directory}"/backup_${now}.sqlite3 ] ; then
print_error "SQLite3 backup failed! Exitting"
return 1
fi
compression
run_as_user ${play_fair} cat "${TEMP_PATH}"/backup_${now}.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}/${backup_job_filename}" > /dev/null
rm -rf "${TEMP_PATH}"/backup_${now}.sqlite3
run_as_user ${play_fair} cat "${temporary_directory}"/backup_${now}.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}/${backup_job_filename}" > /dev/null
rm -rf "${temporary_directory}"/backup_${now}.sqlite3
if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug off; fi
timer backup finish
file_encryption
@@ -872,6 +886,7 @@ backup_sqlite3() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
}
check_availability() {
@@ -1030,8 +1045,12 @@ cleanup_old_data() {
write_log info "Cleaning up old backups on filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}"
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
write_log info "Syncing changes via blobxfer"
silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path} --delete --delete-only
if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
else
write_log info "Syncing changes via blobxfer"
silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
fi
;;
"file" | "filesystem" )
write_log info "Cleaning up old backups on filesystem"
@@ -1136,8 +1155,9 @@ compression() {
create_archive() {
if var_true "${DEBUG_CREATE_ARCHIVE}" ; then debug on; fi
if [ "${exit_code}" = "0" ] ; then
write_log notice "Creating archive file of '${backup_job_filename_dir}' with tar ${compression_string}"
run_as_user tar cf - "${TEMP_PATH}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
write_log notice "Creating archive file of '${backup_job_filename}' with tar ${compression_string}"
run_as_user tar cf - "${temporary_directory}"/"${backup_job_filename}" | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}".tar"${extension}" > /dev/null
rm -rf "${temporary_directory}"/"${backup_job_filename}"
else
write_log error "Skipping creating archive file because backup did not complete successfully"
fi
@@ -1160,13 +1180,14 @@ create_schedulers() {
instance=$(printf "%02d" $instance)
cp -R /assets/dbbackup/template-dbbackup /etc/services.available/dbbackup-"${instance}"
sed -i "s|{{BACKUP_NUMBER}}|${instance}|g" /etc/services.available/dbbackup-"${instance}"/run
if [ "${MODE,,}" = "manual" ] ; then service_stop dbbackup-"${instance}" ; fi
cat <<EOF >> /usr/bin/backup"${instance}"-now
#!/bin/bash
source /assets/functions/00-container
PROCESS_NAME=db-backup${instance}
print_info "Starting Manual Backup for db-backup${instance}"
/var/run/s6/legacy-services/dbbackup-${instance}/run now
#/var/run/s6/legacy-services/dbbackup-${instance}/run now
/etc/services.available/dbbackup-${instance}/run now
EOF
chmod +x /usr/bin/backup"${instance}"-now
@@ -1176,11 +1197,11 @@ EOF
cat <<EOF > /usr/bin/backup-now
#!/bin/bash
/usr/bin/backup${instance}-now
/usr/bin/backup${instance}-now now
EOF
else
echo "/usr/bin/backup${instance}-now" >> /usr/bin/backup-now
echo "/usr/bin/backup${instance}-now now" >> /usr/bin/backup-now
fi
instance=$(echo "${instance} +1" | bc)
@@ -1258,7 +1279,7 @@ file_encryption() {
print_notice "Encrypting with GPG Passphrase"
encrypt_routines_start_time=$(date +'%s')
encrypt_tmp_dir=$(run_as_user mktemp -d)
echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${TEMP_PATH}"/"${backup_job_filename}"
echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${temporary_directory}"/"${backup_job_filename}"
rm -rf "${encrypt_tmp_dir}"
elif [ -z "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ] && [ -n "${backup_job_encrypt_private_key}" ]; then
if [ -f "${backup_job_encrypt_private_key}" ]; then
@@ -1270,13 +1291,13 @@ file_encryption() {
silent run_as_user gpg --home ${encrypt_tmp_dir} --batch --import "${encrypt_tmp_dir}"/private_key.asc
print_debug "[file_encryption] [key] Encrypting to Public Key"
cat "${backup_job_encrypt_public_key}" | run_as_user tee "${encrypt_tmp_dir}"/public_key.asc > /dev/null
silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --encrypt --recipient-file "${encrypt_tmp_dir}"/public_key.asc "${TEMP_PATH}"/"${backup_job_filename}"
silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --encrypt --recipient-file "${encrypt_tmp_dir}"/public_key.asc "${temporary_directory}"/"${backup_job_filename}"
rm -rf "${encrypt_tmp_dir}"
fi
fi
if [ -f "${TEMP_PATH}"/"${backup_job_filename}".gpg ]; then
if [ -f "${temporary_directory}"/"${backup_job_filename}".gpg ]; then
print_debug "[file_encryption] Deleting original file"
rm -rf "${TEMP_PATH:?}"/"${backup_job_filename:?}"
rm -rf "${temporary_directory:?}"/"${backup_job_filename:?}"
backup_job_filename="${backup_job_filename}.gpg"
encrypt_routines_finish_time=$(date +'%s')
@@ -1315,7 +1336,7 @@ generate_checksum() {
checksum_routines_start_time=$(date +'%s')
write_log notice "Generating ${checksum_extension^^} sum for '${backup_job_filename}'"
cd "${TEMP_PATH}"
cd "${temporary_directory}"
run_as_user ${checksum_command} "${backup_job_filename}" | run_as_user tee "${backup_job_filename}"."${checksum_extension}" > /dev/null
chmod ${backup_job_filesystem_permission} "${backup_job_filename}"."${checksum_extension}"
checksum_value=$(run_as_user cat "${backup_job_filename}"."${checksum_extension}" | awk '{print $1}')
@@ -1337,20 +1358,20 @@ EOF
notify() {
if var_true "${DEBUG_NOTIFY}" ; then debug on; fi
notification_custom() {
if [ -n "${NOTIFICATION_SCRIPT}" ] ; then
if var_true "${NOTIFICATION_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${NOTIFICATION_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
if [ -n "${NOTIFICATION_CUSTOM_SCRIPT}" ] ; then
if var_true "${NOTIFICATION_CUSTOM_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${NOTIFICATION_CUSTOM_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
else
if [ -x "${NOTIFICATION_SCRIPT}" ] ; then
write_log notice "Found NOTIFICATION_SCRIPT environment variable. Executing '${NOTIFICATION_SCRIPT}"
if [ -x "${NOTIFICATION_CUSTOM_SCRIPT}" ] ; then
write_log notice "Found NOTIFICATION_CUSTOM_SCRIPT environment variable. Executing '${NOTIFICATION_CUSTOM_SCRIPT}"
# script timestamp logfile errorcode subject body
eval "${NOTIFICATION_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
eval "${NOTIFICATION_CUSTOM_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
else
write_log error "Can't execute NOTIFICATION_SCRIPT environment variable '${NOTIFICATION_SCRIPT}' as its filesystem bit is not executible!"
write_log error "Can't execute NOTIFICATION_CUSTOM_SCRIPT environment variable '${NOTIFICATION_CUSTOM_SCRIPT}' as its filesystem bit is not executible!"
fi
fi
else
print_error "[notifications] No NOTIFICATION_SCRIPT variable set - Skipping sending Custom notifications"
print_error "[notifications] No NOTIFICATION_CUSTOM_SCRIPT variable set - Skipping sending Custom notifications"
fi
}
@@ -1452,7 +1473,7 @@ EOF
# $4 body
if var_true "${ENABLE_NOTIFICATIONS}" ; then
notification_types=$(echo "${NOTIIFICATION_TYPE}" | tr "," "\n")
notification_types=$(echo "${NOTIFICATION_TYPE}" | tr "," "\n")
for notification_type in $notification_types ; do
case "${notification_type,,}" in
"custom" )
@@ -1487,8 +1508,8 @@ EOF
move_dbbackup() {
if var_true "${DEBUG_MOVE_DBBACKUP}" ; then debug on; fi
if [ "${exit_code}" = "0" ] ; then
dbbackup_size="$(run_as_user stat -c%s "${TEMP_PATH}"/"${backup_job_filename}")"
dbbackup_date="$(run_as_user date -r "${TEMP_PATH}"/"${backup_job_filename}" +'%s')"
dbbackup_size="$(run_as_user stat -c%s "${temporary_directory}"/"${backup_job_filename}")"
dbbackup_date="$(run_as_user date -r "${temporary_directory}"/"${backup_job_filename}" +'%s')"
case "${backup_job_size_value,,}" in
"b" | "bytes" )
@@ -1502,37 +1523,37 @@ move_dbbackup() {
;;
esac
if [ "${backup_job_size_value}" = "1" ] ; then
filesize=$(run_as_user stat -c%s "${TEMP_PATH}"/"${backup_job_filename}")
filesize=$(run_as_user stat -c%s "${temporary_directory}"/"${backup_job_filename}")
write_log notice "Backup of '${backup_job_filename}' created with the size of ${filesize} bytes"
else
filesize=$(run_as_user du -h "${TEMP_PATH}"/"${backup_job_filename}" | awk '{ print $1}')
filesize=$(run_as_user du -h "${temporary_directory}"/"${backup_job_filename}" | awk '{ print $1}')
write_log notice "Backup of '${backup_job_filename}' created with the size of ${filesize}"
fi
chmod "${backup_job_filesystem_permission}" "${TEMP_PATH}"/"${backup_job_filename}"
chmod "${backup_job_filesystem_permission}" "${temporary_directory}"/"${backup_job_filename}"
case "${backup_job_backup_location,,}" in
"file" | "filesystem" )
write_log debug "Moving backup to filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}"
if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${TEMP_PATH}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi
if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${temporary_directory}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi
if var_true "${DEBUG_MOVE_DBBACKUP}"; then
cat <<EOF
## BEGIN Before Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
## BEGIN Before Moving file from temporary_directory $(TZ=${TIMEZONE} date)
##
$(ls -l "${TEMP_PATH}"/*)
$(ls -l "${temporary_directory}"/*)
## END
EOF
fi
run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
move_exit_code=$?
if var_true "${DEBUG_MOVE_DBBACKUP}"; then
cat <<EOF
## BEGIN After Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
## BEGIN After Moving file from temporary_directory $(TZ=${TIMEZONE} date)
##
$(ls -l "${TEMP_PATH}"/*)
$(ls -l "${temporary_directory}"/*)
## END
@@ -1574,43 +1595,49 @@ EOF
[[ ( -n "${backup_job_s3_host}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${backup_job_s3_protocol}://${backup_job_s3_host}"
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_PATH}/${backup_job_filename} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${backup_job_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${temporary_directory}/${backup_job_filename} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${backup_job_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
move_exit_code=$?
if [ "${backup_job_checksum}" != "none" ] ; then
silent run_as_user ${play_fair} aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_PATH}/*.${checksum_extension} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/ ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"."${checksum_extension}"
silent run_as_user ${play_fair} aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${temporary_directory}/*.${checksum_extension} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/ ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}"
fi
run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"
run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
;;
"blobxfer" )
write_log info "Synchronize local storage from S3 Bucket with blobxfer"
${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path} --delete
if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
else
write_log info "Synchronize local storage from S3 Bucket with blobxfer"
${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete
write_log info "Moving backup to external storage with blobxfer"
mkdir -p "${backup_job_filesystem_path}"
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user mv "${TEMP_PATH}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/; fi
write_log info "Moving backup to external storage with blobxfer"
mkdir -p "${backup_job_filesystem_path}"
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user mv "${temporary_directory}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/; fi
run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path}
move_exit_code=$?
silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
move_exit_code=$?
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"."${checksum_extension}" ; fi
run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}" ; fi
run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
fi
;;
esac
else
write_log error "Skipping moving DB Backup to final location because backup did not complete successfully"
fi
run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"
run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
if var_true "${DEBUG_MOVE_DBBACKUP}" ; then debug off; fi
}
# Set up per-run state before a backup job executes:
# start the job timer, create a private scratch directory, and derive
# the backup output filename from the database type/name/host.
prepare_dbbackup() {
timer backup start
# Timestamp embedded in the generated backup filename.
now=$(run_as_user date +"%Y%m%d-%H%M%S")
# Unique per-instance working directory under TEMP_PATH; it is removed
# later during post-backup cleanup (rm -rf "${temporary_directory}").
temporary_directory=$(mktemp -d -p "${TEMP_PATH}" -t ${backup_instance_number}_dbbackup.XXXXXX)
# mktemp runs as root here, so hand ownership to the backup user/group
# because subsequent steps write into it via run_as_user.
chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${temporary_directory}"
# ${var,,} lowercases db name/host so filenames are consistent across jobs.
backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}
backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.sql
}
@@ -1725,6 +1752,8 @@ EOZP
write_log notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
if var_true "${DEBUG_POST_DBBACKUP}" ; then debug on; fi
cd "${TEMP_PATH}"
rm -rf "${temporary_directory}"
}
process_limiter() {
@@ -1843,7 +1872,7 @@ timer() {
if [ "${expression_step}" != "${expression}" ]; then
for step in ${validate_temp}; do
if [ $(( ( step - expression_start ) % expression_step )) -eq 0 ]; then
validate_all="$validate_all ${step}"
validate_all="${validate_all} ${step}"
fi
done
else
@@ -1851,15 +1880,16 @@ timer() {
fi
done
validate_all=$(echo "${validate_all}" | tr ' ' '\n' | sort -n -u | tr '\n' ' ')
validate_all=$(echo "${validate_all}" | tr ' ' '\n' | sort -g -u | tr '\n' ' ')
for entry in $validate_all; do
if [ "${entry}" -ge "${3}" ]; then
if [ ${entry} -ge ${3} ]; then
echo "${entry}"
return 0
fi
done
echo "${validate_all%% *}"
echo "${validate_all// /}"
#echo "${validate_all%% *}"
}
local cron_compare="${3}"
@@ -1867,18 +1897,21 @@ timer() {
local cron_compare_difference=$(( cron_compare - ${4} ))
if [ "${cron_compare_difference}" -lt 60 ]; then
cron_compare=$((${cron_compare} + $(( 60 - cron_compare_difference )) ))
cron_compare=$((cron_compare + $(( 60 - cron_compare_difference )) ))
fi
local cron_current_seconds="$(date --date=@"${cron_compare_seconds}" +"%-S")"
if [ "${cron_current_seconds}" -ne 0 ]; then
cron_compare_seconds=$(( cron_compare_seconds - cron_current_seconds ))
cron_compare=$(( cron_compare_seconds - cron_current_seconds ))
fi
local cron_minute="$(echo -n "${2}" | awk '{print $1}')"
local cron_hour="$(echo -n "${2}" | awk '{print $2}')"
local cron_day_of_month="$(echo -n "${2}" | awk '{print $3}')"
local cron_month="$(echo -n "${2}" | awk '{print $4}')"Generating
local cron_month="$(echo -n "${2}" | awk '{print $4}')"
local cron_day_of_week="$(echo -n "${2}" | awk '{print $5}')"
local cron_next_minute="$(date --date=@"${cron_compare}" +"%-M")"
local cron_next_hour="$(date --date=@"${cron_compare}" +"%-H")"
local cron_next_day_of_month="$(date --date=@"${cron_compare}" +"%-d")"
local cron_next_month="$(date --date=@"${cron_compare}" +"%-m")"
@@ -1887,8 +1920,10 @@ timer() {
local cron_next_year="$(date --date=@"${cron_compare}" +"%-Y")"
local cron_next=
local cron_parsed=1
while [ "$cron_parsed" != "0" ]; do
while [ "${cron_parsed}" != "0" ]; do
print_debug "[timer] [cron] Parse Minute"
cron_next=$(parse_expression "${cron_minute}" 59 "${cron_next_minute}")
if [ "${cron_next}" != "${cron_next_minute}" ]; then
if [ "${cron_next_minute}" -gt "${cron_next}" ]; then
@@ -1898,20 +1933,23 @@ timer() {
cron_next_minute="${cron_next}"
fi
print_debug "[timer] [cron] Parse Hour"
cron_next=$(parse_expression "${cron_hour}" 23 "${cron_next_hour}")
if [ "${cron_next}" != "${cron_next_hour}" ]; then
if [ "${cron_next_hour}" -gt "${cron_next}" ]; then
cron_next_day_of_month=$(( cron_next_day_of_month + 1 ))
fi
cron_next_hour="${cron_next}"
#cron_next_minute=0
cron_next_minute=0
fi
print_debug "[timer] [cron] Parse Day of Week"
cron_next=$(parse_expression "${cron_day_of_week}" 6 "${cron_next_day_of_week}")
if [ "${cron_next}" != "${cron_next_day_of_week}" ]; then
day_of_week_difference=$(( ${cron_next} - ${cron_next_day_of_week} ))
day_of_week_difference=$(( cron_next - cron_next_day_of_week ))
if [ "${day_of_week_difference}" -lt "0" ]; then
if [ "${day_of_week_difference}" -lt 0 ]; then
day_of_week_difference=$(( day_of_week_difference + 7 ))
fi
@@ -1920,6 +1958,7 @@ timer() {
cron_next_minute=0
fi
print_debug "[timer] [cron] Parse day of month"
case "${cron_next_month}" in
1|3|5|7|8|10|12)
last_day_of_month="31"
@@ -1959,6 +1998,7 @@ timer() {
cron_next_day_of_month=$cron_next
fi
print_debug "[timer] [cron] Parse Next Month"
cron_next=$(parse_expression "${cron_month}" 12 "${cron_next_month}")
if [ "${cron_next}" != "${cron_next_month}" ]; then
if [ "${cron_next}" -gt "12" ]; then
@@ -1975,7 +2015,6 @@ timer() {
fi
cron_parsed=0
done
local cron_future=$(date --date="${cron_next_year}-$(printf "%02d" ${cron_next_month})-$(printf "%02d" ${cron_next_day_of_month})T$(printf "%02d" ${cron_next_hour}):$(printf "%02d" ${cron_next_minute}):00" "+%s")
local cron_future_difference=$(( cron_future - cron_compare_seconds ))
time_cron=true