Compare commits


10 Commits
3.2.2 ... 3.3.2

Author SHA1 Message Date
Dave Conroy    7d7cb9587d  Release 3.3.2 - See CHANGELOG.md  2022-05-02 22:28:08 -07:00
Dave Conroy    d1713fe3f0  Release 3.3.1 - See CHANGELOG.md  2022-04-30 22:31:34 -07:00
Dave Conroy    d1e98d9c4b  Release 3.3.0 - See CHANGELOG.md  2022-04-30 03:23:45 -07:00
Dave Conroy    0920b671cb  Release 3.2.6 - See CHANGELOG.md  2022-04-25 10:29:45 -07:00
Dave Conroy    28ed6c3bb8  Release 3.2.5 - See CHANGELOG.md  2022-04-23 14:11:29 -07:00
Dave Conroy    c1bdf26598  Release 3.2.4 - See CHANGELOG.md  2022-04-21 16:04:43 -07:00
Dave Conroy    5a4cac2cee  Release 3.2.3 - See CHANGELOG.md  2022-04-21 15:46:27 -07:00
Dave Conroy    c04eec7661  Add space after compress_cmd  2022-04-21 14:19:09 -07:00
Dave Conroy    32f1959a07  Merge pull request #120 from joergmschulz/patch-1 (small typo / exiting instead of exitting)  2022-04-21 14:18:43 -07:00
joergmschulz   d384d5a529  small typo / exiting instead of exitting  2022-04-21 23:16:02 +02:00
4 changed files with 109 additions and 30 deletions

View File

@@ -1,3 +1,42 @@
+## 3.3.2 2022-05-02 <dave at tiredofit dot ca>
+### Added
+- Add POST_SCRIPT_SKIP_X_VERIFY environment variable to allow for more host compatibility for post scripts
+## 3.3.1 2022-04-30 <dave at tiredofit dot ca>
+### Changed
+- Compressing silently was causing 0 byte backups
+## 3.3.0 2022-04-30 <dave at tiredofit dot ca>
+### Added
+- Ability to auto clean old S3 / Minio hosts like what occurs on filesystem
+- Alert user how to turn off Zabbix Monitoring if it fails
+- Allow Zabbix Monitoring to work with S3
+- Silence some more compression statements
+### Changed
+- Fix for Redis not backing up properly
+- Start sending checksums for S3 Outputs
+- Cleanup some code functions
+- Fix Container Log Level always in DEBUG
+## 3.2.4 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Add -portable flag when backing up Influx
+## 3.2.3 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Fix for bucket / db name InfluxDB 1.xx
+- Minor aesthetics, spacing, spelling
## 3.2.2 2022-04-21 <dave at tiredofit dot ca>
### Changed

View File

@@ -261,6 +261,9 @@ Outputs the following on the console:
If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`
+You must make your scripts executable, otherwise an internal check will skip trying to run them.
+If for some reason your filesystem or host is not detecting it right, use the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass the check.
## Support
These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
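For context, the new bypass could be wired up like this. This is a minimal usage sketch, not taken from the diff: the image tag, mount path, and the DB_TYPE/DB_HOST values are assumptions for illustration.

# Hypothetical example: run a post script even when the mount cannot expose
# an executable bit (e.g. some CIFS/NTFS volumes); all values assumed.
docker run -d \
  -e DB_TYPE=mysql \
  -e DB_HOST=db \
  -e POST_SCRIPT=/assets/custom/notify.sh \
  -e POST_SCRIPT_SKIP_X_VERIFY=TRUE \
  -v "$(pwd)/notify.sh:/assets/custom/notify.sh:ro" \
  tiredofit/db-backup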

View File

@@ -102,11 +102,11 @@ backup_influx() {
1 )
for db in ${db_names}; do
pre_dbbackup
-if [ "${db}" != "justbackupeverything" ] ; then bucket="-bucket $db" ; else db=all ; fi
+if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
target=influx_${db}_${DB_HOST#*//}_${now}
compression
print_notice "Dumping Influx database: '${db}'"
-influxd backup ${influx_compression} ${bucket} -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
+influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
@@ -279,7 +279,7 @@ backup_redis() {
pre_dbbackup
print_notice "Dumping Redis - Flushing Redis Cache First"
target=redis_all_${DB_HOST,,}_${now}.rdb
-echo bgsave | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
+echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
sleep 10
try=5
while [ $try -gt 0 ] ; do
@@ -287,6 +287,7 @@ backup_redis() {
ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
print_notice "Redis Backup Complete"
+exit_code=0
break
fi
try=$((try - 1))
@@ -296,6 +297,7 @@ backup_redis() {
target_original=${target}
compression
$compress_cmd "${TEMP_LOCATION}/${target_original}"
+check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup all
@@ -430,11 +432,31 @@ check_exit_code() {
cleanup_old_data() {
if [ -n "${DB_CLEANUP_TIME}" ]; then
if [ "${master_exit_code}" != 1 ]; then
+case "${BACKUP_LOCATION,,}" in
+"file" | "filesystem" )
print_info "Cleaning up old backups"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+;;
+"s3" | "minio" )
+print_info "Cleaning up old backups"
+aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | while read -r s3_file; do
+s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
+s3_createdate=$(date -d "$s3_createdate" "+%s")
+s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
+if [[ $s3_createdate -le $s3_olderthan ]] ; then
+s3_filename=$(echo $s3_file | awk {'print $4'})
+if [ $s3_filename != "" ] ; then
+print_debug "Deleting $s3_filename"
+silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+fi
+fi
+done
+;;
+esac
else
-print_info "Skipping Cleaning up old backups because there were errors in backing up"
+print_error "Skipping Cleaning up old backups because there were errors in backing up"
fi
fi
}
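Aside: the staleness test in the new S3 branch is plain epoch arithmetic. A standalone sketch of just that computation, using the same variable names as the hunk above (the retention value and timestamp literal are assumed examples; GNU-style `date -d` is assumed):

DB_CLEANUP_TIME=10080                                   # retention in minutes (7 days, assumed)
s3_createdate=$(date -d "2022-04-25 10:29:45" "+%s")    # creation time as parsed from `aws s3 ls`
s3_olderthan=$(( $(date +%s) - DB_CLEANUP_TIME * 60 ))  # cutoff: now minus retention, in epoch seconds
if [ "${s3_createdate}" -le "${s3_olderthan}" ]; then
  echo "backup predates the cutoff - it would be deleted"
fi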
@@ -446,7 +468,7 @@ compression() {
case "${COMPRESSION,,}" in case "${COMPRESSION,,}" in
gz* ) gz* )
compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} " compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compression_type="gzip" compression_type="gzip"
extension=".gz" extension=".gz"
dir_compress_cmd=${compress_cmd} dir_compress_cmd=${compress_cmd}
@@ -454,7 +476,7 @@ compression() {
target=${target}.gz
;;
bz* )
-compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
+compress_cmd="pbzip2 -q -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
compression_type="bzip2"
dir_compress_cmd=${compress_cmd}
extension=".bz2"
@@ -470,7 +492,7 @@ compression() {
target=${target}.xz
;;
zst* )
-compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
+compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
compression_type="zstd"
dir_compress_cmd=${compress_cmd}
extension=".zst"
@@ -506,7 +528,7 @@ create_archive() {
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}" print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}" tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
else else
print_warn "Skipping creating archive file because backup did not complete successfully" print_error "Skipping creating archive file because backup did not complete successfully"
fi fi
} }
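Aside: the `-q` flags added in the compression hunks tie into the 3.3.1 changelog note that compressing silently was causing 0 byte backups. These compressors sit on the write end of a pipe, so their stdout is the archive data itself; they have to be quieted with their own `-q` switches (which affect diagnostics only) rather than by discarding their output wholesale. A minimal sketch of the same tar pipeline, with assumed paths, level, and thread count:

# tar streams the directory to stdout; pigz compresses stdin to stdout.
# -q only silences pigz's messages - the compressed data stream is untouched.
dir_compress_cmd="pigz -q -9 -p 4"
tar cf - "/tmp/backups/influx_all" | ${dir_compress_cmd} > "/tmp/backups/influx_all.tar.gz"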
@@ -530,13 +552,16 @@ generate_checksum() {
checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
else
-print_warn "Skipping Checksum creation because backup did not complete successfully"
+print_error "Skipping Checksum creation because backup did not complete successfully"
fi
fi
}
move_dbbackup() {
if [ "${exit_code}" = "0" ] ; then
+dbbackup_size="$(stat -c%s "${TEMP_LOCATION}"/"${target}")"
+dbbackup_date="$(date -r "${TEMP_LOCATION}"/"${target}" +'%s')"
case "${SIZE_VALUE,,}" in
"b" | "bytes" )
SIZE_VALUE=1
@@ -579,15 +604,17 @@ move_dbbackup() {
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}" [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
unset s3_ssl if var_true "${ENABLE_CHECKSUM}" ; then
unset s3_ca_cert silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}" rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
rm -rf "${TEMP_LOCATION}"/"${target}" rm -rf "${TEMP_LOCATION}"/"${target}"
;; ;;
esac esac
else else
print_warn "Skipping moving DB Backup to final location because backup did not complete successfully" print_error "Skipping moving DB Backup to final location because backup did not complete successfully"
fi fi
rm -rf "${TEMP_LOCATION}"/* rm -rf "${TEMP_LOCATION}"/*
@@ -607,14 +634,18 @@ post_dbbackup() {
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_notice "Sending Backup Statistics to Zabbix" print_notice "Sending Backup Statistics to Zabbix"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")" silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "${dbbackup_size}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')" silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "${dbbackup_date}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}" silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))" silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
if [ "$?" != "0" ] ; then print_error "Error sending statistics, consider disabling with 'CONTAINER_ENABLE_MONITORING=FALSE'" ; fi
fi fi
### Post Script Support ### Post Script Support
if [ -n "${POST_SCRIPT}" ] ; then if [ -n "${POST_SCRIPT}" ] ; then
if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
if [ -x "${POST_SCRIPT}" ] ; then if [ -x "${POST_SCRIPT}" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}" print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}" eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
@@ -622,10 +653,14 @@ post_dbbackup() {
print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!" print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
fi fi
fi fi
fi
### Post Backup Custom Script Support ### Post Backup Custom Script Support
if [ -d "/assets/custom-scripts/" ] ; then if [ -d "/assets/custom-scripts/" ] ; then
for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
if [ -x "${f}" ] ; then if [ -x "${f}" ] ; then
print_notice "Executing post backup custom script : '${f}'" print_notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
@@ -633,10 +668,13 @@ post_dbbackup() {
else
print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
fi
+fi
done
fi
print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
+unset s3_ssl
+unset s3_ca_cert
}
sanity_test() {
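For reference, the comment in the hunk above documents the argument order handed to both POST_SCRIPT and the custom scripts: EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE. A hypothetical post script consuming them might look like the sketch below; remember to `chmod +x` it, or rely on the POST_SCRIPT_SKIP_X_VERIFY=TRUE bypass introduced in this release.

#!/bin/bash
# Hypothetical post script; positional arguments per the documented order above.
exit_code="$1"; db_type="$2"; db_host="$3"; db_name="$4"
start_epoch="$5"; finish_epoch="$6"; duration="$7"
backup_file="$8"; filesize="$9"; checksum="${10}"

if [ "${exit_code}" = "0" ]; then
  echo "Backup OK: ${db_type}/${db_name}@${db_host} -> ${backup_file} (${filesize} bytes, ${duration}s, sum ${checksum})"
else
  echo "Backup FAILED (${exit_code}) for ${db_type}/${db_name}@${db_host}" >&2
fi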

View File

@@ -4,7 +4,6 @@ source /assets/functions/00-container
source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
PROCESS_NAME="db-backup"
-CONTAINER_LOG_LEVEL=DEBUG
bootstrap_variables
@@ -80,7 +79,7 @@ while true; do
cleanup_old_data
if var_true "${manual}" ; then
-print_debug "Exitting due to manual mode"
+print_debug "Exiting due to manual mode"
exit ${master_exit_code};
else
print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "