#!/command/with-contenv bash
bootstrap_filesystem() {
if var_true "${DEBUG_BOOTSTRAP_FILESYSTEM}" ; then debug on; fi
if [ ! -d "${backup_job_filesystem_path}" ]; then
mkdir -p "${backup_job_filesystem_path}"
fi
if [ "$(stat -c %U "${backup_job_filesystem_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_path}" ; fi
if [ "$(stat -c %a "${backup_job_filesystem_path}")" != "${backup_job_filesystem_path_permission}" ] ; then chmod "${backup_job_filesystem_path_permission}" "${backup_job_filesystem_path}" ; fi
if [ -d "${backup_job_filesystem_archive_path}" ]; then
if [ "$(stat -c %U "${backup_job_filesystem_archive_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_archive_path}" ; fi
if [ "$(stat -c %a "${backup_job_filesystem_archive_path}")" != "${backup_job_filesystem_path_permission}" ] ; then chmod "${backup_job_filesystem_path_permission}" "${backup_job_filesystem_archive_path}" ; fi
fi
if [ ! -d "${LOG_PATH}" ]; then
mkdir -p "${LOG_PATH}"
fi
if [ "$(stat -c %U "${LOG_PATH}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${LOG_PATH}" ; fi
if [ ! -d "${LOG_PATH}"/"$(date +'%Y%m%d')" ]; then run_as_user mkdir -p "${LOG_PATH}"/"$(date +'%Y%m%d')"; fi
if [ "$(stat -c %a "${LOG_PATH}")" != "755" ] ; then chmod -R 755 "${LOG_PATH}" ; fi
if [ ! -d "${TEMP_PATH}" ]; then
mkdir -p "${TEMP_PATH}"
fi
if [ "$(stat -c %U "${TEMP_PATH}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${TEMP_PATH}" ; fi
if var_true "${DEBUG_BOOTSTRAP_FILESYSTEM}" ; then debug off; fi
}
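# Illustrative example (values are hypothetical, not defaults): with
# backup_job_filesystem_path=/backup, backup_job_filesystem_path_permission=700
# and DBBACKUP_USER=dbbackup, the checks above are equivalent to running
#   mkdir -p /backup                     # only when the directory is missing
#   chown -R dbbackup:dbbackup /backup   # only when the owner differs
#   chmod 700 /backup                    # only when the mode differs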
bootstrap_variables() {
if var_true "${DEBUG_BOOTSTRAP_VARIABLES}" ; then debug on; fi
backup_init() {
backup_instance_number=${1}
backup_instance_vars=$(mktemp)
set -o posix ; set | grep -oE "^backup_job_.*=" | grep -oE ".*=" | sed "/--/d" > "${backup_instance_vars}"
while read -r backup_instance_var ; do
unset "$(echo "${backup_instance_var}" | cut -d = -f 1)"
done < "${backup_instance_vars}"
transform_file_var \
DEFAULT_AUTH \
DEFAULT_TYPE \
DEFAULT_HOST \
DEFAULT_PORT \
DEFAULT_NAME \
DEFAULT_NAME_EXCLUDE \
DEFAULT_USER \
DEFAULT_PASS \
DEFAULT_ENCRYPT_PASSPHRASE \
DEFAULT_ENCRYPT_PUBLIC_KEY \
DEFAULT_ENCRYPT_PRIVATE_KEY \
DEFAULT_MONGO_CUSTOM_URI \
DEFAULT_MYSQL_TLS_CA_FILE \
DEFAULT_MYSQL_TLS_CERT_FILE \
DEFAULT_MYSQL_TLS_KEY_FILE \
DEFAULT_S3_BUCKET \
DEFAULT_S3_KEY_ID \
DEFAULT_S3_KEY_SECRET \
DEFAULT_S3_PATH \
DEFAULT_S3_REGION \
DEFAULT_S3_HOST \
DEFAULT_S3_PROTOCOL \
DEFAULT_S3_EXTRA_OPTS \
DEFAULT_S3_CERT_CA_FILE \
DEFAULT_BLOBXFER_STORAGE_ACCOUNT \
DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY \
DEFAULT_BLOBXFER_REMOTE_PATH \
DEFAULT_BLOBXFER_MODE \
DB"${backup_instance_number}"_AUTH \
DB"${backup_instance_number}"_TYPE \
DB"${backup_instance_number}"_HOST \
DB"${backup_instance_number}"_PORT \
DB"${backup_instance_number}"_NAME \
DB"${backup_instance_number}"_NAME_EXCLUDE \
DB"${backup_instance_number}"_USER \
DB"${backup_instance_number}"_PASS \
DB"${backup_instance_number}"_ENCRYPT_PASSPHRASE \
DB"${backup_instance_number}"_ENCRYPT_PUBLIC_KEY \
DB"${backup_instance_number}"_ENCRYPT_PRIVATE_KEY \
DB"${backup_instance_number}"_MONGO_CUSTOM_URI \
DB"${backup_instance_number}"_MYSQL_TLS_CA_FILE \
DB"${backup_instance_number}"_MYSQL_TLS_CERT_FILE \
DB"${backup_instance_number}"_MYSQL_TLS_KEY_FILE \
DB"${backup_instance_number}"_S3_BUCKET \
DB"${backup_instance_number}"_S3_KEY_ID \
DB"${backup_instance_number}"_S3_KEY_SECRET \
DB"${backup_instance_number}"_S3_PATH \
DB"${backup_instance_number}"_S3_REGION \
DB"${backup_instance_number}"_S3_HOST \
DB"${backup_instance_number}"_S3_PROTOCOL \
DB"${backup_instance_number}"_S3_EXTRA_OPTS \
DB"${backup_instance_number}"_S3_CERT_CA_FILE \
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT \
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \
DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \
DB"${backup_instance_number}"_BLOBXFER_MODE \
BLOBXFER_STORAGE_ACCOUNT \
BLOBXFER_STORAGE_ACCOUNT_KEY \
DB_HOST \
DB_NAME \
DB_PORT \
DB_USER \
DB_PASS \
MONGO_CUSTOM_URI \
DB_AUTH \
S3_BUCKET \
S3_KEY_ID \
S3_KEY_SECRET \
S3_PATH \
S3_REGION \
S3_HOST \
S3_PROTOCOL \
S3_EXTRA_OPTS
## Legacy after DEFAULT
set -o posix ; set | grep -E "^DB${backup_instance_number}_|^DEFAULT_|^DB_|^ARCHIVE|^BACKUP_|^BLOBXFER_|^CHECKSUM|^COMPRESSION|^CREATE_|^ENABLE_|^EXTRA_|^GZ_|^INFLUX_|^MYSQL_|^MONGO_|^PARALLEL|^PRE_|^POST_|^S3|^SKIP|^SPLIT" > "${backup_instance_vars}"
## Legacy checks from removed variables
if [ -n "${ENABLE_CHECKSUM}" ]; then
print_warn "Deprecated and unsupported variable 'ENABLE_CHECKSUM' detected - Please upgrade your variables as they will be removed in version 4.3.0"
if var_false "${ENABLE_CHECKSUM}" ; then
sed -i "/DEFAULT_CHECKSUM=/d" "${backup_instance_vars}"
echo "DEFAULT_CHECKSUM=NONE" >> "${backup_instance_vars}"
fi
fi
if [ -n "${DB_DUMP_BEGIN}" ]; then
print_warn "Deprecated and unsupported variable 'DB_DUMP_BEGIN' detected - Please upgrade your variables as they will be removed in version 4.3.0"
sed -i "/DEFAULT_BACKUP_BEGIN=/d" "${backup_instance_vars}"
echo "DEFAULT_BACKUP_BEGIN=${DB_DUMP_BEGIN}" >> "${backup_instance_vars}"
fi
if [ -n "${DB_DUMP_FREQ}" ]; then
print_warn "Deprecated and unsupported variable 'DB_DUMP_FREQ' detected - Please upgrade your variables as they will be removed in version 4.3.0"
sed -i "/DEFAULT_BACKUP_INTERVAL=/d" "${backup_instance_vars}"
echo "DEFAULT_BACKUP_INTERVAL=${DB_DUMP_FREQ}" >> "${backup_instance_vars}"
fi
if [ -n "${DB_DUMP_TARGET}" ]; then
print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET' detected - Please upgrade your variables as they will be removed in version 4.3.0"
sed -i "/DEFAULT_FILESYSTEM_PATH=/d" "${backup_instance_vars}"
echo "DEFAULT_FILESYSTEM_PATH=${DB_DUMP_TARGET}" >> "${backup_instance_vars}"
fi
if [ -n "${DB_DUMP_TARGET_ARCHIVE}" ]; then
print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET_ACRHIVE' detected - Please upgrade your variables as they will be removed in version 4.3.0"
sed -i "/DEFAULT_FILESYSTEM_ARCHIVE_PATH=/d" "${backup_instance_vars}"
echo "DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DB_DUMP_TARGET_ARCHIVE}" >> "${backup_instance_vars}"
fi
if [ -n "${EXTRA_DUMP_OPTS}" ]; then
print_warn "Deprecated and unsupported variable 'EXTRA_DUMP_OPTS' detected - Please upgrade your variables as they will be removed in version 4.3.0"
sed -i "/DEFAULT_EXTRA_BACKUP_OPTS=/d" "${backup_instance_vars}"
echo "DEFAULT_EXTRA_BACKUP_OPTS=${EXTRA_DUMP_OPTS}" >> "${backup_instance_vars}"
fi
##
if grep -qo ".*_NAME='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _NAME variable with quotes"
sed -i "s|_NAME='\(.*\)'|_NAME=\1|g" "${backup_instance_vars}"
fi
if grep -qo ".*_PASS='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _PASS variable with quotes"
sed -i "s|_PASS='\(.*\)'|_PASS=\1|g" "${backup_instance_vars}"
fi
if grep -qo "MONGO_CUSTOM_URI='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _MONGO_CUSTOM_URI variable with quotes"
sed -i "s|MONGO_CUSTOM_URI='\(.*\)'|MONGO_CUSTOM_URI=\1|g" "${backup_instance_vars}"
fi
if grep -qo ".*_OPTS='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _OPTS variable with quotes"
sed -i "s|_OPTS='\(.*\)'|_OPTS=\1|g" "${backup_instance_vars}"
fi
transform_backup_instance_variable() {
if grep -q "^DB${1}_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
elif grep -q "^DB_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
# Allow old legacy work, perhaps remove old DB_ functionality in future? This should allow for seamless upgrades
#print_warn "Legacy Variable 'DB_${2}'' detected - Please upgrade your variables as they will be removed in version 4.3.0"
export "$3"="$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
elif grep -q "^${2}=" "${backup_instance_vars}" && [ "$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
print_warn "Legacy unsupported variable '${2}' detected - Please upgrade your variables as they will be removed in version 4.3.0"
export "$3"="$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
elif grep -q "^DEFAULT_${2}=" "${backup_instance_vars}" && [ "$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
export "$3"="$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
fi
}
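# Resolution order sketch (hypothetical values): for instance 01 and suffix PORT
# the lookup above tries DB01_PORT, then legacy DB_PORT, then legacy PORT, then
# DEFAULT_PORT; a literal value of "unset" at any level is skipped. e.g.
#   DB01_PORT=3307 DEFAULT_PORT=3306   -> backup_job_db_port=3307
#   DB01_PORT=unset DEFAULT_PORT=3306  -> backup_job_db_port=3306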
transform_backup_instance_variable "${backup_instance_number}" ARCHIVE_TIME backup_job_archive_time
transform_backup_instance_variable "${backup_instance_number}" AUTH backup_job_db_auth
transform_backup_instance_variable "${backup_instance_number}" BACKUP_BEGIN backup_job_backup_begin
transform_backup_instance_variable "${backup_instance_number}" BACKUP_INTERVAL backup_job_backup_interval
transform_backup_instance_variable "${backup_instance_number}" BACKUP_GLOBALS backup_job_backup_pgsql_globals
transform_backup_instance_variable "${backup_instance_number}" BACKUP_LOCATION backup_job_backup_location
transform_backup_instance_variable "${backup_instance_number}" BLACKOUT_BEGIN backup_job_snapshot_blackout_start
transform_backup_instance_variable "${backup_instance_number}" BLACKOUT_END backup_job_snapshot_blackout_finish
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_MODE backup_job_blobxfer_mode
transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum
transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time
transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression
transform_backup_instance_variable "${backup_instance_number}" COMPRESSION_LEVEL backup_job_compression_level
transform_backup_instance_variable "${backup_instance_number}" CREATE_LATEST_SYMLINK backup_job_create_latest_symlink
transform_backup_instance_variable "${backup_instance_number}" ENABLE_PARALLEL_COMPRESSION backup_job_parallel_compression
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT backup_job_encrypt
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PASSPHRASE backup_job_encrypt_passphrase
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PRIVATE_KEY backup_job_encrypt_private_key
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBLIC_KEY backup_job_encrypt_public_key
transform_backup_instance_variable "${backup_instance_number}" EXTRA_BACKUP_OPTS backup_job_extra_backup_opts
transform_backup_instance_variable "${backup_instance_number}" EXTRA_ENUMERATION_OPTS backup_job_extra_enumeration_opts
transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts
transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path
transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH backup_job_filesystem_path
transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH_PERMISSION backup_job_filesystem_path_permission
transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PERMISSION backup_job_filesystem_permission
transform_backup_instance_variable "${backup_instance_number}" GZ_RSYNCABLE backup_job_gz_rsyncable
transform_backup_instance_variable "${backup_instance_number}" HOST backup_job_db_host
transform_backup_instance_variable "${backup_instance_number}" INFLUX_VERSION backup_job_influx_version
transform_backup_instance_variable "${backup_instance_number}" LOG_LEVEL backup_job_log_level
transform_backup_instance_variable "${backup_instance_number}" MONGO_CUSTOM_URI backup_job_mongo_custom_uri
transform_backup_instance_variable "${backup_instance_number}" MYSQL_CLIENT backup_job_mysql_client
transform_backup_instance_variable "${backup_instance_number}" MYSQL_ENABLE_TLS backup_job_mysql_enable_tls
transform_backup_instance_variable "${backup_instance_number}" MYSQL_EVENTS backup_job_mysql_events
transform_backup_instance_variable "${backup_instance_number}" MYSQL_MAX_ALLOWED_PACKET backup_job_mysql_max_allowed_packet
transform_backup_instance_variable "${backup_instance_number}" MYSQL_SINGLE_TRANSACTION backup_job_mysql_single_transaction
transform_backup_instance_variable "${backup_instance_number}" MYSQL_STORED_PROCEDURES backup_job_mysql_stored_procedures
transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_CA_FILE backup_job_mysql_tls_ca_file
transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_CERT_FILE backup_job_mysql_tls_cert_file
transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_KEY_FILE backup_job_mysql_tls_key_file
transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_VERIFY backup_job_mysql_tls_verify
transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_VERSION backup_job_mysql_tls_version
transform_backup_instance_variable "${backup_instance_number}" MSSQL_MODE backup_job_mssql_mode
transform_backup_instance_variable "${backup_instance_number}" NAME backup_job_db_name
transform_backup_instance_variable "${backup_instance_number}" NAME_EXCLUDE backup_job_db_name_exclude
transform_backup_instance_variable "${backup_instance_number}" PARALLEL_COMPRESSION_THREADS backup_job_parallel_compression_threads
transform_backup_instance_variable "${backup_instance_number}" PASS backup_job_db_pass
transform_backup_instance_variable "${backup_instance_number}" PORT backup_job_db_port
transform_backup_instance_variable "${backup_instance_number}" POST_SCRIPT_X_VERIFY backup_job_post_script_x_verify
transform_backup_instance_variable "${backup_instance_number}" POST_SCRIPT backup_job_post_script
transform_backup_instance_variable "${backup_instance_number}" PRE_SCRIPT backup_job_pre_script
transform_backup_instance_variable "${backup_instance_number}" PRE_SCRIPT_X_VERIFY backup_job_pre_script_x_verify
transform_backup_instance_variable "${backup_instance_number}" RESOURCE_OPTIMIZED backup_job_resource_optimized
transform_backup_instance_variable "${backup_instance_number}" S3_BUCKET backup_job_s3_bucket
transform_backup_instance_variable "${backup_instance_number}" S3_CERT_CA_FILE backup_job_s3_cert_ca_file
transform_backup_instance_variable "${backup_instance_number}" S3_CERT_SKIP_VERIFY backup_job_s3_cert_skip_verify
transform_backup_instance_variable "${backup_instance_number}" S3_EXTRA_OPTS backup_job_s3_extra_opts
transform_backup_instance_variable "${backup_instance_number}" S3_HOST backup_job_s3_host
transform_backup_instance_variable "${backup_instance_number}" S3_KEY_ID backup_job_s3_key_id
transform_backup_instance_variable "${backup_instance_number}" S3_KEY_SECRET backup_job_s3_key_secret
transform_backup_instance_variable "${backup_instance_number}" S3_PATH backup_job_s3_path
transform_backup_instance_variable "${backup_instance_number}" S3_PROTOCOL backup_job_s3_protocol
transform_backup_instance_variable "${backup_instance_number}" S3_REGION backup_job_s3_region
transform_backup_instance_variable "${backup_instance_number}" SCRIPT_LOCATION_POST backup_job_script_location_post
transform_backup_instance_variable "${backup_instance_number}" SCRIPT_LOCATION_PRE backup_job_script_location_pre
transform_backup_instance_variable "${backup_instance_number}" SIZE_VALUE backup_job_size_value
transform_backup_instance_variable "${backup_instance_number}" SKIP_AVAILABILITY_CHECK backup_job_skip_availability_check
transform_backup_instance_variable "${backup_instance_number}" SPLIT_DB backup_job_split_db
transform_backup_instance_variable "${backup_instance_number}" TYPE backup_job_db_type
transform_backup_instance_variable "${backup_instance_number}" USER backup_job_db_user
backup_job_backup_begin=$(echo "${backup_job_backup_begin}" | sed -e "s|'||g" -e 's|"||g')
if var_true "${DEBUG_BACKUP_INSTANCE_VARIABLE}" ; then cat <<EOF
## BEGIN Variable Dump $(TZ=${TIMEZONE} date)
$(cat ${backup_instance_vars})
## END
EOF
fi
rm -rf "${backup_instance_vars}"
}
restore_init() {
restore_vars=$(mktemp)
set -o posix ; set | grep -oE "^restore_job_.*=" | grep -oE ".*=" | sed "/--/d" > "${restore_vars}"
while read -r restore_var ; do
unset "$(echo "${restore_var}" | cut -d = -f 1)"
done < "${restore_vars}"
if [ -n "${DB_NAME}" ] && [ -z "${DB01_NAME}" ] ; then export DB01_NAME="${DB_NAME}" ; unset DB_NAME ; fi
if [ -n "${DB_USER}" ] && [ -z "${DB01_USER}" ] ; then export DB01_USER="${DB_USER}" ; unset DB_USER ; fi
if [ -n "${DB_PASS}" ] && [ -z "${DB01_PASS}" ] ; then export DB01_PASS="${DB_PASS}" ; unset DB_PASS ; fi
if [ -n "${DB_TYPE}" ] && [ -z "${DB01_TYPE}" ] ; then export DB01_TYPE="${DB_TYPE}" ; unset DB_TYPE ; fi
if [ -n "${DB_PORT}" ] && [ -z "${DB01_PORT}" ] ; then export DB01_PORT="${DB_PORT}" ; unset DB_PORT ; fi
if [ -n "${DB_HOST}" ] && [ -z "${DB01_HOST}" ] ; then export DB01_HOST="${DB_HOST}" ; unset DB_HOST ; fi
if [ -n "${DB_AUTH}" ] && [ -z "${DB01_AUTH}" ] ; then export DB01_AUTH="${DB_AUTH}" ; unset DB_AUTH ; fi
if [ -n "${MONGO_CUSTOM_URI}" ] && [ -z "${DB01_MONGO_CUSTOM_URI}" ] ; then export DB01_MONGO_CUSTOM_URI="${DB_MONGO_CUSTOM_URI}" ; unset MONGO_CUSTOM_URI ; fi
if [ -n "${MYSQL_TLS_CA_FILE}" ] && [ -z "${DB01_MYSQL_TLS_CA_FILE}" ] ; then export DB01_MYSQL_TLS_CA_FILE="${MYSQL_TLS_CA_FILE}" ; unset MYSQL_TLS_CA_FILE ; fi
if [ -n "${MYSQL_TLS_CERT_FILE}" ] && [ -z "${DB01_MYSQL_TLS_CERT_FILE}" ] ; then export DB01_MYSQL_TLS_CERT_FILE="${MYSQL_TLS_CERT_FILE}" ; unset MYSQL_TLS_CERT_FILE ; fi
if [ -n "${MYSQL_TLS_KEY_FILE}" ] && [ -z "${DB01_MYSQL_TLS_KEY_FILE}" ] ; then export DB01_MYSQL_TLS_KEY_FILE="${MYSQL_TLS_KEY_FILE}" ; unset MYSQL_TLS_KEY_FILE ; fi
transform_file_var \
DEFAULT_AUTH \
DEFAULT_HOST \
DEFAULT_NAME \
DEFAULT_PASS \
DEFAULT_PORT \
DEFAULT_TYPE \
DEFAULT_USER \
DEFAULT_MONGO_CUSTOM_URI \
DEFAULT_MYSQL_TLS_CA_FILE \
DEFAULT_MYSQL_TLS_CERT_FILE \
DEFAULT_MYSQL_TLS_KEY_FILE
set -o posix ; set | grep -E "^DEFAULT_" > "${restore_vars}"
restore_instances=$(printenv | sort | grep -c '^DB.*_HOST')
for (( restore_instance_number = 01; restore_instance_number <= restore_instances; restore_instance_number++ )) ; do
restore_instance_number=$(printf "%02d" $restore_instance_number)
transform_file_var \
DB"${restore_instance_number}"_AUTH \
DB"${restore_instance_number}"_HOST \
DB"${restore_instance_number}"_NAME \
DB"${restore_instance_number}"_PASS \
DB"${restore_instance_number}"_PORT \
DB"${restore_instance_number}"_TYPE \
DB"${restore_instance_number}"_USER \
DB"${restore_instance_number}"_MONGO_CUSTOM_URI \
DB"${restore_instance_number}"_MYSQL_TLS_CA_FILE \
DB"${restore_instance_number}"_MYSQL_TLS_CERT_FILE \
DB"${restore_instance_number}"_MYSQL_TLS_KEY_FILE
set -o posix ; set | grep -E "^DB${restore_instance_number}_" >> "${restore_vars}"
done
if [ -n "${DB_DUMP_TARGET}" ]; then
print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET' detected - Please upgrade your variables as they will be removed in version 4.3.0"
sed -i "/DEFAULT_FILESYSTEM_PATH=/d" "${restore_vars}"
echo "DEFAULT_FILESYSTEM_PATH=${DB_DUMP_TARGET}" >> "${restore_vars}"
fi
if [ -n "${DB_DUMP_TARGET_ARCHIVE}" ]; then
print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET_ACRHIVE' detected - Please upgrade your variables as they will be removed in version 4.3.0"
sed -i "/DEFAULT_FILESYSTEM_ARCHIVE_PATH=/d" "${restore_vars}"
echo "DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DB_DUMP_TARGET_ARCHIVE}" >> "${restore_vars}"
fi
echo "RESTORE_VARS is ${restore_vars}"
}
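# Example (illustrative): legacy un-numbered variables are promoted to the first
# instance before restore, so DB_HOST=db1 DB_TYPE=mysql with no DB01_* set
# behaves like DB01_HOST=db1 DB01_TYPE=mysql, and each detected DBxx_* block is
# appended to the temporary file named in ${restore_vars}.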
parse_variables() {
local v_instance=${1}
check_var() {
## Check if variable is defined
## Usage: check_var transformed_varname real_varname "Description"
output_off
print_debug "[parse_variables] Looking for existence of $2 environment variable"
if [ ! -v "$1" ]; then
print_error "No '$3' Entered! - Set '\$$2' environment variable - Halting Backup Number ${v_instance}"
s6-svc -d /var/run/s6/legacy-services/dbbackup-"${v_instance}"
exit 1
fi
output_on
}
check_var backup_job_db_type DB"${v_instance}"_TYPE "appropriate database type"
case "${backup_job_db_type,,}" in
couch* )
dbtype=couch
backup_job_db_port=${backup_job_db_port:-5984}
check_var backup_job_db_user DB"${v_instance}"_USER "database username"
check_var backup_job_db_pass DB"${v_instance}"_PASS "database password"
;;
influx* )
dbtype=influx
case "${backup_job_influx_version}" in
1) backup_job_db_port=${backup_job_db_port:-8088} ;;
2) backup_job_db_port=${backup_job_db_port:-8086} ;;
esac
check_var backup_job_db_user DB"${v_instance}"_USER "database username"
check_var backup_job_db_pass DB"${v_instance}"_PASS "database password"
check_var backup_job_influx_version DB"${v_instance}"_INFLUX_VERSION "InfluxDB version you are backing up from"
;;
mongo* )
dbtype=mongo
if [ -n "${backup_job_mongo_custom_uri}" ] ; then
mongo_uri_proto=$(echo "${backup_job_mongo_custom_uri}" | grep :// | sed -e's,^\(.*://\).*,\1,g')
mongo_uri_scratch="${backup_job_mongo_custom_uri/${mongo_uri_proto}/}"
mongo_uri_username_password="$(echo "${mongo_uri_scratch}" | grep @ | rev | cut -d@ -f2- | rev)"
if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch="$(echo "${mongo_uri_scratch}" | rev | cut -d@ -f1 | rev)" ; fi
mongo_uri_port="$(echo "${mongo_uri_scratch}" | grep : | rev | cut -d: -f2- | rev)"
if [ -n "${mongo_uri_port}" ]; then mongo_uri_port="$(echo "${mongo_uri_scratch}" | rev | cut -d: -f1 | cut -d/ -f2 | rev)" ; fi
mongo_uri_hostname="$(echo "${mongo_uri_scratch}" | cut -d/ -f1 | cut -d: -f1 )"
mongo_uri_database="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f1 )"
mongo_uri_options="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f2 )"
backup_job_db_name=${backup_job_db_name:-"${mongo_uri_database,,}"}
backup_job_db_host=${backup_job_db_host:-"${mongo_uri_hostname,,}"}
else
backup_job_db_port=${backup_job_db_port:-27017}
[[ ( -n "${backup_job_db_user}" ) ]] && MONGO_USER_STR=" --username ${backup_job_db_user}"
[[ ( -n "${backup_job_db_pass}" ) ]] && MONGO_PASS_STR=" --password ${backup_job_db_pass}"
[[ ( -n "${backup_job_db_name}" ) ]] && MONGO_DB_STR=" --db ${backup_job_db_name}"
[[ ( -n "${backup_job_db_auth}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${backup_job_db_auth}"
fi
;;
"mysql" | "mariadb" )
dbtype=mysql
backup_job_db_port=${backup_job_db_port:-3306}
check_var backup_job_db_name DB"${v_instance}"_NAME "database name. Separate multiple with commas"
case "${backup_job_mysql_client,,}" in
mariadb )
_mysql_prefix=/usr/bin/
;;
mysql )
_mysql_prefix=/opt/mysql/bin/
;;
* )
print_error "I don't understand '${backup_job_mysql_client,,}' as a client. Exiting.."
exit 99
;;
esac
print_debug "Using '${backup_job_mysql_client,,}' as client"
if [ -n "${backup_job_db_pass}" ] ; then export MYSQL_PWD=${backup_job_db_pass} ; fi
if var_true "${backup_job_mysql_enable_tls}" ; then
if [ -n "${backup_job_mysql_tls_ca_file}" ] ; then
mysql_tls_args="--ssl_ca=${backup_job_mysql_tls_ca_file}"
fi
if [ -n "${backup_job_mysql_tls_cert_file}" ] ; then
mysql_tls_args="${mysql_tls_args} --ssl_cert=${backup_job_mysql_tls_cert_file}"
fi
if [ -n "${backup_job_mysql_tls_key_file}" ] ; then
mysql_tls_args="${mysql_tls_args} --ssl_key=${backup_job_mysql_tls_key_file}"
fi
if var_true "${backup_job_mysql_tls_verify}" ; then
mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
fi
if [ -n "${backup_job_mysql_tls_version}" ] ; then
mysql_tls_args="${mysql_tls_args} --tls_version=${backup_job_mysql_tls_version}"
fi
fi
;;
"mssql" | "microsoftsql" )
apkArch="$(apk --print-arch)"; \
case "$apkArch" in
x86_64) mssql=true ;;
*) write_log error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;;
esac
dbtype=mssql
backup_job_db_port=${backup_job_db_port:-1433}
;;
postgres* | "pgsql" )
dbtype=pgsql
backup_job_db_port=${backup_job_db_port:-5432}
[[ ( -n "${backup_job_db_pass}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${backup_job_db_pass}"
check_var backup_job_db_name DB"${v_instance}"_NAME "database name. Separate multiple with commas"
;;
"redis" )
dbtype=redis
backup_job_db_port=${backup_job_db_port:-6379}
[[ ( -n "${backup_job_db_pass}" ) ]] && REDIS_PASS_STR=" -a ${backup_job_db_pass}"
;;
sqlite* )
dbtype=sqlite3
;;
* )
write_log error "I don't recognize 'DB${v_instance}_TYPE=${backup_job_db_type}' - Exitting.."
exit 99
;;
esac
if var_true "${backup_job_resource_optimized}" ; then play_fair="nice -19 ionice -c2" ; fi
}
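# Mongo URI parsing sketch (the URI below is hypothetical): given
#   DB01_MONGO_CUSTOM_URI="mongodb://app:secret@mongo.example.com:27017/appdb?tls=true"
# the mongo* branch above resolves roughly to
#   mongo_uri_proto=mongodb://           mongo_uri_username_password=app:secret
#   mongo_uri_hostname=mongo.example.com mongo_uri_port=27017
#   mongo_uri_database=appdb             mongo_uri_options=tls=true
# with backup_job_db_name / backup_job_db_host falling back to the parsed values.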
upgrade_lonely_variables() {
upgrade_lonely_variables_tmp=$(mktemp)
set -o posix; set | grep "^$1" | grep -v "^$1[0-9]._" > "${upgrade_lonely_variables_tmp}"
while read -r exist_var ; do
if [ ! -v "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2)" ] ; then
export "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2-9)"="$(echo "${exist_var}" | cut -d = -f2)"
else
print_error "Variable ${1}01_$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2) already exists, fix your variables and start again.."
exit 1
fi
done < "${upgrade_lonely_variables_tmp}"
rm -rf "${upgrade_lonely_variables_tmp}"
}
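# Upgrade sketch (hypothetical variables): "bootstrap_variables upgrade DB"
# promotes un-numbered legacy variables to the first instance, e.g.
#   DB_HOST=db1 DB_PASS=secret  ->  DB01_HOST=db1 DB01_PASS=secret
# and exits with an error if the matching DB01_* variable already exists.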
case "${1}" in
backup_init ) backup_init "$2" ;;
parse_variables) parse_variables "$2" ;;
restore_init) restore_init ;;
upgrade ) upgrade_lonely_variables "$2" ;;
esac
if var_true "${DEBUG_BOOTSTRAP_VARIABLES}" ; then debug off; fi
}
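# Dispatcher usage (illustrative invocations of the case statement above):
#   bootstrap_variables backup_init 01      # build backup_job_* vars for instance 01
#   bootstrap_variables parse_variables 01  # validate and derive per-type settings
#   bootstrap_variables restore_init
#   bootstrap_variables upgrade DB          # promote legacy DB_* to DB01_*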
backup_couch() {
prepare_dbbackup
backup_job_filename=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt
backup_job_filename_base=couch_${backup_job_db_name}_${backup_job_db_host#*//}
compression
if var_true "${DEBUG_BACKUP_COUCH}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup ${backup_job_db_name}
cleanup_old_data
}
backup_influx() {
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${backup_job_db_name,,}" = "all" ] ; then
write_log debug "[backup_influx] Preparing to back up everything"
db_names=justbackupeverything
else
db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
fi
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
case "${backup_job_influx_version,,}" in
1 )
print_debug "[backup_influx] Influx DB Version 1 selected"
for db in ${db_names}; do
prepare_dbbackup
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
pre_dbbackup "${db}"
write_log notice "Dumping Influx database: '${db}'"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} "${temporary_directory}"/"${backup_job_filename}"
exit_code=$?
check_exit_code backup "${backup_job_filename}"
compression
create_archive
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
done
;;
2 )
print_debug "[backup_influx] Influx DB Version 2 selected"
for db in ${db_names}; do
prepare_dbbackup
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
pre_dbbackup "${db}"
write_log notice "Dumping Influx2 database: '${db}'"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --compression none "${temporary_directory}"/"${backup_job_filename}"
exit_code=$?
check_exit_code backup "${backup_job_filename}"
compression
create_archive
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
done
;;
esac
}
backup_mongo() {
prepare_dbbackup
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi
if [ "${backup_job_compression,,}" = "none" ] ; then
backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.archive
backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}
else
backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.archive.gz
backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}
mongo_compression="--gzip"
compression_string="and compressing with gzip"
fi
if [ -n "${backup_job_mongo_custom_uri}" ] ; then
mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}"
else
mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}"
fi
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
pre_dbbackup "${backup_job_db_name}"
write_log notice "Dumping MongoDB database: '${backup_job_db_name}' ${compression_string}"
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi
silent run_as_user ${play_fair} mongodump --archive=${temporary_directory}/${backup_job_filename} ${mongo_compression} ${mongo_backup_parameter}
exit_code=$?
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${backup_job_db_name}"
cleanup_old_data
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
}
backup_mssql() {
case "${backup_job_mssql_mode,,}" in
db|database )
prepare_dbbackup
backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.bak
backup_job_filename_base=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}
pre_dbbackup "${backup_job_db_name}"
write_log notice "Dumping MSSQL database: '${backup_job_db_name}'"
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP DATABASE [${backup_job_db_name}] TO DISK = N'${temporary_directory}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
backup_job_filename_original=${backup_job_filename}
compression
pre_dbbackup all
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
check_exit_code backup "${backup_job_filename}"
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${backup_job_db_name}"
cleanup_old_data
;;
trn|transaction )
prepare_dbbackup
backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.trn
backup_job_filename_base=mssql_${backup_job_db_name,,}_trn_${backup_job_db_host,,}
pre_dbbackup "${backup_job_db_name}"
write_log notice "Dumping MSSQL database: '${backup_job_db_name}'"
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP LOG [${backup_job_db_name}] TO DISK = N'${temporary_directory}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-log', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
backup_job_filename_original=${backup_job_filename}
compression
pre_dbbackup all
run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
file_encryption
timer backup finish
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${backup_job_db_name}"
cleanup_old_data
;;
esac
}
backup_mysql() {
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
if var_true "${backup_job_mysql_events}" ; then
events="--events"
fi
if var_true "${backup_job_mysql_single_transaction}" ; then
single_transaction="--single-transaction"
fi
if var_true "${backup_job_mysql_stored_procedures}" ; then
stored_procedures="--routines"
fi
if [ "${backup_job_db_name,,}" = "all" ] ; then
write_log debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(run_as_user ${_mysql_prefix}mysql -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${backup_job_db_name_exclude}" ] ; then
db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
write_log debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
done
fi
else
db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
fi
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${backup_job_split_db}" ; then
for db in ${db_names} ; do
prepare_dbbackup
backup_job_filename=${backup_job_mysql_client,,}_${db}_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=${backup_job_mysql_client,,}_${db}_${backup_job_db_host,,}
compression
pre_dbbackup "${db}"
write_log notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
run_as_user ${play_fair} ${_mysql_prefix}mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
done
else
write_log debug "Not splitting database dumps into their own files"
prepare_dbbackup
backup_job_filename=${backup_job_mysql_client,,}_all_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=${backup_job_mysql_client,,}_all_${backup_job_db_host,,}
compression
pre_dbbackup all
write_log notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
run_as_user ${play_fair} ${_mysql_prefix}mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup all
cleanup_old_data
fi
}
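# Example (illustrative values, assuming MYSQL_CLIENT=mariadb, host 'db1' and
# gzip compression): with DB01_NAME=shop,blog and DB01_SPLIT_DB=TRUE each
# database lands in its own dump, roughly
#   mariadb_shop_db1_<timestamp>.sql.gz
#   mariadb_blog_db1_<timestamp>.sql.gz
# whereas SPLIT_DB=FALSE produces a single mariadb_all_db1_<timestamp>.sql.gz
# via 'mysqldump --databases shop blog'.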
backup_pgsql() {
backup_pgsql_globals() {
prepare_dbbackup
backup_job_filename=pgsql_globals_${backup_job_db_host,,}_${now}.sql
backup_job_global_base=pgsql_globals_${backup_job_db_host,,}
compression
pre_dbbackup "globals"
print_notice "Dumping PostgresSQL globals: with 'pg_dumpall -g' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code "${backup_job_filename}"
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "globals"
cleanup_old_data
}
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
export PGPASSWORD=${backup_job_db_pass}
if [ -n "${backup_job_db_auth}" ] ; then
authdb=${backup_job_db_auth}
else
authdb=${backup_job_db_user}
fi
if [ "${backup_job_db_name,,}" = "all" ] ; then
write_log debug "Preparing to back up all databases"
db_names=$(psql -h ${backup_job_db_host} -U ${backup_job_db_user} -p ${backup_job_db_port} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
if [ -n "${backup_job_db_name_exclude}" ] ; then
db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
write_log debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
done
_postgres_backup_globals=true
fi
else
db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
_postgres_backup_globals=false
fi
if var_false "${_postgres_backup_globals}" && var_true "${backup_job_backup_pgsql_globals}" ; then _postgres_backup_globals=true; fi
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${backup_job_split_db}" ; then
for db in ${db_names} ; do
prepare_dbbackup
backup_job_filename=pgsql_${db}_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,}
compression
pre_dbbackup "${db}"
write_log notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
done
if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi
else
write_log debug "Not splitting database dumps into their own files"
prepare_dbbackup
backup_job_filename=pgsql_all_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=pgsql_all_${backup_job_db_host,,}
compression
pre_dbbackup all
write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
tmp_db_names=$(psql -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
for r_db_name in $(echo $db_names | xargs); do
tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
done
sleep 5
for x_db_name in ${tmp_db_names} ; do
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup all
cleanup_old_data
if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi
fi
}
backup_redis() {
prepare_dbbackup
write_log notice "Dumping Redis - Flushing Redis Cache First"
backup_job_filename=redis_all_${backup_job_db_host,,}_${now}.rdb
backup_job_filename_base=redis_${backup_job_db_host,,}
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${temporary_directory}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}
sleep 10
try=5
while [ $try -gt 0 ] ; do
saved=$(echo 'info Persistence' | redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
write_log notice "Redis Backup Complete"
exit_code=0
break
fi
try=$((try - 1))
write_log warn "Redis Busy - Waiting and retrying in 5 seconds"
sleep 5
done
backup_job_filename_original=${backup_job_filename}
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi
compression
pre_dbbackup all
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi
timer backup finish
check_exit_code backup "${backup_job_filename}"
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup all
cleanup_old_data
}
backup_sqlite3() {
prepare_dbbackup
db=$(basename "${backup_job_db_host}")
db="${db%.*}"
backup_job_filename=sqlite3_${db}_${now}.sqlite3
backup_job_filename_base=sqlite3_${db}
pre_dbbackup "${db}"
write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}"
if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug on; fi
silent ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${temporary_directory}/backup_${now}.sqlite3'"
exit_code=$?
check_exit_code backup "${backup_job_filename}"
if [ ! -f "${temporary_directory}"/backup_${now}.sqlite3 ] ; then
print_error "SQLite3 backup failed! Exitting"
return 1
fi
compression
run_as_user ${play_fair} cat "${temporary_directory}"/backup_${now}.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}/${backup_job_filename}" > /dev/null
rm -rf "${temporary_directory}"/backup_${now}.sqlite3
if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug off; fi
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
}
check_availability() {
if var_true "${DEBUG_CHECK_AVAILABILITY}" ; then debug on; fi
### Set the Database Type
if var_false "${backup_job_skip_availability_check}" ; then
case "${dbtype}" in
"couch" )
counter=0
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${backup_job_db_host}:${backup_job_db_port})
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
write_log warn "CouchDB Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"influx" )
counter=0
case "${backup_job_influx_version,,}" in
1 )
while ! (run_as_user nc -z ${backup_job_db_host#*//} ${backup_job_db_port}) ; do
sleep 5
(( counter+=5 ))
write_log warn "InfluxDB Host '${backup_job_db_host#*//}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
2 )
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(run_as_user curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${backup_job_db_host}:${backup_job_db_port}/health)
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
write_log warn "InfluxDB Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
esac
;;
"mongo" )
if [ -n "${backup_job_mongo_custom_uri}" ] ; then
write_log debug "Skipping Connectivity Check"
else
counter=0
while ! (run_as_user nc -z ${backup_job_db_host} ${backup_job_db_port}) ; do
sleep 5
(( counter+=5 ))
write_log warn "Mongo Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)"
done
fi
;;
"mysql" )
counter=0
export MYSQL_PWD=${backup_job_db_pass}
while ! (run_as_user mysqladmin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
sleep 5
(( counter+=5 ))
write_log warn "MySQL/MariaDB Server '${backup_job_db_host}' is not accessible, retrying.. (${counter} seconds so far)"
done
;;
"mssql" )
counter=0
while ! (run_as_user nc -z ${backup_job_db_host} ${backup_job_db_port}) ; do
sleep 5
(( counter+=5 ))
write_log warn "MSSQL Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"pgsql" )
counter=0
until run_as_user pg_isready --host=${backup_job_db_host} --port=${backup_job_db_port} -q
do
sleep 5
(( counter+=5 ))
write_log warn "Postgres Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"redis" )
counter=0
while ! (run_as_user nc -z "${backup_job_db_host}" "${backup_job_db_port}") ; do
sleep 5
(( counter+=5 ))
write_log warn "Redis Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"sqlite3" )
if [[ ! -e "${backup_job_db_host}" ]]; then
write_log error "File '${backup_job_db_host}' does not exist."
exit_code=2
exit $exit_code
elif [[ ! -f "${backup_job_db_host}" ]]; then
write_log error "File '${backup_job_db_host}' is not a file."
exit_code=2
exit $exit_code
elif [[ ! -r "${backup_job_db_host}" ]]; then
write_log error "File '${backup_job_db_host}' is not readable."
exit_code=2
exit $exit_code
fi
;;
esac
fi
if var_true "${DEBUG_CHECK_AVAILABILITY}" ; then debug off; fi
}
check_exit_code() {
if var_true "${DEBUG_CHECK_EXIT_CODE}" ; then debug on; fi
case "${1}" in
backup )
write_log debug "DB Backup exit Code is ${exit_code}"
case "${exit_code}" in
0 )
write_log info "DB Backup of '${2}' completed successfully"
;;
* )
write_log error "DB Backup of '${2}' reported errors"
notify \
"$(date -d @"${backup_job_start_time}" +'%Y%m%d_%H%M%S')" \
"${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" \
"${exit_code}" \
"[FATAL] Host: ${backup_job_db_host} Name: ${backup_job_db_name} - Backup failed completely" \
"DB Backup is failing to backup the '${backup_job_db_host}-${backup_job_db_name}' job."
master_exit_code=1
;;
esac
;;
move )
write_log debug "Move exit Code is ${exit_code}"
case "${move_exit_code}" in
0 )
write_log debug "Moving of backup '${2}' completed successfully"
;;
* )
write_log error "Moving of backup '${2}' reported errors"
notify \
"$(date -d @"${backup_job_start_time}" +'%Y%m%d_%H%M%S')" \
"${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" \
"${move_exit_code}" \
"[FATAL] Host: ${backup_job_db_host} Name: ${backup_job_db_name} - Backup failed to move to destination" \
"DB Backup is failing to backup the '${backup_job_db_host}-${backup_job_db_name}' job."
master_exit_code=1
;;
esac
;;
esac
if var_true "${DEBUG_CHECK_EXIT_CODE}" ; then debug off; fi
}
cleanup_old_data() {
if var_true "${DEBUG_CLEANUP_OLD_DATA}" ; then debug on; fi
if [ -n "${backup_job_cleanup_time}" ]; then
if [ "${master_exit_code}" != 1 ]; then
case "${backup_job_backup_location,,}" in
"blobxfer" )
write_log info "Cleaning up old backups on filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}"
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
if var_true "${_postgres_backup_globals}"; then
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \;
fi
if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
else
write_log info "Syncing changes via blobxfer"
silent run_as_user blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
fi
;;
"file" | "filesystem" )
write_log info "Cleaning up old backups on filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}"
run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
if var_true "${_postgres_backup_globals}"; then
run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \;
fi
;;
"s3" | "minio" )
write_log info "Cleaning up old backups on S3 storage"
run_as_user aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${backup_job_s3_bucket}/${backup_job_s3_path}/ ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts} | grep " DIR " -v | grep " PRE " -v | while read -r s3_file; do
s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
s3_createdate=$(date -d "$s3_createdate" "+%s")
s3_olderthan=$(echo $(( $(date +%s)-${backup_job_cleanup_time}*60 )))
if [[ $s3_createdate -le $s3_olderthan ]] ; then
s3_filename=$(echo $s3_file | awk {'print $4'})
if [ "$s3_filename" != "" ] ; then
write_log debug "Deleting $s3_filename"
run_as_user aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
fi
fi
done
;;
esac
else
write_log error "Skipping Cleaning up old backups because there were errors in backing up"
fi
fi
if var_true "${DEBUG_CLEANUP_OLD_DATA}" ; then debug off; fi
}
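# Example (illustrative): _CLEANUP_TIME is expressed in minutes (it feeds
# 'find -mmin' and the S3 age comparison above), so DB01_CLEANUP_TIME=10080
# prunes backups for this job older than 7 days after a successful run.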
compression() {
if var_true "${DEBUG_COMPRESSION}" ; then debug on; fi
if var_false "${backup_job_parallel_compression}" ; then
backup_job_parallel_compression_threads=1
fi
if var_true "${backup_job_gz_rsyncable}" ; then
gz_rsyncable=--rsyncable
fi
case "${backup_job_compression,,}" in
bz* )
print_debug "[compression] Selected BZIP"
compress_cmd="${play_fair} pbzip2 -q -${backup_job_compression_level} -p${backup_job_parallel_compression_threads} "
compression_type="bzip2"
dir_compress_cmd=${compress_cmd}
extension=".bz2"
backup_job_filename_dir=${backup_job_filename}
backup_job_filename=${backup_job_filename}.bz2
;;
gz* )
print_debug "[compression] Selected GZIP"
compress_cmd="${play_fair} pigz -q -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} ${gz_rsyncable}"
compression_type="gzip"
extension=".gz"
dir_compress_cmd=${compress_cmd}
backup_job_filename_dir=${backup_job_filename}
backup_job_filename=${backup_job_filename}.gz
;;
xz* )
print_debug "[compression] Selected XZIP"
compress_cmd="${play_fair} pixz -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} "
compression_type="xzip"
dir_compress_cmd=${compress_cmd}
extension=".xz"
backup_job_filename_dir=${backup_job_filename}
backup_job_filename=${backup_job_filename}.xz
;;
zst* )
print_debug "[compression] Selected ZSTD"
compress_cmd="${play_fair} zstd -q -q --rm -${backup_job_compression_level} -T${backup_job_parallel_compression_threads} ${gz_rsyncable}"
compression_type="zstd"
dir_compress_cmd=${compress_cmd}
extension=".zst"
backup_job_filename_dir=${backup_job_filename}
backup_job_filename=${backup_job_filename}.zst
;;
"none" | "false")
compress_cmd="cat "
compression_type="none"
dir_compress_cmd="cat "
backup_job_filename_dir=${backup_job_filename}
;;
esac
case "${CONTAINER_LOG_LEVEL,,}" in
"debug" )
if [ "${compression_type}" = "none" ] ; then
compression_string="with '${backup_job_parallel_compression_threads}' threads"
else
compression_string="and compressing with '${compression_type}:${backup_job_compression_level}' with '${backup_job_parallel_compression_threads}' threads"
fi
;;
* )
if [ "${compression_type}" != "none" ] ; then
compression_string="and compressing with '${compression_type}'"
fi
;;
esac
if var_true "${DEBUG_COMPRESSION}" ; then debug off; fi
}
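# Example (illustrative values): COMPRESSION=GZ with COMPRESSION_LEVEL=6 and
# four parallel compression threads yields roughly
#   compress_cmd="pigz -q -6 -p 4"
#   backup_job_filename=<name>.sql.gz
# while COMPRESSION=NONE keeps the original filename and pipes through 'cat'.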
create_archive() {
if var_true "${DEBUG_CREATE_ARCHIVE}" ; then debug on; fi
if [ "${exit_code}" = "0" ] ; then
write_log notice "Creating archive file of '${backup_job_filename}' with tar ${compression_string}"
run_as_user tar cf - "${temporary_directory}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
backup_job_filename="${backup_job_filename_dir}".tar"${extension}"
rm -rf "${temporary_directory}"/"${backup_job_filename_dir}"
else
write_log error "Skipping creating archive file because backup did not complete successfully"
fi
if var_true "${DEBUG_CREATE_ARCHIVE}" ; then debug off; fi
}
create_schedulers() {
if var_true "${DEBUG_CREATE_SCHEDULERS}" ; then debug on; fi
backup() {
local backup_instances=$(set -o posix ; set | grep -Pc "^(DB[0-9]._HOST=|.*MONGO_CUSTOM_URI=)")
print_debug "[create_schedulers] Found '${backup_instances}' DB_HOST instances"
if [ -n "${DB_HOST}" ] && [ "${backup_instances}" ]; then
backup_instances=1;
print_debug "[create_schedulers] Detected using old DB_ variables"
fi
for (( instance = 01; instance <= backup_instances; )) ; do
instance=$(printf "%02d" $instance)
cp -R /assets/dbbackup/template-dbbackup /etc/services.available/dbbackup-"${instance}"
sed -i "s|{{BACKUP_NUMBER}}|${instance}|g" /etc/services.available/dbbackup-"${instance}"/run
if [ "${MODE,,}" = "manual" ] ; then service_stop dbbackup-"${instance}" ; fi
cat <<EOF >> /usr/bin/backup"${instance}"-now
#!/bin/bash
source /assets/functions/00-container
PROCESS_NAME=db-backup${instance}
print_info "Starting Manual Backup for db-backup${instance}"
#/var/run/s6/legacy-services/dbbackup-${instance}/run now
/etc/services.available/dbbackup-${instance}/run now
EOF
chmod +x /usr/bin/backup"${instance}"-now
if [ "${instance}" = "01" ] ; then
touch /usr/bin/backup-now
chmod +x /usr/bin/backup-now
cat <<EOF > /usr/bin/backup-now
#!/bin/bash
/usr/bin/backup${instance}-now now
EOF
else
echo "/usr/bin/backup${instance}-now now" >> /usr/bin/backup-now
fi
instance=$(echo "${instance} +1" | bc)
done
}
case "${1}" in
backup ) backup ;;
esac
if var_true "${DEBUG_CREATE_SCHEDULERS}" ; then debug off; fi
}
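# Example (illustrative): with two configured instances this creates
# /etc/services.available/dbbackup-01 and dbbackup-02 from the template plus the
# helpers /usr/bin/backup01-now and /usr/bin/backup02-now, and a combined
# /usr/bin/backup-now that triggers each instance in sequence.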
ctrl_c() {
sed -i "/^${backup_instance_number}/d" /tmp/.container/db-backup-backups
symlink_log
print_warn "User aborted"
exit
}
db_backup_container_init() {
rm -rf /tmp/.container/db-backup-backups
echo "0 0 * * * /usr/sbin/logrotate_dbbackup >/dev/null 2>&1" > /assets/cron/dbbackup_logrotate
touch /tmp/.container/db-backup-backups
}
debug() {
case "${1}" in
off)
backup_job_log_level=${_original_job_log_level}
CONTAINER_LOG_LEVEL=${_original_container_log_level}
DEBUG_MODE=${_original_debug_mode}
SHOW_OUTPUT=${_original_show_output}
if var_true "${DEBUG_MODE}" ; then
set -x
else
set +x
fi
;;
on)
if [ -z "${_original_container_log_level}" ]; then
_original_container_log_level="${CONTAINER_LOG_LEVEL}"
fi
if [ -z "${_original_job_log_level}" ]; then
_original_job_log_level="${backup_job_log_level}"
fi
if [ -z "${_original_debug_mode}" ]; then
_original_debug_mode="${DEBUG_MODE}"
fi
if [ -z "${_original_show_output}" ]; then
_original_show_output="${SHOW_OUTPUT}"
if ! [[ "${_original_show_output,,}" =~ true|false ]]; then
                    _original_show_output="FALSE"
fi
fi
backup_job_log_level=DEBUG
CONTAINER_LOG_LEVEL=DEBUG
SHOW_OUTPUT=TRUE
set -x
;;
esac
}
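# file_encryption: optionally GPG-encrypts the finished backup, either symmetrically with an
# ENCRYPT_PASSPHRASE or asymmetrically with an ENCRYPT_PUBLIC_KEY / ENCRYPT_PRIVATE_KEY pair
# (the two modes are mutually exclusive); on success the unencrypted file is removed and
# backup_job_filename gains a .gpg suffix.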
file_encryption() {
if var_true "${DEBUG_FILE_ENCRYPTION}" ; then debug on; fi
if var_true "${backup_job_encrypt}" ; then
if [ "${exit_code}" = "0" ] ; then
print_debug "[file_encryption] Encrypting"
output_off
if [ -n "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ]; then
print_error "Can't encrypt as both ENCRYPT_PASSPHRASE and ENCRYPT_PUBKEY exist!"
return
elif [ -n "${backup_job_encrypt_passphrase}" ] && [ -z "${backup_job_encrypt_public_key}" ]; then
print_notice "Encrypting with GPG Passphrase"
encrypt_routines_start_time=$(date +'%s')
encrypt_tmp_dir=$(run_as_user mktemp -d)
echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${temporary_directory}"/"${backup_job_filename}"
rm -rf "${encrypt_tmp_dir}"
elif [ -z "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ] && [ -n "${backup_job_encrypt_private_key}" ]; then
if [ -f "${backup_job_encrypt_private_key}" ]; then
encrypt_routines_start_time=$(date +'%s')
print_notice "Encrypting with GPG Private Key"
encrypt_tmp_dir=$(run_as_user mktemp -d)
cat "${backup_job_encrypt_private_key}" | run_as_user tee "${encrypt_tmp_dir}"/private_key.asc > /dev/null
print_debug "[file_encryption] [key] Importing Private Key"
silent run_as_user gpg --home ${encrypt_tmp_dir} --batch --import "${encrypt_tmp_dir}"/private_key.asc
print_debug "[file_encryption] [key] Encrypting to Public Key"
cat "${backup_job_encrypt_public_key}" | run_as_user tee "${encrypt_tmp_dir}"/public_key.asc > /dev/null
silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --encrypt --recipient-file "${encrypt_tmp_dir}"/public_key.asc "${temporary_directory}"/"${backup_job_filename}"
rm -rf "${encrypt_tmp_dir}"
fi
fi
if [ -f "${temporary_directory}"/"${backup_job_filename}".gpg ]; then
print_debug "[file_encryption] Deleting original file"
rm -rf "${temporary_directory:?}"/"${backup_job_filename:?}"
backup_job_filename="${backup_job_filename}.gpg"
encrypt_routines_finish_time=$(date +'%s')
encrypt_routines_total_time=$(echo $((encrypt_routines_finish_time-encrypt_routines_start_time)))
zabbix_encrypt_time=$(cat <<EOF
- dbbackup.backup.encrypt.duration.[${backup_job_db_host}.${backup_job_db_name}] ${encrypt_routines_total_time}
EOF
)
else
print_error "Encryption failed! Could not detect encrypted file"
return 99
fi
else
write_log error "Skipping encryption because backup did not complete successfully"
fi
fi
if var_true "${DEBUG_FILE_ENCRYPTION}" ; then debug off; fi
}
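# generate_checksum: writes an md5 or sha1 sum file next to the backup (skipped for 'none')
# and records the hash and duration for the Zabbix payload.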
generate_checksum() {
if var_true "${DEBUG_GENERATE_CHECKSUM}" ; then debug on; fi
if [ "${exit_code}" = "0" ] ; then
case "${backup_job_checksum,,}" in
"md5" )
checksum_command="${play_fair} md5sum"
checksum_extension="md5"
;;
"sha1" )
checksum_command="${play_fair} sha1sum"
checksum_extension="sha1"
;;
"none" )
return
;;
esac
checksum_routines_start_time=$(date +'%s')
write_log notice "Generating ${checksum_extension^^} sum for '${backup_job_filename}'"
cd "${temporary_directory}"
run_as_user ${checksum_command} "${backup_job_filename}" | run_as_user tee "${backup_job_filename}"."${checksum_extension}" > /dev/null
chmod ${backup_job_filesystem_permission} "${backup_job_filename}"."${checksum_extension}"
checksum_value=$(run_as_user cat "${backup_job_filename}"."${checksum_extension}" | awk '{print $1}')
checksum_routines_finish_time=$(date +'%s')
checksum_routines_total_time=$(echo $((checksum_routines_finish_time-checksum_routines_start_time)))
zabbix_checksum_time=$(cat <<EOF
- dbbackup.backup.checksum.duration.[${backup_job_db_host}.${backup_job_db_name}] ${checksum_routines_total_time}
- dbbackup.backup.checksum.hash.[${backup_job_db_host}.${backup_job_db_name}] ${checksum_value}
EOF
)
write_log debug "${checksum_extension^^}: ${checksum_value} - ${backup_job_filename}"
write_log debug "Checksum routines time taken: $(echo ${checksum_routines_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
else
write_log error "Skipping Checksum creation because backup did not complete successfully"
fi
if var_true "${DEBUG_GENERATE_CHECKSUM}" ; then debug off; fi
}
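# notify: dispatches a notification per entry in NOTIFICATION_TYPE (custom, email, matrix,
# mattermost, rocketchat) when ENABLE_NOTIFICATIONS is true. See the argument list below.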
notify() {
if var_true "${DEBUG_NOTIFY}" ; then debug on; fi
notification_custom() {
if [ -n "${NOTIFICATION_CUSTOM_SCRIPT}" ] ; then
if var_true "${NOTIFICATION_CUSTOM_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${NOTIFICATION_CUSTOM_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
else
if [ -x "${NOTIFICATION_CUSTOM_SCRIPT}" ] ; then
write_log notice "Found NOTIFICATION_CUSTOM_SCRIPT environment variable. Executing '${NOTIFICATION_CUSTOM_SCRIPT}"
# script timestamp logfile errorcode subject body
eval "${NOTIFICATION_CUSTOM_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
else
write_log error "Can't execute NOTIFICATION_CUSTOM_SCRIPT environment variable '${NOTIFICATION_CUSTOM_SCRIPT}' as its filesystem bit is not executible!"
fi
fi
else
print_error "[notifications] No NOTIFICATION_CUSTOM_SCRIPT variable set - Skipping sending Custom notifications"
fi
}
notification_email() {
transform_file_var \
SMTP_HOST \
SMTP_PORT
if [ -z "${MAIL_FROM}" ] ; then write_log error "[notifications] No MAIL_FROM variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
if [ -z "${MAIL_TO}" ] ; then write_log error "[notifications] No MAIL_TO variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
if [ -z "${SMTP_HOST}" ] ; then write_log error "[notifications] No SMTP_HOST variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
if [ -z "${SMTP_PORT}" ] ; then write_log error "[notifications] No SMTP_PORT variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
if var_nottrue "${skip_mail}" ; then
if ! grep -q ^from /etc/msmtprc ; then
echo "from ${MAIL_FROM}" >> /etc/msmtprc
fi
mail_recipients=$(echo "${MAIL_TO}" | tr "," "\n")
for mail_recipient in $mail_recipients ; do
cat <<EOF | msmtp -t "${mail_recipient}" -C /etc/msmtprc
To: ${mail_recipient}
Subject: [db-backup] ${4}
Time: ${1}
Log File: ${2}
Error Code: ${3}
${5}
EOF
done
fi
}
notification_mattermost() {
transform_file_var \
MATTERMOST_RECIPIENT \
MATTERMOST_USERNAME \
MATTERMOST_WEBHOOK_URL
if [ -z "${MATTERMOST_RECIPIENT}" ] ; then write_log error "[notifications] No MATTERMOST_RECIPIENT variable set - Skipping sending Mattermost notifications" ; skip_mattermost=true ; fi
if [ -z "${MATTERMOST_USERNAME}" ] ; then write_log error "[notifications] No MATTERMOST_USERNAME variable set - Skipping sending Mattermost notifications" ; skip_mattermost=true ; fi
if [ -z "${MATTERMOST_WEBHOOK_URL}" ] ; then write_log error "[notifications] No MATTERMOST_WEBHOOK_URL variable set - Skipping sending Mattermost notifications" ; skip_mattermost=true ; fi
if var_nottrue "${skip_mattermost}" ; then
emoji=":bomb:"
message="*[db-backup] ${4}*\n${5}\n*Timestamp:* ${1}\n*Logfile:* ${2}\n*Error Code: ${3}"
mattermost_recipients=$(echo "${MATTERMOST_RECIPIENT}" | tr "," "\n")
for mattermost_recipient in $mattermost_recipients ; do
payload="payload={\"channel\": \"${mattermost_recipient//\"/\\\"}\", \"username\": \"${MATTERMOST_USERNAME//\"/\\\"}\", \"text\": \"${message//\"/\\\"}\", \"icon_emoji\": \"${emoji}\"}"
silent /usr/bin/curl \
-m 5 \
--data-urlencode "${payload}" \
"${MATTERMOST_WEBHOOK_URL}"
done
fi
}
notification_matrix() {
transform_file_var \
MATRIX_HOST \
MATRIX_ROOM \
MATRIX_ACCESS_TOKEN
if [ -z "${MATRIX_HOST}" ] ; then write_log error "[notifications] No MATRIX_HOST variable set - Skipping sending matrix notifications" ; skip_matrix=true ; fi
if [ -z "${MATRIX_ROOM}" ] ; then write_log error "[notifications] No MATRIX_ROOM variable set - Skipping sending matrix notifications" ; skip_matrix=true ; fi
if [ -z "${MATRIX_ACCESS_TOKEN}" ] ; then write_log error "[notifications] No MATRIX_ACCESS_TOKEN variable set - Skipping sending matrix notifications" ; skip_matrix=true ; fi
if var_nottrue "${skip_matrix}" ; then
matrix_rooms=$(echo "${MATRIX_ROOM}" | tr "," "\n")
for matrix_room in $matrix_rooms ; do
curl \
-XPOST \
-d "{\"msgtype\":\"m.text\", \"body\":\"*[db-backup] ${4}*\n${5}\n*Timestamp:* ${1}\n*Logfile:* ${2}\n*Error Code: ${3}*\"}" \
"${MATRIX_HOST}/_matrix/client/r0/rooms/${matrix_room}/send/m.room.message?access_token=${MATRIX_ACCESS_TOKEN}"
done
fi
}
notification_rocketchat() {
transform_file_var \
ROCKETCHAT_RECIPIENT \
ROCKETCHAT_USERNAME \
ROCKETCHAT_WEBHOOK_URL
if [ -z "${ROCKETCHAT_RECIPIENT}" ] ; then write_log error "[notifications] No ROCKETCHAT_RECIPIENT variable set - Skipping sending Rocket.Chat notifications" ; skip_rocketchat=true ; fi
if [ -z "${ROCKETCHAT_USERNAME}" ] ; then write_log error "[notifications] No ROCKETCHAT_USERNAME variable set - Skipping sending Rocket.Chat notifications" ; skip_rocketchat=true ; fi
if [ -z "${ROCKETCHAT_WEBHOOK_URL}" ] ; then write_log error "[notifications] No ROCKETCHAT_WEBHOOK_URL variable set - Skipping sending Rocket.Chat notifications" ; skip_rocketchat=true ; fi
if var_nottrue "${skip_rocketchat}" ; then
emoji=":bomb:"
message="*[db-backup] ${4}*\n${5}\n*Timestamp:* ${1}\n*Logfile:* ${2}\n*Error Code: ${3}"
rocketchat_recipients=$(echo "${ROCKETCHAT_RECIPIENT}" | tr "," "\n")
for rocketchat_recipient in $rocketchat_recipients ; do
payload="payload={\"channel\": \"${rocketchat_recipient//\"/\\\"}\", \"username\": \"${ROCKETCHAT_USERNAME//\"/\\\"}\", \"text\": \"${message//\"/\\\"}\", \"icon_emoji\": \"${emoji}\"}"
silent /usr/bin/curl \
-m 5 \
--data-urlencode "${payload}" \
"${ROCKETCHAT_WEBHOOK_URL}"
done
fi
}
    # $1 timestamp
    # $2 logfile
    # $3 errorcode
    # $4 subject
    # $5 body
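    # Illustrative call with assumed values (real callers pass their own timestamp/logfile/code/subject/body):
    #   notify "$(date +'%Y-%m-%d %H:%M:%S')" "${LOG_PATH}/example.log" "1" "Backup failed" "dump exited with a non-zero code"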
if var_true "${ENABLE_NOTIFICATIONS}" ; then
notification_types=$(echo "${NOTIFICATION_TYPE}" | tr "," "\n")
for notification_type in $notification_types ; do
case "${notification_type,,}" in
"custom" )
print_debug "[notify] Sending Notification via custom"
notification_custom "${1}" "${2}" "${3}" "${4}" "${5}"
;;
"email" | "mail" )
print_debug "[notify] Sending Notification via email"
notification_email "${1}" "${2}" "${3}" "${4}" "${5}"
;;
"matrix" )
print_debug "[notify] Sending Notification via Matrix"
notification_matrix "${1}" "${2}" "${3}" "${4}" "${5}"
;;
"mattermost" )
print_debug "[notify] Sending Notification via Mattermost"
notification_mattermost "${1}" "${2}" "${3}" "${4}" "${5}"
;;
"rocketchat" )
print_debug "[notify] Sending Notification via Rocketchat"
notification_rocketchat "${1}" "${2}" "${3}" "${4}" "${5}"
;;
* )
print_error "Unknown 'NOTIFICATION_TYPE=${notification_type}' environment value. "
;;
esac
done
fi
if var_true "${DEBUG_NOTIFY}" ; then debug off; fi
}
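# move_dbbackup: reports the backup size, then moves the dump (and checksum file, if any) out of
# the temporary directory to the configured destination - local filesystem, an S3/MinIO bucket via
# the aws CLI, or Azure storage via blobxfer - recording move_exit_code for the post scripts.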
move_dbbackup() {
if var_true "${DEBUG_MOVE_DBBACKUP}" ; then debug on; fi
if [ "${exit_code}" = "0" ] ; then
dbbackup_size="$(run_as_user stat -c%s "${temporary_directory}"/"${backup_job_filename}")"
dbbackup_date="$(run_as_user date -r "${temporary_directory}"/"${backup_job_filename}" +'%s')"
case "${backup_job_size_value,,}" in
"b" | "bytes" )
backup_job_size_value=1
;;
"[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
backup_job_size_value="-h"
;;
*)
backup_job_size_value=1
;;
esac
if [ "${backup_job_size_value}" = "1" ] ; then
filesize=$(run_as_user stat -c%s "${temporary_directory}"/"${backup_job_filename}")
write_log notice "Backup of '${backup_job_filename}' created with the size of ${filesize} bytes"
else
filesize=$(run_as_user du -h "${temporary_directory}"/"${backup_job_filename}" | awk '{ print $1}')
write_log notice "Backup of '${backup_job_filename}' created with the size of ${filesize}"
fi
chmod "${backup_job_filesystem_permission}" "${temporary_directory}"/"${backup_job_filename}"
case "${backup_job_backup_location,,}" in
"file" | "filesystem" )
write_log debug "Moving backup to filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}"
if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${temporary_directory}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi
if var_true "${DEBUG_MOVE_DBBACKUP}"; then
cat <<EOF
## BEGIN Before Moving file from temporary_directory $(TZ=${TIMEZONE} date)
##
$(ls -l "${temporary_directory}"/*)
## END
EOF
fi
run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
move_exit_code=$?
if var_true "${DEBUG_MOVE_DBBACKUP}"; then
cat <<EOF
## BEGIN After Moving file from temporary_directory $(TZ=${TIMEZONE} date)
##
$(ls -l "${temporary_directory}"/*)
## END
## BEGIN After Moving file to _FILESYSTEM_PATH $(TZ=${TIMEZONE} date)
##
$(ls -l "${backup_job_filesystem_path}"/*)
## END
EOF
fi
if var_true "${backup_job_create_latest_symlink}" ; then
run_as_user ln -sfr "${backup_job_filesystem_path}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/latest-"${backup_job_filename_base}"
fi
if [ -n "${backup_job_archive_time}" ] ; then
run_as_user mkdir -p "${backup_job_filesystem_archive_path}"
run_as_user find "${backup_job_filesystem_path}"/ -type f -maxdepth 1 -mmin +"${backup_job_archive_time}" -iname "${backup_job_filename_base}*" -exec mv {} "${backup_job_filesystem_archive_path}" \;
fi
;;
"s3" | "minio" )
write_log debug "Moving backup to S3 Bucket"
if [ -n "${backup_job_s3_key_id}" ] && [ -n "${backup_job_s3_key_secret}" ]; then
export AWS_ACCESS_KEY_ID=${backup_job_s3_key_id}
export AWS_SECRET_ACCESS_KEY=${backup_job_s3_key_secret}
else
write_log debug "Variable S3_KEY_ID or S3_KEY_SECRET is not set. Please ensure sufficiant IAM role is assigned."
fi
export AWS_DEFAULT_REGION=${backup_job_s3_region}
if [ -f "${backup_job_s3_cert_ca_file}" ] ; then
write_log debug "Using Custom CA for S3 Backups"
s3_ca_cert="--ca-bundle ${backup_job_s3_cert_ca_file}"
fi
if var_true "${backup_job_s3_cert_skip_verify}" ; then
write_log debug "Skipping SSL verification for HTTPS S3 Hosts"
s3_ssl="--no-verify-ssl"
fi
[[ ( -n "${backup_job_s3_host}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${backup_job_s3_protocol}://${backup_job_s3_host}"
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${temporary_directory}/${backup_job_filename} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${backup_job_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
move_exit_code=$?
if [ "${backup_job_checksum}" != "none" ] ; then
silent run_as_user ${play_fair} aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${temporary_directory}/*.${checksum_extension} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/ ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}"
fi
run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
;;
"blobxfer" )
if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
else
write_log info "Synchronize local storage from blob with blobxfer"
${play_fair} blobxfer download --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --restore-file-lmt --delete
write_log info "Moving backup to external storage with blobxfer"
mkdir -p "${backup_job_filesystem_path}"
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user mv "${temporary_directory}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/; fi
run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
silent run_as_user ${play_fair} blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
move_exit_code=$?
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}" ; fi
run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
fi
;;
esac
else
write_log error "Skipping moving DB Backup to final location because backup did not complete successfully"
fi
run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
if var_true "${DEBUG_MOVE_DBBACKUP}" ; then debug off; fi
}
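# prepare_dbbackup: starts the backup timer, creates a per-job temporary directory under
# TEMP_PATH and derives the base and timestamped backup filenames.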
prepare_dbbackup() {
timer backup start
now=$(run_as_user date +"%Y%m%d-%H%M%S")
temporary_directory=$(mktemp -d -p "${TEMP_PATH}" -t ${backup_instance_number}_dbbackup.XXXXXX)
chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${temporary_directory}"
backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}
backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.sql
}
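# pre_dbbackup DB_NAME: runs the optional PRE_SCRIPT and any *.sh found in the pre
# custom-script location with the arguments DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME.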
pre_dbbackup() {
if var_true "${DEBUG_PRE_DBBACKUP}" ; then debug on; fi
### Pre Script Support
if [ -n "${backup_job_pre_script}" ] ; then
if var_true "${backup_job_pre_script_x_verify}" ; then
run_as_user eval "${backup_job_pre_script}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${backup_job_filename}"
else
if [ -x "${backup_job_pre_script}" ] ; then
write_log notice "Found PRE_SCRIPT environment variable. Executing '${backup_job_pre_script}"
run_as_user eval "${backup_job_pre_script}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${backup_job_filename}"
else
write_log error "Can't execute PRE_SCRIPT environment variable '${backup_job_pre_script}' as its filesystem bit is not executible!"
fi
fi
fi
### Pre Backup Custom Script Support
if [ -d "/assets/custom-scripts/pre" ] && dir_notempty "/assets/custom-scripts/pre" ; then
write_log warn "Found Custom Post Scripts in /assets/custom-scripts/pre - Automatically moving them to '${backup_job_script_location_pre}'"
mkdir -p "${backup_job_script_location_pre}"
silent cp -aR /assets/custom-scripts/pre/* "${backup_job_script_location_pre}"
fi
if [ -d "${backup_job_script_location_pre}" ] && dir_notempty "${backup_job_script_location_pre}" ; then
for f in $(find ${backup_job_script_location_pre} -name \*.sh -type f); do
if var_true "${backup_job_pre_script_x_verify}" ; then
${f} "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${backup_job_filename}"
else
if [ -x "${f}" ] ; then
write_log notice "Executing pre backup custom script : '${f}'"
## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
${f} "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${backup_job_filename}"
else
write_log error "Can't run pre backup custom script: '${f}' as its filesystem bit is not executible!"
fi
fi
done
fi
if var_true "${DEBUG_PRE_DBBACKUP}" ; then debug off; fi
}
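# post_dbbackup DB_NAME: records timings, optionally ships backup statistics to Zabbix, runs the
# optional POST_SCRIPT and post custom scripts (see the argument comment inside), then cleans up
# the temporary directory.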
post_dbbackup() {
if var_true "${DEBUG_POST_DBBACKUP}" ; then debug on; fi
dbbackup_finish_time=$(run_as_user date +"%s")
dbbackup_total_time=$(run_as_user echo $((dbbackup_finish_time-dbbackup_start_time)))
if var_true "${CONTAINER_ENABLE_MONITORING}" && [ "${CONTAINER_MONITORING_BACKEND,,}" = "zabbix" ]; then
source /assets/defaults/03-monitoring
write_log notice "Sending Backup Statistics to Zabbix"
silent run_as_user zabbix_sender -c "${ZABBIX_CONFIG_PATH}"/"${ZABBIX_CONFIG_FILE}" -k dbbackup.backup -o '[{"{#NAME}":"'${backup_job_db_host}.${backup_job_db_name}'"}]'
local zabbix_payload=$(run_as_user mktemp)
cat <<EOZP | silent run_as_user tee "${zabbix_payload}"
- dbbackup.backup.size.[${backup_job_db_host}.${backup_job_db_name}] "${dbbackup_size}"
- dbbackup.backup.datetime.[${backup_job_db_host}.${backup_job_db_name}] "${dbbackup_date}"
- dbbackup.backup.status.[${backup_job_db_host}.${backup_job_db_name}] "${exit_code}"
- dbbackup.backup.duration.[${backup_job_db_host}.${backup_job_db_name}] "$((dbbackup_finish_time-dbbackup_start_time))"
- dbbackup.backup.filename.[${backup_job_db_host}.${backup_job_db_name}] "${backup_job_filename}"
EOZP
if [ -n "${zabbix_checksum_time}" ] ; then echo "${zabbix_checksum_time}" | silent run_as_user tee -a "${zabbix_payload}" ; fi
if [ -n "${zabbix_encrypt_time}" ] ; then echo "${zabbix_encrypt_time}" | silent run_as_user tee -a "${zabbix_payload}" ; fi
silent run_as_user zabbix_sender -c "${ZABBIX_CONFIG_PATH}"/"${ZABBIX_CONFIG_FILE}" -i "${zabbix_payload}"
if [ "$?" != "0" ] ; then write_log error "Error sending statistics, consider disabling with 'CONTAINER_ENABLE_MONITORING=FALSE'" ; fi
unset zabbix_checksum_time
unset zabbix_encrypt_time
rm -rf "${zabbix_payload}"
fi
### Post Script Support
if [ -n "${backup_job_post_script}" ] ; then
if var_true "${backup_job_post_script_x_verify}" ; then
eval "${backup_job_post_script}" "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
if [ -x "${backup_job_post_script}" ] ; then
write_log notice "Found POST_SCRIPT environment variable. Executing '${backup_job_post_script}"
eval "${backup_job_post_script}" "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
write_log error "Can't execute POST_SCRIPT environment variable '${backup_job_post_script}' as its filesystem bit is not executible!"
fi
fi
fi
### Post Backup Custom Script Support
if [ -d "/assets/custom-scripts/" ] && dir_notempty "/assets/custom-scripts" ; then
write_log warn "Found Custom Post Scripts in /assets/custom-scripts/ - Automatically moving them to '${backup_job_script_location_post}'"
mkdir -p "${backup_job_script_location_post}"
cp -aR /assets/custom-scripts/* "${backup_job_script_location_post}"
fi
if [ -d "${backup_job_script_location_post}" ] && dir_notempty "${backup_job_script_location_post}" ; then
for f in $(run_as_user find "${backup_job_script_location_post}" -name \*.sh -type f); do
if var_true "${backup_job_post_script_x_verify}" ; then
${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
if [ -x "${f}" ] ; then
write_log notice "Executing post backup custom script : '${f}'"
                    ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE MOVE_EXIT_CODE
${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
write_log error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
fi
fi
done
fi
write_log notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
if var_true "${DEBUG_POST_DBBACKUP}" ; then debug on; fi
cd "${TEMP_PATH}"
rm -rf "${temporary_directory}"
}
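# process_limiter: blocks while the number of running jobs listed in
# /tmp/.container/db-backup-backups is at or above BACKUP_JOB_CONCURRENCY,
# printing a reminder roughly every 45 seconds.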
process_limiter() {
if var_true "${DEBUG_PROCESS_LIMITER}" ; then debug on; fi
    counter=0
    while true ; do
process_amount="$(wc -l /tmp/.container/db-backup-backups | awk '{print $1}')"
if [ "${process_amount}" -ge "${BACKUP_JOB_CONCURRENCY}" ] ; then
            if [ -z "${text_concurrency_limit_initial}" ] ; then
print_notice "Backup concurrency limit reached (${BACKUP_JOB_CONCURRENCY}). Waiting for other tasks to finish before backing up."
text_concurrency_limit_initial=true
fi
if [[ "${counter}" =~ 45|90|135|180|225|270|315|360|405|450|495|540|585|630|675|720|765|810|855|900|945|990|1035|1080|1125|1170|1215|1260|1305|1350|1395|1440|1485|1530|1575|1620|1665|1710|1755|1800|1845|1890|1935|1980|2025|2070|2115|2160|2205|2250|2295|2340|2385|2430|2475|2520|2565|2610|2655|2700|2745|2790|2835|2880|2925|2970|3015|3060|3105|3150|3195|3240|3285|3330|3375|3420|3465|3510|3555|3600 ]] ; then
if [ "${counter}" != 0 ] ; then counter_verbose=" (${counter} seconds so far)" ; fi
print_notice "Still waiting for other jobs to finish..${counter_verbose}"
fi
sleep 1
(( counter+=1))
else
break
fi
done
if var_true "${DEBUG_PROCESS_LIMITER}" ; then debug off; fi
}
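# run_as_user: runs a command as the backup user while preserving the environment,
# e.g. 'run_as_user tar cf - ...'.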
run_as_user() {
sudo -Eu "${DBBACKUP_USER}" "$@"
}
setup_mode() {
if [ "${MODE,,}" = "auto" ] || [ "${MODE,,}" = "default" ] ; then
write_log debug "Running in Auto / Default Mode - Letting Image control scheduling"
else
write_log info "Running in Manual mode - Execute 'backup_now' to perform a manual backup"
service_stop 10-db-backup
if var_true "${MANUAL_RUN_FOREVER}" ; then
mkdir -p /etc/services.d/99-run_forever
cat <<EOF > /etc/services.d/99-run_forever/run
#!/bin/bash
while true; do
sleep 86400
done
EOF
chmod +x /etc/services.d/99-run_forever/run
else
if var_true "${CONTAINER_ENABLE_SCHEDULING}" ; then
write_log error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_SCHEDULING=TRUE'"
exit 1
fi
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
write_log error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_MONITORING=TRUE'"
exit 1
fi
if var_true "${CONTAINER_ENABLE_LOGSHIPPING}" ; then
write_log error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_LOGSHIPPING=TRUE'"
exit 1
fi
fi
fi
}
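# symlink_log: points latest-<base>.log in LOG_PATH at the current job's dated log file.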
symlink_log () {
if [ -n "${backup_job_db_type}" ] && [ -n "${backup_job_db_name}" ] && [ -n "${backup_job_db_host}" ] && [ -n "${backup_job_filename_base}" ]; then
local oldpwd=$(pwd)
cd "${LOG_PATH}"/"$(date +'%Y%m%d')"
ln -sf "$(date +'%Y%m%d')"/"$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')"-"${backup_job_filename_base}".log ../latest-"${backup_job_filename_base}".log
cd "${oldpwd}"
fi
}
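# timer: multi-purpose time helper - 'backup'/'job' start|stop record durations, 'cron' computes
# the next run from a cron expression, while 'datetime', 'plusvalue' and 'time' compute
# time_wait/time_future for the other scheduling modes.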
timer() {
if var_true "${DEBUG_TIMER}" ; then debug on; fi
case "${1}" in
backup)
case "${2}" in
start)
dbbackup_start_time=$(run_as_user date +"%s")
;;
stop)
dbbackup_finish_time=$(run_as_user date +"%s")
dbbackup_total_time=$(echo $((dbbackup_finish_time-dbbackup_start_time)))
;;
esac
;;
cron)
parse_expression() {
local expressions=${1//,/ }
expressions=${expressions//\*/#}
local validate_all=""
local validate_temp=""
for expression in ${expressions}; do
if [ "${expression}" = "#" ] || [ "${expression}" = "${3}" ]; then
echo "${3}"
return 0
fi
expression_step=${expression##*\/}
expression_number=${expression%%\/*}
validate_temp=""
local expression_start=
local expression_end=
if [ "${expression_number}" = "#" ]; then
expression_start=0
expression_end="${2}"
else
expression_start=${expression_number%%-*}
expression_end=${expression_number##*-}
fi
validate_temp="$(seq "${expression_start}" "${expression_end}")"
if [ "${expression_step}" != "${expression}" ]; then
for step in ${validate_temp}; do
if [ $(( ( step - expression_start ) % expression_step )) -eq 0 ]; then
validate_all="${validate_all} ${step}"
fi
done
else
validate_all="${validate_all} ${validate_temp}"
fi
done
validate_all=$(echo "${validate_all}" | tr ' ' '\n' | sort -g -u | tr '\n' ' ')
for entry in $validate_all; do
if [ ${entry} -ge ${3} ]; then
echo "${entry}"
return 0
fi
done
echo "${validate_all// /}"
#echo "${validate_all%% *}"
}
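            # Example (illustrative): parse_expression "*/15" 59 50 prints 0 - there is no
            # remaining match within the range, so it wraps around to the first candidate.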
local cron_compare="${3}"
local cron_compare_seconds=${cron_compare}
local cron_compare_difference=$(( cron_compare - ${4} ))
if [ "${cron_compare_difference}" -lt 60 ]; then
cron_compare=$((cron_compare + $(( 60 - cron_compare_difference )) ))
fi
local cron_current_seconds="$(date --date=@"${cron_compare_seconds}" +"%-S")"
if [ "${cron_current_seconds}" -ne 0 ]; then
cron_compare=$(( cron_compare_seconds - cron_current_seconds ))
fi
local cron_minute="$(echo -n "${2}" | awk '{print $1}')"
local cron_hour="$(echo -n "${2}" | awk '{print $2}')"
local cron_day_of_month="$(echo -n "${2}" | awk '{print $3}')"
local cron_month="$(echo -n "${2}" | awk '{print $4}')"
local cron_day_of_week="$(echo -n "${2}" | awk '{print $5}')"
local cron_next_minute="$(date --date=@"${cron_compare}" +"%-M")"
local cron_next_hour="$(date --date=@"${cron_compare}" +"%-H")"
local cron_next_day_of_month="$(date --date=@"${cron_compare}" +"%-d")"
local cron_next_month="$(date --date=@"${cron_compare}" +"%-m")"
local cron_next_day_of_week="$(date --date=@"${cron_compare}" +"%-u")"
cron_next_day_of_week=$(( cron_next_day_of_week % 7 ))
local cron_next_year="$(date --date=@"${cron_compare}" +"%-Y")"
local cron_next=
local cron_parsed=1
while [ "${cron_parsed}" != "0" ]; do
print_debug "[timer] [cron] Parse Minute"
cron_next=$(parse_expression "${cron_minute}" 59 "${cron_next_minute}")
if [ "${cron_next}" != "${cron_next_minute}" ]; then
if [ "${cron_next_minute}" -gt "${cron_next}" ]; then
cron_next_hour=$(( cron_next_hour + 1 ))
fi
cron_next_minute="${cron_next}"
fi
print_debug "[timer] [cron] Parse Hour"
cron_next=$(parse_expression "${cron_hour}" 23 "${cron_next_hour}")
if [ "${cron_next}" != "${cron_next_hour}" ]; then
if [ "${cron_next_hour}" -gt "${cron_next}" ]; then
cron_next_day_of_month=$(( cron_next_day_of_month + 1 ))
fi
cron_next_hour="${cron_next}"
cron_next_minute=0
fi
print_debug "[timer] [cron] Parse Day of Week"
cron_next=$(parse_expression "${cron_day_of_week}" 6 "${cron_next_day_of_week}")
if [ "${cron_next}" != "${cron_next_day_of_week}" ]; then
day_of_week_difference=$(( cron_next - cron_next_day_of_week ))
if [ "${day_of_week_difference}" -lt 0 ]; then
day_of_week_difference=$(( day_of_week_difference + 7 ))
fi
cron_next_day_of_month=$(( cron_next_day_of_month + day_of_week_difference ))
cron_next_hour=0
cron_next_minute=0
fi
print_debug "[timer] [cron] Parse day of month"
case "${cron_next_month}" in
1|3|5|7|8|10|12)
last_day_of_month="31"
;;
"2")
local divide_by_4=$(( cron_next_year % 4 ))
local divide_by_100=$(( cron_next_year % 100 ))
local divide_by_400=$(( cron_next_year % 400 ))
last_day_of_month=28
if [ "${divide_by_4}" = "0" ] && [ "${divide_by_100}" != "0" ]; then
last_day_of_month="29"
fi
if [ "${divide_by_400}" = "0" ]; then
last_day_of_month="29"
fi
;;
*)
last_day_of_month="30"
;;
esac
cron_next=$(parse_expression "${cron_day_of_month}" 30 "${cron_next_day_of_month}")
if [ "${cron_next}" != "${cron_next_day_of_month}" ]; then
cron_next_hour=0
cron_next_minute=0
fi
if [ "${cron_next_day_of_month}" -gt "${cron_next}" ] || [ "${cron_next_day_of_month}" -gt "${last_day_of_month}" ]; then
cron_next_month=$(( cron_next_month + 1 ))
if [ ${cron_next_month} -gt 12 ]; then
cron_next_month=$(( cron_next_month - 12))
cron_next_year=$(( cron_next_year + 1 ))
fi
cron_next_day_of_month=1
else
cron_next_day_of_month=$cron_next
fi
print_debug "[timer] [cron] Parse Next Month"
cron_next=$(parse_expression "${cron_month}" 12 "${cron_next_month}")
if [ "${cron_next}" != "${cron_next_month}" ]; then
if [ "${cron_next}" -gt "12" ]; then
cron_next_year=$(( cron_next_year + 1 ))
cron_next=$(( cron_next - 12 ))
fi
if [ "${cron_next_month}" -gt "${cron_next}" ]; then
cron_next_year=$(( cron_next_year + 1 ))
fi
cron_next_month="${cron_next}"
                    cron_next_day_of_month=1
cron_next_minute=0
cron_next_hour=0
fi
cron_parsed=0
done
local cron_future=$(date --date="${cron_next_year}-$(printf "%02d" ${cron_next_month})-$(printf "%02d" ${cron_next_day_of_month})T$(printf "%02d" ${cron_next_hour}):$(printf "%02d" ${cron_next_minute}):00" "+%s")
local cron_future_difference=$(( cron_future - cron_compare_seconds ))
time_cron=true
time_wait="${cron_future_difference}"
time_future="${cron_future}"
;;
datetime)
time_begin=$(date -d "${backup_job_backup_begin}" +%s)
print_debug "[timer] [datetime] BACKUP_BEGIN time = ${time_begin}"
time_wait=$(( time_begin - time_current ))
print_debug "[timer] [datetime] Difference in seconds: ${time_wait}"
if (( ${time_wait} < 0 )); then
time_wait=$(( (${time_wait} + (${backup_job_backup_interval} - 1)) / (${backup_job_backup_interval} * 60) ))
time_wait=$(( ${time_wait} * -1 ))
print_debug "[timer] [datetime] Difference in seconds (rounded) time_wait is in the past : ${time_wait}"
fi
time_future=$(( time_current + time_wait ))
print_debug "[timer] [datetime] Future execution time = ${time_future}"
;;
job)
case "${2}" in
start)
backup_job_start_time=$(date +'%s')
;;
stop)
backup_job_finish_time=$(date +'%s')
backup_job_total_time=$(( backup_job_finish_time - backup_job_start_time))
;;
esac
;;
plusvalue)
time_wait=$(( ${BASH_REMATCH[1]} * 60 ))
time_future=$(( time_current + time_wait ))
;;
time)
time_future=$(date --date="$(date +"%Y%m%d") ${backup_job_backup_begin}" +"%s")
if [[ "${time_future}" < "${time_current}" ]]; then
time_future=$(( time_future + 24*60*60))
fi
time_wait=$(( time_future - time_current ))
;;
esac
if var_true "${DEBUG_TIMER}" ; then debug off; fi
}
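# write_log LEVEL MESSAGE...: appends a timestamped entry to the per-job log file when the
# message level is permitted by the configured backup_job_log_level, then echoes it via the
# matching print_* helper.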
write_log() {
if var_true "${DEBUG_WRITE_LOG}" ; then debug on; fi
output_off
local _arg_log_level=${1}
shift 1
local _arg_log_message="$@"
if [ -n "${backup_job_db_type}" ] && [ -n "${backup_job_db_name}" ] && [ -n "${backup_job_db_host}" ] && [ -n "${backup_job_filename_base}" ]; then
case "${_arg_log_level,,}" in
debug )
case "${_arg_log_level,,}" in
"debug" )
echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [debug] ${_arg_log_message}" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" > /dev/null
;;
esac
;;
error )
case "${_arg_log_level,,}" in
"debug" | "notice" | "warn" | "error")
echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [error] ${_arg_log_message}" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" > /dev/null
;;
esac
;;
info )
echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [info] ${_arg_log_message}" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" > /dev/null
;;
notice )
case "${_arg_log_level,,}" in
"debug" | "notice" )
echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [notice] ${_arg_log_message}" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" > /dev/null
;;
esac
;;
warn )
case "${_arg_log_level,,}" in
"debug" | "notice" | "warn" )
echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [warn] ${_arg_log_message}" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" > /dev/null
;;
esac
;;
esac
fi
print_${_arg_log_level} "${_arg_log_message}"
output_on
if var_true "${DEBUG_WRITE_LOG}" ; then debug off; fi
}