#!/command/with-contenv bash bootstrap_filesystem() { if var_true "${DEBUG_BOOTSTRAP_FILESYSTEM}" ; then debug on; fi if [ ! -d "${backup_job_filesystem_path}" ]; then mkdir -p "${backup_job_filesystem_path}" fi if [ "$(stat -c %U "${backup_job_filesystem_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_path}" ; fi if [ "$(stat -c %a "${backup_job_filesystem_path}")" != "${backup_job_filesystem_path_permission}" ] ; then chmod "${backup_job_filesystem_path_permission}" "${backup_job_filesystem_path}" ; fi if [ -d "${backup_job_filesystem_archive_path}" ]; then if [ "$(stat -c %U "${backup_job_filesystem_archive_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_archive_path}" ; fi if [ "$(stat -c %a "${backup_job_filesystem_archive_path}")" != "${backup_job_filesystem_path_permission}" ] ; then chmod "${backup_job_filesystem_path_permission}" "${backup_job_filesystem_archive_path}" ; fi fi if [ ! -d "${LOG_PATH}" ]; then mkdir -p "${LOG_PATH}" fi if [ "$(stat -c %U "${LOG_PATH}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${LOG_PATH}" ; fi if [ ! -d "${LOG_PATH}"/"$(date +'%Y%m%d')" ]; then run_as_user mkdir -p "${LOG_PATH}"/"$(date +'%Y%m%d')"; fi if [ "$(stat -c %a "${LOG_PATH}")" != "755" ] ; then chmod -R 755 "${LOG_PATH}" ; fi if [ ! -d "${TEMP_PATH}" ]; then mkdir -p "${TEMP_PATH}" fi if [ "$(stat -c %U "${TEMP_PATH}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${TEMP_PATH}" ; fi if var_true "${DEBUG_BOOTSTRAP_FILESYSTEM}" ; then debug off; fi } bootstrap_variables() { if var_true "${DEBUG_BOOTSTRAP_VARIABLES}" ; then debug on; fi backup_init() { backup_instance_number=${1} backup_instance_vars=$(mktemp) set -o posix ; set | grep -oE "^backup_job_.*=" | grep -oE ".*=" | sed "/--/d" > "${backup_instance_vars}" while read -r backup_instance_var ; do unset "$(echo "${backup_instance_var}" | cut -d = -f 1)" done < "${backup_instance_vars}" transform_file_var \ DEFAULT_AUTH \ DEFAULT_TYPE \ DEFAULT_HOST \ DEFAULT_PORT \ DEFAULT_NAME \ DEFAULT_NAME_EXCLUDE \ DEFAULT_USER \ DEFAULT_PASS \ DEFAULT_ENCRYPT_PASSPHRASE \ DEFAULT_ENCRYPT_PUBLIC_KEY \ DEFAULT_ENCRYPT_PRIVATE_KEY \ DEFAULT_MONGO_CUSTOM_URI \ DEFAULT_MYSQL_TLS_CA_FILE \ DEFAULT_MYSQL_TLS_CERT_FILE \ DEFAULT_MYSQL_TLS_KEY_FILE \ DEFAULT_S3_BUCKET \ DEFAULT_S3_KEY_ID \ DEFAULT_S3_KEY_SECRET \ DEFAULT_S3_PATH \ DEFAULT_S3_REGION \ DEFAULT_S3_HOST \ DEFAULT_S3_PROTOCOL \ DEFAULT_S3_EXTRA_OPTS \ DEFAULT_S3_CERT_CA_FILE \ DEFAULT_BLOBXFER_STORAGE_ACCOUNT \ DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY \ DEFAULT_BLOBXFER_REMOTE_PATH \ DEFAULT_BLOBXFER_MODE \ DB"${backup_instance_number}"_AUTH \ DB"${backup_instance_number}"_TYPE \ DB"${backup_instance_number}"_HOST \ DB"${backup_instance_number}"_PORT \ DB"${backup_instance_number}"_NAME \ DB"${backup_instance_number}"_NAME_EXCLUDE \ DB"${backup_instance_number}"_USER \ DB"${backup_instance_number}"_PASS \ DB"${backup_instance_number}"_ENCRYPT_PASSPHRASE \ DB"${backup_instance_number}"_ENCRYPT_PUBLIC_KEY \ DB"${backup_instance_number}"_ENCRYPT_PRIVATE_KEY \ DB"${backup_instance_number}"_MONGO_CUSTOM_URI \ DB"${backup_instance_number}"_MYSQL_TLS_CA_FILE \ DB"${backup_instance_number}"_MYSQL_TLS_CERT_FILE \ DB"${backup_instance_number}"_MYSQL_TLS_KEY_FILE \ DB"${backup_instance_number}"_S3_BUCKET \ DB"${backup_instance_number}"_S3_KEY_ID \ DB"${backup_instance_number}"_S3_KEY_SECRET \ DB"${backup_instance_number}"_S3_PATH \ DB"${backup_instance_number}"_S3_REGION \ DB"${backup_instance_number}"_S3_HOST \ DB"${backup_instance_number}"_S3_PROTOCOL \ DB"${backup_instance_number}"_S3_EXTRA_OPTS \ DB"${backup_instance_number}"_S3_CERT_CA_FILE \ DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT \ DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \ DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \ DB"${backup_instance_number}"_BLOBXFER_MODE \ BLOBXFER_STORAGE_ACCOUNT \ BLOBXFER_STORAGE_ACCOUNT_KEY \ DB_HOST \ DB_NAME \ DB_PORT \ DB_USER \ DB_PASS \ MONGO_CUSTOM_URI \ DB_AUTH \ S3_BUCKET \ S3_KEY_ID \ S3_KEY_SECRET \ S3_PATH \ S3_REGION \ S3_HOST \ S3_PROTOCOL \ S3_EXTRA_OPTS ## Legacy after DEFAULT set -o posix ; set | grep -E "^DB${backup_instance_number}_|^DEFAULT_|^DB_|^ARCHIVE|^BACKUP_|^BLOBXFER_|^CHECKSUM|^COMPRESSION|^CREATE_|^ENABLE_|^EXTRA_|^GZ_|^INFLUX_|^MYSQL_|^MONGO_|^PARALLEL|^PRE_|^POST_|^S3|^SKIP|^SPLIT" > "${backup_instance_vars}" ## Legacy checks from removed variables if [ -n "${ENABLE_CHECKSUM}" ]; then print_warn "Deprecated and unsupported variable 'ENABLE_CHECKSUM' detected - Please upgrade your variables as they will be removed in version 4.3.0" if var_false "${ENABLE_CHECKSUM}" ; then sed -i "/DEFAULT_CHECKSUM=/d" "${backup_instance_vars}" echo "DEFAULT_CHECKSUM=NONE" >> "${backup_instance_vars}" fi fi if [ -n "${DB_DUMP_BEGIN}" ]; then print_warn "Deprecated and unsupported variable 'DB_DUMP_BEGIN' detected - Please upgrade your variables as they will be removed in version 4.3.0" sed -i "/DEFAULT_BACKUP_BEGIN=/d" "${backup_instance_vars}" echo "DEFAULT_BACKUP_BEGIN=${DB_DUMP_BEGIN}" >> "${backup_instance_vars}" fi if [ -n "${DB_DUMP_FREQ}" ]; then print_warn "Deprecated and unsupported variable 'DB_DUMP_FREQ' detected - Please upgrade your variables as they will be removed in version 4.3.0" sed -i "/DEFAULT_BACKUP_INTERVAL=/d" "${backup_instance_vars}" echo "DEFAULT_BACKUP_INTERVAL=${DB_DUMP_FREQ}" >> "${backup_instance_vars}" fi if [ -n "${DB_DUMP_TARGET}" ]; then print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET' detected - Please upgrade your variables as they will be removed in version 4.3.0" sed -i "/DEFAULT_FILESYSTEM_PATH=/d" "${backup_instance_vars}" echo "DEFAULT_FILESYSTEM_PATH=${DB_DUMP_TARGET}" >> "${backup_instance_vars}" fi if [ -n "${DB_DUMP_TARGET_ARCHIVE}" ]; then print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET_ACRHIVE' detected - Please upgrade your variables as they will be removed in version 4.3.0" sed -i "/DEFAULT_FILESYSTEM_ARCHIVE_PATH=/d" "${backup_instance_vars}" echo "DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DB_DUMP_TARGET_ARCHIVE}" >> "${backup_instance_vars}" fi if [ -n "${EXTRA_DUMP_OPTS}" ]; then print_warn "Deprecated and unsupported variable 'EXTRA_DUMP_OPTS' detected - Please upgrade your variables as they will be removed in version 4.3.0" sed -i "/DEFAULT_EXTRA_BACKUP_OPTS=/d" "${backup_instance_vars}" echo "DEFAULT_EXTRA_BACKUP_OPTS=${EXTRA_DUMP_OPTS}" >> "${backup_instance_vars}" fi ## if grep -qo ".*_NAME='.*'" "${backup_instance_vars}"; then print_debug "[bootstrap_variables] [backup_init] Found _NAME variable with quotes" sed -i "s|_NAME='\(.*\)'|_NAME=\1|g" "${backup_instance_vars}" fi if grep -qo ".*_PASS='.*'" "${backup_instance_vars}"; then print_debug "[bootstrap_variables] [backup_init] Found _PASS variable with quotes" sed -i "s|_PASS='\(.*\)'|_PASS=\1|g" "${backup_instance_vars}" fi if grep -qo ".*_PASSPHRASE='.*'" "${backup_instance_vars}"; then print_debug "[bootstrap_variables] [backup_init] Found _PASSPHRASE variable with quotes" sed -i "s|_PASSPHRASE='\(.*\)'|_PASSPHRASE=\1|g" "${backup_instance_vars}" fi if grep -qo "MONGO_CUSTOM_URI='.*'" "${backup_instance_vars}"; then print_debug "[bootstrap_variables] [backup_init] Found _MONGO_CUSTOM_URI variable with quotes" sed -i "s|MONGO_CUSTOM_URI='\(.*\)'|MONGO_CUSTOM_URI=\1|g" "${backup_instance_vars}" fi if grep -qo ".*_OPTS='.*'" "${backup_instance_vars}"; then print_debug "[bootstrap_variables] [backup_init] Found _OPTS variable with quotes" sed -i "s|_OPTS='\(.*\)'|_OPTS=\1|g" "${backup_instance_vars}" fi transform_backup_instance_variable() { if grep -q "^DB${1}_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2-)" elif grep -q "^DB_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then # Allow old legacy work, perhaps remove old DB_ functionality in future? This should allow for seamless upgrades #print_warn "Legacy Variable 'DB_${2}'' detected - Please upgrade your variables as they will be removed in version 4.3.0" export "$3"="$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2-)" elif grep -q "^${2}=" "${backup_instance_vars}" && [ "$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then print_warn "Legacy unsupported variable '${2}' detected - Please upgrade your variables as they will be removed in version 4.3.0" export "$3"="$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2-)" elif grep -q "^DEFAULT_${2}=" "${backup_instance_vars}" && [ "$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then export "$3"="$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2-)" fi } transform_backup_instance_variable "${backup_instance_number}" ARCHIVE_TIME backup_job_archive_time transform_backup_instance_variable "${backup_instance_number}" AUTH backup_job_db_auth transform_backup_instance_variable "${backup_instance_number}" BACKUP_BEGIN backup_job_backup_begin transform_backup_instance_variable "${backup_instance_number}" BACKUP_INTERVAL backup_job_backup_interval transform_backup_instance_variable "${backup_instance_number}" BACKUP_GLOBALS backup_job_backup_pgsql_globals transform_backup_instance_variable "${backup_instance_number}" BACKUP_LOCATION backup_job_backup_location transform_backup_instance_variable "${backup_instance_number}" BLACKOUT_BEGIN backup_job_snapshot_blackout_start transform_backup_instance_variable "${backup_instance_number}" BLACKOUT_END backup_job_snapshot_blackout_finish transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_MODE backup_job_blobxfer_mode transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression transform_backup_instance_variable "${backup_instance_number}" COMPRESSION_LEVEL backup_job_compression_level transform_backup_instance_variable "${backup_instance_number}" CREATE_LATEST_SYMLINK backup_job_create_latest_symlink transform_backup_instance_variable "${backup_instance_number}" ENABLE_PARALLEL_COMPRESSION backup_job_parallel_compression transform_backup_instance_variable "${backup_instance_number}" ENCRYPT backup_job_encrypt transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PASSPHRASE backup_job_encrypt_passphrase transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PRIVATE_KEY backup_job_encrypt_private_key transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBLIC_KEY backup_job_encrypt_public_key transform_backup_instance_variable "${backup_instance_number}" EXTRA_BACKUP_OPTS backup_job_extra_backup_opts transform_backup_instance_variable "${backup_instance_number}" EXTRA_ENUMERATION_OPTS backup_job_extra_enumeration_opts transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH backup_job_filesystem_path transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH_PERMISSION backup_job_filesystem_path_permission transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PERMISSION backup_job_filesystem_permission transform_backup_instance_variable "${backup_instance_number}" GZ_RSYNCABLE backup_job_gz_rsyncable transform_backup_instance_variable "${backup_instance_number}" HOST backup_job_db_host transform_backup_instance_variable "${backup_instance_number}" INFLUX_VERSION backup_job_influx_version transform_backup_instance_variable "${backup_instance_number}" LOG_LEVEL backup_job_log_level transform_backup_instance_variable "${backup_instance_number}" MONGO_CUSTOM_URI backup_job_mongo_custom_uri transform_backup_instance_variable "${backup_instance_number}" MYSQL_CLIENT backup_job_mysql_client transform_backup_instance_variable "${backup_instance_number}" MYSQL_ENABLE_TLS backup_job_mysql_enable_tls transform_backup_instance_variable "${backup_instance_number}" MYSQL_EVENTS backup_job_mysql_events transform_backup_instance_variable "${backup_instance_number}" MYSQL_MAX_ALLOWED_PACKET backup_job_mysql_max_allowed_packet transform_backup_instance_variable "${backup_instance_number}" MYSQL_SINGLE_TRANSACTION backup_job_mysql_single_transaction transform_backup_instance_variable "${backup_instance_number}" MYSQL_STORED_PROCEDURES backup_job_mysql_stored_procedures transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_CA_FILE backup_job_mysql_tls_ca_file transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_CERT_FILE backup_job_mysql_tls_cert_file transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_KEY_FILE backup_job_mysql_tls_key_file transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_VERIFY backup_job_mysql_tls_verify transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_VERSION backup_job_mysql_tls_version transform_backup_instance_variable "${backup_instance_number}" MSSQL_MODE backup_job_mssql_mode transform_backup_instance_variable "${backup_instance_number}" NAME backup_job_db_name transform_backup_instance_variable "${backup_instance_number}" NAME_EXCLUDE backup_job_db_name_exclude transform_backup_instance_variable "${backup_instance_number}" PARALLEL_COMPRESSION_THREADS backup_job_parallel_compression_threads transform_backup_instance_variable "${backup_instance_number}" PASS backup_job_db_pass transform_backup_instance_variable "${backup_instance_number}" PORT backup_job_db_port transform_backup_instance_variable "${backup_instance_number}" POST_SCRIPT_X_VERIFY backup_job_post_script_x_verify transform_backup_instance_variable "${backup_instance_number}" POST_SCRIPT backup_job_post_script transform_backup_instance_variable "${backup_instance_number}" PRE_SCRIPT backup_job_pre_script transform_backup_instance_variable "${backup_instance_number}" PRE_SCRIPT_X_VERIFY backup_job_pre_script_x_verify transform_backup_instance_variable "${backup_instance_number}" RESOURCE_OPTIMIZED backup_job_resource_optimized transform_backup_instance_variable "${backup_instance_number}" S3_BUCKET backup_job_s3_bucket transform_backup_instance_variable "${backup_instance_number}" S3_CERT_CA_FILE backup_job_s3_cert_ca_file transform_backup_instance_variable "${backup_instance_number}" S3_CERT_SKIP_VERIFY backup_job_s3_cert_skip_verify transform_backup_instance_variable "${backup_instance_number}" S3_EXTRA_OPTS backup_job_s3_extra_opts transform_backup_instance_variable "${backup_instance_number}" S3_HOST backup_job_s3_host transform_backup_instance_variable "${backup_instance_number}" S3_KEY_ID backup_job_s3_key_id transform_backup_instance_variable "${backup_instance_number}" S3_KEY_SECRET backup_job_s3_key_secret transform_backup_instance_variable "${backup_instance_number}" S3_PATH backup_job_s3_path transform_backup_instance_variable "${backup_instance_number}" S3_PROTOCOL backup_job_s3_protocol transform_backup_instance_variable "${backup_instance_number}" S3_REGION backup_job_s3_region transform_backup_instance_variable "${backup_instance_number}" SCRIPT_LOCATION_POST backup_job_script_location_post transform_backup_instance_variable "${backup_instance_number}" SCRIPT_LOCATION_PRE backup_job_script_location_pre transform_backup_instance_variable "${backup_instance_number}" SIZE_VALUE backup_job_size_value transform_backup_instance_variable "${backup_instance_number}" SKIP_AVAILABILITY_CHECK backup_job_skip_availability_check transform_backup_instance_variable "${backup_instance_number}" SPLIT_DB backup_job_split_db transform_backup_instance_variable "${backup_instance_number}" TYPE backup_job_db_type transform_backup_instance_variable "${backup_instance_number}" USER backup_job_db_user backup_job_backup_begin=$(echo "${backup_job_backup_begin}" | sed -e "s|'||g" -e 's|"||g') if var_true "${DEBUG_BACKUP_INSTANCE_VARIABLE}" ; then cat < "${restore_vars}" while read -r restore_var ; do unset "$(echo "${restore_var}" | cut -d = -f 1)" done < "${restore_vars}" if [ -n "${DB_NAME}" ] && [ -z "${DB01_NAME}" ] ; then export DB01_NAME="${DB_NAME}" ; unset DB_NAME ; fi if [ -n "${DB_USER}" ] && [ -z "${DB01_USER}" ] ; then export DB01_USER="${DB_USER}" ; unset DB_USER ; fi if [ -n "${DB_PASS}" ] && [ -z "${DB01_PASS}" ] ; then export DB01_PASS="${DB_PASS}" ; unset DB_PASS ; fi if [ -n "${DB_TYPE}" ] && [ -z "${DB01_TYPE}" ] ; then export DB01_TYPE="${DB_TYPE}" ; unset DB_TYPE ; fi if [ -n "${DB_PORT}" ] && [ -z "${DB01_PORT}" ] ; then export DB01_PORT="${DB_PORT}" ; unset DB_PORT ; fi if [ -n "${DB_HOST}" ] && [ -z "${DB01_HOST}" ] ; then export DB01_HOST="${DB_HOST}" ; unset DB_HOST ; fi if [ -n "${DB_AUTH}" ] && [ -z "${DB01_AUTH}" ] ; then export DB01_AUTH="${DB_AUTH}" ; unset DB_AUTH ; fi if [ -n "${MONGO_CUSTOM_URI}" ] && [ -z "${DB01_MONGO_CUSTOM_URI}" ] ; then export DB01_MONGO_CUSTOM_URI="${DB_MONGO_CUSTOM_URI}" ; unset MONGO_CUSTOM_URI ; fi if [ -n "${MYSQL_TLS_CA_FILE}" ] && [ -z "${DB01_MYSQL_TLS_CA_FILE}" ] ; then export DB01_MYSQL_TLS_CA_FILE="${MYSQL_TLS_CA_FILE}" ; unset MYSQL_TLS_CA_FILE ; fi if [ -n "${MYSQL_TLS_CERT_FILE}" ] && [ -z "${DB01_MYSQL_TLS_CERT_FILE}" ] ; then export DB01_MYSQL_TLS_CERT_FILE="${MYSQL_TLS_CERT_FILE}" ; unset MYSQL_TLS_CERT_FILE ; fi if [ -n "${MYSQL_TLS_KEY_FILE}" ] && [ -z "${DB01_MYSQL_TLS_KEY_FILE}" ] ; then export DB01_MYSQL_TLS_KEY_FILE="${MYSQL_TLS_KEY_FILE}" ; unset MYSQL_TLS_KEY_FILE ; fi transform_file_var \ DEFAULT_AUTH \ DEFAULT_HOST \ DEFAULT_NAME \ DEFAULT_PASS \ DEFAULT_PORT \ DEFAULT_TYPE \ DEFAULT_USER \ DEFAULT_MONGO_CUSTOM_URI \ DEFAULT_MYSQL_TLS_CA_FILE \ DEFAULT_MYSQL_TLS_CERT_FILE \ DEFAULT_MYSQL_TLS_KEY_FILE set -o posix ; set | grep -E "^DEFAULT_" > "${restore_vars}" restore_instances=$(printenv | sort | grep -c '^DB.*_HOST') for (( restore_instance_number = 01; restore_instance_number <= restore_instances; restore_instance_number++ )) ; do restore_instance_number=$(printf "%02d" $restore_instance_number) transform_file_var \ DB"${restore_instance_number}"_AUTH \ DB"${restore_instance_number}"_HOST \ DB"${restore_instance_number}"_NAME \ DB"${restore_instance_number}"_PASS \ DB"${restore_instance_number}"_PORT \ DB"${restore_instance_number}"_TYPE \ DB"${restore_instance_number}"_USER \ DB"${restore_instance_number}"_MONGO_CUSTOM_URI \ DB"${restore_instance_number}"_MYSQL_TLS_CA_FILE \ DB"${restore_instance_number}"_MYSQL_TLS_CERT_FILE \ DB"${restore_instance_number}"_MYSQL_TLS_KEY_FILE set -o posix ; set | grep -E "^DB${restore_instance_number}_" >> "${restore_vars}" done if [ -n "${DB_DUMP_TARGET}" ]; then print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET' detected - Please upgrade your variables as they will be removed in version 4.3.0" sed -i "/DEFAULT_FILESYSTEM_PATH=/d" "${restore_vars}" echo "DEFAULT_FILESYSTEM_PATH=${DB_DUMP_TARGET}" >> "${restore_vars}" fi if [ -n "${DB_DUMP_TARGET_ARCHIVE}" ]; then print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET_ACRHIVE' detected - Please upgrade your variables as they will be removed in version 4.3.0" sed -i "/DEFAULT_FILESYSTEM_ARCHIVE_PATH=/d" "${restore_vars}" echo "DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DB_DUMP_TARGET_ARCHIVE}" >> "${restore_vars}" fi echo "RESTORE_VARS is ${restore_vars}" } parse_variables() { local v_instance=${1} check_var() { ## Check is Variable is Defined ## Usage: check_var transformed_varname real_varname "Description" output_off print_debug "[parse_variables] Looking for existence of $2 environment variable" if [ ! -v "$1" ]; then print_error "No '$3' Entered! - Set '\$$2' environment variable - Halting Backup Number ${v_instance}" s6-svc -d /var/run/s6/legacy-services/dbbackup-"${v_instance}" exit 1 fi output_on } check_var backup_job_db_type DB"${v_instance}"_TYPE "appropriate database type" case "${backup_job_db_type,,}" in couch* ) dbtype=couch backup_job_backup_job_db_port=${backup_job_db_port:-5984} check_var backup_job_db_user DB"${v_instance}"_USER "database username" check_var backup_job_db_pass DB"${v_instance}"_PASS "database password" ;; influx* ) dbtype=influx case "${backup_job_influx_version}" in 1) backup_job_db_port=${backup_job_db_port:-8088} ;; 2) backup_job_db_port=${backup_job_db_port:-8086} ;; esac check_var backup_job_db_user DB"${v_instance}"_USER "database username" check_var backup_job_db_pass DB"${v_instance}"_PASS "database password" check_var backup_job_influx_version DB"${v_instance}"_INFLUX_VERSION "InfluxDB version you are backing up from" ;; mongo* ) dbtype=mongo if [ -n "${backup_job_mongo_custom_uri}" ] ; then mongo_uri_proto=$(echo "${backup_job_mongo_custom_uri}" | grep :// | sed -e's,^\(.*://\).*,\1,g') mongo_uri_scratch="${backup_job_mongo_custom_uri/${mongo_uri_proto}/}" mongo_uri_username_password="$(echo "${mongo_uri_scratch}" | grep @ | rev | cut -d@ -f2- | rev)" if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch="$(echo "${mongo_uri_scratch}" | rev | cut -d@ -f1 | rev)" ; fi mongo_uri_port="$(echo "${mongo_uri_scratch}" | grep : | rev | cut -d: -f2- | rev)" if [ -n "${mongo_uri_port}" ]; then mongo_uri_port="$(echo "${mongo_uri_scratch}" | rev | cut -d: -f1 | cut -d/ -f2 | rev)" ; fi mongo_uri_hostname="$(echo "${mongo_uri_scratch}" | cut -d/ -f1 | cut -d: -f1 )" mongo_uri_database="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f1 )" mongo_uri_options="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f2 )" backup_job_db_name=${backup_job_db_name:-"${mongo_uri_database,,}"} backup_job_db_host=${backup_job_db_host:-"${mongo_uri_hostname,,}"} else backup_job_db_port=${backup_job_db_port:-27017} [[ ( -n "${backup_job_db_user}" ) ]] && MONGO_USER_STR=" --username ${backup_job_db_user}" [[ ( -n "${backup_job_db_pass}" ) ]] && MONGO_PASS_STR=" --password ${backup_job_db_pass}" [[ ( -n "${backup_job_db_name}" ) ]] && MONGO_DB_STR=" --db ${backup_job_db_name}" [[ ( -n "${backup_job_db_auth}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${backup_job_db_auth}" fi ;; "mysql" | "mariadb" ) dbtype=mysql backup_job_db_port=${backup_job_db_port:-3306} check_var backup_job_db_name DB"${v_instance}"_NAME "database name. Seperate multiple with commas" case "${backup_job_mysql_client,,}" in mariadb ) _mysql_prefix=/usr/bin/ _mysql_bin_prefix=mariadb- ;; mysql ) _mysql_prefix=/opt/mysql/bin/ _mysql_bin_prefix=mysql ;; * ) print_error "I don't understand '${backup_job_mysql_client,,}' as a client. Exiting.." exit 99 ;; esac print_debug "Using '${backup_job_mysql_client,,}' as client" if [ -n "${backup_job_db_pass}" ] ; then export MYSQL_PWD=${backup_job_db_pass} ; fi if var_true "${backup_job_mysql_enable_tls}" ; then if [ -n "${backup_job_mysql_tls_ca_file}" ] ; then mysql_tls_args="--ssl_ca=${backup_job_mysql_tls_ca_file}" fi if [ -n "${backup_job_mysql_tls_cert_file}" ] ; then mysql_tls_args="${mysql_tls_args} --ssl_cert=${backup_job_mysql_tls_cert_file}" fi if [ -n "${backup_job_mysql_tls_key_file}" ] ; then mysql_tls_args="${mysql_tls_args} --ssl_key=${backup_job_mysql_tls_key_file}" fi if var_true "${backup_job_mysql_tls_verify}" ; then mysql_tls_args="${mysql_tls_args} --sslverify-server-cert" fi if [ -n "${backup_job_mysql_tls_version}" ] ; then mysql_tls_args="${mysql_tls_args} --tls_version=${backup_job_mysql_tls_version}" fi else mysql_tls_args="--disable-ssl" fi ;; "mssql" | "microsoftsql" ) apkArch="$(apk --print-arch)"; \ case "$apkArch" in x86_64) mssql=true ;; *) write_log error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;; esac dbtype=mssql backup_job_db_port=${backup_job_db_port:-1433} ;; postgres* | "pgsql" ) dbtype=pgsql backup_job_db_port=${backup_job_db_port:-5432} [[ ( -n "${backup_job_db_pass}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${backup_job_db_pass}" check_var backup_job_db_name DB"${v_instance}"_NAME "database name. Seperate multiple with commas" ;; "redis" ) dbtype=redis backup_job_db_port=${backup_job_db_port:-6379} [[ ( -n "${backup_job_db_pass}" ) ]] && REDIS_PASS_STR=" -a ${backup_job_db_pass}" ;; sqlite* ) dbtype=sqlite3 ;; * ) write_log error "I don't recognize 'DB${v_instance}_TYPE=${backup_job_db_type}' - Exitting.." exit 99 ;; esac if var_true "${backup_job_resource_optimized}" ; then play_fair="nice -19 ionice -c2" ; fi } upgrade_lonely_variables() { upgrade_lonely_variables_tmp=$(mktemp) set -o posix; set | grep "^$1" | grep -v "^$1[0-9]._" > "${upgrade_lonely_variables_tmp}" while read -r exist_var ; do if [ ! -v "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2)" ] ; then export "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2-9)"="$(echo "${exist_var}" | cut -d = -f2)" else print_error "Variable ${1}01_$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2) already exists, fix your variables and start again.." exit 1 fi done < "${upgrade_lonely_variables_tmp}" rm -rf "${upgrade_lonely_variables_tmp}" } case "${1}" in backup_init ) backup_init "$2" ;; parse_variables) parse_variables "$2" ;; restore_init) restore_init ;; upgrade ) upgrade_lonely_variables "$2" ;; esac if var_true "${DEBUG_BOOTSTRAP_VARIABLES}" ; then debug off; fi } backup_couch() { prepare_dbbackup backup_job_filename=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt backup_job_filename_base=couch_${backup_job_db_name}_${backup_job_db_host#*//} compressionzyclonite if var_true "${DEBUG_BACKUP_COUCH}" ; then debug off; fi check_exit_code backup "${backup_job_filename}" timer backup finish file_encryption generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup ${backup_job_db_name} cleanup_old_data } backup_influx() { if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi if [ "${backup_job_db_name,,}" = "all" ] ; then write_log debug "[backup_influx] Preparing to back up everything" db_names=justbackupeverything else db_names=$(echo "${backup_job_db_name}" | tr ',' '\n') fi if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi case "${backup_job_influx_version,,}" in 1 ) print_debug "[backup_influx] Influx DB Version 1 selected" for db in ${db_names}; do prepare_dbbackup if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now} backup_job_filename_base=influx_${db}_${backup_job_db_host#*//} if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi pre_dbbackup "${db}" write_log notice "Dumping Influx database: '${db}'" if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} "${temporary_directory}"/"${backup_job_filename}" exit_code=$? check_exit_code backup "${backup_job_filename}" compression create_archive if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi timer backup finish file_encryption generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup "${db}" cleanup_old_data done ;; 2 ) print_debug "[backup_influx] Influx DB Version 2 selected" for db in ${db_names}; do prepare_dbbackup if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now} backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//} pre_dbbackup "${db}" write_log notice "Dumping Influx2 database: '${db}'" if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --compression none "${temporary_directory}"/"${backup_job_filename}" exit_code=$? check_exit_code backup "${backup_job_filename}" compression create_archive if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi timer backup finish file_encryption generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup "${db}" cleanup_old_data done ;; esac } backup_mongo() { prepare_dbbackup if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi if [ "${backup_job_compression,,}" = "none" ] ; then backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.archive backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,} else backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.archive.gz backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,} mongo_compression="--gzip" compression_string="and compressing with gzip" fi if [ -n "${backup_job_mongo_custom_uri}" ] ; then mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}" else mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}" fi if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi pre_dbbackup "${backup_job_db_name}" write_log notice "Dumping MongoDB database: '${backup_job_db_name}' ${compression_string}" if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi silent run_as_user ${play_fair} mongodump --archive=${temporary_directory}/${backup_job_filename} ${mongo_compression} ${mongo_backup_parameter} exit_code=$? if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi check_exit_code backup "${backup_job_filename}" timer backup finish file_encryption generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup "${backup_job_db_name}" cleanup_old_data if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi } backup_mssql() { case "${backup_job_mssql_mode,,}" in db|database ) prepare_dbbackup backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.bak backup_job_filename_base=mssql_${backup_job_db_name,,}_${backup_job_db_host,,} pre_dbbackup "${backup_job_db_name}" write_log notice "Dumping MSSQL database: '${backup_job_db_name}'" if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP DATABASE [${backup_job_db_name}] TO DISK = N'${temporary_directory}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10" exit_code=$? if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi backup_job_filename_original=${backup_job_filename} compression pre_dbbackup all if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}" check_exit_code backup "${backup_job_filename}" if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi timer backup finish file_encryption generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup "${backup_job_db_name}" cleanup_old_data ;; trn|transaction ) prepare_dbbackup backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.trn backup_job_filename_base=mssql_${backup_job_db_name,,}_trn_${backup_job_db_host,,} pre_dbbackup "${backup_job_db_name}" write_log notice "Dumping MSSQL database: '${backup_job_db_name}'" if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP LOG [${backup_job_db_name}] TO DISK = N'${temporary_directory}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-log', SKIP, NOREWIND, NOUNLOAD, STATS = 10" exit_code=$? if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi backup_job_filename_original=${backup_job_filename} compression pre_dbbackup all run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}" file_encryption timer backup finish generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup "${backup_job_db_name}" cleanup_old_data ;; esac } backup_mysql() { if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi if var_true "${backup_job_mysql_events}" ; then events="--events" fi if var_true "${backup_job_mysql_single_transaction}" ; then single_transaction="--single-transaction" fi if var_true "${backup_job_mysql_stored_procedures}" ; then stored_procedures="--routines" fi if [ "${backup_job_db_name,,}" = "all" ] ; then write_log debug "Preparing to back up everything except for information_schema and _* prefixes" db_names=$(run_as_user ${_mysql_prefix}${_mysql_bin_prefix/-/} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema ) if [ -n "${backup_job_db_name_exclude}" ] ; then db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n') for db_exclude in ${db_names_exclusions} ; do write_log debug "Excluding '${db_exclude}' from ALL DB_NAME backups" db_names=$(echo "$db_names" | sed "/${db_exclude}/d" ) done fi else db_names=$(echo "${backup_job_db_name}" | tr ',' '\n') fi if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')" if var_true "${backup_job_split_db}" ; then for db in ${db_names} ; do prepare_dbbackup backup_job_filename=${backup_job_mysql_client,,}_${db}_${backup_job_db_host,,}_${now}.sql backup_job_filename_base=${backup_job_mysql_client,,}_${db}_${backup_job_db_host,,} compression pre_dbbackup "${db}" write_log notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}" if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi run_as_user ${play_fair} ${_mysql_prefix}${_mysql_bin_prefix}dump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null exit_code=$? if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi check_exit_code backup "${backup_job_filename}" timer backup finish file_encryption generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup "${db}" cleanup_old_data done else write_log debug "Not splitting database dumps into their own files" prepare_dbbackup backup_job_filename=${backup_job_mysql_client,,}_all_${backup_job_db_host,,}_${now}.sql backup_job_filename_base=${backup_job_mysql_client,,}_all_${backup_job_db_host,,} compression pre_dbbackup all write_log notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}" if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi run_as_user ${play_fair} ${_mysql_prefix}${_mysql_bin_prefix}dump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null exit_code=$? if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi check_exit_code backup "${backup_job_filename}" timer backup finish file_encryption generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup all cleanup_old_data fi } backup_pgsql() { backup_pgsql_globals() { prepare_dbbackup backup_job_filename=pgsql_globals_${backup_job_db_host,,}_${now}.sql backup_job_global_base=pgsql_globals_${backup_job_db_host,,} compression pre_dbbackup "globals" print_notice "Dumping PostgresSQL globals: with 'pg_dumpall -g' ${compression_string}" if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null exit_code=$? if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi check_exit_code "${backup_job_filename}" timer backup finish file_encryption generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup "globals" cleanup_old_data } if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi export PGPASSWORD=${backup_job_db_pass} if [ -n "${backup_job_db_auth}" ] ; then authdb=${backup_job_db_auth} else authdb=${backup_job_db_user} fi if [ "${backup_job_db_name,,}" = "all" ] ; then write_log debug "Preparing to back up all databases" db_names=$(psql -h ${backup_job_db_host} -U ${backup_job_db_user} -p ${backup_job_db_port} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' ) if [ -n "${backup_job_db_name_exclude}" ] ; then db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n') for db_exclude in ${db_names_exclusions} ; do write_log debug "Excluding '${db_exclude}' from ALL DB_NAME backups" db_names=$(echo "$db_names" | sed "/${db_exclude}/d" ) done _postgres_backup_globals=true fi else db_names=$(echo "${backup_job_db_name}" | tr ',' '\n') _postgres_backup_globals=false fi if var_false "${_postgres_backup_globals}" && var_true "${backup_job_backup_pgsql_globals}" ; then _postgres_backup_globals=true; fi if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')" if var_true "${backup_job_split_db}" ; then for db in ${db_names} ; do prepare_dbbackup backup_job_filename=pgsql_${db}_${backup_job_db_host,,}_${now}.sql backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,} compression pre_dbbackup "${db}" write_log notice "Dumping PostgresSQL database: '${db}' ${compression_string}" if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null exit_code=$? if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi check_exit_code backup "${backup_job_filename}" timer backup finish file_encryption generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup "${db}" cleanup_old_data done if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi else write_log debug "Not splitting database dumps into their own files" prepare_dbbackup backup_job_filename=pgsql_all_${backup_job_db_host,,}_${now}.sql backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,} compression pre_dbbackup all write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}" if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi tmp_db_names=$(psql -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' ) for r_db_name in $(echo $db_names | xargs); do tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" ) done sleep 5 for x_db_name in ${tmp_db_names} ; do pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name}) done run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null exit_code=$? if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi check_exit_code backup "${backup_job_filename}" timer backup finish file_encryption generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup all cleanup_old_data if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi fi } backup_redis() { prepare_dbbackup write_log notice "Dumping Redis - Flushing Redis Cache First" backup_job_filename=redis_all_${backup_job_db_host,,}_${now}.rdb backup_job_filename_base=redis_${backup_job_db_host,,} if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${temporary_directory}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} sleep 10 try=5 while [ $try -gt 0 ] ; do saved=$(echo 'info Persistence' | redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}') ok=$(echo 'info Persistence' | redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}') if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then write_log notice "Redis Backup Complete" exit_code=0 break fi try=$((try - 1)) write_log warn "Redis Busy - Waiting and retrying in 5 seconds" sleep 5 done backup_job_filename_original=${backup_job_filename} if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi compression pre_dbbackup all if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}" if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi timer backup finish check_exit_code backup "${backup_job_filename}" file_encryption generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup all cleanup_old_data } backup_sqlite3() { prepare_dbbackup db=$(basename "${backup_job_db_host}") db="${db%.*}" backup_job_filename=sqlite3_${db}_${now}.sqlite3 backup_job_filename_base=sqlite3_${db} pre_dbbackup "${db}" write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}" if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug on; fi silent ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${temporary_directory}/backup_${now}.sqlite3'" exit_code=$? check_exit_code backup "${backup_job_filename}" if [ ! -f "${temporary_directory}"/backup_${now}.sqlite3 ] ; then print_error "SQLite3 backup failed! Exitting" return 1 fi compression run_as_user ${play_fair} cat "${temporary_directory}"/backup_${now}.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}/${backup_job_filename}" > /dev/null rm -rf "${temporary_directory}"/backup_${now}.sqlite3 if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug off; fi timer backup finish file_encryption generate_checksum move_dbbackup check_exit_code move "${backup_job_filename}" post_dbbackup "${db}" cleanup_old_data } check_availability() { if var_true "${DEBUG_CHECK_AVAILABILITY}" ; then debug on; fi ### Set the Database Type if var_false "${backup_job_skip_availability_check}" ; then case "${dbtype}" in "couch" ) counter=0 code_received=0 while [ "${code_received}" != "200" ]; do code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${backup_job_db_host}:${backup_job_db_port}) if [ "${code_received}" = "200" ] ; then break ; fi sleep 5 (( counter+=5 )) write_log warn "CouchDB Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)" done ;; "influx" ) counter=0 case "${backup_job_influx_version,,}" in 1 ) while ! (run_as_user nc -z ${backup_job_db_host#*//} ${backup_job_db_port}) ; do sleep 5 (( counter+=5 )) write_log warn "InfluxDB Host '${backup_job_db_host#*//}' is not accessible, retrying.. ($counter seconds so far)" done ;; 2 ) code_received=0 while [ "${code_received}" != "200" ]; do code_received=$(run_as_user curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${backup_job_db_host}:${backup_job_db_port}/health) if [ "${code_received}" = "200" ] ; then break ; fi sleep 5 (( counter+=5 )) write_log warn "InfluxDB Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)" done ;; esac ;; "mongo" ) if [ -n "${backup_job_mongo_custom_uri}" ] ; then write_log debug "Skipping Connectivity Check" else counter=0 while ! (run_as_user nc -z ${backup_job_db_host} ${backup_job_db_port}) ; do sleep 5 (( counter+=5 )) write_log warn "Mongo Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)" done fi ;; "mysql" ) counter=0 export MYSQL_PWD=${backup_job_db_pass} while ! (run_as_user ${_mysql_prefix}${_mysql_bin_prefix}admin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do sleep 5 (( counter+=5 )) write_log warn "MySQL/MariaDB Server '${backup_job_db_host}' is not accessible, retrying.. (${counter} seconds so far)" done ;; "mssql" ) counter=0 while ! (run_as_user nc -z ${backup_job_db_host} ${backup_job_db_port}) ; do sleep 5 (( counter+=5 )) write_log warn "MSSQL Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)" done ;; "pgsql" ) counter=0 until run_as_user pg_isready --host=${backup_job_db_host} --port=${backup_job_db_port} -q do sleep 5 (( counter+=5 )) write_log warn "Postgres Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)" done ;; "redis" ) counter=0 while ! (run_as_user nc -z "${backup_job_db_host}" "${backup_job_db_port}") ; do sleep 5 (( counter+=5 )) write_log warn "Redis Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)" done ;; "sqlite3" ) if [[ ! -e "${backup_job_db_host}" ]]; then write_log error "File '${backup_job_db_host}' does not exist." exit_code=2 exit $exit_code elif [[ ! -f "${backup_job_db_host}" ]]; then write_log error "File '${backup_job_db_host}' is not a file." exit_code=2 exit $exit_code elif [[ ! -r "${backup_job_db_host}" ]]; then write_log error "File '${backup_job_db_host}' is not readable." exit_code=2 exit $exit_code fi ;; esac fi if var_true "${DEBUG_CHECK_AVAILABILITY}" ; then debug off; fi } check_exit_code() { if var_true "${DEBUG_CHECK_EXIT_CODE}" ; then debug on; fi case "${1}" in backup ) write_log debug "DB Backup exit Code is ${exit_code}" case "${exit_code}" in 0 ) write_log info "DB Backup of '${2}' completed successfully" ;; * ) write_log error "DB Backup of '${2}' reported errors" notify \ "$(date -d @"${backup_job_start_time}" +'%Y%m%d_%H%M%S')" \ "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" \ "${exit_code}" \ "[FATAL] Host: ${backup_job_db_host} Name: ${backup_job_db_name} - Backup failed completely" \ "DB Backup is failing to backup the '${backup_job_db_host}-${backup_job_db_name}' job." master_exit_code=1 ;; esac ;; move ) write_log debug "Move exit Code is ${exit_code}" case "${move_exit_code}" in 0 ) write_log debug "Moving of backup '${2}' completed successfully" ;; * ) write_log error "Moving of backup '${2}' reported errors" notify \ "$(date -d @"${backup_job_start_time}" +'%Y%m%d_%H%M%S')" \ "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" \ "${move_exit_code}" \ "[FATAL] Host: ${backup_job_db_host} Name: ${backup_job_db_name} - Backup failed to move to destination" \ "DB Backup is failing to backup the '${backup_job_db_host}-${backup_job_db_name}' job." master_exit_code=1 ;; esac ;; esac if var_true "${DEBUG_CHECK_EXIT_CODE}" ; then debug off; fi } cleanup_old_data() { if var_true "${DEBUG_CLEANUP_OLD_DATA}" ; then debug on; fi if [ -n "${backup_job_cleanup_time}" ]; then if [ "${master_exit_code}" != 1 ]; then case "${backup_job_backup_location,,}" in "blobxfer" ) write_log info "Cleaning up old backups on filesystem" run_as_user mkdir -p "${backup_job_filesystem_path}" find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \; if var_true "${_postgres_backup_globals}"; then find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \; fi if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions" else write_log info "Syncing changes via blobxfer" silent run_as_user blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only fi ;; "file" | "filesystem" ) write_log info "Cleaning up old backups on filesystem" run_as_user mkdir -p "${backup_job_filesystem_path}" run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \; if var_true "${_postgres_backup_globals}"; then run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \; fi ;; "s3" | "minio" ) write_log info "Cleaning up old backups on S3 storage" run_as_user aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${backup_job_s3_bucket}/${backup_job_s3_path}/ ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts} | grep " DIR " -v | grep " PRE " -v | while read -r s3_file; do s3_createdate=$(echo $s3_file | awk {'print $1" "$2'}) s3_createdate=$(date -d "$s3_createdate" "+%s") s3_olderthan=$(echo $(( $(date +%s)-${backup_job_cleanup_time}*60 ))) if [[ $s3_createdate -le $s3_olderthan ]] ; then s3_filename=$(echo $s3_file | awk {'print $4'}) if [ "$s3_filename" != "" ] ; then write_log debug "Deleting $s3_filename" run_as_user aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts} fi fi done ;; esac else write_log error "Skipping Cleaning up old backups because there were errors in backing up" fi fi if var_true "${DEBUG_CLEANUP_OLD_DATA}" ; then debug off; fi } compression() { if var_true "${DEBUG_COMPRESSION}" ; then debug on; fi if var_false "${backup_job_parallel_compression}" ; then backup_job_parallel_compression_threads=1 fi if var_true "${backup_job_gz_rsyncable}" ; then gz_rsyncable=--rsyncable fi case "${backup_job_compression,,}" in bz* ) print_debug "[compression] Selected BZIP" compress_cmd="${play_fair} pbzip2 -q -${backup_job_compression_level} -p${backup_job_parallel_compression_threads} " compression_type="bzip2" dir_compress_cmd=${compress_cmd} extension=".bz2" backup_job_filename_dir=${backup_job_filename} backup_job_filename=${backup_job_filename}.bz2 ;; gz* ) print_debug "[compression] Selected GZIP" compress_cmd="${play_fair} pigz -q -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} ${gz_rsyncable}" compression_type="gzip" extension=".gz" dir_compress_cmd=${compress_cmd} backup_job_filename_dir=${backup_job_filename} backup_job_filename=${backup_job_filename}.gz ;; xz* ) print_debug "[compression] Selected XZIP" compress_cmd="${play_fair} pixz -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} " compression_type="xzip" dir_compress_cmd=${compress_cmd} extension=".xz" backup_job_filename_dir=${backup_job_filename} backup_job_filename=${backup_job_filename}.xz ;; zst* ) print_debug "[compression] Selected ZSTD" compress_cmd="${play_fair} zstd -q -q --rm -${backup_job_compression_level} -T${backup_job_parallel_compression_threads} ${gz_rsyncable}" compression_type="zstd" dir_compress_cmd=${compress_cmd} extension=".zst" backup_job_filename_dir=${backup_job_filename} backup_job_filename=${backup_job_filename}.zst ;; "none" | "false") compress_cmd="cat " compression_type="none" dir_compress_cmd="cat " backup_job_filename_dir=${backup_job_filename} ;; esac case "${CONTAINER_LOG_LEVEL,,}" in "debug" ) if [ "${compression_type}" = "none" ] ; then compression_string="with '${backup_job_parallel_compression_threads}' threads" else compression_string="and compressing with '${compression_type}:${backup_job_compression_level}' with '${backup_job_parallel_compression_threads}' threads" fi ;; * ) if [ "${compression_type}" != "none" ] ; then compression_string="and compressing with '${compression_type}'" fi ;; esac if var_true "${DEBUG_COMPRESSION}" ; then debug off; fi } create_archive() { if var_true "${DEBUG_CREATE_ARCHIVE}" ; then debug on; fi if [ "${exit_code}" = "0" ] ; then write_log notice "Creating archive file of '${backup_job_filename}' with tar ${compression_string}" run_as_user tar cf - "${temporary_directory}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null backup_job_filename="${backup_job_filename_dir}".tar"${extension}" rm -rf "${temporary_directory}"/"${backup_job_filename_dir}" else write_log error "Skipping creating archive file because backup did not complete successfully" fi if var_true "${DEBUG_CREATE_ARCHIVE}" ; then debug off; fi } create_schedulers() { if var_true "${DEBUG_CREATE_SCHEDULERS}" ; then debug on; fi backup() { local backup_instances=$(set -o posix ; set | grep -Pc "^(DB[0-9]._HOST=|.*MONGO_CUSTOM_URI=)") print_debug "[create_schedulers] Found '${backup_instances}' DB_HOST instances" if [ -n "${DB_HOST}" ] && [ "${backup_instances}" ]; then backup_instances=1; print_debug "[create_schedulers] Detected using old DB_ variables" fi for (( instance = 01; instance <= backup_instances; )) ; do instance=$(printf "%02d" $instance) cp -R /assets/dbbackup/template-dbbackup /etc/services.available/dbbackup-"${instance}" sed -i "s|{{BACKUP_NUMBER}}|${instance}|g" /etc/services.available/dbbackup-"${instance}"/run if [ "${MODE,,}" = "manual" ] ; then service_stop dbbackup-"${instance}" ; fi cat <> /usr/bin/backup"${instance}"-now #!/bin/bash source /assets/functions/00-container PROCESS_NAME=db-backup${instance} print_info "Starting Manual Backup for db-backup${instance}" #/var/run/s6/legacy-services/dbbackup-${instance}/run now /etc/services.available/dbbackup-${instance}/run now EOF chmod +x /usr/bin/backup"${instance}"-now if [ "${instance}" = "01" ] ; then touch /usr/bin/backup-now chmod +x /usr/bin/backup-now cat < /usr/bin/backup-now #!/bin/bash /usr/bin/backup${instance}-now now EOF else echo "/usr/bin/backup${instance}-now now" >> /usr/bin/backup-now fi instance=$(echo "${instance} +1" | bc) done } case "${1}" in backup ) backup ;; esac if var_true "${DEBUG_CREATE_SCHEDULERS}" ; then debug off; fi } ctrl_c() { sed -i "/^${backup_instance_number}/d" /tmp/.container/db-backup-backups symlink_log print_warn "User aborted" exit } db_backup_container_init() { rm -rf /tmp/.container/db-backup-backups echo "0 0 * * * /usr/sbin/logrotate_dbbackup >/dev/null 2>&1" > /assets/cron/dbbackup_logrotate touch /tmp/.container/db-backup-backups } debug() { case "${1}" in off) backup_job_log_level=$_original_job_log_level} CONTAINER_LOG_LEVEL=${_original_container_log_level} DEBUG_MODE=${_original_debug_mode} SHOW_OUTPUT=${_original_show_output} if var_true "${DEBUG_MODE}" ; then set -x else set +x fi ;; on) if [ -z "${_original_container_log_level}" ]; then _original_container_log_level="${CONTAINER_LOG_LEVEL}" fi if [ -z "${_original_job_log_level}" ]; then _original_job_log_level="${backup_job_log_level}" fi if [ -z "${_original_debug_mode}" ]; then _original_debug_mode="${DEBUG_MODE}" fi if [ -z "${_original_show_output}" ]; then _original_show_output="${SHOW_OUTPUT}" if ! [[ "${_original_show_output,,}" =~ true|false ]]; then __original_show_output="FALSE" fi fi backup_job_log_level=DEBUG CONTAINER_LOG_LEVEL=DEBUG SHOW_OUTPUT=TRUE set -x ;; esac } file_encryption() { if var_true "${DEBUG_FILE_ENCRYPTION}" ; then debug on; fi if var_true "${backup_job_encrypt}" ; then if [ "${exit_code}" = "0" ] ; then print_debug "[file_encryption] Encrypting" output_off if [ -n "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ]; then print_error "Can't encrypt as both ENCRYPT_PASSPHRASE and ENCRYPT_PUBKEY exist!" return elif [ -n "${backup_job_encrypt_passphrase}" ] && [ -z "${backup_job_encrypt_public_key}" ]; then print_notice "Encrypting with GPG Passphrase" encrypt_routines_start_time=$(date +'%s') encrypt_tmp_dir=$(run_as_user mktemp -d) echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${temporary_directory}"/"${backup_job_filename}" rm -rf "${encrypt_tmp_dir}" elif [ -z "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ] && [ -n "${backup_job_encrypt_private_key}" ]; then if [ -f "${backup_job_encrypt_private_key}" ]; then encrypt_routines_start_time=$(date +'%s') print_notice "Encrypting with GPG Private Key" encrypt_tmp_dir=$(run_as_user mktemp -d) cat "${backup_job_encrypt_private_key}" | run_as_user tee "${encrypt_tmp_dir}"/private_key.asc > /dev/null print_debug "[file_encryption] [key] Importing Private Key" silent run_as_user gpg --home ${encrypt_tmp_dir} --batch --import "${encrypt_tmp_dir}"/private_key.asc print_debug "[file_encryption] [key] Encrypting to Public Key" cat "${backup_job_encrypt_public_key}" | run_as_user tee "${encrypt_tmp_dir}"/public_key.asc > /dev/null silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --encrypt --recipient-file "${encrypt_tmp_dir}"/public_key.asc "${temporary_directory}"/"${backup_job_filename}" rm -rf "${encrypt_tmp_dir}" fi fi if [ -f "${temporary_directory}"/"${backup_job_filename}".gpg ]; then print_debug "[file_encryption] Deleting original file" rm -rf "${temporary_directory:?}"/"${backup_job_filename:?}" backup_job_filename="${backup_job_filename}.gpg" encrypt_routines_finish_time=$(date +'%s') encrypt_routines_total_time=$(echo $((encrypt_routines_finish_time-encrypt_routines_start_time))) zabbix_encrypt_time=$(cat < /dev/null chmod ${backup_job_filesystem_permission} "${backup_job_filename}"."${checksum_extension}" checksum_value=$(run_as_user cat "${backup_job_filename}"."${checksum_extension}" | awk '{print $1}') checksum_routines_finish_time=$(date +'%s') checksum_routines_total_time=$(echo $((checksum_routines_finish_time-checksum_routines_start_time))) zabbix_checksum_time=$(cat <> /etc/msmtprc fi mail_recipients=$(echo "${MAIL_TO}" | tr "," "\n") for mail_recipient in $mail_recipients ; do cat < /etc/services.d/99-run_forever/run #!/bin/bash while true; do sleep 86400 done EOF chmod +x /etc/services.d/99-run_forever/run else if var_true "${CONTAINER_ENABLE_SCHEDULING}" ; then write_log error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_SCHEDULING=TRUE'" exit 1 fi if var_true "${CONTAINER_ENABLE_MONITORING}" ; then write_log error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_MONITORING=TRUE'" exit 1 fi if var_true "${CONTAINER_ENABLE_LOGSHIPPING}" ; then write_log error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_LOGSHIPPING=TRUE'" exit 1 fi fi fi } symlink_log () { if [ -n "${backup_job_db_type}" ] && [ -n "${backup_job_db_name}" ] && [ -n "${backup_job_db_host}" ] && [ -n "${backup_job_filename_base}" ]; then local oldpwd=$(pwd) cd "${LOG_PATH}"/"$(date +'%Y%m%d')" ln -sf "$(date +'%Y%m%d')"/"$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')"-"${backup_job_filename_base}".log ../latest-"${backup_job_filename_base}".log cd "${oldpwd}" fi } timer() { if var_true "${DEBUG_TIMER}" ; then debug on; fi case "${1}" in backup) case "${2}" in start) dbbackup_start_time=$(run_as_user date +"%s") ;; stop) dbbackup_finish_time=$(run_as_user date +"%s") dbbackup_total_time=$(echo $((dbbackup_finish_time-dbbackup_start_time))) ;; esac ;; cron) parse_expression() { local expressions=${1//,/ } expressions=${expressions//\*/#} local validate_all="" local validate_temp="" for expression in ${expressions}; do if [ "${expression}" = "#" ] || [ "${expression}" = "${3}" ]; then echo "${3}" return 0 fi expression_step=${expression##*\/} expression_number=${expression%%\/*} validate_temp="" local expression_start= local expression_end= if [ "${expression_number}" = "#" ]; then expression_start=0 expression_end="${2}" else expression_start=${expression_number%%-*} expression_end=${expression_number##*-} fi validate_temp="$(seq "${expression_start}" "${expression_end}")" if [ "${expression_step}" != "${expression}" ]; then for step in ${validate_temp}; do if [ $(( ( step - expression_start ) % expression_step )) -eq 0 ]; then validate_all="${validate_all} ${step}" fi done else validate_all="${validate_all} ${validate_temp}" fi done validate_all=$(echo "${validate_all}" | tr ' ' '\n' | sort -g -u | tr '\n' ' ') for entry in $validate_all; do if [ ${entry} -ge ${3} ]; then echo "${entry}" return 0 fi done echo "${validate_all// /}" #echo "${validate_all%% *}" } local cron_compare="${3}" local cron_compare_seconds=${cron_compare} local cron_compare_difference=$(( cron_compare - ${4} )) if [ "${cron_compare_difference}" -lt 60 ]; then cron_compare=$((cron_compare + $(( 60 - cron_compare_difference )) )) fi local cron_current_seconds="$(date --date=@"${cron_compare_seconds}" +"%-S")" if [ "${cron_current_seconds}" -ne 0 ]; then cron_compare=$(( cron_compare_seconds - cron_current_seconds )) fi local cron_minute="$(echo -n "${2}" | awk '{print $1}')" local cron_hour="$(echo -n "${2}" | awk '{print $2}')" local cron_day_of_month="$(echo -n "${2}" | awk '{print $3}')" local cron_month="$(echo -n "${2}" | awk '{print $4}')" local cron_day_of_week="$(echo -n "${2}" | awk '{print $5}')" local cron_next_minute="$(date --date=@"${cron_compare}" +"%-M")" local cron_next_hour="$(date --date=@"${cron_compare}" +"%-H")" local cron_next_day_of_month="$(date --date=@"${cron_compare}" +"%-d")" local cron_next_month="$(date --date=@"${cron_compare}" +"%-m")" local cron_next_day_of_week="$(date --date=@"${cron_compare}" +"%-u")" cron_next_day_of_week=$(( cron_next_day_of_week % 7 )) local cron_next_year="$(date --date=@"${cron_compare}" +"%-Y")" local cron_next= local cron_parsed=1 while [ "${cron_parsed}" != "0" ]; do print_debug "[timer] [cron] Parse Minute" cron_next=$(parse_expression "${cron_minute}" 59 "${cron_next_minute}") if [ "${cron_next}" != "${cron_next_minute}" ]; then if [ "${cron_next_minute}" -gt "${cron_next}" ]; then cron_next_hour=$(( cron_next_hour + 1 )) fi cron_next_minute="${cron_next}" fi print_debug "[timer] [cron] Parse Hour" cron_next=$(parse_expression "${cron_hour}" 23 "${cron_next_hour}") if [ "${cron_next}" != "${cron_next_hour}" ]; then if [ "${cron_next_hour}" -gt "${cron_next}" ]; then cron_next_day_of_month=$(( cron_next_day_of_month + 1 )) fi cron_next_hour="${cron_next}" cron_next_minute=0 fi print_debug "[timer] [cron] Parse Day of Week" cron_next=$(parse_expression "${cron_day_of_week}" 6 "${cron_next_day_of_week}") if [ "${cron_next}" != "${cron_next_day_of_week}" ]; then day_of_week_difference=$(( cron_next - cron_next_day_of_week )) if [ "${day_of_week_difference}" -lt 0 ]; then day_of_week_difference=$(( day_of_week_difference + 7 )) fi cron_next_day_of_month=$(( cron_next_day_of_month + day_of_week_difference )) cron_next_hour=0 cron_next_minute=0 fi print_debug "[timer] [cron] Parse day of month" case "${cron_next_month}" in 1|3|5|7|8|10|12) last_day_of_month="31" ;; "2") local divide_by_4=$(( cron_next_year % 4 )) local divide_by_100=$(( cron_next_year % 100 )) local divide_by_400=$(( cron_next_year % 400 )) last_day_of_month=28 if [ "${divide_by_4}" = "0" ] && [ "${divide_by_100}" != "0" ]; then last_day_of_month="29" fi if [ "${divide_by_400}" = "0" ]; then last_day_of_month="29" fi ;; *) last_day_of_month="30" ;; esac cron_next=$(parse_expression "${cron_day_of_month}" 30 "${cron_next_day_of_month}") if [ "${cron_next}" != "${cron_next_day_of_month}" ]; then cron_next_hour=0 cron_next_minute=0 fi if [ "${cron_next_day_of_month}" -gt "${cron_next}" ] || [ "${cron_next_day_of_month}" -gt "${last_day_of_month}" ]; then cron_next_month=$(( cron_next_month + 1 )) if [ ${cron_next_month} -gt 12 ]; then cron_next_month=$(( cron_next_month - 12)) cron_next_year=$(( cron_next_year + 1 )) fi cron_next_day_of_month=1 else cron_next_day_of_month=$cron_next fi print_debug "[timer] [cron] Parse Next Month" cron_next=$(parse_expression "${cron_month}" 12 "${cron_next_month}") if [ "${cron_next}" != "${cron_next_month}" ]; then if [ "${cron_next}" -gt "12" ]; then cron_next_year=$(( cron_next_year + 1 )) cron_next=$(( cron_next - 12 )) fi if [ "${cron_next_month}" -gt "${cron_next}" ]; then cron_next_year=$(( cron_next_year + 1 )) fi cron_next_month="${cron_next}" cron_next_day=1 cron_next_minute=0 cron_next_hour=0 fi cron_parsed=0 done local cron_future=$(date --date="${cron_next_year}-$(printf "%02d" ${cron_next_month})-$(printf "%02d" ${cron_next_day_of_month})T$(printf "%02d" ${cron_next_hour}):$(printf "%02d" ${cron_next_minute}):00" "+%s") local cron_future_difference=$(( cron_future - cron_compare_seconds )) time_cron=true time_wait="${cron_future_difference}" time_future="${cron_future}" ;; datetime) time_begin=$(date -d "${backup_job_backup_begin}" +%s) print_debug "[timer] [datetime] BACKUP_BEGIN time = ${time_begin}" time_wait=$(( time_begin - time_current )) print_debug "[timer] [datetime] Difference in seconds: ${time_wait}" if (( ${time_wait} < 0 )); then time_wait=$(( (${time_wait} + (${backup_job_backup_interval} - 1)) / (${backup_job_backup_interval} * 60) )) time_wait=$(( ${time_wait} * -1 )) print_debug "[timer] [datetime] Difference in seconds (rounded) time_wait is in the past : ${time_wait}" fi time_future=$(( time_current + time_wait )) print_debug "[timer] [datetime] Future execution time = ${time_future}" ;; job) case "${2}" in start) backup_job_start_time=$(date +'%s') ;; stop) backup_job_finish_time=$(date +'%s') backup_job_total_time=$(( backup_job_finish_time - backup_job_start_time)) ;; esac ;; plusvalue) time_wait=$(( ${BASH_REMATCH[1]} * 60 )) time_future=$(( time_current + time_wait )) ;; time) time_future=$(date --date="$(date +"%Y%m%d") ${backup_job_backup_begin}" +"%s") if [[ "${time_future}" < "${time_current}" ]]; then time_future=$(( time_future + 24*60*60)) fi time_wait=$(( time_future - time_current )) ;; esac if var_true "${DEBUG_TIMER}" ; then debug off; fi } write_log() { if var_true "${DEBUG_WRITE_LOG}" ; then debug on; fi output_off local _arg_log_level=${1} shift 1 local _arg_log_message="$@" if [ -n "${backup_job_db_type}" ] && [ -n "${backup_job_db_name}" ] && [ -n "${backup_job_db_host}" ] && [ -n "${backup_job_filename_base}" ]; then case "${_arg_log_level,,}" in debug ) case "${_arg_log_level,,}" in "debug" ) echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [debug] ${_arg_log_message}" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" > /dev/null ;; esac ;; error ) case "${_arg_log_level,,}" in "debug" | "notice" | "warn" | "error") echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [error] ${_arg_log_message}" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" > /dev/null ;; esac ;; info ) echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [info] ${_arg_log_message}" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" > /dev/null ;; notice ) case "${_arg_log_level,,}" in "debug" | "notice" ) echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [notice] ${_arg_log_message}" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" > /dev/null ;; esac ;; warn ) case "${_arg_log_level,,}" in "debug" | "notice" | "warn" ) echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [warn] ${_arg_log_message}" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${backup_job_filename_base}.log" > /dev/null ;; esac ;; esac fi print_${_arg_log_level} "${_arg_log_message}" output_on if var_true "${DEBUG_WRITE_LOG}" ; then debug off; fi }