#!/command/with-contenv bash

bootstrap_filesystem() {
    if [ ! -d "${backup_job_filesystem_path}" ]; then
        mkdir -p "${backup_job_filesystem_path}"
    fi
    if [ "$(stat -c %U "${backup_job_filesystem_path}")" != "dbbackup" ] ; then chown -R dbbackup:dbbackup "${backup_job_filesystem_path}" ; fi
    if [ "$(stat -c %a "${backup_job_filesystem_path}")" != "${backup_job_filesystem_permission}" ] ; then chmod -R ${backup_job_filesystem_permission} "${backup_job_filesystem_path}" ; fi

    if [ -d "${backup_job_filesystem_archive}" ]; then
        if [ "$(stat -c %U "${backup_job_filesystem_archive}")" != "dbbackup" ] ; then chown -R dbbackup:dbbackup "${backup_job_filesystem_archive}" ; fi
        if [ "$(stat -c %a "${backup_job_filesystem_archive}")" != "${backup_job_filesystem_permission}" ] ; then chmod -R ${backup_job_filesystem_permission} "${backup_job_filesystem_archive}" ; fi
    fi

    if [ ! -d "${LOG_PATH}" ]; then
        mkdir -p "${LOG_PATH}"
    fi
    if [ "$(stat -c %U "${LOG_PATH}")" != "dbbackup" ] ; then chown dbbackup:dbbackup "${LOG_PATH}" ; fi
    if [ ! -d "${LOG_PATH}"/"$(date +'%Y%m%d')" ]; then run_as_user mkdir -p "${LOG_PATH}"/$(date +'%Y%m%d'); fi
    if [ "$(stat -c %a "${LOG_PATH}")" != "755" ] ; then chmod -R 755 "${LOG_PATH}" ; fi

    if [ ! -d "${TEMP_PATH}" ]; then
        mkdir -p "${TEMP_PATH}"
    fi
    if [ "$(stat -c %U "${TEMP_PATH}")" != "dbbackup" ] ; then chown -R dbbackup:dbbackup "${TEMP_PATH}" ; fi
}

bootstrap_variables() {
    backup_init() {
        backup_instance_number=${1}
        backup_instance_vars=$(mktemp)
        set -o posix ; set | grep -oE "^backup_job_.*=" | grep -oE ".*=" | sed "/--/d" > "${backup_instance_vars}"
        while read -r backup_instance_var ; do
            unset "$(echo "${backup_instance_var}" | cut -d = -f 1)"
        done < "${backup_instance_vars}"

        transform_file_var \
            DEFAULT_AUTH \
            DEFAULT_TYPE \
            DEFAULT_HOST \
            DEFAULT_PORT \
            DEFAULT_NAME \
            DEFAULT_NAME_EXCLUDE \
            DEFAULT_USER \
            DEFAULT_PASS \
            DEFAULT_ENCRYPT_PASSPHRASE \
            DEFAULT_ENCRYPT_PUBKEY \
            DEFAULT_MONGO_CUSTOM_URI \
            DEFAULT_MYSQL_TLS_CA_FILE \
            DEFAULT_MYSQL_TLS_CERT_FILE \
            DEFAULT_MYSQL_TLS_KEY_FILE \
            DEFAULT_S3_BUCKET \
            DEFAULT_S3_KEY_ID \
            DEFAULT_S3_KEY_SECRET \
            DEFAULT_S3_PATH \
            DEFAULT_S3_REGION \
            DEFAULT_S3_HOST \
            DEFAULT_S3_PROTOCOL \
            DEFAULT_S3_EXTRA_OPTS \
            DEFAULT_S3_CERT_CA_FILE \
            DEFAULT_BLOBXFER_STORAGE_ACCOUNT \
            DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY \
            DEFAULT_BLOBXFER_REMOTE_PATH \
            DB"${backup_instance_number}"_AUTH \
            DB"${backup_instance_number}"_TYPE \
            DB"${backup_instance_number}"_HOST \
            DB"${backup_instance_number}"_PORT \
            DB"${backup_instance_number}"_NAME \
            DB"${backup_instance_number}"_NAME_EXCLUDE \
            DB"${backup_instance_number}"_USER \
            DB"${backup_instance_number}"_PASS \
            DB"${backup_instance_number}"_ENCRYPT_PASSPHRASE \
            DB"${backup_instance_number}"_ENCRYPT_PUBKEY \
            DB"${backup_instance_number}"_MONGO_CUSTOM_URI \
            DB"${backup_instance_number}"_MYSQL_TLS_CA_FILE \
            DB"${backup_instance_number}"_MYSQL_TLS_CERT_FILE \
            DB"${backup_instance_number}"_MYSQL_TLS_KEY_FILE \
            DB"${backup_instance_number}"_S3_BUCKET \
            DB"${backup_instance_number}"_S3_KEY_ID \
            DB"${backup_instance_number}"_S3_KEY_SECRET \
            DB"${backup_instance_number}"_S3_PATH \
            DB"${backup_instance_number}"_S3_REGION \
            DB"${backup_instance_number}"_S3_HOST \
            DB"${backup_instance_number}"_S3_PROTOCOL \
            DB"${backup_instance_number}"_S3_EXTRA_OPTS \
            DB"${backup_instance_number}"_S3_CERT_CA_FILE \
            DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT \
            DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \
            DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \
            BLOBXFER_STORAGE_ACCOUNT \
            BLOBXFER_STORAGE_KEY \
            DB_HOST \
            DB_NAME \
            DB_PORT \
            DB_USER \
            DB_PASS \
            MONGO_CUSTOM_URI \
            DB_AUTH \
            S3_BUCKET \
            S3_KEY_ID \
            S3_KEY_SECRET \
            S3_PATH \
            S3_REGION \
            S3_HOST \
            S3_PROTOCOL \
            S3_EXTRA_OPTS ## Legacy after DEFAULT

        set -o posix ; set | grep -E "^DB${backup_instance_number}_|^DEFAULT_|^DB_|^ARCHIVE|^BACKUP_|^BLOBXFER_|^CHECKSUM|^COMPRESSION|^CREATE_|^ENABLE_|^EXTRA_|^GZ_|^INFLUX_|^MYSQL_|^MONGO_|^PARALLEL|^PRE_|^POST_|^S3|^SKIP|^SPLIT" > "${backup_instance_vars}"

        ## Legacy checks from removed variables
        if [ -n "${ENABLE_CHECKSUM}" ]; then
            print_warn "Deprecated and unsupported variable 'ENABLE_CHECKSUM' detected being used - Please upgrade your variables as they will be removed in version 4.3.0"
            if var_false "${ENABLE_CHECKSUM}" ; then
                DEFAULT_CHECKSUM=NONE
            fi
        fi

        if [ -n "${DB_DUMP_BEGIN}" ]; then
            print_warn "Deprecated and unsupported variable 'DB_DUMP_BEGIN' detected being used - Please upgrade your variables as they will be removed in version 4.3.0"
            dnow_date=$(run_as_user date +"%Y-%m-%d")
            DEFAULT_BACKUP_BEGIN=${DB_DUMP_BEGIN}
        fi

        if [ -n "${DB_DUMP_FREQ}" ]; then
            print_warn "Deprecated and unsupported variable 'DB_DUMP_FREQ' detected being used - Please upgrade your variables as they will be removed in version 4.3.0"
            dnow_date=$(run_as_user date +"%Y-%m-%d")
            DEFAULT_BACKUP_INTERVAL=${DB_DUMP_FREQ}
        fi

        if [ -n "${DB_DUMP_TARGET}" ]; then
            print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET' detected being used - Please upgrade your variables as they will be removed in version 4.3.0"
            DEFAULT_FILESYSTEM_PATH="${DB_DUMP_TARGET}"
        fi

        if [ -n "${DB_DUMP_TARGET_ARCHIVE}" ]; then
            print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET_ARCHIVE' detected being used - Please upgrade your variables as they will be removed in version 4.3.0"
            DEFAULT_FILESYSTEM_ARCHIVE_PATH="${DB_DUMP_TARGET_ARCHIVE}"
        fi

        if [ -n "${EXTRA_DUMP_OPTS}" ]; then
            print_warn "Deprecated and unsupported variable 'EXTRA_DUMP_OPTS' detected being used - Please upgrade your variables as they will be removed in version 4.3.0"
            DEFAULT_EXTRA_BACKUP_OPTS="${EXTRA_DUMP_OPTS}"
        fi

        ##
        transform_backup_instance_variable() {
            if grep -q "^DB${1}_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
                export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)"
            elif grep -q "^DB_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
                # Allow old legacy work, perhaps remove old DB_ functionality in future?
                # This should allow for seamless upgrades
                #print_warn "Legacy Variable 'DB_${2}' detected being used - Please upgrade your variables as they will be removed in version 4.3.0"
                export "$3"="$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2)"
            elif grep -q "^${2}=" "${backup_instance_vars}" && [ "$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
                print_warn "Legacy Variable '${2}' detected being used - Please upgrade your variables as they will be removed in version 4.3.0"
                export "$3"="$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2)"
            elif grep -q "^DEFAULT_${2}=" "${backup_instance_vars}" && [ "$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
                export "$3"="$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2)"
            fi
        }

        transform_backup_instance_variable "${backup_instance_number}" ARCHIVE_TIME backup_job_archive_time
        transform_backup_instance_variable "${backup_instance_number}" AUTH backup_job_db_auth
        transform_backup_instance_variable "${backup_instance_number}" BACKUP_BEGIN backup_job_backup_begin
        transform_backup_instance_variable "${backup_instance_number}" BACKUP_INTERVAL backup_job_backup_interval
        transform_backup_instance_variable "${backup_instance_number}" BACKUP_LOCATION backup_job_backup_location
        transform_backup_instance_variable "${backup_instance_number}" BLACKOUT_BEGIN backup_job_snapshot_blackout_start
        transform_backup_instance_variable "${backup_instance_number}" BLACKOUT_END backup_job_snapshot_blackout_finish
        transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path
        transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account
        transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_KEY backup_job_blobxfer_storage_key
        transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum
        transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time
        transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression
        transform_backup_instance_variable "${backup_instance_number}" COMPRESSION_LEVEL backup_job_compression_level
        transform_backup_instance_variable "${backup_instance_number}" CREATE_LATEST_SYMLINK backup_job_create_latest_symlink
        transform_backup_instance_variable "${backup_instance_number}" ENABLE_PARALLEL_COMPRESSION backup_job_parallel_compression
        transform_backup_instance_variable "${backup_instance_number}" ENCRYPT backup_job_encrypt
        transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PASSPHRASE backup_job_encrypt_passphrase
        transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBKEY backup_job_encrypt_pubkey
        transform_backup_instance_variable "${backup_instance_number}" EXTRA_DUMP_OPTS backup_job_extra_dump_opts
        transform_backup_instance_variable "${backup_instance_number}" EXTRA_ENUMERATION_OPTS backup_job_extra_enumeration_opts
        transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts
        transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path
        transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH backup_job_filesystem_path
        transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PERMISSION backup_job_filesystem_permission
        transform_backup_instance_variable "${backup_instance_number}" GZ_RSYNCABLE backup_job_gz_rsyncable
        transform_backup_instance_variable "${backup_instance_number}" HOST backup_job_db_host
        transform_backup_instance_variable "${backup_instance_number}" INFLUX_VERSION backup_job_influx_version
        transform_backup_instance_variable "${backup_instance_number}" LOG_LEVEL backup_job_log_level
        transform_backup_instance_variable "${backup_instance_number}" MONGO_CUSTOM_URI backup_job_mongo_custom_uri
        transform_backup_instance_variable "${backup_instance_number}" MYSQL_ENABLE_TLS backup_job_mysql_enable_tls
        transform_backup_instance_variable "${backup_instance_number}" MYSQL_EVENTS backup_job_mysql_events
        transform_backup_instance_variable "${backup_instance_number}" MYSQL_MAX_ALLOWED_PACKET backup_job_mysql_max_allowed_packet
        transform_backup_instance_variable "${backup_instance_number}" MYSQL_SINGLE_TRANSACTION backup_job_mysql_single_transaction
        transform_backup_instance_variable "${backup_instance_number}" MYSQL_STORED_PROCEDURES backup_job_mysql_stored_procedures
        transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_CA_FILE backup_job_mysql_tls_ca_file
        transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_CERT_FILE backup_job_mysql_tls_cert_file
        transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_KEY_FILE backup_job_mysql_tls_key_file
        transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_VERIFY backup_job_mysql_tls_verify
        transform_backup_instance_variable "${backup_instance_number}" MYSQL_TLS_VERSION backup_job_mysql_tls_version
        transform_backup_instance_variable "${backup_instance_number}" MSSQL_MODE backup_job_mssql_mode
        transform_backup_instance_variable "${backup_instance_number}" NAME backup_job_db_name
        transform_backup_instance_variable "${backup_instance_number}" NAME_EXCLUDE backup_job_db_name_exclude
        transform_backup_instance_variable "${backup_instance_number}" PARALLEL_COMPRESSION_THREADS backup_job_parallel_compression_threads
        transform_backup_instance_variable "${backup_instance_number}" PASS backup_job_db_pass
        transform_backup_instance_variable "${backup_instance_number}" PORT backup_job_db_port
        transform_backup_instance_variable "${backup_instance_number}" POST_SCRIPT_X_VERIFY backup_job_post_script_x_verify
        transform_backup_instance_variable "${backup_instance_number}" POST_SCRIPT backup_job_post_script
        transform_backup_instance_variable "${backup_instance_number}" PRE_SCRIPT backup_job_pre_script
        transform_backup_instance_variable "${backup_instance_number}" PRE_SCRIPT_X_VERIFY backup_job_pre_script_x_verify
        transform_backup_instance_variable "${backup_instance_number}" RESOURCE_OPTIMIZED backup_job_resource_optimized
        transform_backup_instance_variable "${backup_instance_number}" S3_BUCKET backup_job_s3_bucket
        transform_backup_instance_variable "${backup_instance_number}" S3_CERT_CA_FILE backup_job_s3_cert_ca_file
        transform_backup_instance_variable "${backup_instance_number}" S3_CERT_SKIP_VERIFY backup_job_s3_cert_skip_verify
        transform_backup_instance_variable "${backup_instance_number}" S3_EXTRA_OPTS backup_job_s3_extra_opts
        transform_backup_instance_variable "${backup_instance_number}" S3_HOST backup_job_s3_host
        transform_backup_instance_variable "${backup_instance_number}" S3_KEY_ID backup_job_s3_key_id
        transform_backup_instance_variable "${backup_instance_number}" S3_KEY_SECRET backup_job_s3_key_secret
        transform_backup_instance_variable "${backup_instance_number}" S3_PATH backup_job_s3_path
        transform_backup_instance_variable "${backup_instance_number}" S3_PROTOCOL backup_job_s3_protocol
        transform_backup_instance_variable "${backup_instance_number}" S3_REGION backup_job_s3_region
        transform_backup_instance_variable "${backup_instance_number}" SCRIPT_LOCATION_POST backup_job_script_location_post
        transform_backup_instance_variable "${backup_instance_number}" SCRIPT_LOCATION_PRE backup_job_script_location_pre
        transform_backup_instance_variable "${backup_instance_number}" SIZE_VALUE backup_job_size_value
        transform_backup_instance_variable "${backup_instance_number}" SKIP_AVAILABILITY_CHECK backup_job_skip_availability_check
        transform_backup_instance_variable "${backup_instance_number}" SPLIT_DB backup_job_split_db
        transform_backup_instance_variable "${backup_instance_number}" TYPE backup_job_db_type
        transform_backup_instance_variable "${backup_instance_number}" USER backup_job_db_user

        backup_job_backup_begin=$(echo "${backup_job_backup_begin}" | sed -e "s|'||g" -e 's|"||g')
    }

    upgrade_lonely_variables() {
        upgrade_lonely_variables_tmp=$(mktemp)
        set -o posix; set | grep "^$1" | grep -v "^$1[0-9]._" > "${upgrade_lonely_variables_tmp}"
        while read -r exist_var ; do
            if [ ! -v "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2)" ] ; then
                export "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2-9)"="$(echo "${exist_var}" | cut -d = -f2)"
            else
                print_error "Variable ${1}01_$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2) already exists, fix your variables and start again.."
                exit 1
            fi
        done < "${upgrade_lonely_variables_tmp}"
        rm -rf "${upgrade_lonely_variables_tmp}"
    }

    parse_variables() {
        local v_instance=${1}

        check_var() {
            ## Check if Variable is Defined
            ## Usage: check_var transformed_varname real_varname "Description"
            output_off
            print_debug "Looking for existence of $2 environment variable"
            if [ ! -v "$1" ]; then
                print_error "No '$3' Entered! - Set '\$$2' environment variable - Halting Backup Number ${v_instance}"
                s6-svc -d /var/run/s6/legacy-services/dbbackup-${v_instance}
                exit 1
            fi
            output_on
        }

        check_var backup_job_db_type DB${v_instance}_TYPE "appropriate database type"

        case "${backup_job_db_type,,}" in
            couch* )
                dbtype=couch
                backup_job_db_port=${backup_job_db_port:-5984}
                check_var backup_job_db_user DB${v_instance}_USER "database username"
                check_var backup_job_db_pass DB${v_instance}_PASS "database password"
            ;;
            influx* )
                dbtype=influx
                case "${backup_job_influx_version}" in
                    1) backup_job_db_port=${backup_job_db_port:-8088} ;;
                    2) backup_job_db_port=${backup_job_db_port:-8086} ;;
                esac
                check_var backup_job_db_user DB${v_instance}_USER "database username"
                check_var backup_job_db_pass DB${v_instance}_PASS "database password"
                check_var backup_job_influx_version DB${v_instance}_INFLUX_VERSION "InfluxDB version you are backing up from"
            ;;
            mongo* )
                dbtype=mongo
                if [ -n "${backup_job_mongo_custom_uri}" ] ; then
                    mongo_uri_proto=$(echo "${backup_job_mongo_custom_uri}" | grep :// | sed -e's,^\(.*://\).*,\1,g')
                    mongo_uri_scratch="${backup_job_mongo_custom_uri/${mongo_uri_proto}/}"
                    mongo_uri_username_password="$(echo "${mongo_uri_scratch}" | grep @ | rev | cut -d@ -f2- | rev)"
                    if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch="$(echo "${mongo_uri_scratch}" | rev | cut -d@ -f1 | rev)" ; fi
                    mongo_uri_port="$(echo "${mongo_uri_scratch}" | grep : | rev | cut -d: -f2- | rev)"
                    if [ -n "${mongo_uri_port}" ]; then mongo_uri_port="$(echo "${mongo_uri_scratch}" | rev | cut -d: -f1 | cut -d/ -f2 | rev)" ; fi
                    mongo_uri_hostname="$(echo "${mongo_uri_scratch}" | cut -d/ -f1 | cut -d: -f1 )"
                    mongo_uri_database="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f1 )"
                    mongo_uri_options="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f2 )"
                    backup_job_db_name=${backup_job_db_name:-"${mongo_uri_database,,}"}
                    backup_job_db_host=${backup_job_db_host:-"${mongo_uri_hostname,,}"}
                else
                    backup_job_db_port=${backup_job_db_port:-27017}
                    [[ ( -n "${backup_job_db_user}" ) ]] && MONGO_USER_STR=" --username ${backup_job_db_user}"
                    [[ ( -n "${backup_job_db_pass}" ) ]] && MONGO_PASS_STR=" --password ${backup_job_db_pass}"
                    [[ ( -n "${backup_job_db_name}" ) ]] && MONGO_DB_STR=" --db ${backup_job_db_name}"
                    [[ ( -n "${backup_job_db_auth}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${backup_job_db_auth}"
                fi
            ;;
            "mysql" | "mariadb" )
                dbtype=mysql
                backup_job_db_port=${backup_job_db_port:-3306}
                check_var backup_job_db_name DB${v_instance}_NAME "database name. Separate multiple with commas"
                if [ -n "${backup_job_db_pass}" ] ; then export MYSQL_PWD=${backup_job_db_pass} ; fi
                if var_true "${backup_job_mysql_enable_tls}" ; then
                    if [ -n "${backup_job_mysql_tls_ca_file}" ] ; then
                        mysql_tls_args="--ssl_ca=${backup_job_mysql_tls_ca_file}"
                    fi
                    if [ -n "${backup_job_mysql_tls_cert_file}" ] ; then
                        mysql_tls_args="${mysql_tls_args} --ssl_cert=${backup_job_mysql_tls_cert_file}"
                    fi
                    if [ -n "${backup_job_mysql_tls_key_file}" ] ; then
                        mysql_tls_args="${mysql_tls_args} --ssl_key=${backup_job_mysql_tls_key_file}"
                    fi
                    if var_true "${backup_job_mysql_tls_verify}" ; then
                        mysql_tls_args="${mysql_tls_args} --ssl-verify-server-cert"
                    fi
                    if [ -n "${backup_job_mysql_tls_version}" ] ; then
                        mysql_tls_args="${mysql_tls_args} --tls_version=${backup_job_mysql_tls_version}"
                    fi
                fi
            ;;
            "mssql" | "microsoftsql" )
                apkArch="$(apk --print-arch)"
                case "$apkArch" in
                    x86_64) mssql=true ;;
                    *)
                        write_log error "MSSQL cannot operate on $apkArch processor!"
                        exit 1
                    ;;
                esac
                dbtype=mssql
                backup_job_db_port=${backup_job_db_port:-1433}
            ;;
            postgres* | "pgsql" )
                dbtype=pgsql
                backup_job_db_port=${backup_job_db_port:-5432}
                [[ ( -n "${backup_job_db_pass}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${backup_job_db_pass}"
                check_var backup_job_db_name DB${v_instance}_NAME "database name. Separate multiple with commas"
            ;;
            "redis" )
                dbtype=redis
                backup_job_db_port=${backup_job_db_port:-6379}
                [[ ( -n "${backup_job_db_pass}" ) ]] && REDIS_PASS_STR=" -a ${backup_job_db_pass}"
            ;;
            sqlite* )
                dbtype=sqlite3
            ;;
            * )
                write_log error "I don't recognize 'DB${v_instance}_TYPE=${backup_job_db_type}' - Exiting.."
                exit 99
            ;;
        esac

        if var_true "${backup_job_resource_optimized}" ; then play_fair="nice -19 ionice -c2" ; fi
    }

    case "${1}" in
        backup_init ) backup_init "$2" ;;
        parse_variables) parse_variables "$2" ;;
        upgrade ) upgrade_lonely_variables "$2" ;;
    esac
}

backup_couch() {
    prepare_dbbackup
    target=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt
    ltarget=couch_${backup_job_db_name}_${backup_job_db_host#*//}
    compression
    pre_dbbackup ${backup_job_db_name}
    write_log notice "Dumping CouchDB database: '${backup_job_db_name}' ${compression_string}"
    run_as_user curl -sSL -X GET ${backup_job_db_host}:${backup_job_db_port}/${backup_job_db_name}/_all_docs?include_docs=true | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${target}" > /dev/null
    exit_code=$?
    check_exit_code backup $target
    timer backup finish
    file_encryption
    generate_checksum
    move_dbbackup
    check_exit_code move $target
    post_dbbackup ${backup_job_db_name}
}

backup_influx() {
    if [ "${backup_job_db_name,,}" = "all" ] ; then
        write_log debug "Preparing to back up everything"
        db_names=justbackupeverything
    else
        db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
    fi

    case "${backup_job_influx_version,,}" in
        1 )
            for db in ${db_names}; do
                prepare_dbbackup
                if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
                target=influx_${db}_${backup_job_db_host#*//}_${now}
                ltarget=influx_${db}_${backup_job_db_host#*//}
                compression
                pre_dbbackup $db
                write_log notice "Dumping Influx database: '${db}'"
                run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} "${TEMP_PATH}"/"${target_dir}"
                exit_code=$?
                check_exit_code backup $target_dir
                write_log notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
                run_as_user tar cf - "${TEMP_PATH}"/"${target_dir}" | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${target_dir}".tar"${extension}" > /dev/null
                target=influx_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
                ltarget=influx_${db}_${backup_job_db_host#*//}
                timer backup finish
                file_encryption
                generate_checksum
                move_dbbackup
                check_exit_code move $target_dir
                post_dbbackup $db
            done
        ;;
        2 )
            for db in ${db_names}; do
                prepare_dbbackup
                if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
                target=influx2_${db}_${backup_job_db_host#*//}_${now}
                ltarget=influx2_${db}_${backup_job_db_host#*//}
                compression
                pre_dbbackup $db
                write_log notice "Dumping Influx2 database: '${db}'"
                run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --compression none "${TEMP_PATH}"/"${target_dir}"
                exit_code=$?
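                # The influx v2 'backup' command writes a directory of shard and manifest files
                # rather than a single dump file, so create_archive (called just below) tars
                # ${TEMP_PATH}/${target_dir} into one artifact before checksum, optional
                # encryption, and transfer.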
                check_exit_code backup $target_dir
                create_archive
                target=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
                ltarget=influx2_${db}_${backup_job_db_host#*//}
                timer backup finish
                file_encryption
                generate_checksum
                move_dbbackup
                check_exit_code move $target_dir
                post_dbbackup $db
            done
        ;;
    esac
}

backup_mongo() {
    prepare_dbbackup
    ## TODO REMOVE ENABLE_COMPRESSION
    if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
        target=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.archive
        ltarget=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}
    else
        target=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.archive.gz
        ltarget=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}
        mongo_compression="--gzip"
        compression_string="and compressing with gzip"
    fi

    if [ -n "${backup_job_mongo_custom_uri}" ] ; then
        mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
    else
        mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
    fi

    pre_dbbackup "${backup_job_db_name}"
    write_log notice "Dumping MongoDB database: '${backup_job_db_name}' ${compression_string}"
    silent run_as_user ${play_fair} mongodump --archive=${TEMP_PATH}/${target} ${mongo_compression} ${mongo_backup_parameter}
    exit_code=$?
    check_exit_code backup $target
    timer backup finish
    file_encryption
    generate_checksum
    move_dbbackup
    check_exit_code move $target
    post_dbbackup "${backup_job_db_name}"
}

backup_mssql() {
    case "${backup_job_mssql_mode,,}" in
        db|database )
            prepare_dbbackup
            target=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.bak
            ltarget=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}
            pre_dbbackup "${backup_job_db_name}"
            write_log notice "Dumping MSSQL database: '${backup_job_db_name}'"
            silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP DATABASE [${backup_job_db_name}] TO DISK = N'${TEMP_PATH}/${target}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
            exit_code=$?
            target_original=${target}
            compression
            pre_dbbackup all
            run_as_user ${compress_cmd} "${TEMP_PATH}/${target_original}"
            check_exit_code backup $target
            timer backup finish
            file_encryption
            generate_checksum
            move_dbbackup
            check_exit_code move $target
            post_dbbackup "${backup_job_db_name}"
        ;;
        trn|transaction )
            prepare_dbbackup
            target=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.trn
            ltarget=mssql_${backup_job_db_name,,}_trn_${backup_job_db_host,,}
            pre_dbbackup "${backup_job_db_name}"
            write_log notice "Dumping MSSQL database: '${backup_job_db_name}'"
            silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP LOG [${backup_job_db_name}] TO DISK = N'${TEMP_PATH}/${target}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-log', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
            exit_code=$?
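            # As in the database mode above, the BACKUP LOG statement writes the .trn file
            # directly to disk, so it is compressed in place afterwards rather than being
            # streamed through a pipe like the other engines.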
            target_original=${target}
            compression
            pre_dbbackup all
            run_as_user ${compress_cmd} "${TEMP_PATH}/${target_original}"
            check_exit_code backup $target
            file_encryption
            timer backup finish
            generate_checksum
            move_dbbackup
            check_exit_code move $target
            post_dbbackup "${backup_job_db_name}"
        ;;
    esac
}

backup_mysql() {
    if var_true "${backup_job_mysql_events}" ; then
        events="--events"
    fi

    if var_true "${backup_job_mysql_single_transaction}" ; then
        single_transaction="--single-transaction"
    fi

    if var_true "${backup_job_mysql_stored_procedures}" ; then
        stored_procedures="--routines"
    fi

    if [ "${backup_job_db_name,,}" = "all" ] ; then
        write_log debug "Preparing to back up everything except for information_schema and _* prefixes"
        db_names=$(run_as_user mysql -h ${backup_job_db_host} -P $backup_job_db_port -u$backup_job_db_user ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
        if [ -n "${backup_job_db_name_exclude}" ] ; then
            db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n')
            for db_exclude in ${db_names_exclusions} ; do
                write_log debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
                db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
            done
        fi
    else
        db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
    fi

    write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"

    if var_true "${backup_job_split_db}" ; then
        for db in ${db_names} ; do
            prepare_dbbackup
            target=mysql_${db}_${backup_job_db_host,,}_${now}.sql
            ltarget=mysql_${db}_${backup_job_db_host,,}
            compression
            pre_dbbackup $db
            write_log notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
            run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} $db | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${target}" > /dev/null
            exit_code=$?
            check_exit_code backup $target
            timer backup finish
            file_encryption
            generate_checksum
            move_dbbackup
            check_exit_code move $target
            post_dbbackup $db
        done
    else
        write_log debug "Not splitting database dumps into their own files"
        prepare_dbbackup
        target=mysql_all_${backup_job_db_host,,}_${now}.sql
        ltarget=mysql_all_${backup_job_db_host,,}
        compression
        pre_dbbackup all
        write_log notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
        run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${target}" > /dev/null
        exit_code=$?
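        # Note: with the dump piped through ${compress_cmd} and tee, $? reflects the last command
        # in the pipeline, so a mysqldump failure only surfaces via this exit code if 'pipefail'
        # is enabled elsewhere in the environment.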
        check_exit_code backup $target
        timer backup finish
        file_encryption
        generate_checksum
        move_dbbackup
        check_exit_code move $target
        post_dbbackup all
    fi
}

backup_pgsql() {
    export PGPASSWORD=${backup_job_db_pass}
    if [ -n "${backup_job_db_auth}" ] ; then
        authdb=${backup_job_db_auth}
    else
        authdb=${backup_job_db_user}
    fi

    if [ "${backup_job_db_name,,}" = "all" ] ; then
        write_log debug "Preparing to back up all databases"
        db_names=$(run_as_user psql -h ${backup_job_db_host} -U ${backup_job_db_user} -p ${backup_job_db_port} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
        if [ -n "${backup_job_db_name_exclude}" ] ; then
            db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n')
            for db_exclude in ${db_names_exclusions} ; do
                write_log debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
                db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
            done
        fi
    else
        db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
    fi

    write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"

    if var_true "${backup_job_split_db}" ; then
        for db in ${db_names} ; do
            prepare_dbbackup
            target=pgsql_${db}_${backup_job_db_host,,}_${now}.sql
            ltarget=pgsql_${db}_${backup_job_db_host,,}
            compression
            pre_dbbackup $db
            write_log notice "Dumping PostgreSQL database: '${db}' ${compression_string}"
            run_as_user ${play_fair} pg_dump -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} $db ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${target}" > /dev/null
            exit_code=$?
            check_exit_code backup $target
            timer backup finish
            file_encryption
            generate_checksum
            move_dbbackup
            check_exit_code move $target
            post_dbbackup $db
        done

        prepare_dbbackup
        target=pgsql_globals_${backup_job_db_host,,}_${now}.sql
        compression
        pre_dbbackup "globals"
        print_notice "Dumping PostgreSQL globals: with 'pg_dumpall -g' ${compression_string}"
        run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -U ${backup_job_db_user} -p ${backup_job_db_port} -g ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${target}" > /dev/null
        exit_code=$?
        check_exit_code backup $target
        timer backup finish
        file_encryption
        generate_checksum
        move_dbbackup
        check_exit_code move $target
        post_dbbackup "globals"
    else
        write_log debug "Not splitting database dumps into their own files"
        prepare_dbbackup
        target=pgsql_all_${backup_job_db_host,,}_${now}.sql
        ltarget=pgsql_all_${backup_job_db_host,,}
        compression
        pre_dbbackup all
        write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
        tmp_db_names=$(run_as_user psql -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
        for r_db_name in $(echo $db_names | xargs); do
            tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
        done
        sleep 5
        for x_db_name in ${tmp_db_names} ; do
            pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
        done
        run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${target}" > /dev/null
        exit_code=$?
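        # ${pgexclude_arg} inverts the requested DB_NAME list into --exclude-database flags, so
        # the single pg_dumpall run emits only the selected databases plus the cluster globals.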
        check_exit_code backup $target
        timer backup finish
        file_encryption
        generate_checksum
        move_dbbackup
        check_exit_code move $target
        post_dbbackup all
    fi
}

backup_redis() {
    prepare_dbbackup
    write_log notice "Dumping Redis - Flushing Redis Cache First"
    target=redis_all_${backup_job_db_host,,}_${now}.rdb
    ltarget=redis_${backup_job_db_host,,}
    echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${TEMP_PATH}/${target} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}
    sleep 10
    try=5
    while [ $try -gt 0 ] ; do
        saved=$(echo 'info Persistence' | redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
        ok=$(echo 'info Persistence' | redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
        if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
            write_log notice "Redis Backup Complete"
            exit_code=0
            break
        fi
        try=$((try - 1))
        write_log warn "Redis Busy - Waiting and retrying in 5 seconds"
        sleep 5
    done
    target_original=${target}
    compression
    pre_dbbackup all
    run_as_user ${compress_cmd} "${TEMP_PATH}/${target_original}"
    timer backup finish
    check_exit_code backup $target
    file_encryption
    generate_checksum
    move_dbbackup
    check_exit_code move $target
    post_dbbackup all
}

backup_sqlite3() {
    prepare_dbbackup
    db=$(basename "${backup_job_db_host}")
    db="${db%.*}"
    target=sqlite3_${db}_${now}.sqlite3
    ltarget=sqlite3_${db}.sqlite3
    compression
    pre_dbbackup $db
    write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}"
    silent run_as_user ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${TEMP_PATH}/backup.sqlite3'"
    exit_code=$?
    check_exit_code backup $target
    run_as_user ${play_fair} cat "${TEMP_PATH}"/backup.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}/${target}" > /dev/null
    timer backup finish
    file_encryption
    generate_checksum
    move_dbbackup
    check_exit_code move $target
    post_dbbackup $db
}

check_availability() {
    ### Set the Database Type
    if var_false "${backup_job_skip_availability_check}" ; then
        case "${dbtype}" in
            "couch" )
                counter=0
                code_received=0
                while [ "${code_received}" != "200" ]; do
                    code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${backup_job_db_host}:${backup_job_db_port})
                    if [ "${code_received}" = "200" ] ; then break ; fi
                    sleep 5
                    (( counter+=5 ))
                    write_log warn "CouchDB Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)"
                done
            ;;
            "influx" )
                counter=0
                case "${backup_job_influx_version,,}" in
                    1 )
                        while ! (run_as_user nc -z ${backup_job_db_host#*//} ${backup_job_db_port}) ; do
                            sleep 5
                            (( counter+=5 ))
                            write_log warn "InfluxDB Host '${backup_job_db_host#*//}' is not accessible, retrying.. ($counter seconds so far)"
                        done
                    ;;
                    2 )
                        code_received=0
                        while [ "${code_received}" != "200" ]; do
                            code_received=$(run_as_user curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${backup_job_db_host}:${backup_job_db_port}/health)
                            if [ "${code_received}" = "200" ] ; then break ; fi
                            sleep 5
                            (( counter+=5 ))
                            write_log warn "InfluxDB Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)"
                        done
                    ;;
                esac
            ;;
            "mongo" )
                if [ -n "${backup_job_mongo_custom_uri}" ] ; then
                    write_log debug "Skipping Connectivity Check"
                else
                    counter=0
                    while ! (run_as_user nc -z ${backup_job_db_host} ${backup_job_db_port}) ; do
                        sleep 5
                        (( counter+=5 ))
                        write_log warn "Mongo Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)"
                    done
                fi
            ;;
            "mysql" )
                counter=0
                export MYSQL_PWD=${backup_job_db_pass}
                while ! (run_as_user mysqladmin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
                    sleep 5
                    (( counter+=5 ))
                    write_log warn "MySQL/MariaDB Server '${backup_job_db_host}' is not accessible, retrying.. (${counter} seconds so far)"
                done
            ;;
            "mssql" )
                counter=0
                while ! (run_as_user nc -z ${backup_job_db_host} ${backup_job_db_port}) ; do
                    sleep 5
                    (( counter+=5 ))
                    write_log warn "MSSQL Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)"
                done
            ;;
            "pgsql" )
                counter=0
                until run_as_user pg_isready --host=${backup_job_db_host} --port=${backup_job_db_port} -q ; do
                    sleep 5
                    (( counter+=5 ))
                    write_log warn "Postgres Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)"
                done
            ;;
            "redis" )
                counter=0
                while ! (run_as_user nc -z "${backup_job_db_host}" "${backup_job_db_port}") ; do
                    sleep 5
                    (( counter+=5 ))
                    write_log warn "Redis Host '${backup_job_db_host}' is not accessible, retrying.. ($counter seconds so far)"
                done
            ;;
            "sqlite3" )
                if [[ ! -e "${backup_job_db_host}" ]]; then
                    write_log error "File '${backup_job_db_host}' does not exist."
                    exit_code=2
                    exit $exit_code
                elif [[ ! -f "${backup_job_db_host}" ]]; then
                    write_log error "File '${backup_job_db_host}' is not a file."
                    exit_code=2
                    exit $exit_code
                elif [[ ! -r "${backup_job_db_host}" ]]; then
                    write_log error "File '${backup_job_db_host}' is not readable."
                    exit_code=2
                    exit $exit_code
                fi
            ;;
        esac
    fi
}

check_exit_code() {
    write_log debug "DB Backup Exit Code is ${exit_code}"
    case "${1}" in
        backup )
            case "${exit_code}" in
                0 ) write_log info "DB Backup of '${2}' completed successfully" ;;
                * )
                    write_log error "DB Backup of '${2}' reported errors"
                    notify \
                        "$(date -d @"${backup_job_start_time}" +'%Y%m%d_%H%M%S')" \
                        "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${ltarget}.log" \
                        "${exit_code}" \
                        "[FATAL] Host: ${backup_job_db_host} Name: ${backup_job_db_name} - Backup failed completely" \
                        "DB Backup is failing to backup the '${backup_job_db_host}-${backup_job_db_name}' job."
                    master_exit_code=1
                ;;
            esac
        ;;
        move )
            case "${move_exit_code}" in
                0 ) write_log debug "Moving of backup '${2}' completed successfully" ;;
                * )
                    write_log error "Moving of backup '${2}' reported errors"
                    notify \
                        "$(date -d @"${backup_job_start_time}" +'%Y%m%d_%H%M%S')" \
                        "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${ltarget}.log" \
                        "${exit_code}" \
                        "[FATAL] Host: ${backup_job_db_host} Name: ${backup_job_db_name} - Backup failed to move to destination" \
                        "DB Backup is failing to backup the '${backup_job_db_host}-${backup_job_db_name}' job."
                    master_exit_code=1
                ;;
            esac
        ;;
    esac
}

cleanup_old_data() {
    if [ -n "${backup_job_cleanup_time}" ]; then
        if [ "${master_exit_code}" != 1 ]; then
            case "${backup_job_backup_location,,}" in
                "blobxfer" )
                    write_log info "Cleaning up old backups on filesystem"
                    run_as_user mkdir -p "${backup_job_filesystem_path}"
                    find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${ltarget}*" -exec rm -f {} \;
                    write_log info "Syncing changes via blobxfer"
                    silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path} --delete --delete-only
                ;;
                "file" | "filesystem" )
                    write_log info "Cleaning up old backups on filesystem"
                    run_as_user mkdir -p "${backup_job_filesystem_path}"
                    run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${ltarget}*" -exec rm -f {} \;
                ;;
                "s3" | "minio" )
                    write_log info "Cleaning up old backups on S3 storage"
                    run_as_user aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${backup_job_s3_bucket}/${backup_job_s3_path}/ ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts} | grep " DIR " -v | grep " PRE " -v | while read -r s3_file; do
                        s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
                        s3_createdate=$(date -d "$s3_createdate" "+%s")
                        s3_olderthan=$(echo $(( $(date +%s)-${backup_job_cleanup_time}*60 )))
                        if [[ $s3_createdate -le $s3_olderthan ]] ; then
                            s3_filename=$(echo $s3_file | awk {'print $4'})
                            if [ "$s3_filename" != "" ] ; then
                                write_log debug "Deleting $s3_filename"
                                run_as_user aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
                            fi
                        fi
                    done
                ;;
            esac
        else
            write_log error "Skipping Cleaning up old backups because there were errors in backing up"
        fi
    fi
}

compression() {
    if var_false "${backup_job_parallel_compression}" ; then
        backup_job_parallel_compression_threads=1
    fi

    if var_true "${backup_job_gz_rsyncable}" ; then
        gz_rsyncable=--rsyncable
    fi

    case "${backup_job_compression,,}" in
        bz* )
            compress_cmd="${play_fair} pbzip2 -q -${backup_job_compression_level} -p${backup_job_parallel_compression_threads} "
            compression_type="bzip2"
            dir_compress_cmd=${compress_cmd}
            extension=".bz2"
            target_dir=${target}
            target=${target}.bz2
        ;;
        gz* )
            compress_cmd="${play_fair} pigz -q -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} ${gz_rsyncable}"
            compression_type="gzip"
            extension=".gz"
            dir_compress_cmd=${compress_cmd}
            target_dir=${target}
            target=${target}.gz
        ;;
        xz* )
            compress_cmd="${play_fair} pixz -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} "
            compression_type="xzip"
            dir_compress_cmd=${compress_cmd}
            extension=".xz"
            target_dir=${target}
            target=${target}.xz
        ;;
        zst* )
            compress_cmd="${play_fair} zstd -q -q --rm -${backup_job_compression_level} -T${backup_job_parallel_compression_threads} ${gz_rsyncable}"
            compression_type="zstd"
            dir_compress_cmd=${compress_cmd}
            extension=".zst"
            target_dir=${target}
            target=${target}.zst
        ;;
        "none" | "false")
            compress_cmd="cat "
            compression_type="none"
            dir_compress_cmd="cat "
            target_dir=${target}
        ;;
    esac

    case "${CONTAINER_LOG_LEVEL,,}" in
        "debug" )
            if [ "${compression_type}" = "none" ] ; then
                compression_string="with '${backup_job_parallel_compression_threads}' threads"
            else
                compression_string="and compressing with '${compression_type}:${backup_job_compression_level}' with '${backup_job_parallel_compression_threads}' threads"
            fi
        ;;
        * )
            if [ "${compression_type}" != "none" ] ;
            then
                compression_string="and compressing with '${compression_type}'"
            fi
        ;;
    esac
}

create_archive() {
    if [ "${exit_code}" = "0" ] ; then
        write_log notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
        run_as_user tar cf - "${TEMP_PATH}"/"${target_dir}" | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${target_dir}".tar"${extension}" > /dev/null
    else
        write_log error "Skipping creating archive file because backup did not complete successfully"
    fi
}

create_schedulers() {
    backup() {
        bootstrap_variables upgrade BACKUP
        local backup_instances=$(printenv | sort | grep -c "^DB[0-9]._HOST")
        if [ -n "${DB_HOST}" ] && [ "${backup_instances}" -eq 0 ]; then
            backup_instances=1
            print_debug "Detected using old DB_ variables"
        fi
        for (( instance = 01; instance <= backup_instances; )) ; do
            instance=$(printf "%02d" $instance)
            cp -R /assets/dbbackup/template-dbbackup /etc/services.available/dbbackup-"${instance}"
            sed -i "s|{{BACKUP_NUMBER}}|${instance}|g" /etc/services.available/dbbackup-"${instance}"/run
            cat <<EOF > /usr/bin/backup"${instance}"-now
#!/bin/bash
source /assets/functions/00-container
PROCESS_NAME=db-backup${instance}
print_info "Starting Manual Backup for db-backup${instance}"
/var/run/s6/legacy-services/dbbackup-${instance}/run now
EOF
            chmod +x /usr/bin/backup"${instance}"-now
            if [ "${instance}" = "01" ] ; then
                touch /usr/bin/backup-now
                chmod +x /usr/bin/backup-now
                cat <<EOF > /usr/bin/backup-now
#!/bin/bash
/usr/bin/backup${instance}-now
EOF
            else
                echo "/usr/bin/backup${instance}-now" >> /usr/bin/backup-now
            fi
            instance=$(echo "${instance} +1" | bc)
        done
    }

    case "${1}" in
        backup ) backup ;;
    esac
}

ctrl_c() {
    sed -i "/^{{BACKUP_NUMBER}}/d" /tmp/.container/db-backup-backups
    symlink_log
    print_warn "User aborted"
    exit
}

db_backup_container_init() {
    rm -rf /tmp/.container/db-backup-backups
    echo "0 0 * * * /usr/sbin/logrotate_dbbackup >/dev/null 2>&1" > /assets/cron/dbbackup_logrotate
    touch /tmp/.container/db-backup-backups
}

file_encryption() {
    if var_true "${backup_job_encrypt}" ; then
        if [ "${exit_code}" = "0" ] ; then
            print_debug "Encrypting"
            output_off
            if [ -n "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_pubkey}" ]; then
                print_error "Can't encrypt as both ENCRYPT_PASSPHRASE and ENCRYPT_PUBKEY exist!"
                return
            elif [ -n "${backup_job_encrypt_passphrase}" ] && [ -z "${backup_job_encrypt_pubkey}" ]; then
                print_notice "Encrypting with GPG Passphrase"
                encrypt_routines_start_time=$(date +'%s')
                encrypt_tmp_dir=$(run_as_user mktemp -d)
                echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${TEMP_PATH}"/"${target}"
                rm -rf "${encrypt_tmp_dir}"
            elif [ -z "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_pubkey}" ]; then
                if [ -f "${backup_job_encrypt_pubkey}" ]; then
                    encrypt_routines_start_time=$(date +'%s')
                    print_notice "Encrypting with GPG Public Key"
                    encrypt_tmp_dir=$(run_as_user mktemp -d)
                    silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --recipient-file "${backup_job_encrypt_pubkey}" -e "${TEMP_PATH}"/"${target}"
                    rm -rf "${encrypt_tmp_dir}"
                fi
            fi

            if [ -f "${TEMP_PATH}"/"${target}".gpg ]; then
                rm -rf "${TEMP_PATH}"/"${target}"
                target="${target}.gpg"
                encrypt_routines_finish_time=$(date +'%s')
                encrypt_routines_total_time=$(echo $((encrypt_routines_finish_time-encrypt_routines_start_time)))
                zabbix_encrypt_time=$(cat <
 /dev/null
                chmod ${backup_job_filesystem_permission} "${target}"."${checksum_extension}"
                checksum_value=$(run_as_user cat "${target}"."${checksum_extension}" | awk '{print $1}')
                checksum_routines_finish_time=$(date +'%s')
                checksum_routines_total_time=$(echo $((checksum_routines_finish_time-checksum_routines_start_time)))
                zabbix_checksum_time=$(cat <

        cat <<EOF > /etc/services.d/99-run_forever/run
#!/bin/bash
while true; do
    sleep 86400
done
EOF
        chmod +x /etc/services.d/99-run_forever/run
    else
        if var_true "${CONTAINER_ENABLE_SCHEDULING}" ; then
            write_log error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_SCHEDULING=TRUE'"
            exit 1
        fi
        if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
            write_log error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_MONITORING=TRUE'"
            exit 1
        fi
        if var_true "${CONTAINER_ENABLE_LOGSHIPPING}" ; then
            write_log error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_LOGSHIPPING=TRUE'"
            exit 1
        fi
    fi
    fi
}

symlink_log () {
    if [ -n "${backup_job_db_type}" ] && [ -n "${backup_job_db_name}" ] && [ -n "${backup_job_db_host}" ] && [ -n "${ltarget}" ]; then
        local oldpwd=$(pwd)
        cd "${LOG_PATH}"/"$(date +'%Y%m%d')"
        ln -sf $(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${ltarget}.log ../latest-"${ltarget}".log
        cd "${oldpwd}"
    fi
}

write_log() {
    case "${1}" in
        debug )
            CONTAINER_LOG_LEVEL=DEBUG
            shift 1
            case "${backup_job_log_level,,}" in
                "debug" )
                    print_debug "$@"
                    output_off
                    if [ -n "${backup_job_db_type}" ] && [ -n "${backup_job_db_name}" ] && [ -n "${backup_job_db_host}" ] && [ -n "${ltarget}" ]; then
                        echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [debug] $@" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${ltarget}.log" > /dev/null
                    fi
                    output_on
                ;;
            esac
        ;;
        error )
            CONTAINER_LOG_LEVEL=ERROR
            shift 1
            case "${backup_job_log_level,,}" in
                "debug" | "notice" | "warn" | "error")
                    output_off
                    print_error "$@"
                    if [ -n "${backup_job_db_type}" ] && [ -n "${backup_job_db_name}" ] && [ -n "${backup_job_db_host}" ] && [ -n "${ltarget}" ]; then
                        echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [error] $@" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${ltarget}.log" > /dev/null
                    fi
                    output_on
                ;;
            esac
        ;;
        info )
            CONTAINER_LOG_LEVEL=INFO
            shift 1
            print_info "$@"
            output_off
            if [ -n "${backup_job_db_type}" ] && [ -n "${backup_job_db_name}" ] && [ -n "${backup_job_db_host}" ] && [ -n "${ltarget}" ]; then
                echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [info] $@" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${ltarget}.log" > /dev/null
            fi
            output_on
        ;;
        notice )
            CONTAINER_LOG_LEVEL=NOTICE
            shift 1
            case "${backup_job_log_level,,}" in
                "debug" | "notice" )
                    print_notice "$@"
                    output_off
                    if [ -n "${backup_job_db_type}" ] && [ -n "${backup_job_db_name}" ] && [ -n "${backup_job_db_host}" ] && [ -n "${ltarget}" ]; then
                        echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [notice] $@" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${ltarget}.log" > /dev/null
                    fi
                    output_on
                ;;
            esac
        ;;
        warn )
            CONTAINER_LOG_LEVEL=WARN
            shift 1
            case "${backup_job_log_level,,}" in
                "debug" | "notice" | "warn" )
                    print_warn "$@"
                    output_off
                    if [ -n "${backup_job_db_type}" ] && [ -n "${backup_job_db_name}" ] && [ -n "${backup_job_db_host}" ] && [ -n "${ltarget}" ]; then
                        echo "$(date +'%Y-%m-%d %H:%M:%S %Z') [warn] $@" | run_as_user tee -a "${LOG_PATH}/$(date +'%Y%m%d')/$(date -d @${backup_job_start_time} +'%Y%m%d_%H%M%S')-${ltarget}.log" > /dev/null
                    fi
                    output_on
                ;;
            esac
        ;;
    esac
}