#!/command/with-contenv bash
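### 10-db-backup: backup routines for CouchDB, InfluxDB, MongoDB, MySQL/MariaDB,
### MSSQL, PostgreSQL, Redis and SQLite3, driven entirely by environment variables.
### bootstrap_variables() below normalizes DB_TYPE into ${dbtype}, picks default ports,
### and resolves credentials via file_env. Illustrative (assumed) minimal environment:
###   DB_TYPE=mariadb DB_HOST=db DB_NAME=app DB_USER=root DB_PASS=secret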
bootstrap_variables() {
case "${dbtype,,}" in
couch* )
dbtype=couch
dbport=${DB_PORT:-5984}
file_env 'DB_USER'
file_env 'DB_PASS'
;;
influx* )
dbtype=influx
dbport=${DB_PORT:-8088}
file_env 'DB_USER'
file_env 'DB_PASS'
sanity_var INFLUX_VERSION "InfluxDB version to back up from, either '1' or '2'"
;;
mongo* )
dbtype=mongo
dbport=${DB_PORT:-27017}
[[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
;;
"mysql" | "mariadb" )
dbtype=mysql
dbport=${DB_PORT:-3306}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
"mssql" | "microsoftsql" )
apkArch="$(apk --print-arch)"
case "$apkArch" in
x86_64) mssql=true ;;
*) print_error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;;
esac
dbtype=mssql
dbport=${DB_PORT:-1433}
;;
postgres* | "pgsql" )
dbtype=pgsql
dbport=${DB_PORT:-5432}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
"redis" )
dbtype=redis
dbport=${DB_PORT:-6379}
[[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
;;
sqlite* )
dbtype=sqlite3
;;
esac
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
file_env 'S3_KEY_ID'
file_env 'S3_KEY_SECRET'
fi
### Set the Database Authentication Details
case "$dbtype" in
"mongo" )
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
[[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
;;
"mysql" )
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
;;
"postgres" )
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
;;
"redis" )
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
;;
esac
}
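### backup_couch(): dumps a single CouchDB database as JSON from the /_all_docs
### endpoint (include_docs=true), pipes it through the selected compressor, then
### checksums and moves the result. DB_NAME is expected to hold one database name.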
backup_couch() {
pre_dbbackup
target=couch_${dbname}_${dbhost#*//}_${now}.txt
compression
print_notice "Dumping CouchDB database: '${dbname}' ${compression_string}"
curl -sSL -X GET ${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup $dbname
}
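### backup_influx(): INFLUX_VERSION selects the tool: '1' uses 'influxd backup',
### '2' uses 'influx backup' where DB_USER is the organization and DB_PASS the token.
### DB_NAME=ALL backs up everything; otherwise each comma-separated name is dumped
### and archived separately. Illustrative (assumed) v2 settings:
###   DB_TYPE=influx INFLUX_VERSION=2 DB_USER=myorg DB_PASS=mytoken DB_NAME=ALL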
backup_influx() {
if [ "${dbname,,}" = "all" ] ; then
print_debug "Preparing to back up everything"
db_names=justbackupeverything
else
db_names=$(echo "${dbname}" | tr ',' '\n')
fi
case "${INFLUX_VERSION,,}" in
1 )
for db in ${db_names}; do
pre_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="-bucket $db" ; else db=all ; fi
target=influx_${db}_${dbhost#*//}_${now}
compression
print_notice "Dumping Influx database: '${db}'"
influxd backup ${influx_compression} ${bucket} -host ${dbhost}:${dbport} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
target=influx_${db}_${dbhost#*//}_${now}.tar${extension}
generate_checksum
move_dbbackup
post_dbbackup $db
done
;;
2 )
for db in ${db_names}; do
pre_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
target=influx2_${db}_${dbhost#*//}_${now}
compression
print_notice "Dumping Influx2 database: '${db}'"
influx backup --org ${dbuser} ${bucket} --host ${dbhost}:${dbport} --token ${dbpass} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
create_archive
target=influx2_${db}_${dbhost#*//}_${now}.tar${extension}
generate_checksum
move_dbbackup
post_dbbackup $db
done
;;
esac
}
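### backup_mongo(): runs mongodump into a single archive file; compression here is
### mongodump's own --gzip (anything other than NONE/FALSE enables it), not the
### external compressor used by the other backup functions.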
backup_mongo() {
pre_dbbackup
if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
target=${dbtype}_${dbname}_${dbhost}_${now}.archive
else
target=${dbtype}_${dbname}_${dbhost}_${now}.archive.gz
mongo_compression="--gzip"
compression_string="and compressing with gzip"
fi
print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup $dbname
}
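### backup_mssql(): issues a native T-SQL "BACKUP DATABASE ... TO DISK" through
### sqlcmd and ships the resulting .bak file; only supported on x86_64
### (enforced in bootstrap_variables).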
backup_mssql() {
pre_dbbackup
target=mssql_${dbname}_${dbhost}_${now}.bak
compression
print_notice "Dumping MSSQL database: '${dbname}'"
/opt/mssql-tools/bin/sqlcmd -C -S ${dbhost},${dbport} -U ${dbuser} -P ${dbpass} -Q "BACKUP DATABASE [${dbname}] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup $dbname
}
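### backup_mysql(): dumps via mysqldump. DB_NAME=ALL discovers databases on the
### server (minus DB_NAME_EXCLUDE); SPLIT_DB=TRUE writes one file per database,
### otherwise a single combined dump. Illustrative (assumed) settings:
###   DB_NAME=ALL DB_NAME_EXCLUDE=sys,test SPLIT_DB=TRUE MYSQL_SINGLE_TRANSACTION=TRUE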
backup_mysql() {
if var_true "${MYSQL_SINGLE_TRANSACTION}" ; then
single_transaction="--single-transaction"
fi
if var_true "${MYSQL_STORED_PROCEDURES}" ; then
stored_procedures="--routines"
fi
if [ "${dbname,,}" = "all" ] ; then
print_debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
print_debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
done
fi
else
db_names=$(echo "${dbname}" | tr ',' '\n')
fi
print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${SPLIT_DB}" ; then
for db in ${db_names} ; do
pre_dbbackup
target=mysql_${db}_${dbhost}_${now}.sql
compression
print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup $db
done
else
print_debug "Not splitting database dumps into their own files"
pre_dbbackup
target=mysql_all_${dbhost}_${now}.sql
compression
print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup all
fi
}
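### backup_pgsql(): dumps each database with pg_dump when SPLIT_DB is enabled,
### otherwise uses pg_dumpall and excludes any database not requested in DB_NAME
### via --exclude-database.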
backup_pgsql() {
export PGPASSWORD=${dbpass}
authdb=${DB_USER}
if [ "${dbname,,}" = "all" ] ; then
print_debug "Preparing to back up all databases"
db_names=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
print_debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
done
fi
else
db_names=$(echo "${dbname}" | tr ',' '\n')
fi
print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${SPLIT_DB}" ; then
for db in ${db_names} ; do
pre_dbbackup
target=pgsql_${db}_${dbhost}_${now}.sql
compression
print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup $db
done
else
print_debug "Not splitting database dumps into their own files"
pre_dbbackup
target=pgsql_all_${dbhost}_${now}.sql
compression
print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
tmp_db_names=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
for r_db_name in $(echo $db_names | xargs); do
tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
done
sleep 5
for x_db_name in ${tmp_db_names} ; do
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
pg_dumpall -h ${dbhost} -U ${dbuser} -p ${dbport} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup all
fi
}
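### backup_redis(): triggers a background save, polls 'INFO Persistence' until
### rdb_bgsave_in_progress clears and the last save reports ok (up to 5 retries),
### then compresses the resulting .rdb in place.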
backup_redis() {
pre_dbbackup
print_notice "Dumping Redis - Flushing Redis Cache First"
target=redis_all_${dbhost}_${now}.rdb
echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
sleep 10
try=5
while [ $try -gt 0 ] ; do
saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
print_notice "Redis Backup Complete"
break
fi
try=$((try - 1))
print_warn "Redis Busy - Waiting and retrying in 5 seconds"
sleep 5
done
target_original=${target}
compression
$compress_cmd "${TEMP_LOCATION}/${target_original}"
generate_checksum
move_dbbackup
post_dbbackup all
}
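### backup_sqlite3(): treats DB_HOST as the path to the SQLite file, takes a
### consistent copy with the sqlite3 '.backup' command, then compresses it.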
backup_sqlite3() {
pre_dbbackup
db=$(basename "$dbhost")
db="${db%.*}"
target=sqlite3_${db}_${now}.sqlite3
compression
print_notice "Dumping sqlite3 database: '${dbhost}' ${compression_string}"
sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
exit_code=$?
check_exit_code $target
cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
generate_checksum
move_dbbackup
post_dbbackup $db
}
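### check_availability(): blocks until the target is reachable - HTTP status checks
### for CouchDB/InfluxDB, TCP probes for Mongo/MSSQL/Redis, mysqladmin/pg_isready
### for MySQL/PostgreSQL, and a readable-file check for SQLite3 - retrying every
### 5 seconds.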
check_availability() {
### Set the Database Type
case "$dbtype" in
"couch" )
counter=0
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${dbhost}:${dbport})
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"influx" )
counter=0
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${dbhost}:${dbport}/health)
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"mongo" )
counter=0
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
(( counter+=5 ))
print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"mysql" )
counter=0
export MYSQL_PWD=${dbpass}
while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do
sleep 5
(( counter+=5 ))
print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)"
done
;;
"mssql" )
counter=0
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
(( counter+=5 ))
print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"pgsql" )
counter=0
export PGPASSWORD=${dbpass}
until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
do
sleep 5
(( counter+=5 ))
print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"redis" )
counter=0
while ! (nc -z "${dbhost}" "${dbport}") ; do
sleep 5
(( counter+=5 ))
print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"sqlite3" )
if [[ ! -e "${dbhost}" ]]; then
print_error "File '${dbhost}' does not exist."
exit_code=2
exit $exit_code
elif [[ ! -f "${dbhost}" ]]; then
print_error "File '${dbhost}' is not a file."
exit_code=2
exit $exit_code
elif [[ ! -r "${dbhost}" ]]; then
print_error "File '${dbhost}' is not readable."
exit_code=2
exit $exit_code
fi
;;
esac
}
check_exit_code() {
print_debug "DB Backup Exit Code is ${exit_code}"
case "${exit_code}" in
0 )
print_info "DB Backup of '${1}' completed successfully"
;;
* )
print_error "DB Backup of '${1}' reported errors"
master_exit_code=1
;;
esac
}
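### cleanup_old_data(): when DB_CLEANUP_TIME (minutes) is set and the run reported
### no errors, prunes files older than that age from DB_DUMP_TARGET.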
cleanup_old_data() {
if [ -n "${DB_CLEANUP_TIME}" ]; then
if [ "${master_exit_code}" != 1 ]; then
print_info "Cleaning up old backups"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
else
print_info "Skipping Cleaning up old backups because there were errors in backing up"
fi
fi
}
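### compression(): maps COMPRESSION (GZ/BZ/XZ/ZSTD/NONE), COMPRESSION_LEVEL and the
### parallel-compression settings onto the pigz/pbzip2/pixz/zstd pipelines used by
### the backup functions, and appends the matching extension to ${target}.
### Illustrative (assumed) settings:
###   COMPRESSION=ZSTD COMPRESSION_LEVEL=9 ENABLE_PARALLEL_COMPRESSION=TRUE PARALLEL_COMPRESSION_THREADS=4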
compression() {
if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
PARALLEL_COMPRESSION_THREADS=1
fi
case "${COMPRESSION,,}" in
gz* )
compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compression_type="gzip"
extension=".gz"
dir_compress_cmd=${compress_cmd}
target_dir=${target}
target=${target}.gz
;;
bz* )
compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
compression_type="bzip2"
dir_compress_cmd=${compress_cmd}
extension=".bz2"
target_dir=${target}
target=${target}.bz2
;;
xz* )
compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compression_type="xzip"
dir_compress_cmd=${compress_cmd}
extension=".xz"
target_dir=${target}
target=${target}.xz
;;
zst* )
compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
compression_type="zstd"
dir_compress_cmd=${compress_cmd}
extension=".zst"
target_dir=${target}
target=${target}.zst
;;
"none" | "false")
compression_type="none"
dir_compress_cmd="cat"
target_dir=${target}
;;
esac
case "${CONTAINER_LOG_LEVEL,,}" in
"debug" )
if [ "${compression_type}" = "none" ] ; then
compression_string="with '${PARALLEL_COMPRESSION_THREADS}' threads"
else
compression_string="and compressing with '${compression_type}:${COMPRESSION_LEVEL}' with '${PARALLEL_COMPRESSION_THREADS}' threads"
fi
;;
* )
if [ "${compression_type}" != "none" ] ; then
compression_string="and compressing with '${compression_type}'"
fi
;;
esac
}
create_archive() {
if [ "${exit_code}" = "0" ] ; then
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
else
print_warn "Skipping creating archive file because backup did not complete successfully"
fi
}
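### generate_checksum(): when ENABLE_CHECKSUM is true, writes an MD5 or SHA1 sidecar
### file next to the dump (CHECKSUM selects the algorithm).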
generate_checksum() {
if var_true "${ENABLE_CHECKSUM}" ;then
if [ "${exit_code}" = "0" ] ; then
case "${CHECKSUM,,}" in
"md5" )
checksum_command="md5sum"
checksum_extension="md5"
;;
"sha1" )
checksum_command="sha1sum"
checksum_extension="sha1"
;;
esac
print_notice "Generating ${checksum_extension^^} for '${target}'"
cd "${TEMP_LOCATION}"
${checksum_command} "${target}" > "${target}"."${checksum_extension}"
checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
else
print_warn "Skipping Checksum creation because backup did not complete successfully"
fi
fi
}
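### move_dbbackup(): reports the dump size (SIZE_VALUE selects bytes or human-readable)
### and moves the dump plus checksum either into DB_DUMP_TARGET on the filesystem or
### into an S3/Minio bucket with the aws CLI. Illustrative (assumed) S3 settings:
###   BACKUP_LOCATION=S3 S3_BUCKET=backups S3_PATH=db S3_REGION=us-east-1 S3_HOST=minio:9000 S3_PROTOCOL=https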
move_dbbackup() {
if [ "${exit_code}" = "0" ] ; then
case "${SIZE_VALUE,,}" in
"b" | "bytes" )
SIZE_VALUE=1
;;
"[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
SIZE_VALUE="-h"
;;
*)
SIZE_VALUE=1
;;
esac
if [ "$SIZE_VALUE" = "1" ] ; then
filesize=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
print_notice "Backup of ${target} created with the size of ${filesize} bytes"
else
filesize=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
print_notice "Backup of ${target} created with the size of ${filesize}"
fi
case "${BACKUP_LOCATION,,}" in
"file" | "filesystem" )
print_debug "Moving backup to filesystem"
mkdir -p "${DB_DUMP_TARGET}"
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
;;
"s3" | "minio" )
print_debug "Moving backup to S3 Bucket"
export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
export AWS_DEFAULT_REGION=${S3_REGION}
if [ -f "${S3_CERT_CA_FILE}" ] ; then
print_debug "Using Custom CA for S3 Backups"
s3_ca_cert="--ca-bundle ${S3_CERT_CA_FILE}"
fi
if var_true "${S3_CERT_SKIP_VERIFY}" ; then
print_debug "Skipping SSL verification for HTTPS S3 Hosts"
s3_ssl="--no-verify-ssl"
fi
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
unset s3_ssl
unset s3_ca_cert
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
esac
else
print_warn "Skipping moving DB Backup to final location because backup did not complete successfully"
fi
rm -rf "${TEMP_LOCATION}"/*
}
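### pre_dbbackup()/post_dbbackup(): record timestamps around each dump, optionally
### push size/age/status/duration to Zabbix (CONTAINER_ENABLE_MONITORING), and run
### POST_SCRIPT plus any /assets/custom-scripts/*.sh hooks with the arguments:
###   EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE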
pre_dbbackup() {
dbbackup_start_time=$(date +"%s")
now=$(date +"%Y%m%d-%H%M%S")
now_time=$(date +"%H:%M:%S")
now_date=$(date +"%Y-%m-%d")
target=${dbtype}_${dbname}_${dbhost}_${now}.sql
}
post_dbbackup() {
dbbackup_finish_time=$(date +"%s")
dbbackup_total_time=$(echo $((dbbackup_finish_time-dbbackup_start_time)))
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_notice "Sending Backup Statistics to Zabbix"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
fi
### Post Script Support
if [ -n "${POST_SCRIPT}" ] ; then
if [ -x "${POST_SCRIPT}" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${dbhost}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
fi
fi
### Post Backup Custom Script Support
if [ -d "/assets/custom-scripts/" ] ; then
for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
if [ -x "${f}" ] ; then
print_notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${dbhost}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
fi
done
fi
print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
}
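### sanity_test(): validates the minimum environment (DB_TYPE, DB_HOST, DB_NAME for
### MySQL/PostgreSQL, and the S3 variables when BACKUP_LOCATION is S3/MINIO) before
### any backup is attempted.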
sanity_test() {
sanity_var DB_TYPE "Database Type"
sanity_var DB_HOST "Database Host"
file_env 'DB_USER'
file_env 'DB_PASS'
case "${dbtype,,}" in
"mysql" | "mariadb" )
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
postgres* | "pgsql" )
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
esac
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
sanity_var S3_BUCKET "S3 Bucket"
sanity_var S3_PATH "S3 Path"
sanity_var S3_REGION "S3 Region"
file_env 'S3_KEY_ID'
file_env 'S3_KEY_SECRET'
fi
}
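### setup_mode(): MODE=AUTO/DEFAULT lets the image's scheduler trigger backups; any
### other value stops the service so backups are run manually with 'backup_now',
### optionally keeping the container alive via MANUAL_RUN_FOREVER.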
setup_mode() {
if [ "${MODE,,}" = "auto" ] || [ ${MODE,,} = "default" ] ; then
print_debug "Running in Auto / Default Mode - Letting Image control scheduling"
else
print_info "Running in Manual mode - Execute 'backup_now' or '/etc/services.available/10-db-backup/run' to perform a manual backup"
service_stop 10-db-backup
if var_true "${MANUAL_RUN_FOREVER}" ; then
mkdir -p /etc/services.d/99-run_forever
cat <<EOF > /etc/services.d/99-run_forever/run
#!/bin/bash
while true
do
sleep 86400
done
EOF
chmod +x /etc/services.d/99-run_forever/run
else
if var_true "${CONTAINER_ENABLE_SCHEDULING}" ; then
print_error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_SCHEDULING=TRUE'"
exit 1
fi
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_MONITORING=TRUE'"
exit 1
fi
if var_true "${CONTAINER_ENABLE_LOGSHIPPING}" ; then
print_error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_LOGSHIPPING=TRUE'"
exit 1
fi
fi
fi
}