@@ -1,29 +1,29 @@
#!/command/with-contenv bash
bootstrap_variables() {
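# Normalize DB_TYPE into an internal engine name, apply that engine's default
# port, and pull credentials in via file_env (environment variable or *_FILE secret).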
case "${dbtype ,,}" in
case "${DB_TYPE ,,}" in
couch* )
dbtype=couch
DB_PORT=${DB_PORT:-5984}
file_env 'DB_USER'
file_env 'DB_PASS'
;;
influx* )
dbtype=influx
DB_PORT=${DB_PORT:-8088}
file_env 'DB_USER'
file_env 'DB_PASS'
sanity_var INFLUX_VERSION "InfluxDB version to back up from: '1' or '2'"
;;
mongo* )
dbtype=mongo
DB_PORT=${DB_PORT:-27017}
[[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
;;
"mysql" | "mariadb" )
dbtype=mysql
DB_PORT=${DB_PORT:-3306}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
@@ -34,17 +34,17 @@ bootstrap_variables() {
*) print_error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;;
esac
dbtype=mssql
DB_PORT=${DB_PORT:-1433}
;;
postgres* | "pgsql" )
dbtype=pgsql
DB_PORT=${DB_PORT:-5432}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
"redis" )
dbtype=redis
DB_PORT=${DB_PORT:-6379}
[[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
;;
sqlite* )
@@ -60,58 +60,58 @@ bootstrap_variables() {
### Set the Database Authentication Details
case "$dbtype" in
"mongo" )
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser }"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass }"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname }"
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER }"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS }"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME }"
[[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
;;
"mysql" )
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass }
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS }
;;
"postgres" )
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass }"
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS }"
;;
"redis" )
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass }"
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS }"
;;
esac
}
backup_couch() {
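# CouchDB: dump every document over the HTTP API (_all_docs?include_docs=true).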
pre_dbbackup
target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
compression
print_notice "Dumping CouchDB database: '${dbname }' ${compression_string}"
curl -sSL -X GET ${dbhost}:${dbport}/${dbname }/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
print_notice "Dumping CouchDB database: '${DB_NAME }' ${compression_string}"
curl -sSL -X GET ${DB_HOST}:${DB_PORT}/${DB_NAME }/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup ${DB_NAME}
}
backup_influx() {
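# InfluxDB: DB_NAME=all backs up everything; otherwise each comma-separated
# database is dumped in its own pass. The flow differs between Influx 1.x and 2.x.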
if [ "${dbname ,,}" = "all" ] ; then
if [ "${DB_NAME ,,}" = "all" ] ; then
print_debug "Preparing to back up everything"
db_names=justbackupeverything
else
db_names=$(echo "${dbname }" | tr ',' '\n')
db_names=$(echo "${DB_NAME }" | tr ',' '\n')
fi
case "${INFLUX_VERSION,,}" in
1 )
for db in ${db_names}; do
pre_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="-bucket $db" ; else db=all ; fi
target=influx_${db}_${dbhost #*//}_${now}
if [ "${db}" != "justbackupeverything" ] ; then bucket="-d b ${ db} " ; else db=all ; fi
target=influx_${db}_${DB_HOST #*//}_${now}
compression
print_notice "Dumping Influx database: '${db}'"
influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
generate_checksum
move_dbbackup
post_dbbackup $db
@@ -121,14 +121,14 @@ backup_influx() {
for db in ${db_names}; do
pre_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
target=influx2_${db}_${DB_HOST#*//}_${now}
compression
print_notice "Dumping Influx2 database: '${db}'"
influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
create_archive
target=influx2_${db}_${DB_HOST#*//}_${now}.tar${extension}
generate_checksum
move_dbbackup
post_dbbackup $db
@@ -140,14 +140,14 @@ backup_influx() {
backup_mongo() {
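# MongoDB: mongodump writes a single archive; gzip is handled natively by
# mongodump, so the generic compression pipeline is bypassed here.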
pre_dbbackup
if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
else
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
mongo_compression="--gzip"
compression_string="and compressing with gzip"
fi
print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
exit_code=$?
check_exit_code $target
generate_checksum
@@ -157,15 +157,15 @@ backup_mongo() {
backup_mssql() {
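# MSSQL: issue a native BACKUP DATABASE ... TO DISK via sqlcmd.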
pre_dbbackup
target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
compression
print_notice "Dumping MSSQL database: '${dbname }'"
/opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass } – Q "BACKUP DATABASE \[${dbname }\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname }-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
print_notice "Dumping MSSQL database: '${DB_NAME }'"
/opt/mssql-tools/bin/sqlcmd -E -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS } – Q "BACKUP DATABASE \[${DB_NAME }\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME }-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup $DB_NAME
}
backup_mysql() {
@@ -176,9 +176,9 @@ backup_mysql() {
stored_procedures="--routines"
fi
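# Resolve the list of databases to dump: 'all' queries the server,
# otherwise the comma-separated DB_NAME list is split.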
if [ "${dbname ,,}" = "all" ] ; then
if [ "${DB_NAME ,,}" = "all" ] ; then
print_debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
@@ -187,7 +187,7 @@ backup_mysql() {
done
fi
else
db_names=$(echo "${DB_NAME}" | tr ',' '\n')
fi
print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
@@ -195,10 +195,10 @@ backup_mysql() {
if var_true "${SPLIT_DB}" ; then
for db in ${db_names} ; do
pre_dbbackup
target=mysql_${db}_${DB_HOST,,}_${now}.sql
compression
print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
generate_checksum
@@ -208,10 +208,10 @@ backup_mysql() {
else
print_debug "Not splitting database dumps into their own files"
pre_dbbackup
target=mysql_all_${DB_HOST,,}_${now}.sql
compression
print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
generate_checksum
@@ -221,11 +221,11 @@ backup_mysql() {
}
backup_pgsql() {
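# PostgreSQL: same pattern as MySQL - resolve the database list, then either
# dump per-database or run pg_dumpall with exclusions.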
export PGPASSWORD=${DB_PASS}
authdb=${DB_USER}
if [ "${dbname ,,}" = "all" ] ; then
if [ "${DB_NAME ,,}" = "all" ] ; then
print_debug "Preparing to back up all databases"
db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
@@ -234,7 +234,7 @@ backup_pgsql() {
done
fi
else
db_names=$(echo "${DB_NAME}" | tr ',' '\n')
fi
print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
@@ -242,10 +242,10 @@ backup_pgsql() {
if var_true "${SPLIT_DB}" ; then
for db in ${db_names} ; do
pre_dbbackup
target=pgsql_${db}_${DB_HOST,,}_${now}.sql
compression
print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code $target
generate_checksum
@@ -255,10 +255,10 @@ backup_pgsql() {
else
print_debug "Not splitting database dumps into their own files"
pre_dbbackup
target=pgsql_all_${DB_HOST,,}_${now}.sql
compression
print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
tmp_db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
for r_db_name in $(echo $db_names | xargs); do
tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
done
@@ -266,7 +266,7 @@ backup_pgsql() {
for x_db_name in ${tmp_db_names} ; do
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code $target
generate_checksum
@@ -278,15 +278,16 @@ backup_pgsql() {
backup_redis() {
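# Redis: trigger a BGSAVE, then poll 'info Persistence' (5 tries) until the
# background save has finished and its status is ok before compressing the RDB.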
pre_dbbackup
print_notice "Dumping Redis - Flushing Redis Cache First"
target=redis_all_${DB_HOST,,}_${now}.rdb
echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
sleep 10
try=5
while [ $try -gt 0 ] ; do
saved=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
print_notice "Redis Backup Complete"
exit_code=0
break
fi
try=$((try - 1))
@@ -296,6 +297,7 @@ backup_redis() {
target_original=${target}
compression
$compress_cmd "${TEMP_LOCATION}/${target_original}"
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup all
@@ -303,12 +305,12 @@ backup_redis() {
backup_sqlite3() {
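# SQLite: DB_HOST is the path to the database file; use the .backup command
# for a consistent copy, then compress it.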
pre_dbbackup
db=$(basename "${DB_HOST}")
db="${db%.*}"
target=sqlite3_${db}_${now}.sqlite3
compression
print_notice "Dumping sqlite3 database: '${dbhost }' ${compression_string}"
sqlite3 "${dbhost }" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
print_notice "Dumping sqlite3 database: '${DB_HOST }' ${compression_string}"
sqlite3 "${DB_HOST }" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
exit_code=$?
check_exit_code $target
cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
@@ -324,78 +326,89 @@ check_availability() {
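# check_availability: block until the target server responds, polling every 5 seconds.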
counter=0
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${DB_HOST}:${DB_PORT})
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
print_warn "CouchDB Host '${dbhost }' is not accessible, retrying.. ($counter seconds so far)"
print_warn "CouchDB Host '${DB_HOST }' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"influx" )
counter=0
case "${INFLUX_VERSION,,}" in
1 )
while ! (nc -z ${DB_HOST#*//} ${DB_PORT}) ; do
sleep 5
(( counter+=5 ))
print_warn "InfluxDB Host '${DB_HOST#*//}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
2 )
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${DB_HOST}:${DB_PORT}/health)
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
print_warn "InfluxDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
esac
;;
"mongo" )
counter=0
while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
sleep 5
(( counter+=5 ))
print_warn "Mongo Host '${dbhost }' is not accessible, retrying.. ($counter seconds so far)"
print_warn "Mongo Host '${DB_HOST }' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"mysql" )
counter=0
export MYSQL_PWD=${DB_PASS}
while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
sleep 5
(( counter+=5 ))
print_warn "MySQL/MariaDB Server '${dbhost }' is not accessible, retrying.. (${COUNTER } seconds so far)"
print_warn "MySQL/MariaDB Server '${DB_HOST }' is not accessible, retrying.. (${counter } seconds so far)"
done
;;
"mssql" )
counter=0
while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
sleep 5
(( counter+=5 ))
print_warn "MSSQL Host '${dbhost }' is not accessible, retrying.. ($counter seconds so far)"
print_warn "MSSQL Host '${DB_HOST }' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"pgsql" )
counter=0
export PGPASSWORD=${DB_PASS}
until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
do
sleep 5
(( counter+=5 ))
print_warn "Postgres Host '${dbhost }' is not accessible, retrying.. ($counter seconds so far)"
print_warn "Postgres Host '${DB_HOST }' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"redis" )
counter=0
while ! (nc -z "${dbhost}" "${dbport }") ; do
while ! (nc -z "${DB_HOST}" "${DB_PORT }") ; do
sleep 5
(( counter+=5 ))
print_warn "Redis Host '${dbhost }' is not accessible, retrying.. ($counter seconds so far)"
print_warn "Redis Host '${DB_HOST }' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"sqlite3" )
if [[ ! -e "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' does not exist."
exit_code=2
exit $exit_code
elif [[ ! -f "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' is not a file."
exit_code=2
exit $exit_code
elif [[ ! -r "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' is not readable."
exit_code=2
exit $exit_code
fi
@@ -419,11 +432,31 @@ check_exit_code() {
cleanup_old_data() {
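# cleanup_old_data: prune backups older than DB_CLEANUP_TIME (minutes) from the
# filesystem target or from the S3 bucket, but only when no backup error was recorded.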
if [ -n "${DB_CLEANUP_TIME}" ]; then
if [ "${master_exit_code}" != 1 ]; then
print_info "Cleaning up old backups"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
case "${BACKUP_LOCATION,,}" in
"file" | "filesystem" )
print_info "Cleaning up old backups"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
;;
"s3" | "minio" )
print_info "Cleaning up old backups"
aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | while read -r s3_file; do
s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
s3_createdate=$(date -d "$s3_createdate" "+%s")
s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
if [[ $s3_createdate -le $s3_olderthan ]] ; then
s3_filename=$(echo $s3_file | awk {'print $4'})
if [ "$s3_filename" != "" ] ; then
print_debug "Deleting $s3_filename"
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
fi
done
;;
esac
else
print_info "Skipping Cleaning up old backups because there were errors in backing up"
print_error "Skipping Cleaning up old backups because there were errors in backing up"
fi
fi
}
@@ -435,7 +468,7 @@ compression() {
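# compression: select the parallel compressor matching COMPRESSION and derive
# compress_cmd, dir_compress_cmd, the file extension and the final target name.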
case "${COMPRESSION,,}" in
gz* )
compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compress_cmd="silent pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compression_type="gzip"
extension=".gz"
dir_compress_cmd=${compress_cmd}
@@ -443,7 +476,7 @@ compression() {
target=${target}.gz
;;
bz* )
compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
compress_cmd="silent pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
compression_type="bzip2"
dir_compress_cmd=${compress_cmd}
extension=".bz2"
@@ -451,7 +484,7 @@ compression() {
target=${target}.bz2
;;
xz* )
compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compress_cmd="silent pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compression_type="xzip"
dir_compress_cmd=${compress_cmd}
extension=".xz"
@@ -459,7 +492,7 @@ compression() {
target=${target}.xz
;;
zst* )
compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
compress_cmd="silent zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
compression_type="zstd"
dir_compress_cmd=${compress_cmd}
extension=".zst"
@@ -467,8 +500,9 @@ compression() {
target=${target}.zst
;;
"none" | "false")
compress_cmd="cat "
compression_type="none"
dir_compress_cmd="cat"
dir_compress_cmd="cat "
target_dir=${target}
;;
esac
@@ -494,12 +528,12 @@ create_archive() {
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
else
print_warn "Skipping creating archive file because backup did not complete successfully"
print_error "Skipping creating archive file because backup did not complete successfully"
fi
}
generate_checksum() {
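# generate_checksum: store a checksum file next to the backup when ENABLE_CHECKSUM is true.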
if var_true "${ENABLE_CHECKSUM}" ;then
if var_true "${ENABLE_CHECKSUM}" ; then
if [ "${exit_code}" = "0" ] ; then
case "${CHECKSUM,,}" in
"md5" )
@@ -518,13 +552,16 @@ generate_checksum() {
checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
else
print_warn "Skipping Checksum creation because backup did not complete successfully"
print_error "Skipping Checksum creation because backup did not complete successfully"
fi
fi
}
move_dbbackup() {
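# move_dbbackup: capture the size and timestamp of the dump while it is still
# in TEMP_LOCATION, then ship it to the filesystem or S3 target.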
if [ "${exit_code}" = "0" ] ; then
dbbackup_size="$(stat -c%s "${TEMP_LOCATION}"/"${target}")"
dbbackup_date="$(date -r "${TEMP_LOCATION}"/"${target}" +'%s')"
case "${SIZE_VALUE,,}" in
"b" | "bytes" )
SIZE_VALUE=1
@@ -567,15 +604,17 @@ move_dbbackup() {
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
if var_true "${ENABLE_CHECKSUM}" ; then
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
esac
else
print_warn "Skipping moving DB Backup to final location because backup did not complete successfully"
print_error "Skipping moving DB Backup to final location because backup did not complete successfully"
fi
rm -rf "${TEMP_LOCATION}"/*
@@ -586,7 +625,7 @@ pre_dbbackup() {
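# pre_dbbackup: record start timestamps and a default target filename
# (the engine-specific functions override target as needed).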
now=$(date +"%Y%m%d-%H%M%S")
now_time=$(date +"%H:%M:%S")
now_date=$(date +"%Y-%m-%d")
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
}
post_dbbackup() {
@@ -595,17 +634,18 @@ post_dbbackup() {
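# Report size, timestamp, exit status and duration to Zabbix when container
# monitoring is enabled.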
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_notice "Sending Backup Statistics to Zabbix"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "${dbbackup_size}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "${dbbackup_date}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
if [ "$?" != "0" ] ; then print_error "Error sending statistics, consider disabling with 'CONTAINER_ENABLE_MONITORING=FALSE'" ; fi
fi
### Post Script Support
if [ -n "${POST_SCRIPT}" ] ; then
if [ -x "${POST_SCRIPT}" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${dbhost }" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST }" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
fi
@@ -617,7 +657,7 @@ post_dbbackup() {
if [ -x "${f}" ] ; then
print_notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${dbhost }" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
${f} "${exit_code}" "${dbtype}" "${DB_HOST }" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
fi
@@ -625,15 +665,15 @@ post_dbbackup() {
fi
print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
unset s3_ssl
unset s3_ca_cert
}
sanity_test() {
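# sanity_test: verify the minimum configuration is present before any backup runs.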
sanity_var DB_TYPE "Database Type"
sanity_var DB_HOST "Database Host"
file_env 'DB_USER'
file_env 'DB_PASS'
case "${dbtype ,,}" in
case "${DB_TYPE ,,}" in
"mysql" | "mariadb" )
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;