Release 3.2.0 - See CHANGELOG.md
@@ -13,6 +13,7 @@ bootstrap_variables() {
             dbport=${DB_PORT:-8088}
             file_env 'DB_USER'
             file_env 'DB_PASS'
+            sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
         ;;
         mongo* )
             dbtype=mongo
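The added sanity_var guard makes bootstrap fail fast when INFLUX_VERSION is unset, since backup_influx now branches on it. For readers unfamiliar with the file_env/sanity_var helpers, a minimal sketch of their assumed semantics follows; the real implementations live in the tiredofit base image and may differ in detail:

    #!/usr/bin/env bash
    # Sketch of the assumed helper behavior, not the image's exact code.
    file_env() {
        local var="$1"
        local file_var="${var}_FILE"
        # Prefer an explicit value; otherwise read from the *_FILE secret path.
        if [ -z "${!var:-}" ] && [ -n "${!file_var:-}" ]; then
            export "${var}"="$(cat "${!file_var}")"
        fi
    }

    sanity_var() {
        local var="$1" description="$2"
        # Abort early when a required variable is missing.
        if [ -z "${!var:-}" ]; then
            echo "ERROR: ${var} is required - ${description}" >&2
            exit 1
        fi
    }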
@@ -81,7 +82,7 @@ backup_couch() {
     target=couch_${dbname}_${dbhost}_${now}.txt
     compression
     print_notice "Dumping CouchDB database: '${dbname}' ${compression_string}"
-    curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > ${TEMP_LOCATION}/${target}
+    curl -sSL -X GET ${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > ${TEMP_LOCATION}/${target}
     exit_code=$?
     check_exit_code $target
     generate_checksum
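The change swaps a bare curl -X GET for -sSL (silent, show errors, follow redirects), so the dump no longer mixes a progress meter into stderr and survives a redirecting proxy. A standalone sketch of the same pipeline, assuming a local CouchDB and pigz as the configured compressor (all values here are illustrative, not from the script):

    #!/usr/bin/env bash
    # Hypothetical values standing in for the script's runtime variables.
    dbhost=localhost dbport=5984 dbname=mydb
    target="couch_${dbname}_${dbhost}_$(date +%Y%m%d-%H%M%S).txt.gz"

    # -sS: no progress meter but still print errors; -L: follow redirects.
    # The JSON export streams straight through the compressor to disk.
    curl -sSL -X GET "${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true" \
        | pigz -6 > "/tmp/backups/${target}"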
@@ -90,24 +91,59 @@ backup_couch() {
 }

 backup_influx() {
-    if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
-        :
+    if [ "${dbname,,}" = "all" ] ; then
+        print_debug "Preparing to back up everything"
+        db_names=justbackupeverything
     else
-        influx_compression="-portable"
-        compression_string=" and compressing with gzip"
+        db_names=$(echo "${dbname}" | tr ',' '\n')
     fi

-    for db in ${DB_NAME}; do
-        pre_dbbackup
-        target=influx_${db}_${dbhost}_${now}
-        print_notice "Dumping Influx database: '${db}' ${compression_string}"
-        influxd backup ${influx_compression} -database $db -host ${dbhost}:${dbport} ${TEMP_LOCATION}/${target}
-        exit_code=$?
-        check_exit_code $target
-        generate_checksum
-        move_dbbackup
-        post_dbbackup $db
-    done
+    case "${INFLUX_VERSION,,}" in
+        1 )
+            for db in ${db_names}; do
+                pre_dbbackup
+                if [ "${db}" != "justbackupeverything" ] ; then bucket="-bucket $db" ; else db=all ; fi
+                target=influx_${db}_${dbhost}_${now}
+                compression
+                print_notice "Dumping Influx database: '${db}'"
+                influxd backup ${influx_compression} ${bucket} -host ${dbhost}:${dbport} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
+                exit_code=$?
+                check_exit_code $target_dir
+                print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
+                tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
+                target=influx_${db}_${dbhost}_${now}.tar${extension}
+                generate_checksum
+                move_dbbackup
+                post_dbbackup $db
+            done
+        ;;
+        2 )
+            for db in ${db_names}; do
+                pre_dbbackup
+                if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
+                target=influx2_${db}_${dbhost}_${now}
+                compression
+                print_notice "Dumping Influx2 database: '${db}'"
+                influx backup --org ${dbuser} ${bucket} --host ${dbhost}:${dbport} --token ${dbpass} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
+                exit_code=$?
+                check_exit_code $target_dir
+                create_archive
+                target=influx2_${db}_${dbhost}_${now}.tar${extension}
+                generate_checksum
+                move_dbbackup
+                post_dbbackup $db
+            done
+        ;;
+    esac
+}
+
+create_archive() {
+    if [ "${exit_code}" = "0" ] ; then
+        print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
+        tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
+    else
+        print_warn "Skipping creating archive file because backup did not complete successfully"
+    fi
 }
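The new create_archive helper exists because influxd/influx write a directory of shard files rather than a single dump, so the directory is tar-streamed through the same compressor the file-based engines use, and only when the backup itself succeeded. A condensed sketch of that guard-then-archive pattern, with placeholder paths and compressor:

    #!/usr/bin/env bash
    # Placeholders for the script's runtime state.
    TEMP_LOCATION=/tmp/backups
    target_dir=influx_all_localhost_20250101-000000
    dir_compress_cmd="pigz -6"
    extension=".gz"
    exit_code=0   # set by the preceding influxd/influx invocation

    if [ "${exit_code}" = "0" ]; then
        # "tar cf -" writes the archive to stdout instead of a file,
        # so the compressor sees one continuous stream.
        tar cf - "${TEMP_LOCATION}/${target_dir}" \
            | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target_dir}.tar${extension}"
    else
        echo "Skipping archive: backup failed" >&2
    fi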
@@ -123,7 +159,6 @@ backup_mongo() {
     mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
     exit_code=$?
     check_exit_code $target
-    cd "${TEMP_LOCATION}"
     generate_checksum
     move_dbbackup
     post_dbbackup
@@ -132,6 +167,7 @@ backup_mongo() {
 backup_mssql() {
     pre_dbbackup
     target=mssql_${dbname}_${dbhost}_${now}.bak
+    compression
     print_notice "Dumping MSSQL database: '${dbname}'"
     /opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} -Q "BACKUP DATABASE [${dbname}] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
     exit_code=$?
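The -S ${dbhost}\,${dbport} form is SQL Server's "host,port" address syntax: the comma, not a colon, separates the port, and the backslash only shields it from the shell. A hedged sketch of the same invocation with placeholder connection details:

    #!/usr/bin/env bash
    # Placeholder connection details for illustration only.
    dbhost=mssql.example.com dbport=1433 dbuser=sa dbpass='secret' dbname=app

    # SQL Server addresses are "host,port"; -Q runs one query and exits.
    /opt/mssql-tools/bin/sqlcmd -S "${dbhost},${dbport}" -U "${dbuser}" -P "${dbpass}" \
        -Q "BACKUP DATABASE [${dbname}] TO DISK = N'/tmp/backups/${dbname}.bak' WITH NOFORMAT, NOINIT, STATS = 10"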
@@ -284,7 +320,7 @@ backup_sqlite3() {
     sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
     exit_code=$?
     check_exit_code $target
-    cat "${TEMP_LOCATION}"/backup.sqlite3 | $compress_cmd > "${TEMP_LOCATION}/${target}"
+    cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
     generate_checksum
     move_dbbackup
     post_dbbackup $db
@@ -294,62 +330,68 @@ check_availability() {
 ### Set the Database Type
     case "$dbtype" in
         "couch" )
-            COUNTER=0
-            while ! (nc -z ${dbhost} ${dbport}) ; do
+            counter=0
+            code_received=0
+            while [ "${code_received}" != "200" ]; do
+                code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${dbhost}:${dbport})
+                if [ "${code_received}" = "200" ] ; then break ; fi
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "influx" )
-            COUNTER=0
-            while ! (nc -z ${dbhost} ${dbport}) ; do
+            counter=0
+            code_received=0
+            while [ "${code_received}" != "200" ]; do
+                code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${dbhost}:${dbport}/health)
+                if [ "${code_received}" = "200" ] ; then break ; fi
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "mongo" )
-            COUNTER=0
+            counter=0
             while ! (nc -z ${dbhost} ${dbport}) ; do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "mysql" )
-            COUNTER=0
+            counter=0
             export MYSQL_PWD=${dbpass}
             while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do
                 sleep 5
-                (( COUNTER+=5 ))
+                (( counter+=5 ))
                 print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)"
             done
         ;;
         "mssql" )
-            COUNTER=0
+            counter=0
             while ! (nc -z ${dbhost} ${dbport}) ; do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "pgsql" )
-            COUNTER=0
+            counter=0
             export PGPASSWORD=${dbpass}
             until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
             do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "redis" )
-            COUNTER=0
+            counter=0
             while ! (nc -z "${dbhost}" "${dbport}") ; do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "sqlite3" )
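The couch and influx branches switch from a raw TCP probe (nc -z) to polling an HTTP endpoint until it returns 200, which proves the service is actually answering requests, not merely listening on the port. A generic sketch of that wait loop as a standalone helper; the function name and the example URL are illustrative, not from the script:

    #!/usr/bin/env bash
    # Hypothetical helper: poll a URL every 5s until it answers HTTP 200.
    wait_for_http_200() {
        local url="$1" counter=0 code=0
        while [ "${code}" != "200" ]; do
            # -o /dev/null discards the body; -w '%{http_code}' prints the status.
            code=$(curl -sSL -o /dev/null -w '%{http_code}' "${url}")
            [ "${code}" = "200" ] && break
            sleep 5
            (( counter += 5 ))
            echo "still waiting on ${url} (${counter}s so far)" >&2
        done
    }

    wait_for_http_200 "localhost:8086/health"   # e.g. the InfluxDB health endpoint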
@@ -405,26 +447,39 @@ compression() {
         gz* )
             compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
             compression_type="gzip"
             extension=".gz"
+            dir_compress_cmd=${compress_cmd}
+            target_dir=${target}
             target=${target}.gz
         ;;
         bz* )
             compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
             compression_type="bzip2"
+            dir_compress_cmd=${compress_cmd}
             extension=".bz2"
+            target_dir=${target}
             target=${target}.bz2
         ;;
         xz* )
             compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
             compression_type="xzip"
+            dir_compress_cmd=${compress_cmd}
             extension=".xz"
+            target_dir=${target}
             target=${target}.xz
         ;;
         zst* )
             compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
             compression_type="zstd"
+            dir_compress_cmd=${compress_cmd}
             extension=".zst"
+            target_dir=${target}
             target=${target}.zst
         ;;
         "none" | "false")
             compress_cmd="cat "
             compression_type="none"
+            dir_compress_cmd="cat"
+            target_dir=${target}
         ;;
     esac

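compression() only selects command strings; the callers decide how to apply them: $compress_cmd compresses a single dump stream, while the new $dir_compress_cmd sits behind tar cf - for directory-style backups such as Influx's. A small sketch of both call shapes, with assumed values and a stand-in dump function:

    #!/usr/bin/env bash
    # Values a 'gz*' COMPRESSION setting would produce (illustrative).
    compress_cmd="pigz -6 -p 4"
    dir_compress_cmd="${compress_cmd}"
    extension=".gz"

    # File-style engines pipe the dump itself through the compressor ...
    fake_dump() { echo "-- pretend dump --"; }   # hypothetical stand-in
    fake_dump | ${compress_cmd} > /tmp/db.sql${extension}

    # ... directory-style engines archive first, then compress the tar stream.
    mkdir -p /tmp/influx_dir && touch /tmp/influx_dir/shard.tsm
    tar cf - /tmp/influx_dir | ${dir_compress_cmd} > /tmp/influx_dir.tar${extension}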
@@ -470,55 +525,61 @@ generate_checksum() {
 }

 move_dbbackup() {
-    case "$SIZE_VALUE" in
-        "b" | "bytes" )
-            SIZE_VALUE=1
-        ;;
-        "[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
-            SIZE_VALUE="-h"
-        ;;
-        *)
-            SIZE_VALUE=1
-        ;;
-    esac
-    if [ "$SIZE_VALUE" = "1" ] ; then
-        FILESIZE=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
-        print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
-    else
-        FILESIZE=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
-        print_notice "Backup of ${target} created with the size of ${FILESIZE}"
-    fi
+    if [ "${exit_code}" = "0" ] ; then
+        case "${SIZE_VALUE,,}" in
+            "b" | "bytes" )
+                SIZE_VALUE=1
+            ;;
+            "[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
+                SIZE_VALUE="-h"
+            ;;
+            *)
+                SIZE_VALUE=1
+            ;;
+        esac
+        if [ "$SIZE_VALUE" = "1" ] ; then
+            filesize=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
+            print_notice "Backup of ${target} created with the size of ${filesize} bytes"
+        else
+            filesize=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
+            print_notice "Backup of ${target} created with the size of ${filesize}"
+        fi

-    case "${BACKUP_LOCATION,,}" in
-        "file" | "filesystem" )
-            print_debug "Moving backup to filesystem"
-            mkdir -p "${DB_DUMP_TARGET}"
-            mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
-            mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
-        ;;
-        "s3" | "minio" )
-            print_debug "Moving backup to S3 Bucket"
-            export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
-            export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
-            export AWS_DEFAULT_REGION=${S3_REGION}
-            if [ -f "${S3_CERT_CA_FILE}" ] ; then
-                print_debug "Using Custom CA for S3 Backups"
-                s3_ca_cert="--ca-bundle ${S3_CERT_CA_FILE}"
-            fi
-            if var_true "${S3_CERT_SKIP_VERIFY}" ; then
-                print_debug "Skipping SSL verification for HTTPS S3 Hosts"
-                s3_ssl="--no-verify-ssl"
-            fi
-
-            [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
-
-            aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
-            unset s3_ssl
-            unset s3_ca_cert
-            rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
-            rm -rf "${TEMP_LOCATION}"/"${target}"
-        ;;
-    esac
+        case "${BACKUP_LOCATION,,}" in
+            "file" | "filesystem" )
+                print_debug "Moving backup to filesystem"
+                mkdir -p "${DB_DUMP_TARGET}"
+                mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
+                mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
+            ;;
+            "s3" | "minio" )
+                print_debug "Moving backup to S3 Bucket"
+                export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
+                export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
+                export AWS_DEFAULT_REGION=${S3_REGION}
+                if [ -f "${S3_CERT_CA_FILE}" ] ; then
+                    print_debug "Using Custom CA for S3 Backups"
+                    s3_ca_cert="--ca-bundle ${S3_CERT_CA_FILE}"
+                fi
+                if var_true "${S3_CERT_SKIP_VERIFY}" ; then
+                    print_debug "Skipping SSL verification for HTTPS S3 Hosts"
+                    s3_ssl="--no-verify-ssl"
+                fi
+
+                [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
+
+                aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+                unset s3_ssl
+                unset s3_ca_cert
+                rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
+                rm -rf "${TEMP_LOCATION}"/"${target}"
+            ;;
+        esac
+    else
+        print_warn "Skipping moving DB Backup to final location because backup did not complete successfully"
+    fi
     rm -rf "${TEMP_LOCATION}"/*
 }

 pre_dbbackup() {
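When S3_HOST is set, the mover appends --endpoint-url so the same aws s3 cp call works against MinIO or any S3-compatible store; plain AWS S3 needs no override. A trimmed sketch of that pattern with placeholder credentials and endpoint:

    #!/usr/bin/env bash
    # Placeholder credentials/endpoint for a MinIO-style S3 target.
    export AWS_ACCESS_KEY_ID=minioadmin
    export AWS_SECRET_ACCESS_KEY=minioadmin
    export AWS_DEFAULT_REGION=us-east-1
    S3_HOST=minio.example.com S3_PROTOCOL=https S3_BUCKET=backups

    endpoint=""
    # Only non-AWS hosts need an explicit endpoint URL.
    [ -n "${S3_HOST}" ] && endpoint="--endpoint-url ${S3_PROTOCOL}://${S3_HOST}"

    aws ${endpoint} s3 cp /tmp/backups/db.sql.gz "s3://${S3_BUCKET}/db.sql.gz"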