Compare commits


22 Commits
3.2.2 ... 3.3.9

Author             SHA1        Date                        Message
Dave Conroy        82d6ce444b  2022-07-11 09:26:38 -07:00  Merge branch 'master' of https://github.com/tiredofit/docker-db-backup
Dave Conroy        382a188b77  2022-07-11 09:26:35 -07:00  Release 3.3.9 - See CHANGELOG.md
Dave Conroy        f458c34546  2022-07-11 09:25:41 -07:00  Merge pull request #140 from fdarveau/fix-read-port-number-ineractive-restore (Fix reading port number in interactive restore)
François Darveau   229db5cd03  2022-07-10 16:44:05 -04:00  Fix reading port number in interactive restore
dave@tiredofit.ca  8bb926fd20  2022-07-09 08:59:59 -07:00  Release 3.3.8 - See CHANGELOG.md
dave@tiredofit.ca  f005956c47  2022-06-23 11:49:28 -07:00  Release 3.3.7 - See CHANGELOG.md
dave@tiredofit.ca  ba20386e65  2022-06-23 08:18:08 -07:00  Release 3.3.6 - See CHANGELOG.md
dave@tiredofit.ca  12211d3b67  2022-06-08 09:01:44 -07:00  Release 3.3.5 - See CHANGELOG.md
Dave Conroy        83693d35b2  2022-06-03 05:10:53 -07:00  Release 3.3.4 - See CHANGELOG.md
Dave Conroy        52b726c821  2022-06-03 05:09:27 -07:00  Merge pull request #132 from rozdzynski/master (Unary operator fix)
rozdzynski         5c43b3c907  2022-06-03 14:02:42 +02:00  unary operator fix
dave@tiredofit.ca  005e7f6e47  2022-05-24 08:26:45 -07:00  Release 3.3.3 - See CHANGELOG.md
Dave Conroy        7d7cb9587d  2022-05-02 22:28:08 -07:00  Release 3.3.2 - See CHANGELOG.md
Dave Conroy        d1713fe3f0  2022-04-30 22:31:34 -07:00  Release 3.3.1 - See CHANGELOG.md
Dave Conroy        d1e98d9c4b  2022-04-30 03:23:45 -07:00  Release 3.3.0 - See CHANGELOG.md
Dave Conroy        0920b671cb  2022-04-25 10:29:45 -07:00  Release 3.2.6 - See CHANGELOG.md
Dave Conroy        28ed6c3bb8  2022-04-23 14:11:29 -07:00  Release 3.2.5 - See CHANGELOG.md
Dave Conroy        c1bdf26598  2022-04-21 16:04:43 -07:00  Release 3.2.4 - See CHANGELOG.md
Dave Conroy        5a4cac2cee  2022-04-21 15:46:27 -07:00  Release 3.2.3 - See CHANGELOG.md
Dave Conroy        c04eec7661  2022-04-21 14:19:09 -07:00  Add space after compress_cmd
Dave Conroy        32f1959a07  2022-04-21 14:18:43 -07:00  Merge pull request #120 from joergmschulz/patch-1 (small typo / exiting instead of exitting)
joergmschulz       d384d5a529  2022-04-21 23:16:02 +02:00  small typo / exiting instead of exitting
10 changed files with 190 additions and 86 deletions

.dockerignore (new file)

@@ -0,0 +1 @@
+examples/

.github/dependabot.yml (new file)

@@ -0,0 +1,7 @@
+version: 2
+updates:
+  # Maintain dependencies for GitHub Actions
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"

GitHub Actions workflow (1 of 2)

@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
-uses: actions/checkout@v2
+uses: actions/checkout@v3
- name: Prepare
id: prep
@@ -63,17 +63,17 @@ jobs:
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
-uses: docker/setup-qemu-action@v1
+uses: docker/setup-qemu-action@v2
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
-uses: docker/setup-buildx-action@v1
+uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
if: github.event_name != 'pull_request'
-uses: docker/login-action@v1
+uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
@@ -102,7 +102,7 @@ jobs:
fi
- name: Build
-uses: docker/build-push-action@v2
+uses: docker/build-push-action@v3
with:
builder: ${{ steps.buildx.outputs.name }}
context: .

GitHub Actions workflow (2 of 2, identical changes)

@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
-uses: actions/checkout@v2
+uses: actions/checkout@v3
- name: Prepare
id: prep
@@ -63,17 +63,17 @@ jobs:
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
-uses: docker/setup-qemu-action@v1
+uses: docker/setup-qemu-action@v2
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
-uses: docker/setup-buildx-action@v1
+uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
if: github.event_name != 'pull_request'
-uses: docker/login-action@v1
+uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
@@ -102,7 +102,7 @@ jobs:
fi
- name: Build
-uses: docker/build-push-action@v2
+uses: docker/build-push-action@v3
with:
builder: ${{ steps.buildx.outputs.name }}
context: .

CHANGELOG.md

@@ -1,3 +1,85 @@
+## 3.3.9 2022-07-09 <fdarveau@github>
+### Fixed
+- Remaining work on interactive mode for entering port on restore script
+## 3.3.8 2022-07-09 <dave at tiredofit dot ca>
+### Added
+- MSSQL Client Tools 18.0.1.1-1
+## 3.3.7 2022-06-23 <dave at tiredofit dot ca>
+### Changed
+- Allow overrides to actually override with the restore script
+## 3.3.6 2022-06-23 <dave at tiredofit dot ca>
+### Changed
+- Fix for restore script when using all 7 arguments
+## 3.3.5 2022-06-08 <dave at tiredofit dot ca>
+### Changed
+- Fix DB Port parameter not being able to be input in restore script
+- Fix MongoDB restore questionnaire
+## 3.3.4 2022-06-03 <rozdzynski@github>
+### Fixed
+- S3 backups failing with special characters in filename
+## 3.3.3 2022-05-24 <dave at tiredofit dot ca>
+### Added
+- Alpine 3.16 base
+## 3.3.2 2022-05-02 <dave at tiredofit dot ca>
+### Added
+- Add POST_SCRIPT_SKIP_X_VERIFY environment variable to allow for more host compatibility for post scripts
+## 3.3.1 2022-04-30 <dave at tiredofit dot ca>
+### Changed
+- Compressing silently was causing 0 byte backups
+## 3.3.0 2022-04-30 <dave at tiredofit dot ca>
+### Added
+- Ability to auto clean old backups on S3 / Minio hosts like what occurs on filesystem
+- Alert user how to turn off Zabbix Monitoring if it fails
+- Allow Zabbix Monitoring to work with S3
+- Silence some more compression statements
+### Changed
+- Fix for Redis not backing up properly
+- Start sending checksums for S3 outputs
+- Cleanup some code functions
+- Fix Container Log Level always in DEBUG
+## 3.2.4 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Add -portable flag when backing up Influx
+## 3.2.3 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Fix for bucket / db name InfluxDB 1.xx
+- Minor aesthetics, spacing, spelling
## 3.2.2 2022-04-21 <dave at tiredofit dot ca>
### Changed

Dockerfile

@@ -1,10 +1,10 @@
-FROM docker.io/tiredofit/alpine:3.15
+FROM docker.io/tiredofit/alpine:3.16
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables
ENV INFLUX2_VERSION=2.2.1 \
-MSSQL_VERSION=17.8.1.1-1 \
+MSSQL_VERSION=18.0.1.1-1 \
CONTAINER_ENABLE_MESSAGING=FALSE \
CONTAINER_ENABLE_MONITORING=TRUE \
CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
@@ -42,8 +42,6 @@ RUN set -ex && \
zstd \
&& \
\
-cd /usr/src && \
-\
apkArch="$(apk --print-arch)"; \
case "$apkArch" in \
x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
@@ -51,7 +49,7 @@ RUN set -ex && \
*) sleep 0.1 ;; \
esac; \
\
-if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
+if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSSQL_VERSION}_amd64.apk mssql-tools18_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
\
mkdir -p /usr/src/pbzip2 && \

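Since this Dockerfile bump swaps in the MSSQL 18 client tools and Influx 2 CLI, a quick smoke test of the resulting image may help; the image tag and the mssql-tools18 install path below are assumptions based on Microsoft's packaging, not taken from the diff:

```bash
# Verify the MSSQL 18 and Influx 2 clients inside the built image
docker run --rm docker.io/tiredofit/db-backup:latest /opt/mssql-tools18/bin/sqlcmd -?
docker run --rm docker.io/tiredofit/db-backup:latest influx version
```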
README.md

@@ -261,6 +261,9 @@ Outputs the following on the console:
If you wish to change the size value from bytes to megabytes, set the environment variable `SIZE_VALUE=megabytes`
You must make your scripts executable; an internal check will otherwise skip trying to run them.
+If for some reason your filesystem or host does not detect the executable bit correctly, set the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass the check.
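As a concrete illustration, here is a minimal post script sketch; the argument order is taken from the inline `## script ...` comment in the backup functions further down, while the script name and log path are placeholders:

```bash
#!/usr/bin/env bash
# Minimal post script sketch. Arguments, per the inline comment in the backup functions:
# EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
exit_code="$1"; db_type="$2"; db_host="$3"; db_name="$4"
duration="$7"; backup_file="$8"; file_size="$9"; checksum="${10}"

if [ "${exit_code}" = "0" ]; then
    echo "OK ${db_type}/${db_name}@${db_host}: ${backup_file} (${file_size} bytes, ${duration}s, ${checksum})" >> /tmp/backup-report.log
else
    echo "FAILED ${db_type}/${db_name}@${db_host}: exit code ${exit_code}" >> /tmp/backup-report.log
fi
```

Remember to `chmod +x` the script, or set `POST_SCRIPT_SKIP_X_VERIFY=TRUE` as described above.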
## Support
These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.

Backup functions (/assets/functions/10-db-backup)

@@ -102,11 +102,11 @@ backup_influx() {
1 )
for db in ${db_names}; do
pre_dbbackup
-if [ "${db}" != "justbackupeverything" ] ; then bucket="-bucket $db" ; else db=all ; fi
+if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
target=influx_${db}_${DB_HOST#*//}_${now}
compression
print_notice "Dumping Influx database: '${db}'"
-influxd backup ${influx_compression} ${bucket} -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
+influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
@@ -279,7 +279,7 @@ backup_redis() {
pre_dbbackup
print_notice "Dumping Redis - Flushing Redis Cache First"
target=redis_all_${DB_HOST,,}_${now}.rdb
-echo bgsave | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
+echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
sleep 10
try=5
while [ $try -gt 0 ] ; do
@@ -287,6 +287,7 @@ backup_redis() {
ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
print_notice "Redis Backup Complete"
+exit_code=0
break
fi
try=$((try - 1))
@@ -296,6 +297,7 @@ backup_redis() {
target_original=${target}
compression
$compress_cmd "${TEMP_LOCATION}/${target_original}"
+check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup all
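For reference, the loop above polls Redis until the background save completes; a standalone sketch of the same probe follows (host and port are placeholders, and the elided `saved` pattern is assumed to key on `rdb_bgsave_in_progress`; only the `ok` line appears in the hunk above):

```bash
# Ask Redis whether the BGSAVE triggered earlier has finished cleanly
saved=$(echo 'info Persistence' | redis-cli -h redis -p 6379 | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h redis -p 6379 | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [ "$saved" = "saved" ] && [ "$ok" = "ok" ]; then echo "BGSAVE finished"; fi
```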
@@ -430,11 +432,31 @@ check_exit_code() {
cleanup_old_data() {
if [ -n "${DB_CLEANUP_TIME}" ]; then
if [ "${master_exit_code}" != 1 ]; then
-print_info "Cleaning up old backups"
-mkdir -p "${DB_DUMP_TARGET}"
-find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+case "${BACKUP_LOCATION,,}" in
+"file" | "filesystem" )
+print_info "Cleaning up old backups"
+mkdir -p "${DB_DUMP_TARGET}"
+find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+;;
+"s3" | "minio" )
+print_info "Cleaning up old backups"
+aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | while read -r s3_file; do
+s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
+s3_createdate=$(date -d "$s3_createdate" "+%s")
+s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
+if [[ $s3_createdate -le $s3_olderthan ]] ; then
+s3_filename=$(echo $s3_file | awk {'print $4'})
+if [ "$s3_filename" != "" ] ; then
+print_debug "Deleting $s3_filename"
+silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+fi
+fi
+done
+;;
+esac
else
-print_info "Skipping Cleaning up old backups because there were errors in backing up"
+print_error "Skipping Cleaning up old backups because there were errors in backing up"
fi
fi
}
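The new S3 branch reuses `DB_CLEANUP_TIME`, which is expressed in minutes (it also feeds `find -mmin` in the filesystem branch). A worked example of the cutoff arithmetic, with an assumed retention of one week:

```bash
DB_CLEANUP_TIME=10080                                  # 7 days * 24 h * 60 min
s3_olderthan=$(( $(date +%s) - DB_CLEANUP_TIME * 60 )) # cutoff in epoch seconds
# any object whose listing timestamp parses to an epoch <= $s3_olderthan is removed
```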
@@ -446,7 +468,7 @@ compression() {
case "${COMPRESSION,,}" in
gz* )
-compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
+compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compression_type="gzip"
extension=".gz"
dir_compress_cmd=${compress_cmd}
@@ -454,7 +476,7 @@ compression() {
target=${target}.gz
;;
bz* )
-compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
+compress_cmd="pbzip2 -q -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
compression_type="bzip2"
dir_compress_cmd=${compress_cmd}
extension=".bz2"
@@ -470,7 +492,7 @@ compression() {
target=${target}.xz
;;
zst* )
-compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
+compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
compression_type="zstd"
dir_compress_cmd=${compress_cmd}
extension=".zst"
@@ -478,9 +500,9 @@ compression() {
target=${target}.zst
;;
"none" | "false")
-compress_cmd="cat"
+compress_cmd="cat "
compression_type="none"
-dir_compress_cmd="cat"
+dir_compress_cmd="cat "
target_dir=${target}
;;
esac
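All of these branches store the compressor as a plain string that is later expanded unquoted in front of a file name (see `$compress_cmd "${TEMP_LOCATION}/${target_original}"` earlier in the diff); the trailing spaces added here are consistent with the "Add space after compress_cmd" commit, keeping the strings safe to concatenate with whatever follows. An illustrative expansion, with assumed values:

```bash
COMPRESSION_LEVEL=3
PARALLEL_COMPRESSION_THREADS=2
compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
# word-splits at call time to: pigz -q -3 -p 2 /tmp/backups/mysql_db01.sql
$compress_cmd "/tmp/backups/mysql_db01.sql"   # leaves /tmp/backups/mysql_db01.sql.gz
```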
@@ -506,12 +528,12 @@ create_archive() {
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
else
-print_warn "Skipping creating archive file because backup did not complete successfully"
+print_error "Skipping creating archive file because backup did not complete successfully"
fi
}
generate_checksum() {
-if var_true "${ENABLE_CHECKSUM}" ;then
+if var_true "${ENABLE_CHECKSUM}" ; then
if [ "${exit_code}" = "0" ] ; then
case "${CHECKSUM,,}" in
"md5" )
@@ -530,13 +552,16 @@ generate_checksum() {
checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
else
-print_warn "Skipping Checksum creation because backup did not complete successfully"
+print_error "Skipping Checksum creation because backup did not complete successfully"
fi
fi
}
move_dbbackup() {
if [ "${exit_code}" = "0" ] ; then
+dbbackup_size="$(stat -c%s "${TEMP_LOCATION}"/"${target}")"
+dbbackup_date="$(date -r "${TEMP_LOCATION}"/"${target}" +'%s')"
case "${SIZE_VALUE,,}" in
"b" | "bytes" )
SIZE_VALUE=1
@@ -579,15 +604,17 @@ move_dbbackup() {
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
-aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
-unset s3_ssl
-unset s3_ca_cert
+silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+if var_true "${ENABLE_CHECKSUM}" ; then
+silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+fi
+rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
esac
else
-print_warn "Skipping moving DB Backup to final location because backup did not complete successfully"
+print_error "Skipping moving DB Backup to final location because backup did not complete successfully"
fi
rm -rf "${TEMP_LOCATION}"/*
@@ -607,36 +634,47 @@ post_dbbackup() {
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_notice "Sending Backup Statistics to Zabbix"
-silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
-silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
+silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "${dbbackup_size}"
+silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "${dbbackup_date}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
+if [ "$?" != "0" ] ; then print_error "Error sending statistics, consider disabling with 'CONTAINER_ENABLE_MONITORING=FALSE'" ; fi
fi
### Post Script Support
if [ -n "${POST_SCRIPT}" ] ; then
-if [ -x "${POST_SCRIPT}" ] ; then
-print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
+if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
+eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+else
-print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
+if [ -x "${POST_SCRIPT}" ] ; then
+print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
+eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+else
+print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
+fi
fi
fi
### Post Backup Custom Script Support
if [ -d "/assets/custom-scripts/" ] ; then
for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
-if [ -x "${f}" ] ; then
-print_notice "Executing post backup custom script : '${f}'"
-## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
+if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
+${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+else
-print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
+if [ -x "${f}" ] ; then
+print_notice "Executing post backup custom script : '${f}'"
+## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
+${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+else
+print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
+fi
fi
done
fi
print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
+unset s3_ssl
+unset s3_ca_cert
}
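Besides `POST_SCRIPT`, the block above scans `/assets/custom-scripts/` for `*.sh` files and calls each with the same ten arguments. An illustrative way to wire one in (host path and container name are placeholders):

```bash
chmod +x ./notify.sh   # or set POST_SCRIPT_SKIP_X_VERIFY=TRUE in the environment
docker run -d --name db-backup \
  -v "$(pwd)/notify.sh:/assets/custom-scripts/notify.sh" \
  docker.io/tiredofit/db-backup
```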
sanity_test() {

Service run script (10-db-backup)

@@ -4,7 +4,6 @@ source /assets/functions/00-container
source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
PROCESS_NAME="db-backup"
-CONTAINER_LOG_LEVEL=DEBUG
bootstrap_variables
@@ -80,7 +79,7 @@ while true; do
cleanup_old_data
if var_true "${manual}" ; then
-print_debug "Exitting due to manual mode"
+print_debug "Exiting due to manual mode"
exit ${master_exit_code};
else
print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "

Restore script

@@ -55,7 +55,7 @@ The image will also allow you to use environment variables or Docker secrets use
The script can also be executed skipping the interactive mode by using the following syntax:
-$(basename $0) <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>
+$(basename "$0") <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>
If you only enter some of the arguments, you will be prompted to fill them in.
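For example, a fully non-interactive call under the seven-argument form above might look like this (all values are placeholders, and the `restore` entrypoint name is assumed from `$(basename "$0")`):

```bash
restore /backup/mysql_app_db01_20220711-000000.sql.gz mysql db01 app dbuser dbpass 3306
```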
@@ -70,7 +70,7 @@ EOF
interactive_mode=true
;;
* )
-interactive_mode=false
+interactive_mode=false
;;
esac
fi
@@ -78,7 +78,7 @@ fi
get_filename() {
COLUMNS=12
prompt="Please select a file to restore:"
-options=( $(find ${DB_DUMP_TARGET} -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
+options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
PS3="$prompt "
select opt in "${options[@]}" "Custom" "Quit" ; do
if (( REPLY == 2 + ${#options[@]} )) ; then
@@ -641,7 +641,7 @@ EOF
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
-case "${q_dbname_menu,,}" in
+case "${q_dbport_menu,,}" in
c* )
counter=1
q_dbport=" "
@@ -829,11 +829,7 @@ print_debug "Filename to recover '${r_filename}'"
## Question Database Type
if [ -n "${2}" ]; then
-if [ ! -f "${2}" ]; then
-get_dbtype
-else
-r_dbtype="${2}"
-fi
+r_dbtype="${2}"
else
get_dbtype
fi
@@ -841,59 +837,39 @@ print_debug "Database type '${r_dbtype}'"
## Question Database Host
if [ -n "${3}" ]; then
-if [ ! -f "${3}" ]; then
-get_dbhost
-else
-r_dbhost="${3}"
-fi
+r_dbhost="${3}"
else
get_dbhost
fi
print_debug "Database Host '${r_dbhost}'"
## Question Database Name
-if [ -n "${3}" ]; then
-if [ ! -f "${3}" ]; then
-get_dbname
-else
-r_dbname="${3}"
-fi
+if [ -n "${4}" ]; then
+r_dbname="${4}"
else
get_dbname
fi
print_debug "Database Name '${r_dbname}'"
## Question Database User
-if [ -n "${4}" ]; then
-if [ ! -f "${4}" ]; then
-get_dbuser
-else
-r_dbuser="${4}"
-fi
+if [ -n "${5}" ]; then
+r_dbuser="${5}"
else
get_dbuser
fi
print_debug "Database User '${r_dbuser}'"
## Question Database Password
-if [ -n "${5}" ]; then
-if [ ! -f "${5}" ]; then
-get_dbpass
-else
-r_dbpass="${5}"
-fi
+if [ -n "${6}" ]; then
+r_dbpass="${6}"
else
get_dbpass
fi
print_debug "Database Pass '${r_dbpass}'"
## Question Database Port
-if [ -n "${6}" ]; then
-if [ ! -f "${6}" ]; then
-get_dbport
-else
-r_dbport="${6}"
-fi
+if [ -n "${7}" ]; then
+r_dbport="${7}"
else
get_dbport
fi
@@ -944,12 +920,12 @@ case "${r_dbtype}" in
mongo_compression="--gzip"
fi
if [ -n "${r_dbuser}" ] ; then
-mongo_user="-u ${r_dbuser}"
+mongo_user="-u=${r_dbuser}"
fi
if [ -n "${r_dbpass}" ] ; then
-mongo_pass="-u ${r_dbpass}"
+mongo_pass="-p=${r_dbpass}"
fi
-mongorestore ${mongo_compression} -d ${r_dbname} -h ${r_dbhost} --port ${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
+mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
exit_code=$?
;;
* )