Compare commits

...

13 Commits
3.2.3 ... 3.3.7

Author SHA1 Message Date
dave@tiredofit.ca
f005956c47 Release 3.3.7 - See CHANGELOG.md 2022-06-23 11:49:28 -07:00
dave@tiredofit.ca
ba20386e65 Release 3.3.6 - See CHANGELOG.md 2022-06-23 08:18:08 -07:00
dave@tiredofit.ca
12211d3b67 Release 3.3.5 - See CHANGELOG.md 2022-06-08 09:01:44 -07:00
Dave Conroy
83693d35b2 Release 3.3.4 - See CHANGELOG.md 2022-06-03 05:10:53 -07:00
Dave Conroy
52b726c821 Merge pull request #132 from rozdzynski/master
Unary operator fix
2022-06-03 05:09:27 -07:00
rozdzynski
5c43b3c907 unary operator fix 2022-06-03 14:02:42 +02:00
dave@tiredofit.ca
005e7f6e47 Release 3.3.3 - See CHANGELOG.md 2022-05-24 08:26:45 -07:00
Dave Conroy
7d7cb9587d Release 3.3.2 - See CHANGELOG.md 2022-05-02 22:28:08 -07:00
Dave Conroy
d1713fe3f0 Release 3.3.1 - See CHANGELOG.md 2022-04-30 22:31:34 -07:00
Dave Conroy
d1e98d9c4b Release 3.3.0 - See CHANGELOG.md 2022-04-30 03:23:45 -07:00
Dave Conroy
0920b671cb Release 3.2.6 - See CHANGELOG.md 2022-04-25 10:29:45 -07:00
Dave Conroy
28ed6c3bb8 Release 3.2.5 - See CHANGELOG.md 2022-04-23 14:11:29 -07:00
Dave Conroy
c1bdf26598 Release 3.2.4 - See CHANGELOG.md 2022-04-21 16:04:43 -07:00
10 changed files with 165 additions and 78 deletions

1
.dockerignore Normal file
View File

@@ -0,0 +1 @@
examples/

7
.github/dependabot.yml vendored Normal file
View File

@@ -0,0 +1,7 @@
version: 2
updates:
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

View File

@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v3
- name: Prepare - name: Prepare
id: prep id: prep
@@ -63,17 +63,17 @@ jobs:
echo ::set-output name=docker_image::${DOCKER_IMAGE} echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU - name: Set up QEMU
uses: docker/setup-qemu-action@v1 uses: docker/setup-qemu-action@v2
with: with:
platforms: all platforms: all
- name: Set up Docker Buildx - name: Set up Docker Buildx
id: buildx id: buildx
uses: docker/setup-buildx-action@v1 uses: docker/setup-buildx-action@v2
- name: Login to DockerHub - name: Login to DockerHub
if: github.event_name != 'pull_request' if: github.event_name != 'pull_request'
uses: docker/login-action@v1 uses: docker/login-action@v2
with: with:
username: ${{ secrets.DOCKER_USERNAME }} username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }} password: ${{ secrets.DOCKER_PASSWORD }}
@@ -102,7 +102,7 @@ jobs:
fi fi
- name: Build - name: Build
uses: docker/build-push-action@v2 uses: docker/build-push-action@v3
with: with:
builder: ${{ steps.buildx.outputs.name }} builder: ${{ steps.buildx.outputs.name }}
context: . context: .

View File

@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v3
- name: Prepare - name: Prepare
id: prep id: prep
@@ -63,17 +63,17 @@ jobs:
echo ::set-output name=docker_image::${DOCKER_IMAGE} echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU - name: Set up QEMU
uses: docker/setup-qemu-action@v1 uses: docker/setup-qemu-action@v2
with: with:
platforms: all platforms: all
- name: Set up Docker Buildx - name: Set up Docker Buildx
id: buildx id: buildx
uses: docker/setup-buildx-action@v1 uses: docker/setup-buildx-action@v2
- name: Login to DockerHub - name: Login to DockerHub
if: github.event_name != 'pull_request' if: github.event_name != 'pull_request'
uses: docker/login-action@v1 uses: docker/login-action@v2
with: with:
username: ${{ secrets.DOCKER_USERNAME }} username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }} password: ${{ secrets.DOCKER_PASSWORD }}
@@ -102,7 +102,7 @@ jobs:
fi fi
- name: Build - name: Build
uses: docker/build-push-action@v2 uses: docker/build-push-action@v3
with: with:
builder: ${{ steps.buildx.outputs.name }} builder: ${{ steps.buildx.outputs.name }}
context: . context: .

View File

@@ -1,3 +1,66 @@
## 3.3.7 2022-06-23 <dave at tiredofit dot ca>
### Changed
- Allow overrides to actually override with the restore script
## 3.3.6 2022-06-23 <dave at tiredofit dot ca>
### Changed
- Fix for restore script when using all 7 arguments
## 3.3.5 2022-06-08 <dave at tiredofit dot ca>
### Changed
- Fix DB Port parameter not being able to be input in restore script
- Fix MongoDB restore questionnaire
## 3.3.4 2022-06-03 <rozdzynski@github>
### Fixed
- S3 backups failing with special characters in filename
## 3.3.3 2022-05-24 <dave at tiredofit dot ca>
### Added
- Alpine 3.16 base
## 3.3.2 2022-05-02 <dave at tiredofit dot ca>
### Added
- Add POST_SCRIPT_SKIP_X_VERIFY environment variable to allow for more host compatibility for post scripts
## 3.3.1 2022-04-30 <dave at tiredofit dot ca>
### Changed
- Compressing silently was causing 0 byte backups
## 3.3.0 2022-04-30 <dave at tiredofit dot ca>
### Added
- Ability to auto clean old S3 / Minio Hosts like what occurs on filesystem
- Alert user how to turn off Zabbix Monitoring if fails
- Allow Zabbix Monitoring to work with S3
- Silence some more compression statements
### Changed
- Fix for Redis not backing up properly
- Start sending checksums for S3 Outputs
- Cleanup some code functions
- Fix Container Log Level always in DEBUG
## 3.2.4 2022-04-21 <dave at tiredofit dot ca>
### Changed
- Add -portable flag when backing up Influx
## 3.2.3 2022-04-21 <dave at tiredofit dot ca> ## 3.2.3 2022-04-21 <dave at tiredofit dot ca>
### Changed ### Changed

View File

@@ -1,4 +1,4 @@
FROM docker.io/tiredofit/alpine:3.15 FROM docker.io/tiredofit/alpine:3.16
LABEL maintainer="Dave Conroy (github.com/tiredofit)" LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables ### Set Environment Variables

View File

@@ -261,6 +261,9 @@ Outputs the following on the console:
If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes` If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`
You must make your scripts executable, otherwise an internal check will skip trying to run them.
If for some reason your filesystem or host is not detecting it right, use the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass.
## Support ## Support
These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community. These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.

View File

@@ -106,7 +106,7 @@ backup_influx() {
target=influx_${db}_${DB_HOST#*//}_${now} target=influx_${db}_${DB_HOST#*//}_${now}
compression compression
print_notice "Dumping Influx database: '${db}'" print_notice "Dumping Influx database: '${db}'"
influxd backup ${influx_compression} ${bucket} -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}" influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$? exit_code=$?
check_exit_code $target_dir check_exit_code $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}" print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
@@ -279,7 +279,7 @@ backup_redis() {
pre_dbbackup pre_dbbackup
print_notice "Dumping Redis - Flushing Redis Cache First" print_notice "Dumping Redis - Flushing Redis Cache First"
target=redis_all_${DB_HOST,,}_${now}.rdb target=redis_all_${DB_HOST,,}_${now}.rdb
echo bgsave | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS} echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
sleep 10 sleep 10
try=5 try=5
while [ $try -gt 0 ] ; do while [ $try -gt 0 ] ; do
@@ -287,6 +287,7 @@ backup_redis() {
ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}') ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
print_notice "Redis Backup Complete" print_notice "Redis Backup Complete"
exit_code=0
break break
fi fi
try=$((try - 1)) try=$((try - 1))
@@ -296,6 +297,7 @@ backup_redis() {
target_original=${target} target_original=${target}
compression compression
$compress_cmd "${TEMP_LOCATION}/${target_original}" $compress_cmd "${TEMP_LOCATION}/${target_original}"
check_exit_code $target
generate_checksum generate_checksum
move_dbbackup move_dbbackup
post_dbbackup all post_dbbackup all
@@ -430,11 +432,31 @@ check_exit_code() {
cleanup_old_data() { cleanup_old_data() {
if [ -n "${DB_CLEANUP_TIME}" ]; then if [ -n "${DB_CLEANUP_TIME}" ]; then
if [ "${master_exit_code}" != 1 ]; then if [ "${master_exit_code}" != 1 ]; then
print_info "Cleaning up old backups" case "${BACKUP_LOCATION,,}" in
mkdir -p "${DB_DUMP_TARGET}" "file" | "filesystem" )
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \; print_info "Cleaning up old backups"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
;;
"s3" | "minio" )
print_info "Cleaning up old backups"
aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | while read -r s3_file; do
s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
s3_createdate=$(date -d "$s3_createdate" "+%s")
s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
if [[ $s3_createdate -le $s3_olderthan ]] ; then
s3_filename=$(echo $s3_file | awk {'print $4'})
if [ "$s3_filename" != "" ] ; then
print_debug "Deleting $s3_filename"
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
fi
done
;;
esac
else else
print_info "Skipping Cleaning up old backups because there were errors in backing up" print_error "Skipping Cleaning up old backups because there were errors in backing up"
fi fi
fi fi
} }
@@ -446,7 +468,7 @@ compression() {
case "${COMPRESSION,,}" in case "${COMPRESSION,,}" in
gz* ) gz* )
compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} " compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compression_type="gzip" compression_type="gzip"
extension=".gz" extension=".gz"
dir_compress_cmd=${compress_cmd} dir_compress_cmd=${compress_cmd}
@@ -454,7 +476,7 @@ compression() {
target=${target}.gz target=${target}.gz
;; ;;
bz* ) bz* )
compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} " compress_cmd="pbzip2 -q -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
compression_type="bzip2" compression_type="bzip2"
dir_compress_cmd=${compress_cmd} dir_compress_cmd=${compress_cmd}
extension=".bz2" extension=".bz2"
@@ -470,7 +492,7 @@ compression() {
target=${target}.xz target=${target}.xz
;; ;;
zst* ) zst* )
compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} " compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
compression_type="zstd" compression_type="zstd"
dir_compress_cmd=${compress_cmd} dir_compress_cmd=${compress_cmd}
extension=".zst" extension=".zst"
@@ -506,12 +528,12 @@ create_archive() {
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}" print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}" tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
else else
print_warn "Skipping creating archive file because backup did not complete successfully" print_error "Skipping creating archive file because backup did not complete successfully"
fi fi
} }
generate_checksum() { generate_checksum() {
if var_true "${ENABLE_CHECKSUM}" ;then if var_true "${ENABLE_CHECKSUM}" ; then
if [ "${exit_code}" = "0" ] ; then if [ "${exit_code}" = "0" ] ; then
case "${CHECKSUM,,}" in case "${CHECKSUM,,}" in
"md5" ) "md5" )
@@ -530,13 +552,16 @@ generate_checksum() {
checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}') checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
print_debug "${checksum_extension^^}: ${checksum_value} - ${target}" print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
else else
print_warn "Skipping Checksum creation because backup did not complete successfully" print_error "Skipping Checksum creation because backup did not complete successfully"
fi fi
fi fi
} }
move_dbbackup() { move_dbbackup() {
if [ "${exit_code}" = "0" ] ; then if [ "${exit_code}" = "0" ] ; then
dbbackup_size="$(stat -c%s "${TEMP_LOCATION}"/"${target}")"
dbbackup_date="$(date -r "${TEMP_LOCATION}"/"${target}" +'%s')"
case "${SIZE_VALUE,,}" in case "${SIZE_VALUE,,}" in
"b" | "bytes" ) "b" | "bytes" )
SIZE_VALUE=1 SIZE_VALUE=1
@@ -579,15 +604,17 @@ move_dbbackup() {
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}" [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
unset s3_ssl if var_true "${ENABLE_CHECKSUM}" ; then
unset s3_ca_cert silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}" rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
rm -rf "${TEMP_LOCATION}"/"${target}" rm -rf "${TEMP_LOCATION}"/"${target}"
;; ;;
esac esac
else else
print_warn "Skipping moving DB Backup to final location because backup did not complete successfully" print_error "Skipping moving DB Backup to final location because backup did not complete successfully"
fi fi
rm -rf "${TEMP_LOCATION}"/* rm -rf "${TEMP_LOCATION}"/*
@@ -607,36 +634,47 @@ post_dbbackup() {
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_notice "Sending Backup Statistics to Zabbix" print_notice "Sending Backup Statistics to Zabbix"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")" silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "${dbbackup_size}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')" silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "${dbbackup_date}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}" silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))" silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
if [ "$?" != "0" ] ; then print_error "Error sending statistics, consider disabling with 'CONTAINER_ENABLE_MONITORING=FALSE'" ; fi
fi fi
### Post Script Support ### Post Script Support
if [ -n "${POST_SCRIPT}" ] ; then if [ -n "${POST_SCRIPT}" ] ; then
if [ -x "${POST_SCRIPT}" ] ; then if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}" eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else else
print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!" if [ -x "${POST_SCRIPT}" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
fi
fi fi
fi fi
### Post Backup Custom Script Support ### Post Backup Custom Script Support
if [ -d "/assets/custom-scripts/" ] ; then if [ -d "/assets/custom-scripts/" ] ; then
for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
if [ -x "${f}" ] ; then if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
print_notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}" ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else else
print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!" if [ -x "${f}" ] ; then
print_notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
fi
fi fi
done done
fi fi
print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')" print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
unset s3_ssl
unset s3_ca_cert
} }
sanity_test() { sanity_test() {

View File

@@ -4,7 +4,6 @@ source /assets/functions/00-container
source /assets/functions/10-db-backup source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup source /assets/defaults/10-db-backup
PROCESS_NAME="db-backup" PROCESS_NAME="db-backup"
CONTAINER_LOG_LEVEL=DEBUG
bootstrap_variables bootstrap_variables

View File

@@ -55,7 +55,7 @@ The image will also allow you to use environment variables or Docker secrets use
The script can also be executed skipping the interactive mode by using the following syntax/ The script can also be executed skipping the interactive mode by using the following syntax/
$(basename $0) <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port> $(basename "$0") <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>
If you only enter some of the arguments you will be prompted to fill them in. If you only enter some of the arguments you will be prompted to fill them in.
@@ -70,7 +70,7 @@ EOF
interactive_mode=true interactive_mode=true
;; ;;
* ) * )
interactive_mode=false interactive_mode=false
;; ;;
esac esac
fi fi
@@ -78,7 +78,7 @@ fi
get_filename() { get_filename() {
COLUMNS=12 COLUMNS=12
prompt="Please select a file to restore:" prompt="Please select a file to restore:"
options=( $(find ${DB_DUMP_TARGET} -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) ) options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
PS3="$prompt " PS3="$prompt "
select opt in "${options[@]}" "Custom" "Quit" ; do select opt in "${options[@]}" "Custom" "Quit" ; do
if (( REPLY == 2 + ${#options[@]} )) ; then if (( REPLY == 2 + ${#options[@]} )) ; then
@@ -641,7 +641,7 @@ EOF
2 ) 2 )
while true; do while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbport_menu read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
case "${q_dbname_menu,,}" in case "${q_port_menu,,}" in
c* ) c* )
counter=1 counter=1
q_dbport=" " q_dbport=" "
@@ -829,11 +829,7 @@ print_debug "Filename to recover '${r_filename}'"
## Question Database Type ## Question Database Type
if [ -n "${2}" ]; then if [ -n "${2}" ]; then
if [ ! -f "${2}" ]; then r_dbtype="${2}"
get_dbtype
else
r_dbtype="${2}"
fi
else else
get_dbtype get_dbtype
fi fi
@@ -841,59 +837,39 @@ print_debug "Database type '${r_dbtype}'"
## Question Database Host ## Question Database Host
if [ -n "${3}" ]; then if [ -n "${3}" ]; then
if [ ! -f "${3}" ]; then r_dbhost="${3}"
get_dbhost
else
r_dbhost="${3}"
fi
else else
get_dbhost get_dbhost
fi fi
print_debug "Database Host '${r_dbhost}'" print_debug "Database Host '${r_dbhost}'"
## Question Database Name ## Question Database Name
if [ -n "${3}" ]; then if [ -n "${4}" ]; then
if [ ! -f "${3}" ]; then r_dbname="${4}"
get_dbname
else
r_dbname="${3}"
fi
else else
get_dbname get_dbname
fi fi
print_debug "Database Name '${r_dbname}'" print_debug "Database Name '${r_dbname}'"
## Question Database User ## Question Database User
if [ -n "${4}" ]; then if [ -n "${5}" ]; then
if [ ! -f "${4}" ]; then r_dbuser="${5}"
get_dbuser
else
r_dbuser="${4}"
fi
else else
get_dbuser get_dbuser
fi fi
print_debug "Database User '${r_dbuser}'" print_debug "Database User '${r_dbuser}'"
## Question Database Password ## Question Database Password
if [ -n "${5}" ]; then if [ -n "${6}" ]; then
if [ ! -f "${5}" ]; then r_dbpass="${6}"
get_dbpass
else
r_dbpass="${5}"
fi
else else
get_dbpass get_dbpass
fi fi
print_debug "Database Pass '${r_dbpass}'" print_debug "Database Pass '${r_dbpass}'"
## Question Database Port ## Question Database Port
if [ -n "${6}" ]; then if [ -n "${7}" ]; then
if [ ! -f "${6}" ]; then r_dbport="${7}"
get_dbport
else
r_dbport="${6}"
fi
else else
get_dbport get_dbport
fi fi
@@ -944,12 +920,12 @@ case "${r_dbtype}" in
mongo_compression="--gzip" mongo_compression="--gzip"
fi fi
if [ -n "${r_dbuser}" ] ; then if [ -n "${r_dbuser}" ] ; then
mongo_user="-u ${r_dbuser}" mongo_user="-u=${r_dbuser}"
fi fi
if [ -n "${r_dbpass}" ] ; then if [ -n "${r_dbpass}" ] ; then
mongo_pass="-u ${r_dbpass}" mongo_pass="-p=${r_dbpass}"
fi fi
mongorestore ${mongo_compression} -d ${r_dbname} -h ${r_dbhost} --port ${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename} mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
exit_code=$? exit_code=$?
;; ;;
* ) * )