Compare commits


29 Commits
3.1.1 ... 3.3.7

Author  SHA1  Message  Date
dave@tiredofit.ca  f005956c47  Release 3.3.7 - See CHANGELOG.md  2022-06-23 11:49:28 -07:00
dave@tiredofit.ca  ba20386e65  Release 3.3.6 - See CHANGELOG.md  2022-06-23 08:18:08 -07:00
dave@tiredofit.ca  12211d3b67  Release 3.3.5 - See CHANGELOG.md  2022-06-08 09:01:44 -07:00
Dave Conroy  83693d35b2  Release 3.3.4 - See CHANGELOG.md  2022-06-03 05:10:53 -07:00
Dave Conroy  52b726c821  Merge pull request #132 from rozdzynski/master (Unary operator fix)  2022-06-03 05:09:27 -07:00
rozdzynski  5c43b3c907  unary operator fix  2022-06-03 14:02:42 +02:00
dave@tiredofit.ca  005e7f6e47  Release 3.3.3 - See CHANGELOG.md  2022-05-24 08:26:45 -07:00
Dave Conroy  7d7cb9587d  Release 3.3.2 - See CHANGELOG.md  2022-05-02 22:28:08 -07:00
Dave Conroy  d1713fe3f0  Release 3.3.1 - See CHANGELOG.md  2022-04-30 22:31:34 -07:00
Dave Conroy  d1e98d9c4b  Release 3.3.0 - See CHANGELOG.md  2022-04-30 03:23:45 -07:00
Dave Conroy  0920b671cb  Release 3.2.6 - See CHANGELOG.md  2022-04-25 10:29:45 -07:00
Dave Conroy  28ed6c3bb8  Release 3.2.5 - See CHANGELOG.md  2022-04-23 14:11:29 -07:00
Dave Conroy  c1bdf26598  Release 3.2.4 - See CHANGELOG.md  2022-04-21 16:04:43 -07:00
Dave Conroy  5a4cac2cee  Release 3.2.3 - See CHANGELOG.md  2022-04-21 15:46:27 -07:00
Dave Conroy  c04eec7661  Add space after compress_cmd  2022-04-21 14:19:09 -07:00
Dave Conroy  32f1959a07  Merge pull request #120 from joergmschulz/patch-1 (small typo / exiting instead of exitting)  2022-04-21 14:18:43 -07:00
joergmschulz  d384d5a529  small typo / exiting instead of exitting  2022-04-21 23:16:02 +02:00
Dave Conroy  56ab68dd71  Release 3.2.2 - See CHANGELOG.md  2022-04-21 12:14:17 -07:00
Dave Conroy  9a1a5efbd9  Do a different DB Ready check for Influx 1 vs 2  2022-04-21 12:12:57 -07:00
Dave Conroy  df5532c128  Fix blank database size when compression type=NONE  2022-04-21 12:08:51 -07:00
Dave Conroy  2ecd313778  Change database variables around  2022-04-21 12:08:27 -07:00
Dave Conroy  55cfe5ab02  Release 3.2.1 - See CHANGELOG.md  2022-04-03 10:28:07 -07:00
Dave Conroy  ae2e2c38e2  Sanitize DB_HOST for URLs  2022-04-02 07:37:34 -07:00
Dave Conroy  c23d7991fe  Release 3.2.0 - See CHANGELOG.md  2022-04-01 18:41:58 -07:00
Dave Conroy  3f58cfd284  Release 3.1.3 - See CHANGELOG.md  2022-03-30 10:46:16 -07:00
Dave Conroy  2d01f5e692  Fix for MARIADB not sending DB name for post script  2022-03-30 10:45:27 -07:00
Dave Conroy  dbd0a03b0d  SPLIT_DB is supposed to be TRUE  2022-03-30 10:43:22 -07:00
Dave Conroy  6527f4ff63  Add sanity checks for Post scripts to be executible  2022-03-30 10:37:58 -07:00
Dave Conroy  d843d21a1b  Release 3.1.2 - See CHANGELOG.md  2022-03-29 08:09:36 -07:00
11 changed files with 490 additions and 271 deletions

.dockerignore (new file)

@@ -0,0 +1 @@
+examples/

.github/dependabot.yml (new file)

@@ -0,0 +1,7 @@
+version: 2
+updates:
+  # Maintain dependencies for GitHub Actions
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"

GitHub Actions workflow

@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Prepare
         id: prep
@@ -63,17 +63,17 @@ jobs:
           echo ::set-output name=docker_image::${DOCKER_IMAGE}
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
         with:
           platforms: all
       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
       - name: Login to DockerHub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
@@ -102,7 +102,7 @@ jobs:
         fi
       - name: Build
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           builder: ${{ steps.buildx.outputs.name }}
           context: .

GitHub Actions workflow

@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
      - name: Prepare
         id: prep
@@ -63,17 +63,17 @@ jobs:
           echo ::set-output name=docker_image::${DOCKER_IMAGE}
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
         with:
           platforms: all
       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
       - name: Login to DockerHub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
@@ -102,7 +102,7 @@ jobs:
         fi
       - name: Build
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           builder: ${{ steps.buildx.outputs.name }}
           context: .

CHANGELOG.md

@@ -1,3 +1,113 @@
+## 3.3.7 2022-06-23 <dave at tiredofit dot ca>
+### Changed
+- Allow overrides to actually override with the restore script
+## 3.3.6 2022-06-23 <dave at tiredofit dot ca>
+### Changed
+- Fix for restore script when using all 7 arguments
+## 3.3.5 2022-06-08 <dave at tiredofit dot ca>
+### Changed
+- Fix DB Port parameter not being able to be input in restore script
+- Fix MongoDB restore questionnaire
+## 3.3.4 2022-06-03 <rozdzynski@github>
+### Fixed
+- S3 backups failing with special characters in filename
+## 3.3.3 2022-05-24 <dave at tiredofit dot ca>
+### Added
+- Alpine 3.16 base
+## 3.3.2 2022-05-02 <dave at tiredofit dot ca>
+### Added
+- Add POST_SCRIPT_SKIP_X_VERIFY environment variables to allow for more host compatibility for post scripts
+## 3.3.1 2022-04-30 <dave at tiredofit dot ca>
+### Changed
+- Compressing silently was causing 0 byte backups
+## 3.3.0 2022-04-30 <dave at tiredofit dot ca>
+### Added
+- Ability to auto clean old S3 / Minio Hosts like what occurs on filesysten
+- Alert user how to turn off Zabbix Monitoring if fails
+- Allow Zabbix Monitoring to work with S3
+- Silence some more compression statements
+### Changed
+- Fix for Redis not backing up properly
+- Start sending checksums for S3 Outputs
+- Cleanup some code functions
+- FIx Container Log Level always in DEBUG
+## 3.2.4 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Add -portable flag when backing up Influx
+## 3.2.3 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Fix for bucket / db name InfluxDB 1.xx
+- Minor aesthetics, spacing, spelling
+## 3.2.2 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Restore script properly parses DB_PORT entry
+- Influx Database ready performs different checks dependent on V1/V2
+- Stop using weird database lowercase variables unneccessarily breaking Docker Secrets
+## 3.2.1 2022-04-03 <dave at tiredofit dot ca>
+### Changed
+- Fix a variety of issues iwth 3.2.0 relating to InfluxDB
+## 3.2.0 2022-03-31 <dave at tiredofit dot ca>
+### Added
+- Install InfluxDB2 Client alongside version 1 (amd64 and arm64)
+- Introduce InfluxDB 2 backup support
+- Introduce multiple compression types other than Gzip for Influx 1/2
+- Introduce compression for MSSQL backups
+### Changed
+- Testing for Host availability for CouchDB and InfluxDB
+## 3.1.3 2022-03-30 <dave at tiredofit dot ca>
+### Changed
+- Fix for MariaDB not sending database name to post script
+- Check for executible bit on post scripts both via environment variable and /assets/custom
+- SPLIT_DB defaulted to TRUE
+## 3.1.2 2022-03-29 <dave at tiredofit dot ca>
+### Changed
+- Fix for blank Notice when individual backup is completed (time taken)
 ## 3.1.1 2022-03-28 <dave at tiredofit dot ca>
 ### Changed

Dockerfile

@@ -1,8 +1,10 @@
-FROM docker.io/tiredofit/alpine:3.15
+FROM docker.io/tiredofit/alpine:3.16
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"

 ### Set Environment Variables
-ENV MSSQL_VERSION=17.8.1.1-1 \
+ENV INFLUX2_VERSION=2.2.1 \
+    MSSQL_VERSION=17.8.1.1-1 \
     CONTAINER_ENABLE_MESSAGING=FALSE \
     CONTAINER_ENABLE_MONITORING=TRUE \
     CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
@@ -44,9 +46,14 @@ RUN set -ex && \
     \
     apkArch="$(apk --print-arch)"; \
     case "$apkArch" in \
-        x86_64) mssql=true ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ;; \
-        *) echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ;; \
+        x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
+        aarch64 ) influx2=true ; influx_arch=arm64 ;; \
+        *) sleep 0.1 ;; \
     esac; \
+    \
+    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
+    if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
+    \
     mkdir -p /usr/src/pbzip2 && \
     curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
     cd /usr/src/pbzip2 && \

README.md

@@ -50,6 +50,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
   - [Base Images used](#base-images-used)
   - [Container Options](#container-options)
     - [Database Specific Options](#database-specific-options)
+      - [For Influx DB2:](#for-influx-db2)
     - [Scheduling Options](#scheduling-options)
     - [Backup Options](#backup-options)
     - [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
@@ -143,6 +144,11 @@ Be sure to view the following repositories to understand all the customizable op
 | `DB_USER` | username for the database(s) - Can use `root` for MySQL | |
 | `DB_PASS` | (optional if DB doesn't require it) password for the database | |
 | `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
+| `INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2.x` series - AMD64 and ARM64 only for `2` | |
+
+#### For Influx DB2:
+
+Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST` use syntax of `http(s)://db-name`
 ### Scheduling Options
 | Parameter | Description | Default |
 | ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
@@ -255,6 +261,9 @@ Outputs the following on the console:
 If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`
+
+You must make your scripts executible otherwise there is an internal check that will skip trying to run it otherwise.
+If for some reason your filesystem or host is not detecting it right, use the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass.

 ## Support
 These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
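As a worked example of the InfluxDB 2 mapping the README hunk above describes, the container environment could be set along these lines. This is only a sketch: the hostname, organization name and token are hypothetical placeholders, and the image name is assumed to be the Docker Hub build of this repository.

    # Back up all buckets from an InfluxDB 2 server.
    # DB_USER carries the organization; DB_PASS carries the root token.
    docker run -d --name db-backup \
      -e DB_TYPE=influx \
      -e INFLUX_VERSION=2 \
      -e DB_HOST=http://influxdb \
      -e DB_PORT=8086 \
      -e DB_NAME=ALL \
      -e DB_USER=example-org \
      -e DB_PASS=example-root-token \
      tiredofit/db-backup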

/assets/defaults/10-db-backup

@@ -18,10 +18,5 @@ PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
 S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
 S3_PROTOCOL=${S3_PROTOCOL:-"https"}
 SIZE_VALUE=${SIZE_VALUE:-"bytes"}
-SPLIT_DB=${SPLIT_DB:-"FALSE"}
+SPLIT_DB=${SPLIT_DB:-"TRUE"}
 TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
-dbhost=${DB_HOST}
-dbname=${DB_NAME}
-dbpass=${DB_PASS}
-dbtype=${DB_TYPE}
-dbuser=${DB_USER}

/assets/functions/10-db-backup

@@ -1,28 +1,29 @@
 #!/command/with-contenv bash

 bootstrap_variables() {
-  case "${dbtype,,}" in
+  case "${DB_TYPE,,}" in
     couch* )
       dbtype=couch
-      dbport=${DB_PORT:-5984}
+      DB_PORT=${DB_PORT:-5984}
       file_env 'DB_USER'
       file_env 'DB_PASS'
     ;;
     influx* )
       dbtype=influx
-      dbport=${DB_PORT:-8088}
+      DB_PORT=${DB_PORT:-8088}
       file_env 'DB_USER'
       file_env 'DB_PASS'
+      sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
     ;;
     mongo* )
       dbtype=mongo
-      dbport=${DB_PORT:-27017}
+      DB_PORT=${DB_PORT:-27017}
       [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
       [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
     ;;
     "mysql" | "mariadb" )
       dbtype=mysql
-      dbport=${DB_PORT:-3306}
+      DB_PORT=${DB_PORT:-3306}
       [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
       sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
     ;;
@@ -33,17 +34,17 @@ bootstrap_variables() {
         *) print_error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;;
       esac
       dbtype=mssql
-      dbport=${DB_PORT:-1433}
+      DB_PORT=${DB_PORT:-1433}
     ;;
     postgres* | "pgsql" )
       dbtype=pgsql
-      dbport=${DB_PORT:-5432}
+      DB_PORT=${DB_PORT:-5432}
       [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
       sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
     ;;
     "redis" )
       dbtype=redis
-      dbport=${DB_PORT:-6379}
+      DB_PORT=${DB_PORT:-6379}
       [[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
     ;;
     sqlite* )
@@ -59,71 +60,96 @@ bootstrap_variables() {
   ### Set the Database Authentication Details
   case "$dbtype" in
     "mongo" )
-      [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
-      [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
-      [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
+      [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
+      [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
+      [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
       [[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
     ;;
     "mysql" )
-      [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
+      [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
     ;;
     "postgres" )
-      [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
+      [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
     ;;
     "redis" )
-      [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
+      [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
     ;;
   esac
 }

 backup_couch() {
   pre_dbbackup
-  target=couch_${dbname}_${dbhost}_${now}.txt
+  target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
   compression
-  print_notice "Dumping CouchDB database: '${dbname}' ${compression_string}"
-  curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > ${TEMP_LOCATION}/${target}
+  print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
+  curl -sSL -X GET ${DB_HOST}:${DB_PORT}/${DB_NAME}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
   exit_code=$?
   check_exit_code $target
   generate_checksum
   move_dbbackup
-  post_dbbackup
+  post_dbbackup ${DB_NAME}
 }

 backup_influx() {
-  if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
-    :
+  if [ "${DB_NAME,,}" = "all" ] ; then
+    print_debug "Preparing to back up everything"
+    db_names=justbackupeverything
   else
-    influx_compression="-portable"
-    compression_string=" and compressing with gzip"
+    db_names=$(echo "${DB_NAME}" | tr ',' '\n')
   fi
-  for db in ${DB_NAME}; do
-    pre_dbbackup
-    target=influx_${db}_${dbhost}_${now}
-    print_notice "Dumping Influx database: '${db}' ${compression_string}"
-    influxd backup ${influx_compression} -database $db -host ${dbhost}:${dbport} ${TEMP_LOCATION}/${target}
-    exit_code=$?
-    check_exit_code $target
-    generate_checksum
-    move_dbbackup
-    post_dbbackup
-  done
+
+  case "${INFLUX_VERSION,,}" in
+    1 )
+      for db in ${db_names}; do
+        pre_dbbackup
+        if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
+        target=influx_${db}_${DB_HOST#*//}_${now}
+        compression
+        print_notice "Dumping Influx database: '${db}'"
+        influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
+        exit_code=$?
+        check_exit_code $target_dir
+        print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
+        tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
+        target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
+        generate_checksum
+        move_dbbackup
+        post_dbbackup $db
+      done
+    ;;
+    2 )
+      for db in ${db_names}; do
+        pre_dbbackup
+        if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
+        target=influx2_${db}_${DB_HOST#*//}_${now}
+        compression
+        print_notice "Dumping Influx2 database: '${db}'"
+        influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
+        exit_code=$?
+        check_exit_code $target_dir
+        create_archive
+        target=influx2_${db}_${DB_HOST#*//}_${now}.tar${extension}
+        generate_checksum
+        move_dbbackup
+        post_dbbackup $db
+      done
+    ;;
+  esac
 }

 backup_mongo() {
   pre_dbbackup
   if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
-    target=${dbtype}_${dbname}_${dbhost}_${now}.archive
+    target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
   else
-    target=${dbtype}_${dbname}_${dbhost}_${now}.archive.gz
+    target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
     mongo_compression="--gzip"
     compression_string="and compressing with gzip"
   fi
   print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
-  mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+  mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
   exit_code=$?
   check_exit_code $target
+  cd "${TEMP_LOCATION}"
   generate_checksum
   move_dbbackup
   post_dbbackup
@@ -131,14 +157,15 @@ backup_mongo() {

 backup_mssql() {
   pre_dbbackup
-  target=mssql_${dbname}_${dbhost}_${now}.bak
-  print_notice "Dumping MSSQL database: '${dbname}'"
-  /opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+  target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
+  compression
+  print_notice "Dumping MSSQL database: '${DB_NAME}'"
+  /opt/mssql-tools/bin/sqlcmd -E -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} Q "BACKUP DATABASE \[${DB_NAME}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
   exit_code=$?
   check_exit_code $target
   generate_checksum
   move_dbbackup
-  post_dbbackup
+  post_dbbackup $DB_NAME
 }

 backup_mysql() {
@@ -149,9 +176,9 @@ backup_mysql() {
     stored_procedures="--routines"
   fi

-  if [ "${dbname,,}" = "all" ] ; then
+  if [ "${DB_NAME,,}" = "all" ] ; then
     print_debug "Preparing to back up everything except for information_schema and _* prefixes"
-    db_names=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
+    db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
     if [ -n "${DB_NAME_EXCLUDE}" ] ; then
       db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
       for db_exclude in ${db_names_exclusions} ; do
@@ -160,7 +187,7 @@ backup_mysql() {
       done
     fi
   else
-    db_names=$(echo "${dbname}" | tr ',' '\n')
+    db_names=$(echo "${DB_NAME}" | tr ',' '\n')
   fi

   print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
@@ -168,37 +195,37 @@ backup_mysql() {
   if var_true "${SPLIT_DB}" ; then
     for db in ${db_names} ; do
       pre_dbbackup
-      target=mysql_${db}_${dbhost}_${now}.sql
+      target=mysql_${db}_${DB_HOST,,}_${now}.sql
       compression
       print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
-      mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+      mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
       exit_code=$?
       check_exit_code $target
       generate_checksum
       move_dbbackup
-      post_dbbackup
+      post_dbbackup $db
     done
   else
     print_debug "Not splitting database dumps into their own files"
     pre_dbbackup
-    target=mysql_all_${dbhost}_${now}.sql
+    target=mysql_all_${DB_HOST,,}_${now}.sql
     compression
     print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
-    mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+    mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
     exit_code=$?
     check_exit_code $target
     generate_checksum
     move_dbbackup
-    post_dbbackup
+    post_dbbackup all
   fi
 }

 backup_pgsql() {
-  export PGPASSWORD=${dbpass}
+  export PGPASSWORD=${DB_PASS}
   authdb=${DB_USER}
-  if [ "${dbname,,}" = "all" ] ; then
+  if [ "${DB_NAME,,}" = "all" ] ; then
     print_debug "Preparing to back up all databases"
-    db_names=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+    db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
     if [ -n "${DB_NAME_EXCLUDE}" ] ; then
       db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
       for db_exclude in ${db_names_exclusions} ; do
@@ -207,7 +234,7 @@ backup_pgsql() {
       done
     fi
   else
-    db_names=$(echo "${dbname}" | tr ',' '\n')
+    db_names=$(echo "${DB_NAME}" | tr ',' '\n')
   fi

   print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
@@ -215,23 +242,23 @@ backup_pgsql() {
   if var_true "${SPLIT_DB}" ; then
     for db in ${db_names} ; do
       pre_dbbackup
-      target=pgsql_${db}_${dbhost}_${now}.sql
+      target=pgsql_${db}_${DB_HOST,,}_${now}.sql
       compression
       print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
-      pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+      pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
       exit_code=$?
       check_exit_code $target
       generate_checksum
       move_dbbackup
-      post_dbbackup
+      post_dbbackup $db
     done
   else
     print_debug "Not splitting database dumps into their own files"
     pre_dbbackup
-    target=pgsql_all_${dbhost}_${now}.sql
+    target=pgsql_all_${DB_HOST,,}_${now}.sql
     compression
     print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
-    tmp_db_names=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+    tmp_db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
     for r_db_name in $(echo $db_names | xargs); do
       tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
     done
@@ -239,27 +266,28 @@ backup_pgsql() {
     for x_db_name in ${tmp_db_names} ; do
       pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
     done
-    pg_dumpall -h ${dbhost} -U ${dbuser} -p ${dbport} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+    pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
     exit_code=$?
     check_exit_code $target
     generate_checksum
     move_dbbackup
-    post_dbbackup
+    post_dbbackup all
   fi
 }

 backup_redis() {
   pre_dbbackup
   print_notice "Dumping Redis - Flushing Redis Cache First"
-  target=redis_${db}_${dbhost}_${now}.rdb
-  echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
+  target=redis_all_${DB_HOST,,}_${now}.rdb
+  echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
   sleep 10
   try=5
   while [ $try -gt 0 ] ; do
-    saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
-    ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
+    saved=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
+    ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
     if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
       print_notice "Redis Backup Complete"
+      exit_code=0
       break
     fi
     try=$((try - 1))
@@ -269,100 +297,118 @@
   target_original=${target}
   compression
   $compress_cmd "${TEMP_LOCATION}/${target_original}"
+  check_exit_code $target
   generate_checksum
   move_dbbackup
-  post_dbbackup
+  post_dbbackup all
 }

 backup_sqlite3() {
   pre_dbbackup
-  db=$(basename "$dbhost")
+  db=$(basename "${DB_HOST}")
   db="${db%.*}"
   target=sqlite3_${db}_${now}.sqlite3
   compression
-  print_notice "Dumping sqlite3 database: '${dbhost}' ${compression_string}"
-  sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
+  print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
+  sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
   exit_code=$?
   check_exit_code $target
-  cat "${TEMP_LOCATION}"/backup.sqlite3 | $compress_cmd > "${TEMP_LOCATION}/${target}"
+  cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
   generate_checksum
   move_dbbackup
-  post_dbbackup
+  post_dbbackup $db
 }

 check_availability() {
   ### Set the Database Type
   case "$dbtype" in
     "couch" )
-      COUNTER=0
-      while ! (nc -z ${dbhost} ${dbport}) ; do
+      counter=0
+      code_received=0
+      while [ "${code_received}" != "200" ]; do
+        code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${DB_HOST}:${DB_PORT})
+        if [ "${code_received}" = "200" ] ; then break ; fi
         sleep 5
-        (( COUNTER+=5 ))
-        print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+        (( counter+=5 ))
+        print_warn "CouchDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
       done
     ;;
     "influx" )
-      COUNTER=0
-      while ! (nc -z ${dbhost} ${dbport}) ; do
-        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
-      done
+      counter=0
+      case "${INFLUX_VERSION,,}" in
+        1 )
+          while ! (nc -z ${DB_HOST#*//} ${DB_PORT}) ; do
+            sleep 5
+            (( counter+=5 ))
+            print_warn "InfluxDB Host '${DB_HOST#*//}' is not accessible, retrying.. ($counter seconds so far)"
+          done
+        ;;
+        2 )
+          code_received=0
+          while [ "${code_received}" != "200" ]; do
+            code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${DB_HOST}:${DB_PORT}/health)
+            if [ "${code_received}" = "200" ] ; then break ; fi
+            sleep 5
+            (( counter+=5 ))
+            print_warn "InfluxDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
+          done
+        ;;
+      esac
     ;;
     "mongo" )
-      COUNTER=0
-      while ! (nc -z ${dbhost} ${dbport}) ; do
+      counter=0
+      while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+        (( counter+=5 ))
+        print_warn "Mongo Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
       done
     ;;
     "mysql" )
-      COUNTER=0
-      export MYSQL_PWD=${dbpass}
-      while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do
+      counter=0
+      export MYSQL_PWD=${DB_PASS}
+      while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)"
+        (( counter+=5 ))
+        print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
       done
     ;;
     "mssql" )
-      COUNTER=0
-      while ! (nc -z ${dbhost} ${dbport}) ; do
+      counter=0
+      while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+        (( counter+=5 ))
+        print_warn "MSSQL Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
       done
     ;;
     "pgsql" )
-      COUNTER=0
-      export PGPASSWORD=${dbpass}
-      until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
+      counter=0
+      export PGPASSWORD=${DB_PASS}
+      until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
       do
        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+        (( counter+=5 ))
+        print_warn "Postgres Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
       done
     ;;
     "redis" )
-      COUNTER=0
-      while ! (nc -z "${dbhost}" "${dbport}") ; do
+      counter=0
+      while ! (nc -z "${DB_HOST}" "${DB_PORT}") ; do
        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+        (( counter+=5 ))
+        print_warn "Redis Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
       done
     ;;
     "sqlite3" )
-      if [[ ! -e "${dbhost}" ]]; then
-        print_error "File '${dbhost}' does not exist."
+      if [[ ! -e "${DB_HOST}" ]]; then
+        print_error "File '${DB_HOST}' does not exist."
         exit_code=2
         exit $exit_code
-      elif [[ ! -f "${dbhost}" ]]; then
-        print_error "File '${dbhost}' is not a file."
+      elif [[ ! -f "${DB_HOST}" ]]; then
+        print_error "File '${DB_HOST}' is not a file."
         exit_code=2
         exit $exit_code
-      elif [[ ! -r "${dbhost}" ]]; then
-        print_error "File '${dbhost}' is not readable."
+      elif [[ ! -r "${DB_HOST}" ]]; then
+        print_error "File '${DB_HOST}' is not readable."
         exit_code=2
         exit $exit_code
       fi
@@ -386,16 +432,35 @@ check_exit_code() {
 cleanup_old_data() {
   if [ -n "${DB_CLEANUP_TIME}" ]; then
     if [ "${master_exit_code}" != 1 ]; then
+      case "${BACKUP_LOCATION,,}" in
+        "file" | "filesystem" )
           print_info "Cleaning up old backups"
           mkdir -p "${DB_DUMP_TARGET}"
           find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+        ;;
+        "s3" | "minio" )
+          print_info "Cleaning up old backups"
+          aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | while read -r s3_file; do
+            s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
+            s3_createdate=$(date -d "$s3_createdate" "+%s")
+            s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
+            if [[ $s3_createdate -le $s3_olderthan ]] ; then
+              s3_filename=$(echo $s3_file | awk {'print $4'})
+              if [ "$s3_filename" != "" ] ; then
+                print_debug "Deleting $s3_filename"
+                silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+              fi
+            fi
+          done
+        ;;
+      esac
     else
-      print_info "Skipping Cleaning up old backups because there were errors in backing up"
+      print_error "Skipping Cleaning up old backups because there were errors in backing up"
     fi
   fi
 }

 compression() {
   if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
     PARALLEL_COMPRESSION_THREADS=1
@@ -403,28 +468,42 @@ compression() {
   case "${COMPRESSION,,}" in
     gz* )
-      compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
+      compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
       compression_type="gzip"
+      extension=".gz"
+      dir_compress_cmd=${compress_cmd}
+      target_dir=${target}
       target=${target}.gz
     ;;
     bz* )
-      compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
+      compress_cmd="pbzip2 -q -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
       compression_type="bzip2"
+      dir_compress_cmd=${compress_cmd}
+      extension=".bz2"
+      target_dir=${target}
       target=${target}.bz2
    ;;
     xz* )
       compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
       compression_type="xzip"
+      dir_compress_cmd=${compress_cmd}
+      extension=".xz"
+      target_dir=${target}
       target=${target}.xz
     ;;
     zst* )
-      compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
+      compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
       compression_type="zstd"
+      dir_compress_cmd=${compress_cmd}
+      extension=".zst"
+      target_dir=${target}
       target=${target}.zst
     ;;
     "none" | "false")
       compress_cmd="cat "
       compression_type="none"
+      dir_compress_cmd="cat "
+      target_dir=${target}
     ;;
   esac
@@ -444,6 +523,15 @@
   esac
 }

+create_archive() {
+  if [ "${exit_code}" = "0" ] ; then
+    print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
+    tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
+  else
+    print_error "Skipping creating archive file because backup did not complete successfully"
+  fi
+}
+
 generate_checksum() {
   if var_true "${ENABLE_CHECKSUM}" ; then
     if [ "${exit_code}" = "0" ] ; then
@@ -464,13 +552,17 @@
       checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
       print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
     else
-      print_warn "Skipping Checksum creation because backup did not complete successfully"
+      print_error "Skipping Checksum creation because backup did not complete successfully"
     fi
   fi
 }

 move_dbbackup() {
-  case "$SIZE_VALUE" in
+  if [ "${exit_code}" = "0" ] ; then
+    dbbackup_size="$(stat -c%s "${TEMP_LOCATION}"/"${target}")"
+    dbbackup_date="$(date -r "${TEMP_LOCATION}"/"${target}" +'%s')"
+
+    case "${SIZE_VALUE,,}" in
       "b" | "bytes" )
         SIZE_VALUE=1
       ;;
@@ -482,11 +574,11 @@
       ;;
     esac
     if [ "$SIZE_VALUE" = "1" ] ; then
-      FILESIZE=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
-      print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
+      filesize=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
+      print_notice "Backup of ${target} created with the size of ${filesize} bytes"
     else
-      FILESIZE=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
-      print_notice "Backup of ${target} created with the size of ${FILESIZE}"
+      filesize=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
+      print_notice "Backup of ${target} created with the size of ${filesize}"
     fi

     case "${BACKUP_LOCATION,,}" in
@@ -512,13 +604,20 @@
       [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
-        aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
-        unset s3_ssl
-        unset s3_ca_cert
+        silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+        if var_true "${ENABLE_CHECKSUM}" ; then
+          silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+        fi
         rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
         rm -rf "${TEMP_LOCATION}"/"${target}"
       ;;
     esac
+  else
+    print_error "Skipping moving DB Backup to final location because backup did not complete successfully"
+  fi
+
+  rm -rf "${TEMP_LOCATION}"/*
 }

 pre_dbbackup() {
@@ -526,7 +625,7 @@ pre_dbbackup() {
   now=$(date +"%Y%m%d-%H%M%S")
   now_time=$(date +"%H:%M:%S")
   now_date=$(date +"%Y-%m-%d")
-  target=${dbtype}_${dbname}_${dbhost}_${now}.sql
+  target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
 }

 post_dbbackup() {
@@ -535,38 +634,54 @@ post_dbbackup() {
   if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
     print_notice "Sending Backup Statistics to Zabbix"
-    silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
-    silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
+    silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "${dbbackup_size}"
+    silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "${dbbackup_date}"
     silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
     silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
+    if [ "$?" != "0" ] ; then print_error "Error sending statistics, consider disabling with 'CONTAINER_ENABLE_MONITORING=FALSE'" ; fi
   fi

   ### Post Script Support
   if [ -n "${POST_SCRIPT}" ] ; then
+    if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
+      eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+    else
+      if [ -x "${POST_SCRIPT}" ] ; then
         print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
-    eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+        eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+      else
+        print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
+      fi
+    fi
   fi

   ### Post Backup Custom Script Support
   if [ -d "/assets/custom-scripts/" ] ; then
-    print_notice "Found Post Backup Custom Script to execute"
     for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
-      print_notice "Running Script: '${f}'"
+      if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
+        ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+      else
+        if [ -x "${f}" ] ; then
+          print_notice "Executing post backup custom script : '${f}'"
          ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
-      ${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+          ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+        else
+          print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
+        fi
+      fi
     done
   fi

-  print_notice "DB Backup for '${db}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
+  print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
+  unset s3_ssl
+  unset s3_ca_cert
 }

 sanity_test() {
   sanity_var DB_TYPE "Database Type"
   sanity_var DB_HOST "Database Host"
+  file_env 'DB_USER'
+  file_env 'DB_PASS'
-  case "${dbtype,,}" in
+  case "${DB_TYPE,,}" in
     "mysql" | "mariadb" )
       sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
     ;;
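The post_dbbackup() changes above hand each hook ten positional arguments, in the order documented by the inline comment: exit code, database type, host, database name, start epoch, finish epoch, duration, backup filename, file size and checksum. A minimal custom script consuming that contract might look like the following sketch; the script name and log destination are hypothetical, and the file must keep its executable bit unless POST_SCRIPT_SKIP_X_VERIFY=TRUE is set.

    #!/usr/bin/env bash
    # /assets/custom-scripts/99-report.sh (illustrative name)
    # Arguments, per the diff above:
    # 1=EXIT_CODE 2=DB_TYPE 3=DB_HOST 4=DB_NAME 5=STARTEPOCH
    # 6=FINISHEPOCH 7=DURATIONEPOCH 8=BACKUP_FILENAME 9=FILESIZE 10=CHECKSUMVALUE
    echo "rc=${1} ${2}/${4}@${3} file=${8} size=${9} checksum=${10} took=${7}s" >> /tmp/backups/backup.log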

db-backup service script

@@ -4,7 +4,6 @@ source /assets/functions/00-container
 source /assets/functions/10-db-backup
 source /assets/defaults/10-db-backup

 PROCESS_NAME="db-backup"
-CONTAINER_LOG_LEVEL=DEBUG

 bootstrap_variables
@@ -80,7 +79,7 @@
     cleanup_old_data

     if var_true "${manual}" ; then
-      print_debug "Exitting due to manual mode"
+      print_debug "Exiting due to manual mode"
       exit ${master_exit_code};
     else
       print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "

restore script

@@ -55,7 +55,7 @@ The image will also allow you to use environment variables or Docker secrets use
 The script can also be executed skipping the interactive mode by using the following syntax/

-    $(basename $0) <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>
+    $(basename "$0") <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>

 If you only enter some of the arguments you will be prompted to fill them in.
@@ -78,7 +78,7 @@ fi
 get_filename() {
   COLUMNS=12
   prompt="Please select a file to restore:"
-  options=( $(find ${DB_DUMP_TARGET} -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
+  options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
   PS3="$prompt "
   select opt in "${options[@]}" "Custom" "Quit" ; do
     if (( REPLY == 2 + ${#options[@]} )) ; then
@@ -110,7 +110,7 @@ get_dbhost() {
     print_debug "Parsed DBHost: ${p_dbhost}"
   fi
-  if [ -z "${dbhost}" ] && [ -z "${parsed_host}" ]; then
+  if [ -z "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
     print_debug "Parsed DBHost Variant: 1 - No Env, No Parsed Filename"
     q_dbhost_variant=1
     q_dbhost_menu=$(cat <<EOF
@@ -119,7 +119,7 @@ EOF
 )
   fi
-  if [ -n "${dbhost}" ] && [ -z "${parsed_host}" ]; then
+  if [ -n "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
     print_debug "Parsed DBHost Variant: 2 - Env, No Parsed Filename"
     q_dbhost_variant=2
     q_dbhost_menu=$(cat <<EOF
@@ -130,7 +130,7 @@ EOF
 )
   fi
-  if [ -z "${dbhost}" ] && [ -n "${parsed_host}" ]; then
+  if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
     print_debug "Parsed DBHostpho Variant: 3 - No Env, Parsed Filename"
     q_dbhost_variant=3
     q_dbhost_menu=$(cat <<EOF
@@ -141,7 +141,7 @@ EOF
 )
   fi
-  if [ -n "${dbhost}" ] && [ -n "${parsed_host}" ]; then
+  if [ -n "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
     print_debug "Parsed DBHost Variant: 4 - Env, Parsed Filename"
     q_dbhost_variant=4
     q_dbhost_menu=$(cat <<EOF
@@ -188,7 +188,7 @@ EOF
           break
         ;;
         e* | "" )
-          r_dbhost=${dbhost}
+          r_dbhost=${DB_HOST}
           break
         ;;
         q* )
@@ -241,7 +241,7 @@ EOF
           break
         ;;
         e* | "" )
-          r_dbhost=${dbhost}
+          r_dbhost=${DB_HOST}
           break
         ;;
         f* )
@@ -280,7 +280,7 @@ get_dbtype() {
     ;;
   esac
-  if [ -z "${dbtype}" ] && [ -z "${parsed_type}" ]; then
+  if [ -z "${DB_TYPE}" ] && [ -z "${parsed_type}" ]; then
     print_debug "Parsed DBType Variant: 1 - No Env, No Parsed Filename"
     q_dbtype_variant=1
     q_dbtype_menu=$(cat <<EOF
@@ -289,7 +289,7 @@ EOF
 )
   fi
-  if [ -n "${dbtype}" ] && [ -z "${parsed_type}" ]; then
+  if [ -n "${DB_TYPE}" ] && [ -z "${parsed_type}" ]; then
     print_debug "Parsed DBType Variant: 2 - Env, No Parsed Filename"
     q_dbtype_variant=2
     q_dbtype_menu=$(cat <<EOF
@@ -299,7 +299,7 @@ EOF
 )
   fi
-  if [ -z "${dbtype}" ] && [ -n "${parsed_type}" ]; then
+  if [ -z "${DB_TYPE}" ] && [ -n "${parsed_type}" ]; then
     print_debug "Parsed DBType Variant: 3 - No Env, Parsed Filename"
     q_dbtype_variant=3
     q_dbtype_menu=$(cat <<EOF
@@ -309,7 +309,7 @@ EOF
 )
   fi
-  if [ -n "${dbtype}" ] && [ -n "${parsed_type}" ]; then
+  if [ -n "${DB_TYPE}" ] && [ -n "${parsed_type}" ]; then
     print_debug "Parsed DBType Variant: 4 - Env, Parsed Filename"
     q_dbtype_variant=4
     q_dbtype_menu=$(cat <<EOF
@@ -449,7 +449,7 @@ get_dbname() {
     print_debug "Parsed DBName: ${p_dbhost}"
   fi
-  if [ -z "${dbname}" ] && [ -z "${parsed_name}" ]; then
+  if [ -z "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
     print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename"
     q_dbname_variant=1
     q_dbname_menu=$(cat <<EOF
@@ -458,7 +458,7 @@ EOF
 )
   fi
-  if [ -n "${dbname}" ] && [ -z "${parsed_name}" ]; then
+  if [ -n "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
     print_debug "Parsed DBName Variant: 2 - Env, No Parsed Filename"
     q_dbname_variant=2
     q_dbname_menu=$(cat <<EOF
@@ -469,7 +469,7 @@ EOF
 )
   fi
-  if [ -z "${dbname}" ] && [ -n "${parsed_name}" ]; then
+  if [ -z "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
     print_debug "Parsed DBName Variant: 3 - No Env, Parsed Filename"
     q_dbname_variant=3
     q_dbname_menu=$(cat <<EOF
@@ -480,7 +480,7 @@ EOF
 )
   fi
-  if [ -n "${dbname}" ] && [ -n "${parsed_name}" ]; then
+  if [ -n "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
     print_debug "Parsed DBname Variant: 4 - Env, Parsed Filename"
     q_dbname_variant=4
     q_dbname_menu=$(cat <<EOF
@@ -527,7 +527,7 @@ EOF
           break
         ;;
         e* | "" )
-          r_dbname=${dbname}
+          r_dbname=${DB_NAME}
           break
         ;;
         q* )
@@ -580,7 +580,7 @@ EOF
           break
         ;;
         e* | "" )
-          r_dbname=${dbname}
+          r_dbname=${DB_NAME}
           break
         ;;
         f* )
@@ -598,7 +598,7 @@ EOF
 }

 get_dbport() {
-  if [ -z "${dbport}" ] ; then
+  if [ -z "${DB_PORT}" ] ; then
     print_debug "Parsed DBPort Variant: 1 - No Env"
     q_dbport_variant=1
     q_dbport_menu=$(cat <<EOF
@@ -607,20 +607,20 @@ EOF
 )
   fi
-  if [ -n "${dbport}" ] ; then
+  if [ -n "${DB_PORT}" ] ; then
     print_debug "Parsed DBPort Variant: 2 - Env"
     q_dbport_variant=2
     q_dbport_menu=$(cat <<EOF
     C ) Custom Entered Database Port
-    E ) Environment Variable DB_PORT: '${dbport}'
+    E ) Environment Variable DB_PORT: '${DB_PORT}'
 EOF
 )
   fi

   cat << EOF
-What Database Port do you wish to use?
+What Database Port do you wish to use? MySQL/MariaDB typcially listens on port 3306. Postrgresql port 5432. MongoDB 27017

 ${q_dbport_menu}

 Q ) Quit
@@ -641,7 +641,7 @@ EOF
     2 )
       while true; do
         read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
-        case "${q_dbname_menu,,}" in
+        case "${q_port_menu,,}" in
          c* )
            counter=1
            q_dbport=" "
@@ -655,7 +655,7 @@ EOF
            break
          ;;
          e* | "" )
-           r_dbport=${dbport}
+           r_dbport=${DB_PORT}
            break
          ;;
          q* )
@@ -669,7 +669,7 @@ EOF
 }

 get_dbuser() {
-  if [ -z "${dbuser}" ] ; then
+  if [ -z "${DB_USER}" ] ; then
     print_debug "Parsed DBUser Variant: 1 - No Env"
     q_dbuser_variant=1
     q_dbuser_menu=$(cat <<EOF
@@ -678,7 +678,7 @@ EOF
 )
   fi
-  if [ -n "${dbuser}" ] ; then
+  if [ -n "${DB_USER}" ] ; then
     print_debug "Parsed DBUser Variant: 2 - Env"
     q_dbuser_variant=2
     q_dbuser_menu=$(cat <<EOF
@@ -724,7 +724,7 @@ EOF
           break
         ;;
         e* | "" )
-          r_dbuser=${dbuser}
+          r_dbuser=${DB_USER}
           break
         ;;
         q* )
@@ -738,7 +738,7 @@ EOF
 }

 get_dbpass() {
-  if [ -z "${dbpass}" ] ; then
+  if [ -z "${DB_PASS}" ] ; then
     print_debug "Parsed DBPass Variant: 1 - No Env"
     q_dbpass_variant=1
     q_dbpass_menu=$(cat <<EOF
@@ -747,7 +747,7 @@ EOF
 )
   fi
-  if [ -n "${dbpass}" ] ; then
+  if [ -n "${DB_PASS}" ] ; then
     print_debug "Parsed DBPass Variant: 2 - Env"
     q_dbpass_variant=2
     q_dbpass_menu=$(cat <<EOF
@@ -793,7 +793,7 @@ EOF
           break
         ;;
         e* | "" )
-          r_dbpass=${dbpass}
+          r_dbpass=${DB_PASS}
           break
         ;;
         q* )
@@ -809,7 +809,7 @@ EOF
 #### SCRIPT START

 cat << EOF
-## ${IMAGE_NAME} Restore Script Version 1.0.1
+## ${IMAGE_NAME} Restore Script
 ## Visit ${IMAGE_REPO_URL}
 ## ####################################################
@@ -829,11 +829,7 @@ print_debug "Filename to recover '${r_filename}'"
 ## Question Database Type
 if [ -n "${2}" ]; then
-  if [ ! -f "${2}" ]; then
-    get_dbtype
-  else
     r_dbtype="${2}"
-  fi
 else
   get_dbtype
 fi
@@ -841,59 +837,39 @@ print_debug "Database type '${r_dbtype}'"
 ## Question Database Host
 if [ -n "${3}" ]; then
-  if [ ! -f "${3}" ]; then
-    get_dbhost
-  else
     r_dbhost="${3}"
-  fi
 else
   get_dbhost
 fi
 print_debug "Database Host '${r_dbhost}'"

 ## Question Database Name
-if [ -n "${3}" ]; then
-  if [ ! -f "${3}" ]; then
-    get_dbname
-  else
-    r_dbname="${3}"
-  fi
+if [ -n "${4}" ]; then
+  r_dbname="${4}"
 else
   get_dbname
 fi
 print_debug "Database Name '${r_dbname}'"

 ## Question Database User
-if [ -n "${4}" ]; then
-  if [ ! -f "${4}" ]; then
-    get_dbuser
-  else
-    r_dbuser="${4}"
-  fi
+if [ -n "${5}" ]; then
+  r_dbuser="${5}"
 else
   get_dbuser
 fi
 print_debug "Database User '${r_dbuser}'"

 ## Question Database Password
-if [ -n "${5}" ]; then
-  if [ ! -f "${5}" ]; then
-    get_dbpass
-  else
-    r_dbpass="${5}"
-  fi
+if [ -n "${6}" ]; then
+  r_dbpass="${6}"
 else
   get_dbpass
 fi
 print_debug "Database Pass '${r_dbpass}'"

 ## Question Database Port
-if [ -n "${6}" ]; then
-  if [ ! -f "${6}" ]; then
-    get_dbport
-  else
-    r_dbport="${6}"
-  fi
+if [ -n "${7}" ]; then
+  r_dbport="${7}"
 else
   get_dbport
 fi
@@ -944,12 +920,12 @@ case "${r_dbtype}" in
       mongo_compression="--gzip"
     fi
     if [ -n "${r_dbuser}" ] ; then
-      mongo_user="-u ${r_dbuser}"
+      mongo_user="-u=${r_dbuser}"
     fi
     if [ -n "${r_dbpass}" ] ; then
-      mongo_pass="-u ${r_dbpass}"
+      mongo_pass="-p=${r_dbpass}"
     fi
-    mongorestore ${mongo_compression} -d ${r_dbname} -h ${r_dbhost} --port ${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
+    mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
     exit_code=$?
   ;;
   * )
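Given the seven-argument order shown at the top of this file, a non-interactive restore could be invoked along these lines. The file name and connection values below are placeholders, and the script is assumed to be available on the PATH inside the container (e.g. as restore); any argument left out is asked for interactively.

    # <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>
    restore /backup/mysql_mydb_db-host_20220623-114928.sql.gz mysql db-host mydb exampleuser examplepass 3306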