Compare commits

...

26 Commits

Author SHA1 Message Date
Dave Conroy
c1bdf26598 Release 3.2.4 - See CHANGELOG.md 2022-04-21 16:04:43 -07:00
Dave Conroy
5a4cac2cee Release 3.2.3 - See CHANGELOG.md 2022-04-21 15:46:27 -07:00
Dave Conroy
c04eec7661 Add space after compress_cmd 2022-04-21 14:19:09 -07:00
Dave Conroy
32f1959a07 Merge pull request #120 from joergmschulz/patch-1
small typo / exiting instead of exitting
2022-04-21 14:18:43 -07:00
joergmschulz
d384d5a529 small typo / exiting instead of exitting 2022-04-21 23:16:02 +02:00
Dave Conroy
56ab68dd71 Release 3.2.2 - See CHANGELOG.md 2022-04-21 12:14:17 -07:00
Dave Conroy
9a1a5efbd9 Do a different DB Ready check for Influx 1 vs 2 2022-04-21 12:12:57 -07:00
Dave Conroy
df5532c128 Fix blank database size when compression type=NONE 2022-04-21 12:08:51 -07:00
Dave Conroy
2ecd313778 Change database variables around 2022-04-21 12:08:27 -07:00
Dave Conroy
55cfe5ab02 Release 3.2.1 - See CHANGELOG.md 2022-04-03 10:28:07 -07:00
Dave Conroy
ae2e2c38e2 Sanitize DB_HOST for URLs 2022-04-02 07:37:34 -07:00
Dave Conroy
c23d7991fe Release 3.2.0 - See CHANGELOG.md 2022-04-01 18:41:58 -07:00
Dave Conroy
3f58cfd284 Release 3.1.3 - See CHANGELOG.md 2022-03-30 10:46:16 -07:00
Dave Conroy
2d01f5e692 Fix for MARIADB not sending DB name for post script 2022-03-30 10:45:27 -07:00
Dave Conroy
dbd0a03b0d SPLIT_DB is supposed to be TRUE 2022-03-30 10:43:22 -07:00
Dave Conroy
6527f4ff63 Add sanity checks for Post scripts to be executible 2022-03-30 10:37:58 -07:00
Dave Conroy
d843d21a1b Release 3.1.2 - See CHANGELOG.md 2022-03-29 08:09:36 -07:00
Dave Conroy
24ed769429 Release 3.1.1 - See CHANGELOG.md 2022-03-28 10:29:00 -07:00
Dave Conroy
cbd87a5ede Update README.md 2022-03-23 19:16:51 -07:00
Dave Conroy
13214665c9 Release 3.1.0 - See CHANGELOG.md 2022-03-23 16:21:12 -07:00
Dave Conroy
2e71f617a1 Merge pull request #107 from piemonkey/mongo-restore
Add Mongo support to restore script
2022-03-23 12:24:43 -07:00
Dave Conroy
fbe9dde4a1 Release 3.0.16 - See CHANGELOG.md 2022-03-23 07:57:28 -07:00
Rich
e7eb88c32a Give feedback if restore script doesn't support db type 2022-03-22 09:12:34 +01:00
Rich
52dc510b89 Add auto restore support for mongodb 2022-03-22 09:12:16 +01:00
Rich
06677dbc8b Fix typo setting dbhost in restore script 2022-03-22 09:12:05 +01:00
Rich
e0dd2bc91b Set db type from env vars correctly during restore 2022-03-22 09:11:54 +01:00
7 changed files with 639 additions and 361 deletions

View File

@@ -1,3 +1,85 @@
## 3.2.4 2022-04-21 <dave at tiredofit dot ca>
### Changed
- Add -portable flag when backing up Influx
## 3.2.3 2022-04-21 <dave at tiredofit dot ca>
### Changed
- Fix for bucket / db name InfluxDB 1.xx
- Minor aesthetics, spacing, spelling
## 3.2.2 2022-04-21 <dave at tiredofit dot ca>
### Changed
- Restore script properly parses DB_PORT entry
- Influx Database ready performs different checks dependent on V1/V2
- Stop using weird database lowercase variables unnecessarily breaking Docker Secrets
## 3.2.1 2022-04-03 <dave at tiredofit dot ca>
### Changed
- Fix a variety of issues with 3.2.0 relating to InfluxDB
## 3.2.0 2022-03-31 <dave at tiredofit dot ca>
### Added
- Install InfluxDB2 Client alongside version 1 (amd64 and arm64)
- Introduce InfluxDB 2 backup support
- Introduce multiple compression types other than Gzip for Influx 1/2
- Introduce compression for MSSQL backups
### Changed
- Testing for Host availability for CouchDB and InfluxDB
## 3.1.3 2022-03-30 <dave at tiredofit dot ca>
### Changed
- Fix for MariaDB not sending database name to post script
- Check for executable bit on post scripts both via environment variable and /assets/custom
- SPLIT_DB defaulted to TRUE
## 3.1.2 2022-03-29 <dave at tiredofit dot ca>
### Changed
- Fix for blank Notice when individual backup is completed (time taken)
## 3.1.1 2022-03-28 <dave at tiredofit dot ca>
### Changed
- Resolve some issues with backups of Mongo and others not saving the proper timestamp
## 3.1.0 2022-03-23 <dave at tiredofit dot ca>
### Added
- Backup multiple databases by separating with comma e.g. db1,db2
- Backup ALL databases by setting DB_NAME to ALL
- Exclude databases from being backed up, comma separated, when DB_NAME is all eg DB_NAME_EXCLUDE=db3,db4
- Backup timers execute per database, not per the whole script run
- Post scripts run after each database backup
- Checksum does not occur when database backup failed
- Database cleanup does not occur when any databases backups fail throughout session
- MongoDB now supported with 'restore' script - Credit to piemonkey@github
- Lots of reshuffling, optimizations with script due to botched 3.0 release
### Changed
- ZSTD replaces GZ as default compression type
- Output is cleaner when backups are occurring
## 3.0.16 2022-03-23 <dave at tiredofit dot ca>
### Changed
- Fix for SPLIT_DB not looping through all database names properly
## 3.0.15 2022-03-22 <dave at tiredofit dot ca>
### Changed
@@ -216,7 +298,7 @@
## 2.9.2 2021-10-22 <teenigma@github>
### Fixed
- Fix compression failing on Redis backup

View File

@@ -2,7 +2,9 @@ FROM docker.io/tiredofit/alpine:3.15
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables
ENV MSSQL_VERSION=17.8.1.1-1 \
ENV INFLUX2_VERSION=2.2.1 \
MSSQL_VERSION=17.8.1.1-1 \
CONTAINER_ENABLE_MESSAGING=FALSE \
CONTAINER_ENABLE_MONITORING=TRUE \
CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
@@ -44,9 +46,14 @@ RUN set -ex && \
\
apkArch="$(apk --print-arch)"; \
case "$apkArch" in \
x86_64) mssql=true ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ;; \
*) echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ;; \
x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
aarch64 ) influx2=true ; influx_arch=arm64 ;; \
*) sleep 0.1 ;; \
esac; \
\
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
\
mkdir -p /usr/src/pbzip2 && \
curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
cd /usr/src/pbzip2 && \

View File

@@ -16,7 +16,8 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
* dump to local filesystem or backup to S3 Compatible services
* select database user and password
* backup all databases
* backup all databases, single, or multiple databases
* backup all to separate files or one singular file
* choose to have an MD5 or SHA1 sum after backup for verification
* delete old backups after specific amount of time
* choose compression type (none, gz, bz, xz, zstd)
@@ -49,6 +50,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
- [Base Images used](#base-images-used)
- [Container Options](#container-options)
- [Database Specific Options](#database-specific-options)
- [For Influx DB2:](#for-influx-db2)
- [Scheduling Options](#scheduling-options)
- [Backup Options](#backup-options)
- [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
@@ -127,24 +129,30 @@ Be sure to view the following repositories to understand all the customizable op
| `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` |
| `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
| `POST_SCRIPT` | Fill this variable in with a command to execute post the script backing up | |
| `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create Separate DB Backups instead of all in one. | `FALSE` |
| `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE`
### Database Specific Options
| Parameter | Description | Default |
| --------- | --------------------------------------------------------------------------------------------- | ------- |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
| `DB_NAME` | Schema Name e.g. `database` | |
| `DB_USER` | username for the database - use `root` to backup all MySQL of them. | |
| `DB_PASS` | (optional if DB doesn't require it) password for the database | |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
| Parameter | Description | Default |
| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
| `DB_NAME`         | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas eg `db1,db2`  |         |
| `DB_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases separated via commas from being backed up                                                  |         |
| `DB_USER` | username for the database(s) - Can use `root` for MySQL | |
| `DB_PASS` | (optional if DB doesn't require it) password for the database | |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
| `INFLUX_VERSION`  | What Version of Influx are you backing up from, `1.x` or `2.x` series - AMD64 and ARM64 only for `2`                                         |         |
#### For Influx DB2:
Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST` use syntax of `http(s)://db-name`
### Scheduling Options
| Parameter | Description | Default |
| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
@@ -154,7 +162,7 @@ Be sure to view the following repositories to understand all the customizable op
### Backup Options
| Parameter | Description | Default |
| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | -------------- |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `GZ` |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
| `COMPRESSION_LEVEL`            | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` -    | `3`            |
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
@@ -187,7 +195,6 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.
## Maintenance
### Shell Access
For debugging and maintenance purposes you may want access the containers shell.
@@ -201,7 +208,7 @@ Manual Backups can be performed by entering the container and typing `backup-now
- Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup by executing and then exit cleanly. An alternative way to running the script is to execute `/etc/services.available/10-db-backup/run`.
### Restoring Databases
Entering in the container and executing `restore` will execute a menu based script to restore your backups.
Entering in the container and executing `restore` will execute a menu based script to restore your backups - MariaDB, Postgres, and Mongo supported.
You will be presented with a series of menus allowing you to choose:
- What file to restore

View File

@@ -2,7 +2,7 @@
BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
CHECKSUM=${CHECKSUM:-"MD5"}
COMPRESSION=${COMPRESSION:-"GZ"}
COMPRESSION=${COMPRESSION:-"ZSTD"}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
@@ -18,10 +18,5 @@ PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SPLIT_DB=${SPLIT_DB:-"FALSE"}
SPLIT_DB=${SPLIT_DB:-"TRUE"}
TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
dbhost=${DB_HOST}
dbname=${DB_NAME}
dbpass=${DB_PASS}
dbtype=${DB_TYPE}
dbuser=${DB_USER}

View File

@@ -1,29 +1,31 @@
#!/command/with-contenv bash
bootstrap_variables() {
case "${dbtype,,}" in
case "${DB_TYPE,,}" in
couch* )
dbtype=couch
dbport=${DB_PORT:-5984}
DB_PORT=${DB_PORT:-5984}
file_env 'DB_USER'
file_env 'DB_PASS'
;;
influx* )
dbtype=influx
dbport=${DB_PORT:-8088}
DB_PORT=${DB_PORT:-8088}
file_env 'DB_USER'
file_env 'DB_PASS'
sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
;;
mongo* )
dbtype=mongo
dbport=${DB_PORT:-27017}
DB_PORT=${DB_PORT:-27017}
[[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
;;
"mysql" | "mariadb" )
dbtype=mysql
dbport=${DB_PORT:-3306}
DB_PORT=${DB_PORT:-3306}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
;;
"mssql" | "microsoftsql" )
apkArch="$(apk --print-arch)"; \
@@ -32,16 +34,17 @@ bootstrap_variables() {
*) print_error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;;
esac
dbtype=mssql
dbport=${DB_PORT:-1433}
DB_PORT=${DB_PORT:-1433}
;;
postgres* | "pgsql" )
dbtype=pgsql
dbport=${DB_PORT:-5432}
DB_PORT=${DB_PORT:-5432}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
;;
"redis" )
dbtype=redis
dbport=${DB_PORT:-6379}
DB_PORT=${DB_PORT:-6379}
[[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
;;
sqlite* )
@@ -57,77 +60,112 @@ bootstrap_variables() {
### Set the Database Authentication Details
case "$dbtype" in
"mongo" )
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
[[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
;;
"mysql" )
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
;;
"postgres" )
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
;;
"redis" )
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
;;
esac
}
backup_couch() {
target=couch_${dbname}_${dbhost}_${now}.txt
pre_dbbackup
target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
compression
print_notice "Dumping CouchDB database: '${dbname}'"
curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > ${TEMP_LOCATION}/${target}
print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
curl -sSL -X GET ${DB_HOST}:${DB_PORT}/${DB_NAME}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code
check_exit_code $target
generate_checksum
move_backup
move_dbbackup
post_dbbackup ${DB_NAME}
}
backup_influx() {
if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
:
if [ "${DB_NAME,,}" = "all" ] ; then
print_debug "Preparing to back up everything"
db_names=justbackupeverything
else
print_notice "Compressing InfluxDB backup with gzip"
influx_compression="-portable"
db_names=$(echo "${DB_NAME}" | tr ',' '\n')
fi
for DB in ${DB_NAME}; do
print_notice "Dumping Influx database: '${DB}'"
target=influx_${DB}_${dbhost}_${now}
influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code
generate_checksum
move_backup
done
case "${INFLUX_VERSION,,}" in
1 )
for db in ${db_names}; do
pre_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
target=influx_${db}_${DB_HOST#*//}_${now}
compression
print_notice "Dumping Influx database: '${db}'"
influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
generate_checksum
move_dbbackup
post_dbbackup $db
done
;;
2 )
for db in ${db_names}; do
pre_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
target=influx2_${db}_${DB_HOST#*//}_${now}
compression
print_notice "Dumping Influx2 database: '${db}'"
influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
create_archive
target=influx2_${db}_${DB_HOST#*//}_${now}.tar${extension}
generate_checksum
move_dbbackup
post_dbbackup $db
done
;;
esac
}
backup_mongo() {
pre_dbbackup
if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
target=${dbtype}_${dbname}_${dbhost}_${now}.archive
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
else
print_notice "Compressing MongoDB backup with gzip"
target=${dbtype}_${dbname}_${dbhost}_${now}.archive.gz
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
mongo_compression="--gzip"
compression_string="and compressing with gzip"
fi
print_notice "Dumping MongoDB database: '${DB_NAME}'"
mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
exit_code=$?
check_exit_code
cd "${TEMP_LOCATION}"
check_exit_code $target
generate_checksum
move_backup
move_dbbackup
post_dbbackup
}
backup_mssql() {
target=mssql_${dbname}_${dbhost}_${now}.bak
print_notice "Dumping MSSQL database: '${dbname}'"
/opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
pre_dbbackup
target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
compression
print_notice "Dumping MSSQL database: '${DB_NAME}'"
/opt/mssql-tools/bin/sqlcmd -E -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} Q "BACKUP DATABASE \[${DB_NAME}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
check_exit_code
check_exit_code $target
generate_checksum
move_backup
move_dbbackup
post_dbbackup $DB_NAME
}
backup_mysql() {
@@ -137,71 +175,116 @@ backup_mysql() {
if var_true "${MYSQL_STORED_PROCEDURES}" ; then
stored_procedures="--routines"
fi
if [ "${DB_NAME,,}" = "all" ] ; then
print_debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
print_debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
done
fi
else
db_names=$(echo "${DB_NAME}" | tr ',' '\n')
fi
print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${SPLIT_DB}" ; then
DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema)
for db in "${DATABASES}" ; do
if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
print_debug "Backing up everything except for information_schema and _* prefixes"
print_notice "Dumping MySQL/MariaDB database: '${db}'"
target=mysql_${db}_${dbhost}_${now}.sql
compression
set -x
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
set +x
check_exit_code
generate_checksum
move_backup
fi
for db in ${db_names} ; do
pre_dbbackup
target=mysql_${db}_${DB_HOST,,}_${now}.sql
compression
print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup $db
done
else
print_debug "Not splitting database dumps into their own files"
pre_dbbackup
target=mysql_all_${DB_HOST,,}_${now}.sql
compression
print_notice "Dumping MySQL/MariaDB database: '${DB_NAME}'"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -A -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code
check_exit_code $target
generate_checksum
move_backup
move_dbbackup
post_dbbackup all
fi
}
backup_pgsql() {
export PGPASSWORD=${dbpass}
if var_true "${SPLIT_DB}" ; then
authdb=${DB_USER}
[ -n "${DB_NAME}" ] && authdb=${DB_NAME}
DATABASES=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
for db in "${DATABASES}"; do
print_notice "Dumping Postgresql database: $db"
target=pgsql_${db}_${dbhost}_${now}.sql
compression
pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code
generate_checksum
move_backup
export PGPASSWORD=${DB_PASS}
authdb=${DB_USER}
if [ "${DB_NAME,,}" = "all" ] ; then
print_debug "Preparing to back up all databases"
db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
print_debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
done
else
compression
print_notice "Dumping PostgreSQL: '${DB_NAME}'"
pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code
generate_checksum
move_backup
fi
fi
else
db_names=$(echo "${DB_NAME}" | tr ',' '\n')
fi
print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${SPLIT_DB}" ; then
for db in ${db_names} ; do
pre_dbbackup
target=pgsql_${db}_${DB_HOST,,}_${now}.sql
compression
print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup $db
done
else
print_debug "Not splitting database dumps into their own files"
pre_dbbackup
target=pgsql_all_${DB_HOST,,}_${now}.sql
compression
print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
tmp_db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
for r_db_name in $(echo $db_names | xargs); do
tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
done
sleep 5
for x_db_name in ${tmp_db_names} ; do
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup all
fi
}
backup_redis() {
target=redis_${db}_${dbhost}_${now}.rdb
echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
pre_dbbackup
print_notice "Dumping Redis - Flushing Redis Cache First"
target=redis_all_${DB_HOST,,}_${now}.rdb
echo bgsave | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
sleep 10
try=5
while [ $try -gt 0 ] ; do
saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
saved=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
print_notice "Redis Backup Complete"
break
@@ -214,97 +297,116 @@ backup_redis() {
compression
$compress_cmd "${TEMP_LOCATION}/${target_original}"
generate_checksum
move_backup
move_dbbackup
post_dbbackup all
}
backup_sqlite3() {
db=$(basename "$dbhost")
pre_dbbackup
db=$(basename "${DB_HOST}")
db="${db%.*}"
target=sqlite3_${db}_${now}.sqlite3
compression
print_notice "Dumping sqlite3 database: '${dbhost}'"
sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
exit_code=$?
check_exit_code
cat "${TEMP_LOCATION}"/backup.sqlite3 | $compress_cmd > "${TEMP_LOCATION}/${target}"
check_exit_code $target
cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
generate_checksum
move_backup
move_dbbackup
post_dbbackup $db
}
check_availability() {
### Set the Database Type
case "$dbtype" in
"couch" )
COUNTER=0
while ! (nc -z ${dbhost} ${dbport}) ; do
counter=0
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${DB_HOST}:${DB_PORT})
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( COUNTER+=5 ))
print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
(( counter+=5 ))
print_warn "CouchDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"influx" )
COUNTER=0
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
(( COUNTER+=5 ))
print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done
counter=0
case "${INFLUX_VERSION,,}" in
1 )
while ! (nc -z ${DB_HOST#*//} ${DB_PORT}) ; do
sleep 5
(( counter+=5 ))
print_warn "InfluxDB Host '${DB_HOST#*//}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
2 )
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${DB_HOST}:${DB_PORT}/health)
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
print_warn "InfluxDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
esac
;;
"mongo" )
COUNTER=0
while ! (nc -z ${dbhost} ${dbport}) ; do
counter=0
while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
sleep 5
(( COUNTER+=5 ))
print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
(( counter+=5 ))
print_warn "Mongo Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"mysql" )
COUNTER=0
export MYSQL_PWD=${dbpass}
while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do
counter=0
export MYSQL_PWD=${DB_PASS}
while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
sleep 5
(( COUNTER+=5 ))
print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)"
(( counter+=5 ))
print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
done
;;
"mssql" )
COUNTER=0
while ! (nc -z ${dbhost} ${dbport}) ; do
counter=0
while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
sleep 5
(( COUNTER+=5 ))
print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
(( counter+=5 ))
print_warn "MSSQL Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"pgsql" )
COUNTER=0
export PGPASSWORD=${dbpass}
until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
counter=0
export PGPASSWORD=${DB_PASS}
until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
do
sleep 5
(( COUNTER+=5 ))
print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
(( counter+=5 ))
print_warn "Postgres Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"redis" )
COUNTER=0
while ! (nc -z "${dbhost}" "${dbport}") ; do
counter=0
while ! (nc -z "${DB_HOST}" "${DB_PORT}") ; do
sleep 5
(( COUNTER+=5 ))
print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
(( counter+=5 ))
print_warn "Redis Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"sqlite3" )
if [[ ! -e "${dbhost}" ]]; then
print_error "File '${dbhost}' does not exist."
if [[ ! -e "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' does not exist."
exit_code=2
exit $exit_code
elif [[ ! -f "${dbhost}" ]]; then
print_error "File '${dbhost}' is not a file."
elif [[ ! -f "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' is not a file."
exit_code=2
exit $exit_code
elif [[ ! -r "${dbhost}" ]]; then
print_error "File '${dbhost}' is not readable."
elif [[ ! -r "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' is not readable."
exit_code=2
exit $exit_code
fi
@@ -313,17 +415,30 @@ check_availability() {
}
check_exit_code() {
print_debug "Exit Code is ${exit_code}"
print_debug "DB Backup Exit Code is ${exit_code}"
case "${exit_code}" in
0 )
print_info "Backup completed successfully"
print_info "DB Backup of '${1}' completed successfully"
;;
* )
print_error "Backup reported errors"
print_error "DB Backup of '${1}' reported errors"
master_exit_code=1
;;
esac
}
cleanup_old_data() {
if [ -n "${DB_CLEANUP_TIME}" ]; then
if [ "${master_exit_code}" != 1 ]; then
print_info "Cleaning up old backups"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
else
print_info "Skipping Cleaning up old backups because there were errors in backing up"
fi
fi
}
compression() {
if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
PARALLEL_COMPRESSION_THREADS=1
@@ -331,114 +446,211 @@ compression() {
case "${COMPRESSION,,}" in
gz* )
print_notice "Compressing backup with gzip"
print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
target=${target}.gz
compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compression_type="gzip"
extension=".gz"
dir_compress_cmd=${compress_cmd}
target_dir=${target}
target=${target}.gz
;;
bz* )
print_notice "Compressing backup with bzip2"
print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
target=${target}.bz2
compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
compression_type="bzip2"
dir_compress_cmd=${compress_cmd}
extension=".bz2"
target_dir=${target}
target=${target}.bz2
;;
xz* )
print_notice "Compressing backup with xzip"
print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
target=${target}.xz
compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compression_type="xzip"
dir_compress_cmd=${compress_cmd}
extension=".xz"
target_dir=${target}
target=${target}.xz
;;
zst* )
print_notice "Compressing backup with zstd"
print_debug "Compression Level: '${COMPRESSION_LEVEL}'"
compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
compression_type="zstd"
dir_compress_cmd=${compress_cmd}
extension=".zst"
target_dir=${target}
target=${target}.zst
compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
;;
"none" | "false")
print_notice "Not compressing backups"
compress_cmd="cat "
compression_type="none"
dir_compress_cmd="cat "
target_dir=${target}
;;
esac
case "${CONTAINER_LOG_LEVEL,,}" in
"debug" )
if [ "${compression_type}" = "none" ] ; then
compression_string="with '${PARALLEL_COMPRESSION_THREADS}' threads"
else
compression_string="and compressing with '${compression_type}:${COMPRESSION_LEVEL}' with '${PARALLEL_COMPRESSION_THREADS}' threads"
fi
;;
* )
if [ "${compression_type}" != "none" ] ; then
compression_string="and compressing with '${compression_type}'"
fi
;;
esac
}
create_archive() {
if [ "${exit_code}" = "0" ] ; then
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
else
print_warn "Skipping creating archive file because backup did not complete successfully"
fi
}
generate_checksum() {
if var_true "${ENABLE_CHECKSUM}" ; then
case "${CHECKSUM,,}" in
"md5" )
checksum_command="md5sum"
checksum_extension="md5"
;;
"sha1" )
checksum_command="sha1sum"
checksum_extension="sha1"
;;
esac
if var_true "${ENABLE_CHECKSUM}" ;then
if [ "${exit_code}" = "0" ] ; then
case "${CHECKSUM,,}" in
"md5" )
checksum_command="md5sum"
checksum_extension="md5"
;;
"sha1" )
checksum_command="sha1sum"
checksum_extension="sha1"
;;
esac
print_notice "Generating ${checksum_extension^^} for '${target}'"
cd "${TEMP_LOCATION}"
${checksum_command} "${target}" > "${target}"."${checksum_extension}"
checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
print_notice "Generating ${checksum_extension^^} for '${target}'"
cd "${TEMP_LOCATION}"
${checksum_command} "${target}" > "${target}"."${checksum_extension}"
checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
else
print_warn "Skipping Checksum creation because backup did not complete successfully"
fi
fi
}
move_backup() {
case "$SIZE_VALUE" in
"b" | "bytes" )
SIZE_VALUE=1
;;
"[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
SIZE_VALUE="-h"
;;
*)
SIZE_VALUE=1
;;
esac
if [ "$SIZE_VALUE" = "1" ] ; then
FILESIZE=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
move_dbbackup() {
if [ "${exit_code}" = "0" ] ; then
case "${SIZE_VALUE,,}" in
"b" | "bytes" )
SIZE_VALUE=1
;;
"[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
SIZE_VALUE="-h"
;;
*)
SIZE_VALUE=1
;;
esac
if [ "$SIZE_VALUE" = "1" ] ; then
filesize=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
print_notice "Backup of ${target} created with the size of ${filesize} bytes"
else
filesize=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
print_notice "Backup of ${target} created with the size of ${filesize}"
fi
case "${BACKUP_LOCATION,,}" in
"file" | "filesystem" )
print_debug "Moving backup to filesystem"
mkdir -p "${DB_DUMP_TARGET}"
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
;;
"s3" | "minio" )
print_debug "Moving backup to S3 Bucket"
export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
export AWS_DEFAULT_REGION=${S3_REGION}
if [ -f "${S3_CERT_CA_FILE}" ] ; then
print_debug "Using Custom CA for S3 Backups"
s3_ca_cert="--ca-bundle ${S3_CERT_CA_FILE}"
fi
if var_true "${S3_CERT_SKIP_VERIFY}" ; then
print_debug "Skipping SSL verification for HTTPS S3 Hosts"
s3_ssl="--no-verify-ssl"
fi
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
unset s3_ssl
unset s3_ca_cert
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
esac
else
FILESIZE=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
print_notice "Backup of ${target} created with the size of ${FILESIZE}"
print_warn "Skipping moving DB Backup to final location because backup did not complete successfully"
fi
case "${BACKUP_LOCATION,,}" in
"file" | "filesystem" )
print_debug "Moving backup to filesystem"
mkdir -p "${DB_DUMP_TARGET}"
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
;;
"s3" | "minio" )
print_debug "Moving backup to S3 Bucket"
export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
export AWS_DEFAULT_REGION=${S3_REGION}
if [ -f "${S3_CERT_CA_FILE}" ] ; then
print_debug "Using Custom CA for S3 Backups"
s3_ca_cert="--ca-bundle ${S3_CERT_CA_FILE}"
fi
if var_true "${S3_CERT_SKIP_VERIFY}" ; then
print_debug "Skipping SSL verification for HTTPS S3 Hosts"
s3_ssl="--no-verify-ssl"
fi
rm -rf "${TEMP_LOCATION}"/*
}
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
pre_dbbackup() {
dbbackup_start_time=$(date +"%s")
now=$(date +"%Y%m%d-%H%M%S")
now_time=$(date +"%H:%M:%S")
now_date=$(date +"%Y-%m-%d")
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
}
aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
unset s3_ssl
unset s3_ca_cert
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
esac
post_dbbackup() {
dbbackup_finish_time=$(date +"%s")
dbbackup_total_time=$(echo $((dbbackup_finish_time-dbbackup_start_time)))
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_notice "Sending Backup Statistics to Zabbix"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
fi
### Post Script Support
if [ -n "${POST_SCRIPT}" ] ; then
if [ -x "${POST_SCRIPT}" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
fi
fi
### Post Backup Custom Script Support
if [ -d "/assets/custom-scripts/" ] ; then
for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
if [ -x "${f}" ] ; then
print_notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
fi
done
fi
print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
}
sanity_test() {
sanity_var DB_TYPE "Database Type"
sanity_var DB_HOST "Database Host"
file_env 'DB_USER'
file_env 'DB_PASS'
case "${DB_TYPE,,}" in
"mysql" | "mariadb" )
sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
;;
postgres* | "pgsql" )
sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
;;
esac
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
sanity_var S3_BUCKET "S3 Bucket"
@@ -481,3 +693,4 @@ EOF
fi
fi
}

View File

@@ -8,62 +8,34 @@ CONTAINER_LOG_LEVEL=DEBUG
bootstrap_variables
if [ "${MODE,,}" = "manual" ] ; then
if [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
DB_DUMP_BEGIN=+0
manual=TRUE
print_debug "Detected Manual Mode"
else
sleep 5
current_time=$(date +"%s")
today=$(date +"%Y%m%d")
if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
waittime=$(( ${BASH_REMATCH[1]} * 60 ))
target_time=$(($current_time + $waittime))
else
target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
if [[ "$target_time" < "$current_time" ]]; then
target_time=$(($target_time + 24*60*60))
fi
waittime=$(($target_time - $current_time))
fi
print_debug "Wait Time: ${waittime} Target time: ${target_time} Current Time: ${current_time}"
print_info "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
sleep $waittime
fi
case "${1,,}" in
"now" | "manual" )
DB_DUMP_BEGIN=+0
manual=TRUE
;;
* )
sleep 5
;;
esac
### Container Startup
print_debug "Backup routines Initialized on $(date +'%Y-%m-%d %H:%M:%S')"
### Wait for Next time to start backup
case "${1,,}" in
"now" | "manual" )
:
;;
* )
if [ "${manual,,}" != "true" ]; then
current_time=$(date +"%s")
today=$(date +"%Y%m%d")
if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
waittime=$(( ${BASH_REMATCH[1]} * 60 ))
target_time=$(($current_time + $waittime))
else
target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
if [[ "$target_time" < "$current_time" ]]; then
target_time=$(($target_time + 24*60*60))
fi
waittime=$(($target_time - $current_time))
fi
print_debug "Wait Time: ${waittime} Target time: ${target_time} Current Time: ${current_time}"
print_info "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
sleep $waittime
fi
;;
esac
### Commence Backup
while true; do
mkdir -p "${TEMP_LOCATION}"
backup_start_time=$(date +"%s")
now=$(date +"%Y%m%d-%H%M%S")
now_time=$(date +"%H:%M:%S")
now_date=$(date +"%Y-%m-%d")
target=${dbtype}_${dbname}_${dbhost}_${now}.sql
### Take a Dump
print_debug "Backup routines started time: $(date +'%Y-%m-%d %T %Z')"
case "${dbtype,,}" in
"couch" )
check_availability
@@ -101,47 +73,16 @@ while true; do
backup_finish_time=$(date +"%s")
backup_total_time=$(echo $((backup_finish_time-backup_start_time)))
if [ -z "$master_exit_code" ] ; then master_exit_code="0" ; fi
print_info "Backup routines finish time: $(date -d @${backup_finish_time} +"%Y-%m-%d %T %Z") with overall exit code ${master_exit_code}"
print_notice "Backup routines time taken: $(echo ${backup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
print_info "Backup finish time: $(date -d @${backup_finish_time} +"%Y-%m-%d %T %Z") with exit code ${exit_code}"
print_notice "Backup time elapsed: $(echo ${backup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
### Zabbix / Monitoring stats
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_notice "Sending Backup Statistics to Zabbix"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((backup_finish_time-backup_start_time)))"
fi
### Automatic Cleanup
if [ -n "${DB_CLEANUP_TIME}" ]; then
print_info "Cleaning up old backups"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
fi
### Post Script Support
if [ -n "${POST_SCRIPT}" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
fi
### Post Backup Custom Script Support
if [ -d "/assets/custom-scripts/" ] ; then
print_notice "Found Post Backup Custom Script to execute"
for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
print_notice "Running Script: '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
done
fi
cleanup_old_data
if var_true "${manual}" ; then
print_debug "Exitting due to manual mode"
exit ${exit_code};
print_debug "Exiting due to manual mode"
exit ${master_exit_code};
else
### Go back to sleep until next backup time
print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "
sleep $(($DB_DUMP_FREQ*60-backup_total_time))
fi

View File

@@ -110,7 +110,7 @@ get_dbhost() {
print_debug "Parsed DBHost: ${p_dbhost}"
fi
if [ -z "${dbhost}" ] && [ -z "${parsed_host}" ]; then
if [ -z "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 1 - No Env, No Parsed Filename"
q_dbhost_variant=1
q_dbhost_menu=$(cat <<EOF
@@ -119,7 +119,7 @@ EOF
)
fi
if [ -n "${dbhost}" ] && [ -z "${parsed_host}" ]; then
if [ -n "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 2 - Env, No Parsed Filename"
q_dbhost_variant=2
q_dbhost_menu=$(cat <<EOF
@@ -130,7 +130,7 @@ EOF
)
fi
if [ -z "${dbhost}" ] && [ -n "${parsed_host}" ]; then
if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
print_debug "Parsed DBHostpho Variant: 3 - No Env, Parsed Filename"
q_dbhost_variant=3
q_dbhost_menu=$(cat <<EOF
@@ -141,7 +141,7 @@ EOF
)
fi
if [ -n "${dbhost}" ] && [ -n "${parsed_host}" ]; then
if [ -n "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 4 - Env, Parsed Filename"
q_dbhost_variant=4
q_dbhost_menu=$(cat <<EOF
@@ -188,7 +188,7 @@ EOF
break
;;
e* | "" )
r_dbhost=${dbhost}
r_dbhost=${DB_HOST}
break
;;
q* )
@@ -241,7 +241,7 @@ EOF
break
;;
e* | "" )
r_dbhost=${dbhost}
r_dbhost=${DB_HOST}
break
;;
f* )
@@ -263,6 +263,10 @@ get_dbtype() {
p_dbtype=$(basename -- "${r_filename}" | cut -d _ -f 1)
case "${p_dbtype}" in
mongo* )
parsed_type=true
print_debug "Parsed DBType: MongoDB"
;;
mariadb | mysql )
parsed_type=true
print_debug "Parsed DBType: MariaDB/MySQL"
@@ -276,7 +280,7 @@ get_dbtype() {
;;
esac
if [ -z "${dbtype}" ] && [ -z "${parsed_type}" ]; then
if [ -z "${DB_TYPE}" ] && [ -z "${parsed_type}" ]; then
print_debug "Parsed DBType Variant: 1 - No Env, No Parsed Filename"
q_dbtype_variant=1
q_dbtype_menu=$(cat <<EOF
@@ -285,7 +289,7 @@ EOF
)
fi
if [ -n "${dbtype}" ] && [ -z "${parsed_type}" ]; then
if [ -n "${DB_TYPE}" ] && [ -z "${parsed_type}" ]; then
print_debug "Parsed DBType Variant: 2 - Env, No Parsed Filename"
q_dbtype_variant=2
q_dbtype_menu=$(cat <<EOF
@@ -295,7 +299,7 @@ EOF
)
fi
if [ -z "${dbtype}" ] && [ -n "${parsed_type}" ]; then
if [ -z "${DB_TYPE}" ] && [ -n "${parsed_type}" ]; then
print_debug "Parsed DBType Variant: 3 - No Env, Parsed Filename"
q_dbtype_variant=3
q_dbtype_menu=$(cat <<EOF
@@ -305,7 +309,7 @@ EOF
)
fi
if [ -n "${dbtype}" ] && [ -n "${parsed_type}" ]; then
if [ -n "${DB_TYPE}" ] && [ -n "${parsed_type}" ]; then
print_debug "Parsed DBType Variant: 4 - Env, Parsed Filename"
q_dbtype_variant=4
q_dbtype_menu=$(cat <<EOF
@@ -320,7 +324,9 @@ EOF
What Database Type are you looking to restore?
${q_dbtype_menu}
M ) MySQL / MariaDB
O ) MongoDB
P ) Postgresql
Q ) Quit
@@ -335,6 +341,10 @@ EOF
r_dbtype=mysql
break
;;
o* )
r_dbtype=mongo
break
;;
p* )
r_dbtype=postgresql
break
@@ -351,13 +361,17 @@ EOF
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
e* | "" )
r_dbtype=${db_name}
r_dbtype=${DB_TYPE}
break
;;
m* )
r_dbtype=mysql
break
;;
o* )
r_dbtype=mongo
break
;;
p* )
r_dbtype=postgresql
break
@@ -381,6 +395,10 @@ EOF
r_dbtype=mysql
break
;;
o* )
r_dbtype=mongo
break
;;
p* )
r_dbtype=postgresql
break
@@ -398,7 +416,7 @@ EOF
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
e* | "" )
r_dbtype=${dbtype}
r_dbtype=${DB_TYPE}
break
;;
f* )
@@ -431,7 +449,7 @@ get_dbname() {
print_debug "Parsed DBName: ${p_dbhost}"
fi
if [ -z "${dbname}" ] && [ -z "${parsed_name}" ]; then
if [ -z "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename"
q_dbname_variant=1
q_dbname_menu=$(cat <<EOF
@@ -440,7 +458,7 @@ EOF
)
fi
if [ -n "${dbname}" ] && [ -z "${parsed_name}" ]; then
if [ -n "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 2 - Env, No Parsed Filename"
q_dbname_variant=2
q_dbname_menu=$(cat <<EOF
@@ -451,7 +469,7 @@ EOF
)
fi
if [ -z "${dbname}" ] && [ -n "${parsed_name}" ]; then
if [ -z "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 3 - No Env, Parsed Filename"
q_dbname_variant=3
q_dbname_menu=$(cat <<EOF
@@ -462,7 +480,7 @@ EOF
)
fi
if [ -n "${dbname}" ] && [ -n "${parsed_name}" ]; then
if [ -n "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
print_debug "Parsed DBname Variant: 4 - Env, Parsed Filename"
q_dbname_variant=4
q_dbname_menu=$(cat <<EOF
@@ -509,7 +527,7 @@ EOF
break
;;
e* | "" )
r_dbname=${dbname}
r_dbname=${DB_NAME}
break
;;
q* )
@@ -562,7 +580,7 @@ EOF
break
;;
e* | "" )
r_dbname=${dbname}
r_dbname=${DB_NAME}
break
;;
f* )
@@ -580,7 +598,7 @@ EOF
}
get_dbport() {
if [ -z "${dbport}" ] ; then
if [ -z "${DB_PORT}" ] ; then
print_debug "Parsed DBPort Variant: 1 - No Env"
q_dbport_variant=1
q_dbport_menu=$(cat <<EOF
@@ -589,20 +607,20 @@ EOF
)
fi
if [ -n "${dbport}" ] ; then
if [ -n "${DB_PORT}" ] ; then
print_debug "Parsed DBPort Variant: 2 - Env"
q_dbport_variant=2
q_dbport_menu=$(cat <<EOF
C ) Custom Entered Database Port
E ) Environment Variable DB_PORT: '${dbport}'
E ) Environment Variable DB_PORT: '${DB_PORT}'
EOF
)
fi
cat << EOF
What Database Port do you wish to use?
What Database Port do you wish to use? MySQL/MariaDB typcially listens on port 3306. Postrgresql port 5432. MongoDB 27017
${q_dbport_menu}
Q ) Quit
@@ -637,7 +655,7 @@ EOF
break
;;
e* | "" )
r_dbport=${dbport}
r_dbport=${DB_PORT}
break
;;
q* )
@@ -651,7 +669,7 @@ EOF
}
get_dbuser() {
if [ -z "${dbuser}" ] ; then
if [ -z "${DB_USER}" ] ; then
print_debug "Parsed DBUser Variant: 1 - No Env"
q_dbuser_variant=1
q_dbuser_menu=$(cat <<EOF
@@ -660,7 +678,7 @@ EOF
)
fi
if [ -n "${dbuser}" ] ; then
if [ -n "${DB_USER}" ] ; then
print_debug "Parsed DBUser Variant: 2 - Env"
q_dbuser_variant=2
q_dbuser_menu=$(cat <<EOF
@@ -706,7 +724,7 @@ EOF
break
;;
e* | "" )
r_dbuser=${dbuser}
r_dbuser=${DB_USER}
break
;;
q* )
@@ -720,7 +738,7 @@ EOF
}
get_dbpass() {
if [ -z "${dbpass}" ] ; then
if [ -z "${DB_PASS}" ] ; then
print_debug "Parsed DBPass Variant: 1 - No Env"
q_dbpass_variant=1
q_dbpass_menu=$(cat <<EOF
@@ -729,13 +747,13 @@ EOF
)
fi
if [ -n "${dbpass}" ] ; then
if [ -n "${DB_PASS}" ] ; then
print_debug "Parsed DBPass Variant: 2 - Env"
q_dbpass_variant=2
q_dbpass_menu=$(cat <<EOF
C ) Custom Entered Database Password
E ) Environment Variable DB_PASS: '${DB_PASS}'
E ) Environment Variable DB_PASS
EOF
)
fi
@@ -775,7 +793,7 @@ EOF
break
;;
e* | "" )
r_dbpass=${dbpass}
r_dbpass=${DB_PASS}
break
;;
q* )
@@ -791,7 +809,7 @@ EOF
#### SCRIPT START
cat << EOF
## ${IMAGE_NAME} Restore Script Version 1.0.1
## ${IMAGE_NAME} Restore Script
## Visit ${IMAGE_REPO_URL}
## ####################################################
@@ -826,7 +844,7 @@ if [ -n "${3}" ]; then
if [ ! -f "${3}" ]; then
get_dbhost
else
r_dbtype="${3}"
r_dbhost="${3}"
fi
else
get_dbhost
@@ -920,8 +938,23 @@ case "${r_dbtype}" in
pv ${r_filename} | ${decompress_cmd}cat | psql -d ${r_dbname} -h ${r_dbhost} -p ${r_dbport} -U ${r_dbuser}
exit_code=$?
;;
mongo )
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
if [ "${ENABLE_COMPRESSION,,}" != "none" ] && [ "${ENABLE_COMPRESSION,,}" != "false" ] ; then
mongo_compression="--gzip"
fi
if [ -n "${r_dbuser}" ] ; then
mongo_user="-u ${r_dbuser}"
fi
if [ -n "${r_dbpass}" ] ; then
mongo_pass="-u ${r_dbpass}"
fi
mongorestore ${mongo_compression} -d ${r_dbname} -h ${r_dbhost} --port ${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
exit_code=$?
;;
* )
exit 3
print_info "Unable to restore DB of type '${r_dbtype}'"
exit_code=3
;;
esac