Compare commits


123 Commits

Author SHA1 Message Date
dave@tiredofit.ca
11f55f3d82 Release 4.0.20 - See CHANGELOG.md 2023-11-21 15:18:22 -08:00
dave@tiredofit.ca
674a98fcd8 Release 4.0.19 - See CHANGELOG.md 2023-11-20 15:26:21 -08:00
dave@tiredofit.ca
77c747e01b Release 4.0.18 - See CHANGELOG.md 2023-11-18 09:53:41 -08:00
Dave Conroy
2e30558a27 Merge pull request #282 from joergmschulz/patch-1
Update 10-db-backup msmtp -C
2023-11-18 09:52:23 -08:00
joergmschulz
c746fb641e Update 10-db-backup msmtp -C
the config file is referenced by -C , not -c
2023-11-17 23:28:08 +01:00
dave@tiredofit.ca
ca2f04cd59 Release 4.0.17 - See CHANGELOG.md 2023-11-17 08:16:34 -08:00
dave@tiredofit.ca
dfa94ecab7 Release 4.0.16 - See CHANGELOG.md 2023-11-17 08:07:54 -08:00
Dave Conroy
eaea6dc348 Update README.md 2023-11-16 09:38:18 -08:00
dave@tiredofit.ca
34abe88159 Release 4.0.15 - See CHANGELOG.md 2023-11-16 09:35:56 -08:00
Dave Conroy
5ffbeeb163 Merge pull request #280 from joergmschulz/patch-1
warn instead of warning
2023-11-14 07:14:55 -08:00
joergmschulz
c82cee80f8 warn instead of warning
see #279
2023-11-14 08:53:38 +01:00
dave@tiredofit.ca
ab059ccdf1 Release 4.0.14 - See CHANGELOG.md 2023-11-13 15:16:36 -08:00
dave@tiredofit.ca
1e8ccf4d56 Release 4.0.13 - See CHANGELOG.md 2023-11-12 17:07:07 -08:00
dave@tiredofit.ca
65c40cac0a Release 4.0.12 - See CHANGELOG.md 2023-11-12 09:03:01 -08:00
dave@tiredofit.ca
a9f2d51ff9 Release 4.0.11 - See CHANGELOG.md 2023-11-11 13:43:57 -08:00
dave@tiredofit.ca
7f455abc1a Release 4.0.10 - See CHANGELOG.md 2023-11-11 09:34:11 -08:00
dave@tiredofit.ca
c16add4525 Release 4.0.9 - See CHANGELOG.md 2023-11-11 09:16:02 -08:00
Dave Conroy
d5769b1588 Fix Ctrl-C Backup Concurrency 2023-11-11 08:48:59 -08:00
dave@tiredofit.ca
0b2c7836cf Release 4.0.8 - See CHANGELOG.md 2023-11-11 08:32:58 -08:00
Dave Conroy
535e011740 Add safety net to debug() SHOW_OUTPUT 2023-11-11 07:28:38 -08:00
Dave Conroy
5a391b908a Fix debug() duplicate variable 2023-11-11 07:23:13 -08:00
dave@tiredofit.ca
fddca646c8 Release 4.0.7 - See CHANGELOG.md 2023-11-11 07:15:00 -08:00
Dave Conroy
68f954c59b Fix SQLite3 Backups and clean up temp directory properly 2023-11-11 07:12:29 -08:00
Dave Conroy
0ab0a6d182 sqlit3 scheduler process name remove slashes 2023-11-11 06:48:39 -08:00
Dave Conroy
f6bf2993f7 Add seperate persmissions for _FILESYSTEM_PATH 2023-11-11 06:36:26 -08:00
dave@tiredofit.ca
5cf00a8b8e Release 4.0.6 - See CHANGELOG.md 2023-11-10 17:53:47 -08:00
dave@tiredofit.ca
2bc730013e Release 4.0.5 - See CHANGELOG.md 2023-11-10 07:25:25 -08:00
Dave Conroy
d628ed8ff4 Expand upon DEBUG_ statements to give more detail 2023-11-10 07:24:31 -08:00
Dave Conroy
d7399667a1 Update _FILESYSTEM_PERMISSIONS from 700 to 600 and add undocumented DBBACKUP_USER|GROUP variable 2023-11-10 07:16:56 -08:00
dave@tiredofit.ca
9caec737e0 Release 4.0.4 - See CHANGELOG.md 2023-11-09 11:49:26 -08:00
Dave Conroy
87a803512d Merge pull request #269 from tiredofit/4.x
New Restore Script
2023-11-09 11:48:19 -08:00
Dave Conroy
c6a8fb0ae0 Merge branch 'main' into 4.x 2023-11-09 11:48:08 -08:00
Dave Conroy
8fafdeb45c Restore - Support multiple DB Hosts 2023-11-09 11:46:04 -08:00
Dave Conroy
4a3a79d328 restore - we're actually using FILESYSTEM_PATH 2023-11-09 09:21:19 -08:00
dave@tiredofit.ca
bad5057bcf Release 4.0.3 - See CHANGELOG.md 2023-11-09 09:20:27 -08:00
Dave Conroy
d2acfc4a88 restore - dont browse .gpg files 2023-11-09 09:19:39 -08:00
Dave Conroy
3d794a819f Commence work on restore scripts 2023-11-09 09:19:13 -08:00
dave@tiredofit.ca
aaf6309cc4 Release 4.0.2 - See CHANGELOG.md 2023-11-09 08:09:59 -08:00
dave@tiredofit.ca
55d2067b43 Release 4.0.1 - See CHANGELOG.md 2023-11-09 08:04:05 -08:00
Dave Conroy
0d56a26f0f restore - remove reference to DB_DUMP_TARGET and instead use DEFAULT_BACKUP_PATH 2023-11-09 08:03:39 -08:00
Dave Conroy
635411bdd5 Update README.md 2023-11-08 22:37:39 -08:00
Dave Conroy
39776a96b8 Release 4.0.0 - See CHANGELOG.md and README.md 2023-11-08 18:55:57 -08:00
Dave Conroy
d24cdc5db5 Merge pull request #266 from tiredofit/4.x
Release 4.0.0
2023-11-08 18:49:42 -08:00
Dave Conroy
2df35e46e5 Merge 4.x prep 2023-11-08 18:48:58 -08:00
Dave Conroy
41b518f2f0 Add a failsafe for _original_debug_mode 2023-11-08 18:18:05 -08:00
Dave Conroy
89e6956cdd Adjust debug statements 2023-11-08 15:08:32 -08:00
Dave Conroy
089687dc55 Add DEBUG_ statements 2023-11-08 14:48:27 -08:00
Dave Conroy
c88fbacde9 Update Zabbix Template to fix trigger 2023-11-08 13:14:09 -08:00
Dave Conroy
3010a4d187 Rework Zabbix Payloads 2023-11-08 10:48:18 -08:00
Dave Conroy
fd59daf125 Update Zabbix Template 2023-11-08 10:48:06 -08:00
Dave Conroy
140e3183a4 Update Zabbix Template 2023-11-08 10:43:25 -08:00
Dave Conroy
45eba40360 Update Zabbix Template 2023-11-08 10:40:02 -08:00
Dave Conroy
440b24da8d Update Zabbix Template 2023-11-08 10:06:19 -08:00
Dave Conroy
9849c7339e Add PGSQL to override and force backing up globals 2023-11-08 09:11:24 -08:00
Dave Conroy
80e407d81d switch globals to only get backed up if using all 2023-11-08 08:55:21 -08:00
Dave Conroy
40ad7abac4 No more extra now 2023-11-08 08:35:22 -08:00
Dave Conroy
3ecb24c603 Fix naming issue with backup_job_filesystem_archive 2023-11-08 08:29:35 -08:00
Dave Conroy
ff96f09e33 future_time is time_future 2023-11-08 08:25:08 -08:00
Dave Conroy
77f54d06fa Rearrange variables and polish: 2023-11-08 08:00:21 -08:00
Dave Conroy
633cc8410e Fix cron expression stepping 2023-11-08 07:43:37 -08:00
Dave Conroy
f5b92854da Cleanup cron expression 2023-11-07 13:06:06 -08:00
Dave Conroy
50039f8d0c Rearrange timer() 2023-11-07 13:03:01 -08:00
Dave Conroy
b8a825a3af Rework write_log to not output more than it needs to 2023-11-07 12:33:03 -08:00
Dave Conroy
2ccc867a75 Write Legacy vars to file instead 2023-11-07 12:13:04 -08:00
Dave Conroy
11205be091 Properly handle BACKUP_BEGIN plus values 2023-11-07 12:00:19 -08:00
Dave Conroy
57193cc824 Properly translate legacy _DUMP var to DEFAULT_BACKUP 2023-11-07 11:50:59 -08:00
Dave Conroy
9863358469 Split debug statement for backup/move exit codes 2023-11-07 11:05:39 -08:00
Dave Conroy
bb3c942b35 Properly output exit code for notifications 2023-11-07 11:02:49 -08:00
Dave Conroy
72f90876e3 Fix writing logfiles to filessytem 2023-11-06 17:00:29 -08:00
Dave Conroy
c94a85b523 Change Zabbix autoregister string to dbbackup4 2023-11-06 16:53:20 -08:00
Dave Conroy
c9fd36db72 Fix DEFAULT_MYSQL table 2023-11-06 11:09:15 -08:00
Dave Conroy
a629282093 Update dependencies 2023-11-05 10:42:51 -08:00
Dave Conroy
050cc3ef62 Cleanup Mongo compression routine 2023-11-05 10:12:32 -08:00
Dave Conroy
da54cdf48b Code formatting and cleanup 2023-11-05 10:10:59 -08:00
Dave Conroy
2b446f1e4c Add encrypt/checksum duration graphs 2023-11-05 09:48:23 -08:00
Dave Conroy
9e7904ce4d Cleanup Formatting 2023-11-05 09:47:59 -08:00
Dave Conroy
c5c026d0fd Merge pull request #232 from benvia/feature-cron
Adds a cronjob-like mechanism
2023-11-05 09:17:33 -08:00
Dave Conroy
5d8a7f1720 Optimize write_log function 2023-11-05 08:54:43 -08:00
Dave Conroy
ca6b045d7d Optimize write_log function 2023-11-05 08:51:32 -08:00
Dave Conroy
da22217e7b Refactor timer/datetime 2023-11-05 08:36:15 -08:00
Dave Conroy
59854c1715 Refactor timer/datetime 2023-11-05 08:35:40 -08:00
Dave Conroy
e090eeda3f feat - add MySQL Event backup support 2023-11-05 07:55:40 -08:00
Dave Conroy
0ba1989083 modify MSSQL backups to get compressed post initial backup 2023-11-05 07:45:52 -08:00
Dave Conroy
5392bf5179 feat - add basic cron expression scheduling 2023-11-05 07:40:30 -08:00
Benoit Vianin
e42f8e9a8c Code refactoring 2023-11-05 06:48:47 +00:00
Benoit Vianin
6a28ac2d92 Fix code for absolute time 2023-11-04 18:53:42 +00:00
Dave Conroy
3af9ef6d3d Rework timers 2023-11-04 08:07:10 -07:00
Dave Conroy
5e3d8b3083 Also use ionice for DB_RESOURCE_OPTIMIZED 2023-11-04 07:43:32 -07:00
Benoit Vianin
06cfba4952 Modify DB_DUMP_BEGIN to support a full date as cron 2023-11-04 11:43:45 +00:00
Benoit Vianin
250cae98ef Restore the +XX minutes fucntion and move print to debug mode 2023-11-04 10:34:40 +00:00
Dave Conroy
5dafa69439 feat - add file encryption 2023-11-03 19:46:46 -07:00
Dave Conroy
0159a13746 Add gpg to rundeps 2023-11-03 18:18:05 -07:00
Dave Conroy
dad5294295 Update Zabbix Template 2023-11-03 18:15:30 -07:00
Dave Conroy
3837beae97 feat - Add checksum time, and hash output to Zabbix 2023-11-03 18:11:58 -07:00
Dave Conroy
20bfcec1a9 feat - Add DEFAULT_RESOURCE_OPTIMIZED to adjust CPU scheduler 2023-11-03 18:03:52 -07:00
Dave Conroy
2cc97694f4 Fix formatting: 2023-11-03 15:53:54 -07:00
Dave Conroy
0a867f9719 feat - Add Support for MSSQL Transaction logs - Closes #200 2023-11-03 15:48:39 -07:00
Dave Conroy
7d77266d2a Use relative path insteasd of absolute when creating_latest_symlink - Closes #256 2023-11-03 15:32:33 -07:00
Dave Conroy
50b61ad358 Merge pull request #236 from eoehen/feature/blobxfer-download-befor-move-new-backup-file
feat(235) Synchronize local storage from S3 before upload
2023-11-03 15:23:22 -07:00
Dave Conroy
a10eb1dfba Synchronize local storage from blobxfer before upload 2023-11-03 15:22:59 -07:00
Dave Conroy
b0bf624ad3 Add Postgres GLobals Backup if SPLIT_DB=TRUE 2023-11-03 15:14:02 -07:00
Dave Conroy
07e74ca746 Merge pull request #228 from oscarsiles/main
add postgres globals backup if SPLIT_DB=true
2023-11-03 15:11:35 -07:00
Dave Conroy
599c3d7940 Rework bin directory 2023-11-03 15:02:52 -07:00
Dave Conroy
3e666ef655 Add coreutils to run-deps 2023-11-03 15:02:36 -07:00
Dave Conroy
c0c7202b6d Add multi host support 2023-11-03 15:02:13 -07:00
Dave Conroy
82d8175eeb Support 4.0.x defaults 2023-11-03 15:01:59 -07:00
Dave Conroy
ee294c08a2 Update scheduler template 2023-11-03 15:01:41 -07:00
Dave Conroy
cfbac00268 Zabbix Template > 4.0.x 2023-11-03 15:01:17 -07:00
Dave Conroy
b0413e6708 Version 4.0.x examples 2023-11-03 15:00:56 -07:00
Dave Conroy
57c853d02a MIT 2023-11-03 15:00:33 -07:00
Dave Conroy
0845ec30b3 Update install/etc folder contents 2023-11-03 15:00:13 -07:00
Dave Conroy
96f9825e19 Update README.md with new options 2023-11-03 14:59:44 -07:00
Dave Conroy
2b10a0b679 feat - only cleanup / mv backups of the same file_name pattern 2023-11-01 15:55:32 -07:00
Dave Conroy
1450a33c27 feat - add file logging support 2023-11-01 15:44:03 -07:00
Dave Conroy
74e7a7e74c feat - optimize zabbix sending routines to be one process 2023-11-01 14:43:43 -07:00
Dave Conroy
e03fefeb02 fix - Optimize generating checksum routines 2023-11-01 14:34:34 -07:00
Dave Conroy
3ff3cdb19c feat - Add TARGET_DB_DUMP_PERMISSION to set file and directory permissions 2023-11-01 14:32:43 -07:00
Dave Conroy
bcf7bc5ecd rearrange compression function 2023-11-01 14:18:27 -07:00
Dave Conroy
5a01b6118e feat - make processes and files save as username dbbackup instead of root 2023-11-01 14:13:37 -07:00
Elias Oehen
f67170c1ec feat(235) Synchronize local storage from S3 before upload
close #235
2023-09-26 13:05:20 +02:00
Oscar Siles Brügge
03d2362b6d Merge branch 'tiredofit:main' into main 2023-09-16 21:43:45 +01:00
Benoit Vianin
3c6beeaae9 Adds a cronjob-like mechanism 2023-06-14 04:36:21 +00:00
Oscar Siles Brugge
5f58ce81c8 add postgres globals backup if SPLIT_DB=true 2023-06-07 09:19:25 +01:00
18 changed files with 3537 additions and 1461 deletions

CHANGELOG.md

@@ -1,3 +1,200 @@
## 4.0.20 2023-11-21 <dave at tiredofit dot ca>
### Changed
- Update base image to support S6 Overlay 3.1.6.2 to solve shutdown issues specifically with MODE=MANUAL and MANUAL_RUN_FOREVER=TRUE
- Add some safety nets for Manual scheduling
## 4.0.19 2023-11-20 <dave at tiredofit dot ca>
### Changed
- Make adjustments to cron scheduling feature to be able to handle whitespace properly
## 4.0.18 2023-11-18 <joergmschulz@github>
### Changed
- Fix loading msmtp configuration
## 4.0.17 2023-11-17 <dave at tiredofit dot ca>
### Changed
- Provide more details when notifying via instant messages
## 4.0.16 2023-11-17 <dave at tiredofit dot ca>
### Changed
- Switch to using msmtp instead of s-mail for notify()
## 4.0.15 2023-11-16 <dave at tiredofit dot ca>
### Changed
- Fix cleanup of old backups
## 4.0.14 2023-11-13 <dave at tiredofit dot ca>
### Changed
- Fix PRE/POST scripts not emitting the legacy warning when found
- Run pre / post scripts as root
## 4.0.13 2023-11-12 <dave at tiredofit dot ca>
### Changed
- Check for and remove any quotes when using MONGO_CUSTOM_URI
## 4.0.12 2023-11-12 <dave at tiredofit dot ca>
### Changed
- Allow creating schedulers if _MONGO_CUSTOM_URI is set and _DB_HOST is blank
## 4.0.11 2023-11-11 <dave at tiredofit dot ca>
### Changed
- Resolve issue with backing up ALL databases with PGSQL and MySQL
## 4.0.10 2023-11-11 <dave at tiredofit dot ca>
### Changed
- Change environment variable parsing routines to properly accommodate passwords containing '=='
## 4.0.9 2023-11-11 <dave at tiredofit dot ca>
### Changed
- Fix issue with quotes being wrapped around _PASS variables
## 4.0.8 2023-11-11 <dave at tiredofit dot ca>
### Changed
- Tidy up file_encryption() routines
- Change environment variable _ENCRYPT_PUBKEY to _ENCRYPT_PUBLIC_KEY
- Add new environment variable _ENCRYPT_PRIVATE_KEY
## 4.0.7 2023-11-11 <dave at tiredofit dot ca>
### Added
- Add separate permissions for _FILESYSTEM_PATH
### Changed
- More output and debugging additions
- SQLite3 now backs up without running into file permission/access problems
- Cleanup old sqlite backups from temp directory
- Handle multiple SQLite3 backups concurrently
## 4.0.6 2023-11-10 <dave at tiredofit dot ca>
### Added
- Add additional DEBUG_ statements
### Changed
- Fix issue with InfluxDB not properly detecting the correct version
## 4.0.5 2023-11-10 <dave at tiredofit dot ca>
### Added
- Add undocumented DBBACKUP_USER|GROUP environment variables for troubleshooting permissions
- Add more verbosity when using DEBUG_ statements
### Changed
- Change _FILESYSTEM_PERMISSION to 600 from 700
## 4.0.4 2023-11-09 <dave at tiredofit dot ca>
### Added
- Add support for restoring from different DB_ variables in restore script
## 4.0.3 2023-11-09 <dave at tiredofit dot ca>
### Changed
- Resolve issue with _MYSQL_TLS_CERT_FILE not being read
## 4.0.2 2023-11-09 <dave at tiredofit dot ca>
### Changed
- Properly use custom _S3_HOST variables
## 4.0.1 2023-11-09 <dave at tiredofit dot ca>
### Changed
- Restore - Stop using DB_DUMP_TARGET and instead browse using DEFAULT_BACKUP_PATH
## 4.0.0 2023-11-08 <dave at tiredofit dot ca>
This is the fourth major release of the DB Backup image, which started as a basic MySQL backup service in early 2017. As with each major release, it brings enhancements, bugfixes, and removals along with breaking changes, and this one is no different.
This release brings functionality requested by the community, such as multiple host backup support by means of independent scheduler tasks, blackout periods, better resource usage, better security via file encryption and file permissions, and more verbosity via log files. It also merges contributions from other developers.
Upgrading to this image should for the most part work for most users, but will involve eventually upgrading environment variables, as the format has changed significantly. Old variables should continue to work, however they are unsupported and will be removed with the `4.3.0` release, whenever that will be.
A significant amount of development hours were put in to accommodate feature requests by the community. If you are using this in a commercial setting or find this image valuable, please consider sponsoring my work for a period of time or engaging in a private support offering. More details at https://www.tiredofit.ca/sponsor
### Added
- Backup Multiple Hosts in same image all with different options (scheduling, compression, destination, cleanup) (Use `DBXX_option` variables)
- Limits on how many backup jobs run concurrently
- Backup Scheduling now allows using a timestamp (e.g. `Dec 12 2023 03:00:00`) - credit benvia@github
- Backup Scheduling now allows using a cron expression (e.g `00 03 * * *`)
- Backup blackout periods to skip backups during a window of time
- Backup runs as dedicated user (no longer root)
- Backup can have specific file permissions set upon completion (e.g. `700` or `rwx------`)
- Backups can run with reduced priority mode to allow for fair scheduling across the system
- Backups - MySQL/MariaDB now has ability to backup events
- Backups - Microsoft SQL server now has option to backup transaction logs
- Backups - Postgres now backs up globals - credit oscarsiles@github
- Backups with Azure synchronize storage before upload - credit eoehen@github
- Ability to encrypt backup files with a passphrase or a GPG public key
- Log backup jobs to file along with log rotation
- Notification support upon job failure via Email, Mattermost, Matrix, Rocketchat
- Zabbix Metrics now auto discovers new jobs
- Zabbix Metrics sends metrics for the backed-up filename, checksum hash, and the durations of backup/compression, checksum, and encryption
- New Debug Capabilities
### Changed
- Reworked Documentation
- Reworked all functions and renamed all variables
- Many variables now use a prefix of `DEFAULT_` to operate on all backup jobs
- Can be overridden per backup job by setting `DB_<option>`, or unset a default variable with `DB_<option>=unset`
- Renamed variables and terms
- `_DUMP_LOCATION` -> `_BACKUP_LOCATION`
- `_DUMP_BEGIN` -> `_BACKUP_BEGIN`
- `_DUMP_FREQ` -> `_BACKUP_INTERVAL`
- `_DUMP_TARGET` -> `_FILESYSTEM_PATH`
- `_DUMP_ARCHIVE` -> `_FILESYSTEM_ARCHIVE_PATH`
- `EXTRA_DUMP_OPTS` -> `_EXTRA_BACKUP_OPTS`
- `TEMP_LOCATION` -> `TEMP_PATH`
- Backups - AWS CLI updated to 1.29.78
- Backups - InfluxDB 2 Client version updated to 2.7.3
- Backups - Microsoft SQL server now compresses files post initial backup
- Backups - Manual backups handle aborting gracefully
- Checksum routines now complete in half the time
- Checksum variable now supports "NONE"
- Zabbix metrics are now sent in a single process instead of one process per metric
- Cleanup - only clean up files that match the same backup name pattern
- Cleanup/Archive uses a relative path instead of an absolute path when creating the latest symlink
- A handful of code optimizations and cleanup
### Removed
- `ENABLE_CHECKSUM` - has been wrapped into `_CHECKSUM=none`
## 3.12.0 2023-10-29 <alwynpan@github>
### Added

Dockerfile

@@ -5,64 +5,69 @@ FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables
ENV INFLUX_VERSION=1.8.0 \
INFLUX2_VERSION=2.4.0 \
ENV INFLUX1_CLIENT_VERSION=1.8.0 \
INFLUX2_CLIENT_VERSION=2.7.3 \
MSODBC_VERSION=18.3.2.1-1 \
MSSQL_VERSION=18.3.1.1-1 \
AWS_CLI_VERSION=1.25.97 \
CONTAINER_ENABLE_MESSAGING=FALSE \
AWS_CLI_VERSION=1.29.78 \
CONTAINER_ENABLE_MESSAGING=TRUE \
CONTAINER_ENABLE_MONITORING=TRUE \
CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
IMAGE_NAME="tiredofit/db-backup" \
IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
### Dependencies
RUN source /assets/functions/00-container && \
set -ex && \
addgroup -S -g 10000 dbbackup && \
adduser -S -D -H -u 10000 -G dbbackup -g "Tired of I.T! DB Backup" dbbackup && \
\
package update && \
package upgrade && \
package install .db-backup-build-deps \
build-base \
bzip2-dev \
cargo \
git \
go \
libarchive-dev \
openssl-dev \
libffi-dev \
python3-dev \
py3-pip \
xz-dev \
&& \
build-base \
bzip2-dev \
cargo \
git \
go \
libarchive-dev \
openssl-dev \
libffi-dev \
python3-dev \
py3-pip \
xz-dev \
&& \
\
package install .db-backup-run-deps \
bzip2 \
groff \
libarchive \
mariadb-client \
mariadb-connector-c \
mongodb-tools \
openssl \
pigz \
postgresql16 \
postgresql16-client \
pv \
py3-botocore \
py3-colorama \
py3-cryptography \
py3-docutils \
py3-jmespath \
py3-rsa \
py3-setuptools \
py3-s3transfer \
py3-yaml \
python3 \
redis \
sqlite \
xz \
zip \
zstd \
&& \
bzip2 \
coreutils \
gpg \
gpg-agent \
groff \
libarchive \
mariadb-client \
mariadb-connector-c \
mongodb-tools \
openssl \
pigz \
postgresql16 \
postgresql16-client \
pv \
py3-botocore \
py3-colorama \
py3-cryptography \
py3-docutils \
py3-jmespath \
py3-rsa \
py3-setuptools \
py3-s3transfer \
py3-yaml \
python3 \
redis \
sqlite \
xz \
zip \
zstd \
&& \
\
apkArch="$(uname -m)"; \
case "$apkArch" in \
@@ -71,11 +76,11 @@ RUN source /assets/functions/00-container && \
*) sleep 0.1 ;; \
esac; \
\
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; ls -l ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
clone_git_repo https://github.com/aws/aws-cli "${AWS_CLI_VERSION}" && \
python3 setup.py install --prefix=/usr && \
clone_git_repo https://github.com/influxdata/influxdb "${INFLUX_VERSION}" && \
clone_git_repo https://github.com/influxdata/influxdb "${INFLUX1_CLIENT_VERSION}" && \
go build -o /usr/sbin/influxd ./cmd/influxd && \
strip /usr/sbin/influxd && \
mkdir -p /usr/src/pbzip2 && \
@@ -106,5 +111,4 @@ RUN source /assets/functions/00-container && \
/tmp/* \
/usr/src/*
COPY install /

LICENSE

@@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2022 Dave Conroy
Copyright (c) 2023 Dave Conroy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

README.md

@@ -4,7 +4,7 @@
[![Build Status](https://img.shields.io/github/actions/workflow/status/tiredofit/docker-db-backup/main.yml?branch=main&style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions)
[![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
[![Docker Pulls](https://img.shields.io/docker/pulls/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
[![Become a sponsor](https://img.shields.io/badge/sponsor-tiredofit-181717.svg?logo=github&style=flat-square)](https://github.com/sponsors/tiredofit)
[![Become a sponsor](https://img.shields.io/badge/sponsor-tiredofit-181717.svg?logo=github&style=flat-square)](https://www.tiredofit.ca/sponsor)
[![Paypal Donate](https://img.shields.io/badge/donate-paypal-00457c.svg?logo=paypal&style=flat-square)](https://www.paypal.me/tiredofit)
---
@@ -13,21 +13,24 @@
This will build a container for backing up multiple types of DB Servers
Currently backs up CouchDB, InfluxDB, MySQL, Microsoft SQL, MongoDB, Postgres, Redis servers.
Backs up CouchDB, InfluxDB, MySQL/MariaDB, Microsoft SQL, MongoDB, Postgres, Redis servers.
- dump to local filesystem or backup to S3 Compatible services, and Azure.
- select database user and password
- backup all databases, single, or multiple databases
- backup all to separate files or one singular file
- choose to have an MD5 or SHA1 sum after backup for verification
- delete old backups after specific amount of time
- choose compression type (none, gz, bz, xz, zstd)
- connect to any container running on the same system
- Script to perform restores
- Zabbix Monitoring capabilities
- select how often to run a dump
- select when to start the first dump, whether time of day or relative to container start time
- Execute script after backup for monitoring/alerting purposes
- multiple backup job support
- selectable when to start the first dump, whether time of day or relative to container start time
- selectable interval
- selectable blackout periods to omit scheduling during periods of time
- selectable database user and password
- selectable cleanup and archive capabilities
- selectable database name support - all databases, single, or multiple databases
- backup all to separate files or one singular file
- checksum support - choose to have an MD5 or SHA1 hash generated after backup for verification
- compression support (none, gz, bz, xz, zstd)
- encryption support (passphrase and public key)
- notify upon job failure to email, matrix, mattermost, rocketchat, custom script
- zabbix metrics support
- hooks to execute pre and post backup job for customization purposes
- companion script to aid in restores
## Maintainer
@@ -35,43 +38,77 @@ Currently backs up CouchDB, InfluxDB, MySQL, Microsoft SQL, MongoDB, Postgres, R
## Table of Contents
- [github.com/tiredofit/docker-db-backup](#githubcomtiredofitdocker-db-backup)
- [About](#about)
- [Maintainer](#maintainer)
- [Table of Contents](#table-of-contents)
- [Prerequisites and Assumptions](#prerequisites-and-assumptions)
- [Installation](#installation)
- [Build from Source](#build-from-source)
- [Prebuilt Images](#prebuilt-images)
- [Multi Architecture](#multi-architecture)
- [Configuration](#configuration)
- [Quick Start](#quick-start)
- [Persistent Storage](#persistent-storage)
- [Environment Variables](#environment-variables)
- [Base Images used](#base-images-used)
- [Container Options](#container-options)
- [Database Specific Options](#database-specific-options)
- [For Influx DB2](#for-influx-db2)
- [Scheduling Options](#scheduling-options)
- [Backup Options](#backup-options)
- [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
- [Upload to a Azure storage account by `blobxfer`](#upload-to-a-azure-storage-account-by-blobxfer)
- [Maintenance](#maintenance)
- [Shell Access](#shell-access)
- [Manual Backups](#manual-backups)
- [Restoring Databases](#restoring-databases)
- [Custom Scripts](#custom-scripts)
- [Path Options](#path-options)
- [Pre Backup](#pre-backup)
- [Post backup](#post-backup)
- [Support](#support)
- [Usage](#usage)
- [Bugfixes](#bugfixes)
- [Feature Requests](#feature-requests)
- [Updates](#updates)
- [License](#license)
> **NOTE**: If you are using this with a docker-compose file along with a separate SQL container, take care not to set the variables to back up immediately; delay execution by a minute or so, otherwise the first backup will fail.
- [About](#about)
- [Maintainer](#maintainer)
- [Table of Contents](#table-of-contents)
- [Prerequisites and Assumptions](#prerequisites-and-assumptions)
- [Installation](#installation)
- [Build from Source](#build-from-source)
- [Prebuilt Images](#prebuilt-images)
- [Multi Architecture](#multi-architecture)
- [Configuration](#configuration)
- [Quick Start](#quick-start)
- [Persistent Storage](#persistent-storage)
- [Environment Variables](#environment-variables)
- [Base Images used](#base-images-used)
- [Container Options](#container-options)
- [Job Defaults](#job-defaults)
- [Compression Options](#compression-options)
- [Encryption Options](#encryption-options)
- [Scheduling Options](#scheduling-options)
- [Default Database Options](#default-database-options)
- [CouchDB](#couchdb)
- [InfluxDB](#influxdb)
- [MariaDB/MySQL](#mariadbmysql)
- [Microsoft SQL](#microsoft-sql)
- [MongoDB](#mongodb)
- [Postgresql](#postgresql)
- [Redis](#redis)
- [Default Storage Options](#default-storage-options)
- [Filesystem](#filesystem)
- [S3](#s3)
- [Azure](#azure)
- [Hooks](#hooks)
- [Path Options](#path-options)
- [Pre Backup](#pre-backup)
- [Post backup](#post-backup)
- [Job Backup Options](#job-backup-options)
- [Compression Options](#compression-options-1)
- [Encryption Options](#encryption-options-1)
- [Scheduling Options](#scheduling-options-1)
- [Specific Database Options](#specific-database-options)
- [CouchDB](#couchdb-1)
- [InfluxDB](#influxdb-1)
- [MariaDB/MySQL](#mariadbmysql-1)
- [Microsoft SQL](#microsoft-sql-1)
- [MongoDB](#mongodb-1)
- [Postgresql](#postgresql-1)
- [Redis](#redis-1)
- [SQLite](#sqlite)
- [Specific Storage Options](#specific-storage-options)
- [Filesystem](#filesystem-1)
- [S3](#s3-1)
- [Azure](#azure-1)
- [Hooks](#hooks-1)
- [Path Options](#path-options-1)
- [Pre Backup](#pre-backup-1)
- [Post backup](#post-backup-1)
- [Notifications](#notifications)
- [Custom Notifications](#custom-notifications)
- [Email Notifications](#email-notifications)
- [Matrix Notifications](#matrix-notifications)
- [Mattermost Notifications](#mattermost-notifications)
- [Rocketchat Notifications](#rocketchat-notifications)
- [Maintenance](#maintenance)
- [Shell Access](#shell-access)
- [Manual Backups](#manual-backups)
- [Restoring Databases](#restoring-databases)
- [Support](#support)
- [Usage](#usage)
- [Bugfixes](#bugfixes)
- [Feature Requests](#feature-requests)
- [Updates](#updates)
- [License](#license)
## Prerequisites and Assumptions
@@ -105,17 +142,16 @@ docker pull docker.io/tiredofit/db-backup:(imagetag)
#### Multi Architecture
Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architectures, type `docker manifest (image):(tag)`
Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://www.tiredofit.ca/sponsor) my work so that I can work with various hardware. To see if this image supports multiple architectures, type `docker manifest inspect (image):(tag)`
## Configuration
### Quick Start
- The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a working [docker-compose.yml](examples/docker-compose.yml) that can be modified for development or production use.
- The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a series of example compose.yml files that can be modified for development or production use, or start from the minimal `docker run` sketch below.
- Set various [environment variables](#environment-variables) to understand the capabilities of this image.
- Map [persistent storage](#data-volumes) for access to configuration and data files for backup.
- Make [networking ports](#networking) available for public access if necessary
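As a concrete starting point, here is a minimal, illustrative `docker run` invocation. The network name, credentials, and volume path are placeholder assumptions; the variables themselves are documented in the sections below.
```bash
# Minimal sketch: back up a MariaDB container reachable as "mariadb" on a
# user-defined docker network named "backend" (both names assumed).
# DEFAULT_BACKUP_BEGIN=+10 delays the first run per the note above.
docker run -d --name db-backup \
  --network backend \
  -e DB01_TYPE=mysql \
  -e DB01_HOST=mariadb \
  -e DB01_NAME=database \
  -e DB01_USER=root \
  -e DB01_PASS=password \
  -e DEFAULT_BACKUP_BEGIN=+10 \
  -v "$(pwd)/backups:/backup" \
  docker.io/tiredofit/db-backup:latest
```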
### Persistent Storage
@@ -125,6 +161,7 @@ The following directories are used for configuration and can be mapped for persi
| `/backup` | Backups |
| `/assets/scripts/pre` | _Optional_ Put custom scripts in this directory to execute before backup operations |
| `/assets/scripts/post` | _Optional_ Put custom scripts in this directory to execute after backup operations |
| `/logs` | _Optional_ Logfiles for backup jobs |
### Environment Variables
@@ -140,113 +177,614 @@ Be sure to view the following repositories to understand all the customizable op
#### Container Options
| Parameter | Description | Default |
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `BACKUP_LOCATION` | Backup to `FILESYSTEM`, `blobxfer` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` |
| `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
| `CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | |
| `POST_SCRIPT` | Fill this variable in with a command to execute post backing up | |
| `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE` |
| Parameter | Description | Default |
| ------------------------ | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` |
| `USER_DBBACKUP` | The uid that the image should read and write files as (username is `dbbackup`) | `10000` |
| `GROUP_DBBACKUP` | The gid that the image should read and write files as (groupname is `dbbackup`) | `10000` |
| `LOG_PATH` | Path to log files | `/logs` |
| `TEMP_PATH` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
| `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
| `BACKUP_JOB_CONCURRENCY` | How many backup jobs to run concurrently | `1` |
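For example, to opt out of the internal scheduler and trigger runs yourself, something like the following sketch should work, assuming the `backup-now` helper described under [Manual Backups](#manual-backups):
```bash
# Disable internal scheduling; the container stays up waiting for manual runs
docker run -d --name db-backup \
  -e MODE=MANUAL \
  -e MANUAL_RUN_FOREVER=TRUE \
  docker.io/tiredofit/db-backup:latest

# Trigger a one-off backup by hand
docker exec db-backup backup-now
```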
### Database Specific Options
#### Job Defaults
If these are set and no other defaults or variables are set explicitly, they will be added to any of the backup jobs.
| Parameter | Description | Default | `_FILE` |
| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DB_AUTH` | (Mongo/PGSQL Only - Optional) Authentication Database | | |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `mssql` `pgsql` `mongo` `redis` `sqlite3` | | |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | | x |
| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB_NAME_EXCLUDE` | If using `ALL` - use this to exclude databases, separated via commas, from being backed up | | x |
| `DB_USER` | username for the database(s) - Can use `root` for MySQL | | x |
| `DB_PASS` | (optional if DB doesn't require it) password for the database | | x |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies | x |
| `INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2` series - AMD64 and ARM64 only for `2` | | |
| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | | x |
| | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can override them by making your own entries | | |
| Variable | Description | Default |
| --------------------------------- | ------------------------------------------------------------------------------------- | ------------ |
| `DEFAULT_BACKUP_LOCATION` | Backup to `FILESYSTEM`, `blobxfer` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `DEFAULT_CHECKSUM` | Either `MD5` or `SHA1` or `NONE` | `MD5` |
| `DEFAULT_LOG_LEVEL` | Log output on screen and in files `INFO` `NOTICE` `ERROR` `WARN` `DEBUG` | `notice` |
| `DEFAULT_RESOURCE_OPTIMIZED` | Perform operations at a lower priority to the CPU and IO scheduler | `FALSE` |
| `DEFAULT_SKIP_AVAILABILITY_CHECK` | Before backing up - skip connectivity check | `FALSE` |
#### For Influx DB2
##### Compression Options
Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST` use syntax of `http(s)://db-name`
| Variable | Description | Default |
| -------------------------------------- | ---------------------------------------------------------------------------------------------- | -------------- |
| `DEFAULT_COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
| `DEFAULT_COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` | `3` |
| | except for `ZSTD` which allows for `1` to `19` | |
| `DEFAULT_GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. | `FALSE` |
| `DEFAULT_ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
| `DEFAULT_PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
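As a brief sketch of how these combine (all values illustrative; the checksum filename is an assumption based on the hash file generated next to each backup):
```bash
# Rsync-friendly gzip at a moderate level, capped at 4 compression threads
DEFAULT_COMPRESSION=GZ
DEFAULT_COMPRESSION_LEVEL=6
DEFAULT_GZ_RSYNCABLE=TRUE
DEFAULT_ENABLE_PARALLEL_COMPRESSION=TRUE
DEFAULT_PARALLEL_COMPRESSION_THREADS=4

# Later, verify a backup against its generated hash file (hypothetical name)
cd /backup && md5sum -c mysql_example_mariadb_20231121-000000.sql.gz.md5
```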
### Scheduling Options
##### Encryption Options
| Parameter | Description | Default |
| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `${DB_DUMP_TARGET}/archive/` |
| `DB_DUMP_TARGET_ARCHIVE` | Optional Directory where the database dumps archives are kept. | |
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump frequency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
| `DB_ARCHIVE_TIME` | Value in minutes to move all files files older than (x) from `DB_DUMP_TARGET` to `DB_DUMP_TARGET_ARCHIVE` - which is useful when pairing against an external backup system. | |
Encryption occurs after compression and the encrypted filename will have a `.gpg` suffix
- You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of backups that start with a `0` get converted into a different format which will not allow the timer to start at the correct time.
| Variable | Description | Default | `_FILE` |
| ----------------------------- | -------------------------------------------- | ------- | ------- |
| `DEFAULT_ENCRYPT` | Encrypt file after backing up with GPG | `FALSE` | |
| `DEFAULT_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG | | x |
| *or* | | | |
| `DEFAULT_ENCRYPT_PUBLIC_KEY` | Path of public key to encrypt file with GPG | | x |
| `DEFAULT_ENCRYPT_PRIVATE_KEY` | Path of private key to encrypt file with GPG | | x |
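A minimal sketch of passphrase-based encryption and the matching restore step; the filenames and passphrase are illustrative. Since encryption happens after compression, decryption comes before decompression:
```bash
# Encrypt every backup with a symmetric passphrase (illustrative value)
DEFAULT_ENCRYPT=TRUE
DEFAULT_ENCRYPT_PASSPHRASE=super-secret-phrase

# To restore: strip the .gpg layer first, then decompress as usual
# (newer gpg releases may also require --pinentry-mode loopback)
gpg --batch --passphrase "super-secret-phrase" \
    --output backup.sql.zst --decrypt backup.sql.zst.gpg
```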
### Backup Options
##### Scheduling Options
| Parameter | Description | Default | `_FILE` |
| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ------- |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` | |
| `COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` | |
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` | |
| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` | |
| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` | |
| `ENABLE_CHECKSUM` | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` | |
| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` | |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `EXTRA_DUMP_OPTS` | If you need to pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `EXTRA_ENUMERATION_OPTS` | If you need to pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` | |
| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` | |
| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` | |
| `MYSQL_ENABLE_TLS` | Enable TLS functionality for MySQL client | `FALSE` | |
| `MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` | |
| `MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` | |
| `MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` | x |
| `MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | | x |
| `MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | | x |
| Variable | Description | Default |
| ------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `DEFAULT_BACKUP_INTERVAL` | How often to do a backup, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
| `DEFAULT_BACKUP_BEGIN` | What time to do the initial backup. Defaults to immediate (`+0`) | `+0` |
| | Must be in one of four formats: | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
| | Full datestamp e.g. `2023-12-21 23:30:00` | |
| | Cron expression e.g. `30 23 * * *` [Understand the format](https://en.wikipedia.org/wiki/Cron) - *BACKUP_INTERVAL is ignored* | |
| `DEFAULT_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when backup interval executes) | `FALSE` |
| | 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | |
| `DEFAULT_ARCHIVE_TIME` | Value in minutes to move all files older than (x) from `DEFAULT_FILESYSTEM_PATH` to `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | |
| `DEFAULT_BACKUP_BLACKOUT_BEGIN` | Use `HHMM` notation to start a blackout period where no backups occur eg `0420` | |
| `DEFAULT_BACKUP_BLACKOUT_END` | Use `HHMM` notation to set the end period where no backups occur eg `0430` | |
- When using compression with MongoDB, only `GZ` compression is possible.
> You may need to wrap your `DEFAULT_BACKUP_BEGIN` value in quotes for it to properly parse. There have been reports of values that start with a `0` being converted into a different format, which prevents the timer from starting at the correct time. See the sketch below.
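To illustrate the four accepted forms (pick one; values illustrative, quoted per the note above):
```bash
DEFAULT_BACKUP_BEGIN="2330"                 # absolute HHMM - daily at 23:30
DEFAULT_BACKUP_BEGIN="+10"                  # 10 minutes after container start
DEFAULT_BACKUP_BEGIN="2023-12-21 23:30:00"  # full datestamp
DEFAULT_BACKUP_BEGIN="30 23 * * *"          # cron expression (BACKUP_INTERVAL ignored)

# Optionally skip any backup that would fire between 04:20 and 04:30
DEFAULT_BACKUP_BLACKOUT_BEGIN=0420
DEFAULT_BACKUP_BLACKOUT_END=0430
```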
#### Backing Up to S3 Compatible Services
If `BACKUP_LOCATION` = `S3` then the following options are used.
##### Default Database Options
| Parameter | Description | Default | `_FILE` |
| --------------------- | ----------------------------------------------------------------------------------------- | ------- | ------- |
| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | | x |
| `S3_KEY_ID` | S3 Key ID (Optional) | | x |
| `S3_KEY_SECRET` | S3 Key Secret (Optional) | | x |
| `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | | x |
| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | | x |
| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | | x |
| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` | x |
| `S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | | x |
| `S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | | x |
| _*OR*_ | | | |
| `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` | |
###### CouchDB
- When `S3_KEY_ID` and/or `S3_KEY_SECRET` is not set, will try to use IAM role assigned (if any) for uploading the backup files to S3 bucket.
| Variable | Description | Default | `_FILE` |
| -------------- | ------------ | ------- | ------- |
| `DEFAULT_PORT` | CouchDB Port | `5984` | x |
#### Upload to a Azure storage account by `blobxfer`
###### InfluxDB
Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer) to the Azure file share storage.
| Variable | Description | Default | `_FILE` |
| ------------------------ | ------------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DEFAULT_PORT` | InfluxDB Port | | x |
| | Version 1.x | `8088` | |
| | Version 2.x | `8086` | |
| `DEFAULT_INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2` series - amd64 and aarch/armv8 only for `2` | `2` | |
If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
###### MariaDB/MySQL
| Parameter | Description | Default | `_FILE` |
| ------------------------------ | ------------------------------------------- | ------------------- | ------- |
| `BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
| Variable | Description | Default | `_FILE` |
| ---------------------------------- | --------------------------------------------------------------------------------------------------------- | ------------------------- | ------- |
| `DEFAULT_PORT` | MySQL / MariaDB Port | `3306` | x |
| `DEFAULT_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `DEFAULT_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DEFAULT_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `DEFAULT_MYSQL_EVENTS` | Backup Events | `TRUE` | |
| `DEFAULT_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
| `DEFAULT_MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction | `TRUE` | |
| `DEFAULT_MYSQL_STORED_PROCEDURES` | Backup stored procedures | `TRUE` | |
| `DEFAULT_MYSQL_ENABLE_TLS` | Enable TLS functionality | `FALSE` | |
| `DEFAULT_MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` | |
| `DEFAULT_MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` | |
| `DEFAULT_MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` | x |
| `DEFAULT_MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | | x |
| `DEFAULT_MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | | x |
> This service uploads files from backup target directory `DB_DUMP_TARGET`.
> If a cleanup configuration in `DB_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
###### Microsoft SQL
| Variable | Description | Default | `_FILE` |
| -------------------- | --------------------------------------- | ---------- | ------- |
| `DEFAULT_PORT` | Microsoft SQL Port | `1433` | x |
| `DEFAULT_MSSQL_MODE` | Backup `DATABASE` or `TRANSACTION` logs | `DATABASE` | |
###### MongoDB
| Variable | Description | Default | `_FILE` |
| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | ------- | ------- |
| `DEFAULT_AUTH` | (Optional) Authentication Database | | x |
| `DEFAULT_PORT` | MongoDB Port | `27017` | x |
| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | | x |
| | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. | | |
| | You can override them by making your own entries | | |
###### Postgresql
| Variable | Description | Default | `_FILE` |
| -------------------------------- | --------------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DEFAULT_AUTH` | (Optional) Authentication Database | | x |
| `DEFAULT_BACKUP_GLOBALS` | Backup Globals as part of backup procedure | | |
| `DEFAULT_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `DEFAULT_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DEFAULT_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `DEFAULT_PORT` | PostgreSQL Port | `5432` | x |
###### Redis
| Variable | Description | Default | `_FILE` |
| -------------------------------- | --------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DEFAULT_PORT` | Default Redis Port | `6379` | x |
| `DEFAULT_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
##### Default Storage Options
Options that are related to the value of `DEFAULT_BACKUP_LOCATION`
###### Filesystem
If `DEFAULT_BACKUP_LOCATION` = `FILESYSTEM` then the following options are used:
| Variable | Description | Default |
| ------------------------------------ | ----------------------------------------------------------------------------------------------------- | ------------------------------------- |
| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `DEFAULT_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
| `DEFAULT_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
| `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional directory where the database dump archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` |
| `DEFAULT_FILESYSTEM_PERMISSION` | Permissions to apply to files. | `600` |
###### S3
If `DEFAULT_BACKUP_LOCATION` = `S3` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| ----------------------------- | ----------------------------------------------------------------------------------------- | ------- | ------- |
| `DEFAULT_S3_BUCKET` | S3 Bucket name e.g. `mybucket` | | x |
| `DEFAULT_S3_KEY_ID` | S3 Key ID (Optional) | | x |
| `DEFAULT_S3_KEY_SECRET` | S3 Key Secret (Optional) | | x |
| `DEFAULT_S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | | x |
| `DEFAULT_S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | | x |
| `DEFAULT_S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | | x |
| `DEFAULT_S3_PROTOCOL` | Protocol to connect to `DEFAULT_S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` | x |
| `DEFAULT_S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | | x |
| `DEFAULT_S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | | x |
| _*OR*_ | | | |
| `DEFAULT_S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` | |
- When `DEFAULT_S3_KEY_ID` and/or `DEFAULT_S3_KEY_SECRET` are not set, the image will try to use an assigned IAM role (if any) for uploading the backup files to the S3 bucket.
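A sketch of backing up to a self-hosted S3-compatible endpoint such as MinIO (hostname, bucket, and credentials are illustrative; the key pair shown is the canonical AWS documentation example):
```bash
DEFAULT_BACKUP_LOCATION=S3
DEFAULT_S3_HOST=minio:8080
DEFAULT_S3_PROTOCOL=https
DEFAULT_S3_BUCKET=mybucket
DEFAULT_S3_PATH=backup
DEFAULT_S3_REGION=us-east-1
DEFAULT_S3_KEY_ID=AKIAIOSFODNN7EXAMPLE
DEFAULT_S3_KEY_SECRET=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
```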
###### Azure
If `DEFAULT_BACKUP_LOCATION` = `blobxfer` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| -------------------------------------- | ------------------------------------------- | ------------------- | ------- |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DEFAULT_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
> This service uploads files from the backup target directory `DEFAULT_FILESYSTEM_PATH`.
> If a cleanup configuration in `DEFAULT_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
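And the blobxfer equivalent (account name, key, and remote path are illustrative):
```bash
DEFAULT_BACKUP_LOCATION=blobxfer
DEFAULT_BLOBXFER_STORAGE_ACCOUNT=mystorageaccount
DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY=base64exampleaccountkey==
DEFAULT_BLOBXFER_REMOTE_PATH=/docker-db-backup
```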
##### Hooks
###### Path Options
| Parameter | Description | Default |
| ------------------------------ | --------------------------------------------------------------------------- | ----------------------- |
| `DEFAULT_SCRIPT_LOCATION_PRE` | Location on filesystem inside container to execute bash scripts pre backup | `/assets/scripts/pre/` |
| `DEFAULT_SCRIPT_LOCATION_POST` | Location on filesystem inside container to execute bash scripts post backup | `/assets/scripts/post/` |
| `DEFAULT_PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | |
| `DEFAULT_POST_SCRIPT` | Fill this variable in with a command to execute post backing up | |
###### Pre Backup
If you want to execute a custom script before a backup starts, you can drop bash scripts with the extension of `.sh` in the location defined in `DB01_SCRIPT_LOCATION_PRE`. See the following example:
```bash
$ cat pre-script.sh
#!/bin/bash
# #### Example Pre Script
# #### $1=DBXX_TYPE (Type of Backup)
# #### $2=DBXX_HOST (Backup Host)
# #### $3=DBXX_NAME (Name of Database backed up)
# #### $4=BACKUP START TIME (Seconds since Epoch)
# #### $5=BACKUP FILENAME (Filename)
echo "${1} Backup Starting on ${2} for ${3} at ${4}. Filename: ${5}"

# How the container invokes each pre script:
# script DBXX_TYPE DBXX_HOST DBXX_NAME STARTEPOCH BACKUP_FILENAME
# ${f} "${backup_job_db_type}" "${backup_job_db_host}" "${backup_job_db_name}" "${backup_routines_start_time}" "${backup_job_file}"
```
Outputs the following on the console:
`mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_20220315-000000.sql.bz2`
###### Post backup
If you want to execute a custom script at the end of a backup, you can drop bash scripts with the extension of `.sh` in the location defined in `DB01_SCRIPT_LOCATION_POST`. To support legacy users, `/assets/custom-scripts` is also scanned and executed. See the following example:
```bash
$ cat post-script.sh
#!/bin/bash
# #### Example Post Script
# #### $1=EXIT_CODE (After running backup routine)
# #### $2=DBXX_TYPE (Type of Backup)
# #### $3=DBXX_HOST (Backup Host)
# #### $4=DBXX_NAME (Name of Database backed up)
# #### $5=BACKUP START TIME (Seconds since Epoch)
# #### $6=BACKUP FINISH TIME (Seconds since Epoch)
# #### $7=BACKUP TOTAL TIME (Seconds between Start and Finish)
# #### $8=BACKUP FILENAME (Filename)
# #### $9=BACKUP FILESIZE
# #### $10=HASH (If CHECKSUM enabled)
# #### $11=MOVE_EXIT_CODE
echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes MD5: ${10}"

# How the container invokes each post script:
# script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE MOVE_EXIT_CODE
# ${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${backup_job_db_name}" "${backup_routines_start_time}" "${backup_routines_finish_time}" "${backup_routines_total_time}" "${backup_job_file}" "${filesize}" "${checksum_value}" "${move_exit_code}"
```
Outputs the following on the console:
`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_20220315-000000.sql.bz2 Size: 7795 bytes MD5: 952fbaafa30437494fdf3989a662cd40`
If you wish to change the size value from bytes to megabytes, set the environment variable `DEFAULT_SIZE_VALUE=megabytes`
You must make your scripts executable; an internal check will otherwise skip running them.
If your filesystem or host does not report the executable bit correctly, use the environment variable `DB01_POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass the check.
#### Job Backup Options
If `DEFAULT_` variables are set and you do not wish for the settings to carry over into your jobs, set the appropriate environment variable to the value `unset`.
Otherwise, override them per backup job. Additional backup jobs can be scheduled by using the `DB02_`, `DB03_`, `DB04_` ... prefixes. See [Specific Database Options](#specific-database-options), which may overrule this list.
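For example (env-file style; values are illustrative only):

```bash
# Global defaults applied to every job
DEFAULT_COMPRESSION=ZSTD
DEFAULT_CHECKSUM=MD5

# DB01 inherits both defaults
DB01_TYPE=mysql
DB01_HOST=mariadb

# DB02 overrides one default and clears the other
DB02_TYPE=pgsql
DB02_HOST=postgres
DB02_COMPRESSION=GZ
DB02_CHECKSUM=unset
```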
| Parameter | Description | Default | `_FILE` |
| ----------- | ---------------------------------------------------------------------------------------------- | ------- | ------- |
| `DB01_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `mssql` `pgsql` `mongo` `redis` `sqlite3` | | |
| `DB01_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | | x |
| `DB01_NAME` | Schema Name e.g. `database` | | x |
| `DB01_USER` | username for the database(s) - Can use `root` for MySQL | | x |
| `DB01_PASS` | Password for the database (optional if the DB doesn't require one) | | x |
| Variable | Description | Default |
| ------------------------------ | --------------------------------------------------------------------------------------------------------- | ------------ |
| `DB01_BACKUP_LOCATION` | Backup to `FILESYSTEM`, `blobxfer` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `DB01_CHECKSUM` | Either `MD5` or `SHA1` or `NONE` | `MD5` |
| `DB01_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | |
| `DB01_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | |
| `DB01_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | |
| `DB01_LOG_LEVEL` | Log output on screen and in files `INFO` `NOTICE` `ERROR` `WARN` `DEBUG` | `debug` |
| `DB01_RESOURCE_OPTIMIZED` | Perform operations at a lower priority to the CPU and IO scheduler | `FALSE` |
| `DB01_SKIP_AVAILABILITY_CHECK` | Before backing up - skip connectivity check | `FALSE` |
##### Compression Options
| Variable | Description | Default |
| ----------------------------------- | ---------------------------------------------------------------------------------------------- | -------------- |
| `DB01_COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
| `DB01_COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` | `3` |
| | except for `ZSTD` which allows for `1` to `19` | |
| `DB01_GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. | `FALSE` |
| `DB01_ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
| `DB01_PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
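As a sketch, a job tuned for maximum compression on a four-core host might look like this (values are illustrative):

```bash
DB01_COMPRESSION=ZSTD
DB01_COMPRESSION_LEVEL=19             # ZSTD accepts 1-19; most other methods accept 1-9
DB01_ENABLE_PARALLEL_COMPRESSION=TRUE
DB01_PARALLEL_COMPRESSION_THREADS=4   # otherwise autodetected
```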
##### Encryption Options
Encryption will occur after compression and the resulting filename will have a `.gpg` suffix
| Variable | Description | Default | `_FILE` |
| -------------------------- | -------------------------------------------- | ------- | ------- |
| `DB01_ENCRYPT` | Encrypt file after backing up with GPG | `FALSE` | |
| `DB01_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG | | x |
| *or* | | | |
| `DB01_ENCRYPT_PUBLIC_KEY` | Path of public key to encrypt file with GPG | | x |
| `DB01_ENCRYPT_PRIVATE_KEY` | Path of private key to encrypt file with GPG | | x |
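A minimal passphrase-based sketch, using the `_FILE` variant to keep the secret out of the environment (the mount path is hypothetical):

```bash
DB01_ENCRYPT=TRUE
DB01_ENCRYPT_PASSPHRASE_FILE=/run/secrets/backup_passphrase  # hypothetical secret mount point
# The resulting backup will carry a .gpg suffix after compression
```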
##### Scheduling Options
| Variable | Description | Default |
| ---------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `DB01_BACKUP_INTERVAL` | How often to do a backup, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB01_BACKUP_BEGIN` | What time to do the initial backup. Defaults to immediate (`+0`) | `+0` |
| | Must be in one of four formats: | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` (in an hour and a half) | |
| | Full datestamp e.g. `2023-12-21 23:30:00` | |
| | Cron expression e.g. `30 23 * * *` [Understand the format](https://en.wikipedia.org/wiki/Cron) - *BACKUP_INTERVAL is ignored* | |
| `DB01_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when backup interval executes) | `FALSE` |
| | 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | |
| `DB01_ARCHIVE_TIME` | Value in minutes to move all files older than (x) from `DB01_FILESYSTEM_PATH` | |
| | to `DB01_FILESYSTEM_ARCHIVE_PATH` - which is useful when pairing with an external backup system. | |
| `DB01_BACKUP_BLACKOUT_BEGIN` | Use `HHMM` notation to start a blackout period where no backups occur e.g. `0420` | |
| `DB01_BACKUP_BLACKOUT_END` | Use `HHMM` notation to set the end of the blackout period where no backups occur e.g. `0430` | |
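The four accepted `_BACKUP_BEGIN` formats side by side (pick one; values are illustrative):

```bash
DB01_BACKUP_BEGIN=+10                       # relative: 10 minutes after container start
#DB01_BACKUP_BEGIN=2330                     # absolute: 23:30 every day
#DB01_BACKUP_BEGIN="2023-12-21 23:30:00"    # full datestamp
#DB01_BACKUP_BEGIN="30 23 * * *"            # cron expression; BACKUP_INTERVAL is ignored
DB01_BACKUP_INTERVAL=1440                   # then once per day
```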
##### Specific Database Options
###### CouchDB
| Variable | Description | Default | `_FILE` |
| ----------- | ------------ | ------- | ------- |
| `DB01_PORT` | CouchDB Port | `5984` | x |
###### InfluxDB
| Variable | Description | Default | `_FILE` |
| --------------------- | ------------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DB01_PORT` | InfluxDB Port | | x |
| | Version 1.x | `8088` | |
| | Version 2.x | `8086` | |
| `DB01_INFLUX_VERSION` | Which InfluxDB version to back up from: `1`.x or `2` series - amd64 and aarch64/armv8 only for `2` | `2` | |
> Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`.
> You may use `DB_NAME=ALL` to backup the entire set of databases.
> For `DB_HOST` use syntax of `http(s)://db-name`
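Putting those notes together, a sketch for an InfluxDB 2.x job (hostname, organization, and token are placeholders):

```bash
DB01_TYPE=influx
DB01_INFLUX_VERSION=2
DB01_HOST=http://influxdb      # note the http(s):// syntax
DB01_PORT=8086                 # 2.x default
DB01_NAME=ALL                  # back up the entire set of databases
DB01_USER=my-org               # organization maps to _USER
DB01_PASS=my-root-token        # root token maps to _PASS
```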
###### MariaDB/MySQL
| Variable | Description | Default | `_FILE` |
| ------------------------------- | --------------------------------------------------------------------------------------------------------- | ------------------------- | ------- |
| `DB01_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `DB01_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `DB01_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
| | Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB01_NAME_EXCLUDE` | If using `ALL` - use this to exclude databases, separated by commas, from being backed up | | x |
| `DB01_PORT` | MySQL / MariaDB Port | `3306` | x |
| `DB01_MYSQL_EVENTS` | Backup events | `TRUE` | |
| `DB01_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
| `DB01_MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction | `TRUE` | |
| `DB01_MYSQL_STORED_PROCEDURES` | Backup stored procedures | `TRUE` | |
| `DB01_MYSQL_ENABLE_TLS` | Enable TLS functionality | `FALSE` | |
| `DB01_MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` | |
| `DB01_MYSQL_TLS_VERSION` | Which TLS versions to utilize: `TLSv1.1` `TLSv1.2` `TLSv1.3` | `TLSv1.1,TLSv1.2,TLSv1.3` | |
| `DB01_MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` | x |
| `DB01_MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | | x |
| `DB01_MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | | x |
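For instance, backing up everything except a couple of schemas over verified TLS might look like this sketch (hostname and certificate path are placeholders):

```bash
DB01_TYPE=mysql
DB01_HOST=mariadb
DB01_NAME=ALL
DB01_NAME_EXCLUDE=information_schema,performance_schema
DB01_MYSQL_ENABLE_TLS=TRUE
DB01_MYSQL_TLS_VERIFY=TRUE
DB01_MYSQL_TLS_CA_FILE=/certs/ca.pem   # custom CA bundle mounted into the container
```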
###### Microsoft SQL
| Variable | Description | Default | `_FILE` |
| ----------------- | --------------------------------------- | ---------- | ------- |
| `DB01_PORT` | Microsoft SQL Port | `1433` | x |
| `DB01_MSSQL_MODE` | Backup `DATABASE` or `TRANSACTION` logs | `DATABASE` | |
###### MongoDB
| Variable | Description | Default | `_FILE` |
| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------- | ------- |
| `DB01_AUTH` | (Optional) Authentication Database | | |
| `DB01_PORT` | MongoDB Port | `27017` | x |
| `DB01_MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | | x |
| | This environment variable will be parsed to populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. | | |
| | You can override them by making your own entries | | |
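A sketch of an Atlas-style job using the custom URI (cluster address and credentials are placeholders):

```bash
DB01_TYPE=mongo
DB01_MONGO_CUSTOM_URI=mongodb+srv://username:password@cluster.id.mongodb.net
# DB01_NAME and DB01_HOST are parsed from the URI for filename purposes;
# uncomment to override:
#DB01_NAME=mydb
```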
###### Postgresql
| Variable | Description | Default | `_FILE` |
| ----------------------------- | --------------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DB01_AUTH` | (Optional) Authentication Database | | |
| `DB01_BACKUP_GLOBALS` | Backup globals after backing up the database (forces `TRUE` if `_NAME=ALL`) | `FALSE` | |
| `DB01_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `DB01_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `DB01_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
| | Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB01_PORT` | PostgreSQL Port | `5432` | x |
###### Redis
| Variable | Description | Default | `_FILE` |
| ------------------------ | --------------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DB01_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `DB01_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `DB01_PORT` | Redis Port | `6379` | x |
###### SQLite
| Variable | Description | Default | `_FILE` |
| ----------- | -------------------------------------------------------- | ------- | ------- |
| `DB01_HOST` | Enter the full path to DB file e.g. `/backup/db.sqlite3` | | x |
##### Specific Storage Options
Options that are related to the value of `DB01_BACKUP_LOCATION`
###### Filesystem
If `DB01_BACKUP_LOCATION` = `FILESYSTEM` then the following options are used:
| Variable | Description | Default |
| --------------------------------- | ----------------------------------------------------------------------------------------------------- | ---------------------------------- |
| `DB01_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `DB01_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
| `DB01_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
| `DB01_FILESYSTEM_ARCHIVE_PATH` | Optional directory where archived database dumps are kept | `${DB01_FILESYSTEM_PATH}/archive/` |
| `DB01_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `600` |
###### S3
If `DB01_BACKUP_LOCATION` = `S3` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| -------------------------- | ----------------------------------------------------------------------------------------- | ------- | ------- |
| `DB01_S3_BUCKET` | S3 Bucket name e.g. `mybucket` | | x |
| `DB01_S3_KEY_ID` | S3 Key ID (Optional) | | x |
| `DB01_S3_KEY_SECRET` | S3 Key Secret (Optional) | | x |
| `DB01_S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | | x |
| `DB01_S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | | x |
| `DB01_S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | | x |
| `DB01_S3_PROTOCOL` | Protocol to connect to `DB01_S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` | x |
| `DB01_S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | | x |
| `DB01_S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | | x |
| _*OR*_ | | | |
| `DB01_S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` | |
> When `DB01_S3_KEY_ID` and/or `DB01_S3_KEY_SECRET` is not set, will try to use IAM role assigned (if any) for uploading the backup files to S3 bucket.
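As an example, a MinIO-style target might be configured like this sketch (bucket, keys, and host are placeholders):

```bash
DB01_BACKUP_LOCATION=S3
DB01_S3_BUCKET=mybucket
DB01_S3_PATH=backup                 # no trailing slash
DB01_S3_HOST=minio:8080
DB01_S3_PROTOCOL=https
DB01_S3_KEY_ID=exampleaccesskey     # omit both keys to fall back to an IAM role
DB01_S3_KEY_SECRET=examplesecretkey
```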
###### Azure
If `DB01_BACKUP_LOCATION` = `blobxfer` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| ----------------------------------- | ------------------------------------------- | ------------------- | ------- |
| `DB01_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DB01_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
> This service uploads files from the backup directory `DB01_FILESYSTEM_PATH`.
> If a cleanup configuration in `DB01_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
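A minimal sketch (storage account name and key are placeholders):

```bash
DB01_BACKUP_LOCATION=blobxfer
DB01_BLOBXFER_STORAGE_ACCOUNT=examplestorageaccount
DB01_BLOBXFER_STORAGE_ACCOUNT_KEY=examplestoragekey
DB01_BLOBXFER_REMOTE_PATH=/docker-db-backup
```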
##### Hooks
###### Path Options
| Parameter | Description | Default |
| --------------------------- | --------------------------------------------------------------------------- | ----------------------- |
| `DB01_SCRIPT_LOCATION_PRE` | Location on filesystem inside container to execute bash scripts pre backup | `/assets/scripts/pre/` |
| `DB01_SCRIPT_LOCATION_POST` | Location on filesystem inside container to execute bash scripts post backup | `/assets/scripts/post/` |
| `DB01_PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | |
| `DB01_POST_SCRIPT` | Fill this variable in with a command to execute post backing up | |
###### Pre Backup
If you want to execute a custom script before a backup starts, you can drop bash scripts with the extension `.sh` into the location defined in `DB01_SCRIPT_LOCATION_PRE`. See the following example:
```bash
$ cat pre-script.sh
#!/bin/bash
# #### Example Pre Script
# #### $1=DB01_TYPE (Type of Backup)
# #### $2=DB01_HOST (Backup Host)
# #### $3=DB01_NAME (Name of Database backed up)
# #### $4=BACKUP START TIME (Seconds since Epoch)
# #### $5=BACKUP FILENAME (Filename)
echo "${1} Backup Starting on ${2} for ${3} at ${4}. Filename: ${5}"
```

Invoked by the container as:

```bash
## script DB01_TYPE DB01_HOST DB01_NAME STARTEPOCH BACKUP_FILENAME
${f} "${backup_job_db_type}" "${backup_job_db_host}" "${backup_job_db_name}" "${backup_routines_start_time}" "${backup_job_filename}"
```
Outputs the following on the console:
`mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_20220315-000000.sql.bz2`
###### Post backup
If you want to execute a custom script at the end of a backup, you can drop bash scripts with the extension `.sh` into the location defined in `DB01_SCRIPT_LOCATION_POST`. To support legacy users, `/assets/custom-scripts` is also scanned and executed. See the following example:
```bash
$ cat post-script.sh
#!/bin/bash
# #### Example Post Script
# #### $1=EXIT_CODE (After running backup routine)
# #### $2=DB_TYPE (Type of Backup)
# #### $3=DB_HOST (Backup Host)
# #### $4=DB_NAME (Name of Database backed up)
# #### $5=BACKUP START TIME (Seconds since Epoch)
# #### $6=BACKUP FINISH TIME (Seconds since Epoch)
# #### $7=BACKUP TOTAL TIME (Seconds between Start and Finish)
# #### $8=BACKUP FILENAME (Filename)
# #### $9=BACKUP FILESIZE
# #### $10=HASH (If CHECKSUM enabled)
# #### $11=MOVE_EXIT_CODE
echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes MD5: ${10}"
```

Invoked by the container as:

```bash
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE MOVE_EXIT_CODE
${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_routines_start_time}" "${backup_routines_finish_time}" "${backup_routines_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
```
Outputs the following on the console:
`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_20220315-000000.sql.bz2 Size: 7795 bytes MD5: 952fbaafa30437494fdf3989a662cd40`
If you wish to change the size value from bytes to megabytes set environment variable `DB01_SIZE_VALUE=megabytes`
You must make your scripts executable; an internal check will otherwise skip running them.
If your filesystem or host does not report the executable bit correctly, use the environment variable `DB01_POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass the check.
#### Notifications
This image can send notifications via a handful of services when a backup job fails. This is a global option and cannot be set per backup job.
| Parameter | Description | Default |
| ---------------------- | --------------------------------------------------------------------------------- | ------- |
| `ENABLE_NOTIFICATIONS` | Enable Notifications | `FALSE` |
| `NOTIFICATION_TYPE` | `CUSTOM` `EMAIL` `MATRIX` `MATTERMOST` `ROCKETCHAT` - Separate multiple by commas | |
##### Custom Notifications
The following arguments are sent to the custom script; use them as you wish:
````
$1 unix timestamp
$2 logfile
$3 errorcode
$4 subject
$5 body/error message
````
| Parameter | Description | Default |
| ---------------------------- | ------------------------------------------------------- | ------- |
| `NOTIFICATION_CUSTOM_SCRIPT` | Path and name of custom script to execute notification. | |
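A hedged example of such a handler, forwarding the five arguments above to a hypothetical webhook endpoint:

```bash
#!/bin/bash
# Example custom notification handler (illustrative only)
# $1 unix timestamp  $2 logfile  $3 errorcode  $4 subject  $5 body/error message
timestamp="${1}"
logfile="${2}"
errorcode="${3}"
subject="${4}"
body="${5}"

# hooks.example.com is a placeholder; point this at your own endpoint
curl -sS -X POST -H 'Content-Type: application/json' \
     -d "{\"text\": \"[${errorcode}] ${subject}: ${body} (log: ${logfile}, ts: ${timestamp})\"}" \
     "https://hooks.example.com/notify"
```

Point `NOTIFICATION_CUSTOM_SCRIPT` at wherever you mount the script inside the container.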
##### Email Notifications
See the base image documentation listed above for more mail environment variables.
| Parameter | Description | Default | `_FILE` |
| ----------- | ----------------------------------------------------------------------------------------- | ------- | ------- |
| `MAIL_FROM` | What email address to send mail from for errors | | |
| `MAIL_TO` | What email address to send mail to for errors. Send to multiple by separating with commas. | | |
| `SMTP_HOST` | What SMTP server to use for sending mail | | x |
| `SMTP_PORT` | What SMTP port to use for sending mail | | x |
##### Matrix Notifications
Fetch a `MATRIX_ACCESS_TOKEN`:
````
curl -XPOST -d '{"type":"m.login.password", "user":"myuserid", "password":"mypass"}' "https://matrix.org/_matrix/client/r0/login"
````
Copy the JSON response `access_token` that will look something like this:
````
{"access_token":"MDAxO...blahblah","refresh_token":"MDAxO...blahblah","home_server":"matrix.org","user_id":"@myuserid:matrix.org"}
````
| Parameter | Description | Default | `_FILE` |
| --------------------- | ---------------------------------------------------------------------------------------- | ------- | ------- |
| `MATRIX_HOST` | URL (https://matrix.example.com) of Matrix Homeserver | | x |
| `MATRIX_ROOM` | Room ID e.g. `\!abcdef:example.com` to send to. Send to multiple by separating with commas. | | x |
| `MATRIX_ACCESS_TOKEN` | Access token of user authorized to send to room | | x |
##### Mattermost Notifications
| Parameter | Description | Default | `_FILE` |
| ------------------------ | -------------------------------------------------------------------------------------------- | ------- | ------- |
| `MATTERMOST_WEBHOOK_URL` | Full URL to send webhook notifications to | | x |
| `MATTERMOST_RECIPIENT` | Channel or User to send Webhook notifications to. Send to multiple by separating with commas. | | x |
| `MATTERMOST_USERNAME` | Username to send as eg `tiredofit` | | x |
##### Rocketchat Notifications
| Parameter | Description | Default | `_FILE` |
| ------------------------ | -------------------------------------------------------------------------------------------- | ------- | ------- |
| `ROCKETCHAT_WEBHOOK_URL` | Full URL to send webhook notifications to | | x |
| `ROCKETCHAT_RECIPIENT` | Channel or User to send Webhook notifications to. Send to multiple by separating with commas. | | x |
| `ROCKETCHAT_USERNAME` | Username to send as eg `tiredofit` | | x |
## Maintenance
@@ -260,7 +798,7 @@ docker exec -it (whatever your container name is) bash
### Manual Backups
Manual Backups can be performed by entering the container and typing `backup-now`
Manual Backups can be performed by entering the container and typing `backup-now`. This will execute all of the backup jobs scheduled by means of the `DBXX_` variables. Alternatively, to execute a single job on its own, type `backup01-now` (or whichever number applies). There is no concurrency; jobs are executed sequentially.
- Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting `MODE=MANUAL` and `MANUAL_RUN_FOREVER=FALSE`. You would also want to disable a few features from the upstream base images, specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`, as sketched below. This should allow the container to start, execute a backup, and exit cleanly. An alternative way of running the script is to execute `/etc/services.available/10-db-backup/run`.
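A sketch of that one-shot configuration (env-file style):

```bash
MODE=MANUAL
MANUAL_RUN_FOREVER=FALSE
CONTAINER_ENABLE_SCHEDULING=FALSE
CONTAINER_ENABLE_MONITORING=FALSE
```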
@@ -287,75 +825,7 @@ The script can also be executed skipping the interactive mode by using the follo
If you only enter some of the arguments you will be prompted to fill them in.
### Custom Scripts
#### Path Options
| Parameter | Description | Default |
| ---------------------- | --------------------------------------------------------------------------- | ----------------------- |
| `SCRIPT_LOCATION_PRE` | Location on filesystem inside container to execute bash scripts pre backup | `/assets/scripts/pre/` |
| `SCRIPT_LOCATION_POST` | Location on filesystem inside container to execute bash scripts post backup | `/assets/scripts/post/` |
#### Pre Backup
If you want to execute a custom script before a backup starts, you can drop bash scripts with the extension `.sh` into the location defined in `SCRIPT_LOCATION_PRE`. See the following example:
```bash
$ cat pre-script.sh
#!/bin/bash
# #### Example Pre Script
# #### $1=DB_TYPE (Type of Backup)
# #### $2=DB_HOST (Backup Host)
# #### $3=DB_NAME (Name of Database backed up)
# #### $4=BACKUP START TIME (Seconds since Epoch)
# #### $5=BACKUP FILENAME (Filename)
echo "${1} Backup Starting on ${2} for ${3} at ${4}. Filename: ${5}"
```

Invoked by the container as:

```bash
## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
${f} "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${target}"
```
Outputs the following on the console:
`mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_20220315-000000.sql.bz2`
#### Post backup
If you want to execute a custom script at the end of a backup, you can drop bash scripts with the extension `.sh` into the location defined in `SCRIPT_LOCATION_POST`. To support legacy users, `/assets/custom-scripts` is also scanned and executed. See the following example:
```bash
$ cat post-script.sh
#!/bin/bash
# #### Example Post Script
# #### $1=EXIT_CODE (After running backup routine)
# #### $2=DB_TYPE (Type of Backup)
# #### $3=DB_HOST (Backup Host)
# #### $4=DB_NAME (Name of Database backed up)
# #### $5=BACKUP START TIME (Seconds since Epoch)
# #### $6=BACKUP FINISH TIME (Seconds since Epoch)
# #### $7=BACKUP TOTAL TIME (Seconds between Start and Finish)
# #### $8=BACKUP FILENAME (Filename)
# #### $9=BACKUP FILESIZE
# #### $10=HASH (If CHECKSUM enabled)
# #### $11=MOVE_EXIT_CODE
echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes MD5: ${10}"
```

Invoked by the container as:

```bash
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE MOVE_EXIT_CODE
${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}" "${move_exit_code}"
```
Outputs the following on the console:
`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_20220315-000000.sql.bz2 Size: 7795 bytes MD5: 952fbaafa30437494fdf3989a662cd40`
If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`
You must make your scripts executable; an internal check will otherwise skip running them.
If your filesystem or host does not report the executable bit correctly, use the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass the check.
## Support
@@ -364,7 +834,7 @@ These images were built to serve a specific need in a production environment and
### Usage
- The [Discussions board](../../discussions) is a great place for working with the community on tips and tricks of using this image.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for personalized support
- [Sponsor me](https://www.tiredofit.ca/sponsor) for personalized support
### Bugfixes
@@ -373,12 +843,12 @@ These images were built to serve a specific need in a production environment and
### Feature Requests
- Feel free to submit a feature request, however there is no guarantee that it will be added, or at what timeline.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) regarding development of features.
- [Sponsor me](https://www.tiredofit.ca/sponsor) regarding development of features.
### Updates
- Best effort to track upstream changes, More priority if I am actively using the image in a production environment.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for up to date releases.
- [Sponsor me](https://www.tiredofit.ca/sponsor) for up to date releases.
## License

examples/compose.yml Normal file

@@ -0,0 +1,67 @@
services:
example-db:
hostname: example-db-host
container_name: example-db
image: tiredofit/mariadb:10.11
ports:
- 3306:3306
volumes:
- ./db:/var/lib/mysql
environment:
- ROOT_PASS=examplerootpassword
- DB_NAME=example
- DB_USER=example
- DB_PASS=examplepassword
restart: always
networks:
- example-db-network
example-db-backup:
container_name: example-db-backup
image: tiredofit/db-backup
volumes:
- ./backups:/backup
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- TIMEZONE=America/Vancouver
- CONTAINER_NAME=example-db-backup
- CONTAINER_ENABLE_MONITORING=FALSE
# - DEBUG_MODE=TRUE
- BACKUP_JOB_CONCURRENCY=1 # Only run one job at a time
- DEFAULT_CHECKSUM=NONE # Don't create checksums
- DEFAULT_COMPRESSION=ZSTD # Compress all with ZSTD
- DEFAULT_DUMP_INTERVAL=1440 # Backup every 1440 minutes
- DEFAULT_DUMP_BEGIN=0000 # Start backing up at midnight
- DEFAULT_CLEANUP_TIME=8640 # Cleanup backups older than 8640 minutes (6 days)
- DB01_TYPE=mariadb
- DB01_HOST=example-db-host
- DB01_NAME=example
- DB01_USER=example
- DB01_PASS=examplepassword
- DB01_DUMP_INTERVAL=30 # (override) Backup every 30 minutes
- DB01_DUMP_BEGIN=+1 # (override) Backup starts immediately
- DB01_CLEANUP_TIME=180 # (override) Cleanup backups when they are older than 180 minutes
- DB01_CHECKSUM=SHA1 # (override) Create a SHA1 checksum
- DB01_COMPRESSION=GZ # (override) Compress with GZIP
#- DB02_TYPE=postgres
#- DB02_HOST=example-postgres-host
#- DB02_NAME=example
#- DB02_USER=example
#- DB02_PASS=examplepassword
#- DB02_DUMP_INTERVAL=60 # (override) Backup every 60 minutes
#- DB02_DUMP_BEGIN=+10 # (override) Backup starts in ten minutes
#- DB02_CLEANUP_TIME=240 # (override) Cleanup backups when they are older than 240 minutes
#- DB02_CHECKSUM=MD5 # (override) Create an MD5 checksum
#- DB02_COMPRESSION=BZ # (override) Compress with BZIP
restart: always
networks:
- example-db-network
networks:
example-db-network:
name: example-db-network


@@ -1,13 +1,7 @@
#
#
# Example for Microsoft SQL Server
# upload with blobxfer to azure storage
#
version: '2'
networks:
example-mssql-blobxfer-net:
name: example-mssql-blobxfer-net
#
services:
example-mssql-s3-db:
@@ -32,7 +26,7 @@ services:
# execute in terminal --> docker build -t tiredofit/db-backup-mssql-blobxfer .
# replace --> image: tiredofit/db-backup-mssql
# image: tiredofit/db-backup
image: tiredofit/db-backup-mssql-blobxfer
image: tiredofit/db-backup
links:
- example-mssql-s3-db
volumes:
@@ -40,30 +34,35 @@ services:
- ./tmp/backups:/tmp/backups # shared tmp backup directory
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
# - DEBUG_MODE=TRUE
- DB_TYPE=mssql
- DB_HOST=example-db-host
# - DB_PORT=1488
# - DB_NAME=ALL # [ALL] not working on sql server.
# create database with name `test1` manually first
- DB_NAME=test1 # Create this database
- DB_USER=sa
- DB_PASS=5hQa0utRFBpIY3yhoIyE
- DB_DUMP_FREQ=1 # backup every 5 minute
# - DB_DUMP_BEGIN=0000 # backup starts immediately
- DB_CLEANUP_TIME=3 # clean backups they are older than 60 minutes
- ENABLE_CHECKSUM=TRUE
- CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
- TIMEZONE=America/Vancouver
- CONTAINER_ENABLE_MONITORING=FALSE
- CONTAINER_NAME=example-mssql-blobxfer-db-backup
# - DEBUG_MODE=TRUE
- DB01_TYPE=mssql
- DB01_HOST=example-db-host
# - DB01_PORT=1488
# create database with name `test1` manually first
- DB01_NAME=test1 # Create this database
- DB01_USER=sa
- DB01_PASS=5hQa0utRFBpIY3yhoIyE
- DB01_DUMP_INTERVAL=5 # backup every 5 minutes
# - DB01_DUMP_BEGIN=0000 # backup starts at midnight; if unset, immediately
- DB01_CLEANUP_TIME=60 # clean backups when they are older than 60 minutes
- DB01_CHECKSUM=SHA1 # Set Checksum to be SHA1
- DB01_COMPRESSION=GZ # Set compression to use GZIP
# === S3 Blobxfer ===
- BACKUP_LOCATION=blobxfer
- DB01_BACKUP_LOCATION=blobxfer
# Add here azure storage account
- BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name}
- DB01_BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name}
# Add here azure storage account key
- BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
- BLOBXFER_REMOTE_PATH=docker-db-backup
- DB01_BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
- DB01_BLOBXFER_REMOTE_PATH=docker-db-backup
restart: always
networks:
example-mssql-blobxfer-net:
example-mssql-blobxfer-net:
networks:
example-mssql-blobxfer-net:
name: example-mssql-blobxfer-net


@@ -1,12 +1,6 @@
#
#
# Example for Microsoft SQL Server
#
version: '2'
networks:
example-mssql-net:
name: example-mssql-net
#
services:
example-mssql-db:
@@ -31,7 +25,7 @@ services:
# execute in terminal --> docker build -t tiredofit/db-backup-mssql .
# replace --> image: tiredofit/db-backup-mssql
# image: tiredofit/db-backup
image: tiredofit/db-backup-mssql
image: tiredofit/db-backup
links:
- example-mssql-db
volumes:
@@ -39,23 +33,28 @@ services:
- ./tmp/backups:/tmp/backups # shared tmp backup directory
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- TIMEZONE=America/Vancouver
- CONTAINER_ENABLE_MONITORING=FALSE
- CONTAINER_NAME=example-mssql-blobxfer-db-backup
# - DEBUG_MODE=TRUE
- DB_TYPE=mssql
- DB_HOST=example-db-host
- DB01_TYPE=mssql
- DB01_HOST=example-db-host
# - DB_PORT=1488
# - DB_NAME=ALL # [ALL] not working on sql server.
# create database with name `test1` manually first
- DB_NAME=test1
- DB_USER=sa
- DB_PASS=5hQa0utRFBpIY3yhoIyE
- DB_DUMP_FREQ=1 # backup every minute
# - DB_DUMP_BEGIN=0000 # backup starts immediately
- DB_CLEANUP_TIME=5 # clean backups they are older than 5 minute
- ENABLE_CHECKSUM=FALSE
- CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
- CONTAINER_ENABLE_MONITORING=FALSE
- DB01_NAME=test1
- DB01_USER=sa
- DB01_PASS=5hQa0utRFBpIY3yhoIyE
- DB01_DUMP_INTERVAL=1 # backup every minute
# - DB01_DUMP_BEGIN=0000 # backup starts at midnight; if unset, immediately
- DB01_CLEANUP_TIME=5 # clean backups when they are older than 5 minutes
- DB01_CHECKSUM=NONE
- DB01_COMPRESSION=GZ
restart: always
networks:
example-mssql-net:
example-mssql-net:
networks:
example-mssql-net:
name: example-mssql-net


@@ -1,53 +0,0 @@
version: '2'
networks:
example-db-network:
name: example-db-network
services:
example-db:
hostname: example-db-host
container_name: example-db
image: mariadb:latest
ports:
- 13306:3306
volumes:
- ./db:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=examplerootpassword
- MYSQL_DATABASE=example
- MYSQL_USER=example
- MYSQL_PASSWORD=examplepassword
restart: always
networks:
- example-db-network
example-db-backup:
container_name: example-db-backup
image: tiredofit/db-backup
links:
- example-db
volumes:
- ./backups:/backup
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- TIMEZONE=America/Vancouver
- CONTAINER_ENABLE_MONITORING=FALSE
# - DEBUG_MODE=TRUE
- DB_TYPE=mariadb
- DB_HOST=example-db-host
- DB_NAME=example
- DB_USER=example
- DB_PASS=examplepassword
- DB_DUMP_FREQ=1 # backup every minute
# - DB_DUMP_BEGIN=0000 # backup starts immediately
- DB_CLEANUP_TIME=5 # clean backups they are older than 5 minute
- CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
restart: always
networks:
- example-db-network


@@ -4,7 +4,7 @@
# #### $1=EXIT_CODE (After running backup routine)
# #### $2=DB_TYPE (Type of Backup)
# #### $3=DB_HOST (Backup Host)
# #### #4=DB_NAME (Name of Database backed up
# #### #4=DB_NAME (Name of Database backed up)
# #### $5=BACKUP START TIME (Seconds since Epoch)
# #### $6=BACKUP FINISH TIME (Seconds since Epoch)
# #### $7=BACKUP TOTAL TIME (Seconds between Start and Finish)


@@ -0,0 +1,114 @@
#!/command/with-contenv bash
source /assets/functions/00-container
PROCESS_NAME="db-backup{{BACKUP_NUMBER}}-scheduler"
check_container_initialized
check_service_initialized init 10-db-backup
source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
bootstrap_variables backup_init {{BACKUP_NUMBER}}
bootstrap_variables parse_variables {{BACKUP_NUMBER}}
if [ -z "${backup_job_db_name}" ]; then
PROCESS_NAME="{{BACKUP_NUMBER}}${backup_job_db_host//\//_}"
else
PROCESS_NAME="{{BACKUP_NUMBER}}-${backup_job_db_host//\//_}__${backup_job_db_name}"
fi
trap ctrl_c INT
if [[ "${MODE,,}" =~ "standalone" ]] || [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
print_debug "Detected Manual Mode"
persist=false
backup_job_backup_begin=+0
else
silent sleep {{BACKUP_NUMBER}}
time_last_run=0
time_current=$(date +'%s')
if [[ "${backup_job_backup_begin}" =~ ^\+(.*)$ ]]; then
print_debug "BACKUP_BEGIN is a jump of minute starting with +"
timer plusvalue
elif [[ "${backup_job_backup_begin}" =~ ^[0-9]{4}$ ]]; then
print_debug "BACKUP_BEGIN is a HHMM value"
timer time
elif [[ "${backup_job_backup_begin}" =~ ([0-9]{4})-([0-9]{2})-([0-9]{2})[[:space:]]([0-9]{2}):([0-9]{2}):([0-9]{2}) ]]; then
print_debug "BACKUP_BEGIN is a full date timestamp"
timer datetime
#elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(.*((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then # Allow slashes, yet not supporting advanced cron yet
elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then
print_debug "BACKUP_BEGIN is a cron expression"
time_last_run=$(date +"%s")
backup_job_backup_begin=${backup_job_backup_begin//\"/}
backup_job_backup_begin=${backup_job_backup_begin//\'/}
timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
else
print_error "_BACKUP_BEGIN is invalid - Unable to perform scheduling"
cat <<EOF
Valid Methods:
+(number) - Start in however many minutes
HHMM - Start at hour (00-24) and minute (00-59)
YYYY-MM-DD HH:mm:ss - Start at a specific date and time
0 23 * * * - Cron expression
EOF
print_error "Stopping backup_scheduler {{BACKUP_NUMBER}} due to detected errors. Fix and restart container."
stop_scheduler_backup=true
s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
fi
print_debug "Wait Time: ${time_wait} Future execution time: ${time_future} Current Time: ${time_current}"
print_info "Next Backup at $(date -d @"${time_future}" +'%Y-%m-%d %T %Z')"
silent sleep "${time_wait}"
fi
while true; do
if [ -n "${backup_job_blackout_start}" ] && [ -n "${backup_job_blackout_finish}" ] ; then
time_current_hour_minute=$(date +%H%M)
if [[ "${time_current_hour_minute}" > "${backup_job_blackout_start}" ]] && [[ "${time_current_hour_minute}" < "${backup_job_blackout_finish}" ]] ; then
blackout=true
else
blackout=false
fi
fi
if var_true "${blackout}" ; then
print_notice "Detected Blackout Period - Not performing backup operations"
else
timer job start
process_limiter
echo "{{BACKUP_NUMBER}}" >> /tmp/.container/db-backup-backups
print_debug "Backup {{BACKUP_NUMBER}} routines started time: $(date +'%Y-%m-%d %T %Z')"
bootstrap_filesystem
check_availability
backup_"${dbtype,,}"
timer job stop
if [ -z "${exitcode_backup}" ] ; then exitcode_backup="0" ; fi
print_info "Backup {{BACKUP_NUMBER}} routines finish time: $(date -d @"${backup_job_finish_time}" +'%Y-%m-%d %T %Z') with exit code ${exitcode_backup}"
print_notice "Backup {{BACKUP_NUMBER}} routines time taken: $(echo "${backup_job_total_time}" | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
sed -i "/^{{BACKUP_NUMBER}}/d" /tmp/.container/db-backup-backups
fi
symlink_log
cleanup_old_data
if var_false "${persist}" ; then
print_debug "Exiting due to manual mode"
exit "${exitcode_backup}";
else
if var_true "${stop_scheduler_backup}" ; then
print_error "Stopping backup_scheduler {{BACKUP_NUMBER}} due to detected errors. Fix and restart container."
s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
else
if [ ! "${time_cron}" = "true" ]; then
print_notice "Sleeping for another $(($backup_job_backup_interval*60-backup_job_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($backup_job_backup_interval*60-backup_job_total_time))))" +'%Y-%m-%d %T %Z') "
silent sleep $(($backup_job_backup_interval*60-backup_job_total_time))
else
time_last_run=$(date +"%s")
timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
print_notice "Sleeping for another ${time_wait} seconds. Waking up at $(date -d@"${time_future}" +'%Y-%m-%d %T %Z') "
silent sleep "${time_wait}"
fi
fi
fi
done


@@ -1,32 +1,46 @@
#!/command/with-contenv bash
BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
CHECKSUM=${CHECKSUM:-"MD5"}
COMPRESSION=${COMPRESSION:-"ZSTD"}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
CREATE_LATEST_SYMLINK=${CREATE_LATEST_SYMLINK:-"TRUE"}
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
DB_DUMP_TARGET_ARCHIVE=${DB_DUMP_TARGET_ARCHIVE:-"${DB_DUMP_TARGET}/archive/"}
ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
BACKUP_JOB_CONCURRENCY=${BACKUP_JOB_CONCURRENCY:-"1"}
DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
DEFAULT_BACKUP_LOCATION=${DEFAULT_BACKUP_LOCATION:-"FILESYSTEM"}
DEFAULT_BLOBXFER_REMOTE_PATH=${DEFAULT_BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
DEFAULT_CHECKSUM=${DEFAULT_CHECKSUM:-"MD5"}
DEFAULT_COMPRESSION=${DEFAULT_COMPRESSION:-"ZSTD"}
DEFAULT_COMPRESSION_LEVEL=${DEFAULT_COMPRESSION_LEVEL:-"3"}
DEFAULT_CREATE_LATEST_SYMLINK=${DEFAULT_CREATE_LATEST_SYMLINK:-"TRUE"}
DEFAULT_ENABLE_PARALLEL_COMPRESSION=${DEFAULT_ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
DEFAULT_ENCRYPT=${DEFAULT_ENCRYPT:-"FALSE"}
DEFAULT_FILESYSTEM_PATH=${DEFAULT_FILESYSTEM_PATH:-"/backup"}
DEFAULT_FILESYSTEM_PATH_PERMISSION=${DEFAULT_FILESYSTEM_PATH_PERMISSION:-"700"}
DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"}
DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"}
DEFAULT_MYSQL_ENABLE_TLS=${DEFAULT_MYSQL_ENABLE_TLS:-"FALSE"}
DEFAULT_MYSQL_EVENTS=${DEFAULT_MYSQL_EVENTS:-"TRUE"}
DEFAULT_MYSQL_MAX_ALLOWED_PACKET=${DEFAULT_MYSQL_MAX_ALLOWED_PACKET:-"512M"}
DEFAULT_MYSQL_SINGLE_TRANSACTION=${DEFAULT_MYSQL_SINGLE_TRANSACTION:-"TRUE"}
DEFAULT_MYSQL_STORED_PROCEDURES=${DEFAULT_MYSQL_STORED_PROCEDURES:-"TRUE"}
DEFAULT_MYSQL_TLS_CA_FILE=${DEFAULT_MYSQL_TLS_CA_FILE:-"/etc/ssl/cert.pem"}
DEFAULT_MYSQL_TLS_VERIFY=${DEFAULT_MYSQL_TLS_VERIFY:-"FALSE"}
DEFAULT_MYSQL_TLS_VERSION=${DEFAULT_MYSQL_TLS_VERSION:-"TLSv1.1,TLSv1.2,TLSv1.3"}
DEFAULT_MSSQL_MODE=${DEFAULT_MSSQL_MODE:-"database"}
DEFAULT_PARALLEL_COMPRESSION_THREADS=${DEFAULT_PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
DEFAULT_RESOURCE_OPTIMIZED=${DEFAULT_RESOURCE_OPTIMIZED:-"FALSE"}
DEFAULT_S3_CERT_SKIP_VERIFY=${DEFAULT_S3_CERT_SKIP_VERIFY:-"TRUE"}
DEFAULT_S3_PROTOCOL=${DEFAULT_S3_PROTOCOL:-"https"}
DEFAULT_SCRIPT_LOCATION_PRE=${DEFAULT_SCRIPT_LOCATION_PRE:-"/assets/scripts/pre/"}
DEFAULT_SCRIPT_LOCATION_POST=${DEFAULT_SCRIPT_LOCATION_POST:-"/assets/scripts/post/"}
DEFAULT_SIZE_VALUE=${DEFAULT_SIZE_VALUE:-"bytes"}
DEFAULT_SKIP_AVAILABILITY_CHECK=${DEFAULT_SKIP_AVAILABILITY_CHECK:-"FALSE"}
DEFAULT_SPLIT_DB=${DEFAULT_SPLIT_DB:-"TRUE"}
LOG_PATH=${LOG_PATH:-"/logs"}
MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
MODE=${MODE:-"AUTO"}
MYSQL_ENABLE_TLS=${MYSQL_ENABLE_TLS:-"FALSE"}
MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
MYSQL_SINGLE_TRANSACTION=${MYSQL_SINGLE_TRANSACTION:-"TRUE"}
MYSQL_STORED_PROCEDURES=${MYSQL_STORED_PROCEDURES:-"TRUE"}
MYSQL_TLS_CA_FILE=${MYSQL_TLS_CA_FILE:-"/etc/ssl/cert.pem"}
MYSQL_TLS_VERIFY=${MYSQL_TLS_VERIFY:-"FALSE"}
MYSQL_TLS_VERSION=${MYSQL_TLS_VERSION:-"TLSv1.1,TLSv1.2,TLSv1.3"}
PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
SCRIPT_LOCATION_PRE=${SCRIPT_LOCATION_PRE:-"/assets/scripts/pre/"}
SCRIPT_LOCATION_POST=${SCRIPT_LOCATION_POST:-"/assets/scripts/post/"}
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SKIP_AVAILABILITY_CHECK=${SKIP_AVAILABILITY_CHECK:-"FALSE"}
SPLIT_DB=${SPLIT_DB:-"TRUE"}
TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
TEMP_PATH=${TEMP_PATH:-"/tmp/backups"}
if [ -n "${TEMP_LOCATION}" ] ; then TEMP_PATH=${TEMP_LOCATION:-"/tmp/backups"} ; fi # To be removed 4.3.0

File diff suppressed because it is too large


@@ -6,9 +6,9 @@ prepare_service 03-monitoring
PROCESS_NAME="db-backup"
output_off
bootstrap_variables
sanity_test
setup_mode
create_zabbix dbbackup
db_backup_container_init
create_schedulers backup
create_zabbix dbbackup4
liftoff


@@ -1,88 +0,0 @@
#!/command/with-contenv bash
source /assets/functions/00-container
source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
PROCESS_NAME="db-backup"
bootstrap_variables
if [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
DB_DUMP_BEGIN=+0
manual=TRUE
print_debug "Detected Manual Mode"
else
sleep 5
current_time=$(date +"%s")
today=$(date +"%Y%m%d")
if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
waittime=$(( ${BASH_REMATCH[1]} * 60 ))
target_time=$(($current_time + $waittime))
else
target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
if [[ "$target_time" < "$current_time" ]]; then
target_time=$(($target_time + 24*60*60))
fi
waittime=$(($target_time - $current_time))
fi
print_debug "Wait Time: ${waittime} Target time: ${target_time} Current Time: ${current_time}"
print_info "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
sleep $waittime
fi
while true; do
mkdir -p "${TEMP_LOCATION}"
backup_start_time=$(date +"%s")
print_debug "Backup routines started time: $(date +'%Y-%m-%d %T %Z')"
case "${dbtype,,}" in
"couch" )
check_availability
backup_couch
;;
"influx" )
check_availability
backup_influx
;;
"mssql" )
check_availability
backup_mssql
;;
"mysql" )
check_availability
backup_mysql
;;
"mongo" )
check_availability
backup_mongo
;;
"pgsql" )
check_availability
backup_pgsql
;;
"redis" )
check_availability
backup_redis
;;
"sqlite3" )
check_availability
backup_sqlite3
;;
esac
backup_finish_time=$(date +"%s")
backup_total_time=$(echo $((backup_finish_time-backup_start_time)))
if [ -z "$master_exit_code" ] ; then master_exit_code="0" ; fi
print_info "Backup routines finish time: $(date -d @${backup_finish_time} +"%Y-%m-%d %T %Z") with overall exit code ${master_exit_code}"
print_notice "Backup routines time taken: $(echo ${backup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
cleanup_old_data
if var_true "${manual}" ; then
print_debug "Exiting due to manual mode"
exit ${master_exit_code};
else
print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "
sleep $(($DB_DUMP_FREQ*60-backup_total_time))
fi
done


@@ -1,4 +0,0 @@
#!/command/with-contenv bash
echo '** Performing Manual Backup'
/etc/services.available/10-db-backup/run manual


@@ -0,0 +1,24 @@
#!/command/with-contenv bash
source /assets/functions/00-container
source /assets/defaults/05-logging
source /assets/defaults/10-db-backup
## Compress each log 2 days old
timestamp_2dayold_unixtime="$(stat -c %Y "${LOG_PATH}"/"$(date --date='2 days ago' +'%Y%m%d')")"
for logfile in "${LOG_PATH}"/"$(date --date='2 days ago' +'%Y%m%d')"/"$(date --date='2 days ago' +'%Y%m%d')"_*.log ; do
sudo -u restic zstd --rm --rsyncable "${logfile}"
done
touch -t $(date -d"@${timestamp_2dayold_unixtime}" +'%Y%m%d%H%M.%S') "${LOG_PATH}"/"$(date --date='2 days ago' +'%Y%m%d')"
# Look for files older than the retention period and delete
if [ -n "${LOG_PATH}" ] && [ -d "${LOG_PATH}" ] ; then
find "${LOG_PATH}" -mtime +"${LOGROTATE_RETAIN_DAYS}" -type d -exec rm -rf {} +
fi
# Look for stale symbolic links and delete accordingly
for symbolic_link in "${LOG_PATH}"/latest*.log ; do
if [ ! -e "${symbolic_link}" ] ; then
rm -rf "${symbolic_link}"
fi
done


@@ -29,7 +29,6 @@ bdgy="\e[100m" # Background Color Dark Gray
blr="\e[101m" # Background Color Light Red
boff="\e[49m" # Background Color Off
bootstrap_variables
if [ -z "${1}" ] ; then
interactive_mode=true
@@ -38,7 +37,7 @@ else
"-h" )
cat <<EOF
${IMAGE_NAME} Restore Tool ${IMAGE_VERSION}
(c) 2022 Dave Conroy (https://github.com/tiredofit)
(c) 2023 Dave Conroy (https://github.com/tiredofit) (https://www.tiredofit.ca)
This script will assist you in recovering databases taken by the Docker image.
You will be presented with a series of menus allowing you to choose:
@@ -75,10 +74,17 @@ EOF
esac
fi
control_c() {
if [ -f "${restore_vars}" ] ; then rm -rf "${restore_vars}" ; fi
print_warn "User aborted"
exit
}
get_filename() {
COLUMNS=12
prompt="Please select a file to restore:"
options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | sort -z | xargs -0) )
options=( $(find "${DEFAULT_FILESYSTEM_PATH}" -type f -maxdepth 2 -not -name '*.md5' -not -name '*.sha1' -print0 | sort -z | xargs -0) )
PS3="$prompt "
select opt in "${options[@]}" "Custom" "Quit" ; do
if (( REPLY == 2 + ${#options[@]} )) ; then
@@ -104,13 +110,17 @@ get_filename() {
get_dbhost() {
p_dbhost=$(basename -- "${r_filename}" | cut -d _ -f 3)
if [ -n "${p_dbhost}" ]; then
parsed_host=true
print_debug "Parsed DBHost: ${p_dbhost}"
if grep -q "${p_dbhost}" "${restore_vars}" ; then
detected_host_num=$(grep "${p_dbhost}" "${restore_vars}" | head -n1 | cut -c 3,4)
detected_host_value=$(grep "${p_dbhost}" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
fi
fi
if [ -z "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
if [ -z "${detected_host_value}" ] && [ -z "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 1 - No Env, No Parsed Filename"
q_dbhost_variant=1
q_dbhost_menu=$(cat <<EOF
@@ -119,18 +129,18 @@ EOF
)
fi
if [ -n "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
if [ -n "${detected_host_value}" ] && [ -z "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 2 - Env, No Parsed Filename"
q_dbhost_variant=2
q_dbhost_menu=$(cat <<EOF
C ) Custom Entered Hostname
E ) Environment Variable DB_HOST: '${DB_HOST}'
E ) Environment Variable DB${detected_host_num}_HOST: '${detected_host_value}'
EOF
)
fi
if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
if [ -z "${detected_host_value}" ] && [ -n "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 3 - No Env, Parsed Filename"
q_dbhost_variant=3
q_dbhost_menu=$(cat <<EOF
@@ -141,13 +151,13 @@ EOF
)
fi
if [ -n "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
if [ -n "${detected_host_value}" ] && [ -n "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 4 - Env, Parsed Filename"
q_dbhost_variant=4
q_dbhost_menu=$(cat <<EOF
C ) Custom Entered Hostname
E ) Environment Variable DB_HOST: '${DB_HOST}'
E ) Environment Variable DB${detected_host_num}_HOST: '${detected_host_value}'
F ) Parsed Filename Host: '${p_dbhost}'
EOF
)
@@ -174,7 +184,7 @@ EOF
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
case "${q_dbhost_menu,,}" in
c* )
counter=1
@@ -188,7 +198,7 @@ EOF
break
;;
e* | "" )
r_dbhost=${DB_HOST}
r_dbhost=${detected_host_value}
break
;;
q* )
@@ -200,7 +210,7 @@ EOF
;;
3 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F\*${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
case "${q_dbhost_menu,,}" in
c* )
counter=1
@@ -227,7 +237,7 @@ EOF
4 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
case "${q_dbhost_menu,,}" in
c* )
counter=1
@@ -241,7 +251,7 @@ EOF
break
;;
e* | "" )
r_dbhost=${DB_HOST}
r_dbhost=${detected_host_value}
break
;;
f* )
@@ -258,6 +268,337 @@ EOF
esac
}
get_dbname() {
p_dbname=$(basename -- "${r_filename}" | cut -d _ -f 2)
if [ -n "${p_dbname}" ]; then
parsed_name=true
print_debug "Parsed DBName: ${p_dbname}"
fi
if grep -q "^DB${detected_host_num}_NAME=${p_dbname}" "${restore_vars}" ; then
detected_name_value=$(grep "^DB${detected_host_num}_NAME=${p_dbname}" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
fi
if [ -z "${detected_name_value}" ] && [ -z "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename"
q_dbname_variant=1
q_dbname_menu=$(cat <<EOF
EOF
)
fi
if [ -n "${detected_name_value}" ] && [ -z "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 2 - Env, No Parsed Filename"
q_dbname_variant=2
q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
E ) Environment Variable DB${detected_host_num}_NAME: '${detected_name_value}'
EOF
)
fi
if [ -z "${detected_name_value}" ] && [ -n "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 3 - No Env, Parsed Filename"
q_dbname_variant=3
q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
F ) Parsed Filename DB Name: '${p_dbname}'
EOF
)
fi
if [ -n "${detected_name_value}" ] && [ -n "${parsed_name}" ]; then
print_debug "Parsed DBname Variant: 4 - Env, Parsed Filename"
q_dbname_variant=4
q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
E ) Environment Variable DB${detected_host_num}_NAME: '${detected_name_value}'
F ) Parsed Filename DB Name: '${p_dbname}'
EOF
)
fi
cat << EOF
What Database Name do you want to restore to?
${q_dbname_menu}
Q ) Quit
EOF
case "${q_dbname_variant}" in
1 )
counter=1
q_dbname=" "
while [[ $q_dbname = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Name do you want to restore to:\ ${coff})" q_dbname
(( counter+=1 ))
done
r_dbname=${q_dbname}
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
case "${q_dbname_menu,,}" in
c* )
counter=1
q_dbname=" "
while [[ $q_dbname = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Names can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
(( counter+=1 ))
done
r_dbname=${q_dbname}
break
;;
e* | "" )
r_dbname=${detected_name_value}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
3 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F\*${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
case "${q_dbname_menu,,}" in
c* )
counter=1
q_dbname=" "
while [[ $q_dbname = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
(( counter+=1 ))
done
r_dbname=${q_dbname}
break
;;
f* | "" )
r_dbname=${p_dbname}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
4 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
case "${q_dbname_menu,,}" in
c* )
counter=1
q_dbname=" "
while [[ $q_dbname = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
(( counter+=1 ))
done
r_dbname=${q_dbname}
break
;;
e* | "" )
r_dbname=${detected_name_value}
break
;;
f* )
r_dbname=${p_dbname}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
get_dbpass() {
if grep -q "^DB${detected_host_num}_PASS=" "${restore_vars}" ; then
detected_pass_value=$(grep "^DB${detected_host_num}_PASS=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
fi
if [ -z "${detected_pass_value}" ] ; then
print_debug "Parsed DBPass Variant: 1 - No Env"
q_dbpass_variant=1
q_dbpass_menu=$(cat <<EOF
EOF
)
fi
if [ -n "${detected_pass_value}" ] ; then
print_debug "Parsed DBPass Variant: 2 - Env"
q_dbpass_variant=2
q_dbpass_menu=$(cat <<EOF
C ) Custom Entered Database Password
E ) Environment Variable DB${detected_host_num}_PASS
EOF
)
fi
cat << EOF
What Database Password will be used to restore?
${q_dbpass_menu}
Q ) Quit
EOF
case "${q_dbpass_variant}" in
1 )
counter=1
q_dbpass=" "
while [[ $q_dbpass = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
(( counter+=1 ))
done
r_dbpass=${q_dbpass}
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) : ${cwh}${coff}) " q_dbpass_menu
case "${q_dbpass_menu,,}" in
c* )
counter=1
q_dbpass=" "
while [[ $q_dbpass = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
(( counter+=1 ))
done
r_dbpass=${q_dbpass}
break
;;
e* | "" )
r_dbpass=${detected_pass_value}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
get_dbport() {
if grep -q "^DB${detected_host_num}_PORT=" "${restore_vars}" ; then
detected_port_value=$(grep "^DB${detected_host_num}_PORT=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
fi
if [ -z "${detected_port_value}" ] ; then
print_debug "Parsed DBPort Variant: 1 - No Env"
q_dbport_variant=1
q_dbport_menu_opt_default="| (${cwh}D${cdgy}) * "
q_dbport_menu=$(cat <<EOF
C ) Custom Entered Database Port
D ) Default Port for Database type '${r_dbtype}': '${DEFAULT_PORT}'
EOF
)
fi
if [ -n "${detected_port_value}" ] ; then
print_debug "Parsed DBPort Variant: 2 - Env"
q_dbport_variant=2
q_dbport_menu=$(cat <<EOF
C ) Custom Entered Database Port
D ) Default Port for Database type '${r_dbtype}': '${DEFAULT_PORT}'
E ) Environment Variable DB${detected_host_num}_PORT: '${detected_port_value}'
EOF
)
fi
cat << EOF
What Database Port do you wish to use? MySQL/MariaDB typically listens on port 3306, PostgreSQL on 5432, and MongoDB on 27017.
${q_dbport_menu}
Q ) Quit
EOF
case "${q_dbport_variant}" in
1 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}D\*${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
case "${q_dbport_menu,,}" in
c* )
counter=1
q_dbport=" "
q_dbportre='^[0-9]+$'
while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
(( counter+=1 ))
done
r_dbport=${q_dbport}
break
;;
d* | "" )
r_dbport=${DEFAULT_PORT}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}D${cdgy}\) \| \(${cwh}E\*${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
case "${q_dbport_menu,,}" in
c* )
counter=1
q_dbport=" "
q_dbportre='^[0-9]+$'
while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
(( counter+=1 ))
done
r_dbport=${q_dbport}
break
;;
d* )
r_dbport=${DEFAULT_PORT}
break
;;
e* | "" )
r_dbport=${detected_port_value}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
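
Port input above is validated by re-prompting until the value matches ^[0-9]+$. That regex accepts any digit string, including 0 or 99999; a range check is easy to bolt on. A sketch under the same prompt conventions, where the 1-65535 bounds are an assumption rather than something the script enforces:

    # Sketch: same re-prompt idiom with an added 1-65535 range check.
    q_dbportre='^[0-9]+$'
    q_dbport=""
    until [[ ${q_dbport} =~ ${q_dbportre} ]] && (( q_dbport >= 1 && q_dbport <= 65535 )); do
        read -e -p "What DB Port do you want to use (1-65535): " q_dbport
    done
    r_dbport=${q_dbport}
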
get_dbtype() {
p_dbtype=$(basename -- "${r_filename}" | cut -d _ -f 1)
@@ -265,14 +606,17 @@ get_dbtype() {
case "${p_dbtype}" in
mongo* )
parsed_type=true
DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
print_debug "Parsed DBType: MongoDB"
;;
mariadb | mysql )
parsed_type=true
DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
print_debug "Parsed DBType: MariaDB/MySQL"
;;
pgsql | postgres* )
parsed_type=true
DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
print_debug "Parsed DBType: Postgresql"
;;
* )
@@ -339,14 +683,17 @@ EOF
case "${q_dbtype,,}" in
m* )
r_dbtype=mysql
DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
break
;;
o* )
r_dbtype=mongo
DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
break
;;
p* )
r_dbtype=postgresql
DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
break
;;
q* )
@@ -366,14 +713,17 @@ EOF
;;
m* )
r_dbtype=mysql
DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
break
;;
o* )
r_dbtype=mongo
DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
break
;;
p* )
r_dbtype=postgresql
DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
break
;;
q* )
@@ -385,22 +735,36 @@ EOF
;;
3 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \(Default\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
f* | "" )
r_dbtype=${p_dbtype}
case "${r_dbtype}" in
mongo )
DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
;;
mysql )
DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
;;
pgsql )
DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
;;
esac
break
;;
m* )
r_dbtype=mysql
DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
break
;;
o* )
r_dbtype=mongo
DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
break
;;
p* )
r_dbtype=postgresql
DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
break
;;
q* )
@@ -425,14 +789,17 @@ EOF
;;
m* )
r_dbtype=mysql
DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
break
;;
o* )
r_dbtype=mongo
DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
break
;;
p* )
r_dbtype=postgresql
DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
break
;;
q* )
@@ -445,235 +812,12 @@ EOF
esac
}
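
Every branch above seeds DEFAULT_PORT with the ${DEFAULT_PORT:-"..."} expansion, so a value exported before the script runs always wins over the per-type default. A two-line illustration of that idiom:

    # ${VAR:-fallback} expands to the fallback only when VAR is unset or empty.
    unset DEFAULT_PORT
    echo "${DEFAULT_PORT:-3306}"    # prints 3306
    DEFAULT_PORT=5432
    echo "${DEFAULT_PORT:-3306}"    # prints 5432; the existing value wins
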
get_dbname() {
p_dbname=$(basename -- "${r_filename}" | cut -d _ -f 2)
if [ -n "${p_dbname}" ]; then
parsed_name=true
print_debug "Parsed DBName: ${p_dbhost}"
fi
if [ -z "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename"
q_dbname_variant=1
q_dbname_menu=$(cat <<EOF
EOF
)
fi
if [ -n "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 2 - Env, No Parsed Filename"
q_dbname_variant=2
q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
E ) Environment Variable DB_NAME: '${DB_NAME}'
EOF
)
fi
if [ -z "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 3 - No Env, Parsed Filename"
q_dbname_variant=3
q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
F ) Parsed Filename DB Name: '${p_dbname}'
EOF
)
fi
if [ -n "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
print_debug "Parsed DBname Variant: 4 - Env, Parsed Filename"
q_dbname_variant=4
q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
E ) Environment Variable DB_NAME: '${DB_NAME}'
F ) Parsed Filename DB Name: '${p_dbname}'
EOF
)
fi
cat << EOF
What Database Name do you want to restore to?
${q_dbname_menu}
Q ) Quit
EOF
case "${q_dbname_variant}" in
1 )
counter=1
q_dbname=" "
while [[ $q_dbname = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Name do you want to restore to:\ ${coff})" q_dbname
(( counter+=1 ))
done
r_dbname=${q_dbname}
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
case "${q_dbname_menu,,}" in
c* )
counter=1
q_dbname=" "
while [[ $q_dbname = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Names can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
(( counter+=1 ))
done
r_dbname=${q_dbname}
break
;;
e* | "" )
r_dbname=${DB_NAME}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
3 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
case "${q_dbname_menu,,}" in
c* )
counter=1
q_dbname=" "
while [[ $q_dbname = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
(( counter+=1 ))
done
r_dbname=${q_dbname}
break
;;
f* | "" )
r_dbname=${p_dbname}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
4 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
case "${q_dbname_menu,,}" in
c* )
counter=1
q_dbname=" "
while [[ $q_dbname = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
(( counter+=1 ))
done
r_dbname=${q_dbname}
break
;;
e* | "" )
r_dbname=${DB_NAME}
break
;;
f* )
r_dbname=${p_dbname}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
get_dbport() {
if [ -z "${DB_PORT}" ] ; then
print_debug "Parsed DBPort Variant: 1 - No Env"
q_dbport_variant=1
q_dbport_menu=$(cat <<EOF
EOF
)
fi
if [ -n "${DB_PORT}" ] ; then
print_debug "Parsed DBPort Variant: 2 - Env"
q_dbport_variant=2
q_dbport_menu=$(cat <<EOF
C ) Custom Entered Database Port
E ) Environment Variable DB_PORT: '${DB_PORT}'
EOF
)
fi
cat << EOF
What Database Port do you wish to use? MySQL/MariaDB typically listens on port 3306, PostgreSQL on 5432, and MongoDB on 27017.
${q_dbport_menu}
Q ) Quit
EOF
case "${q_dbport_variant}" in
1 )
counter=1
q_dbport=" "
q_dbportre='^[0-9]+$'
while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
(( counter+=1 ))
done
r_dbport=${q_dbport}
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
case "${q_dbport_menu,,}" in
c* )
counter=1
q_dbport=" "
q_dbportre='^[0-9]+$'
while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
(( counter+=1 ))
done
r_dbport=${q_dbport}
break
;;
e* | "" )
r_dbport=${DB_PORT}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
get_dbuser() {
if [ -z "${DB_USER}" ] ; then
if grep -q "^DB${detected_host_num}_USER=" "${restore_vars}" ; then
detected_user_value=$(grep "^DB${detected_host_num}_USER=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
fi
if [ -z "${detected_user_value}" ] ; then
print_debug "Parsed DBUser Variant: 1 - No Env"
q_dbuser_variant=1
q_dbuser_menu=$(cat <<EOF
@@ -682,13 +826,13 @@ EOF
)
fi
if [ -n "${DB_USER}" ] ; then
if [ -n "${detected_user_value}" ] ; then
print_debug "Parsed DBUser Variant: 2 - Env"
q_dbuser_variant=2
q_dbuser_menu=$(cat <<EOF
C ) Custom Entered Database User
E ) Environment Variable DB_USER: '${DB_USER}'
E ) Environment Variable DB${detected_host_num}_USER: '${detected_user_value}'
EOF
)
fi
@@ -728,7 +872,7 @@ EOF
break
;;
e* | "" )
r_dbuser=${DB_USER}
r_dbuser=${detected_user_value}
break
;;
q* )
@@ -741,76 +885,37 @@ EOF
esac
}
get_dbpass() {
if [ -z "${DB_PASS}" ] ; then
print_debug "Parsed DBPass Variant: 1 - No Env"
q_dbpass_variant=1
q_dbpass_menu=$(cat <<EOF
EOF
)
fi
if [ -n "${DB_PASS}" ] ; then
print_debug "Parsed DBPass Variant: 2 - Env"
q_dbpass_variant=2
q_dbpass_menu=$(cat <<EOF
C ) Custom Entered Database Password
E ) Environment Variable DB_PASS
EOF
)
fi
cat << EOF
What Database Password will be used to restore?
${q_dbpass_menu}
Q ) Quit
EOF
case "${q_dbpass_variant}" in
1 )
counter=1
q_dbpass=" "
while [[ $q_dbpass = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
(( counter+=1 ))
get_filename() {
COLUMNS=12
prompt="Please select a file to restore:"
options=( $(find "${DEFAULT_FILESYSTEM_PATH}" -maxdepth 2 -type f -not -name '*.md5' -not -name '*.sha1' -not -name '*.gpg' -print0 | sort -z | xargs -0) )
PS3="$prompt "
select opt in "${options[@]}" "Custom" "Quit" ; do
if (( REPLY == 2 + ${#options[@]} )) ; then
echo "Bye!"
exit 2
elif (( REPLY == 1 + ${#options[@]} )) ; then
while [ ! -f "${opt}" ] ; do
read -p "What path and filename to restore: " opt
if [ ! -f "${opt}" ] ; then
print_error "File not found. Please retry.."
fi
done
r_dbpass=${q_dbpass}
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbpass_menu
case "${q_dbpass_menu,,}" in
c* )
counter=1
q_dbpass=" "
while [[ $q_dbpass = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
(( counter+=1 ))
done
r_dbpass=${q_dbpass}
break
;;
e* | "" )
r_dbpass=${DB_PASS}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
break
elif (( REPLY > 0 && REPLY <= ${#options[@]} )) ; then
break
else
echo "Invalid option. Try another one."
fi
done
COLUMNS=$oldcolumns
r_filename=${opt}
}
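
get_filename() builds its picker from bash's select builtin, appending two sentinel entries ("Custom" and "Quit") after the real file list and comparing $REPLY against the array length to tell them apart. A stripped-down sketch of the same idiom with dummy options:

    # Sketch: REPLY == len+1 selects "Custom", REPLY == len+2 selects "Quit".
    options=( one two three )
    select opt in "${options[@]}" "Custom" "Quit"; do
        if (( REPLY == 2 + ${#options[@]} )); then echo "Bye!"; exit 2
        elif (( REPLY == 1 + ${#options[@]} )); then read -r -p "Custom value: " opt; break
        elif (( REPLY > 0 && REPLY <= ${#options[@]} )); then break
        else echo "Invalid option. Try another one."
        fi
    done
    echo "picked: ${opt}"

One quirk worth knowing: populating the array from an unquoted $(find ...) word-splits on whitespace, so backup paths containing spaces will not round-trip through this menu.
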
#### SCRIPT START
trap control_c INT
bootstrap_variables restore_init
cat << EOF
## ${IMAGE_NAME} Restore Script


@@ -1,10 +1,9 @@
{
"zabbix_export": {
"version": "6.0",
"date": "2022-03-18T13:32:12Z",
"groups": [
"version": "6.4",
"template_groups": [
{
"uuid": "fa56524b5dbb4ec09d9777a6f7ccfbe4",
"uuid": "10b88d2b3a3a4c72b43bdce9310e1162",
"name": "DB/Backup"
},
{
@@ -14,10 +13,10 @@
],
"templates": [
{
"uuid": "5fc64d517afb4cc5bc09a3ef58b43ef7",
"template": "DB Backup",
"name": "DB Backup",
"description": "Template for Docker DB Backup Image\n\nMeant for use specifically with https://github.com/tiredofit/docker-db-backup\nLast tested with version 3.0.2",
"uuid": "5a16c1bd694145389eed5ee803d954cc",
"template": "DB Backup4",
"name": "DB Backup4",
"description": "Template for Docker DB Backup Image\n\nMeant for use specifically with https://github.com/tiredofit/docker-db-backup Version > 4.0.0\n\nSupports auto discovery of backup jobs and creates graphs and triggers",
"groups": [
{
"name": "DB/Backup"
@@ -26,134 +25,260 @@
"name": "Templates/Databases"
}
],
"items": [
"discovery_rules": [
{
"uuid": "72fd00fa2dd24e479f5affe03e8711d8",
"name": "DB Backup: Backup Duration",
"uuid": "94bb6f862e1841f8b2834b04c41c1d86",
"name": "Backup",
"type": "TRAP",
"key": "dbbackup.backup_duration",
"key": "dbbackup.backup",
"delay": "0",
"history": "7d",
"units": "uptime",
"description": "How long the backup took",
"tags": [
"item_prototypes": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "3549a2c9d56849babc6dc3c855484c1e",
"name": "DB Backup: Backup Time",
"type": "TRAP",
"key": "dbbackup.datetime",
"delay": "0",
"history": "7d",
"units": "unixtime",
"request_method": "POST",
"tags": [
"uuid": "5a2c4d1cacf844829bc1fbf912e071c5",
"name": "[{#NAME}] Checksum - Duration",
"type": "TRAP",
"key": "dbbackup.backup.checksum.duration.[{#NAME}]",
"delay": "0",
"history": "7d",
"units": "uptime",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"tag": "Application",
"value": "DB Backup"
"uuid": "6e49769ec07344a4974b13dab00c3539",
"name": "[{#NAME}] Checksum - Hash",
"type": "TRAP",
"key": "dbbackup.backup.checksum.hash.[{#NAME}]",
"delay": "0",
"history": "30d",
"trends": "0",
"value_type": "TEXT",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "bb6472e30bff4d9c908b1d34b893e622",
"name": "[{#NAME}] Backup - Last Backup",
"type": "TRAP",
"key": "dbbackup.backup.datetime.[{#NAME}]",
"delay": "0",
"history": "7d",
"units": "unixtime",
"description": "Datestamp of last database backup",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"trigger_prototypes": [
{
"uuid": "3681b56bb882466fb304a48b4beb15f0",
"expression": "fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],172800s)=0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],259200s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],345600s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],432800s)<>0",
"name": "[{#NAME}] No backups detected in 2 days",
"priority": "HIGH",
"manual_close": "YES"
},
{
"uuid": "6c70136c84994197b6396a143b4e956f",
"expression": "fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],172800s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],259200s)=0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],345600s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],432800s)<>0",
"name": "[{#NAME}] No backups detected in 3 days",
"priority": "DISASTER",
"manual_close": "YES"
},
{
"uuid": "d2038025cab643019cb9610c301f0cb9",
"expression": "fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],172800s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],259200s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],345600s)=0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],432800s)<>0",
"name": "[{#NAME}] No backups detected in 4 days",
"priority": "DISASTER",
"manual_close": "YES"
},
{
"uuid": "ea85f02d032c4a1dbc1b6e91a3b2b37b",
"expression": "fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],172800s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],259200s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],345600s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],432800s)=0",
"name": "[{#NAME}] No backups detected in 5 days",
"priority": "DISASTER",
"manual_close": "YES"
}
]
},
{
"uuid": "8ec2b2f44ddf4f36b3dbb2aa15e3a32f",
"name": "[{#NAME}] Backup - Duration",
"type": "TRAP",
"key": "dbbackup.backup.duration.[{#NAME}]",
"delay": "0",
"history": "7d",
"units": "uptime",
"description": "How long the DB Backup job took",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "3f0dc3c75261447c93482815c3d69524",
"name": "[{#NAME}] Encrypt - Duration",
"type": "TRAP",
"key": "dbbackup.backup.encrypt.duration.[{#NAME}]",
"delay": "0",
"history": "7d",
"units": "uptime",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "c3d5ad0789c443859d6a673e03db9cec",
"name": "[{#NAME}] Backup - Filename",
"type": "TRAP",
"key": "dbbackup.backup.filename.[{#NAME}]",
"delay": "0",
"history": "30d",
"trends": "0",
"value_type": "TEXT",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "43b700c03897465eb7e49bbfe8fc9fc5",
"name": "[{#NAME}] Backup - Size",
"type": "TRAP",
"key": "dbbackup.backup.size.[{#NAME}]",
"delay": "0",
"history": "7d",
"description": "Backup Size",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"trigger_prototypes": [
{
"uuid": "849f8660bee04427aff55af47b6f509c",
"expression": "last(/DB Backup4/dbbackup.backup.size.[{#NAME}])/last(/DB Backup4/dbbackup.backup.size.[{#NAME}],#2)>1.2",
"name": "[{#NAME}] Backup 20% Greater in size",
"priority": "WARNING",
"manual_close": "YES"
},
{
"uuid": "74d16a7680544c65af22cc568ce3d59d",
"expression": "last(/DB Backup4/dbbackup.backup.size.[{#NAME}])/last(/DB Backup4/dbbackup.backup.size.[{#NAME}],#2)<0.2",
"name": "[{#NAME}] Backup 20% Smaller in Size",
"priority": "WARNING",
"manual_close": "YES"
},
{
"uuid": "5595d769c73f4eaeadda95c84c2c0f17",
"expression": "last(/DB Backup4/dbbackup.backup.size.[{#NAME}])<1K",
"name": "[{#NAME}] Backup Empty",
"priority": "HIGH",
"manual_close": "YES"
}
]
},
{
"uuid": "a6fc542a565c4baba8429ed9ab31b5ae",
"name": "[{#NAME}] Backup - Status",
"type": "TRAP",
"key": "dbbackup.backup.status.[{#NAME}]",
"delay": "0",
"history": "7d",
"description": "Maps exit code by DB Backup procedure",
"valuemap": {
"name": "Backup Status"
},
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"trigger_prototypes": [
{
"uuid": "74b91e28453b4c2a84743f5e371495c1",
"expression": "last(/DB Backup4/dbbackup.backup.status.[{#NAME}])=1",
"name": "[{#NAME}] Backup - Failed with errors",
"priority": "WARNING",
"manual_close": "YES"
}
]
}
],
"triggers": [
"graph_prototypes": [
{
"uuid": "3ac1e074ffea46eb8002c9c08a85e7b4",
"expression": "nodata(/DB Backup/dbbackup.datetime,2d)=1",
"name": "DB-Backup: No backups detected in 2 days",
"priority": "DISASTER",
"manual_close": "YES"
"uuid": "b5e8e9fe0c474fedba2b06366234afdf",
"name": "[{#NAME}] Backup Duration",
"graph_items": [
{
"color": "199C0D",
"calc_fnc": "ALL",
"item": {
"host": "DB Backup4",
"key": "dbbackup.backup.duration.[{#NAME}]"
}
}
]
},
{
"uuid": "b8b5933dfa1a488c9c37dd7f4784c1ff",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)=0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)<>0",
"name": "DB Backup: No Backups occurred in 2 days",
"priority": "AVERAGE"
"uuid": "99b5deb4e28f40059c50846c7be2ef26",
"name": "[{#NAME}] Backup Size",
"graph_items": [
{
"color": "199C0D",
"calc_fnc": "ALL",
"item": {
"host": "DB Backup4",
"key": "dbbackup.backup.size.[{#NAME}]"
}
}
]
},
{
"uuid": "35c5f420d0e142cc9601bae38decdc40",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)=0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)<>0",
"name": "DB Backup: No Backups occurred in 3 days",
"priority": "AVERAGE"
"uuid": "8c641e33659e4c8b866da64e252cfc2a",
"name": "[{#NAME}] Checksum Duration",
"graph_items": [
{
"color": "199C0D",
"calc_fnc": "ALL",
"item": {
"host": "DB Backup4",
"key": "dbbackup.backup.checksum.duration.[{#NAME}]"
}
}
]
},
{
"uuid": "03c3719d82c241e886a0383c7d908a77",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)=0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)<>0",
"name": "DB Backup: No Backups occurred in 4 days",
"priority": "AVERAGE"
},
{
"uuid": "1634a03e44964e42b7e0101f5f68499c",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)=0",
"name": "DB Backup: No Backups occurred in 5 days or more",
"priority": "HIGH"
}
]
},
{
"uuid": "467dfec952b34f5aa4cc890b4351b62d",
"name": "DB Backup: Backup Size",
"type": "TRAP",
"key": "dbbackup.size",
"delay": "0",
"history": "7d",
"units": "B",
"request_method": "POST",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"triggers": [
{
"uuid": "a41eb49b8a3541afb6de247dca750e38",
"expression": "last(/DB Backup/dbbackup.size)/last(/DB Backup/dbbackup.size,#2)>1.2",
"name": "DB Backup: 20% Greater in Size",
"priority": "WARNING",
"manual_close": "YES"
},
{
"uuid": "422f66be5049403293f3d96fc53f20cd",
"expression": "last(/DB Backup/dbbackup.size)/last(/DB Backup/dbbackup.size,#2)<0.2",
"name": "DB Backup: 20% Smaller in Size",
"priority": "WARNING",
"manual_close": "YES"
},
{
"uuid": "d6d9d875b92f4d799d4bc89aabd4e90e",
"expression": "last(/DB Backup/dbbackup.size)<1K",
"name": "DB Backup: empty",
"priority": "HIGH"
}
]
},
{
"uuid": "a6b13e8b46a64abab64a4d44d620d272",
"name": "DB Backup: Last Backup Status",
"type": "TRAP",
"key": "dbbackup.status",
"delay": "0",
"history": "7d",
"description": "Maps Exit Codes received by backup applications",
"valuemap": {
"name": "DB Backup Status"
},
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"triggers": [
{
"uuid": "23d71e356f96493180f02d4b84a79fd6",
"expression": "last(/DB Backup/dbbackup.status)=1",
"name": "DB Backup: Failed Backup Detected",
"priority": "HIGH",
"manual_close": "YES"
"uuid": "65b8770f71ed4cff9111b82c42b17571",
"name": "[{#NAME}] Encrypt Duration",
"graph_items": [
{
"color": "199C0D",
"calc_fnc": "ALL",
"item": {
"host": "DB Backup4",
"key": "dbbackup.backup.encrypt.duration.[{#NAME}]"
}
}
]
}
]
}
@@ -168,38 +293,10 @@
"value": "Database"
}
],
"dashboards": [
{
"uuid": "90c81bb47184401ca9663626784a6f30",
"name": "DB Backup",
"pages": [
{
"widgets": [
{
"type": "GRAPH_CLASSIC",
"name": "Backup Size",
"width": "23",
"height": "5",
"fields": [
{
"type": "GRAPH",
"name": "graphid",
"value": {
"name": "DB Backup: Backup Size",
"host": "DB Backup"
}
}
]
}
]
}
]
}
],
"valuemaps": [
{
"uuid": "82f3a3d01b3c42b8942b59d2363724e0",
"name": "DB Backup Status",
"uuid": "92a87279388b4fd1ac51c1e417e1776e",
"name": "Backup Status",
"mappings": [
{
"value": "0",
@@ -214,36 +311,6 @@
}
]
}
],
"graphs": [
{
"uuid": "6e02c200b76046bab76062cd1ab086b2",
"name": "DB Backup: Backup Duration",
"graph_items": [
{
"color": "199C0D",
"item": {
"host": "DB Backup",
"key": "dbbackup.backup_duration"
}
}
]
},
{
"uuid": "b881ee18f05c4f4c835982c9dfbb55d6",
"name": "DB Backup: Backup Size",
"type": "STACKED",
"graph_items": [
{
"sortorder": "1",
"color": "1A7C11",
"item": {
"host": "DB Backup",
"key": "dbbackup.size"
}
}
]
}
]
}
}
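
All items in the 4.x template are type TRAP and hang off the dbbackup.backup discovery rule, so nothing is polled: the container (or an operator) pushes values in. A hedged sketch of feeding it with zabbix_sender, where the server name, host name, and job name are placeholders:

    # Assumptions: zabbix.example.com is your server/proxy, "db-backup-host" is
    # a host linked to the DB Backup4 template, "mysql-app1" is a backup job.
    # 1) Announce the job so LLD creates per-job items from the prototypes:
    zabbix_sender -z zabbix.example.com -s "db-backup-host" \
        -k "dbbackup.backup" -o '[{"{#NAME}":"mysql-app1"}]'
    # 2) Push metrics after a backup run:
    zabbix_sender -z zabbix.example.com -s "db-backup-host" \
        -k "dbbackup.backup.size.[mysql-app1]" -o 1048576
    zabbix_sender -z zabbix.example.com -s "db-backup-host" \
        -k "dbbackup.backup.datetime.[mysql-app1]" -o "$(date +%s)"
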