Compare commits


306 Commits

Author SHA1 Message Date
dave@tiredofit.ca
f7f72ba2c1 Release 4.0.35 - See CHANGELOG.md 2024-01-14 20:22:08 -08:00
Dave Conroy
2f05d76f4e README weirdness 2024-01-03 17:33:52 -08:00
Dave Conroy
c9a634ff25 Convert > to - in README 2024-01-03 17:21:01 -08:00
dave@tiredofit.ca
0ce21e8f43 Release 4.0.34 - See CHANGELOG.md 2024-01-02 14:01:28 -08:00
Dave Conroy
a700eb0fef Merge pull request #315 from ToshY/docs/latest-symlink-format
[Docs] Updated `DEFAULT_CREATE_LATEST_SYMLINK` description format
2024-01-02 13:21:59 -08:00
Dave Conroy
7baa3774c7 Merge pull request #318 from devmethodgit/main
Fix environment variables in examples
2024-01-02 13:21:43 -08:00
Dave Conroy
341e4d12ea Update case statement to support arm64|aarch64 2024-01-02 13:21:07 -08:00
Dave Conroy
5c51bbcb7e Wrap if statement in double brackets 2024-01-02 12:54:57 -08:00
@vladimirzyuzin
24d9a9a937 Fix environment variables 2023-12-30 20:58:26 +03:00
ToshY
591b8d6dbd updated create latest symlink description format 2023-12-26 12:24:52 +00:00
dave@tiredofit.ca
a5b15b4412 Release 4.0.33 - See CHANGELOG.md 2023-12-18 07:58:54 -08:00
dave@tiredofit.ca
6692cf9834 Release 4.0.32 - See CHANGELOG.md 2023-12-15 15:32:32 -08:00
dave@tiredofit.ca
c37de5778d Release 4.0.31 - See CHANGELOG.md 2023-12-12 19:25:40 -08:00
dave@tiredofit.ca
eeeafd6ab8 Release 4.0.30 - See CHANGELOG.md 2023-12-11 15:21:01 -08:00
dave@tiredofit.ca
17daf26084 Release 4.0.29 - See CHANGELOG.md 2023-12-04 11:29:14 -08:00
Dave Conroy
b53cda99f7 Don't execute blobxfer functions if both key and secret are not set 2023-12-04 11:09:43 -08:00
Dave Conroy
2cf3e2ae70 Show proper DB Name when backing up Mongo or MSSQL 2023-12-04 08:06:57 -08:00
dave@tiredofit.ca
c7ee94aec2 Release 4.0.28 - See CHANGELOG.md 2023-12-04 07:04:08 -08:00
Dave Conroy
f44233e51a AWS CLI 1.31.5 2023-12-04 07:02:40 -08:00
dave@tiredofit.ca
ccda858b18 Release 4.0.27 - See CHANGELOG.md 2023-12-04 07:00:39 -08:00
Dave Conroy
d58b27d5ef Use alternate cron 2023-12-03 22:04:12 -08:00
dave@tiredofit.ca
fb9fe8a032 Release 4.0.26 - See CHANGELOG.md 2023-11-30 08:55:34 -08:00
Dave Conroy
b705982ae1 Restore missing _SPLIT_DB environment variable information for MySQL/Postgres 2023-11-30 08:54:49 -08:00
dave@tiredofit.ca
f031d787ae Release 4.0.25 - See CHANGELOG.md 2023-11-29 10:43:25 -08:00
Dave Conroy
3eed5fc8a0 Switch BLOBXFER_STORAGE_KEY to BLOBXFER_STORAGE_ACCOUNT_KEY 2023-11-29 10:39:58 -08:00
dave@tiredofit.ca
be619fb707 Release 4.0.24 - See CHANGELOG.md 2023-11-28 15:06:50 -08:00
dave@tiredofit.ca
cccc088b35 Release 4.0.23 - See CHANGELOG.md 2023-11-28 08:05:11 -08:00
dave@tiredofit.ca
4579f4057c Release 4.0.22 - See CHANGELOG.md 2023-11-25 08:50:25 -08:00
dave@tiredofit.ca
cd683648d0 Release 4.0.21 - See CHANGELOG.md 2023-11-22 15:40:38 -08:00
dave@tiredofit.ca
11f55f3d82 Release 4.0.20 - See CHANGELOG.md 2023-11-21 15:18:22 -08:00
dave@tiredofit.ca
674a98fcd8 Release 4.0.19 - See CHANGELOG.md 2023-11-20 15:26:21 -08:00
dave@tiredofit.ca
77c747e01b Release 4.0.18 - See CHANGELOG.md 2023-11-18 09:53:41 -08:00
Dave Conroy
2e30558a27 Merge pull request #282 from joergmschulz/patch-1
Update 10-db-backup msmtp -C
2023-11-18 09:52:23 -08:00
joergmschulz
c746fb641e Update 10-db-backup msmtp -C
the config file is referenced by -C, not -c
2023-11-17 23:28:08 +01:00
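A minimal sketch of the fix (hypothetical path and message, not the script's actual invocation): msmtp selects its configuration file with uppercase `-C`.
```bash
# Illustrative only: uppercase -C points msmtp at a configuration file
echo "db-backup job failed" | msmtp -C /etc/msmtprc recipient@example.com
```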
dave@tiredofit.ca
ca2f04cd59 Release 4.0.17 - See CHANGELOG.md 2023-11-17 08:16:34 -08:00
dave@tiredofit.ca
dfa94ecab7 Release 4.0.16 - See CHANGELOG.md 2023-11-17 08:07:54 -08:00
Dave Conroy
eaea6dc348 Update README.md 2023-11-16 09:38:18 -08:00
dave@tiredofit.ca
34abe88159 Release 4.0.15 - See CHANGELOG.md 2023-11-16 09:35:56 -08:00
Dave Conroy
5ffbeeb163 Merge pull request #280 from joergmschulz/patch-1
warn instead of warning
2023-11-14 07:14:55 -08:00
joergmschulz
c82cee80f8 warn instead of warning
see #279
2023-11-14 08:53:38 +01:00
dave@tiredofit.ca
ab059ccdf1 Release 4.0.14 - See CHANGELOG.md 2023-11-13 15:16:36 -08:00
dave@tiredofit.ca
1e8ccf4d56 Release 4.0.13 - See CHANGELOG.md 2023-11-12 17:07:07 -08:00
dave@tiredofit.ca
65c40cac0a Release 4.0.12 - See CHANGELOG.md 2023-11-12 09:03:01 -08:00
dave@tiredofit.ca
a9f2d51ff9 Release 4.0.11 - See CHANGELOG.md 2023-11-11 13:43:57 -08:00
dave@tiredofit.ca
7f455abc1a Release 4.0.10 - See CHANGELOG.md 2023-11-11 09:34:11 -08:00
dave@tiredofit.ca
c16add4525 Release 4.0.9 - See CHANGELOG.md 2023-11-11 09:16:02 -08:00
Dave Conroy
d5769b1588 Fix Ctrl-C Backup Concurrency 2023-11-11 08:48:59 -08:00
dave@tiredofit.ca
0b2c7836cf Release 4.0.8 - See CHANGELOG.md 2023-11-11 08:32:58 -08:00
Dave Conroy
535e011740 Add safety net to debug() SHOW_OUTPUT 2023-11-11 07:28:38 -08:00
Dave Conroy
5a391b908a Fix debug() duplicate variable 2023-11-11 07:23:13 -08:00
dave@tiredofit.ca
fddca646c8 Release 4.0.7 - See CHANGELOG.md 2023-11-11 07:15:00 -08:00
Dave Conroy
68f954c59b Fix SQLite3 Backups and clean up temp directory properly 2023-11-11 07:12:29 -08:00
Dave Conroy
0ab0a6d182 sqlite3 scheduler process name remove slashes 2023-11-11 06:48:39 -08:00
Dave Conroy
f6bf2993f7 Add separate permissions for _FILESYSTEM_PATH 2023-11-11 06:36:26 -08:00
dave@tiredofit.ca
5cf00a8b8e Release 4.0.6 - See CHANGELOG.md 2023-11-10 17:53:47 -08:00
dave@tiredofit.ca
2bc730013e Release 4.0.5 - See CHANGELOG.md 2023-11-10 07:25:25 -08:00
Dave Conroy
d628ed8ff4 Expand upon DEBUG_ statements to give more detail 2023-11-10 07:24:31 -08:00
Dave Conroy
d7399667a1 Update _FILESYSTEM_PERMISSIONS from 700 to 600 and add undocumented DBBACKUP_USER|GROUP variable 2023-11-10 07:16:56 -08:00
dave@tiredofit.ca
9caec737e0 Release 4.0.4 - See CHANGELOG.md 2023-11-09 11:49:26 -08:00
Dave Conroy
87a803512d Merge pull request #269 from tiredofit/4.x
New Restore Script
2023-11-09 11:48:19 -08:00
Dave Conroy
c6a8fb0ae0 Merge branch 'main' into 4.x 2023-11-09 11:48:08 -08:00
Dave Conroy
8fafdeb45c Restore - Support multiple DB Hosts 2023-11-09 11:46:04 -08:00
Dave Conroy
4a3a79d328 restore - we're actually using FILESYSTEM_PATH 2023-11-09 09:21:19 -08:00
dave@tiredofit.ca
bad5057bcf Release 4.0.3 - See CHANGELOG.md 2023-11-09 09:20:27 -08:00
Dave Conroy
d2acfc4a88 restore - don't browse .gpg files 2023-11-09 09:19:39 -08:00
Dave Conroy
3d794a819f Commence work on restore scripts 2023-11-09 09:19:13 -08:00
dave@tiredofit.ca
aaf6309cc4 Release 4.0.2 - See CHANGELOG.md 2023-11-09 08:09:59 -08:00
dave@tiredofit.ca
55d2067b43 Release 4.0.1 - See CHANGELOG.md 2023-11-09 08:04:05 -08:00
Dave Conroy
0d56a26f0f restore - remove reference to DB_DUMP_TARGET and instead use DEFAULT_BACKUP_PATH 2023-11-09 08:03:39 -08:00
Dave Conroy
635411bdd5 Update README.md 2023-11-08 22:37:39 -08:00
Dave Conroy
39776a96b8 Release 4.0.0 - See CHANGELOG.md and README.md 2023-11-08 18:55:57 -08:00
Dave Conroy
d24cdc5db5 Merge pull request #266 from tiredofit/4.x
Release 4.0.0
2023-11-08 18:49:42 -08:00
Dave Conroy
2df35e46e5 Merge 4.x prep 2023-11-08 18:48:58 -08:00
Dave Conroy
41b518f2f0 Add a failsafe for _original_debug_mode 2023-11-08 18:18:05 -08:00
Dave Conroy
89e6956cdd Adjust debug statements 2023-11-08 15:08:32 -08:00
Dave Conroy
089687dc55 Add DEBUG_ statements 2023-11-08 14:48:27 -08:00
Dave Conroy
c88fbacde9 Update Zabbix Template to fix trigger 2023-11-08 13:14:09 -08:00
Dave Conroy
3010a4d187 Rework Zabbix Payloads 2023-11-08 10:48:18 -08:00
Dave Conroy
fd59daf125 Update Zabbix Template 2023-11-08 10:48:06 -08:00
Dave Conroy
140e3183a4 Update Zabbix Template 2023-11-08 10:43:25 -08:00
Dave Conroy
45eba40360 Update Zabbix Template 2023-11-08 10:40:02 -08:00
Dave Conroy
440b24da8d Update Zabbix Template 2023-11-08 10:06:19 -08:00
Dave Conroy
9849c7339e Add PGSQL to override and force backing up globals 2023-11-08 09:11:24 -08:00
Dave Conroy
80e407d81d switch globals to only get backed up if using all 2023-11-08 08:55:21 -08:00
Dave Conroy
40ad7abac4 No more extra now 2023-11-08 08:35:22 -08:00
Dave Conroy
3ecb24c603 Fix naming issue with backup_job_filesystem_archive 2023-11-08 08:29:35 -08:00
Dave Conroy
ff96f09e33 future_time is time_future 2023-11-08 08:25:08 -08:00
Dave Conroy
77f54d06fa Rearrange variables and polish: 2023-11-08 08:00:21 -08:00
Dave Conroy
633cc8410e Fix cron expression stepping 2023-11-08 07:43:37 -08:00
Dave Conroy
f5b92854da Cleanup cron expression 2023-11-07 13:06:06 -08:00
Dave Conroy
50039f8d0c Rearrange timer() 2023-11-07 13:03:01 -08:00
Dave Conroy
b8a825a3af Rework write_log to not output more than it needs to 2023-11-07 12:33:03 -08:00
Dave Conroy
2ccc867a75 Write Legacy vars to file instead 2023-11-07 12:13:04 -08:00
Dave Conroy
11205be091 Properly handle BACKUP_BEGIN plus values 2023-11-07 12:00:19 -08:00
Dave Conroy
57193cc824 Properly translate legacy _DUMP var to DEFAULT_BACKUP 2023-11-07 11:50:59 -08:00
Dave Conroy
9863358469 Split debug statement for backup/move exit codes 2023-11-07 11:05:39 -08:00
Dave Conroy
bb3c942b35 Properly output exit code for notifications 2023-11-07 11:02:49 -08:00
Dave Conroy
72f90876e3 Fix writing logfiles to filesystem 2023-11-06 17:00:29 -08:00
Dave Conroy
c94a85b523 Change Zabbix autoregister string to dbbackup4 2023-11-06 16:53:20 -08:00
Dave Conroy
c9fd36db72 Fix DEFAULT_MYSQL table 2023-11-06 11:09:15 -08:00
Dave Conroy
a629282093 Update dependencies 2023-11-05 10:42:51 -08:00
Dave Conroy
050cc3ef62 Cleanup Mongo compression routine 2023-11-05 10:12:32 -08:00
Dave Conroy
da54cdf48b Code formatting and cleanup 2023-11-05 10:10:59 -08:00
Dave Conroy
2b446f1e4c Add encrypt/checksum duration graphs 2023-11-05 09:48:23 -08:00
Dave Conroy
9e7904ce4d Cleanup Formatting 2023-11-05 09:47:59 -08:00
Dave Conroy
c5c026d0fd Merge pull request #232 from benvia/feature-cron
Adds a cronjob-like mechanism
2023-11-05 09:17:33 -08:00
Dave Conroy
5d8a7f1720 Optimize write_log function 2023-11-05 08:54:43 -08:00
Dave Conroy
ca6b045d7d Optimize write_log function 2023-11-05 08:51:32 -08:00
Dave Conroy
da22217e7b Refactor timer/datetime 2023-11-05 08:36:15 -08:00
Dave Conroy
59854c1715 Refactor timer/datetime 2023-11-05 08:35:40 -08:00
Dave Conroy
e090eeda3f feat - add MySQL Event backup support 2023-11-05 07:55:40 -08:00
Dave Conroy
0ba1989083 modify MSSQL backups to get compressed post initial backup 2023-11-05 07:45:52 -08:00
Dave Conroy
5392bf5179 feat - add basic cron expression scheduling 2023-11-05 07:40:30 -08:00
Benoit Vianin
e42f8e9a8c Code refactoring 2023-11-05 06:48:47 +00:00
Benoit Vianin
6a28ac2d92 Fix code for absolute time 2023-11-04 18:53:42 +00:00
Dave Conroy
3af9ef6d3d Rework timers 2023-11-04 08:07:10 -07:00
Dave Conroy
5e3d8b3083 Also use ionice for DB_RESOURCE_OPTIMIZED 2023-11-04 07:43:32 -07:00
Benoit Vianin
06cfba4952 Modify DB_DUMP_BEGIN to support a full date as cron 2023-11-04 11:43:45 +00:00
Benoit Vianin
250cae98ef Restore the +XX minutes function and move print to debug mode 2023-11-04 10:34:40 +00:00
Dave Conroy
5dafa69439 feat - add file encryption 2023-11-03 19:46:46 -07:00
Dave Conroy
0159a13746 Add gpg to rundeps 2023-11-03 18:18:05 -07:00
Dave Conroy
dad5294295 Update Zabbix Template 2023-11-03 18:15:30 -07:00
Dave Conroy
3837beae97 feat - Add checksum time, and hash output to Zabbix 2023-11-03 18:11:58 -07:00
Dave Conroy
20bfcec1a9 feat - Add DEFAULT_RESOURCE_OPTIMIZED to adjust CPU scheduler 2023-11-03 18:03:52 -07:00
Dave Conroy
2cc97694f4 Fix formatting: 2023-11-03 15:53:54 -07:00
Dave Conroy
0a867f9719 feat - Add Support for MSSQL Transaction logs - Closes #200 2023-11-03 15:48:39 -07:00
Dave Conroy
7d77266d2a Use relative path instead of absolute when creating_latest_symlink - Closes #256 2023-11-03 15:32:33 -07:00
Dave Conroy
50b61ad358 Merge pull request #236 from eoehen/feature/blobxfer-download-befor-move-new-backup-file
feat(235) Synchronize local storage from S3 before upload
2023-11-03 15:23:22 -07:00
Dave Conroy
a10eb1dfba Synchronize local storage from blobxfer before upload 2023-11-03 15:22:59 -07:00
Dave Conroy
b0bf624ad3 Add Postgres Globals Backup if SPLIT_DB=TRUE 2023-11-03 15:14:02 -07:00
Dave Conroy
07e74ca746 Merge pull request #228 from oscarsiles/main
add postgres globals backup if SPLIT_DB=true
2023-11-03 15:11:35 -07:00
Dave Conroy
599c3d7940 Rework bin directory 2023-11-03 15:02:52 -07:00
Dave Conroy
3e666ef655 Add coreutils to run-deps 2023-11-03 15:02:36 -07:00
Dave Conroy
c0c7202b6d Add multi host support 2023-11-03 15:02:13 -07:00
Dave Conroy
82d8175eeb Support 4.0.x defaults 2023-11-03 15:01:59 -07:00
Dave Conroy
ee294c08a2 Update scheduler template 2023-11-03 15:01:41 -07:00
Dave Conroy
cfbac00268 Zabbix Template > 4.0.x 2023-11-03 15:01:17 -07:00
Dave Conroy
b0413e6708 Version 4.0.x examples 2023-11-03 15:00:56 -07:00
Dave Conroy
57c853d02a MIT 2023-11-03 15:00:33 -07:00
Dave Conroy
0845ec30b3 Update install/etc folder contents 2023-11-03 15:00:13 -07:00
Dave Conroy
96f9825e19 Update README.md with new options 2023-11-03 14:59:44 -07:00
Dave Conroy
2b10a0b679 feat - only cleanup / mv backups of the same file_name pattern 2023-11-01 15:55:32 -07:00
Dave Conroy
1450a33c27 feat - add file logging support 2023-11-01 15:44:03 -07:00
Dave Conroy
74e7a7e74c feat - optimize zabbix sending routines to be one process 2023-11-01 14:43:43 -07:00
Dave Conroy
e03fefeb02 fix - Optimize generating checksum routines 2023-11-01 14:34:34 -07:00
Dave Conroy
3ff3cdb19c feat - Add TARGET_DB_DUMP_PERMISSION to set file and directory permissions 2023-11-01 14:32:43 -07:00
Dave Conroy
bcf7bc5ecd rearrange compression function 2023-11-01 14:18:27 -07:00
Dave Conroy
5a01b6118e feat - make processes and files save as username dbbackup instead of root 2023-11-01 14:13:37 -07:00
dave@tiredofit.ca
8fb2972b32 Release 3.12.0 - See CHANGELOG.md 2023-10-29 18:56:11 -07:00
Dave Conroy
82eac9ad2e Merge pull request #263 from alwynpan/bugfix/#262-pgsql-db-not-exist
fix: Add an option DB_AUTH for PGSQL
2023-10-29 18:54:52 -07:00
Alwyn Pan
6bad6d8d65 fix: #262 Add option for DB_AUTH in PGSQL 2023-10-30 12:52:26 +11:00
Alwyn Pan
a922922374 fix: #262 Fix lint issues and typos in README files 2023-10-30 12:44:36 +11:00
dave@tiredofit.ca
edeadade4d Release 3.11.1 - See CHANGELOG.md 2023-10-23 08:14:29 -07:00
dave@tiredofit.ca
31b256b02d Release 3.11.0 - See CHANGELOG.md 2023-10-12 07:35:12 -07:00
dave@tiredofit.ca
d5cacdb32a Release 3.10.5 - See CHANGELOG.md 2023-10-11 15:44:26 -07:00
dave@tiredofit.ca
238b4d852c Release 3.10.4 - See CHANGELOG.md 2023-10-11 15:17:54 -07:00
Dave Conroy
8d6e72eead Merge pull request #258 from thomas-negrault/fix/mongo-restore-authentication-database
Use authentication database in mongorestore
2023-10-11 15:16:42 -07:00
Thomas Negrault
a9037f97ac Use authentication database in mongorestore 2023-10-11 22:57:29 +02:00
dave@tiredofit.ca
ebcd4fcde4 Release 3.10.3 - See CHANGELOG.md 2023-10-11 12:31:25 -07:00
Dave Conroy
adf52c1160 Merge pull request #257 from thomas-negrault/fix/alphabetical-filenames-sorting
Sort filenames alphabetically when using the restore command
2023-10-11 12:30:26 -07:00
Thomas Negrault
1eee4a49d7 Sort filenames alphabetically when using the restore command 2023-10-11 18:41:05 +02:00
Elias Oehen
f67170c1ec feat(235) Synchronize local storage from S3 before upload
close #235
2023-09-26 13:05:20 +02:00
Oscar Siles Brügge
03d2362b6d Merge branch 'tiredofit:main' into main 2023-09-16 21:43:45 +01:00
dave@tiredofit.ca
e3faab5c36 Release 3.10.2 - See CHANGELOG.md 2023-09-14 08:13:56 -07:00
Dave Conroy
768d5e60fe Merge pull request #252 from pimjansen/feature/name-typo
Updated name where it is not writing to s3
2023-09-14 08:12:39 -07:00
Dave Conroy
e3e0d7ed67 Merge pull request #251 from pimjansen/feature/split-db-use
Remove the --database flag for a single db dump
2023-09-14 08:03:41 -07:00
Pim Jansen
db808d25c7 Updated name where it is not writing to s3 2023-09-14 10:39:34 +02:00
Pim Jansen
cb5b49b90b Remove the --database flag for a single db dump which ensures there is no use statement in the dump 2023-09-14 10:36:55 +02:00
dave@tiredofit.ca
48a1ff8bbe Release 3.10.1 - See CHANGELOG.md 2023-09-13 22:37:21 -07:00
dave@tiredofit.ca
8b1308ffd1 Release 3.10.0 - See CHANGELOG.md 2023-09-13 08:32:22 -07:00
Dave Conroy
3ab3f67be9 Merge pull request #248 from jcdirks/#247-env-variable-for-additional-arguments-to-the-dump-command-only
add env variables EXTRA_DUMP_OPTS and EXTRA_ENUMERATION_OPTS
2023-09-13 08:31:13 -07:00
Jan-Claas Dirks
cd1899d849 add env variables EXTRA_DUMP_OPTS and EXTRA_ENUMERATION_OPTS 2023-09-13 10:00:41 +02:00
dave@tiredofit.ca
663667dbff Release 3.9.12 - See CHANGELOG.md 2023-09-04 08:32:05 -07:00
dave@tiredofit.ca
36506091be Release 3.9.11 - See CHANGELOG.md 2023-08-24 18:12:36 -07:00
dave@tiredofit.ca
bf646381cb Release 3.9.10 - See CHANGELOG.md 2023-08-23 15:31:41 -07:00
dave@tiredofit.ca
fb3b65b33a Release 3.9.9 - See CHANGELOG.md 2023-08-21 15:38:51 -07:00
dave@tiredofit.ca
6d1ef87042 Release 3.9.8 - See CHANGELOG.md 2023-08-20 08:18:04 -07:00
Dave Conroy
c985cc8a4f Merge pull request #240 from ToshY/issue/239-armv7
Add cargo to build dependencies
2023-08-14 19:15:21 -07:00
ToshY
2265a6acf5 Add cargo to build dependencies 2023-08-05 14:39:13 +02:00
dave@tiredofit.ca
10e7debc65 Release 3.9.7 - See CHANGELOG.md 2023-07-18 07:26:59 -07:00
dave@tiredofit.ca
94e9881b7b Release 3.9.6 - See CHANGELOG.md 2023-06-16 09:50:16 -07:00
Benoit Vianin
3c6beeaae9 Adds a cronjob-like mechanism 2023-06-14 04:36:21 +00:00
dave@tiredofit.ca
425383639a Release 3.9.5 - See CHANGELOG.md 2023-06-13 16:24:17 -07:00
dave@tiredofit.ca
1e46996812 Release 3.9.4 - See CHANGELOG.md 2023-06-13 10:16:04 -07:00
Dave Conroy
e71334564f Drop auto builds for armv7 2023-06-11 11:42:13 -07:00
Oscar Siles Brugge
5f58ce81c8 add postgres globals backup if SPLIT_DB=true 2023-06-07 09:19:25 +01:00
dave@tiredofit.ca
f83f97bf76 Release 3.9.3 - See CHANGELOG.md 2023-06-05 10:24:46 -07:00
Dave Conroy
94a8e45af6 Merge pull request #226 from vanzhiganov/patch-1
Update README.md
2023-06-05 10:13:57 -07:00
Dave Conroy
9d90e37339 Merge pull request #225 from alwynpan/bugfix/#199
#199 Report error when move the backup file to S3 or Blob fails
2023-06-05 10:13:44 -07:00
Vyacheslav Anzhiganov
085b7cd6ce Update README.md 2023-06-03 16:27:17 +03:00
Yao (Alwyn) Pan
12484bb3f3 feat: Add zip package to the image 2023-06-01 16:54:26 +10:00
Yao (Alwyn) Pan
8fc2721dd4 fix: #199 report error when move the backup file to S3 or Blob fails 2023-06-01 16:46:13 +10:00
dave@tiredofit.ca
68174c061f Release 3.9.2 - See CHANGELOG.md 2023-05-10 08:19:01 -07:00
dave@tiredofit.ca
fd1d95090e Release 3.9.1 - See CHANGELOG.md 2023-05-03 12:13:29 -07:00
Dave Conroy
7befba0d96 Update README.md 2023-04-27 08:18:26 -07:00
Dave Conroy
583253fce7 Update README.md 2023-04-26 14:43:29 -07:00
dave@tiredofit.ca
068577001e Release 3.9.0 - See CHANGELOG.md 2023-04-26 14:32:36 -07:00
Dave Conroy
7781542816 Expand on amount of variables that can be used 2023-04-24 14:54:47 -07:00
dave@tiredofit.ca
9283b5440e Release 3.8.5 - See CHANGELOG.md 2023-04-11 15:42:30 -07:00
Dave Conroy
5e62485e7f Merge pull request #216 from tpansino/bug/215
Set ltargets properly
2023-04-11 15:36:26 -07:00
Tom Pansino
f224571448 Set ltargets properly 2023-04-11 15:31:02 -07:00
dave@tiredofit.ca
01620fec00 Release 3.8.4 - See CHANGELOG.md 2023-04-06 12:14:22 -07:00
dave@tiredofit.ca
18a38b4f1d Release 3.8.3 - See CHANGELOG.md 2023-03-30 14:18:55 -07:00
dave@tiredofit.ca
150f356275 Release 3.8.2 - See CHANGELOG.md 2023-03-30 14:05:17 -07:00
dave@tiredofit.ca
e838ed0027 Release 3.8.1 - See CHANGELOG.md 2023-03-30 11:04:35 -07:00
Dave Conroy
8329b4c065 Add defaults 2023-03-27 16:41:31 -07:00
dave@tiredofit.ca
dab1ac301a Release 3.8.0 - See CHANGELOG.md 2023-03-27 15:01:10 -07:00
dave@tiredofit.ca
077201cd18 Release 3.7.7 - See CHANGELOG.md 2023-03-20 16:24:23 -07:00
Dave Conroy
eeaf59dc6f Merge pull request #210 from codemonium/simplify-pg_isready
Simplify pg_isready usage
2023-03-20 16:22:13 -07:00
Igor Artemenko
88fe0d6411 Simplify pg_isready usage
The pg_isready documentation says that it does not need a correct
database name or username to get the server status. In fact, incorrect
values result in the server logging failed connection attempts. As a
result, when we set DB_NAME to ALL, calls to the check_availability
function (which uses pg_isready) cause the server to log the following
error:

    FATAL:  database "ALL" does not exist

To eliminate this error, this change simplifies the pg_isready call.
2023-03-20 22:51:05 +00:00
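The change reads roughly as below — a sketch with assumed variable names, not the exact function body. `pg_isready` reports server status from host and port alone, so the database name can simply be dropped.
```bash
# Before (hypothetical): probing with DB_NAME=ALL makes the server log
#   FATAL:  database "ALL" does not exist
pg_isready --host="${DB_HOST}" --port="${DB_PORT}" --dbname="${DB_NAME}" --username="${DB_USER}"

# After: host and port are sufficient for an availability check
pg_isready --host="${DB_HOST}" --port="${DB_PORT}"
```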
dave@tiredofit.ca
366c4759a5 Release 3.7.6 - See CHANGELOG.md 2023-03-14 16:10:11 -07:00
Dave Conroy
37f255ec99 Merge pull request #207 from kamartem/patch-1
Typo correction
2023-03-14 16:09:08 -07:00
Dave Conroy
efa9a678c0 Merge pull request #209 from ToshY/bug/208-mysql-extra-opts-status-check
Removed EXTRA_OPTS in MySQL status check
2023-03-14 16:08:53 -07:00
ToshY
68747a4aff Removed EXTRA_OPTS in MySQL status check 2023-03-14 20:28:05 +01:00
Artem Kamyshansky
cf736278bb Typo correction 2023-03-12 19:31:04 +03:00
dave@tiredofit.ca
1659e34fc7 Release 3.7.5 - See CHANGELOG.md 2023-03-02 07:39:58 -08:00
dave@tiredofit.ca
a8df7a2c75 Release 3.7.4 - See CHANGELOG.md 2023-02-22 08:36:46 -08:00
Dave Conroy
b5194dcce9 Merge pull request #203 from gbe0/issue/201
Fix issue #201 - 99-run_forever exec format error
2023-02-22 08:35:55 -08:00
Chris
6fb947684a fix issue #201 - 99-run_forever exec format error 2023-02-23 00:26:54 +08:00
Dave Conroy
9287f4efeb Update README.md 2023-01-30 11:47:15 -08:00
Dave Conroy
eeb5b5a119 Update README.md 2023-01-30 09:58:44 -08:00
Dave Conroy
a83dfd1a0b Update Workflows 2023-01-29 18:13:20 -08:00
Dave Conroy
8fb379b51a Update workflows 2023-01-29 16:04:15 -08:00
dave@tiredofit.ca
a90e52091d Release 3.7.3 - See CHANGELOG.md 2022-12-20 06:47:24 -08:00
Dave Conroy
ac58b5cdf6 Merge branch 'main' of https://github.com/tiredofit/docker-db-backup 2022-12-20 06:46:46 -08:00
Dave Conroy
fcbe771793 Merge pull request #194 from alwynpan/feature/#193
#193 Make S3_KEY_ID and S3_KEY_SECRET optional for S3 Backup
2022-12-20 06:46:40 -08:00
Yao (Alwyn) Pan
168982ab53 Make S3_KEY_ID and S3_KEY_SECRET optional for S3 Backup 2022-12-20 17:51:56 +11:00
Dave Conroy
e377fcb6ae Fix spelling mistake in Archive notice 2022-12-19 12:35:33 -08:00
dave@tiredofit.ca
50f27233a9 Release 3.7.2 - See CHANGELOG.md 2022-12-19 12:33:07 -08:00
dave@tiredofit.ca
7ccbf23af6 Release 3.7.1 - See CHANGELOG.md 2022-12-19 08:21:35 -08:00
dave@tiredofit.ca
0921971aa3 Release 3.7.0 - See CHANGELOG.md 2022-12-16 14:02:35 -08:00
dave@tiredofit.ca
fd3b9c5fa0 Release 3.6.1 - See CHANGELOG.md 2022-11-23 07:44:28 -08:00
dave@tiredofit.ca
89b6176188 Release 3.6.0 - See CHANGELOG.md 2022-11-21 12:32:33 -08:00
dave@tiredofit.ca
22e126200e Release 3.5.6 - See CHANGELOG.md 2022-11-15 13:07:21 -08:00
dave@tiredofit.ca
3e79ca68a0 Release 3.5.5 - See CHANGELOG.md 2022-10-18 07:52:34 -07:00
Dave Conroy
bfeb07d7c0 Merge pull request #179 from greena13/bugfix/s3_backup_prefixes
Bugfix: Generating S3 prefix to store new backups
2022-10-18 07:51:50 -07:00
Aleck Greenham
8a5d647de7 Merge branch 'master' of github.com:tiredofit/docker-db-backup into bugfix/s3_backup_prefixes
# Conflicts:
#	install/assets/functions/10-db-backup
2022-10-18 07:40:33 +01:00
Aleck Greenham
4f5c04acac Bugfix: Generating S3 prefix to store new backups 2022-10-17 18:17:48 +01:00
dave@tiredofit.ca
494f742cb0 Release 3.5.4 - See CHANGELOG.md 2022-10-13 13:59:22 -07:00
dave@tiredofit.ca
e7b9a36745 Release 3.5.3 - See CHANGELOG.md 2022-10-12 08:42:14 -07:00
dave@tiredofit.ca
28c7058f37 Release 3.5.2 - See CHANGELOG.md 2022-10-11 08:48:05 -07:00
Dave Conroy
6f15c77a0f Merge pull request #174 from jauderho/patch-1
Update Dockerfile to use influxdb client 2.4.0
2022-10-11 08:45:47 -07:00
dave@tiredofit.ca
4e04e31d84 Release 3.5.1 - See CHANGELOG.md 2022-10-11 08:10:42 -07:00
Jauder Ho
f3fad4a893 Update Dockerfile
Update influxdb client to 2.4.0.
2022-10-10 16:44:40 -07:00
dave@tiredofit.ca
69f0ca762c Release 3.5.0 - See CHANGELOG.md 2022-10-10 12:06:38 -07:00
Dave Conroy
07d72163a0 Merge pull request #173 from tiredofit/develop 2022-10-10 12:01:34 -07:00
Dave Conroy
b31da0b785 Merge branch 'master' into develop 2022-10-10 12:01:06 -07:00
Dave Conroy
be490b3f4b Remove url [en|de]code functions 2022-10-10 11:53:09 -07:00
Dave Conroy
028966d0b2 Merge pull request #171 from greena13/bugfix/s3_backup_cleanup
Bugfix: S3 database cleanups
2022-10-09 08:33:45 -07:00
Dave Conroy
19d8e98744 Merge pull request #169 from eoehen/feature/Add-documentation-for-blobxfer
Add blobxfer documentation in readme file.
2022-10-09 08:32:59 -07:00
Aleck Greenham
25def5b6f0 Bugfix: S3 database cleanup path 2022-10-09 13:59:48 +01:00
Aleck Greenham
03b7ef9d0d Bugfix: S3 database cleanups 2022-10-09 12:44:22 +01:00
Dave Conroy
231dd63a38 Remove quote 2022-10-08 11:57:40 -07:00
Elias Oehen
666eb81846 Add blobxfer documentation in readme file. 2022-10-08 16:57:00 +02:00
Dave Conroy
4572ab6fca Add Azure wording to README.md 2022-10-08 07:44:05 -07:00
Dave Conroy
9ba51bcec9 Add py3-cryptography module to allow armv7 builds to finish 2022-10-08 07:19:49 -07:00
Dave Conroy
b9edbf68d3 Merge branch 'master' of https://github.com/tiredofit/docker-db-backup 2022-10-08 06:54:00 -07:00
Dave Conroy
2022158a4e Swap around some environment checks 2022-10-07 08:33:19 -07:00
Dave Conroy
2b441f11e1 Restore old functionality for Mongo backup when not using MONGO_CUSTOM_URI 2022-10-07 08:26:54 -07:00
Dave Conroy
532a6b456b Merge pull request #166 from eoehen/feature/Add-support-for-blobxfer
Add support for blobxfer
2022-10-07 08:05:50 -07:00
Dave Conroy
a8d9a0185f Merge pull request #165 from eoehen/feature/Improve-mssql-support
Improve MSSQL Support
2022-10-07 08:04:59 -07:00
Dave Conroy
8b41f5efcf Allow EXTRA_OPTS with MONGO_CUSTOM_URI 2022-10-07 08:03:24 -07:00
Elias Oehen
956904046d Add support for blobxfer 2022-10-04 19:04:43 +02:00
Elias Oehen
4d7f5e9459 Add mssql docker-compose example 2022-10-04 18:37:34 +02:00
Elias Oehen
d9723823c9 Improve mssql server support 2022-10-04 18:22:20 +02:00
Elias Oehen
0067f552f1 Move mysql example to mysql folder 2022-10-04 18:21:21 +02:00
Dave Conroy
4a8f85ddf5 Add EXTRA_OPTS to Mongo string 2022-09-29 19:35:01 -07:00
Dave Conroy
6de0cc7c03 Wrap mongo_generated_uri in quotes 2022-09-29 16:06:06 -07:00
Dave Conroy
2d017e26c5 Cleanup README 2022-09-25 10:00:33 -07:00
Dave Conroy
1efb2d43a8 Set proper environment variable 2022-09-21 10:51:25 -07:00
Dave Conroy
789aa96113 Alternate way of solving Host check 2022-09-21 09:28:01 -07:00
Dave Conroy
9d8cfd69cb Fix a couple variable issues and add silencing 2022-09-20 17:45:05 -07:00
Dave Conroy
c4dbf53ced Final Availability check 2022-09-20 13:51:54 -07:00
Dave Conroy
8706d3a91c Skip availability checks by default 2022-09-20 12:50:00 -07:00
Dave Conroy
73ad356ebf Force lowercase for filenames and hostnames for filename generation 2022-09-20 12:08:53 -07:00
Dave Conroy
73c4003dc4 Fix environment variable check for Mongo 2022-09-20 12:06:13 -07:00
Dave Conroy
a377f570f1 Introduce MONGO_CUSTOM_URI support 2022-09-20 09:41:44 -07:00
dave@tiredofit.ca
b956bd817f Release 3.4.2 - See CHANGELOG.md 2022-09-19 08:00:15 -07:00
Dave Conroy
7bda69b062 Merge pull request #158 from mark-monteiro/patch-1
Improve release notes for 3.4.1
2022-09-14 09:17:51 -07:00
Mark Monteiro
bc23b6a65e Improve release notes for 3.4.1
Add clear migration instructions for custom script paths
2022-09-14 11:57:10 -04:00
dave@tiredofit.ca
8fb3d8315f Release 3.4.1 - See CHANGELOG.md 2022-09-13 08:24:30 -07:00
Dave Conroy
c16133fdd0 Merge pull request #157 from ToshY/fix/readme-missing-db-dump-target
updated README with missing DB_DUMP_TARGET environment variable
2022-09-12 13:09:37 -07:00
ToshY
6967fd5e56 updated README with missing DB_DUMP_TARGET environment variable 2022-09-12 21:51:14 +02:00
Dave Conroy
75acaefb64 Merge pull request #155 from ToshY/fix/readme-custom-scripts-directory
updated README with correct custom scripts directory
2022-09-12 12:24:02 -07:00
ToshY
6933b0f87c updated README with correct custom scripts directory 2022-09-12 21:17:38 +02:00
dave@tiredofit.ca
dc4ab0bfc5 Release 3.4.0 - See CHANGELOG.md 2022-09-12 07:50:55 -07:00
Dave Conroy
9ea34f5a44 Add MongoDB Atlas Support 2022-09-12 07:49:29 -07:00
Dave Conroy
1d53785e7d Fix default port for Influx 2 DBs 2022-09-12 07:19:18 -07:00
Dave Conroy
4e0878b2ad Merge pull request #150 from teun95/teun95-add-rsync
Add --rsyncable for gzip compression
2022-09-12 07:17:41 -07:00
teun95
a98d33bfdb Correct table formatting in README.md 2022-09-08 11:04:11 +00:00
teun95
00c851eda2 Update README.md to include GZ_RSYNCABLE 2022-09-07 19:54:41 +01:00
teun95
cd88285036 Added rsyncable option for gzip using GZ_RSYNCABLE
GZ_RSYNCABLE=TRUE enables --rsyncable for gzip compression. Useful to speed up backups, reduce size of incremental backups, and allow for better deduplication.
2022-09-07 19:50:50 +01:00
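The option maps to gzip's `--rsyncable` flag; a minimal sketch of the resulting compression step, with assumed variable names:
```bash
# Sketch: GZ_RSYNCABLE=TRUE appends --rsyncable, which restarts the
# compressor periodically so unchanged regions of successive dumps
# compress to identical bytes (rsync- and deduplication-friendly)
if [ "${GZ_RSYNCABLE}" = "TRUE" ]; then gz_opts="--rsyncable"; fi
gzip ${gz_opts} -"${COMPRESSION_LEVEL:-3}" "${backup_file}"
```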
dave@tiredofit.ca
428c313c7b Release 3.3.12 - See CHANGELOG.md 2022-08-15 12:19:55 -07:00
Dave Conroy
210acb1e2a Merge pull request #143 from arifer612/patch-1
Fix incorrect case for filesize variable
2022-08-15 12:19:12 -07:00
Arif Er
e50a8cb0ec fix: correct case for filesize variable
Post script support expects a value from a declared variable `$FILESIZE` to provide the size of the backup files. Such a variable does not exist, leading to a situation where using `"${9}"` in a custom script furnishes the checksum hash. However, earlier up in the script the file size of the backup is indeed assigned to a variable, only that it is completely in lower case: `$filesize`. This commit aims to fix that inconsistency.
2022-08-15 21:51:40 +08:00
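A hypothetical post-backup custom script illustrating the mismatch (argument layout inferred from the description above, not the documented interface):
```bash
#!/usr/bin/env bash
# Hypothetical post script: the docs promised $FILESIZE, but the value is
# exported in lower case, so only $filesize carries the backup size
echo "checksum: ${9}"            # positional argument carrying the hash
echo "size: ${filesize} bytes"   # works; ${FILESIZE} would be empty
```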
Dave Conroy
7453852046 Release 3.3.11 - See CHANGELOG.md 2022-07-22 12:00:05 -07:00
Dave Conroy
f115a89a3c Merge pull request #141 from khoazero123/fix_postgres_restore
Fix postgres restore wrong db type
2022-07-22 11:58:57 -07:00
KhoaZero123
8b8d243944 Fix postgres restore wrong db type 2022-07-22 09:41:55 +07:00
dave@tiredofit.ca
be34ceb6ff Release 3.3.10 - See CHANGELOG.md 2022-07-19 12:16:29 -07:00
Dave Conroy
82d6ce444b Merge branch 'master' of https://github.com/tiredofit/docker-db-backup 2022-07-11 09:26:38 -07:00
Dave Conroy
382a188b77 Release 3.3.9 - See CHANGELOG.md 2022-07-11 09:26:35 -07:00
Dave Conroy
f458c34546 Merge pull request #140 from fdarveau/fix-read-port-number-ineractive-restore
Fix reading port number in interactive restore
2022-07-11 09:25:41 -07:00
François Darveau
229db5cd03 Fix reading port number in interactive restore 2022-07-10 16:44:05 -04:00
dave@tiredofit.ca
8bb926fd20 Release 3.3.8 - See CHANGELOG.md 2022-07-09 08:59:59 -07:00
dave@tiredofit.ca
f005956c47 Release 3.3.7 - See CHANGELOG.md 2022-06-23 11:49:28 -07:00
dave@tiredofit.ca
ba20386e65 Release 3.3.6 - See CHANGELOG.md 2022-06-23 08:18:08 -07:00
22 changed files with 4636 additions and 1723 deletions

.gitattributes (vendored, new file, +2 lines)

@@ -0,0 +1,2 @@
# Declare files that will always have LF line endings on checkout.
*.* text eol=lf


@@ -1,112 +1,14 @@
### Application Level Image CI
### Dave Conroy <dave at tiredofit dot ca>
name: 'build'
name: "build_image"
on:
push:
paths:
- '**'
- '!README.md'
- "**"
- "!README.md"
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
set -x
if [[ $GITHUB_REF == refs/heads/* ]]; then
if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
else
BRANCH=${GITHUB_REF#refs/heads/}
fi
case ${BRANCH} in
"main" | "master" )
BRANCHTAG="${DOCKER_IMAGE}:latest"
;;
"develop" )
BRANCHTAG="${DOCKER_IMAGE}:develop"
;;
* )
if [ -n "${{ secrets.LATEST }}" ] ; then
if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
;;
esac
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
fi
if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
TAGS=${BRANCHTAG},${GITTAG}
else
TAGS="${BRANCHTAG}${GITTAG}"
fi
echo ::set-output name=tags::${TAGS}
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Label
id: Label
run: |
if [ -f "Dockerfile" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
if [ -f "CHANGELOG.md" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
fi
if [[ $GITHUB_REF == refs/heads/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
fi
fi
- name: Build
uses: docker/build-push-action@v3
with:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm/v7,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}
build:
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
secrets: inherit


@@ -1,6 +1,4 @@
# Manual Workflow (Application)
name: manual
name: "manual_build_image"
on:
workflow_dispatch:
@@ -8,105 +6,10 @@ on:
Manual Build:
description: 'Manual Build'
required: false
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
set -x
if [[ $GITHUB_REF == refs/heads/* ]]; then
if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
else
BRANCH=${GITHUB_REF#refs/heads/}
fi
case ${BRANCH} in
"main" | "master" )
BRANCHTAG="${DOCKER_IMAGE}:latest"
;;
"develop" )
BRANCHTAG="${DOCKER_IMAGE}:develop"
;;
* )
if [ -n "${{ secrets.LATEST }}" ] ; then
if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
;;
esac
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
fi
if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
TAGS=${BRANCHTAG},${GITTAG}
else
TAGS="${BRANCHTAG}${GITTAG}"
fi
echo ::set-output name=tags::${TAGS}
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Label
id: Label
run: |
if [ -f "Dockerfile" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
if [ -f "CHANGELOG.md" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
fi
if [[ $GITHUB_REF == refs/heads/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
fi
fi
- name: Build
uses: docker/build-push-action@v3
with:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm/v7,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}
build:
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
secrets: inherit

File diff suppressed because it is too large


@@ -1,59 +1,86 @@
FROM docker.io/tiredofit/alpine:3.16
ARG DISTRO=alpine
ARG DISTRO_VARIANT=3.19
FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables
ENV INFLUX2_VERSION=2.2.1 \
MSSQL_VERSION=17.8.1.1-1 \
CONTAINER_ENABLE_MESSAGING=FALSE \
ENV INFLUX1_CLIENT_VERSION=1.8.0 \
INFLUX2_CLIENT_VERSION=2.7.3 \
MSODBC_VERSION=18.3.2.1-1 \
MSSQL_VERSION=18.3.1.1-1 \
AWS_CLI_VERSION=1.31.5 \
CONTAINER_ENABLE_MESSAGING=TRUE \
CONTAINER_ENABLE_MONITORING=TRUE \
CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
IMAGE_NAME="tiredofit/db-backup" \
IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
### Dependencies
RUN set -ex && \
apk update && \
apk upgrade && \
apk add -t .db-backup-build-deps \
build-base \
bzip2-dev \
git \
libarchive-dev \
xz-dev \
&& \
RUN source /assets/functions/00-container && \
set -ex && \
addgroup -S -g 10000 dbbackup && \
adduser -S -D -H -u 10000 -G dbbackup -g "Tired of I.T! DB Backup" dbbackup && \
\
apk add --no-cache -t .db-backup-run-deps \
aws-cli \
bzip2 \
influxdb \
libarchive \
mariadb-client \
mariadb-connector-c \
mongodb-tools \
libressl \
pigz \
postgresql \
postgresql-client \
pv \
redis \
sqlite \
xz \
zstd \
&& \
package update && \
package upgrade && \
package install .db-backup-build-deps \
build-base \
bzip2-dev \
cargo \
git \
go \
libarchive-dev \
openssl-dev \
libffi-dev \
python3-dev \
py3-pip \
xz-dev \
&& \
\
cd /usr/src && \
package install .db-backup-run-deps \
bzip2 \
coreutils \
gpg \
gpg-agent \
groff \
libarchive \
mariadb-client \
mariadb-connector-c \
mongodb-tools \
openssl \
pigz \
postgresql16 \
postgresql16-client \
pv \
py3-botocore \
py3-colorama \
py3-cryptography \
py3-docutils \
py3-jmespath \
py3-rsa \
py3-setuptools \
py3-s3transfer \
py3-yaml \
python3 \
redis \
sqlite \
xz \
zip \
zstd \
&& \
\
apkArch="$(apk --print-arch)"; \
apkArch="$(uname -m)"; \
case "$apkArch" in \
x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
aarch64 ) influx2=true ; influx_arch=arm64 ;; \
*) sleep 0.1 ;; \
x86_64) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
arm64 | aarch64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
*) sleep 0.1 ;; \
esac; \
\
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
\
if [[ $mssql = "true" ]] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
if [[ $influx2 = "true" ]] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
clone_git_repo https://github.com/influxdata/influxdb "${INFLUX1_CLIENT_VERSION}" && \
go build -o /usr/sbin/influxd ./cmd/influxd && \
strip /usr/sbin/influxd && \
mkdir -p /usr/src/pbzip2 && \
curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
cd /usr/src/pbzip2 && \
@@ -70,11 +97,17 @@ RUN set -ex && \
make && \
make install && \
\
### Cleanup
apk del .db-backup-build-deps && \
rm -rf /usr/src/* && \
rm -rf /etc/logrotate.d/redis && \
rm -rf /root/.cache /tmp/* /var/cache/apk/*
pip3 install --break-system-packages awscli==${AWS_CLI_VERSION} && \
pip3 install --break-system-packages blobxfer && \
\
package remove .db-backup-build-deps && \
package cleanup && \
rm -rf \
/*.apk \
/etc/logrotate.d/* \
/root/.cache \
/root/go \
/tmp/* \
/usr/src/*
### S6 Setup
ADD install /
COPY install /


@@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2022 Dave Conroy
Copyright (c) 2023 Dave Conroy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

README.md (850 lines changed)

@@ -1,32 +1,36 @@
# github.com/tiredofit/docker-db-backup
[![GitHub release](https://img.shields.io/github/v/tag/tiredofit/docker-db-backup?style=flat-square)](https://github.com/tiredofit/docker-db-backup/releases/latest)
[![Build Status](https://img.shields.io/github/workflow/status/tiredofit/docker-db-backup/build?style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions?query=workflow%3Abuild)
[![Build Status](https://img.shields.io/github/actions/workflow/status/tiredofit/docker-db-backup/main.yml?branch=main&style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions)
[![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
[![Docker Pulls](https://img.shields.io/docker/pulls/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
[![Become a sponsor](https://img.shields.io/badge/sponsor-tiredofit-181717.svg?logo=github&style=flat-square)](https://github.com/sponsors/tiredofit)
[![Become a sponsor](https://img.shields.io/badge/sponsor-tiredofit-181717.svg?logo=github&style=flat-square)](https://www.tiredofit.ca/sponsor)
[![Paypal Donate](https://img.shields.io/badge/donate-paypal-00457c.svg?logo=paypal&style=flat-square)](https://www.paypal.me/tiredofit)
* * *
---
## About
This will build a container for backing up multiple types of DB Servers
Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
Backs up CouchDB, InfluxDB, MySQL/MariaDB, Microsoft SQL, MongoDB, Postgres, Redis servers.
* dump to local filesystem or backup to S3 Compatible services
* select database user and password
* backup all databases, single, or multiple databases
* backup all to seperate files or one singular file
* choose to have an MD5 or SHA1 sum after backup for verification
* delete old backups after specific amount of time
* choose compression type (none, gz, bz, xz, zstd)
* connect to any container running on the same system
* Script to perform restores
* Zabbix Monitoring capabilities
* select how often to run a dump
* select when to start the first dump, whether time of day or relative to container start time
* Execute script after backup for monitoring/alerting purposes
- dump to local filesystem or backup to S3 Compatible services, and Azure.
- multiple backup job support
- selectable when to start the first dump, whether time of day or relative to container start time
- selectable interval
- selectable omit scheduling during periods of time
- selectable database user and password
- selectable cleanup and archive capabilities
- selectable database name support - all databases, single, or multiple databases
- backup all to separate files or one singular file
- checksum support choose to have an MD5 or SHA1 hash generated after backup for verification
- compression support (none, gz, bz, xz, zstd)
- encryption support (passphrase and public key)
- notify upon job failure to email, matrix, mattermost, rocketchat, custom script
- zabbix metrics support
- hooks to execute pre and post backup job for customization purposes
- companion script to aid in restores
## Maintainer
@@ -37,7 +41,6 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
- [About](#about)
- [Maintainer](#maintainer)
- [Table of Contents](#table-of-contents)
- [Persistent Storage](#persistent-storage)
- [Prerequisites and Assumptions](#prerequisites-and-assumptions)
- [Installation](#installation)
- [Build from Source](#build-from-source)
@@ -45,20 +48,61 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
- [Multi Architecture](#multi-architecture)
- [Configuration](#configuration)
- [Quick Start](#quick-start)
- [Persistent Storage](#persistent-storage-1)
- [Persistent Storage](#persistent-storage)
- [Environment Variables](#environment-variables)
- [Base Images used](#base-images-used)
- [Container Options](#container-options)
- [Database Specific Options](#database-specific-options)
- [For Influx DB2:](#for-influx-db2)
- [Scheduling Options](#scheduling-options)
- [Backup Options](#backup-options)
- [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
- [Job Defaults](#job-defaults)
- [Compression Options](#compression-options)
- [Encryption Options](#encryption-options)
- [Scheduling Options](#scheduling-options)
- [Default Database Options](#default-database-options)
- [CouchDB](#couchdb)
- [InfluxDB](#influxdb)
- [MariaDB/MySQL](#mariadbmysql)
- [Microsoft SQL](#microsoft-sql)
- [MongoDB](#mongodb)
- [Postgresql](#postgresql)
- [Redis](#redis)
- [Default Storage Options](#default-storage-options)
- [Filesystem](#filesystem)
- [S3](#s3)
- [Azure](#azure)
- [Hooks](#hooks)
- [Path Options](#path-options)
- [Pre Backup](#pre-backup)
- [Post backup](#post-backup)
- [Job Backup Options](#job-backup-options)
- [Compression Options](#compression-options-1)
- [Encryption Options](#encryption-options-1)
- [Scheduling Options](#scheduling-options-1)
- [Specific Database Options](#specific-database-options)
- [CouchDB](#couchdb-1)
- [InfluxDB](#influxdb-1)
- [MariaDB/MySQL](#mariadbmysql-1)
- [Microsoft SQL](#microsoft-sql-1)
- [MongoDB](#mongodb-1)
- [Postgresql](#postgresql-1)
- [Redis](#redis-1)
- [SQLite](#sqlite)
- [Specific Storage Options](#specific-storage-options)
- [Filesystem](#filesystem-1)
- [S3](#s3-1)
- [Azure](#azure-1)
- [Hooks](#hooks-1)
- [Path Options](#path-options-1)
- [Pre Backup](#pre-backup-1)
- [Post backup](#post-backup-1)
- [Notifications](#notifications)
- [Custom Notifications](#custom-notifications)
- [Email Notifications](#email-notifications)
- [Matrix Notifications](#matrix-notifications)
- [Mattermost Notifications](#mattermost-notifications)
- [Rocketchat Notifications](#rocketchat-notifications)
- [Maintenance](#maintenance)
- [Shell Access](#shell-access)
- [Manual Backups](#manual-backups)
- [Restoring Databases](#restoring-databases)
- [Custom Scripts](#custom-scripts)
- [Support](#support)
- [Usage](#usage)
- [Bugfixes](#bugfixes)
@@ -66,19 +110,25 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
- [Updates](#updates)
- [License](#license)
> **NOTE**: If you are using this with a docker-compose file along with a seperate SQL container, take care not to set the variables to backup immediately, more so have it delay execution for a minute, otherwise you will get a failed first backup.
### Persistent Storage
## Prerequisites and Assumptions
* You must have a working connection to one of the supported DB Servers and appropriate credentials
- You must have a working connection to one of the supported DB Servers and appropriate credentials
## Installation
### Build from Source
Clone this repository and build the image with `docker build <arguments> (imagename) .`
### Prebuilt Images
Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and is the recommended method of installation.
Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup)
Builds of the image are also available on the [Github Container Registry](https://github.com/tiredofit/docker-db-backup/pkgs/container/docker-db-backup)
```bash
docker pull ghcr.io/tiredofit/docker-db-backup:(imagetag)
```
The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md):
@@ -87,33 +137,37 @@ The following image tags are available along with their tagged release based on
| latest | `:latest` |
```bash
docker pull tiredofit/db-backup:(imagetag)
docker pull docker.io/tiredofit/db-backup:(imagetag)
```
#### Multi Architecture
Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architecures, type `docker manifest (image):(tag)`
Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://www.tiredofit.ca/sponsor) my work so that I can work with various hardware. To see if this image supports multiple architectures, type `docker manifest inspect (image):(tag)`
## Configuration
### Quick Start
* The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a working [docker-compose.yml](examples/docker-compose.yml) that can be modified for development or production use.
- The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a series of example compose.yml that can be modified for development or production use.
- Set various [environment variables](#environment-variables) to understand the capabilities of this image.
- Map [persistent storage](#data-volumes) for access to configuration and data files for backup.
* Set various [environment variables](#environment-variables) to understand the capabilities of this image.
* Map [persistent storage](#data-volumes) for access to configuration and data files for backup.
* Make [networking ports](#networking) available for public access if necessary
### Persistent Storage
The following directories are used for configuration and can be mapped for persistent storage.
| Directory | Description |
| ------------------------ | ---------------------------------------------------------------------------------- |
| `/backup` | Backups |
| `/assets/custom-scripts` | *Optional* Put custom scripts in this directory to execute after backup operations |
| Directory | Description |
| ---------------------- | ----------------------------------------------------------------------------------- |
| `/backup` | Backups |
| `/assets/scripts/pre` | _Optional_ Put custom scripts in this directory to execute before backup operations |
| `/assets/scripts/post` | _Optional_ Put custom scripts in this directory to execute after backup operations |
| `/logs` | _Optional_ Logfiles for backup jobs |
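For example, these directories might be mapped like so (host paths are placeholders):
```bash
# Hypothetical volume mappings for the directories above
docker run -d \
  -v "$(pwd)/backup:/backup" \
  -v "$(pwd)/scripts/post:/assets/scripts/post" \
  -v "$(pwd)/logs:/logs" \
  docker.io/tiredofit/db-backup:latest
```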
### Environment Variables
#### Base Images used
This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) or [Debian Linux](https://hub.docker.com/r/tiredofit/debian) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`,`vim`.
This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handled via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`.
Be sure to view the following repositories to understand all the customizable options:
@@ -123,116 +177,514 @@ Be sure to view the following repositories to understand all the customizable op
#### Container Options
| Parameter | Description | Default |
| -------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` |
| `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
| `POST_SCRIPT` | Fill this variable in with a command to execute post the script backing up | |
| `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE`
| Parameter | Description | Default |
| ------------------------ | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` |
| `USER_DBBACKUP` | The uid that the image should read and write files as (username is `dbbackup`) | `10000` |
| `GROUP_DBBACKUP` | The gid that the image should read and write files as (groupname is `dbbackup`) | `10000` |
| `LOG_PATH` | Path to log files | `/logs` |
| `TEMP_PATH` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
| `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
| `BACKUP_JOB_CONCURRENCY` | How many backup jobs to run concurrently | `1` |
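As an illustration of these container options (example values only, not a recommended configuration):
```bash
# Hypothetical: manual mode with verbose logging; the container exits
# after its backup rather than running forever
docker run --rm \
  -e MODE=MANUAL \
  -e MANUAL_RUN_FOREVER=FALSE \
  -e DEBUG_MODE=TRUE \
  docker.io/tiredofit/db-backup:latest
```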
### Database Specific Options
| Parameter | Description | Default |
| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by seperating with commas eg `db1,db2` | |
| `DB_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases seperated via commas from being backed up | |
| `DB_USER` | username for the database(s) - Can use `root` for MySQL | |
| `DB_PASS` | (optional if DB doesn't require it) password for the database | |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
| `INFLUX_VERSION` | Influx version to back up: `1` (1.x) or `2` (2.x series) - amd64 and arm64 only for `2` | |
#### Job Defaults
If these are set and no other defaults or variables are set explicitly, they will be added to any of the backup jobs.
#### For Influx DB2:
Your Organization will be mapped to `DB_USER` and your root token to `DB_PASS`. You may use `DB_NAME=ALL` to back up the entire set of databases. For `DB_HOST`, use the syntax `http(s)://db-name`.
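As a sketch, an InfluxDB 2.x job using this mapping might look like the following (the host and credential values are illustrative):

```bash
# illustrative values only - map your organization and root token as described above
DB_TYPE=influx
DB_HOST=http://influxdb    # note the http(s):// prefix
DB_USER=my-org             # your InfluxDB organization
DB_PASS=my-root-token      # your root token
DB_NAME=ALL                # back up the entire set of databases
INFLUX_VERSION=2
```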
| Variable | Description | Default |
| --------------------------------- | ------------------------------------------------------------------------------------- | ------------ |
| `DEFAULT_BACKUP_LOCATION` | Backup to `FILESYSTEM`, `blobxfer` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `DEFAULT_CHECKSUM` | Either `MD5` or `SHA1` or `NONE` | `MD5` |
| `DEFAULT_LOG_LEVEL` | Log output on screen and in files `INFO` `NOTICE` `ERROR` `WARN` `DEBUG` | `notice` |
| `DEFAULT_RESOURCE_OPTIMIZED` | Perform operations at a lower priority to the CPU and IO scheduler | `FALSE` |
| `DEFAULT_SKIP_AVAILABILITY_CHECK` | Before backing up - skip connectivity check | `FALSE` |
### Scheduling Options
| Parameter | Description | Default |
| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump frequency fires). 1440 would delete anything over 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
##### Compression Options
- You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to parse properly. There have been reports of values that start with a `0` being converted into a different format, which prevents the timer from starting at the correct time.
### Backup Options
| Parameter | Description | Default |
| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | -------------- |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
| `COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` | `3` |
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
| `ENABLE_CHECKSUM` | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` |
| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | |
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` |
| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` |
| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` |
| Variable | Description | Default |
| -------------------------------------- | ---------------------------------------------------------------------------------------------- | -------------- |
| `DEFAULT_COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
| `DEFAULT_COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` | `3` |
| | except for `ZSTD` which allows for `1` to `19` | |
| `DEFAULT_GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. | `FALSE` |
| `DEFAULT_ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
| `DEFAULT_PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
- When using compression with MongoDB, only `GZ` compression is possible.
##### Encryption Options
#### Backing Up to S3 Compatible Services
Encryption occurs after compression and the encrypted filename will have a `.gpg` suffix
If `BACKUP_LOCATION` = `S3` then the following options are used.
| Variable | Description | Default | `_FILE` |
| ----------------------------- | -------------------------------------------- | ------- | ------- |
| `DEFAULT_ENCRYPT` | Encrypt file after backing up with GPG | `FALSE` | |
| `DEFAULT_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG | | x |
| *or* | | | |
| `DEFAULT_ENCRYPT_PUBLIC_KEY` | Path of public key to encrypt file with GPG | | x |
| `DEFAULT_ENCRYPT_PRIVATE_KEY` | Path of private key to encrypt file with GPG | | x |
| Parameter | Description | Default |
| --------------------- | ----------------------------------------------------------------------------------------- | ------- |
| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | |
| `S3_KEY_ID` | S3 Key ID | |
| `S3_KEY_SECRET` | S3 Key Secret | |
| `S3_PATH` | S3 Pathname to save to e.g. '`backup`' | |
| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | |
| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` |
| `S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | |
| `S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | |
| _*OR*_ | | |
| `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` |
##### Scheduling Options
## Maintenance
| Variable | Description | Default |
| ------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `DEFAULT_BACKUP_INTERVAL` | How often to do a backup, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
| `DEFAULT_BACKUP_BEGIN` | What time to do the initial backup. Defaults to immediate (`+0`) | `+0` |
| | Must be in one of four formats: | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
| | Full datestamp e.g. `2023-12-21 23:30:00` | |
| | Cron expression e.g. `30 23 * * *` [Understand the format](https://en.wikipedia.org/wiki/Cron) - *BACKUP_INTERVAL is ignored* | |
| `DEFAULT_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when backup interval executes) | `FALSE` |
| | 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | |
| `DEFAULT_ARCHIVE_TIME` | Value in minutes to move all files older than (x) from `DEFAULT_FILESYSTEM_PATH` to `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | |
| `DEFAULT_BACKUP_BLACKOUT_BEGIN` | Use `HHMM` notation to start a blackout period where no backups occur eg `0420` | |
| `DEFAULT_BACKUP_BLACKOUT_END` | Use `HHMM` notation to set the end period where no backups occur eg `0430` | |
### Shell Access
> You may need to wrap your `DEFAULT_BACKUP_BEGIN` value in quotes for it to parse properly. There have been reports of values that start with a `0` being converted into a different format, which prevents the timer from starting at the correct time.
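For example, quoting an absolute start time (the value is illustrative):

```bash
# quote the value so a leading zero is not reinterpreted
DEFAULT_BACKUP_BEGIN="0415"
```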
For debugging and maintenance purposes you may want to access the container's shell.
```bash
docker exec -it (whatever your container name is) bash
```
### Manual Backups
Manual Backups can be performed by entering the container and typing `backup-now`
##### Default Database Options
- Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images, specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup, and then exit cleanly. An alternative way to run the script is to execute `/etc/services.available/10-db-backup/run`.
###### CouchDB
### Restoring Databases
Entering the container and executing `restore` will launch a menu-based script to restore your backups - MariaDB, Postgres, and Mongo supported.
| Variable | Description | Default | `_FILE` |
| -------------- | ------------ | ------- | ------- |
| `DEFAULT_PORT` | CouchDB Port | `5984` | x |
You will be presented with a series of menus allowing you to choose:
- What file to restore
- What type of DB Backup
- What Host to restore to
- What Database Name to restore to
- What Database User to use
- What Database Password to use
- What Database Port to use
###### InfluxDB
The image will try to do autodetection based on the filename for the type, hostname, and database name.
The image will also allow you to use the environment variables or Docker secrets that were used to back up the databases
| Variable | Description | Default | `_FILE` |
| ------------------------ | ------------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DEFAULT_PORT` | InfluxDB Port | | x |
| | Version 1.x | `8088` | |
| | Version 2.x | `8086` | |
| `DEFAULT_INFLUX_VERSION` | Influx version to back up: `1` (1.x) or `2` (2.x series) - amd64 and aarch64/armv8 only for `2` | `2` | |
The script can also be executed skipping the interactive mode by using the following syntax:
###### MariaDB/MySQL
`restore <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>`
| Variable | Description | Default | `_FILE` |
| ---------------------------------- | --------------------------------------------------------------------------------------------------------- | ------------------------- | ------- |
| `DEFAULT_PORT` | MySQL / MariaDB Port | `3306` | x |
| `DEFAULT_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `DEFAULT_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DEFAULT_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `DEFAULT_MYSQL_EVENTS` | Backup Events | `TRUE` | |
| `DEFAULT_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
| `DEFAULT_MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction | `TRUE` | |
| `DEFAULT_MYSQL_STORED_PROCEDURES` | Backup stored procedures | `TRUE` | |
| `DEFAULT_MYSQL_ENABLE_TLS` | Enable TLS functionality | `FALSE` | |
| `DEFAULT_MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` | |
| `DEFAULT_MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` | |
| `DEFAULT_MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` | x |
| `DEFAULT_MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | | x |
| `DEFAULT_MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | | x |
If you only enter some of the arguments you will be prompted to fill them in.
### Custom Scripts
###### Microsoft SQL
If you want to execute a custom script at the end of backup, you can drop bash scripts with the extension of `.sh` in this directory. See the following example to utilize:
| Variable | Description | Default | `_FILE` |
| -------------------- | --------------------------------------- | ---------- | ------- |
| `DEFAULT_PORT` | Microsoft SQL Port | `1433` | x |
| `DEFAULT_MSSQL_MODE` | Backup `DATABASE` or `TRANSACTION` logs | `DATABASE` | |
###### MongoDB
| Variable | Description | Default | `_FILE` |
| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | ------- | ------- |
| `DEFAULT_AUTH` | (Optional) Authentication Database | | x |
| `DEFAULT_PORT` | MongoDB Port | `27017` | x |
| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | | x |
| | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. | | |
| | You can override them by making your own entries | | |
###### Postgresql
| Variable | Description | Default | `_FILE` |
| -------------------------------- | --------------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DEFAULT_AUTH` | (Optional) Authentication Database | | x |
| `DEFAULT_BACKUP_GLOBALS` | Backup Globals as part of backup procedure | | |
| `DEFAULT_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `DEFAULT_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DEFAULT_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `DEFAULT_PORT` | PostgreSQL Port | `5432` | x |
###### Redis
| Variable | Description | Default | `_FILE` |
| -------------------------------- | --------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DEFAULT_PORT` | Default Redis Port | `6379` | x |
| `DEFAULT_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
##### Default Storage Options
Options that are related to the value of `DEFAULT_BACKUP_LOCATION`
###### Filesystem
If `DEFAULT_BACKUP_LOCATION` = `FILESYSTEM` then the following options are used:
| Variable | Description | Default |
| ------------------------------------ | ----------------------------------------------------------------------------------------------------- | ------------------------------------- |
| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)_(DB_NAME)_(DB_HOST)` | `TRUE` |
| `DEFAULT_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
| `DEFAULT_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
| `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` |
| `DEFAULT_FILESYSTEM_PERMISSION` | Permissions to apply to files. | `600` |
###### S3
If `DEFAULT_BACKUP_LOCATION` = `S3` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| ----------------------------- | ----------------------------------------------------------------------------------------- | ------- | ------- |
| `DEFAULT_S3_BUCKET` | S3 Bucket name e.g. `mybucket` | | x |
| `DEFAULT_S3_KEY_ID` | S3 Key ID (Optional) | | x |
| `DEFAULT_S3_KEY_SECRET` | S3 Key Secret (Optional) | | x |
| `DEFAULT_S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | | x |
| `DEFAULT_S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | | x |
| `DEFAULT_S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | | x |
| `DEFAULT_S3_PROTOCOL` | Protocol to connect to `DEFAULT_S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` | x |
| `DEFAULT_S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | | x |
| `DEFAULT_S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | | x |
| _*OR*_ | | | |
| `DEFAULT_S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` | |
- When `DEFAULT_S3_KEY_ID` and/or `DEFAULT_S3_KEY_SECRET` is not set, the container will try to use an assigned IAM role (if any) for uploading the backup files to the S3 bucket.
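A minimal sketch for an S3-compatible endpoint such as Minio (all values are hypothetical):

```bash
DEFAULT_BACKUP_LOCATION=S3
DEFAULT_S3_HOST=minio:9000              # hostname and port of the S3-compatible service
DEFAULT_S3_PROTOCOL=https
DEFAULT_S3_BUCKET=db-backups
DEFAULT_S3_PATH=backup                  # no trailing slash
DEFAULT_S3_REGION=us-east-1
DEFAULT_S3_KEY_ID=minio-access-key      # omit to fall back to an IAM role
DEFAULT_S3_KEY_SECRET=minio-secret-key
```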
###### Azure
If `DEFAULT_BACKUP_LOCATION` = `blobxfer` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| -------------------------------------- | ------------------------------------------- | ------------------- | ------- |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DEFAULT_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
> This service uploads files from the backup target directory `DEFAULT_FILESYSTEM_PATH`.
> If a cleanup configuration in `DEFAULT_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
##### Hooks
###### Path Options
| Parameter | Description | Default |
| ------------------------------ | --------------------------------------------------------------------------- | ----------------------- |
| `DEFAULT_SCRIPT_LOCATION_PRE` | Location on filesystem inside container to execute bash scripts pre backup | `/assets/scripts/pre/` |
| `DEFAULT_SCRIPT_LOCATION_POST` | Location on filesystem inside container to execute bash scripts post backup | `/assets/scripts/post/` |
| `DEFAULT_PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | |
| `DEFAULT_POST_SCRIPT` | Fill this variable in with a command to execute post backing up | |
###### Pre Backup
If you want to execute a custom script before a backup starts, you can drop bash scripts with the extension of `.sh` in the location defined in `DB01_SCRIPT_LOCATION_PRE`. See the following example to utilize:
```bash
$ cat pre-script.sh
#!/bin/bash
# #### Example Pre Script
# #### $1=DBXX_TYPE (Type of Backup)
# #### $2=DBXX_HOST (Backup Host)
# #### $3=DBXX_NAME (Name of Database backed up)
# #### $4=BACKUP START TIME (Seconds since Epoch)
# #### $5=BACKUP FILENAME (Filename)
echo "${1} Backup Starting on ${2} for ${3} at ${4}. Filename: ${5}"
## script DBXX_TYPE DBXX_HOST DBXX_NAME STARTEPOCH BACKUP_FILENAME
${f} "${backup_job_db_type}" "${backup_job_db_host}" "${backup_job_db_name}" "${backup_routines_start_time}" "${backup_job_file}"
```
Outputs the following on the console:
`mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_20220315-000000.sql.bz2`
###### Post backup
If you want to execute a custom script at the end of a backup, you can drop bash scripts with the extension of `.sh` in the location defined in `DB01_SCRIPT_LOCATION_POST`. Also, to support legacy users, `/assets/custom-scripts` is scanned and executed. See the following example to utilize:
```bash
$ cat post-script.sh
#!/bin/bash
# #### Example Post Script
# #### $1=EXIT_CODE (After running backup routine)
# #### $2=DBXX_TYPE (Type of Backup)
# #### $3=DBXX_HOST (Backup Host)
# #### $4=DBXX_NAME (Name of Database backed up)
# #### $5=BACKUP START TIME (Seconds since Epoch)
# #### $6=BACKUP FINISH TIME (Seconds since Epoch)
# #### $7=BACKUP TOTAL TIME (Seconds between Start and Finish)
# #### $8=BACKUP FILENAME (Filename)
# #### $9=BACKUP FILESIZE
# #### $10=HASH (If CHECKSUM enabled)
# #### $11=MOVE_EXIT_CODE
echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes MD5: ${10}"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE MOVE_EXIT_CODE
${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${backup_job_db_name}" "${backup_routines_start_time}" "${backup_routines_finish_time}" "${backup_routines_total_time}" "${backup_job_file}" "${filesize}" "${checksum_value}" "${move_exit_code}"
```
Outputs the following on the console:
`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_20220315-000000.sql.bz2 Size: 7795 bytes Hash: 952fbaafa30437494fdf3989a662cd40 0`
If you wish to change the size value from bytes to megabytes set environment variable `DB01_SIZE_VALUE=megabytes`
You must make your scripts executable; otherwise an internal check will skip running them.
If for some reason your filesystem or host is not detecting it right, use the environment variable `DB01_POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass.
#### Job Backup Options
If `DEFAULT_` variables are set and you do not wish for the settings to carry over into your jobs, you can set the appropriate environment variable with the value of `unset`.
Otherwise, override them per backup job. Additional backup jobs can be scheduled by using `DB02_`,`DB03_`,`DB04_` ... prefixes. See [Specific Database Options](#specific-database-options) which may overrule this list.
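A sketch of two jobs where the second overrides one inherited default and unsets another (hostnames and credentials are illustrative):

```bash
DEFAULT_COMPRESSION=ZSTD
DEFAULT_BACKUP_INTERVAL=1440

DB01_TYPE=mysql
DB01_HOST=mariadb
DB01_NAME=app
DB01_USER=root
DB01_PASS=password

DB02_TYPE=pgsql
DB02_HOST=postgres
DB02_NAME=app
DB02_USER=postgres
DB02_PASS=password
DB02_COMPRESSION=GZ     # override the inherited default for this job only
DB02_CHECKSUM=unset     # do not carry the default into this job
```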
| Parameter | Description | Default | `_FILE` |
| ----------- | ---------------------------------------------------------------------------------------------- | ------- | ------- |
| `DB01_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `mssql` `pgsql` `mongo` `redis` `sqlite3` | | |
| `DB01_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | | x |
| `DB01_NAME` | Schema Name e.g. `database` | | x |
| `DB01_USER` | username for the database(s) - Can use `root` for MySQL | | x |
| `DB01_PASS` | (optional if DB doesn't require it) password for the database | | x |
| Variable | Description | Default |
| ------------------------------ | --------------------------------------------------------------------------------------------------------- | ------------ |
| `DB01_BACKUP_LOCATION` | Backup to `FILESYSTEM`, `blobxfer` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `DB01_CHECKSUM` | Either `MD5` or `SHA1` or `NONE` | `MD5` |
| `DB01_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | |
| `DB01_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | |
| `DB01_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | |
| `DB01_LOG_LEVEL` | Log output on screen and in files `INFO` `NOTICE` `ERROR` `WARN` `DEBUG` | `debug` |
| `DB01_RESOURCE_OPTIMIZED` | Perform operations at a lower priority to the CPU and IO scheduler | `FALSE` |
| `DB01_SKIP_AVAILABILITY_CHECK` | Before backing up - skip connectivity check | `FALSE` |
##### Compression Options
| Variable | Description | Default |
| ----------------------------------- | ---------------------------------------------------------------------------------------------- | -------------- |
| `DB01_COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
| `DB01_COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` | `3` |
| | except for `ZSTD` which allows for `1` to `19` | |
| `DB01_GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. | `FALSE` |
| `DB01_ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
| `DB01_PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
##### Encryption Options
Encryption will occur after compression and the resulting filename will have a `.gpg` suffix
| Variable | Description | Default | `_FILE` |
| -------------------------- | -------------------------------------------- | ------- | ------- |
| `DB01_ENCRYPT` | Encrypt file after backing up with GPG | `FALSE` | |
| `DB01_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG | | x |
| *or* | | | |
| `DB01_ENCRYPT_PUBLIC_KEY` | Path of public key to encrypt file with GPG | | x |
| `DB01_ENCRYPT_PRIVATE_KEY` | Path of private key to encrypt file with GPG | | x |
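As a sketch, symmetric encryption via a Docker secret might look like this (the secret and key paths are hypothetical):

```bash
DB01_ENCRYPT=TRUE
DB01_ENCRYPT_PASSPHRASE_FILE=/run/secrets/backup_passphrase
# or, using a key pair mounted into the container:
#DB01_ENCRYPT_PUBLIC_KEY=/certs/backup.pub
```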
##### Scheduling Options
| Variable | Description | Default |
| ---------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `DB01_BACKUP_INTERVAL` | How often to do a backup, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB01_BACKUP_BEGIN` | What time to do the initial backup. Defaults to immediate (`+0`) | `+0` |
| | Must be in one of four formats: | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
| | Full datestamp e.g. `2023-12-21 23:30:00` | |
| | Cron expression e.g. `30 23 * * *` [Understand the format](https://en.wikipedia.org/wiki/Cron) - *BACKUP_INTERVAL is ignored* | |
| `DB01_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when backup interval executes) | `FALSE` |
| | 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | |
| `DB01_ARCHIVE_TIME` | Value in minutes to move all files older than (x) from `DB01_BACKUP_FILESYSTEM_PATH` | |
| | to `DB01_BACKUP_FILESYSTEM_ARCHIVE_PATH` - which is useful when pairing with an external backup system. | |
| `DB01_BACKUP_BLACKOUT_BEGIN` | Use `HHMM` notation to start a blackout period where no backups occur eg `0420` | |
| `DB01_BACKUP_BLACKOUT_END` | Use `HHMM` notation to set the end period where no backups occur eg `0430` | |
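For example, a cron-style schedule combined with a blackout window (values illustrative):

```bash
DB01_BACKUP_BEGIN="30 23 * * *"   # cron expression; DB01_BACKUP_INTERVAL is ignored
DB01_BACKUP_BLACKOUT_BEGIN=0200   # no backups between 02:00
DB01_BACKUP_BLACKOUT_END=0400     # and 04:00
```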
##### Specific Database Options
###### CouchDB
| Variable | Description | Default | `_FILE` |
| ----------- | ------------ | ------- | ------- |
| `DB01_PORT` | CouchDB Port | `5984` | x |
###### InfluxDB
| Variable | Description | Default | `_FILE` |
| --------------------- | ------------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DB01_PORT` | InfluxDB Port | | x |
| | Version 1.x | `8088` | |
| | Version 2.x | `8086` | |
| `DB01_INFLUX_VERSION` | Influx version to back up: `1` (1.x) or `2` (2.x series) - amd64 and aarch64/armv8 only for `2` | `2` | |
> Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`.
> You may use `DB_NAME=ALL` to backup the entire set of databases.
> For `DB_HOST` use syntax of `http(s)://db-name`
###### MariaDB/MySQL
| Variable | Description | Default | `_FILE` |
| ------------------------------- | --------------------------------------------------------------------------------------------------------- | ------------------------- | ------- |
| `DB01_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `DB01_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `DB01_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
| | Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB01_NAME_EXCLUDE` | If using `ALL` - use this to exclude databases, separated by commas, from being backed up | | x |
| `DB01_SPLIT_DB` | If using `ALL` - use this to split each database into its own file as opposed to one singular file | `FALSE` | |
| `DB01_PORT` | MySQL / MariaDB Port | `3306` | x |
| `DB01_MYSQL_EVENTS` | Backup Events | `TRUE` | |
| `DB01_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
| `DB01_MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction | `TRUE` | |
| `DB01_MYSQL_STORED_PROCEDURES` | Backup stored procedures | `TRUE` | |
| `DB01_MYSQL_ENABLE_TLS` | Enable TLS functionality | `FALSE` | |
| `DB01_MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` | |
| `DB01_MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` | |
| `DB01_MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` | x |
| `DB01_MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | | x |
| `DB01_MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | | x |
###### Microsoft SQL
| Variable | Description | Default | `_FILE` |
| ----------------- | --------------------------------------- | ---------- | ------- |
| `DB01_PORT` | Microsoft SQL Port | `1433` | x |
| `DB01_MSSQL_MODE` | Backup `DATABASE` or `TRANSACTION` logs | `DATABASE` | |
###### MongoDB
| Variable | Description | Default | `_FILE` |
| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------- | ------- |
| `DB01_AUTH` | (Optional) Authentication Database | | |
| `DB01_PORT` | MongoDB Port | `27017` | x |
| `DB01_MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | | x |
| | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. | | |
| | You can override them by making your own entries | | |
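A sketch of a custom URI job (the connection string is hypothetical; `DB_NAME` and `DB_HOST` are derived from it for filenames unless you set them yourself):

```bash
DB01_TYPE=mongo
DB01_MONGO_CUSTOM_URI=mongodb+srv://backupuser:secret@cluster0.example.mongodb.net
```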
###### Postgresql
| Variable | Description | Default | `_FILE` |
| ----------------------------- | --------------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DB01_AUTH` | (Optional) Authentication Database | | |
| `DB01_BACKUP_GLOBALS` | Backup Globals after backing up database (forces `TRUE` if `_NAME=ALL`) | `FALSE` | |
| `DB01_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `DB01_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `DB01_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
| | Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB01_SPLIT_DB` | If using `ALL` - use this to split each database into its own file as opposed to one singular file | `FALSE` | |
| `DB01_PORT` | PostgreSQL Port | `5432` | x |
###### Redis
| Variable | Description | Default | `_FILE` |
| ------------------------ | --------------------------------------------------------------------------------------------------------- | ------- | ------- |
| `DB01_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `DB01_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `DB01_PORT` | Redis Port | `6379` | x |
###### SQLite
| Variable | Description | Default | `_FILE` |
| ----------- | -------------------------------------------------------- | ------- | ------- |
| `DB01_HOST` | Enter the full path to DB file e.g. `/backup/db.sqlite3` | | x |
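A minimal sketch; note that for SQLite the `_HOST` variable carries a file path rather than a hostname:

```bash
DB01_TYPE=sqlite3
DB01_HOST=/backup/db.sqlite3   # full path to the database file
```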
##### Specific Storage Options
Options that are related to the value of `DB01_BACKUP_LOCATION`
###### Filesystem
If `DB01_BACKUP_LOCATION` = `FILESYSTEM` then the following options are used:
| Variable | Description | Default |
| --------------------------------- | ----------------------------------------------------------------------------------------------------- | ---------------------------------- |
| `DB01_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `DB01_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
| `DB01_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
| `DB01_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DB01_FILESYSTEM_PATH}/archive/` |
| `DB01_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `600` |
###### S3
If `DB01_BACKUP_LOCATION` = `S3` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| -------------------------- | ----------------------------------------------------------------------------------------- | ------- | ------- |
| `DB01_S3_BUCKET` | S3 Bucket name e.g. `mybucket` | | x |
| `DB01_S3_KEY_ID` | S3 Key ID (Optional) | | x |
| `DB01_S3_KEY_SECRET` | S3 Key Secret (Optional) | | x |
| `DB01_S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | | x |
| `DB01_S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | | x |
| `DB01_S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | | x |
| `DB01_S3_PROTOCOL` | Protocol to connect to `DB01_S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` | x |
| `DB01_S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | | x |
| `DB01_S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | | x |
| _*OR*_ | | | |
| `DB01_S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` | |
> When `DB01_S3_KEY_ID` and/or `DB01_S3_KEY_SECRET` is not set, the container will try to use an assigned IAM role (if any) for uploading the backup files to the S3 bucket.
###### Azure
If `DB01_BACKUP_LOCATION` = `blobxfer` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| ----------------------------------- | ------------------------------------------- | ------------------- | ------- |
| `DB01_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DB01_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
> This service uploads files from the backup directory `DB01_BACKUP_FILESYSTEM_PATH`.
> If a cleanup configuration in `DB01_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
##### Hooks
###### Path Options
| Parameter | Description | Default |
| --------------------------- | --------------------------------------------------------------------------- | ----------------------- |
| `DB01_SCRIPT_LOCATION_PRE` | Location on filesystem inside container to execute bash scripts pre backup | `/assets/scripts/pre/` |
| `DB01_SCRIPT_LOCATION_POST` | Location on filesystem inside container to execute bash scripts post backup | `/assets/scripts/post/` |
| `DB01_PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | |
| `DB01_POST_SCRIPT` | Fill this variable in with a command to execute post backing up | |
###### Pre Backup
If you want to execute a custom script before a backup starts, you can drop bash scripts with the extension of `.sh` in the location defined in `DB01_SCRIPT_LOCATION_PRE`. See the following example to utilize:
```bash
$ cat pre-script.sh
#!/bin/bash
# #### Example Pre Script
# #### $1=DB01_TYPE (Type of Backup)
# #### $2=DB01_HOST (Backup Host)
# #### $3=DB01_NAME (Name of Database backed up)
# #### $4=BACKUP START TIME (Seconds since Epoch)
# #### $5=BACKUP FILENAME (Filename)
echo "${1} Backup Starting on ${2} for ${3} at ${4}. Filename: ${5}"
## script DB01_TYPE DB01_HOST DB01_NAME STARTEPOCH BACKUP_FILENAME
${f} "${backup_job_db_type}" "${backup_job_db_host}" "${backup_job_db_name}" "${backup_routines_start_time}" "${backup_job_filename}"
```
Outputs the following on the console:
`mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_20220315-000000.sql.bz2`
###### Post backup
If you want to execute a custom script at the end of a backup, you can drop bash scripts with the extension of `.sh` in the location defined in `DB01_SCRIPT_LOCATION_POST`. Also, to support legacy users, `/assets/custom-scripts` is scanned and executed. See the following example to utilize:
```bash
$ cat post-script.sh
#!/bin/bash
@@ -247,39 +699,159 @@ $ cat post-script.sh
# #### $8=BACKUP FILENAME (Filename)
# #### $9=BACKUP FILESIZE
# #### $10=HASH (If CHECKSUM enabled)
# #### $11=MOVE_EXIT_CODE
echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes MD5: ${10}"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE MOVE_EXIT_CODE
${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_routines_start_time}" "${backup_routines_finish_time}" "${backup_routines_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
```
Outputs the following on the console:
`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_20220315-000000.sql.bz2 Size: 7795 bytes Hash: 952fbaafa30437494fdf3989a662cd40 0`
If you wish to change the size value from bytes to megabytes set environment variable `DB01_SIZE_VALUE=megabytes`
You must make your scripts executable; otherwise an internal check will skip running them.
If for some reason your filesystem or host is not detecting it right, use the environment variable `DB01_POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass.
#### Notifications
This image can send notifications via a handful of services when a backup job fails. This is a global option that cannot be set individually per backup job.
| Parameter | Description | Default |
| ---------------------- | --------------------------------------------------------------------------------- | ------- |
| `ENABLE_NOTIFICATIONS` | Enable Notifications | `FALSE` |
| `NOTIFICATION_TYPE` | `CUSTOM` `EMAIL` `MATRIX` `MATTERMOST` `ROCKETCHAT` - Separate multiple by commas | |
##### Custom Notifications
The following is sent to the custom script. Use how you wish:
````
$1 unix timestamp
$2 logfile
$3 errorcode
$4 subject
$5 body/error message
````
| Parameter | Description | Default |
| ---------------------------- | ------------------------------------------------------- | ------- |
| `NOTIFICATION_CUSTOM_SCRIPT` | Path and name of custom script to execute notification. | |
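A sketch of such a script, using the argument order listed above (the log destination is hypothetical):

```bash
#!/bin/bash
# arguments per the list above
timestamp="${1}"; logfile="${2}"; errorcode="${3}"; subject="${4}"; body="${5}"
echo "$(date -d "@${timestamp}") exit=${errorcode} ${subject}: ${body} (log: ${logfile})" >> /logs/notifications.log
```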
##### Email Notifications
See more details in the base image listed above for more mail environment variables.
| Parameter | Description | Default | `_FILE` |
| ----------- | ----------------------------------------------------------------------------------------- | ------- | ------- |
| `MAIL_FROM` | What email address to send mail from for errors | | |
| `MAIL_TO` | What email address to send mail to for errors. Send to multiple by separating with commas. | | |
| `SMTP_HOST` | What SMTP server to use for sending mail | | x |
| `SMTP_PORT` | What SMTP port to use for sending mail | | x |
##### Matrix Notifications
Fetch a `MATRIX_ACCESS_TOKEN`:
````
curl -XPOST -d '{"type":"m.login.password", "user":"myuserid", "password":"mypass"}' "https://matrix.org/_matrix/client/r0/login"
````
Copy the JSON response `access_token` that will look something like this:
````
{"access_token":"MDAxO...blahblah","refresh_token":"MDAxO...blahblah","home_server":"matrix.org","user_id":"@myuserid:matrix.org"}
````
| Parameter | Description | Default | `_FILE` |
| --------------------- | ---------------------------------------------------------------------------------------- | ------- | ------- |
| `MATRIX_HOST` | URL (https://matrix.example.com) of Matrix Homeserver | | x |
| `MATRIX_ROOM` | Room ID eg `\!abcdef:example.com` to send to. Send to multiple by separating with commas. | | x |
| `MATRIX_ACCESS_TOKEN` | Access token of user authorized to send to room | | x |
##### Mattermost Notifications
| Parameter | Description | Default | `_FILE` |
| ------------------------ | -------------------------------------------------------------------------------------------- | ------- | ------- |
| `MATTERMOST_WEBHOOK_URL` | Full URL to send webhook notifications to | | x |
| `MATTERMOST_RECIPIENT` | Channel or User to send Webhook notifications to. Send to multiple by separating with commas. | | x |
| `MATTERMOST_USERNAME` | Username to send as eg `tiredofit` | | x |
##### Rocketchat Notifications
| Parameter | Description | Default | `_FILE` |
| ------------------------ | -------------------------------------------------------------------------------------------- | ------- | ------- |
| `ROCKETCHAT_WEBHOOK_URL` | Full URL to send webhook notifications to | | x |
| `ROCKETCHAT_RECIPIENT` | Channel or User to send Webhook notifications to. Send to multiple by separating with commas. | | x |
| `ROCKETCHAT_USERNAME` | Username to send as eg `tiredofit` | | x |
## Maintenance
### Shell Access
For debugging and maintenance purposes you may want to access the container's shell.
```bash
docker exec -it (whatever your container name is) bash
```
### Manual Backups
Manual Backups can be performed by entering the container and typing `backup-now`. This will execute all the backup jobs configured by means of the `DBXX_` variables. Alternatively, if you want to execute a single job on its own, type `backup01-now` (or whatever your job number is), as shown below. There is no concurrency; jobs will be executed sequentially.
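```bash
# container name illustrative
docker exec example-db-backup backup-now     # run every configured job once
docker exec example-db-backup backup01-now   # run only job 01
```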
- Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images, specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup, and then exit cleanly, as in the sketch below. An alternative way to run the script is to execute `/etc/services.available/10-db-backup/run`.
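A minimal sketch of such a one-shot invocation, assuming a single MariaDB job (all names and credentials are illustrative):

```bash
docker run --rm \
  -e MODE=MANUAL \
  -e MANUAL_RUN_FOREVER=FALSE \
  -e CONTAINER_ENABLE_SCHEDULING=FALSE \
  -e CONTAINER_ENABLE_MONITORING=FALSE \
  -e DB01_TYPE=mysql -e DB01_HOST=mariadb -e DB01_NAME=example \
  -e DB01_USER=root -e DB01_PASS=password \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```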
### Restoring Databases
Entering the container and executing `restore` will launch a menu-based script to restore your backups - MariaDB, Postgres, and Mongo supported.
You will be presented with a series of menus allowing you to choose:
- What file to restore
- What type of DB Backup
- What Host to restore to
- What Database Name to restore to
- What Database User to use
- What Database Password to use
- What Database Port to use
The image will try to do autodetection based on the filename for the type, hostname, and database name.
The image will also allow you to use the environment variables or Docker secrets that were used to back up the databases
The script can also be executed skipping the interactive mode by using the following syntax:
`restore <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>`
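For example, inside the container (filename and credentials illustrative):

```bash
restore /backup/mysql_example_example-db_20220315-000000.sql.zst mysql example-db example example examplepassword 3306
```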
If you only enter some of the arguments you will be prompted to fill them in.
If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`
You must make your scripts executable; otherwise an internal check will skip running them.
If for some reason your filesystem or host is not detecting it right, use the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass.
## Support
These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
### Usage
- The [Discussions board](../../discussions) is a great place for working with the community on tips and tricks of using this image.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for personalized support.
- [Sponsor me](https://www.tiredofit.ca/sponsor) for personalized support
### Bugfixes
- Please, submit a [Bug Report](issues/new) if something isn't working as expected. I'll do my best to issue a fix in short order.
### Feature Requests
- Feel free to submit a feature request, however there is no guarantee that it will be added, or at what timeline.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) regarding development of features.
- [Sponsor me](https://www.tiredofit.ca/sponsor) regarding development of features.
### Updates
- Best effort to track upstream changes; more priority if I am actively using the image in a production environment.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for up to date releases.
- [Sponsor me](https://www.tiredofit.ca/sponsor) for up to date releases.
## License
MIT. See [LICENSE](LICENSE) for more details.

examples/.gitignore vendored Normal file

@@ -0,0 +1,5 @@
# See http://help.github.com/ignore-files/ for more about ignoring files.
# Example container mounted folders
**/backups/
**/db/

examples/compose.yml Normal file

@@ -0,0 +1,67 @@
services:
example-db:
hostname: example-db-host
container_name: example-db
image: tiredofit/mariadb:10.11
ports:
- 3306:3306
volumes:
- ./db:/var/lib/mysql
environment:
- ROOT_PASS=examplerootpassword
- DB_NAME=example
- DB_USER=example
- DB_PASS=examplepassword
restart: always
networks:
- example-db-network
example-db-backup:
container_name: example-db-backup
image: tiredofit/db-backup
volumes:
- ./backups:/backup
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- TIMEZONE=America/Vancouver
- CONTAINER_NAME=example-db-backup
- CONTAINER_ENABLE_MONITORING=FALSE
# - DEBUG_MODE=TRUE
- BACKUP_JOB_CONCURRENCY=1 # Only run one job at a time
- DEFAULT_CHECKSUM=NONE # Don't create checksums
- DEFAULT_COMPRESSION=ZSTD # Compress all with ZSTD
- DEFAULT_BACKUP_INTERVAL=1440 # Backup every 1440 minutes
- DEFAULT_BACKUP_BEGIN=0000 # Start backing up at midnight
- DEFAULT_CLEANUP_TIME=8640 # Cleanup backups after a week
- DB01_TYPE=mariadb
- DB01_HOST=example-db-host
- DB01_NAME=example
- DB01_USER=example
- DB01_PASS=examplepassword
- DB01_BACKUP_INTERVAL=30 # (override) Backup every 30 minutes
- DB01_BACKUP_BEGIN=+1 # (override) Backup starts immediately
- DB01_CLEANUP_TIME=180 # (override) Cleanup backups when they are older than 180 minutes
- DB01_CHECKSUM=SHA1 # (override) Create a SHA1 checksum
- DB01_COMPRESSION=GZ # (override) Compress with GZIP
#- DB02_TYPE=postgres
#- DB02_HOST=example-postgres-host
#- DB02_NAME=example
#- DB02_USER=example
#- DB02_PASS=examplepassword
#- DB02_BACKUP_INTERVAL=60 # (override) Backup every 60 minutes
#- DB02_BACKUP_BEGIN=+10 # (override) Backup starts in ten minutes
#- DB02_CLEANUP_TIME=240 # (override) Cleanup backups when they are older than 240 minutes
#- DB02_CHECKSUM=MD5 # (override) Create an MD5 checksum
#- DB02_COMPRESSION=BZ # (override) Compress with BZIP
restart: always
networks:
- example-db-network
networks:
example-db-network:
name: example-db-network


@@ -1,38 +0,0 @@
version: '2'
services:
example-db:
container_name: example-db
image: mariadb:latest
volumes:
- ./db:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=examplerootpassword
- MYSQL_DATABASE=example
- MYSQL_USER=example
- MYSQL_PASSWORD=examplepassword
restart: always
example-db-backup:
container_name: example-db-backup
image: tiredofit/db-backup
links:
- example-db
volumes:
- ./backups:/backup
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- DB_TYPE=mariadb
- DB_HOST=example-db
- DB_NAME=example
- DB_USER=example
- DB_PASS="examplepassword"
- DB_DUMP_FREQ=1440
- DB_DUMP_BEGIN=0000
- DB_CLEANUP_TIME=8640
- CHECKSUM=SHA1
- COMPRESSION=ZSTD
- SPLIT_DB=FALSE
restart: always


@@ -0,0 +1,68 @@
#
# Example for Microsoft SQL Server
# upload with blobxfer to azure storage
#
services:
example-mssql-s3-db:
hostname: example-db-host
image: mcr.microsoft.com/mssql/server:2019-latest
container_name: example-mssql-s3-db
restart: unless-stopped
ports:
- "127.0.0.1:11433:1433"
networks:
example-mssql-blobxfer-net:
volumes:
- ./tmp/backups:/tmp/backups # shared tmp backup directory
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: 5hQa0utRFBpIY3yhoIyE
MSSQL_PID: Express
example-mssql-blobxfer-db-backup:
container_name: example-mssql-blobxfer-db-backup
# if you want to build and use image from current source
# execute in terminal --> docker build -t tiredofit/db-backup-mssql-blobxfer .
# replace --> image: tiredofit/db-backup-mssql
# image: tiredofit/db-backup
image: tiredofit/db-backup
links:
- example-mssql-s3-db
volumes:
- ./backups:/backup
- ./tmp/backups:/tmp/backups # shared tmp backup directory
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- TIMEZONE=America/Vancouver
- CONTAINER_ENABLE_MONITORING=FALSE
- CONTAINER_NAME=example-mssql-blobxfer-db-backup
# - DEBUG_MODE=TRUE
- DB01_TYPE=mssql
- DB01_HOST=example-db-host
# - DB01_PORT=1488
# create database with name `test1` manually first
- DB01_NAME=test1 # Create this database
- DB01_USER=sa
- DB01_PASS=5hQa0utRFBpIY3yhoIyE
- DB01_BACKUP_INTERVAL=5 # backup every 5 minutes
# - DB01_DUMP_BEGIN=0000 # backup starts at midnight vs immediately when not set
- DB01_CLEANUP_TIME=60 # clean backups when they are older than 60 minutes
- DB01_CHECKSUM=SHA1 # Set Checksum to be SHA1
- DB01_COMPRESSION=GZ # Set compression to use GZIP
# === S3 Blobxfer ===
- DB01_BACKUP_LOCATION=blobxfer
# Add here azure storage account
- DB01_BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name}
# Add here azure storage account key
- DB01_BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
- DB01_BLOBXFER_REMOTE_PATH=docker-db-backup
restart: always
networks:
example-mssql-blobxfer-net:
networks:
example-mssql-blobxfer-net:
name: example-mssql-blobxfer-net


@@ -0,0 +1,60 @@
#
# Example for Microsoft SQL Server
#
services:
example-mssql-db:
hostname: example-db-host
image: mcr.microsoft.com/mssql/server:2019-latest
container_name: example-mssql-db
restart: unless-stopped
ports:
- "127.0.0.1:11433:1433"
networks:
example-mssql-net:
volumes:
- ./tmp/backups:/tmp/backups # shared tmp backup directory
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: 5hQa0utRFBpIY3yhoIyE
MSSQL_PID: Express
example-mssql-db-backup:
container_name: example-mssql-db-backup
# if you want to build and use image from current source
# execute in terminal --> docker build -t tiredofit/db-backup-mssql .
# replace --> image: tiredofit/db-backup-mssql
# image: tiredofit/db-backup
image: tiredofit/db-backup
links:
- example-mssql-db
volumes:
- ./backups:/backup
- ./tmp/backups:/tmp/backups # shared tmp backup directory
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- TIMEZONE=America/Vancouver
- CONTAINER_ENABLE_MONITORING=FALSE
- CONTAINER_NAME=example-mssql-blobxfer-db-backup
# - DEBUG_MODE=TRUE
- DB01_TYPE=mssql
- DB01_HOST=example-db-host
# - DB_PORT=1488
# - DB_NAME=ALL # [ALL] not working on sql server.
# create database with name `test1` manually first
- DB01_NAME=test1
- DB01_USER=sa
- DB01_PASS=5hQa0utRFBpIY3yhoIyE
- DB01_BACKUP_INTERVAL=1 # backup every minute
# - DB01_DUMP_BEGIN=0000 # backup starts at midnight vs immediately when unset
- DB01_CLEANUP_TIME=5 # clean backups when they are older than 5 minutes
- DB01_CHECKSUM=NONE
- DB01_COMPRESSION=GZ
restart: always
networks:
example-mssql-net:
networks:
example-mssql-net:
name: example-mssql-net

examples/post-script.sh Executable file → Normal file

@@ -4,7 +4,7 @@
# #### $1=EXIT_CODE (After running backup routine)
# #### $2=DB_TYPE (Type of Backup)
# #### $3=DB_HOST (Backup Host)
# #### #4=DB_NAME (Name of Database backed up
# #### #4=DB_NAME (Name of Database backed up)
# #### $5=BACKUP START TIME (Seconds since Epoch)
# #### $6=BACKUP FINISH TIME (Seconds since Epoch)
# #### $7=BACKUP TOTAL TIME (Seconds between Start and Finish)


@@ -0,0 +1,120 @@
#!/command/with-contenv bash
source /assets/functions/00-container
PROCESS_NAME="db-backup{{BACKUP_NUMBER}}-scheduler"
check_container_initialized
check_service_initialized init 10-db-backup
source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
bootstrap_variables backup_init {{BACKUP_NUMBER}}
bootstrap_variables parse_variables {{BACKUP_NUMBER}}
if [ -z "${backup_job_db_name}" ]; then
PROCESS_NAME="{{BACKUP_NUMBER}}${backup_job_db_host//\//_}"
else
PROCESS_NAME="{{BACKUP_NUMBER}}-${backup_job_db_host//\//_}__${backup_job_db_name}"
fi
trap ctrl_c INT
if [[ "${MODE,,}" =~ "standalone" ]] || [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
print_debug "Detected Manual Mode"
persist=false
backup_job_backup_begin=+0
else
silent sleep {{BACKUP_NUMBER}}
time_last_run=0
time_current=$(date +'%s')
if [[ "${backup_job_backup_begin}" =~ ^\+(.*)$ ]]; then
print_debug "BACKUP_BEGIN is a jump of minute starting with +"
timer plusvalue
elif [[ "${backup_job_backup_begin}" =~ ^[0-9]{4}$ ]]; then
print_debug "BACKUP_BEGIN is a HHMM value"
timer time
elif [[ "${backup_job_backup_begin}" =~ ([0-9]{4})-([0-9]{2})-([0-9]{2})[[:space:]]([0-9]{2}):([0-9]{2}):([0-9]{2}) ]]; then
print_debug "BACKUP_BEGIN is a full date timestamp"
timer datetime
#elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(.*((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then # Allow slashes, yet not supporting advanced cron yet
elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then
print_debug "BACKUP_BEGIN is a cron expression"
if var_false "${CRON_ALTERNATE}"; then
time_last_run=$(date +"%s")
backup_job_backup_begin=${backup_job_backup_begin//\"/}
backup_job_backup_begin=${backup_job_backup_begin//\'/}
timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
else
echo "${backup_job_backup_begin} /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}/run now" > /tmp/.container/cron/{{BACKUP_NUMBER}}-backup
crontab -l | { cat; echo "${backup_job_backup_begin} /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}/run now"; } | crontab -
s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
exit 0
fi
else
print_error "_BACKUP_BEGIN is invalid - Unable to perform scheduling"
cat <<EOF
Valid Methods:
+(number) - Start in however many minutes
HHMM - Start at hour (00-24) and minute (00-59)
YYYY-MM-DD HH:mm:ss - Start at a specific date and time
0 23 * * * - Cron expression
EOF
print_error "Stopping backup_scheduler {{BACKUP_NUMBER}} due to detected errors. Fix and restart container."
stop_scheduler_backup=true
s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
fi
print_debug "Wait Time: ${time_wait} Future execution time: ${time_future} Current Time: ${time_current}"
print_info "Next Backup at $(date -d @"${time_future}" +'%Y-%m-%d %T %Z')"
silent sleep "${time_wait}"
fi
while true; do
if [ -n "${backup_job_blackout_start}" ] && [ -n "${backup_job_blackout_finish}" ] ; then
time_current_hour_minute=$(date +%H%M)
if [[ "${time_current_hour_minute}" > "${backup_job_blackout_start}" ]] && [[ "${time_current_hour_minute}" < "${backup_job_blackout_finish}" ]] ; then
blackout=true
else
blackout=false
fi
fi
if var_true "${blackout}" ; then
print_notice "Detected Blackout Period - Not performing backup operations"
else
timer job start
process_limiter
echo "{{BACKUP_NUMBER}}" >> /tmp/.container/db-backup-backups
print_debug "Backup {{BACKUP_NUMBER}} routines started time: $(date +'%Y-%m-%d %T %Z')"
bootstrap_filesystem
check_availability
backup_"${dbtype,,}"
timer job stop
if [ -z "${exitcode_backup}" ] ; then exitcode_backup="0" ; fi
print_info "Backup {{BACKUP_NUMBER}} routines finish time: $(date -d @"${backup_job_finish_time}" +'%Y-%m-%d %T %Z') with exit code ${exitcode_backup}"
print_notice "Backup {{BACKUP_NUMBER}} routines time taken: $(echo "${backup_job_total_time}" | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
sed -i "/^{{BACKUP_NUMBER}}/d" /tmp/.container/db-backup-backups
fi
symlink_log
if var_false "${persist}" ; then
print_debug "Exiting due to manual mode"
exit "${exitcode_backup}";
else
if var_true "${stop_scheduler_backup}" ; then
print_error "Stopping backup_scheduler {{BACKUP_NUMBER}} due to detected errors. Fix and restart container."
s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
else
if [ ! "${time_cron}" = "true" ]; then
print_notice "Sleeping for another $((backup_job_backup_interval*60-backup_job_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$((backup_job_backup_interval*60-backup_job_total_time))))" +'%Y-%m-%d %T %Z') "
silent sleep $((backup_job_backup_interval*60-backup_job_total_time))
else
time_last_run=$(date +"%s")
timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
print_notice "Sleeping for another ${time_wait} seconds. Waking up at $(date -d@"${time_future}" +'%Y-%m-%d %T %Z') "
silent sleep "${time_wait}"
fi
fi
fi
done
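For context on the four scheduling branches above, _BACKUP_BEGIN accepts a minute offset, a wall-clock time, a full timestamp, or a cron expression. A minimal sketch of each using the DEFAULT_BACKUP_BEGIN variable from the defaults file further down (the docker run invocations are illustrative only, with all other required options omitted):

# Start 5 minutes after the scheduler comes up
docker run -e DEFAULT_BACKUP_BEGIN="+5" tiredofit/db-backup

# Start at 03:30; if that time has already passed today, it waits for tomorrow
docker run -e DEFAULT_BACKUP_BEGIN="0330" tiredofit/db-backup

# Start at an absolute date and time
docker run -e DEFAULT_BACKUP_BEGIN="2024-02-01 03:30:00" tiredofit/db-backup

# Cron expression; with CRON_ALTERNATE=TRUE this is handed off to crontab instead of the internal timer
docker run -e DEFAULT_BACKUP_BEGIN="0 23 * * *" tiredofit/db-backup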

install/assets/defaults/10-db-backup Executable file → Normal file

@@ -1,22 +1,47 @@
#!/command/with-contenv bash
BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
CHECKSUM=${CHECKSUM:-"MD5"}
COMPRESSION=${COMPRESSION:-"ZSTD"}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
BACKUP_JOB_CONCURRENCY=${BACKUP_JOB_CONCURRENCY:-"1"}
CRON_ALTERNATE=${CRON_ALTERNATE:-"TRUE"}
DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
DEFAULT_BACKUP_LOCATION=${DEFAULT_BACKUP_LOCATION:-"FILESYSTEM"}
DEFAULT_BLOBXFER_REMOTE_PATH=${DEFAULT_BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
DEFAULT_CHECKSUM=${DEFAULT_CHECKSUM:-"MD5"}
DEFAULT_COMPRESSION=${DEFAULT_COMPRESSION:-"ZSTD"}
DEFAULT_COMPRESSION_LEVEL=${DEFAULT_COMPRESSION_LEVEL:-"3"}
DEFAULT_CREATE_LATEST_SYMLINK=${DEFAULT_CREATE_LATEST_SYMLINK:-"TRUE"}
DEFAULT_ENABLE_PARALLEL_COMPRESSION=${DEFAULT_ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
DEFAULT_ENCRYPT=${DEFAULT_ENCRYPT:-"FALSE"}
DEFAULT_FILESYSTEM_PATH=${DEFAULT_FILESYSTEM_PATH:-"/backup"}
DEFAULT_FILESYSTEM_PATH_PERMISSION=${DEFAULT_FILESYSTEM_PATH_PERMISSION:-"700"}
DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"}
DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"}
DEFAULT_MYSQL_ENABLE_TLS=${DEFAULT_MYSQL_ENABLE_TLS:-"FALSE"}
DEFAULT_MYSQL_EVENTS=${DEFAULT_MYSQL_EVENTS:-"TRUE"}
DEFAULT_MYSQL_MAX_ALLOWED_PACKET=${DEFAULT_MYSQL_MAX_ALLOWED_PACKET:-"512M"}
DEFAULT_MYSQL_SINGLE_TRANSACTION=${DEFAULT_MYSQL_SINGLE_TRANSACTION:-"TRUE"}
DEFAULT_MYSQL_STORED_PROCEDURES=${DEFAULT_MYSQL_STORED_PROCEDURES:-"TRUE"}
DEFAULT_MYSQL_TLS_CA_FILE=${DEFAULT_MYSQL_TLS_CA_FILE:-"/etc/ssl/cert.pem"}
DEFAULT_MYSQL_TLS_VERIFY=${DEFAULT_MYSQL_TLS_VERIFY:-"FALSE"}
DEFAULT_MYSQL_TLS_VERSION=${DEFAULT_MYSQL_TLS_VERSION:-"TLSv1.1,TLSv1.2,TLSv1.3"}
DEFAULT_MSSQL_MODE=${DEFAULT_MSSQL_MODE:-"database"}
DEFAULT_PARALLEL_COMPRESSION_THREADS=${DEFAULT_PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
DEFAULT_RESOURCE_OPTIMIZED=${DEFAULT_RESOURCE_OPTIMIZED:-"FALSE"}
DEFAULT_S3_CERT_SKIP_VERIFY=${DEFAULT_S3_CERT_SKIP_VERIFY:-"TRUE"}
DEFAULT_S3_PROTOCOL=${DEFAULT_S3_PROTOCOL:-"https"}
DEFAULT_SCRIPT_LOCATION_PRE=${DEFAULT_SCRIPT_LOCATION_PRE:-"/assets/scripts/pre/"}
DEFAULT_SCRIPT_LOCATION_POST=${DEFAULT_SCRIPT_LOCATION_POST:-"/assets/scripts/post/"}
DEFAULT_SIZE_VALUE=${DEFAULT_SIZE_VALUE:-"bytes"}
DEFAULT_SKIP_AVAILABILITY_CHECK=${DEFAULT_SKIP_AVAILABILITY_CHECK:-"FALSE"}
DEFAULT_SPLIT_DB=${DEFAULT_SPLIT_DB:-"TRUE"}
LOG_PATH=${LOG_PATH:-"/logs"}
MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
MODE=${MODE:-"AUTO"}
MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
MYSQL_SINGLE_TRANSACTION=${MYSQL_SINGLE_TRANSACTION:-"TRUE"}
MYSQL_STORED_PROCEDURES=${MYSQL_STORED_PROCEDURES:-"TRUE"}
PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SPLIT_DB=${SPLIT_DB:-"TRUE"}
TEMP_PATH=${TEMP_PATH:-"/tmp/backups"}
if [ -n "${TEMP_LOCATION}" ] ; then TEMP_PATH=${TEMP_LOCATION} ; fi # Backwards compatibility for deprecated TEMP_LOCATION - To be removed 4.3.0

install/assets/functions/10-db-backup Executable file → Normal file

File diff suppressed because it is too large

@@ -6,8 +6,9 @@ prepare_service 03-monitoring
PROCESS_NAME="db-backup"
output_off
sanity_test
setup_mode
create_zabbix dbbackup
db_backup_container_init
create_schedulers backup
create_zabbix dbbackup4
liftoff


@@ -1,88 +0,0 @@
#!/command/with-contenv bash
source /assets/functions/00-container
source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
PROCESS_NAME="db-backup"
bootstrap_variables
if [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
DB_DUMP_BEGIN=+0
manual=TRUE
print_debug "Detected Manual Mode"
else
sleep 5
current_time=$(date +"%s")
today=$(date +"%Y%m%d")
if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
waittime=$(( ${BASH_REMATCH[1]} * 60 ))
target_time=$(($current_time + $waittime))
else
target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
if [[ "$target_time" < "$current_time" ]]; then
target_time=$(($target_time + 24*60*60))
fi
waittime=$(($target_time - $current_time))
fi
print_debug "Wait Time: ${waittime} Target time: ${target_time} Current Time: ${current_time}"
print_info "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
sleep $waittime
fi
while true; do
mkdir -p "${TEMP_LOCATION}"
backup_start_time=$(date +"%s")
print_debug "Backup routines started time: $(date +'%Y-%m-%d %T %Z')"
case "${dbtype,,}" in
"couch" )
check_availability
backup_couch
;;
"influx" )
check_availability
backup_influx
;;
"mssql" )
check_availability
backup_mssql
;;
"mysql" )
check_availability
backup_mysql
;;
"mongo" )
check_availability
backup_mongo
;;
"pgsql" )
check_availability
backup_pgsql
;;
"redis" )
check_availability
backup_redis
;;
"sqlite3" )
check_availability
backup_sqlite3
;;
esac
backup_finish_time=$(date +"%s")
backup_total_time=$(echo $((backup_finish_time-backup_start_time)))
if [ -z "$master_exit_code" ] ; then master_exit_code="0" ; fi
print_info "Backup routines finish time: $(date -d @${backup_finish_time} +"%Y-%m-%d %T %Z") with overall exit code ${master_exit_code}"
print_notice "Backup routines time taken: $(echo ${backup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
cleanup_old_data
if var_true "${manual}" ; then
print_debug "Exiting due to manual mode"
exit ${master_exit_code};
else
print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "
sleep $(($DB_DUMP_FREQ*60-backup_total_time))
fi
done


@@ -1,4 +0,0 @@
#!/command/with-contenv bash
echo '** Performing Manual Backup'
/etc/services.available/10-db-backup/run manual
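The removed file above simply forwarded to the service script with the manual argument. Assuming 4.x still ships an equivalent backup-now wrapper (the command name comes from the project README, not this diff), a one-off backup can be triggered from the host with:

docker exec <container_name> backup-now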


@@ -0,0 +1,24 @@
#!/command/with-contenv bash
source /assets/functions/00-container
source /assets/defaults/05-logging
source /assets/defaults/10-db-backup
## Compress each day-stamped log from 2 days ago, preserving the folder's mtime
timestamp_2dayold_unixtime="$(stat -c %Y "${LOG_PATH}"/"$(date --date='2 days ago' +'%Y%m%d')")"
for logfile in "${LOG_PATH}"/"$(date --date='2 days ago' +'%Y%m%d')"/"$(date --date='2 days ago' +'%Y%m%d')"_*.log ; do
sudo -u "${DBBACKUP_USER}" zstd --rm --rsyncable "${logfile}"
done
touch -t "$(date -d"@${timestamp_2dayold_unixtime}" +'%Y%m%d%H%M.%S')" "${LOG_PATH}"/"$(date --date='2 days ago' +'%Y%m%d')"
# Look for folders older than the retention period and delete them
if [ -n "${LOG_PATH}" ] && [ -d "${LOG_PATH}" ] ; then
find "${LOG_PATH}" -mtime +"${LOGROTATE_RETAIN_DAYS}" -type d -exec rm -rf {} +
fi
# Look for stale symbolic links and delete accordingly
for symbolic_link in "${LOG_PATH}"/latest*.log ; do
if [ ! -e "${symbolic_link}" ] ; then
rm -rf "${symbolic_link}"
fi
done
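Because rotation compresses logs in place with zstd, reading an older log afterwards needs a decompressing reader; a quick sketch (the path and file name are illustrative):

# Page a rotated log without writing a decompressed copy to disk
zstdcat /logs/20240112/20240112_backup.log.zst | less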

File diff suppressed because it is too large


@@ -1,10 +1,9 @@
{
"zabbix_export": {
"version": "6.0",
"date": "2022-03-18T13:32:12Z",
"groups": [
"version": "6.4",
"template_groups": [
{
"uuid": "fa56524b5dbb4ec09d9777a6f7ccfbe4",
"uuid": "10b88d2b3a3a4c72b43bdce9310e1162",
"name": "DB/Backup"
},
{
@@ -14,10 +13,10 @@
],
"templates": [
{
"uuid": "5fc64d517afb4cc5bc09a3ef58b43ef7",
"template": "DB Backup",
"name": "DB Backup",
"description": "Template for Docker DB Backup Image\n\nMeant for use specifically with https://github.com/tiredofit/docker-db-backup\nLast tested with version 3.0.2",
"uuid": "5a16c1bd694145389eed5ee803d954cc",
"template": "DB Backup4",
"name": "DB Backup4",
"description": "Template for Docker DB Backup Image\n\nMeant for use specifically with https://github.com/tiredofit/docker-db-backup Version > 4.0.0\n\nSupports auto discovery of backup jobs and creates graphs and triggers",
"groups": [
{
"name": "DB/Backup"
@@ -26,134 +25,260 @@
"name": "Templates/Databases"
}
],
"items": [
"discovery_rules": [
{
"uuid": "72fd00fa2dd24e479f5affe03e8711d8",
"name": "DB Backup: Backup Duration",
"uuid": "94bb6f862e1841f8b2834b04c41c1d86",
"name": "Backup",
"type": "TRAP",
"key": "dbbackup.backup_duration",
"key": "dbbackup.backup",
"delay": "0",
"history": "7d",
"units": "uptime",
"description": "How long the backup took",
"tags": [
"item_prototypes": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "3549a2c9d56849babc6dc3c855484c1e",
"name": "DB Backup: Backup Time",
"type": "TRAP",
"key": "dbbackup.datetime",
"delay": "0",
"history": "7d",
"units": "unixtime",
"request_method": "POST",
"tags": [
"uuid": "5a2c4d1cacf844829bc1fbf912e071c5",
"name": "[{#NAME}] Checksum - Duration",
"type": "TRAP",
"key": "dbbackup.backup.checksum.duration.[{#NAME}]",
"delay": "0",
"history": "7d",
"units": "uptime",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"tag": "Application",
"value": "DB Backup"
"uuid": "6e49769ec07344a4974b13dab00c3539",
"name": "[{#NAME}] Checksum - Hash",
"type": "TRAP",
"key": "dbbackup.backup.checksum.hash.[{#NAME}]",
"delay": "0",
"history": "30d",
"trends": "0",
"value_type": "TEXT",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "bb6472e30bff4d9c908b1d34b893e622",
"name": "[{#NAME}] Backup - Last Backup",
"type": "TRAP",
"key": "dbbackup.backup.datetime.[{#NAME}]",
"delay": "0",
"history": "7d",
"units": "unixtime",
"description": "Datestamp of last database backup",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"trigger_prototypes": [
{
"uuid": "3681b56bb882466fb304a48b4beb15f0",
"expression": "fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],172800s)=0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],259200s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],345600s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],432800s)<>0",
"name": "[{#NAME}] No backups detected in 2 days",
"priority": "HIGH",
"manual_close": "YES"
},
{
"uuid": "6c70136c84994197b6396a143b4e956f",
"expression": "fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],172800s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],259200s)=0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],345600s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],432800s)<>0",
"name": "[{#NAME}] No backups detected in 3 days",
"priority": "DISASTER",
"manual_close": "YES"
},
{
"uuid": "d2038025cab643019cb9610c301f0cb9",
"expression": "fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],172800s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],259200s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],345600s)=0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],432800s)<>0",
"name": "[{#NAME}] No backups detected in 4 days",
"priority": "DISASTER",
"manual_close": "YES"
},
{
"uuid": "ea85f02d032c4a1dbc1b6e91a3b2b37b",
"expression": "fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],172800s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],259200s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],345600s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],432800s)=0",
"name": "[{#NAME}] No backups detected in 5 days",
"priority": "DISASTER",
"manual_close": "YES"
}
]
},
{
"uuid": "8ec2b2f44ddf4f36b3dbb2aa15e3a32f",
"name": "[{#NAME}] Backup - Duration",
"type": "TRAP",
"key": "dbbackup.backup.duration.[{#NAME}]",
"delay": "0",
"history": "7d",
"units": "uptime",
"description": "How long the DB Backup job took",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "3f0dc3c75261447c93482815c3d69524",
"name": "[{#NAME}] Encrypt - Duration",
"type": "TRAP",
"key": "dbbackup.backup.encrypt.duration.[{#NAME}]",
"delay": "0",
"history": "7d",
"units": "uptime",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "c3d5ad0789c443859d6a673e03db9cec",
"name": "[{#NAME}] Backup - Filename",
"type": "TRAP",
"key": "dbbackup.backup.filename.[{#NAME}]",
"delay": "0",
"history": "30d",
"trends": "0",
"value_type": "TEXT",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "43b700c03897465eb7e49bbfe8fc9fc5",
"name": "[{#NAME}] Backup - Size",
"type": "TRAP",
"key": "dbbackup.backup.size.[{#NAME}]",
"delay": "0",
"history": "7d",
"description": "Backup Size",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"trigger_prototypes": [
{
"uuid": "849f8660bee04427aff55af47b6f509c",
"expression": "last(/DB Backup4/dbbackup.backup.size.[{#NAME}])/last(/DB Backup4/dbbackup.backup.size.[{#NAME}],#2)>1.2",
"name": "[{#NAME}] Backup 20% Greater in size",
"priority": "WARNING",
"manual_close": "YES"
},
{
"uuid": "74d16a7680544c65af22cc568ce3d59d",
"expression": "last(/DB Backup4/dbbackup.backup.size.[{#NAME}])/last(/DB Backup4/dbbackup.backup.size.[{#NAME}],#2)<0.2",
"name": "[{#NAME}] Backup 20% Smaller in Size",
"priority": "WARNING",
"manual_close": "YES"
},
{
"uuid": "5595d769c73f4eaeadda95c84c2c0f17",
"expression": "last(/DB Backup4/dbbackup.backup.size.[{#NAME}])<1K",
"name": "[{#NAME}] Backup Empty",
"priority": "HIGH",
"manual_close": "YES"
}
]
},
{
"uuid": "a6fc542a565c4baba8429ed9ab31b5ae",
"name": "[{#NAME}] Backup - Status",
"type": "TRAP",
"key": "dbbackup.backup.status.[{#NAME}]",
"delay": "0",
"history": "7d",
"description": "Maps exit code by DB Backup procedure",
"valuemap": {
"name": "Backup Status"
},
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"trigger_prototypes": [
{
"uuid": "74b91e28453b4c2a84743f5e371495c1",
"expression": "last(/DB Backup4/dbbackup.backup.status.[{#NAME}])=1",
"name": "[{#NAME}] Backup - Failed with errors",
"priority": "WARNING",
"manual_close": "YES"
}
]
}
],
"triggers": [
"graph_prototypes": [
{
"uuid": "3ac1e074ffea46eb8002c9c08a85e7b4",
"expression": "nodata(/DB Backup/dbbackup.datetime,2d)=1",
"name": "DB-Backup: No backups detected in 2 days",
"priority": "DISASTER",
"manual_close": "YES"
"uuid": "b5e8e9fe0c474fedba2b06366234afdf",
"name": "[{#NAME}] Backup Duration",
"graph_items": [
{
"color": "199C0D",
"calc_fnc": "ALL",
"item": {
"host": "DB Backup4",
"key": "dbbackup.backup.duration.[{#NAME}]"
}
}
]
},
{
"uuid": "b8b5933dfa1a488c9c37dd7f4784c1ff",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)=0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)<>0",
"name": "DB Backup: No Backups occurred in 2 days",
"priority": "AVERAGE"
"uuid": "99b5deb4e28f40059c50846c7be2ef26",
"name": "[{#NAME}] Backup Size",
"graph_items": [
{
"color": "199C0D",
"calc_fnc": "ALL",
"item": {
"host": "DB Backup4",
"key": "dbbackup.backup.size.[{#NAME}]"
}
}
]
},
{
"uuid": "35c5f420d0e142cc9601bae38decdc40",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)=0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)<>0",
"name": "DB Backup: No Backups occurred in 3 days",
"priority": "AVERAGE"
"uuid": "8c641e33659e4c8b866da64e252cfc2a",
"name": "[{#NAME}] Checksum Duration",
"graph_items": [
{
"color": "199C0D",
"calc_fnc": "ALL",
"item": {
"host": "DB Backup4",
"key": "dbbackup.backup.checksum.duration.[{#NAME}]"
}
}
]
},
{
"uuid": "03c3719d82c241e886a0383c7d908a77",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)=0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)<>0",
"name": "DB Backup: No Backups occurred in 4 days",
"priority": "AVERAGE"
},
{
"uuid": "1634a03e44964e42b7e0101f5f68499c",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)=0",
"name": "DB Backup: No Backups occurred in 5 days or more",
"priority": "HIGH"
}
]
},
{
"uuid": "467dfec952b34f5aa4cc890b4351b62d",
"name": "DB Backup: Backup Size",
"type": "TRAP",
"key": "dbbackup.size",
"delay": "0",
"history": "7d",
"units": "B",
"request_method": "POST",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"triggers": [
{
"uuid": "a41eb49b8a3541afb6de247dca750e38",
"expression": "last(/DB Backup/dbbackup.size)/last(/DB Backup/dbbackup.size,#2)>1.2",
"name": "DB Backup: 20% Greater in Size",
"priority": "WARNING",
"manual_close": "YES"
},
{
"uuid": "422f66be5049403293f3d96fc53f20cd",
"expression": "last(/DB Backup/dbbackup.size)/last(/DB Backup/dbbackup.size,#2)<0.2",
"name": "DB Backup: 20% Smaller in Size",
"priority": "WARNING",
"manual_close": "YES"
},
{
"uuid": "d6d9d875b92f4d799d4bc89aabd4e90e",
"expression": "last(/DB Backup/dbbackup.size)<1K",
"name": "DB Backup: empty",
"priority": "HIGH"
}
]
},
{
"uuid": "a6b13e8b46a64abab64a4d44d620d272",
"name": "DB Backup: Last Backup Status",
"type": "TRAP",
"key": "dbbackup.status",
"delay": "0",
"history": "7d",
"description": "Maps Exit Codes received by backup applications",
"valuemap": {
"name": "DB Backup Status"
},
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"triggers": [
{
"uuid": "23d71e356f96493180f02d4b84a79fd6",
"expression": "last(/DB Backup/dbbackup.status)=1",
"name": "DB Backup: Failed Backup Detected",
"priority": "HIGH",
"manual_close": "YES"
"uuid": "65b8770f71ed4cff9111b82c42b17571",
"name": "[{#NAME}] Encrypt Duration",
"graph_items": [
{
"color": "199C0D",
"calc_fnc": "ALL",
"item": {
"host": "DB Backup4",
"key": "dbbackup.backup.encrypt.duration.[{#NAME}]"
}
}
]
}
]
}
@@ -168,38 +293,10 @@
"value": "Database"
}
],
"dashboards": [
{
"uuid": "90c81bb47184401ca9663626784a6f30",
"name": "DB Backup",
"pages": [
{
"widgets": [
{
"type": "GRAPH_CLASSIC",
"name": "Backup Size",
"width": "23",
"height": "5",
"fields": [
{
"type": "GRAPH",
"name": "graphid",
"value": {
"name": "DB Backup: Backup Size",
"host": "DB Backup"
}
}
]
}
]
}
]
}
],
"valuemaps": [
{
"uuid": "82f3a3d01b3c42b8942b59d2363724e0",
"name": "DB Backup Status",
"uuid": "92a87279388b4fd1ac51c1e417e1776e",
"name": "Backup Status",
"mappings": [
{
"value": "0",
@@ -214,36 +311,6 @@
}
]
}
],
"graphs": [
{
"uuid": "6e02c200b76046bab76062cd1ab086b2",
"name": "DB Backup: Backup Duration",
"graph_items": [
{
"color": "199C0D",
"item": {
"host": "DB Backup",
"key": "dbbackup.backup_duration"
}
}
]
},
{
"uuid": "b881ee18f05c4f4c835982c9dfbb55d6",
"name": "DB Backup: Backup Size",
"type": "STACKED",
"graph_items": [
{
"sortorder": "1",
"color": "1A7C11",
"item": {
"host": "DB Backup",
"key": "dbbackup.size"
}
}
]
}
]
}
}
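For reference on the chained fuzzytime() expressions in the trigger prototypes above: fuzzytime(item,N) returns 1 when the item's timestamp is within N seconds of server time and 0 otherwise, so each trigger isolates a single age bucket (172800s = 2 days, 259200s = 3 days, 345600s = 4 days, 432800s is roughly 5 days). A rough shell restatement of the 2-day bucket, illustrative only:

#!/usr/bin/env bash
# Not how Zabbix evaluates triggers; just the same arithmetic spelled out
last_backup_timestamp=${1:-0}                  # unix time last sent to dbbackup.backup.datetime.[{#NAME}]
age=$(( $(date +%s) - last_backup_timestamp ))
if (( age > 172800 && age <= 259200 )); then
echo "No backups detected in 2 days"           # priority HIGH; the 3/4/5 day buckets escalate to DISASTER
fi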