Compare commits


208 Commits
2.4.0 ... 3.5.6

Author SHA1 Message Date
dave@tiredofit.ca
22e126200e Release 3.5.6 - See CHANGELOG.md 2022-11-15 13:07:21 -08:00
dave@tiredofit.ca
3e79ca68a0 Release 3.5.5 - See CHANGELOG.md 2022-10-18 07:52:34 -07:00
Dave Conroy
bfeb07d7c0 Merge pull request #179 from greena13/bugfix/s3_backup_prefixes
Bugfix: Generating S3 prefix to store new backups
2022-10-18 07:51:50 -07:00
Aleck Greenham
8a5d647de7 Merge branch 'master' of github.com:tiredofit/docker-db-backup into bugfix/s3_backup_prefixes
# Conflicts:
#	install/assets/functions/10-db-backup
2022-10-18 07:40:33 +01:00
Aleck Greenham
4f5c04acac Bugfix: Generating S3 prefix to store new backups 2022-10-17 18:17:48 +01:00
dave@tiredofit.ca
494f742cb0 Release 3.5.4 - See CHANGELOG.md 2022-10-13 13:59:22 -07:00
dave@tiredofit.ca
e7b9a36745 Release 3.5.3 - See CHANGELOG.md 2022-10-12 08:42:14 -07:00
dave@tiredofit.ca
28c7058f37 Release 3.5.2 - See CHANGELOG.md 2022-10-11 08:48:05 -07:00
Dave Conroy
6f15c77a0f Merge pull request #174 from jauderho/patch-1
Update Dockerfile to use influxdb client 2.4.0
2022-10-11 08:45:47 -07:00
dave@tiredofit.ca
4e04e31d84 Release 3.5.1 - See CHANGELOG.md 2022-10-11 08:10:42 -07:00
Jauder Ho
f3fad4a893 Update Dockerfile
Update influxdb client to 2.4.0.
2022-10-10 16:44:40 -07:00
dave@tiredofit.ca
69f0ca762c Release 3.5.0 - See CHANGELOG.md 2022-10-10 12:06:38 -07:00
Dave Conroy
07d72163a0 Merge pull request #173 from tiredofit/develop 2022-10-10 12:01:34 -07:00
Dave Conroy
b31da0b785 Merge branch 'master' into develop 2022-10-10 12:01:06 -07:00
Dave Conroy
be490b3f4b Remove url [en|de]code functions 2022-10-10 11:53:09 -07:00
Dave Conroy
028966d0b2 Merge pull request #171 from greena13/bugfix/s3_backup_cleanup
Bugfix: S3 database cleanups
2022-10-09 08:33:45 -07:00
Dave Conroy
19d8e98744 Merge pull request #169 from eoehen/feature/Add-documentation-for-blobxfer
Add blobxfer documentation in readme file.
2022-10-09 08:32:59 -07:00
Aleck Greenham
25def5b6f0 Bugfix: S3 database cleanup path 2022-10-09 13:59:48 +01:00
Aleck Greenham
03b7ef9d0d Bugfix: S3 database cleanups 2022-10-09 12:44:22 +01:00
Dave Conroy
231dd63a38 Remove quote 2022-10-08 11:57:40 -07:00
Elias Oehen
666eb81846 Add blobxfer documentation in readme file. 2022-10-08 16:57:00 +02:00
Dave Conroy
4572ab6fca Add Azure wording to README.md 2022-10-08 07:44:05 -07:00
Dave Conroy
9ba51bcec9 Add py3-cryptography module to allow armv7 builds to finish 2022-10-08 07:19:49 -07:00
Dave Conroy
b9edbf68d3 Merge branch 'master' of https://github.com/tiredofit/docker-db-backup 2022-10-08 06:54:00 -07:00
Dave Conroy
2022158a4e Swap around some environment checks 2022-10-07 08:33:19 -07:00
Dave Conroy
2b441f11e1 Restore old functionality for Mongo backup when not using MONGO_CUSTOM_URI 2022-10-07 08:26:54 -07:00
Dave Conroy
532a6b456b Merge pull request #166 from eoehen/feature/Add-support-for-blobxfer
Add support for blobxfer
2022-10-07 08:05:50 -07:00
Dave Conroy
a8d9a0185f Merge pull request #165 from eoehen/feature/Improve-mssql-support
Improve MSSQL Support
2022-10-07 08:04:59 -07:00
Dave Conroy
8b41f5efcf Allow EXTRA_OPTS with MONGO_CUSTOM_URI 2022-10-07 08:03:24 -07:00
Elias Oehen
956904046d Add support for blobxfer 2022-10-04 19:04:43 +02:00
Elias Oehen
4d7f5e9459 Add mssql docker-compose example 2022-10-04 18:37:34 +02:00
Elias Oehen
d9723823c9 Improve mssql server support 2022-10-04 18:22:20 +02:00
Elias Oehen
0067f552f1 Move mysql example to mysql folder 2022-10-04 18:21:21 +02:00
Dave Conroy
4a8f85ddf5 Add EXTRA_OPTS to Mongo string 2022-09-29 19:35:01 -07:00
Dave Conroy
6de0cc7c03 Wrap mongo_generated_uri in quotes 2022-09-29 16:06:06 -07:00
Dave Conroy
2d017e26c5 Cleanup README 2022-09-25 10:00:33 -07:00
Dave Conroy
1efb2d43a8 Set proper environment variable 2022-09-21 10:51:25 -07:00
Dave Conroy
789aa96113 Alternate way of solving Host check 2022-09-21 09:28:01 -07:00
Dave Conroy
9d8cfd69cb Fix a couple variable issues and add silencing 2022-09-20 17:45:05 -07:00
Dave Conroy
c4dbf53ced Final Availability check 2022-09-20 13:51:54 -07:00
Dave Conroy
8706d3a91c Skip availability checks by default 2022-09-20 12:50:00 -07:00
Dave Conroy
73ad356ebf Force lowercase for filenames and hostnames for filename generation 2022-09-20 12:08:53 -07:00
Dave Conroy
73c4003dc4 Fix environment variable check for Mongo 2022-09-20 12:06:13 -07:00
Dave Conroy
a377f570f1 Introduce MONGO_CUSTOM_URI support 2022-09-20 09:41:44 -07:00
dave@tiredofit.ca
b956bd817f Release 3.4.2 - See CHANGELOG.md 2022-09-19 08:00:15 -07:00
Dave Conroy
7bda69b062 Merge pull request #158 from mark-monteiro/patch-1
Improve release notes for 3.4.1
2022-09-14 09:17:51 -07:00
Mark Monteiro
bc23b6a65e Improve release notes for 3.4.1
Add clear migration instructions for custom script paths
2022-09-14 11:57:10 -04:00
dave@tiredofit.ca
8fb3d8315f Release 3.4.1 - See CHANGELOG.md 2022-09-13 08:24:30 -07:00
Dave Conroy
c16133fdd0 Merge pull request #157 from ToshY/fix/readme-missing-db-dump-target
updated README with missing DB_DUMP_TARGET environment variable
2022-09-12 13:09:37 -07:00
ToshY
6967fd5e56 updated README with missing DB_DUMP_TARGET environment variable 2022-09-12 21:51:14 +02:00
Dave Conroy
75acaefb64 Merge pull request #155 from ToshY/fix/readme-custom-scripts-directory
updated README with correct custom scripts directory
2022-09-12 12:24:02 -07:00
ToshY
6933b0f87c updated README with correct custom scripts directory 2022-09-12 21:17:38 +02:00
dave@tiredofit.ca
dc4ab0bfc5 Release 3.4.0 - See CHANGELOG.md 2022-09-12 07:50:55 -07:00
Dave Conroy
9ea34f5a44 Add MongoDB Atlas Support 2022-09-12 07:49:29 -07:00
Dave Conroy
1d53785e7d Fix default port for Influx 2 DBs 2022-09-12 07:19:18 -07:00
Dave Conroy
4e0878b2ad Merge pull request #150 from teun95/teun95-add-rsync
Add --rsyncable for gzip compression
2022-09-12 07:17:41 -07:00
teun95
a98d33bfdb Correct table formatting in README.md 2022-09-08 11:04:11 +00:00
teun95
00c851eda2 Update README.md to include GZ_RSYNCABLE 2022-09-07 19:54:41 +01:00
teun95
cd88285036 Added rsyncable option for gzip using GZ_RSYNCABLE
GZ_RSYNCABLE=TRUE enables --rsyncable for gzip compression. Useful to speed up backups, reduce size of incremental backups, and allow for better deduplication.
2022-09-07 19:50:50 +01:00
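For context, `--rsyncable` makes gzip-family compressors periodically reset their state at input boundaries, so a small change in a dump no longer rewrites the entire compressed stream downstream of it. A hedged sketch of how the option plausibly maps onto the compressor command line (illustrative only; the container's real logic lives in `install/assets/functions/10-db-backup`):

```bash
#!/bin/sh
# Illustrative sketch: GZ_RSYNCABLE=TRUE appends --rsyncable to the
# gzip-family invocation (GNU gzip and pigz both accept the flag).
COMPRESSION_LEVEL="${COMPRESSION_LEVEL:-3}"
compress_cmd="pigz -${COMPRESSION_LEVEL}"
if [ "${GZ_RSYNCABLE:-FALSE}" = "TRUE" ]; then
    compress_cmd="${compress_cmd} --rsyncable"
fi
# Stand-in for the real database dump command:
echo "dummy dump data" | ${compress_cmd} > backup.sql.gz
```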
dave@tiredofit.ca
428c313c7b Release 3.3.12 - See CHANGELOG.md 2022-08-15 12:19:55 -07:00
Dave Conroy
210acb1e2a Merge pull request #143 from arifer612/patch-1
Fix incorrect case for filesize variable
2022-08-15 12:19:12 -07:00
Arif Er
e50a8cb0ec fix: correct case for filesize variable
Post script support expects a value from a declared variable `$FILESIZE` to provide the size of the backup files. Such a variable does not exist, leading to a situation where using `"${9}"` in a custom script furnishes the checksum hash. However, earlier up in the script the file size of the backup is indeed assigned to a variable, only that is it completely in lower case: `$filesize`. This commit aims to fix that inconsistency.
2022-08-15 21:51:40 +08:00
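For reference, the README documents nine positional arguments handed to post scripts: exit code, DB type, host, database name, date, time, backup filename, file size, and checksum. A script reading the size from `"${8}"` therefore depends on that slot being populated, which is what this fix restores. A minimal post-script sketch using the documented order:

```bash
#!/bin/bash
# Minimal post-backup script using the argument order from the README:
# $1 exit code, $2 db type, $3 host, $4 db name, $5 date, $6 time,
# $7 backup filename, $8 file size, $9 checksum.
EXIT_CODE="${1}"
BACKUP_FILE="${7}"
FILESIZE="${8}"
CHECKSUM="${9}"
echo "backup ${BACKUP_FILE} finished (code ${EXIT_CODE}): size ${FILESIZE}, checksum ${CHECKSUM}"
```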
Dave Conroy
7453852046 Release 3.3.11 - See CHANGELOG.md 2022-07-22 12:00:05 -07:00
Dave Conroy
f115a89a3c Merge pull request #141 from khoazero123/fix_postgres_restore
Fix postgres restore wrong db type
2022-07-22 11:58:57 -07:00
KhoaZero123
8b8d243944 Fix postgres restore wrong db type 2022-07-22 09:41:55 +07:00
dave@tiredofit.ca
be34ceb6ff Release 3.3.10 - See CHANGELOG.md 2022-07-19 12:16:29 -07:00
Dave Conroy
82d6ce444b Merge branch 'master' of https://github.com/tiredofit/docker-db-backup 2022-07-11 09:26:38 -07:00
Dave Conroy
382a188b77 Release 3.3.9 - See CHANGELOG.md 2022-07-11 09:26:35 -07:00
Dave Conroy
f458c34546 Merge pull request #140 from fdarveau/fix-read-port-number-ineractive-restore
Fix reading port number in interactive restore
2022-07-11 09:25:41 -07:00
François Darveau
229db5cd03 Fix reading port number in interactive restore 2022-07-10 16:44:05 -04:00
dave@tiredofit.ca
8bb926fd20 Release 3.3.8 - See CHANGELOG.md 2022-07-09 08:59:59 -07:00
dave@tiredofit.ca
f005956c47 Release 3.3.7 - See CHANGELOG.md 2022-06-23 11:49:28 -07:00
dave@tiredofit.ca
ba20386e65 Release 3.3.6 - See CHANGELOG.md 2022-06-23 08:18:08 -07:00
dave@tiredofit.ca
12211d3b67 Release 3.3.5 - See CHANGELOG.md 2022-06-08 09:01:44 -07:00
Dave Conroy
83693d35b2 Release 3.3.4 - See CHANGELOG.md 2022-06-03 05:10:53 -07:00
Dave Conroy
52b726c821 Merge pull request #132 from rozdzynski/master
Unary operator fix
2022-06-03 05:09:27 -07:00
rozdzynski
5c43b3c907 unary operator fix 2022-06-03 14:02:42 +02:00
dave@tiredofit.ca
005e7f6e47 Release 3.3.3 - See CHANGELOG.md 2022-05-24 08:26:45 -07:00
Dave Conroy
7d7cb9587d Release 3.3.2 - See CHANGELOG.md 2022-05-02 22:28:08 -07:00
Dave Conroy
d1713fe3f0 Release 3.3.1 - See CHANGELOG.md 2022-04-30 22:31:34 -07:00
Dave Conroy
d1e98d9c4b Release 3.3.0 - See CHANGELOG.md 2022-04-30 03:23:45 -07:00
Dave Conroy
0920b671cb Release 3.2.6 - See CHANGELOG.md 2022-04-25 10:29:45 -07:00
Dave Conroy
28ed6c3bb8 Release 3.2.5 - See CHANGELOG.md 2022-04-23 14:11:29 -07:00
Dave Conroy
c1bdf26598 Release 3.2.4 - See CHANGELOG.md 2022-04-21 16:04:43 -07:00
Dave Conroy
5a4cac2cee Release 3.2.3 - See CHANGELOG.md 2022-04-21 15:46:27 -07:00
Dave Conroy
c04eec7661 Add space after compress_cmd 2022-04-21 14:19:09 -07:00
Dave Conroy
32f1959a07 Merge pull request #120 from joergmschulz/patch-1
small typo / exiting instead of exitting
2022-04-21 14:18:43 -07:00
joergmschulz
d384d5a529 small typo / exiting instead of exitting 2022-04-21 23:16:02 +02:00
Dave Conroy
56ab68dd71 Release 3.2.2 - See CHANGELOG.md 2022-04-21 12:14:17 -07:00
Dave Conroy
9a1a5efbd9 Do a different DB Ready check for Influx 1 vs 2 2022-04-21 12:12:57 -07:00
Dave Conroy
df5532c128 Fix blank database size when compression type=NONE 2022-04-21 12:08:51 -07:00
Dave Conroy
2ecd313778 Change database variables around 2022-04-21 12:08:27 -07:00
Dave Conroy
55cfe5ab02 Release 3.2.1 - See CHANGELOG.md 2022-04-03 10:28:07 -07:00
Dave Conroy
ae2e2c38e2 Sanitize DB_HOST for URLs 2022-04-02 07:37:34 -07:00
Dave Conroy
c23d7991fe Release 3.2.0 - See CHANGELOG.md 2022-04-01 18:41:58 -07:00
Dave Conroy
3f58cfd284 Release 3.1.3 - See CHANGELOG.md 2022-03-30 10:46:16 -07:00
Dave Conroy
2d01f5e692 Fix for MARIADB not sending DB name for post script 2022-03-30 10:45:27 -07:00
Dave Conroy
dbd0a03b0d SPLIT_DB is supposed to be TRUE 2022-03-30 10:43:22 -07:00
Dave Conroy
6527f4ff63 Add sanity checks for Post scripts to be executible 2022-03-30 10:37:58 -07:00
Dave Conroy
d843d21a1b Release 3.1.2 - See CHANGELOG.md 2022-03-29 08:09:36 -07:00
Dave Conroy
24ed769429 Release 3.1.1 - See CHANGELOG.md 2022-03-28 10:29:00 -07:00
Dave Conroy
cbd87a5ede Update README.md 2022-03-23 19:16:51 -07:00
Dave Conroy
13214665c9 Release 3.1.0 - See CHANGELOG.md 2022-03-23 16:21:12 -07:00
Dave Conroy
2e71f617a1 Merge pull request #107 from piemonkey/mongo-restore
Add Mongo support to restore script
2022-03-23 12:24:43 -07:00
Dave Conroy
fbe9dde4a1 Release 3.0.16 - See CHANGELOG.md 2022-03-23 07:57:28 -07:00
Dave Conroy
eb2a18672b Release 3.0.15 - See CHANGELOG.md 2022-03-22 18:27:57 -07:00
Dave Conroy
5f784ed156 Tweak Example 2022-03-22 09:57:28 -07:00
Dave Conroy
d9a4690ea2 Release 3.0.14 - See CHANGELOG.md 2022-03-22 07:52:15 -07:00
Rich
e7eb88c32a Give feedback if restore script doesn't support db type 2022-03-22 09:12:34 +01:00
Rich
52dc510b89 Add auto restore support for mongodb 2022-03-22 09:12:16 +01:00
Rich
06677dbc8b Fix typo setting dbhost in restore script 2022-03-22 09:12:05 +01:00
Rich
e0dd2bc91b Set db type from env vars correctly during restore 2022-03-22 09:11:54 +01:00
Dave Conroy
baba842373 Release 3.0.13 - See CHANGELOG.md 2022-03-21 16:26:45 -07:00
Dave Conroy
108938c17a Release 3.0.12 - See CHANGELOG.md 2022-03-21 13:51:01 -07:00
Dave Conroy
b0b39fa8c1 Release 3.0.11 - See CHANGELOG.md 2022-03-21 12:34:33 -07:00
Dave Conroy
fa8f43132c Release 3.0.10 - See CHANGELOG.md 2022-03-21 11:19:17 -07:00
Dave Conroy
3f693feefc Release 3.0.9 - See CHANGELOG.md 2022-03-21 10:57:17 -07:00
Dave Conroy
bc32b7d084 Release 3.0.8 - See CHANGELOG.md 2022-03-21 10:47:18 -07:00
Dave Conroy
f7f6a646a0 Release 3.0.7 - See CHANGELOG.md 2022-03-21 10:32:57 -07:00
Dave Conroy
b755497062 Release 3.0.6 - See CHANGELOG.md 2022-03-21 10:32:27 -07:00
Dave Conroy
656bca02cd Release 3.0.5 - See CHANGELOG.md 2022-03-21 09:44:05 -07:00
Dave Conroy
da0c7f9a03 Release 3.0.4 - See CHANGELOG.md 2022-03-21 09:04:52 -07:00
Dave Conroy
b8d7832145 Release 3.0.3 - See CHANGELOG.md 2022-03-21 08:07:26 -07:00
Dave Conroy
a4d7d833b7 Release 3.0.2 - See CHANGELOG.md 2022-03-18 06:37:19 -07:00
Dave Conroy
24b3239e9f Send proper Zabbix value for Exit Code 2022-03-18 06:34:16 -07:00
Dave Conroy
ac5a09361a Add updated Zabbix template 2022-03-18 06:33:38 -07:00
Dave Conroy
179b39e7d5 Don't fail the script when there is an error code 1 2022-03-18 06:10:33 -07:00
Dave Conroy
22db6d79a7 Cleanup backup start time argument for POST_SCRIPT environment var 2022-03-18 06:01:24 -07:00
Dave Conroy
f725b59c5c Cleanup DEBUG Mode backup duration output 2022-03-18 06:00:39 -07:00
Dave Conroy
19f8a6f167 Release 3.0.1 - See CHANGELOG.md 2022-03-17 18:20:14 -07:00
Dave Conroy
ed98621984 Release 3.0.0 - See CHANGELOG.md 2022-03-17 16:38:05 -07:00
Dave Conroy
bb4df1b32c Release 2.12.0 - See CHANGELOG.md 2022-03-16 19:18:40 -07:00
Dave Conroy
dbaeeabd53 Release 2.11.5 - See CHANGELOG.md 2022-03-15 14:35:57 -07:00
Dave Conroy
c515e2aaa0 Release 2.11.4 - See CHANGELOG.md 2022-03-15 10:29:37 -07:00
Dave Conroy
e5dea03d9e Github Workflow fix 2022-03-01 16:11:30 -08:00
Dave Conroy
31e4487827 Update github configuration 2022-02-10 09:28:38 -08:00
Dave Conroy
fa1dde53c0 Release 2.11.3 - See CHANGELOG.md 2022-02-09 19:34:27 -08:00
Dave Conroy
789a9e5f5a Release 2.11.2 - See CHANGELOG.md 2022-02-09 17:14:25 -08:00
Dave Conroy
b80d61f997 Merge pull request #102 from jacksgt/fix-s3-variable
Fix S3 variables (again)
2022-02-08 13:58:33 -08:00
Jack Henschel
9e23def7b4 Fix S3 variables (again)
S3_ENDPOINT is not used anywhere in the code, but S3_HOST.
2022-02-08 21:52:53 +01:00
Dave Conroy
ca31eb840d 2.11.1 - See CHANGELOG.md 2022-02-06 17:03:36 -08:00
Dave Conroy
6bec188633 Merge pull request #101 from jacksgt/add-s3-endpoint
Correct several variables for S3 backups
2022-02-06 17:01:18 -08:00
Dave Conroy
43562077b0 Merge pull request #100 from jacksgt/patch-1
Use exit code 0 when running in manual mode
2022-02-06 16:59:00 -08:00
Jack Henschel
c39bdeebae Correct several variables for S3 backups
* S3_URI_STYLE is not used anywhere (anymore)
* BACKUP_TYPE is not documented anywhere, redundant with BACKUP_LOCATION
* S3_HOST is not used anymore, document S3_ENDPOINT instead

ref 4d6419fd18
2022-02-06 00:50:35 +01:00
Jack Henschel
36b5909483 Use exit code 0 when running in manual mode
When running this container as a Kubernetes cronjob, it is really inconvenient that the script exits with return code "1", thereby marking the job as a failure.
Instead, the script should return "0" because everything was successful.
2022-02-06 00:20:30 +01:00
Dave Conroy
6033b1b0a9 Update README to talk about quoting the values 2022-01-31 13:59:53 -08:00
Dave Conroy
88218915e1 Release 2.11.0 - See CHANGELOG.md 2022-01-20 09:23:06 -08:00
Dave Conroy
065887f789 Release 2.10.3 - See CHANGELOG.md 2022-01-07 06:33:46 -08:00
Dave Conroy
5aba713b73 Release 2.10.2 - See CHANGELOG.md 2021-12-28 14:16:46 -08:00
Dave Conroy
9a6039d71d Release 2.10.1 - See CHANGELOG.md 2021-12-24 17:43:18 -08:00
Dave Conroy
c5f1618231 Merge pull request #93 from milenkara/master
Provide region when using S3
2021-12-24 17:42:16 -08:00
milenkara
7dd9fa890f Provide region when using S3 2021-12-24 16:26:03 +01:00
Dave Conroy
b62554ceff Release 2.10.0 - See CHANGELOG.md 2021-12-22 14:29:35 -08:00
Dave Conroy
7729743ccf Release 2.9.7 - See CHANGELOG.md 2021-12-15 07:17:57 -08:00
Dave Conroy
d56efc0ee9 Release 2.9.6 - See CHANGELOG.md 2021-12-13 17:40:43 -08:00
Dave Conroy
7d87e474e0 Merge branch 'master' of https://github.com/tiredofit/docker-db-backup 2021-12-13 17:38:25 -08:00
Dave Conroy
342c252d9a Release 2.9.5 - See CHANGELOG.md 2021-12-13 17:33:14 -08:00
Dave Conroy
25f3cab21f Merge pull request #91 from alexbarcelo/minio_fix
MINIO support by reacting to S3_HOST
2021-12-13 17:30:50 -08:00
Dave Conroy
e63d56c753 Merge pull request #92 from alexbarcelo/targettimeprint
Fixing the print_notice for Next Backup when `DB_DUMP_BEGIN` is +XXX
2021-12-13 17:27:58 -08:00
Alex Barcelo
86722a8e8a defining target_time variable in that branch 2021-12-13 21:56:45 +01:00
Alex Barcelo
4d6419fd18 reacting to S3_HOST config envvar by setting the --endpoint-url parameter on AWS CLI 2021-12-13 21:49:02 +01:00
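The change amounts to one conditional flag on the AWS CLI call; a hedged sketch of the mapping the commit describes (host, bucket, and the helper variable name are placeholders):

```bash
#!/bin/bash
# If S3_HOST points at a MinIO (or other S3-compatible) server, pass it
# to the AWS CLI as --endpoint-url; otherwise talk to AWS proper.
S3_HOST="https://minio.example.com"   # placeholder
endpoint_arg=""
if [ -n "${S3_HOST}" ]; then
    endpoint_arg="--endpoint-url ${S3_HOST}"
fi
aws ${endpoint_arg} s3 cp backup.sql.gz "s3://my-bucket/backups/"
```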
Dave Conroy
99153ac6d1 Release 2.9.5 2021-12-07 15:10:28 -08:00
Dave Conroy
142967135d Release 2.9.4 - See CHANGELOG.md 2021-12-07 15:02:47 -08:00
Dave Conroy
1df66853fb Release 2.9.3 - See CHANGELOG.md 2021-11-24 09:53:49 -08:00
Dave Conroy
c019efeb74 Release 2.9.2 - See CHANGELOG.md 2021-10-22 07:12:05 -07:00
Dave Conroy
f276af2512 Merge pull request #86 from teenigma/backup-redis
Fix compression on Redis backup - Fixes #58
2021-10-22 07:10:00 -07:00
Teerapatr K
4e41e66eff Fix compression on Redis backup file 2021-10-22 14:06:37 +07:00
Teerapatr K
4488d113ef Jump out loop when complete 2021-10-22 14:04:54 +07:00
Dave Conroy
1cd014b165 Add manual CI pipeline 2021-10-17 09:41:53 -07:00
Dave Conroy
39bd8537ff Add manual CI pipeline 2021-10-17 09:41:18 -07:00
Dave Conroy
75ded7599c Release 2.9.1 - See CHANGELOG.md 2021-10-15 16:27:19 -07:00
Dave Conroy
5e5986db69 Merge pull request #83 from sbrunecker/bug/79/fix-mysql-8
fix: not able to connect to mysql 8 db with caching_sha2_password
2021-10-15 16:25:20 -07:00
Dave Conroy
1d61f40d0c Release 2.9.1 - See CHANGELOG.md 2021-10-15 16:24:46 -07:00
Dave Conroy
ee8bbb370b Merge pull request #84 from sbrunecker/bug/80/empty-password
fix: db available check getting stuck with empty password
2021-10-15 16:23:26 -07:00
Stefan Brunecker
ae201814fa fix: db available check getting stuck with empty password 2021-10-16 00:50:15 +02:00
Stefan Brunecker
bf8bce0893 fix: not able to connect to mysql 8 db with caching_sha2_password 2021-10-16 00:49:27 +02:00
Dave Conroy
3e8585394d Release 2.9.0 - See CHANGELOG.md 2021-10-15 10:08:27 -07:00
Dave Conroy
2e0c0d9248 Release 2.8.2 - See CHANGELOG.md 2021-10-15 09:52:25 -07:00
Dave Conroy
c81d8e2713 Release 2.8.1 - See CHANGELOG.md 2021-09-01 07:43:01 -07:00
Dave Conroy
b808b35624 Release 2.8.0 - See CHANGELOG.md 2021-08-27 07:22:59 -07:00
Dave Conroy
e060aeb0e5 Merge pull request #70 from the1ts/master
Fix syntax error - Issue #69
2021-06-22 08:26:58 -07:00
Paul Mansfield
03c16cc582 Fix syntax error
Case statement is missing double semi-colon.
2021-06-22 14:06:07 +01:00
Dave Conroy
e45d916b00 Release 2.7.0 - See CHANGELOG.md 2021-06-17 08:45:57 -07:00
Dave Conroy
eb0ee61662 Mongo DB Authentication Database 2021-06-17 07:22:38 -07:00
Dave Conroy
8e737eb579 Update to Alpine 3.14 2021-06-17 07:17:34 -07:00
Dave Conroy
5d2043a603 Merge pull request #63 from james-song/master
Fixed #62
2021-06-12 09:03:41 -07:00
Dave Conroy
89fe251321 Update CHANGELOG.md 2021-06-08 14:16:31 -07:00
Dave Conroy
e9052f1cd6 Release 2.6.1 - See CHANGELOG.md 2021-06-08 13:36:22 -07:00
Dave Conroy
a73528c5b2 Merge pull request #66 from jwillmer/patch-1
Fix for issue #14 - Use db_name in query
2021-06-08 13:33:35 -07:00
Jens Willmer
7d1e112dfc Fix for issue #14 - Use db_name in query
As pointed out by @wglambert the db_name needs to be specified when POSTGRES_DB was set: https://github.com/docker-library/postgres/issues/854#issuecomment-856877311
2021-06-08 22:10:58 +02:00
Dave Conroy
e0bb4c313b Merge pull request #65 from MadWalnut/master
Fix Docker image name and URL
2021-06-01 16:22:32 -07:00
MadWalnut
cb5333a59c Fix Docker image name and URL 2021-06-02 00:34:00 +02:00
Dave Conroy
7b59632378 Update README 2021-05-27 08:40:48 -07:00
james-song
353b1af7b4 Fixed #62, for upload backup files at S3 using awscli 2021-05-10 16:13:21 +09:00
Dave Conroy
a27b46a43a Update README 2021-05-02 16:18:28 -07:00
Dave Conroy
e5b9ee2cc0 Add Issue Templates 2021-05-02 14:48:24 -07:00
Dave Conroy
6ddb749bc6 Add Issue Templates 2021-05-02 14:47:56 -07:00
Dave Conroy
fb299c41dd Update README.md 2021-03-15 17:48:00 -07:00
Dave Conroy
d7d4f1cc19 Merge branch 'master' of https://github.com/tiredofit/docker-db-backup 2021-02-19 08:33:48 -08:00
Dave Conroy
c8c9a80533 Release 2.6.0 - See CHANGELOG.md 2021-02-19 08:33:43 -08:00
Dave Conroy
018234b9bc Merge pull request #56 from tpansino/add-sqlite-support
Add sqlite support
2021-02-19 08:32:52 -08:00
Tom Pansino
912e60edd8 Exit on failed file checks 2021-02-18 00:35:06 -08:00
Tom Pansino
46fddb533c Add sqlite3 to README 2021-02-18 00:09:17 -08:00
Tom Pansino
e8a1859d1a Add initial sqlite3 support 2021-02-17 23:55:22 -08:00
Dave Conroy
30fe2f181c #55 - Fix xz parallel compression 2021-02-14 09:06:21 -08:00
Dave Conroy
f57ce461e9 GitHub CI 2021-01-25 17:05:25 -08:00
Dave Conroy
34aab69cc2 Release 2.5.0 - See CHANGELOG.md 2021-01-25 16:39:42 -08:00
Dave Conroy
1930358775 Multi Arch CI 2021-01-21 15:35:16 -08:00
29 changed files with 3472 additions and 1432 deletions

1
.dockerignore Normal file

@@ -0,0 +1 @@
examples/

2
.gitattributes vendored Normal file

@@ -0,0 +1,2 @@
# Declare files that will always have LF line endings on checkout.
*.* text eol=lf

42
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file

@@ -0,0 +1,42 @@
---
name: Bug report
about: If something isn't working right..
title: ''
labels: bug
assignees: ''
---
### Summary
<!-- Summarize the bug encountered -->
### Steps to reproduce
<!-- Describe how one can reproduce the issue - this is very important. Please use an ordered list. -->
### What is the expected *correct* behavior?
<!-- Describe what should be seen instead. -->
### Relevant logs and/or screenshots
<!-- Paste any relevant logs - please use code blocks (```) to format console output, logs, and code as it's tough to read otherwise. -->
### Environment
<!--Your Configuration (please complete the following information): -->
- Image version / tag:
- Host OS:
<details>
<summary>Any logs | docker-compose.yml</summary>
</details>
<!-- Include anything additional -->
### Possible fixes
<!-- If you can, provide details to the root cause that might be responsible for the problem. -->

23
.github/ISSUE_TEMPLATE/feature_request.md vendored Normal file

@@ -0,0 +1,23 @@
---
name: Feature request
about: Suggest an idea or feature
title: ''
labels: enhancement
assignees: ''
---
---
name: Feature Request
about: Suggest an idea for this project
---
**Description of the feature**
<!-- A clear description of the feature you'd like implemented -->
**Benefits of feature**
<!-- Explain the measurable benefits this feature would achieve. -->
**Additional context**
<!--Add any other context or screenshots about the feature request here. -->

1
.github/config.yml vendored Normal file

@@ -0,0 +1 @@
blank_issues_enabled: false

7
.github/dependabot.yml vendored Normal file

@@ -0,0 +1,7 @@
version: 2
updates:
# Maintain dependencies for GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

111
.github/workflows/main.yml vendored Normal file

@@ -0,0 +1,111 @@
### Application Level Image CI
### Dave Conroy <dave at tiredofit dot ca>
name: 'build'
on:
push:
paths:
- '**'
- '!README.md'
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
if [[ $GITHUB_REF == refs/heads/* ]]; then
if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
else
BRANCH=${GITHUB_REF#refs/heads/}
fi
case ${BRANCH} in
"main" | "master" )
BRANCHTAG="${DOCKER_IMAGE}:latest"
;;
"develop" )
BRANCHTAG="${DOCKER_IMAGE}:develop"
;;
* )
if [ -n "${{ secrets.LATEST }}" ] ; then
if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
;;
esac
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
fi
if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
TAGS=${BRANCHTAG},${GITTAG}
else
TAGS="${BRANCHTAG}${GITTAG}"
fi
echo ::set-output name=tags::${TAGS}
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Label
id: Label
run: |
if [ -f "Dockerfile" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
if [ -f "CHANGELOG.md" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
fi
if [[ $GITHUB_REF == refs/heads/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
fi
fi
- name: Build
uses: docker/build-push-action@v3
with:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm/v7,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}

111
.github/workflows/manual.yml vendored Normal file

@@ -0,0 +1,111 @@
# Manual Workflow (Application)
name: manual
on:
workflow_dispatch:
inputs:
Manual Build:
description: 'Manual Build'
required: false
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
if [[ $GITHUB_REF == refs/heads/* ]]; then
if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
else
BRANCH=${GITHUB_REF#refs/heads/}
fi
case ${BRANCH} in
"main" | "master" )
BRANCHTAG="${DOCKER_IMAGE}:latest"
;;
"develop" )
BRANCHTAG="${DOCKER_IMAGE}:develop"
;;
* )
if [ -n "${{ secrets.LATEST }}" ] ; then
if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
;;
esac
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
fi
if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
TAGS=${BRANCHTAG},${GITTAG}
else
TAGS="${BRANCHTAG}${GITTAG}"
fi
echo ::set-output name=tags::${TAGS}
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Label
id: Label
run: |
if [ -f "Dockerfile" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
if [ -f "CHANGELOG.md" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
fi
if [[ $GITHUB_REF == refs/heads/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
fi
fi
- name: Build
uses: docker/build-push-action@v3
with:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm/v7,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}

CHANGELOG.md

@@ -1,3 +1,548 @@
## 3.5.6 2022-11-15 <dave at tiredofit dot ca>
### Changed
- Add failure if DB_TYPE empty or malformed
## 3.5.5 2022-10-18 <dave at tiredofit dot ca>
### Changed
- Fix for S3 backups and trailing slashes (@greena13)
## 3.5.4 2022-10-13 <dave at tiredofit dot ca>
### Changed
- Fix for Influx DB 1 backups when compression enabled
## 3.5.3 2022-10-12 <dave at tiredofit dot ca>
### Changed
- Remove build dependencies for blobxfer making image size smaller
- Remove silencing commands limiting Postgres backups from working without DEBUG_MODE=TRUE
## 3.5.2 2022-10-11 <dave at tiredofit dot ca>
### Added
- Update Influxdb client to 2.4.0 (jauderho@github)
## 3.5.1 2022-10-11 <dave at tiredofit dot ca>
### Changed
- Tighten up cleanup routines to not call blobxfer unless absolutely necessary
## 3.5.0 2022-10-10 <dave at tiredofit dot ca>
### Added
- Blobxfer / Microsoft Azure Support (credit: eoehen@github)
- Introduce MONGO_CUSTOM_URI environment variable for those not wanting to use DB_* variables
### Changed
- Force filenames to be in lowercase
- Fix S3 Database cleanups (credit greena13@github)
- Remove MONGO_DB_TYPE variable as MONGO_CUSTOM_URI overrides
- Fix MSSQL Backups (credit eoehen@github)
- Separate examples for MySQL and MSSQL
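With a custom URI, the whole connection string replaces the individual `DB_*` settings; a hedged example (credentials and hostname are placeholders, and including `DB_TYPE=mongo` is a defensive assumption, since this excerpt doesn't spell out whether it's still required alongside the URI):

```bash
docker run -d --name db-backup \
  -e DB_TYPE=mongo \
  -e MONGO_CUSTOM_URI="mongodb+srv://backup_user:secret@cluster0.example.mongodb.net/mydb" \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup:latest
```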
## 3.4.2 2022-09-19 <dave at tiredofit dot ca>
### Changed
- Skip availability check for Mongo Atlas connections
## 3.4.1 2022-09-13 <dave at tiredofit dot ca>
### Added
- Introduce environment variables for SCRIPT_LOCATION_POST and SCRIPT_LOCATION_PRE for better separation
### Deprecated
- Introduce deprecation warning for the custom script paths `/assets/custom-scripts` and `/assets/custom-scripts/pre`. These
paths will continue to work for now but support may be removed in the next major version release. To support the new
default paths your scripts should be moved as follows:
|Script Type|Old Path (Deprecated)|New Environment Variable|Environment Value Default|
|-----------|--------|-------------------------|----------------|
|Pre|`/assets/custom-scripts/pre`|SCRIPT_LOCATION_PRE|`/assets/scripts/pre`|
|Post|`/assets/custom-scripts`|SCRIPT_LOCATION_POST|`/assets/scripts/post`|
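In practice, migrating means either moving scripts to the new default paths or pointing the new variables at the old locations; a hedged sketch of both options (paths on the host side are illustrative):

```bash
# Option 1: mount your scripts at the new default paths
docker run -d \
  -v "$(pwd)/scripts/pre:/assets/scripts/pre" \
  -v "$(pwd)/scripts/post:/assets/scripts/post" \
  tiredofit/db-backup

# Option 2: keep the old layout and point the new variables at it
docker run -d \
  -e SCRIPT_LOCATION_PRE=/assets/custom-scripts/pre \
  -e SCRIPT_LOCATION_POST=/assets/custom-scripts \
  -v "$(pwd)/custom-scripts:/assets/custom-scripts" \
  tiredofit/db-backup
```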
## 3.4.0 2022-09-12 <dave at tiredofit dot ca>
### Added
- Add GZ_RSYNCABLE environment variable for better rsync compatibility (Credit teun95@github)
- Add Pre Backup Script Support
- Add MongoDB Atlas Support
### Changed
- Fix Default Port for Influx 2 DB Hosts
## 3.3.12 2022-08-15 <dave at tiredofit dot ca>
### Changed
- arifer612@github contributed a fix for incorrect case of "filesize" variable when using post backup scripts
## 3.3.11 2022-07-22 <khoazero123@github>
### Fixed
- Restore script not properly detecting postgres backups
## 3.3.10 2022-07-19 <dave at tiredofit dot ca>
### Changed
- Remove MSSQL install packages properly
## 3.3.9 2022-07-09 <fdarveau@github>
### Fixed
- Remaining work on interactive mode for entering port on restore script
## 3.3.8 2022-07-09 <dave at tiredofit dot ca>
### Added
- MSSQL Client Tools 18.0.1.1-1
## 3.3.7 2022-06-23 <dave at tiredofit dot ca>
### Changed
- Allow overrides to actually override with the restore script
## 3.3.6 2022-06-23 <dave at tiredofit dot ca>
### Changed
- Fix for restore script when using all 7 arguments
## 3.3.5 2022-06-08 <dave at tiredofit dot ca>
### Changed
- Fix DB Port parameter not being able to be input in restore script
- Fix MongoDB restore questionnaire
## 3.3.4 2022-06-03 <rozdzynski@github>
### Fixed
- S3 backups failing with special characters in filename
## 3.3.3 2022-05-24 <dave at tiredofit dot ca>
### Added
- Alpine 3.16 base
## 3.3.2 2022-05-02 <dave at tiredofit dot ca>
### Added
- Add POST_SCRIPT_SKIP_X_VERIFY environment variables to allow for more host compatibility for post scripts
## 3.3.1 2022-04-30 <dave at tiredofit dot ca>
### Changed
- Compressing silently was causing 0 byte backups
## 3.3.0 2022-04-30 <dave at tiredofit dot ca>
### Added
- Ability to auto clean old S3 / Minio hosts like what occurs on the filesystem
- Alert user how to turn off Zabbix Monitoring if it fails
- Allow Zabbix Monitoring to work with S3
- Silence some more compression statements
### Changed
- Fix for Redis not backing up properly
- Start sending checksums for S3 Outputs
- Cleanup some code functions
- Fix Container Log Level always in DEBUG
## 3.2.4 2022-04-21 <dave at tiredofit dot ca>
### Changed
- Add -portable flag when backing up Influx
## 3.2.3 2022-04-21 <dave at tiredofit dot ca>
### Changed
- Fix for bucket / db name in InfluxDB 1.x
- Minor aesthetics, spacing, spelling
## 3.2.2 2022-04-21 <dave at tiredofit dot ca>
### Changed
- Restore script properly parses DB_PORT entry
- Influx database-ready check performs different checks depending on V1/V2
- Stop using weird database lowercase variables unnecessarily breaking Docker Secrets
## 3.2.1 2022-04-03 <dave at tiredofit dot ca>
### Changed
- Fix a variety of issues with 3.2.0 relating to InfluxDB
## 3.2.0 2022-03-31 <dave at tiredofit dot ca>
### Added
- Install InfluxDB2 Client alongside version 1 (amd64 and arm64)
- Introduce InfluxDB 2 backup support
- Introduce multiple compression types other than Gzip for Influx 1/2
- Introduce compression for MSSQL backups
### Changed
- Testing for Host availability for CouchDB and InfluxDB
## 3.1.3 2022-03-30 <dave at tiredofit dot ca>
### Changed
- Fix for MariaDB not sending database name to post script
- Check for executable bit on post scripts both via environment variable and /assets/custom
- SPLIT_DB defaulted to TRUE
## 3.1.2 2022-03-29 <dave at tiredofit dot ca>
### Changed
- Fix for blank Notice when individual backup is completed (time taken)
## 3.1.1 2022-03-28 <dave at tiredofit dot ca>
### Changed
- Resolve some issues with backups of Mongo and others not saving the proper timestamp
## 3.1.0 2022-03-23 <dave at tiredofit dot ca>
### Added
- Backup multiple databases by separating with a comma e.g. db1,db2
- Backup ALL databases by setting DB_NAME to ALL
- Exclude databases from being backed up (comma separated) when DB_NAME is ALL e.g. DB_NAME_EXCLUDE=db3,db4
- Backup timers execute per database, not per the whole script run
- Post scripts run after each database backup
- Checksum does not occur when database backup failed
- Database cleanup does not occur when any databases backups fail throughout session
- MongoDB now supported with 'restore' script - Credit to piemonkey@github
- Lots of reshuffling, optimizations with script due to botched 3.0 release
### Changed
- ZSTD replaces GZ as default compression type
- Output is cleaner when backups are occurring
## 3.0.16 2022-03-23 <dave at tiredofit dot ca>
### Changed
- Fix for SPLIT_DB not looping through all database names properly
## 3.0.15 2022-03-22 <dave at tiredofit dot ca>
### Changed
- Rework compression function
- Fix for Bzip compression failing
## 3.0.14 2022-03-22 <dave at tiredofit dot ca>
### Changed
- Rearrange Notice stating when next backup is going to start
## 3.0.13 2022-03-21 <dave at tiredofit dot ca>
### Added
- Add compression levels to debug mode
## 3.0.12 2022-03-21 <dave at tiredofit dot ca>
### Added
- Throw Errors for MANUAL mode when certain other CONTAINER_* services are enabled
## 3.0.11 2022-03-21 <dave at tiredofit dot ca>
### Changed
- Fix for Parallel Compression
## 3.0.10 2022-03-21 <dave at tiredofit dot ca>
### Changed
- Fix for restore script not taking "custom" usernames or passwords
## 3.0.9 2022-03-21 <dave at tiredofit dot ca>
### Changed
- Switch to using parallel versions of compression tools all the time, yet explicitly state the threads in use (1 or ++)
## 3.0.8 2022-03-21 <dave at tiredofit dot ca>
### Added
- Add PARALLEL_COMPRESSION_THREADS environment variable to limit the number of threads when compressing - currently autodetects however many processors are available to the container
## 3.0.7 2022-03-21 <dave at tiredofit dot ca>
### Reverted
- Strip unused LOG directives
## 3.0.6 2022-03-21 <dave at tiredofit dot ca>
### Changed
- Fix for parallel compression
## 3.0.5 2022-03-21 <dave at tiredofit dot ca>
### Changed
- Add more detail regarding manual modes
## 3.0.4 2022-03-21 <dave at tiredofit dot ca>
### Changed
- Fix for 3.0.3
## 3.0.3 2022-03-21 <dave at tiredofit dot ca>
### Changed
- Add documentation for Manual mode
- Revert Compression variables
## 3.0.2 2022-03-18 <dave at tiredofit dot ca>
### Changed
- Cleanup of Zabbix Agent options
- Updated Zabbix template
- Split apart S3 options for better debugging and also cleaned up their variables
- Fixed issue with post scripts not outputting proper backup start time
- Cleaned up some notifications
- Rearranged code
## 3.0.1 2022-03-17 <dave at tiredofit dot ca>
### Changed
- Fix for Environment variable not reading correctly for restore script for DB TYPE
## 3.0.0 2022-03-17 <dave at tiredofit dot ca>
### Added
- Rewrote entire image
- Ability to choose which file hash after backup (MD5 or SHA1)
- Restore Script (execute 'restore' in container)
- Allow to map custom CA certs for S3 backups
- Allow to skip certificate certification for S3 Backups
- Revamped Logging and parameters - File logs also exist in /var/log/container/container.log
- Added more functionality to send to zabbix to track start, end, duration and status
- Ability to backup stored procedures for MySQL / MariaDB
- Ability to backup as a single transaction for MySQL / MariaDB
- Ability to execute "manually" and still allow container to execute to accommodate for Kubernetes cron usage
### Changed
- Environment variables have changed! Specifically relating to COMPRESSION, PARALLEL COMPRESSION, CHECKSUMs
## 2.12.0 2022-03-16 <dave at tiredofit dot ca>
### Changed
- Last release of 2.x series
- Fix timer for backups that take an excessively long time, and allow backups to start at the same time daily. Previously, if a backup took 30 minutes, the schedule would shift by 30 minutes each day, eventually taking backups mid-day.
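In scheduling terms, the fix is to derive the next run from the previous scheduled start rather than from when the backup finished; a minimal sketch of the idea (not the container's actual code):

```bash
#!/bin/bash
# Drift-free scheduling sketch: anchor on the scheduled start time, so
# backup duration never pushes the next start later.
DB_DUMP_FREQ=1440                    # minutes between dumps (daily)
scheduled_start=$(date +%s)          # this run's scheduled start
sleep 2                              # stand-in for a long-running backup
next_run=$(( scheduled_start + DB_DUMP_FREQ * 60 ))
# A drifting scheduler would instead compute $(date +%s) + frequency
# AFTER the backup, shifting the start time a little later every day.
echo "next backup at $(date -d "@${next_run}")"
```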
## 2.11.5 2022-03-15 <dave at tiredofit dot ca>
### Added
- Add additional debug statements
## 2.11.4 2022-03-15 <dave at tiredofit dot ca>
### Added
- Add debug statement around the scheduling component
## 2.11.3 2022-02-09 <dave at tiredofit dot ca>
### Changed
- Rework to support new base image
## 2.11.2 2022-02-09 <dave at tiredofit dot ca>
### Changed
- Refresh base image
## 2.11.1 2022-01-20 <jacksgt@github>
### Changed
- Modernized S3 variables and sanity checks
- Change exit code to 0 when executing a manual backup
## 2.11.0 2022-01-20 <dave at tiredofit dot ca>
### Added
- Add capability to select `TEMP_LOCATION` for initial backup and compression before backup completes to avoid filling system memory
### Changed
- Cleanup for MariaDB/MySQL DB ready routines that half worked in 2.10.3
- Code cleanup
## 2.10.3 2022-01-07 <dave at tiredofit dot ca>
### Changed
- Change the way MariaDB/MySQL connectivity check is performed to allow for better compatibility without requiring the DB_USER to have PROCESS privileges
## 2.10.2 2021-12-28 <dave at tiredofit dot ca>
### Changed
- Remove logrotate configuration for redis which shouldn't exist in the first place
## 2.10.1 2021-12-22 <milenkara@github>
### Added
- Allow for choosing region when backing up to S3
## 2.10.0 2021-12-22 <dave at tiredofit dot ca>
### Changed
- Revert to Postgresql 14 from packages as it's now in the repositories
- Fix for Zabbix Monitoring
## 2.9.7 2021-12-15 <dave at tiredofit dot ca>
### Changed
- Fixup for Zabbix Autoagent registration
## 2.9.6 2021-12-03 <alexbarcelo@github>
### Changed
- Fix for S3 Minio backup targets
- Fix for annoying output on certain target time print conditions
## 2.9.5 2021-12-07 <dave at tiredofit dot ca>
### Changed
- Fix for 2.9.3
## 2.9.4 2021-12-07 <dave at tiredofit dot ca>
### Added
- Add Zabbix auto register support for templates
## 2.9.3 2021-11-24 <dave at tiredofit dot ca>
### Added
- Alpine 3.15 base
## 2.9.2 2021-10-22 <teenigma@github>
### Fixed
- Fix compression failing on Redis backup
## 2.9.1 2021-10-15 <sbrunecker@github>
### Fixed
- Allow MySQL 8.0 servers to be backed up
- Fixed DB available check getting stuck with empty password
## 2.9.0 2021-10-15 <dave at tiredofit dot ca>
### Added
- Postgresql 14 Support (compiled)
- MSSQL 17.8.1.1
## 2.8.2 2021-10-15 <dave at tiredofit dot ca>
### Changed
- Change to using aws cli from Alpine repositories (fixes #81)
## 2.8.1 2021-09-01 <dave at tiredofit dot ca>
### Changed
- Modernize image with updated environment variables from upstream
## 2.8.0 2021-08-27 <dave at tiredofit dot ca>
### Added
- Alpine 3.14 Base
### Changed
- Fix for syntax error in 2.7.0 Release (Credit the1ts@github)
- Cleanup image and leftover cache with AWS CLI installation
## 2.7.0 2021-06-17 <dave at tiredofit dot ca>
### Added
- MongoDB Authentication Database support (DB_AUTH)
## 2.6.1 2021-06-08 <jwillmer@github>
### Changed
- Fix for Issue #14 - SPLIT_DB=TRUE was not working for Postgres DB server
## 2.6.0 2021-02-19 <tpansino@github>
### Added
- SQLite support
## 2.5.1 2021-02-14 <dave at tiredofit dot ca>
### Changed
- Fix xz backups with `PARALLEL_COMPRESSION=TRUE`
## 2.5.0 2021-01-25 <dave at tiredofit dot ca>
### Added
- Multi Platform Build Variants (ARMv7 AMD64 AArch64)
### Changed
- Alpine 3.13 Base
- Compile Pixz as opposed to relying on testing repository
- MSSQL Support only available under AMD64. Container exits if any other platform detected when MSSQL set to be backed up.
## 2.4.0 2020-12-07 <dave at tiredofit dot ca>
### Added

Dockerfile

@@ -1,57 +1,86 @@
FROM tiredofit/alpine:edge
FROM docker.io/tiredofit/alpine:3.16
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables
ENV MSSQL_VERSION=17.5.2.1-1 \
ENABLE_CRON=FALSE \
ENABLE_SMTP=FALSE \
ENABLE_ZABBIX=TRUE \
ZABBIX_HOSTNAME=db-backup
### Dependencies
ENV INFLUX2_VERSION=2.4.0 \
MSSQL_VERSION=18.0.1.1-1 \
CONTAINER_ENABLE_MESSAGING=FALSE \
CONTAINER_ENABLE_MONITORING=TRUE \
CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
IMAGE_NAME="tiredofit/db-backup" \
IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
### Dependencies
RUN set -ex && \
echo "@testing https://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
apk update && \
apk upgrade && \
apk add -t .db-backup-build-deps \
build-base \
bzip2-dev \
git \
libarchive-dev \
libressl-dev \
libffi-dev \
python3-dev \
py3-pip \
xz-dev \
&& \
\
apk add --no-cache -t .db-backup-run-deps \
bzip2 \
aws-cli \
bzip2 \
influxdb \
libarchive \
mariadb-client \
mariadb-connector-c \
mongodb-tools \
libressl \
pigz \
postgresql \
postgresql-client \
pv \
py3-cryptography \
redis \
sqlite \
xz \
zstd \
&& \
\
apk add --no-cache \
pixz@testing \
&& \
apkArch="$(apk --print-arch)"; \
case "$apkArch" in \
x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
aarch64 ) influx2=true ; influx_arch=arm64 ;; \
*) sleep 0.1 ;; \
esac; \
\
cd /usr/src && \
curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk && \
curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk && \
echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk && \
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSSQL_VERSION}_amd64.apk mssql-tools18_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
\
mkdir -p /usr/src/pbzip2 && \
curl -ssL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
cd /usr/src/pbzip2 && \
make && \
make install && \
mkdir -p /usr/src/pixz && \
curl -sSL https://github.com/vasi/pixz/releases/download/v1.0.7/pixz-1.0.7.tar.xz | tar xvfJ - --strip 1 -C /usr/src/pixz && \
cd /usr/src/pixz && \
./configure \
--prefix=/usr \
--sysconfdir=/etc \
--localstatedir=/var \
&& \
make && \
make install && \
\
pip3 install blobxfer && \
\
### Cleanup
apk del .db-backup-build-deps && \
rm -rf /usr/src/* && \
rm -rf /tmp/* /var/cache/apk/*
rm -rf /*.apk && \
rm -rf /etc/logrotate.d/redis && \
rm -rf /root/.cache /tmp/* /var/cache/apk/*
### S6 Setup
ADD install /
COPY install /

LICENSE

@@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2020 Dave Conroy
Copyright (c) 2022 Dave Conroy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

386
README.md

@@ -1,173 +1,345 @@
# hub.docker.com/r/tiredofit/db-backup
# github.com/tiredofit/docker-db-backup
[![Build Status](https://img.shields.io/docker/build/tiredofit/db-backup.svg)](https://hub.docker.com/r/tiredofit/db-backup)
[![Docker Pulls](https://img.shields.io/docker/pulls/tiredofit/db-backup.svg)](https://hub.docker.com/r/tiredofit/db-backup)
[![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg)](https://hub.docker.com/r/tiredofit/db-backup)
[![Docker Layers](https://images.microbadger.com/badges/image/tiredofit/db-backup.svg)](https://microbadger.com/images/tiredofit/db-backup)
[![GitHub release](https://img.shields.io/github/v/tag/tiredofit/docker-db-backup?style=flat-square)](https://github.com/tiredofit/docker-db-backup/releases/latest)
[![Build Status](https://img.shields.io/github/workflow/status/tiredofit/docker-db-backup/build?style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions?query=workflow%3Abuild)
[![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
[![Docker Pulls](https://img.shields.io/docker/pulls/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
[![Become a sponsor](https://img.shields.io/badge/sponsor-tiredofit-181717.svg?logo=github&style=flat-square)](https://github.com/sponsors/tiredofit)
[![Paypal Donate](https://img.shields.io/badge/donate-paypal-00457c.svg?logo=paypal&style=flat-square)](https://www.paypal.me/tiredofit)
## Introduction
* * *
## About
This will build a container for backing up multiple type of DB Servers
This will build a container for backing up multiple types of DB Servers
Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
* dump to local filesystem or backup to S3 Compatible services
* dump to local filesystem or backup to S3 Compatible services, and Azure.
* select database user and password
* backup all databases
* choose to have an MD5 sum after backup for verification
* backup all databases, single, or multiple databases
* backup all to separate files or one singular file
* choose to have an MD5 or SHA1 sum after backup for verification
* delete old backups after specific amount of time
* choose compression type (none, gz, bz, xz, zstd)
* connect to any container running on the same system
* Script to perform restores
* Zabbix Monitoring capabilities
* select how often to run a dump
* select when to start the first dump, whether time of day or relative to container start time
* Execute script after backup for monitoring/alerting purposes
* This Container uses a [customized Alpine Linux base](https://hub.docker.com/r/tiredofit/alpine) which includes [s6 overlay](https://github.com/just-containers/s6-overlay) enabled for PID 1 Init capabilities, [zabbix-agent](https://zabbix.org) for individual container monitoring, Cron also installed along with other tools (bash,curl, less, logrotate, nano, vim) for easier management. It also supports sending to external SMTP servers.
[Changelog](CHANGELOG.md)
## Authors
## Maintainer
- [Dave Conroy](https://github.com/tiredofit)
## Table of Contents
- [hub.docker.com/r/tiredofit/db-backup](#hubdockercomrtiredofitdb-backup)
- [Introduction](#introduction)
- [Authors](#authors)
- [Table of Contents](#table-of-contents)
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Quick Start](#quick-start)
- [Configuration](#configuration)
- [Data-Volumes](#data-volumes)
- [Environment Variables](#environment-variables)
- [Maintenance](#maintenance)
- [Shell Access](#shell-access)
- [Custom Scripts](#custom-scripts)
- [Example Post Script](#example-post-script)
- [$1=EXIT_CODE (After running backup routine)](#1exit_code-after-running-backup-routine)
- [$2=DB_TYPE (Type of Backup)](#2db_type-type-of-backup)
- [$3=DB_HOST (Backup Host)](#3db_host-backup-host)
- [#4=DB_NAME (Name of Database backed up](#4db_name-name-of-database-backed-up)
- [$5=DATE (Date of Backup)](#5date-date-of-backup)
- [$6=TIME (Time of Backup)](#6time-time-of-backup)
- [$7=BACKUP_FILENAME (Filename of Backup)](#7backup_filename-filename-of-backup)
- [$8=FILESIZE (Filesize of backup)](#8filesize-filesize-of-backup)
- [$9=MD5_RESULT (MD5Sum if enabled)](#9md5_result-md5sum-if-enabled)
- [About](#about)
- [Maintainer](#maintainer)
- [Table of Contents](#table-of-contents)
- [Prerequisites and Assumptions](#prerequisites-and-assumptions)
- [Installation](#installation)
- [Build from Source](#build-from-source)
- [Prebuilt Images](#prebuilt-images)
- [Multi Architecture](#multi-architecture)
- [Configuration](#configuration)
- [Quick Start](#quick-start)
- [Persistent Storage](#persistent-storage)
- [Environment Variables](#environment-variables)
- [Base Images used](#base-images-used)
- [Container Options](#container-options)
- [Database Specific Options](#database-specific-options)
- [For Influx DB2:](#for-influx-db2)
- [Scheduling Options](#scheduling-options)
- [Backup Options](#backup-options)
- [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
- [Maintenance](#maintenance)
- [Shell Access](#shell-access)
- [Manual Backups](#manual-backups)
- [Restoring Databases](#restoring-databases)
- [Custom Scripts](#custom-scripts)
- [Path Options](#path-options)
- [Pre Backup](#pre-backup)
- [Post backup](#post-backup)
- [Support](#support)
- [Usage](#usage)
- [Bugfixes](#bugfixes)
- [Feature Requests](#feature-requests)
- [Updates](#updates)
- [License](#license)
## Prerequisites
> **NOTE**: If you are using this with a docker-compose file along with a separate SQL container, take care not to set the variables to backup immediately; rather, have it delay execution for a minute, otherwise the first backup will fail.
You must have a working DB server or container available for this to work properly, it does not provide server functionality!
## Prerequisites and Assumptions
* You must have a working connection to one of the supported DB Servers and appropriate credentials
## Installation
Automated builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and is the recommended
method of installation.
### Build from Source
Clone this repository and build the image with `docker build <arguments> (imagename) .`
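For example (the tag is your choice):

```bash
git clone https://github.com/tiredofit/docker-db-backup.git
cd docker-db-backup
docker build -t tiredofit/db-backup:local .
```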
### Prebuilt Images
Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and are the recommended method of installation.
The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md):
| Alpine Base | Tag |
| ----------- | --------- |
| latest | `:latest` |
```bash
docker pull tiredofit/db-backup:latest
docker pull tiredofit/db-backup:(imagetag)
```
#### Multi Architecture
Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architectures, type `docker manifest inspect (image):(tag)`
## Configuration
### Quick Start
* The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a working [docker-compose.yml](examples/docker-compose.yml) that can be modified for development or production use.
* Set various [environment variables](#environment-variables) to understand the capabiltiies of this image.
* Set various [environment variables](#environment-variables) to understand the capabilities of this image.
* Map [persistent storage](#persistent-storage) for access to configuration and data files for backup.
* Make [networking ports](#networking) available for public access if necessary
> **NOTE**: If you are using this with a docker-compose file along with a separate SQL container, take care not to set the variables to backup immediately; rather, have it delay execution for a minute, otherwise the first backup will fail.
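If you prefer plain `docker run`, here is a minimal example assembled from the option tables below (values and the network name are illustrative; `DB_DUMP_BEGIN=+1` gives a co-started database a minute to come up, per the note above):

```bash
docker run -d --name db-backup \
  --network my-db-network \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=mydb \
  -e DB_USER=backup \
  -e DB_PASS=secret \
  -e DB_DUMP_FREQ=1440 \
  -e DB_DUMP_BEGIN=+1 \
  -e DB_CLEANUP_TIME=10080 \
  -e COMPRESSION=ZSTD \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup:latest
```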
## Configuration
### Data-Volumes
### Persistent Storage
The following directories are used for configuration and can be mapped for persistent storage.
| Directory | Description |
| ------------------------ | ---------------------------------------------------------------------------------- |
| `/backup` | Backups |
| `/assets/custom-scripts` | *Optional* Put custom scripts in this directory to execute after backup operations |
| Directory | Description |
| ---------------------- | ----------------------------------------------------------------------------------- |
| `/backup` | Backups |
| `/assets/scripts/pre` | *Optional* Put custom scripts in this directory to execute before backup operations |
| `/assets/scripts/post` | *Optional* Put custom scripts in this directory to execute after backup operations |
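A custom script is just an executable shell file in the mapped directory; a minimal pre-backup sketch (the filename is arbitrary, and since the 3.1.3 notes mention an executable-bit check for post scripts, `chmod +x` is the safe assumption for both directories):

```bash
#!/bin/bash
# Saved as e.g. ./scripts/pre/00-log-start.sh, mounted at /assets/scripts/pre.
# Remember to chmod +x this file.
echo "$(date '+%Y-%m-%d %H:%M:%S') starting backup run" >> /backup/backup.log
```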
### Environment Variables
Along with the Environment Variables from the [Base image](https://hub.docker.com/r/tiredofit/alpine), below is the complete list of available options that can be used to customize your installation.
#### Base Images used
| Parameter | Description |
| ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi - Default `FILESYSTEM` |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` - Default `GZ` |
| `COMPRESSION_LEVEL` | Numerical value of what level of compression to use; most allow `1` to `9` except for `ZSTD`, which allows `1` to `19` - Default `3` |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` |
| `DB_HOST` | Server Hostname e.g. `mariadb` |
| `DB_NAME` | Schema Name e.g. `database` |
| `DB_USER` | username for the database - for MySQL, use `root` to backup all of them. |
| `DB_PASS` | (optional if DB doesn't require it) password for the database |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. |
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats |
| | Absolute HHMM, e.g. `2330` or `0415` |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half |
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. "--extra-command" |
| `MD5` | Generate MD5 Sum in Directory, `TRUE` or `FALSE` - Default `TRUE` |
| `PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` - Default `TRUE` |
| `POST_SCRIPT` | Fill this variable in with a command to execute post the script backing up | |
| `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create Seperate DB Backups instead of all in one. - Default `FALSE` |
This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) or [Debian Linux](https://hub.docker.com/r/tiredofit/debian) base image that includes an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handled via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`, `curl`, `less`, `logrotate`, `nano`, `vim`.
Be sure to view the following repositories to understand all the customizable options:
| Image | Description |
| ------------------------------------------------------ | -------------------------------------- |
| [OS Base](https://github.com/tiredofit/docker-alpine/) | Customized Image based on Alpine Linux |
#### Container Options
| Parameter | Description | Default |
| -------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `MODE` | `AUTO` to use the internal scheduling routines, or `MANUAL` to run backups only when executed by your own means | `AUTO` |
| `MANUAL_RUN_FOREVER` | `TRUE` keeps the container running after a manual backup; set to `FALSE` if you wish the container to exit after the backup completes | `TRUE` |
| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
| `PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | |
| `POST_SCRIPT` | Fill this variable in with a command to execute post backing up | |
| `SPLIT_DB` | Create a separate backup archive for each database instead of one combined archive. `TRUE` or `FALSE` (MySQL and PostgreSQL only) | `TRUE` |
#### Database Specific Options
| Parameter | Description | Default |
| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------- |
| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
| `DB_NAME` | Schema Name e.g. `database`, or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas e.g. `db1,db2` | |
| `DB_NAME_EXCLUDE` | If using `ALL`, a comma-separated list of databases to exclude from being backed up | |
| `DB_USER` | Username for the database(s) - Can use `root` for MySQL | |
| `DB_PASS` | (optional if DB doesn't require it) password for the database | |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
| `INFLUX_VERSION` | Which version of InfluxDB you are backing up from: the `1`.x or `2` series - `2` is AMD64 and ARM64 only | |
| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | |
| | This environment variable will be parsed to populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can override them by making your own entries |
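For example, a rough sketch (hostnames and database names here are placeholders) that backs up everything except two schemas:

```bash
# Back up all databases the root user can access, excluding
# the comma-separated names in DB_NAME_EXCLUDE.
docker run -d --name db-backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_USER=root \
  -e DB_PASS=examplerootpassword \
  -e DB_NAME=ALL \
  -e DB_NAME_EXCLUDE=staging,scratch \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```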
#### For InfluxDB 2:
Your organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to back up the entire set of databases. For `DB_HOST`, use the syntax `http(s)://db-name`.
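A minimal sketch, assuming a hypothetical InfluxDB 2 server reachable as `influxdb` and placeholder organization/token values:

```bash
# DB_USER carries the organization; DB_PASS carries the root token.
docker run -d --name influx-backup \
  -e DB_TYPE=influx \
  -e INFLUX_VERSION=2 \
  -e DB_HOST=http://influxdb \
  -e DB_PORT=8086 \
  -e DB_USER=example-org \
  -e DB_PASS=example-root-token \
  -e DB_NAME=ALL \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```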
#### Scheduling Options
| Parameter | Description | Default |
| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `/backup` |
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when the dump frequency fires). `1440` would delete anything over 1 day old. Leave this variable unset if you want to hold onto everything. | `FALSE` |
- You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to parse properly. There have been reports of values that start with a `0` being converted into a different format, which prevents the timer from starting at the correct time (see the example below).
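A scheduling sketch under those assumptions (the database settings are placeholders): first dump at 04:15, daily thereafter, pruning backups older than one week:

```bash
docker run -d --name db-backup \
  -e DB_DUMP_BEGIN="0415" \
  -e DB_DUMP_FREQ=1440 \
  -e DB_CLEANUP_TIME=10080 \
  -e DB_TYPE=mariadb -e DB_HOST=example-db -e DB_NAME=example \
  -e DB_USER=example -e DB_PASS=examplepassword \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```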
#### Backup Options
| Parameter | Description | Default |
| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | -------------- |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
| `COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` | `3` |
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` |
| `ENABLE_CHECKSUM` | Generate an MD5 or SHA1 checksum file in the backup directory, `TRUE` or `FALSE` | `TRUE` |
| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | |
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` |
| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` |
| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` |
- When using compression with MongoDB, only `GZ` compression is possible.
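As a sketch (the database settings are placeholders), heavier `ZSTD` compression with a capped thread count and SHA1 checksums might be combined like this:

```bash
docker run -d --name db-backup \
  -e COMPRESSION=ZSTD \
  -e COMPRESSION_LEVEL=19 \
  -e ENABLE_PARALLEL_COMPRESSION=TRUE \
  -e PARALLEL_COMPRESSION_THREADS=4 \
  -e ENABLE_CHECKSUM=TRUE \
  -e CHECKSUM=SHA1 \
  -e DB_TYPE=pgsql -e DB_HOST=postgres -e DB_NAME=example \
  -e DB_USER=example -e DB_PASS=examplepassword \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```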
#### Backing Up to S3 Compatible Services
If `BACKUP_LOCATION` = `S3` then the following options are used.
| Parameter | Description | Default |
|-----------------------|------------------------------------------------------------------------------------------|---------|
| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | |
| `S3_KEY_ID` | S3 Key ID | |
| `S3_KEY_SECRET` | S3 Key Secret | |
| `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | |
| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | |
| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` |
| `S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | |
| `S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | |
| _*OR*_ | | |
| `S3_CERT_SKIP_VERIFY` | Skip verifying self-signed certificates when connecting | `TRUE` |
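A minimal sketch for an S3-compatible target, assuming a hypothetical Minio service named `minio` and placeholder bucket and credential values:

```bash
docker run -d --name db-backup \
  -e BACKUP_LOCATION=S3 \
  -e S3_HOST=minio:9000 \
  -e S3_PROTOCOL=https \
  -e S3_BUCKET=mybucket \
  -e S3_PATH=backup \
  -e S3_REGION=us-east-1 \
  -e S3_KEY_ID=example-key-id \
  -e S3_KEY_SECRET=example-key-secret \
  -e DB_TYPE=mariadb -e DB_HOST=example-db -e DB_NAME=example \
  -e DB_USER=example -e DB_PASS=examplepassword \
  tiredofit/db-backup
```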
#### Uploading to an Azure storage account with `blobxfer`
Backup files can be uploaded to Azure File Storage with [blobxfer](https://github.com/Azure/blobxfer).
If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
| Parameter | Description | Default |
| ------------------------------- | ------------------------------------------------------------------------ | -------------------- |
| `BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | |
| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | |
| `BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` |
> This service uploads files from the backup target directory `DB_DUMP_TARGET`.
> If a cleanup configuration is defined in `DB_CLEANUP_TIME`, the remote directory on Azure storage will also be cleaned automatically.
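A sketch with placeholder account values:

```bash
docker run -d --name db-backup \
  -e BACKUP_LOCATION=blobxfer \
  -e BLOBXFER_STORAGE_ACCOUNT=examplestorageaccount \
  -e BLOBXFER_STORAGE_ACCOUNT_KEY=example-account-key \
  -e BLOBXFER_REMOTE_PATH=/docker-db-backup \
  -e DB_TYPE=mariadb -e DB_HOST=example-db -e DB_NAME=example \
  -e DB_USER=example -e DB_PASS=examplepassword \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```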
## Maintenance
### Shell Access
For debugging and maintenance purposes you may want to access the container's shell.
```bash
docker exec -it (whatever your container name is, e.g. db-backup) bash
```
### Manual Backups
Manual Backups can be performed by entering the container and typing `backup-now`
- Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting the container to `MODE=MANUAL` along with `MANUAL_RUN_FOREVER=FALSE` - you would also want to disable a few features from the upstream base images, specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup, and then exit cleanly (see the sketch below). An alternative way to run the script is to execute `/etc/services.available/10-db-backup/run`.
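A one-shot sketch under those assumptions (the database settings are placeholders); per the above, the container backs up once and exits, which suits an external scheduler such as a Kubernetes CronJob:

```bash
docker run --rm \
  -e MODE=MANUAL \
  -e MANUAL_RUN_FOREVER=FALSE \
  -e CONTAINER_ENABLE_SCHEDULING=FALSE \
  -e CONTAINER_ENABLE_MONITORING=FALSE \
  -e DB_TYPE=mariadb -e DB_HOST=example-db -e DB_NAME=example \
  -e DB_USER=example -e DB_PASS=examplepassword \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```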
### Restoring Databases
Entering the container and executing `restore` will launch a menu-based script to restore your backups - MariaDB, Postgres, and Mongo are supported.
You will be presented with a series of menus allowing you to choose:
- What file to restore
- What type of DB Backup
- What Host to restore to
- What Database Name to restore to
- What Database User to use
- What Database Password to use
- What Database Port to use
The image will try to autodetect the type, hostname, and database name based on the filename.
The image will also allow you to use the environment variables or Docker secrets that were used to create the backups.
The script can also be executed skipping the interactive mode by using the following syntax:
`restore <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>`
If you only enter some of the arguments you will be prompted to fill them in.
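For example (the filename is hypothetical), a fully non-interactive restore could be run from the host:

```bash
# Arguments follow: filename, db_type, hostname, db_name, user, password, port
docker exec -it db-backup restore /backup/mysql_example_example-db_20220315-000000.sql.gz mysql example-db example example examplepassword 3306
```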
### Custom Scripts
#### Path Options
| Parameter | Description | Default |
| ---------------------- | --------------------------------------------------------------------------- | ----------------------- |
| `SCRIPT_LOCATION_PRE` | Location on filesystem inside container to execute bash scripts pre backup | `/assets/scripts/pre/` |
| `SCRIPT_LOCATION_POST` | Location on filesystem inside container to execute bash scripts post backup | `/assets/scripts/post/` |
#### Pre Backup
If you want to execute a custom script before a backup starts, you can drop bash scripts with the extension of `.sh` in the location defined in `SCRIPT_LOCATION_PRE`. See the following example to utilize:
````bash
$ cat pre-script.sh
#!/bin/bash
# #### Example Pre Script
# #### $1=DB_TYPE (Type of Backup)
# #### $2=DB_HOST (Backup Host)
# #### $3=DB_NAME (Name of Database backed up)
# #### $4=BACKUP START TIME (Seconds since Epoch)
# #### $5=BACKUP FILENAME (Filename)
echo "${1} Backup Starting on ${2} for ${3} at ${4}. Filename: ${5}"
````
Internally, each pre-backup script is invoked with the arguments `DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME`.
Outputs the following on the console:
`mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_20220315-000000.sql.bz2`
#### Post Backup
If you want to execute a custom script at the end of a backup, you can drop bash scripts with the extension `.sh` in the location defined in `SCRIPT_LOCATION_POST`. To support legacy users, `/assets/custom-scripts` is also scanned and executed. See the following example to utilize:
````bash
$ cat post-script.sh
#!/bin/bash
# #### Example Post Script
# #### $1=EXIT_CODE (After running backup routine)
# #### $2=DB_TYPE (Type of Backup)
# #### $3=DB_HOST (Backup Host)
# #### $4=DB_NAME (Name of Database backed up)
# #### $5=BACKUP START TIME (Seconds since Epoch)
# #### $6=BACKUP FINISH TIME (Seconds since Epoch)
# #### $7=BACKUP TOTAL TIME (Seconds between Start and Finish)
# #### $8=BACKUP FILENAME (Filename)
# #### $9=BACKUP FILESIZE
# #### $10=HASH (If CHECKSUM enabled)
echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ${6}. Filename: ${7} Size: ${8} bytes MD5: ${9}"
echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes MD5: ${10}"
````
Internally, each post-backup script is invoked with the arguments `EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE`.
Outputs the following on the console:
`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_20220315-000000.sql.bz2 Size: 7795 bytes Hash: 952fbaafa30437494fdf3989a662cd40`
If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`
You must make your scripts executable; an internal check will skip running them otherwise.
If for some reason your filesystem or host is not detecting the executable bit correctly, use the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass the check.
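For example, to prepare and mount a post script (paths here are placeholders):

```bash
# Mark the script executable so the internal check does not skip it,
# then mount it into the post-script location defined above.
chmod +x post-script.sh
docker run -d --name db-backup \
  -v "$(pwd)/post-script.sh:/assets/scripts/post/post-script.sh" \
  -v "$(pwd)/backups:/backup" \
  -e DB_TYPE=mariadb -e DB_HOST=example-db -e DB_NAME=example \
  -e DB_USER=example -e DB_PASS=examplepassword \
  tiredofit/db-backup
```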
## Support
These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
### Usage
- The [Discussions board](../../discussions) is a great place for working with the community on tips and tricks of using this image.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for personalized support.
### Bugfixes
- Please submit a [Bug Report](issues/new) if something isn't working as expected. I'll do my best to issue a fix in short order.
### Feature Requests
- Feel free to submit a feature request, however there is no guarantee that it will be added, or on what timeline.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) regarding development of features.
### Updates
- Best effort to track upstream changes; more priority if I am actively using the image in a production environment.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for up to date releases.
## License
MIT. See [LICENSE](LICENSE) for more details.

examples/.gitignore

@@ -0,0 +1,5 @@
# See http://help.github.com/ignore-files/ for more about ignoring files.
# Example container mounted folders
**/backups/
**/db/


@@ -1,38 +0,0 @@
version: '2'
services:
example-db:
container_name: example-db
image: mariadb:latest
volumes:
- ./db:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=examplerootpassword
- MYSQL_DATABASE=example
- MYSQL_USER=example
- MYSQL_PASSWORD=examplepassword
restart: always
example-db-backup:
container_name: example-db-backup
image: tiredofit/db-backup
links:
- example-db
volumes:
- ./backups:/backup
- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- DB_TYPE=mariadb
- DB_HOST=example-db
- DB_NAME=example
- DB_USER=example
- DB_PASS="examplepassword"
- DB_DUMP_FREQ=1440
- DB_DUMP_BEGIN=0000
- DB_CLEANUP_TIME=8640
- MD5=TRUE
- COMPRESSION=XZ
- SPLIT_DB=FALSE
restart: always


@@ -0,0 +1,69 @@
#
# Example for Microsoft SQL Server
# upload with blobxfer to azure storage
#
version: '2'
networks:
example-mssql-blobxfer-net:
name: example-mssql-blobxfer-net
services:
example-mssql-s3-db:
hostname: example-db-host
image: mcr.microsoft.com/mssql/server:2019-latest
container_name: example-mssql-s3-db
restart: unless-stopped
ports:
- "127.0.0.1:11433:1433"
networks:
example-mssql-blobxfer-net:
volumes:
- ./tmp/backups:/tmp/backups # shared tmp backup directory
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: 5hQa0utRFBpIY3yhoIyE
MSSQL_PID: Express
example-mssql-blobxfer-db-backup:
container_name: example-mssql-blobxfer-db-backup
# if you want to build and use image from current source
# execute in terminal --> docker build -t tiredofit/db-backup-mssql-blobxfer .
# replace --> image: tiredofit/db-backup-mssql
# image: tiredofit/db-backup
image: tiredofit/db-backup-mssql-blobxfer
links:
- example-mssql-s3-db
volumes:
- ./backups:/backup
- ./tmp/backups:/tmp/backups # shared tmp backup directory
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
# - DEBUG_MODE=TRUE
- DB_TYPE=mssql
- DB_HOST=example-db-host
# - DB_PORT=1488
# - DB_NAME=ALL # [ALL] does not work on SQL Server.
# create the database named `test1` manually first
- DB_NAME=test1 # back up this database
- DB_USER=sa
- DB_PASS=5hQa0utRFBpIY3yhoIyE
- DB_DUMP_FREQ=1 # backup every minute
# - DB_DUMP_BEGIN=0000 # when unset, backups begin immediately
- DB_CLEANUP_TIME=3 # clean backups that are older than 3 minutes
- ENABLE_CHECKSUM=TRUE
- CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
- CONTAINER_ENABLE_MONITORING=FALSE
# === S3 Blobxfer ===
- BACKUP_LOCATION=blobxfer
# Add here azure storage account
- BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name}
# Add here azure storage account key
- BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
- BLOBXFER_REMOTE_PATH=docker-db-backup
restart: always
networks:
example-mssql-blobxfer-net:


@@ -0,0 +1,61 @@
#
# Example for Microsoft SQL Server
#
version: '2'
networks:
example-mssql-net:
name: example-mssql-net
services:
example-mssql-db:
hostname: example-db-host
image: mcr.microsoft.com/mssql/server:2019-latest
container_name: example-mssql-db
restart: unless-stopped
ports:
- "127.0.0.1:11433:1433"
networks:
example-mssql-net:
volumes:
- ./tmp/backups:/tmp/backups # shared tmp backup directory
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: 5hQa0utRFBpIY3yhoIyE
MSSQL_PID: Express
example-mssql-db-backup:
container_name: example-mssql-db-backup
# if you want to build and use image from current source
# execute in terminal --> docker build -t tiredofit/db-backup-mssql .
# replace --> image: tiredofit/db-backup-mssql
# image: tiredofit/db-backup
image: tiredofit/db-backup-mssql
links:
- example-mssql-db
volumes:
- ./backups:/backup
- ./tmp/backups:/tmp/backups # shared tmp backup directory
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
# - DEBUG_MODE=TRUE
- DB_TYPE=mssql
- DB_HOST=example-db-host
# - DB_PORT=1488
# - DB_NAME=ALL # [ALL] does not work on SQL Server.
# create the database named `test1` manually first
- DB_NAME=test1
- DB_USER=sa
- DB_PASS=5hQa0utRFBpIY3yhoIyE
- DB_DUMP_FREQ=1 # backup every minute
# - DB_DUMP_BEGIN=0000 # when unset, backups begin immediately
- DB_CLEANUP_TIME=5 # clean backups that are older than 5 minutes
- ENABLE_CHECKSUM=FALSE
- CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
- CONTAINER_ENABLE_MONITORING=FALSE
restart: always
networks:
example-mssql-net:


@@ -0,0 +1,51 @@
version: '2'
networks:
example-db-network:
name: example-db-network
services:
example-db:
hostname: example-db-host
container_name: example-db
image: mariadb:latest
ports:
- 13306:3306
volumes:
- ./db:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=examplerootpassword
- MYSQL_DATABASE=example
- MYSQL_USER=example
- MYSQL_PASSWORD=examplepassword
restart: always
networks:
- example-db-network
example-db-backup:
container_name: example-db-backup
image: tiredofit/db-backup
links:
- example-db
volumes:
- ./backups:/backup
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
# - DEBUG_MODE=TRUE
- DB_TYPE=mariadb
- DB_HOST=example-db-host
- DB_NAME=example
- DB_USER=example
- DB_PASS=examplepassword
- DB_DUMP_FREQ=1 # backup every minute
# - DB_DUMP_BEGIN=0000 # when unset, backups begin immediately
- DB_CLEANUP_TIME=5 # clean backups that are older than 5 minutes
- CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
- CONTAINER_ENABLE_MONITORING=FALSE
restart: always
networks:
- example-db-network


@@ -0,0 +1,15 @@
#!/bin/bash
# #### Example Post Script
# #### $1=EXIT_CODE (After running backup routine)
# #### $2=DB_TYPE (Type of Backup)
# #### $3=DB_HOST (Backup Host)
# #### $4=DB_NAME (Name of Database backed up)
# #### $5=BACKUP START TIME (Seconds since Epoch)
# #### $6=BACKUP FINISH TIME (Seconds since Epoch)
# #### $7=BACKUP TOTAL TIME (Seconds between Start and Finish)
# #### $8=BACKUP FILENAME (Filename)
# #### $9=BACKUP FILESIZE
# #### $10=HASH (If CHECKSUM enabled)
echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes MD5: ${10}"


@@ -1,14 +0,0 @@
##!/bin/bash
## Example Post Script
## $1=EXIT_CODE (After running backup routine)
## $2=DB_TYPE (Type of Backup)
## $3=DB_HOST (Backup Host)
## #4=DB_NAME (Name of Database backed up
## $5=DATE (Date of Backup)
## $6=TIME (Time of Backup)
## $7=BACKUP_FILENAME (Filename of Backup)
## $8=FILESIZE (Filesize of backup)
## $9=MD5_RESULT (MD5Sum if enabled)
echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ${6}. Filename: ${7} Size: ${8} bytes MD5: ${9}"


@@ -0,0 +1,26 @@
#!/command/with-contenv bash
BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
CHECKSUM=${CHECKSUM:-"MD5"}
COMPRESSION=${COMPRESSION:-"ZSTD"}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
MODE=${MODE:-"AUTO"}
MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
MYSQL_SINGLE_TRANSACTION=${MYSQL_SINGLE_TRANSACTION:-"TRUE"}
MYSQL_STORED_PROCEDURES=${MYSQL_STORED_PROCEDURES:-"TRUE"}
PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
SCRIPT_LOCATION_PRE=${SCRIPT_LOCATION_PRE:-"/assets/scripts/pre/"}
SCRIPT_LOCATION_POST=${SCRIPT_LOCATION_POST:-"/assets/scripts/post/"}
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SKIP_AVAILABILITY_CHECK=${SKIP_AVAILABILITY_CHECK:-"FALSE"}
SPLIT_DB=${SPLIT_DB:-"TRUE"}
TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}


@@ -0,0 +1,831 @@
#!/command/with-contenv bash
bootstrap_variables() {
sanity_var DB_TYPE "Set appropriate DB_TYPE"
case "${DB_TYPE,,}" in
couch* )
dbtype=couch
DB_PORT=${DB_PORT:-5984}
file_env 'DB_USER'
file_env 'DB_PASS'
;;
influx* )
dbtype=influx
case "${INFLUX_VERSION}" in
1) DB_PORT=${DB_PORT:-8088} ;;
2) DB_PORT=${DB_PORT:-8086} ;;
esac
file_env 'DB_USER'
file_env 'DB_PASS'
sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
;;
mongo* )
dbtype=mongo
if [ -n "${MONGO_CUSTOM_URI}" ] ; then
mongo_uri_proto=$(echo ${MONGO_CUSTOM_URI} | grep :// | sed -e's,^\(.*://\).*,\1,g')
mongo_uri_scratch="${MONGO_CUSTOM_URI/${mongo_uri_proto}/}"
mongo_uri_username_password=$(echo ${mongo_uri_scratch} | grep @ | rev | cut -d@ -f2- | rev)
if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch=$(echo ${mongo_uri_scratch} | rev | cut -d@ -f1 | rev) ; fi
mongo_uri_port=$(echo ${mongo_uri_scratch} | grep : | rev | cut -d: -f2- | rev)
if [ -n "${mongo_uri_port}" ]; then mongo_uri_port=$(echo ${mongo_uri_scratch} | rev | cut -d: -f1 | cut -d/ -f2 | rev) ; fi
mongo_uri_hostname=$(echo ${mongo_uri_scratch} | cut -d/ -f1 | cut -d: -f1 )
mongo_uri_database=$(echo ${mongo_uri_scratch} | cut -d/ -f2 | cut -d? -f1 )
mongo_uri_options=$(echo ${mongo_uri_scratch} | cut -d/ -f2 | cut -d? -f2 )
DB_NAME=${DB_NAME:-"${mongo_uri_database,,}"}
DB_HOST=${DB_HOST:-"${mongo_uri_hostname,,}"}
else
DB_PORT=${DB_PORT:-27017}
[[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
[[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
fi
;;
"mysql" | "mariadb" )
dbtype=mysql
DB_PORT=${DB_PORT:-3306}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
"mssql" | "microsoftsql" )
apkArch="$(apk --print-arch)"; \
case "$apkArch" in
x86_64) mssql=true ;;
*) print_error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;;
esac
dbtype=mssql
DB_PORT=${DB_PORT:-1433}
;;
postgres* | "pgsql" )
dbtype=pgsql
DB_PORT=${DB_PORT:-5432}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
"redis" )
dbtype=redis
DB_PORT=${DB_PORT:-6379}
[[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
;;
sqlite* )
dbtype=sqlite3
;;
* )
print_error "I don't recognize 'DB_TYPE=${DB_TYPE}' - Exitting.."
exit 99
;;
esac
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
file_env 'S3_KEY_ID'
file_env 'S3_KEY_SECRET'
fi
}
backup_couch() {
prepare_dbbackup
target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
compression
pre_dbbackup ${DB_NAME}
print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
curl -sSL -X GET ${DB_HOST}:${DB_PORT}/${DB_NAME}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup ${DB_NAME}
}
backup_influx() {
if [ "${DB_NAME,,}" = "all" ] ; then
print_debug "Preparing to back up everything"
db_names=justbackupeverything
else
db_names=$(echo "${DB_NAME}" | tr ',' '\n')
fi
case "${INFLUX_VERSION,,}" in
1 )
for db in ${db_names}; do
prepare_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
target=influx_${db}_${DB_HOST#*//}_${now}
compression
pre_dbbackup $db
print_notice "Dumping Influx database: '${db}'"
influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
generate_checksum
move_dbbackup
post_dbbackup $db
done
;;
2 )
for db in ${db_names}; do
prepare_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
target=influx2_${db}_${DB_HOST#*//}_${now}
compression
pre_dbbackup $db
print_notice "Dumping Influx2 database: '${db}'"
influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
create_archive
target=influx2_${db}_${DB_HOST#*//}_${now}.tar${extension}
generate_checksum
move_dbbackup
post_dbbackup $db
done
;;
esac
}
backup_mongo() {
prepare_dbbackup
if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
else
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
mongo_compression="--gzip"
compression_string="and compressing with gzip"
fi
if [ -n "${MONGO_CUSTOM_URI}" ] ; then
mongo_backup_parameter="--uri=${MONGO_CUSTOM_URI} ${EXTRA_OPTS}"
else
mongo_backup_parameter="--host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}"
fi
pre_dbbackup "${DB_NAME}"
print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
silent mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} ${mongo_backup_parameter}
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup "${DB_NAME}"
}
backup_mssql() {
prepare_dbbackup
target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
compression
pre_dbbackup "${DB_NAME}"
print_notice "Dumping MSSQL database: '${DB_NAME}'"
silent /opt/mssql-tools18/bin/sqlcmd -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE [${DB_NAME}] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup $DB_NAME
}
backup_mysql() {
if var_true "${MYSQL_SINGLE_TRANSACTION}" ; then
single_transaction="--single-transaction"
fi
if var_true "${MYSQL_STORED_PROCEDURES}" ; then
stored_procedures="--routines"
fi
if [ "${DB_NAME,,}" = "all" ] ; then
print_debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
print_debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
done
fi
else
db_names=$(echo "${DB_NAME}" | tr ',' '\n')
fi
print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${SPLIT_DB}" ; then
for db in ${db_names} ; do
prepare_dbbackup
target=mysql_${db}_${DB_HOST,,}_${now}.sql
compression
pre_dbbackup $db
print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup $db
done
else
print_debug "Not splitting database dumps into their own files"
prepare_dbbackup
target=mysql_all_${DB_HOST,,}_${now}.sql
compression
pre_dbbackup all
print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup all
fi
}
backup_pgsql() {
export PGPASSWORD=${DB_PASS}
authdb=${DB_USER}
if [ "${DB_NAME,,}" = "all" ] ; then
print_debug "Preparing to back up all databases"
db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
print_debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
done
fi
else
db_names=$(echo "${DB_NAME}" | tr ',' '\n')
fi
print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${SPLIT_DB}" ; then
for db in ${db_names} ; do
prepare_dbbackup
target=pgsql_${db}_${DB_HOST,,}_${now}.sql
compression
pre_dbbackup $db
print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup $db
done
else
print_debug "Not splitting database dumps into their own files"
prepare_dbbackup
target=pgsql_all_${DB_HOST,,}_${now}.sql
compression
pre_dbbackup all
print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
tmp_db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
for r_db_name in $(echo $db_names | xargs); do
tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
done
sleep 5
for x_db_name in ${tmp_db_names} ; do
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup all
fi
}
backup_redis() {
prepare_dbbackup
print_notice "Dumping Redis - Flushing Redis Cache First"
target=redis_all_${DB_HOST,,}_${now}.rdb
echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
sleep 10
try=5
while [ $try -gt 0 ] ; do
saved=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
print_notice "Redis Backup Complete"
exit_code=0
break
fi
try=$((try - 1))
print_warn "Redis Busy - Waiting and retrying in 5 seconds"
sleep 5
done
target_original=${target}
compression
pre_dbbackup all
$compress_cmd "${TEMP_LOCATION}/${target_original}"
check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup all
}
backup_sqlite3() {
prepare_dbbackup
db=$(basename "${DB_HOST}")
db="${db%.*}"
target=sqlite3_${db}_${now}.sqlite3
compression
pre_dbbackup $db
print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
silent sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
exit_code=$?
check_exit_code $target
cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
generate_checksum
move_dbbackup
post_dbbackup $db
}
check_availability() {
### Set the Database Type
if var_false "${SKIP_AVAILABILITY_CHECK}" ; then
case "$dbtype" in
"couch" )
counter=0
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${DB_HOST}:${DB_PORT})
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
print_warn "CouchDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"influx" )
counter=0
case "${INFLUX_VERSION,,}" in
1 )
while ! (nc -z ${DB_HOST#*//} ${DB_PORT}) ; do
sleep 5
(( counter+=5 ))
print_warn "InfluxDB Host '${DB_HOST#*//}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
2 )
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${DB_HOST}:${DB_PORT}/health)
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
print_warn "InfluxDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
esac
;;
"mongo" )
if [ -n "${MONGO_CUSTOM_URI}" ] ; then
print_debug "Skipping Connectivity Check"
else
counter=0
while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
sleep 5
(( counter+=5 ))
print_warn "Mongo Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
fi
;;
"mysql" )
counter=0
export MYSQL_PWD=${DB_PASS}
while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
sleep 5
(( counter+=5 ))
print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
done
;;
"mssql" )
counter=0
while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
sleep 5
(( counter+=5 ))
print_warn "MSSQL Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"pgsql" )
counter=0
export PGPASSWORD=${DB_PASS}
until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
do
sleep 5
(( counter+=5 ))
print_warn "Postgres Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"redis" )
counter=0
while ! (nc -z "${DB_HOST}" "${DB_PORT}") ; do
sleep 5
(( counter+=5 ))
print_warn "Redis Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"sqlite3" )
if [[ ! -e "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' does not exist."
exit_code=2
exit $exit_code
elif [[ ! -f "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' is not a file."
exit_code=2
exit $exit_code
elif [[ ! -r "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' is not readable."
exit_code=2
exit $exit_code
fi
;;
esac
fi
}
check_exit_code() {
print_debug "DB Backup Exit Code is ${exit_code}"
case "${exit_code}" in
0 )
print_info "DB Backup of '${1}' completed successfully"
;;
* )
print_error "DB Backup of '${1}' reported errors"
master_exit_code=1
;;
esac
}
cleanup_old_data() {
if [ -n "${DB_CLEANUP_TIME}" ]; then
if [ "${master_exit_code}" != 1 ]; then
case "${BACKUP_LOCATION,,}" in
"file" | "filesystem" )
print_info "Cleaning up old backups on filesystem"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
if [ "${BACKUP_LOCATION,,}" = "blobxfer" ] ; then
print_info "Syncing changes via blobxfer"
silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} --delete --delete-only
fi
;;
"s3" | "minio" )
print_info "Cleaning up old backups on S3 storage"
aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | grep " PRE " -v | while read -r s3_file; do
s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
s3_createdate=$(date -d "$s3_createdate" "+%s")
s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
if [[ $s3_createdate -le $s3_olderthan ]] ; then
s3_filename=$(echo $s3_file | awk {'print $4'})
if [ "$s3_filename" != "" ] ; then
print_debug "Deleting $s3_filename"
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
fi
done
;;
esac
else
print_error "Skipping Cleaning up old backups because there were errors in backing up"
fi
fi
}
compression() {
if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
PARALLEL_COMPRESSION_THREADS=1
fi
case "${COMPRESSION,,}" in
gz* )
if var_true "${GZ_RSYNCABLE}" ; then
gz_rsyncable=--rsyncable
fi
compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} ${gz_rsyncable}"
compression_type="gzip"
extension=".gz"
dir_compress_cmd=${compress_cmd}
target_dir=${target}
target=${target}.gz
;;
bz* )
compress_cmd="pbzip2 -q -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
compression_type="bzip2"
dir_compress_cmd=${compress_cmd}
extension=".bz2"
target_dir=${target}
target=${target}.bz2
;;
xz* )
compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compression_type="xzip"
dir_compress_cmd=${compress_cmd}
extension=".xz"
target_dir=${target}
target=${target}.xz
;;
zst* )
compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
compression_type="zstd"
dir_compress_cmd=${compress_cmd}
extension=".zst"
target_dir=${target}
target=${target}.zst
;;
"none" | "false")
compress_cmd="cat "
compression_type="none"
dir_compress_cmd="cat "
target_dir=${target}
;;
esac
case "${CONTAINER_LOG_LEVEL,,}" in
"debug" )
if [ "${compression_type}" = "none" ] ; then
compression_string="with '${PARALLEL_COMPRESSION_THREADS}' threads"
else
compression_string="and compressing with '${compression_type}:${COMPRESSION_LEVEL}' with '${PARALLEL_COMPRESSION_THREADS}' threads"
fi
;;
* )
if [ "${compression_type}" != "none" ] ; then
compression_string="and compressing with '${compression_type}'"
fi
;;
esac
}
create_archive() {
if [ "${exit_code}" = "0" ] ; then
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
else
print_error "Skipping creating archive file because backup did not complete successfully"
fi
}
generate_checksum() {
if var_true "${ENABLE_CHECKSUM}" ; then
if [ "${exit_code}" = "0" ] ; then
case "${CHECKSUM,,}" in
"md5" )
checksum_command="md5sum"
checksum_extension="md5"
;;
"sha1" )
checksum_command="sha1sum"
checksum_extension="sha1"
;;
esac
print_notice "Generating ${checksum_extension^^} for '${target}'"
cd "${TEMP_LOCATION}"
${checksum_command} "${target}" > "${target}"."${checksum_extension}"
checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
else
print_error "Skipping Checksum creation because backup did not complete successfully"
fi
fi
}
move_dbbackup() {
if [ "${exit_code}" = "0" ] ; then
dbbackup_size="$(stat -c%s "${TEMP_LOCATION}"/"${target}")"
dbbackup_date="$(date -r "${TEMP_LOCATION}"/"${target}" +'%s')"
case "${SIZE_VALUE,,}" in
"b" | "bytes" )
SIZE_VALUE=1
;;
"[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
SIZE_VALUE="-h"
;;
*)
SIZE_VALUE=1
;;
esac
if [ "$SIZE_VALUE" = "1" ] ; then
filesize=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
print_notice "Backup of ${target} created with the size of ${filesize} bytes"
else
filesize=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
print_notice "Backup of ${target} created with the size of ${filesize}"
fi
case "${BACKUP_LOCATION,,}" in
"file" | "filesystem" )
print_debug "Moving backup to filesystem"
mkdir -p "${DB_DUMP_TARGET}"
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
;;
"s3" | "minio" )
print_debug "Moving backup to S3 Bucket"
export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
export AWS_DEFAULT_REGION=${S3_REGION}
if [ -f "${S3_CERT_CA_FILE}" ] ; then
print_debug "Using Custom CA for S3 Backups"
s3_ca_cert="--ca-bundle ${S3_CERT_CA_FILE}"
fi
if var_true "${S3_CERT_SKIP_VERIFY}" ; then
print_debug "Skipping SSL verification for HTTPS S3 Hosts"
s3_ssl="--no-verify-ssl"
fi
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
if var_true "${ENABLE_CHECKSUM}" ; then
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
"blobxfer" )
print_info "Moving backup to S3 Bucket with blobxfer"
mkdir -p "${DB_DUMP_TARGET}"
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET}
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
esac
else
print_error "Skipping moving DB Backup to final location because backup did not complete successfully"
fi
rm -rf "${TEMP_LOCATION}"/*
}
prepare_dbbackup() {
dbbackup_start_time=$(date +"%s")
now=$(date +"%Y%m%d-%H%M%S")
now_time=$(date +"%H:%M:%S")
now_date=$(date +"%Y-%m-%d")
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
}
pre_dbbackup() {
### Pre Script Support
if [ -n "${PRE_SCRIPT}" ] ; then
if var_true "${PRE_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${PRE_SCRIPT}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
else
if [ -x "${PRE_SCRIPT}" ] ; then
print_notice "Found PRE_SCRIPT environment variable. Executing '${PRE_SCRIPT}"
eval "${PRE_SCRIPT}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
else
print_error "Can't execute PRE_SCRIPT environment variable '${PRE_SCRIPT}' as its filesystem bit is not executible!"
fi
fi
fi
### Pre Backup Custom Script Support
if [ -d "/assets/custom-scripts/pre" ] && dir_notempty "/assets/custom-scripts/pre" ; then
print_warning "Found Custom Post Scripts in /assets/custom-scripts/pre - Automatically moving them to '${SCRIPT_LOCATION_PRE}'"
mkdir -p "${SCRIPT_LOCATION_PRE}"
silent cp /assets/custom-scripts/pre/* "${SCRIPT_LOCATION_PRE}"
fi
if [ -d "${SCRIPT_LOCATION_PRE}" ] && dir_notempty "${SCRIPT_LOCATION_PRE}" ; then
for f in $(find ${SCRIPT_LOCATION_PRE} -name \*.sh -type f); do
if var_true "${PRE_SCRIPT_SKIP_X_VERIFY}" ; then
${f} "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
else
if [ -x "${f}" ] ; then
print_notice "Executing pre backup custom script : '${f}'"
## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
${f} "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
else
print_error "Can't run pre backup custom script: '${f}' as its filesystem bit is not executible!"
fi
fi
done
fi
}
post_dbbackup() {
dbbackup_finish_time=$(date +"%s")
dbbackup_total_time=$(echo $((dbbackup_finish_time-dbbackup_start_time)))
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_notice "Sending Backup Statistics to Zabbix"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "${dbbackup_size}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "${dbbackup_date}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
if [ "$?" != "0" ] ; then print_error "Error sending statistics, consider disabling with 'CONTAINER_ENABLE_MONITORING=FALSE'" ; fi
fi
### Post Script Support
if [ -n "${POST_SCRIPT}" ] ; then
if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
else
if [ -x "${POST_SCRIPT}" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
else
print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
fi
fi
fi
### Post Backup Custom Script Support
if [ -d "/assets/custom-scripts/" ] && dir_notempty "/assets/custom-scripts" ; then
print_warning "Found Custom Post Scripts in /assets/custom-scripts/ - Automatically moving them to '${SCRIPT_LOCATION_POST}'"
mkdir -p "${SCRIPT_LOCATION_POST}"
silent cp /assets/custom-scripts/* "${SCRIPT_LOCATION_POST}"
fi
if [ -d "${SCRIPT_LOCATION_POST}" ] && dir_notempty "${SCRIPT_LOCATION_POST}" ; then
for f in $(find ${SCRIPT_LOCATION_POST} -name \*.sh -type f); do
if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
else
if [ -x "${f}" ] ; then
print_notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
else
print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
fi
fi
done
fi
print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
unset s3_ssl
unset s3_ca_cert
}
sanity_test() {
sanity_var DB_TYPE "Database Type"
sanity_var DB_HOST "Database Host"
case "${DB_TYPE,,}" in
"mysql" | "mariadb" )
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
postgres* | "pgsql" )
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
esac
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
sanity_var S3_BUCKET "S3 Bucket"
sanity_var S3_PATH "S3 Path"
sanity_var S3_REGION "S3 Region"
file_env 'S3_KEY_ID'
file_env 'S3_KEY_SECRET'
fi
}
setup_mode() {
if [ "${MODE,,}" = "auto" ] || [ ${MODE,,} = "default" ] ; then
print_debug "Running in Auto / Default Mode - Letting Image control scheduling"
else
print_info "Running in Manual mode - Execute 'backup_now' or '/etc/services.available/10-db-backup/run' to perform a manual backup"
service_stop 10-db-backup
if var_true "${MANUAL_RUN_FOREVER}" ; then
mkdir -p /etc/services.d/99-run_forever
cat <<EOF > /etc/services.d/99-run_forever/run
#!/bin/bash
while true
do
sleep 86400
done
EOF
chmod +x /etc/services.d/99-run_forever/run
else
if var_true "${CONTAINER_ENABLE_SCHEDULING}" ; then
print_error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_SCHEDULING=TRUE'"
exit 1
fi
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_MONITORING=TRUE'"
exit 1
fi
if var_true "${CONTAINER_ENABLE_LOGSHIPPING}" ; then
print_error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_LOGSHIPPING=TRUE'"
exit 1
fi
fi
fi
}


@@ -1,4 +0,0 @@
#!/usr/bin/with-contenv bash
pkill bash


@@ -0,0 +1,14 @@
#!/command/with-contenv bash
source /assets/functions/00-container
prepare_service single
prepare_service 03-monitoring
PROCESS_NAME="db-backup"
output_off
bootstrap_variables
sanity_test
setup_mode
create_zabbix dbbackup
liftoff


@@ -1,449 +1,41 @@
#!/usr/bin/with-contenv bash
#!/command/with-contenv bash
source /assets/functions/00-container
source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
PROCESS_NAME="db-backup"
date >/dev/null
bootstrap_variables
if [ "$1" != "NOW" ]; then
sleep 10
fi
### Sanity Test
sanity_var DB_TYPE "Database Type"
sanity_var DB_HOST "Database Host"
### Set the Database Type
dbtype=${DB_TYPE}
case "$dbtype" in
"couch" | "couchdb" | "COUCH" | "COUCHDB" )
dbtype=couch
dbport=${DB_PORT:-5984}
file_env 'DB_USER'
file_env 'DB_PASS'
;;
"influx" | "influxdb" | "INFLUX" | "INFLUXDB" )
dbtype=influx
dbport=${DB_PORT:-8088}
file_env 'DB_USER'
file_env 'DB_PASS'
;;
"mongo" | "mongodb" | "MONGO" | "MONGODB" )
dbtype=mongo
dbport=${DB_PORT:-27017}
[[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
;;
"mysql" | "MYSQL" | "mariadb" | "MARIADB")
dbtype=mysql
dbport=${DB_PORT:-3306}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
;;
"mssql" | "MSSQL" | "microsoftsql" | "MICROSOFTSQL")
dbtype=mssql
dbport=${DB_PORT:-1433}
;;
"postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" )
dbtype=pgsql
dbport=${DB_PORT:-5432}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
;;
"redis" | "REDIS" )
dbtype=redis
dbport=${DB_PORT:-6379}
[[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
;;
esac
### Set Defaults
BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
COMPRESSION=${COMPRESSION:-GZ}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
DB_DUMP_TARGET=${DB_DUMP_TARGET:-/backup}
dbhost=${DB_HOST}
dbname=${DB_NAME}
dbpass=${DB_PASS}
dbuser=${DB_USER}
MD5=${MD5:-TRUE}
PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SPLIT_DB=${SPLIT_DB:-FALSE}
tmpdir=/tmp/backups
if [ "$BACKUP_TYPE" = "S3" ] || [ "$BACKUP_TYPE" = "s3" ] || [ "$BACKUP_TYPE" = "MINIO" ] || [ "$BACKUP_TYPE" = "minio" ] ; then
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
sanity_var S3_HOST "S3 Host"
sanity_var S3_BUCKET "S3 Bucket"
sanity_var S3_KEY_ID "S3 Key ID"
sanity_var S3_KEY_SECRET "S3 Key Secret"
sanity_var S3_URI_STYLE "S3 URI Style (Virtualhost or Path)"
sanity_var S3_PATH "S3 Path"
file_env 'S3_KEY_ID'
file_env 'S3_KEY_SECRET'
fi
if [ "$1" = "NOW" ]; then
if [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
DB_DUMP_BEGIN=+0
MANUAL=TRUE
fi
### Set Compression Options
if var_true "$PARALLEL_COMPRESSION" ; then
bzip="pbzip2 -${COMPRESSION_LEVEL}"
gzip="pigz -${COMPRESSION_LEVEL}"
xzip="pixz -${COMPRESSION_LEVEL}"
zstd="zstd --rm -${COMPRESSION_LEVEL}"
manual=TRUE
print_debug "Detected Manual Mode"
else
bzip="bzip2 -${COMPRESSION_LEVEL}"
gzip="gzip -${COMPRESSION_LEVEL}"
xzip="xz -${COMPRESSION_LEVEL} "
zstd="zstd --rm -${COMPRESSION_LEVEL}"
fi
### Set the Database Authentication Details
case "$dbtype" in
"mongo" )
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
;;
"mysql" )
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
;;
"postgres" )
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
;;
"redis" )
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
;;
esac
### Functions
backup_couch() {
target=couch_${dbname}_${dbhost}_${now}.txt
compression
curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${dumpoutput} | $dumpoutput > ${tmpdir}/${target}
exit_code=$?
generate_md5
move_backup
}
backup_influx() {
if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
:
else
print_notice "Compressing InfluxDB backup with gzip"
influx_compression="-portable"
fi
for DB in $DB_NAME; do
target=influx_${DB}_${dbhost}_${now}
influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${tmpdir}/${target}
exit_code=$?
generate_md5
move_backup
done
}
backup_mongo() {
if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
target=${dbtype}_${dbname}_${dbhost}_${now}.archive
else
print_notice "Compressing MongoDB backup with gzip"
target=${dbtype}_${dbname}_${dbhost}_${now}.archive.gz
mongo_compression="--gzip"
fi
mongodump --archive=${tmpdir}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
exit_code=$?
cd ${tmpdir}
generate_md5
move_backup
}
backup_mssql() {
target=mssql_${dbname}_${dbhost}_${now}.bak
/opt/mssql-tools/bin/sqlcmd -C -S ${dbhost},${dbport} -U ${dbuser} -P ${dbpass} -Q "BACKUP DATABASE [${dbname}] TO DISK = N'${tmpdir}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
generate_md5
move_backup
}
backup_mysql() {
if var_true "$SPLIT_DB" ; then
DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema)
for db in $DATABASES; do
if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
print_notice "Dumping MariaDB database: $db"
target=mysql_${db}_${dbhost}_${now}.sql
compression
mysqldump --max-allowed-packet=512M -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} --databases $db | $dumpoutput > ${tmpdir}/${target}
exit_code=$?
generate_md5
move_backup
fi
done
else
compression
mysqldump --max-allowed-packet=512M -A -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
exit_code=$?
generate_md5
move_backup
fi
}
backup_pgsql() {
if var_true "$SPLIT_DB" ; then
export PGPASSWORD=${dbpass}
DATABASES=$(psql -h $dbhost -U $dbuser -p ${dbport} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
for db in $DATABASES; do
print_info "Dumping database: $db"
target=pgsql_${db}_${dbhost}_${now}.sql
compression
pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
exit_code=$?
generate_md5
move_backup
done
else
export PGPASSWORD=${dbpass}
compression
pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
exit_code=$?
generate_md5
move_backup
fi
}
backup_redis() {
target=redis_${dbname}_${dbhost}_${now}.rdb
print_info "Dumping Redis - Flushing Redis Cache First"
echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${tmpdir}/${target} ${EXTRA_OPTS}
sleep 10
try=5
while [ $try -gt 0 ] ; do
saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
print_info "Redis Backup Complete"
fi
try=$((try - 1))
print_info "Redis Busy - Waiting and retrying in 5 seconds"
sleep 5
done
generate_md5
compression
move_backup
}
check_availability() {
### Set the Database Type
case "$dbtype" in
"couch" )
COUNTER=0
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
(( COUNTER+=5 ))
print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"influx" )
COUNTER=0
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
(( COUNTER+=5 ))
print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"mongo" )
COUNTER=0
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
(( COUNTER+=5 ))
print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"mysql" )
COUNTER=0
while true; do
mysqlcmd="mysql -u${dbuser} -P ${dbport} -h ${dbhost} -p${dbpass}"
out="$($mysqlcmd -e "SELECT COUNT(*) FROM information_schema.FILES;" 2>&1)"
if echo "$out" | grep -qE "COUNT|Enter" ; then
break
fi
print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
sleep 5
(( COUNTER+=5 ))
done
;;
"mssql" )
COUNTER=0
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
(( COUNTER+=5 ))
print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"pgsql" )
COUNTER=0
export PGPASSWORD=${dbpass}
until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
do
sleep 5
(( COUNTER+=5 ))
print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"redis" )
COUNTER=0
while ! (nc -z "${dbhost}" "${dbport}") ; do
sleep 5
(( COUNTER+=5 ))
print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
esac
}
compression() {
case "$COMPRESSION" in
"GZ" | "gz" | "gzip" | "GZIP")
print_notice "Compressing backup with gzip"
target=${target}.gz
dumpoutput="$gzip "
;;
"BZ" | "bz" | "bzip2" | "BZIP2" | "bzip" | "BZIP" | "bz2" | "BZ2")
print_notice "Compressing backup with bzip2"
target=${target}.bz2
dumpoutput="$bzip "
;;
"XZ" | "xz" | "XZIP" | "xzip" )
print_notice "Compressing backup with xzip"
target=${target}.xz
dumpoutput="$xzip "
;;
"ZSTD" | "zstd" | "ZST" | "zst" )
print_notice "Compressing backup with zstd"
target=${target}.zst
dumpoutput="$zstd "
;;
"NONE" | "none" | "FALSE" | "false")
dumpoutput="cat "
;;
esac
}
generate_md5() {
if var_true "$MD5" ; then
print_notice "Generating MD5 for ${target}"
cd $tmpdir
md5sum "${target}" > "${target}".md5
MD5VALUE=$(md5sum "${target}" | awk '{ print $1}')
fi
}
move_backup() {
case "$SIZE_VALUE" in
"b" | "bytes" )
SIZE_VALUE=1
;;
"[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
SIZE_VALUE="-h"
;;
*)
SIZE_VALUE=1
;;
esac
if [ "$SIZE_VALUE" = "1" ] ; then
FILESIZE=$(stat -c%s "${tmpdir}/${target}")
print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
else
FILESIZE=$(du -h "${tmpdir}/${target}" | awk '{ print $1}')
print_notice "Backup of ${target} created with the size of ${FILESIZE}"
fi
case "${BACKUP_LOCATION}" in
"FILE" | "file" | "filesystem" | "FILESYSTEM" )
mkdir -p "${DB_DUMP_TARGET}"
mv ${tmpdir}/*.md5 "${DB_DUMP_TARGET}"/
mv ${tmpdir}/"${target}" "${DB_DUMP_TARGET}"/"${target}"
;;
"S3" | "s3" | "MINIO" | "minio" )
s3_content_type="application/octet-stream"
if [ "$S3_URI_STYLE" = "VIRTUALHOST" ] || [ "$S3_URI_STYLE" = "VHOST" ] || [ "$S3_URI_STYLE" = "virtualhost" ] || [ "$S3_URI_STYLE" = "vhost" ] ; then
s3_url="${S3_BUCKET}.${S3_HOST}"
else
s3_url="${S3_HOST}/${S3_BUCKET}"
fi
if var_true "$MD5" ; then
s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
s3_md5="$(libressl md5 -binary < "${tmpdir}/${target}.md5" | base64)"
sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${target}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
print_debug "Uploading ${target}.md5 to S3"
curl -T "${tmpdir}/${target}.md5" "${S3_PROTOCOL}"://"${s3_url}"/"${S3_PATH}"/"${target}".md5 \
-H "Date: $date" \
-H "Authorization: AWS ${S3_KEY_ID}:$sig" \
-H "Content-Type: ${s3_content_type}" \
-H "Content-MD5: ${s3_md5}"
fi
s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
s3_md5="$(libressl md5 -binary < "${tmpdir}/${target}" | base64)"
sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${target}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
print_debug "Uploading ${target} to S3"
curl -T ${tmpdir}/"${target}" "${S3_PROTOCOL}"://"${s3_url}"/"${S3_PATH}"/"${target}" \
-H "Date: $s3_date" \
-H "Authorization: AWS ${S3_KEY_ID}:$sig" \
-H "Content-Type: ${s3_content_type}" \
-H "Content-MD5: ${s3_md5}"
rm -rf ${tmpdir}/*.md5
rm -rf ${tmpdir}/"${target}"
;;
esac
}
### Container Startup
print_debug "Backup routines Initialized on $(date)"
### Wait for Next time to start backup
if [ "$1" != "NOW" ]; then
sleep 5
current_time=$(date +"%s")
today=$(date +"%Y%m%d")
# DB_DUMP_BEGIN is either "+N" (N minutes from now) or an absolute HHMM time of day
if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
waittime=$(( ${BASH_REMATCH[1]} * 60 ))
target_time=$(($current_time + $waittime))
else
target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
if [[ $target_time -lt $current_time ]]; then
target_time=$(($target_time + 24*60*60))
fi
waittime=$(($target_time - $current_time))
fi
print_debug "Wait Time: ${waittime} Target time: ${target_time} Current Time: ${current_time}"
print_notice "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
sleep $waittime
fi
### Commence Backup
while true; do
# make sure the working directory exists
mkdir -p $tmpdir
backup_start_time=$(date +"%s")
print_debug "Backup routines started time: $(date +'%Y-%m-%d %T %Z')"
### Define Target name
now=$(date +"%Y%m%d-%H%M%S")
now_time=$(date +"%H:%M:%S")
now_date=$(date +"%Y-%m-%d")
target=${dbtype}_${dbname}_${dbhost}_${now}.sql
### Take a Dump
case "${dbtype,,}" in
"couch" )
check_availability
backup_couch
;;
"influx" )
check_availability
backup_influx
;;
"mongo" )
check_availability
backup_mongo
;;
"mssql" )
check_availability
backup_mssql
;;
"mysql" )
check_availability
backup_mysql
;;
"pgsql" )
check_availability
backup_pgsql
;;
"redis" )
check_availability
backup_redis
;;
"sqlite3" )
check_availability
backup_sqlite3
;;
esac
### Zabbix
if var_true "$ENABLE_ZABBIX" ; then
print_notice "Sending Backup Statistics to Zabbix"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
fi
backup_finish_time=$(date +"%s")
backup_total_time=$((backup_finish_time-backup_start_time))
if [ -z "$master_exit_code" ] ; then master_exit_code="0" ; fi
print_info "Backup routines finish time: $(date -d @${backup_finish_time} +"%Y-%m-%d %T %Z") with overall exit code ${master_exit_code}"
print_notice "Backup routines time taken: $(echo ${backup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
### Automatic Cleanup
if [[ -n "$DB_CLEANUP_TIME" ]]; then
print_notice "Cleaning up old backups"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -type f -exec rm {} \;
fi
if [ -n "$POST_SCRIPT" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing"
eval "${POST_SCRIPT}"
fi
### Post Backup Custom Script Support
if [ -d /assets/custom-scripts/ ] ; then
print_notice "Found Custom Filesystem Scripts to Execute"
for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
print_notice "Running Script ${f}"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME DATE TIME BACKUP_FILENAME FILESIZE MD5_VALUE
chmod +x "${f}"
${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${now_date}" "${now_time}" "${target}" "${FILESIZE}" "${MD5VALUE}"
done
fi
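## A minimal custom script (hypothetical example) saved as /assets/custom-scripts/notify.sh could read:
##   #!/bin/bash
##   # $1=EXIT_CODE $2=DB_TYPE $3=DB_HOST $4=DB_NAME $5=DATE $6=TIME $7=FILENAME $8=FILESIZE $9=MD5_VALUE
##   [ "${1}" != "0" ] && echo "Backup of ${4} on ${3} failed with exit code ${1}"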
### Go back to Sleep until next Backup time
if var_true "${manual}" ; then
print_debug "Exiting due to manual mode"
exit ${master_exit_code};
else
print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "
sleep $(($DB_DUMP_FREQ*60-backup_total_time))
fi
done

#!/command/with-contenv bash
echo '** Performing Manual Backup'
/etc/services.available/10-db-backup/run manual

install/usr/local/bin/restore Executable file
#!/command/with-contenv /bin/bash
source /assets/functions/00-container
source /assets/defaults/10-db-backup
source /assets/functions/10-db-backup
PROCESS_NAME="db-backup-restore"
oldcolumns=$COLUMNS
########################################################################################
### System Functions ###
########################################################################################
### Colours
# Foreground (Text) Colors
cdgy="\e[90m" # Color Dark Gray
clg="\e[92m" # Color Light Green
clm="\e[95m" # Color Light Magenta
cwh="\e[97m" # Color White
# Turns off all formatting
coff="\e[0m" # Color Off
# Background Colors
bdr="\e[41m" # Background Color Dark Red
bdg="\e[42m" # Background Color Dark Green
bdb="\e[44m" # Background Color Dark Blue
bdm="\e[45m" # Background Color Dark Magenta
bdgy="\e[100m" # Background Color Dark Gray
blr="\e[101m" # Background Color Light Red
boff="\e[49m" # Background Color Off
bootstrap_variables
if [ -z "${1}" ] ; then
interactive_mode=true
else
case "$1" in
"-h" )
cat <<EOF
${IMAGE_NAME} Restore Tool
(c) 2022 Dave Conroy (https://github.com/tiredofit)
This script will assist you in recovering databases taken by the Docker image.
You will be presented with a series of menus allowing you to choose:
- What file to restore
- What type of DB Backup
- What Host to restore to
- What Database Name to restore to
- What Database User to use
- What Database Password to use
- What Database Port to use
The image will try to do autodetection based on the filename for the type, hostname, and database name.
The image will also allow you to use environment variables or Docker secrets used to backup the images
The script can be executed non-interactively by using the following syntax:
$(basename "$0") <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>
If you only enter some of the arguments you will be prompted to fill them in.
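Example (all values hypothetical):
$(basename "$0") /backup/mysql_mydb_dbhost_20220101-000000.sql.gz mysql dbhost mydb dbuser dbpass 3306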
Other arguments
-h This help screen
EOF
exit 0
;;
"-i" )
echo "interactive mode"
interactive_mode=true
;;
* )
interactive_mode=false
;;
esac
fi
get_filename() {
COLUMNS=12
prompt="Please select a file to restore:"
options=( $(find "${DB_DUMP_TARGET}" -maxdepth 1 -type f -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
PS3="$prompt "
select opt in "${options[@]}" "Custom" "Quit" ; do
if (( REPLY == 2 + ${#options[@]} )) ; then
echo "Bye!"
exit 2
elif (( REPLY == 1 + ${#options[@]} )) ; then
while [ ! -f "${opt}" ] ; do
read -p "What path and filename to restore: " opt
if [ ! -f "${opt}" ] ; then
print_error "File not found. Please retry.."
fi
done
break
elif (( REPLY > 0 && REPLY <= ${#options[@]} )) ; then
break
else
echo "Invalid option. Try another one."
fi
done
COLUMNS=$oldcolumns
r_filename=${opt}
}
get_dbhost() {
p_dbhost=$(basename -- "${r_filename}" | cut -d _ -f 3)
if [ -n "${p_dbhost}" ]; then
parsed_host=true
print_debug "Parsed DBHost: ${p_dbhost}"
fi
if [ -z "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 1 - No Env, No Parsed Filename"
q_dbhost_variant=1
q_dbhost_menu=$(cat <<EOF
EOF
)
fi
if [ -n "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 2 - Env, No Parsed Filename"
q_dbhost_variant=2
q_dbhost_menu=$(cat <<EOF
C ) Custom Entered Hostname
E ) Environment Variable DB_HOST: '${DB_HOST}'
EOF
)
fi
if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
print_debug "Parsed DBHostpho Variant: 3 - No Env, Parsed Filename"
q_dbhost_variant=3
q_dbhost_menu=$(cat <<EOF
C ) Custom Entered Hostname
F ) Parsed Filename Host: '${p_dbhost}'
EOF
)
fi
if [ -n "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 4 - Env, Parsed Filename"
q_dbhost_variant=4
q_dbhost_menu=$(cat <<EOF
C ) Custom Entered Hostname
E ) Environment Variable DB_HOST: '${DB_HOST}'
F ) Parsed Filename Host: '${p_dbhost}'
EOF
)
fi
cat << EOF
What Hostname do you wish to restore to:
${q_dbhost_menu}
Q ) Quit
EOF
case "${q_dbhost_variant}" in
1 )
counter=1
q_dbhost=" "
while [[ $q_dbhost = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "Hostnames can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Host do you want to restore to:\ ${coff})" q_dbhost
(( counter+=1 ))
done
r_dbhost=${q_dbhost}
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
case "${q_dbhost_menu,,}" in
c* )
counter=1
q_dbhost=" "
while [[ $q_dbhost = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "Hostnames can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Host do you want to restore to:\ ${coff})" q_dbhost
(( counter+=1 ))
done
r_dbhost=${q_dbhost}
break
;;
e* | "" )
r_dbhost=${DB_HOST}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
3 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
case "${q_dbhost_menu,,}" in
c* )
counter=1
q_dbhost=" "
while [[ $q_dbhost = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "Hostnames can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Host do you want to restore to:\ ${coff})" q_dbhost
(( counter+=1 ))
done
r_dbhost=${q_dbhost}
break
;;
f* | "" )
r_dbhost=${p_dbhost}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
4 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
case "${q_dbhost_menu,,}" in
c* )
counter=1
q_dbhost=" "
while [[ $q_dbhost = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "Hostnames can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Host do you want to restore to:\ ${coff})" q_dbhost
(( counter+=1 ))
done
r_dbhost=${q_dbhost}
break
;;
e* | "" )
r_dbhost=${DB_HOST}
break
;;
f* )
r_dbhost=${p_dbhost}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
get_dbtype() {
p_dbtype=$(basename -- "${r_filename}" | cut -d _ -f 1)
case "${p_dbtype}" in
mongo* )
parsed_type=true
print_debug "Parsed DBType: MongoDB"
;;
mariadb | mysql )
parsed_type=true
print_debug "Parsed DBType: MariaDB/MySQL"
;;
pgsql | postgres* )
parsed_type=true
print_debug "Parsed DBType: Postgresql"
;;
* )
print_debug "Parsed DBType: UNKNOWN"
;;
esac
if [ -z "${DB_TYPE}" ] && [ -z "${parsed_type}" ]; then
print_debug "Parsed DBType Variant: 1 - No Env, No Parsed Filename"
q_dbtype_variant=1
q_dbtype_menu=$(cat <<EOF
EOF
)
fi
if [ -n "${DB_TYPE}" ] && [ -z "${parsed_type}" ]; then
print_debug "Parsed DBType Variant: 2 - Env, No Parsed Filename"
q_dbtype_variant=2
q_dbtype_menu=$(cat <<EOF
E ) Environment Variable DB_TYPE: '${DB_TYPE}'
EOF
)
fi
if [ -z "${DB_TYPE}" ] && [ -n "${parsed_type}" ]; then
print_debug "Parsed DBType Variant: 3 - No Env, Parsed Filename"
q_dbtype_variant=3
q_dbtype_menu=$(cat <<EOF
F ) Parsed Filename Type: '${p_dbtype}'
EOF
)
fi
if [ -n "${DB_TYPE}" ] && [ -n "${parsed_type}" ]; then
print_debug "Parsed DBType Variant: 4 - Env, Parsed Filename"
q_dbtype_variant=4
q_dbtype_menu=$(cat <<EOF
E ) Environment Variable DB_TYPE: '${DB_TYPE}'
F ) Parsed Filename Type: '${p_dbtype}'
EOF
)
fi
cat << EOF
What Database Type are you looking to restore?
${q_dbtype_menu}
M ) MySQL / MariaDB
O ) MongoDB
P ) Postgresql
Q ) Quit
EOF
case "${q_dbtype_variant}" in
1 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}M${cdgy}\) | \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
m* )
r_dbtype=mysql
break
;;
o* )
r_dbtype=mongo
break
;;
p* )
r_dbtype=postgresql
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
e* | "" )
r_dbtype=${DB_TYPE}
break
;;
m* )
r_dbtype=mysql
break
;;
o* )
r_dbtype=mongo
break
;;
p* )
r_dbtype=postgresql
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
3 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
f* | "" )
r_dbtype=${p_dbtype}
break
;;
m* )
r_dbtype=mysql
break
;;
o* )
r_dbtype=mongo
break
;;
p* )
r_dbtype=postgresql
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
4 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
e* | "" )
r_dbtype=${DB_TYPE}
break
;;
f* )
r_dbtype=${p_dbtype}
break
;;
m* )
r_dbtype=mysql
break
;;
o* )
r_dbtype=mongo
break
;;
p* )
r_dbtype=postgresql
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
get_dbname() {
p_dbname=$(basename -- "${r_filename}" | cut -d _ -f 2)
if [ -n "${p_dbname}" ]; then
parsed_name=true
print_debug "Parsed DBName: ${p_dbhost}"
fi
if [ -z "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename"
q_dbname_variant=1
q_dbname_menu=$(cat <<EOF
EOF
)
fi
if [ -n "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 2 - Env, No Parsed Filename"
q_dbname_variant=2
q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
E ) Environment Variable DB_NAME: '${DB_NAME}'
EOF
)
fi
if [ -z "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 3 - No Env, Parsed Filename"
q_dbname_variant=3
q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
F ) Parsed Filename DB Name: '${p_dbname}'
EOF
)
fi
if [ -n "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
print_debug "Parsed DBname Variant: 4 - Env, Parsed Filename"
q_dbname_variant=4
q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
E ) Environment Variable DB_NAME: '${DB_NAME}'
F ) Parsed Filename DB Name: '${p_dbname}'
EOF
)
fi
cat << EOF
What Database Name do you want to restore to?
${q_dbname_menu}
Q ) Quit
EOF
case "${q_dbname_variant}" in
1 )
counter=1
q_dbname=" "
while [[ $q_dbname = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Name do you want to restore to:\ ${coff})" q_dbname
(( counter+=1 ))
done
r_dbname=${q_dbname}
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
case "${q_dbname_menu,,}" in
c* )
counter=1
q_dbname=" "
while [[ $q_dbname = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Names can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
(( counter+=1 ))
done
r_dbname=${q_dbname}
break
;;
e* | "" )
r_dbname=${DB_NAME}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
3 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
case "${q_dbname_menu,,}" in
c* )
counter=1
q_dbname=" "
while [[ $q_dbname = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
(( counter+=1 ))
done
r_dbname=${q_dbname}
break
;;
f* | "" )
r_dbname=${p_dbname}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
4 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
case "${q_dbname_menu,,}" in
c* )
counter=1
q_dbname=" "
while [[ $q_dbname = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
(( counter+=1 ))
done
r_dbname=${q_dbname}
break
;;
e* | "" )
r_dbname=${DB_NAME}
break
;;
f* )
r_dbname=${p_dbname}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
get_dbport() {
if [ -z "${DB_PORT}" ] ; then
print_debug "Parsed DBPort Variant: 1 - No Env"
q_dbport_variant=1
q_dbport_menu=$(cat <<EOF
EOF
)
fi
if [ -n "${DB_PORT}" ] ; then
print_debug "Parsed DBPort Variant: 2 - Env"
q_dbport_variant=2
q_dbport_menu=$(cat <<EOF
C ) Custom Entered Database Port
E ) Environment Variable DB_PORT: '${DB_PORT}'
EOF
)
fi
cat << EOF
What Database Port do you wish to use? MySQL/MariaDB typically listens on port 3306, PostgreSQL on 5432, MongoDB on 27017.
${q_dbport_menu}
Q ) Quit
EOF
case "${q_dbport_variant}" in
1 )
counter=1
q_dbport=" "
q_dbportre='^[0-9]+$'
while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
(( counter+=1 ))
done
r_dbport=${q_dbport}
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
case "${q_dbport_menu,,}" in
c* )
counter=1
q_dbport=" "
q_dbportre='^[0-9]+$'
while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
(( counter+=1 ))
done
r_dbport=${q_dbport}
break
;;
e* | "" )
r_dbport=${DB_PORT}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
get_dbuser() {
if [ -z "${DB_USER}" ] ; then
print_debug "Parsed DBUser Variant: 1 - No Env"
q_dbuser_variant=1
q_dbuser_menu=$(cat <<EOF
EOF
)
fi
if [ -n "${DB_USER}" ] ; then
print_debug "Parsed DBUser Variant: 2 - Env"
q_dbuser_variant=2
q_dbuser_menu=$(cat <<EOF
C ) Custom Entered Database User
E ) Environment Variable DB_USER: '${DB_USER}'
EOF
)
fi
cat << EOF
What database user will be used for restore:
${q_dbuser_menu}
Q ) Quit
EOF
case "${q_dbuser_variant}" in
1 )
counter=1
q_dbuser=" "
while [[ $q_dbuser = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Usernames can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB User do you wish to use:\ ${coff})" q_dbuser
(( counter+=1 ))
done
r_dbuser=${q_dbuser}
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbuser_menu
case "${q_dbuser_menu,,}" in
c* )
counter=1
q_dbuser=" "
while [[ $q_dbuser = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Usernames can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB User do you wish to use:\ ${coff})" q_dbuser
(( counter+=1 ))
done
r_dbuser=${q_dbuser}
break
;;
e* | "" )
r_dbuser=${DB_USER}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
get_dbpass() {
if [ -z "${DB_PASS}" ] ; then
print_debug "Parsed DBPass Variant: 1 - No Env"
q_dbpass_variant=1
q_dbpass_menu=$(cat <<EOF
EOF
)
fi
if [ -n "${DB_PASS}" ] ; then
print_debug "Parsed DBPass Variant: 2 - Env"
q_dbpass_variant=2
q_dbpass_menu=$(cat <<EOF
C ) Custom Entered Database Password
E ) Environment Variable DB_PASS
EOF
)
fi
cat << EOF
What Database Password will be used to restore?
${q_dbpass_menu}
Q ) Quit
EOF
case "${q_dbpass_variant}" in
1 )
counter=1
q_dbpass=" "
while [[ $q_dbpass = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
(( counter+=1 ))
done
r_dbpass=${q_dbpass}
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbpass_menu
case "${q_dbpass_menu,,}" in
c* )
counter=1
q_dbpass=" "
while [[ $q_dbpass = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
(( counter+=1 ))
done
r_dbpass=${q_dbpass}
break
;;
e* | "" )
r_dbpass=${DB_PASS}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
#### SCRIPT START
cat << EOF
## ${IMAGE_NAME} Restore Script
## Visit ${IMAGE_REPO_URL}
## ####################################################
EOF
## Question Filename
if [ -n "${1}" ]; then
if [ ! -f "${1}" ]; then
get_filename
else
r_filename="${1}"
fi
else
get_filename
fi
print_debug "Filename to recover '${r_filename}'"
## Question Database Type
if [ -n "${2}" ]; then
r_dbtype="${2}"
else
get_dbtype
fi
print_debug "Database type '${r_dbtype}'"
## Question Database Host
if [ -n "${3}" ]; then
r_dbhost="${3}"
else
get_dbhost
fi
print_debug "Database Host '${r_dbhost}'"
## Question Database Name
if [ -n "${4}" ]; then
r_dbname="${4}"
else
get_dbname
fi
print_debug "Database Name '${r_dbname}'"
## Question Database User
if [ -n "${5}" ]; then
r_dbuser="${5}"
else
get_dbuser
fi
print_debug "Database User '${r_dbuser}'"
## Question Database Password
if [ -n "${6}" ]; then
r_dbpass="${6}"
else
get_dbpass
fi
print_debug "Database Pass '${r_dbpass}'"
## Question Database Port
if [ -n "${7}" ]; then
r_dbport="${7}"
else
get_dbport
fi
print_debug "Database Port '${r_dbport}'"
## Parse Extension
case "${r_filename##*.}" in
bz* )
decompress_cmd='bz'
print_debug "Detected 'bzip2' compression"
;;
gz* )
decompress_cmd="z"
print_debug "Detected 'gzip' compression"
;;
xz* )
decompress_cmd="xz"
print_debug "Detected 'xzip' compression"
;;
zst* )
decompress_cmd='zstd'
print_debug "Detected 'zstd' compression"
;;
sql )
print_debug "Detected No compression"
;;
* )
print_debug "Cannot tell what the extension is for compression"
;;
esac
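## The detected prefix is combined with 'cat' below (e.g. 'z' + 'cat' = 'zcat', 'zstd' + 'cat' = 'zstdcat')
## so the archive is streamed decompressed into the restore client; an empty prefix yields plain 'cat'.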
## Perform a restore
case "${r_dbtype}" in
mariadb | mysql )
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
pv ${r_filename} | ${decompress_cmd}cat | mysql -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${r_dbname}
exit_code=$?
;;
pgsql | postgres* )
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
export PGPASSWORD=${r_dbpass}
pv ${r_filename} | ${decompress_cmd}cat | psql -d ${r_dbname} -h ${r_dbhost} -p ${r_dbport} -U ${r_dbuser}
exit_code=$?
;;
mongo )
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
if [ "${ENABLE_COMPRESSION,,}" != "none" ] && [ "${ENABLE_COMPRESSION,,}" != "false" ] ; then
mongo_compression="--gzip"
fi
if [ -n "${r_dbuser}" ] ; then
mongo_user="-u=${r_dbuser}"
fi
if [ -n "${r_dbpass}" ] ; then
mongo_pass="-p=${r_dbpass}"
fi
mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
exit_code=$?
;;
* )
print_info "Unable to restore DB of type '${r_dbtype}'"
exit_code=3
;;
esac
print_debug "Exit code: ${exit_code}"
if [ "${exit_code}" = 0 ] ; then
print_info "Restore complete!"
else
print_error "Restore reported errors"
fi

{
"zabbix_export": {
"version": "6.0",
"date": "2022-03-18T13:32:12Z",
"groups": [
{
"uuid": "fa56524b5dbb4ec09d9777a6f7ccfbe4",
"name": "DB/Backup"
},
{
"uuid": "748ad4d098d447d492bb935c907f652f",
"name": "Templates/Databases"
}
],
"templates": [
{
"uuid": "5fc64d517afb4cc5bc09a3ef58b43ef7",
"template": "DB Backup",
"name": "DB Backup",
"description": "Template for Docker DB Backup Image\n\nMeant for use specifically with https://github.com/tiredofit/docker-db-backup\nLast tested with version 3.0.2",
"groups": [
{
"name": "DB/Backup"
},
{
"name": "Templates/Databases"
}
],
"items": [
{
"uuid": "72fd00fa2dd24e479f5affe03e8711d8",
"name": "DB Backup: Backup Duration",
"type": "TRAP",
"key": "dbbackup.backup_duration",
"delay": "0",
"history": "7d",
"units": "uptime",
"description": "How long the backup took",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "3549a2c9d56849babc6dc3c855484c1e",
"name": "DB Backup: Backup Time",
"type": "TRAP",
"key": "dbbackup.datetime",
"delay": "0",
"history": "7d",
"units": "unixtime",
"request_method": "POST",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"triggers": [
{
"uuid": "3ac1e074ffea46eb8002c9c08a85e7b4",
"expression": "nodata(/DB Backup/dbbackup.datetime,2d)=1",
"name": "DB-Backup: No backups detected in 2 days",
"priority": "DISASTER",
"manual_close": "YES"
},
{
"uuid": "b8b5933dfa1a488c9c37dd7f4784c1ff",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)=0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)<>0",
"name": "DB Backup: No Backups occurred in 2 days",
"priority": "AVERAGE"
},
{
"uuid": "35c5f420d0e142cc9601bae38decdc40",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)=0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)<>0",
"name": "DB Backup: No Backups occurred in 3 days",
"priority": "AVERAGE"
},
{
"uuid": "03c3719d82c241e886a0383c7d908a77",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)=0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)<>0",
"name": "DB Backup: No Backups occurred in 4 days",
"priority": "AVERAGE"
},
{
"uuid": "1634a03e44964e42b7e0101f5f68499c",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)=0",
"name": "DB Backup: No Backups occurred in 5 days or more",
"priority": "HIGH"
}
]
},
{
"uuid": "467dfec952b34f5aa4cc890b4351b62d",
"name": "DB Backup: Backup Size",
"type": "TRAP",
"key": "dbbackup.size",
"delay": "0",
"history": "7d",
"units": "B",
"request_method": "POST",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"triggers": [
{
"uuid": "a41eb49b8a3541afb6de247dca750e38",
"expression": "last(/DB Backup/dbbackup.size)/last(/DB Backup/dbbackup.size,#2)>1.2",
"name": "DB Backup: 20% Greater in Size",
"priority": "WARNING",
"manual_close": "YES"
},
{
"uuid": "422f66be5049403293f3d96fc53f20cd",
"expression": "last(/DB Backup/dbbackup.size)/last(/DB Backup/dbbackup.size,#2)<0.2",
"name": "DB Backup: 20% Smaller in Size",
"priority": "WARNING",
"manual_close": "YES"
},
{
"uuid": "d6d9d875b92f4d799d4bc89aabd4e90e",
"expression": "last(/DB Backup/dbbackup.size)<1K",
"name": "DB Backup: empty",
"priority": "HIGH"
}
]
},
{
"uuid": "a6b13e8b46a64abab64a4d44d620d272",
"name": "DB Backup: Last Backup Status",
"type": "TRAP",
"key": "dbbackup.status",
"delay": "0",
"history": "7d",
"description": "Maps Exit Codes received by backup applications",
"valuemap": {
"name": "DB Backup Status"
},
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
],
"triggers": [
{
"uuid": "23d71e356f96493180f02d4b84a79fd6",
"expression": "last(/DB Backup/dbbackup.status)=1",
"name": "DB Backup: Failed Backup Detected",
"priority": "HIGH",
"manual_close": "YES"
}
]
}
],
"tags": [
{
"tag": "Service",
"value": "Backup"
},
{
"tag": "Service",
"value": "Database"
}
],
"dashboards": [
{
"uuid": "90c81bb47184401ca9663626784a6f30",
"name": "DB Backup",
"pages": [
{
"widgets": [
{
"type": "GRAPH_CLASSIC",
"name": "Backup Size",
"width": "23",
"height": "5",
"fields": [
{
"type": "GRAPH",
"name": "graphid",
"value": {
"name": "DB Backup: Backup Size",
"host": "DB Backup"
}
}
]
}
]
}
]
}
],
"valuemaps": [
{
"uuid": "82f3a3d01b3c42b8942b59d2363724e0",
"name": "DB Backup Status",
"mappings": [
{
"value": "0",
"newvalue": "OK"
},
{
"type": "GREATER_OR_EQUAL",
"value": "1",
"newvalue": "FAIL"
}
]
}
]
}
],
"graphs": [
{
"uuid": "6e02c200b76046bab76062cd1ab086b2",
"name": "DB Backup: Backup Duration",
"graph_items": [
{
"color": "199C0D",
"item": {
"host": "DB Backup",
"key": "dbbackup.backup_duration"
}
}
]
},
{
"uuid": "b881ee18f05c4f4c835982c9dfbb55d6",
"name": "DB Backup: Backup Size",
"type": "STACKED",
"graph_items": [
{
"sortorder": "1",
"color": "1A7C11",
"item": {
"host": "DB Backup",
"key": "dbbackup.size"
}
}
]
}
]
}
}

<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>3.4</version>
<date>2018-02-02T19:03:49Z</date>
<groups>
<group>
<name>DB - Backup</name>
</group>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>Service - DB Backup</template>
<name>Service - DB Backup</name>
<description/>
<groups>
<group>
<name>DB - Backup</name>
</group>
<group>
<name>Templates</name>
</group>
</groups>
<applications>
<application>
<name>DB Backup</name>
</application>
</applications>
<items>
<item>
<name>Backup Time</name>
<type>2</type>
<snmp_community/>
<snmp_oid/>
<key>dbbackup.datetime</key>
<delay>0</delay>
<history>365d</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units>unixtime</units>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>DB Backup</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<master_item/>
</item>
<item>
<name>Backup Size</name>
<type>2</type>
<snmp_community/>
<snmp_oid/>
<key>dbbackup.size</key>
<delay>0</delay>
<history>365d</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units>byte</units>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>DB Backup</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<master_item/>
</item>
</items>
<discovery_rules/>
<httptests/>
<macros/>
<templates/>
<screens/>
</template>
</templates>
<triggers>
<trigger>
<expression>{Service - DB Backup:dbbackup.size.change()}&gt;20</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>DB Backup is 20% Greater in Size</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>2</priority>
<description/>
<type>0</type>
<manual_close>1</manual_close>
<dependencies/>
<tags/>
</trigger>
<trigger>
<expression>{Service - DB Backup:dbbackup.size.change()}&lt;20</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>DB Backup is 20% Smaller in Size</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>2</priority>
<description/>
<type>0</type>
<manual_close>1</manual_close>
<dependencies/>
<tags/>
</trigger>
<trigger>
<expression>{Service - DB Backup:dbbackup.size.last()}&lt;1K</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>DB Backup is empty</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>4</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger>
<trigger>
<expression>{Service - DB Backup:dbbackup.datetime.fuzzytime(172800)}=0 and {Service - DB Backup:dbbackup.datetime.fuzzytime(259200)}&lt;&gt;0 and {Service - DB Backup:dbbackup.datetime.fuzzytime(345600)}&lt;&gt;0 and {Service - DB Backup:dbbackup.datetime.fuzzytime(432800)}&lt;&gt;0</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>No Backups occurred in 2 days</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>3</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger>
<trigger>
<expression>{Service - DB Backup:dbbackup.datetime.fuzzytime(172800)}&lt;&gt;0 and {Service - DB Backup:dbbackup.datetime.fuzzytime(259200)}=0 and {Service - DB Backup:dbbackup.datetime.fuzzytime(345600)}&lt;&gt;0 and {Service - DB Backup:dbbackup.datetime.fuzzytime(432800)}&lt;&gt;0</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>No Backups occurred in 3 days</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>3</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger>
<trigger>
<expression>{Service - DB Backup:dbbackup.datetime.fuzzytime(172800)}&lt;&gt;0 and {Service - DB Backup:dbbackup.datetime.fuzzytime(259200)}&lt;&gt;0 and {Service - DB Backup:dbbackup.datetime.fuzzytime(345600)}=0 and {Service - DB Backup:dbbackup.datetime.fuzzytime(432800)}&lt;&gt;0</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>No Backups occurred in 4 days</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>3</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger>
<trigger>
<expression>{Service - DB Backup:dbbackup.datetime.fuzzytime(172800)}&lt;&gt;0 and {Service - DB Backup:dbbackup.datetime.fuzzytime(259200)}&lt;&gt;0 and {Service - DB Backup:dbbackup.datetime.fuzzytime(345600)}&lt;&gt;0 and {Service - DB Backup:dbbackup.datetime.fuzzytime(432800)}=0</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>No Backups occurred in 5 days or more</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>4</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger>
</triggers>
<graphs>
<graph>
<name>Backup Size</name>
<width>900</width>
<height>200</height>
<yaxismin>0.0000</yaxismin>
<yaxismax>100.0000</yaxismax>
<show_work_period>1</show_work_period>
<show_triggers>1</show_triggers>
<type>1</type>
<show_legend>1</show_legend>
<show_3d>0</show_3d>
<percent_left>0.0000</percent_left>
<percent_right>0.0000</percent_right>
<ymin_type_1>0</ymin_type_1>
<ymax_type_1>0</ymax_type_1>
<ymin_item_1>0</ymin_item_1>
<ymax_item_1>0</ymax_item_1>
<graph_items>
<graph_item>
<sortorder>0</sortorder>
<drawtype>0</drawtype>
<color>1A7C11</color>
<yaxisside>0</yaxisside>
<calc_fnc>2</calc_fnc>
<type>0</type>
<item>
<host>Service - DB Backup</host>
<key>dbbackup.size</key>
</item>
</graph_item>
</graph_items>
</graph>
</graphs>
</zabbix_export>

<?xml version="1.0" encoding="UTF-8"?>
<zabbix_export>
<version>3.4</version>
<date>2018-02-02T19:04:27Z</date>
<groups>
<group>
<name>Discovered Containers</name>
</group>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>Service - ICMP</template>
<name>Service - ICMP (Ping)</name>
<description/>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<applications>
<application>
<name>ICMP</name>
</application>
</applications>
<items>
<item>
<name>ICMP ping</name>
<type>3</type>
<snmp_community/>
<snmp_oid/>
<key>icmpping</key>
<delay>1m</delay>
<history>1w</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units/>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>ICMP</name>
</application>
</applications>
<valuemap>
<name>Service state</name>
</valuemap>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<master_item/>
</item>
<item>
<name>ICMP loss</name>
<type>3</type>
<snmp_community/>
<snmp_oid/>
<key>icmppingloss</key>
<delay>1m</delay>
<history>1w</history>
<trends>365d</trends>
<status>0</status>
<value_type>0</value_type>
<allowed_hosts/>
<units>%</units>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>ICMP</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<master_item/>
</item>
<item>
<name>ICMP response time</name>
<type>3</type>
<snmp_community/>
<snmp_oid/>
<key>icmppingsec</key>
<delay>1m</delay>
<history>1w</history>
<trends>365d</trends>
<status>0</status>
<value_type>0</value_type>
<allowed_hosts/>
<units>s</units>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>ICMP</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<master_item/>
</item>
</items>
<discovery_rules/>
<httptests/>
<macros/>
<templates/>
<screens/>
</template>
<template>
<template>Zabbix - Container Agent</template>
<name>Zabbix - Container Agent</name>
<description/>
<groups>
<group>
<name>Discovered Containers</name>
</group>
<group>
<name>Templates</name>
</group>
</groups>
<applications>
<application>
<name>Packages</name>
</application>
<application>
<name>Zabbix agent</name>
</application>
</applications>
<items>
<item>
<name>Hostname of Container</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>agent.hostname</key>
<delay>1h</delay>
<history>1w</history>
<trends>0</trends>
<status>0</status>
<value_type>1</value_type>
<allowed_hosts/>
<units/>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>3</inventory_link>
<applications>
<application>
<name>Zabbix agent</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<master_item/>
</item>
<item>
<name>Contaner OS</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>agent.os</key>
<delay>6h</delay>
<history>30d</history>
<trends>0</trends>
<status>0</status>
<value_type>1</value_type>
<allowed_hosts/>
<units/>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>5</inventory_link>
<applications>
<application>
<name>Zabbix agent</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<master_item/>
</item>
<item>
<name>Zabbix Agent ping</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>agent.ping</key>
<delay>1m</delay>
<history>1w</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units/>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description>The agent always returns 1 for this item. It could be used in combination with nodata() for availability check.</description>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Zabbix agent</name>
</application>
</applications>
<valuemap>
<name>Zabbix agent ping status</name>
</valuemap>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<master_item/>
</item>
<item>
<name>Zabbix Agent Version</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>agent.version</key>
<delay>1h</delay>
<history>1w</history>
<trends>0</trends>
<status>0</status>
<value_type>1</value_type>
<allowed_hosts/>
<units/>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Zabbix agent</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<master_item/>
</item>
<item>
<name>Upgradable Packages</name>
<type>0</type>
<snmp_community/>
<snmp_oid/>
<key>packages.upgradable</key>
<delay>6h</delay>
<history>90d</history>
<trends>365d</trends>
<status>0</status>
<value_type>3</value_type>
<allowed_hosts/>
<units/>
<snmpv3_contextname/>
<snmpv3_securityname/>
<snmpv3_securitylevel>0</snmpv3_securitylevel>
<snmpv3_authprotocol>0</snmpv3_authprotocol>
<snmpv3_authpassphrase/>
<snmpv3_privprotocol>0</snmpv3_privprotocol>
<snmpv3_privpassphrase/>
<params/>
<ipmi_sensor/>
<authtype>0</authtype>
<username/>
<password/>
<publickey/>
<privatekey/>
<port/>
<description/>
<inventory_link>0</inventory_link>
<applications>
<application>
<name>Packages</name>
</application>
</applications>
<valuemap/>
<logtimefmt/>
<preprocessing/>
<jmx_endpoint/>
<master_item/>
</item>
</items>
<discovery_rules/>
<httptests/>
<macros/>
<templates/>
<screens/>
</template>
</templates>
<triggers>
<trigger>
<expression>{Service - ICMP:icmpping.max(3m)}=3</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>Cannot be pinged</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>5</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger>
<trigger>
<expression>{Service - ICMP:icmppingloss.min(10m)}&gt;50</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>Ping loss is too high</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>4</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies>
<dependency>
<name>Cannot be pinged</name>
<expression>{Service - ICMP:icmpping.max(3m)}=3</expression>
<recovery_expression/>
</dependency>
</dependencies>
<tags/>
</trigger>
<trigger>
<expression>{Service - ICMP:icmppingsec.avg(2m)}&gt;100</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>Ping Response time is too high</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>4</priority>
<description/>
<type>1</type>
<manual_close>0</manual_close>
<dependencies>
<dependency>
<name>Cannot be pinged</name>
<expression>{Service - ICMP:icmpping.max(3m)}=3</expression>
<recovery_expression/>
</dependency>
</dependencies>
<tags/>
</trigger>
<trigger>
<expression>{Zabbix - Container Agent:packages.upgradable.last()}&gt;0</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>Upgraded Packages in Container Available</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>1</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger>
<trigger>
<expression>{Zabbix - Container Agent:agent.ping.nodata(3m)}=1</expression>
<recovery_mode>0</recovery_mode>
<recovery_expression/>
<name>Zabbix agent is unreachable</name>
<correlation_mode>0</correlation_mode>
<correlation_tag/>
<url/>
<status>0</status>
<priority>5</priority>
<description/>
<type>0</type>
<manual_close>0</manual_close>
<dependencies/>
<tags/>
</trigger>
</triggers>
<value_maps>
<value_map>
<name>Service state</name>
<mappings>
<mapping>
<value>0</value>
<newvalue>Down</newvalue>
</mapping>
<mapping>
<value>1</value>
<newvalue>Up</newvalue>
</mapping>
</mappings>
</value_map>
<value_map>
<name>Zabbix agent ping status</name>
<mappings>
<mapping>
<value>1</value>
<newvalue>Up</newvalue>
</mapping>
</mappings>
</value_map>
</value_maps>
</zabbix_export>