Compare commits


147 Commits

Author SHA1 Message Date
dave@tiredofit.ca
663667dbff Release 3.9.12 - See CHANGELOG.md 2023-09-04 08:32:05 -07:00
dave@tiredofit.ca
36506091be Release 3.9.11 - See CHANGELOG.md 2023-08-24 18:12:36 -07:00
dave@tiredofit.ca
bf646381cb Release 3.9.10 - See CHANGELOG.md 2023-08-23 15:31:41 -07:00
dave@tiredofit.ca
fb3b65b33a Release 3.9.9 - See CHANGELOG.md 2023-08-21 15:38:51 -07:00
dave@tiredofit.ca
6d1ef87042 Release 3.9.8 - See CHANGELOG.md 2023-08-20 08:18:04 -07:00
Dave Conroy
c985cc8a4f Merge pull request #240 from ToshY/issue/239-armv7
Add cargo to build dependencies
2023-08-14 19:15:21 -07:00
ToshY
2265a6acf5 Add cargo to build dependencies 2023-08-05 14:39:13 +02:00
dave@tiredofit.ca
10e7debc65 Release 3.9.7 - See CHANGELOG.md 2023-07-18 07:26:59 -07:00
dave@tiredofit.ca
94e9881b7b Release 3.9.6 - See CHANGELOG.md 2023-06-16 09:50:16 -07:00
dave@tiredofit.ca
425383639a Release 3.9.5 - See CHANGELOG.md 2023-06-13 16:24:17 -07:00
dave@tiredofit.ca
1e46996812 Release 3.9.4 - See CHANGELOG.md 2023-06-13 10:16:04 -07:00
Dave Conroy
e71334564f Drop auto builds for armv7 2023-06-11 11:42:13 -07:00
dave@tiredofit.ca
f83f97bf76 Release 3.9.3 - See CHANGELOG.md 2023-06-05 10:24:46 -07:00
Dave Conroy
94a8e45af6 Merge pull request #226 from vanzhiganov/patch-1
Update README.md
2023-06-05 10:13:57 -07:00
Dave Conroy
9d90e37339 Merge pull request #225 from alwynpan/bugfix/#199
#199 Report an error when moving the backup file to S3 or Blob fails
2023-06-05 10:13:44 -07:00
Vyacheslav Anzhiganov
085b7cd6ce Update README.md 2023-06-03 16:27:17 +03:00
Yao (Alwyn) Pan
12484bb3f3 feat: Add zip package to the image 2023-06-01 16:54:26 +10:00
Yao (Alwyn) Pan
8fc2721dd4 fix: #199 report an error when moving the backup file to S3 or Blob fails 2023-06-01 16:46:13 +10:00
dave@tiredofit.ca
68174c061f Release 3.9.2 - See CHANGELOG.md 2023-05-10 08:19:01 -07:00
dave@tiredofit.ca
fd1d95090e Release 3.9.1 - See CHANGELOG.md 2023-05-03 12:13:29 -07:00
Dave Conroy
7befba0d96 Update README.md 2023-04-27 08:18:26 -07:00
Dave Conroy
583253fce7 Update README.md 2023-04-26 14:43:29 -07:00
dave@tiredofit.ca
068577001e Release 3.9.0 - See CHANGELOG.md 2023-04-26 14:32:36 -07:00
Dave Conroy
7781542816 Expand the number of variables that can be used 2023-04-24 14:54:47 -07:00
dave@tiredofit.ca
9283b5440e Release 3.8.5 - See CHANGELOG.md 2023-04-11 15:42:30 -07:00
Dave Conroy
5e62485e7f Merge pull request #216 from tpansino/bug/215
Set ltargets properly
2023-04-11 15:36:26 -07:00
Tom Pansino
f224571448 Set ltargets properly 2023-04-11 15:31:02 -07:00
dave@tiredofit.ca
01620fec00 Release 3.8.4 - See CHANGELOG.md 2023-04-06 12:14:22 -07:00
dave@tiredofit.ca
18a38b4f1d Release 3.8.3 - See CHANGELOG.md 2023-03-30 14:18:55 -07:00
dave@tiredofit.ca
150f356275 Release 3.8.2 - See CHANGELOG.md 2023-03-30 14:05:17 -07:00
dave@tiredofit.ca
e838ed0027 Release 3.8.1 - See CHANGELOG.md 2023-03-30 11:04:35 -07:00
Dave Conroy
8329b4c065 Add defaults 2023-03-27 16:41:31 -07:00
dave@tiredofit.ca
dab1ac301a Release 3.8.0 - See CHANGELOG.md 2023-03-27 15:01:10 -07:00
dave@tiredofit.ca
077201cd18 Release 3.7.7 - See CHANGELOG.md 2023-03-20 16:24:23 -07:00
Dave Conroy
eeaf59dc6f Merge pull request #210 from codemonium/simplify-pg_isready
Simplify pg_isready usage
2023-03-20 16:22:13 -07:00
Igor Artemenko
88fe0d6411 Simplify pg_isready usage
The pg_isready documentation says that it does not need a correct
database name or username to get the server status. In fact, incorrect
values result in the server logging failed connection attempts. As a
result, when we set DB_NAME to ALL, calls to the check_availability
function (which uses pg_isready) cause the server to log the following
error:

    FATAL:  database "ALL" does not exist

To eliminate this error, this change simplifies the pg_isready call.
2023-03-20 22:51:05 +00:00
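A minimal sketch of the simplified check (hypothetical `DB_HOST`/`DB_PORT` values for illustration; `pg_isready` exits 0 once the server accepts connections, no database name or username required):

```bash
#!/bin/bash
# Sketch: poll server availability without passing a database name or
# username, so DB_NAME=ALL never reaches the server as a bogus database.
DB_HOST="${DB_HOST:-postgres}"   # hypothetical defaults, for illustration
DB_PORT="${DB_PORT:-5432}"

until pg_isready --host="${DB_HOST}" --port="${DB_PORT}" -q; do
    echo "Waiting for PostgreSQL at ${DB_HOST}:${DB_PORT}..."
    sleep 5
done
echo "PostgreSQL is accepting connections"
```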
dave@tiredofit.ca
366c4759a5 Release 3.7.6 - See CHANGELOG.md 2023-03-14 16:10:11 -07:00
Dave Conroy
37f255ec99 Merge pull request #207 from kamartem/patch-1
Typo correction
2023-03-14 16:09:08 -07:00
Dave Conroy
efa9a678c0 Merge pull request #209 from ToshY/bug/208-mysql-extra-opts-status-check
Removed EXTRA_OPTS in MySQL status check
2023-03-14 16:08:53 -07:00
ToshY
68747a4aff Removed EXTRA_OPTS in MySQL status check 2023-03-14 20:28:05 +01:00
Artem Kamyshansky
cf736278bb Typo correction 2023-03-12 19:31:04 +03:00
dave@tiredofit.ca
1659e34fc7 Release 3.7.5 - See CHANGELOG.md 2023-03-02 07:39:58 -08:00
dave@tiredofit.ca
a8df7a2c75 Release 3.7.4 - See CHANGELOG.md 2023-02-22 08:36:46 -08:00
Dave Conroy
b5194dcce9 Merge pull request #203 from gbe0/issue/201
Fix issue #201 - 99-run_forever exec format error
2023-02-22 08:35:55 -08:00
Chris
6fb947684a fix issue #201 - 99-run_forever exec format error 2023-02-23 00:26:54 +08:00
Dave Conroy
9287f4efeb Update README.md 2023-01-30 11:47:15 -08:00
Dave Conroy
eeb5b5a119 Update README.md 2023-01-30 09:58:44 -08:00
Dave Conroy
a83dfd1a0b Update Workflows 2023-01-29 18:13:20 -08:00
Dave Conroy
8fb379b51a Update workflows 2023-01-29 16:04:15 -08:00
dave@tiredofit.ca
a90e52091d Release 3.7.3 - See CHANGELOG.md 2022-12-20 06:47:24 -08:00
Dave Conroy
ac58b5cdf6 Merge branch 'main' of https://github.com/tiredofit/docker-db-backup 2022-12-20 06:46:46 -08:00
Dave Conroy
fcbe771793 Merge pull request #194 from alwynpan/feature/#193
#193 Make S3_KEY_ID and S3_KEY_SECRET optional for S3 Backup
2022-12-20 06:46:40 -08:00
Yao (Alwyn) Pan
168982ab53 Make S3_KEY_ID and S3_KEY_SECRET optional for S3 Backup 2022-12-20 17:51:56 +11:00
Dave Conroy
e377fcb6ae Fix spelling mistake in Archive notice 2022-12-19 12:35:33 -08:00
dave@tiredofit.ca
50f27233a9 Release 3.7.2 - See CHANGELOG.md 2022-12-19 12:33:07 -08:00
dave@tiredofit.ca
7ccbf23af6 Release 3.7.1 - See CHANGELOG.md 2022-12-19 08:21:35 -08:00
dave@tiredofit.ca
0921971aa3 Release 3.7.0 - See CHANGELOG.md 2022-12-16 14:02:35 -08:00
dave@tiredofit.ca
fd3b9c5fa0 Release 3.6.1 - See CHANGELOG.md 2022-11-23 07:44:28 -08:00
dave@tiredofit.ca
89b6176188 Release 3.6.0 - See CHANGELOG.md 2022-11-21 12:32:33 -08:00
dave@tiredofit.ca
22e126200e Release 3.5.6 - See CHANGELOG.md 2022-11-15 13:07:21 -08:00
dave@tiredofit.ca
3e79ca68a0 Release 3.5.5 - See CHANGELOG.md 2022-10-18 07:52:34 -07:00
Dave Conroy
bfeb07d7c0 Merge pull request #179 from greena13/bugfix/s3_backup_prefixes
Bugfix: Generating S3 prefix to store new backups
2022-10-18 07:51:50 -07:00
Aleck Greenham
8a5d647de7 Merge branch 'master' of github.com:tiredofit/docker-db-backup into bugfix/s3_backup_prefixes
# Conflicts:
#	install/assets/functions/10-db-backup
2022-10-18 07:40:33 +01:00
Aleck Greenham
4f5c04acac Bugfix: Generating S3 prefix to store new backups 2022-10-17 18:17:48 +01:00
dave@tiredofit.ca
494f742cb0 Release 3.5.4 - See CHANGELOG.md 2022-10-13 13:59:22 -07:00
dave@tiredofit.ca
e7b9a36745 Release 3.5.3 - See CHANGELOG.md 2022-10-12 08:42:14 -07:00
dave@tiredofit.ca
28c7058f37 Release 3.5.2 - See CHANGELOG.md 2022-10-11 08:48:05 -07:00
Dave Conroy
6f15c77a0f Merge pull request #174 from jauderho/patch-1
Update Dockerfile to use influxdb client 2.4.0
2022-10-11 08:45:47 -07:00
dave@tiredofit.ca
4e04e31d84 Release 3.5.1 - See CHANGELOG.md 2022-10-11 08:10:42 -07:00
Jauder Ho
f3fad4a893 Update Dockerfile
Update influxdb client to 2.4.0.
2022-10-10 16:44:40 -07:00
dave@tiredofit.ca
69f0ca762c Release 3.5.0 - See CHANGELOG.md 2022-10-10 12:06:38 -07:00
Dave Conroy
07d72163a0 Merge pull request #173 from tiredofit/develop 2022-10-10 12:01:34 -07:00
Dave Conroy
b31da0b785 Merge branch 'master' into develop 2022-10-10 12:01:06 -07:00
Dave Conroy
be490b3f4b Remove url [en|de]code functions 2022-10-10 11:53:09 -07:00
Dave Conroy
028966d0b2 Merge pull request #171 from greena13/bugfix/s3_backup_cleanup
Bugfix: S3 database cleanups
2022-10-09 08:33:45 -07:00
Dave Conroy
19d8e98744 Merge pull request #169 from eoehen/feature/Add-documentation-for-blobxfer
Add blobxfer documentation in readme file.
2022-10-09 08:32:59 -07:00
Aleck Greenham
25def5b6f0 Bugfix: S3 database cleanup path 2022-10-09 13:59:48 +01:00
Aleck Greenham
03b7ef9d0d Bugfix: S3 database cleanups 2022-10-09 12:44:22 +01:00
Dave Conroy
231dd63a38 Remove quote 2022-10-08 11:57:40 -07:00
Elias Oehen
666eb81846 Add blobxfer documentation in readme file. 2022-10-08 16:57:00 +02:00
Dave Conroy
4572ab6fca Add Azure wording to README.md 2022-10-08 07:44:05 -07:00
Dave Conroy
9ba51bcec9 Add py3-cryptography module to allow armv7 builds to finish 2022-10-08 07:19:49 -07:00
Dave Conroy
b9edbf68d3 Merge branch 'master' of https://github.com/tiredofit/docker-db-backup 2022-10-08 06:54:00 -07:00
Dave Conroy
2022158a4e Swap around some environment checks 2022-10-07 08:33:19 -07:00
Dave Conroy
2b441f11e1 Restore old functionality for Mongo backup when not using MONGO_CUSTOM_URI 2022-10-07 08:26:54 -07:00
Dave Conroy
532a6b456b Merge pull request #166 from eoehen/feature/Add-support-for-blobxfer
Add support for blobxfer
2022-10-07 08:05:50 -07:00
Dave Conroy
a8d9a0185f Merge pull request #165 from eoehen/feature/Improve-mssql-support
Improve MSSQL Support
2022-10-07 08:04:59 -07:00
Dave Conroy
8b41f5efcf Allow EXTRA_OPTS with MONGO_CUSTOM_URI 2022-10-07 08:03:24 -07:00
Elias Oehen
956904046d Add support for blobxfer 2022-10-04 19:04:43 +02:00
Elias Oehen
4d7f5e9459 Add mssql docker-compose example 2022-10-04 18:37:34 +02:00
Elias Oehen
d9723823c9 Improve mssql server support 2022-10-04 18:22:20 +02:00
Elias Oehen
0067f552f1 Move mysql example to mysql folder 2022-10-04 18:21:21 +02:00
Dave Conroy
4a8f85ddf5 Add EXTRA_OPTS to Mongo string 2022-09-29 19:35:01 -07:00
Dave Conroy
6de0cc7c03 Wrap mongo_generated_uri in quotes 2022-09-29 16:06:06 -07:00
Dave Conroy
2d017e26c5 Cleanup README 2022-09-25 10:00:33 -07:00
Dave Conroy
1efb2d43a8 Set proper environment variable 2022-09-21 10:51:25 -07:00
Dave Conroy
789aa96113 Alternate way of solving Host check 2022-09-21 09:28:01 -07:00
Dave Conroy
9d8cfd69cb Fix a couple variable issues and add silencing 2022-09-20 17:45:05 -07:00
Dave Conroy
c4dbf53ced Final Availability check 2022-09-20 13:51:54 -07:00
Dave Conroy
8706d3a91c Skip availability checks by default 2022-09-20 12:50:00 -07:00
Dave Conroy
73ad356ebf Force lowercase for filenames and hostnames for filename generation 2022-09-20 12:08:53 -07:00
Dave Conroy
73c4003dc4 Fix environment variable check for Mongo 2022-09-20 12:06:13 -07:00
Dave Conroy
a377f570f1 Introduce MONGO_CUSTOM_URI support 2022-09-20 09:41:44 -07:00
dave@tiredofit.ca
b956bd817f Release 3.4.2 - See CHANGELOG.md 2022-09-19 08:00:15 -07:00
Dave Conroy
7bda69b062 Merge pull request #158 from mark-monteiro/patch-1
Improve release notes for 3.4.1
2022-09-14 09:17:51 -07:00
Mark Monteiro
bc23b6a65e Improve release notes for 3.4.1
Add clear migration instructions for custom script paths
2022-09-14 11:57:10 -04:00
dave@tiredofit.ca
8fb3d8315f Release 3.4.1 - See CHANGELOG.md 2022-09-13 08:24:30 -07:00
Dave Conroy
c16133fdd0 Merge pull request #157 from ToshY/fix/readme-missing-db-dump-target
updated README with missing DB_DUMP_TARGET environment variable
2022-09-12 13:09:37 -07:00
ToshY
6967fd5e56 updated README with missing DB_DUMP_TARGET environment variable 2022-09-12 21:51:14 +02:00
Dave Conroy
75acaefb64 Merge pull request #155 from ToshY/fix/readme-custom-scripts-directory
updated README with correct custom scripts directory
2022-09-12 12:24:02 -07:00
ToshY
6933b0f87c updated README with correct custom scripts directory 2022-09-12 21:17:38 +02:00
dave@tiredofit.ca
dc4ab0bfc5 Release 3.4.0 - See CHANGELOG.md 2022-09-12 07:50:55 -07:00
Dave Conroy
9ea34f5a44 Add MongoDB Atlas Support 2022-09-12 07:49:29 -07:00
Dave Conroy
1d53785e7d Fix default port for Influx 2 DBs 2022-09-12 07:19:18 -07:00
Dave Conroy
4e0878b2ad Merge pull request #150 from teun95/teun95-add-rsync
Add --rsyncable for gzip compression
2022-09-12 07:17:41 -07:00
teun95
a98d33bfdb Correct table formatting in README.md 2022-09-08 11:04:11 +00:00
teun95
00c851eda2 Update README.md to include GZ_RSYNCABLE 2022-09-07 19:54:41 +01:00
teun95
cd88285036 Added rsyncable option for gzip using GZ_RSYNCABLE
GZ_RSYNCABLE=TRUE enables --rsyncable for gzip compression. Useful to speed up backups, reduce size of incremental backups, and allow for better deduplication.
2022-09-07 19:50:50 +01:00
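For illustration, `--rsyncable` (available in GNU gzip 1.7 and later) restarts the compressor at input-dependent block boundaries, so a small change in the source only perturbs a small window of the compressed output. A hedged example with hypothetical paths:

```bash
# Compress a dump with rsync-friendly block boundaries, then sync it.
gzip --rsyncable -6 /backup/db.sql                    # writes /backup/db.sql.gz
rsync -av /backup/db.sql.gz backuphost:/srv/backups/  # hypothetical host
```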
dave@tiredofit.ca
428c313c7b Release 3.3.12 - See CHANGELOG.md 2022-08-15 12:19:55 -07:00
Dave Conroy
210acb1e2a Merge pull request #143 from arifer612/patch-1
Fix incorrect case for filesize variable
2022-08-15 12:19:12 -07:00
Arif Er
e50a8cb0ec fix: correct case for filesize variable
Post-script support expects a declared variable `$FILESIZE` to provide the size of the backup files. Such a variable does not exist, leading to a situation where using `"${9}"` in a custom script furnishes the checksum hash. However, earlier in the script the file size of the backup is indeed assigned to a variable, only it is entirely in lower case: `$filesize`. This commit fixes that inconsistency.
2022-08-15 21:51:40 +08:00
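A minimal post-backup script consuming that argument, assuming the positional argument order documented in this image's README (`$9` = file size, `${10}` = checksum hash):

```bash
#!/bin/bash
# Sketch: post-backup hook. $8 = backup filename, $9 = file size in bytes
# (the lowercase $filesize inside the backup script), ${10} = checksum hash.
echo "Backup ${8} finished: ${9} bytes, checksum ${10}"
```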
Dave Conroy
7453852046 Release 3.3.11 - See CHANGELOG.md 2022-07-22 12:00:05 -07:00
Dave Conroy
f115a89a3c Merge pull request #141 from khoazero123/fix_postgres_restore
Fix postgres restore wrong db type
2022-07-22 11:58:57 -07:00
KhoaZero123
8b8d243944 Fix postgres restore wrong db type 2022-07-22 09:41:55 +07:00
dave@tiredofit.ca
be34ceb6ff Release 3.3.10 - See CHANGELOG.md 2022-07-19 12:16:29 -07:00
Dave Conroy
82d6ce444b Merge branch 'master' of https://github.com/tiredofit/docker-db-backup 2022-07-11 09:26:38 -07:00
Dave Conroy
382a188b77 Release 3.3.9 - See CHANGELOG.md 2022-07-11 09:26:35 -07:00
Dave Conroy
f458c34546 Merge pull request #140 from fdarveau/fix-read-port-number-ineractive-restore
Fix reading port number in interactive restore
2022-07-11 09:25:41 -07:00
François Darveau
229db5cd03 Fix reading port number in interactive restore 2022-07-10 16:44:05 -04:00
dave@tiredofit.ca
8bb926fd20 Release 3.3.8 - See CHANGELOG.md 2022-07-09 08:59:59 -07:00
dave@tiredofit.ca
f005956c47 Release 3.3.7 - See CHANGELOG.md 2022-06-23 11:49:28 -07:00
dave@tiredofit.ca
ba20386e65 Release 3.3.6 - See CHANGELOG.md 2022-06-23 08:18:08 -07:00
dave@tiredofit.ca
12211d3b67 Release 3.3.5 - See CHANGELOG.md 2022-06-08 09:01:44 -07:00
Dave Conroy
83693d35b2 Release 3.3.4 - See CHANGELOG.md 2022-06-03 05:10:53 -07:00
Dave Conroy
52b726c821 Merge pull request #132 from rozdzynski/master
Unary operator fix
2022-06-03 05:09:27 -07:00
rozdzynski
5c43b3c907 unary operator fix 2022-06-03 14:02:42 +02:00
dave@tiredofit.ca
005e7f6e47 Release 3.3.3 - See CHANGELOG.md 2022-05-24 08:26:45 -07:00
Dave Conroy
7d7cb9587d Release 3.3.2 - See CHANGELOG.md 2022-05-02 22:28:08 -07:00
Dave Conroy
d1713fe3f0 Release 3.3.1 - See CHANGELOG.md 2022-04-30 22:31:34 -07:00
Dave Conroy
d1e98d9c4b Release 3.3.0 - See CHANGELOG.md 2022-04-30 03:23:45 -07:00
Dave Conroy
0920b671cb Release 3.2.6 - See CHANGELOG.md 2022-04-25 10:29:45 -07:00
Dave Conroy
28ed6c3bb8 Release 3.2.5 - See CHANGELOG.md 2022-04-23 14:11:29 -07:00
Dave Conroy
c1bdf26598 Release 3.2.4 - See CHANGELOG.md 2022-04-21 16:04:43 -07:00
Dave Conroy
5a4cac2cee Release 3.2.3 - See CHANGELOG.md 2022-04-21 15:46:27 -07:00
Dave Conroy
c04eec7661 Add space after compress_cmd 2022-04-21 14:19:09 -07:00
Dave Conroy
32f1959a07 Merge pull request #120 from joergmschulz/patch-1
small typo / exiting instead of exitting
2022-04-21 14:18:43 -07:00
joergmschulz
d384d5a529 small typo / exiting instead of exitting 2022-04-21 23:16:02 +02:00
18 changed files with 1221 additions and 568 deletions

1
.dockerignore Normal file

@@ -0,0 +1 @@
examples/

2
.gitattributes vendored Normal file

@@ -0,0 +1,2 @@
# Declare files that will always have LF line endings on checkout.
*.* text eol=lf

7
.github/dependabot.yml vendored Normal file

@@ -0,0 +1,7 @@
version: 2
updates:
  # Maintain dependencies for GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"

.github/workflows/main.yml

@@ -1,112 +1,14 @@
### Application Level Image CI
### Dave Conroy <dave at tiredofit dot ca>
name: 'build'
name: "build_image"
on:
push:
paths:
- '**'
- '!README.md'
- "**"
- "!README.md"
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
set -x
if [[ $GITHUB_REF == refs/heads/* ]]; then
if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
else
BRANCH=${GITHUB_REF#refs/heads/}
fi
case ${BRANCH} in
"main" | "master" )
BRANCHTAG="${DOCKER_IMAGE}:latest"
;;
"develop" )
BRANCHTAG="${DOCKER_IMAGE}:develop"
;;
* )
if [ -n "${{ secrets.LATEST }}" ] ; then
if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
;;
esac
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
fi
if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
TAGS=${BRANCHTAG},${GITTAG}
else
TAGS="${BRANCHTAG}${GITTAG}"
fi
echo ::set-output name=tags::${TAGS}
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Label
id: Label
run: |
if [ -f "Dockerfile" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
if [ -f "CHANGELOG.md" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
fi
if [[ $GITHUB_REF == refs/heads/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
fi
fi
- name: Build
uses: docker/build-push-action@v2
with:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm/v7,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}
build:
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
secrets: inherit

.github/workflows/manual.yml

@@ -1,6 +1,4 @@
# Manual Workflow (Application)
name: manual
name: "manual_build_image"
on:
workflow_dispatch:
@@ -8,105 +6,10 @@ on:
Manual Build:
description: 'Manual Build'
required: false
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
set -x
if [[ $GITHUB_REF == refs/heads/* ]]; then
if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
else
BRANCH=${GITHUB_REF#refs/heads/}
fi
case ${BRANCH} in
"main" | "master" )
BRANCHTAG="${DOCKER_IMAGE}:latest"
;;
"develop" )
BRANCHTAG="${DOCKER_IMAGE}:develop"
;;
* )
if [ -n "${{ secrets.LATEST }}" ] ; then
if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
;;
esac
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
fi
if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
TAGS=${BRANCHTAG},${GITTAG}
else
TAGS="${BRANCHTAG}${GITTAG}"
fi
echo ::set-output name=tags::${TAGS}
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Label
id: Label
run: |
if [ -f "Dockerfile" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
if [ -f "CHANGELOG.md" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
fi
if [[ $GITHUB_REF == refs/heads/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
fi
fi
- name: Build
uses: docker/build-push-action@v2
with:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm/v7,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}
build:
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
secrets: inherit

CHANGELOG.md

@@ -1,3 +1,373 @@
## 3.9.12 2023-09-04 <dave at tiredofit dot ca>
### Changed
- Perform additional checks for ENABLE_CHECKSUM=FALSE and skip executing actions for S3/BlobXfer
## 3.9.11 2023-08-24 <dave at tiredofit dot ca>
### Changed
- AWS CLI 2.13.9
## 3.9.10 2023-08-23 <dave at tiredofit dot ca>
### Changed
- Stop trying to move a non-existent checksum file when ENABLE_CHECKSUM=FALSE
## 3.9.9 2023-08-21 <dave at tiredofit dot ca>
### Changed
- Start compiling aws-cli instead of installing from packages, to continue supporting arm/v7
## 3.9.8 2023-08-20 <ToshY@github>
### Changed
- Restore armv7 and aarch64 builds
## 3.9.7 2023-07-18 <dave at tiredofit dot ca>
### Changed
- Cleanup check_exit_code parameter and reduce duplicate output
## 3.9.6 2023-06-16 <dave at tiredofit dot ca>
### Changed
- Resolve issues introduced with 3.9.3. Split exit codes to be specific to backing up and moving. Uses parameter $11 for post-backup scripts
## 3.9.5 2023-06-13 <dave at tiredofit dot ca>
### Changed
- Start building InfluxDB v1 manually due to its removal from the Alpine repositories
## 3.9.4 2023-06-13 <dave at tiredofit dot ca>
### Added
- Add ability to use the --rsyncable argument for zstd archives
## 3.9.3 2023-06-05 <dave at tiredofit dot ca>
### Added
- Add notification if blobxfer/s3 upload fails (credit @alwynpan)
- Add zip package
## 3.9.2 2023-05-10 <dave at tiredofit dot ca>
### Changed
- Alpine 3.18 base
## 3.9.1 2023-05-03 <dave at tiredofit dot ca>
### Changed
- Properly allow multiple _FILE environment variables to execute, solving an issue with MySQL backups
- Fix _FILE functionality for DB_NAME variable
## 3.9.0 2023-04-26 <dave at tiredofit dot ca>
### Added
- Add support for _FILE environment variables
## 3.8.5 2023-04-11 <tpansino@github>
### Changed
- Fix SQLite3, Influx, and MSSQL backups failing due to a malformed/non-existent ltarget
## 3.8.4 2023-04-06 <dave at tiredofit dot ca>
### Changed
- Fix issue with Influx2 and MSSQL clients not installing properly
## 3.8.3 2023-03-30 <dave at tiredofit dot ca>
### Changed
- Patchup for 3.8.2
## 3.8.2 2023-03-30 <dave at tiredofit dot ca>
### Changed
- Patchup for 3.8.1
## 3.8.1 2023-03-30 <dave at tiredofit dot ca>
### Changed
- Cleanup Dockerfile
- Fix issue with DB_ARCHIVE_TIME not firing correctly
## 3.8.0 2023-03-27 <dave at tiredofit dot ca>
### Added
- Introduce DB_DUMP_TARGET_ARCHIVE which works with DB_ARCHIVE_TIME to move backups older than (x) minutes from DB_DUMP_TARGET to DB_DUMP_TARGET_ARCHIVE for use with external backup systems and custom exclude rules
- Introduce CREATE_LATEST_SYMLINK which creates a symbolic link in DB_DUMP_TARGET of `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)`
## 3.7.7 2023-03-20 <codemonium@github>
### Changed
- Simplify pg_isready usage
## 3.7.6 2023-03-14 <toshy@github>
### Changed
- Remove EXTRA_OPT variable from MySQL/MariaDB check
## 3.7.5 2023-03-02 <dave at tiredofit dot ca>
### Added
- Add support for Docker Swarm mode Secrets for BLOBXFER_STORAGE_ACCOUNT_*_FILE
## 3.7.4 2023-02-22 <gbe0@github>
### Changed
- Fix looping when running in MANUAL_RUN_FOREVER mode
## 3.7.3 2022-12-20 <dave at tiredofit dot ca>
### Changed
- Make S3_KEY_ID and S3_KEY_SECRET optional should IAM roles be used (Credit to alwynpan@github)
## 3.7.2 2022-12-19 <dave at tiredofit dot ca>
### Changed
- Bugfix for 3.7.1
## 3.7.1 2022-12-19 <dave at tiredofit dot ca>
### Changed
- Add MYSQL_ENABLE_TLS environment variable to switch TLS on and off
### Reverted
- Set default for MYSQL_TLS_CA_FILE to accommodate most use cases
## 3.7.0 2022-12-16 <dave at tiredofit dot ca>
### Added
- Introduce support for connecting via TLS to MySQL / MariaDB Hosts with MYSQL_TLS_* variables - See README for more details
### Changed
- Fix for cleaning up filesystems that are syncing to Azure via blobxfer
## 3.6.1 2022-11-23 <dave at tiredofit dot ca>
### Added
- Switch to Alpine 3.17 base
- Switch to OpenSSL instead of LibreSSL
## 3.6.0 2022-11-21 <dave at tiredofit dot ca>
### Added
- Postgresql 15 Support
## 3.5.6 2022-11-15 <dave at tiredofit dot ca>
### Changed
- Add failure if DB_TYPE is empty or malformed
## 3.5.5 2022-10-18 <dave at tiredofit dot ca>
### Changed
- Fix for S3 backups and trailing slashes (@greena13)
## 3.5.4 2022-10-13 <dave at tiredofit dot ca>
### Changed
- Fix for Influx DB 1 backups when compression enabled
## 3.5.3 2022-10-12 <dave at tiredofit dot ca>
### Changed
- Remove build dependencies for blobxfer, making the image smaller
- Remove silencing commands that prevented Postgres backups from working without DEBUG_MODE=TRUE
## 3.5.2 2022-10-11 <dave at tiredofit dot ca>
### Added
- Update Influxdb client to 2.4.0 (jauderho@github)
## 3.5.1 2022-10-11 <dave at tiredofit dot ca>
### Changed
- Tighten up cleanup routines to not call blobxfer unless absolutely necessary
## 3.5.0 2022-10-10 <dave at tiredofit dot ca>
### Added
- Blobxfer / Microsoft Azure Support (credit: eoehen@github)
- Introduce MONGO_CUSTOM_URI environment variable for those not wanting to use DB_* variables
### Changed
- Force filenames to be in lowercase
- Fix S3 Database cleanups (credit greenatwork@github)
- Remove MONGO_DB_TYPE variable as MONGO_CUSTOM_URI overrides
- Fix MSSQL Backups (credit eoehen@github)
- Separate examples for MySQL and MSSQL
## 3.4.2 2022-09-19 <dave at tiredofit dot ca>
### Changed
- Skip availability check for Mongo Atlas connections
## 3.4.1 2022-09-13 <dave at tiredofit dot ca>
### Added
- Introduce environment variables SCRIPT_LOCATION_POST and SCRIPT_LOCATION_PRE for better separation
### Deprecated
- Introduce deprecation warning for the custom script paths `/assets/custom-scripts` and `/assets/custom-scripts/pre`. These
paths will continue to work for now but support may be removed in the next major version release. To support the new
default paths your scripts should be moved as follows (see the sketch after this table):
|Script Type|Old Path (Deprecated)|New Environment Variable|Environment Value Default|
|-----------|--------|-------------------------|----------------|
|Pre|`/assets/custom-scripts/pre`|SCRIPT_LOCATION_PRE|`/assets/scripts/pre`|
|Post|`/assets/custom-scripts`|SCRIPT_LOCATION_POST|`/assets/scripts/post`|
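A hedged migration sketch, assuming host-side directories named `./custom-scripts` (legacy) and `./scripts` (new); adjust to your own layout:

```bash
# Move legacy hooks into the new pre/post layout on the host...
mkdir -p ./scripts/pre ./scripts/post
mv ./custom-scripts/pre/*.sh ./scripts/pre/ 2>/dev/null
mv ./custom-scripts/*.sh ./scripts/post/ 2>/dev/null

# ...then mount them at the new default locations:
docker run -d --name db-backup \
  -v "$(pwd)/scripts/pre:/assets/scripts/pre" \
  -v "$(pwd)/scripts/post:/assets/scripts/post" \
  tiredofit/db-backup
```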
## 3.4.0 2022-09-12 <dave at tiredofit dot ca>
### Added
- Add GZ_RSYNCABLE environment variable for better rsync compatibility (Credit teun95@github)
- Add Pre Backup Script Support
- Add MongoDB Atlas Support
### Changed
- Fix Default Port for Influx 2 DB Hosts
## 3.3.12 2022-08-15 <dave at tiredofit dot ca>
### Changed
- arifer612@github contributed a fix for the incorrect case of the "filesize" variable when using post-backup scripts
## 3.3.11 2022-07-22 <khoazero123@github>
### Fixed
- Restore script not properly detecting postgres backups
## 3.3.10 2022-07-19 <dave at tiredofit dot ca>
### Changed
- Remove MSSQL install packages properly
## 3.3.9 2022-07-09 <fdarveau@github>
### Fixed
- Remaining work on interactive mode for entering port on restore script
## 3.3.8 2022-07-09 <dave at tiredofit dot ca>
### Added
- MSSQL Client Tools 18.0.1.1-1
## 3.3.7 2022-06-23 <dave at tiredofit dot ca>
### Changed
- Allow overrides to actually override with the restore script
## 3.3.6 2022-06-23 <dave at tiredofit dot ca>
### Changed
- Fix for restore script when using all 7 arguments
## 3.3.5 2022-06-08 <dave at tiredofit dot ca>
### Changed
- Fix DB port parameter not accepting input in the restore script
- Fix MongoDB restore questionnaire
## 3.3.4 2022-06-03 <rozdzynski@github>
### Fixed
- S3 backups failing with special characters in filename
## 3.3.3 2022-05-24 <dave at tiredofit dot ca>
### Added
- Alpine 3.16 base
## 3.3.2 2022-05-02 <dave at tiredofit dot ca>
### Added
- Add POST_SCRIPT_SKIP_X_VERIFY environment variable to allow for more host compatibility for post scripts
## 3.3.1 2022-04-30 <dave at tiredofit dot ca>
### Changed
- Compressing silently was causing 0-byte backups
## 3.3.0 2022-04-30 <dave at tiredofit dot ca>
### Added
- Ability to auto-clean old S3 / Minio hosts like what occurs on the filesystem
- Alert user how to turn off Zabbix Monitoring if it fails
- Allow Zabbix Monitoring to work with S3
- Silence some more compression statements
### Changed
- Fix for Redis not backing up properly
- Start sending checksums for S3 Outputs
- Cleanup some code functions
- Fix container log level always being DEBUG
## 3.2.4 2022-04-21 <dave at tiredofit dot ca>
### Changed
- Add -portable flag when backing up Influx
## 3.2.3 2022-04-21 <dave at tiredofit dot ca>
### Changed
- Fix for bucket / db name in InfluxDB 1.x
- Minor aesthetics, spacing, spelling
## 3.2.2 2022-04-21 <dave at tiredofit dot ca>
### Changed

Dockerfile

@@ -1,10 +1,14 @@
FROM docker.io/tiredofit/alpine:3.15
ARG DISTRO=alpine
ARG DISTRO_VARIANT=3.18
FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables
ENV INFLUX2_VERSION=2.2.1 \
MSSQL_VERSION=17.8.1.1-1 \
ENV INFLUX_VERSION=1.8.0 \
INFLUX2_VERSION=2.4.0 \
MSSQL_VERSION=18.0.1.1-1 \
AWS_CLI_VERSION=1.25.97 \
CONTAINER_ENABLE_MESSAGING=FALSE \
CONTAINER_ENABLE_MONITORING=TRUE \
CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
@@ -12,38 +16,53 @@ ENV INFLUX2_VERSION=2.2.1 \
IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
### Dependencies
RUN set -ex && \
apk update && \
apk upgrade && \
apk add -t .db-backup-build-deps \
RUN source /assets/functions/00-container && \
set -ex && \
package update && \
package upgrade && \
package install .db-backup-build-deps \
build-base \
bzip2-dev \
cargo \
git \
go \
libarchive-dev \
openssl-dev \
libffi-dev \
python3-dev \
py3-pip \
xz-dev \
&& \
\
apk add --no-cache -t .db-backup-run-deps \
aws-cli \
package install .db-backup-run-deps \
bzip2 \
influxdb \
groff \
libarchive \
mariadb-client \
mariadb-connector-c \
mongodb-tools \
libressl \
openssl \
pigz \
postgresql \
postgresql-client \
postgresql15 \
postgresql15-client \
pv \
py3-botocore \
py3-colorama \
py3-cryptography \
py3-docutils \
py3-jmespath \
py3-rsa \
py3-setuptools \
py3-s3transfer \
py3-yaml \
python3 \
redis \
sqlite \
xz \
zip \
zstd \
&& \
\
cd /usr/src && \
\
apkArch="$(apk --print-arch)"; \
case "$apkArch" in \
x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
@@ -51,9 +70,13 @@ RUN set -ex && \
*) sleep 0.1 ;; \
esac; \
\
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSSQL_VERSION}_amd64.apk mssql-tools18_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
\
clone_git_repo https://github.com/aws/aws-cli "${AWS_CLI_VERSION}" && \
python3 setup.py install --prefix=/usr && \
clone_git_repo https://github.com/influxdata/influxdb "${INFLUX_VERSION}" && \
go build -o /usr/sbin/influxd ./cmd/influxd && \
strip /usr/sbin/influxd && \
mkdir -p /usr/src/pbzip2 && \
curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
cd /usr/src/pbzip2 && \
@@ -70,11 +93,17 @@ RUN set -ex && \
make && \
make install && \
\
### Cleanup
apk del .db-backup-build-deps && \
rm -rf /usr/src/* && \
rm -rf /etc/logrotate.d/redis && \
rm -rf /root/.cache /tmp/* /var/cache/apk/*
pip3 install blobxfer && \
\
package remove .db-backup-build-deps && \
package cleanup && \
rm -rf \
/*.apk \
/etc/logrotate.d/* \
/root/.cache \
/root/go \
/tmp/* \
/usr/src/*
### S6 Setup
ADD install /
COPY install /

218
README.md

@@ -1,7 +1,7 @@
# github.com/tiredofit/docker-db-backup
[![GitHub release](https://img.shields.io/github/v/tag/tiredofit/docker-db-backup?style=flat-square)](https://github.com/tiredofit/docker-db-backup/releases/latest)
[![Build Status](https://img.shields.io/github/workflow/status/tiredofit/docker-db-backup/build?style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions?query=workflow%3Abuild)
[![Build Status](https://img.shields.io/github/actions/workflow/status/tiredofit/docker-db-backup/main.yml?branch=main&style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions)
[![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
[![Docker Pulls](https://img.shields.io/docker/pulls/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
[![Become a sponsor](https://img.shields.io/badge/sponsor-tiredofit-181717.svg?logo=github&style=flat-square)](https://github.com/sponsors/tiredofit)
@@ -14,7 +14,7 @@ This will build a container for backing up multiple types of DB Servers
Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
* dump to local filesystem or backup to S3 Compatible services
* dump to local filesystem, or backup to S3 Compatible services and Azure.
* select database user and password
* backup all databases, single, or multiple databases
* backup all to separate files or one singular file
@@ -37,7 +37,6 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
- [About](#about)
- [Maintainer](#maintainer)
- [Table of Contents](#table-of-contents)
- [Persistent Storage](#persistent-storage)
- [Prerequisites and Assumptions](#prerequisites-and-assumptions)
- [Installation](#installation)
- [Build from Source](#build-from-source)
@@ -45,7 +44,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
- [Multi Architecture](#multi-architecture)
- [Configuration](#configuration)
- [Quick Start](#quick-start)
- [Persistent Storage](#persistent-storage-1)
- [Persistent Storage](#persistent-storage)
- [Environment Variables](#environment-variables)
- [Base Images used](#base-images-used)
- [Container Options](#container-options)
@@ -54,11 +53,15 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
- [Scheduling Options](#scheduling-options)
- [Backup Options](#backup-options)
- [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
- [Upload to an Azure storage account with `blobxfer`](#upload-to-an-azure-storage-account-with-blobxfer)
- [Maintenance](#maintenance)
- [Shell Access](#shell-access)
- [Manual Backups](#manual-backups)
- [Restoring Databases](#restoring-databases)
- [Custom Scripts](#custom-scripts)
- [Path Options](#path-options)
- [Pre Backup](#pre-backup)
- [Post backup](#post-backup)
- [Support](#support)
- [Usage](#usage)
- [Bugfixes](#bugfixes)
@@ -67,7 +70,6 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
- [License](#license)
> **NOTE**: If you are using this with a docker-compose file along with a separate SQL container, take care not to set the variables to backup immediately; instead, have it delay execution for a minute, otherwise you will get a failed first backup.
### Persistent Storage
## Prerequisites and Assumptions
* You must have a working connection to one of the supported DB Servers and appropriate credentials
@@ -78,7 +80,13 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
Clone this repository and build the image with `docker build <arguments> (imagename) .`
### Prebuilt Images
Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and is the recommended method of installation.
Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup)
Builds of the image are also available on the [Github Container Registry](https://github.com/tiredofit/docker-db-backup/pkgs/container/docker-db-backup)
```
docker pull ghcr.io/tiredofit/docker-db-backup:(imagetag)
```
The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md):
@@ -87,7 +95,7 @@ The following image tags are available along with their tagged release based on
| latest | `:latest` |
```bash
docker pull tiredofit/db-backup:(imagetag)
docker pull docker.io/tiredofit/db-backup:(imagetag)
```
#### Multi Architecture
Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architectures, type `docker manifest inspect (image):(tag)`
@@ -101,19 +109,21 @@ Images are built primarily for `amd64` architecture, and may also include builds
* Set various [environment variables](#environment-variables) to understand the capabilities of this image.
* Map [persistent storage](#data-volumes) for access to configuration and data files for backup.
* Make [networking ports](#networking) available for public access if necessary
### Persistent Storage
The following directories are used for configuration and can be mapped for persistent storage.
| Directory | Description |
| ------------------------ | ---------------------------------------------------------------------------------- |
| `/backup` | Backups |
| `/assets/custom-scripts` | *Optional* Put custom scripts in this directory to execute after backup operations |
| Directory | Description |
| ---------------------- | ----------------------------------------------------------------------------------- |
| `/backup` | Backups |
| `/assets/scripts/pre` | *Optional* Put custom scripts in this directory to execute before backup operations |
| `/assets/scripts/post` | *Optional* Put custom scripts in this directory to execute after backup operations |
### Environment Variables
#### Base Images used
This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) or [Debian Linux](https://hub.docker.com/r/tiredofit/debian) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handled via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`, `curl`, `less`, `logrotate`, `nano`, `vim`.
This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handled via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`, `curl`, `less`, `logrotate`, `nano`.
Be sure to view the following repositories to understand all the customizable options:
@@ -123,55 +133,70 @@ Be sure to view the following repositories to understand all the customizable op
#### Container Options
| Parameter | Description | Default |
| -------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` |
| `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
| `POST_SCRIPT` | Fill this variable in with a command to execute post the script backing up | |
| `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE`
| Parameter | Description | Default |
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `BACKUP_LOCATION` | Backup to `FILESYSTEM`, `blobxfer` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` |
| `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
| `CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | |
| `POST_SCRIPT` | Fill this variable in with a command to execute post backing up | |
| `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE` |
### Database Specific Options
| Parameter | Description | Default |
| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas eg `db1,db2` | |
| `DB_NAME_EXCLUDE` | If using `ALL` - use this to exclude databases, separated via commas, from being backed up | |
| `DB_USER` | username for the database(s) - Can use `root` for MySQL | |
| `DB_PASS` | (optional if DB doesn't require it) password for the database | |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
| `INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2.x` series - AMD64 and ARM64 only for `2` | |
| Parameter | Description | Default | `_FILE` |
| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- | ------- |
| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | | |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | | |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | | x |
| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB_NAME_EXCLUDE` | If using `ALL` - use this to exclude databases, separated via commas, from being backed up | | x |
| `DB_USER` | username for the database(s) - Can use `root` for MySQL | | x |
| `DB_PASS` | (optional if DB doesn't require it) password for the database | | x |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies | x |
| `INFLUX_VERSION` | What version of Influx are you backing up from, the `1.x` or `2.x` series - AMD64 and ARM64 only for `2` | | |
| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | | x |
| | This environment variable will be parsed to populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can override them by making your own entries | | |
#### For Influx DB2:
Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST` use syntax of `http(s)://db-name`
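For example, a hypothetical run against an InfluxDB 2 host (names and token are placeholders):

```bash
# DB_USER carries the organization name; DB_PASS carries the root token.
docker run -d --name influx-backup \
  -e DB_TYPE=influx \
  -e INFLUX_VERSION=2 \
  -e DB_HOST=http://influxdb \
  -e DB_USER=my-org \
  -e DB_PASS=my-root-token \
  -e DB_NAME=ALL \
  tiredofit/db-backup
```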
### Scheduling Options
| Parameter | Description | Default |
| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump frequency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
| Parameter | Description | Default |
| ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `/backup` |
| `DB_DUMP_TARGET_ARCHIVE` | Optional directory where the database dump archives are kept. | `${DB_DUMP_TARGET}/archive/` |
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump frequency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
| `DB_ARCHIVE_TIME` | Value in minutes to move all files older than (x) from `DB_DUMP_TARGET` to `DB_DUMP_TARGET_ARCHIVE` - which is useful when pairing against an external backup system. | |
- You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to parse properly. There have been reports that values starting with a `0` get converted into a different format, which will not allow the timer to start at the correct time.
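For instance, in a hypothetical `docker run` invocation, quoting keeps the leading zero intact:

```bash
# Quote the value so a leading zero is not reinterpreted before it
# reaches the container.
docker run -d --name db-backup \
  -e DB_DUMP_BEGIN="0415" \
  -e DB_DUMP_FREQ=1440 \
  tiredofit/db-backup
```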
### Backup Options
| Parameter | Description | Default |
| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | -------------- |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
| `COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` |
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
| `ENABLE_CHECKSUM` | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` |
| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | |
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` |
| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` |
| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` |
| Parameter | Description | Default | `_FILE` |
| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ------- |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` | |
| `COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` | |
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` | |
| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` | |
| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` | |
| `ENABLE_CHECKSUM` | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` | |
| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` | |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | | |
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` | |
| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` | |
| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` | |
| `MYSQL_ENABLE_TLS` | Enable TLS functionality for MySQL client | `FALSE` | |
| `MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` | |
| `MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` | |
| `MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` | x |
| `MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | | x |
| `MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | | x |
- When using compression with MongoDB, only `GZ` compression is possible.
@@ -179,19 +204,36 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
If `BACKUP_LOCATION` = `S3` then the following options are used.
| Parameter | Description | Default |
| --------------------- | ----------------------------------------------------------------------------------------- | ------- |
| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | |
| `S3_KEY_ID` | S3 Key ID | |
| `S3_KEY_SECRET` | S3 Key Secret | |
| `S3_PATH` | S3 Pathname to save to e.g. '`backup`' | |
| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | |
| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` |
| `S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | |
| `S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | |
| _*OR*_ | | |
| `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` |
| Parameter | Description | Default | `_FILE` |
| --------------------- | ----------------------------------------------------------------------------------------- | ------- | ------- |
| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | | x |
| `S3_KEY_ID` | S3 Key ID (Optional) | | x |
| `S3_KEY_SECRET` | S3 Key Secret (Optional) | | x |
| `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | | x |
| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | | x |
| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | | x |
| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` | x |
| `S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | | x |
| `S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | | x |
| _*OR*_ | | | |
| `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` | |
- When `S3_KEY_ID` and/or `S3_KEY_SECRET` is not set, the container will try to use an assigned IAM role (if any) for uploading the backup files to the S3 bucket.
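A hedged example pointing backups at a MinIO endpoint (bucket, path, and credentials are hypothetical; omit the key variables to fall back to an IAM role):

```bash
docker run -d --name db-backup \
  -e BACKUP_LOCATION=S3 \
  -e S3_HOST=minio:9000 \
  -e S3_PROTOCOL=http \
  -e S3_BUCKET=mybucket \
  -e S3_PATH=backup \
  -e S3_REGION=us-east-1 \
  -e S3_KEY_ID=minio-access-key \
  -e S3_KEY_SECRET=minio-secret-key \
  tiredofit/db-backup
```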
#### Upload to an Azure storage account with `blobxfer`
Supports uploading backup files with [blobxfer](https://github.com/Azure/blobxfer) to Azure file share storage.
If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
| Parameter | Description | Default | `_FILE` |
| ------------------------------ | ------------------------------------------- | ------------------- | ------- |
| `BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
> This service uploads files from the backup target directory `DB_DUMP_TARGET`.
> If a cleanup configuration in `DB_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
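A minimal sketch with a hypothetical storage account; use the `_FILE` variants if you keep secrets in Docker secrets:

```bash
docker run -d --name db-backup \
  -e BACKUP_LOCATION=blobxfer \
  -e BLOBXFER_STORAGE_ACCOUNT=mystorageaccount \
  -e BLOBXFER_STORAGE_ACCOUNT_KEY=examplekey== \
  -e BLOBXFER_REMOTE_PATH=/docker-db-backup \
  tiredofit/db-backup
```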
## Maintenance
@@ -205,7 +247,7 @@ docker exec -it (whatever your container name is) bash
### Manual Backups
Manual Backups can be performed by entering the container and typing `backup-now`
- Recently there was a request to have the container work with Kukbernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup by executing and then exit cleanly. An alternative way to running the script is to execute `/etc/services.available/10-db-backup/run`.
- Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then `MANUAL_RUN_FOREVER=FALSE`. You would also want to disable a few features from the upstream base images, specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup, and then exit cleanly. An alternative way to run the script is to execute `/etc/services.available/10-db-backup/run`.
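As a hedged sketch of that one-shot setup (database values are placeholders), the container would be invoked once per scheduled run:
````bash
docker run --rm \
  -e MODE=MANUAL \
  -e MANUAL_RUN_FOREVER=FALSE \
  -e CONTAINER_ENABLE_SCHEDULING=FALSE \
  -e CONTAINER_ENABLE_MONITORING=FALSE \
  -e DB_TYPE=postgres \
  -e DB_HOST=example-db \
  -e DB_NAME=example \
  -e DB_USER=example \
  -e DB_PASS=examplepassword \
  tiredofit/db-backup
````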
### Restoring Databases
Entering the container and executing `restore` will launch a menu-based script to restore your backups - MariaDB, Postgres, and Mongo are supported.
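For example, a non-interactive restore (filename and credentials are placeholders; the argument order follows the `<filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>` syntax documented below):
````bash
restore /backup/mysql_example_example-db_20220315-000000.sql.bz2 mysql example-db example example examplepassword 3306
````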
@@ -230,7 +272,41 @@ If you only enter some of the arguments you will be prompted to fill them in.
### Custom Scripts
If you want to execute a custom script at the end of backup, you can drop bash scripts with the extension of `.sh` in this directory. See the following example to utilize:
#### Path Options
| Parameter | Description | Default |
| ---------------------- | --------------------------------------------------------------------------- | ----------------------- |
| `SCRIPT_LOCATION_PRE` | Location on filesystem inside container to execute bash scripts pre backup | `/assets/scripts/pre/` |
| `SCRIPT_LOCATION_POST` | Location on filesystem inside container to execute bash scripts post backup | `/assets/scripts/post/` |
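A hedged sketch of wiring these in (host paths are placeholders):
````bash
docker run -d \
  -v ./scripts/pre:/assets/scripts/pre \
  -v ./scripts/post:/assets/scripts/post \
  -v ./backups:/backup \
  tiredofit/db-backup
# database environment variables omitted for brevity
````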
#### Pre Backup
If you want to execute a custom script before a backup starts, you can drop bash scripts with the extension of `.sh` in the location defined in `SCRIPT_LOCATION_PRE`. See the following example to utilize:
````bash
$ cat pre-script.sh
#!/bin/bash
# #### Example Pre Script
# #### $1=DB_TYPE (Type of Backup)
# #### $2=DB_HOST (Backup Host)
# #### $3=DB_NAME (Name of Database backed up)
# #### $4=BACKUP START TIME (Seconds since Epoch)
# #### $5=BACKUP FILENAME (Filename)
echo "${1} Backup Starting on ${2} for ${3} at ${4}. Filename: ${5}"
````
Scripts are called with the following arguments:
````bash
## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
${f} "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${target}"
````
Outputs the following on the console:
`mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_20220315-000000.sql.bz2`
#### Post backup
If you want to execute a custom script at the end of a backup, you can drop bash scripts with the extension `.sh` in the location defined in `SCRIPT_LOCATION_POST`. To support legacy users, `/assets/custom-scripts` is also scanned and executed. See the following example:
````bash
$ cat post-script.sh
@@ -247,26 +323,30 @@ $ cat post-script.sh
# #### $8=BACKUP FILENAME (Filename)
# #### $9=BACKUP FILESIZE
# #### $10=HASH (If CHECKSUM enabled)
# #### $11=MOVE_EXIT_CODE
echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes MD5: ${10}"
````
Scripts are called with the following arguments:
````bash
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE MOVE_EXIT_CODE
${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}" "${move_exit_code}"
````
Outputs the following on the console:
`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_20220315-000000.sql.bz2 Size: 7795 bytes Hash: 952fbaafa30437494fdf3989a662cd40`
`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_20220315-000000.sql.bz2 Size: 7795 bytes Hash: 952fbaafa30437494fdf3989a662cd40 0`
If you wish to change the size value from bytes to megabytes, set the environment variable `SIZE_VALUE=megabytes`.
You must make your scripts executable; an internal check will otherwise skip running them.
If for some reason your filesystem or host does not detect the executable bit correctly, set the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass the check.
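For example (the script name is a placeholder):
````bash
chmod +x /assets/scripts/post/post-script.sh
````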
## Support
These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
### Usage
- The [Discussions board](../../discussions) is a great place for working with the community on tips and tricks of using this image.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) personalized support.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for personalized support
### Bugfixes
- Please submit a [Bug Report](issues/new) if something isn't working as expected. I'll do my best to issue a fix in short order.

examples/.gitignore vendored Normal file (5 changed lines)

@@ -0,0 +1,5 @@
# See http://help.github.com/ignore-files/ for more about ignoring files.
# Example container mounted folders
**/backups/
**/db/


@@ -0,0 +1,69 @@
#
# Example for Microsoft SQL Server
# upload with blobxfer to azure storage
#
version: '2'
networks:
example-mssql-blobxfer-net:
name: example-mssql-blobxfer-net
services:
example-mssql-s3-db:
hostname: example-db-host
image: mcr.microsoft.com/mssql/server:2019-latest
container_name: example-mssql-s3-db
restart: unless-stopped
ports:
- "127.0.0.1:11433:1433"
networks:
example-mssql-blobxfer-net:
volumes:
- ./tmp/backups:/tmp/backups # shared tmp backup directory
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: 5hQa0utRFBpIY3yhoIyE
MSSQL_PID: Express
example-mssql-blobxfer-db-backup:
container_name: example-mssql-blobxfer-db-backup
# to build and use the image from the current source, run in a terminal:
#   docker build -t tiredofit/db-backup-mssql-blobxfer .
# otherwise, replace the image name below with the published image:
# image: tiredofit/db-backup
image: tiredofit/db-backup-mssql-blobxfer
links:
- example-mssql-s3-db
volumes:
- ./backups:/backup
- ./tmp/backups:/tmp/backups # shared tmp backup directory
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
# - DEBUG_MODE=TRUE
- DB_TYPE=mssql
- DB_HOST=example-db-host
# - DB_PORT=1488
# - DB_NAME=ALL # [ALL] does not work on SQL Server.
# create database with name `test1` manually first
- DB_NAME=test1 # Create this database
- DB_USER=sa
- DB_PASS=5hQa0utRFBpIY3yhoIyE
- DB_DUMP_FREQ=1 # backup every minute
# - DB_DUMP_BEGIN=0000 # backup starts immediately
- DB_CLEANUP_TIME=3 # clean backups that are older than 3 minutes
- ENABLE_CHECKSUM=TRUE
- CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
- CONTAINER_ENABLE_MONITORING=FALSE
# === Azure blobxfer ===
- BACKUP_LOCATION=blobxfer
# Add your Azure storage account name here
- BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name}
# Add your Azure storage account key here
- BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
- BLOBXFER_REMOTE_PATH=docker-db-backup
restart: always
networks:
example-mssql-blobxfer-net:


@@ -0,0 +1,61 @@
#
# Example for Microsoft SQL Server
#
version: '2'
networks:
example-mssql-net:
name: example-mssql-net
services:
example-mssql-db:
hostname: example-db-host
image: mcr.microsoft.com/mssql/server:2019-latest
container_name: example-mssql-db
restart: unless-stopped
ports:
- "127.0.0.1:11433:1433"
networks:
example-mssql-net:
volumes:
- ./tmp/backups:/tmp/backups # shared tmp backup directory
environment:
ACCEPT_EULA: Y
MSSQL_SA_PASSWORD: 5hQa0utRFBpIY3yhoIyE
MSSQL_PID: Express
example-mssql-db-backup:
container_name: example-mssql-db-backup
# to build and use the image from the current source, run in a terminal:
#   docker build -t tiredofit/db-backup-mssql .
# otherwise, replace the image name below with the published image:
# image: tiredofit/db-backup
image: tiredofit/db-backup-mssql
links:
- example-mssql-db
volumes:
- ./backups:/backup
- ./tmp/backups:/tmp/backups # shared tmp backup directory
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
# - DEBUG_MODE=TRUE
- DB_TYPE=mssql
- DB_HOST=example-db-host
# - DB_PORT=1488
# - DB_NAME=ALL # [ALL] does not work on SQL Server.
# create database with name `test1` manually first
- DB_NAME=test1
- DB_USER=sa
- DB_PASS=5hQa0utRFBpIY3yhoIyE
- DB_DUMP_FREQ=1 # backup every minute
# - DB_DUMP_BEGIN=0000 # backup starts immediately
- DB_CLEANUP_TIME=5 # clean backups that are older than 5 minutes
- ENABLE_CHECKSUM=FALSE
- CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
- CONTAINER_ENABLE_MONITORING=FALSE
restart: always
networks:
example-mssql-net:


@@ -1,9 +1,16 @@
version: '2'
networks:
example-db-network:
name: example-db-network
services:
example-db:
hostname: example-db-host
container_name: example-db
image: mariadb:latest
ports:
- 13306:3306
volumes:
- ./db:/var/lib/mysql
environment:
@@ -12,6 +19,8 @@ services:
- MYSQL_USER=example
- MYSQL_PASSWORD=examplepassword
restart: always
networks:
- example-db-network
example-db-backup:
container_name: example-db-backup
@@ -22,17 +31,23 @@ services:
- ./backups:/backup
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- TIMEZONE=America/Vancouver
- CONTAINER_ENABLE_MONITORING=FALSE
# - DEBUG_MODE=TRUE
- DB_TYPE=mariadb
- DB_HOST=example-db
- DB_HOST=example-db-host
- DB_NAME=example
- DB_USER=example
- DB_PASS="examplepassword"
- DB_DUMP_FREQ=1440
- DB_DUMP_BEGIN=0000
- DB_CLEANUP_TIME=8640
- DB_PASS=examplepassword
- DB_DUMP_FREQ=1 # backup every minute
# - DB_DUMP_BEGIN=0000 # backup starts immediately
- DB_CLEANUP_TIME=5 # clean backups that are older than 5 minutes
- CHECKSUM=SHA1
- COMPRESSION=ZSTD
- COMPRESSION=GZ
- SPLIT_DB=FALSE
restart: always
networks:
- example-db-network


install/assets/defaults/10-db-backup Executable file → Normal file (10 changed lines)

@@ -1,22 +1,32 @@
#!/command/with-contenv bash
BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
CHECKSUM=${CHECKSUM:-"MD5"}
COMPRESSION=${COMPRESSION:-"ZSTD"}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
CREATE_LATEST_SYMLINK=${CREATE_LATEST_SYMLINK:-"TRUE"}
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
DB_DUMP_TARGET_ARCHIVE=${DB_DUMP_TARGET_ARCHIVE:-"${DB_DUMP_TARGET}/archive/"}
ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
MODE=${MODE:-"AUTO"}
MYSQL_ENABLE_TLS=${MYSQL_ENABLE_TLS:-"FALSE"}
MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
MYSQL_SINGLE_TRANSACTION=${MYSQL_SINGLE_TRANSACTION:-"TRUE"}
MYSQL_STORED_PROCEDURES=${MYSQL_STORED_PROCEDURES:-"TRUE"}
MYSQL_TLS_CA_FILE=${MYSQL_TLS_CA_FILE:-"/etc/ssl/cert.pem"}
MYSQL_TLS_VERIFY=${MYSQL_TLS_VERIFY:-"FALSE"}
MYSQL_TLS_VERSION=${MYSQL_TLS_VERSION:-"TLSv1.1,TLSv1.2,TLSv1.3"}
PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
SCRIPT_LOCATION_PRE=${SCRIPT_LOCATION_PRE:-"/assets/scripts/pre/"}
SCRIPT_LOCATION_POST=${SCRIPT_LOCATION_POST:-"/assets/scripts/post/"}
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SKIP_AVAILABILITY_CHECK=${SKIP_AVAILABILITY_CHECK:-"FALSE"}
SPLIT_DB=${SPLIT_DB:-"TRUE"}
TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}

install/assets/functions/10-db-backup Executable file → Normal file (645 changed lines)

@@ -1,31 +1,80 @@
#!/command/with-contenv bash
bootstrap_variables() {
sanity_var DB_TYPE "Set appropriate DB_TYPE"
transform_file_var \
DB_HOST \
DB_NAME \
DB_PORT \
DB_USER \
DB_PASS
case "${DB_TYPE,,}" in
couch* )
dbtype=couch
DB_PORT=${DB_PORT:-5984}
file_env 'DB_USER'
file_env 'DB_PASS'
sanity_var DB_USER
sanity_var DB_PASS
;;
influx* )
dbtype=influx
DB_PORT=${DB_PORT:-8088}
file_env 'DB_USER'
file_env 'DB_PASS'
case "${INFLUX_VERSION}" in
1) DB_PORT=${DB_PORT:-8088} ;;
2) DB_PORT=${DB_PORT:-8086} ;;
esac
sanity_var DB_USER
sanity_var DB_PASS
sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
;;
mongo* )
dbtype=mongo
DB_PORT=${DB_PORT:-27017}
[[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
transform_file_var MONGO_CUSTOM_URI
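# Parse MONGO_CUSTOM_URI (scheme://[user:pass@]host[:port][/database][?options]) into its parts so DB_NAME and DB_HOST can default from the URI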
if [ -n "${MONGO_CUSTOM_URI}" ] ; then
mongo_uri_proto=$(echo "${MONGO_CUSTOM_URI}" | grep :// | sed -e's,^\(.*://\).*,\1,g')
mongo_uri_scratch="${MONGO_CUSTOM_URI/${mongo_uri_proto}/}"
mongo_uri_username_password="$(echo "${mongo_uri_scratch}" | grep @ | rev | cut -d@ -f2- | rev)"
if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch="$(echo "${mongo_uri_scratch}" | rev | cut -d@ -f1 | rev)" ; fi
mongo_uri_port="$(echo "${mongo_uri_scratch}" | grep : | rev | cut -d: -f2- | rev)"
if [ -n "${mongo_uri_port}" ]; then mongo_uri_port="$(echo "${mongo_uri_scratch}" | rev | cut -d: -f1 | cut -d/ -f2 | rev)" ; fi
mongo_uri_hostname="$(echo "${mongo_uri_scratch}" | cut -d/ -f1 | cut -d: -f1 )"
mongo_uri_database="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f1 )"
mongo_uri_options="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f2 )"
DB_NAME=${DB_NAME:-"${mongo_uri_database,,}"}
DB_HOST=${DB_HOST:-"${mongo_uri_hostname,,}"}
else
DB_PORT=${DB_PORT:-27017}
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
transform_file_var DB_AUTH
[[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
fi
;;
"mysql" | "mariadb" )
dbtype=mysql
DB_PORT=${DB_PORT:-3306}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
transform_file_var DB_PASS
if [ -n "${DB_PASS}" ] ; then export MYSQL_PWD=${DB_PASS} ; fi
if var_true "${MYSQL_ENABLE_TLS}" ; then
if [ -n "${MYSQL_TLS_CA_FILE}" ] ; then
mysql_tls_args="--ssl_ca=${MYSQL_TLS_CA_FILE}"
fi
if [ -n "${MYSQL_TLS_CERT_FILE}" ] ; then
mysql_tls_args="${mysql_tls_args} --ssl_cert=${MYSQL_TLS_CERT_FILE}"
fi
if [ -n "${MYSQL_TLS_KEY_FILE}" ] ; then
mysql_tls_args="${mysql_tls_args} --ssl_key=${MYSQL_TLS_KEY_FILE}"
fi
if var_true "${TLS_VERIFY}" ; then
mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
fi
if [ -n "${MYSQL_TLS_VERSION}" ] ; then
mysql_tls_args="${mysql_tls_args} --tls_version=${MYSQL_TLS_VERSION}"
fi
fi
;;
"mssql" | "microsoftsql" )
apkArch="$(apk --print-arch)"; \
@@ -39,54 +88,56 @@ bootstrap_variables() {
postgres* | "pgsql" )
dbtype=pgsql
DB_PORT=${DB_PORT:-5432}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
"redis" )
dbtype=redis
DB_PORT=${DB_PORT:-6379}
[[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
;;
sqlite* )
dbtype=sqlite3
;;
* )
print_error "I don't recognize 'DB_TYPE=${DB_TYPE}' - Exitting.."
exit 99
;;
esac
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
file_env 'S3_KEY_ID'
file_env 'S3_KEY_SECRET'
transform_file_var \
S3_BUCKET \
S3_KEY_ID \
S3_KEY_SECRET \
S3_PATH \
S3_REGION \
S3_HOST \
S3_PROTOCOL \
S3_EXTRA_OPTS \
S3_CERT_CA_FILE
fi
### Set the Database Authentication Details
case "$dbtype" in
"mongo" )
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
[[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
;;
"mysql" )
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
;;
"postgres" )
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
;;
"redis" )
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
;;
esac
if [ "${BACKUP_LOCATION,,}" = "blobxfer" ] ; then
transform_file_var \
BLOBXFER_STORAGE_ACCOUNT \
BLOBXFER_STORAGE_ACCOUNT_KEY
fi
}
backup_couch() {
pre_dbbackup
prepare_dbbackup
target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
ltarget=couch_${DB_NAME}_${DB_HOST#*//}
compression
pre_dbbackup ${DB_NAME}
print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
curl -sSL -X GET ${DB_HOST}:${DB_PORT}/${DB_NAME}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup ${DB_NAME}
}
@@ -101,36 +152,44 @@ backup_influx() {
case "${INFLUX_VERSION,,}" in
1 )
for db in ${db_names}; do
pre_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="-bucket $db" ; else db=all ; fi
prepare_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
target=influx_${db}_${DB_HOST#*//}_${now}
ltarget=influx_${db}_${DB_HOST#*//}
compression
pre_dbbackup $db
print_notice "Dumping Influx database: '${db}'"
influxd backup ${influx_compression} ${bucket} -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
check_exit_code backup $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
ltarget=influx_${db}_${DB_HOST#*//}
generate_checksum
move_dbbackup
check_exit_code move $target_dir
post_dbbackup $db
done
;;
2 )
for db in ${db_names}; do
pre_dbbackup
prepare_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
target=influx2_${db}_${DB_HOST#*//}_${now}
ltarget=influx2_${db}_${DB_HOST#*//}
compression
pre_dbbackup $db
print_notice "Dumping Influx2 database: '${db}'"
influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
check_exit_code backup $target_dir
create_archive
target=influx2_${db}_${DB_HOST#*//}_${now}.tar${extension}
ltarget=influx2_${db}_${DB_HOST#*//}
generate_checksum
move_dbbackup
check_exit_code move $target_dir
post_dbbackup $db
done
;;
@@ -138,33 +197,45 @@ backup_influx() {
}
backup_mongo() {
pre_dbbackup
prepare_dbbackup
if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
else
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
mongo_compression="--gzip"
compression_string="and compressing with gzip"
fi
if [ -n "${MONGO_CUSTOM_URI}" ] ; then
mongo_backup_parameter="--uri=${MONGO_CUSTOM_URI} ${EXTRA_OPTS}"
else
mongo_backup_parameter="--host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}"
fi
pre_dbbackup "${DB_NAME}"
print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
silent mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} ${mongo_backup_parameter}
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
post_dbbackup
check_exit_code move $target
post_dbbackup "${DB_NAME}"
}
backup_mssql() {
pre_dbbackup
prepare_dbbackup
target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
ltarget=mssql_${DB_NAME,,}_${DB_HOST,,}
compression
pre_dbbackup "${DB_NAME}"
print_notice "Dumping MSSQL database: '${DB_NAME}'"
/opt/mssql-tools/bin/sqlcmd -E -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} Q "BACKUP DATABASE \[${DB_NAME}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
silent /opt/mssql-tools18/bin/sqlcmd -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE [${DB_NAME}] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup $DB_NAME
}
@@ -178,7 +249,7 @@ backup_mysql() {
if [ "${DB_NAME,,}" = "all" ] ; then
print_debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER ${mysql_tls_args} ${EXTRA_OPTS} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
@@ -194,28 +265,34 @@ backup_mysql() {
if var_true "${SPLIT_DB}" ; then
for db in ${db_names} ; do
pre_dbbackup
prepare_dbbackup
target=mysql_${db}_${DB_HOST,,}_${now}.sql
ltarget=mysql_${db}_${DB_HOST,,}
compression
pre_dbbackup $db
print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup $db
done
else
print_debug "Not splitting database dumps into their own files"
pre_dbbackup
prepare_dbbackup
target=mysql_all_${DB_HOST,,}_${now}.sql
ltarget=mysql_all_${DB_HOST,,}
compression
pre_dbbackup all
print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup all
fi
}
@@ -241,22 +318,27 @@ backup_pgsql() {
if var_true "${SPLIT_DB}" ; then
for db in ${db_names} ; do
pre_dbbackup
prepare_dbbackup
target=pgsql_${db}_${DB_HOST,,}_${now}.sql
ltarget=pgsql_${db}_${DB_HOST,,}
compression
pre_dbbackup $db
print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup $db
done
else
print_debug "Not splitting database dumps into their own files"
pre_dbbackup
prepare_dbbackup
target=pgsql_all_${DB_HOST,,}_${now}.sql
ltarget=pgsql_all_${DB_HOST,,}
compression
pre_dbbackup all
print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
tmp_db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
for r_db_name in $(echo $db_names | xargs); do
@@ -268,18 +350,20 @@ backup_pgsql() {
done
pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup all
fi
}
backup_redis() {
pre_dbbackup
prepare_dbbackup
print_notice "Dumping Redis - Flushing Redis Cache First"
target=redis_all_${DB_HOST,,}_${now}.rdb
echo bgsave | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
ltarget=redis_${DB_HOST,,}
echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
sleep 10
try=5
while [ $try -gt 0 ] ; do
@@ -287,6 +371,7 @@ backup_redis() {
ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
print_notice "Redis Backup Complete"
exit_code=0
break
fi
try=$((try - 1))
@@ -295,134 +380,161 @@ backup_redis() {
done
target_original=${target}
compression
pre_dbbackup all
$compress_cmd "${TEMP_LOCATION}/${target_original}"
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup all
}
backup_sqlite3() {
pre_dbbackup
prepare_dbbackup
db=$(basename "${DB_HOST}")
db="${db%.*}"
target=sqlite3_${db}_${now}.sqlite3
ltarget=sqlite3_${db}.sqlite3
compression
pre_dbbackup $db
print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
silent sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
exit_code=$?
check_exit_code $target
check_exit_code backup $target
cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup $db
}
check_availability() {
### Set the Database Type
case "$dbtype" in
"couch" )
counter=0
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${DB_HOST}:${DB_PORT})
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
print_warn "CouchDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"influx" )
counter=0
case "${INFLUX_VERSION,,}" in
1 )
while ! (nc -z ${DB_HOST#*//} ${DB_PORT}) ; do
if var_false "${SKIP_AVAILABILITY_CHECK}" ; then
case "$dbtype" in
"couch" )
counter=0
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${DB_HOST}:${DB_PORT})
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
print_warn "CouchDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"influx" )
counter=0
case "${INFLUX_VERSION,,}" in
1 )
while ! (nc -z ${DB_HOST#*//} ${DB_PORT}) ; do
sleep 5
(( counter+=5 ))
print_warn "InfluxDB Host '${DB_HOST#*//}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
2 )
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${DB_HOST}:${DB_PORT}/health)
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
print_warn "InfluxDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
esac
;;
"mongo" )
if [ -n "${MONGO_CUSTOM_URI}" ] ; then
print_debug "Skipping Connectivity Check"
else
counter=0
while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
sleep 5
(( counter+=5 ))
print_warn "InfluxDB Host '${DB_HOST#*//}' is not accessible, retrying.. ($counter seconds so far)"
print_warn "Mongo Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
2 )
code_received=0
while [ "${code_received}" != "200" ]; do
code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${DB_HOST}:${DB_PORT}/health)
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
print_warn "InfluxDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
esac
;;
"mongo" )
counter=0
while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
sleep 5
(( counter+=5 ))
print_warn "Mongo Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"mysql" )
counter=0
export MYSQL_PWD=${DB_PASS}
while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
sleep 5
(( counter+=5 ))
print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
done
;;
"mssql" )
counter=0
while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
sleep 5
(( counter+=5 ))
print_warn "MSSQL Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"pgsql" )
counter=0
export PGPASSWORD=${DB_PASS}
until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
do
sleep 5
(( counter+=5 ))
print_warn "Postgres Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"redis" )
counter=0
while ! (nc -z "${DB_HOST}" "${DB_PORT}") ; do
sleep 5
(( counter+=5 ))
print_warn "Redis Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"sqlite3" )
if [[ ! -e "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' does not exist."
exit_code=2
exit $exit_code
elif [[ ! -f "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' is not a file."
exit_code=2
exit $exit_code
elif [[ ! -r "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' is not readable."
exit_code=2
exit $exit_code
fi
;;
esac
fi
;;
"mysql" )
counter=0
transform_file_var DB_PASS
export MYSQL_PWD=${DB_PASS}
while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
sleep 5
(( counter+=5 ))
print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
done
;;
"mssql" )
counter=0
while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
sleep 5
(( counter+=5 ))
print_warn "MSSQL Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"pgsql" )
counter=0
until pg_isready --host=${DB_HOST} --port=${DB_PORT} -q
do
sleep 5
(( counter+=5 ))
print_warn "Postgres Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"redis" )
counter=0
while ! (nc -z "${DB_HOST}" "${DB_PORT}") ; do
sleep 5
(( counter+=5 ))
print_warn "Redis Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
done
;;
"sqlite3" )
if [[ ! -e "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' does not exist."
exit_code=2
exit $exit_code
elif [[ ! -f "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' is not a file."
exit_code=2
exit $exit_code
elif [[ ! -r "${DB_HOST}" ]]; then
print_error "File '${DB_HOST}' is not readable."
exit_code=2
exit $exit_code
fi
;;
esac
fi
}
check_exit_code() {
print_debug "DB Backup Exit Code is ${exit_code}"
case "${exit_code}" in
0 )
print_info "DB Backup of '${1}' completed successfully"
;;
* )
print_error "DB Backup of '${1}' reported errors"
master_exit_code=1
case "${1}" in
backup )
case "${exit_code}" in
0 )
print_info "DB Backup of '${2}' completed successfully"
;;
* )
print_error "DB Backup of '${2}' reported errors"
master_exit_code=1
;;
esac
;;
move )
case "${move_exit_code}" in
0 )
print_debug "Moving of backup '${2}' completed successfully"
;;
* )
print_error "Moving of backup '${2}' reported errors"
master_exit_code=1
;;
esac
;;
esac
}
@@ -430,23 +542,55 @@ check_exit_code() {
cleanup_old_data() {
if [ -n "${DB_CLEANUP_TIME}" ]; then
if [ "${master_exit_code}" != 1 ]; then
print_info "Cleaning up old backups"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
case "${BACKUP_LOCATION,,}" in
"blobxfer" )
print_info "Cleaning up old backups on filesystem"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
print_info "Syncing changes via blobxfer"
silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} --delete --delete-only
;;
"file" | "filesystem" )
print_info "Cleaning up old backups on filesystem"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
;;
"s3" | "minio" )
print_info "Cleaning up old backups on S3 storage"
aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | grep " PRE " -v | while read -r s3_file; do
s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
s3_createdate=$(date -d "$s3_createdate" "+%s")
s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
if [[ $s3_createdate -le $s3_olderthan ]] ; then
s3_filename=$(echo $s3_file | awk {'print $4'})
if [ "$s3_filename" != "" ] ; then
print_debug "Deleting $s3_filename"
aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
fi
done
;;
esac
else
print_info "Skipping Cleaning up old backups because there were errors in backing up"
print_error "Skipping Cleaning up old backups because there were errors in backing up"
fi
fi
}
compression() {
if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
PARALLEL_COMPRESSION_THREADS=1
fi
case "${COMPRESSION,,}" in
compression() {
if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
PARALLEL_COMPRESSION_THREADS=1
fi
if var_true "${GZ_RSYNCABLE}" ; then
gz_rsyncable=--rsyncable
fi
case "${COMPRESSION,,}" in
gz* )
compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} ${gz_rsyncable}"
compression_type="gzip"
extension=".gz"
dir_compress_cmd=${compress_cmd}
@@ -454,7 +598,7 @@ compression() {
target=${target}.gz
;;
bz* )
compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
compress_cmd="pbzip2 -q -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
compression_type="bzip2"
dir_compress_cmd=${compress_cmd}
extension=".bz2"
@@ -470,7 +614,7 @@ compression() {
target=${target}.xz
;;
zst* )
compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} ${gz_rsyncable}"
compression_type="zstd"
dir_compress_cmd=${compress_cmd}
extension=".zst"
@@ -478,9 +622,9 @@ compression() {
target=${target}.zst
;;
"none" | "false")
compress_cmd="cat"
compress_cmd="cat "
compression_type="none"
dir_compress_cmd="cat"
dir_compress_cmd="cat "
target_dir=${target}
;;
esac
@@ -503,15 +647,15 @@ compression() {
create_archive() {
if [ "${exit_code}" = "0" ] ; then
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
else
print_warn "Skipping creating archive file because backup did not complete successfully"
print_error "Skipping creating archive file because backup did not complete successfully"
fi
}
generate_checksum() {
if var_true "${ENABLE_CHECKSUM}" ;then
if var_true "${ENABLE_CHECKSUM}" ; then
if [ "${exit_code}" = "0" ] ; then
case "${CHECKSUM,,}" in
"md5" )
@@ -530,13 +674,16 @@ generate_checksum() {
checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
else
print_warn "Skipping Checksum creation because backup did not complete successfully"
print_error "Skipping Checksum creation because backup did not complete successfully"
fi
fi
}
move_dbbackup() {
if [ "${exit_code}" = "0" ] ; then
dbbackup_size="$(stat -c%s "${TEMP_LOCATION}"/"${target}")"
dbbackup_date="$(date -r "${TEMP_LOCATION}"/"${target}" +'%s')"
case "${SIZE_VALUE,,}" in
"b" | "bytes" )
SIZE_VALUE=1
@@ -560,13 +707,25 @@ move_dbbackup() {
"file" | "filesystem" )
print_debug "Moving backup to filesystem"
mkdir -p "${DB_DUMP_TARGET}"
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
if var_true "${ENABLE_CHECKSUM}" ; then mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/ ; fi
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
move_exit_code=$?
if var_true "${CREATE_LATEST_SYMLINK}" ; then
ln -sf "${DB_DUMP_TARGET}"/"${target}" "${DB_DUMP_TARGET}"/latest-"${ltarget}"
fi
if [ -n "${DB_ARCHIVE_TIME}" ] ; then
mkdir -p "${DB_DUMP_TARGET_ARCHIVE}"
find "${DB_DUMP_TARGET}"/ -maxdepth 1 -mmin +"${DB_ARCHIVE_TIME}" -iname "*" -exec mv {} "${DB_DUMP_TARGET_ARCHIVE}" \;
fi
;;
"s3" | "minio" )
print_debug "Moving backup to S3 Bucket"
export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
if [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
else
print_debug "Variable S3_KEY_ID or S3_KEY_SECRET is not set. Please ensure sufficiant IAM role is assigned."
fi
export AWS_DEFAULT_REGION=${S3_REGION}
if [ -f "${S3_CERT_CA_FILE}" ] ; then
print_debug "Using Custom CA for S3 Backups"
@@ -579,64 +738,138 @@ move_dbbackup() {
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
unset s3_ssl
unset s3_ca_cert
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
move_exit_code=$?
if var_true "${ENABLE_CHECKSUM}" ; then
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
if var_true "${ENABLE_CHECKSUM}" ; then rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"; fi
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
"blobxfer" )
print_info "Moving backup to S3 Bucket with blobxfer"
mkdir -p "${DB_DUMP_TARGET}"
if var_true "${ENABLE_CHECKSUM}" ; then mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/; fi
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET}
move_exit_code=$?
if var_true "${ENABLE_CHECKSUM}" ; then rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}" ; fi
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
esac
else
print_warn "Skipping moving DB Backup to final location because backup did not complete successfully"
print_error "Skipping moving DB Backup to final location because backup did not complete successfully"
fi
rm -rf "${TEMP_LOCATION}"/*
}
pre_dbbackup() {
prepare_dbbackup() {
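# Record the backup start time and derive the timestamped default filename (target) and 'latest' symlink base (ltarget)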
dbbackup_start_time=$(date +"%s")
now=$(date +"%Y%m%d-%H%M%S")
now_time=$(date +"%H:%M:%S")
now_date=$(date +"%Y-%m-%d")
ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
}
pre_dbbackup() {
### Pre Script Support
if [ -n "${PRE_SCRIPT}" ] ; then
if var_true "${PRE_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${PRE_SCRIPT}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
else
if [ -x "${PRE_SCRIPT}" ] ; then
print_notice "Found PRE_SCRIPT environment variable. Executing '${PRE_SCRIPT}"
eval "${PRE_SCRIPT}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
else
print_error "Can't execute PRE_SCRIPT environment variable '${PRE_SCRIPT}' as its filesystem bit is not executible!"
fi
fi
fi
### Pre Backup Custom Script Support
if [ -d "/assets/custom-scripts/pre" ] && dir_notempty "/assets/custom-scripts/pre" ; then
print_warning "Found Custom Post Scripts in /assets/custom-scripts/pre - Automatically moving them to '${SCRIPT_LOCATION_PRE}'"
mkdir -p "${SCRIPT_LOCATION_PRE}"
silent cp /assets/custom-scripts/pre/* "${SCRIPT_LOCATION_PRE}"
fi
if [ -d "${SCRIPT_LOCATION_PRE}" ] && dir_notempty "${SCRIPT_LOCATION_PRE}" ; then
for f in $(find ${SCRIPT_LOCATION_PRE} -name \*.sh -type f); do
if var_true "${PRE_SCRIPT_SKIP_X_VERIFY}" ; then
${f} "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
else
if [ -x "${f}" ] ; then
print_notice "Executing pre backup custom script : '${f}'"
## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
${f} "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
else
print_error "Can't run pre backup custom script: '${f}' as its filesystem bit is not executible!"
fi
fi
done
fi
}
post_dbbackup() {
dbbackup_finish_time=$(date +"%s")
dbbackup_total_time=$(echo $((dbbackup_finish_time-dbbackup_start_time)))
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_notice "Sending Backup Statistics to Zabbix"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "${dbbackup_size}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "${dbbackup_date}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
if [ "$?" != "0" ] ; then print_error "Error sending statistics, consider disabling with 'CONTAINER_ENABLE_MONITORING=FALSE'" ; fi
fi
### Post Script Support
if [ -n "${POST_SCRIPT}" ] ; then
if [ -x "${POST_SCRIPT}" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
if [ -x "${POST_SCRIPT}" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
fi
fi
fi
### Post Backup Custom Script Support
if [ -d "/assets/custom-scripts/" ] ; then
for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
if [ -x "${f}" ] ; then
print_notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
if [ -d "/assets/custom-scripts/" ] && dir_notempty "/assets/custom-scripts" ; then
print_warning "Found Custom Post Scripts in /assets/custom-scripts/ - Automatically moving them to '${SCRIPT_LOCATION_POST}'"
mkdir -p "${SCRIPT_LOCATION_POST}"
silent cp /assets/custom-scripts/* "${SCRIPT_LOCATION_POST}"
fi
if [ -d "${SCRIPT_LOCATION_POST}" ] && dir_notempty "${SCRIPT_LOCATION_POST}" ; then
for f in $(find ${SCRIPT_LOCATION_POST} -name \*.sh -type f); do
if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
if [ -x "${f}" ] ; then
print_notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
fi
fi
done
fi
print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
unset s3_ssl
unset s3_ca_cert
}
sanity_test() {
@@ -651,14 +884,6 @@ sanity_test() {
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
esac
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
sanity_var S3_BUCKET "S3 Bucket"
sanity_var S3_PATH "S3 Path"
sanity_var S3_REGION "S3 Region"
file_env 'S3_KEY_ID'
file_env 'S3_KEY_SECRET'
fi
}
setup_mode() {
@@ -671,8 +896,7 @@ setup_mode() {
mkdir -p /etc/services.d/99-run_forever
cat <<EOF > /etc/services.d/99-run_forever/run
#!/bin/bash
while true
do
while true; do
sleep 86400
done
EOF
@@ -693,4 +917,3 @@ EOF
fi
fi
}


@@ -6,6 +6,7 @@ prepare_service 03-monitoring
PROCESS_NAME="db-backup"
output_off
bootstrap_variables
sanity_test
setup_mode
create_zabbix dbbackup


@@ -4,7 +4,6 @@ source /assets/functions/00-container
source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
PROCESS_NAME="db-backup"
CONTAINER_LOG_LEVEL=DEBUG
bootstrap_variables
@@ -80,7 +79,7 @@ while true; do
cleanup_old_data
if var_true "${manual}" ; then
print_debug "Exitting due to manual mode"
print_debug "Exiting due to manual mode"
exit ${master_exit_code};
else
print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "


@@ -37,7 +37,7 @@ else
case "$1" in
"-h" )
cat <<EOF
${IMAGE_NAME} Restore Tool
${IMAGE_NAME} Restore Tool ${IMAGE_VERSION}
(c) 2022 Dave Conroy (https://github.com/tiredofit)
This script will assist you in recovering databases taken by the Docker image.
@@ -55,7 +55,7 @@ The image will also allow you to use environment variables or Docker secrets use
The script can also be executed skipping the interactive mode by using the following syntax:
$(basename $0) <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>
$(basename "$0") <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>
If you only enter some of the arguments you will be prompted to fill them in.
@@ -70,7 +70,7 @@ EOF
interactive_mode=true
;;
* )
interactive_mode=false
interactive_mode=false
;;
esac
fi
@@ -78,7 +78,7 @@ fi
get_filename() {
COLUMNS=12
prompt="Please select a file to restore:"
options=( $(find ${DB_DUMP_TARGET} -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
PS3="$prompt "
select opt in "${options[@]}" "Custom" "Quit" ; do
if (( REPLY == 2 + ${#options[@]} )) ; then
@@ -271,7 +271,7 @@ get_dbtype() {
parsed_type=true
print_debug "Parsed DBType: MariaDB/MySQL"
;;
psql | postgres* )
pgsql | postgres* )
parsed_type=true
print_debug "Parsed DBType: Postgresql"
;;
@@ -641,7 +641,7 @@ EOF
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
case "${q_dbname_menu,,}" in
case "${q_dbport_menu,,}" in
c* )
counter=1
q_dbport=" "
@@ -829,11 +829,7 @@ print_debug "Filename to recover '${r_filename}'"
## Question Database Type
if [ -n "${2}" ]; then
if [ ! -f "${2}" ]; then
get_dbtype
else
r_dbtype="${2}"
fi
r_dbtype="${2}"
else
get_dbtype
fi
@@ -841,59 +837,39 @@ print_debug "Database type '${r_dbtype}'"
## Question Database Host
if [ -n "${3}" ]; then
if [ ! -f "${3}" ]; then
get_dbhost
else
r_dbhost="${3}"
fi
r_dbhost="${3}"
else
get_dbhost
fi
print_debug "Database Host '${r_dbhost}'"
## Question Database Name
if [ -n "${3}" ]; then
if [ ! -f "${3}" ]; then
get_dbname
else
r_dbname="${3}"
fi
if [ -n "${4}" ]; then
r_dbname="${4}"
else
get_dbname
fi
print_debug "Database Name '${r_dbname}'"
## Question Database User
if [ -n "${4}" ]; then
if [ ! -f "${4}" ]; then
get_dbuser
else
r_dbuser="${4}"
fi
if [ -n "${5}" ]; then
r_dbuser="${5}"
else
get_dbuser
fi
print_debug "Database User '${r_dbuser}'"
## Question Database Password
if [ -n "${5}" ]; then
if [ ! -f "${5}" ]; then
get_dbpass
else
r_dbpass="${5}"
fi
if [ -n "${6}" ]; then
r_dbpass="${6}"
else
get_dbpass
fi
print_debug "Database Pass '${r_dbpass}'"
## Question Database Port
if [ -n "${6}" ]; then
if [ ! -f "${6}" ]; then
get_dbport
else
r_dbport="${6}"
fi
if [ -n "${7}" ]; then
r_dbport="${7}"
else
get_dbport
fi
@@ -932,7 +908,7 @@ case "${r_dbtype}" in
pv ${r_filename} | ${decompress_cmd}cat | mysql -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${r_dbname}
exit_code=$?
;;
psql | postgres* )
pgsql | postgres* )
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
export PGPASSWORD=${r_dbpass}
pv ${r_filename} | ${decompress_cmd}cat | psql -d ${r_dbname} -h ${r_dbhost} -p ${r_dbport} -U ${r_dbuser}
@@ -944,12 +920,12 @@ case "${r_dbtype}" in
mongo_compression="--gzip"
fi
if [ -n "${r_dbuser}" ] ; then
mongo_user="-u ${r_dbuser}"
mongo_user="-u=${r_dbuser}"
fi
if [ -n "${r_dbpass}" ] ; then
mongo_pass="-u ${r_dbpass}"
mongo_pass="-p=${r_dbpass}"
fi
mongorestore ${mongo_compression} -d ${r_dbname} -h ${r_dbhost} --port ${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
exit_code=$?
;;
* )