Compare commits

...

76 Commits

Author SHA1 Message Date
dave@tiredofit.ca
edeadade4d Release 3.11.1 - See CHANGELOG.md 2023-10-23 08:14:29 -07:00
dave@tiredofit.ca
31b256b02d Release 3.11.0 - See CHANGELOG.md 2023-10-12 07:35:12 -07:00
dave@tiredofit.ca
d5cacdb32a Release 3.10.5 - See CHANGELOG.md 2023-10-11 15:44:26 -07:00
dave@tiredofit.ca
238b4d852c Release 3.10.4 - See CHANGELOG.md 2023-10-11 15:17:54 -07:00
Dave Conroy
8d6e72eead Merge pull request #258 from thomas-negrault/fix/mongo-restore-authentication-database
Use authentification database in mongorestore
2023-10-11 15:16:42 -07:00
Thomas Negrault
a9037f97ac Use authentification database in mongorestore 2023-10-11 22:57:29 +02:00
dave@tiredofit.ca
ebcd4fcde4 Release 3.10.3 - See CHANGELOG.md 2023-10-11 12:31:25 -07:00
Dave Conroy
adf52c1160 Merge pull request #257 from thomas-negrault/fix/alphabetical-filenames-sorting
Sort filenames alphabetically when using the restore command
2023-10-11 12:30:26 -07:00
Thomas Negrault
1eee4a49d7 Sort filenames alphabetically when using the restore command 2023-10-11 18:41:05 +02:00
dave@tiredofit.ca
e3faab5c36 Release 3.10.2 - See CHANGELOG.md 2023-09-14 08:13:56 -07:00
Dave Conroy
768d5e60fe Merge pull request #252 from pimjansen/feature/name-typo
Updated name where it is not writing to s3
2023-09-14 08:12:39 -07:00
Dave Conroy
e3e0d7ed67 Merge pull request #251 from pimjansen/feature/split-db-use
Remove the --database flag for a single db dump
2023-09-14 08:03:41 -07:00
Pim Jansen
db808d25c7 Updated name where it is not writing to s3 2023-09-14 10:39:34 +02:00
Pim Jansen
cb5b49b90b Remove the --database flag for a single db dump which ensures there is no use statement in the dump 2023-09-14 10:36:55 +02:00
dave@tiredofit.ca
48a1ff8bbe Release 3.10.1 - See CHANGELOG.md 2023-09-13 22:37:21 -07:00
dave@tiredofit.ca
8b1308ffd1 Release 3.10.0 - See CHANGELOG.md 2023-09-13 08:32:22 -07:00
Dave Conroy
3ab3f67be9 Merge pull request #248 from jcdirks/#247-env-variable-for-additional-arguments-to-the-dump-command-only
add env variables EXTRA_DUMP_OPTS and EXTRA_ENUMERATION_OPTS
2023-09-13 08:31:13 -07:00
Jan-Claas Dirks
cd1899d849 add env variables EXTRA_DUMP_OPTS and EXTRA_ENUMERATION_OPTS 2023-09-13 10:00:41 +02:00
dave@tiredofit.ca
663667dbff Release 3.9.12 - See CHANGELOG.md 2023-09-04 08:32:05 -07:00
dave@tiredofit.ca
36506091be Release 3.9.11 - See CHANGELOG.md 2023-08-24 18:12:36 -07:00
dave@tiredofit.ca
bf646381cb Release 3.9.10 - See CHANGELOG.md 2023-08-23 15:31:41 -07:00
dave@tiredofit.ca
fb3b65b33a Release 3.9.9 - See CHANGELOG.md 2023-08-21 15:38:51 -07:00
dave@tiredofit.ca
6d1ef87042 Release 3.9.8 - See CHANGELOG.md 2023-08-20 08:18:04 -07:00
Dave Conroy
c985cc8a4f Merge pull request #240 from ToshY/issue/239-armv7
Add cargo to build dependencies
2023-08-14 19:15:21 -07:00
ToshY
2265a6acf5 Add cargo to build dependencies 2023-08-05 14:39:13 +02:00
dave@tiredofit.ca
10e7debc65 Release 3.9.7 - See CHANGELOG.md 2023-07-18 07:26:59 -07:00
dave@tiredofit.ca
94e9881b7b Release 3.9.6 - See CHANGELOG.md 2023-06-16 09:50:16 -07:00
dave@tiredofit.ca
425383639a Release 3.9.5 - See CHANGELOG.md 2023-06-13 16:24:17 -07:00
dave@tiredofit.ca
1e46996812 Release 3.9.4 - See CHANGELOG.md 2023-06-13 10:16:04 -07:00
Dave Conroy
e71334564f Drop auto builds for armv7 2023-06-11 11:42:13 -07:00
dave@tiredofit.ca
f83f97bf76 Release 3.9.3 - See CHANGELOG.md 2023-06-05 10:24:46 -07:00
Dave Conroy
94a8e45af6 Merge pull request #226 from vanzhiganov/patch-1
Update README.md
2023-06-05 10:13:57 -07:00
Dave Conroy
9d90e37339 Merge pull request #225 from alwynpan/bugfix/#199
#199 Report error when move the backup file to S3 or Blob fails
2023-06-05 10:13:44 -07:00
Vyacheslav Anzhiganov
085b7cd6ce Update README.md 2023-06-03 16:27:17 +03:00
Yao (Alwyn) Pan
12484bb3f3 feat: Add zip package to the image 2023-06-01 16:54:26 +10:00
Yao (Alwyn) Pan
8fc2721dd4 fix: #199 report error when move the backup file to S3 or Blob fails 2023-06-01 16:46:13 +10:00
dave@tiredofit.ca
68174c061f Release 3.9.2 - See CHANGELOG.md 2023-05-10 08:19:01 -07:00
dave@tiredofit.ca
fd1d95090e Release 3.9.1 - See CHANGELOG.md 2023-05-03 12:13:29 -07:00
Dave Conroy
7befba0d96 Update README.md 2023-04-27 08:18:26 -07:00
Dave Conroy
583253fce7 Update README.md 2023-04-26 14:43:29 -07:00
dave@tiredofit.ca
068577001e Release 3.9.0 - See CHANGELOG.md 2023-04-26 14:32:36 -07:00
Dave Conroy
7781542816 Expand on amount of variables that can use 2023-04-24 14:54:47 -07:00
dave@tiredofit.ca
9283b5440e Release 3.8.5 - See CHANGELOG.md 2023-04-11 15:42:30 -07:00
Dave Conroy
5e62485e7f Merge pull request #216 from tpansino/bug/215
Set ltargets properly
2023-04-11 15:36:26 -07:00
Tom Pansino
f224571448 Set ltargets properly 2023-04-11 15:31:02 -07:00
dave@tiredofit.ca
01620fec00 Release 3.8.4 - See CHANGELOG.md 2023-04-06 12:14:22 -07:00
dave@tiredofit.ca
18a38b4f1d Release 3.8.3 - See CHANGELOG.md 2023-03-30 14:18:55 -07:00
dave@tiredofit.ca
150f356275 Release 3.8.2 - See CHANGELOG.md 2023-03-30 14:05:17 -07:00
dave@tiredofit.ca
e838ed0027 Release 3.8.1 - See CHANGELOG.md 2023-03-30 11:04:35 -07:00
Dave Conroy
8329b4c065 Add defaults 2023-03-27 16:41:31 -07:00
dave@tiredofit.ca
dab1ac301a Release 3.8.0 - See CHANGELOG.md 2023-03-27 15:01:10 -07:00
dave@tiredofit.ca
077201cd18 Release 3.7.7 - See CHANGELOG.md 2023-03-20 16:24:23 -07:00
Dave Conroy
eeaf59dc6f Merge pull request #210 from codemonium/simplify-pg_isready
Simplify pg_isready usage
2023-03-20 16:22:13 -07:00
Igor Artemenko
88fe0d6411 Simplify pg_isready usage
The pg_isready documentation says that it does not need a correct
database name or username to get the server status. In fact, incorrect
values result in the server logging failed connection attempts. As a
result, when we set DB_NAME to ALL, calls to the check_availability
function (which uses pg_isready) cause the server to log the following
error:

    FATAL:  database "ALL" does not exist

To eliminate this error, this change simplifies the pg_isready call.
2023-03-20 22:51:05 +00:00
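A minimal sketch of the simplification this commit describes, using standard `pg_isready` flags (the `check_availability` caller is referenced above but not shown here):

```bash
# Before (assumed shape): the database name was passed along, so DB_NAME=ALL
# made the server log:  FATAL:  database "ALL" does not exist
#pg_isready --host="${DB_HOST}" --port="${DB_PORT}" --dbname="${DB_NAME}" --username="${DB_USER}" -q

# After: host and port alone are enough to report server status
pg_isready --host="${DB_HOST}" --port="${DB_PORT}" -q
```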
dave@tiredofit.ca
366c4759a5 Release 3.7.6 - See CHANGELOG.md 2023-03-14 16:10:11 -07:00
Dave Conroy
37f255ec99 Merge pull request #207 from kamartem/patch-1
Typo correction
2023-03-14 16:09:08 -07:00
Dave Conroy
efa9a678c0 Merge pull request #209 from ToshY/bug/208-mysql-extra-opts-status-check
Removed EXTRA_OPTS in MySQL status check
2023-03-14 16:08:53 -07:00
ToshY
68747a4aff Removed EXTRA_OPTS in MySQL status check 2023-03-14 20:28:05 +01:00
Artem Kamyshansky
cf736278bb Typo correction 2023-03-12 19:31:04 +03:00
dave@tiredofit.ca
1659e34fc7 Release 3.7.5 - See CHANGELOG.md 2023-03-02 07:39:58 -08:00
dave@tiredofit.ca
a8df7a2c75 Release 3.7.4 - See CHANGELOG.md 2023-02-22 08:36:46 -08:00
Dave Conroy
b5194dcce9 Merge pull request #203 from gbe0/issue/201
Fix issue #201 - 99-run_forever exec format error
2023-02-22 08:35:55 -08:00
Chris
6fb947684a fix issue #201 - 99-run_forever exec format error 2023-02-23 00:26:54 +08:00
Dave Conroy
9287f4efeb Update README.md 2023-01-30 11:47:15 -08:00
Dave Conroy
eeb5b5a119 Update README.md 2023-01-30 09:58:44 -08:00
Dave Conroy
a83dfd1a0b Update Workflows 2023-01-29 18:13:20 -08:00
Dave Conroy
8fb379b51a Update workflows 2023-01-29 16:04:15 -08:00
dave@tiredofit.ca
a90e52091d Release 3.7.3 - See CHANGELOG.md 2022-12-20 06:47:24 -08:00
Dave Conroy
ac58b5cdf6 Merge branch 'main' of https://github.com/tiredofit/docker-db-backup 2022-12-20 06:46:46 -08:00
Dave Conroy
fcbe771793 Merge pull request #194 from alwynpan/feature/#193
#193 Make S3_KEY_ID and S3_KEY_SECRET optional for S3 Backup
2022-12-20 06:46:40 -08:00
Yao (Alwyn) Pan
168982ab53 Make S3_KEY_ID and S3_KEY_SECRET optional for S3 Backup 2022-12-20 17:51:56 +11:00
Dave Conroy
e377fcb6ae Fix spelling mistake in Archive notice 2022-12-19 12:35:33 -08:00
dave@tiredofit.ca
50f27233a9 Release 3.7.2 - See CHANGELOG.md 2022-12-19 12:33:07 -08:00
dave@tiredofit.ca
7ccbf23af6 Release 3.7.1 - See CHANGELOG.md 2022-12-19 08:21:35 -08:00
dave@tiredofit.ca
0921971aa3 Release 3.7.0 - See CHANGELOG.md 2022-12-16 14:02:35 -08:00
dave@tiredofit.ca
fd3b9c5fa0 Release 3.6.1 - See CHANGELOG.md 2022-11-23 07:44:28 -08:00
9 changed files with 626 additions and 505 deletions

View File

@@ -1,111 +1,14 @@
-### Application Level Image CI
-### Dave Conroy <dave at tiredofit dot ca>
-name: 'build'
+name: "build_image"
 on:
   push:
     paths:
-      - '**'
-      - '!README.md'
+      - "**"
+      - "!README.md"
 jobs:
-  docker:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-      - name: Prepare
-        id: prep
-        run: |
-          DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
-          if [[ $GITHUB_REF == refs/heads/* ]]; then
-            if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
-              BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
-            else
-              BRANCH=${GITHUB_REF#refs/heads/}
-            fi
-            case ${BRANCH} in
-              "main" | "master" )
-                BRANCHTAG="${DOCKER_IMAGE}:latest"
-                ;;
-              "develop" )
-                BRANCHTAG="${DOCKER_IMAGE}:develop"
-                ;;
-              * )
-                if [ -n "${{ secrets.LATEST }}" ] ; then
-                  if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
-                    BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
-                  else
-                    BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
-                  fi
-                else
-                  BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
-                fi
-                ;;
-            esac
-          fi
-          if [[ $GITHUB_REF == refs/tags/* ]]; then
-            GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
-          fi
-          if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
-            TAGS=${BRANCHTAG},${GITTAG}
-          else
-            TAGS="${BRANCHTAG}${GITTAG}"
-          fi
-          echo ::set-output name=tags::${TAGS}
-          echo ::set-output name=docker_image::${DOCKER_IMAGE}
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-        with:
-          platforms: all
-      - name: Set up Docker Buildx
-        id: buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to DockerHub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      - name: Label
-        id: Label
-        run: |
-          if [ -f "Dockerfile" ] ; then
-            sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
-            sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
-            sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
-            sed -i "/FROM .*/a LABEL tiredofit.image.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
-            if [ -f "CHANGELOG.md" ] ; then
-              sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
-              mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
-            fi
-            if [[ $GITHUB_REF == refs/tags/* ]]; then
-              sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
-            fi
-            if [[ $GITHUB_REF == refs/heads/* ]]; then
-              sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
-            fi
-          fi
-      - name: Build
-        uses: docker/build-push-action@v3
-        with:
-          builder: ${{ steps.buildx.outputs.name }}
-          context: .
-          file: ./Dockerfile
-          platforms: linux/amd64,linux/arm/v7,linux/arm64
-          push: true
-          tags: ${{ steps.prep.outputs.tags }}
+  build:
+    uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
+    secrets: inherit

View File

@@ -1,6 +1,4 @@
-# Manual Workflow (Application)
-name: manual
+name: "manual_build_image"
 on:
   workflow_dispatch:
@@ -8,104 +6,10 @@ on:
       Manual Build:
         description: 'Manual Build'
         required: false
 jobs:
-  docker:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-      - name: Prepare
-        id: prep
-        run: |
-          DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
-          if [[ $GITHUB_REF == refs/heads/* ]]; then
-            if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
-              BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
-            else
-              BRANCH=${GITHUB_REF#refs/heads/}
-            fi
-            case ${BRANCH} in
-              "main" | "master" )
-                BRANCHTAG="${DOCKER_IMAGE}:latest"
-                ;;
-              "develop" )
-                BRANCHTAG="${DOCKER_IMAGE}:develop"
-                ;;
-              * )
-                if [ -n "${{ secrets.LATEST }}" ] ; then
-                  if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
-                    BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
-                  else
-                    BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
-                  fi
-                else
-                  BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
-                fi
-                ;;
-            esac
-          fi
-          if [[ $GITHUB_REF == refs/tags/* ]]; then
-            GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
-          fi
-          if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
-            TAGS=${BRANCHTAG},${GITTAG}
-          else
-            TAGS="${BRANCHTAG}${GITTAG}"
-          fi
-          echo ::set-output name=tags::${TAGS}
-          echo ::set-output name=docker_image::${DOCKER_IMAGE}
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-        with:
-          platforms: all
-      - name: Set up Docker Buildx
-        id: buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to DockerHub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      - name: Label
-        id: Label
-        run: |
-          if [ -f "Dockerfile" ] ; then
-            sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
-            sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
-            sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
-            sed -i "/FROM .*/a LABEL tiredofit.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
-            if [ -f "CHANGELOG.md" ] ; then
-              sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
-              mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
-            fi
-            if [[ $GITHUB_REF == refs/tags/* ]]; then
-              sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
-            fi
-            if [[ $GITHUB_REF == refs/heads/* ]]; then
-              sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
-            fi
-          fi
-      - name: Build
-        uses: docker/build-push-action@v3
-        with:
-          builder: ${{ steps.buildx.outputs.name }}
-          context: .
-          file: ./Dockerfile
-          platforms: linux/amd64,linux/arm/v7,linux/arm64
-          push: true
-          tags: ${{ steps.prep.outputs.tags }}
+  build:
+    uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
+    secrets: inherit

View File

@@ -1,3 +1,240 @@
+## 3.11.1 2023-10-23 <dave at tiredofit dot ca>
+### Added
+- Switch to tiredofit/alpine:edge for newer packages
+- Postgresql 16 support
+### Changed
+- Add --break-system-packages flag to pip when installing blobxfer
+## 3.11.0 2023-10-12 <dave at tiredofit dot ca>
+### Added
+- Introduce aarch64 (ARMv8) support for Microsoft SQL Server backups
+- Microsoft ODBC Driver 18.3.2.1-1
+- Microsoft SQL Client 18.3.1.1-1
+## 3.10.5 2023-10-11 <dave at tiredofit dot ca>
+### Added
+- Add option to drop exsiting data from MongoDB restore
+### Changed
+- Fix some capabilities of not being able to select mongodb manually to restore
+## 3.10.4 2023-10-11 <thomas-negrault@github>
+### Changed
+- Use authentication database for MongoDB restores
+## 3.10.3 2023-10-11 <thomas-negrault@github>
+### Changed
+- Change sorting for restore script
+## 3.10.2 2023-09-14 <pimjansen@github>
+### Changed
+- Update to wording when sending files to blobxfer
+- Remove --databases flag when backing up a single mysql/mariadb backup which allows to omit the "USE <db_name>" statement in the backup allowing for better restores
+## 3.10.1 2023-09-13 <dave at tiredofit dot ca>
+### Changed
+- Bugfix to 3.10.0 with syntax error revolving around unbraced variable
+## 3.10.0 2023-09-13 <jcdirks@github>
+### Added
+- Add EXTRA_DUMP_OPTS and EXTRA_ENUMERATION_OPTS to add different arguments when checking for databases, vs doing the actual backup
+## 3.9.12 2023-09-04 <dave at tiredofit dot ca>
+### Changed
+- Perform additional checks for ENABLE_CHECKSUM=FALSE and skip executing actions for S3/BlobXfer
+## 3.9.11 2023-08-24 <dave at tiredofit dot ca>
+### Changed
+- AWS CLI 2.13.9
+## 3.9.10 2023-08-23 <dave at tiredofit dot ca>
+### Changed
+- Stop trying to move a non existent checksum file when ENABLE_CHECKSUM=FALSE
+## 3.9.9 2023-08-21 <dave at tiredofit dot ca>
+### Changed
+- Start compiling aws-cli instead of from packages to continue to support arm/v7
+## 3.9.8 2023-08-20 <ToshY@github>
+### Changed
+- Restore armv7 and aarch64 builds
+## 3.9.7 2023-07-18 <dave at tiredofit dot ca>
+### Changed
+- Cleanup check_exit_code parameter and reduce duplicate output
+## 3.9.6 2023-06-16 <dave at tiredofit dot ca>
+### Changed
+- Resolve issues introduced with 3.9.3. Split exit codes to be specific for backing up and moving. Uses paremter $11 for post backup scripts
+## 3.9.5 2023-06-13 <dave at tiredofit dot ca>
+### Changed
+- Start building Influx DB v1 manually due to being removed from Alpine repositories
+## 3.9.4 2023-06-13 <dave at tiredofit dot ca>
+### Added
+- Add abliity to use --rsyncable argument to zstd archives
+## 3.9.3 2023-06-05 <dave at tiredofit dot ca>
+### Added
+- Add notification if blobxfer/s3 upload fails (credit @alwynpan)
+- Add zip package
+## 3.9.2 2023-05-10 <dave at tiredofit dot ca>
+### Changed
+- Alpine 3.18 base
+## 3.9.1 2023-05-03 <dave at tiredofit dot ca>
+### Changed
+- Properly allow multiple _FILE environment variables to execute solving an issue with MySQL backups
+- Fix _FILE functionality for DB_NAME variable
+## 3.9.0 2023-04-26 <dave at tiredofit dot ca>
+### Added
+- Add support for _FILE environment variables
+## 3.8.5 2023-04-11 <tpansino@github>
+### Changed
+- Fix SQLite3, Influx, and MSSQL backups failing due to malformed/non existent ltarget
+## 3.8.4 2023-04-06 <dave at tiredofit dot ca>
+### Changed
+- Fix issue with Influx2 and MSSQL clients not installing properly
+## 3.8.3 2023-03-30 <dave at tiredofit dot ca>
+### Changed
+- Patchup for 3.8.2
+## 3.8.2 2023-03-30 <dave at tiredofit dot ca>
+### Changed
+- Patchup for 3.8.1
+## 3.8.1 2023-03-30 <dave at tiredofit dot ca>
+### Changed
+- Cleanup Dockerfile
+- Fix issue with DB_ARCHIVE_TIME not firing correctly
+## 3.8.0 2023-03-27 <dave at tiredofit dot ca>
+### Added
+- Introduce DB_DUMP_TARGET_ARCHIVE which works with DB_ARCHIVE_TIME to move backups older than (x) minutes from DB_DUMP_TARGET to DB_DUMP_TARGET_ARCHIVE for use with external backup systems and custom exclude rules
+- Introduce CREATE_LATEST_SYMLINK which creates a symbolic link in DB_DUMP_TARGET of `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)`
+## 3.7.7 2023-03-20 <codemonium@github>
+### Changed
+- Simplify pg_isready usage
+## 3.7.6 2023-03-14 <toshy@github>
+### Changed
+- Remove EXTRA_OPT variable from MySQL/MariaDB check
+## 3.7.5 2023-03-02 <dave at tiredofit dot ca>
+### Added
+- Add support for Docker Swarm mode Secrets for BLOBXFER_STORAGE_ACCOUNT_*_FILE
+## 3.7.4 2023-02-22 <gbe0@github>
+### Changed
+- Fix when running in MANUAL_RUN_FOREVER mode looping
+## 3.7.3 2022-12-20 <dave at tiredofit dot ca>
+### Changed
+- Make S3_KEY_ID and S3_KEY_SECRET optional should IAM roles be used (Credit to alwynpan@github)
+## 3.7.2 2022-12-19 <dave at tiredofit dot ca>
+### Changed
+- Bugfix for 3.7.1
+## 3.7.1 2022-12-19 <dave at tiredofit dot ca>
+### Changed
+- Add MYSQL_ENABLE_TLS environment variable to switch on and off
+### Reverted
+- Set default for MYSQL_TLS_CA_FILE to accomodate for most use cases
+## 3.7.0 2022-12-16 <dave at tiredofit dot ca>
+### Added
+- Introduce support for connecting via TLS to MySQL / MariaDB Hosts with MYSQL_TLS_* variables - See README for more details
+### Changed
+- Fix for cleaning up filesystems that are syncing to Azure via blobxfer
+## 3.6.1 2022-11-23 <dave at tiredofit dot ca>
+### Added
+- Switch to Alpine 3.17 base
+- Switch to OpenSSL instead of LibreSSL
 ## 3.6.0 2022-11-21 <dave at tiredofit dot ca>
 ### Added

View File

@@ -1,158 +1,83 @@
-FROM docker.io/tiredofit/alpine:3.16
+ARG DISTRO=alpine
+ARG DISTRO_VARIANT=edge
+FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"
 ### Set Environment Variables
-ENV INFLUX2_VERSION=2.4.0 \
-    MSSQL_VERSION=18.0.1.1-1 \
+ENV INFLUX_VERSION=1.8.0 \
+    INFLUX2_VERSION=2.4.0 \
+    MSODBC_VERSION=18.3.2.1-1 \
+    MSSQL_VERSION=18.3.1.1-1 \
+    AWS_CLI_VERSION=1.25.97 \
     CONTAINER_ENABLE_MESSAGING=FALSE \
     CONTAINER_ENABLE_MONITORING=TRUE \
     CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
    IMAGE_NAME="tiredofit/db-backup" \
    IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
-ENV LANG=en_US.utf8 \
-    PG_MAJOR=15 \
-    PG_VERSION=15.1 \
-    PGDATA=/var/lib/postgresql/data
-### Create User Accounts
-RUN set -ex && \
-    addgroup -g 70 postgres && \
-    adduser -S -D -H -h /var/lib/postgresql -s /bin/sh -G postgres -u 70 postgres && \
-    mkdir -p /var/lib/postgresql && \
-    chown -R postgres:postgres /var/lib/postgresql && \
-    \
-### Install Dependencies
-    apk update && \
-    apk upgrade && \
-    apk add \
-        openssl \
-        && \
-    \
-    apk add --no-cache --virtual .postgres-build-deps \
-        bison \
-        build-base \
-        coreutils \
-        dpkg-dev \
-        dpkg \
-        flex \
-        gcc \
-        icu-dev \
-        libc-dev \
-        libedit-dev \
-        libxml2-dev \
-        libxslt-dev \
-        linux-headers \
-        make \
-        openssl-dev \
-        perl-utils \
-        perl-ipc-run \
-        util-linux-dev \
-        wget \
-        zlib-dev \
-        && \
-    \
-### Build Postgresql
-    mkdir -p /usr/src/postgresql && \
-    curl -sSL "https://ftp.postgresql.org/pub/source/v$PG_VERSION/postgresql-$PG_VERSION.tar.bz2" | tar xvfj - --strip 1 -C /usr/src/postgresql && \
-    cd /usr/src/postgresql && \
-    # update "DEFAULT_PGSOCKET_DIR" to "/var/run/postgresql" (matching Debian)
-    # see https://anonscm.debian.org/git/pkg-postgresql/postgresql.git/tree/debian/patches/51-default-sockets-in-var.patch?id=8b539fcb3e093a521c095e70bdfa76887217b89f
-    awk '$1 == "#define" && $2 == "DEFAULT_PGSOCKET_DIR" && $3 == "\"/tmp\"" { $3 = "\"/var/run/postgresql\""; print; next } { print }' src/include/pg_config_manual.h > src/include/pg_config_manual.h.new && \
-    grep '/var/run/postgresql' src/include/pg_config_manual.h.new && \
-    mv src/include/pg_config_manual.h.new src/include/pg_config_manual.h && \
-    gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)" && \
-    # explicitly update autoconf config.guess and config.sub so they support more arches/libcs
-    wget --inet4-only -O config/config.guess 'https://git.savannah.gnu.org/cgit/config.git/plain/config.guess?id=7d3d27baf8107b630586c962c057e22149653deb' && \
-    wget --inet4-only -O config/config.sub 'https://git.savannah.gnu.org/cgit/config.git/plain/config.sub?id=7d3d27baf8107b630586c962c057e22149653deb' && \
-    ./configure \
-        --build="$gnuArch" \
-        --enable-integer-datetimes \
-        --enable-thread-safety \
-        --enable-tap-tests \
-        --disable-rpath \
-        --with-uuid=e2fs \
-        --with-gnu-ld \
-        --with-pgport=5432 \
-        --with-system-tzdata=/usr/share/zoneinfo \
-        --prefix=/usr/local \
-        --with-includes=/usr/local/include \
-        --with-libraries=/usr/local/lib \
-        --with-openssl \
-        --with-libxml \
-        --with-libxslt \
-        --with-icu \
-        && \
-    \
-    make -j "$(nproc)" world && \
-    make install-world && \
-    make -C contrib install && \
-    runDeps="$( \
-        scanelf --needed --nobanner --format '%n#p' --recursive /usr/local \
-            | tr ',' '\n' \
-            | sort -u \
-            | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
-    )" && \
-    apk add -t .postgres-additional-deps \
-        $runDeps \
-        && \
-    \
-### Cleanup
-    apk del .postgres-build-deps && \
-    cd / && \
-    rm -rf \
-        /usr/src/postgresql \
-        /usr/local/share/doc \
-        /usr/local/share/man && \
-    find /usr/local -name '*.a' -delete && \
-    rm -rf /var/cache/apk/* && \
-    \
 ### Dependencies
+RUN source /assets/functions/00-container && \
     set -ex && \
-    apk update && \
-    apk upgrade && \
-    apk add -t .db-backup-build-deps \
+    package update && \
+    package upgrade && \
+    package install .db-backup-build-deps \
         build-base \
         bzip2-dev \
+        cargo \
         git \
+        go \
         libarchive-dev \
-        libressl-dev \
+        openssl-dev \
         libffi-dev \
         python3-dev \
         py3-pip \
         xz-dev \
         && \
     \
-    apk add --no-cache -t .db-backup-run-deps \
-        aws-cli \
+    package install .db-backup-run-deps \
         bzip2 \
-        influxdb \
+        groff \
         libarchive \
         mariadb-client \
         mariadb-connector-c \
         mongodb-tools \
-        libressl \
+        openssl \
         pigz \
-        #postgresql \
-        #postgresql-client \
+        postgresql16 \
+        postgresql16-client \
         pv \
-        py3-botocore \
-        py3-colorama \
        py3-cryptography \
-        py3-docutils \
-        py3-jmespath \
-        py3-rsa \
-        py3-setuptools \
-        py3-s3transfer \
-        py3-yaml \
-        python3 \
        redis \
        sqlite \
        xz \
+        zip \
        zstd \
        && \
    \
-    apkArch="$(apk --print-arch)"; \
+    apkArch="$(uname -m)"; \
     case "$apkArch" in \
-        x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
-        aarch64 ) influx2=true ; influx_arch=arm64 ;; \
+        x86_64) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
+        arm64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
         *) sleep 0.1 ;; \
     esac; \
     \
-    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSSQL_VERSION}_amd64.apk mssql-tools18_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
+    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; ls -l ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
     if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
-    \
+    clone_git_repo https://github.com/aws/aws-cli "${AWS_CLI_VERSION}" && \
+    python3 setup.py install --prefix=/usr && \
+    clone_git_repo https://github.com/influxdata/influxdb "${INFLUX_VERSION}" && \
+    go build -o /usr/sbin/influxd ./cmd/influxd && \
+    strip /usr/sbin/influxd && \
     mkdir -p /usr/src/pbzip2 && \
     curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
     cd /usr/src/pbzip2 && \
@@ -169,14 +94,17 @@ RUN set -ex && \
     make && \
     make install && \
     \
-    pip3 install blobxfer && \
+    pip3 install --break-system-packages blobxfer && \
     \
-### Cleanup
-    apk del .db-backup-build-deps && \
-    rm -rf /usr/src/* && \
-    rm -rf /*.apk && \
-    rm -rf /etc/logrotate.d/redis && \
-    rm -rf /root/.cache /tmp/* /var/cache/apk/*
-### S6 Setup
+    package remove .db-backup-build-deps && \
+    package cleanup && \
+    rm -rf \
+        /*.apk \
+        /etc/logrotate.d/* \
+        /root/.cache \
+        /root/go \
+        /tmp/* \
+        /usr/src/*
 COPY install /
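The rewritten Dockerfile calls helpers (`package`, `clone_git_repo`) sourced from `/assets/functions/00-container` in the tiredofit base image; their implementations are not part of this diff. A hypothetical sketch of what the `package` wrapper might look like on Alpine (an assumption for orientation only):

```bash
# Hypothetical sketch -- the real helper ships with the base image and also
# abstracts over Debian's apt-get.
package() {
  case "$1" in
    update)  apk update ;;
    upgrade) apk upgrade ;;
    install) shift; apk add -t "$@" ;;  # first argument names a virtual group
    remove)  shift; apk del "$@" ;;
    cleanup) rm -rf /var/cache/apk/* ;;
  esac
}
```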

README.md
View File

@@ -1,7 +1,7 @@
 # github.com/tiredofit/docker-db-backup
 [![GitHub release](https://img.shields.io/github/v/tag/tiredofit/docker-db-backup?style=flat-square)](https://github.com/tiredofit/docker-db-backup/releases/latest)
-[![Build Status](https://img.shields.io/github/workflow/status/tiredofit/docker-db-backup/build?style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions?query=workflow%3Abuild)
+[![Build Status](https://img.shields.io/github/actions/workflow/status/tiredofit/docker-db-backup/main.yml?branch=main&style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions)
 [![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
 [![Docker Pulls](https://img.shields.io/docker/pulls/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
 [![Become a sponsor](https://img.shields.io/badge/sponsor-tiredofit-181717.svg?logo=github&style=flat-square)](https://github.com/sponsors/tiredofit)
@@ -12,7 +12,7 @@
 This will build a container for backing up multiple types of DB Servers
-Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
+Currently backs up CouchDB, InfluxDB, MySQL, Microsoft SQL, MongoDB, Postgres, Redis servers.
 * dump to local filesystem or backup to S3 Compatible services, and Azure.
 * select database user and password
@@ -53,6 +53,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 - [Scheduling Options](#scheduling-options)
 - [Backup Options](#backup-options)
 - [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
+- [Upload to a Azure storage account by `blobxfer`](#upload-to-a-azure-storage-account-by-blobxfer)
 - [Maintenance](#maintenance)
 - [Shell Access](#shell-access)
 - [Manual Backups](#manual-backups)
@@ -79,7 +80,13 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 Clone this repository and build the image with `docker build <arguments> (imagename) .`
 ### Prebuilt Images
-Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and is the recommended method of installation.
+Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup)
+Builds of the image are also available on the [Github Container Registry](https://github.com/tiredofit/docker-db-backup/pkgs/container/docker-db-backup)
+```
+docker pull ghcr.io/tiredofit/docker-db-backup:(imagetag)
+```
 The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md):
@@ -88,7 +95,7 @@ The following image tags are available along with their tagged release based on
 | latest | `:latest` |
 ```bash
-docker pull tiredofit/db-backup:(imagetag)
+docker pull docker.io/tiredofdit/db-backup:(imagetag)
 ```
 #### Multi Architecture
 Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architecures, type `docker manifest (image):(tag)`
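The manifest check mentioned above is spelled in full as `docker manifest inspect`; for example (the tag here is illustrative):

```bash
docker manifest inspect docker.io/tiredofit/db-backup:latest
```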
@@ -116,7 +123,7 @@ The following directories are used for configuration and can be mapped for persi
 #### Base Images used
-This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) or [Debian Linux](https://hub.docker.com/r/tiredofit/debian) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`,`vim`.
+This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`.
 Be sure to view the following repositories to understand all the customizable options:
@@ -126,62 +133,72 @@ Be sure to view the following repositories to understand all the customizable op
 #### Container Options
 | Parameter | Description | Default |
-| -------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
+| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
-| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
+| `BACKUP_LOCATION` | Backup to `FILESYSTEM`, `blobxfer` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
 | `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` |
 | `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
 | `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
 | `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
+| `CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
 | `PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | |
 | `POST_SCRIPT` | Fill this variable in with a command to execute post backing up | |
 | `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE` |
 ### Database Specific Options
-| Parameter | Description | Default |
-| ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------- | --------- |
-| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
-| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
-| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
-| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by seperating with commas eg `db1,db2` | |
-| `DB_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases seperated via commas from being backed up | |
-| `DB_USER` | username for the database(s) - Can use `root` for MySQL | |
-| `DB_PASS` | (optional if DB doesn't require it) password for the database | |
-| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
-| `INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2` series - AMD64 and ARM64 only for `2` | |
-| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | |
-| | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can overrde them by making your own entries | |
+| Parameter | Description | Default | `_FILE` |
+| ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------- | ------- | ------- |
+| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | | |
+| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `mssql` `pgsql` `mongo` `redis` `sqlite3` | | |
+| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | | x |
+| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by seperating with commas eg `db1,db2` | | x |
+| `DB_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases seperated via commas from being backed up | | x |
+| `DB_USER` | username for the database(s) - Can use `root` for MySQL | | x |
+| `DB_PASS` | (optional if DB doesn't require it) password for the database | | x |
+| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies | x |
+| `INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2` series - AMD64 and ARM64 only for `2` | | |
+| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | | x |
+| | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can overrde them by making your own entries | | |
 #### For Influx DB2:
 Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST` use syntax of `http(s)://db-name`
 ### Scheduling Options
 | Parameter | Description | Default |
-| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- |
+| ------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- |
 | `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
 | `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
 | | Absolute HHMM, e.g. `2330` or `0415` | |
 | | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
-| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `/backup` |
+| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `${DB_DUMP_TARGET}/archive/` |
+| `DB_DUMP_TARGET_ARCHIVE` | Optional Directory where the database dumps archives are kept. | |
 | `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
+| `DB_ARCHIVE_TIME` | Value in minutes to move all files files older than (x) from `DB_DUMP_TARGET` to `DB_DUMP_TARGET_ARCHIVE` - which is useful when pairing against an external backup system. | |
 - You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of backups that start with a `0` get converted into a different format which will not allow the timer to start at the correct time.
 ### Backup Options
-| Parameter | Description | Default |
-| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | -------------- |
-| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
-| `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` |
-| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
-| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
-| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` |
-| `ENABLE_CHECKSUM` | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` |
-| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` |
-| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | |
-| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` |
-| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` |
-| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` |
+| Parameter | Description | Default | `_FILE` |
+|--------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------|---------|
+| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` | |
+| `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` | |
+| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` | |
+| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` | |
+| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` | |
+| `ENABLE_CHECKSUM` | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` | |
+| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` | |
+| `EXTRA_OPTS` | If you need to pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
+| `EXTRA_DUMP_OPTS` | If you need to pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
+| `EXTRA_ENUMERATION_OPTS` | If you need to pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
+| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` | |
+| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` | |
+| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` | |
+| `MYSQL_ENABLE_TLS` | Enable TLS functionality for MySQL client | `FALSE` | |
+| `MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` | |
+| `MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` | |
+| `MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` | x |
+| `MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | | x |
+| `MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | | x |
 - When using compression with MongoDB, only `GZ` compression is possible.
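As an illustration of how several of the options documented above combine, a hypothetical `docker run` fragment (all values are examples, not recommendations; `--skip-lock-tables` is merely a sample `mysqldump` argument):

```bash
docker run -d --name db-backup \
  -v "$(pwd)/backups:/backup" \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=ALL \
  -e DB_USER=root \
  -e DB_PASS_FILE=/run/secrets/db_pass \
  -e DB_DUMP_BEGIN="+10" \
  -e COMPRESSION=ZSTD \
  -e EXTRA_DUMP_OPTS="--skip-lock-tables" \
  -e MYSQL_ENABLE_TLS=TRUE \
  -e MYSQL_TLS_VERIFY=TRUE \
  tiredofit/db-backup
```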
@@ -189,32 +206,33 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
 If `BACKUP_LOCATION` = `S3` then the following options are used.
-| Parameter | Description | Default |
-|-----------------------|------------------------------------------------------------------------------------------|---------|
-| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | |
-| `S3_KEY_ID` | S3 Key ID | |
-| `S3_KEY_SECRET` | S3 Key Secret | |
-| `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | |
-| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | |
-| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
-| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` |
-| `S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | |
-| `S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | |
-| _*OR*_ | | |
-| `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` |
+| Parameter | Description | Default | `_FILE` |
+| --------------------- | ----------------------------------------------------------------------------------------- | ------- | ------- |
+| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | | x |
+| `S3_KEY_ID` | S3 Key ID (Optional) | | x |
+| `S3_KEY_SECRET` | S3 Key Secret (Optional) | | x |
+| `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | | x |
+| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | | x |
+| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | | x |
+| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` | x |
+| `S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | | x |
+| `S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | | x |
+| _*OR*_ | | | |
+| `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` | |
+- When `S3_KEY_ID` and/or `S3_KEY_SECRET` is not set, will try to use IAM role assigned (if any) for uploading the backup files to S3 bucket.
 #### Upload to a Azure storage account by `blobxfer`
 Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer) to the Azure fileshare storage.
 If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
-| Parameter | Description | Default |
-| ------------------------------- | ------------------------------------------------------------------------ | -------------------- |
-| `BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | |
-| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | |
-| `BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` |
+| Parameter | Description | Default | `_FILE` |
+| ------------------------------ | ------------------------------------------- | ------------------- | ------- |
+| `BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
+| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
+| `BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
 > This service uploads files from backup targed directory `DB_DUMP_TARGET`.
 > If the a cleanup configuration in `DB_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
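A sketch of the keyless S3 configuration the new note above describes (bucket, path, and region are placeholders):

```bash
# S3_KEY_ID / S3_KEY_SECRET are deliberately unset: with no keys supplied,
# the upload falls back to the IAM role attached to the instance (if any).
# (DB_* connection variables omitted for brevity.)
docker run -d --name db-backup \
  -e BACKUP_LOCATION=S3 \
  -e S3_BUCKET=mybucket \
  -e S3_PATH=backup \
  -e S3_REGION=ap-northeast-2 \
  tiredofit/db-backup
```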
@@ -231,7 +249,7 @@ docker exec -it (whatever your container name is) bash
 ### Manual Backups
 Manual Backups can be performed by entering the container and typing `backup-now`
-- Recently there was a request to have the container work with Kukbernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup by executing and then exit cleanly. An alternative way to running the script is to execute `/etc/services.available/10-db-backup/run`.
+- Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup by executing and then exit cleanly. An alternative way to running the script is to execute `/etc/services.available/10-db-backup/run`.
 ### Restoring Databases
 Entering in the container and executing `restore` will execute a menu based script to restore your backups - MariaDB, Postgres, and Mongo supported.
@@ -307,17 +325,18 @@ $ cat post-script.sh
 # #### $8=BACKUP FILENAME (Filename)
 # #### $9=BACKUP FILESIZE
 # #### $10=HASH (If CHECKSUM enabled)
+# #### $11=MOVE_EXIT_CODE
 echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes MD5: ${10}"
 ````
 ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
-${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_timme}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_timme}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}" "${move_exit_code}
 Outputs the following on the console:
-`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_202200315-000000.sql.bz2 Size: 7795 bytes Hash: 952fbaafa30437494fdf3989a662cd40`
+`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_202200315-000000.sql.bz2 Size: 7795 bytes Hash: 952fbaafa30437494fdf3989a662cd40 0`
 If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`
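A hedged example of a post-backup hook consuming the new eleventh parameter (the mount path comes from the commented line in the sample compose file further below; the checks themselves are illustrative):

```bash
#!/bin/bash
# Example only: mounted at /assets/custom-scripts/post-script.sh
# $1 = backup exit code, ${11} = exit code of the move/upload step
if [ "${1}" -ne 0 ] || [ "${11}" -ne 0 ]; then
    echo "Backup of ${4} on ${3} failed (backup rc=${1}, move rc=${11})" >&2
else
    echo "Backup ${8} (${9} bytes) completed in ${7} seconds"
fi
```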
@@ -329,7 +348,7 @@ If for some reason your filesystem or host is not detecting it right, use the en
 These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
 ### Usage
 - The [Discussions board](../../discussions) is a great place for working with the community on tips and tricks of using this image.
-- Consider [sponsoring me](https://github.com/sponsors/tiredofit) personalized support.
+- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for personalized support
 ### Bugfixes
 - Please, submit a [Bug Report](issues/new) if something isn't working as expected. I'll do my best to issue a fix in short order.

View File

@@ -31,6 +31,8 @@ services:
       - ./backups:/backup
       #- ./post-script.sh:/assets/custom-scripts/post-script.sh
     environment:
+      - TIMEZONE=America/Vancouver
+      - CONTAINER_ENABLE_MONITORING=FALSE
     # - DEBUG_MODE=TRUE
       - DB_TYPE=mariadb
       - DB_HOST=example-db-host
@@ -43,7 +45,7 @@ services:
- CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
-- CONTAINER_ENABLE_MONITORING=FALSE
restart: always
networks:
- example-db-network
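For reference, the same environment can be supplied without compose; a minimal sketch, assuming the image's published tag is `tiredofit/db-backup` (values illustrative):

````bash
# Illustrative one-off run mirroring the compose example above
docker run -d --name db-backup \
  -v "$(pwd)/backups:/backup" \
  -e TIMEZONE=America/Vancouver \
  -e CONTAINER_ENABLE_MONITORING=FALSE \
  -e DB_TYPE=mariadb \
  -e DB_HOST=example-db-host \
  tiredofit/db-backup
````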

View File

@@ -5,16 +5,22 @@ BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
CHECKSUM=${CHECKSUM:-"MD5"}
COMPRESSION=${COMPRESSION:-"ZSTD"}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
+CREATE_LATEST_SYMLINK=${CREATE_LATEST_SYMLINK:-"TRUE"}
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
+DB_DUMP_TARGET_ARCHIVE=${DB_DUMP_TARGET_ARCHIVE:-"${DB_DUMP_TARGET}/archive/"}
ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
MODE=${MODE:-"AUTO"}
+MYSQL_ENABLE_TLS=${MYSQL_ENABLE_TLS:-"FALSE"}
MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
MYSQL_SINGLE_TRANSACTION=${MYSQL_SINGLE_TRANSACTION:-"TRUE"}
MYSQL_STORED_PROCEDURES=${MYSQL_STORED_PROCEDURES:-"TRUE"}
+MYSQL_TLS_CA_FILE=${MYSQL_TLS_CA_FILE:-"/etc/ssl/cert.pem"}
+MYSQL_TLS_VERIFY=${MYSQL_TLS_VERIFY:-"FALSE"}
+MYSQL_TLS_VERSION=${MYSQL_TLS_VERSION:-"TLSv1.1,TLSv1.2,TLSv1.3"}
PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
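These defaults all use standard `${VAR:-default}` expansion, so any of the newly added knobs can be overridden at runtime; an illustrative override set (values are examples only):

````bash
# Illustrative overrides for the newly added defaults
export CREATE_LATEST_SYMLINK=FALSE            # skip the latest-* convenience symlink
export DB_DUMP_TARGET_ARCHIVE=/backup/old     # where aged dumps get swept
export MYSQL_TLS_VERSION="TLSv1.2,TLSv1.3"    # tighten the permitted TLS range
````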

View File

@@ -2,12 +2,19 @@
bootstrap_variables() {
sanity_var DB_TYPE "Set appropriate DB_TYPE"
+transform_file_var \
+DB_HOST \
+DB_NAME \
+DB_PORT \
+DB_USER \
+DB_PASS
case "${DB_TYPE,,}" in
couch* )
dbtype=couch
DB_PORT=${DB_PORT:-5984}
-file_env 'DB_USER'
-file_env 'DB_PASS'
+sanity_var DB_USER
+sanity_var DB_PASS
;;
influx* )
dbtype=influx
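`transform_file_var` replaces the earlier per-variable `file_env` calls; its real implementation lives in the image's base scripts and is not shown in this diff, but the `_FILE` secret convention it supports can be sketched roughly as:

````bash
# Rough sketch only - the actual helper is provided by the base image
transform_file_var() {
    local var file_var
    for var in "$@"; do
        file_var="${var}_FILE"
        # if e.g. DB_PASS_FILE points at a mounted secret, load it into DB_PASS
        if [ -n "${!file_var}" ] && [ -f "${!file_var}" ]; then
            export "${var}"="$(cat "${!file_var}")"
        fi
    done
}
````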
@@ -15,40 +22,59 @@ bootstrap_variables() {
1) DB_PORT=${DB_PORT:-8088} ;;
2) DB_PORT=${DB_PORT:-8086} ;;
esac
-file_env 'DB_USER'
-file_env 'DB_PASS'
+sanity_var DB_USER
+sanity_var DB_PASS
sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
;;
mongo* )
dbtype=mongo
+transform_file_var MONGO_CUSTOM_URI
if [ -n "${MONGO_CUSTOM_URI}" ] ; then
-mongo_uri_proto=$(echo ${MONGO_CUSTOM_URI} | grep :// | sed -e's,^\(.*://\).*,\1,g')
+mongo_uri_proto=$(echo "${MONGO_CUSTOM_URI}" | grep :// | sed -e's,^\(.*://\).*,\1,g')
mongo_uri_scratch="${MONGO_CUSTOM_URI/${mongo_uri_proto}/}"
-mongo_uri_username_password=$(echo ${mongo_uri_scratch} | grep @ | rev | cut -d@ -f2- | rev)
+mongo_uri_username_password="$(echo "${mongo_uri_scratch}" | grep @ | rev | cut -d@ -f2- | rev)"
-if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch=$(echo ${mongo_uri_scratch} | rev | cut -d@ -f1 | rev) ; fi
+if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch="$(echo "${mongo_uri_scratch}" | rev | cut -d@ -f1 | rev)" ; fi
-mongo_uri_port=$(echo ${mongo_uri_scratch} | grep : | rev | cut -d: -f2- | rev)
+mongo_uri_port="$(echo "${mongo_uri_scratch}" | grep : | rev | cut -d: -f2- | rev)"
-if [ -n "${mongo_uri_port}" ]; then mongo_uri_port=$(echo ${mongo_uri_scratch} | rev | cut -d: -f1 | cut -d/ -f2 | rev) ; fi
+if [ -n "${mongo_uri_port}" ]; then mongo_uri_port="$(echo "${mongo_uri_scratch}" | rev | cut -d: -f1 | cut -d/ -f2 | rev)" ; fi
-mongo_uri_hostname=$(echo ${mongo_uri_scratch} | cut -d/ -f1 | cut -d: -f1 )
+mongo_uri_hostname="$(echo "${mongo_uri_scratch}" | cut -d/ -f1 | cut -d: -f1 )"
-mongo_uri_database=$(echo ${mongo_uri_scratch} | cut -d/ -f2 | cut -d? -f1 )
+mongo_uri_database="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f1 )"
-mongo_uri_options=$(echo ${mongo_uri_scratch} | cut -d/ -f2 | cut -d? -f2 )
+mongo_uri_options="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f2 )"
DB_NAME=${DB_NAME:-"${mongo_uri_database,,}"}
DB_HOST=${DB_HOST:-"${mongo_uri_hostname,,}"}
else
DB_PORT=${DB_PORT:-27017}
-[[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
-[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
+transform_file_var DB_AUTH
[[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
fi
;;
"mysql" | "mariadb" )
dbtype=mysql
DB_PORT=${DB_PORT:-3306}
-[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
-[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
+transform_file_var DB_PASS
+if [ -n "${DB_PASS}" ] ; then export MYSQL_PWD=${DB_PASS} ; fi
+if var_true "${MYSQL_ENABLE_TLS}" ; then
+if [ -n "${MYSQL_TLS_CA_FILE}" ] ; then
+mysql_tls_args="--ssl_ca=${MYSQL_TLS_CA_FILE}"
+fi
+if [ -n "${MYSQL_TLS_CERT_FILE}" ] ; then
+mysql_tls_args="${mysql_tls_args} --ssl_cert=${MYSQL_TLS_CERT_FILE}"
+fi
+if [ -n "${MYSQL_TLS_KEY_FILE}" ] ; then
+mysql_tls_args="${mysql_tls_args} --ssl_key=${MYSQL_TLS_KEY_FILE}"
+fi
+if var_true "${MYSQL_TLS_VERIFY}" ; then
+mysql_tls_args="${mysql_tls_args} --ssl-verify-server-cert"
+fi
+if [ -n "${MYSQL_TLS_VERSION}" ] ; then
+mysql_tls_args="${mysql_tls_args} --tls_version=${MYSQL_TLS_VERSION}"
+fi
+fi
;;
"mssql" | "microsoftsql" )
apkArch="$(apk --print-arch)"; \
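The TLS block only activates when `MYSQL_ENABLE_TLS` is true; an illustrative opt-in configuration using the variable names from the defaults above (all paths and values are placeholders):

````bash
# Illustrative opt-in TLS configuration for MySQL/MariaDB backups
export MYSQL_ENABLE_TLS=TRUE
export MYSQL_TLS_CA_FILE=/etc/ssl/cert.pem     # default shipped with the image
export MYSQL_TLS_VERIFY=TRUE                   # verify the server certificate
export MYSQL_TLS_VERSION="TLSv1.2,TLSv1.3"     # restrict permitted protocol versions
# Optional client certificate pair for mutual TLS:
export MYSQL_TLS_CERT_FILE=/certs/client-cert.pem
export MYSQL_TLS_KEY_FILE=/certs/client-key.pem
````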
@@ -62,14 +88,12 @@ bootstrap_variables() {
postgres* | "pgsql" ) postgres* | "pgsql" )
dbtype=pgsql dbtype=pgsql
DB_PORT=${DB_PORT:-5432} DB_PORT=${DB_PORT:-5432}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}" [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas" sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
;; ;;
"redis" ) "redis" )
dbtype=redis dbtype=redis
DB_PORT=${DB_PORT:-6379} DB_PORT=${DB_PORT:-6379}
[[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}" [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
;; ;;
sqlite* ) sqlite* )
@@ -82,22 +106,38 @@ bootstrap_variables() {
esac
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
-file_env 'S3_KEY_ID'
-file_env 'S3_KEY_SECRET'
+transform_file_var \
+S3_BUCKET \
+S3_KEY_ID \
+S3_KEY_SECRET \
+S3_PATH \
+S3_REGION \
+S3_HOST \
+S3_PROTOCOL \
+S3_EXTRA_OPTS \
+S3_CERT_CA_FILE
+fi
+if [ "${BACKUP_LOCATION,,}" = "blobxfer" ] ; then
+transform_file_var \
+BLOBXFER_STORAGE_ACCOUNT \
+BLOBXFER_STORAGE_KEY
fi
}
backup_couch() {
prepare_dbbackup
target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
+ltarget=couch_${DB_NAME}_${DB_HOST#*//}
compression
pre_dbbackup ${DB_NAME}
print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
curl -sSL -X GET ${DB_HOST}:${DB_PORT}/${DB_NAME}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
-check_exit_code $target
+check_exit_code backup $target
generate_checksum
move_dbbackup
+check_exit_code move $target
post_dbbackup ${DB_NAME}
}
@@ -115,17 +155,20 @@ backup_influx() {
prepare_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
target=influx_${db}_${DB_HOST#*//}_${now}
+ltarget=influx_${db}_${DB_HOST#*//}
compression
pre_dbbackup $db
print_notice "Dumping Influx database: '${db}'"
-influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
+influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
-check_exit_code $target_dir
+check_exit_code backup $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
+ltarget=influx_${db}_${DB_HOST#*//}
generate_checksum
move_dbbackup
+check_exit_code move $target_dir
post_dbbackup $db
done
;;
@@ -134,16 +177,19 @@ backup_influx() {
prepare_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
target=influx2_${db}_${DB_HOST#*//}_${now}
+ltarget=influx2_${db}_${DB_HOST#*//}
compression
pre_dbbackup $db
print_notice "Dumping Influx2 database: '${db}'"
-influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
+influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
-check_exit_code $target_dir
+check_exit_code backup $target_dir
create_archive
target=influx2_${db}_${DB_HOST#*//}_${now}.tar${extension}
+ltarget=influx2_${db}_${DB_HOST#*//}
generate_checksum
move_dbbackup
+check_exit_code move $target_dir
post_dbbackup $db
done
;;
@@ -154,37 +200,42 @@ backup_mongo() {
prepare_dbbackup
if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
+ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
else
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
+ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
mongo_compression="--gzip"
compression_string="and compressing with gzip"
fi
if [ -n "${MONGO_CUSTOM_URI}" ] ; then
-mongo_backup_parameter="--uri=${MONGO_CUSTOM_URI} ${EXTRA_OPTS}"
+mongo_backup_parameter="--uri=${MONGO_CUSTOM_URI} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS}"
else
-mongo_backup_parameter="--host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}"
+mongo_backup_parameter="--host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS}"
fi
pre_dbbackup "${DB_NAME}"
print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
silent mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} ${mongo_backup_parameter}
exit_code=$?
-check_exit_code $target
+check_exit_code backup $target
generate_checksum
move_dbbackup
+check_exit_code move $target
post_dbbackup "${DB_NAME}"
}
backup_mssql() {
prepare_dbbackup
target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
+ltarget=mssql_${DB_NAME,,}_${DB_HOST,,}
compression
pre_dbbackup "${DB_NAME}"
print_notice "Dumping MSSQL database: '${DB_NAME}'"
silent /opt/mssql-tools18/bin/sqlcmd -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE [${DB_NAME}] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
-check_exit_code $target
+check_exit_code backup $target
generate_checksum
move_dbbackup
+check_exit_code move $target
post_dbbackup $DB_NAME
}
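`EXTRA_DUMP_OPTS` is appended only to the dump command itself. For MongoDB, an illustrative use (the collection name is a placeholder):

````bash
# Skip a noisy collection during mongodump runs; the value is passed through verbatim
export EXTRA_DUMP_OPTS="--excludeCollection=sessions"
````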
@@ -198,7 +249,7 @@ backup_mysql() {
if [ "${DB_NAME,,}" = "all" ] ; then if [ "${DB_NAME,,}" = "all" ] ; then
print_debug "Preparing to back up everything except for information_schema and _* prefixes" print_debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema ) db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER ${mysql_tls_args} ${EXTRA_OPTS} ${EXTRA_ENUMERATION_OPTS} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n') db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do for db_exclude in ${db_names_exclusions} ; do
@@ -216,28 +267,32 @@ backup_mysql() {
for db in ${db_names} ; do
prepare_dbbackup
target=mysql_${db}_${DB_HOST,,}_${now}.sql
+ltarget=mysql_${db}_${DB_HOST,,}
compression
pre_dbbackup $db
print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
-mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
-check_exit_code $target
+check_exit_code backup $target
generate_checksum
move_dbbackup
+check_exit_code move $target
post_dbbackup $db
done
else
print_debug "Not splitting database dumps into their own files"
prepare_dbbackup
target=mysql_all_${DB_HOST,,}_${now}.sql
+ltarget=mysql_all_${DB_HOST,,}
compression
pre_dbbackup all
print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
-mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
-check_exit_code $target
+check_exit_code backup $target
generate_checksum
move_dbbackup
+check_exit_code move $target
post_dbbackup all
fi
}
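The split between `EXTRA_ENUMERATION_OPTS` (applied to the `SHOW DATABASES` query) and `EXTRA_DUMP_OPTS` (applied to mysqldump) lets the two be tuned independently; illustrative values only:

````bash
# Applied when enumerating databases with the mysql client
export EXTRA_ENUMERATION_OPTS="--connect-timeout=10"
# Applied to mysqldump only, e.g. to avoid locking busy MyISAM tables
export EXTRA_DUMP_OPTS="--skip-lock-tables"
````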
@@ -265,20 +320,23 @@ backup_pgsql() {
for db in ${db_names} ; do
prepare_dbbackup
target=pgsql_${db}_${DB_HOST,,}_${now}.sql
+ltarget=pgsql_${db}_${DB_HOST,,}
compression
pre_dbbackup $db
print_notice "Dumping PostgreSQL database: '${db}' ${compression_string}"
-pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
-check_exit_code $target
+check_exit_code backup $target
generate_checksum
move_dbbackup
+check_exit_code move $target
post_dbbackup $db
done
else
print_debug "Not splitting database dumps into their own files"
prepare_dbbackup
target=pgsql_all_${DB_HOST,,}_${now}.sql
+ltarget=pgsql_${db}_${DB_HOST,,}
compression
pre_dbbackup all
print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
@@ -290,11 +348,12 @@ backup_pgsql() {
for x_db_name in ${tmp_db_names} ; do
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
-pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
-check_exit_code $target
+check_exit_code backup $target
generate_checksum
move_dbbackup
+check_exit_code move $target
post_dbbackup all
fi
}
@@ -303,7 +362,8 @@ backup_redis() {
prepare_dbbackup
print_notice "Dumping Redis - Flushing Redis Cache First"
target=redis_all_${DB_HOST,,}_${now}.rdb
-echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
+ltarget=redis_${DB_HOST,,}
+echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS}
sleep 10
try=5
while [ $try -gt 0 ] ; do
@@ -322,9 +382,10 @@ backup_redis() {
compression
pre_dbbackup all
$compress_cmd "${TEMP_LOCATION}/${target_original}"
-check_exit_code $target
+check_exit_code backup $target
generate_checksum
move_dbbackup
+check_exit_code move $target
post_dbbackup all
}
@@ -333,15 +394,17 @@ backup_sqlite3() {
db=$(basename "${DB_HOST}")
db="${db%.*}"
target=sqlite3_${db}_${now}.sqlite3
+ltarget=sqlite3_${db}.sqlite3
compression
pre_dbbackup $db
print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
silent sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
exit_code=$?
-check_exit_code $target
+check_exit_code backup $target
cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
generate_checksum
move_dbbackup
+check_exit_code move $target
post_dbbackup $db
}
@@ -396,8 +459,9 @@ check_availability() {
;;
"mysql" )
counter=0
+transform_file_var DB_PASS
export MYSQL_PWD=${DB_PASS}
-while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
+while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
sleep 5
(( counter+=5 ))
print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
@@ -413,8 +477,7 @@ check_availability() {
;;
"pgsql" )
counter=0
-export PGPASSWORD=${DB_PASS}
-until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
+until pg_isready --host=${DB_HOST} --port=${DB_PORT} -q
do
sleep 5
(( counter+=5 ))
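The reworked probe leans on `pg_isready`, which only checks whether the server accepts connections and therefore needs no credentials or database name; its exit codes can be checked directly:

````bash
pg_isready --host="${DB_HOST}" --port="${DB_PORT:-5432}" -q
# 0 = accepting connections, 1 = rejecting (e.g. still starting up),
# 2 = no response, 3 = invalid parameters
echo "pg_isready exit: $?"
````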
@@ -450,13 +513,28 @@ check_availability() {
check_exit_code() {
print_debug "DB Backup Exit Code is ${exit_code}"
-case "${exit_code}" in
-	0 )
-		print_info "DB Backup of '${1}' completed successfully"
-		;;
-	* )
-		print_error "DB Backup of '${1}' reported errors"
-		master_exit_code=1
+case "${1}" in
+	backup )
+		case "${exit_code}" in
+			0 )
+				print_info "DB Backup of '${2}' completed successfully"
+				;;
+			* )
+				print_error "DB Backup of '${2}' reported errors"
+				master_exit_code=1
+				;;
+		esac
+		;;
+	move )
+		case "${move_exit_code}" in
+			0 )
+				print_debug "Moving of backup '${2}' completed successfully"
+				;;
+			* )
+				print_error "Moving of backup '${2}' reported errors"
+				master_exit_code=1
+				;;
+		esac
		;;
esac
}
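With the two-phase signature, each `backup_*` function above now calls the checker twice, once per phase; schematically:

````bash
some_dump_command > "${TEMP_LOCATION}/${target}"   # placeholder for mysqldump/pg_dump/etc.
exit_code=$?
check_exit_code backup "${target}"   # first phase: did the dump succeed?
generate_checksum
move_dbbackup                        # sets move_exit_code
check_exit_code move "${target}"     # second phase: did the move to storage succeed?
````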
@@ -465,14 +543,17 @@ cleanup_old_data() {
if [ -n "${DB_CLEANUP_TIME}" ]; then if [ -n "${DB_CLEANUP_TIME}" ]; then
if [ "${master_exit_code}" != 1 ]; then if [ "${master_exit_code}" != 1 ]; then
case "${BACKUP_LOCATION,,}" in case "${BACKUP_LOCATION,,}" in
"blobxfer" )
print_info "Cleaning up old backups on filesystem"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
print_info "Syncing changes via blobxfer"
silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} --delete --delete-only
;;
"file" | "filesystem" ) "file" | "filesystem" )
print_info "Cleaning up old backups on filesystem" print_info "Cleaning up old backups on filesystem"
mkdir -p "${DB_DUMP_TARGET}" mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \; find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
if [ "${BACKUP_LOCATION,,}" = "blobxfer" ] ; then
print_info "Syncing changes via blobxfer"
silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} --delete --delete-only
fi
;; ;;
"s3" | "minio" ) "s3" | "minio" )
print_info "Cleaning up old backups on S3 storage" print_info "Cleaning up old backups on S3 storage"
@@ -484,7 +565,7 @@ cleanup_old_data() {
s3_filename=$(echo $s3_file | awk {'print $4'})
if [ "$s3_filename" != "" ] ; then
print_debug "Deleting $s3_filename"
-silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
fi
@@ -497,16 +578,18 @@ cleanup_old_data() {
fi
}
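`DB_CLEANUP_TIME` is expressed in minutes, since it feeds the `find -mmin` test used in all of the cleanup branches above; for example, a seven-day retention:

````bash
# 7 days expressed in minutes, matching the find -mmin test above
export DB_CLEANUP_TIME=$(( 7 * 24 * 60 ))   # 10080
````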
compression() {
if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
PARALLEL_COMPRESSION_THREADS=1
fi
+if var_true "${GZ_RSYNCABLE}" ; then
+gz_rsyncable=--rsyncable
+fi
case "${COMPRESSION,,}" in
gz* )
-if var_true "${GZ_RSYNCABLE}" ; then
-gz_rsyncable=--rsyncable
-fi
compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} ${gz_rsyncable}"
compression_type="gzip"
extension=".gz"
@@ -531,7 +614,7 @@ compression() {
target=${target}.xz
;;
zst* )
-compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
+compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} ${gz_rsyncable}"
compression_type="zstd"
dir_compress_cmd=${compress_cmd}
extension=".zst"
@@ -564,7 +647,7 @@ compression() {
create_archive() {
if [ "${exit_code}" = "0" ] ; then
-print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
+print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
else
print_error "Skipping creating archive file because backup did not complete successfully"
@@ -624,13 +707,25 @@ move_dbbackup() {
"file" | "filesystem" ) "file" | "filesystem" )
print_debug "Moving backup to filesystem" print_debug "Moving backup to filesystem"
mkdir -p "${DB_DUMP_TARGET}" mkdir -p "${DB_DUMP_TARGET}"
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/ if var_true "${ENABLE_CHECKSUM}" ; then mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/ ; fi
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}" mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
move_exit_code=$?
if var_true "${CREATE_LATEST_SYMLINK}" ; then
ln -sf "${DB_DUMP_TARGET}"/"${target}" "${DB_DUMP_TARGET}"/latest-"${ltarget}"
fi
if [ -n "${DB_ARCHIVE_TIME}" ] ; then
mkdir -p "${DB_DUMP_TARGET_ARCHIVE}"
find "${DB_DUMP_TARGET}"/ -maxdepth 1 -mmin +"${DB_ARCHIVE_TIME}" -iname "*" -exec mv {} "${DB_DUMP_TARGET_ARCHIVE}" \;
fi
;; ;;
"s3" | "minio" ) "s3" | "minio" )
print_debug "Moving backup to S3 Bucket" print_debug "Moving backup to S3 Bucket"
export AWS_ACCESS_KEY_ID=${S3_KEY_ID} if [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET} export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
else
print_debug "Variable S3_KEY_ID or S3_KEY_SECRET is not set. Please ensure sufficiant IAM role is assigned."
fi
export AWS_DEFAULT_REGION=${S3_REGION} export AWS_DEFAULT_REGION=${S3_REGION}
if [ -f "${S3_CERT_CA_FILE}" ] ; then if [ -f "${S3_CERT_CA_FILE}" ] ; then
print_debug "Using Custom CA for S3 Backups" print_debug "Using Custom CA for S3 Backups"
@@ -644,23 +739,26 @@ move_dbbackup() {
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}" [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
move_exit_code=$?
if var_true "${ENABLE_CHECKSUM}" ; then if var_true "${ENABLE_CHECKSUM}" ; then
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi fi
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}" if var_true "${ENABLE_CHECKSUM}" ; then rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"; fi
rm -rf "${TEMP_LOCATION}"/"${target}" rm -rf "${TEMP_LOCATION}"/"${target}"
;; ;;
"blobxfer" ) "blobxfer" )
print_info "Moving backup to S3 Bucket with blobxfer" print_info "Moving backup to external storage with blobxfer"
mkdir -p "${DB_DUMP_TARGET}" mkdir -p "${DB_DUMP_TARGET}"
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/ if var_true "${ENABLE_CHECKSUM}" ; then mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/; fi
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}" mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET}
move_exit_code=$?
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}" if var_true "${ENABLE_CHECKSUM}" ; then rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}" ; fi
rm -rf "${TEMP_LOCATION}"/"${target}" rm -rf "${TEMP_LOCATION}"/"${target}"
;; ;;
esac esac
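With the new guard, key-based credentials become optional: when the variables are unset, the aws CLI falls back to its default credential provider chain (instance profile, task role, shared config). An illustrative run (bucket and paths are placeholders):

````bash
# Illustrative: rely on an attached IAM role instead of static keys
unset S3_KEY_ID S3_KEY_SECRET
aws s3 cp /tmp/backups/mysql_example_db_20220315-000000.sql.gz s3://example-bucket/backups/
````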
@@ -676,6 +774,7 @@ prepare_dbbackup() {
now=$(date +"%Y%m%d-%H%M%S")
now_time=$(date +"%H:%M:%S")
now_date=$(date +"%Y-%m-%d")
+ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
}
@@ -734,11 +833,11 @@ post_dbbackup() {
### Post Script Support
if [ -n "${POST_SCRIPT}" ] ; then
if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
-eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
+eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
if [ -x "${POST_SCRIPT}" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}'"
-eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
+eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executable!"
fi
@@ -755,12 +854,12 @@ post_dbbackup() {
if [ -d "${SCRIPT_LOCATION_POST}" ] && dir_notempty "${SCRIPT_LOCATION_POST}" ; then if [ -d "${SCRIPT_LOCATION_POST}" ] && dir_notempty "${SCRIPT_LOCATION_POST}" ; then
for f in $(find ${SCRIPT_LOCATION_POST} -name \*.sh -type f); do for f in $(find ${SCRIPT_LOCATION_POST} -name \*.sh -type f); do
if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else else
if [ -x "${f}" ] ; then if [ -x "${f}" ] ; then
print_notice "Executing post backup custom script : '${f}'" print_notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else else
print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!" print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
fi fi
@@ -785,14 +884,6 @@ sanity_test() {
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
esac
-if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
-sanity_var S3_BUCKET "S3 Bucket"
-sanity_var S3_PATH "S3 Path"
-sanity_var S3_REGION "S3 Region"
-file_env 'S3_KEY_ID'
-file_env 'S3_KEY_SECRET'
-fi
}
setup_mode() {
@@ -805,8 +896,7 @@ setup_mode() {
mkdir -p /etc/services.d/99-run_forever
cat <<EOF > /etc/services.d/99-run_forever/run
#!/bin/bash
-while true
-do
+while true; do
sleep 86400
done
EOF

View File

@@ -37,7 +37,7 @@ else
case "$1" in case "$1" in
"-h" ) "-h" )
cat <<EOF cat <<EOF
${IMAGE_NAME} Restore Tool ${IMAGE_NAME} Restore Tool ${IMAGE_VERSION}
(c) 2022 Dave Conroy (https://github.com/tiredofit) (c) 2022 Dave Conroy (https://github.com/tiredofit)
This script will assist you in recovering databases taken by the Docker image. This script will assist you in recovering databases taken by the Docker image.
@@ -78,7 +78,7 @@ fi
get_filename() {
COLUMNS=12
prompt="Please select a file to restore:"
-options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
+options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | sort -z | xargs -0) )
PS3="$prompt "
select opt in "${options[@]}" "Custom" "Quit" ; do
if (( REPLY == 2 + ${#options[@]} )) ; then
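Piping through `sort -z` keeps the NUL-delimited stream intact, so filenames sort alphabetically even when they contain spaces; the same pipeline can be tried standalone (paths illustrative):

````bash
# NUL-safe alphabetical listing of restorable dumps
find /backup -maxdepth 1 -type f -not -name '*.md5' -not -name '*.sha1' -print0 \
  | sort -z | xargs -0 -n1 basename
````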
@@ -131,7 +131,7 @@ EOF
fi
if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
-print_debug "Parsed DBHostpho Variant: 3 - No Env, Parsed Filename"
+print_debug "Parsed DBHost Variant: 3 - No Env, Parsed Filename"
q_dbhost_variant=3
q_dbhost_menu=$(cat <<EOF
@@ -335,7 +335,7 @@ EOF
case "${q_dbtype_variant}" in case "${q_dbtype_variant}" in
1 ) 1 )
while true; do while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}M${cdgy}\) | \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in case "${q_dbtype,,}" in
m* ) m* )
r_dbtype=mysql r_dbtype=mysql
@@ -358,7 +358,7 @@ EOF
;;
2 )
while true; do
-read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
e* | "" )
r_dbtype=${DB_TYPE}
@@ -385,7 +385,7 @@ EOF
;;
3 )
while true; do
-read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
f* | "" )
r_dbtype=${p_dbtype}
@@ -413,7 +413,7 @@ EOF
4 )
while true; do
-read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
e* | "" )
r_dbtype=${DB_TYPE}
@@ -427,6 +427,10 @@ EOF
r_dbtype=mysql
break
;;
+o* )
+r_dbtype=mongo
+break
+;;
p* )
r_dbtype=postgresql
break
@@ -915,6 +919,30 @@ case "${r_dbtype}" in
exit_code=$?
;;
mongo )
+cat << EOF
+Do you wish to drop any existing data before restoring?
+Y ) Yes
+N ) No
+Q ) Quit
+EOF
+echo -e "${coff}"
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) \| \(${cwh}Q${cdgy}\) : ${cwh}${coff})" q_menu_mongo_dropdb
+case "${q_menu_mongo_dropdb,,}" in
+"n" | "no" )
+unset mongo_dropdb
+;;
+"q" | "exit" )
+print_info "Quitting Script"
+exit 1
+;;
+"y" | "yes" | * )
+mongo_dropdb="--drop"
+;;
+esac
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'" print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
if [ "${ENABLE_COMPRESSION,,}" != "none" ] && [ "${ENABLE_COMPRESSION,,}" != "false" ] ; then if [ "${ENABLE_COMPRESSION,,}" != "none" ] && [ "${ENABLE_COMPRESSION,,}" != "false" ] ; then
mongo_compression="--gzip" mongo_compression="--gzip"
@@ -925,7 +953,11 @@ case "${r_dbtype}" in
if [ -n "${r_dbpass}" ] ; then if [ -n "${r_dbpass}" ] ; then
mongo_pass="-p=${r_dbpass}" mongo_pass="-p=${r_dbpass}"
fi fi
mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename} if [ -n "${DB_AUTH}" ] ; then
mongo_auth_database="--authenticationDatabase=${DB_AUTH}"
fi
mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_dropdb} ${mongo_user} ${mongo_pass} --archive=${r_filename} ${mongo_auth_database}
exit_code=$? exit_code=$?
;; ;;
* ) * )