Compare commits

...

51 Commits
4.0.31 ... main

Author SHA1 Message Date
dave@tiredofit.ca
016c5c1a23 Release 4.1.21 - See CHANGELOG.md
Some checks failed
build_image / build (push) Has been cancelled
2025-08-08 09:26:13 -07:00
Dave Conroy
de8a952825 Merge pull request #421 from tlex/main 2025-08-08 09:24:55 -07:00
Alex Thomae
c7912d355e fix: tabs used instead of spaces 2025-08-08 07:37:56 +02:00
Alex Thomae
15902829c0 #420: fix exit_code got changed to exitcode 2025-08-08 07:37:29 +02:00
dave@tiredofit.ca
2c8f40e37c Release 4.1.20 - See CHANGELOG.md 2025-07-23 08:55:52 -07:00
Dave Conroy
c360150117 Merge pull request #418 from alteriks/main 2025-07-23 08:54:36 -07:00
Krzysztof Dajka
7c32879e80 fix: exitcode masking db errors 2025-07-22 16:56:53 +02:00
dave@tiredofit.ca
a475f7d0f3 Release 4.1.19 - See CHANGELOG.md
Some checks failed
build_image / build (push) Has been cancelled
2025-05-28 08:05:58 -07:00
dave@tiredofit.ca
399727cd37 Release 4.1.18 - See CHANGELOG.md
Some checks failed
build_image / build (push) Has been cancelled
2025-05-12 08:33:53 -07:00
Dave Conroy
f58de70dc4 Merge pull request #412 from logicoa/logicoa-mongodb-wildcard-drop-order
fix: wildcard case order
2025-05-12 08:33:01 -07:00
logicoa
5ab0cbe7c5 fix: wildcard case order
mongoDB restore always dropped schema, irrespective of flag variant, due to wildcard being the first option in case.
2025-05-12 11:18:43 +02:00
dave@tiredofit.ca
9d5406b6a9 Release 4.1.17 - See CHANGELOG.md
Some checks failed
build_image / build (push) Has been cancelled
2025-04-17 21:08:25 -07:00
dave@tiredofit.ca
53760fbe4d Release 4.1.16 - See CHANGELOG.md 2025-02-21 07:39:11 -08:00
Dave Conroy
a72b562c70 Merge pull request #402 from fermion2020/main
Update restore script
2025-02-21 07:38:17 -08:00
Ihor Kolos
fc586c204f Update restore script
Switch the mysql command to mariadb to resolve the deprecation warning.
Fix the restore issue caused by missing SSL configuration (error message: "TLS/SSL error: SSL is required, but the server does not support it").
2025-02-20 14:31:23 -06:00
dave@tiredofit.ca
e9ed8d1a72 Release 4.1.15 - See CHANGELOG.md
Some checks failed
build_image / build (push) Has been cancelled
2025-01-29 08:01:35 -08:00
Dave Conroy
78ac4a4a81 Add username/password check and append auth details to a couch db connectivity check 2025-01-29 07:59:49 -08:00
Dave Conroy
949aafdbe1 fix - zyclonite being attached to compression for couchdb 2025-01-29 07:53:39 -08:00
dave@tiredofit.ca
7a94472055 Release 4.1.14 - See CHANGELOG.md
Some checks failed
build_image / build (push) Has been cancelled
2025-01-21 12:59:35 -08:00
dave@tiredofit.ca
23aeaf58a2 Release 4.1.13 - See CHANGELOG.md
Some checks are pending
build_image / build (push) Waiting to run
2025-01-21 09:30:06 -08:00
Dave Conroy
b88816337f Seperate TLS configuration for MariaDB and MySQL 2025-01-21 09:29:29 -08:00
Dave Conroy
ac8181b3b5 Update MySQL client to 8.4.4 2025-01-21 08:33:22 -08:00
Dave Conroy
c75c41a34d Update AWS CLI to 1.37.2 2025-01-21 08:32:52 -08:00
dave@tiredofit.ca
244e411e76 Release 4.1.12 - See CHANGELOG.md 2024-12-13 07:51:35 -08:00
dave@tiredofit.ca
e69ac23898 Release 4.1.11 - See CHANGELOG.md 2024-12-13 07:40:04 -08:00
dave@tiredofit.ca
261951045f Release 4.1.10 - See CHANGELOG.md 2024-12-12 08:38:57 -08:00
dave@tiredofit.ca
67f4326d0b Release 4.1.9 - See CHANGELOG.md 2024-11-07 11:16:32 -08:00
dave@tiredofit.ca
2cd62b8732 Release 4.1.8 - See CHANGELOG.md 2024-10-29 18:58:34 -07:00
dave@tiredofit.ca
0d2b3ccc8c Release 4.1.4 - See CHANGELOG.md 2024-08-13 16:34:44 -07:00
Dave Conroy
90f53a7f00 Merge pull request #358 from ToshY/docs/blobxfer-mode
[docs] fixed blobxfer mode correct parameter name
2024-07-31 13:07:30 -07:00
ToshY
c5f89da681 fixed blobxfermode correct parameter name 2024-07-31 08:11:32 +00:00
dave@tiredofit.ca
753a780204 Release 4.1.3 - See CHANGELOG.md 2024-07-05 12:06:15 -07:00
dave@tiredofit.ca
7c07253428 Release 4.1.2 - See CHANGELOG.md 2024-07-02 16:15:22 -07:00
Dave Conroy
0fdb447706 Merge pull request #354 from effectivelywild/main
Resolve multiple issues using Azure blobs for remote storage
2024-07-02 16:13:41 -07:00
Frank Muise
0d23c2645c Add --no-overwrite to blobxfer download 2024-06-30 16:28:16 -04:00
Frank Muise
4786ea9c7f Update log entry for blob sync 2024-06-30 14:56:50 -04:00
Frank Muise
a26dba947b Fix issues with Azure blobs 2024-06-30 14:53:31 -04:00
dave@tiredofit.ca
b9fa7d18b1 Release 4.1.1 - See CHANGELOG.md 2024-06-19 15:41:45 -07:00
dave@tiredofit.ca
626d276c68 Release 4.1.0 - See CHANGELOG.md 2024-05-25 12:48:58 -07:00
dave@tiredofit.ca
f7f72ba2c1 Release 4.0.35 - See CHANGELOG.md 2024-01-14 20:22:08 -08:00
Dave Conroy
2f05d76f4e README weirdness 2024-01-03 17:33:52 -08:00
Dave Conroy
c9a634ff25 Convert > to - in README 2024-01-03 17:21:01 -08:00
dave@tiredofit.ca
0ce21e8f43 Release 4.0.34 - See CHANGELOG.md 2024-01-02 14:01:28 -08:00
Dave Conroy
a700eb0fef Merge pull request #315 from ToshY/docs/latest-symlink-format
[Docs] Updated `DEFAULT_CREATE_LATEST_SYMLINK` description format
2024-01-02 13:21:59 -08:00
Dave Conroy
7baa3774c7 Merge pull request #318 from devmethodgit/main
Fix environment variables in examples
2024-01-02 13:21:43 -08:00
Dave Conroy
341e4d12ea Update case statement to support arm64|aarch64 2024-01-02 13:21:07 -08:00
Dave Conroy
5c51bbcb7e Wrap if statement in double brackets 2024-01-02 12:54:57 -08:00
@vladimirzyuzin
24d9a9a937 Fix environment variables 2023-12-30 20:58:26 +03:00
ToshY
591b8d6dbd updated create latest symlink description format 2023-12-26 12:24:52 +00:00
dave@tiredofit.ca
a5b15b4412 Release 4.0.33 - See CHANGELOG.md 2023-12-18 07:58:54 -08:00
dave@tiredofit.ca
6692cf9834 Release 4.0.32 - See CHANGELOG.md 2023-12-15 15:32:32 -08:00
12 changed files with 461 additions and 104 deletions

View File

@@ -8,7 +8,7 @@ on:
 jobs:
   build:
-    uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
     #uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
-    #uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
+    uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
     secrets: inherit

View File

@@ -9,7 +9,7 @@ on:
 jobs:
   build:
-    uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
     #uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
-    #uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
+    uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
     secrets: inherit

View File

@@ -1,3 +1,171 @@
## 4.1.21 2025-08-08 <dave at tiredofit dot ca>
### Changed
- Fix for 4.1.20 release inadvertently failing backups (credit tlex@github)
## 4.1.20 2025-07-23 <dave at tiredofit dot ca>
### Changed
- Fix backup error checking routines #417 - credit alteriks@github
## 4.1.19 2025-05-28 <dave at tiredofit dot ca>
### Changed
- Force overwrite manual scripts as opposed to append (#414)
## 4.1.18 2025-05-12 <dave at tiredofit dot ca>
### Changed
- Fix MongoDB restore so it no longer drops the DB before each restore unless explicitly told to (credit logicoa@github)
## 4.1.17 2025-04-17 <dave at tiredofit dot ca>
### Changed
- Fix issue with Postgres database cleanup when ALL databases are backed up as one file (SPLIT_DB=FALSE)
## 4.1.16 2025-02-21 <dave at tiredofit dot ca>
### Added
- Update to tiredofit/alpine:7.10.28
- Support TLS connectivity with restore script (credit fermion2020@github)
## 4.1.15 2025-01-29 <dave at tiredofit dot ca>
### Added
- Add username and password support when checking for connectivity to CouchDB (credit: JvSomeren)
### Changed
- Fix issue with couchdb compression routines
## 4.1.14 2025-01-21 <dave at tiredofit dot ca>
### Changed
- Downgrade AWS client to 1.36.40 for the time being due to provider incompatibilities with 1.37.x
## 4.1.13 2025-01-21 <dave at tiredofit dot ca>
### Added
- Update MySQL client to 8.4.4
- Update AWS Client to 1.37.2
### Changed
- Separate MySQL and MariaDB TLS configuration for arguments that have deviated
## 4.1.12 2024-12-13 <dave at tiredofit dot ca>
### Changed
- Fix for 4.1.11
## 4.1.11 2024-12-13 <dave at tiredofit dot ca>
### Changed
- Fix backing up 'ALL' databases with MariaDB
## 4.1.10 2024-12-12 <dave at tiredofit dot ca>
### Added
- Use tiredofit/alpine:3.21-7.10.27 base
- Use the actual binary name when dumping mariadb and mysql databases
- Silence filename and SSL warnings that were appearing with MariaDB / MySQL
## 4.1.9 2024-11-07 <dave at tiredofit dot ca>
### Added
- Pin to tiredofit/alpine:edge-7.10.19
- MySQL 8.4.3 client
- MSSQL and MSODBC 18.4.1.1-1
- MariaDB 11.x Support
- Influx2 Client 2.7.5
- AWS Client 1.35.13
- Postgresql 17.x Support
## 4.1.8 2024-10-29 <dave at tiredofit dot ca>
Rebuild using 4.1.4 sources - ignore any versions of 4.1.5-4.1.7
### Added
## 4.1.4 2024-08-13 <dave at tiredofit dot ca>
Please note: if you encrypt using a passphrase, you may have encountered issues with manual decryption; this release fixes that.
If you try to decrypt manually and your passphrase fails, try wrapping it in single (') or double (") quotes.
### Changed
- Fix for stray quotes appearing inside of ENCRYPT_PASSPHRASE variables
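To illustrate the note above, a minimal manual-decryption sketch; the filename and passphrase are illustrative, and the quoting is the point:

    # Hypothetical encrypted dump; wrap the passphrase in quotes so spaces survive
    gpg --batch --pinentry-mode loopback \
        --passphrase 'my secret passphrase' \
        --decrypt mysql_example_dbhost_20240813.sql.zst.gpg > mysql_example_dbhost_20240813.sql.zst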
## 4.1.3 2024-07-05 <dave at tiredofit dot ca>
### Changed
- Rebuild to support tiredofit/alpine:7.10.0
## 4.1.2 2024-07-02 <effectivelywild@github>
### Added
- Add support for Azure Blob containers
- Fix timestamps when comparing previous backups
- Resolve unnecessary read operations in Azure
- Resolve issues with backup cleanup operations in Azure
## 4.1.1 2024-06-19 <dave at tiredofit dot ca>
### Changed
- Fix issue where PostgreSQL globals were not being deleted when backing up ALL (#352)
## 4.1.0 2024-05-25 <dave at tiredofit dot ca>
Note that arm/v7 builds have been removed from this release going forward
### Added
- Introduce DEFAULT/DBXX_MYSQL_CLIENT option to use mariadb or mysql for client dumping to solve incompatibility issues
- Alpine 3.20 Base
- MariaDB 10.11.8 Client
- AWS Client 1.32.113
- MySQL Client 8.4.0
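As a usage sketch for the new DEFAULT/DBXX_MYSQL_CLIENT option (host and credentials are placeholders):

    docker run -d --name db-backup \
        -e DB01_TYPE=mysql \
        -e DB01_HOST=mysql-host \
        -e DB01_NAME=example \
        -e DB01_USER=example \
        -e DB01_PASS=examplepassword \
        -e DB01_MYSQL_CLIENT=mysql \
        tiredofit/db-backup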
## 4.0.35 2024-01-14 <dave at tiredofit dot ca>
### Changed
- Fix issue with email notifications and not being able to add a From statement
## 4.0.34 2024-01-02 <dave at tiredofit dot ca>
### Changed
- Change the way architectures are detected to re-enable backups with MSSQL and Influx2
## 4.0.33 2023-12-18 <dave at tiredofit dot ca>
### Changed
- Allow _OPTS variables to contain spaces
- Switch references of _DUMP_OPTS to _BACKUP_OPTS
## 4.0.32 2023-12-15 <dave at tiredofit dot ca>
### Changed
- Fix issue with directories not properly being backed up (InfluxDB)
## 4.0.31 2023-12-12 <dave at tiredofit dot ca>
### Changed

View File

@@ -1,21 +1,21 @@
 ARG DISTRO=alpine
-ARG DISTRO_VARIANT=3.19
+ARG DISTRO_VARIANT=3.21-7.10.28
 FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"
-### Set Environment Variables
 ENV INFLUX1_CLIENT_VERSION=1.8.0 \
-    INFLUX2_CLIENT_VERSION=2.7.3 \
-    MSODBC_VERSION=18.3.2.1-1 \
-    MSSQL_VERSION=18.3.1.1-1 \
-    AWS_CLI_VERSION=1.31.5 \
+    INFLUX2_CLIENT_VERSION=2.7.5 \
+    MSODBC_VERSION=18.4.1.1-1 \
+    MSSQL_VERSION=18.4.1.1-1 \
+    MYSQL_VERSION=mysql-8.4.4 \
+    MYSQL_REPO_URL=https://github.com/mysql/mysql-server \
+    AWS_CLI_VERSION=1.36.40 \
     CONTAINER_ENABLE_MESSAGING=TRUE \
     CONTAINER_ENABLE_MONITORING=TRUE \
     IMAGE_NAME="tiredofit/db-backup" \
     IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
-### Dependencies
 RUN source /assets/functions/00-container && \
     set -ex && \
     addgroup -S -g 10000 dbbackup && \
@@ -27,11 +27,14 @@ RUN source /assets/functions/00-container && \
         build-base \
         bzip2-dev \
         cargo \
+        cmake \
         git \
         go \
         libarchive-dev \
+        libtirpc-dev \
         openssl-dev \
         libffi-dev \
+        ncurses-dev \
         python3-dev \
         py3-pip \
         xz-dev \
@@ -44,13 +47,16 @@ RUN source /assets/functions/00-container && \
         gpg-agent \
         groff \
         libarchive \
+        libtirpc \
         mariadb-client \
         mariadb-connector-c \
         mongodb-tools \
+        ncurses \
         openssl \
         pigz \
-        postgresql16 \
-        postgresql16-client \
+        pixz \
+        postgresql17 \
+        postgresql17-client \
         pv \
         py3-botocore \
         py3-colorama \
@@ -69,36 +75,49 @@ RUN source /assets/functions/00-container && \
         zstd \
         && \
     \
-    apkArch="$(uname -m)"; \
-    case "$apkArch" in \
-        x86_64) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
-        arm64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
+    case "$(uname -m)" in \
+        "x86_64" ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
+        "arm64" | "aarch64" ) mssql=true ; mssql_arch=arm64; influx2=true ; influx_arch=arm64 ;; \
         *) sleep 0.1 ;; \
     esac; \
     \
-    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
-    if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
+    if [ "${mssql,,}" = "true" ] ; then \
+        curl -sSLO https://download.microsoft.com/download/7/6/d/76de322a-d860-4894-9945-f0cc5d6a45f8/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; \
+        curl -sSLO https://download.microsoft.com/download/7/6/d/76de322a-d860-4894-9945-f0cc5d6a45f8/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; \
+        echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; \
+    else \
+        echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; \
+    fi; \
+    \
+    if [ "${influx2,,}" = "true" ] ; then \
+        curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; \
+        chmod +x /usr/src/influx ; \
+        mv /usr/src/influx /usr/sbin/ ; \
+    else \
+        echo >&2 "Unable to build Influx 2 on this system" ; \
+    fi ; \
+    \
     clone_git_repo https://github.com/influxdata/influxdb "${INFLUX1_CLIENT_VERSION}" && \
     go build -o /usr/sbin/influxd ./cmd/influxd && \
     strip /usr/sbin/influxd && \
+    \
+    clone_git_repo "${MYSQL_REPO_URL}" "${MYSQL_VERSION}" && \
+    cmake \
+        -DCMAKE_BUILD_TYPE=MinSizeRel \
+        -DCMAKE_INSTALL_PREFIX=/opt/mysql \
+        -DFORCE_INSOURCE_BUILD=1 \
+        -DWITHOUT_SERVER:BOOL=ON \
+        && \
+    make -j$(nproc) install && \
+    \
+    pip3 install --break-system-packages awscli==${AWS_CLI_VERSION} && \
+    pip3 install --break-system-packages blobxfer && \
+    \
     mkdir -p /usr/src/pbzip2 && \
     curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
     cd /usr/src/pbzip2 && \
     make && \
     make install && \
+    mkdir -p /usr/src/pixz && \
+    curl -sSL https://github.com/vasi/pixz/releases/download/v1.0.7/pixz-1.0.7.tar.xz | tar xvfJ - --strip 1 -C /usr/src/pixz && \
+    cd /usr/src/pixz && \
+    ./configure \
+        --prefix=/usr \
+        --sysconfdir=/etc \
+        --localstatedir=/var \
+        && \
+    make && \
+    make install && \
-    pip3 install --break-system-packages awscli==${AWS_CLI_VERSION} && \
-    pip3 install --break-system-packages blobxfer && \
     \
     package remove .db-backup-build-deps && \
     package cleanup && \

View File

@@ -1,6 +1,6 @@
 The MIT License (MIT)
-Copyright (c) 2023 Dave Conroy
+Copyright (c) 2025 Dave Conroy
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

View File

@@ -267,6 +267,7 @@ Encryption occurs after compression and the encrypted filename will have a `.gpg
 | `DEFAULT_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
 | `DEFAULT_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
 | `DEFAULT_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
+| `DEFAULT_MYSQL_CLIENT` | Choose between `mariadb` or `mysql` client to perform dump operations for compatibility purposes | `mariadb` | |
 | `DEFAULT_MYSQL_EVENTS` | Backup Events | `TRUE` | |
 | `DEFAULT_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
 | `DEFAULT_MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction | `TRUE` | |
@@ -325,7 +326,7 @@ If `DEFAULT_BACKUP_LOCTION` = `FILESYSTEM` then the following options are used:
 | Variable | Description | Default |
 | ------------------------------------ | ----------------------------------------------------------------------------------------------------- | ------------------------------------- |
-| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
+| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)_(DB_NAME)_(DB_HOST)` | `TRUE` |
 | `DEFAULT_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
 | `DEFAULT_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
 | `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` |
@@ -355,11 +356,14 @@ If `DEFAULT_BACKUP_LOCATION` = `S3` then the following options are used:
 If `DEFAULT_BACKUP_LOCATION` = `blobxfer` then the following options are used:
 | Parameter | Description | Default | `_FILE` |
-| -------------------------------------- | ------------------------------------------- | ------------------- | ------- |
+| -------------------------------------- | ------------------------------------------------------------------- | ------------------- | ------- |
 | `DEFAULT_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
 | `DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
 | `DEFAULT_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
+| `DEFAULT_BLOBXFER_MODE` | Azure Storage mode e.g. `auto`, `file`, `append`, `block` or `page` | `auto` | x |
+- When `DEFAULT_BLOBXFER_MODE` is set to `auto` it will use blob containers by default. If the `DEFAULT_BLOBXFER_REMOTE_PATH` path does not exist a blob container with that name will be created.
 > This service uploads files from backup target directory `DEFAULT_FILESYSTEM_PATH`.
 > If a cleanup configuration in `DEFAULT_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
@@ -635,11 +639,14 @@ If `DB01_BACKUP_LOCATION` = `S3` then the following options are used:
 If `DB01_BACKUP_LOCATION` = `blobxfer` then the following options are used:
 | Parameter | Description | Default | `_FILE` |
-| ----------------------------------- | ------------------------------------------- | ------------------- | ------- |
+| -------------------------------------- | ------------------------------------------------------------------- | ------------------- | ------- |
 | `DB01_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
 | `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
 | `DB01_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
+| `DB01_BLOBXFER_REMOTE_MODE` | Azure Storage mode e.g. `auto`, `file`, `append`, `block` or `page` | `auto` | x |
+- When `DEFAULT_BLOBXFER_MODE` is set to `auto` it will use blob containers by default. If the `DEFAULT_BLOBXFER_REMOTE_PATH` path does not exist a blob container with that name will be created.
 > This service uploads files from backup directory `DB01_BACKUP_FILESYSTEM_PATH`.
 > If a cleanup configuration in `DB01_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
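A configuration sketch using the blobxfer variables from the tables above (storage account name and key are placeholders):

    docker run -d --name db-backup \
        -e DEFAULT_BACKUP_LOCATION=blobxfer \
        -e DEFAULT_BLOBXFER_STORAGE_ACCOUNT=examplestorage \
        -e DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY=examplekey \
        -e DEFAULT_BLOBXFER_REMOTE_PATH=/docker-db-backup \
        -e DEFAULT_BLOBXFER_MODE=auto \
        tiredofit/db-backup

With `auto`, blob containers are targeted by default and the container named by the remote path is created if it does not exist.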

View File

@@ -31,8 +31,8 @@ services:
       - BACKUP_JOB_CONCURRENCY=1      # Only run one job at a time
       - DEFAULT_CHECKSUM=NONE         # Don't create checksums
       - DEFAULT_COMPRESSION=ZSTD      # Compress all with ZSTD
-      - DEFAULT_DUMP_INTERVAL=1440    # Backup every 1440 minutes
-      - DEFAULT_DUMP_BEGIN=0000       # Start backing up at midnight
+      - DEFAULT_BACKUP_INTERVAL=1440  # Backup every 1440 minutes
+      - DEFAULT_BACKUP_BEGIN=0000     # Start backing up at midnight
       - DEFAULT_CLEANUP_TIME=8640     # Cleanup backups after a week
       - DB01_TYPE=mariadb
@@ -40,8 +40,8 @@ services:
       - DB01_NAME=example
       - DB01_USER=example
       - DB01_PASS=examplepassword
-      - DB01_DUMP_INTERVAL=30    # (override) Backup every 30 minutes
-      - DB01_DUMP_BEGIN=+1       # (override) Backup starts immediately
+      - DB01_BACKUP_INTERVAL=30  # (override) Backup every 30 minutes
+      - DB01_BACKUP_BEGIN=+1     # (override) Backup starts immediately
       - DB01_CLEANUP_TIME=180    # (override) Cleanup backups older than 180 minutes
       - DB01_CHECKSUM=SHA1       # (override) Create a SHA1 checksum
       - DB01_COMPRESSION=GZ      # (override) Compress with GZIP
@@ -51,8 +51,8 @@ services:
       #- DB02_NAME=example
       #- DB02_USER=example
       #- DB02_PASS=examplepassword
-      #- DB02_DUMP_INTERVAL=60   # (override) Backup every 60 minutes
-      #- DB02_DUMP_BEGIN=+10     # (override) Backup starts in ten minutes
+      #- DB02_BACKUP_INTERVAL=60 # (override) Backup every 60 minutes
+      #- DB02_BACKUP_BEGIN=+10   # (override) Backup starts in ten minutes
       #- DB02_CLEANUP_TIME=240   # (override) Cleanup backups older than 240 minutes
       #- DB02_CHECKSUM=MD5       # (override) Create an MD5 checksum
       #- DB02_COMPRESSION=BZ     # (override) Compress with BZIP

View File

@@ -45,7 +45,7 @@ services:
       - DB01_NAME=test1            # Create this database
       - DB01_USER=sa
       - DB01_PASS=5hQa0utRFBpIY3yhoIyE
-      - DB01_DUMP_INTERVAL=5       # backup every 5 minutes
+      - DB01_BACKUP_INTERVAL=5     # backup every 5 minutes
      # - DB01_DUMP_BEGIN=0000      # backup starts at midnight vs not set immediately
       - DB01_CLEANUP_TIME=60       # clean backups older than 60 minutes
       - DB01_CHECKSUM=SHA1         # Set Checksum to be SHA1

View File

@@ -46,7 +46,7 @@ services:
       - DB01_NAME=test1
       - DB01_USER=sa
       - DB01_PASS=5hQa0utRFBpIY3yhoIyE
-      - DB01_DUMP_INTERVAL=1       # backup every minute
+      - DB01_BACKUP_INTERVAL=1     # backup every minute
      # - DB01_DUMP_BEGIN=0000      # backup starts at midnight vs unset immediately
       - DB01_CLEANUP_TIME=5        # clean backups older than 5 minutes
       - DB01_CHECKSUM=NONE

View File

@@ -6,9 +6,9 @@ DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
 DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
 DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
 DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
-DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
 DEFAULT_BACKUP_LOCATION=${DEFAULT_BACKUP_LOCATION:-"FILESYSTEM"}
 DEFAULT_BLOBXFER_REMOTE_PATH=${DEFAULT_BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
+DEFAULT_BLOBXFER_MODE=${DEFAULT_BLOBXFER_MODE:-"auto"}
 DEFAULT_CHECKSUM=${DEFAULT_CHECKSUM:-"MD5"}
 DEFAULT_COMPRESSION=${DEFAULT_COMPRESSION:-"ZSTD"}
 DEFAULT_COMPRESSION_LEVEL=${DEFAULT_COMPRESSION_LEVEL:-"3"}
@@ -20,6 +20,7 @@ DEFAULT_FILESYSTEM_PATH_PERMISSION=${DEFAULT_FILESYSTEM_PATH_PERMISSION:-"700"}
 DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
 DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"}
 DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"}
+DEFAULT_MYSQL_CLIENT=${DEFAULT_MYSQL_CLIENT:-"mariadb"}
 DEFAULT_MYSQL_ENABLE_TLS=${DEFAULT_MYSQL_ENABLE_TLS:-"FALSE"}
 DEFAULT_MYSQL_EVENTS=${DEFAULT_MYSQL_EVENTS:-"TRUE"}
 DEFAULT_MYSQL_MAX_ALLOWED_PACKET=${DEFAULT_MYSQL_MAX_ALLOWED_PACKET:-"512M"}
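These defaults rely on the shell's `${VAR:-default}` expansion: an operator-supplied value wins, otherwise the fallback applies. A quick illustration:

    unset DEFAULT_MYSQL_CLIENT
    echo "${DEFAULT_MYSQL_CLIENT:-mariadb}"   # prints: mariadb
    DEFAULT_MYSQL_CLIENT=mysql
    echo "${DEFAULT_MYSQL_CLIENT:-mariadb}"   # prints: mysql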

View File

@@ -66,6 +66,7 @@ bootstrap_variables() {
             DEFAULT_BLOBXFER_STORAGE_ACCOUNT \
             DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY \
             DEFAULT_BLOBXFER_REMOTE_PATH \
+            DEFAULT_BLOBXFER_MODE \
             DB"${backup_instance_number}"_AUTH \
             DB"${backup_instance_number}"_TYPE \
             DB"${backup_instance_number}"_HOST \
@@ -93,6 +94,7 @@ bootstrap_variables() {
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT \ DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT \
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \ DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \
DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \ DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \
DB"${backup_instance_number}"_BLOBXFER_MODE \
BLOBXFER_STORAGE_ACCOUNT \ BLOBXFER_STORAGE_ACCOUNT \
BLOBXFER_STORAGE_ACCOUNT_KEY \ BLOBXFER_STORAGE_ACCOUNT_KEY \
DB_HOST \ DB_HOST \
@@ -163,11 +165,21 @@ bootstrap_variables() {
sed -i "s|_PASS='\(.*\)'|_PASS=\1|g" "${backup_instance_vars}" sed -i "s|_PASS='\(.*\)'|_PASS=\1|g" "${backup_instance_vars}"
fi fi
if grep -qo ".*_PASSPHRASE='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _PASSPHRASE variable with quotes"
sed -i "s|_PASSPHRASE='\(.*\)'|_PASSPHRASE=\1|g" "${backup_instance_vars}"
fi
if grep -qo "MONGO_CUSTOM_URI='.*'" "${backup_instance_vars}"; then if grep -qo "MONGO_CUSTOM_URI='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _MONGO_CUSTOM_URI variable with quotes" print_debug "[bootstrap_variables] [backup_init] Found _MONGO_CUSTOM_URI variable with quotes"
sed -i "s|MONGO_CUSTOM_URI='\(.*\)'|MONGO_CUSTOM_URI=\1|g" "${backup_instance_vars}" sed -i "s|MONGO_CUSTOM_URI='\(.*\)'|MONGO_CUSTOM_URI=\1|g" "${backup_instance_vars}"
fi fi
if grep -qo ".*_OPTS='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _OPTS variable with quotes"
sed -i "s|_OPTS='\(.*\)'|_OPTS=\1|g" "${backup_instance_vars}"
fi
transform_backup_instance_variable() { transform_backup_instance_variable() {
if grep -q "^DB${1}_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then if grep -q "^DB${1}_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2-)" export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
@@ -194,6 +206,7 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_MODE backup_job_blobxfer_mode
transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum
transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time
transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression
@@ -204,7 +217,7 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PASSPHRASE backup_job_encrypt_passphrase transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PASSPHRASE backup_job_encrypt_passphrase
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PRIVATE_KEY backup_job_encrypt_private_key transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PRIVATE_KEY backup_job_encrypt_private_key
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBLIC_KEY backup_job_encrypt_public_key transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBLIC_KEY backup_job_encrypt_public_key
transform_backup_instance_variable "${backup_instance_number}" EXTRA_DUMP_OPTS backup_job_extra_dump_opts transform_backup_instance_variable "${backup_instance_number}" EXTRA_BACKUP_OPTS backup_job_extra_backup_opts
transform_backup_instance_variable "${backup_instance_number}" EXTRA_ENUMERATION_OPTS backup_job_extra_enumeration_opts transform_backup_instance_variable "${backup_instance_number}" EXTRA_ENUMERATION_OPTS backup_job_extra_enumeration_opts
transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts
transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path
@@ -216,6 +229,7 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" INFLUX_VERSION backup_job_influx_version transform_backup_instance_variable "${backup_instance_number}" INFLUX_VERSION backup_job_influx_version
transform_backup_instance_variable "${backup_instance_number}" LOG_LEVEL backup_job_log_level transform_backup_instance_variable "${backup_instance_number}" LOG_LEVEL backup_job_log_level
transform_backup_instance_variable "${backup_instance_number}" MONGO_CUSTOM_URI backup_job_mongo_custom_uri transform_backup_instance_variable "${backup_instance_number}" MONGO_CUSTOM_URI backup_job_mongo_custom_uri
transform_backup_instance_variable "${backup_instance_number}" MYSQL_CLIENT backup_job_mysql_client
transform_backup_instance_variable "${backup_instance_number}" MYSQL_ENABLE_TLS backup_job_mysql_enable_tls transform_backup_instance_variable "${backup_instance_number}" MYSQL_ENABLE_TLS backup_job_mysql_enable_tls
transform_backup_instance_variable "${backup_instance_number}" MYSQL_EVENTS backup_job_mysql_events transform_backup_instance_variable "${backup_instance_number}" MYSQL_EVENTS backup_job_mysql_events
transform_backup_instance_variable "${backup_instance_number}" MYSQL_MAX_ALLOWED_PACKET backup_job_mysql_max_allowed_packet transform_backup_instance_variable "${backup_instance_number}" MYSQL_MAX_ALLOWED_PACKET backup_job_mysql_max_allowed_packet
@@ -396,9 +410,33 @@ EOF
             dbtype=mysql
             backup_job_db_port=${backup_job_db_port:-3306}
             check_var backup_job_db_name DB"${v_instance}"_NAME "database name. Seperate multiple with commas"
+            case "${backup_job_mysql_client,,}" in
+                mariadb )
+                    _mysql_prefix=/usr/bin/
+                    _mysql_bin_prefix=mariadb-
+                ;;
+                mysql )
+                    _mysql_prefix=/opt/mysql/bin/
+                    _mysql_bin_prefix=mysql
+                ;;
+                * )
+                    print_error "I don't understand '${backup_job_mysql_client,,}' as a client. Exiting.."
+                    exit 99
+                ;;
+            esac
+            print_debug "Using '${backup_job_mysql_client,,}' as client"
             if [ -n "${backup_job_db_pass}" ] ; then export MYSQL_PWD=${backup_job_db_pass} ; fi
             if var_true "${backup_job_mysql_enable_tls}" ; then
+                case "${backup_job_mysql_client,,}" in
+                    mariadb )
+                        mysql_tls_args="--ssl"
+                    ;;
+                    mysql )
+                        mysql_tls_args="--ssl-mode=REQUIRED"
+                    ;;
+                esac
                 if [ -n "${backup_job_mysql_tls_ca_file}" ] ; then
                     mysql_tls_args="--ssl_ca=${backup_job_mysql_tls_ca_file}"
                 fi
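The two variables set above are later combined into binary names, with `${_mysql_bin_prefix/-/}` deleting the first `-` to recover the bare client name. A sketch of the resulting expansions:

    _mysql_prefix=/usr/bin/
    _mysql_bin_prefix=mariadb-
    echo "${_mysql_prefix}${_mysql_bin_prefix/-/}"    # /usr/bin/mariadb (database enumeration)
    echo "${_mysql_prefix}${_mysql_bin_prefix}dump"   # /usr/bin/mariadb-dump
    echo "${_mysql_prefix}${_mysql_bin_prefix}admin"  # /usr/bin/mariadb-admin (availability check)

    _mysql_prefix=/opt/mysql/bin/
    _mysql_bin_prefix=mysql
    echo "${_mysql_prefix}${_mysql_bin_prefix}dump"   # /opt/mysql/bin/mysqldump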
@@ -410,12 +448,28 @@ EOF
             fi
             if var_true "${backup_job_mysql_tls_verify}" ; then
-                mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
+                case "${backup_job_mysql_client,,}" in
+                    mariadb )
+                        mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
+                    ;;
+                    mysql )
+                        mysql_tls_args="${mysql_tls_args} --ssl-mode=VERIFY_CA"
+                    ;;
+                esac
             fi
             if [ -n "${backup_job_mysql_tls_version}" ] ; then
                 mysql_tls_args="${mysql_tls_args} --tls_version=${backup_job_mysql_tls_version}"
             fi
+        else
+            case "${backup_job_mysql_client,,}" in
+                mariadb )
+                    mysql_tls_args="--disable-ssl"
+                ;;
+                mysql )
+                    mysql_tls_args="--ssl-mode=DISABLED"
+                ;;
+            esac
         fi
         ;;
"mssql" | "microsoftsql" ) "mssql" | "microsoftsql" )
@@ -478,7 +532,7 @@ backup_couch() {
     prepare_dbbackup
     backup_job_filename=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt
     backup_job_filename_base=couch_${backup_job_db_name}_${backup_job_db_host#*//}
-    compressionzyclonite
+    compression
     if var_true "${DEBUG_BACKUP_COUCH}" ; then debug off; fi
     check_exit_code backup "${backup_job_filename}"
     timer backup finish
@@ -505,20 +559,19 @@ backup_influx() {
print_debug "[backup_influx] Influx DB Version 1 selected" print_debug "[backup_influx] Influx DB Version 1 selected"
for db in ${db_names}; do for db in ${db_names}; do
prepare_dbbackup prepare_dbbackup
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now} backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx_${db}_${backup_job_db_host#*//} backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
compression
pre_dbbackup "${db}" pre_dbbackup "${db}"
write_log notice "Dumping Influx database: '${db}'" write_log notice "Dumping Influx database: '${db}'"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} "${temporary_directory}"/"${backup_job_filename}" run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} "${temporary_directory}"/"${backup_job_filename}"
exit_code=$? exit_code=$?
check_exit_code backup "${backup_job_filename}" check_exit_code backup "${backup_job_filename}"
compression
create_archive create_archive
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
timer backup finish timer backup finish
file_encryption file_encryption
@@ -538,15 +591,14 @@ backup_influx() {
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now} backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//} backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
compression
pre_dbbackup "${db}" pre_dbbackup "${db}"
write_log notice "Dumping Influx2 database: '${db}'" write_log notice "Dumping Influx2 database: '${db}'"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --compression none "${temporary_directory}"/"${backup_job_filename}" run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --compression none "${temporary_directory}"/"${backup_job_filename}"
exit_code=$? exit_code=$?
check_exit_code backup "${backup_job_filename}" check_exit_code backup "${backup_job_filename}"
compression
create_archive create_archive
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
timer backup finish timer backup finish
file_encryption file_encryption
@@ -573,9 +625,9 @@ backup_mongo() {
compression_string="and compressing with gzip" compression_string="and compressing with gzip"
fi fi
if [ -n "${backup_job_mongo_custom_uri}" ] ; then if [ -n "${backup_job_mongo_custom_uri}" ] ; then
mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}" mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}"
else else
mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}" mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}"
fi fi
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
pre_dbbackup "${backup_job_db_name}" pre_dbbackup "${backup_job_db_name}"
@@ -636,7 +688,6 @@ backup_mssql() {
     compression
     pre_dbbackup all
     run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
     file_encryption
     timer backup finish
     generate_checksum
@@ -662,7 +713,7 @@ backup_mysql() {
if [ "${backup_job_db_name,,}" = "all" ] ; then if [ "${backup_job_db_name,,}" = "all" ] ; then
write_log debug "Preparing to back up everything except for information_schema and _* prefixes" write_log debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(run_as_user mysql -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema ) db_names=$(run_as_user ${_mysql_prefix}${_mysql_bin_prefix/-/} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${backup_job_db_name_exclude}" ] ; then if [ -n "${backup_job_db_name_exclude}" ] ; then
db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n') db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do for db_exclude in ${db_names_exclusions} ; do
@@ -679,14 +730,14 @@ backup_mysql() {
if var_true "${backup_job_split_db}" ; then if var_true "${backup_job_split_db}" ; then
for db in ${db_names} ; do for db in ${db_names} ; do
prepare_dbbackup prepare_dbbackup
backup_job_filename=mysql_${db}_${backup_job_db_host,,}_${now}.sql backup_job_filename=${backup_job_mysql_client,,}_${db}_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=mysql_${db}_${backup_job_db_host,,} backup_job_filename_base=${backup_job_mysql_client,,}_${db}_${backup_job_db_host,,}
compression compression
pre_dbbackup "${db}" pre_dbbackup "${db}"
write_log notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}" write_log notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null run_as_user ${play_fair} ${_mysql_prefix}${_mysql_bin_prefix}dump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$? exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}" check_exit_code backup "${backup_job_filename}"
timer backup finish timer backup finish
@@ -700,14 +751,14 @@ backup_mysql() {
     else
         write_log debug "Not splitting database dumps into their own files"
         prepare_dbbackup
-        backup_job_filename=mysql_all_${backup_job_db_host,,}_${now}.sql
-        backup_job_filename_base=mysql_all_${backup_job_db_host,,}
+        backup_job_filename=${backup_job_mysql_client,,}_all_${backup_job_db_host,,}_${now}.sql
+        backup_job_filename_base=${backup_job_mysql_client,,}_all_${backup_job_db_host,,}
         compression
         pre_dbbackup all
         write_log notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
         if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
-        run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
-        exit_code=$?
+        run_as_user ${play_fair} ${_mysql_prefix}${_mysql_bin_prefix}dump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
+        exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
         if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
         check_exit_code backup "${backup_job_filename}"
         timer backup finish
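This is the heart of the exit-code masking fix (#417, with the variable name corrected in #420): `$?` after a dump | compress | tee pipeline only reports `tee`, so failed dumps looked successful; summing the three `PIPESTATUS` entries surfaces any failing stage. A minimal bash demonstration:

    false | cat | cat
    echo $?                                                  # 0: only the last stage is seen
    false | cat | cat
    echo $((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))  # 1: the failed first stage now counts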
@@ -724,12 +775,13 @@ backup_pgsql() {
     backup_pgsql_globals() {
         prepare_dbbackup
         backup_job_filename=pgsql_globals_${backup_job_db_host,,}_${now}.sql
+        backup_job_global_base=pgsql_globals_${backup_job_db_host,,}
         compression
         pre_dbbackup "globals"
         print_notice "Dumping PostgresSQL globals: with 'pg_dumpall -g' ${compression_string}"
         if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
-        run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
-        exit_code=$?
+        run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
+        exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
         if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
         check_exit_code "${backup_job_filename}"
         timer backup finish
@@ -757,7 +809,7 @@ backup_pgsql() {
write_log debug "Excluding '${db_exclude}' from ALL DB_NAME backups" write_log debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
db_names=$(echo "$db_names" | sed "/${db_exclude}/d" ) db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
done done
_postgres_backup_globals=true _postgres_backup_globals=true
fi fi
else else
db_names=$(echo "${backup_job_db_name}" | tr ',' '\n') db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
@@ -777,8 +829,8 @@ backup_pgsql() {
pre_dbbackup "${db}" pre_dbbackup "${db}"
write_log notice "Dumping PostgresSQL database: '${db}' ${compression_string}" write_log notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$? exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}" check_exit_code backup "${backup_job_filename}"
timer backup finish timer backup finish
@@ -794,7 +846,11 @@ backup_pgsql() {
write_log debug "Not splitting database dumps into their own files" write_log debug "Not splitting database dumps into their own files"
prepare_dbbackup prepare_dbbackup
backup_job_filename=pgsql_all_${backup_job_db_host,,}_${now}.sql backup_job_filename=pgsql_all_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,} if [ "${backup_job_db_name,,}" = "all" ] ; then
backup_job_filename_base=pgsql_all_${backup_job_db_host,,}
else
backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,}
fi
compression compression
pre_dbbackup all pre_dbbackup all
write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}" write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
@@ -807,8 +863,8 @@ backup_pgsql() {
             for x_db_name in ${tmp_db_names} ; do
                 pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
             done
-        run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
-        exit_code=$?
+        run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
+        exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
         if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
         check_exit_code backup "${backup_job_filename}"
         timer backup finish
@@ -828,7 +884,7 @@ backup_redis() {
 backup_job_filename=redis_all_${backup_job_db_host,,}_${now}.rdb
 backup_job_filename_base=redis_${backup_job_db_host,,}
 if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
-echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${temporary_directory}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}
+echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${temporary_directory}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}
 sleep 10
 try=5
 while [ $try -gt 0 ] ; do
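
BGSAVE is asynchronous - redis-cli returns as soon as the server forks its snapshot - so the script sleeps and then polls a bounded number of times before giving up. One common confirmation pattern (a sketch of the general technique, not necessarily the loop body used here) is to wait for LASTSAVE to advance:

    # Sketch: LASTSAVE only changes once a background save completes.
    last=$(redis-cli -h "$host" LASTSAVE)
    redis-cli -h "$host" BGSAVE > /dev/null
    while [ "$(redis-cli -h "$host" LASTSAVE)" = "$last" ]; do
        sleep 1   # snapshot still being written
    done
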
@@ -891,14 +947,16 @@ backup_sqlite3() {
 check_availability() {
     if var_true "${DEBUG_CHECK_AVAILABILITY}" ; then debug on; fi
-    ### Set the Database Type
     if var_false "${backup_job_skip_availability_check}" ; then
         case "${dbtype}" in
             "couch" )
                 counter=0
                 code_received=0
+                if [ -n "${backup_job_db_user}" ] && [ -n ${backup_job_db_pass} ]; then
+                    _ca_couch_auth="-u ${backup_job_db_user}:${backup_job_db_pass}"
+                fi
                 while [ "${code_received}" != "200" ]; do
-                    code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${backup_job_db_host}:${backup_job_db_port})
+                    code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${_ca_couch_auth} ${backup_job_db_host}:${backup_job_db_port})
                     if [ "${code_received}" = "200" ] ; then break ; fi
                     sleep 5
                     (( counter+=5 ))
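
The availability probe now sends credentials when they are configured, since a CouchDB that requires authenticated users answers anonymous requests with 401 and the loop would never see its 200. (One subtlety: the unquoted [ -n ${backup_job_db_pass} ] is true even when the password is empty, because a one-argument test merely checks for a non-empty string - the quoted user check is what actually gates the branch.) The pattern in isolation:

    # Sketch: attach HTTP basic auth only when credentials exist.
    auth=""
    if [ -n "${user}" ] && [ -n "${pass}" ]; then
        auth="-u ${user}:${pass}"
    fi
    # ${auth} stays unquoted so that '-u user:pass' splits into two arguments.
    code=$(curl -XGET -sSL -o /dev/null -w '%{http_code}' ${auth} "${host}:${port}")
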
@@ -942,7 +1000,7 @@ check_availability() {
"mysql" ) "mysql" )
counter=0 counter=0
export MYSQL_PWD=${backup_job_db_pass} export MYSQL_PWD=${backup_job_db_pass}
while ! (run_as_user mysqladmin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do while ! (run_as_user ${_mysql_prefix}${_mysql_bin_prefix}admin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
sleep 5 sleep 5
(( counter+=5 )) (( counter+=5 ))
write_log warn "MySQL/MariaDB Server '${backup_job_db_host}' is not accessible, retrying.. (${counter} seconds so far)" write_log warn "MySQL/MariaDB Server '${backup_job_db_host}' is not accessible, retrying.. (${counter} seconds so far)"
@@ -1045,17 +1103,24 @@ cleanup_old_data() {
write_log info "Cleaning up old backups on filesystem" write_log info "Cleaning up old backups on filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}" run_as_user mkdir -p "${backup_job_filesystem_path}"
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \; find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
if var_true "${_postgres_backup_globals}"; then
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \;
fi
if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions" write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
else else
write_log info "Syncing changes via blobxfer" write_log info "Syncing changes via blobxfer"
silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only silent run_as_user blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
fi fi
;; ;;
"file" | "filesystem" ) "file" | "filesystem" )
write_log info "Cleaning up old backups on filesystem" write_log info "Cleaning up old backups on filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}" run_as_user mkdir -p "${backup_job_filesystem_path}"
run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \; run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
if var_true "${_postgres_backup_globals}"; then
run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \;
fi
;; ;;
"s3" | "minio" ) "s3" | "minio" )
write_log info "Cleaning up old backups on S3 storage" write_log info "Cleaning up old backups on S3 storage"
@@ -1156,8 +1221,9 @@ create_archive() {
if var_true "${DEBUG_CREATE_ARCHIVE}" ; then debug on; fi if var_true "${DEBUG_CREATE_ARCHIVE}" ; then debug on; fi
if [ "${exit_code}" = "0" ] ; then if [ "${exit_code}" = "0" ] ; then
write_log notice "Creating archive file of '${backup_job_filename}' with tar ${compression_string}" write_log notice "Creating archive file of '${backup_job_filename}' with tar ${compression_string}"
run_as_user tar cf - "${temporary_directory}"/"${backup_job_filename}" | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}".tar"${extension}" > /dev/null run_as_user tar cf - "${temporary_directory}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
rm -rf "${temporary_directory}"/"${backup_job_filename}" backup_job_filename="${backup_job_filename_dir}".tar"${extension}"
rm -rf "${temporary_directory}"/"${backup_job_filename_dir}"
else else
write_log error "Skipping creating archive file because backup did not complete successfully" write_log error "Skipping creating archive file because backup did not complete successfully"
fi fi
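
create_archive now archives the dump directory (backup_job_filename_dir) and, crucially, reassigns backup_job_filename to the finished .tar so every later stage - checksumming, moving, cleanup - operates on the archive rather than the directory that was just removed. The tar-through-pipe shape streams the data once instead of archiving and compressing in two passes; tee presumably runs under run_as_user so the archive ends up owned by the backup user:

    # Sketch: archive a directory, compress in flight, land the result in a single write.
    tar cf - "/tmp/backups/dumpdir" | gzip | tee "/tmp/backups/dumpdir.tar.gz" > /dev/null
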
@@ -1172,8 +1238,8 @@ create_schedulers() {
print_debug "[create_schedulers] Found '${backup_instances}' DB_HOST instances" print_debug "[create_schedulers] Found '${backup_instances}' DB_HOST instances"
if [ -n "${DB_HOST}" ] && [ "${backup_instances}" ]; then if [ -n "${DB_HOST}" ] && [ "${backup_instances}" ]; then
backup_instances=1; backup_instances=1;
print_debug "[create_schedulers] Detected using old DB_ variables" print_debug "[create_schedulers] Detected using old DB_ variables"
fi fi
for (( instance = 01; instance <= backup_instances; )) ; do for (( instance = 01; instance <= backup_instances; )) ; do
@@ -1181,7 +1247,7 @@ create_schedulers() {
     cp -R /assets/dbbackup/template-dbbackup /etc/services.available/dbbackup-"${instance}"
     sed -i "s|{{BACKUP_NUMBER}}|${instance}|g" /etc/services.available/dbbackup-"${instance}"/run
     if [ "${MODE,,}" = "manual" ] ; then service_stop dbbackup-"${instance}" ; fi
-    cat <<EOF >> /usr/bin/backup"${instance}"-now
+    cat <<EOF > /usr/bin/backup"${instance}"-now
 #!/bin/bash
 source /assets/functions/00-container
 PROCESS_NAME=db-backup${instance}
@@ -1201,7 +1267,7 @@ EOF
 EOF
 else
-    echo "/usr/bin/backup${instance}-now now" >> /usr/bin/backup-now
+    echo "/usr/bin/backup${instance}-now now" > /usr/bin/backup-now
 fi
 instance=$(echo "${instance} +1" | bc)
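
Switching both writers from append (>>) to truncate (>) makes create_schedulers idempotent: a container restart regenerates /usr/bin/backupN-now and /usr/bin/backup-now instead of stacking another copy of the script body onto the previous one. The one-character difference:

    # '>' replaces the target's contents; '>>' appends, so re-runs duplicate lines.
    echo "/usr/bin/backup1-now now" > /usr/bin/backup-now    # idempotent
    echo "/usr/bin/backup1-now now" >> /usr/bin/backup-now   # grows on every restart
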
@@ -1384,7 +1450,7 @@ notify() {
if [ -z "${SMTP_HOST}" ] ; then write_log error "[notifications] No SMTP_HOST variable set - Skipping sending Email notifications" ; skip_mail=true ; fi if [ -z "${SMTP_HOST}" ] ; then write_log error "[notifications] No SMTP_HOST variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
if [ -z "${SMTP_PORT}" ] ; then write_log error "[notifications] No SMTP_PORT variable set - Skipping sending Email notifications" ; skip_mail=true ; fi if [ -z "${SMTP_PORT}" ] ; then write_log error "[notifications] No SMTP_PORT variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
if var_nottrue "${skip_mail}" ; then if var_nottrue "${skip_mail}" ; then
if ! grep -q ^from /etc/msmptrc ; then if ! grep -q ^from /etc/msmtprc ; then
echo "from ${MAIL_FROM}" >> /etc/msmtprc echo "from ${MAIL_FROM}" >> /etc/msmtprc
fi fi
mail_recipients=$(echo "${MAIL_TO}" | tr "," "\n") mail_recipients=$(echo "${MAIL_TO}" | tr "," "\n")
@@ -1608,8 +1674,8 @@ EOF
if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions" write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
else else
write_log info "Synchronize local storage from S3 Bucket with blobxfer" write_log info "Synchronize local storage from blob container with blobxfer"
${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete ${play_fair} blobxfer download --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --restore-file-lmt --delete
write_log info "Moving backup to external storage with blobxfer" write_log info "Moving backup to external storage with blobxfer"
mkdir -p "${backup_job_filesystem_path}" mkdir -p "${backup_job_filesystem_path}"
@@ -1617,7 +1683,7 @@ EOF
     run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
-    silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
+    silent run_as_user ${play_fair} blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
     move_exit_code=$?
     if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}" ; fi
@@ -1794,7 +1860,7 @@ setup_mode() {
 cat <<EOF > /etc/services.d/99-run_forever/run
 #!/bin/bash
 while true; do
     sleep 86400
 done
 EOF
 chmod +x /etc/services.d/99-run_forever/run


@@ -912,6 +912,83 @@ get_filename() {
     r_filename=${opt}
 }
+get_ssl() {
+    if grep -q "^DB${detected_host_num}_MYSQL_ENABLE_TLS=" "${restore_vars}" ; then
+        detected_ssl_value=$(grep "^DB${detected_host_num}_MYSQL_ENABLE_TLS=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
+    fi
+    if [[ -z "${detected_ssl_value}" ]]; then
+        print_debug "Parsed SSL Variant: 1 - No Env Variable Found"
+        default_ssl="false" # Default if no env variable
+        q_ssl_variant=1
+        q_ssl_menu_opt_default="| (${cwh}N${cdgy}) * "
+        q_ssl_menu="" #No menu option
+    else
+        print_debug "Parsed SSL Variant: 2 - Env Variable DB${detected_host_num}_MYSQL_ENABLE_TLS = '${detected_ssl_value}'"
+        default_ssl="${detected_ssl_value,,}"
+        q_ssl_variant=2
+        q_ssl_menu="E ) Environment Variable DB${detected_host_num}_MYSQL_ENABLE_TLS: '${detected_ssl_value}'"
+        q_ssl_menu_opt_default="| (${cwh}E${cdgy}) * "
+    fi
+    cat <<EOF
+Do you wish to use SSL for the connection?
+${q_ssl_menu}
+Y ) Yes
+N ) No
+Q ) Quit
+EOF
+    r_ssl=""
+    case "${q_ssl_variant}" in
+        1) # No env variable, ask directly
+            while true; do
+                read -r -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N\*${cdgy}\) : ${cwh}${coff}) " q_ssl
+                case "${q_ssl,,}" in
+                    y*)
+                        r_ssl="true"
+                        break
+                        ;;
+                    n* | "")
+                        r_ssl="false"
+                        break
+                        ;;
+                    q*)
+                        print_info "Quitting Script"
+                        exit 1
+                        ;;
+                esac
+            done
+            ;;
+        2) # Env variable exists, offer it as an option
+            while true; do
+                read -r -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E\*${cdgy}\) \| \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) : ${cwh}${coff}) " q_ssl
+                case "${q_ssl,,}" in
+                    e* | "") # Default to env variable if just enter is pressed.
+                        r_ssl="${detected_ssl_value}"
+                        break
+                        ;;
+                    y*)
+                        r_ssl="true"
+                        break
+                        ;;
+                    n*)
+                        r_ssl="false"
+                        break
+                        ;;
+                    q*)
+                        print_info "Quitting Script"
+                        exit 1
+                        ;;
+                esac
+            done
+            ;;
+    esac
+}
 #### SCRIPT START
 trap control_c INT
 bootstrap_variables restore_init
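
get_ssl follows the same detect-then-prompt pattern as the script's other questions: when the saved environment in ${restore_vars} contains a DBxx_MYSQL_ENABLE_TLS entry it becomes the default answer (E, or simply Enter), otherwise the prompt defaults to no SSL. The detection itself is a plain grep/cut over the saved variables; for example, assuming host number 01:

    # Hypothetical restore_vars line: DB01_MYSQL_ENABLE_TLS=true
    detected_ssl_value=$(grep "^DB01_MYSQL_ENABLE_TLS=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
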
@@ -984,6 +1061,20 @@ else
 fi
 print_debug "Database Port '${r_dbport}'"
+## Question SSL connection
+if [[ "${r_dbtype,,}" == "mariadb" || "${r_dbtype,,}" == "mysql" ]]; then
+    if [ -n "${8}" ]; then
+        r_ssl="${8}"
+    else
+        get_ssl
+    fi
+    print_debug "SSL enable: '${r_ssl}'"
+else
+    r_ssl="false"
+    print_debug "SSL disabled for ${r_dbtype}"
+fi
 ## Parse Extension
 case "${r_filename##*.}" in
     bz* )
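
SSL thereby joins the parameters that can be supplied positionally to bypass the prompt: an eighth argument, when present, is taken as r_ssl directly. Assuming the argument order implied by the preceding prompts (filename, type, host, name, user, password, port) - an assumption, since only ${8} is visible in this hunk - a fully scripted restore might look like:

    # Sketch with a hypothetical invocation; argument order assumed, not confirmed:
    restore backup.sql.gz mariadb db-host mydb dbuser dbpass 3306 true
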
@@ -1013,8 +1104,13 @@ esac
 ## Perform a restore
 case "${r_dbtype}" in
     mariadb | mysql )
+        if [[ "${r_ssl,,}" == "false" ]]; then
+            mysql_ssl_option="--disable-ssl"
+        else
+            mysql_ssl_option=""
+        fi
         print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
-        pv ${r_filename} | ${decompress_cmd}cat | mysql -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${r_dbname}
+        pv ${r_filename} | ${decompress_cmd}cat | mariadb -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${mysql_ssl_option} ${r_dbname}
         exit_code=$?
         ;;
     pgsql | postgres* )
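
Two fixes land together in this hunk: the client binary changes from mysql to mariadb, silencing the deprecation warning the compatibility name now emits, and --disable-ssl - the negated form of the client's boolean --ssl option - is passed when SSL was declined, so restores no longer abort against servers that do not offer TLS. A quick connectivity check under the same assumptions:

    # Sketch: confirm the client connects with TLS explicitly off (hypothetical host).
    mariadb --disable-ssl -h db-host -P 3306 -u dbuser -p -e 'SELECT 1'
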
@@ -1036,9 +1132,6 @@ EOF
echo -e "${coff}" echo -e "${coff}"
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) \| \(${cwh}Q${cdgy}\) : ${cwh}${coff})" q_menu_mongo_dropdb read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) \| \(${cwh}Q${cdgy}\) : ${cwh}${coff})" q_menu_mongo_dropdb
case "${q_menu_mongo_dropdb,,}" in case "${q_menu_mongo_dropdb,,}" in
"y" | "yes" | * )
mongo_dropdb="--drop"
;;
"n" | "update" ) "n" | "update" )
unset mongo_dropdb unset mongo_dropdb
         ;;
@@ -1046,6 +1139,9 @@ EOF
print_info "Quitting Script" print_info "Quitting Script"
exit 1 exit 1
;; ;;
"y" | "yes" | * )
mongo_dropdb="--drop"
;;
esac esac
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'" print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"