Compare commits

...

68 Commits
4.0.20 ... main

Author SHA1 Message Date
dave@tiredofit.ca
016c5c1a23 Release 4.1.21 - See CHANGELOG.md 2025-08-08 09:26:13 -07:00
Dave Conroy
de8a952825 Merge pull request #421 from tlex/main 2025-08-08 09:24:55 -07:00
Alex Thomae
c7912d355e fix: tabs used instead of spaces 2025-08-08 07:37:56 +02:00
Alex Thomae
15902829c0 #420: fix exit_code got changed to exitcode 2025-08-08 07:37:29 +02:00
dave@tiredofit.ca
2c8f40e37c Release 4.1.20 - See CHANGELOG.md 2025-07-23 08:55:52 -07:00
Dave Conroy
c360150117 Merge pull request #418 from alteriks/main 2025-07-23 08:54:36 -07:00
Krzysztof Dajka
7c32879e80 fix: exitcode masking db errors 2025-07-22 16:56:53 +02:00
dave@tiredofit.ca
a475f7d0f3 Release 4.1.19 - See CHANGELOG.md 2025-05-28 08:05:58 -07:00
dave@tiredofit.ca
399727cd37 Release 4.1.18 - See CHANGELOG.md 2025-05-12 08:33:53 -07:00
Dave Conroy
f58de70dc4 Merge pull request #412 from logicoa/logicoa-mongodb-wildcard-drop-order
fix: wildcard case order
2025-05-12 08:33:01 -07:00
logicoa
5ab0cbe7c5 fix: wildcard case order
MongoDB restore always dropped the schema, irrespective of the flag setting, because the wildcard was the first option in the case statement.
2025-05-12 11:18:43 +02:00
dave@tiredofit.ca
9d5406b6a9 Release 4.1.17 - See CHANGELOG.md 2025-04-17 21:08:25 -07:00
dave@tiredofit.ca
53760fbe4d Release 4.1.16 - See CHANGELOG.md 2025-02-21 07:39:11 -08:00
Dave Conroy
a72b562c70 Merge pull request #402 from fermion2020/main
Update restore script
2025-02-21 07:38:17 -08:00
Ihor Kolos
fc586c204f Update restore script
Switch the mysql command to mariadb to resolve the deprecation warning.
Fix the restore issue caused by missing SSL configuration (error message: "TLS/SSL error: SSL is required, but the server does not support it").
2025-02-20 14:31:23 -06:00
dave@tiredofit.ca
e9ed8d1a72 Release 4.1.15 - See CHANGELOG.md 2025-01-29 08:01:35 -08:00
Dave Conroy
78ac4a4a81 Add username/password check and append auth details to a couch db connectivity check 2025-01-29 07:59:49 -08:00
Dave Conroy
949aafdbe1 fix - zyclonite being attached to compression for couchdb 2025-01-29 07:53:39 -08:00
dave@tiredofit.ca
7a94472055 Release 4.1.14 - See CHANGELOG.md 2025-01-21 12:59:35 -08:00
dave@tiredofit.ca
23aeaf58a2 Release 4.1.13 - See CHANGELOG.md 2025-01-21 09:30:06 -08:00
Dave Conroy
b88816337f Separate TLS configuration for MariaDB and MySQL 2025-01-21 09:29:29 -08:00
Dave Conroy
ac8181b3b5 Update MySQL client to 8.4.4 2025-01-21 08:33:22 -08:00
Dave Conroy
c75c41a34d Update AWS CLI to 1.37.2 2025-01-21 08:32:52 -08:00
dave@tiredofit.ca
244e411e76 Release 4.1.12 - See CHANGELOG.md 2024-12-13 07:51:35 -08:00
dave@tiredofit.ca
e69ac23898 Release 4.1.11 - See CHANGELOG.md 2024-12-13 07:40:04 -08:00
dave@tiredofit.ca
261951045f Release 4.1.10 - See CHANGELOG.md 2024-12-12 08:38:57 -08:00
dave@tiredofit.ca
67f4326d0b Release 4.1.9 - See CHANGELOG.md 2024-11-07 11:16:32 -08:00
dave@tiredofit.ca
2cd62b8732 Release 4.1.8 - See CHANGELOG.md 2024-10-29 18:58:34 -07:00
dave@tiredofit.ca
0d2b3ccc8c Release 4.1.4 - See CHANGELOG.md 2024-08-13 16:34:44 -07:00
Dave Conroy
90f53a7f00 Merge pull request #358 from ToshY/docs/blobxfer-mode
[docs] fixed blobxfer mode correct parameter name
2024-07-31 13:07:30 -07:00
ToshY
c5f89da681 fixed blobxfermode correct parameter name 2024-07-31 08:11:32 +00:00
dave@tiredofit.ca
753a780204 Release 4.1.3 - See CHANGELOG.md 2024-07-05 12:06:15 -07:00
dave@tiredofit.ca
7c07253428 Release 4.1.2 - See CHANGELOG.md 2024-07-02 16:15:22 -07:00
Dave Conroy
0fdb447706 Merge pull request #354 from effectivelywild/main
Resolve multiple issues using Azure blobs for remote storage
2024-07-02 16:13:41 -07:00
Frank Muise
0d23c2645c Add --no-overwrite to blobxfer download 2024-06-30 16:28:16 -04:00
Frank Muise
4786ea9c7f Update log entry for blob sync 2024-06-30 14:56:50 -04:00
Frank Muise
a26dba947b Fix issues with Azure blobs 2024-06-30 14:53:31 -04:00
dave@tiredofit.ca
b9fa7d18b1 Release 4.1.1 - See CHANGELOG.md 2024-06-19 15:41:45 -07:00
dave@tiredofit.ca
626d276c68 Release 4.1.0 - See CHANGELOG.md 2024-05-25 12:48:58 -07:00
dave@tiredofit.ca
f7f72ba2c1 Release 4.0.35 - See CHANGELOG.md 2024-01-14 20:22:08 -08:00
Dave Conroy
2f05d76f4e README weirdness 2024-01-03 17:33:52 -08:00
Dave Conroy
c9a634ff25 Convert > to - in README 2024-01-03 17:21:01 -08:00
dave@tiredofit.ca
0ce21e8f43 Release 4.0.34 - See CHANGELOG.md 2024-01-02 14:01:28 -08:00
Dave Conroy
a700eb0fef Merge pull request #315 from ToshY/docs/latest-symlink-format
[Docs] Updated `DEFAULT_CREATE_LATEST_SYMLINK` description format
2024-01-02 13:21:59 -08:00
Dave Conroy
7baa3774c7 Merge pull request #318 from devmethodgit/main
Fix environment variables in examples
2024-01-02 13:21:43 -08:00
Dave Conroy
341e4d12ea Update case statement to support arm64|aarch64 2024-01-02 13:21:07 -08:00
Dave Conroy
5c51bbcb7e Wrap if statement in double brackets 2024-01-02 12:54:57 -08:00
@vladimirzyuzin
24d9a9a937 Fix environment variables 2023-12-30 20:58:26 +03:00
ToshY
591b8d6dbd updated create latest symlink description format 2023-12-26 12:24:52 +00:00
dave@tiredofit.ca
a5b15b4412 Release 4.0.33 - See CHANGELOG.md 2023-12-18 07:58:54 -08:00
dave@tiredofit.ca
6692cf9834 Release 4.0.32 - See CHANGELOG.md 2023-12-15 15:32:32 -08:00
dave@tiredofit.ca
c37de5778d Release 4.0.31 - See CHANGELOG.md 2023-12-12 19:25:40 -08:00
dave@tiredofit.ca
eeeafd6ab8 Release 4.0.30 - See CHANGELOG.md 2023-12-11 15:21:01 -08:00
dave@tiredofit.ca
17daf26084 Release 4.0.29 - See CHANGELOG.md 2023-12-04 11:29:14 -08:00
Dave Conroy
b53cda99f7 Don't execute blobxfer functions if both key and secret are not set 2023-12-04 11:09:43 -08:00
Dave Conroy
2cf3e2ae70 Show proper DB Name when backing up Mongo or MSSQL 2023-12-04 08:06:57 -08:00
dave@tiredofit.ca
c7ee94aec2 Release 4.0.28 - See CHANGELOG.md 2023-12-04 07:04:08 -08:00
Dave Conroy
f44233e51a AWS CLI 1.31.5 2023-12-04 07:02:40 -08:00
dave@tiredofit.ca
ccda858b18 Release 4.0.27 - See CHANGELOG.md 2023-12-04 07:00:39 -08:00
Dave Conroy
d58b27d5ef Use alternate cron 2023-12-03 22:04:12 -08:00
dave@tiredofit.ca
fb9fe8a032 Release 4.0.26 - See CHANGELOG.md 2023-11-30 08:55:34 -08:00
Dave Conroy
b705982ae1 Restore missing _SPLIT_DB environment variable information for MySQL/Postgres 2023-11-30 08:54:49 -08:00
dave@tiredofit.ca
f031d787ae Release 4.0.25 - See CHANGELOG.md 2023-11-29 10:43:25 -08:00
Dave Conroy
3eed5fc8a0 Switch BLOBXFER_STORAGE_KEY to BLOBXFER_STORAGE_ACCOUNT_KEY 2023-11-29 10:39:58 -08:00
dave@tiredofit.ca
be619fb707 Release 4.0.24 - See CHANGELOG.md 2023-11-28 15:06:50 -08:00
dave@tiredofit.ca
cccc088b35 Release 4.0.23 - See CHANGELOG.md 2023-11-28 08:05:11 -08:00
dave@tiredofit.ca
4579f4057c Release 4.0.22 - See CHANGELOG.md 2023-11-25 08:50:25 -08:00
dave@tiredofit.ca
cd683648d0 Release 4.0.21 - See CHANGELOG.md 2023-11-22 15:40:38 -08:00
13 changed files with 642 additions and 182 deletions

View File

@@ -8,7 +8,7 @@ on:
jobs:
build:
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
secrets: inherit

View File

@@ -9,7 +9,7 @@ on:
jobs:
build:
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
secrets: inherit

View File

@@ -1,3 +1,238 @@
## 4.1.21 2025-08-08 <dave at tiredofit dot ca>
### Changed
- Fix for 4.1.20 release inadvertently failing backups (credit tlex@github)
## 4.1.20 2025-07-23 <dave at tiredofit dot ca>
### Changed
- Fix backup error checking routines #417 - credit alteriks@github
## 4.1.19 2025-05-28 <dave at tiredofit dot ca>
### Changed
- Force overwrite manual scripts as opposed to appending (#414)
## 4.1.18 2025-05-12 <dave at tiredofit dot ca>
### Changed
- Fix MongoDB restore so that it does not drop the DB before each restore unless explicitly told to (credit logicoa@github)
## 4.1.17 2025-04-17 <dave at tiredofit dot ca>
### Changed
- Fix issue with Postgres database cleanup when ALL databases are backed up as one file (SPLIT_DB=FALSE)
## 4.1.16 2025-02-21 <dave at tiredofit dot ca>
### Added
- Update to tiredofit/alpine:7.10.28
- Support TLS connectivity with restore script (credit fermion2020@github)
## 4.1.15 2025-01-29 <dave at tiredofit dot ca>
### Added
- Add username and password support when checking for connectivity to CouchDB (credit: JvSomeren)
### Changed
- Fix issue with couchdb compression routines
## 4.1.14 2025-01-21 <dave at tiredofit dot ca>
### Changed
- Downgrade AWS client to 1.36.40 for the time being, due to incompatibilities with providers on 1.37.x
## 4.1.13 2025-01-21 <dave at tiredofit dot ca>
### Added
- Update MySQL client to 8.4.4
- Update AWS Client to 1.37.2
### Changed
- Separate MySQL and MariaDB TLS configuration for arguments that have deviated
## 4.1.12 2024-12-13 <dave at tiredofit dot ca>
### Changed
- Fix for 4.1.11
## 4.1.11 2024-12-13 <dave at tiredofit dot ca>
### Changed
- Fix backing up 'ALL' databases with MariaDB
## 4.1.10 2024-12-12 <dave at tiredofit dot ca>
### Added
- Use tiredofit/alpine:3.21-7.10.27 base
- Use the actual binary name when dumping mariadb and mysql databases
- Silence warnings appearing due to filenames, and SSL warnings regarding MariaDB / MySQL
## 4.1.9 2024-11-07 <dave at tiredofit dot ca>
### Added
- Pin to tiredofit/alpine:edge-7.10.19
- MySQL 8.4.3 client
- MSSQL and MSODBC 18.4.1.1-1
- MariaDB 11.x Support
- Influx2 Client 2.7.5
- AWS Client 1.35.13
- Postgresql 17.x Support
## 4.1.8 2024-10-29 <dave at tiredofit dot ca>
Rebuild using 4.1.4 sources - ignore any versions of 4.1.5-4.1.7
## 4.1.4 2024-08-13 <dave at tiredofit dot ca>
Please note that if you use encryption with a passphrase, you may have encountered issues with manual decryption; this release fixes that.
If you try to manually decrypt and your passphrase fails, try wrapping it in single (') or double (") quotes.
### Changed
- Fix for stray quotes appearing inside of ENCRYPT_PASSPHRASE variables
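A minimal sketch of the manual decryption described above, assuming GnuPG 2.x and hypothetical filenames (the image appends `.gpg` to the compressed dump):

```bash
# Quote the passphrase to survive special characters, per the note above.
# --pinentry-mode loopback lets gpg accept --passphrase non-interactively.
gpg --batch --pinentry-mode loopback --passphrase 'my secret phrase' \
    --output mysql_example_db_host_20240813.sql.zst \
    --decrypt mysql_example_db_host_20240813.sql.zst.gpg
```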
## 4.1.3 2024-07-05 <dave at tiredofit dot ca>
### Changed
- Rebuild to support tiredofit/alpine:7.10.0
## 4.1.2 2024-07-02 <effectivelywild@github>
### Added
- Add support for Azure Blob containers
- Fix timestamps when comparing previous backups
- Resolve unnecessary read operations in Azure
- Resolve issues with backup cleanup operations in Azure
## 4.1.1 2024-06-19 <dave at tiredofit dot ca>
### Changed
- Fix issue where PostgreSQL globals were not being deleted when backing up ALL (#352)
## 4.1.0 2024-05-25 <dave at tiredofit dot ca>
Note that arm/v7 builds have been removed from this release going forward
### Added
- Introduce DEFAULT/DBXX_MYSQL_CLIENT option to choose the mariadb or mysql client for dumping, to solve incompatibility issues
- Alpine 3.20 Base
- MariaDB 10.11.8 Client
- AWS Client 1.32.113
- MySQL Client 8.4.0
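As a hedged usage sketch, the new client option can be set globally or per job; the binary paths mirror the script changes later in this compare:

```bash
DEFAULT_MYSQL_CLIENT=mariadb   # default; dumps via /usr/bin/mariadb-dump
DB01_MYSQL_CLIENT=mysql        # per-job override; dumps via /opt/mysql/bin/mysqldump
```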
## 4.0.35 2024-01-14 <dave at tiredofit dot ca>
### Changed
- Fix issue with email notifications and not being able to add a From statement
## 4.0.34 2024-01-02 <dave at tiredofit dot ca>
### Changed
- Change the way architectures are detected to re-enable backups with MSSQL and Influx2
## 4.0.33 2023-12-18 <dave at tiredofit dot ca>
### Changed
- Allow _OPTS variables to contain spaces
- Switch references of _DUMP_OPTS to _BACKUP_OPTS
## 4.0.32 2023-12-15 <dave at tiredofit dot ca>
### Changed
- Fix issue with directories not properly being backed up (InfluxDB)
## 4.0.31 2023-12-12 <dave at tiredofit dot ca>
### Changed
- Support backing up databases with spaces in them
## 4.0.30 2023-12-11 <dave at tiredofit dot ca>
### Added
- Separate each job into its own temporary folder for isolation, and to better clean up jobs that back up as a directory instead of a flat file
## 4.0.29 2023-12-04 <dave at tiredofit dot ca>
### Changed
- Skip blobxfer if either account or key is not present
## 4.0.28 2023-12-04 <dave at tiredofit dot ca>
### Changed
- AWS CLI 1.31.5
- Switch to using pip for installing the AWS CLI to remove deprecation warnings
## 4.0.27 2023-12-04 <dave at tiredofit dot ca>
### Changed
- Switch to using actual crontab for cron expressions
## 4.0.26 2023-11-30 <dave at tiredofit dot ca>
### Added
- AWS CLI 1.31.4
## 4.0.25 2023-11-29 <dave at tiredofit dot ca>
### Changed
- Fix #297 - Add parameters to blobxfer to restore functionality
## 4.0.24 2023-11-28 <dave at tiredofit dot ca>
### Changed
- Fix issue with cron parsing where a value of 0 was getting clobbered by the sort command
## 4.0.23 2023-11-28 <dave at tiredofit dot ca>
### Changed
- Resolve issue with custom notification scripts not executing
## 4.0.22 2023-11-25 <dave at tiredofit dot ca>
### Changed
- Move cleanup_old_data routines to happen within the backup_ function to properly accommodate globals and ALL DB_NAME use cases
## 4.0.21 2023-11-22 <dave at tiredofit dot ca>
### Changed
- Fix for SQLite backups not being cleaned up properly due to a malformed base filename
## 4.0.20 2023-11-21 <dave at tiredofit dot ca>
### Changed

View File

@@ -1,21 +1,21 @@
ARG DISTRO=alpine
ARG DISTRO_VARIANT=edge
ARG DISTRO_VARIANT=3.21-7.10.28
FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables
ENV INFLUX1_CLIENT_VERSION=1.8.0 \
INFLUX2_CLIENT_VERSION=2.7.3 \
MSODBC_VERSION=18.3.2.1-1 \
MSSQL_VERSION=18.3.1.1-1 \
AWS_CLI_VERSION=1.29.78 \
INFLUX2_CLIENT_VERSION=2.7.5 \
MSODBC_VERSION=18.4.1.1-1 \
MSSQL_VERSION=18.4.1.1-1 \
MYSQL_VERSION=mysql-8.4.4 \
MYSQL_REPO_URL=https://github.com/mysql/mysql-server \
AWS_CLI_VERSION=1.36.40 \
CONTAINER_ENABLE_MESSAGING=TRUE \
CONTAINER_ENABLE_MONITORING=TRUE \
IMAGE_NAME="tiredofit/db-backup" \
IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
### Dependencies
RUN source /assets/functions/00-container && \
set -ex && \
addgroup -S -g 10000 dbbackup && \
@@ -27,11 +27,14 @@ RUN source /assets/functions/00-container && \
build-base \
bzip2-dev \
cargo \
cmake \
git \
go \
libarchive-dev \
libtirpc-dev \
openssl-dev \
libffi-dev \
ncurses-dev \
python3-dev \
py3-pip \
xz-dev \
@@ -44,13 +47,16 @@ RUN source /assets/functions/00-container && \
gpg-agent \
groff \
libarchive \
libtirpc \
mariadb-client \
mariadb-connector-c \
mongodb-tools \
ncurses \
openssl \
pigz \
postgresql16 \
postgresql16-client \
pixz \
postgresql17 \
postgresql17-client \
pv \
py3-botocore \
py3-colorama \
@@ -69,37 +75,49 @@ RUN source /assets/functions/00-container && \
zstd \
&& \
\
apkArch="$(uname -m)"; \
case "$apkArch" in \
x86_64) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
arm64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
case "$(uname -m)" in \
"x86_64" ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
"arm64" | "aarch64" ) mssql=true ; mssql_arch=arm64; influx2=true ; influx_arch=arm64 ;; \
*) sleep 0.1 ;; \
esac; \
\
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
clone_git_repo https://github.com/aws/aws-cli "${AWS_CLI_VERSION}" && \
python3 setup.py install --prefix=/usr && \
if [ "${mssql,,}" = "true" ] ; then \
curl -sSLO https://download.microsoft.com/download/7/6/d/76de322a-d860-4894-9945-f0cc5d6a45f8/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; \
curl -sSLO https://download.microsoft.com/download/7/6/d/76de322a-d860-4894-9945-f0cc5d6a45f8/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; \
echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; \
else \
echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; \
fi; \
\
if [ "${influx2,,}" = "true" ] ; then \
curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; \
chmod +x /usr/src/influx ; \
mv /usr/src/influx /usr/sbin/ ; \
else \
echo >&2 "Unable to build Influx 2 on this system" ; \
fi ; \
\
clone_git_repo https://github.com/influxdata/influxdb "${INFLUX1_CLIENT_VERSION}" && \
go build -o /usr/sbin/influxd ./cmd/influxd && \
strip /usr/sbin/influxd && \
\
clone_git_repo "${MYSQL_REPO_URL}" "${MYSQL_VERSION}" && \
cmake \
-DCMAKE_BUILD_TYPE=MinSizeRel \
-DCMAKE_INSTALL_PREFIX=/opt/mysql \
-DFORCE_INSOURCE_BUILD=1 \
-DWITHOUT_SERVER:BOOL=ON \
&& \
make -j$(nproc) install && \
\
pip3 install --break-system-packages awscli==${AWS_CLI_VERSION} && \
pip3 install --break-system-packages blobxfer && \
\
mkdir -p /usr/src/pbzip2 && \
curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
cd /usr/src/pbzip2 && \
make && \
make install && \
mkdir -p /usr/src/pixz && \
curl -sSL https://github.com/vasi/pixz/releases/download/v1.0.7/pixz-1.0.7.tar.xz | tar xvfJ - --strip 1 -C /usr/src/pixz && \
cd /usr/src/pixz && \
./configure \
--prefix=/usr \
--sysconfdir=/etc \
--localstatedir=/var \
&& \
make && \
make install && \
\
pip3 install --break-system-packages blobxfer && \
\
package remove .db-backup-build-deps && \
package cleanup && \

View File

@@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2023 Dave Conroy
Copyright (c) 2025 Dave Conroy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -267,6 +267,7 @@ Encryption occurs after compression and the encrypted filename will have a `.gpg
| `DEFAULT_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `DEFAULT_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DEFAULT_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `DEFAULT_MYSQL_CLIENT` | Choose between `mariadb` or `mysql` client to perform dump operations for compatibility purposes | `mariadb` | |
| `DEFAULT_MYSQL_EVENTS` | Backup Events | `TRUE` | |
| `DEFAULT_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
| `DEFAULT_MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction | `TRUE` | |
@@ -325,7 +326,7 @@ If `DEFAULT_BACKUP_LOCATION` = `FILESYSTEM` then the following options are used:
| Variable | Description | Default |
| ------------------------------------ | ----------------------------------------------------------------------------------------------------- | ------------------------------------- |
| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)_(DB_NAME)_(DB_HOST)` | `TRUE` |
| `DEFAULT_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
| `DEFAULT_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
| `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` |
@@ -355,11 +356,14 @@ If `DEFAULT_BACKUP_LOCATION` = `S3` then the following options are used:
If `DEFAULT_BACKUP_LOCATION` = `blobxfer` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| -------------------------------------- | ------------------------------------------- | ------------------- | ------- |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DEFAULT_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
| Parameter | Description | Default | `_FILE` |
| -------------------------------------- | ------------------------------------------------------------------- | ------------------- | ------- |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DEFAULT_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
| `DEFAULT_BLOBXFER_MODE` | Azure Storage mode e.g. `auto`, `file`, `append`, `block` or `page` | `auto` | x |
- When `DEFAULT_BLOBXFER_MODE` is set to `auto`, it will use blob containers by default. If the `DEFAULT_BLOBXFER_REMOTE_PATH` path does not exist, a blob container with that name will be created.
> This service uploads files from the backup target directory `DEFAULT_FILESYSTEM_PATH`.
> If a cleanup configuration in `DEFAULT_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
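A minimal environment sketch tying these variables together (account name and key are hypothetical placeholders):

```bash
DEFAULT_BACKUP_LOCATION=blobxfer
DEFAULT_BLOBXFER_STORAGE_ACCOUNT=examplestorageacct   # hypothetical
DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY=examplekey==     # hypothetical; a _FILE variant is also supported
DEFAULT_BLOBXFER_REMOTE_PATH=/docker-db-backup        # created as a blob container if absent in auto mode
DEFAULT_BLOBXFER_MODE=auto                            # auto | file | append | block | page
```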
@@ -536,6 +540,7 @@ Encryption will occur after compression and the resulting filename will have a `
| `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
| | Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB01_NAME_EXCLUDE` | If using `ALL` - use this to exclude databases, separated via commas, from being backed up | | x |
| `DB01_SPLIT_DB` | If using `ALL` - use this to split each database into its own file as opposed to one singular file | `FALSE` | |
| `DB01_PORT` | MySQL / MariaDB Port | `3306` | x |
| `DB01_MYSQL_EVENTS` | Backup Events | `TRUE` | |
| `DB01_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
@@ -577,6 +582,7 @@ Encryption will occur after compression and the resulting filename will have a `
| `DB01_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
| | Backup multiple by separating with commas eg `db1,db2` | | x |
| `DB01_SPLIT_DB` | If using `ALL` - use this to split each database into its own file as opposed to one singular file | `FALSE` | |
| `DB01_PORT` | PostgreSQL Port | `5432` | x |
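To illustrate the `_SPLIT_DB` rows added above, a short sketch with hypothetical database names: combined with `ALL`, each database lands in its own dump file rather than one combined file.

```bash
DB01_NAME=ALL
DB01_SPLIT_DB=TRUE                  # one dump file per database
DB01_NAME_EXCLUDE=staging,scratch   # hypothetical databases to skip
```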
###### Redis
@@ -633,11 +639,14 @@ If `DB01_BACKUP_LOCATION` = `S3` then the following options are used:
If `DB01_BACKUP_LOCATION` = `blobxfer` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| ----------------------------------- | ------------------------------------------- | ------------------- | ------- |
| `DB01_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DB01_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
| Parameter | Description | Default | `_FILE` |
| -------------------------------------- | ------------------------------------------------------------------- | ------------------- | ------- |
| `DB01_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DB01_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
| `DB01_BLOBXFER_REMOTE_MODE` | Azure Storage mode e.g. `auto`, `file`, `append`, `block` or `page` | `auto` | x |
- When `DEFAULT_BLOBXFER_MODE` is set to `auto`, it will use blob containers by default. If the `DEFAULT_BLOBXFER_REMOTE_PATH` path does not exist, a blob container with that name will be created.
> This service uploads files from backup directory `DB01_BACKUP_FILESYSTEM_PATH`.
> If a cleanup configuration in `DB01_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.

View File

@@ -31,8 +31,8 @@ services:
- BACKUP_JOB_CONCURRENCY=1 # Only run one job at a time
- DEFAULT_CHECKSUM=NONE # Don't create checksums
- DEFAULT_COMPRESSION=ZSTD # Compress all with ZSTD
- DEFAULT_DUMP_INTERVAL=1440 # Backup every 1440 minutes
- DEFAULT_DUMP_BEGIN=0000 # Start backing up at midnight
- DEFAULT_BACKUP_INTERVAL=1440 # Backup every 1440 minutes
- DEFAULT_BACKUP_BEGIN=0000 # Start backing up at midnight
- DEFAULT_CLEANUP_TIME=8640 # Cleanup backups after a week
- DB01_TYPE=mariadb
@@ -40,8 +40,8 @@ services:
- DB01_NAME=example
- DB01_USER=example
- DB01_PASS=examplepassword
- DB01_DUMP_INTERVAL=30 # (override) Backup every 30 minutes
- DB01_DUMP_BEGIN=+1 # (override) Backup starts immediately
- DB01_BACKUP_INTERVAL=30 # (override) Backup every 30 minutes
- DB01_BACKUP_BEGIN=+1 # (override) Backup starts immediately
- DB01_CLEANUP_TIME=180 # (override) Cleanup backups when they are older than 180 minutes
- DB01_CHECKSUM=SHA1 # (override) Create a SHA1 checksum
- DB01_COMPRESSION=GZ # (override) Compress with GZIP
@@ -51,8 +51,8 @@ services:
#- DB02_NAME=example
#- DB02_USER=example
#- DB02_PASS=examplepassword
#- DB02_DUMP_INTERVAL=60 # (override) Backup every 60 minutes
#- DB02_DUMP_BEGIN=+10 # (override) Backup starts in ten minutes
#- DB02_BACKUP_INTERVAL=60 # (override) Backup every 60 minutes
#- DB02_BACKUP_BEGIN=+10 # (override) Backup starts in ten minutes
#- DB02_CLEANUP_TIME=240 # (override) Cleanup backups when they are older than 240 minutes
#- DB02_CHECKSUM=MD5 # (override) Create an MD5 checksum
#- DB02_COMPRESSION=BZ # (override) Compress with BZIP

View File

@@ -45,7 +45,7 @@ services:
- DB01_NAME=test1 # Create this database
- DB01_USER=sa
- DB01_PASS=5hQa0utRFBpIY3yhoIyE
- DB01_DUMP_INTERVAL=5 # backup every 5 minutes
- DB01_BACKUP_INTERVAL=5 # backup every 5 minutes
# - DB01_DUMP_BEGIN=0000 # backup starts at midnight vs not set immediately
- DB01_CLEANUP_TIME=60 # clean backups when they are older than 60 minutes
- DB01_CHECKSUM=SHA1 # Set Checksum to be SHA1
@@ -57,7 +57,7 @@ services:
# Add here azure storage account
- DB01_BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name}
# Add here azure storage account key
- SB01_BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
- DB01_BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
- DB01_BLOBXFER_REMOTE_PATH=docker-db-backup
restart: always
networks:

View File

@@ -46,7 +46,7 @@ services:
- DB01_NAME=test1
- DB01_USER=sa
- DB01_PASS=5hQa0utRFBpIY3yhoIyE
- DB01_DUMP_INTERVAL=1 # backup every minute
- DB01_BACKUP_INTERVAL=1 # backup every minute
# - DB01_DUMP_BEGIN=0000 # backup starts at midnight vs unset immediately
- DB01_CLEANUP_TIME=5 # clean backups when they are older than 5 minutes
- DB01_CHECKSUM=NONE

View File

@@ -24,7 +24,6 @@ else
silent sleep {{BACKUP_NUMBER}}
time_last_run=0
time_current=$(date +'%s')
if [[ "${backup_job_backup_begin}" =~ ^\+(.*)$ ]]; then
print_debug "BACKUP_BEGIN is a jump of minute starting with +"
timer plusvalue
@@ -37,10 +36,18 @@ else
#elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(.*((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then # Allow slashes, yet not supporting advanced cron yet
elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then
print_debug "BACKUP_BEGIN is a cron expression"
time_last_run=$(date +"%s")
backup_job_backup_begin=${backup_job_backup_begin//\"/}
backup_job_backup_begin=${backup_job_backup_begin//\'/}
timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
if var_false "${CRON_ALTERNATE}"; then
time_last_run=$(date +"%s")
backup_job_backup_begin=${backup_job_backup_begin//\"/}
backup_job_backup_begin=${backup_job_backup_begin//\'/}
timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
else
echo "${backup_job_backup_begin} /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}/run now" > /tmp/.container/cron/{{BACKUP_NUMBER}}-backup
crontab -l | { cat; echo "${backup_job_backup_begin} /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}/run now"; } | crontab -
s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
exit 0
fi
else
print_error "_BACKUP_BEGIN is invalid - Unable to perform scheduling"
cat <<EOF
@@ -90,7 +97,6 @@ while true; do
fi
symlink_log
cleanup_old_data
if var_false "${persist}" ; then
print_debug "Exiting due to manual mode"
@@ -101,8 +107,8 @@ while true; do
s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
else
if [ ! "${time_cron}" = "true" ]; then
print_notice "Sleeping for another $(($backup_job_backup_interval*60-backup_job_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($backup_job_backup_interval*60-backup_job_total_time))))" +'%Y-%m-%d %T %Z') "
silent sleep $(($backup_job_backup_interval*60-backup_job_total_time))
print_notice "Sleeping for another $((backup_job_backup_interval*60-backup_job_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$((backup_job_backup_interval*60-backup_job_total_time))))" +'%Y-%m-%d %T %Z') "
silent sleep $((backup_job_backup_interval*60-backup_job_total_time))
else
time_last_run=$(date +"%s")
timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
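For reference, the two scheduling forms this block distinguishes (values hypothetical); with `CRON_ALTERNATE` enabled (the default per the defaults script later in this diff), a cron expression is handed to the system crontab instead of the internal timer:

```bash
DEFAULT_BACKUP_BEGIN=+5            # jump form: first run 5 minutes after start
DEFAULT_BACKUP_BEGIN="30 2 * * *"  # cron form: run daily at 02:30
CRON_ALTERNATE=TRUE                # use the real crontab for cron expressions
```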

View File

@@ -1,13 +1,14 @@
#!/command/with-contenv bash
BACKUP_JOB_CONCURRENCY=${BACKUP_JOB_CONCURRENCY:-"1"}
CRON_ALTERNATE=${CRON_ALTERNATE:-"TRUE"}
DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
DEFAULT_BACKUP_LOCATION=${DEFAULT_BACKUP_LOCATION:-"FILESYSTEM"}
DEFAULT_BLOBXFER_REMOTE_PATH=${DEFAULT_BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
DEFAULT_BLOBXFER_MODE=${DEFAULT_BLOBXFER_MODE:-"auto"}
DEFAULT_CHECKSUM=${DEFAULT_CHECKSUM:-"MD5"}
DEFAULT_COMPRESSION=${DEFAULT_COMPRESSION:-"ZSTD"}
DEFAULT_COMPRESSION_LEVEL=${DEFAULT_COMPRESSION_LEVEL:-"3"}
@@ -19,6 +20,7 @@ DEFAULT_FILESYSTEM_PATH_PERMISSION=${DEFAULT_FILESYSTEM_PATH_PERMISSION:-"700"}
DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"}
DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"}
DEFAULT_MYSQL_CLIENT=${DEFAULT_MYSQL_CLIENT:-"mariadb"}
DEFAULT_MYSQL_ENABLE_TLS=${DEFAULT_MYSQL_ENABLE_TLS:-"FALSE"}
DEFAULT_MYSQL_EVENTS=${DEFAULT_MYSQL_EVENTS:-"TRUE"}
DEFAULT_MYSQL_MAX_ALLOWED_PACKET=${DEFAULT_MYSQL_MAX_ALLOWED_PACKET:-"512M"}

View File

@@ -66,6 +66,7 @@ bootstrap_variables() {
DEFAULT_BLOBXFER_STORAGE_ACCOUNT \
DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY \
DEFAULT_BLOBXFER_REMOTE_PATH \
DEFAULT_BLOBXFER_MODE \
DB"${backup_instance_number}"_AUTH \
DB"${backup_instance_number}"_TYPE \
DB"${backup_instance_number}"_HOST \
@@ -93,8 +94,9 @@ bootstrap_variables() {
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT \
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \
DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \
DB"${backup_instance_number}"_BLOBXFER_MODE \
BLOBXFER_STORAGE_ACCOUNT \
BLOBXFER_STORAGE_KEY \
BLOBXFER_STORAGE_ACCOUNT_KEY \
DB_HOST \
DB_NAME \
DB_PORT \
@@ -153,16 +155,31 @@ bootstrap_variables() {
fi
##
if grep -qo ".*_NAME='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _NAME variable with quotes"
sed -i "s|_NAME='\(.*\)'|_NAME=\1|g" "${backup_instance_vars}"
fi
if grep -qo ".*_PASS='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _PASS variable with quotes"
sed -i "s|_PASS='\(.*\)'|_PASS=\1|g" "${backup_instance_vars}"
fi
if grep -qo ".*_PASSPHRASE='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _PASSPHRASE variable with quotes"
sed -i "s|_PASSPHRASE='\(.*\)'|_PASSPHRASE=\1|g" "${backup_instance_vars}"
fi
if grep -qo "MONGO_CUSTOM_URI='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _MONGO_CUSTOM_URI variable with quotes"
sed -i "s|MONGO_CUSTOM_URI='\(.*\)'|MONGO_CUSTOM_URI=\1|g" "${backup_instance_vars}"
fi
if grep -qo ".*_OPTS='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _OPTS variable with quotes"
sed -i "s|_OPTS='\(.*\)'|_OPTS=\1|g" "${backup_instance_vars}"
fi
transform_backup_instance_variable() {
if grep -q "^DB${1}_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
@@ -188,7 +205,8 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" BLACKOUT_END backup_job_snapshot_blackout_finish
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_KEY backup_job_blobxfer_storage_key
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_MODE backup_job_blobxfer_mode
transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum
transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time
transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression
@@ -199,7 +217,7 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PASSPHRASE backup_job_encrypt_passphrase
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PRIVATE_KEY backup_job_encrypt_private_key
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBLIC_KEY backup_job_encrypt_public_key
transform_backup_instance_variable "${backup_instance_number}" EXTRA_DUMP_OPTS backup_job_extra_dump_opts
transform_backup_instance_variable "${backup_instance_number}" EXTRA_BACKUP_OPTS backup_job_extra_backup_opts
transform_backup_instance_variable "${backup_instance_number}" EXTRA_ENUMERATION_OPTS backup_job_extra_enumeration_opts
transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts
transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path
@@ -211,6 +229,7 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" INFLUX_VERSION backup_job_influx_version
transform_backup_instance_variable "${backup_instance_number}" LOG_LEVEL backup_job_log_level
transform_backup_instance_variable "${backup_instance_number}" MONGO_CUSTOM_URI backup_job_mongo_custom_uri
transform_backup_instance_variable "${backup_instance_number}" MYSQL_CLIENT backup_job_mysql_client
transform_backup_instance_variable "${backup_instance_number}" MYSQL_ENABLE_TLS backup_job_mysql_enable_tls
transform_backup_instance_variable "${backup_instance_number}" MYSQL_EVENTS backup_job_mysql_events
transform_backup_instance_variable "${backup_instance_number}" MYSQL_MAX_ALLOWED_PACKET backup_job_mysql_max_allowed_packet
@@ -391,9 +410,33 @@ EOF
dbtype=mysql
backup_job_db_port=${backup_job_db_port:-3306}
check_var backup_job_db_name DB"${v_instance}"_NAME "database name. Separate multiple with commas"
case "${backup_job_mysql_client,,}" in
mariadb )
_mysql_prefix=/usr/bin/
_mysql_bin_prefix=mariadb-
;;
mysql )
_mysql_prefix=/opt/mysql/bin/
_mysql_bin_prefix=mysql
;;
* )
print_error "I don't understand '${backup_job_mysql_client,,}' as a client. Exiting.."
exit 99
;;
esac
print_debug "Using '${backup_job_mysql_client,,}' as client"
if [ -n "${backup_job_db_pass}" ] ; then export MYSQL_PWD=${backup_job_db_pass} ; fi
if var_true "${backup_job_mysql_enable_tls}" ; then
case "${backup_job_mysql_client,,}" in
mariadb )
mysql_tls_args="--ssl"
;;
mysql )
mysql_tls_args="--ssl-mode=REQUIRED"
;;
esac
if [ -n "${backup_job_mysql_tls_ca_file}" ] ; then
mysql_tls_args="--ssl_ca=${backup_job_mysql_tls_ca_file}"
fi
@@ -405,12 +448,28 @@ EOF
fi
if var_true "${backup_job_mysql_tls_verify}" ; then
mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
case "${backup_job_mysql_client,,}" in
mariadb )
mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
;;
mysql )
mysql_tls_args="${mysql_tls_args} --ssl-mode=VERIFY_CA"
;;
esac
fi
if [ -n "${backup_job_mysql_tls_version}" ] ; then
mysql_tls_args="${mysql_tls_args} --tls_version=${backup_job_mysql_tls_version}"
fi
else
case "${backup_job_mysql_client,,}" in
mariadb )
mysql_tls_args="--disable-ssl"
;;
mysql )
mysql_tls_args="--ssl-mode=DISABLED"
;;
esac
fi
;;
"mssql" | "microsoftsql" )
@@ -473,7 +532,7 @@ backup_couch() {
prepare_dbbackup
backup_job_filename=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt
backup_job_filename_base=couch_${backup_job_db_name}_${backup_job_db_host#*//}
compressionzyclonite
compression
if var_true "${DEBUG_BACKUP_COUCH}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
@@ -482,10 +541,11 @@ backup_couch() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup ${backup_job_db_name}
cleanup_old_data
}
backup_influx() {
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${backup_job_db_name,,}" = "all" ] ; then
write_log debug "[backup_influx] Preparing to back up everything"
db_names=justbackupeverything
@@ -499,29 +559,27 @@ backup_influx() {
print_debug "[backup_influx] Influx DB Version 1 selected"
for db in ${db_names}; do
prepare_dbbackup
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
compression
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
pre_dbbackup "${db}"
write_log notice "Dumping Influx database: '${db}'"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} "${TEMP_PATH}"/"${backup_job_filename_dir}"
run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} "${temporary_directory}"/"${backup_job_filename}"
exit_code=$?
check_exit_code backup "${backup_job_filename_dir}"
write_log notice "Creating archive file of '${backup_job_filename_dir}' with tar ${compression_string}"
run_as_user tar cf - "${TEMP_PATH}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
check_exit_code backup "${backup_job_filename}"
compression
create_archive
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename_dir}"
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
done
;;
2 )
@@ -533,23 +591,22 @@ backup_influx() {
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
compression
pre_dbbackup "${db}"
write_log notice "Dumping Influx2 database: '${db}'"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --compression none "${TEMP_PATH}"/"${backup_job_filename_dir}"
run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --compression none "${temporary_directory}"/"${backup_job_filename}"
exit_code=$?
check_exit_code backup "${backup_job_filename_dir}"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
compression
create_archive
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
timer backup finish
file_encryption
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename_dir}"
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
done
;;
esac
@@ -568,15 +625,15 @@ backup_mongo() {
compression_string="and compressing with gzip"
fi
if [ -n "${backup_job_mongo_custom_uri}" ] ; then
mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}"
else
mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}"
fi
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
pre_dbbackup "${backup_job_db_name}"
write_log notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
write_log notice "Dumping MongoDB database: '${backup_job_db_name}' ${compression_string}"
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi
silent run_as_user ${play_fair} mongodump --archive=${TEMP_PATH}/${backup_job_filename} ${mongo_compression} ${mongo_backup_parameter}
silent run_as_user ${play_fair} mongodump --archive=${temporary_directory}/${backup_job_filename} ${mongo_compression} ${mongo_backup_parameter}
exit_code=$?
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
@@ -586,6 +643,7 @@ backup_mongo() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${backup_job_db_name}"
cleanup_old_data
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
}
@@ -596,16 +654,16 @@ backup_mssql() {
backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.bak
backup_job_filename_base=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}
pre_dbbackup "${backup_job_db_name}"
write_log notice "Dumping MSSQL database: '${DB_NAME}'"
write_log notice "Dumping MSSQL database: '${backup_job_db_name}'"
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP DATABASE [${backup_job_db_name}] TO DISK = N'${TEMP_PATH}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP DATABASE [${backup_job_db_name}] TO DISK = N'${temporary_directory}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
backup_job_filename_original=${backup_job_filename}
compression
pre_dbbackup all
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
check_exit_code backup "${backup_job_filename}"
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
timer backup finish
@@ -614,28 +672,29 @@ backup_mssql() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${backup_job_db_name}"
cleanup_old_data
;;
trn|transaction )
prepare_dbbackup
backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.trn
backup_job_filename_base=mssql_${backup_job_db_name,,}_trn_${backup_job_db_host,,}
pre_dbbackup "${backup_job_db_name}"
write_log notice "Dumping MSSQL database: '${DB_NAME}'"
write_log notice "Dumping MSSQL database: '${backup_job_db_name}'"
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP LOG [${backup_job_db_name}] TO DISK = N'${TEMP_PATH}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-log', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP LOG [${backup_job_db_name}] TO DISK = N'${temporary_directory}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-log', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
backup_job_filename_original=${backup_job_filename}
compression
pre_dbbackup all
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
check_exit_code backup "${backup_job_filename}"
run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
file_encryption
timer backup finish
generate_checksum
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${backup_job_db_name}"
cleanup_old_data
;;
esac
}
@@ -654,7 +713,7 @@ backup_mysql() {
if [ "${backup_job_db_name,,}" = "all" ] ; then
write_log debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(run_as_user mysql -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
db_names=$(run_as_user ${_mysql_prefix}${_mysql_bin_prefix/-/} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${backup_job_db_name_exclude}" ] ; then
db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
@@ -671,14 +730,14 @@ backup_mysql() {
if var_true "${backup_job_split_db}" ; then
for db in ${db_names} ; do
prepare_dbbackup
backup_job_filename=mysql_${db}_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=mysql_${db}_${backup_job_db_host,,}
backup_job_filename=${backup_job_mysql_client,,}_${db}_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=${backup_job_mysql_client,,}_${db}_${backup_job_db_host,,}
compression
pre_dbbackup "${db}"
write_log notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} $db | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
exit_code=$?
run_as_user ${play_fair} ${_mysql_prefix}${_mysql_bin_prefix}dump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
@@ -687,18 +746,19 @@ backup_mysql() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
done
else
write_log debug "Not splitting database dumps into their own files"
prepare_dbbackup
backup_job_filename=mysql_all_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=mysql_all_${backup_job_db_host,,}
backup_job_filename=${backup_job_mysql_client,,}_all_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=${backup_job_mysql_client,,}_all_${backup_job_db_host,,}
compression
pre_dbbackup all
write_log notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
exit_code=$?
run_as_user ${play_fair} ${_mysql_prefix}${_mysql_bin_prefix}dump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
@@ -707,6 +767,7 @@ backup_mysql() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup all
cleanup_old_data
fi
}
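The `exit_code` change above is the heart of the error-masking fix: in a pipeline, `$?` reports only the last command (`tee`), so a failed dump went unnoticed; summing `PIPESTATUS` catches a failure in any stage. A standalone sketch:

```bash
false | gzip | tee /dev/null > /dev/null
echo $?                                                  # 0 - tee succeeded, the failure is masked
false | gzip | tee /dev/null > /dev/null
echo $((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))  # 1 - the failed first stage is caught
```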
@@ -714,12 +775,13 @@ backup_pgsql() {
backup_pgsql_globals() {
prepare_dbbackup
backup_job_filename=pgsql_globals_${backup_job_db_host,,}_${now}.sql
backup_job_global_base=pgsql_globals_${backup_job_db_host,,}
compression
pre_dbbackup "globals"
print_notice "Dumping PostgresSQL globals: with 'pg_dumpall -g' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
exit_code=$?
run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code "${backup_job_filename}"
timer backup finish
@@ -728,6 +790,7 @@ backup_pgsql() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "globals"
cleanup_old_data
}
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
@@ -746,7 +809,7 @@ backup_pgsql() {
write_log debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
done
_postgres_backup_globals=true
_postgres_backup_globals=true
fi
else
db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
@@ -766,8 +829,8 @@ backup_pgsql() {
pre_dbbackup "${db}"
write_log notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
exit_code=$?
run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
@@ -776,13 +839,18 @@ backup_pgsql() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
done
if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi
else
write_log debug "Not splitting database dumps into their own files"
prepare_dbbackup
backup_job_filename=pgsql_all_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,}
if [ "${backup_job_db_name,,}" = "all" ] ; then
backup_job_filename_base=pgsql_all_${backup_job_db_host,,}
else
backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,}
fi
compression
pre_dbbackup all
write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
@@ -795,8 +863,8 @@ backup_pgsql() {
for x_db_name in ${tmp_db_names} ; do
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
exit_code=$?
run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
@@ -805,6 +873,7 @@ backup_pgsql() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup all
cleanup_old_data
if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi
fi
}
@@ -815,7 +884,7 @@ backup_redis() {
backup_job_filename=redis_all_${backup_job_db_host,,}_${now}.rdb
backup_job_filename_base=redis_${backup_job_db_host,,}
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${TEMP_PATH}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}
echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${temporary_directory}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}
sleep 10
try=5
while [ $try -gt 0 ] ; do
@@ -835,7 +904,7 @@ backup_redis() {
compression
pre_dbbackup all
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi
timer backup finish
check_exit_code backup "${backup_job_filename}"
@@ -844,6 +913,7 @@ backup_redis() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup all
cleanup_old_data
}
backup_sqlite3() {
@@ -851,20 +921,20 @@ backup_sqlite3() {
db=$(basename "${backup_job_db_host}")
db="${db%.*}"
backup_job_filename=sqlite3_${db}_${now}.sqlite3
backup_job_filename_base=sqlite3_${db}.sqlite3
backup_job_filename_base=sqlite3_${db}
pre_dbbackup "${db}"
write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}"
if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug on; fi
silent ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${TEMP_PATH}/backup_${now}.sqlite3'"
silent ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${temporary_directory}/backup_${now}.sqlite3'"
exit_code=$?
check_exit_code backup "${backup_job_filename}"
if [ ! -f "${TEMP_PATH}"/backup_${now}.sqlite3 ] ; then
if [ ! -f "${temporary_directory}"/backup_${now}.sqlite3 ] ; then
print_error "SQLite3 backup failed! Exitting"
return 1
fi
compression
run_as_user ${play_fair} cat "${TEMP_PATH}"/backup_${now}.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}/${backup_job_filename}" > /dev/null
rm -rf "${TEMP_PATH}"/backup_${now}.sqlite3
run_as_user ${play_fair} cat "${temporary_directory}"/backup_${now}.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}/${backup_job_filename}" > /dev/null
rm -rf "${temporary_directory}"/backup_${now}.sqlite3
if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug off; fi
timer backup finish
file_encryption
@@ -872,18 +942,21 @@ backup_sqlite3() {
move_dbbackup
check_exit_code move "${backup_job_filename}"
post_dbbackup "${db}"
cleanup_old_data
}
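The .backup dot-command used above asks the SQLite engine itself to copy the database, so the snapshot stays consistent even if another process is writing; a plain cp of a live database file can capture a torn state. A minimal sketch, with all paths assumed:

    src=/data/app.sqlite3
    snap=$(mktemp /tmp/snap.XXXXXX)
    sqlite3 "${src}" ".backup '${snap}'"
    gzip -c "${snap}" > "/backup/app_$(date +%Y%m%d-%H%M%S).sqlite3.gz"
    rm -f "${snap}"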
check_availability() {
if var_true "${DEBUG_CHECK_AVAILABILITY}" ; then debug on; fi
### Check availability per database type
if var_false "${backup_job_skip_availability_check}" ; then
case "${dbtype}" in
"couch" )
counter=0
code_received=0
if [ -n "${backup_job_db_user}" ] && [ -n ${backup_job_db_pass} ]; then
_ca_couch_auth="-u ${backup_job_db_user}:${backup_job_db_pass}"
fi
while [ "${code_received}" != "200" ]; do
code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${backup_job_db_host}:${backup_job_db_port})
code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${_ca_couch_auth} ${backup_job_db_host}:${backup_job_db_port})
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
@@ -927,7 +1000,7 @@ check_availability() {
"mysql" )
counter=0
export MYSQL_PWD=${backup_job_db_pass}
while ! (run_as_user mysqladmin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
while ! (run_as_user ${_mysql_prefix}${_mysql_bin_prefix}admin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
sleep 5
(( counter+=5 ))
write_log warn "MySQL/MariaDB Server '${backup_job_db_host}' is not accessible, retrying.. (${counter} seconds so far)"
@@ -1030,13 +1103,24 @@ cleanup_old_data() {
write_log info "Cleaning up old backups on filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}"
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
write_log info "Syncing changes via blobxfer"
silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path} --delete --delete-only
if var_true "${_postgres_backup_globals}"; then
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \;
fi
if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
else
write_log info "Syncing changes via blobxfer"
silent run_as_user blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
fi
;;
"file" | "filesystem" )
write_log info "Cleaning up old backups on filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}"
run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
if var_true "${_postgres_backup_globals}"; then
run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \;
fi
;;
"s3" | "minio" )
write_log info "Cleaning up old backups on S3 storage"
@@ -1136,8 +1220,10 @@ compression() {
create_archive() {
if var_true "${DEBUG_CREATE_ARCHIVE}" ; then debug on; fi
if [ "${exit_code}" = "0" ] ; then
write_log notice "Creating archive file of '${backup_job_filename_dir}' with tar ${compression_string}"
run_as_user tar cf - "${TEMP_PATH}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
write_log notice "Creating archive file of '${backup_job_filename}' with tar ${compression_string}"
run_as_user tar cf - "${temporary_directory}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
backup_job_filename="${backup_job_filename_dir}".tar"${extension}"
rm -rf "${temporary_directory}"/"${backup_job_filename_dir}"
else
write_log error "Skipping creating archive file because backup did not complete successfully"
fi
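The archive step above streams tar through the compressor rather than writing an uncompressed .tar first, which roughly halves the temporary disk space needed. A minimal sketch, with the directory and codec assumed:

    dir=/tmp/dump_dir
    tar cf - "${dir}" | gzip -c > "${dir}.tar.gz"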
@@ -1152,8 +1238,8 @@ create_schedulers() {
print_debug "[create_schedulers] Found '${backup_instances}' DB_HOST instances"
if [ -n "${DB_HOST}" ] && [ "${backup_instances}" ]; then
backup_instances=1;
print_debug "[create_schedulers] Detected using old DB_ variables"
backup_instances=1;
print_debug "[create_schedulers] Detected using old DB_ variables"
fi
for (( instance = 01; instance <= backup_instances; )) ; do
@@ -1161,7 +1247,7 @@ create_schedulers() {
cp -R /assets/dbbackup/template-dbbackup /etc/services.available/dbbackup-"${instance}"
sed -i "s|{{BACKUP_NUMBER}}|${instance}|g" /etc/services.available/dbbackup-"${instance}"/run
if [ "${MODE,,}" = "manual" ] ; then service_stop dbbackup-"${instance}" ; fi
cat <<EOF >> /usr/bin/backup"${instance}"-now
cat <<EOF > /usr/bin/backup"${instance}"-now
#!/bin/bash
source /assets/functions/00-container
PROCESS_NAME=db-backup${instance}
@@ -1181,7 +1267,7 @@ EOF
EOF
else
echo "/usr/bin/backup${instance}-now now" >> /usr/bin/backup-now
echo "/usr/bin/backup${instance}-now now" > /usr/bin/backup-now
fi
instance=$(echo "${instance} +1" | bc)
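The redirection change above (from >> to >) matters on container restarts: appending would stack a second copy of the wrapper script onto the first, while truncating regenerates it cleanly each time. A minimal sketch of the generation pattern, with the instance number assumed (the same point applies to the cat <<EOF form above):

    instance=1
    printf '#!/bin/bash\necho "manual backup trigger for instance %s"\n' "${instance}" > "/usr/bin/backup${instance}-now"
    chmod +x "/usr/bin/backup${instance}-now"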
@@ -1259,7 +1345,7 @@ file_encryption() {
print_notice "Encrypting with GPG Passphrase"
encrypt_routines_start_time=$(date +'%s')
encrypt_tmp_dir=$(run_as_user mktemp -d)
echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${TEMP_PATH}"/"${backup_job_filename}"
echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${temporary_directory}"/"${backup_job_filename}"
rm -rf "${encrypt_tmp_dir}"
elif [ -z "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ] && [ -n "${backup_job_encrypt_private_key}" ]; then
if [ -f "${backup_job_encrypt_private_key}" ]; then
@@ -1271,13 +1357,13 @@ file_encryption() {
silent run_as_user gpg --home ${encrypt_tmp_dir} --batch --import "${encrypt_tmp_dir}"/private_key.asc
print_debug "[file_encryption] [key] Encrypting to Public Key"
cat "${backup_job_encrypt_public_key}" | run_as_user tee "${encrypt_tmp_dir}"/public_key.asc > /dev/null
silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --encrypt --recipient-file "${encrypt_tmp_dir}"/public_key.asc "${TEMP_PATH}"/"${backup_job_filename}"
silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --encrypt --recipient-file "${encrypt_tmp_dir}"/public_key.asc "${temporary_directory}"/"${backup_job_filename}"
rm -rf "${encrypt_tmp_dir}"
fi
fi
if [ -f "${TEMP_PATH}"/"${backup_job_filename}".gpg ]; then
if [ -f "${temporary_directory}"/"${backup_job_filename}".gpg ]; then
print_debug "[file_encryption] Deleting original file"
rm -rf "${TEMP_PATH:?}"/"${backup_job_filename:?}"
rm -rf "${temporary_directory:?}"/"${backup_job_filename:?}"
backup_job_filename="${backup_job_filename}.gpg"
encrypt_routines_finish_time=$(date +'%s')
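The symmetric path above feeds the passphrase on fd 0 and points gpg at a throwaway home directory, so the container's keyring is never touched and no passphrase lands on the command line. A minimal sketch, with the file and passphrase assumed (GnuPG 2.1+ may additionally need --pinentry-mode loopback):

    tmp_home=$(mktemp -d)
    echo "${passphrase}" | gpg --batch --yes --homedir "${tmp_home}" --passphrase-fd 0 -c /backup/dump.sql.gz    # writes dump.sql.gz.gpg
    rm -rf "${tmp_home}"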
@@ -1316,7 +1402,7 @@ generate_checksum() {
checksum_routines_start_time=$(date +'%s')
write_log notice "Generating ${checksum_extension^^} sum for '${backup_job_filename}'"
cd "${TEMP_PATH}"
cd "${temporary_directory}"
run_as_user ${checksum_command} "${backup_job_filename}" | run_as_user tee "${backup_job_filename}"."${checksum_extension}" > /dev/null
chmod ${backup_job_filesystem_permission} "${backup_job_filename}"."${checksum_extension}"
checksum_value=$(run_as_user cat "${backup_job_filename}"."${checksum_extension}" | awk '{print $1}')
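The checksum is written in the standard '<hash>  <filename>' layout next to the backup, which means a restore host can verify integrity with the matching -c flag. A minimal sketch, with names assumed:

    cd /backup
    sha256sum dump.sql.gz > dump.sql.gz.sha256
    sha256sum -c dump.sql.gz.sha256    # prints 'dump.sql.gz: OK' on a match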
@@ -1338,20 +1424,20 @@ EOF
notify() {
if var_true "${DEBUG_NOTIFY}" ; then debug on; fi
notification_custom() {
if [ -n "${NOTIFICATION_SCRIPT}" ] ; then
if var_true "${NOTIFICATION_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${NOTIFICATION_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
if [ -n "${NOTIFICATION_CUSTOM_SCRIPT}" ] ; then
if var_true "${NOTIFICATION_CUSTOM_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${NOTIFICATION_CUSTOM_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
else
if [ -x "${NOTIFICATION_SCRIPT}" ] ; then
write_log notice "Found NOTIFICATION_SCRIPT environment variable. Executing '${NOTIFICATION_SCRIPT}"
if [ -x "${NOTIFICATION_CUSTOM_SCRIPT}" ] ; then
write_log notice "Found NOTIFICATION_CUSTOM_SCRIPT environment variable. Executing '${NOTIFICATION_CUSTOM_SCRIPT}"
# script timestamp logfile errorcode subject body
eval "${NOTIFICATION_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
eval "${NOTIFICATION_CUSTOM_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
else
write_log error "Can't execute NOTIFICATION_SCRIPT environment variable '${NOTIFICATION_SCRIPT}' as its filesystem bit is not executible!"
write_log error "Can't execute NOTIFICATION_CUSTOM_SCRIPT environment variable '${NOTIFICATION_CUSTOM_SCRIPT}' as its filesystem bit is not executible!"
fi
fi
else
print_error "[notifications] No NOTIFICATION_SCRIPT variable set - Skipping sending Custom notifications"
print_error "[notifications] No NOTIFICATION_CUSTOM_SCRIPT variable set - Skipping sending Custom notifications"
fi
}
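Per the 'script timestamp logfile errorcode subject body' comment above, a custom handler receives five positional arguments. A minimal sketch of such a handler (the log path is assumed; remember the execute bit unless the SKIP_X_VERIFY toggle is set):

    #!/bin/bash
    # $1 timestamp  $2 logfile  $3 errorcode  $4 subject  $5 body
    printf '[%s] rc=%s %s: %s (log: %s)\n' "${1}" "${3}" "${4}" "${5}" "${2}" >> /var/log/backup-notify.log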
@@ -1364,7 +1450,7 @@ notify() {
if [ -z "${SMTP_HOST}" ] ; then write_log error "[notifications] No SMTP_HOST variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
if [ -z "${SMTP_PORT}" ] ; then write_log error "[notifications] No SMTP_PORT variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
if var_nottrue "${skip_mail}" ; then
if ! grep -q ^from /etc/msmptrc ; then
if ! grep -q ^from /etc/msmtprc ; then
echo "from ${MAIL_FROM}" >> /etc/msmtprc
fi
mail_recipients=$(echo "${MAIL_TO}" | tr "," "\n")
@@ -1453,7 +1539,7 @@ EOF
# $4 body
if var_true "${ENABLE_NOTIFICATIONS}" ; then
notification_types=$(echo "${NOTIIFICATION_TYPE}" | tr "," "\n")
notification_types=$(echo "${NOTIFICATION_TYPE}" | tr "," "\n")
for notification_type in $notification_types ; do
case "${notification_type,,}" in
"custom" )
@@ -1488,8 +1574,8 @@ EOF
move_dbbackup() {
if var_true "${DEBUG_MOVE_DBBACKUP}" ; then debug on; fi
if [ "${exit_code}" = "0" ] ; then
dbbackup_size="$(run_as_user stat -c%s "${TEMP_PATH}"/"${backup_job_filename}")"
dbbackup_date="$(run_as_user date -r "${TEMP_PATH}"/"${backup_job_filename}" +'%s')"
dbbackup_size="$(run_as_user stat -c%s "${temporary_directory}"/"${backup_job_filename}")"
dbbackup_date="$(run_as_user date -r "${temporary_directory}"/"${backup_job_filename}" +'%s')"
case "${backup_job_size_value,,}" in
"b" | "bytes" )
@@ -1503,37 +1589,37 @@ move_dbbackup() {
;;
esac
if [ "${backup_job_size_value}" = "1" ] ; then
filesize=$(run_as_user stat -c%s "${TEMP_PATH}"/"${backup_job_filename}")
filesize=$(run_as_user stat -c%s "${temporary_directory}"/"${backup_job_filename}")
write_log notice "Backup of '${backup_job_filename}' created with the size of ${filesize} bytes"
else
filesize=$(run_as_user du -h "${TEMP_PATH}"/"${backup_job_filename}" | awk '{ print $1}')
filesize=$(run_as_user du -h "${temporary_directory}"/"${backup_job_filename}" | awk '{ print $1}')
write_log notice "Backup of '${backup_job_filename}' created with the size of ${filesize}"
fi
chmod "${backup_job_filesystem_permission}" "${TEMP_PATH}"/"${backup_job_filename}"
chmod "${backup_job_filesystem_permission}" "${temporary_directory}"/"${backup_job_filename}"
case "${backup_job_backup_location,,}" in
"file" | "filesystem" )
write_log debug "Moving backup to filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}"
if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${TEMP_PATH}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi
if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${temporary_directory}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi
if var_true "${DEBUG_MOVE_DBBACKUP}"; then
cat <<EOF
## BEGIN Before Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
## BEGIN Before Moving file from temporary_directory $(TZ=${TIMEZONE} date)
##
$(ls -l "${TEMP_PATH}"/*)
$(ls -l "${temporary_directory}"/*)
## END
EOF
fi
run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
move_exit_code=$?
if var_true "${DEBUG_MOVE_DBBACKUP}"; then
cat <<EOF
## BEGIN After Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
## BEGIN After Moving file from temporary_directory $(TZ=${TIMEZONE} date)
##
$(ls -l "${TEMP_PATH}"/*)
$(ls -l "${temporary_directory}"/*)
## END
@@ -1575,43 +1661,49 @@ EOF
[[ ( -n "${backup_job_s3_host}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${backup_job_s3_protocol}://${backup_job_s3_host}"
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_PATH}/${backup_job_filename} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${backup_job_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${temporary_directory}/${backup_job_filename} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${backup_job_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
move_exit_code=$?
if [ "${backup_job_checksum}" != "none" ] ; then
silent run_as_user ${play_fair} aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_PATH}/*.${checksum_extension} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/ ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"."${checksum_extension}"
silent run_as_user ${play_fair} aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${temporary_directory}/*.${checksum_extension} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/ ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}"
fi
run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"
run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
;;
"blobxfer" )
write_log info "Synchronize local storage from S3 Bucket with blobxfer"
${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path} --delete
if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
else
write_log info "Synchronize local storage from blob container with blobxfer"
${play_fair} blobxfer download --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --restore-file-lmt --delete
write_log info "Moving backup to external storage with blobxfer"
mkdir -p "${backup_job_filesystem_path}"
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user mv "${TEMP_PATH}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/; fi
write_log info "Moving backup to external storage with blobxfer"
mkdir -p "${backup_job_filesystem_path}"
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user mv "${temporary_directory}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/; fi
run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path}
move_exit_code=$?
silent run_as_user ${play_fair} blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
move_exit_code=$?
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"."${checksum_extension}" ; fi
run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}" ; fi
run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
fi
;;
esac
else
write_log error "Skipping moving DB Backup to final location because backup did not complete successfully"
fi
run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"
run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
if var_true "${DEBUG_MOVE_DBBACKUP}" ; then debug off; fi
}
prepare_dbbackup() {
timer backup start
now=$(run_as_user date +"%Y%m%d-%H%M%S")
temporary_directory=$(mktemp -d -p "${TEMP_PATH}" -t ${backup_instance_number}_dbbackup.XXXXXX)
chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${temporary_directory}"
backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}
backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.sql
}
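prepare_dbbackup now gives every run its own scratch directory under TEMP_PATH, so concurrent jobs cannot clobber each other's dump files and cleanup is a single rm -rf (done in post_dbbackup). A minimal sketch of the same idea, with paths assumed and an EXIT trap standing in for the explicit cleanup:

    TEMP_PATH=/tmp/backups
    mkdir -p "${TEMP_PATH}"
    temporary_directory=$(mktemp -d "${TEMP_PATH}/01_dbbackup.XXXXXX")
    trap 'rm -rf "${temporary_directory}"' EXIT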
@@ -1726,6 +1818,8 @@ EOZP
write_log notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
if var_true "${DEBUG_POST_DBBACKUP}" ; then debug on; fi
cd "${TEMP_PATH}"
rm -rf "${temporary_directory}"
}
process_limiter() {
@@ -1766,7 +1860,7 @@ setup_mode() {
cat <<EOF > /etc/services.d/99-run_forever/run
#!/bin/bash
while true; do
sleep 86400
sleep 86400
done
EOF
chmod +x /etc/services.d/99-run_forever/run
@@ -1852,7 +1946,7 @@ timer() {
fi
done
validate_all=$(echo "${validate_all}" | tr ' ' '\n' | sort -n -u | tr '\n' ' ')
validate_all=$(echo "${validate_all}" | tr ' ' '\n' | sort -g -u | tr '\n' ' ')
for entry in $validate_all; do
if [ ${entry} -ge ${3} ]; then
echo "${entry}"
@@ -1869,12 +1963,12 @@ timer() {
local cron_compare_difference=$(( cron_compare - ${4} ))
if [ "${cron_compare_difference}" -lt 60 ]; then
cron_compare=$((${cron_compare} + $(( 60 - cron_compare_difference )) ))
cron_compare=$((cron_compare + $(( 60 - cron_compare_difference )) ))
fi
local cron_current_seconds="$(date --date=@"${cron_compare_seconds}" +"%-S")"
if [ "${cron_current_seconds}" -ne 0 ]; then
cron_compare_seconds=$(( cron_compare_seconds - cron_current_seconds ))
cron_compare=$(( cron_compare_seconds - cron_current_seconds ))
fi
local cron_minute="$(echo -n "${2}" | awk '{print $1}')"
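The seconds-trim above pins the comparison timestamp to a :00 boundary before any cron-field math, so a job scheduled for a given minute cannot be skipped just because the check ran mid-minute. A minimal sketch of the alignment step, assuming GNU date:

    now_epoch=$(date +%s)
    secs=$(date --date=@"${now_epoch}" +"%-S")
    if [ "${secs}" -ne 0 ]; then
        now_epoch=$(( now_epoch - secs ))
    fi
    date --date=@"${now_epoch}"    # always lands on :00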
@@ -1913,6 +2007,7 @@ timer() {
fi
cron_next_hour="${cron_next}"
cron_next_minute=0
fi
print_debug "[timer] [cron] Parse Day of Week"
@@ -1986,7 +2081,6 @@ timer() {
fi
cron_parsed=0
done
local cron_future=$(date --date="${cron_next_year}-$(printf "%02d" ${cron_next_month})-$(printf "%02d" ${cron_next_day_of_month})T$(printf "%02d" ${cron_next_hour}):$(printf "%02d" ${cron_next_minute}):00" "+%s")
local cron_future_difference=$(( cron_future - cron_compare_seconds ))
time_cron=true


@@ -912,6 +912,83 @@ get_filename() {
r_filename=${opt}
}
get_ssl() {
if grep -q "^DB${detected_host_num}_MYSQL_ENABLE_TLS=" "${restore_vars}" ; then
detected_ssl_value=$(grep "^DB${detected_host_num}_MYSQL_ENABLE_TLS=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
fi
if [[ -z "${detected_ssl_value}" ]]; then
print_debug "Parsed SSL Variant: 1 - No Env Variable Found"
default_ssl="false" # Default if no env variable
q_ssl_variant=1
q_ssl_menu_opt_default="| (${cwh}N${cdgy}) * "
q_ssl_menu="" #No menu option
else
print_debug "Parsed SSL Variant: 2 - Env Variable DB${detected_host_num}_MYSQL_ENABLE_TLS = '${detected_ssl_value}'"
default_ssl="${detected_ssl_value,,}"
q_ssl_variant=2
q_ssl_menu="E ) Environment Variable DB${detected_host_num}_MYSQL_ENABLE_TLS: '${detected_ssl_value}'"
q_ssl_menu_opt_default="| (${cwh}E${cdgy}) * "
fi
cat <<EOF
Do you wish to use SSL for the connection?
${q_ssl_menu}
Y ) Yes
N ) No
Q ) Quit
EOF
r_ssl=""
case "${q_ssl_variant}" in
1) # No env variable, ask directly
while true; do
read -r -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N\*${cdgy}\) : ${cwh}${coff}) " q_ssl
case "${q_ssl,,}" in
y*)
r_ssl="true"
break
;;
n* | "")
r_ssl="false"
break
;;
q*)
print_info "Quitting Script"
exit 1
;;
esac
done
;;
2) # Env variable exists, offer it as an option
while true; do
read -r -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E\*${cdgy}\) \| \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) : ${cwh}${coff}) " q_ssl
case "${q_ssl,,}" in
e* | "") # Default to env variable if just enter is pressed.
r_ssl="${detected_ssl_value}"
break
;;
y*)
r_ssl="true"
break
;;
n*)
r_ssl="false"
break
;;
q*)
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
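get_ssl implements an environment-default-with-interactive-override pattern: if a DBx_MYSQL_ENABLE_TLS value exists it becomes the Enter default, otherwise the user is asked outright, and an explicit Y/N answer always wins. A minimal sketch of the same pattern, with the variable name assumed:

    default="${DB1_MYSQL_ENABLE_TLS:-false}"
    read -r -p "Use SSL for the connection? [y/n, Enter = ${default}] " answer
    case "${answer,,}" in
        y* ) r_ssl=true ;;
        n* ) r_ssl=false ;;
        ""  ) r_ssl="${default}" ;;
    esac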
#### SCRIPT START
trap control_c INT
bootstrap_variables restore_init
@@ -984,6 +1061,20 @@ else
fi
print_debug "Database Port '${r_dbport}'"
## Question SSL connection
if [[ "${r_dbtype,,}" == "mariadb" || "${r_dbtype,,}" == "mysql" ]]; then
if [ -n "${8}" ]; then
r_ssl="${8}"
else
get_ssl
fi
print_debug "SSL enable: '${r_ssl}'"
else
r_ssl="false"
print_debug "SSL disabled for ${r_dbtype}"
fi
## Parse Extension
case "${r_filename##*.}" in
bz* )
@@ -1013,8 +1104,13 @@ esac
## Perform a restore
case "${r_dbtype}" in
mariadb | mysql )
if [[ "${r_ssl,,}" == "false" ]]; then
mysql_ssl_option="--disable-ssl"
else
mysql_ssl_option=""
fi
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
pv ${r_filename} | ${decompress_cmd}cat | mysql -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${r_dbname}
pv ${r_filename} | ${decompress_cmd}cat | mariadb -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${mysql_ssl_option} ${r_dbname}
exit_code=$?
;;
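The rebuilt restore command streams the dump straight into the client: pv provides a progress bar, the decompressor inflates on the fly, and --disable-ssl leans on the MySQL-style --disable- boolean prefix to turn --ssl off for servers without TLS support. A minimal sketch, with all credentials and names assumed:

    pv dump.sql.gz | zcat | mariadb -uroot -psecret -P3306 -hdb.example.com --disable-ssl mydb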
pgsql | postgres* )
@@ -1036,9 +1132,6 @@ EOF
echo -e "${coff}"
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) \| \(${cwh}Q${cdgy}\) : ${cwh}${coff})" q_menu_mongo_dropdb
case "${q_menu_mongo_dropdb,,}" in
"y" | "yes" | * )
mongo_dropdb="--drop"
;;
"n" | "update" )
unset mongo_dropdb
;;
@@ -1046,6 +1139,9 @@ EOF
print_info "Quitting Script"
exit 1
;;
"y" | "yes" | * )
mongo_dropdb="--drop"
;;
esac
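The reordering above fixes a classic case-statement pitfall: bash tries patterns top to bottom, and a catch-all * placed first shadows every later branch, which is why the MongoDB restore always dropped the schema regardless of the answer. A minimal sketch:

    case "${answer,,}" in
        "n" | "no" ) echo "keeping existing collections" ;;
        "q" )        exit 1 ;;
        * )          echo "using --drop (default)" ;;    # catch-all must come last
    esac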
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"