Mirror of https://github.com/tiredofit/docker-db-backup.git (synced 2025-12-22 05:33:53 +01:00)
Compare commits
56 Commits
Commits in this comparison:

2c8f40e37c, c360150117, 7c32879e80, a475f7d0f3, 399727cd37, f58de70dc4,
5ab0cbe7c5, 9d5406b6a9, 53760fbe4d, a72b562c70, fc586c204f, e9ed8d1a72,
78ac4a4a81, 949aafdbe1, 7a94472055, 23aeaf58a2, b88816337f, ac8181b3b5,
c75c41a34d, 244e411e76, e69ac23898, 261951045f, 67f4326d0b, 2cd62b8732,
0d2b3ccc8c, 90f53a7f00, c5f89da681, 753a780204, 7c07253428, 0fdb447706,
0d23c2645c, 4786ea9c7f, a26dba947b, b9fa7d18b1, 626d276c68, f7f72ba2c1,
2f05d76f4e, c9a634ff25, 0ce21e8f43, a700eb0fef, 7baa3774c7, 341e4d12ea,
5c51bbcb7e, 24d9a9a937, 591b8d6dbd, a5b15b4412, 6692cf9834, c37de5778d,
eeeafd6ab8, 17daf26084, b53cda99f7, 2cf3e2ae70, c7ee94aec2, f44233e51a,
ccda858b18, d58b27d5ef
.github/workflows/main.yml (vendored, 4 lines changed)
@@ -8,7 +8,7 @@ on:
 
 jobs:
   build:
-    uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
     #uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
-    #uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
+    uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
     secrets: inherit
.github/workflows/manual.yml (vendored, 4 lines changed)
@@ -9,7 +9,7 @@ on:
 
 jobs:
   build:
-    uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
     #uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
-    #uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
+    uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
     secrets: inherit
CHANGELOG.md (193 lines changed)
@@ -1,3 +1,196 @@
+## 4.1.20 2025-07-23 <dave at tiredofit dot ca>
+
+### Changed
+
+- Fix backup error checking routines #417 - credit alteriks@github
+
+## 4.1.19 2025-05-28 <dave at tiredofit dot ca>
+
+### Changed
+
+- Force overwrite manual scripts as opposed to appending (#414)
+
+## 4.1.18 2025-05-12 <dave at tiredofit dot ca>
+
+### Changed
+
+- Fix MongoDB restore so the DB is not dropped before each restore unless explicitly requested (credit logicoa@github)
+
+## 4.1.17 2025-04-17 <dave at tiredofit dot ca>
+
+### Changed
+
+- Fix issue with Postgres database cleanup when ALL databases are backed up as one file (SPLIT_DB=FALSE)
+
+## 4.1.16 2025-02-21 <dave at tiredofit dot ca>
+
+### Added
+
+- Update to tiredofit/alpine:7.10.28
+- Support TLS connectivity with restore script (credit fermion2020@github)
+
+## 4.1.15 2025-01-29 <dave at tiredofit dot ca>
+
+### Added
+
+- Add support for username and password when checking for connectivity to CouchDB (credit: JvSomeren)
+
+### Changed
+
+- Fix issue with CouchDB compression routines
+
+## 4.1.14 2025-01-21 <dave at tiredofit dot ca>
+
+### Changed
+
+- Downgrade AWS client to 1.36.40 for the time being due to provider incompatibilities with 1.37.x
+
+## 4.1.13 2025-01-21 <dave at tiredofit dot ca>
+
+### Added
+
+- Update MySQL client to 8.4.4
+- Update AWS client to 1.37.2
+
+### Changed
+
+- Separate MySQL and MariaDB TLS configuration for arguments that have deviated
+
+## 4.1.12 2024-12-13 <dave at tiredofit dot ca>
+
+### Changed
+
+- Fix for 4.1.11
+
+## 4.1.11 2024-12-13 <dave at tiredofit dot ca>
+
+### Changed
+
+- Fix when backing up 'ALL' databases with MariaDB
+
+## 4.1.10 2024-12-12 <dave at tiredofit dot ca>
+
+### Added
+
+- Use tiredofit/alpine:3.21-7.10.27 base
+- Use the actual binary name when dumping MariaDB and MySQL databases
+- Silence warnings appearing due to filenames, and SSL warnings re MariaDB / MySQL
+
+## 4.1.9 2024-11-07 <dave at tiredofit dot ca>
+
+### Added
+
+- Pin to tiredofit/alpine:edge-7.10.19
+- MySQL 8.4.3 client
+- MSSQL and MSODBC 18.4.1.1-1
+- MySQL 11.x Support
+- Influx2 Client 2.7.5
+- AWS Client 1.35.13
+- PostgreSQL 17.x Support
+
+## 4.1.8 2024-10-29 <dave at tiredofit dot ca>
+
+Rebuild using 4.1.4 sources - ignore any versions 4.1.5-4.1.7
+
+### Added
+
+## 4.1.4 2024-08-13 <dave at tiredofit dot ca>
+
+Please note that if you use encryption with a passphrase, you may have encountered issues with manual decryption. This release fixes that.
+If you try to manually decrypt and your passphrase fails, try wrapping it in single (') or double (") quotes.
+
+### Changed
+
+- Fix for stray quotes appearing inside ENCRYPT_PASSPHRASE variables
+## 4.1.3 2024-07-05 <dave at tiredofit dot ca>
+
+### Changed
+
+- Rebuild to support tiredofit/alpine:7.10.0
+
+## 4.1.2 2024-07-02 <effectivelywild@github>
+
+### Added
+
+- Add support for Azure Blob containers
+- Fix timestamps when comparing previous backups
+- Resolve unnecessary read operations in Azure
+- Resolve issues with backup cleanup operations in Azure
+
+## 4.1.1 2024-06-19 <dave at tiredofit dot ca>
+
+### Changed
+
+- Fix issue where PostgreSQL globals were not being deleted when backing up ALL (#352)
+
+## 4.1.0 2024-05-25 <dave at tiredofit dot ca>
+
+Note that arm/v7 builds have been removed from this release going forward
+
+### Added
+
+- Introduce DEFAULT/DBXX_MYSQL_CLIENT option to use mariadb or mysql for client dumping to solve incompatibility issues
+- Alpine 3.20 base
+- MariaDB 10.11.8 client
+- AWS client 1.32.113
+- MySQL client 8.4.0
+
+## 4.0.35 2024-01-14 <dave at tiredofit dot ca>
+
+### Changed
+
+- Fix issue with email notifications not being able to add a From statement
+
+## 4.0.34 2024-01-02 <dave at tiredofit dot ca>
+
+### Changed
+
+- Change the way architectures are detected to re-enable backups with MSSQL and Influx2
+
+## 4.0.33 2023-12-18 <dave at tiredofit dot ca>
+
+### Changed
+
+- Allow _OPTS variables to contain spaces
+- Switch references of _DUMP_OPTS to _BACKUP_OPTS
+
+## 4.0.32 2023-12-15 <dave at tiredofit dot ca>
+
+### Changed
+
+- Fix issue with directories not properly being backed up (InfluxDB)
+
+## 4.0.31 2023-12-12 <dave at tiredofit dot ca>
+
+### Changed
+
+- Support backing up databases with spaces in them
+
+## 4.0.30 2023-12-11 <dave at tiredofit dot ca>
+
+### Added
+
+- Separate each job into its own temporary folder for isolation and to better clean up jobs that back up as a directory instead of a flat file
+
+## 4.0.29 2023-12-04 <dave at tiredofit dot ca>
+
+### Changed
+
+- Skip blobxfer if either account or key is not present
+
+## 4.0.28 2023-12-04 <dave at tiredofit dot ca>
+
+### Changed
+
+- AWS CLI 1.31.5
+- Switch to using pip for installing the AWS CLI to remove deprecation warnings
+
+## 4.0.27 2023-12-04 <dave at tiredofit dot ca>
+
+### Changed
+
+- Switch to using an actual crontab for cron expressions
+
 ## 4.0.26 2023-11-30 <dave at tiredofit dot ca>
 
 ### Added
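A hedged aside on the 4.1.4 note above about manual decryption (an editor's sketch, not part of the diff; the filename is hypothetical). The README states encrypted backups carry a `.gpg` suffix, so a symmetric, passphrase-based decryption from a shell could look like this:

    # Quoting the passphrase protects embedded spaces and quote characters
    # (the stray-quote issue that 4.1.4 fixes).
    # gpg >= 2.1 may additionally need: --pinentry-mode loopback
    gpg --batch --passphrase 'my secret passphrase' \
        --output mysql_example_db_backup.sql.zst \
        --decrypt mysql_example_db_backup.sql.zst.gpg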
Dockerfile (76 lines changed)
@@ -1,21 +1,21 @@
 ARG DISTRO=alpine
-ARG DISTRO_VARIANT=edge
+ARG DISTRO_VARIANT=3.21-7.10.28
 
 FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"
 
-### Set Environment Variables
 ENV INFLUX1_CLIENT_VERSION=1.8.0 \
-    INFLUX2_CLIENT_VERSION=2.7.3 \
-    MSODBC_VERSION=18.3.2.1-1 \
-    MSSQL_VERSION=18.3.1.1-1 \
-    AWS_CLI_VERSION=1.31.4 \
+    INFLUX2_CLIENT_VERSION=2.7.5 \
+    MSODBC_VERSION=18.4.1.1-1 \
+    MSSQL_VERSION=18.4.1.1-1 \
+    MYSQL_VERSION=mysql-8.4.4 \
+    MYSQL_REPO_URL=https://github.com/mysql/mysql-server \
+    AWS_CLI_VERSION=1.36.40 \
     CONTAINER_ENABLE_MESSAGING=TRUE \
    CONTAINER_ENABLE_MONITORING=TRUE \
     IMAGE_NAME="tiredofit/db-backup" \
     IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
 
-### Dependencies
 RUN source /assets/functions/00-container && \
     set -ex && \
     addgroup -S -g 10000 dbbackup && \
@@ -27,11 +27,14 @@ RUN source /assets/functions/00-container && \
     build-base \
     bzip2-dev \
     cargo \
+    cmake \
     git \
     go \
     libarchive-dev \
+    libtirpc-dev \
     openssl-dev \
     libffi-dev \
+    ncurses-dev \
     python3-dev \
     py3-pip \
     xz-dev \
@@ -44,13 +47,16 @@ RUN source /assets/functions/00-container && \
     gpg-agent \
     groff \
     libarchive \
+    libtirpc \
     mariadb-client \
     mariadb-connector-c \
     mongodb-tools \
+    ncurses \
     openssl \
     pigz \
-    postgresql16 \
-    postgresql16-client \
+    pixz \
+    postgresql17 \
+    postgresql17-client \
     pv \
     py3-botocore \
     py3-colorama \
@@ -69,37 +75,49 @@ RUN source /assets/functions/00-container && \
     zstd \
     && \
     \
-    apkArch="$(uname -m)"; \
-    case "$apkArch" in \
-        x86_64) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
-        arm64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
+    case "$(uname -m)" in \
+        "x86_64" ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
+        "arm64" | "aarch64" ) mssql=true ; mssql_arch=arm64; influx2=true ; influx_arch=arm64 ;; \
         *) sleep 0.1 ;; \
     esac; \
     \
-    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
-    if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
-    clone_git_repo https://github.com/aws/aws-cli "${AWS_CLI_VERSION}" && \
-    python3 setup.py install --prefix=/usr && \
+    if [ "${mssql,,}" = "true" ] ; then \
+        curl -sSLO https://download.microsoft.com/download/7/6/d/76de322a-d860-4894-9945-f0cc5d6a45f8/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; \
+        curl -sSLO https://download.microsoft.com/download/7/6/d/76de322a-d860-4894-9945-f0cc5d6a45f8/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; \
+        echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; \
+    else \
+        echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; \
+    fi; \
+    \
+    if [ "${influx2,,}" = "true" ] ; then \
+        curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; \
+        chmod +x /usr/src/influx ; \
+        mv /usr/src/influx /usr/sbin/ ; \
+    else \
+        echo >&2 "Unable to build Influx 2 on this system" ; \
+    fi ; \
+    \
     clone_git_repo https://github.com/influxdata/influxdb "${INFLUX1_CLIENT_VERSION}" && \
     go build -o /usr/sbin/influxd ./cmd/influxd && \
     strip /usr/sbin/influxd && \
+    \
+    clone_git_repo "${MYSQL_REPO_URL}" "${MYSQL_VERSION}" && \
+    cmake \
+        -DCMAKE_BUILD_TYPE=MinSizeRel \
+        -DCMAKE_INSTALL_PREFIX=/opt/mysql \
+        -DFORCE_INSOURCE_BUILD=1 \
+        -DWITHOUT_SERVER:BOOL=ON \
+        && \
+    make -j$(nproc) install && \
+    \
+    pip3 install --break-system-packages awscli==${AWS_CLI_VERSION} && \
+    pip3 install --break-system-packages blobxfer && \
+    \
     mkdir -p /usr/src/pbzip2 && \
     curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
     cd /usr/src/pbzip2 && \
     make && \
     make install && \
-    mkdir -p /usr/src/pixz && \
-    curl -sSL https://github.com/vasi/pixz/releases/download/v1.0.7/pixz-1.0.7.tar.xz | tar xvfJ - --strip 1 -C /usr/src/pixz && \
-    cd /usr/src/pixz && \
-    ./configure \
-        --prefix=/usr \
-        --sysconfdir=/etc \
-        --localstatedir=/var \
-    && \
-    make && \
-    make install && \
-    \
-    pip3 install --break-system-packages blobxfer && \
     \
     package remove .db-backup-build-deps && \
     package cleanup && \
LICENSE (2 lines changed)
@@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright (c) 2023 Dave Conroy
+Copyright (c) 2025 Dave Conroy
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
README.md (29 lines changed)
@@ -267,6 +267,7 @@ Encryption occurs after compression and the encrypted filename will have a `.gpg`
 | `DEFAULT_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
 | `DEFAULT_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
 | `DEFAULT_EXTRA_OPTS` | Pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
+| `DEFAULT_MYSQL_CLIENT` | Choose between `mariadb` or `mysql` client to perform dump operations for compatibility purposes | `mariadb` | |
 | `DEFAULT_MYSQL_EVENTS` | Backup Events | `TRUE` | |
 | `DEFAULT_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
 | `DEFAULT_MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction | `TRUE` | |
@@ -325,7 +326,7 @@ If `DEFAULT_BACKUP_LOCTION` = `FILESYSTEM` then the following options are used:
 
 | Variable | Description | Default |
 | ------------------------------------ | ----------------------------------------------------------------------------------------------------- | ------------------------------------- |
-| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
+| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)_(DB_NAME)_(DB_HOST)` | `TRUE` |
 | `DEFAULT_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
 | `DEFAULT_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
 | `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` |
@@ -355,11 +356,14 @@ If `DEFAULT_BACKUP_LOCATION` = `S3` then the following options are used:
 
 If `DEFAULT_BACKUP_LOCATION` = `blobxfer` then the following options are used:.
 
 | Parameter | Description | Default | `_FILE` |
-| -------------------------------------- | ------------------------------------------- | ------------------- | ------- |
+| -------------------------------------- | ------------------------------------------------------------------- | ------------------- | ------- |
 | `DEFAULT_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
 | `DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
 | `DEFAULT_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
+| `DEFAULT_BLOBXFER_MODE` | Azure Storage mode e.g. `auto`, `file`, `append`, `block` or `page` | `auto` | x |
+
+- When `DEFAULT_BLOBXFER_MODE` is set to auto it will use blob containers by default. If the `DEFAULT_BLOBXFER_REMOTE_PATH` path does not exist a blob container with that name will be created.
 
 > This service uploads files from backup targed directory `DEFAULT_FILESYSTEM_PATH`.
 > If the a cleanup configuration in `DEFAULT_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
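A hedged configuration sketch for the new blobxfer mode option (an editor's illustration, not part of the diff; the storage account name and key are placeholders):

    # Send backups to an Azure blob container in auto mode; per the note above,
    # a blob container named after DEFAULT_BLOBXFER_REMOTE_PATH is created if missing.
    docker run -d --name db-backup \
        -e DEFAULT_BACKUP_LOCATION=blobxfer \
        -e DEFAULT_BLOBXFER_STORAGE_ACCOUNT=mystorageaccount \
        -e DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY='examplekey==' \
        -e DEFAULT_BLOBXFER_REMOTE_PATH=/docker-db-backup \
        -e DEFAULT_BLOBXFER_MODE=auto \
        tiredofit/db-backup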
@@ -635,11 +639,14 @@ If `DB01_BACKUP_LOCATION` = `S3` then the following options are used:
 
 If `DB01_BACKUP_LOCATION` = `blobxfer` then the following options are used:.
 
 | Parameter | Description | Default | `_FILE` |
-| ----------------------------------- | ------------------------------------------- | ------------------- | ------- |
+| -------------------------------------- | ------------------------------------------------------------------- | ------------------- | ------- |
 | `DB01_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
 | `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
 | `DB01_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
+| `DB01_BLOBXFER_REMOTE_MODE` | Azure Storage mode e.g. `auto`, `file`, `append`, `block` or `page` | `auto` | x |
+
+- When `DEFAULT_BLOBXFER_MODE` is set to auto it will use blob containers by default. If the `DEFAULT_BLOBXFER_REMOTE_PATH` path does not exist a blob container with that name will be created.
 
 > This service uploads files from backup directory `DB01_BACKUP_FILESYSTEM_PATH`.
 > If the a cleanup configuration in `DB01_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
@@ -31,8 +31,8 @@ services:
       - BACKUP_JOB_CONCURRENCY=1 # Only run one job at a time
       - DEFAULT_CHECKSUM=NONE # Don't create checksums
       - DEFAULT_COMPRESSION=ZSTD # Compress all with ZSTD
-      - DEFAULT_DUMP_INTERVAL=1440 # Backup every 1440 minutes
-      - DEFAULT_DUMP_BEGIN=0000 # Start backing up at midnight
+      - DEFAULT_BACKUP_INTERVAL=1440 # Backup every 1440 minutes
+      - DEFAULT_BACKUP_BEGIN=0000 # Start backing up at midnight
       - DEFAULT_CLEANUP_TIME=8640 # Cleanup backups after a week
 
       - DB01_TYPE=mariadb
@@ -40,8 +40,8 @@ services:
       - DB01_NAME=example
       - DB01_USER=example
       - DB01_PASS=examplepassword
-      - DB01_DUMP_INTERVAL=30 # (override) Backup every 30 minutes
-      - DB01_DUMP_BEGIN=+1 # (override) Backup starts immediately
+      - DB01_BACKUP_INTERVAL=30 # (override) Backup every 30 minutes
+      - DB01_BACKUP_BEGIN=+1 # (override) Backup starts immediately
       - DB01_CLEANUP_TIME=180 # (override) Cleanup backups they are older than 180 minutes
       - DB01_CHECKSUM=SHA1 # (override) Create a SHA1 checksum
       - DB01_COMPRESSION=GZ # (override) Compress with GZIP
@@ -51,8 +51,8 @@ services:
       #- DB02_NAME=example
       #- DB02_USER=example
       #- DB02_PASS=examplepassword
-      #- DB02_DUMP_INTERVAL=60 # (override) Backup every 60 minutes
-      #- DB02_DUMP_BEGIN=+10 # (override) Backup starts in ten minutes
+      #- DB02_BACKUP_INTERVAL=60 # (override) Backup every 60 minutes
+      #- DB02_BACKUP_BEGIN=+10 # (override) Backup starts in ten minutes
       #- DB02_CLEANUP_TIME=240 # (override) Cleanup backups they are older than 240 minutes
       #- DB02_CHECKSUM=MD5 # (override) Create a SHA1 checksum
       #- DB02_COMPRESSION=BZ # (override) Compress with BZIP
@@ -45,7 +45,7 @@ services:
       - DB01_NAME=test1 # Create this database
       - DB01_USER=sa
       - DB01_PASS=5hQa0utRFBpIY3yhoIyE
-      - DB01_DUMP_INTERVAL=5 # backup every 5 minute
+      - DB01_BACKUP_INTERVAL=5 # backup every 5 minute
       # - DB01_DUMP_BEGIN=0000 # backup starts at midnight vs not set immediately
       - DB01_CLEANUP_TIME=60 # clean backups they are older than 60 minutes
       - DB01_CHECKSUM=SHA1 # Set Checksum to be SHA1
@@ -57,7 +57,7 @@ services:
       # Add here azure storage account
       - DB01_BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name}
       # Add here azure storage account key
-      - SB01_BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
+      - DB01_BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
       - DB01_BLOBXFER_REMOTE_PATH=docker-db-backup
     restart: always
     networks:
@@ -46,7 +46,7 @@ services:
       - DB01_NAME=test1
       - DB01_USER=sa
       - DB01_PASS=5hQa0utRFBpIY3yhoIyE
-      - DB01_DUMP_INTERVAL=1 # backup every minute
+      - DB01_BACKUP_INTERVAL=1 # backup every minute
       # - DB01_DUMP_BEGIN=0000 # backup starts at midnight vs unset immediately
       - DB01_CLEANUP_TIME=5 # clean backups they are older than 5 minute
       - DB01_CHECKSUM=NONE
@@ -24,7 +24,6 @@ else
     silent sleep {{BACKUP_NUMBER}}
     time_last_run=0
    time_current=$(date +'%s')
-
    if [[ "${backup_job_backup_begin}" =~ ^\+(.*)$ ]]; then
        print_debug "BACKUP_BEGIN is a jump of minute starting with +"
        timer plusvalue
@@ -37,10 +36,18 @@ else
    #elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(.*((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then # Allow slashes, yet not supporting advanced cron yet
    elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then
        print_debug "BACKUP_BEGIN is a cron expression"
-        time_last_run=$(date +"%s")
-        backup_job_backup_begin=${backup_job_backup_begin//\"/}
-        backup_job_backup_begin=${backup_job_backup_begin//\'/}
-        timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
+        if var_false "${CRON_ALTERNATE}"; then
+            time_last_run=$(date +"%s")
+            backup_job_backup_begin=${backup_job_backup_begin//\"/}
+            backup_job_backup_begin=${backup_job_backup_begin//\'/}
+            timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
+        else
+            echo "${backup_job_backup_begin} /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}/run now" > /tmp/.container/cron/{{BACKUP_NUMBER}}-backup
+            crontab -l | { cat; echo "${backup_job_backup_begin} /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}/run now"; } | crontab -
+            s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
+            exit 0
+        fi
    else
        print_error "_BACKUP_BEGIN is invalid - Unable to perform scheduling"
        cat <<EOF
@@ -100,8 +107,8 @@ while true; do
        s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
    else
        if [ ! "${time_cron}" = "true" ]; then
-            print_notice "Sleeping for another $(($backup_job_backup_interval*60-backup_job_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($backup_job_backup_interval*60-backup_job_total_time))))" +'%Y-%m-%d %T %Z') "
-            silent sleep $(($backup_job_backup_interval*60-backup_job_total_time))
+            print_notice "Sleeping for another $((backup_job_backup_interval*60-backup_job_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$((backup_job_backup_interval*60-backup_job_total_time))))" +'%Y-%m-%d %T %Z') "
+            silent sleep $((backup_job_backup_interval*60-backup_job_total_time))
        else
            time_last_run=$(date +"%s")
            timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
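A hedged sketch of the `_BACKUP_BEGIN` forms this scheduling logic accepts (an editor's illustration; values are examples only):

    DB01_BACKUP_BEGIN=+10          # relative: start 10 minutes after container start
    DB01_BACKUP_BEGIN=0000         # absolute HHMM: start at midnight, then every
                                   # _BACKUP_INTERVAL minutes
    DB01_BACKUP_BEGIN='30 3 * * *' # cron expression; with CRON_ALTERNATE at its
                                   # default of TRUE it is handed to the real crontab
                                   # (the 4.0.27 change), otherwise the internal
                                   # timer parses it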
@@ -1,13 +1,14 @@
 #!/command/with-contenv bash
 
 BACKUP_JOB_CONCURRENCY=${BACKUP_JOB_CONCURRENCY:-"1"}
+CRON_ALTERNATE=${CRON_ALTERNATE:-"TRUE"}
 DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
 DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
 DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
 DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
-DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
 DEFAULT_BACKUP_LOCATION=${DEFAULT_BACKUP_LOCATION:-"FILESYSTEM"}
 DEFAULT_BLOBXFER_REMOTE_PATH=${DEFAULT_BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
+DEFAULT_BLOBXFER_MODE=${DEFAULT_BLOBXFER_MODE:-"auto"}
 DEFAULT_CHECKSUM=${DEFAULT_CHECKSUM:-"MD5"}
 DEFAULT_COMPRESSION=${DEFAULT_COMPRESSION:-"ZSTD"}
 DEFAULT_COMPRESSION_LEVEL=${DEFAULT_COMPRESSION_LEVEL:-"3"}
@@ -19,6 +20,7 @@ DEFAULT_FILESYSTEM_PATH_PERMISSION=${DEFAULT_FILESYSTEM_PATH_PERMISSION:-"700"}
 DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
 DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"}
 DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"}
+DEFAULT_MYSQL_CLIENT=${DEFAULT_MYSQL_CLIENT:-"mariadb"}
 DEFAULT_MYSQL_ENABLE_TLS=${DEFAULT_MYSQL_ENABLE_TLS:-"FALSE"}
 DEFAULT_MYSQL_EVENTS=${DEFAULT_MYSQL_EVENTS:-"TRUE"}
 DEFAULT_MYSQL_MAX_ALLOWED_PACKET=${DEFAULT_MYSQL_MAX_ALLOWED_PACKET:-"512M"}
@@ -66,6 +66,7 @@ bootstrap_variables() {
                 DEFAULT_BLOBXFER_STORAGE_ACCOUNT \
                 DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY \
                 DEFAULT_BLOBXFER_REMOTE_PATH \
+                DEFAULT_BLOBXFER_MODE \
                 DB"${backup_instance_number}"_AUTH \
                 DB"${backup_instance_number}"_TYPE \
                 DB"${backup_instance_number}"_HOST \
@@ -93,6 +94,7 @@ bootstrap_variables() {
                 DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT \
                 DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \
                 DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \
+                DB"${backup_instance_number}"_BLOBXFER_MODE \
                 BLOBXFER_STORAGE_ACCOUNT \
                 BLOBXFER_STORAGE_ACCOUNT_KEY \
                 DB_HOST \
@@ -153,16 +155,31 @@ bootstrap_variables() {
            fi
            ##
 
+            if grep -qo ".*_NAME='.*'" "${backup_instance_vars}"; then
+                print_debug "[bootstrap_variables] [backup_init] Found _NAME variable with quotes"
+                sed -i "s|_NAME='\(.*\)'|_NAME=\1|g" "${backup_instance_vars}"
+            fi
+
            if grep -qo ".*_PASS='.*'" "${backup_instance_vars}"; then
                print_debug "[bootstrap_variables] [backup_init] Found _PASS variable with quotes"
                sed -i "s|_PASS='\(.*\)'|_PASS=\1|g" "${backup_instance_vars}"
            fi
 
+            if grep -qo ".*_PASSPHRASE='.*'" "${backup_instance_vars}"; then
+                print_debug "[bootstrap_variables] [backup_init] Found _PASSPHRASE variable with quotes"
+                sed -i "s|_PASSPHRASE='\(.*\)'|_PASSPHRASE=\1|g" "${backup_instance_vars}"
+            fi
+
            if grep -qo "MONGO_CUSTOM_URI='.*'" "${backup_instance_vars}"; then
                print_debug "[bootstrap_variables] [backup_init] Found _MONGO_CUSTOM_URI variable with quotes"
                sed -i "s|MONGO_CUSTOM_URI='\(.*\)'|MONGO_CUSTOM_URI=\1|g" "${backup_instance_vars}"
            fi
 
+            if grep -qo ".*_OPTS='.*'" "${backup_instance_vars}"; then
+                print_debug "[bootstrap_variables] [backup_init] Found _OPTS variable with quotes"
+                sed -i "s|_OPTS='\(.*\)'|_OPTS=\1|g" "${backup_instance_vars}"
+            fi
+
    transform_backup_instance_variable() {
        if grep -q "^DB${1}_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
            export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
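A hedged before/after illustration of the quote-stripping above (an editor's sketch; the variable-file line is invented for the example):

    # A captured line such as:   DB01_ENCRYPT_PASSPHRASE='my phrase'
    # is rewritten in place to:  DB01_ENCRYPT_PASSPHRASE=my phrase
    sed "s|_PASSPHRASE='\(.*\)'|_PASSPHRASE=\1|g" <<< "DB01_ENCRYPT_PASSPHRASE='my phrase'"
    # prints: DB01_ENCRYPT_PASSPHRASE=my phrase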
@@ -189,6 +206,7 @@ bootstrap_variables() {
            transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path
            transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account
            transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key
+            transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_MODE backup_job_blobxfer_mode
            transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum
            transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time
            transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression
@@ -199,7 +217,7 @@ bootstrap_variables() {
            transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PASSPHRASE backup_job_encrypt_passphrase
            transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PRIVATE_KEY backup_job_encrypt_private_key
            transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBLIC_KEY backup_job_encrypt_public_key
-            transform_backup_instance_variable "${backup_instance_number}" EXTRA_DUMP_OPTS backup_job_extra_dump_opts
+            transform_backup_instance_variable "${backup_instance_number}" EXTRA_BACKUP_OPTS backup_job_extra_backup_opts
            transform_backup_instance_variable "${backup_instance_number}" EXTRA_ENUMERATION_OPTS backup_job_extra_enumeration_opts
            transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts
            transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path
@@ -211,6 +229,7 @@ bootstrap_variables() {
            transform_backup_instance_variable "${backup_instance_number}" INFLUX_VERSION backup_job_influx_version
            transform_backup_instance_variable "${backup_instance_number}" LOG_LEVEL backup_job_log_level
            transform_backup_instance_variable "${backup_instance_number}" MONGO_CUSTOM_URI backup_job_mongo_custom_uri
+            transform_backup_instance_variable "${backup_instance_number}" MYSQL_CLIENT backup_job_mysql_client
            transform_backup_instance_variable "${backup_instance_number}" MYSQL_ENABLE_TLS backup_job_mysql_enable_tls
            transform_backup_instance_variable "${backup_instance_number}" MYSQL_EVENTS backup_job_mysql_events
            transform_backup_instance_variable "${backup_instance_number}" MYSQL_MAX_ALLOWED_PACKET backup_job_mysql_max_allowed_packet
@@ -391,9 +410,33 @@ EOF
        dbtype=mysql
        backup_job_db_port=${backup_job_db_port:-3306}
        check_var backup_job_db_name DB"${v_instance}"_NAME "database name. Seperate multiple with commas"
+        case "${backup_job_mysql_client,,}" in
+            mariadb )
+                _mysql_prefix=/usr/bin/
+                _mysql_bin_prefix=mariadb-
+            ;;
+            mysql )
+                _mysql_prefix=/opt/mysql/bin/
+                _mysql_bin_prefix=mysql
+            ;;
+            * )
+                print_error "I don't understand '${backup_job_mysql_client,,}' as a client. Exiting.."
+                exit 99
+            ;;
+        esac
+
+        print_debug "Using '${backup_job_mysql_client,,}' as client"
 
        if [ -n "${backup_job_db_pass}" ] ; then export MYSQL_PWD=${backup_job_db_pass} ; fi
        if var_true "${backup_job_mysql_enable_tls}" ; then
+            case "${backup_job_mysql_client,,}" in
+                mariadb )
+                    mysql_tls_args="--ssl"
+                ;;
+                mysql )
+                    mysql_tls_args="--ssl-mode=REQUIRED"
+                ;;
+            esac
            if [ -n "${backup_job_mysql_tls_ca_file}" ] ; then
                mysql_tls_args="--ssl_ca=${backup_job_mysql_tls_ca_file}"
            fi
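A hedged note on where the two client prefixes above point (an editor's sketch; the exact dump invocation the script composes from these prefixes is assumed, not shown in this hunk):

    # DEFAULT_MYSQL_CLIENT=mariadb -> stock Alpine client tools:
    /usr/bin/mariadb-dump --version
    # DEFAULT_MYSQL_CLIENT=mysql -> the Oracle client built into /opt/mysql:
    /opt/mysql/bin/mysqldump --version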
@@ -405,12 +448,28 @@ EOF
            fi
 
            if var_true "${backup_job_mysql_tls_verify}" ; then
-                mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
+                case "${backup_job_mysql_client,,}" in
+                    mariadb )
+                        mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
+                    ;;
+                    mysql )
+                        mysql_tls_args="${mysql_tls_args} --ssl-mode=VERIFY_CA"
+                    ;;
+                esac
            fi
 
            if [ -n "${backup_job_mysql_tls_version}" ] ; then
                mysql_tls_args="${mysql_tls_args} --tls_version=${backup_job_mysql_tls_version}"
            fi
+        else
+            case "${backup_job_mysql_client,,}" in
+                mariadb )
+                    mysql_tls_args="--disable-ssl"
+                ;;
+                mysql )
+                    mysql_tls_args="--ssl-mode=DISABLED"
+                ;;
+            esac
        fi
    ;;
    "mssql" | "microsoftsql" )
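A hedged summary of the TLS flags each branch yields, with a hypothetical invocation (an editor's sketch; host and database names are placeholders):

    # mariadb client: --ssl, plus --sslverify-server-cert when verification is on,
    #                 or --disable-ssl when TLS is off
    # mysql client:   --ssl-mode=REQUIRED / VERIFY_CA / DISABLED respectively
    /opt/mysql/bin/mysqldump --ssl-mode=REQUIRED -h db.example.com -u backup exampledb > exampledb.sql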
@@ -473,7 +532,7 @@ backup_couch() {
    prepare_dbbackup
    backup_job_filename=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt
    backup_job_filename_base=couch_${backup_job_db_name}_${backup_job_db_host#*//}
-    compressionzyclonite
+    compression
    if var_true "${DEBUG_BACKUP_COUCH}" ; then debug off; fi
    check_exit_code backup "${backup_job_filename}"
    timer backup finish
@@ -486,7 +545,7 @@ backup_couch() {
 }
 
 backup_influx() {
    if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
    if [ "${backup_job_db_name,,}" = "all" ] ; then
        write_log debug "[backup_influx] Preparing to back up everything"
        db_names=justbackupeverything
@@ -500,28 +559,25 @@ backup_influx() {
        print_debug "[backup_influx] Influx DB Version 1 selected"
        for db in ${db_names}; do
            prepare_dbbackup
            if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
            if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
            backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}
            backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
            if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
-            compression
            pre_dbbackup "${db}"
            write_log notice "Dumping Influx database: '${db}'"
            if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
-            run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} "${TEMP_PATH}"/"${backup_job_filename_dir}"
+            run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} "${temporary_directory}"/"${backup_job_filename}"
            exit_code=$?
-            check_exit_code backup "${backup_job_filename_dir}"
-            write_log notice "Creating archive file of '${backup_job_filename_dir}' with tar ${compression_string}"
-            run_as_user tar cf - "${TEMP_PATH}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
-            backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
-            backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
+            check_exit_code backup "${backup_job_filename}"
+            compression
+            create_archive
            if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
            timer backup finish
            file_encryption
            generate_checksum
            move_dbbackup
-            check_exit_code move "${backup_job_filename_dir}"
+            check_exit_code move "${backup_job_filename}"
            post_dbbackup "${db}"
            cleanup_old_data
        done
@@ -535,22 +591,20 @@ backup_influx() {
            if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
            backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}
            backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
-            compression
            pre_dbbackup "${db}"
            write_log notice "Dumping Influx2 database: '${db}'"
            if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
-            run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --compression none "${TEMP_PATH}"/"${backup_job_filename_dir}"
+            run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --compression none "${temporary_directory}"/"${backup_job_filename}"
            exit_code=$?
-            check_exit_code backup "${backup_job_filename_dir}"
-            if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
+            check_exit_code backup "${backup_job_filename}"
+            compression
            create_archive
-            backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
-            backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
+            if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
            timer backup finish
            file_encryption
            generate_checksum
            move_dbbackup
-            check_exit_code move "${backup_job_filename_dir}"
+            check_exit_code move "${backup_job_filename}"
            post_dbbackup "${db}"
            cleanup_old_data
        done
@@ -571,15 +625,15 @@ backup_mongo() {
        compression_string="and compressing with gzip"
    fi
    if [ -n "${backup_job_mongo_custom_uri}" ] ; then
-        mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
+        mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}"
    else
-        mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
+        mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}"
    fi
    if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
    pre_dbbackup "${backup_job_db_name}"
-    write_log notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
+    write_log notice "Dumping MongoDB database: '${backup_job_db_name}' ${compression_string}"
    if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi
-    silent run_as_user ${play_fair} mongodump --archive=${TEMP_PATH}/${backup_job_filename} ${mongo_compression} ${mongo_backup_parameter}
+    silent run_as_user ${play_fair} mongodump --archive=${temporary_directory}/${backup_job_filename} ${mongo_compression} ${mongo_backup_parameter}
    exit_code=$?
    if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
    check_exit_code backup "${backup_job_filename}"
@@ -600,16 +654,16 @@ backup_mssql() {
    backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.bak
    backup_job_filename_base=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}
    pre_dbbackup "${backup_job_db_name}"
-    write_log notice "Dumping MSSQL database: '${DB_NAME}'"
+    write_log notice "Dumping MSSQL database: '${backup_job_db_name}'"
    if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
-    silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP DATABASE [${backup_job_db_name}] TO DISK = N'${TEMP_PATH}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+    silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP DATABASE [${backup_job_db_name}] TO DISK = N'${temporary_directory}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
    exit_code=$?
    if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
    backup_job_filename_original=${backup_job_filename}
    compression
    pre_dbbackup all
    if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
-    run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
+    run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
    check_exit_code backup "${backup_job_filename}"
    if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
    timer backup finish
@@ -625,16 +679,15 @@ backup_mssql() {
 backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.trn
 backup_job_filename_base=mssql_${backup_job_db_name,,}_trn_${backup_job_db_host,,}
 pre_dbbackup "${backup_job_db_name}"
-write_log notice "Dumping MSSQL database: '${DB_NAME}'"
+write_log notice "Dumping MSSQL database: '${backup_job_db_name}'"
 if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
-silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP LOG [${backup_job_db_name}] TO DISK = N'${TEMP_PATH}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-log', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP LOG [${backup_job_db_name}] TO DISK = N'${temporary_directory}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-log', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
 exit_code=$?
 if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
 backup_job_filename_original=${backup_job_filename}
 compression
 pre_dbbackup all
-run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
+run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
-check_exit_code backup "${backup_job_filename}"
 file_encryption
 timer backup finish
 generate_checksum
@@ -660,7 +713,7 @@ backup_mysql() {

 if [ "${backup_job_db_name,,}" = "all" ] ; then
 write_log debug "Preparing to back up everything except for information_schema and _* prefixes"
-db_names=$(run_as_user mysql -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
+db_names=$(run_as_user ${_mysql_prefix}${_mysql_bin_prefix/-/} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
 if [ -n "${backup_job_db_name_exclude}" ] ; then
 db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n')
 for db_exclude in ${db_names_exclusions} ; do
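A hedged sketch of the client-selection trick introduced above: `_mysql_prefix` and `_mysql_bin_prefix` are assumed to be set elsewhere in the image so one code path can call either the MariaDB or the Oracle MySQL tools; `${_mysql_bin_prefix/-/}` strips the trailing dash to recover the plain client name. Values below are illustrative:

```bash
_mysql_prefix=""              # could carry a path such as /usr/bin/
_mysql_bin_prefix="mariadb-"  # or "mysql" for the Oracle client

echo "client: ${_mysql_prefix}${_mysql_bin_prefix/-/}"    # -> mariadb
echo "dumper: ${_mysql_prefix}${_mysql_bin_prefix}dump"   # -> mariadb-dump
echo "admin:  ${_mysql_prefix}${_mysql_bin_prefix}admin"  # -> mariadb-admin
```

With `_mysql_bin_prefix="mysql"` the same expansions yield `mysql`, `mysqldump` and `mysqladmin`.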
@@ -677,14 +730,14 @@ backup_mysql() {
 if var_true "${backup_job_split_db}" ; then
 for db in ${db_names} ; do
 prepare_dbbackup
-backup_job_filename=mysql_${db}_${backup_job_db_host,,}_${now}.sql
+backup_job_filename=${backup_job_mysql_client,,}_${db}_${backup_job_db_host,,}_${now}.sql
-backup_job_filename_base=mysql_${db}_${backup_job_db_host,,}
+backup_job_filename_base=${backup_job_mysql_client,,}_${db}_${backup_job_db_host,,}
 compression
 pre_dbbackup "${db}"
 write_log notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
 if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
-run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} $db | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
+run_as_user ${play_fair} ${_mysql_prefix}${_mysql_bin_prefix}dump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
-exit_code=$?
+exitcode=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
 if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
 check_exit_code backup "${backup_job_filename}"
 timer backup finish
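The `$?` to `PIPESTATUS` change above is the substance of the error-checking fix: `$?` after a pipeline reflects only the final `tee`, so a failing dump earlier in the pipe reported success. A minimal, bash-only demonstration (the deliberately failing `false` stands in for a broken dump):

```bash
false | gzip | tee /dev/null > /dev/null
echo "last-stage code: $?"          # 0 - the dump failure is masked

false | gzip | tee /dev/null > /dev/null
code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
echo "aggregated code: ${code}"     # 1 - the failure is visible
```

`PIPESTATUS` holds one exit code per stage and is reset by the next command, so it must be read immediately after the pipeline, exactly as the diff does.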
@@ -698,14 +751,14 @@ backup_mysql() {
 else
 write_log debug "Not splitting database dumps into their own files"
 prepare_dbbackup
-backup_job_filename=mysql_all_${backup_job_db_host,,}_${now}.sql
+backup_job_filename=${backup_job_mysql_client,,}_all_${backup_job_db_host,,}_${now}.sql
-backup_job_filename_base=mysql_all_${backup_job_db_host,,}
+backup_job_filename_base=${backup_job_mysql_client,,}_all_${backup_job_db_host,,}
 compression
 pre_dbbackup all
 write_log notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
 if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
-run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
+run_as_user ${play_fair} ${_mysql_prefix}${_mysql_bin_prefix}dump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
-exit_code=$?
+exitcode=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
 if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
 check_exit_code backup "${backup_job_filename}"
 timer backup finish
@@ -722,12 +775,13 @@ backup_pgsql() {
 backup_pgsql_globals() {
 prepare_dbbackup
 backup_job_filename=pgsql_globals_${backup_job_db_host,,}_${now}.sql
+backup_job_global_base=pgsql_globals_${backup_job_db_host,,}
 compression
 pre_dbbackup "globals"
 print_notice "Dumping PostgresSQL globals: with 'pg_dumpall -g' ${compression_string}"
 if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
-run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
+run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
-exit_code=$?
+exitcode=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
 if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
 check_exit_code "${backup_job_filename}"
 timer backup finish
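For context, roles and tablespaces are cluster-wide "globals" that per-database `pg_dump` output does not contain, which is what the separate `pg_dumpall -g` pass captures. A hedged sketch with placeholder connection values:

```bash
pg_dumpall -h db.example.com -p 5432 -U postgres -g \
    | gzip \
    | tee /tmp/pgsql_globals_db.example.com.sql.gz > /dev/null
```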
@@ -755,7 +809,7 @@ backup_pgsql() {
 write_log debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
 db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
 done
 _postgres_backup_globals=true
 fi
 else
 db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
@@ -775,8 +829,8 @@ backup_pgsql() {
 pre_dbbackup "${db}"
 write_log notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
 if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
-run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
+run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
-exit_code=$?
+exitcode=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
 if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
 check_exit_code backup "${backup_job_filename}"
 timer backup finish
@@ -792,7 +846,11 @@ backup_pgsql() {
 write_log debug "Not splitting database dumps into their own files"
 prepare_dbbackup
 backup_job_filename=pgsql_all_${backup_job_db_host,,}_${now}.sql
-backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,}
+if [ "${backup_job_db_name,,}" = "all" ] ; then
+backup_job_filename_base=pgsql_all_${backup_job_db_host,,}
+else
+backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,}
+fi
 compression
 pre_dbbackup all
 write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
@@ -805,8 +863,8 @@ backup_pgsql() {
 for x_db_name in ${tmp_db_names} ; do
 pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
 done
-run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
+run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
-exit_code=$?
+exitcode=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
 if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
 check_exit_code backup "${backup_job_filename}"
 timer backup finish
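A hedged sketch of how the `--exclude-database` arguments above are assembled from a comma-separated exclusion list (database names are placeholders):

```bash
exclusions="template0,template1,stagingdb"
pgexclude_arg=""
for x_db_name in $(echo "${exclusions}" | tr ',' '\n'); do
    pgexclude_arg="${pgexclude_arg} --exclude-database=${x_db_name}"
done
echo "pg_dumpall${pgexclude_arg}"
# -> pg_dumpall --exclude-database=template0 --exclude-database=template1 --exclude-database=stagingdb
```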
@@ -826,7 +884,7 @@ backup_redis() {
 backup_job_filename=redis_all_${backup_job_db_host,,}_${now}.rdb
 backup_job_filename_base=redis_${backup_job_db_host,,}
 if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
-echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${TEMP_PATH}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}
+echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${temporary_directory}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}
 sleep 10
 try=5
 while [ $try -gt 0 ] ; do
@@ -846,7 +904,7 @@ backup_redis() {
 compression
 pre_dbbackup all
 if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
-run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
+run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
 if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi
 timer backup finish
 check_exit_code backup "${backup_job_filename}"
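For context, `redis-cli --rdb` writes a point-in-time RDB snapshot to a local file, which is why the compression step above runs on a finished file rather than on a stream. A hedged sketch with placeholder connection values:

```bash
redis-cli -h cache.example.com -p 6379 -a secret --rdb /tmp/redis_all.rdb
gzip /tmp/redis_all.rdb   # compress the completed snapshot
```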
@@ -867,16 +925,16 @@ backup_sqlite3() {
 pre_dbbackup "${db}"
 write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}"
 if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug on; fi
-silent ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${TEMP_PATH}/backup_${now}.sqlite3'"
+silent ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${temporary_directory}/backup_${now}.sqlite3'"
 exit_code=$?
 check_exit_code backup "${backup_job_filename}"
-if [ ! -f "${TEMP_PATH}"/backup_${now}.sqlite3 ] ; then
+if [ ! -f "${temporary_directory}"/backup_${now}.sqlite3 ] ; then
 print_error "SQLite3 backup failed! Exitting"
 return 1
 fi
 compression
-run_as_user ${play_fair} cat "${TEMP_PATH}"/backup_${now}.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}/${backup_job_filename}" > /dev/null
+run_as_user ${play_fair} cat "${temporary_directory}"/backup_${now}.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}/${backup_job_filename}" > /dev/null
-rm -rf "${TEMP_PATH}"/backup_${now}.sqlite3
+rm -rf "${temporary_directory}"/backup_${now}.sqlite3
 if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug off; fi
 timer backup finish
 file_encryption
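For context, `.backup` is SQLite's online-backup command, which copies a consistent snapshot even while the database file is in use; the snapshot is then compressed and removed. A hedged sketch with placeholder paths:

```bash
sqlite3 /data/app.sqlite3 ".backup '/tmp/app_snapshot.sqlite3'"
gzip -c /tmp/app_snapshot.sqlite3 > /tmp/sqlite3_app.sqlite3.gz
rm -f /tmp/app_snapshot.sqlite3
```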
@@ -889,14 +947,16 @@ backup_sqlite3() {

 check_availability() {
 if var_true "${DEBUG_CHECK_AVAILABILITY}" ; then debug on; fi
-### Set the Database Type
 if var_false "${backup_job_skip_availability_check}" ; then
 case "${dbtype}" in
 "couch" )
 counter=0
 code_received=0
+if [ -n "${backup_job_db_user}" ] && [ -n ${backup_job_db_pass} ]; then
+_ca_couch_auth="-u ${backup_job_db_user}:${backup_job_db_pass}"
+fi
 while [ "${code_received}" != "200" ]; do
-code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${backup_job_db_host}:${backup_job_db_port})
+code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${_ca_couch_auth} ${backup_job_db_host}:${backup_job_db_port})
 if [ "${code_received}" = "200" ] ; then break ; fi
 sleep 5
 (( counter+=5 ))
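A hedged sketch of the availability poll above: keep probing the HTTP endpoint until it answers 200, attaching basic auth only when credentials exist. Host, port and credential names are placeholders:

```bash
host="couch.example.com"; port=5984
auth=""
if [ -n "${COUCH_USER:-}" ] && [ -n "${COUCH_PASS:-}" ]; then
    auth="-u ${COUCH_USER}:${COUCH_PASS}"
fi
until [ "$(curl -sSL -o /dev/null -w '%{http_code}' ${auth} ${host}:${port})" = "200" ]; do
    echo "waiting for CouchDB at ${host}:${port} ..."
    sleep 5
done
```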
@@ -940,7 +1000,7 @@ check_availability() {
 "mysql" )
 counter=0
 export MYSQL_PWD=${backup_job_db_pass}
-while ! (run_as_user mysqladmin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
+while ! (run_as_user ${_mysql_prefix}${_mysql_bin_prefix}admin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
 sleep 5
 (( counter+=5 ))
 write_log warn "MySQL/MariaDB Server '${backup_job_db_host}' is not accessible, retrying.. (${counter} seconds so far)"
@@ -1043,13 +1103,24 @@ cleanup_old_data() {
 write_log info "Cleaning up old backups on filesystem"
 run_as_user mkdir -p "${backup_job_filesystem_path}"
 find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
-write_log info "Syncing changes via blobxfer"
-silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
+if var_true "${_postgres_backup_globals}"; then
+find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \;
+fi
+
+if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
+write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
+else
+write_log info "Syncing changes via blobxfer"
+silent run_as_user blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
+fi
 ;;
 "file" | "filesystem" )
 write_log info "Cleaning up old backups on filesystem"
 run_as_user mkdir -p "${backup_job_filesystem_path}"
 run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
+if var_true "${_postgres_backup_globals}"; then
+run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \;
+fi
 ;;
 "s3" | "minio" )
 write_log info "Cleaning up old backups on S3 storage"
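The new guard above avoids invoking blobxfer with empty credentials. A hedged sketch of the same pattern, reusing the flag set shown in the diff with placeholder values:

```bash
account="${backup_job_blobxfer_storage_account:-}"
key="${backup_job_blobxfer_storage_account_key:-}"
if [ -z "${account}" ] || [ -z "${key}" ]; then
    echo "WARN: blobxfer credentials unset - skipping remote sync"
else
    blobxfer upload --no-overwrite --mode file \
        --remote-path backups/db \
        --storage-account "${account}" \
        --storage-account-key "${key}" \
        --local-path /backup \
        --delete --delete-only
fi
```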
@@ -1149,8 +1220,10 @@ compression() {
 create_archive() {
 if var_true "${DEBUG_CREATE_ARCHIVE}" ; then debug on; fi
 if [ "${exit_code}" = "0" ] ; then
-write_log notice "Creating archive file of '${backup_job_filename_dir}' with tar ${compression_string}"
+write_log notice "Creating archive file of '${backup_job_filename}' with tar ${compression_string}"
-run_as_user tar cf - "${TEMP_PATH}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
+run_as_user tar cf - "${temporary_directory}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
+backup_job_filename="${backup_job_filename_dir}".tar"${extension}"
+rm -rf "${temporary_directory}"/"${backup_job_filename_dir}"
 else
 write_log error "Skipping creating archive file because backup did not complete successfully"
 fi
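A hedged sketch of the archive pattern used above, with placeholder paths: tar streams the directory to stdout, a compressor filters it, and `tee` writes the final archive, after which the uncompressed directory can be dropped.

```bash
tar cf - /tmp/dumpdir | gzip | tee /tmp/dumpdir.tar.gz > /dev/null
rm -rf /tmp/dumpdir   # keep only the compressed archive
```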
@@ -1165,8 +1238,8 @@ create_schedulers() {
 print_debug "[create_schedulers] Found '${backup_instances}' DB_HOST instances"

 if [ -n "${DB_HOST}" ] && [ "${backup_instances}" ]; then
 backup_instances=1;
 print_debug "[create_schedulers] Detected using old DB_ variables"
 fi

 for (( instance = 01; instance <= backup_instances; )) ; do
@@ -1174,7 +1247,7 @@ create_schedulers() {
 cp -R /assets/dbbackup/template-dbbackup /etc/services.available/dbbackup-"${instance}"
 sed -i "s|{{BACKUP_NUMBER}}|${instance}|g" /etc/services.available/dbbackup-"${instance}"/run
 if [ "${MODE,,}" = "manual" ] ; then service_stop dbbackup-"${instance}" ; fi
-cat <<EOF >> /usr/bin/backup"${instance}"-now
+cat <<EOF > /usr/bin/backup"${instance}"-now
 #!/bin/bash
 source /assets/functions/00-container
 PROCESS_NAME=db-backup${instance}
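The `>>` to `>` switch above is the mechanics behind overwriting manual scripts rather than appending to them: with append, each regeneration stacked another copy of the script body onto `backupN-now`. A minimal illustration of the idempotent form:

```bash
# truncate-and-write: rerunning this always leaves exactly one script body
cat <<'EOF' > /usr/bin/backup1-now
#!/bin/bash
echo "manual backup trigger"
EOF
chmod +x /usr/bin/backup1-now
```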
@@ -1194,7 +1267,7 @@ EOF

 EOF
 else
-echo "/usr/bin/backup${instance}-now now" >> /usr/bin/backup-now
+echo "/usr/bin/backup${instance}-now now" > /usr/bin/backup-now
 fi

 instance=$(echo "${instance} +1" | bc)
@@ -1272,7 +1345,7 @@ file_encryption() {
 print_notice "Encrypting with GPG Passphrase"
 encrypt_routines_start_time=$(date +'%s')
 encrypt_tmp_dir=$(run_as_user mktemp -d)
-echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${TEMP_PATH}"/"${backup_job_filename}"
+echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${temporary_directory}"/"${backup_job_filename}"
 rm -rf "${encrypt_tmp_dir}"
 elif [ -z "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ] && [ -n "${backup_job_encrypt_private_key}" ]; then
 if [ -f "${backup_job_encrypt_private_key}" ]; then
@@ -1284,13 +1357,13 @@ file_encryption() {
 silent run_as_user gpg --home ${encrypt_tmp_dir} --batch --import "${encrypt_tmp_dir}"/private_key.asc
 print_debug "[file_encryption] [key] Encrypting to Public Key"
 cat "${backup_job_encrypt_public_key}" | run_as_user tee "${encrypt_tmp_dir}"/public_key.asc > /dev/null
-silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --encrypt --recipient-file "${encrypt_tmp_dir}"/public_key.asc "${TEMP_PATH}"/"${backup_job_filename}"
+silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --encrypt --recipient-file "${encrypt_tmp_dir}"/public_key.asc "${temporary_directory}"/"${backup_job_filename}"
 rm -rf "${encrypt_tmp_dir}"
 fi
 fi
-if [ -f "${TEMP_PATH}"/"${backup_job_filename}".gpg ]; then
+if [ -f "${temporary_directory}"/"${backup_job_filename}".gpg ]; then
 print_debug "[file_encryption] Deleting original file"
-rm -rf "${TEMP_PATH:?}"/"${backup_job_filename:?}"
+rm -rf "${temporary_directory:?}"/"${backup_job_filename:?}"
 backup_job_filename="${backup_job_filename}.gpg"

 encrypt_routines_finish_time=$(date +'%s')
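For reference, the symmetric branch above feeds the passphrase on stdin (`--passphrase-fd 0`) inside a throwaway GPG home, leaving `<file>.gpg` next to the original. A hedged standalone sketch with a placeholder passphrase and path; depending on the GnuPG build, `--pinentry-mode loopback` may also be required:

```bash
tmp_home=$(mktemp -d)
echo "s3cret-passphrase" | gpg --batch --yes --homedir "${tmp_home}" \
    --passphrase-fd 0 -c /tmp/backup.sql.gz   # produces /tmp/backup.sql.gz.gpg
rm -rf "${tmp_home}"
```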
@@ -1329,7 +1402,7 @@ generate_checksum() {

 checksum_routines_start_time=$(date +'%s')
 write_log notice "Generating ${checksum_extension^^} sum for '${backup_job_filename}'"
-cd "${TEMP_PATH}"
+cd "${temporary_directory}"
 run_as_user ${checksum_command} "${backup_job_filename}" | run_as_user tee "${backup_job_filename}"."${checksum_extension}" > /dev/null
 chmod ${backup_job_filesystem_permission} "${backup_job_filename}"."${checksum_extension}"
 checksum_value=$(run_as_user cat "${backup_job_filename}"."${checksum_extension}" | awk '{print $1}')
@@ -1377,7 +1450,7 @@ notify() {
 if [ -z "${SMTP_HOST}" ] ; then write_log error "[notifications] No SMTP_HOST variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
 if [ -z "${SMTP_PORT}" ] ; then write_log error "[notifications] No SMTP_PORT variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
 if var_nottrue "${skip_mail}" ; then
-if ! grep -q ^from /etc/msmptrc ; then
+if ! grep -q ^from /etc/msmtprc ; then
 echo "from ${MAIL_FROM}" >> /etc/msmtprc
 fi
 mail_recipients=$(echo "${MAIL_TO}" | tr "," "\n")
@@ -1501,8 +1574,8 @@ EOF
 move_dbbackup() {
 if var_true "${DEBUG_MOVE_DBBACKUP}" ; then debug on; fi
 if [ "${exit_code}" = "0" ] ; then
-dbbackup_size="$(run_as_user stat -c%s "${TEMP_PATH}"/"${backup_job_filename}")"
+dbbackup_size="$(run_as_user stat -c%s "${temporary_directory}"/"${backup_job_filename}")"
-dbbackup_date="$(run_as_user date -r "${TEMP_PATH}"/"${backup_job_filename}" +'%s')"
+dbbackup_date="$(run_as_user date -r "${temporary_directory}"/"${backup_job_filename}" +'%s')"

 case "${backup_job_size_value,,}" in
 "b" | "bytes" )
@@ -1516,37 +1589,37 @@ move_dbbackup() {
 ;;
 esac
 if [ "${backup_job_size_value}" = "1" ] ; then
-filesize=$(run_as_user stat -c%s "${TEMP_PATH}"/"${backup_job_filename}")
+filesize=$(run_as_user stat -c%s "${temporary_directory}"/"${backup_job_filename}")
 write_log notice "Backup of '${backup_job_filename}' created with the size of ${filesize} bytes"
 else
-filesize=$(run_as_user du -h "${TEMP_PATH}"/"${backup_job_filename}" | awk '{ print $1}')
+filesize=$(run_as_user du -h "${temporary_directory}"/"${backup_job_filename}" | awk '{ print $1}')
 write_log notice "Backup of '${backup_job_filename}' created with the size of ${filesize}"
 fi

-chmod "${backup_job_filesystem_permission}" "${TEMP_PATH}"/"${backup_job_filename}"
+chmod "${backup_job_filesystem_permission}" "${temporary_directory}"/"${backup_job_filename}"
 case "${backup_job_backup_location,,}" in
 "file" | "filesystem" )
 write_log debug "Moving backup to filesystem"
 run_as_user mkdir -p "${backup_job_filesystem_path}"
-if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${TEMP_PATH}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi
+if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${temporary_directory}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi
 if var_true "${DEBUG_MOVE_DBBACKUP}"; then
 cat <<EOF
-## BEGIN Before Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
+## BEGIN Before Moving file from temporary_directory $(TZ=${TIMEZONE} date)
 ##

-$(ls -l "${TEMP_PATH}"/*)
+$(ls -l "${temporary_directory}"/*)

 ## END
 EOF
 fi
-run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
+run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
 move_exit_code=$?
 if var_true "${DEBUG_MOVE_DBBACKUP}"; then
 cat <<EOF
-## BEGIN After Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
+## BEGIN After Moving file from temporary_directory $(TZ=${TIMEZONE} date)
 ##

-$(ls -l "${TEMP_PATH}"/*)
+$(ls -l "${temporary_directory}"/*)

 ## END

@@ -1588,43 +1661,49 @@ EOF

 [[ ( -n "${backup_job_s3_host}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${backup_job_s3_protocol}://${backup_job_s3_host}"

-silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_PATH}/${backup_job_filename} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${backup_job_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
+silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${temporary_directory}/${backup_job_filename} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${backup_job_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
 move_exit_code=$?
 if [ "${backup_job_checksum}" != "none" ] ; then
-silent run_as_user ${play_fair} aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_PATH}/*.${checksum_extension} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/ ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
+silent run_as_user ${play_fair} aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${temporary_directory}/*.${checksum_extension} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/ ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
-run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"."${checksum_extension}"
+run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}"
 fi

-run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"
+run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
 ;;
 "blobxfer" )
-write_log info "Synchronize local storage from S3 Bucket with blobxfer"
-${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete
+if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
+write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
+else
+write_log info "Synchronize local storage from blob container with blobxfer"
+${play_fair} blobxfer download --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --restore-file-lmt --delete

 write_log info "Moving backup to external storage with blobxfer"
 mkdir -p "${backup_job_filesystem_path}"
-if [ "${backup_job_checksum}" != "none" ] ; then run_as_user mv "${TEMP_PATH}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/; fi
+if [ "${backup_job_checksum}" != "none" ] ; then run_as_user mv "${temporary_directory}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/; fi

-run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
+run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"

-silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
+silent run_as_user ${play_fair} blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
 move_exit_code=$?

-if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"."${checksum_extension}" ; fi
+if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}" ; fi
-run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"
+run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
+fi
 ;;
 esac
 else
 write_log error "Skipping moving DB Backup to final location because backup did not complete successfully"
 fi

-run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"
+run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
 if var_true "${DEBUG_MOVE_DBBACKUP}" ; then debug off; fi
 }

 prepare_dbbackup() {
 timer backup start
 now=$(run_as_user date +"%Y%m%d-%H%M%S")
+temporary_directory=$(mktemp -d -p "${TEMP_PATH}" -t ${backup_instance_number}_dbbackup.XXXXXX)
+chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${temporary_directory}"
 backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}
 backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.sql
 }
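The two lines added to `prepare_dbbackup` give every job its own scratch directory under `TEMP_PATH`; the matching removal lands in `post_dbbackup` in the next hunk. A hedged, standalone sketch of that lifecycle (paths and the instance number are placeholders):

```bash
TEMP_PATH=/tmp/backups
mkdir -p "${TEMP_PATH}"
# unique per-job directory, e.g. /tmp/backups/01_dbbackup.Ab3dEf
temporary_directory=$(mktemp -d -p "${TEMP_PATH}" -t 01_dbbackup.XXXXXX)
: > "${temporary_directory}/example.sql"   # stand-in for the real dump
cd "${TEMP_PATH}"                          # step out before deleting
rm -rf "${temporary_directory}"
```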
@@ -1739,6 +1818,8 @@ EOZP

 write_log notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
 if var_true "${DEBUG_POST_DBBACKUP}" ; then debug on; fi
+cd "${TEMP_PATH}"
+rm -rf "${temporary_directory}"
 }

 process_limiter() {
@@ -1882,12 +1963,12 @@ timer() {
 local cron_compare_difference=$(( cron_compare - ${4} ))

 if [ "${cron_compare_difference}" -lt 60 ]; then
-cron_compare=$((${cron_compare} + $(( 60 - cron_compare_difference )) ))
+cron_compare=$((cron_compare + $(( 60 - cron_compare_difference )) ))
 fi

 local cron_current_seconds="$(date --date=@"${cron_compare_seconds}" +"%-S")"
 if [ "${cron_current_seconds}" -ne 0 ]; then
-cron_compare_seconds=$(( cron_compare_seconds - cron_current_seconds ))
+cron_compare=$(( cron_compare_seconds - cron_current_seconds ))
 fi

 local cron_minute="$(echo -n "${2}" | awk '{print $1}')"
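For context on the arithmetic being corrected here, the intent is to snap the comparison timestamp down to the start of its minute before cron matching. A hedged worked example using GNU date:

```bash
cron_compare=$(date +%s)                       # e.g. 1753272045 (12:00:45)
secs=$(date --date=@"${cron_compare}" +"%-S")  # e.g. 45
if [ "${secs}" -ne 0 ]; then
    cron_compare=$(( cron_compare - secs ))    # e.g. 1753272000 (12:00:00)
fi
echo "minute boundary: ${cron_compare}"
```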
@@ -1926,6 +2007,7 @@ timer() {
 fi

 cron_next_hour="${cron_next}"
+cron_next_minute=0
 fi

 print_debug "[timer] [cron] Parse Day of Week"
@@ -1999,7 +2081,6 @@ timer() {
 fi
 cron_parsed=0
 done

 local cron_future=$(date --date="${cron_next_year}-$(printf "%02d" ${cron_next_month})-$(printf "%02d" ${cron_next_day_of_month})T$(printf "%02d" ${cron_next_hour}):$(printf "%02d" ${cron_next_minute}):00" "+%s")
 local cron_future_difference=$(( cron_future - cron_compare_seconds ))
 time_cron=true
@@ -912,6 +912,83 @@ get_filename() {
 r_filename=${opt}
 }

+get_ssl() {
+if grep -q "^DB${detected_host_num}_MYSQL_ENABLE_TLS=" "${restore_vars}" ; then
+detected_ssl_value=$(grep "^DB${detected_host_num}_MYSQL_ENABLE_TLS=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
+fi
+
+if [[ -z "${detected_ssl_value}" ]]; then
+print_debug "Parsed SSL Variant: 1 - No Env Variable Found"
+default_ssl="false" # Default if no env variable
+q_ssl_variant=1
+q_ssl_menu_opt_default="| (${cwh}N${cdgy}) * "
+q_ssl_menu="" #No menu option
+else
+print_debug "Parsed SSL Variant: 2 - Env Variable DB${detected_host_num}_MYSQL_ENABLE_TLS = '${detected_ssl_value}'"
+default_ssl="${detected_ssl_value,,}"
+q_ssl_variant=2
+q_ssl_menu="E ) Environment Variable DB${detected_host_num}_MYSQL_ENABLE_TLS: '${detected_ssl_value}'"
+q_ssl_menu_opt_default="| (${cwh}E${cdgy}) * "
+fi
+
+cat <<EOF
+
+Do you wish to use SSL for the connection?
+${q_ssl_menu}
+
+Y ) Yes
+N ) No
+Q ) Quit
+
+EOF
+
+r_ssl=""
+case "${q_ssl_variant}" in
+1) # No env variable, ask directly
+while true; do
+read -r -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N\*${cdgy}\) : ${cwh}${coff}) " q_ssl
+case "${q_ssl,,}" in
+y*)
+r_ssl="true"
+break
+;;
+n* | "")
+r_ssl="false"
+break
+;;
+q*)
+print_info "Quitting Script"
+exit 1
+;;
+esac
+done
+;;
+2) # Env variable exists, offer it as an option
+while true; do
+read -r -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E\*${cdgy}\) \| \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) : ${cwh}${coff}) " q_ssl
+case "${q_ssl,,}" in
+e* | "") # Default to env variable if just enter is pressed.
+r_ssl="${detected_ssl_value}"
+break
+;;
+y*)
+r_ssl="true"
+break
+;;
+n*)
+r_ssl="false"
+break
+;;
+q*)
+print_info "Quitting Script"
+exit 1
+;;
+esac
+done
+;;
+esac
+}
+
 #### SCRIPT START
 trap control_c INT
 bootstrap_variables restore_init
@@ -984,6 +1061,20 @@ else
 fi
 print_debug "Database Port '${r_dbport}'"

+## Question SSL connection
+if [[ "${r_dbtype,,}" == "mariadb" || "${r_dbtype,,}" == "mysql" ]]; then
+if [ -n "${8}" ]; then
+r_ssl="${8}"
+else
+get_ssl
+fi
+print_debug "SSL enable: '${r_ssl}'"
+else
+r_ssl="false"
+print_debug "SSL disabled for ${r_dbtype}"
+fi
+
+
 ## Parse Extension
 case "${r_filename##*.}" in
 bz* )
@@ -1013,8 +1104,13 @@ esac
 ## Perform a restore
 case "${r_dbtype}" in
 mariadb | mysql )
+if [[ "${r_ssl,,}" == "false" ]]; then
+mysql_ssl_option="--disable-ssl"
+else
+mysql_ssl_option=""
+fi
 print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
-pv ${r_filename} | ${decompress_cmd}cat | mysql -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${r_dbname}
+pv ${r_filename} | ${decompress_cmd}cat | mariadb -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${mysql_ssl_option} ${r_dbname}
 exit_code=$?
 ;;
 pgsql | postgres* )
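A hedged sketch of the restore pipeline above: `pv` (assumed available in the image) provides a progress bar while the dump is decompressed and streamed into the client, and `--disable-ssl` is appended only when SSL was declined. All connection values below are placeholders:

```bash
mysql_ssl_option="--disable-ssl"   # or "" to leave TLS negotiation on
pv backup.sql.gz | zcat \
    | mariadb -ubackup -psecret -P3306 -hdb.example.com ${mysql_ssl_option} mydb
```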
@@ -1036,9 +1132,6 @@ EOF
 echo -e "${coff}"
 read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) \| \(${cwh}Q${cdgy}\) : ${cwh}${coff})" q_menu_mongo_dropdb
 case "${q_menu_mongo_dropdb,,}" in
-"y" | "yes" | * )
-mongo_dropdb="--drop"
-;;
 "n" | "update" )
 unset mongo_dropdb
 ;;
@@ -1046,6 +1139,9 @@ EOF
 print_info "Quitting Script"
 exit 1
 ;;
+"y" | "yes" | * )
+mongo_dropdb="--drop"
+;;
 esac

 print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
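The reordering above matters because bash evaluates `case` patterns top to bottom and stops at the first match: with `"y" | "yes" | * )` listed first, the wildcard swallowed every answer and the database was always dropped. A minimal demonstration:

```bash
answer="n"
case "${answer,,}" in
    "n" | "update" )  unset mongo_dropdb ;;        # now reachable
    "q" )             echo "quit" ;;
    "y" | "yes" | * ) mongo_dropdb="--drop" ;;     # wildcard last = true default
esac
echo "mongo_dropdb='${mongo_dropdb:-}'"            # -> empty for "n"
```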