Mirror of https://github.com/tiredofit/docker-db-backup.git
Synced 2025-12-22 21:53:42 +01:00

Compare commits: 54 commits
f7f72ba2c1
2f05d76f4e
c9a634ff25
0ce21e8f43
a700eb0fef
7baa3774c7
341e4d12ea
5c51bbcb7e
24d9a9a937
591b8d6dbd
a5b15b4412
6692cf9834
c37de5778d
eeeafd6ab8
17daf26084
b53cda99f7
2cf3e2ae70
c7ee94aec2
f44233e51a
ccda858b18
d58b27d5ef
fb9fe8a032
b705982ae1
f031d787ae
3eed5fc8a0
be619fb707
cccc088b35
4579f4057c
cd683648d0
11f55f3d82
674a98fcd8
77c747e01b
2e30558a27
c746fb641e
ca2f04cd59
dfa94ecab7
eaea6dc348
34abe88159
5ffbeeb163
c82cee80f8
ab059ccdf1
1e8ccf4d56
65c40cac0a
a9f2d51ff9
7f455abc1a
c16add4525
d5769b1588
0b2c7836cf
535e011740
5a391b908a
fddca646c8
68f954c59b
0ab0a6d182
f6bf2993f7
CHANGELOG.md (186 lines changed)

@@ -1,3 +1,189 @@
## 4.0.35 2024-01-14 <dave at tiredofit dot ca>

### Changed

- Fix issue with email notifications and not being able to add a From statement


## 4.0.34 2024-01-02 <dave at tiredofit dot ca>

### Changed

- Change the way architectures are detected to re-enable backups with MSSQL and Influx2


## 4.0.33 2023-12-18 <dave at tiredofit dot ca>

### Changed

- Allow _OPTS variables to contain spaces
- Switch references of _DUMP_OPTS to _BACKUP_OPTS


## 4.0.32 2023-12-15 <dave at tiredofit dot ca>

### Changed

- Fix issue with directories not properly being backed up (InfluxDB)


## 4.0.31 2023-12-12 <dave at tiredofit dot ca>

### Changed

- Support backing up databases with spaces in their names


## 4.0.30 2023-12-11 <dave at tiredofit dot ca>

### Added

- Separate each job into its own temporary folder, for isolation and to better clean up jobs that back up as a directory instead of a flat file

## 4.0.29 2023-12-04 <dave at tiredofit dot ca>

### Changed

- Skip blobxfer if either account or key is not present

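As a sketch of the guard this entry describes (variable names here are illustrative, not necessarily the image's own), the upload is simply skipped when either credential is empty:

```bash
#!/usr/bin/env bash
# Hypothetical guard around the blobxfer upload step; the real image derives
# these values from DB01_BLOBXFER_STORAGE_ACCOUNT(_KEY) style variables.
if [ -z "${blobxfer_storage_account}" ] || [ -z "${blobxfer_storage_account_key}" ]; then
    echo "Skipping blobxfer: storage account or key not set" >&2
else
    blobxfer upload \
        --storage-account "${blobxfer_storage_account}" \
        --storage-account-key "${blobxfer_storage_account_key}" \
        --remote-path "${blobxfer_remote_path}" \
        --local-path "${backup_path}"
fi
```
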
## 4.0.28 2023-12-04 <dave at tiredofit dot ca>

### Changed

- AWS CLI 1.31.5
- Switch to using pip for installing the AWS CLI to remove deprecation warnings

## 4.0.27 2023-12-04 <dave at tiredofit dot ca>

### Changed

- Switch to using an actual crontab for cron expressions

## 4.0.26 2023-11-30 <dave at tiredofit dot ca>

### Added

- AWS CLI 1.31.4


## 4.0.25 2023-11-29 <dave at tiredofit dot ca>

### Changed

- Fix #297 - Add parameters to blobxfer to restore functionality


## 4.0.24 2023-11-28 <dave at tiredofit dot ca>

### Changed

- Fix issue with cron parsing where a value of 0 was getting clobbered by the sort command


## 4.0.23 2023-11-28 <dave at tiredofit dot ca>

### Changed

- Resolve issue with custom notification scripts not executing


## 4.0.22 2023-11-25 <dave at tiredofit dot ca>

### Changed

- Move cleanup_old_data routines to happen within the backup_ functions to properly accommodate globals and ALL DB_NAME use cases


## 4.0.21 2023-11-22 <dave at tiredofit dot ca>

### Changed

- Fix for SQLite backups not being cleaned up properly due to a malformed base filename


## 4.0.20 2023-11-21 <dave at tiredofit dot ca>

### Changed

- Update base image to support S6 Overlay 3.1.6.2 to solve shutdown issues, specifically with MODE=MANUAL and MANUAL_RUN_FOREVER=TRUE
- Add some safety nets for manual scheduling


## 4.0.19 2023-11-20 <dave at tiredofit dot ca>

### Changed

- Make adjustments to the cron scheduling feature to handle whitespace properly


## 4.0.18 2023-11-18 <joergmschulz@github>

### Changed

- Fix loading msmtp configuration


## 4.0.17 2023-11-17 <dave at tiredofit dot ca>

### Changed

- Provide more details when notifying via instant messages


## 4.0.16 2023-11-17 <dave at tiredofit dot ca>

### Changed

- Switch to using msmtp instead of s-mail for notify()


## 4.0.15 2023-11-16 <dave at tiredofit dot ca>

### Changed

- Fix cleanup of old backups


## 4.0.14 2023-11-13 <dave at tiredofit dot ca>

### Changed

- Bugfix: PRE/POST scripts that were found were not giving the legacy warning
- Run pre/post scripts as root


## 4.0.13 2023-11-12 <dave at tiredofit dot ca>

### Changed

- Check for any quotes when using MONGO_CUSTOM_URI and remove them


## 4.0.12 2023-11-12 <dave at tiredofit dot ca>

### Changed

- Allow creating schedulers if _MONGO_CUSTOM_URI is set and _DB_HOST is blank


## 4.0.11 2023-11-11 <dave at tiredofit dot ca>

### Changed

- Resolve issue with backing up ALL databases with PGSQL and MySQL

## 4.0.10 2023-11-11 <dave at tiredofit dot ca>

### Changed

- Change environment variable parsing routines to properly accommodate passwords containing '=='

## 4.0.9 2023-11-11 <dave at tiredofit dot ca>

### Changed

- Fix issue with quotes being wrapped around _PASS variables

## 4.0.8 2023-11-11 <dave at tiredofit dot ca>

### Changed

- Tidy up file_encryption() routines
- Change environment variable _ENCRYPT_PUBKEY to _ENCRYPT_PUBLIC_KEY
- Add new environment variable _ENCRYPT_PRIVATE_KEY

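The two encryption modes map onto standard GnuPG invocations; a minimal sketch, assuming stock gpg flags rather than the image's exact wrapper:

```bash
#!/usr/bin/env bash
# Illustrative only: how a compressed dump can be GPG-encrypted either with a
# passphrase (_ENCRYPT_PASSPHRASE) or with a public key file
# (_ENCRYPT_PUBLIC_KEY). The container's actual invocation may differ.
dump="backup.sql.zst"

# Symmetric encryption with a passphrase
gpg --batch --yes --pinentry-mode loopback \
    --passphrase "${ENCRYPT_PASSPHRASE}" \
    --symmetric --output "${dump}.gpg" "${dump}"

# Asymmetric encryption against a public key file (GnuPG >= 2.1.14)
gpg --batch --yes \
    --recipient-file "${ENCRYPT_PUBLIC_KEY}" \
    --encrypt --output "${dump}.gpg" "${dump}"
```
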
## 4.0.7 2023-11-11 <dave at tiredofit dot ca>

### Added

- Add separate permissions for _FILESYSTEM_PATH

### Changed

- More output and debugging additions
- SQLite3 now backs up without running into file permission/access problems
- Clean up old SQLite backups from the temp directory
- Handle multiple SQLite3 backups concurrently


## 4.0.6 2023-11-10 <dave at tiredofit dot ca>

### Added

Dockerfile (15 lines changed)

@@ -1,5 +1,5 @@
 ARG DISTRO=alpine
-ARG DISTRO_VARIANT=edge
+ARG DISTRO_VARIANT=3.19

 FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"
@@ -9,8 +9,8 @@ ENV INFLUX1_CLIENT_VERSION=1.8.0 \
     INFLUX2_CLIENT_VERSION=2.7.3 \
     MSODBC_VERSION=18.3.2.1-1 \
     MSSQL_VERSION=18.3.1.1-1 \
-    AWS_CLI_VERSION=1.29.78 \
-    CONTAINER_ENABLE_MESSAGING=FALSE \
+    AWS_CLI_VERSION=1.31.5 \
+    CONTAINER_ENABLE_MESSAGING=TRUE \
     CONTAINER_ENABLE_MONITORING=TRUE \
     IMAGE_NAME="tiredofit/db-backup" \
     IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
@@ -72,14 +72,12 @@ RUN source /assets/functions/00-container && \
     apkArch="$(uname -m)"; \
     case "$apkArch" in \
         x86_64) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
-        arm64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
+        arm64 | aarch64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
         *) sleep 0.1 ;; \
     esac; \
     \
-    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; ls -l ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
+    if [[ $mssql = "true" ]] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
-    if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
+    if [[ $influx2 = "true" ]] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
-    clone_git_repo https://github.com/aws/aws-cli "${AWS_CLI_VERSION}" && \
-    python3 setup.py install --prefix=/usr && \
     clone_git_repo https://github.com/influxdata/influxdb "${INFLUX1_CLIENT_VERSION}" && \
     go build -o /usr/sbin/influxd ./cmd/influxd && \
     strip /usr/sbin/influxd && \
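The `arm64 | aarch64` change works because `uname -m` on 64-bit ARM Linux reports `aarch64` (macOS reports `arm64`), so the old single-pattern case always fell through to the default branch; a quick way to see the matching behavior:

```bash
# On an ARM64 Alpine host uname -m prints "aarch64", which the old pattern
# "arm64 )" never matched, silently disabling the MSSQL/Influx2 installs.
apkArch="$(uname -m)"
case "$apkArch" in
    x86_64)          echo "building amd64 variants" ;;
    arm64 | aarch64) echo "building arm64 variants" ;;
    *)               echo "no MSSQL/Influx2 for: $apkArch" ;;
esac
```
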
@@ -99,6 +97,7 @@ RUN source /assets/functions/00-container && \
     make && \
     make install && \
     \
+    pip3 install --break-system-packages awscli==${AWS_CLI_VERSION} && \
     pip3 install --break-system-packages blobxfer && \
     \
     package remove .db-backup-build-deps && \

README.md (44 lines changed)

@@ -214,12 +214,13 @@ If these are set and no other defaults or variables are set explicitly, they wil

 Encryption occurs after compression and the encrypted filename will have a `.gpg` suffix

-| Variable                     | Description                                 | Default |
-| ---------------------------- | ------------------------------------------- | ------- |
-| `DEFAULT_ENCRYPT`            | Encrypt file after backing up with GPG      | `FALSE` |
-| `DEFAULT_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG         |         |
-| *or*                         |                                             |         |
-| `DEFAULT_ENCRYPT_PUBKEY`     | Path of public key to encrypt file with GPG |         |
+| Variable                      | Description                                  | Default | `_FILE` |
+| ----------------------------- | -------------------------------------------- | ------- | ------- |
+| `DEFAULT_ENCRYPT`             | Encrypt file after backing up with GPG       | `FALSE` |         |
+| `DEFAULT_ENCRYPT_PASSPHRASE`  | Passphrase to encrypt file with GPG          |         | x       |
+| *or*                          |                                              |         |         |
+| `DEFAULT_ENCRYPT_PUBLIC_KEY`  | Path of public key to encrypt file with GPG  |         | x       |
+| `DEFAULT_ENCRYPT_PRIVATE_KEY` | Path of private key to encrypt file with GPG |         | x       |

 ##### Scheduling Options

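A hypothetical invocation using the renamed variables (image tag and mount paths are placeholders):

```bash
# Encrypt every backup against a mounted public key; only the holder of the
# matching private key can decrypt the resulting .gpg files.
docker run -d \
  -e DEFAULT_ENCRYPT=TRUE \
  -e DEFAULT_ENCRYPT_PUBLIC_KEY=/keys/backup.pub \
  -v "$(pwd)/keys:/keys:ro" \
  -v "$(pwd)/backup:/backup" \
  tiredofit/db-backup
```
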
@@ -323,11 +324,12 @@ Options that are related to the value of `DEFAULT_BACKUP_LOCATION`

 If `DEFAULT_BACKUP_LOCTION` = `FILESYSTEM` then the following options are used:

 | Variable | Description | Default |
 | --- | --- | --- |
-| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
+| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)_(DB_NAME)_(DB_HOST)` | `TRUE` |
 | `DEFAULT_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
+| `DEFAULT_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
 | `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` |
-| `DEFAULT_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `600` |
+| `DEFAULT_FILESYSTEM_PERMISSION` | Permissions to apply to files. | `600` |

 ###### S3

@@ -475,12 +477,14 @@ Otherwise, override them per backup job. Additional backup jobs can be scheduled

 Encryption will occur after compression and the resulting filename will have a `.gpg` suffix

-| Variable                  | Description                                 | Default |
-| ------------------------- | ------------------------------------------- | ------- |
-| `DB01_ENCRYPT`            | Encrypt file after backing up with GPG      | `FALSE` |
-| `DB01_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG         |         |
-| *or*                      |                                             |         |
-| `DB01_ENCRYPT_PUBKEY`     | Path of public key to encrypt file with GPG |         |
+| Variable                   | Description                                  | Default | `_FILE` |
+| -------------------------- | -------------------------------------------- | ------- | ------- |
+| `DB01_ENCRYPT`             | Encrypt file after backing up with GPG       | `FALSE` |         |
+| `DB01_ENCRYPT_PASSPHRASE`  | Passphrase to encrypt file with GPG          |         | x       |
+| *or*                       |                                              |         |         |
+| `DB01_ENCRYPT_PUBLIC_KEY`  | Path of public key to encrypt file with GPG  |         | x       |
+| `DB01_ENCRYPT_PRIVATE_KEY` | Path of private key to encrypt file with GPG |         | x       |

 ##### Scheduling Options

@@ -532,6 +536,7 @@ Encryption will occur after compression and the resulting filename will have a `
 | `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
 | | Backup multiple by separating with commas eg `db1,db2` | | x |
 | `DB01_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases separated via commas from being backed up | | x |
+| `DB01_SPLIT_DB` | If using `ALL` - use this to split each database into its own file as opposed to one singular file | `FALSE` | |
 | `DB01_PORT` | MySQL / MariaDB Port | `3306` | x |
 | `DB01_MYSQL_EVENTS` | Backup Events for | `TRUE` | |
 | `DB01_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
@@ -573,6 +578,7 @@ Encryption will occur after compression and the resulting filename will have a `
 | `DB01_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
 | `DB01_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. | | |
 | | Backup multiple by separating with commas eg `db1,db2` | | x |
+| `DB01_SPLIT_DB` | If using `ALL` - use this to split each database into its own file as opposed to one singular file | `FALSE` | |
 | `DB01_PORT` | PostgreSQL Port | `5432` | x |

 ###### Redis

@@ -598,10 +604,11 @@ Options that are related to the value of `DB01_BACKUP_LOCATION`

 If `DB01_BACKUP_LOCTION` = `FILESYSTEM` then the following options are used:

 | Variable | Description | Default |
 | --- | --- | --- |
 | `DB01_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
 | `DB01_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
-| `DB01_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DB01_FILESYSTEM_PATH/archive/` |
+| `DB01_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
+| `DB01_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DB01_FILESYSTEM_PATH}/archive/` |
 | `DB01_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `600` |

 ###### S3

@@ -737,6 +744,9 @@ $5 body/error message


 ##### Email Notifications

+See more details in the base image listed above for more mail environment variables.
+
 | Parameter | Description | Default | `_FILE` |
 | ----------- | ----------------------------------------------------------------------------------------- | ------- | ------- |
 | `MAIL_FROM` | What email address to send mail from for errors | | |

@@ -31,8 +31,8 @@ services:
       - BACKUP_JOB_CONCURRENCY=1     # Only run one job at a time
       - DEFAULT_CHECKSUM=NONE        # Don't create checksums
       - DEFAULT_COMPRESSION=ZSTD     # Compress all with ZSTD
-      - DEFAULT_DUMP_INTERVAL=1440   # Backup every 1440 minutes
-      - DEFAULT_DUMP_BEGIN=0000      # Start backing up at midnight
+      - DEFAULT_BACKUP_INTERVAL=1440 # Backup every 1440 minutes
+      - DEFAULT_BACKUP_BEGIN=0000    # Start backing up at midnight
       - DEFAULT_CLEANUP_TIME=8640    # Cleanup backups after a week

       - DB01_TYPE=mariadb
@@ -40,8 +40,8 @@ services:
       - DB01_NAME=example
       - DB01_USER=example
       - DB01_PASS=examplepassword
-      - DB01_DUMP_INTERVAL=30   # (override) Backup every 30 minutes
-      - DB01_DUMP_BEGIN=+1      # (override) Backup starts immediately
+      - DB01_BACKUP_INTERVAL=30 # (override) Backup every 30 minutes
+      - DB01_BACKUP_BEGIN=+1    # (override) Backup starts immediately
       - DB01_CLEANUP_TIME=180   # (override) Cleanup backups when they are older than 180 minutes
       - DB01_CHECKSUM=SHA1      # (override) Create a SHA1 checksum
       - DB01_COMPRESSION=GZ     # (override) Compress with GZIP
@@ -51,8 +51,8 @@ services:
       #- DB02_NAME=example
       #- DB02_USER=example
       #- DB02_PASS=examplepassword
-      #- DB02_DUMP_INTERVAL=60   # (override) Backup every 60 minutes
-      #- DB02_DUMP_BEGIN=+10     # (override) Backup starts in ten minutes
+      #- DB02_BACKUP_INTERVAL=60 # (override) Backup every 60 minutes
+      #- DB02_BACKUP_BEGIN=+10   # (override) Backup starts in ten minutes
       #- DB02_CLEANUP_TIME=240   # (override) Cleanup backups when they are older than 240 minutes
       #- DB02_CHECKSUM=MD5       # (override) Create an MD5 checksum
       #- DB02_COMPRESSION=BZ     # (override) Compress with BZIP

@@ -45,7 +45,7 @@ services:
       - DB01_NAME=test1 # Create this database
       - DB01_USER=sa
       - DB01_PASS=5hQa0utRFBpIY3yhoIyE
-      - DB01_DUMP_INTERVAL=5   # backup every 5 minutes
+      - DB01_BACKUP_INTERVAL=5 # backup every 5 minutes
       # - DB01_DUMP_BEGIN=0000 # backup starts at midnight; when not set, immediately
       - DB01_CLEANUP_TIME=60   # clean backups when they are older than 60 minutes
       - DB01_CHECKSUM=SHA1     # Set Checksum to be SHA1
@@ -57,7 +57,7 @@ services:
       # Add the azure storage account here
       - DB01_BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name}
       # Add the azure storage account key here
-      - SB01_BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
+      - DB01_BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
       - DB01_BLOBXFER_REMOTE_PATH=docker-db-backup
     restart: always
     networks:

@@ -46,7 +46,7 @@ services:
       - DB01_NAME=test1
       - DB01_USER=sa
       - DB01_PASS=5hQa0utRFBpIY3yhoIyE
-      - DB01_DUMP_INTERVAL=1   # backup every minute
+      - DB01_BACKUP_INTERVAL=1 # backup every minute
       # - DB01_DUMP_BEGIN=0000 # backup starts at midnight; when unset, immediately
       - DB01_CLEANUP_TIME=5    # clean backups when they are older than 5 minutes
       - DB01_CHECKSUM=NONE

@@ -8,12 +8,15 @@ source /assets/functions/10-db-backup
 source /assets/defaults/10-db-backup
 bootstrap_variables backup_init {{BACKUP_NUMBER}}
 bootstrap_variables parse_variables {{BACKUP_NUMBER}}
-PROCESS_NAME="{{BACKUP_NUMBER}}-${backup_job_db_host}__${backup_job_db_name}"
+if [ -z "${backup_job_db_name}" ]; then
+    PROCESS_NAME="{{BACKUP_NUMBER}}${backup_job_db_host//\//_}"
+else
+    PROCESS_NAME="{{BACKUP_NUMBER}}-${backup_job_db_host//\//_}__${backup_job_db_name}"
+fi

 trap ctrl_c INT

-if [[ "${MODE,,}" =~ "standalone" ]] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
+if [[ "${MODE,,}" =~ "standalone" ]] || [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
     print_debug "Detected Manual Mode"
     persist=false
     backup_job_backup_begin=+0
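The new PROCESS_NAME lines rely on bash pattern substitution: `${var//pattern/replacement}` replaces every occurrence, so a URL-style host such as a CouchDB endpoint can no longer inject slashes into the process name. A standalone illustration:

```bash
# ${var//\//_} replaces every "/" with "_" (the "/" in the pattern is escaped).
backup_job_db_host="http://couchdb:5984"
echo "${backup_job_db_host//\//_}"   # prints: http:__couchdb:5984
```
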
@@ -21,7 +24,6 @@ else
     silent sleep {{BACKUP_NUMBER}}
     time_last_run=0
     time_current=$(date +'%s')

     if [[ "${backup_job_backup_begin}" =~ ^\+(.*)$ ]]; then
         print_debug "BACKUP_BEGIN is a jump of minute starting with +"
         timer plusvalue
@@ -31,10 +33,21 @@ else
     elif [[ "${backup_job_backup_begin}" =~ ([0-9]{4})-([0-9]{2})-([0-9]{2})[[:space:]]([0-9]{2}):([0-9]{2}):([0-9]{2}) ]]; then
         print_debug "BACKUP_BEGIN is a full date timestamp"
         timer datetime
+    #elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(.*((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then # Allow slashes, yet not supporting advanced cron yet
     elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then
         print_debug "BACKUP_BEGIN is a cron expression"
+        if var_false "${CRON_ALTERNATE}"; then
             time_last_run=$(date +"%s")
+            backup_job_backup_begin=${backup_job_backup_begin//\"/}
+            backup_job_backup_begin=${backup_job_backup_begin//\'/}
             timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
+        else
+            echo "${backup_job_backup_begin} /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}/run now" > /tmp/.container/cron/{{BACKUP_NUMBER}}-backup
+            crontab -l | { cat; echo "${backup_job_backup_begin} /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}/run now"; } | crontab -
+            s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
+            exit 0
+        fi
     else
         print_error "_BACKUP_BEGIN is invalid - Unable to perform scheduling"
         cat <<EOF
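The alternate-cron branch uses the usual list-append-reload idiom to register the job non-interactively; a generic sketch (schedule and command are placeholders):

```bash
# `crontab -l` prints the current table (stderr silenced in case none exists),
# the group appends one line, and `crontab -` installs the combined table.
crontab -l 2>/dev/null | { cat; echo "0 3 * * * /usr/local/bin/run-backup now"; } | crontab -
```
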
@@ -94,8 +107,8 @@ while true; do
         s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
     else
         if [ ! "${time_cron}" = "true" ]; then
-            print_notice "Sleeping for another $(($backup_job_backup_interval*60-backup_job_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($backup_job_backup_interval*60-backup_job_total_time))))" +'%Y-%m-%d %T %Z') "
-            silent sleep $(($backup_job_backup_interval*60-backup_job_total_time))
+            print_notice "Sleeping for another $((backup_job_backup_interval*60-backup_job_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$((backup_job_backup_interval*60-backup_job_total_time))))" +'%Y-%m-%d %T %Z') "
+            silent sleep $((backup_job_backup_interval*60-backup_job_total_time))
         else
             time_last_run=$(date +"%s")
             timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"

@@ -1,8 +1,9 @@
 #!/command/with-contenv bash

 BACKUP_JOB_CONCURRENCY=${BACKUP_JOB_CONCURRENCY:-"1"}
+CRON_ALTERNATE=${CRON_ALTERNATE:-"TRUE"}
 DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
-DBBACKUP_GROUP=${DBBACKUP_USER:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
+DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
 DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
 DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
 DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
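The `DBBACKUP_GROUP` change is a genuine bugfix, not a rename: `${DBBACKUP_USER:-...}` tests and expands `DBBACKUP_USER`, which the line above always sets, so a user-supplied `DBBACKUP_GROUP` was silently ignored. A minimal reproduction:

```bash
# Before the fix, the "broken" form always yields the user, never the group.
DBBACKUP_USER=dbbackup
DBBACKUP_GROUP=staff
broken=${DBBACKUP_USER:-"${DBBACKUP_USER}"}   # -> dbbackup (group override lost)
fixed=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"}   # -> staff (falls back only if unset)
echo "$broken $fixed"
```
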
@@ -15,6 +16,7 @@ DEFAULT_CREATE_LATEST_SYMLINK=${DEFAULT_CREATE_LATEST_SYMLINK:-"TRUE"}
 DEFAULT_ENABLE_PARALLEL_COMPRESSION=${DEFAULT_ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
 DEFAULT_ENCRYPT=${DEFAULT_ENCRYPT:-"FALSE"}
 DEFAULT_FILESYSTEM_PATH=${DEFAULT_FILESYSTEM_PATH:-"/backup"}
+DEFAULT_FILESYSTEM_PATH_PERMISSION=${DEFAULT_FILESYSTEM_PATH_PERMISSION:-"700"}
 DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
 DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"}
 DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"}

@@ -6,11 +6,11 @@ bootstrap_filesystem() {
         mkdir -p "${backup_job_filesystem_path}"
     fi
     if [ "$(stat -c %U "${backup_job_filesystem_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_path}" ; fi
-    if [ "$(stat -c %a "${backup_job_filesystem_path}")" != "${backup_job_filesystem_permission}" ] ; then chmod -R "${backup_job_filesystem_permission}" "${backup_job_filesystem_path}" ; fi
+    if [ "$(stat -c %a "${backup_job_filesystem_path}")" != "${backup_job_filesystem_path_permission}" ] ; then chmod "${backup_job_filesystem_path_permission}" "${backup_job_filesystem_path}" ; fi

     if [ -d "${backup_job_filesystem_archive_path}" ]; then
         if [ "$(stat -c %U "${backup_job_filesystem_archive_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_archive_path}" ; fi
-        if [ "$(stat -c %a "${backup_job_filesystem_archive_path}")" != "${backup_job_filesystem_permission}" ] ; then chmod -R "${backup_job_filesystem_permission}" "${backup_job_filesystem_archive_path}" ; fi
+        if [ "$(stat -c %a "${backup_job_filesystem_archive_path}")" != "${backup_job_filesystem_path_permission}" ] ; then chmod "${backup_job_filesystem_path_permission}" "${backup_job_filesystem_archive_path}" ; fi
     fi

     if [ ! -d "${LOG_PATH}" ]; then
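These guards depend on two coreutils `stat` format specifiers: `%U` prints the owning user and `%a` the octal permission bits. Checking before `chown`/`chmod` avoids rewriting metadata on every run, which matters on large backup trees; a reduced sketch:

```bash
# Only touch ownership/permissions when they actually differ.
dir=/backup
[ "$(stat -c %U "$dir")" != "dbbackup" ] && chown dbbackup:dbbackup "$dir"
[ "$(stat -c %a "$dir")" != "700" ] && chmod 700 "$dir"
```
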
@@ -48,7 +48,8 @@ bootstrap_variables() {
             DEFAULT_USER \
             DEFAULT_PASS \
             DEFAULT_ENCRYPT_PASSPHRASE \
-            DEFAULT_ENCRYPT_PUBKEY \
+            DEFAULT_ENCRYPT_PUBLIC_KEY \
+            DEFAULT_ENCRYPT_PRIVATE_KEY \
             DEFAULT_MONGO_CUSTOM_URI \
             DEFAULT_MYSQL_TLS_CA_FILE \
             DEFAULT_MYSQL_TLS_CERT_FILE \
@@ -74,7 +75,8 @@ bootstrap_variables() {
             DB"${backup_instance_number}"_USER \
             DB"${backup_instance_number}"_PASS \
             DB"${backup_instance_number}"_ENCRYPT_PASSPHRASE \
-            DB"${backup_instance_number}"_ENCRYPT_PUBKEY \
+            DB"${backup_instance_number}"_ENCRYPT_PUBLIC_KEY \
+            DB"${backup_instance_number}"_ENCRYPT_PRIVATE_KEY \
             DB"${backup_instance_number}"_MONGO_CUSTOM_URI \
             DB"${backup_instance_number}"_MYSQL_TLS_CA_FILE \
             DB"${backup_instance_number}"_MYSQL_TLS_CERT_FILE \
@@ -92,7 +94,7 @@ bootstrap_variables() {
             DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \
             DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \
             BLOBXFER_STORAGE_ACCOUNT \
-            BLOBXFER_STORAGE_KEY \
+            BLOBXFER_STORAGE_ACCOUNT_KEY \
             DB_HOST \
             DB_NAME \
             DB_PORT \
@@ -151,18 +153,38 @@ bootstrap_variables() {
             fi
             ##

+            if grep -qo ".*_NAME='.*'" "${backup_instance_vars}"; then
+                print_debug "[bootstrap_variables] [backup_init] Found _NAME variable with quotes"
+                sed -i "s|_NAME='\(.*\)'|_NAME=\1|g" "${backup_instance_vars}"
+            fi
+
+            if grep -qo ".*_PASS='.*'" "${backup_instance_vars}"; then
+                print_debug "[bootstrap_variables] [backup_init] Found _PASS variable with quotes"
+                sed -i "s|_PASS='\(.*\)'|_PASS=\1|g" "${backup_instance_vars}"
+            fi
+
+            if grep -qo "MONGO_CUSTOM_URI='.*'" "${backup_instance_vars}"; then
+                print_debug "[bootstrap_variables] [backup_init] Found _MONGO_CUSTOM_URI variable with quotes"
+                sed -i "s|MONGO_CUSTOM_URI='\(.*\)'|MONGO_CUSTOM_URI=\1|g" "${backup_instance_vars}"
+            fi
+
+            if grep -qo ".*_OPTS='.*'" "${backup_instance_vars}"; then
+                print_debug "[bootstrap_variables] [backup_init] Found _OPTS variable with quotes"
+                sed -i "s|_OPTS='\(.*\)'|_OPTS=\1|g" "${backup_instance_vars}"
+            fi

     transform_backup_instance_variable() {
         if grep -q "^DB${1}_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
-            export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)"
+            export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
         elif grep -q "^DB_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
             # Allow old legacy work, perhaps remove old DB_ functionality in future? This should allow for seamless upgrades
             #print_warn "Legacy Variable 'DB_${2}'' detected - Please upgrade your variables as they will be removed in version 4.3.0"
-            export "$3"="$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2)"
+            export "$3"="$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
         elif grep -q "^${2}=" "${backup_instance_vars}" && [ "$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
             print_warn "Legacy unsupported variable '${2}' detected - Please upgrade your variables as they will be removed in version 4.3.0"
-            export "$3"="$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2)"
+            export "$3"="$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
         elif grep -q "^DEFAULT_${2}=" "${backup_instance_vars}" && [ "$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
-            export "$3"="$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2)"
+            export "$3"="$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
         fi
     }
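The `-f2` to `-f2-` change in the export lines is what fixes passwords containing `==` (see 4.0.10 above): with `-f2`, cut returns only the text between the first and second delimiters, truncating base64-style values:

```bash
# -f2 stops at the next "=", -f2- keeps everything after the first one.
line="DB01_PASS=c2VjcmV0cGFzcw=="
echo "$line" | cut -d = -f2    # -> c2VjcmV0cGFzcw   (truncated)
echo "$line" | cut -d = -f2-   # -> c2VjcmV0cGFzcw== (intact)
```
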
@@ -176,7 +198,7 @@ bootstrap_variables() {
     transform_backup_instance_variable "${backup_instance_number}" BLACKOUT_END backup_job_snapshot_blackout_finish
     transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path
     transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account
-    transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_KEY backup_job_blobxfer_storage_key
+    transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key
     transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum
     transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time
     transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression
@@ -185,12 +207,14 @@ bootstrap_variables() {
     transform_backup_instance_variable "${backup_instance_number}" ENABLE_PARALLEL_COMPRESSION backup_job_parallel_compression
     transform_backup_instance_variable "${backup_instance_number}" ENCRYPT backup_job_encrypt
     transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PASSPHRASE backup_job_encrypt_passphrase
-    transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBKEY backup_job_encrypt_pubkey
-    transform_backup_instance_variable "${backup_instance_number}" EXTRA_DUMP_OPTS backup_job_extra_dump_opts
+    transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PRIVATE_KEY backup_job_encrypt_private_key
+    transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBLIC_KEY backup_job_encrypt_public_key
+    transform_backup_instance_variable "${backup_instance_number}" EXTRA_BACKUP_OPTS backup_job_extra_backup_opts
     transform_backup_instance_variable "${backup_instance_number}" EXTRA_ENUMERATION_OPTS backup_job_extra_enumeration_opts
     transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts
     transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path
     transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH backup_job_filesystem_path
+    transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH_PERMISSION backup_job_filesystem_path_permission
     transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PERMISSION backup_job_filesystem_permission
     transform_backup_instance_variable "${backup_instance_number}" GZ_RSYNCABLE backup_job_gz_rsyncable
     transform_backup_instance_variable "${backup_instance_number}" HOST backup_job_db_host
@@ -459,12 +483,7 @@ backup_couch() {
     prepare_dbbackup
     backup_job_filename=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt
     backup_job_filename_base=couch_${backup_job_db_name}_${backup_job_db_host#*//}
     compression
-    pre_dbbackup ${backup_job_db_name}
-    write_log notice "Dumping CouchDB database: '${backup_job_db_name}' ${compression_string}"
-    if var_true "${DEBUG_BACKUP_COUCH}" ; then debug on; fi
-    run_as_user curl -sSL -X GET ${backup_job_db_host}:${backup_job_db_port}/${backup_job_db_name}/_all_docs?include_docs=true | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
-    exit_code=$?
     if var_true "${DEBUG_BACKUP_COUCH}" ; then debug off; fi
     check_exit_code backup "${backup_job_filename}"
     timer backup finish
@@ -473,6 +492,7 @@ backup_couch() {
     move_dbbackup
     check_exit_code move "${backup_job_filename}"
     post_dbbackup ${backup_job_db_name}
+    cleanup_old_data
 }

 backup_influx() {
@@ -495,24 +515,22 @@ backup_influx() {
     backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}
     backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
     if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
-    compression
     pre_dbbackup "${db}"
     write_log notice "Dumping Influx database: '${db}'"
     if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
-    run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} "${TEMP_PATH}"/"${backup_job_filename_dir}"
+    run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} "${temporary_directory}"/"${backup_job_filename}"
     exit_code=$?
-    check_exit_code backup "${backup_job_filename_dir}"
-    write_log notice "Creating archive file of '${backup_job_filename_dir}' with tar ${compression_string}"
-    run_as_user tar cf - "${TEMP_PATH}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
-    backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
-    backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
+    check_exit_code backup "${backup_job_filename}"
+    compression
+    create_archive
     if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
     timer backup finish
     file_encryption
     generate_checksum
     move_dbbackup
-    check_exit_code move "${backup_job_filename_dir}"
+    check_exit_code move "${backup_job_filename}"
     post_dbbackup "${db}"
+    cleanup_old_data
     done
     ;;
     2 )
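InfluxDB dumps are directories rather than flat files, so the inline tar pipeline visible in the removed lines has been folded into a shared `create_archive` helper; a sketch of the underlying idiom it presumably wraps (paths and compressor are placeholders):

```bash
# Stream a directory dump through tar and a compressor into one flat file,
# which the rest of the pipeline (checksum, encryption, move) can then treat
# like any other dump.
dump_dir=/tmp/backups/job01/influx_db1
tar cf - "${dump_dir}" | zstd > "${dump_dir}.tar.zst"
```
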
@@ -524,23 +542,22 @@ backup_influx() {
     if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
     backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}
     backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
-    compression
     pre_dbbackup "${db}"
     write_log notice "Dumping Influx2 database: '${db}'"
     if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
-    run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --compression none "${TEMP_PATH}"/"${backup_job_filename_dir}"
+    run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --compression none "${temporary_directory}"/"${backup_job_filename}"
     exit_code=$?
-    check_exit_code backup "${backup_job_filename_dir}"
-    if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
+    check_exit_code backup "${backup_job_filename}"
+    compression
     create_archive
-    backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
-    backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
+    if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
     timer backup finish
     file_encryption
     generate_checksum
     move_dbbackup
-    check_exit_code move "${backup_job_filename_dir}"
+    check_exit_code move "${backup_job_filename}"
     post_dbbackup "${db}"
+    cleanup_old_data
     done
     ;;
     esac
@@ -559,15 +576,15 @@ backup_mongo() {
         compression_string="and compressing with gzip"
     fi
     if [ -n "${backup_job_mongo_custom_uri}" ] ; then
-        mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
+        mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}"
     else
-        mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
+        mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}"
     fi
     if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
     pre_dbbackup "${backup_job_db_name}"
-    write_log notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
+    write_log notice "Dumping MongoDB database: '${backup_job_db_name}' ${compression_string}"
     if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi
-    silent run_as_user ${play_fair} mongodump --archive=${TEMP_PATH}/${backup_job_filename} ${mongo_compression} ${mongo_backup_parameter}
+    silent run_as_user ${play_fair} mongodump --archive=${temporary_directory}/${backup_job_filename} ${mongo_compression} ${mongo_backup_parameter}
     exit_code=$?
     if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
     check_exit_code backup "${backup_job_filename}"

@@ -577,6 +594,7 @@ backup_mongo() {
     move_dbbackup
     check_exit_code move "${backup_job_filename}"
     post_dbbackup "${backup_job_db_name}"
+    cleanup_old_data
     if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
 }
@@ -587,16 +605,16 @@ backup_mssql() {
     backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.bak
     backup_job_filename_base=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}
     pre_dbbackup "${backup_job_db_name}"
-    write_log notice "Dumping MSSQL database: '${DB_NAME}'"
+    write_log notice "Dumping MSSQL database: '${backup_job_db_name}'"
     if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
-    silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP DATABASE [${backup_job_db_name}] TO DISK = N'${TEMP_PATH}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+    silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP DATABASE [${backup_job_db_name}] TO DISK = N'${temporary_directory}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
     exit_code=$?
     if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
     backup_job_filename_original=${backup_job_filename}
     compression
     pre_dbbackup all
     if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
-    run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
+    run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
     check_exit_code backup "${backup_job_filename}"
     if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
     timer backup finish
@@ -605,28 +623,30 @@ backup_mssql() {
|
|||||||
move_dbbackup
|
move_dbbackup
|
||||||
check_exit_code move "${backup_job_filename}"
|
check_exit_code move "${backup_job_filename}"
|
||||||
post_dbbackup "${backup_job_db_name}"
|
post_dbbackup "${backup_job_db_name}"
|
||||||
|
cleanup_old_data
|
||||||
;;
|
;;
|
||||||
trn|transaction )
|
trn|transaction )
|
||||||
prepare_dbbackup
|
prepare_dbbackup
|
||||||
backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.trn
|
backup_job_filename=mssql_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.trn
|
||||||
backup_job_filename_base=mssql_${backup_job_db_name,,}_trn_${backup_job_db_host,,}
|
backup_job_filename_base=mssql_${backup_job_db_name,,}_trn_${backup_job_db_host,,}
|
||||||
pre_dbbackup "${backup_job_db_name}"
|
pre_dbbackup "${backup_job_db_name}"
|
||||||
write_log notice "Dumping MSSQL database: '${DB_NAME}'"
|
write_log notice "Dumping MSSQL database: '${backup_job_db_name}'"
|
||||||
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
|
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
|
||||||
silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP LOG [${backup_job_db_name}] TO DISK = N'${TEMP_PATH}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-log', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
|
silent run_as_user ${play_fair} /opt/mssql-tools18/bin/sqlcmd -C -S ${backup_job_db_host}\,${backup_job_db_port} -U ${backup_job_db_user} -P ${backup_job_db_pass} -Q "BACKUP LOG [${backup_job_db_name}] TO DISK = N'${temporary_directory}/${backup_job_filename}' WITH NOFORMAT, NOINIT, NAME = '${backup_job_db_name}-log', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
|
||||||
exit_code=$?
|
exit_code=$?
|
||||||
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
|
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
|
||||||
backup_job_filename_original=${backup_job_filename}
|
backup_job_filename_original=${backup_job_filename}
|
||||||
compression
|
compression
|
||||||
pre_dbbackup all
|
pre_dbbackup all
|
||||||
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
|
run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
|
||||||
check_exit_code backup "${backup_job_filename}"
|
|
||||||
file_encryption
|
file_encryption
|
||||||
timer backup finish
|
timer backup finish
|
||||||
generate_checksum
|
generate_checksum
|
||||||
move_dbbackup
|
move_dbbackup
|
||||||
check_exit_code move "${backup_job_filename}"
|
check_exit_code move "${backup_job_filename}"
|
||||||
post_dbbackup "${backup_job_db_name}"
|
post_dbbackup "${backup_job_db_name}"
|
||||||
|
cleanup_old_data
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
}
|
}
|
||||||
@@ -668,7 +688,7 @@ backup_mysql() {
 pre_dbbackup "${db}"
 write_log notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
 if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
-run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} $db | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
+run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
 exit_code=$?
 if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
 check_exit_code backup "${backup_job_filename}"
@@ -678,6 +698,7 @@ backup_mysql() {
 move_dbbackup
 check_exit_code move "${backup_job_filename}"
 post_dbbackup "${db}"
+cleanup_old_data
 done
 else
 write_log debug "Not splitting database dumps into their own files"
@@ -688,7 +709,7 @@ backup_mysql() {
 pre_dbbackup all
 write_log notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
 if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
-run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
+run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
 exit_code=$?
 if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
 check_exit_code backup "${backup_job_filename}"
@@ -698,6 +719,7 @@ backup_mysql() {
 move_dbbackup
 check_exit_code move "${backup_job_filename}"
 post_dbbackup all
+cleanup_old_data
 fi
 }

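Taken together, the MySQL hunks rename `_DUMP_OPTS` to `_BACKUP_OPTS` and point the dump at the per-job `${temporary_directory}`. As a minimal, standalone sketch of the dump-compress-write pipeline these lines implement (host, user, and database names are illustrative, and the container's helper wrappers are omitted):

```bash
#!/usr/bin/env bash
# Sketch: dump a database, compress the stream, and land the result in a
# per-job scratch directory, while keeping a usable pipeline exit status.
set -o pipefail   # pipeline status is non-zero if any stage fails, not just tee

db_host="db" db_user="root" db_name="app"        # illustrative values
temp_dir=$(mktemp -d)                            # per-job scratch space
outfile="${temp_dir}/${db_name}_$(date +%Y%m%d-%H%M%S).sql.gz"

# Assumes authentication is handled elsewhere, e.g. via ~/.my.cnf.
mysqldump -h "${db_host}" -u"${db_user}" "${db_name}" \
    | gzip \
    | tee "${outfile}" > /dev/null
exit_code=$?                                     # non-zero if dump, gzip, or tee failed

echo "dump finished with exit code ${exit_code}: ${outfile}"
```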
@@ -709,7 +731,7 @@ backup_pgsql() {
 pre_dbbackup "globals"
 print_notice "Dumping PostgresSQL globals: with 'pg_dumpall -g' ${compression_string}"
 if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
-run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
+run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
 exit_code=$?
 if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
 check_exit_code "${backup_job_filename}"
@@ -719,6 +741,7 @@ backup_pgsql() {
 move_dbbackup
 check_exit_code move "${backup_job_filename}"
 post_dbbackup "globals"
+cleanup_old_data
 }

 if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
@@ -730,7 +753,7 @@ backup_pgsql() {
 fi
 if [ "${backup_job_db_name,,}" = "all" ] ; then
 write_log debug "Preparing to back up all databases"
-db_names=$(run_as_user psql -h ${backup_job_db_host} -U ${backup_job_db_user} -p ${backup_job_db_port} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+db_names=$(psql -h ${backup_job_db_host} -U ${backup_job_db_user} -p ${backup_job_db_port} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
 if [ -n "${backup_job_db_name_exclude}" ] ; then
 db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n')
 for db_exclude in ${db_names_exclusions} ; do
@@ -757,7 +780,7 @@ backup_pgsql() {
 pre_dbbackup "${db}"
 write_log notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
 if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
-run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
+run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
 exit_code=$?
 if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
 check_exit_code backup "${backup_job_filename}"
@@ -767,6 +790,7 @@ backup_pgsql() {
 move_dbbackup
 check_exit_code move "${backup_job_filename}"
 post_dbbackup "${db}"
+cleanup_old_data
 done
 if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi
 else
@@ -778,7 +802,7 @@ backup_pgsql() {
 pre_dbbackup all
 write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
 if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
-tmp_db_names=$(run_as_user psql -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+tmp_db_names=$(psql -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
 for r_db_name in $(echo $db_names | xargs); do
 tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
 done
@@ -786,7 +810,7 @@ backup_pgsql() {
 for x_db_name in ${tmp_db_names} ; do
 pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
 done
-run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
+run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
 exit_code=$?
 if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
 check_exit_code backup "${backup_job_filename}"
@@ -796,6 +820,7 @@ backup_pgsql() {
 move_dbbackup
 check_exit_code move "${backup_job_filename}"
 post_dbbackup all
+cleanup_old_data
 if var_true "${_postgres_backup_globals}" ; then backup_pgsql_globals; fi
 fi
 }
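The pg_dumpall hunk above assembles repeated `--exclude-database=` flags from the computed exclusion list. A reduced sketch of that expansion, using an invented `exclude_list` value:

```bash
#!/usr/bin/env bash
# Sketch: expand "db1,db2" into "--exclude-database=db1 --exclude-database=db2",
# the same shape the hunk above builds up in ${pgexclude_arg}.
exclude_list="template_shadow,scratch"   # illustrative value

pgexclude_arg=""
for name in $(echo "${exclude_list}" | tr ',' ' '); do
    pgexclude_arg="${pgexclude_arg} --exclude-database=${name}"
done

echo "pg_dumpall${pgexclude_arg}"
# prints: pg_dumpall --exclude-database=template_shadow --exclude-database=scratch
```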
@@ -806,7 +831,7 @@ backup_redis() {
 backup_job_filename=redis_all_${backup_job_db_host,,}_${now}.rdb
 backup_job_filename_base=redis_${backup_job_db_host,,}
 if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
-echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${TEMP_PATH}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}
+echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${temporary_directory}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}
 sleep 10
 try=5
 while [ $try -gt 0 ] ; do
@@ -826,7 +851,7 @@ backup_redis() {
 compression
 pre_dbbackup all
 if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
-run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
+run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
 if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi
 timer backup finish
 check_exit_code backup "${backup_job_filename}"
@@ -835,6 +860,7 @@ backup_redis() {
 move_dbbackup
 check_exit_code move "${backup_job_filename}"
 post_dbbackup all
+cleanup_old_data
 }

 backup_sqlite3() {
@@ -842,22 +868,28 @@ backup_sqlite3() {
 db=$(basename "${backup_job_db_host}")
 db="${db%.*}"
 backup_job_filename=sqlite3_${db}_${now}.sqlite3
-backup_job_filename_base=sqlite3_${db}.sqlite3
-compression
+backup_job_filename_base=sqlite3_${db}
 pre_dbbackup "${db}"
 write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}"
 if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug on; fi
-silent run_as_user ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${TEMP_PATH}/backup.sqlite3'"
+silent ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${temporary_directory}/backup_${now}.sqlite3'"
 exit_code=$?
 check_exit_code backup "${backup_job_filename}"
-run_as_user ${play_fair} cat "${TEMP_PATH}"/backup.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}/${backup_job_filename}" > /dev/null
-timer backup finish
+if [ ! -f "${temporary_directory}"/backup_${now}.sqlite3 ] ; then
+print_error "SQLite3 backup failed! Exitting"
+return 1
+fi
+compression
+run_as_user ${play_fair} cat "${temporary_directory}"/backup_${now}.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}/${backup_job_filename}" > /dev/null
+rm -rf "${temporary_directory}"/backup_${now}.sqlite3
 if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug off; fi
+timer backup finish
 file_encryption
 generate_checksum
 move_dbbackup
 check_exit_code move "${backup_job_filename}"
 post_dbbackup "${db}"
+cleanup_old_data
 }

 check_availability() {
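The reworked sqlite3 branch only compresses once the `.backup` snapshot is confirmed on disk. A self-contained sketch of that snapshot-verify-compress flow, with placeholder paths:

```bash
#!/usr/bin/env bash
# Sketch: take a consistent SQLite snapshot, verify it landed, then compress it.
src_db="/data/app.sqlite3"               # placeholder: source database path
temp_dir=$(mktemp -d)
snapshot="${temp_dir}/backup_$(date +%Y%m%d-%H%M%S).sqlite3"

# .backup uses SQLite's online-backup API, so it is safe on a live database.
sqlite3 "${src_db}" ".backup '${snapshot}'"

if [ ! -f "${snapshot}" ]; then
    echo "SQLite3 backup failed" >&2
    exit 1
fi

gzip -c "${snapshot}" > "${snapshot}.gz"   # compress only after the check
rm -f "${snapshot}"                        # drop the raw snapshot
```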
@@ -1016,8 +1048,12 @@ cleanup_old_data() {
 write_log info "Cleaning up old backups on filesystem"
 run_as_user mkdir -p "${backup_job_filesystem_path}"
 find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
+if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
+write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
+else
 write_log info "Syncing changes via blobxfer"
-silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path} --delete --delete-only
+silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
+fi
 ;;
 "file" | "filesystem" )
 write_log info "Cleaning up old backups on filesystem"
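This added guard matches the 4.0.29 changelog entry (skip blobxfer when either credential is absent). A minimal sketch of the same check in isolation; the variable names are placeholders, and the upload command is only echoed:

```bash
#!/usr/bin/env bash
# Sketch: skip a remote sync entirely when either credential is unset or empty,
# instead of letting the tool fail mid-run with an authentication error.
storage_account="${BLOBXFER_STORAGE_ACCOUNT:-}"
storage_key="${BLOBXFER_STORAGE_ACCOUNT_KEY:-}"

if [ -z "${storage_account}" ] || [ -z "${storage_key}" ]; then
    echo "blobxfer credentials missing - skipping sync" >&2
else
    echo "would run: blobxfer upload --storage-account ${storage_account} ..."
fi
```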
@@ -1122,8 +1158,10 @@ compression() {
 create_archive() {
 if var_true "${DEBUG_CREATE_ARCHIVE}" ; then debug on; fi
 if [ "${exit_code}" = "0" ] ; then
-write_log notice "Creating archive file of '${backup_job_filename_dir}' with tar ${compression_string}"
-run_as_user tar cf - "${TEMP_PATH}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
+write_log notice "Creating archive file of '${backup_job_filename}' with tar ${compression_string}"
+run_as_user tar cf - "${temporary_directory}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
+backup_job_filename="${backup_job_filename_dir}".tar"${extension}"
+rm -rf "${temporary_directory}"/"${backup_job_filename_dir}"
 else
 write_log error "Skipping creating archive file because backup did not complete successfully"
 fi
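create_archive now also points `backup_job_filename` at the produced tarball and deletes the archived directory. A sketch of the underlying tar-to-stdout pipeline under those assumptions (the paths and the gzip compressor are illustrative):

```bash
#!/usr/bin/env bash
# Sketch: archive a directory to stdout, compress the stream, and keep only
# the tarball, mirroring the create_archive flow above.
set -o pipefail
src_dir="influx_dump"                    # illustrative directory-style backup
temp_dir=$(mktemp -d)
mkdir -p "${temp_dir}/${src_dir}" && echo demo > "${temp_dir}/${src_dir}/f"

tar -C "${temp_dir}" -cf - "${src_dir}" | gzip > "${temp_dir}/${src_dir}.tar.gz"

rm -rf "${temp_dir:?}/${src_dir}"        # drop the directory once archived
echo "archive: ${temp_dir}/${src_dir}.tar.gz"
```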
@@ -1132,10 +1170,11 @@ create_archive() {

 create_schedulers() {
 if var_true "${DEBUG_CREATE_SCHEDULERS}" ; then debug on; fi

 backup() {
-bootstrap_variables upgrade BACKUP
-local backup_instances=$(printenv | sort | grep -c "^DB[0-9]._HOST")
+local backup_instances=$(set -o posix ; set | grep -Pc "^(DB[0-9]._HOST=|.*MONGO_CUSTOM_URI=)")
 print_debug "[create_schedulers] Found '${backup_instances}' DB_HOST instances"

 if [ -n "${DB_HOST}" ] && [ "${backup_instances}" ]; then
 backup_instances=1;
 print_debug "[create_schedulers] Detected using old DB_ variables"
@@ -1145,13 +1184,14 @@ create_schedulers() {
 instance=$(printf "%02d" $instance)
 cp -R /assets/dbbackup/template-dbbackup /etc/services.available/dbbackup-"${instance}"
 sed -i "s|{{BACKUP_NUMBER}}|${instance}|g" /etc/services.available/dbbackup-"${instance}"/run
+if [ "${MODE,,}" = "manual" ] ; then service_stop dbbackup-"${instance}" ; fi
 cat <<EOF >> /usr/bin/backup"${instance}"-now
 #!/bin/bash
 source /assets/functions/00-container
 PROCESS_NAME=db-backup${instance}
 print_info "Starting Manual Backup for db-backup${instance}"
-/var/run/s6/legacy-services/dbbackup-${instance}/run now
+#/var/run/s6/legacy-services/dbbackup-${instance}/run now
+/etc/services.available/dbbackup-${instance}/run now

 EOF
 chmod +x /usr/bin/backup"${instance}"-now
@@ -1161,11 +1201,11 @@ EOF
 cat <<EOF > /usr/bin/backup-now
 #!/bin/bash

-/usr/bin/backup${instance}-now
+/usr/bin/backup${instance}-now now

 EOF
 else
-echo "/usr/bin/backup${instance}-now" >> /usr/bin/backup-now
+echo "/usr/bin/backup${instance}-now now" >> /usr/bin/backup-now
 fi

 instance=$(echo "${instance} +1" | bc)
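The wrapper scripts in the hunks above are generated with unquoted heredocs, so `${instance}` is expanded when the file is written, not when it runs. A tiny sketch of that generate-then-chmod pattern, with an invented wrapper path:

```bash
#!/usr/bin/env bash
# Sketch: emit a wrapper script via an unquoted heredoc so ${instance}
# is baked in at generation time, then mark it executable.
instance="01"
wrapper="$(mktemp -d)/backup${instance}-now"

cat <<EOF > "${wrapper}"
#!/bin/bash
echo "Starting Manual Backup for db-backup${instance}"
EOF

chmod +x "${wrapper}"
"${wrapper}"    # prints: Starting Manual Backup for db-backup01
```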
@@ -1179,7 +1219,7 @@ EOF
 }

 ctrl_c() {
-sed -i "/^{{BACKUP_NUMBER}}/d" /tmp/.container/db-backup-backups
+sed -i "/^${backup_instance_number}/d" /tmp/.container/db-backup-backups
 symlink_log
 print_warn "User aborted"
 exit
@@ -1194,7 +1234,11 @@ db_backup_container_init() {
 debug() {
 case "${1}" in
 off)
+backup_job_log_level=$_original_job_log_level}
+CONTAINER_LOG_LEVEL=${_original_container_log_level}
 DEBUG_MODE=${_original_debug_mode}
+SHOW_OUTPUT=${_original_show_output}

 if var_true "${DEBUG_MODE}" ; then
 set -x
 else
@@ -1202,9 +1246,25 @@ debug() {
 fi
 ;;
 on)
+if [ -z "${_original_container_log_level}" ]; then
+_original_container_log_level="${CONTAINER_LOG_LEVEL}"
+fi
+if [ -z "${_original_job_log_level}" ]; then
+_original_job_log_level="${backup_job_log_level}"
+fi

 if [ -z "${_original_debug_mode}" ]; then
 _original_debug_mode="${DEBUG_MODE}"
 fi
+if [ -z "${_original_show_output}" ]; then
+_original_show_output="${SHOW_OUTPUT}"
+if ! [[ "${_original_show_output,,}" =~ true|false ]]; then
+__original_show_output="FALSE"
+fi
+fi
+backup_job_log_level=DEBUG
+CONTAINER_LOG_LEVEL=DEBUG
+SHOW_OUTPUT=TRUE
 set -x
 ;;
 esac
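The expanded debug() captures each original setting only on the first `debug on`, so repeated calls cannot overwrite the saved values with DEBUG. A condensed sketch of that idempotent save/restore, using invented variable names rather than the container's:

```bash
#!/usr/bin/env bash
# Sketch: remember a setting once on the first "on", restore it on "off".
debug() {
    case "${1}" in
        on)
            # Only capture the original on the first call, so a second
            # "debug on" does not clobber it with the already-DEBUG value.
            if [ -z "${_orig_log_level:-}" ]; then
                _orig_log_level="${LOG_LEVEL:-notice}"
            fi
            LOG_LEVEL=DEBUG
            set -x
            ;;
        off)
            LOG_LEVEL="${_orig_log_level:-notice}"
            set +x
            ;;
    esac
}

LOG_LEVEL=notice
debug on; debug on          # second call must not capture DEBUG as "original"
debug off
echo "restored LOG_LEVEL=${LOG_LEVEL}"   # prints: restored LOG_LEVEL=notice
```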
@@ -1216,27 +1276,32 @@ file_encryption() {
 if [ "${exit_code}" = "0" ] ; then
 print_debug "[file_encryption] Encrypting"
 output_off
-if [ -n "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_pubkey}" ]; then
+if [ -n "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ]; then
 print_error "Can't encrypt as both ENCRYPT_PASSPHRASE and ENCRYPT_PUBKEY exist!"
 return
-elif [ -n "${backup_job_encrypt_passphrase}" ] && [ -z "${backup_job_encrypt_pubkey}" ]; then
+elif [ -n "${backup_job_encrypt_passphrase}" ] && [ -z "${backup_job_encrypt_public_key}" ]; then
 print_notice "Encrypting with GPG Passphrase"
 encrypt_routines_start_time=$(date +'%s')
 encrypt_tmp_dir=$(run_as_user mktemp -d)
-echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${TEMP_PATH}"/"${backup_job_filename}"
+echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${temporary_directory}"/"${backup_job_filename}"
 rm -rf "${encrypt_tmp_dir}"
-elif [ -z "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_pubkey}" ]; then
-if [ -f "${backup_job_encrypt_pubkey}" ]; then
+elif [ -z "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ] && [ -n "${backup_job_encrypt_private_key}" ]; then
+if [ -f "${backup_job_encrypt_private_key}" ]; then
 encrypt_routines_start_time=$(date +'%s')
-print_notice "Encrypting with GPG Public Key"
+print_notice "Encrypting with GPG Private Key"
 encrypt_tmp_dir=$(run_as_user mktemp -d)
-silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --recipient-file "${backup_job_encrypt_pubkey}" -c "${TEMP_PATH}"/"${backup_job_filename}"
+cat "${backup_job_encrypt_private_key}" | run_as_user tee "${encrypt_tmp_dir}"/private_key.asc > /dev/null
+print_debug "[file_encryption] [key] Importing Private Key"
+silent run_as_user gpg --home ${encrypt_tmp_dir} --batch --import "${encrypt_tmp_dir}"/private_key.asc
+print_debug "[file_encryption] [key] Encrypting to Public Key"
+cat "${backup_job_encrypt_public_key}" | run_as_user tee "${encrypt_tmp_dir}"/public_key.asc > /dev/null
+silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --encrypt --recipient-file "${encrypt_tmp_dir}"/public_key.asc "${temporary_directory}"/"${backup_job_filename}"
 rm -rf "${encrypt_tmp_dir}"
 fi
 fi
-if [ -f "${TEMP_PATH}"/"${backup_job_filename}".gpg ]; then
+if [ -f "${temporary_directory}"/"${backup_job_filename}".gpg ]; then
 print_debug "[file_encryption] Deleting original file"
-rm -rf "${TEMP_PATH:?}"/"${backup_job_filename:?}"
+rm -rf "${temporary_directory:?}"/"${backup_job_filename:?}"
 backup_job_filename="${backup_job_filename}.gpg"

 encrypt_routines_finish_time=$(date +'%s')
@@ -1245,6 +1310,9 @@ file_encryption() {
 - dbbackup.backup.encrypt.duration.[${backup_job_db_host}.${backup_job_db_name}] ${encrypt_routines_total_time}
 EOF
 )
+else
+print_error "Encryption failed! Could not detect encrypted file"
+return 99
 fi
 else
 write_log error "Skipping encryption because backup did not complete successfully"
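The rewritten key branch imports the private key into a throwaway GPG home and then encrypts to the public key via `--recipient-file`. A reduced sketch of encrypting to a key file with a disposable keyring (the key and payload paths are placeholders you would supply):

```bash
#!/usr/bin/env bash
# Sketch: encrypt a file to a recipient's public key using a disposable
# GPG home directory, so nothing touches the user's real keyring.
pubkey="/keys/backup_public.asc"     # placeholder: exported armored public key
payload="/tmp/backup.sql"            # placeholder: file to protect

gnupg_home=$(mktemp -d)
chmod 700 "${gnupg_home}"            # gpg refuses group/world-readable homes

gpg --homedir "${gnupg_home}" --batch --yes \
    --encrypt --recipient-file "${pubkey}" "${payload}" \
  && rm -f "${payload}"              # keep only payload.gpg on success

rm -rf "${gnupg_home}"               # discard the throwaway keyring
```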
@@ -1272,7 +1340,7 @@ generate_checksum() {

 checksum_routines_start_time=$(date +'%s')
 write_log notice "Generating ${checksum_extension^^} sum for '${backup_job_filename}'"
-cd "${TEMP_PATH}"
+cd "${temporary_directory}"
 run_as_user ${checksum_command} "${backup_job_filename}" | run_as_user tee "${backup_job_filename}"."${checksum_extension}" > /dev/null
 chmod ${backup_job_filesystem_permission} "${backup_job_filename}"."${checksum_extension}"
 checksum_value=$(run_as_user cat "${backup_job_filename}"."${checksum_extension}" | awk '{print $1}')
@@ -1294,20 +1362,20 @@ EOF
 notify() {
 if var_true "${DEBUG_NOTIFY}" ; then debug on; fi
 notification_custom() {
-if [ -n "${NOTIFICATION_SCRIPT}" ] ; then
-if var_true "${NOTIFICATION_SCRIPT_SKIP_X_VERIFY}" ; then
-eval "${NOTIFICATION_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
+if [ -n "${NOTIFICATION_CUSTOM_SCRIPT}" ] ; then
+if var_true "${NOTIFICATION_CUSTOM_SCRIPT_SKIP_X_VERIFY}" ; then
+eval "${NOTIFICATION_CUSTOM_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
 else
-if [ -x "${NOTIFICATION_SCRIPT}" ] ; then
-write_log notice "Found NOTIFICATION_SCRIPT environment variable. Executing '${NOTIFICATION_SCRIPT}"
+if [ -x "${NOTIFICATION_CUSTOM_SCRIPT}" ] ; then
+write_log notice "Found NOTIFICATION_CUSTOM_SCRIPT environment variable. Executing '${NOTIFICATION_CUSTOM_SCRIPT}"
 # script timestamp logfile errorcode subject body
-eval "${NOTIFICATION_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
+eval "${NOTIFICATION_CUSTOM_SCRIPT}" "${1}" "${2}" "${3}" "${4}" "${5}"
 else
-write_log error "Can't execute NOTIFICATION_SCRIPT environment variable '${NOTIFICATION_SCRIPT}' as its filesystem bit is not executible!"
+write_log error "Can't execute NOTIFICATION_CUSTOM_SCRIPT environment variable '${NOTIFICATION_CUSTOM_SCRIPT}' as its filesystem bit is not executible!"
 fi
 fi
 else
-print_error "[notifications] No NOTIFICATION_SCRIPT variable set - Skipping sending Custom notifications"
+print_error "[notifications] No NOTIFICATION_CUSTOM_SCRIPT variable set - Skipping sending Custom notifications"
 fi
 }

@@ -1320,18 +1388,20 @@ notify() {
 if [ -z "${SMTP_HOST}" ] ; then write_log error "[notifications] No SMTP_HOST variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
 if [ -z "${SMTP_PORT}" ] ; then write_log error "[notifications] No SMTP_PORT variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
 if var_nottrue "${skip_mail}" ; then
+if ! grep -q ^from /etc/msmtprc ; then
+echo "from ${MAIL_FROM}" >> /etc/msmtprc
+fi
 mail_recipients=$(echo "${MAIL_TO}" | tr "," "\n")
 for mail_recipient in $mail_recipients ; do
-cat <<EOF | s-nail -v \
--r "${MAIL_FROM}" \
--s "[db-backup] [${DOMAIN}] ${3}" \
--S smtp="${SMTP_HOST}":"${SMTP_PORT}" \
-"${mail_recipient}"
+cat <<EOF | msmtp -t "${mail_recipient}" -C /etc/msmtprc
+To: ${mail_recipient}
+Subject: [db-backup] ${4}
 Time: ${1}
 Log File: {2}
 Error Code: ${3}

-${4}
+${5}
 EOF
 done
 fi
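Moving from s-nail to msmtp shifts the recipient and subject out of command-line flags and into RFC 822 headers inside the piped message, which is why the heredoc now carries `To:` and `Subject:` lines. A standalone sketch, assuming an already-configured msmtp and a placeholder recipient:

```bash
#!/usr/bin/env bash
# Sketch: msmtp reads the headers from the piped message itself; -t adds
# recipients found in To:/Cc:/Bcc: headers to those given on the command line.
# Assumes msmtp already has a working account configured (e.g. ~/.msmtprc).
recipient="ops@example.com"          # placeholder recipient

cat <<EOF | msmtp -t "${recipient}"
To: ${recipient}
Subject: [db-backup] test notification

Time: $(date)
Error Code: 0
EOF
```

Note the blank line after `Subject:`; it separates the headers from the body, as SMTP message format requires.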
@@ -1347,7 +1417,7 @@ EOF
 if [ -z "${MATTERMOST_WEBHOOK_URL}" ] ; then write_log error "[notifications] No MATTERMOST_WEBHOOK_URL variable set - Skipping sending Mattermost notifications" ; skip_mattermost=true ; fi
 if var_nottrue "${skip_mattermost}" ; then
 emoji=":bomb:"
-message="*[db-backup] ${3}*\n${4}\n*Timestamp:* ${1}\n*Logfile:* ${2}\n*Error Code: ${3}"
+message="*[db-backup] ${4}*\n${5}\n*Timestamp:* ${1}\n*Logfile:* ${2}\n*Error Code: ${3}"
 mattermost_recipients=$(echo "${MATTERMOST_RECIPIENT}" | tr "," "\n")
 for mattermost_recipient in $mattermost_recipients ; do
 payload="payload={\"channel\": \"${mattermost_recipient//\"/\\\"}\", \"username\": \"${MATTERMOST_USERNAME//\"/\\\"}\", \"text\": \"${message//\"/\\\"}\", \"icon_emoji\": \"${emoji}\"}"
@@ -1372,7 +1442,7 @@ EOF
 for matrix_room in $matrix_rooms ; do
 curl \
 -XPOST \
--d "{\"msgtype\":\"m.text\", \"body\":\"*[db-backup] ${3}*\n${4}\n*Timestamp:* ${1}\n*Logfile:* ${2}\n*Error Code: ${3}*\"}" \
+-d "{\"msgtype\":\"m.text\", \"body\":\"*[db-backup] ${4}*\n${5}\n*Timestamp:* ${1}\n*Logfile:* ${2}\n*Error Code: ${3}*\"}" \
 "${MATRIX_HOST}/_matrix/client/r0/rooms/${matrix_room}/send/m.room.message?access_token=${MATRIX_ACCESS_TOKEN}"
 done
 fi
@@ -1388,7 +1458,7 @@ EOF
 if [ -z "${ROCKETCHAT_WEBHOOK_URL}" ] ; then write_log error "[notifications] No ROCKETCHAT_WEBHOOK_URL variable set - Skipping sending Rocket.Chat notifications" ; skip_rocketchat=true ; fi
 if var_nottrue "${skip_rocketchat}" ; then
 emoji=":bomb:"
-message="*[db-backup] ${3}*\n${4}\n*Timestamp:* ${1}\n*Logfile:* ${2}\n*Error Code: ${3}"
+message="*[db-backup] ${4}*\n${5}\n*Timestamp:* ${1}\n*Logfile:* ${2}\n*Error Code: ${3}"
 rocketchat_recipients=$(echo "${ROCKETCHAT_RECIPIENT}" | tr "," "\n")
 for rocketchat_recipient in $rocketchat_recipients ; do
 payload="payload={\"channel\": \"${rocketchat_recipient//\"/\\\"}\", \"username\": \"${ROCKETCHAT_USERNAME//\"/\\\"}\", \"text\": \"${message//\"/\\\"}\", \"icon_emoji\": \"${emoji}\"}"
@@ -1407,7 +1477,7 @@ EOF
 # $4 body

 if var_true "${ENABLE_NOTIFICATIONS}" ; then
-notification_types=$(echo "${NOTIIFICATION_TYPE}" | tr "," "\n")
+notification_types=$(echo "${NOTIFICATION_TYPE}" | tr "," "\n")
 for notification_type in $notification_types ; do
 case "${notification_type,,}" in
 "custom" )
@@ -1442,8 +1512,8 @@ EOF
 move_dbbackup() {
 if var_true "${DEBUG_MOVE_DBBACKUP}" ; then debug on; fi
 if [ "${exit_code}" = "0" ] ; then
-dbbackup_size="$(run_as_user stat -c%s "${TEMP_PATH}"/"${backup_job_filename}")"
-dbbackup_date="$(run_as_user date -r "${TEMP_PATH}"/"${backup_job_filename}" +'%s')"
+dbbackup_size="$(run_as_user stat -c%s "${temporary_directory}"/"${backup_job_filename}")"
+dbbackup_date="$(run_as_user date -r "${temporary_directory}"/"${backup_job_filename}" +'%s')"

 case "${backup_job_size_value,,}" in
 "b" | "bytes" )
@@ -1457,37 +1527,37 @@ move_dbbackup() {
 ;;
 esac
 if [ "${backup_job_size_value}" = "1" ] ; then
-filesize=$(run_as_user stat -c%s "${TEMP_PATH}"/"${backup_job_filename}")
+filesize=$(run_as_user stat -c%s "${temporary_directory}"/"${backup_job_filename}")
 write_log notice "Backup of '${backup_job_filename}' created with the size of ${filesize} bytes"
 else
-filesize=$(run_as_user du -h "${TEMP_PATH}"/"${backup_job_filename}" | awk '{ print $1}')
+filesize=$(run_as_user du -h "${temporary_directory}"/"${backup_job_filename}" | awk '{ print $1}')
 write_log notice "Backup of '${backup_job_filename}' created with the size of ${filesize}"
 fi

-chmod "${backup_job_filesystem_permission}" "${TEMP_PATH}"/"${backup_job_filename}"
+chmod "${backup_job_filesystem_permission}" "${temporary_directory}"/"${backup_job_filename}"
 case "${backup_job_backup_location,,}" in
 "file" | "filesystem" )
 write_log debug "Moving backup to filesystem"
 run_as_user mkdir -p "${backup_job_filesystem_path}"
-if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${TEMP_PATH}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi
+if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${temporary_directory}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi
 if var_true "${DEBUG_MOVE_DBBACKUP}"; then
 cat <<EOF
-## BEGIN Before Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
+## BEGIN Before Moving file from temporary_directory $(TZ=${TIMEZONE} date)
 ##

-$(ls -l "${TEMP_PATH}"/*)
+$(ls -l "${temporary_directory}"/*)

 ## END
 EOF
 fi
-run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
+run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
 move_exit_code=$?
 if var_true "${DEBUG_MOVE_DBBACKUP}"; then
 cat <<EOF
-## BEGIN After Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
+## BEGIN After Moving file from temporary_directory $(TZ=${TIMEZONE} date)
 ##

-$(ls -l "${TEMP_PATH}"/*)
+$(ls -l "${temporary_directory}"/*)

 ## END

@@ -1529,43 +1599,49 @@ EOF

 [[ ( -n "${backup_job_s3_host}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${backup_job_s3_protocol}://${backup_job_s3_host}"

-silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_PATH}/${backup_job_filename} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${backup_job_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
+silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${temporary_directory}/${backup_job_filename} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${backup_job_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
 move_exit_code=$?
 if [ "${backup_job_checksum}" != "none" ] ; then
-silent run_as_user ${play_fair} aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_PATH}/*.${checksum_extension} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/ ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
-run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"."${checksum_extension}"
+silent run_as_user ${play_fair} aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${temporary_directory}/*.${checksum_extension} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/ ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
+run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}"
 fi

-run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"
+run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
 ;;
 "blobxfer" )
+if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
+write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
+else
 write_log info "Synchronize local storage from S3 Bucket with blobxfer"
-${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path} --delete
+${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete

 write_log info "Moving backup to external storage with blobxfer"
 mkdir -p "${backup_job_filesystem_path}"
-if [ "${backup_job_checksum}" != "none" ] ; then run_as_user mv "${TEMP_PATH}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/; fi
+if [ "${backup_job_checksum}" != "none" ] ; then run_as_user mv "${temporary_directory}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/; fi

-run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
+run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"

-silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --local-path ${backup_job_filesystem_path}
+silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
 move_exit_code=$?

-if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"."${checksum_extension}" ; fi
-run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"
+if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}" ; fi
+run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
+fi
 ;;
 esac
 else
 write_log error "Skipping moving DB Backup to final location because backup did not complete successfully"
 fi

-run_as_user rm -rf "${TEMP_PATH}"/"${backup_job_filename}"
+run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"
 if var_true "${DEBUG_MOVE_DBBACKUP}" ; then debug off; fi
 }

 prepare_dbbackup() {
 timer backup start
 now=$(run_as_user date +"%Y%m%d-%H%M%S")
+temporary_directory=$(mktemp -d -p "${TEMP_PATH}" -t ${backup_instance_number}_dbbackup.XXXXXX)
+chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${temporary_directory}"
 backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}
 backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.sql
 }
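prepare_dbbackup now allocates one mktemp directory per job under `${TEMP_PATH}`, in line with the 4.0.30 note about isolating jobs and cleaning up directory-style backups. A minimal sketch of that lifecycle (the instance number and the trap-based cleanup are illustrative):

```bash
#!/usr/bin/env bash
# Sketch: one scratch directory per backup job, removed when the job ends,
# so concurrent jobs and directory-style dumps never collide.
TEMP_PATH="/tmp/backups"
instance="01"                                    # placeholder job number
mkdir -p "${TEMP_PATH}"

temporary_directory=$(mktemp -d -p "${TEMP_PATH}" -t "${instance}_dbbackup.XXXXXX")
trap 'rm -rf "${temporary_directory}"' EXIT      # guarantee cleanup on exit

echo "working in ${temporary_directory}"
touch "${temporary_directory}/dump.sql"          # stand-in for a real dump
```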
@@ -1588,20 +1664,20 @@ pre_dbbackup() {

 ### Pre Backup Custom Script Support
 if [ -d "/assets/custom-scripts/pre" ] && dir_notempty "/assets/custom-scripts/pre" ; then
-write_log warning "Found Custom Post Scripts in /assets/custom-scripts/pre - Automatically moving them to '${backup_job_script_location_pre}'"
-run_as_user mkdir -p "${backup_job_script_location_pre}"
-silent run_as_user cp /assets/custom-scripts/pre/* "${backup_job_script_location_pre}"
+write_log warn "Found Custom Post Scripts in /assets/custom-scripts/pre - Automatically moving them to '${backup_job_script_location_pre}'"
+mkdir -p "${backup_job_script_location_pre}"
+silent cp -aR /assets/custom-scripts/pre/* "${backup_job_script_location_pre}"
 fi

 if [ -d "${backup_job_script_location_pre}" ] && dir_notempty "${backup_job_script_location_pre}" ; then
 for f in $(find ${backup_job_script_location_pre} -name \*.sh -type f); do
 if var_true "${backup_job_pre_script_x_verify}" ; then
-run_as_user ${f} "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${backup_job_filename}"
+${f} "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${backup_job_filename}"
 else
 if [ -x "${f}" ] ; then
 write_log notice "Executing pre backup custom script : '${f}'"
 ## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
-run_as_user ${f} "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${backup_job_filename}"
+${f} "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${backup_job_filename}"
 else
 write_log error "Can't run pre backup custom script: '${f}' as its filesystem bit is not executible!"
 fi
@@ -1644,11 +1720,11 @@ EOZP
 ### Post Script Support
 if [ -n "${backup_job_post_script}" ] ; then
 if var_true "${backup_job_post_script_x_verify}" ; then
-run_as_user eval "${backup_job_post_script}" "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
+eval "${backup_job_post_script}" "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
 else
 if [ -x "${backup_job_post_script}" ] ; then
 write_log notice "Found POST_SCRIPT environment variable. Executing '${backup_job_post_script}"
-run_as_user eval "${backup_job_post_script}" "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
+eval "${backup_job_post_script}" "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
 else
 write_log error "Can't execute POST_SCRIPT environment variable '${backup_job_post_script}' as its filesystem bit is not executible!"
 fi
@@ -1657,20 +1733,20 @@ EOZP

 ### Post Backup Custom Script Support
 if [ -d "/assets/custom-scripts/" ] && dir_notempty "/assets/custom-scripts" ; then
-write_log warning "Found Custom Post Scripts in /assets/custom-scripts/ - Automatically moving them to '${backup_job_script_location_post}'"
-run_as_user mkdir -p "${backup_job_script_location_post}"
-silent run_as_user cp /assets/custom-scripts/* "${backup_job_script_location_post}"
+write_log warn "Found Custom Post Scripts in /assets/custom-scripts/ - Automatically moving them to '${backup_job_script_location_post}'"
+mkdir -p "${backup_job_script_location_post}"
+cp -aR /assets/custom-scripts/* "${backup_job_script_location_post}"
 fi

 if [ -d "${backup_job_script_location_post}" ] && dir_notempty "${backup_job_script_location_post}" ; then
 for f in $(run_as_user find "${backup_job_script_location_post}" -name \*.sh -type f); do
 if var_true "${backup_job_post_script_x_verify}" ; then
-run_as_user ${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
+${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
 else
 if [ -x "${f}" ] ; then
 write_log notice "Executing post backup custom script : '${f}'"
 ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
-run_as_user ${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
+${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
 else
 write_log error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
 fi
@@ -1680,6 +1756,8 @@ EOZP

     write_log notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
     if var_true "${DEBUG_POST_DBBACKUP}" ; then debug on; fi
+    cd "${TEMP_PATH}"
+    rm -rf "${temporary_directory}"
 }

 process_limiter() {
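The two added lines tear down the per-job scratch directory introduced in 4.0.30, leaving it before removing it so the shell is not sitting inside the path it deletes. A sketch of the overall pattern; the creation side shown here is assumed for illustration, not taken from this diff:

```bash
# Assumed creation side (illustrative; the diff only shows the teardown):
temporary_directory="$(mktemp -d "${TEMP_PATH}/backup-job.XXXXXX")"
cd "${temporary_directory}"

# ... dump, compress, checksum, move ...

# Teardown as added in the hunk above:
cd "${TEMP_PATH}"
rm -rf "${temporary_directory}"
```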
@@ -1706,7 +1784,7 @@ process_limiter() {
 }

 run_as_user() {
-    sudo -u "${DBBACKUP_USER}" $@
+    sudo -Eu "${DBBACKUP_USER}" "$@"
 }

 setup_mode() {
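The `run_as_user` change does two things: `"$@"` keeps each argument intact even when it contains spaces (matching the 4.0.31 entry about databases with spaces in their names), and `-E` makes `sudo` preserve the caller's environment. A self-contained demonstration of the quoting half (function names made up for the demo):

```bash
#!/bin/bash
show_args() { printf 'arg: <%s>\n' "$@"; }

unquoted() { show_args $@ ; }    # old behaviour: word-splitting applies
quoted()   { show_args "$@" ; }  # new behaviour: argument boundaries preserved

unquoted "my database"   # prints two args: <my> and <database>
quoted   "my database"   # prints one arg:  <my database>
```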
@@ -1798,7 +1876,7 @@ timer() {
         if [ "${expression_step}" != "${expression}" ]; then
             for step in ${validate_temp}; do
                 if [ $(( ( step - expression_start ) % expression_step )) -eq 0 ]; then
-                    validate_all="$validate_all ${step}"
+                    validate_all="${validate_all} ${step}"
                 fi
             done
         else
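For context, this loop expands a step expression such as `*/15` into the concrete values it matches: every candidate whose offset from the range start is a multiple of the step is collected. A standalone sketch of the same arithmetic (variable values are illustrative):

```bash
#!/bin/bash
expression_start=0
expression_step=15
validate_all=""

for step in $(seq 0 59); do                     # candidate minutes
    if [ $(( ( step - expression_start ) % expression_step )) -eq 0 ]; then
        validate_all="${validate_all} ${step}"
    fi
done

echo "${validate_all}"   # -> " 0 15 30 45"
```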
@@ -1806,15 +1884,16 @@ timer() {
             fi
         done

-        validate_all=$(echo "${validate_all}" | tr ' ' '\n' | sort -n -u | tr '\n' ' ')
+        validate_all=$(echo "${validate_all}" | tr ' ' '\n' | sort -g -u | tr '\n' ' ')
         for entry in $validate_all; do
-            if [ "${entry}" -ge "${3}" ]; then
+            if [ ${entry} -ge ${3} ]; then
                 echo "${entry}"
                 return 0
             fi
         done

-        echo "${validate_all%% *}"
+        echo "${validate_all// /}"
+        #echo "${validate_all%% *}"
    }

    local cron_compare="${3}"
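The last change in this hunk swaps two different parameter expansions, and the distinction is easy to miss. Because entries are appended as `"${validate_all} ${step}"`, the list carries a leading space, so `%% *` (cut everything after the first space) returns an empty string rather than the first candidate; `// /` (delete every space) sidesteps that, at the cost of concatenating the digits if more than one candidate remains. A quick demonstration:

```bash
candidates=" 0 15 30 45"       # note the leading space from the append above

echo "<${candidates%% *}>"     # -> <>         (cuts at the FIRST space, which is char 1)
echo "<${candidates// /}>"     # -> <0153045>  (removes ALL spaces)

trimmed="${candidates# }"      # one way to reach the first entry safely
echo "<${trimmed%% *}>"        # -> <0>
```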
@@ -1822,18 +1901,21 @@ timer() {
    local cron_compare_difference=$(( cron_compare - ${4} ))

    if [ "${cron_compare_difference}" -lt 60 ]; then
-        cron_compare=$((${cron_compare} + $(( 60 - cron_compare_difference )) ))
+        cron_compare=$((cron_compare + $(( 60 - cron_compare_difference )) ))
    fi

    local cron_current_seconds="$(date --date=@"${cron_compare_seconds}" +"%-S")"
    if [ "${cron_current_seconds}" -ne 0 ]; then
-        cron_compare_seconds=$(( cron_compare_seconds - cron_current_seconds ))
+        cron_compare=$(( cron_compare_seconds - cron_current_seconds ))
    fi

    local cron_minute="$(echo -n "${2}" | awk '{print $1}')"
    local cron_hour="$(echo -n "${2}" | awk '{print $2}')"
    local cron_day_of_month="$(echo -n "${2}" | awk '{print $3}')"
    local cron_month="$(echo -n "${2}" | awk '{print $4}')"
+   local cron_day_of_week="$(echo -n "${2}" | awk '{print $5}')"

+   local cron_next_minute="$(date --date=@"${cron_compare}" +"%-M")"
    local cron_next_hour="$(date --date=@"${cron_compare}" +"%-H")"
    local cron_next_day_of_month="$(date --date=@"${cron_compare}" +"%-d")"
    local cron_next_month="$(date --date=@"${cron_compare}" +"%-m")"
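These `awk` calls split the job's cron expression (argument `${2}`) into its five standard fields, while the `date +"%-M"` style calls extract the matching calendar components; the `-` flag drops zero-padding so later arithmetic sees plain decimals. A small illustration (expression value hypothetical):

```bash
#!/bin/bash
expression="30 3 * * 1-5"                        # minute hour dom month dow

cron_minute="$(echo -n "${expression}" | awk '{print $1}')"   # 30
cron_dow="$(echo -n "${expression}" | awk '{print $5}')"      # 1-5

now=$(date +%s)
next_minute="$(date --date=@"${now}" +"%-M")"    # e.g. 7, not 07
echo "field: ${cron_minute} / ${cron_dow}, current minute: ${next_minute}"
```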
@@ -1842,8 +1924,10 @@ timer() {
    local cron_next_year="$(date --date=@"${cron_compare}" +"%-Y")"

    local cron_next=
+   local cron_parsed=1

-   while [ "$cron_parsed" != "0" ]; do
+   while [ "${cron_parsed}" != "0" ]; do
+       print_debug "[timer] [cron] Parse Minute"
        cron_next=$(parse_expression "${cron_minute}" 59 "${cron_next_minute}")
        if [ "${cron_next}" != "${cron_next_minute}" ]; then
            if [ "${cron_next_minute}" -gt "${cron_next}" ]; then
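The comparison that follows implements a carry: `parse_expression` wraps around when no candidate at or after the current minute matches, so a returned value smaller than the current minute means the match falls in the next hour. A worked example of just that branch (values hypothetical):

```bash
#!/bin/bash
cron_next=30          # parse_expression wrapped: first match of field "30"
cron_next_minute=45   # the minute being compared against
cron_next_hour=10

if [ "${cron_next}" != "${cron_next_minute}" ]; then
    if [ "${cron_next_minute}" -gt "${cron_next}" ]; then
        cron_next_hour=$(( cron_next_hour + 1 ))   # carry into the next hour
    fi
    cron_next_minute="${cron_next}"
fi

echo "next run at ${cron_next_hour}:${cron_next_minute}"   # -> 11:30
```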
@@ -1853,20 +1937,23 @@ timer() {
            cron_next_minute="${cron_next}"
        fi

+       print_debug "[timer] [cron] Parse Hour"
        cron_next=$(parse_expression "${cron_hour}" 23 "${cron_next_hour}")
        if [ "${cron_next}" != "${cron_next_hour}" ]; then
            if [ "${cron_next_hour}" -gt "${cron_next}" ]; then
                cron_next_day_of_month=$(( cron_next_day_of_month + 1 ))
            fi

            cron_next_hour="${cron_next}"
-           #cron_next_minute=0
+           cron_next_minute=0
        fi

+       print_debug "[timer] [cron] Parse Day of Week"
        cron_next=$(parse_expression "${cron_day_of_week}" 6 "${cron_next_day_of_week}")
        if [ "${cron_next}" != "${cron_next_day_of_week}" ]; then
-           day_of_week_difference=$(( ${cron_next} - ${cron_next_day_of_week} ))
+           day_of_week_difference=$(( cron_next - cron_next_day_of_week ))

-           if [ "${day_of_week_difference}" -lt "0" ]; then
+           if [ "${day_of_week_difference}" -lt 0 ]; then
                day_of_week_difference=$(( day_of_week_difference + 7 ))
            fi
@@ -1875,6 +1962,7 @@ timer() {
            cron_next_minute=0
        fi

+       print_debug "[timer] [cron] Parse day of month"
        case "${cron_next_month}" in
            1|3|5|7|8|10|12)
                last_day_of_month="31"
@@ -1914,6 +2002,7 @@ timer() {
            cron_next_day_of_month=$cron_next
        fi

+       print_debug "[timer] [cron] Parse Next Month"
        cron_next=$(parse_expression "${cron_month}" 12 "${cron_next_month}")
        if [ "${cron_next}" != "${cron_next_month}" ]; then
            if [ "${cron_next}" -gt "12" ]; then
@@ -1930,7 +2019,6 @@ timer() {
            fi
            cron_parsed=0
        done

    local cron_future=$(date --date="${cron_next_year}-$(printf "%02d" ${cron_next_month})-$(printf "%02d" ${cron_next_day_of_month})T$(printf "%02d" ${cron_next_hour}):$(printf "%02d" ${cron_next_minute}):00" "+%s")
    local cron_future_difference=$(( cron_future - cron_compare_seconds ))
    time_cron=true
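Once every field has stabilised, the resolved components are reassembled into an ISO timestamp, converted to an epoch, and the difference to the comparison time is how long the timer has to wait. A condensed sketch of that final step, with literal values substituted for the resolved fields:

```bash
#!/bin/bash
# Resolved fields from the loop above (hypothetical values):
cron_next_year=2024; cron_next_month=2; cron_next_day_of_month=1
cron_next_hour=3;    cron_next_minute=30

cron_future=$(date --date="${cron_next_year}-$(printf "%02d" ${cron_next_month})-$(printf "%02d" ${cron_next_day_of_month})T$(printf "%02d" ${cron_next_hour}):$(printf "%02d" ${cron_next_minute}):00" "+%s")
cron_future_difference=$(( cron_future - $(date +%s) ))
echo "sleeping ${cron_future_difference} seconds until the next run"
```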