Compare commits


32 Commits
3.2.3 ... 3.4.0

Author SHA1 Message Date
dave@tiredofit.ca
dc4ab0bfc5 Release 3.4.0 - See CHANGELOG.md 2022-09-12 07:50:55 -07:00
Dave Conroy
9ea34f5a44 Add MongoDB Atlas Support 2022-09-12 07:49:29 -07:00
Dave Conroy
1d53785e7d Fix default port for Influx 2 DBs 2022-09-12 07:19:18 -07:00
Dave Conroy
4e0878b2ad Merge pull request #150 from teun95/teun95-add-rsync
Add --rsyncable for gzip compression
2022-09-12 07:17:41 -07:00
teun95
a98d33bfdb Correct table formatting in README.md 2022-09-08 11:04:11 +00:00
teun95
00c851eda2 Update README.md to include GZ_RSYNCABLE 2022-09-07 19:54:41 +01:00
teun95
cd88285036 Added rsyncable option for gzip using GZ_RSYNCABLE
GZ_RSYNCABLE=TRUE enables --rsyncable for gzip compression. Useful to speed up backups, reduce size of incremental backups, and allow for better deduplication.
2022-09-07 19:50:50 +01:00
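
The flag this commit wires in is standard gzip behavior: `--rsyncable` inserts periodic reset points into the compressed stream so a small change in the input only disturbs nearby output blocks instead of the whole file. A minimal sketch outside the container (file names hypothetical; `--rsyncable` is supported by pigz, which the image uses for gzip compression, and by many gzip builds):

```bash
# Compress the same dump with and without --rsyncable; the rsyncable variant
# lets rsync and deduplicating backup stores reuse unchanged blocks.
gzip -3 -c db.sql > db.sql.gz
gzip -3 --rsyncable -c db.sql > db.rsyncable.sql.gz
```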
dave@tiredofit.ca
428c313c7b Release 3.3.12 - See CHANGELOG.md 2022-08-15 12:19:55 -07:00
Dave Conroy
210acb1e2a Merge pull request #143 from arifer612/patch-1
Fix incorrect case for filesize variable
2022-08-15 12:19:12 -07:00
Arif Er
e50a8cb0ec fix: correct case for filesize variable
Post script support expects a declared variable `$FILESIZE` to provide the size of the backup files. Such a variable does not exist, leading to a situation where using `"${9}"` in a custom script furnishes the checksum hash instead. However, earlier in the script the file size of the backup is indeed assigned to a variable, only it is completely in lower case: `$filesize`. This commit fixes that inconsistency.
2022-08-15 21:51:40 +08:00
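
Concretely, the fix means `${9}` in a post script now carries the backup size as intended. A hedged example of a post script consuming the documented arguments (script body hypothetical):

```bash
#!/bin/bash
# Post scripts receive: ${1}=EXIT_CODE ${2}=DB_TYPE ${3}=DB_HOST ${4}=DB_NAME
# ${5}=STARTEPOCH ${6}=FINISHEPOCH ${7}=DURATIONEPOCH ${8}=BACKUP_FILENAME
# ${9}=FILESIZE ${10}=CHECKSUMVALUE
echo "Backup ${8} finished with exit code ${1}: ${9} bytes, checksum ${10}"
```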
Dave Conroy
7453852046 Release 3.3.11 - See CHANGELOG.md 2022-07-22 12:00:05 -07:00
Dave Conroy
f115a89a3c Merge pull request #141 from khoazero123/fix_postgres_restore
Fix postgres restore wrong db type
2022-07-22 11:58:57 -07:00
KhoaZero123
8b8d243944 Fix postgres restore wrong db type 2022-07-22 09:41:55 +07:00
dave@tiredofit.ca
be34ceb6ff Release 3.3.10 - See CHANGELOG.md 2022-07-19 12:16:29 -07:00
Dave Conroy
82d6ce444b Merge branch 'master' of https://github.com/tiredofit/docker-db-backup 2022-07-11 09:26:38 -07:00
Dave Conroy
382a188b77 Release 3.3.9 - See CHANGELOG.md 2022-07-11 09:26:35 -07:00
Dave Conroy
f458c34546 Merge pull request #140 from fdarveau/fix-read-port-number-ineractive-restore
Fix reading port number in interactive restore
2022-07-11 09:25:41 -07:00
François Darveau
229db5cd03 Fix reading port number in interactive restore 2022-07-10 16:44:05 -04:00
dave@tiredofit.ca
8bb926fd20 Release 3.3.8 - See CHANGELOG.md 2022-07-09 08:59:59 -07:00
dave@tiredofit.ca
f005956c47 Release 3.3.7 - See CHANGELOG.md 2022-06-23 11:49:28 -07:00
dave@tiredofit.ca
ba20386e65 Release 3.3.6 - See CHANGELOG.md 2022-06-23 08:18:08 -07:00
dave@tiredofit.ca
12211d3b67 Release 3.3.5 - See CHANGELOG.md 2022-06-08 09:01:44 -07:00
Dave Conroy
83693d35b2 Release 3.3.4 - See CHANGELOG.md 2022-06-03 05:10:53 -07:00
Dave Conroy
52b726c821 Merge pull request #132 from rozdzynski/master
Unary operator fix
2022-06-03 05:09:27 -07:00
rozdzynski
5c43b3c907 unary operator fix 2022-06-03 14:02:42 +02:00
dave@tiredofit.ca
005e7f6e47 Release 3.3.3 - See CHANGELOG.md 2022-05-24 08:26:45 -07:00
Dave Conroy
7d7cb9587d Release 3.3.2 - See CHANGELOG.md 2022-05-02 22:28:08 -07:00
Dave Conroy
d1713fe3f0 Release 3.3.1 - See CHANGELOG.md 2022-04-30 22:31:34 -07:00
Dave Conroy
d1e98d9c4b Release 3.3.0 - See CHANGELOG.md 2022-04-30 03:23:45 -07:00
Dave Conroy
0920b671cb Release 3.2.6 - See CHANGELOG.md 2022-04-25 10:29:45 -07:00
Dave Conroy
28ed6c3bb8 Release 3.2.5 - See CHANGELOG.md 2022-04-23 14:11:29 -07:00
Dave Conroy
c1bdf26598 Release 3.2.4 - See CHANGELOG.md 2022-04-21 16:04:43 -07:00
10 changed files with 332 additions and 117 deletions

.dockerignore (new file, +1)

@@ -0,0 +1 @@
+examples/

.github/dependabot.yml (new file, vendored, +7)

@@ -0,0 +1,7 @@
+version: 2
+updates:
+  # Maintain dependencies for GitHub Actions
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"

(GitHub Actions workflow; filename not shown)

@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
      - name: Prepare
        id: prep
@@ -63,17 +63,17 @@ jobs:
          echo ::set-output name=docker_image::${DOCKER_IMAGE}
      - name: Set up QEMU
-       uses: docker/setup-qemu-action@v1
+       uses: docker/setup-qemu-action@v2
        with:
          platforms: all
      - name: Set up Docker Buildx
        id: buildx
-       uses: docker/setup-buildx-action@v1
+       uses: docker/setup-buildx-action@v2
      - name: Login to DockerHub
        if: github.event_name != 'pull_request'
-       uses: docker/login-action@v1
+       uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
@@ -102,7 +102,7 @@ jobs:
          fi
      - name: Build
-       uses: docker/build-push-action@v2
+       uses: docker/build-push-action@v3
        with:
          builder: ${{ steps.buildx.outputs.name }}
          context: .

(second GitHub Actions workflow; filename not shown)

Identical action version bumps as the workflow above: actions/checkout v2 → v3, docker/setup-qemu-action, docker/setup-buildx-action, and docker/login-action v1 → v2, docker/build-push-action v2 → v3.

CHANGELOG.md

@@ -1,3 +1,107 @@
+## 3.4.0 2022-09-12 <dave at tiredofit dot ca>
+### Added
+- Add GZ_RSYNCABLE environment variable for better rsync compatibility (Credit teun95@github)
+- Add Pre Backup Script Support
+- Add MongoDB Atlas Support
+### Changed
+- Fix Default Port for Influx 2 DB Hosts
+## 3.3.12 2022-08-15 <dave at tiredofit dot ca>
+### Changed
+- arifer612@github contributed a fix for incorrect case of "filesize" variable when using post backup scripts
+## 3.3.11 2022-07-22 <khoazero123@github>
+### Fixed
+- Restore script not properly detecting postgres backups
+## 3.3.10 2022-07-19 <dave at tiredofit dot ca>
+### Changed
+- Remove MSSQL install packages properly
+## 3.3.9 2022-07-09 <fdarveau@github>
+### Fixed
+- Remaining work on interactive mode for entering port on restore script
+## 3.3.8 2022-07-09 <dave at tiredofit dot ca>
+### Added
+- MSSQL Client Tools 18.0.1.1-1
+## 3.3.7 2022-06-23 <dave at tiredofit dot ca>
+### Changed
+- Allow overrides to actually override with the restore script
+## 3.3.6 2022-06-23 <dave at tiredofit dot ca>
+### Changed
+- Fix for restore script when using all 7 arguments
+## 3.3.5 2022-06-08 <dave at tiredofit dot ca>
+### Changed
+- Fix DB Port parameter not being able to be input in restore script
+- Fix MongoDB restore questionnaire
+## 3.3.4 2022-06-03 <rozdzynski@github>
+### Fixed
+- S3 backups failing with special characters in filename
+## 3.3.3 2022-05-24 <dave at tiredofit dot ca>
+### Added
+- Alpine 3.16 base
+## 3.3.2 2022-05-02 <dave at tiredofit dot ca>
+### Added
+- Add POST_SCRIPT_SKIP_X_VERIFY environment variable to allow for more host compatibility for post scripts
+## 3.3.1 2022-04-30 <dave at tiredofit dot ca>
+### Changed
+- Compressing silently was causing 0 byte backups
+## 3.3.0 2022-04-30 <dave at tiredofit dot ca>
+### Added
+- Ability to auto clean old S3 / Minio Hosts like what occurs on filesystem
+- Alert user how to turn off Zabbix Monitoring if fails
+- Allow Zabbix Monitoring to work with S3
+- Silence some more compression statements
+### Changed
+- Fix for Redis not backing up properly
+- Start sending checksums for S3 Outputs
+- Cleanup some code functions
+- Fix Container Log Level always in DEBUG
+## 3.2.4 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Add -portable flag when backing up Influx
 ## 3.2.3 2022-04-21 <dave at tiredofit dot ca>
 ### Changed

Dockerfile

@@ -1,10 +1,10 @@
-FROM docker.io/tiredofit/alpine:3.15
+FROM docker.io/tiredofit/alpine:3.16
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"

 ### Set Environment Variables
 ENV INFLUX2_VERSION=2.2.1 \
-    MSSQL_VERSION=17.8.1.1-1 \
+    MSSQL_VERSION=18.0.1.1-1 \
     CONTAINER_ENABLE_MESSAGING=FALSE \
     CONTAINER_ENABLE_MONITORING=TRUE \
     CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
@@ -42,8 +42,6 @@ RUN set -ex && \
     zstd \
     && \
     \
-    cd /usr/src && \
-    \
     apkArch="$(apk --print-arch)"; \
     case "$apkArch" in \
         x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
@@ -51,7 +49,7 @@ RUN set -ex && \
         *) sleep 0.1 ;; \
     esac; \
     \
-    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
+    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSSQL_VERSION}_amd64.apk mssql-tools18_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
     if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
     \
     mkdir -p /usr/src/pbzip2 && \
@@ -73,6 +71,7 @@ RUN set -ex && \
     ### Cleanup
     apk del .db-backup-build-deps && \
     rm -rf /usr/src/* && \
+    rm -rf /*.apk && \
     rm -rf /etc/logrotate.d/redis && \
     rm -rf /root/.cache /tmp/* /var/cache/apk/*

README.md

@@ -59,6 +59,8 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
   - [Manual Backups](#manual-backups)
   - [Restoring Databases](#restoring-databases)
   - [Custom Scripts](#custom-scripts)
+    - [Pre Backup](#pre-backup)
+    - [Post backup](#post-backup)
 - [Support](#support)
   - [Usage](#usage)
   - [Bugfixes](#bugfixes)
@@ -105,8 +107,9 @@ Images are built primarily for `amd64` architecture, and may also include builds
 The following directories are used for configuration and can be mapped for persistent storage.

 | Directory                    | Description                                                                          |
-| ------------------------ | ------------------------------------------------------------------------------------ |
+| ---------------------------- | ------------------------------------------------------------------------------------ |
 | `/backup`                    | Backups                                                                              |
+| `/assets/custom-scripts/pre` | *Optional* Put custom scripts in this directory to execute before backup operations  |
 | `/assets/custom-scripts`     | *Optional* Put custom scripts in this directory to execute after backup operations   |

 ### Environment Variables

@@ -130,12 +133,13 @@ Be sure to view the following repositories to understand all the customizable op
 | `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
 | `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
 | `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
+| `PRE_SCRIPT` | Fill this variable in with a command to execute before the script starts backing up | |
 | `POST_SCRIPT` | Fill this variable in with a command to execute after the script finishes backing up | |
 | `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE` |

 ### Database Specific Options

 | Parameter | Description | Default |
-| ----------------- | ------------------------------------------------------------------------------------------------------ | ------- |
+| ----------------- | ------------------------------------------------------------------------------------------------------ | --------- |
 | `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
 | `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
 | `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
@@ -144,7 +148,9 @@ Be sure to view the following repositories to understand all the customizable op
 | `DB_USER` | username for the database(s) - Can use `root` for MySQL | |
 | `DB_PASS` | (optional if DB doesn't require it) password for the database | |
 | `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
-| `INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2.x` series - AMD64 and ARM64 only for `2` | |
+| `INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2` series - AMD64 and ARM64 only for `2` | |
+| `MONGO_HOST_TYPE` | Connect to regular `mongodb` or `atlas` | `mongodb` |
+| | You can also skip this and override the URI prefix with `MONGO_URI_PREFIX=mongodb+srv://` or whatever you would like | |

 #### For Influx DB2:

 Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST` use syntax of `http(s)://db-name`
@@ -166,6 +172,7 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
 | `COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` | `3` |
 | `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
 | `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
+| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` |
 | `ENABLE_CHECKSUM` | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` |
 | `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` |
 | `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | |
@@ -230,7 +237,33 @@ If you only enter some of the arguments you will be prompted to fill them in.
 ### Custom Scripts

-If you want to execute a custom script at the end of backup, you can drop bash scripts with the extension of `.sh` in this directory. See the following example to utilize:
+#### Pre Backup
+
+If you want to execute a custom script before a backup starts, you can drop bash scripts with the extension of `.sh` in `/assets/custom-scripts/pre`. See the following example to utilize:
+
+````bash
+$ cat pre-script.sh
+#!/bin/bash
+
+# #### Example Pre Script
+# #### $1=DB_TYPE (Type of Backup)
+# #### $2=DB_HOST (Backup Host)
+# #### $3=DB_NAME (Name of Database backed up)
+# #### $4=BACKUP START TIME (Seconds since Epoch)
+# #### $5=BACKUP FILENAME (Filename)
+echo "${1} Backup Starting on ${2} for ${3} on ${4}. Filename: ${5}"
+````
+
+Pre backup scripts are called with the following arguments:
+
+    ## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
+    ${f} "${dbtype}" "${DB_HOST}" "${DB_NAME}" "${dbbackup_start_time}" "${target}"
+
+Outputs the following on the console:
+
+`mysql Backup Starting on example-db for example on 1647370800. Filename: mysql_example_example-db_20220315-000000.sql.bz2`
+
+#### Post backup
+
+If you want to execute a custom script at the end of backup, you can drop bash scripts with the extension of `.sh` in `/assets/custom-scripts`. See the following example to utilize:

 ````bash
 $ cat post-script.sh
@@ -261,6 +294,9 @@ Outputs the following on the console:
 If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`

+You must make your scripts executable, otherwise an internal check will skip trying to run them.
+If for some reason your filesystem or host is not detecting the executable bit correctly, use the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass the check.

 ## Support

 These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
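
Taken together, the 3.4.0 additions look like this in practice; a hedged run example (image tag, hostnames, and credentials hypothetical):

```bash
# Back up a MongoDB Atlas cluster with rsync-friendly gzip output
# and a directory of pre backup scripts mounted in.
docker run -d \
  -v "$(pwd)/backups:/backup" \
  -v "$(pwd)/pre-scripts:/assets/custom-scripts/pre" \
  -e DB_TYPE=mongo \
  -e MONGO_HOST_TYPE=atlas \
  -e DB_HOST=cluster0.example.mongodb.net \
  -e DB_NAME=mydb \
  -e DB_USER=backup \
  -e DB_PASS=example-password \
  -e COMPRESSION=GZ \
  -e GZ_RSYNCABLE=TRUE \
  tiredofit/db-backup
```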

(functions script: /assets/functions/10-db-backup)

@@ -10,7 +10,10 @@ bootstrap_variables() {
             ;;
         influx* )
             dbtype=influx
-            DB_PORT=${DB_PORT:-8088}
+            case "${INFLUX_VERSION}" in
+                1) DB_PORT=${DB_PORT:-8088} ;;
+                2) DB_PORT=${DB_PORT:-8086} ;;
+            esac
             file_env 'DB_USER'
             file_env 'DB_PASS'
             sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
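
The practical effect of this hunk: an InfluxDB 2 host no longer needs an explicit `DB_PORT`. A hedged example (image tag, host, and token hypothetical), following the README's InfluxDB 2 conventions:

```bash
# DB_PORT is omitted; with INFLUX_VERSION=2 the script now defaults to 8086
# (previously it always fell back to the v1 port 8088).
docker run -d \
  -e DB_TYPE=influx \
  -e INFLUX_VERSION=2 \
  -e DB_HOST=http://influxdb \
  -e DB_USER=myorg \
  -e DB_PASS=my-root-token \
  -e DB_NAME=ALL \
  tiredofit/db-backup
```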
@@ -78,9 +81,10 @@ bootstrap_variables() {
 }

 backup_couch() {
-    pre_dbbackup
+    prepare_dbbackup
     target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
     compression
+    pre_dbbackup ${DB_NAME}
     print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
     curl -sSL -X GET ${DB_HOST}:${DB_PORT}/${DB_NAME}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
     exit_code=$?
@@ -101,12 +105,13 @@ backup_influx() {
     case "${INFLUX_VERSION,,}" in
         1 )
             for db in ${db_names}; do
-                pre_dbbackup
+                prepare_dbbackup
                 if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
                 target=influx_${db}_${DB_HOST#*//}_${now}
                 compression
+                pre_dbbackup $db
                 print_notice "Dumping Influx database: '${db}'"
-                influxd backup ${influx_compression} ${bucket} -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
+                influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
                 exit_code=$?
                 check_exit_code $target_dir
                 print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
@@ -119,10 +124,11 @@ backup_influx() {
             ;;
         2 )
             for db in ${db_names}; do
-                pre_dbbackup
+                prepare_dbbackup
                 if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
                 target=influx2_${db}_${DB_HOST#*//}_${now}
                 compression
+                pre_dbbackup $db
                 print_notice "Dumping Influx2 database: '${db}'"
                 influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
                 exit_code=$?
@@ -138,7 +144,7 @@ backup_influx() {
 }

 backup_mongo() {
-    pre_dbbackup
+    prepare_dbbackup
     if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
         target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
     else
@@ -146,8 +152,14 @@ backup_mongo() {
         mongo_compression="--gzip"
         compression_string="and compressing with gzip"
     fi
+    if [ "${MONGO_HOST_TYPE,,}" = "atlas" ] ; then
+        MONGO_URI_PREFIX=${MONGO_URI_PREFIX:-"mongodb+srv://"}
+    else
+        MONGO_URI_PREFIX=${MONGO_URI_PREFIX:-"mongodb://"}
+    fi
+    pre_dbbackup "${DB_NAME}"
     print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
-    mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+    mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --uri="${MONGO_URI_PREFIX}${DB_HOST}:${DB_PORT}" ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
     exit_code=$?
     check_exit_code $target
     generate_checksum
@@ -156,9 +168,10 @@ backup_mongo() {
 }

 backup_mssql() {
-    pre_dbbackup
+    prepare_dbbackup
     target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
     compression
+    pre_dbbackup "${DB_NAME}"
     print_notice "Dumping MSSQL database: '${DB_NAME}'"
     /opt/mssql-tools/bin/sqlcmd -E -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE \[${DB_NAME}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
     exit_code=$?
@@ -194,9 +207,10 @@ backup_mysql() {
     if var_true "${SPLIT_DB}" ; then
         for db in ${db_names} ; do
-            pre_dbbackup
+            prepare_dbbackup
             target=mysql_${db}_${DB_HOST,,}_${now}.sql
             compression
+            pre_dbbackup $db
             print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
             mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
             exit_code=$?
@@ -207,9 +221,10 @@ backup_mysql() {
         done
     else
         print_debug "Not splitting database dumps into their own files"
-        pre_dbbackup
+        prepare_dbbackup
         target=mysql_all_${DB_HOST,,}_${now}.sql
         compression
+        pre_dbbackup all
         print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
         mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
         exit_code=$?
@@ -241,9 +256,10 @@ backup_pgsql() {
     if var_true "${SPLIT_DB}" ; then
         for db in ${db_names} ; do
-            pre_dbbackup
+            prepare_dbbackup
             target=pgsql_${db}_${DB_HOST,,}_${now}.sql
             compression
+            pre_dbbackup $db
             print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
             pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
             exit_code=$?
@@ -254,9 +270,10 @@ backup_pgsql() {
         done
     else
         print_debug "Not splitting database dumps into their own files"
-        pre_dbbackup
+        prepare_dbbackup
         target=pgsql_all_${DB_HOST,,}_${now}.sql
         compression
+        pre_dbbackup all
         print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
         tmp_db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
         for r_db_name in $(echo $db_names | xargs); do
@@ -276,10 +293,10 @@ backup_pgsql() {
 }

 backup_redis() {
-    pre_dbbackup
+    prepare_dbbackup
     print_notice "Dumping Redis - Flushing Redis Cache First"
     target=redis_all_${DB_HOST,,}_${now}.rdb
-    echo bgsave | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
+    echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
     sleep 10
     try=5
     while [ $try -gt 0 ] ; do
@@ -287,6 +304,7 @@ backup_redis() {
         ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
         if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
             print_notice "Redis Backup Complete"
+            exit_code=0
             break
         fi
         try=$((try - 1))
@@ -295,18 +313,21 @@ backup_redis() {
     done
     target_original=${target}
     compression
+    pre_dbbackup all
     $compress_cmd "${TEMP_LOCATION}/${target_original}"
+    check_exit_code $target
     generate_checksum
     move_dbbackup
     post_dbbackup all
 }

 backup_sqlite3() {
-    pre_dbbackup
+    prepare_dbbackup
     db=$(basename "${DB_HOST}")
     db="${db%.*}"
     target=sqlite3_${db}_${now}.sqlite3
     compression
+    pre_dbbackup $db
     print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
     sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
     exit_code=$?
@@ -430,11 +451,31 @@ check_exit_code() {
 cleanup_old_data() {
     if [ -n "${DB_CLEANUP_TIME}" ]; then
         if [ "${master_exit_code}" != 1 ]; then
+            case "${BACKUP_LOCATION,,}" in
+                "file" | "filesystem" )
                     print_info "Cleaning up old backups"
                     mkdir -p "${DB_DUMP_TARGET}"
                     find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+                    ;;
+                "s3" | "minio" )
+                    print_info "Cleaning up old backups"
+                    aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | while read -r s3_file; do
+                        s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
+                        s3_createdate=$(date -d "$s3_createdate" "+%s")
+                        s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
+                        if [[ $s3_createdate -le $s3_olderthan ]] ; then
+                            s3_filename=$(echo $s3_file | awk {'print $4'})
+                            if [ "$s3_filename" != "" ] ; then
+                                print_debug "Deleting $s3_filename"
+                                silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+                            fi
+                        fi
+                    done
+                    ;;
+            esac
         else
-            print_info "Skipping Cleaning up old backups because there were errors in backing up"
+            print_error "Skipping Cleaning up old backups because there were errors in backing up"
         fi
     fi
 }
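
The S3 branch above decides what to delete by converting the `aws s3 ls` timestamp to epoch seconds and comparing it to a cutoff. A standalone sketch of that comparison (variable values hypothetical; `date -d` as used here is the GNU/BusyBox form):

```bash
#!/bin/sh
# Mirror of the age check in cleanup_old_data: anything whose listing
# timestamp is older than DB_CLEANUP_TIME minutes gets removed.
DB_CLEANUP_TIME=10080                                # minutes (7 days)
cutoff=$(( $(date +%s) - DB_CLEANUP_TIME * 60 ))     # cutoff in epoch seconds
created=$(date -d "2022-09-01 12:00:00" "+%s")       # parsed from an `aws s3 ls` row
if [ "$created" -le "$cutoff" ]; then
    echo "older than DB_CLEANUP_TIME: would delete"
fi
```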
@@ -446,7 +487,10 @@ compression() {
     case "${COMPRESSION,,}" in
         gz* )
-            compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
+            if var_true "${GZ_RSYNCABLE}" ; then
+                gz_rsyncable=--rsyncable
+            fi
+            compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} ${gz_rsyncable}"
             compression_type="gzip"
             extension=".gz"
             dir_compress_cmd=${compress_cmd}
@@ -454,7 +498,7 @@ compression() {
             target=${target}.gz
             ;;
         bz* )
-            compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
+            compress_cmd="pbzip2 -q -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
             compression_type="bzip2"
             dir_compress_cmd=${compress_cmd}
             extension=".bz2"
@@ -470,7 +514,7 @@ compression() {
             target=${target}.xz
             ;;
         zst* )
-            compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
+            compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
             compression_type="zstd"
             dir_compress_cmd=${compress_cmd}
             extension=".zst"
@@ -506,12 +550,12 @@ create_archive() {
         print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
         tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
     else
-        print_warn "Skipping creating archive file because backup did not complete successfully"
+        print_error "Skipping creating archive file because backup did not complete successfully"
     fi
 }

 generate_checksum() {
-    if var_true "${ENABLE_CHECKSUM}" ;then
+    if var_true "${ENABLE_CHECKSUM}" ; then
         if [ "${exit_code}" = "0" ] ; then
             case "${CHECKSUM,,}" in
                 "md5" )
@@ -530,13 +574,16 @@ generate_checksum() {
             checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
             print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
         else
-            print_warn "Skipping Checksum creation because backup did not complete successfully"
+            print_error "Skipping Checksum creation because backup did not complete successfully"
         fi
     fi
 }

 move_dbbackup() {
     if [ "${exit_code}" = "0" ] ; then
+        dbbackup_size="$(stat -c%s "${TEMP_LOCATION}"/"${target}")"
+        dbbackup_date="$(date -r "${TEMP_LOCATION}"/"${target}" +'%s')"
         case "${SIZE_VALUE,,}" in
             "b" | "bytes" )
                 SIZE_VALUE=1
@@ -579,21 +626,23 @@ move_dbbackup() {
                 [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
-                aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
-                unset s3_ssl
-                unset s3_ca_cert
+                silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+                if var_true "${ENABLE_CHECKSUM}" ; then
+                    silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+                fi
                 rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
                 rm -rf "${TEMP_LOCATION}"/"${target}"
                 ;;
         esac
     else
-        print_warn "Skipping moving DB Backup to final location because backup did not complete successfully"
+        print_error "Skipping moving DB Backup to final location because backup did not complete successfully"
     fi
     rm -rf "${TEMP_LOCATION}"/*
 }

-pre_dbbackup() {
+prepare_dbbackup() {
     dbbackup_start_time=$(date +"%s")
     now=$(date +"%Y%m%d-%H%M%S")
     now_time=$(date +"%H:%M:%S")
@@ -601,42 +650,86 @@ pre_dbbackup() {
     target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
 }

+pre_dbbackup() {
+    ### Pre Script Support
+    if [ -n "${PRE_SCRIPT}" ] ; then
+        if var_true "${PRE_SCRIPT_SKIP_X_VERIFY}" ; then
+            eval "${PRE_SCRIPT}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
+        else
+            if [ -x "${PRE_SCRIPT}" ] ; then
+                print_notice "Found PRE_SCRIPT environment variable. Executing '${PRE_SCRIPT}"
+                eval "${PRE_SCRIPT}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
+            else
+                print_error "Can't execute PRE_SCRIPT environment variable '${PRE_SCRIPT}' as its filesystem bit is not executible!"
+            fi
+        fi
+    fi
+
+    ### Pre Backup Custom Script Support
+    if [ -d "/assets/custom-scripts/pre/" ] ; then
+        for f in $(find /assets/custom-scripts/pre/ -name \*.sh -type f); do
+            if var_true "${PRE_SCRIPT_SKIP_X_VERIFY}" ; then
+                ${f} "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
+            else
+                if [ -x "${f}" ] ; then
+                    print_notice "Executing pre backup custom script : '${f}'"
+                    ## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
+                    ${f} "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
+                else
+                    print_error "Can't run pre backup custom script: '${f}' as its filesystem bit is not executible!"
+                fi
+            fi
+        done
+    fi
+}

 post_dbbackup() {
     dbbackup_finish_time=$(date +"%s")
     dbbackup_total_time=$(echo $((dbbackup_finish_time-dbbackup_start_time)))
     if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
         print_notice "Sending Backup Statistics to Zabbix"
-        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
-        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
+        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "${dbbackup_size}"
+        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "${dbbackup_date}"
         silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
         silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
+        if [ "$?" != "0" ] ; then print_error "Error sending statistics, consider disabling with 'CONTAINER_ENABLE_MONITORING=FALSE'" ; fi
     fi

     ### Post Script Support
     if [ -n "${POST_SCRIPT}" ] ; then
+        if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
+            eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
+        else
             if [ -x "${POST_SCRIPT}" ] ; then
                 print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
-                eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+                eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
             else
                 print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
             fi
+        fi
     fi

     ### Post Backup Custom Script Support
     if [ -d "/assets/custom-scripts/" ] ; then
         for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
+            if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
+                ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
+            else
                 if [ -x "${f}" ] ; then
                     print_notice "Executing post backup custom script : '${f}'"
                     ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
-                    ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+                    ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
                 else
                     print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
                 fi
+            fi
         done
     fi

     print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
+    unset s3_ssl
+    unset s3_ca_cert
 }

 sanity_test() {

(container init script; filename not shown)

@@ -4,7 +4,6 @@ source /assets/functions/00-container
 source /assets/functions/10-db-backup
 source /assets/defaults/10-db-backup
 PROCESS_NAME="db-backup"
-CONTAINER_LOG_LEVEL=DEBUG

 bootstrap_variables
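
With the stray `CONTAINER_LOG_LEVEL=DEBUG` gone, verbose output is opt-in again; a hedged one-liner to turn it back on deliberately (image tag assumed):

```bash
docker run -d -e DEBUG_MODE=TRUE -e CONTAINER_LOG_LEVEL=DEBUG tiredofit/db-backup
```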

(restore script)

@@ -55,7 +55,7 @@ The image will also allow you to use environment variables or Docker secrets use
 The script can also be executed skipping the interactive mode by using the following syntax:

-    $(basename $0) <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>
+    $(basename "$0") <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>

 If you only enter some of the arguments you will be prompted to fill them in.
@@ -78,7 +78,7 @@ fi
 get_filename() {
     COLUMNS=12
     prompt="Please select a file to restore:"
-    options=( $(find ${DB_DUMP_TARGET} -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
+    options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
     PS3="$prompt "
     select opt in "${options[@]}" "Custom" "Quit" ; do
         if (( REPLY == 2 + ${#options[@]} )) ; then
@@ -271,7 +271,7 @@ get_dbtype() {
             parsed_type=true
             print_debug "Parsed DBType: MariaDB/MySQL"
             ;;
-        psql | postgres* )
+        pgsql | postgres* )
             parsed_type=true
             print_debug "Parsed DBType: Postgresql"
             ;;
@@ -641,7 +641,7 @@ EOF
         2 )
             while true; do
                 read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
-                case "${q_dbname_menu,,}" in
+                case "${q_dbport_menu,,}" in
                     c* )
                         counter=1
                         q_dbport=" "
@@ -829,11 +829,7 @@ print_debug "Filename to recover '${r_filename}'"
 ## Question Database Type
 if [ -n "${2}" ]; then
-    if [ ! -f "${2}" ]; then
-        get_dbtype
-    else
-        r_dbtype="${2}"
-    fi
+    r_dbtype="${2}"
 else
     get_dbtype
 fi
@@ -841,59 +837,39 @@ print_debug "Database type '${r_dbtype}'"
 ## Question Database Host
 if [ -n "${3}" ]; then
-    if [ ! -f "${3}" ]; then
-        get_dbhost
-    else
-        r_dbhost="${3}"
-    fi
+    r_dbhost="${3}"
 else
     get_dbhost
 fi
 print_debug "Database Host '${r_dbhost}'"

 ## Question Database Name
-if [ -n "${3}" ]; then
-    if [ ! -f "${3}" ]; then
-        get_dbname
-    else
-        r_dbname="${3}"
-    fi
+if [ -n "${4}" ]; then
+    r_dbname="${4}"
 else
     get_dbname
 fi
 print_debug "Database Name '${r_dbname}'"

 ## Question Database User
-if [ -n "${4}" ]; then
-    if [ ! -f "${4}" ]; then
-        get_dbuser
-    else
-        r_dbuser="${4}"
-    fi
+if [ -n "${5}" ]; then
+    r_dbuser="${5}"
 else
     get_dbuser
 fi
 print_debug "Database User '${r_dbuser}'"

 ## Question Database Password
-if [ -n "${5}" ]; then
-    if [ ! -f "${5}" ]; then
-        get_dbpass
-    else
-        r_dbpass="${5}"
-    fi
+if [ -n "${6}" ]; then
+    r_dbpass="${6}"
 else
     get_dbpass
 fi
 print_debug "Database Pass '${r_dbpass}'"

 ## Question Database Port
-if [ -n "${6}" ]; then
-    if [ ! -f "${6}" ]; then
-        get_dbport
-    else
-        r_dbport="${6}"
-    fi
+if [ -n "${7}" ]; then
+    r_dbport="${7}"
 else
     get_dbport
 fi
@@ -932,7 +908,7 @@ case "${r_dbtype}" in
         pv ${r_filename} | ${decompress_cmd}cat | mysql -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${r_dbname}
         exit_code=$?
         ;;
-    psql | postgres* )
+    pgsql | postgres* )
         print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
         export PGPASSWORD=${r_dbpass}
         pv ${r_filename} | ${decompress_cmd}cat | psql -d ${r_dbname} -h ${r_dbhost} -p ${r_dbport} -U ${r_dbuser}
@@ -944,12 +920,12 @@ case "${r_dbtype}" in
         mongo_compression="--gzip"
     fi
     if [ -n "${r_dbuser}" ] ; then
-        mongo_user="-u ${r_dbuser}"
+        mongo_user="-u=${r_dbuser}"
     fi
     if [ -n "${r_dbpass}" ] ; then
-        mongo_pass="-u ${r_dbpass}"
+        mongo_pass="-p=${r_dbpass}"
     fi
-    mongorestore ${mongo_compression} -d ${r_dbname} -h ${r_dbhost} --port ${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
+    mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
     exit_code=$?
     ;;
 * )
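
With the argument shift above, the non-interactive call lines up with the documented usage. A hedged example (file name and credentials hypothetical; the helper is invoked as `restore` inside the container):

```bash
# <filename> <db_type> <db_hostname> <db_name> <db_user> <db_pass> <db_port>
restore /backup/pgsql_mydb_postgres_20220901-000000.sql.gz pgsql postgres mydb dbuser dbpass 5432
```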