Mirror of https://github.com/tiredofit/docker-db-backup.git (synced 2025-12-21 21:33:28 +01:00)

Compare commits (55 commits)
Commit SHA1s in this range (the author, date, and message columns were not captured by the mirror):

28c7058f37, 6f15c77a0f, 4e04e31d84, f3fad4a893, 69f0ca762c, 07d72163a0, b31da0b785, be490b3f4b, 028966d0b2, 19d8e98744,
25def5b6f0, 03b7ef9d0d, 231dd63a38, 666eb81846, 4572ab6fca, 9ba51bcec9, b9edbf68d3, 2022158a4e, 2b441f11e1, 532a6b456b,
a8d9a0185f, 8b41f5efcf, 956904046d, 4d7f5e9459, d9723823c9, 0067f552f1, 4a8f85ddf5, 6de0cc7c03, 2d017e26c5, 1efb2d43a8,
789aa96113, 9d8cfd69cb, c4dbf53ced, 8706d3a91c, 73ad356ebf, 73c4003dc4, a377f570f1, b956bd817f, 7bda69b062, bc23b6a65e,
8fb3d8315f, c16133fdd0, 6967fd5e56, 75acaefb64, 6933b0f87c, dc4ab0bfc5, 9ea34f5a44, 1d53785e7d, 4e0878b2ad, a98d33bfdb,
00c851eda2, cd88285036, 428c313c7b, 210acb1e2a, e50a8cb0ec
.gitattributes (vendored, new file, 2 lines)

```diff
@@ -0,0 +1,2 @@
+# Declare files that will always have LF line endings on checkout.
+*.* text eol=lf
```
CHANGELOG.md (66 lines added)

```diff
@@ -1,3 +1,69 @@
+## 3.5.2 2022-10-11 <dave at tiredofit dot ca>
+
+### Added
+- Update InfluxDB client to 2.4.0 (jauderho@github)
+
+
+## 3.5.1 2022-10-11 <dave at tiredofit dot ca>
+
+### Changed
+- Tighten up cleanup routines to not call blobxfer unless absolutely necessary
+
+
+## 3.5.0 2022-10-10 <dave at tiredofit dot ca>
+
+### Added
+- Blobxfer / Microsoft Azure support (credit: eoehen@github)
+- Introduce MONGO_CUSTOM_URI environment variable for those not wanting to use DB_* variables
+
+### Changed
+- Force filenames to be in lowercase
+- Fix S3 database cleanups (credit: greenatwork@github)
+- Remove MONGO_DB_TYPE variable, as MONGO_CUSTOM_URI overrides it
+- Fix MSSQL backups (credit: eoehen@github)
+- Separate examples for MySQL and MSSQL
+
+
+## 3.4.2 2022-09-19 <dave at tiredofit dot ca>
+
+### Changed
+- Skip availability check for MongoDB Atlas connections
+
+
+## 3.4.1 2022-09-13 <dave at tiredofit dot ca>
+
+### Added
+- Introduce environment variables SCRIPT_LOCATION_PRE and SCRIPT_LOCATION_POST for better separation
+
+### Deprecated
+- Introduce a deprecation warning for the custom script paths `/assets/custom-scripts` and `/assets/custom-scripts/pre`. These
+  paths will continue to work for now, but support may be removed in the next major version release. To support the new
+  default paths, your scripts should be moved as follows:
+
+  | Script Type | Old Path (Deprecated)        | New Environment Variable | Environment Value Default |
+  | ----------- | ---------------------------- | ------------------------ | ------------------------- |
+  | Pre         | `/assets/custom-scripts/pre` | SCRIPT_LOCATION_PRE      | `/assets/scripts/pre`     |
+  | Post        | `/assets/custom-scripts`     | SCRIPT_LOCATION_POST     | `/assets/scripts/post`    |
+
+
+## 3.4.0 2022-09-12 <dave at tiredofit dot ca>
+
+### Added
+- Add GZ_RSYNCABLE environment variable for better rsync compatibility (credit: teun95@github)
+- Add pre-backup script support
+- Add MongoDB Atlas support
+
+### Changed
+- Fix default port for InfluxDB 2 hosts
+
+
+## 3.3.12 2022-08-15 <dave at tiredofit dot ca>
+
+### Changed
+- arifer612@github contributed a fix for the incorrect case of the "filesize" variable when using post-backup scripts
+
+
 ## 3.3.11 2022-07-22 <khoazero123@github>
 
 ### Fixed
```
Dockerfile (file header not captured by the mirror; identified from the hunk context)

```diff
@@ -3,7 +3,7 @@ LABEL maintainer="Dave Conroy (github.com/tiredofit)"

 ### Set Environment Variables
-ENV INFLUX2_VERSION=2.2.1 \
+ENV INFLUX2_VERSION=2.4.0 \
     MSSQL_VERSION=18.0.1.1-1 \
     CONTAINER_ENABLE_MESSAGING=FALSE \
     CONTAINER_ENABLE_MONITORING=TRUE \
@@ -36,6 +36,7 @@ RUN set -ex && \
     postgresql \
     postgresql-client \
     pv \
+    py3-cryptography \
     redis \
     sqlite \
     xz \
@@ -68,6 +69,9 @@ RUN set -ex && \
     make && \
     make install && \
     \
+    apk add gcc build-base libressl-dev libffi-dev python3-dev py3-pip && \
+    pip3 install blobxfer && \
+    \
     ### Cleanup
     apk del .db-backup-build-deps && \
     rm -rf /usr/src/* && \
```
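The build now bakes the blobxfer CLI into the image. A minimal smoke-test sketch, assuming the published `tiredofit/db-backup` tag (substitute a locally built tag if needed):

```bash
# Sketch: confirm the blobxfer CLI is present inside the image.
# The image tag is an assumption, not confirmed by this compare view.
docker run --rm tiredofit/db-backup blobxfer --version
```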
README.md (122 lines changed)

```diff
@@ -14,7 +14,7 @@ This will build a container for backing up multiple types of DB Servers

 Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.

-* dump to local filesystem or backup to S3 Compatible services
+* dump to local filesystem or backup to S3 Compatible services, and Azure.
 * select database user and password
 * backup all databases, single, or multiple databases
 * backup all to separate files or one singular file
@@ -37,7 +37,6 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 - [About](#about)
 - [Maintainer](#maintainer)
 - [Table of Contents](#table-of-contents)
-- [Persistent Storage](#persistent-storage)
 - [Prerequisites and Assumptions](#prerequisites-and-assumptions)
 - [Installation](#installation)
 - [Build from Source](#build-from-source)
@@ -45,7 +44,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 - [Multi Architecture](#multi-architecture)
 - [Configuration](#configuration)
 - [Quick Start](#quick-start)
-- [Persistent Storage](#persistent-storage-1)
+- [Persistent Storage](#persistent-storage)
 - [Environment Variables](#environment-variables)
 - [Base Images used](#base-images-used)
 - [Container Options](#container-options)
@@ -59,6 +58,9 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 - [Manual Backups](#manual-backups)
 - [Restoring Databases](#restoring-databases)
 - [Custom Scripts](#custom-scripts)
+  - [Path Options](#path-options)
+  - [Pre Backup](#pre-backup)
+  - [Post backup](#post-backup)
 - [Support](#support)
 - [Usage](#usage)
 - [Bugfixes](#bugfixes)
@@ -67,7 +69,6 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 - [License](#license)

 > **NOTE**: If you are using this with a docker-compose file along with a separate SQL container, take care not to set the variables to back up immediately; instead, delay execution for a minute, otherwise you will get a failed first backup.
-### Persistent Storage

 ## Prerequisites and Assumptions
 * You must have a working connection to one of the supported DB Servers and appropriate credentials
@@ -101,13 +102,15 @@ Images are built primarily for `amd64` architecture, and may also include builds
 * Set various [environment variables](#environment-variables) to understand the capabilities of this image.
 * Map [persistent storage](#data-volumes) for access to configuration and data files for backup.
 * Make [networking ports](#networking) available for public access if necessary

 ### Persistent Storage

 The following directories are used for configuration and can be mapped for persistent storage.

-| Directory                | Description                                                                         |
-| ------------------------ | ----------------------------------------------------------------------------------- |
-| `/backup`                | Backups                                                                             |
-| `/assets/custom-scripts` | *Optional* Put custom scripts in this directory to execute after backup operations  |
+| Directory              | Description                                                                           |
+| ---------------------- | ------------------------------------------------------------------------------------- |
+| `/backup`              | Backups                                                                               |
+| `/assets/scripts/pre`  | *Optional* Put custom scripts in this directory to execute before backup operations   |
+| `/assets/scripts/post` | *Optional* Put custom scripts in this directory to execute after backup operations    |

 ### Environment Variables
```
```diff
@@ -130,35 +133,41 @@ Be sure to view the following repositories to understand all the customizable options:
 | `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
 | `TEMP_LOCATION` | Perform backups and compression in this temporary directory | `/tmp/backups/` |
 | `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
-| `POST_SCRIPT` | Fill this variable in with a command to execute after backing up | |
-| `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and PostgreSQL only) | `TRUE`
+| `PRE_SCRIPT` | Fill this variable in with a command to execute before backing up | |
+| `POST_SCRIPT` | Fill this variable in with a command to execute after backing up | |
+| `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and PostgreSQL only) | `TRUE` |

 ### Database Specific Options
 | Parameter | Description | Default |
 | ------------------ | ----------- | ------- |
 | `DB_AUTH` | (Mongo only, optional) Authentication database | |
 | `DB_TYPE` | Type of DB server to backup: `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
 | `DB_HOST` | Server hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
 | `DB_NAME` | Schema name e.g. `database`, or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas e.g. `db1,db2` | |
 | `DB_NAME_EXCLUDE` | If using `ALL`, use this to exclude databases (separated by commas) from being backed up | |
 | `DB_USER` | Username for the database(s) - can use `root` for MySQL | |
 | `DB_PASS` | (Optional if the DB doesn't require it) password for the database | |
 | `DB_PORT` | (Optional) Set port to connect to DB_HOST. Defaults are provided | varies |
-| `INFLUX_VERSION` | What version of InfluxDB you are backing up from: `1.x` or `2.x` series - AMD64 and ARM64 only for `2` | |
+| `INFLUX_VERSION` | What version of InfluxDB you are backing up from: `1.x` or `2` series - AMD64 and ARM64 only for `2` | |
+| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB connection string, enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | |
+| | This environment variable will be parsed to populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can override them by making your own entries | |
```
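A minimal invocation using the new variable might look like the following sketch (image tag, credentials, and URI are placeholders, not values confirmed by this compare view):

```bash
# Sketch: one-off MongoDB Atlas backup driven by MONGO_CUSTOM_URI
# instead of the individual DB_* variables.
docker run --rm \
  -e DB_TYPE=mongo \
  -e MONGO_CUSTOM_URI="mongodb+srv://backup_user:secret@cluster0.abcde.mongodb.net/mydb" \
  -e BACKUP_LOCATION=FILESYSTEM \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```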
#### For Influx DB2:
Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST`, use the syntax `http(s)://db-name`.
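A hedged sketch of that mapping (organization, token, and host names are placeholders):

```bash
# Sketch: InfluxDB 2.x backup; the org goes in DB_USER, the root token in DB_PASS.
docker run --rm \
  -e DB_TYPE=influx \
  -e INFLUX_VERSION=2 \
  -e DB_HOST=http://influxdb \
  -e DB_NAME=ALL \
  -e DB_USER=my-org \
  -e DB_PASS=my-root-token \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```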
```diff
 ### Scheduling Options
 | Parameter | Description | Default |
 | ----------------- | ----------- | --------- |
 | `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
 | `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
 | | Absolute HHMM, e.g. `2330` or `0415` | |
 | | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
+| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `/backup` |
 | `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when the dump frequency fires). 1440 would delete anything over 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
```

- You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to parse properly. There have been reports of values that start with a `0` being converted into a different format, which will not allow the timer to start at the correct time. See the sketch below.
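A sketch of a run that quotes the leading-zero start time (all other values illustrative):

```bash
# Sketch: quoting DB_DUMP_BEGIN so a leading zero survives YAML/shell parsing.
docker run -d \
  -e DB_TYPE=mysql -e DB_HOST=mariadb \
  -e DB_NAME=example -e DB_USER=example -e DB_PASS=examplepassword \
  -e DB_DUMP_FREQ=1440 \
  -e DB_DUMP_BEGIN="0415" \
  -e DB_CLEANUP_TIME=10080 \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```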
```diff
 ### Backup Options
 | Parameter | Description | Default |
 | ------------------------------ | ----------- | -------------- |
@@ -166,6 +175,7 @@ Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`.
 | `COMPRESSION_LEVEL` | Numerical value of what level of compression to use; most allow `1` to `9`, except for `ZSTD` which allows `1` to `19` | `3` |
 | `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups, `TRUE` or `FALSE` | `TRUE` |
 | `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - integer value e.g. `8` | `autodetected` |
+| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication, e.g. `TRUE` | `FALSE` |
 | `ENABLE_CHECKSUM` | Generate either an MD5 or SHA1 checksum in the directory, `TRUE` or `FALSE` | `TRUE` |
 | `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` |
 | `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | |
```
```diff
@@ -180,11 +190,11 @@ Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`.
 If `BACKUP_LOCATION` = `S3` then the following options are used.

 | Parameter | Description | Default |
-| --------------------- | ----------------------------------------------------------------------------------------- | ------- |
+|-----------------------|-------------------------------------------------------------------------------------------|---------|
 | `S3_BUCKET` | S3 bucket name e.g. `mybucket` | |
 | `S3_KEY_ID` | S3 key ID | |
 | `S3_KEY_SECRET` | S3 key secret | |
-| `S3_PATH` | S3 pathname to save to e.g. '`backup`' | |
+| `S3_PATH` | S3 pathname to save to (must end in a trailing slash e.g. '`backup/`') | |
 | `S3_REGION` | Define region in which the bucket is defined. Example: `ap-northeast-2` | |
 | `S3_HOST` | Hostname (and port) of the S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
 | `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` |
@@ -193,6 +203,22 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.
 | _*OR*_ | | |
 | `S3_CERT_SKIP_VERIFY` | Skip verifying self-signed certificates when connecting | `TRUE` |
```
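A minimal S3 configuration sketch; note the trailing slash on `S3_PATH`, which the cleanup routine now expects (bucket, keys, and hosts are placeholders):

```bash
# Sketch: PostgreSQL backup shipped to an S3 bucket.
docker run -d \
  -e DB_TYPE=pgsql -e DB_HOST=postgres -e DB_NAME=example \
  -e DB_USER=example -e DB_PASS=examplepassword \
  -e BACKUP_LOCATION=S3 \
  -e S3_BUCKET=mybucket \
  -e S3_PATH=backup/ \
  -e S3_KEY_ID=AKIA-PLACEHOLDER \
  -e S3_KEY_SECRET=placeholder-secret \
  -e S3_REGION=ap-northeast-2 \
  tiredofit/db-backup
```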
```diff
+#### Upload to an Azure storage account with `blobxfer`
+
+Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer) to Azure File Share storage.
+
+If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
+
+| Parameter | Description | Default |
+| ------------------------------- | ------------------------------------------- | ------------------- |
+| `BLOBXFER_STORAGE_ACCOUNT`      | Microsoft Azure Cloud storage account name. | |
+| `BLOBXFER_STORAGE_ACCOUNT_KEY`  | Microsoft Azure Cloud storage account key.  | |
+| `BLOBXFER_REMOTE_PATH`          | Remote Azure path                           | `/docker-db-backup` |
+
+> This service uploads files from the backup target directory `DB_DUMP_TARGET`.
+> If a cleanup configuration is defined in `DB_CLEANUP_TIME`, the remote directory on Azure storage will also be cleaned automatically.
```
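A sketch of the Azure variant (storage account name and key are placeholders; a full docker-compose example follows later in this compare view):

```bash
# Sketch: MySQL backup uploaded to Azure via blobxfer.
docker run -d \
  -e DB_TYPE=mysql -e DB_HOST=mariadb -e DB_NAME=example \
  -e DB_USER=example -e DB_PASS=examplepassword \
  -e BACKUP_LOCATION=blobxfer \
  -e BLOBXFER_STORAGE_ACCOUNT=mystorageaccount \
  -e BLOBXFER_STORAGE_ACCOUNT_KEY='placeholder-base64-key==' \
  -e BLOBXFER_REMOTE_PATH=docker-db-backup \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```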
## Maintenance

### Shell Access

```diff
@@ -230,7 +256,41 @@ If you only enter some of the arguments you will be prompted to fill them in.

 ### Custom Scripts

-If you want to execute a custom script at the end of backup, you can drop bash scripts with the extension of `.sh` in this directory. See the following example to utilize:
```

#### Path Options

| Parameter | Description | Default |
| ---------------------- | ---------------------------------------------------------------------------- | ----------------------- |
| `SCRIPT_LOCATION_PRE`  | Location on filesystem inside container to execute bash scripts pre backup   | `/assets/scripts/pre/`  |
| `SCRIPT_LOCATION_POST` | Location on filesystem inside container to execute bash scripts post backup  | `/assets/scripts/post/` |

#### Pre Backup

If you want to execute a custom script before a backup starts, you can drop bash scripts with the extension `.sh` in the location defined in `SCRIPT_LOCATION_PRE`. See the following example:

````bash
$ cat pre-script.sh
#!/bin/bash

# #### Example Pre Script
# #### $1=DB_TYPE (Type of Backup)
# #### $2=DB_HOST (Backup Host)
# #### $3=DB_NAME (Name of Database backed up)
# #### $4=BACKUP START TIME (Seconds since Epoch)
# #### $5=BACKUP FILENAME (Filename)

echo "${1} Backup Starting on ${2} for ${3} at ${4}. Filename: ${5}"
````

The script is invoked as:

````bash
## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
${f} "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${target}"
````

Outputs the following on the console:

`mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_20220315-000000.sql.bz2`

#### Post backup

If you want to execute a custom script at the end of a backup, you can drop bash scripts with the extension `.sh` in the location defined in `SCRIPT_LOCATION_POST`. To support legacy users, `/assets/custom-scripts` is also scanned and executed. See the following example:

````bash
$ cat post-script.sh
````
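The body of the post-script example is cut off in this mirror. A minimal sketch, assuming the ten positional arguments documented in `install/assets/functions/10-db-backup` (EXIT_CODE, DB_TYPE, DB_HOST, DB_NAME, STARTEPOCH, FINISHEPOCH, DURATIONEPOCH, BACKUP_FILENAME, FILESIZE, CHECKSUMVALUE), could look like:

```bash
#!/bin/bash
# Hypothetical post-script; argument order follows the post_dbbackup call:
# $1=EXIT_CODE $2=DB_TYPE $3=DB_HOST $4=DB_NAME $5=STARTEPOCH
# $6=FINISHEPOCH $7=DURATIONEPOCH $8=BACKUP_FILENAME $9=FILESIZE $10=CHECKSUMVALUE

if [ "${1}" -eq 0 ]; then
    echo "${2} backup of ${4} on ${3} finished in ${7}s: ${8} (${9} bytes, checksum ${10})"
else
    echo "WARNING: ${2} backup of ${4} on ${3} exited with code ${1}" >&2
fi
```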
examples/.gitignore (vendored, new file, 5 lines)

```diff
@@ -0,0 +1,5 @@
+# See http://help.github.com/ignore-files/ for more about ignoring files.
+
+# Example container mounted folders
+**/backups/
+**/db/
```
examples/mssql-blobxfer/docker-compose.yml (new file, 69 lines)

```yaml
#
# Example for Microsoft SQL Server
# upload with blobxfer to azure storage
#

version: '2'

networks:
  example-mssql-blobxfer-net:
    name: example-mssql-blobxfer-net

services:
  example-mssql-s3-db:
    hostname: example-db-host
    image: mcr.microsoft.com/mssql/server:2019-latest
    container_name: example-mssql-s3-db
    restart: unless-stopped
    ports:
      - "127.0.0.1:11433:1433"
    networks:
      example-mssql-blobxfer-net:
    volumes:
      - ./tmp/backups:/tmp/backups # shared tmp backup directory
    environment:
      ACCEPT_EULA: Y
      MSSQL_SA_PASSWORD: 5hQa0utRFBpIY3yhoIyE
      MSSQL_PID: Express

  example-mssql-blobxfer-db-backup:
    container_name: example-mssql-blobxfer-db-backup
    # To build and use the image from the current source,
    # execute in a terminal --> docker build -t tiredofit/db-backup-mssql-blobxfer .
    # Or replace with the published image:
    # image: tiredofit/db-backup
    image: tiredofit/db-backup-mssql-blobxfer
    links:
      - example-mssql-s3-db
    volumes:
      - ./backups:/backup
      - ./tmp/backups:/tmp/backups # shared tmp backup directory
      #- ./post-script.sh:/assets/custom-scripts/post-script.sh
    environment:
      # - DEBUG_MODE=TRUE
      - DB_TYPE=mssql
      - DB_HOST=example-db-host
      # - DB_PORT=1488
      # - DB_NAME=ALL # [ALL] not working on SQL Server;
      #   create a database named `test1` manually first
      - DB_NAME=test1
      - DB_USER=sa
      - DB_PASS=5hQa0utRFBpIY3yhoIyE
      - DB_DUMP_FREQ=1 # backup every minute
      # - DB_DUMP_BEGIN=0000 # backup starts immediately
      - DB_CLEANUP_TIME=3 # clean up backups older than 3 minutes
      - ENABLE_CHECKSUM=TRUE
      - CHECKSUM=SHA1
      - COMPRESSION=GZ
      - SPLIT_DB=FALSE
      - CONTAINER_ENABLE_MONITORING=FALSE
      # === Azure blobxfer ===
      - BACKUP_LOCATION=blobxfer
      # Add your azure storage account name here
      - BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name}
      # Add your azure storage account key here
      - BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
      - BLOBXFER_REMOTE_PATH=docker-db-backup
    restart: always
    networks:
      example-mssql-blobxfer-net:
```
examples/mssql/docker-compose.yml (new file, 61 lines)

```yaml
#
# Example for Microsoft SQL Server
#

version: '2'

networks:
  example-mssql-net:
    name: example-mssql-net

services:
  example-mssql-db:
    hostname: example-db-host
    image: mcr.microsoft.com/mssql/server:2019-latest
    container_name: example-mssql-db
    restart: unless-stopped
    ports:
      - "127.0.0.1:11433:1433"
    networks:
      example-mssql-net:
    volumes:
      - ./tmp/backups:/tmp/backups # shared tmp backup directory
    environment:
      ACCEPT_EULA: Y
      MSSQL_SA_PASSWORD: 5hQa0utRFBpIY3yhoIyE
      MSSQL_PID: Express

  example-mssql-db-backup:
    container_name: example-mssql-db-backup
    # To build and use the image from the current source,
    # execute in a terminal --> docker build -t tiredofit/db-backup-mssql .
    # Or replace with the published image:
    # image: tiredofit/db-backup
    image: tiredofit/db-backup-mssql
    links:
      - example-mssql-db
    volumes:
      - ./backups:/backup
      - ./tmp/backups:/tmp/backups # shared tmp backup directory
      #- ./post-script.sh:/assets/custom-scripts/post-script.sh
    environment:
      # - DEBUG_MODE=TRUE
      - DB_TYPE=mssql
      - DB_HOST=example-db-host
      # - DB_PORT=1488
      # - DB_NAME=ALL # [ALL] not working on SQL Server;
      #   create a database named `test1` manually first
      - DB_NAME=test1
      - DB_USER=sa
      - DB_PASS=5hQa0utRFBpIY3yhoIyE
      - DB_DUMP_FREQ=1 # backup every minute
      # - DB_DUMP_BEGIN=0000 # backup starts immediately
      - DB_CLEANUP_TIME=5 # clean up backups older than 5 minutes
      - ENABLE_CHECKSUM=FALSE
      - CHECKSUM=SHA1
      - COMPRESSION=GZ
      - SPLIT_DB=FALSE
      - CONTAINER_ENABLE_MONITORING=FALSE
    restart: always
    networks:
      example-mssql-net:
```
examples/docker-compose.yml → examples/mysql/docker-compose.yml (moved; Executable file → Normal file, 25 lines changed)

```diff
@@ -1,9 +1,16 @@
 version: '2'

+networks:
+  example-db-network:
+    name: example-db-network
+
 services:
   example-db:
+    hostname: example-db-host
     container_name: example-db
     image: mariadb:latest
     ports:
       - 13306:3306
     volumes:
       - ./db:/var/lib/mysql
     environment:
@@ -12,6 +19,8 @@ services:
       - MYSQL_USER=example
       - MYSQL_PASSWORD=examplepassword
     restart: always
+    networks:
+      - example-db-network

   example-db-backup:
     container_name: example-db-backup
@@ -22,17 +31,21 @@ services:
       - ./backups:/backup
       #- ./post-script.sh:/assets/custom-scripts/post-script.sh
     environment:
+      # - DEBUG_MODE=TRUE
       - DB_TYPE=mariadb
-      - DB_HOST=example-db
+      - DB_HOST=example-db-host
       - DB_NAME=example
       - DB_USER=example
-      - DB_PASS="examplepassword"
-      - DB_DUMP_FREQ=1440
-      - DB_DUMP_BEGIN=0000
-      - DB_CLEANUP_TIME=8640
+      - DB_PASS=examplepassword
+      - DB_DUMP_FREQ=1 # backup every minute
+      # - DB_DUMP_BEGIN=0000 # backup starts immediately
+      - DB_CLEANUP_TIME=5 # clean up backups older than 5 minutes
       - CHECKSUM=SHA1
-      - COMPRESSION=ZSTD
+      - COMPRESSION=GZ
       - SPLIT_DB=FALSE
       - CONTAINER_ENABLE_MONITORING=FALSE
     restart: always
+    networks:
+      - example-db-network
```
examples/post-script.sh → examples/mysql/post-script.sh (moved; Executable file → Normal file, 0 changes)
install/assets/defaults/10-db-backup (Executable file → Normal file, 6 lines changed)

```diff
@@ -1,6 +1,7 @@
 #!/command/with-contenv bash

 BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
+BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
 CHECKSUM=${CHECKSUM:-"MD5"}
 COMPRESSION=${COMPRESSION:-"ZSTD"}
 COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
@@ -17,6 +18,9 @@ MYSQL_STORED_PROCEDURES=${MYSQL_STORED_PROCEDURES:-"TRUE"}
 PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
 S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
 S3_PROTOCOL=${S3_PROTOCOL:-"https"}
+SCRIPT_LOCATION_PRE=${SCRIPT_LOCATION_PRE:-"/assets/scripts/pre/"}
+SCRIPT_LOCATION_POST=${SCRIPT_LOCATION_POST:-"/assets/scripts/post/"}
 SIZE_VALUE=${SIZE_VALUE:-"bytes"}
 SKIP_AVAILABILITY_CHECK=${SKIP_AVAILABILITY_CHECK:-"FALSE"}
 SPLIT_DB=${SPLIT_DB:-"TRUE"}
 TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
```
install/assets/functions/10-db-backup (Executable file → Normal file, 380 lines changed)

```diff
@@ -10,21 +10,43 @@ bootstrap_variables() {
         ;;
         influx* )
             dbtype=influx
-            DB_PORT=${DB_PORT:-8088}
+            case "${INFLUX_VERSION}" in
+                1) DB_PORT=${DB_PORT:-8088} ;;
+                2) DB_PORT=${DB_PORT:-8086} ;;
+            esac
             file_env 'DB_USER'
             file_env 'DB_PASS'
+            sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
         ;;
         mongo* )
             dbtype=mongo
-            DB_PORT=${DB_PORT:-27017}
-            [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
-            [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+            if [ -n "${MONGO_CUSTOM_URI}" ] ; then
+                mongo_uri_proto=$(echo ${MONGO_CUSTOM_URI} | grep :// | sed -e's,^\(.*://\).*,\1,g')
+                mongo_uri_scratch="${MONGO_CUSTOM_URI/${mongo_uri_proto}/}"
+                mongo_uri_username_password=$(echo ${mongo_uri_scratch} | grep @ | rev | cut -d@ -f2- | rev)
+                if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch=$(echo ${mongo_uri_scratch} | rev | cut -d@ -f1 | rev) ; fi
+                mongo_uri_port=$(echo ${mongo_uri_scratch} | grep : | rev | cut -d: -f2- | rev)
+                if [ -n "${mongo_uri_port}" ]; then mongo_uri_port=$(echo ${mongo_uri_scratch} | rev | cut -d: -f1 | cut -d/ -f2 | rev) ; fi
+                mongo_uri_hostname=$(echo ${mongo_uri_scratch} | cut -d/ -f1 | cut -d: -f1 )
+                mongo_uri_database=$(echo ${mongo_uri_scratch} | cut -d/ -f2 | cut -d? -f1 )
+                mongo_uri_options=$(echo ${mongo_uri_scratch} | cut -d/ -f2 | cut -d? -f2 )
+                DB_NAME=${DB_NAME:-"${mongo_uri_database,,}"}
+                DB_HOST=${DB_HOST:-"${mongo_uri_hostname,,}"}
+            else
+                DB_PORT=${DB_PORT:-27017}
+                [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
+                [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+                [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
+                [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
+                [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
+                [[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
+            fi
         ;;
         "mysql" | "mariadb" )
             dbtype=mysql
             DB_PORT=${DB_PORT:-3306}
+            [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+            [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
             sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
         ;;
         "mssql" | "microsoftsql" )
```
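The URI decomposition above can be previewed outside the container. A standalone sketch that mirrors the same pipeline (the sample URI is a placeholder):

```bash
#!/bin/bash
# Sketch: preview the DB_HOST/DB_NAME the bootstrap code derives from a URI.
MONGO_CUSTOM_URI="mongodb+srv://backup_user:secret@cluster0.abcde.mongodb.net/mydb?retryWrites=true"

proto=$(echo "${MONGO_CUSTOM_URI}" | grep :// | sed -e 's,^\(.*://\).*,\1,g')
scratch="${MONGO_CUSTOM_URI/${proto}/}"                      # strip protocol
userpass=$(echo "${scratch}" | grep @ | rev | cut -d@ -f2- | rev)
[ -n "${userpass}" ] && scratch=$(echo "${scratch}" | rev | cut -d@ -f1 | rev)
host=$(echo "${scratch}" | cut -d/ -f1 | cut -d: -f1)        # hostname before any port/path
db=$(echo "${scratch}" | cut -d/ -f2 | cut -d? -f1)          # database before query options

echo "DB_HOST=${host,,}"   # -> cluster0.abcde.mongodb.net
echo "DB_NAME=${db,,}"     # -> mydb
```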
```diff
@@ -40,12 +62,14 @@ bootstrap_variables() {
             dbtype=pgsql
             DB_PORT=${DB_PORT:-5432}
             [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+            [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
             sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
         ;;
         "redis" )
             dbtype=redis
             DB_PORT=${DB_PORT:-6379}
             [[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
+            [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
         ;;
         sqlite* )
             dbtype=sqlite3
@@ -56,31 +80,13 @@ bootstrap_variables() {
         file_env 'S3_KEY_ID'
         file_env 'S3_KEY_SECRET'
     fi
-
-    ### Set the Database Authentication Details
-    case "$dbtype" in
-        "mongo" )
-            [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
-            [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
-            [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
-            [[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
-        ;;
-        "mysql" )
-            [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
-        ;;
-        "postgres" )
-            [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
-        ;;
-        "redis" )
-            [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
-        ;;
-    esac
 }

 backup_couch() {
-    pre_dbbackup
+    prepare_dbbackup
     target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
     compression
+    pre_dbbackup ${DB_NAME}
     print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
     curl -sSL -X GET ${DB_HOST}:${DB_PORT}/${DB_NAME}/_all_docs?include_docs=true | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
     exit_code=$?
```
```diff
@@ -101,10 +107,11 @@ backup_influx() {
     case "${INFLUX_VERSION,,}" in
         1 )
             for db in ${db_names}; do
-                pre_dbbackup
+                prepare_dbbackup
                 if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
                 target=influx_${db}_${DB_HOST#*//}_${now}
                 compression
+                pre_dbbackup $db
                 print_notice "Dumping Influx database: '${db}'"
                 influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
                 exit_code=$?
@@ -119,10 +126,11 @@ backup_influx() {
         ;;
         2 )
             for db in ${db_names}; do
-                pre_dbbackup
+                prepare_dbbackup
                 if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
                 target=influx2_${db}_${DB_HOST#*//}_${now}
                 compression
+                pre_dbbackup $db
                 print_notice "Dumping Influx2 database: '${db}'"
                 influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
                 exit_code=$?
@@ -138,7 +146,7 @@ backup_influx() {
 }

 backup_mongo() {
-    pre_dbbackup
+    prepare_dbbackup
     if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
         target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
     else
@@ -146,21 +154,28 @@ backup_mongo() {
         mongo_compression="--gzip"
         compression_string="and compressing with gzip"
     fi
+    if [ -n "${MONGO_CUSTOM_URI}" ] ; then
+        mongo_backup_parameter="--uri=${MONGO_CUSTOM_URI} ${EXTRA_OPTS}"
+    else
+        mongo_backup_parameter="--host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}"
+    fi
+    pre_dbbackup "${DB_NAME}"
     print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
-    mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+    silent mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} ${mongo_backup_parameter}
     exit_code=$?
     check_exit_code $target
     generate_checksum
     move_dbbackup
-    post_dbbackup
+    post_dbbackup "${DB_NAME}"
 }

 backup_mssql() {
-    pre_dbbackup
+    prepare_dbbackup
     target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
     compression
+    pre_dbbackup "${DB_NAME}"
     print_notice "Dumping MSSQL database: '${DB_NAME}'"
-    /opt/mssql-tools/bin/sqlcmd -E -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE \[${DB_NAME}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+    silent /opt/mssql-tools18/bin/sqlcmd -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE [${DB_NAME}] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
     exit_code=$?
     check_exit_code $target
     generate_checksum
```
```diff
@@ -194,11 +209,12 @@ backup_mysql() {

     if var_true "${SPLIT_DB}" ; then
         for db in ${db_names} ; do
-            pre_dbbackup
+            prepare_dbbackup
             target=mysql_${db}_${DB_HOST,,}_${now}.sql
             compression
+            pre_dbbackup $db
             print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
-            mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+            silent mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
             exit_code=$?
             check_exit_code $target
             generate_checksum
@@ -207,11 +223,12 @@ backup_mysql() {
         done
     else
         print_debug "Not splitting database dumps into their own files"
-        pre_dbbackup
+        prepare_dbbackup
         target=mysql_all_${DB_HOST,,}_${now}.sql
         compression
+        pre_dbbackup all
         print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
-        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+        silent mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
         exit_code=$?
         check_exit_code $target
         generate_checksum
@@ -241,11 +258,12 @@ backup_pgsql() {

     if var_true "${SPLIT_DB}" ; then
         for db in ${db_names} ; do
-            pre_dbbackup
+            prepare_dbbackup
             target=pgsql_${db}_${DB_HOST,,}_${now}.sql
             compression
+            pre_dbbackup $db
             print_notice "Dumping PostgreSQL database: '${db}' ${compression_string}"
-            pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+            silent pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
             exit_code=$?
             check_exit_code $target
             generate_checksum
@@ -254,9 +272,10 @@ backup_pgsql() {
         done
     else
         print_debug "Not splitting database dumps into their own files"
-        pre_dbbackup
+        prepare_dbbackup
         target=pgsql_all_${DB_HOST,,}_${now}.sql
         compression
+        pre_dbbackup all
         print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
         tmp_db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
         for r_db_name in $(echo $db_names | xargs); do
@@ -266,7 +285,7 @@ backup_pgsql() {
         for x_db_name in ${tmp_db_names} ; do
             pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
         done
-        pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+        silent pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
         exit_code=$?
         check_exit_code $target
         generate_checksum
@@ -276,7 +295,7 @@ backup_pgsql() {
 }

 backup_redis() {
-    pre_dbbackup
+    prepare_dbbackup
     print_notice "Dumping Redis - Flushing Redis Cache First"
     target=redis_all_${DB_HOST,,}_${now}.rdb
     echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
@@ -296,6 +315,7 @@ backup_redis() {
     done
     target_original=${target}
     compression
+    pre_dbbackup all
     $compress_cmd "${TEMP_LOCATION}/${target_original}"
     check_exit_code $target
     generate_checksum
@@ -304,13 +324,14 @@ backup_redis() {
 }

 backup_sqlite3() {
-    pre_dbbackup
+    prepare_dbbackup
     db=$(basename "${DB_HOST}")
     db="${db%.*}"
     target=sqlite3_${db}_${now}.sqlite3
     compression
+    pre_dbbackup $db
     print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
-    sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
+    silent sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
     exit_code=$?
     check_exit_code $target
     cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
```
`@@ -321,99 +342,105 @@` rewrites `check_availability()`. The raw mirror interleaved both diff sides of the function; the change itself is that every connectivity check is now wrapped in `if var_false "${SKIP_AVAILABILITY_CHECK}"`, and the mongo branch skips the check entirely when `MONGO_CUSTOM_URI` is set (MongoDB Atlas support). The resulting function:

```bash
check_availability() {
    ### Set the Database Type
    if var_false "${SKIP_AVAILABILITY_CHECK}" ; then
        case "$dbtype" in
            "couch" )
                counter=0
                code_received=0
                while [ "${code_received}" != "200" ]; do
                    code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${DB_HOST}:${DB_PORT})
                    if [ "${code_received}" = "200" ] ; then break ; fi
                    sleep 5
                    (( counter+=5 ))
                    print_warn "CouchDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
                done
            ;;
            "influx" )
                counter=0
                case "${INFLUX_VERSION,,}" in
                    1 )
                        while ! (nc -z ${DB_HOST#*//} ${DB_PORT}) ; do
                            sleep 5
                            (( counter+=5 ))
                            print_warn "InfluxDB Host '${DB_HOST#*//}' is not accessible, retrying.. ($counter seconds so far)"
                        done
                    ;;
                    2 )
                        code_received=0
                        while [ "${code_received}" != "200" ]; do
                            code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${DB_HOST}:${DB_PORT}/health)
                            if [ "${code_received}" = "200" ] ; then break ; fi
                            sleep 5
                            (( counter+=5 ))
                            print_warn "InfluxDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
                        done
                    ;;
                esac
            ;;
            "mongo" )
                if [ -n "${MONGO_CUSTOM_URI}" ] ; then
                    print_debug "Skipping Connectivity Check"
                else
                    counter=0
                    while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
                        sleep 5
                        (( counter+=5 ))
                        print_warn "Mongo Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
                    done
                fi
            ;;
            "mysql" )
                counter=0
                export MYSQL_PWD=${DB_PASS}
                while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
                    sleep 5
                    (( counter+=5 ))
                    print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
                done
            ;;
            "mssql" )
                counter=0
                while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
                    sleep 5
                    (( counter+=5 ))
                    print_warn "MSSQL Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
                done
            ;;
            "pgsql" )
                counter=0
                export PGPASSWORD=${DB_PASS}
                until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
                do
                    sleep 5
                    (( counter+=5 ))
                    print_warn "Postgres Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
                done
            ;;
            "redis" )
                counter=0
                while ! (nc -z "${DB_HOST}" "${DB_PORT}") ; do
                    sleep 5
                    (( counter+=5 ))
                    print_warn "Redis Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
                done
            ;;
            "sqlite3" )
                if [[ ! -e "${DB_HOST}" ]]; then
                    print_error "File '${DB_HOST}' does not exist."
                    exit_code=2
                    exit $exit_code
                elif [[ ! -f "${DB_HOST}" ]]; then
                    print_error "File '${DB_HOST}' is not a file."
                    exit_code=2
                    exit $exit_code
                elif [[ ! -r "${DB_HOST}" ]]; then
                    print_error "File '${DB_HOST}' is not readable."
                    exit_code=2
                    exit $exit_code
                fi
            ;;
        esac
    fi
}
```
```diff
@@ -434,13 +461,17 @@ cleanup_old_data() {
     if [ "${master_exit_code}" != 1 ]; then
         case "${BACKUP_LOCATION,,}" in
             "file" | "filesystem" )
-                print_info "Cleaning up old backups"
+                print_info "Cleaning up old backups on filesystem"
                 mkdir -p "${DB_DUMP_TARGET}"
                 find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+                if [ "${BACKUP_LOCATION,,}" = "blobxfer" ] ; then
+                    print_info "Syncing changes via blobxfer"
+                    silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} --delete --delete-only
+                fi
             ;;
             "s3" | "minio" )
-                print_info "Cleaning up old backups"
-                aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | while read -r s3_file; do
+                print_info "Cleaning up old backups on S3 storage"
+                aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | grep " PRE " -v | while read -r s3_file; do
                     s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
                     s3_createdate=$(date -d "$s3_createdate" "+%s")
                     s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
@@ -448,7 +479,7 @@ cleanup_old_data() {
                     s3_filename=$(echo $s3_file | awk {'print $4'})
                     if [ "$s3_filename" != "" ] ; then
                         print_debug "Deleting $s3_filename"
-                        silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+                        silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
                     fi
                 fi
```
```diff
@@ -468,7 +499,10 @@ compression() {
     case "${COMPRESSION,,}" in
         gz* )
-            compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
+            if var_true "${GZ_RSYNCABLE}" ; then
+                gz_rsyncable=--rsyncable
+            fi
+            compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} ${gz_rsyncable}"
             compression_type="gzip"
             extension=".gz"
             dir_compress_cmd=${compress_cmd}
```
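With `GZ_RSYNCABLE=TRUE`, the gzip path therefore invokes pigz with `--rsyncable`. A standalone equivalent of the resulting `compress_cmd` (compression level, thread count, and filenames are illustrative):

```bash
# Sketch: the command compression() assembles when GZ_RSYNCABLE=TRUE.
pigz -q -3 -p 4 --rsyncable < backup.sql > backup.sql.gz
```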
```diff
@@ -609,6 +643,18 @@ move_dbbackup() {
                 silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
             fi

             rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
             rm -rf "${TEMP_LOCATION}"/"${target}"
         ;;
+        "blobxfer" )
+            print_info "Moving backup to Azure storage with blobxfer"
+
+            mkdir -p "${DB_DUMP_TARGET}"
+            mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
+            mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
+
+            silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET}
+
+            rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
+            rm -rf "${TEMP_LOCATION}"/"${target}"
+        ;;
@@ -620,7 +666,7 @@ move_dbbackup() {
     rm -rf "${TEMP_LOCATION}"/*
 }

-pre_dbbackup() {
+prepare_dbbackup() {
     dbbackup_start_time=$(date +"%s")
     now=$(date +"%Y%m%d-%H%M%S")
     now_time=$(date +"%H:%M:%S")
```
```diff
@@ -628,6 +674,45 @@ prepare_dbbackup() {
     target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
 }

+pre_dbbackup() {
+    ### Pre Script Support
+    if [ -n "${PRE_SCRIPT}" ] ; then
+        if var_true "${PRE_SCRIPT_SKIP_X_VERIFY}" ; then
+            eval "${PRE_SCRIPT}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
+        else
+            if [ -x "${PRE_SCRIPT}" ] ; then
+                print_notice "Found PRE_SCRIPT environment variable. Executing '${PRE_SCRIPT}'"
+                eval "${PRE_SCRIPT}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
+            else
+                print_error "Can't execute PRE_SCRIPT environment variable '${PRE_SCRIPT}' as its filesystem bit is not executable!"
+            fi
+        fi
+    fi
+
+    ### Pre Backup Custom Script Support
+    if [ -d "/assets/custom-scripts/pre" ] && dir_notempty "/assets/custom-scripts/pre" ; then
+        print_warning "Found Custom Pre Scripts in /assets/custom-scripts/pre - Automatically moving them to '${SCRIPT_LOCATION_PRE}'"
+        mkdir -p "${SCRIPT_LOCATION_PRE}"
+        silent cp /assets/custom-scripts/pre/* "${SCRIPT_LOCATION_PRE}"
+    fi
+
+    if [ -d "${SCRIPT_LOCATION_PRE}" ] && dir_notempty "${SCRIPT_LOCATION_PRE}" ; then
+        for f in $(find ${SCRIPT_LOCATION_PRE} -name \*.sh -type f); do
+            if var_true "${PRE_SCRIPT_SKIP_X_VERIFY}" ; then
+                ${f} "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
+            else
+                if [ -x "${f}" ] ; then
+                    print_notice "Executing pre backup custom script : '${f}'"
+                    ## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
+                    ${f} "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${target}"
+                else
+                    print_error "Can't run pre backup custom script: '${f}' as its filesystem bit is not executable!"
+                fi
+            fi
+        done
+    fi
+}
+
 post_dbbackup() {
     dbbackup_finish_time=$(date +"%s")
     dbbackup_total_time=$(echo $((dbbackup_finish_time-dbbackup_start_time)))
```
```diff
@@ -644,11 +729,11 @@ post_dbbackup() {
     ### Post Script Support
     if [ -n "${POST_SCRIPT}" ] ; then
         if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
-            eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+            eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
         else
             if [ -x "${POST_SCRIPT}" ] ; then
                 print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}'"
-                eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+                eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
             else
                 print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executable!"
             fi
@@ -656,15 +741,21 @@ post_dbbackup() {
     fi

     ### Post Backup Custom Script Support
-    if [ -d "/assets/custom-scripts/" ] ; then
-        for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
+    if [ -d "/assets/custom-scripts/" ] && dir_notempty "/assets/custom-scripts" ; then
+        print_warning "Found Custom Post Scripts in /assets/custom-scripts/ - Automatically moving them to '${SCRIPT_LOCATION_POST}'"
+        mkdir -p "${SCRIPT_LOCATION_POST}"
+        silent cp /assets/custom-scripts/* "${SCRIPT_LOCATION_POST}"
+    fi
+
+    if [ -d "${SCRIPT_LOCATION_POST}" ] && dir_notempty "${SCRIPT_LOCATION_POST}" ; then
+        for f in $(find ${SCRIPT_LOCATION_POST} -name \*.sh -type f); do
             if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
-                ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+                ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
             else
                 if [ -x "${f}" ] ; then
                     print_notice "Executing post backup custom script : '${f}'"
                     ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
-                    ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+                    ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
                 else
                     print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executable!"
                 fi
```
```diff
@@ -731,4 +822,3 @@ EOF
         fi
     fi
 }
-
```
(The mirror did not capture the file header for this final hunk; from the context it is the service's startup script.)

```diff
@@ -6,6 +6,7 @@ prepare_service 03-monitoring
 PROCESS_NAME="db-backup"
 output_off

+bootstrap_variables
 sanity_test
 setup_mode
 create_zabbix dbbackup
```