Compare commits

...

35 Commits

Author SHA1 Message Date
dave@tiredofit.ca
36506091be Release 3.9.11 - See CHANGELOG.md 2023-08-24 18:12:36 -07:00
dave@tiredofit.ca
bf646381cb Release 3.9.10 - See CHANGELOG.md 2023-08-23 15:31:41 -07:00
dave@tiredofit.ca
fb3b65b33a Release 3.9.9 - See CHANGELOG.md 2023-08-21 15:38:51 -07:00
dave@tiredofit.ca
6d1ef87042 Release 3.9.8 - See CHANGELOG.md 2023-08-20 08:18:04 -07:00
Dave Conroy
c985cc8a4f Merge pull request #240 from ToshY/issue/239-armv7
Add cargo to build dependencies
2023-08-14 19:15:21 -07:00
ToshY
2265a6acf5 Add cargo to build dependencies 2023-08-05 14:39:13 +02:00
dave@tiredofit.ca
10e7debc65 Release 3.9.7 - See CHANGELOG.md 2023-07-18 07:26:59 -07:00
dave@tiredofit.ca
94e9881b7b Release 3.9.6 - See CHANGELOG.md 2023-06-16 09:50:16 -07:00
dave@tiredofit.ca
425383639a Release 3.9.5 - See CHANGELOG.md 2023-06-13 16:24:17 -07:00
dave@tiredofit.ca
1e46996812 Release 3.9.4 - See CHANGELOG.md 2023-06-13 10:16:04 -07:00
Dave Conroy
e71334564f Drop auto builds for armv7 2023-06-11 11:42:13 -07:00
dave@tiredofit.ca
f83f97bf76 Release 3.9.3 - See CHANGELOG.md 2023-06-05 10:24:46 -07:00
Dave Conroy
94a8e45af6 Merge pull request #226 from vanzhiganov/patch-1
Update README.md
2023-06-05 10:13:57 -07:00
Dave Conroy
9d90e37339 Merge pull request #225 from alwynpan/bugfix/#199
#199 Report error when moving the backup file to S3 or Blob fails
2023-06-05 10:13:44 -07:00
Vyacheslav Anzhiganov
085b7cd6ce Update README.md 2023-06-03 16:27:17 +03:00
Yao (Alwyn) Pan
12484bb3f3 feat: Add zip package to the image 2023-06-01 16:54:26 +10:00
Yao (Alwyn) Pan
8fc2721dd4 fix: #199 report error when moving the backup file to S3 or Blob fails 2023-06-01 16:46:13 +10:00
dave@tiredofit.ca
68174c061f Release 3.9.2 - See CHANGELOG.md 2023-05-10 08:19:01 -07:00
dave@tiredofit.ca
fd1d95090e Release 3.9.1 - See CHANGELOG.md 2023-05-03 12:13:29 -07:00
Dave Conroy
7befba0d96 Update README.md 2023-04-27 08:18:26 -07:00
Dave Conroy
583253fce7 Update README.md 2023-04-26 14:43:29 -07:00
dave@tiredofit.ca
068577001e Release 3.9.0 - See CHANGELOG.md 2023-04-26 14:32:36 -07:00
Dave Conroy
7781542816 Expand the number of variables that can be used 2023-04-24 14:54:47 -07:00
dave@tiredofit.ca
9283b5440e Release 3.8.5 - See CHANGELOG.md 2023-04-11 15:42:30 -07:00
Dave Conroy
5e62485e7f Merge pull request #216 from tpansino/bug/215
Set ltargets properly
2023-04-11 15:36:26 -07:00
Tom Pansino
f224571448 Set ltargets properly 2023-04-11 15:31:02 -07:00
dave@tiredofit.ca
01620fec00 Release 3.8.4 - See CHANGELOG.md 2023-04-06 12:14:22 -07:00
dave@tiredofit.ca
18a38b4f1d Release 3.8.3 - See CHANGELOG.md 2023-03-30 14:18:55 -07:00
dave@tiredofit.ca
150f356275 Release 3.8.2 - See CHANGELOG.md 2023-03-30 14:05:17 -07:00
dave@tiredofit.ca
e838ed0027 Release 3.8.1 - See CHANGELOG.md 2023-03-30 11:04:35 -07:00
Dave Conroy
8329b4c065 Add defaults 2023-03-27 16:41:31 -07:00
dave@tiredofit.ca
dab1ac301a Release 3.8.0 - See CHANGELOG.md 2023-03-27 15:01:10 -07:00
dave@tiredofit.ca
077201cd18 Release 3.7.7 - See CHANGELOG.md 2023-03-20 16:24:23 -07:00
Dave Conroy
eeaf59dc6f Merge pull request #210 from codemonium/simplify-pg_isready
Simplify pg_isready usage
2023-03-20 16:22:13 -07:00
Igor Artemenko
88fe0d6411 Simplify pg_isready usage
The pg_isready documentation says that it does not need a correct
database name or username to get the server status. In fact, incorrect
values result in the server logging failed connection attempts. As a
result, when we set DB_NAME to ALL, calls to the check_availability
function (which uses pg_isready) cause the server to log the following
error:

    FATAL:  database "ALL" does not exist

To eliminate this error, this change simplifies the pg_isready call.
2023-03-20 22:51:05 +00:00
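For reference, a minimal sketch of the change as it lands in the `check_availability` function (variable names as used in this repository):

```
# Before: passed a database name and username that pg_isready does not need;
# with DB_NAME=ALL the server logged: FATAL: database "ALL" does not exist
pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q

# After: host and port are sufficient to check server availability
pg_isready --host=${DB_HOST} --port=${DB_PORT} -q
```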
9 changed files with 366 additions and 164 deletions

View File

@@ -10,6 +10,5 @@ jobs:
build:
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
secrets: inherit

View File

@@ -11,6 +11,5 @@ jobs:
build:
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
secrets: inherit

View File

@@ -1,3 +1,121 @@
## 3.9.11 2023-08-24 <dave at tiredofit dot ca>
### Changed
- AWS CLI 2.13.9
## 3.9.10 2023-08-23 <dave at tiredofit dot ca>
### Changed
- Stop trying to move a non-existent checksum file when ENABLE_CHECKSUM=FALSE
## 3.9.9 2023-08-21 <dave at tiredofit dot ca>
### Changed
- Start compiling aws-cli instead of installing from packages to continue supporting arm/v7
## 3.9.8 2023-08-20 <ToshY@github>
### Changed
- Restore armv7 and aarch64 builds
## 3.9.7 2023-07-18 <dave at tiredofit dot ca>
### Changed
- Clean up check_exit_code parameter and reduce duplicate output
## 3.9.6 2023-06-16 <dave at tiredofit dot ca>
### Changed
- Resolve issues introduced with 3.9.3. Split exit codes to be specific for backing up and moving. Uses parameter $11 for post-backup scripts
## 3.9.5 2023-06-13 <dave at tiredofit dot ca>
### Changed
- Start building InfluxDB v1 manually due to its removal from the Alpine repositories
## 3.9.4 2023-06-13 <dave at tiredofit dot ca>
### Added
- Add ability to use the --rsyncable argument with zstd archives
## 3.9.3 2023-06-05 <dave at tiredofit dot ca>
### Added
- Add notification if blobxfer/s3 upload fails (credit @alwynpan)
- Add zip package
## 3.9.2 2023-05-10 <dave at tiredofit dot ca>
### Changed
- Alpine 3.18 base
## 3.9.1 2023-05-03 <dave at tiredofit dot ca>
### Changed
- Properly allow multiple _FILE environment variables to be evaluated, solving an issue with MySQL backups
- Fix _FILE functionality for DB_NAME variable
## 3.9.0 2023-04-26 <dave at tiredofit dot ca>
### Added
- Add support for _FILE environment variables
## 3.8.5 2023-04-11 <tpansino@github>
### Changed
- Fix SQLite3, Influx, and MSSQL backups failing due to a malformed/non-existent ltarget
## 3.8.4 2023-04-06 <dave at tiredofit dot ca>
### Changed
- Fix issue with Influx2 and MSSQL clients not installing properly
## 3.8.3 2023-03-30 <dave at tiredofit dot ca>
### Changed
- Patchup for 3.8.2
## 3.8.2 2023-03-30 <dave at tiredofit dot ca>
### Changed
- Patchup for 3.8.1
## 3.8.1 2023-03-30 <dave at tiredofit dot ca>
### Changed
- Cleanup Dockerfile
- Fix issue with DB_ARCHIVE_TIME not firing correctly
## 3.8.0 2023-03-27 <dave at tiredofit dot ca>
### Added
- Introduce DB_DUMP_TARGET_ARCHIVE which works with DB_ARCHIVE_TIME to move backups older than (x) minutes from DB_DUMP_TARGET to DB_DUMP_TARGET_ARCHIVE for use with external backup systems and custom exclude rules
- Introduce CREATE_LATEST_SYMLINK which creates a symbolic link in DB_DUMP_TARGET of `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)`
## 3.7.7 2023-03-20 <codemonium@github>
### Changed
- Simplify pg_isready usage
## 3.7.6 2023-03-14 <toshy@github>
### Changed

View File

@@ -1,9 +1,14 @@
FROM docker.io/tiredofit/alpine:3.17
ARG DISTRO=alpine
ARG DISTRO_VARIANT=3.18
FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables
ENV INFLUX2_VERSION=2.4.0 \
ENV INFLUX_VERSION=1.8.0 \
INFLUX2_VERSION=2.4.0 \
MSSQL_VERSION=18.0.1.1-1 \
AWS_CLI_VERSION=1.25.97 \
CONTAINER_ENABLE_MESSAGING=FALSE \
CONTAINER_ENABLE_MONITORING=TRUE \
CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
@@ -13,12 +18,14 @@ ENV INFLUX2_VERSION=2.4.0 \
### Dependencies
RUN source /assets/functions/00-container && \
set -ex && \
apk update && \
apk upgrade && \
apk add -t .db-backup-build-deps \
package update && \
package upgrade && \
package install .db-backup-build-deps \
build-base \
bzip2-dev \
cargo \
git \
go \
libarchive-dev \
openssl-dev \
libffi-dev \
@@ -27,10 +34,9 @@ RUN source /assets/functions/00-container && \
xz-dev \
&& \
\
apk add --no-cache -t .db-backup-run-deps \
aws-cli \
package install .db-backup-run-deps \
bzip2 \
influxdb \
groff \
libarchive \
mariadb-client \
mariadb-connector-c \
@@ -40,10 +46,20 @@ RUN source /assets/functions/00-container && \
postgresql15 \
postgresql15-client \
pv \
py3-botocore \
py3-colorama \
py3-cryptography \
py3-docutils \
py3-jmespath \
py3-rsa \
py3-setuptools \
py3-s3transfer \
py3-yaml \
python3 \
redis \
sqlite \
xz \
zip \
zstd \
&& \
\
@@ -56,7 +72,11 @@ RUN source /assets/functions/00-container && \
\
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSSQL_VERSION}_amd64.apk mssql-tools18_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
\
clone_git_repo https://github.com/aws/aws-cli "${AWS_CLI_VERSION}" && \
python3 setup.py install --prefix=/usr && \
clone_git_repo https://github.com/influxdata/influxdb "${INFLUX_VERSION}" && \
go build -o /usr/sbin/influxd ./cmd/influxd && \
strip /usr/sbin/influxd && \
mkdir -p /usr/src/pbzip2 && \
curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
cd /usr/src/pbzip2 && \
@@ -75,12 +95,15 @@ RUN source /assets/functions/00-container && \
\
pip3 install blobxfer && \
\
### Cleanup
apk del .db-backup-build-deps && \
rm -rf /usr/src/* && \
rm -rf /*.apk && \
rm -rf /etc/logrotate.d/redis && \
rm -rf /root/.cache /tmp/* /var/cache/apk/*
package remove .db-backup-build-deps && \
package cleanup && \
rm -rf \
/*.apk \
/etc/logrotate.d/* \
/root/.cache \
/root/go \
/tmp/* \
/usr/src/*
### S6 Setup
COPY install /

README.md
View File

@@ -82,11 +82,11 @@ Clone this repository and build the image with `docker build <arguments> (imagen
### Prebuilt Images
Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup)
Builds of the image are also available on the [Github Container Registry](https://github.com/tiredofit/docker-db-backup/pkgs/container/docker-db-backup)
Builds of the image are also available on the [Github Container Registry](https://github.com/tiredofit/docker-db-backup/pkgs/container/docker-db-backup)
```
docker pull ghcr.io/tiredofit/docker-db-backup:(imagetag)
```
```
The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md):
@@ -123,7 +123,7 @@ The following directories are used for configuration and can be mapped for persi
#### Base Images used
This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) or [Debian Linux](https://hub.docker.com/r/tiredofit/debian) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handled via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`.
This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handled via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`.
Be sure to view the following repositories to understand all the customizable options:
@@ -133,68 +133,70 @@ Be sure to view the following repositories to understand all the customizable op
#### Container Options
| Parameter | Description | Default |
| -------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `MODE` | `AUTO` to use the internal scheduling routines, or `MANUAL` to run backups only when executed by your own means | `AUTO` |
| `MANUAL_RUN_FOREVER` | `TRUE` to keep the container running after a manual backup, or `FALSE` to make the container exit once the backup completes | `TRUE` |
| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
| `PRE_SCRIPT` | Fill this variable in with a command to execute before backing up | |
| `POST_SCRIPT` | Fill this variable in with a command to execute after backing up | |
| `SPLIT_DB` | Create a separate archive for each database. `TRUE` or `FALSE` (MySQL and PostgreSQL only) | `TRUE` |
| Parameter | Description | Default |
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `BACKUP_LOCATION` | Backup to `FILESYSTEM`, `blobxfer` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `MODE` | `AUTO` to use the internal scheduling routines, or `MANUAL` to run backups only when executed by your own means | `AUTO` |
| `MANUAL_RUN_FOREVER` | `TRUE` to keep the container running after a manual backup, or `FALSE` to make the container exit once the backup completes | `TRUE` |
| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
| `CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to the last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `PRE_SCRIPT` | Fill this variable in with a command to execute before backing up | |
| `POST_SCRIPT` | Fill this variable in with a command to execute after backing up | |
| `SPLIT_DB` | Create a separate archive for each database. `TRUE` or `FALSE` (MySQL and PostgreSQL only) | `TRUE` |
### Database Specific Options
| Parameter | Description | Default |
| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- |
| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas e.g. `db1,db2` | |
| `DB_NAME_EXCLUDE` | If using `ALL` - use this to exclude databases, separated by commas, from being backed up | |
| `DB_USER` | Username for the database(s) - Can use `root` for MySQL | |
| `DB_PASS` | (optional if DB doesn't require it) password for the database | |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
| `INFLUX_VERSION` | Which InfluxDB version you are backing up from: the `1`.x or `2` series - AMD64 and ARM64 only for `2` | |
| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | |
| | This environment variable will be parsed to populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can override them by making your own entries |
| Parameter | Description | Default | `_FILE` |
| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- | ------- |
| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | | |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | | |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | | x |
| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas e.g. `db1,db2` | | x |
| `DB_NAME_EXCLUDE` | If using `ALL` - use this to exclude databases, separated by commas, from being backed up | | x |
| `DB_USER` | Username for the database(s) - Can use `root` for MySQL | | x |
| `DB_PASS` | (optional if DB doesn't require it) password for the database | | x |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies | x |
| `INFLUX_VERSION` | Which InfluxDB version you are backing up from: the `1`.x or `2` series - AMD64 and ARM64 only for `2` | | |
| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | | x |
| | This environment variable will be parsed to populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can override them by making your own entries | | |
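As a hypothetical illustration of the `_FILE` column above (service name, secret name, and paths are placeholders), any marked variable can be supplied indirectly by pointing its `_FILE` counterpart at a file, e.g. a Docker secret:

```
services:
  db-backup:
    image: tiredofit/db-backup
    environment:
      - DB_TYPE=mysql
      - DB_HOST=mariadb
      - DB_NAME=database
      - DB_USER=root
      - DB_PASS_FILE=/run/secrets/db_password  # read in place of DB_PASS
    secrets:
      - db_password

secrets:
  db_password:
    file: ./db_password.txt
```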
#### For Influx DB2:
Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST` use syntax of `http(s)://db-name`
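A minimal sketch of an InfluxDB 2 configuration following the mapping above (host, organization, and token are placeholders):

```
- DB_TYPE=influx
- INFLUX_VERSION=2
- DB_HOST=http://influxdb   # note the protocol prefix
- DB_PORT=8086
- DB_USER=my-org            # your Organization
- DB_PASS=my-root-token     # your root token
- DB_NAME=ALL               # back up all databases
```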
### Scheduling Options
| Parameter | Description | Default |
| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` (in an hour and a half) | |
| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `/backup` |
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump frequency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
| Parameter | Description | Default |
| ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` (in an hour and a half) | |
| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `/backup` |
| `DB_DUMP_TARGET_ARCHIVE` | Optional directory where the database dump archives are kept. | `${DB_DUMP_TARGET}/archive/` |
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump frequency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
| `DB_ARCHIVE_TIME` | Value in minutes to move all files older than (x) from `DB_DUMP_TARGET` to `DB_DUMP_TARGET_ARCHIVE` - which is useful when pairing against an external backup system. | |
- You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to parse properly. There have been reports of values starting with a `0` being converted into a different format, which prevents the timer from starting at the correct time.
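For example, in a compose file (a minimal sketch; values are illustrative), quoting in YAML map syntax prevents a value like `0415` from being reinterpreted as a number:

```
environment:
  DB_DUMP_FREQ: "1440"
  DB_DUMP_BEGIN: "0415"   # quoted so the leading zero survives YAML parsing
  DB_ARCHIVE_TIME: "2880" # move backups older than 2 days into DB_DUMP_TARGET_ARCHIVE
```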
### Backup Options
| Parameter | Description | Default |
| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
| `COMPRESSION_LEVEL` | Numerical value of what level of compression to use; most allow `1` to `9`, except for `ZSTD` which allows `1` to `19` | `3` |
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
| `PARALLEL_COMPRESSION_THREADS` | Maximum number of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` |
| `ENABLE_CHECKSUM` | Generate either an MD5 or SHA1 checksum in the directory, `TRUE` or `FALSE` | `TRUE` |
| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | |
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` |
| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` |
| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` |
| `MYSQL_ENABLE_TLS` | Enable TLS functionality for MySQL client | `FALSE` |
| `MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` |
| `MYSQL_TLS_VERSION` | Which TLS version(s) to utilize: `v1.1` `v1.2` `v1.3` | `TLSv1.1,TLSv1.2,TLSv1.3` |
| `MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` |
| `MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | |
| `MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | |
| Parameter | Description | Default | `_FILE` |
| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ------- |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` | |
| `COMPRESSION_LEVEL` | Numerical value of what level of compression to use; most allow `1` to `9`, except for `ZSTD` which allows `1` to `19` | `3` | |
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` | |
| `PARALLEL_COMPRESSION_THREADS` | Maximum number of threads to use when compressing - Integer value e.g. `8` | `autodetected` | |
| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` | |
| `ENABLE_CHECKSUM` | Generate either an MD5 or SHA1 checksum in the directory, `TRUE` or `FALSE` | `TRUE` | |
| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` | |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | | |
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` | |
| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` | |
| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` | |
| `MYSQL_ENABLE_TLS` | Enable TLS functionality for MySQL client | `FALSE` | |
| `MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` | |
| `MYSQL_TLS_VERSION` | Which TLS version(s) to utilize: `v1.1` `v1.2` `v1.3` | `TLSv1.1,TLSv1.2,TLSv1.3` | |
| `MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` | x |
| `MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | | x |
| `MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | | x |
- When using compression with MongoDB, only `GZ` compression is possible.
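For example (a sketch; host and database name are placeholders):

```
- DB_TYPE=mongo
- DB_HOST=mongo
- DB_NAME=mydb
- COMPRESSION=GZ   # mongodump's native --gzip is used; other codecs are not supported
```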
@@ -202,19 +204,19 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
If `BACKUP_LOCATION` = `S3` then the following options are used.
| Parameter | Description | Default |
| --------------------- | ----------------------------------------------------------------------------------------- | ------- |
| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | |
| `S3_KEY_ID` | S3 Key ID (Optional) | |
| `S3_KEY_SECRET` | S3 Key Secret (Optional) | |
| `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | |
| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | |
| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` |
| `S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | |
| `S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | |
| _*OR*_ | | |
| `S3_CERT_SKIP_VERIFY` | Skip verifying self-signed certificates when connecting | `TRUE` |
| Parameter | Description | Default | `_FILE` |
| --------------------- | ----------------------------------------------------------------------------------------- | ------- | ------- |
| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | | x |
| `S3_KEY_ID` | S3 Key ID (Optional) | | x |
| `S3_KEY_SECRET` | S3 Key Secret (Optional) | | x |
| `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | | x |
| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | | x |
| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | | x |
| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` | x |
| `S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | | x |
| `S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | | x |
| _*OR*_ | | | |
| `S3_CERT_SKIP_VERIFY` | Skip verifying self-signed certificates when connecting | `TRUE` | |
- When `S3_KEY_ID` and/or `S3_KEY_SECRET` is not set, the container will try to use an assigned IAM role (if any) for uploading the backup files to the S3 bucket.
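A sketch of an S3 configuration that relies on an attached IAM role (bucket, path, and region are placeholders):

```
- BACKUP_LOCATION=S3
- S3_BUCKET=mybucket
- S3_PATH=backup
- S3_REGION=ap-northeast-2
# S3_KEY_ID and S3_KEY_SECRET omitted - the assigned IAM role is used instead
```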
@@ -222,14 +224,13 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.
Supports uploading backup files with [blobxfer](https://github.com/Azure/blobxfer) to Azure file share storage.
If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
| Parameter | Description | Default |
| ------------------------------ | ------------------------------------------- | ------------------- |
| `BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | |
| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | |
| `BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` |
| Parameter | Description | Default | `_FILE` |
| ------------------------------ | ------------------------------------------- | ------------------- | ------- |
| `BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
> This service uploads files from the backup target directory `DB_DUMP_TARGET`.
> If a cleanup configuration in `DB_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
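A sketch of a blobxfer configuration (account name and key are placeholders; the key could equally be supplied via its `_FILE` counterpart):

```
- BACKUP_LOCATION=blobxfer
- BLOBXFER_STORAGE_ACCOUNT=mystorageaccount
- BLOBXFER_STORAGE_ACCOUNT_KEY=<storage-account-key>
- BLOBXFER_REMOTE_PATH=/docker-db-backup
```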
@@ -322,17 +323,18 @@ $ cat post-script.sh
# #### $8=BACKUP FILENAME (Filename)
# #### $9=BACKUP FILESIZE
# #### $10=HASH (If CHECKSUM enabled)
# #### $11=MOVE_EXIT_CODE
echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes MD5: ${10}"
```
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_timme}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_timme}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}" "${move_exit_code}
Outputs the following on the console:
`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_20220315-000000.sql.bz2 Size: 7795 bytes Hash: 952fbaafa30437494fdf3989a662cd40`
`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_20220315-000000.sql.bz2 Size: 7795 bytes Hash: 952fbaafa30437494fdf3989a662cd40 0`
If you wish to change the size value from bytes to megabytes, set the environment variable `SIZE_VALUE=megabytes`

View File

@@ -31,6 +31,8 @@ services:
- ./backups:/backup
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- TIMEZONE=America/Vancouver
- CONTAINER_ENABLE_MONITORING=FALSE
# - DEBUG_MODE=TRUE
- DB_TYPE=mariadb
- DB_HOST=example-db-host
@@ -43,7 +45,7 @@ services:
- CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
- CONTAINER_ENABLE_MONITORING=FALSE
restart: always
networks:
- example-db-network

View File

@@ -5,9 +5,11 @@ BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
CHECKSUM=${CHECKSUM:-"MD5"}
COMPRESSION=${COMPRESSION:-"ZSTD"}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
CREATE_LATEST_SYMLINK=${CREATE_LATEST_SYMLINK:-"TRUE"}
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
DB_DUMP_TARGET_ARCHIVE=${DB_DUMP_TARGET_ARCHIVE:-"${DB_DUMP_TARGET}/archive/"}
ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
@@ -27,4 +29,4 @@ SCRIPT_LOCATION_POST=${SCRIPT_LOCATION_POST:-"/assets/scripts/post/"}
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SKIP_AVAILABILITY_CHECK=${SKIP_AVAILABILITY_CHECK:-"FALSE"}
SPLIT_DB=${SPLIT_DB:-"TRUE"}
TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}

View File

@@ -2,12 +2,19 @@
bootstrap_variables() {
sanity_var DB_TYPE "Set appropriate DB_TYPE"
transform_file_var \
DB_HOST \
DB_NAME \
DB_PORT \
DB_USER \
DB_PASS
case "${DB_TYPE,,}" in
couch* )
dbtype=couch
DB_PORT=${DB_PORT:-5984}
file_env 'DB_USER'
file_env 'DB_PASS'
sanity_var DB_USER
sanity_var DB_PASS
;;
influx* )
dbtype=influx
@@ -15,31 +22,31 @@ bootstrap_variables() {
1) DB_PORT=${DB_PORT:-8088} ;;
2) DB_PORT=${DB_PORT:-8086} ;;
esac
file_env 'DB_USER'
file_env 'DB_PASS'
sanity_var DB_USER
sanity_var DB_PASS
sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
;;
mongo* )
dbtype=mongo
transform_file_var MONGO_CUSTOM_URI
if [ -n "${MONGO_CUSTOM_URI}" ] ; then
mongo_uri_proto=$(echo ${MONGO_CUSTOM_URI} | grep :// | sed -e's,^\(.*://\).*,\1,g')
mongo_uri_proto=$(echo "${MONGO_CUSTOM_URI}" | grep :// | sed -e's,^\(.*://\).*,\1,g')
mongo_uri_scratch="${MONGO_CUSTOM_URI/${mongo_uri_proto}/}"
mongo_uri_username_password=$(echo ${mongo_uri_scratch} | grep @ | rev | cut -d@ -f2- | rev)
if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch=$(echo ${mongo_uri_scratch} | rev | cut -d@ -f1 | rev) ; fi
mongo_uri_port=$(echo ${mongo_uri_scratch} | grep : | rev | cut -d: -f2- | rev)
if [ -n "${mongo_uri_port}" ]; then mongo_uri_port=$(echo ${mongo_uri_scratch} | rev | cut -d: -f1 | cut -d/ -f2 | rev) ; fi
mongo_uri_hostname=$(echo ${mongo_uri_scratch} | cut -d/ -f1 | cut -d: -f1 )
mongo_uri_database=$(echo ${mongo_uri_scratch} | cut -d/ -f2 | cut -d? -f1 )
mongo_uri_options=$(echo ${mongo_uri_scratch} | cut -d/ -f2 | cut -d? -f2 )
mongo_uri_username_password="$(echo "${mongo_uri_scratch}" | grep @ | rev | cut -d@ -f2- | rev)"
if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch="$(echo "${mongo_uri_scratch}" | rev | cut -d@ -f1 | rev)" ; fi
mongo_uri_port="$(echo "${mongo_uri_scratch}" | grep : | rev | cut -d: -f2- | rev)"
if [ -n "${mongo_uri_port}" ]; then mongo_uri_port="$(echo "${mongo_uri_scratch}" | rev | cut -d: -f1 | cut -d/ -f2 | rev)" ; fi
mongo_uri_hostname="$(echo "${mongo_uri_scratch}" | cut -d/ -f1 | cut -d: -f1 )"
mongo_uri_database="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f1 )"
mongo_uri_options="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f2 )"
DB_NAME=${DB_NAME:-"${mongo_uri_database,,}"}
DB_HOST=${DB_HOST:-"${mongo_uri_hostname,,}"}
else
DB_PORT=${DB_PORT:-27017}
[[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
[[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
[[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
transform_file_var DB_AUTH
[[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
fi
;;
@@ -47,8 +54,8 @@ bootstrap_variables() {
dbtype=mysql
DB_PORT=${DB_PORT:-3306}
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
transform_file_var DB_PASS
if [ -n "${DB_PASS}" ] ; then export MYSQL_PWD=${DB_PASS} ; fi
if var_true "${MYSQL_ENABLE_TLS}" ; then
if [ -n "${MYSQL_TLS_CA_FILE}" ] ; then
mysql_tls_args="--ssl_ca=${MYSQL_TLS_CA_FILE}"
@@ -81,14 +88,12 @@ bootstrap_variables() {
postgres* | "pgsql" )
dbtype=pgsql
DB_PORT=${DB_PORT:-5432}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
"redis" )
dbtype=redis
DB_PORT=${DB_PORT:-6379}
[[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
;;
sqlite* )
@@ -100,28 +105,39 @@ bootstrap_variables() {
;;
esac
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] && [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
file_env 'S3_KEY_ID'
file_env 'S3_KEY_SECRET'
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
transform_file_var \
S3_BUCKET \
S3_KEY_ID \
S3_KEY_SECRET \
S3_PATH \
S3_REGION \
S3_HOST \
S3_PROTOCOL \
S3_EXTRA_OPTS \
S3_CERT_CA_FILE
fi
if [ "${BACKUP_LOCATION,,}" = "blobxfer" ] && [ -n "${BLOBXFER_STORAGE_ACCOUNT_FILE}" ] && [ -n "${BLOBXFER_STORAGE_ACCOUNT_KEY_FILE}" ]; then
file_env 'BLOBXFER_STORAGE_ACCOUNT_FILE'
file_env 'BLOBXFER_STORAGE_ACCOUNT_KEY_FILE'
if [ "${BACKUP_LOCATION,,}" = "blobxfer" ] ; then
transform_file_var \
BLOBXFER_STORAGE_ACCOUNT \
BLOBXFER_STORAGE_ACCOUNT_KEY
fi
}
backup_couch() {
prepare_dbbackup
target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
ltarget=couch_${DB_NAME}_${DB_HOST#*//}
compression
pre_dbbackup ${DB_NAME}
print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
curl -sSL -X GET ${DB_HOST}:${DB_PORT}/${DB_NAME}/_all_docs?include_docs=true | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup ${DB_NAME}
}
@@ -139,17 +155,20 @@ backup_influx() {
prepare_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
target=influx_${db}_${DB_HOST#*//}_${now}
ltarget=influx_${db}_${DB_HOST#*//}
compression
pre_dbbackup $db
print_notice "Dumping Influx database: '${db}'"
influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
check_exit_code backup $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
ltarget=influx_${db}_${DB_HOST#*//}
generate_checksum
move_dbbackup
check_exit_code move $target_dir
post_dbbackup $db
done
;;
@@ -158,16 +177,19 @@ backup_influx() {
prepare_dbbackup
if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
target=influx2_${db}_${DB_HOST#*//}_${now}
ltarget=influx2_${db}_${DB_HOST#*//}
compression
pre_dbbackup $db
print_notice "Dumping Influx2 database: '${db}'"
influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
check_exit_code backup $target_dir
create_archive
target=influx2_${db}_${DB_HOST#*//}_${now}.tar${extension}
ltarget=influx2_${db}_${DB_HOST#*//}
generate_checksum
move_dbbackup
check_exit_code move $target_dir
post_dbbackup $db
done
;;
@@ -178,8 +200,10 @@ backup_mongo() {
prepare_dbbackup
if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
else
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
mongo_compression="--gzip"
compression_string="and compressing with gzip"
fi
@@ -192,23 +216,26 @@ backup_mongo() {
print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
silent mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} ${mongo_backup_parameter}
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup "${DB_NAME}"
}
backup_mssql() {
prepare_dbbackup
target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
ltarget=mssql_${DB_NAME,,}_${DB_HOST,,}
compression
pre_dbbackup "${DB_NAME}"
print_notice "Dumping MSSQL database: '${DB_NAME}'"
silent /opt/mssql-tools18/bin/sqlcmd -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE [${DB_NAME}] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup $DB_NAME
}
@@ -240,28 +267,32 @@ backup_mysql() {
for db in ${db_names} ; do
prepare_dbbackup
target=mysql_${db}_${DB_HOST,,}_${now}.sql
ltarget=mysql_${db}_${DB_HOST,,}
compression
pre_dbbackup $db
print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup $db
done
else
print_debug "Not splitting database dumps into their own files"
prepare_dbbackup
target=mysql_all_${DB_HOST,,}_${now}.sql
ltarget=mysql_all_${DB_HOST,,}
compression
pre_dbbackup all
print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup all
fi
}
@@ -289,20 +320,23 @@ backup_pgsql() {
for db in ${db_names} ; do
prepare_dbbackup
target=pgsql_${db}_${DB_HOST,,}_${now}.sql
ltarget=pgsql_${db}_${DB_HOST,,}
compression
pre_dbbackup $db
print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup $db
done
else
print_debug "Not splitting database dumps into their own files"
prepare_dbbackup
target=pgsql_all_${DB_HOST,,}_${now}.sql
ltarget=pgsql_all_${DB_HOST,,}
compression
pre_dbbackup all
print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
@@ -316,9 +350,10 @@ backup_pgsql() {
done
pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup all
fi
}
@@ -327,6 +362,7 @@ backup_redis() {
prepare_dbbackup
print_notice "Dumping Redis - Flushing Redis Cache First"
target=redis_all_${DB_HOST,,}_${now}.rdb
ltarget=redis_${DB_HOST,,}
echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
sleep 10
try=5
@@ -346,9 +382,10 @@ backup_redis() {
compression
pre_dbbackup all
$compress_cmd "${TEMP_LOCATION}/${target_original}"
check_exit_code $target
check_exit_code backup $target
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup all
}
@@ -357,15 +394,17 @@ backup_sqlite3() {
db=$(basename "${DB_HOST}")
db="${db%.*}"
target=sqlite3_${db}_${now}.sqlite3
ltarget=sqlite3_${db}.sqlite3
compression
pre_dbbackup $db
print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
silent sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
exit_code=$?
check_exit_code $target
check_exit_code backup $target
cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
generate_checksum
move_dbbackup
check_exit_code move $target
post_dbbackup $db
}
@@ -420,6 +459,7 @@ check_availability() {
;;
"mysql" )
counter=0
transform_file_var DB_PASS
export MYSQL_PWD=${DB_PASS}
while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
sleep 5
@@ -437,8 +477,7 @@ check_availability() {
;;
"pgsql" )
counter=0
export PGPASSWORD=${DB_PASS}
until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
until pg_isready --host=${DB_HOST} --port=${DB_PORT} -q
do
sleep 5
(( counter+=5 ))
@@ -474,13 +513,28 @@ check_availability() {
check_exit_code() {
print_debug "DB Backup Exit Code is ${exit_code}"
case "${exit_code}" in
0 )
print_info "DB Backup of '${1}' completed successfully"
;;
* )
print_error "DB Backup of '${1}' reported errors"
master_exit_code=1
case "${1}" in
backup )
case "${exit_code}" in
0 )
print_info "DB Backup of '${2}' completed successfully"
;;
* )
print_error "DB Backup of '${2}' reported errors"
master_exit_code=1
;;
esac
;;
move )
case "${move_exit_code}" in
0 )
print_debug "Moving of backup '${2}' completed successfully"
;;
* )
print_error "Moving of backup '${2}' reported errors"
master_exit_code=1
;;
esac
;;
esac
}
@@ -526,15 +580,16 @@ cleanup_old_data() {
compression() {
if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
PARALLEL_COMPRESSION_THREADS=1
fi
if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
PARALLEL_COMPRESSION_THREADS=1
fi
case "${COMPRESSION,,}" in
if var_true "${GZ_RSYNCABLE}" ; then
gz_rsyncable=--rsyncable
fi
case "${COMPRESSION,,}" in
gz* )
if var_true "${GZ_RSYNCABLE}" ; then
gz_rsyncable=--rsyncable
fi
compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} ${gz_rsyncable}"
compression_type="gzip"
extension=".gz"
@@ -559,7 +614,7 @@ compression() {
target=${target}.xz
;;
zst* )
compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} ${gz_rsyncable}"
compression_type="zstd"
dir_compress_cmd=${compress_cmd}
extension=".zst"
@@ -652,8 +707,16 @@ move_dbbackup() {
"file" | "filesystem" )
print_debug "Moving backup to filesystem"
mkdir -p "${DB_DUMP_TARGET}"
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
if var_true "${ENABLE_CHECKSUM}" ; then mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/ ; fi
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
move_exit_code=$?
if var_true "${CREATE_LATEST_SYMLINK}" ; then
ln -sf "${DB_DUMP_TARGET}"/"${target}" "${DB_DUMP_TARGET}"/latest-"${ltarget}"
fi
if [ -n "${DB_ARCHIVE_TIME}" ] ; then
mkdir -p "${DB_DUMP_TARGET_ARCHIVE}"
find "${DB_DUMP_TARGET}"/ -maxdepth 1 -mmin +"${DB_ARCHIVE_TIME}" -iname "*" -exec mv {} "${DB_DUMP_TARGET_ARCHIVE}" \;
fi
;;
"s3" | "minio" )
print_debug "Moving backup to S3 Bucket"
@@ -676,6 +739,7 @@ move_dbbackup() {
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
move_exit_code=$?
if var_true "${ENABLE_CHECKSUM}" ; then
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
@@ -691,6 +755,7 @@ move_dbbackup() {
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET}
move_exit_code=$?
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
rm -rf "${TEMP_LOCATION}"/"${target}"
@@ -708,6 +773,7 @@ prepare_dbbackup() {
now=$(date +"%Y%m%d-%H%M%S")
now_time=$(date +"%H:%M:%S")
now_date=$(date +"%Y-%m-%d")
ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
}
@@ -766,11 +832,11 @@ post_dbbackup() {
### Post Script Support
if [ -n "${POST_SCRIPT}" ] ; then
if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
if [ -x "${POST_SCRIPT}" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
fi
@@ -787,12 +853,12 @@ post_dbbackup() {
if [ -d "${SCRIPT_LOCATION_POST}" ] && dir_notempty "${SCRIPT_LOCATION_POST}" ; then
for f in $(find ${SCRIPT_LOCATION_POST} -name \*.sh -type f); do
if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
if [ -x "${f}" ] ; then
print_notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}"
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else
print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
fi
@@ -817,14 +883,6 @@ sanity_test() {
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
esac
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] && [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
sanity_var S3_BUCKET "S3 Bucket"
sanity_var S3_PATH "S3 Path"
sanity_var S3_REGION "S3 Region"
file_env 'S3_KEY_ID'
file_env 'S3_KEY_SECRET'
fi
}
setup_mode() {
@@ -837,8 +895,7 @@ setup_mode() {
mkdir -p /etc/services.d/99-run_forever
cat <<EOF > /etc/services.d/99-run_forever/run
#!/bin/bash
while true
do
while true; do
sleep 86400
done
EOF

View File

@@ -37,7 +37,7 @@ else
case "$1" in
"-h" )
cat <<EOF
${IMAGE_NAME} Restore Tool
${IMAGE_NAME} Restore Tool ${IMAGE_VERSION}
(c) 2022 Dave Conroy (https://github.com/tiredofit)
This script will assist you in restoring databases from backups taken by this Docker image.