Compare commits


40 Commits
3.0.8 ... 3.3.3

Author SHA1 Message Date
dave@tiredofit.ca
005e7f6e47 Release 3.3.3 - See CHANGELOG.md 2022-05-24 08:26:45 -07:00
Dave Conroy
7d7cb9587d Release 3.3.2 - See CHANGELOG.md 2022-05-02 22:28:08 -07:00
Dave Conroy
d1713fe3f0 Release 3.3.1 - See CHANGELOG.md 2022-04-30 22:31:34 -07:00
Dave Conroy
d1e98d9c4b Release 3.3.0 - See CHANGELOG.md 2022-04-30 03:23:45 -07:00
Dave Conroy
0920b671cb Release 3.2.6 - See CHANGELOG.md 2022-04-25 10:29:45 -07:00
Dave Conroy
28ed6c3bb8 Release 3.2.5 - See CHANGELOG.md 2022-04-23 14:11:29 -07:00
Dave Conroy
c1bdf26598 Release 3.2.4 - See CHANGELOG.md 2022-04-21 16:04:43 -07:00
Dave Conroy
5a4cac2cee Release 3.2.3 - See CHANGELOG.md 2022-04-21 15:46:27 -07:00
Dave Conroy
c04eec7661 Add space after compress_cmd 2022-04-21 14:19:09 -07:00
Dave Conroy
32f1959a07 Merge pull request #120 from joergmschulz/patch-1
small typo / exiting instead of exitting
2022-04-21 14:18:43 -07:00
joergmschulz
d384d5a529 small typo / exiting instead of exitting 2022-04-21 23:16:02 +02:00
Dave Conroy
56ab68dd71 Release 3.2.2 - See CHANGELOG.md 2022-04-21 12:14:17 -07:00
Dave Conroy
9a1a5efbd9 Do a different DB Ready check for Influx 1 vs 2 2022-04-21 12:12:57 -07:00
Dave Conroy
df5532c128 Fix blank database size when compression type=NONE 2022-04-21 12:08:51 -07:00
Dave Conroy
2ecd313778 Change database variables around 2022-04-21 12:08:27 -07:00
Dave Conroy
55cfe5ab02 Release 3.2.1 - See CHANGELOG.md 2022-04-03 10:28:07 -07:00
Dave Conroy
ae2e2c38e2 Sanitize DB_HOST for URLs 2022-04-02 07:37:34 -07:00
Dave Conroy
c23d7991fe Release 3.2.0 - See CHANGELOG.md 2022-04-01 18:41:58 -07:00
Dave Conroy
3f58cfd284 Release 3.1.3 - See CHANGELOG.md 2022-03-30 10:46:16 -07:00
Dave Conroy
2d01f5e692 Fix for MARIADB not sending DB name for post script 2022-03-30 10:45:27 -07:00
Dave Conroy
dbd0a03b0d SPLIT_DB is supposed to be TRUE 2022-03-30 10:43:22 -07:00
Dave Conroy
6527f4ff63 Add sanity checks for Post scripts to be executible 2022-03-30 10:37:58 -07:00
Dave Conroy
d843d21a1b Release 3.1.2 - See CHANGELOG.md 2022-03-29 08:09:36 -07:00
Dave Conroy
24ed769429 Release 3.1.1 - See CHANGELOG.md 2022-03-28 10:29:00 -07:00
Dave Conroy
cbd87a5ede Update README.md 2022-03-23 19:16:51 -07:00
Dave Conroy
13214665c9 Release 3.1.0 - See CHANGELOG.md 2022-03-23 16:21:12 -07:00
Dave Conroy
2e71f617a1 Merge pull request #107 from piemonkey/mongo-restore
Add Mongo support to restore script
2022-03-23 12:24:43 -07:00
Dave Conroy
fbe9dde4a1 Release 3.0.16 - See CHANGELOG.md 2022-03-23 07:57:28 -07:00
Dave Conroy
eb2a18672b Release 3.0.15 - See CHANGELOG.md 2022-03-22 18:27:57 -07:00
Dave Conroy
5f784ed156 Tweak Example 2022-03-22 09:57:28 -07:00
Dave Conroy
d9a4690ea2 Release 3.0.14 - See CHANGELOG.md 2022-03-22 07:52:15 -07:00
Rich
e7eb88c32a Give feedback if restore script doesn't support db type 2022-03-22 09:12:34 +01:00
Rich
52dc510b89 Add auto restore support for mongodb 2022-03-22 09:12:16 +01:00
Rich
06677dbc8b Fix typo setting dbhost in restore script 2022-03-22 09:12:05 +01:00
Rich
e0dd2bc91b Set db type from env vars correctly during restore 2022-03-22 09:11:54 +01:00
Dave Conroy
baba842373 Release 3.0.13 - See CHANGELOG.md 2022-03-21 16:26:45 -07:00
Dave Conroy
108938c17a Release 3.0.12 - See CHANGELOG.md 2022-03-21 13:51:01 -07:00
Dave Conroy
b0b39fa8c1 Release 3.0.11 - See CHANGELOG.md 2022-03-21 12:34:33 -07:00
Dave Conroy
fa8f43132c Release 3.0.10 - See CHANGELOG.md 2022-03-21 11:19:17 -07:00
Dave Conroy
3f693feefc Release 3.0.9 - See CHANGELOG.md 2022-03-21 10:57:17 -07:00
11 changed files with 822 additions and 413 deletions

.github/dependabot.yml (new file)

@@ -0,0 +1,7 @@
+version: 2
+updates:
+  # Maintain dependencies for GitHub Actions
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"


@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Prepare
         id: prep
@@ -63,17 +63,17 @@ jobs:
           echo ::set-output name=docker_image::${DOCKER_IMAGE}
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
         with:
           platforms: all
       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
       - name: Login to DockerHub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
@@ -102,7 +102,7 @@ jobs:
           fi
       - name: Build
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           builder: ${{ steps.buildx.outputs.name }}
           context: .


@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Prepare
         id: prep
@@ -63,17 +63,17 @@ jobs:
           echo ::set-output name=docker_image::${DOCKER_IMAGE}
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
         with:
           platforms: all
       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2
       - name: Login to DockerHub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
@@ -102,7 +102,7 @@ jobs:
           fi
       - name: Build
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
        with:
          builder: ${{ steps.buildx.outputs.name }}
          context: .


@@ -1,3 +1,160 @@
+## 3.3.3 2022-05-24 <dave at tiredofit dot ca>
+### Added
+- Alpine 3.16 base
+## 3.3.2 2022-05-02 <dave at tiredofit dot ca>
+### Added
+- Add POST_SCRIPT_SKIP_X_VERIFY environment variable to allow for more host compatibility for post scripts
+## 3.3.1 2022-04-30 <dave at tiredofit dot ca>
+### Changed
+- Compressing silently was causing 0 byte backups
+## 3.3.0 2022-04-30 <dave at tiredofit dot ca>
+### Added
+- Ability to auto clean old S3 / Minio Hosts like what occurs on filesystem
+- Alert user how to turn off Zabbix Monitoring if it fails
+- Allow Zabbix Monitoring to work with S3
+- Silence some more compression statements
+### Changed
+- Fix for Redis not backing up properly
+- Start sending checksums for S3 Outputs
+- Cleanup some code functions
+- Fix Container Log Level always in DEBUG
+## 3.2.4 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Add -portable flag when backing up Influx
+## 3.2.3 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Fix for bucket / db name with InfluxDB 1.x
+- Minor aesthetics, spacing, spelling
+## 3.2.2 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Restore script properly parses DB_PORT entry
+- Influx Database ready check performs different checks dependent on V1/V2
+- Stop using weird database lowercase variables unnecessarily breaking Docker Secrets
+## 3.2.1 2022-04-03 <dave at tiredofit dot ca>
+### Changed
+- Fix a variety of issues with 3.2.0 relating to InfluxDB
+## 3.2.0 2022-03-31 <dave at tiredofit dot ca>
+### Added
+- Install InfluxDB2 Client alongside version 1 (amd64 and arm64)
+- Introduce InfluxDB 2 backup support
+- Introduce multiple compression types other than Gzip for Influx 1/2
+- Introduce compression for MSSQL backups
+### Changed
+- Testing for Host availability for CouchDB and InfluxDB
+## 3.1.3 2022-03-30 <dave at tiredofit dot ca>
+### Changed
+- Fix for MariaDB not sending database name to post script
+- Check for executable bit on post scripts both via environment variable and /assets/custom
+- SPLIT_DB defaulted to TRUE
+## 3.1.2 2022-03-29 <dave at tiredofit dot ca>
+### Changed
+- Fix for blank Notice when individual backup is completed (time taken)
+## 3.1.1 2022-03-28 <dave at tiredofit dot ca>
+### Changed
+- Resolve some issues with backups of Mongo and others not saving the proper timestamp
+## 3.1.0 2022-03-23 <dave at tiredofit dot ca>
+### Added
+- Backup multiple databases by separating with comma e.g. db1,db2
+- Backup ALL databases by setting DB_NAME to ALL
+- Exclude databases from being backed up, comma separated, when DB_NAME is ALL e.g. DB_NAME_EXCLUDE=db3,db4
+- Backup timers execute per database, not per the whole script run
+- Post scripts run after each database backup
+- Checksum does not occur when database backup failed
+- Database cleanup does not occur when any database backups fail throughout session
+- MongoDB now supported with 'restore' script - Credit to piemonkey@github
+- Lots of reshuffling, optimizations with script due to botched 3.0 release
+### Changed
+- ZSTD replaces GZ as default compression type
+- Output is cleaner when backups are occurring
+## 3.0.16 2022-03-23 <dave at tiredofit dot ca>
+### Changed
+- Fix for SPLIT_DB not looping through all database names properly
+## 3.0.15 2022-03-22 <dave at tiredofit dot ca>
+### Changed
+- Rework compression function
+- Fix for Bzip compression failing
+## 3.0.14 2022-03-22 <dave at tiredofit dot ca>
+### Changed
+- Rearrange Notice stating when next backup is going to start
+## 3.0.13 2022-03-21 <dave at tiredofit dot ca>
+### Added
+- Add compression levels to debug mode
+## 3.0.12 2022-03-21 <dave at tiredofit dot ca>
+### Added
+- Throw Errors for MANUAL mode when certain other CONTAINER_* services are enabled
+## 3.0.11 2022-03-21 <dave at tiredofit dot ca>
+### Changed
+- Fix for Parallel Compression
+## 3.0.10 2022-03-21 <dave at tiredofit dot ca>
+### Changed
+- Fix for restore script not taking "custom" usernames or passwords
+## 3.0.9 2022-03-21 <dave at tiredofit dot ca>
+### Changed
+- Switch to using parallel versions of compression tools all the time, yet explicitly state the threads in use (1 or ++)
 ## 3.0.8 2022-03-21 <dave at tiredofit dot ca>
 ### Added
@@ -173,7 +330,7 @@
 ## 2.9.2 2021-10-22 <teenigma@github>
 ### Fixed
 - Fix compression failing on Redis backup
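
The 3.1.0 notes above describe the new multi-database selection behaviour. A minimal sketch of how those variables combine, assuming a MariaDB container named `example-db` and the `tiredofit/db-backup` image (names and password illustrative):

#!/usr/bin/env bash
# Back up two named databases; SPLIT_DB=TRUE (the new default) gives each its own archive
docker run -d --name db-backup \
  -e DB_TYPE=mariadb -e DB_HOST=example-db \
  -e DB_USER=root -e DB_PASS=password \
  -e DB_NAME=db1,db2 \
  tiredofit/db-backup

# Back up everything the user can see, except two schemas
docker run -d --name db-backup-all \
  -e DB_TYPE=mariadb -e DB_HOST=example-db \
  -e DB_USER=root -e DB_PASS=password \
  -e DB_NAME=ALL -e DB_NAME_EXCLUDE=db3,db4 \
  tiredofit/db-backup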


@@ -1,8 +1,10 @@
-FROM docker.io/tiredofit/alpine:3.15
+FROM docker.io/tiredofit/alpine:3.16
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"
 ### Set Environment Variables
-ENV MSSQL_VERSION=17.8.1.1-1 \
+ENV INFLUX2_VERSION=2.2.1 \
+    MSSQL_VERSION=17.8.1.1-1 \
     CONTAINER_ENABLE_MESSAGING=FALSE \
     CONTAINER_ENABLE_MONITORING=TRUE \
     CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
@@ -44,9 +46,14 @@ RUN set -ex && \
     \
     apkArch="$(apk --print-arch)"; \
     case "$apkArch" in \
-      x86_64) mssql=true ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ;; \
-      *) echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ;; \
+      x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
+      aarch64 ) influx2=true ; influx_arch=arm64 ;; \
+      *) sleep 0.1 ;; \
     esac; \
+    \
+    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
+    if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
+    \
     mkdir -p /usr/src/pbzip2 && \
     curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
     cd /usr/src/pbzip2 && \


@@ -16,7 +16,8 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 * dump to local filesystem or backup to S3 Compatible services
 * select database user and password
-* backup all databases
+* backup all databases, single, or multiple databases
+* backup all to separate files or one singular file
 * choose to have an MD5 or SHA1 sum after backup for verification
 * delete old backups after specific amount of time
 * choose compression type (none, gz, bz, xz, zstd)
@@ -49,6 +50,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 - [Base Images used](#base-images-used)
 - [Container Options](#container-options)
 - [Database Specific Options](#database-specific-options)
+  - [For Influx DB2:](#for-influx-db2)
 - [Scheduling Options](#scheduling-options)
 - [Backup Options](#backup-options)
 - [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
@@ -121,30 +123,36 @@ Be sure to view the following repositories to understand all the customizable op
 #### Container Options
 | Parameter            | Description                                                                                                                       | Default         |
-| ----------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
+| -------------------- | ------------------------------------------------------------------------------------------------------------------------------- | --------------- |
 | `BACKUP_LOCATION`    | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi                                                        | `FILESYSTEM`    |
 | `MODE`               | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO`          |
 | `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup                                                 | `TRUE`          |
 | `TEMP_LOCATION`      | Perform Backups and Compression in this temporary directory                                                                      | `/tmp/backups/` |
-| `DB_AUTH`            | (Mongo Only - Optional) Authentication Database                                                                                  |                 |
 | `DEBUG_MODE`         | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed.           | `FALSE`         |
 | `POST_SCRIPT`        | Fill this variable in with a command to execute after the script finishes backing up                                             |                 |
-| `SPLIT_DB`           | If using root as username and multiple DBs on system, set to TRUE to create separate DB backups instead of all in one.           | `FALSE`         |
+| `SPLIT_DB`           | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and PostgreSQL Only)                                             | `TRUE`          |

 ### Database Specific Options
 | Parameter         | Description                                                                                                                                   | Default |
-| --------- | --------------------------------------------------------------------------------------------- | ------- |
+| ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `DB_AUTH`         | (Mongo Only - Optional) Authentication Database                                                                                               |         |
 | `DB_TYPE`         | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3`                                                       |         |
 | `DB_HOST`         | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3`                                                 |         |
-| `DB_NAME`         | Schema Name e.g. `database`                                                                                                                   |         |
-| `DB_USER`         | username for the database - use `root` to backup all MySQL databases.                                                                        |         |
+| `DB_NAME`         | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas e.g. `db1,db2` |         |
+| `DB_NAME_EXCLUDE` | If using `ALL`, exclude these comma-separated databases from being backed up e.g. `DB_NAME_EXCLUDE=db3,db4`                                   |         |
+| `DB_USER`         | username for the database(s) - Can use `root` for MySQL                                                                                       |         |
 | `DB_PASS`         | (optional if DB doesn't require it) password for the database                                                                                |         |
 | `DB_PORT`         | (optional) Set port to connect to DB_HOST. Defaults are provided                                                                             | varies  |
+| `INFLUX_VERSION`  | Which InfluxDB version you are backing up from: `1.x` or `2.x` series - AMD64 and ARM64 only for `2`                                          |         |

+#### For Influx DB2:
+Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST` use syntax of `http(s)://db-name`
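
A sketch of that mapping for an InfluxDB 2.x server, assuming an organization `myorg`, a root token in `$INFLUX_TOKEN`, and the server's HTTP API on port 8086 (all values illustrative):

#!/usr/bin/env bash
# Organization -> DB_USER, root token -> DB_PASS, host keeps its http(s):// prefix
docker run -d --name influx2-backup \
  -e DB_TYPE=influx -e INFLUX_VERSION=2 \
  -e DB_HOST=http://influxdb -e DB_PORT=8086 \
  -e DB_USER=myorg -e DB_PASS="${INFLUX_TOKEN}" \
  -e DB_NAME=ALL \
  tiredofit/db-backup
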
 ### Scheduling Options
 | Parameter         | Description                                                                                                                                    | Default |
 | ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
-| `DB_DUMP_FREQ`    | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day.                                                                 | `1440`  |
+| `DB_DUMP_FREQ`    | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day.                                          | `1440`  |
 | `DB_DUMP_BEGIN`   | What time to do the first dump. Defaults to immediate. Must be in one of two formats                                                           |         |
 |                   | Absolute HHMM, e.g. `2330` or `0415`                                                                                                           |         |
 |                   | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half |         |
@@ -152,18 +160,18 @@ Be sure to view the following repositories to understand all the customizable op
 - You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of values that start with a `0` being converted into a different format, which will not allow the timer to start at the correct time.
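
For example (service names and password illustrative), the same container can be told to take its first dump at 04:15 and then daily, or to start 10 minutes after boot:

#!/usr/bin/env bash
# Absolute form - quote the leading-zero time so it survives shell/YAML parsing
docker run -d -e DB_DUMP_BEGIN="0415" -e DB_DUMP_FREQ=1440 \
  -e DB_TYPE=mariadb -e DB_HOST=example-db -e DB_USER=root -e DB_PASS=password -e DB_NAME=mydb \
  tiredofit/db-backup

# Relative form - first dump 10 minutes after the container starts
docker run -d -e DB_DUMP_BEGIN="+10" \
  -e DB_TYPE=mariadb -e DB_HOST=example-db -e DB_USER=root -e DB_PASS=password -e DB_NAME=mydb \
  tiredofit/db-backup
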
 ### Backup Options
 | Parameter                      | Description                                                                                                                  | Default        |
-| ----------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | ------- |
+| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | -------------- |
-| `COMPRESSION`                  | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE`                                                      | `GZ`           |
+| `COMPRESSION`                  | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE`                                                      | `ZSTD`         |
 | `COMPRESSION_LEVEL`            | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19`    | `3`            |
 | `ENABLE_PARALLEL_COMPRESSION`  | Use multiple cores when compressing backups `TRUE` or `FALSE`                                                                | `TRUE`         |
 | `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8`                                                   | `autodetected` |
 | `ENABLE_CHECKSUM`              | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE`                                                                | `TRUE`         |
 | `CHECKSUM`                     | Either `MD5` or `SHA1`                                                                                                       | `MD5`          |
 | `EXTRA_OPTS`                   | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command`                              |                |
 | `MYSQL_MAX_ALLOWED_PACKET`     | Max allowed packet if backing up MySQL / MariaDB                                                                             | `512M`         |
 | `MYSQL_SINGLE_TRANSACTION`     | Backup in a single transaction with MySQL / MariaDB                                                                          | `TRUE`         |
 | `MYSQL_STORED_PROCEDURES`      | Backup stored procedures with MySQL / MariaDB                                                                                | `TRUE`         |
 - When using compression with MongoDB, only `GZ` compression is possible.
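
A hedged sketch of combining those options, reflecting the new `ZSTD` default from this diff (level, thread count, and database settings illustrative):

#!/usr/bin/env bash
# zstd level 10 across 4 threads, with a SHA1 sum written next to each archive
docker run -d \
  -e COMPRESSION=ZSTD -e COMPRESSION_LEVEL=10 \
  -e ENABLE_PARALLEL_COMPRESSION=TRUE -e PARALLEL_COMPRESSION_THREADS=4 \
  -e ENABLE_CHECKSUM=TRUE -e CHECKSUM=SHA1 \
  -e DB_TYPE=mariadb -e DB_HOST=example-db -e DB_USER=root -e DB_PASS=password -e DB_NAME=mydb \
  tiredofit/db-backup
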
@@ -187,7 +195,6 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.
 ## Maintenance
 ### Shell Access
 For debugging and maintenance purposes you may want to access the container's shell.
@@ -201,7 +208,7 @@ Manual Backups can be performed by entering the container and typing `backup-now
 - Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images, specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup, and then exit cleanly. An alternative way to run the script is to execute `/etc/services.available/10-db-backup/run`.
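
A sketch of that one-shot configuration for ad-hoc or externally scheduled runs (image tag, database settings, and paths illustrative):

#!/usr/bin/env bash
# Run once, back up, and exit cleanly instead of looping forever
docker run --rm \
  -e MODE=MANUAL -e MANUAL_RUN_FOREVER=FALSE \
  -e CONTAINER_ENABLE_SCHEDULING=FALSE -e CONTAINER_ENABLE_MONITORING=FALSE \
  -e DB_TYPE=pgsql -e DB_HOST=postgres -e DB_USER=postgres \
  -e DB_PASS=password -e DB_NAME=mydb \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
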
 ### Restoring Databases
-Entering in the container and executing `restore` will execute a menu based script to restore your backups.
+Entering in the container and executing `restore` will execute a menu based script to restore your backups - MariaDB, Postgres, and Mongo supported.
 You will be presented with a series of menus allowing you to choose:
 - What file to restore
@@ -254,6 +261,9 @@ Outputs the following on the console:
 If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`
+You must make your scripts executable, otherwise an internal check will skip trying to run them.
+If for some reason your filesystem or host is not detecting it right, use the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass.
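
A minimal sketch of such a post script; treat the arguments as opaque since this excerpt does not document them, and the ping URL is a placeholder:

#!/bin/bash
# /assets/custom-scripts/post-script.sh
# Must be chmod +x, or set POST_SCRIPT_SKIP_X_VERIFY=TRUE to bypass the check.
echo "post script invoked with: $*"
# e.g. ping an external monitor so a missed backup raises an alert
# curl -fsS -m 10 https://example.com/ping/backup-ok >/dev/null || true
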
 ## Support
 These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.


@@ -20,7 +20,7 @@ services:
       - example-db
     volumes:
       - ./backups:/backup
-      - ./post-script.sh:/assets/custom-scripts/post-script.sh
+      #- ./post-script.sh:/assets/custom-scripts/post-script.sh
     environment:
       - DB_TYPE=mariadb
       - DB_HOST=example-db
@@ -30,8 +30,8 @@ services:
       - DB_DUMP_FREQ=1440
       - DB_DUMP_BEGIN=0000
       - DB_CLEANUP_TIME=8640
-      - CHECKSUM=MD5
-      - COMPRESSION=XZ
+      - CHECKSUM=SHA1
+      - COMPRESSION=ZSTD
       - SPLIT_DB=FALSE
     restart: always


@@ -2,13 +2,13 @@
 BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
 CHECKSUM=${CHECKSUM:-"MD5"}
-COMPRESSION=${COMPRESSION:-"GZ"}
+COMPRESSION=${COMPRESSION:-"ZSTD"}
 COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
 DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
 DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
 DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
 ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
-ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION}:-"TRUE"}
+ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
 MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
 MODE=${MODE:-"AUTO"}
 MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
@@ -18,10 +18,5 @@ PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
 S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
 S3_PROTOCOL=${S3_PROTOCOL:-"https"}
 SIZE_VALUE=${SIZE_VALUE:-"bytes"}
-SPLIT_DB=${SPLIT_DB:-"FALSE"}
+SPLIT_DB=${SPLIT_DB:-"TRUE"}
 TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
-dbhost=${DB_HOST}
-dbname=${DB_NAME}
-dbpass=${DB_PASS}
-dbtype=${DB_TYPE}
-dbuser=${DB_USER}


@@ -1,44 +1,31 @@
 #!/command/with-contenv bash
-bootstrap_compression() {
-### Set Compression Options
-  if var_true "${ENABLE_PARALLEL_COMPRESSION}" ; then
-    bzip="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS}"
-    gzip="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS}"
-    xzip="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS}"
-    zstd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
-  else
-    bzip="bzip2 -${COMPRESSION_LEVEL}"
-    gzip="gzip -${COMPRESSION_LEVEL}"
-    xzip="xz -${COMPRESSION_LEVEL} "
-    zstd="zstd --rm -${COMPRESSION_LEVEL}"
-  fi
-}
 bootstrap_variables() {
-  case "${dbtype,,}" in
+  case "${DB_TYPE,,}" in
     couch* )
       dbtype=couch
-      dbport=${DB_PORT:-5984}
+      DB_PORT=${DB_PORT:-5984}
       file_env 'DB_USER'
       file_env 'DB_PASS'
       ;;
     influx* )
       dbtype=influx
-      dbport=${DB_PORT:-8088}
+      DB_PORT=${DB_PORT:-8088}
       file_env 'DB_USER'
       file_env 'DB_PASS'
+      sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
       ;;
     mongo* )
       dbtype=mongo
-      dbport=${DB_PORT:-27017}
+      DB_PORT=${DB_PORT:-27017}
       [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
       [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
       ;;
     "mysql" | "mariadb" )
       dbtype=mysql
-      dbport=${DB_PORT:-3306}
+      DB_PORT=${DB_PORT:-3306}
       [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+      sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
       ;;
     "mssql" | "microsoftsql" )
       apkArch="$(apk --print-arch)"; \
@@ -47,16 +34,17 @@ bootstrap_variables() {
*) print_error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;; *) print_error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;;
esac esac
dbtype=mssql dbtype=mssql
dbport=${DB_PORT:-1433} DB_PORT=${DB_PORT:-1433}
;; ;;
postgres* | "pgsql" ) postgres* | "pgsql" )
dbtype=pgsql dbtype=pgsql
dbport=${DB_PORT:-5432} DB_PORT=${DB_PORT:-5432}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS' [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
;; ;;
"redis" ) "redis" )
dbtype=redis dbtype=redis
dbport=${DB_PORT:-6379} DB_PORT=${DB_PORT:-6379}
[[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS' [[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
;; ;;
sqlite* ) sqlite* )
@@ -72,77 +60,112 @@ bootstrap_variables() {
 ### Set the Database Authentication Details
   case "$dbtype" in
     "mongo" )
-      [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
-      [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
-      [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
+      [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
+      [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
+      [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
       [[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
       ;;
     "mysql" )
-      [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
+      [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
       ;;
     "postgres" )
-      [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
+      [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
       ;;
     "redis" )
-      [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
+      [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
       ;;
   esac
 }

 backup_couch() {
-  target=couch_${dbname}_${dbhost}_${now}.txt
+  pre_dbbackup
+  target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
   compression
-  print_notice "Dumping CouchDB database: '${dbname}'"
-  curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true | $dumpoutput > ${TEMP_LOCATION}/${target}
+  print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
+  curl -sSL -X GET ${DB_HOST}:${DB_PORT}/${DB_NAME}/_all_docs?include_docs=true | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
   exit_code=$?
-  check_exit_code
+  check_exit_code $target
   generate_checksum
-  move_backup
+  move_dbbackup
+  post_dbbackup ${DB_NAME}
 }
 backup_influx() {
-  if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
-    :
+  if [ "${DB_NAME,,}" = "all" ] ; then
+    print_debug "Preparing to back up everything"
+    db_names=justbackupeverything
   else
-    print_notice "Compressing InfluxDB backup with gzip"
-    influx_compression="-portable"
+    db_names=$(echo "${DB_NAME}" | tr ',' '\n')
   fi
-  for DB in ${DB_NAME}; do
-    print_notice "Dumping Influx database: '${DB}'"
-    target=influx_${DB}_${dbhost}_${now}
-    influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${TEMP_LOCATION}/${target}
-    exit_code=$?
-    check_exit_code
-    generate_checksum
-    move_backup
-  done
+  case "${INFLUX_VERSION,,}" in
+    1 )
+      for db in ${db_names}; do
+        pre_dbbackup
+        if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
+        target=influx_${db}_${DB_HOST#*//}_${now}
+        compression
+        print_notice "Dumping Influx database: '${db}'"
+        influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
+        exit_code=$?
+        check_exit_code $target_dir
+        print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
+        tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
+        target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
+        generate_checksum
+        move_dbbackup
+        post_dbbackup $db
+      done
+      ;;
+    2 )
+      for db in ${db_names}; do
+        pre_dbbackup
+        if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
+        target=influx2_${db}_${DB_HOST#*//}_${now}
+        compression
+        print_notice "Dumping Influx2 database: '${db}'"
+        influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
+        exit_code=$?
+        check_exit_code $target_dir
+        create_archive
+        target=influx2_${db}_${DB_HOST#*//}_${now}.tar${extension}
+        generate_checksum
+        move_dbbackup
+        post_dbbackup $db
+      done
+      ;;
+  esac
 }
 backup_mongo() {
+  pre_dbbackup
   if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
-    target=${dbtype}_${dbname}_${dbhost}_${now}.archive
+    target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
   else
-    print_notice "Compressing MongoDB backup with gzip"
-    target=${dbtype}_${dbname}_${dbhost}_${now}.archive.gz
+    target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
     mongo_compression="--gzip"
+    compression_string="and compressing with gzip"
   fi
-  print_notice "Dumping MongoDB database: '${DB_NAME}'"
-  mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+  print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
+  mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
   exit_code=$?
-  check_exit_code
+  check_exit_code $target
+  cd "${TEMP_LOCATION}"
   generate_checksum
-  move_backup
+  move_dbbackup
+  post_dbbackup
 }
 backup_mssql() {
-  target=mssql_${dbname}_${dbhost}_${now}.bak
-  print_notice "Dumping MSSQL database: '${dbname}'"
-  /opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} -Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+  pre_dbbackup
+  target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
+  compression
+  print_notice "Dumping MSSQL database: '${DB_NAME}'"
+  /opt/mssql-tools/bin/sqlcmd -E -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE \[${DB_NAME}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
   exit_code=$?
-  check_exit_code
+  check_exit_code $target
   generate_checksum
-  move_backup
+  move_dbbackup
+  post_dbbackup $DB_NAME
 }
backup_mysql() { backup_mysql() {
@@ -152,71 +175,119 @@ backup_mysql() {
if var_true "${MYSQL_STORED_PROCEDURES}" ; then if var_true "${MYSQL_STORED_PROCEDURES}" ; then
stored_procedures="--routines" stored_procedures="--routines"
fi fi
if [ "${DB_NAME,,}" = "all" ] ; then
print_debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
print_debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
done
fi
else
db_names=$(echo "${DB_NAME}" | tr ',' '\n')
fi
print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${SPLIT_DB}" ; then if var_true "${SPLIT_DB}" ; then
DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema) for db in ${db_names} ; do
for db in "${DATABASES}" ; do pre_dbbackup
if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then target=mysql_${db}_${DB_HOST,,}_${now}.sql
print_debug "Backing up everything except for information_schema and _* prefixes" compression
print_notice "Dumping MySQL/MariaDB database: '${db}'" print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
target=mysql_${db}_${dbhost}_${now}.sql mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
compression exit_code=$?
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $dumpoutput > ${TEMP_LOCATION}/${target} check_exit_code $target
exit_code=$? generate_checksum
check_exit_code move_dbbackup
generate_checksum post_dbbackup $db
move_backup
fi
done done
else else
print_debug "Not splitting database dumps into their own files"
pre_dbbackup
target=mysql_all_${DB_HOST,,}_${now}.sql
compression compression
print_notice "Dumping MySQL/MariaDB database: '${DB_NAME}'" print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -A -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target} mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$? exit_code=$?
check_exit_code check_exit_code $target
generate_checksum generate_checksum
move_backup move_dbbackup
post_dbbackup all
fi fi
} }
 backup_pgsql() {
-  export PGPASSWORD=${dbpass}
-  if var_true "${SPLIT_DB}" ; then
-    authdb=${DB_USER}
-    [ -n "${DB_NAME}" ] && authdb=${DB_NAME}
-    DATABASES=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
-    for db in "${DATABASES}"; do
-      print_notice "Dumping PostgreSQL database: $db"
-      target=pgsql_${db}_${dbhost}_${now}.sql
-      compression
-      pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
-      exit_code=$?
-      check_exit_code
-      generate_checksum
-      move_backup
-    done
-  else
-    compression
-    print_notice "Dumping PostgreSQL: '${DB_NAME}'"
-    pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
-    exit_code=$?
-    check_exit_code
-    generate_checksum
-    move_backup
-  fi
+  export PGPASSWORD=${DB_PASS}
+  authdb=${DB_USER}
+  if [ "${DB_NAME,,}" = "all" ] ; then
+    print_debug "Preparing to back up all databases"
+    db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+    if [ -n "${DB_NAME_EXCLUDE}" ] ; then
+      db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
+      for db_exclude in ${db_names_exclusions} ; do
+        print_debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
+        db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
+      done
+    fi
+  else
+    db_names=$(echo "${DB_NAME}" | tr ',' '\n')
+  fi
+  print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
+  if var_true "${SPLIT_DB}" ; then
+    for db in ${db_names} ; do
+      pre_dbbackup
+      target=pgsql_${db}_${DB_HOST,,}_${now}.sql
+      compression
+      print_notice "Dumping PostgreSQL database: '${db}' ${compression_string}"
+      pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+      exit_code=$?
+      check_exit_code $target
+      generate_checksum
+      move_dbbackup
+      post_dbbackup $db
+    done
+  else
+    print_debug "Not splitting database dumps into their own files"
+    pre_dbbackup
+    target=pgsql_all_${DB_HOST,,}_${now}.sql
+    compression
+    print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
+    tmp_db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+    for r_db_name in $(echo $db_names | xargs); do
+      tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
+    done
+    sleep 5
+    for x_db_name in ${tmp_db_names} ; do
+      pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
+    done
+    pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+    exit_code=$?
+    check_exit_code $target
+    generate_checksum
+    move_dbbackup
+    post_dbbackup all
+  fi
 }
 backup_redis() {
-  target=redis_${db}_${dbhost}_${now}.rdb
-  echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
+  pre_dbbackup
   print_notice "Dumping Redis - Flushing Redis Cache First"
+  target=redis_all_${DB_HOST,,}_${now}.rdb
+  echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
   sleep 10
   try=5
   while [ $try -gt 0 ] ; do
-    saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
-    ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
+    saved=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
+    ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
     if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
       print_notice "Redis Backup Complete"
+      exit_code=0
       break
     fi
     try=$((try - 1))
@@ -225,99 +296,119 @@ backup_redis() {
   done
   target_original=${target}
   compression
-  $dumpoutput "${TEMP_LOCATION}/${target_original}"
+  $compress_cmd "${TEMP_LOCATION}/${target_original}"
+  check_exit_code $target
   generate_checksum
-  move_backup
+  move_dbbackup
+  post_dbbackup all
 }

 backup_sqlite3() {
-  db=$(basename "$dbhost")
+  pre_dbbackup
+  db=$(basename "${DB_HOST}")
   db="${db%.*}"
   target=sqlite3_${db}_${now}.sqlite3
   compression
-  print_notice "Dumping sqlite3 database: '${dbhost}'"
-  sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
+  print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
+  sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
   exit_code=$?
-  check_exit_code
-  cat "${TEMP_LOCATION}"/backup.sqlite3 | $dumpoutput > "${TEMP_LOCATION}/${target}"
+  check_exit_code $target
+  cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
   generate_checksum
-  move_backup
+  move_dbbackup
+  post_dbbackup $db
 }
 check_availability() {
 ### Set the Database Type
   case "$dbtype" in
     "couch" )
-      COUNTER=0
-      while ! (nc -z ${dbhost} ${dbport}) ; do
+      counter=0
+      code_received=0
+      while [ "${code_received}" != "200" ]; do
+        code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${DB_HOST}:${DB_PORT})
+        if [ "${code_received}" = "200" ] ; then break ; fi
         sleep 5
-        (( COUNTER+=5 ))
-        print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+        (( counter+=5 ))
+        print_warn "CouchDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
       done
       ;;
     "influx" )
-      COUNTER=0
-      while ! (nc -z ${dbhost} ${dbport}) ; do
-        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
-      done
+      counter=0
+      case "${INFLUX_VERSION,,}" in
+        1 )
+          while ! (nc -z ${DB_HOST#*//} ${DB_PORT}) ; do
+            sleep 5
+            (( counter+=5 ))
+            print_warn "InfluxDB Host '${DB_HOST#*//}' is not accessible, retrying.. ($counter seconds so far)"
+          done
+          ;;
+        2 )
+          code_received=0
+          while [ "${code_received}" != "200" ]; do
+            code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${DB_HOST}:${DB_PORT}/health)
+            if [ "${code_received}" = "200" ] ; then break ; fi
+            sleep 5
+            (( counter+=5 ))
+            print_warn "InfluxDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
+          done
+          ;;
+      esac
       ;;
     "mongo" )
-      COUNTER=0
-      while ! (nc -z ${dbhost} ${dbport}) ; do
+      counter=0
+      while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
         sleep 5
-        (( COUNTER+=5 ))
-        print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+        (( counter+=5 ))
+        print_warn "Mongo Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
       done
       ;;
     "mysql" )
-      COUNTER=0
-      export MYSQL_PWD=${dbpass}
-      while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do
+      counter=0
+      export MYSQL_PWD=${DB_PASS}
+      while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
         sleep 5
-        (( COUNTER+=5 ))
-        print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)"
+        (( counter+=5 ))
+        print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
       done
       ;;
     "mssql" )
-      COUNTER=0
-      while ! (nc -z ${dbhost} ${dbport}) ; do
+      counter=0
+      while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
         sleep 5
-        (( COUNTER+=5 ))
-        print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+        (( counter+=5 ))
+        print_warn "MSSQL Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
       done
       ;;
     "pgsql" )
-      COUNTER=0
-      export PGPASSWORD=${dbpass}
-      until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
+      counter=0
+      export PGPASSWORD=${DB_PASS}
+      until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
       do
         sleep 5
-        (( COUNTER+=5 ))
-        print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+        (( counter+=5 ))
+        print_warn "Postgres Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
       done
       ;;
     "redis" )
-      COUNTER=0
-      while ! (nc -z "${dbhost}" "${dbport}") ; do
+      counter=0
+      while ! (nc -z "${DB_HOST}" "${DB_PORT}") ; do
         sleep 5
-        (( COUNTER+=5 ))
-        print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+        (( counter+=5 ))
+        print_warn "Redis Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
       done
       ;;
     "sqlite3" )
-      if [[ ! -e "${dbhost}" ]]; then
-        print_error "File '${dbhost}' does not exist."
+      if [[ ! -e "${DB_HOST}" ]]; then
+        print_error "File '${DB_HOST}' does not exist."
         exit_code=2
         exit $exit_code
-      elif [[ ! -f "${dbhost}" ]]; then
-        print_error "File '${dbhost}' is not a file."
+      elif [[ ! -f "${DB_HOST}" ]]; then
+        print_error "File '${DB_HOST}' is not a file."
         exit_code=2
         exit $exit_code
-      elif [[ ! -r "${dbhost}" ]]; then
-        print_error "File '${dbhost}' is not readable."
+      elif [[ ! -r "${DB_HOST}" ]]; then
+        print_error "File '${DB_HOST}' is not readable."
         exit_code=2
         exit $exit_code
       fi
@@ -326,124 +417,278 @@ check_availability() {
 }

 check_exit_code() {
-  print_debug "Exit Code is ${exit_code}"
+  print_debug "DB Backup Exit Code is ${exit_code}"
   case "${exit_code}" in
     0 )
-      print_info "Backup completed successfully"
+      print_info "DB Backup of '${1}' completed successfully"
       ;;
     * )
-      print_error "Backup reported errors"
+      print_error "DB Backup of '${1}' reported errors"
+      master_exit_code=1
       ;;
   esac
 }
+cleanup_old_data() {
+  if [ -n "${DB_CLEANUP_TIME}" ]; then
+    if [ "${master_exit_code}" != 1 ]; then
+      case "${BACKUP_LOCATION,,}" in
+        "file" | "filesystem" )
+          print_info "Cleaning up old backups"
+          mkdir -p "${DB_DUMP_TARGET}"
+          find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+          ;;
+        "s3" | "minio" )
+          print_info "Cleaning up old backups"
+          aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | while read -r s3_file; do
+            s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
+            s3_createdate=$(date -d "$s3_createdate" "+%s")
+            s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
+            if [[ $s3_createdate -le $s3_olderthan ]] ; then
+              s3_filename=$(echo $s3_file | awk {'print $4'})
+              if [ $s3_filename != "" ] ; then
+                print_debug "Deleting $s3_filename"
+                silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+              fi
+            fi
+          done
+          ;;
+      esac
+    else
+      print_error "Skipping Cleaning up old backups because there were errors in backing up"
+    fi
+  fi
+}
 compression() {
+  if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
+    PARALLEL_COMPRESSION_THREADS=1
+  fi
   case "${COMPRESSION,,}" in
     gz* )
-      print_notice "Compressing backup with gzip"
+      compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
+      compression_type="gzip"
+      extension=".gz"
+      dir_compress_cmd=${compress_cmd}
+      target_dir=${target}
       target=${target}.gz
-      dumpoutput="$gzip "
       ;;
     bz* )
-      print_notice "Compressing backup with bzip2"
+      compress_cmd="pbzip2 -q -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
+      compression_type="bzip2"
+      dir_compress_cmd=${compress_cmd}
+      extension=".bz2"
+      target_dir=${target}
       target=${target}.bz2
-      dumpoutput="$bzip "
       ;;
     xz* )
-      print_notice "Compressing backup with xzip"
+      compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
+      compression_type="xzip"
+      dir_compress_cmd=${compress_cmd}
+      extension=".xz"
+      target_dir=${target}
       target=${target}.xz
-      dumpoutput="$xzip "
       ;;
     zst* )
-      print_notice "Compressing backup with zstd"
+      compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
+      compression_type="zstd"
+      dir_compress_cmd=${compress_cmd}
+      extension=".zst"
+      target_dir=${target}
       target=${target}.zst
-      dumpoutput="$zstd "
      ;;
     "none" | "false")
-      print_notice "Not compressing backups"
-      dumpoutput="cat "
+      compress_cmd="cat "
+      compression_type="none"
+      dir_compress_cmd="cat "
+      target_dir=${target}
       ;;
   esac
+  case "${CONTAINER_LOG_LEVEL,,}" in
+    "debug" )
+      if [ "${compression_type}" = "none" ] ; then
+        compression_string="with '${PARALLEL_COMPRESSION_THREADS}' threads"
+      else
+        compression_string="and compressing with '${compression_type}:${COMPRESSION_LEVEL}' with '${PARALLEL_COMPRESSION_THREADS}' threads"
+      fi
+      ;;
+    * )
+      if [ "${compression_type}" != "none" ] ; then
+        compression_string="and compressing with '${compression_type}'"
+      fi
+      ;;
+  esac
 }
+create_archive() {
+  if [ "${exit_code}" = "0" ] ; then
+    print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
+    tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
+  else
+    print_error "Skipping creating archive file because backup did not complete successfully"
+  fi
+}
 generate_checksum() {
   if var_true "${ENABLE_CHECKSUM}" ; then
+    if [ "${exit_code}" = "0" ] ; then
       case "${CHECKSUM,,}" in
         "md5" )
           checksum_command="md5sum"
           checksum_extension="md5"
           ;;
         "sha1" )
           checksum_command="sha1sum"
           checksum_extension="sha1"
           ;;
       esac
       print_notice "Generating ${checksum_extension^^} for '${target}'"
       cd "${TEMP_LOCATION}"
       ${checksum_command} "${target}" > "${target}"."${checksum_extension}"
       checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
       print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
+    else
+      print_error "Skipping Checksum creation because backup did not complete successfully"
+    fi
   fi
 }
move_dbbackup() {
if [ "${exit_code}" = "0" ] ; then
dbbackup_size="$(stat -c%s "${TEMP_LOCATION}"/"${target}")"
dbbackup_date="$(date -r "${TEMP_LOCATION}"/"${target}" +'%s')"

case "${SIZE_VALUE,,}" in
"b" | "bytes" )
SIZE_VALUE=1
;;
"[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
SIZE_VALUE="-h"
;;
*)
SIZE_VALUE=1
;;
esac
if [ "$SIZE_VALUE" = "1" ] ; then
filesize=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
print_notice "Backup of ${target} created with the size of ${filesize} bytes"
else
filesize=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
print_notice "Backup of ${target} created with the size of ${filesize}"
fi

case "${BACKUP_LOCATION,,}" in
"file" | "filesystem" )
print_debug "Moving backup to filesystem"
mkdir -p "${DB_DUMP_TARGET}"
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
;;
"s3" | "minio" )
print_debug "Moving backup to S3 Bucket"
export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
export AWS_DEFAULT_REGION=${S3_REGION}
if [ -f "${S3_CERT_CA_FILE}" ] ; then
print_debug "Using Custom CA for S3 Backups"
s3_ca_cert="--ca-bundle ${S3_CERT_CA_FILE}"
fi
if var_true "${S3_CERT_SKIP_VERIFY}" ; then
print_debug "Skipping SSL verification for HTTPS S3 Hosts"
s3_ssl="--no-verify-ssl"
fi
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"

silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
if var_true "${ENABLE_CHECKSUM}" ; then
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi

rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
esac
else
print_error "Skipping moving DB Backup to final location because backup did not complete successfully"
fi
rm -rf "${TEMP_LOCATION}"/*
}

pre_dbbackup() {
dbbackup_start_time=$(date +"%s")
now=$(date +"%Y%m%d-%H%M%S")
now_time=$(date +"%H:%M:%S")
now_date=$(date +"%Y-%m-%d")
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
}

post_dbbackup() {
dbbackup_finish_time=$(date +"%s")
dbbackup_total_time=$(echo $((dbbackup_finish_time-dbbackup_start_time)))

if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_notice "Sending Backup Statistics to Zabbix"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "${dbbackup_size}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "${dbbackup_date}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
if [ "$?" != "0" ] ; then print_error "Error sending statistics, consider disabling with 'CONTAINER_ENABLE_MONITORING=FALSE'" ; fi
fi

### Post Script Support
if [ -n "${POST_SCRIPT}" ] ; then
if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
if [ -x "${POST_SCRIPT}" ] ; then
print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}'"
eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executable!"
fi
fi
fi

### Post Backup Custom Script Support
if [ -d "/assets/custom-scripts/" ] ; then
for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
if [ -x "${f}" ] ; then
print_notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
else
print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executable!"
fi
fi
done
fi

print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
unset s3_ssl
unset s3_ca_cert
}
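Both POST_SCRIPT and the /assets/custom-scripts/ hooks receive the same ten positional arguments, per the comment above. A hypothetical /assets/custom-scripts/notify.sh (mark it executable with chmod +x unless POST_SCRIPT_SKIP_X_VERIFY=TRUE):

    #!/bin/bash
    # Args: EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
    exit_code="${1}" ; db_type="${2}" ; db_host="${3}" ; db_name="${4}"
    duration="${7}" ; backup_file="${8}" ; file_size="${9}" ; checksum="${10}"
    if [ "${exit_code}" = "0" ] ; then
        echo "Backup OK: ${db_type}/${db_name}@${db_host} -> ${backup_file} (${file_size} bytes, ${checksum}) in ${duration}s"
    else
        echo "Backup FAILED for ${db_name}@${db_host} with exit code ${exit_code}" >&2
    fi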
sanity_test() {
sanity_var DB_TYPE "Database Type"
sanity_var DB_HOST "Database Host"

case "${DB_TYPE,,}" in
"mysql" | "mariadb" )
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
postgres* | "pgsql" )
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
esac

if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
sanity_var S3_BUCKET "S3 Bucket"
@@ -458,7 +703,7 @@ setup_mode() {
if [ "${MODE,,}" = "auto" ] || [ ${MODE,,} = "default" ] ; then if [ "${MODE,,}" = "auto" ] || [ ${MODE,,} = "default" ] ; then
print_debug "Running in Auto / Default Mode - Letting Image control scheduling" print_debug "Running in Auto / Default Mode - Letting Image control scheduling"
else else
print_info "Running in Manual mode - Execute 'backup_now' to run a manual backup" print_info "Running in Manual mode - Execute 'backup_now' or '/etc/services.available/10-db-backup/run' to perform a manual backup"
service_stop 10-db-backup service_stop 10-db-backup
if var_true "${MANUAL_RUN_FOREVER}" ; then if var_true "${MANUAL_RUN_FOREVER}" ; then
mkdir -p /etc/services.d/99-run_forever mkdir -p /etc/services.d/99-run_forever
@@ -470,6 +715,20 @@ do
done done
EOF EOF
chmod +x /etc/services.d/99-run_forever/run chmod +x /etc/services.d/99-run_forever/run
else
if var_true "${CONTAINER_ENABLE_SCHEDULING}" ; then
print_error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_SCHEDULING=TRUE'"
exit 1
fi
if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_MONITORING=TRUE'"
exit 1
fi
if var_true "${CONTAINER_ENABLE_LOGSHIPPING}" ; then
print_error "Manual / Exit after execution mode doesn't work with 'CONTAINER_ENABLE_LOGSHIPPING=TRUE'"
exit 1
fi
fi fi
fi fi
} }
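As the setup above implies, manual mode stops the scheduled service, and MANUAL_RUN_FOREVER=TRUE keeps the container alive so backups can be triggered on demand. An assumed invocation (image tag and credentials illustrative):

    docker run -d --name db-backup \
        -e MODE=MANUAL -e MANUAL_RUN_FOREVER=TRUE \
        -e DB_TYPE=mysql -e DB_HOST=db -e DB_NAME=app \
        -e DB_USER=root -e DB_PASS=secret \
        tiredofit/db-backup
    docker exec db-backup backup_now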

View File

@@ -5,64 +5,36 @@ source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
PROCESS_NAME="db-backup"

bootstrap_variables

if [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
DB_DUMP_BEGIN=+0
manual=TRUE
print_debug "Detected Manual Mode"
fi

case "${1,,}" in
"now" | "manual" )
DB_DUMP_BEGIN=+0
manual=TRUE
;;
* )
sleep 5
;;
esac

### Container Startup
print_debug "Backup routines Initialized on $(date)"

### Wait for Next time to start backup
case "${1,,}" in
"now" | "manual" )
:
;;
* )
if [ "${manual,,}" = "true" ]; then
current_time=$(date +"%s")
today=$(date +"%Y%m%d")
if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
waittime=$(( ${BASH_REMATCH[1]} * 60 ))
target_time=$(($current_time + $waittime))
else
target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
if [[ "$target_time" < "$current_time" ]]; then
target_time=$(($target_time + 24*60*60))
fi
waittime=$(($target_time - $current_time))
fi
print_debug "Wait Time: ${waittime} Target time: ${target_time} Current Time: ${current_time}"
print_info "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
sleep $waittime
fi
;;
esac
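A worked example of the wait computation above, covering the two DB_DUMP_BEGIN forms it parses:

    # DB_DUMP_BEGIN=+30   -> waittime = 30 * 60 = 1800 seconds after startup
    # DB_DUMP_BEGIN=0315  -> target_time = today at 03:15; if that moment has
    #                        already passed, 24*60*60 seconds are added so the
    #                        first backup fires at 03:15 tomorrow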
### Commence Backup
while true; do
mkdir -p "${TEMP_LOCATION}"
backup_start_time=$(date +"%s")
print_debug "Backup routines started time: $(date +'%Y-%m-%d %T %Z')"

case "${dbtype,,}" in
"couch" )
check_availability
@@ -100,48 +72,17 @@ while true; do
backup_finish_time=$(date +"%s")
backup_total_time=$(echo $((backup_finish_time-backup_start_time)))
if [ -z "$master_exit_code" ] ; then master_exit_code="0" ; fi
print_info "Backup routines finish time: $(date -d @${backup_finish_time} +"%Y-%m-%d %T %Z") with overall exit code ${master_exit_code}"
print_notice "Backup routines time taken: $(echo ${backup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"

cleanup_old_data

if var_true "${manual}" ; then
print_debug "Exiting due to manual mode"
exit ${master_exit_code};
else
print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "
sleep $(($DB_DUMP_FREQ*60-backup_total_time))
fi
done
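Note that the sleep arithmetic subtracts the elapsed backup time so runs stay on a fixed cadence rather than drifting by the backup duration: with DB_DUMP_FREQ=1440 (daily) and a backup that took 120 seconds, the container sleeps 1440*60 - 120 = 86280 seconds.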

View File

@@ -110,7 +110,7 @@ get_dbhost() {
print_debug "Parsed DBHost: ${p_dbhost}" print_debug "Parsed DBHost: ${p_dbhost}"
fi fi
if [ -z "${dbhost}" ] && [ -z "${parsed_host}" ]; then if [ -z "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 1 - No Env, No Parsed Filename" print_debug "Parsed DBHost Variant: 1 - No Env, No Parsed Filename"
q_dbhost_variant=1 q_dbhost_variant=1
q_dbhost_menu=$(cat <<EOF q_dbhost_menu=$(cat <<EOF
@@ -119,7 +119,7 @@ EOF
)
fi
if [ -n "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 2 - Env, No Parsed Filename"
q_dbhost_variant=2
q_dbhost_menu=$(cat <<EOF
@@ -130,7 +130,7 @@ EOF
)
fi
if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 3 - No Env, Parsed Filename"
q_dbhost_variant=3
q_dbhost_menu=$(cat <<EOF
@@ -141,7 +141,7 @@ EOF
)
fi
if [ -n "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 4 - Env, Parsed Filename"
q_dbhost_variant=4
q_dbhost_menu=$(cat <<EOF
@@ -188,7 +188,7 @@ EOF
break
;;
e* | "" )
r_dbhost=${DB_HOST}
break
;;
q* )
@@ -241,7 +241,7 @@ EOF
break
;;
e* | "" )
r_dbhost=${DB_HOST}
break
;;
f* )
@@ -263,6 +263,10 @@ get_dbtype() {
p_dbtype=$(basename -- "${r_filename}" | cut -d _ -f 1)
case "${p_dbtype}" in
mongo* )
parsed_type=true
print_debug "Parsed DBType: MongoDB"
;;
mariadb | mysql )
parsed_type=true
print_debug "Parsed DBType: MariaDB/MySQL"
@@ -276,7 +280,7 @@ get_dbtype() {
;;
esac
if [ -z "${DB_TYPE}" ] && [ -z "${parsed_type}" ]; then
print_debug "Parsed DBType Variant: 1 - No Env, No Parsed Filename"
q_dbtype_variant=1
q_dbtype_menu=$(cat <<EOF
@@ -285,7 +289,7 @@ EOF
)
fi
if [ -n "${DB_TYPE}" ] && [ -z "${parsed_type}" ]; then
print_debug "Parsed DBType Variant: 2 - Env, No Parsed Filename"
q_dbtype_variant=2
q_dbtype_menu=$(cat <<EOF
@@ -295,7 +299,7 @@ EOF
)
fi
if [ -z "${DB_TYPE}" ] && [ -n "${parsed_type}" ]; then
print_debug "Parsed DBType Variant: 3 - No Env, Parsed Filename"
q_dbtype_variant=3
q_dbtype_menu=$(cat <<EOF
@@ -305,7 +309,7 @@ EOF
)
fi
if [ -n "${DB_TYPE}" ] && [ -n "${parsed_type}" ]; then
print_debug "Parsed DBType Variant: 4 - Env, Parsed Filename"
q_dbtype_variant=4
q_dbtype_menu=$(cat <<EOF
@@ -320,7 +324,9 @@ EOF
What Database Type are you looking to restore?
${q_dbtype_menu}
M ) MySQL / MariaDB
O ) MongoDB
P ) Postgresql
Q ) Quit
@@ -335,6 +341,10 @@ EOF
r_dbtype=mysql
break
;;
o* )
r_dbtype=mongo
break
;;
p* )
r_dbtype=postgresql
break
@@ -351,13 +361,17 @@ EOF
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in case "${q_dbtype,,}" in
e* | "" ) e* | "" )
r_dbtype=${db_name} r_dbtype=${DB_TYPE}
break break
;; ;;
m* ) m* )
r_dbtype=mysql r_dbtype=mysql
break break
;; ;;
o* )
r_dbtype=mongo
break
;;
p* ) p* )
r_dbtype=postgresql r_dbtype=postgresql
break break
@@ -381,6 +395,10 @@ EOF
r_dbtype=mysql
break
;;
o* )
r_dbtype=mongo
break
;;
p* )
r_dbtype=postgresql
break
@@ -398,7 +416,7 @@ EOF
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in case "${q_dbtype,,}" in
e* | "" ) e* | "" )
r_dbtype=${dbtype} r_dbtype=${DB_TYPE}
break break
;; ;;
f* ) f* )
@@ -431,7 +449,7 @@ get_dbname() {
print_debug "Parsed DBName: ${p_dbhost}" print_debug "Parsed DBName: ${p_dbhost}"
fi fi
if [ -z "${dbname}" ] && [ -z "${parsed_name}" ]; then if [ -z "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename" print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename"
q_dbname_variant=1 q_dbname_variant=1
q_dbname_menu=$(cat <<EOF q_dbname_menu=$(cat <<EOF
@@ -440,7 +458,7 @@ EOF
)
fi
if [ -n "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 2 - Env, No Parsed Filename"
q_dbname_variant=2
q_dbname_menu=$(cat <<EOF
@@ -451,7 +469,7 @@ EOF
)
fi
if [ -z "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 3 - No Env, Parsed Filename"
q_dbname_variant=3
q_dbname_menu=$(cat <<EOF
@@ -462,7 +480,7 @@ EOF
)
fi
if [ -n "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
print_debug "Parsed DBName Variant: 4 - Env, Parsed Filename"
q_dbname_variant=4
q_dbname_menu=$(cat <<EOF
@@ -509,7 +527,7 @@ EOF
break
;;
e* | "" )
r_dbname=${DB_NAME}
break
;;
q* )
@@ -562,7 +580,7 @@ EOF
break
;;
e* | "" )
r_dbname=${DB_NAME}
break
;;
f* )
@@ -580,7 +598,7 @@ EOF
}
get_dbport() {
if [ -z "${DB_PORT}" ] ; then
print_debug "Parsed DBPort Variant: 1 - No Env"
q_dbport_variant=1
q_dbport_menu=$(cat <<EOF
@@ -589,20 +607,20 @@ EOF
)
fi
if [ -n "${DB_PORT}" ] ; then
print_debug "Parsed DBPort Variant: 2 - Env"
q_dbport_variant=2
q_dbport_menu=$(cat <<EOF
C ) Custom Entered Database Port
E ) Environment Variable DB_PORT: '${DB_PORT}'
EOF
)
fi
cat << EOF
What Database Port do you wish to use? MySQL/MariaDB typically listens on port 3306, PostgreSQL on 5432, and MongoDB on 27017.
${q_dbport_menu}
Q ) Quit
@@ -637,7 +655,7 @@ EOF
break
;;
e* | "" )
r_dbport=${DB_PORT}
break
;;
q* )
@@ -651,7 +669,7 @@ EOF
}
get_dbuser() {
if [ -z "${DB_USER}" ] ; then
print_debug "Parsed DBUser Variant: 1 - No Env"
q_dbuser_variant=1
q_dbuser_menu=$(cat <<EOF
@@ -660,7 +678,7 @@ EOF
)
fi
if [ -n "${DB_USER}" ] ; then
print_debug "Parsed DBUser Variant: 2 - Env"
q_dbuser_variant=2
q_dbuser_menu=$(cat <<EOF
@@ -697,16 +715,16 @@ EOF
c* )
counter=1
q_dbuser=" "
while [[ $q_dbuser = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Usernames can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB User do you wish to use:\ ${coff})" q_dbuser
(( counter+=1 ))
done
r_dbuser=${q_dbuser}
break
;;
e* | "" )
r_dbuser=${DB_USER}
break
;;
q* )
@@ -720,7 +738,7 @@ EOF
}
get_dbpass() {
if [ -z "${DB_PASS}" ] ; then
print_debug "Parsed DBPass Variant: 1 - No Env"
q_dbpass_variant=1
q_dbpass_menu=$(cat <<EOF
@@ -729,13 +747,13 @@ EOF
)
fi
if [ -n "${DB_PASS}" ] ; then
print_debug "Parsed DBPass Variant: 2 - Env"
q_dbpass_variant=2
q_dbpass_menu=$(cat <<EOF
C ) Custom Entered Database Password
E ) Environment Variable DB_PASS
EOF
)
fi
@@ -766,16 +784,16 @@ EOF
c* )
counter=1
q_dbpass=" "
while [[ $q_dbpass = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
(( counter+=1 ))
done
r_dbpass=${q_dbpass}
break
;;
e* | "" )
r_dbpass=${DB_PASS}
break
;;
q* )
@@ -791,7 +809,7 @@ EOF
#### SCRIPT START
cat << EOF
## ${IMAGE_NAME} Restore Script
## Visit ${IMAGE_REPO_URL}
## ####################################################
@@ -826,7 +844,7 @@ if [ -n "${3}" ]; then
if [ ! -f "${3}" ]; then if [ ! -f "${3}" ]; then
get_dbhost get_dbhost
else else
r_dbtype="${3}" r_dbhost="${3}"
fi fi
else else
get_dbhost get_dbhost
@@ -920,8 +938,23 @@ case "${r_dbtype}" in
pv ${r_filename} | ${decompress_cmd}cat | psql -d ${r_dbname} -h ${r_dbhost} -p ${r_dbport} -U ${r_dbuser}
exit_code=$?
;;
mongo )
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
if [ "${ENABLE_COMPRESSION,,}" != "none" ] && [ "${ENABLE_COMPRESSION,,}" != "false" ] ; then
mongo_compression="--gzip"
fi
if [ -n "${r_dbuser}" ] ; then
mongo_user="-u ${r_dbuser}"
fi
if [ -n "${r_dbpass}" ] ; then
mongo_pass="-p ${r_dbpass}"
fi
mongorestore ${mongo_compression} -d ${r_dbname} -h ${r_dbhost} --port ${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
exit_code=$?
;;
* )
print_info "Unable to restore DB of type '${r_dbtype}'"
exit_code=3
;;
esac
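With the mongo branch in place, the restore script can be pointed at a Mongo archive like any other dump. Run with no arguments it prompts for each value interactively; positional arguments (the backup filename first, and, as the check above implies, the host in the third position) pre-answer the corresponding prompts. An assumed session (filename illustrative):

    restore /backup/mongo_app_db_20220524-000000.archive.gz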