Compare commits


27 Commits
3.7.3 ... 3.9.0

Author SHA1 Message Date
dave@tiredofit.ca
068577001e Release 3.9.0 - See CHANGELOG.md 2023-04-26 14:32:36 -07:00
Dave Conroy
7781542816 Expand on amount of variables that can use 2023-04-24 14:54:47 -07:00
dave@tiredofit.ca
9283b5440e Release 3.8.5 - See CHANGELOG.md 2023-04-11 15:42:30 -07:00
Dave Conroy
5e62485e7f Merge pull request #216 from tpansino/bug/215
Set ltargets properly
2023-04-11 15:36:26 -07:00
Tom Pansino
f224571448 Set ltargets properly 2023-04-11 15:31:02 -07:00
dave@tiredofit.ca
01620fec00 Release 3.8.4 - See CHANGELOG.md 2023-04-06 12:14:22 -07:00
dave@tiredofit.ca
18a38b4f1d Release 3.8.3 - See CHANGELOG.md 2023-03-30 14:18:55 -07:00
dave@tiredofit.ca
150f356275 Release 3.8.2 - See CHANGELOG.md 2023-03-30 14:05:17 -07:00
dave@tiredofit.ca
e838ed0027 Release 3.8.1 - See CHANGELOG.md 2023-03-30 11:04:35 -07:00
Dave Conroy
8329b4c065 Add defaults 2023-03-27 16:41:31 -07:00
dave@tiredofit.ca
dab1ac301a Release 3.8.0 - See CHANGELOG.md 2023-03-27 15:01:10 -07:00
dave@tiredofit.ca
077201cd18 Release 3.7.7 - See CHANGELOG.md 2023-03-20 16:24:23 -07:00
Dave Conroy
eeaf59dc6f Merge pull request #210 from codemonium/simplify-pg_isready
Simplify pg_isready usage
2023-03-20 16:22:13 -07:00
Igor Artemenko
88fe0d6411 Simplify pg_isready usage
The pg_isready documentation says that it does not need a correct
database name or username to get the server status. In fact, incorrect
values result in the server logging failed connection attempts. As a
result, when we set DB_NAME to ALL, calls to the check_availability
function (which uses pg_isready) cause the server to log the following
error:

    FATAL:  database "ALL" does not exist

To eliminate this error, this change simplifies the pg_isready call.
2023-03-20 22:51:05 +00:00
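The simplification amounts to dropping the credential arguments from the readiness probe; a before/after sketch (variable quoting added here for illustration — see the script diff further down for the exact lines):

```bash
# Before: DB_NAME/DB_USER are passed through, so DB_NAME=ALL surfaces
# server-side as a failed connection to a database literally named "ALL"
pg_isready --dbname="${DB_NAME}" --host="${DB_HOST}" --port="${DB_PORT}" --username="${DB_USER}" -q

# After: host and port are enough to probe server readiness
pg_isready --host="${DB_HOST}" --port="${DB_PORT}" -q
```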
dave@tiredofit.ca
366c4759a5 Release 3.7.6 - See CHANGELOG.md 2023-03-14 16:10:11 -07:00
Dave Conroy
37f255ec99 Merge pull request #207 from kamartem/patch-1
Typo correction
2023-03-14 16:09:08 -07:00
Dave Conroy
efa9a678c0 Merge pull request #209 from ToshY/bug/208-mysql-extra-opts-status-check
Removed EXTRA_OPTS in MySQL status check
2023-03-14 16:08:53 -07:00
ToshY
68747a4aff Removed EXTRA_OPTS in MySQL status check 2023-03-14 20:28:05 +01:00
Artem Kamyshansky
cf736278bb Typo correction 2023-03-12 19:31:04 +03:00
dave@tiredofit.ca
1659e34fc7 Release 3.7.5 - See CHANGELOG.md 2023-03-02 07:39:58 -08:00
dave@tiredofit.ca
a8df7a2c75 Release 3.7.4 - See CHANGELOG.md 2023-02-22 08:36:46 -08:00
Dave Conroy
b5194dcce9 Merge pull request #203 from gbe0/issue/201
Fix issue #201 - 99-run_forever exec format error
2023-02-22 08:35:55 -08:00
Chris
6fb947684a fix issue #201 - 99-run_forever exec format error 2023-02-23 00:26:54 +08:00
Dave Conroy
9287f4efeb Update README.md 2023-01-30 11:47:15 -08:00
Dave Conroy
eeb5b5a119 Update README.md 2023-01-30 09:58:44 -08:00
Dave Conroy
a83dfd1a0b Update Workflows 2023-01-29 18:13:20 -08:00
Dave Conroy
8fb379b51a Update workflows 2023-01-29 16:04:15 -08:00
8 changed files with 246 additions and 332 deletions

GitHub Actions: build workflow

@@ -1,111 +1,15 @@
-### Application Level Image CI
-### Dave Conroy <dave at tiredofit dot ca>
-name: 'build'
+name: "build_image"
 on:
   push:
     paths:
-      - '**'
-      - '!README.md'
+      - "**"
+      - "!README.md"
 jobs:
-  docker:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-      - name: Prepare
-        id: prep
-        run: |
-          DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
-          if [[ $GITHUB_REF == refs/heads/* ]]; then
-            if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
-              BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
-            else
-              BRANCH=${GITHUB_REF#refs/heads/}
-            fi
-            case ${BRANCH} in
-              "main" | "master" )
-                BRANCHTAG="${DOCKER_IMAGE}:latest"
-                ;;
-              "develop" )
-                BRANCHTAG="${DOCKER_IMAGE}:develop"
-                ;;
-              * )
-                if [ -n "${{ secrets.LATEST }}" ] ; then
-                  if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
-                    BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
-                  else
-                    BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
-                  fi
-                else
-                  BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
-                fi
-                ;;
-            esac
-          fi
-          if [[ $GITHUB_REF == refs/tags/* ]]; then
-            GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
-          fi
-          if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
-            TAGS=${BRANCHTAG},${GITTAG}
-          else
-            TAGS="${BRANCHTAG}${GITTAG}"
-          fi
-          echo ::set-output name=tags::${TAGS}
-          echo ::set-output name=docker_image::${DOCKER_IMAGE}
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-        with:
-          platforms: all
-      - name: Set up Docker Buildx
-        id: buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to DockerHub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      - name: Label
-        id: Label
-        run: |
-          if [ -f "Dockerfile" ] ; then
-            sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
-            sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
-            sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
-            sed -i "/FROM .*/a LABEL tiredofit.image.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
-            if [ -f "CHANGELOG.md" ] ; then
-              sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
-              mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
-            fi
-            if [[ $GITHUB_REF == refs/tags/* ]]; then
-              sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
-            fi
-            if [[ $GITHUB_REF == refs/heads/* ]]; then
-              sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
-            fi
-          fi
-      - name: Build
-        uses: docker/build-push-action@v3
-        with:
-          builder: ${{ steps.buildx.outputs.name }}
-          context: .
-          file: ./Dockerfile
-          platforms: linux/amd64,linux/arm/v7,linux/arm64
-          push: true
-          tags: ${{ steps.prep.outputs.tags }}
+  build:
+    uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
+    secrets: inherit

GitHub Actions: manual build workflow

@@ -1,6 +1,4 @@
-# Manual Workflow (Application)
-name: manual
+name: "manual_build_image"
 on:
   workflow_dispatch:
@@ -8,104 +6,11 @@ on:
       Manual Build:
         description: 'Manual Build'
         required: false
 jobs:
-  docker:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-      - name: Prepare
-        id: prep
-        run: |
-          DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
-          if [[ $GITHUB_REF == refs/heads/* ]]; then
-            if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
-              BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
-            else
-              BRANCH=${GITHUB_REF#refs/heads/}
-            fi
-            case ${BRANCH} in
-              "main" | "master" )
-                BRANCHTAG="${DOCKER_IMAGE}:latest"
-                ;;
-              "develop" )
-                BRANCHTAG="${DOCKER_IMAGE}:develop"
-                ;;
-              * )
-                if [ -n "${{ secrets.LATEST }}" ] ; then
-                  if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
-                    BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
-                  else
-                    BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
-                  fi
-                else
-                  BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
-                fi
-                ;;
-            esac
-          fi
-          if [[ $GITHUB_REF == refs/tags/* ]]; then
-            GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
-          fi
-          if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
-            TAGS=${BRANCHTAG},${GITTAG}
-          else
-            TAGS="${BRANCHTAG}${GITTAG}"
-          fi
-          echo ::set-output name=tags::${TAGS}
-          echo ::set-output name=docker_image::${DOCKER_IMAGE}
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-        with:
-          platforms: all
-      - name: Set up Docker Buildx
-        id: buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to DockerHub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKER_USERNAME }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      - name: Label
-        id: Label
-        run: |
-          if [ -f "Dockerfile" ] ; then
-            sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
-            sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
-            sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
-            sed -i "/FROM .*/a LABEL tiredofit.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
-            if [ -f "CHANGELOG.md" ] ; then
-              sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
-              mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
-            fi
-            if [[ $GITHUB_REF == refs/tags/* ]]; then
-              sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
-            fi
-            if [[ $GITHUB_REF == refs/heads/* ]]; then
-              sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
-            fi
-          fi
-      - name: Build
-        uses: docker/build-push-action@v3
-        with:
-          builder: ${{ steps.buildx.outputs.name }}
-          context: .
-          file: ./Dockerfile
-          platforms: linux/amd64,linux/arm/v7,linux/arm64
-          push: true
-          tags: ${{ steps.prep.outputs.tags }}
+  build:
+    uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
+    secrets: inherit

CHANGELOG.md

@@ -1,3 +1,71 @@
+## 3.9.0 2023-04-26 <dave at tiredofit dot ca>
+### Added
+- Add support for _FILE environment variables
+
+## 3.8.5 2023-04-11 <tpansino@github>
+### Changed
+- Fix SQLite3, Influx, and MSSQL backups failing due to malformed/non existent ltarget
+
+## 3.8.4 2023-04-06 <dave at tiredofit dot ca>
+### Changed
+- Fix issue with Influx2 and MSSQL clients not installing properly
+
+## 3.8.3 2023-03-30 <dave at tiredofit dot ca>
+### Changed
+- Patchup for 3.8.2
+
+## 3.8.2 2023-03-30 <dave at tiredofit dot ca>
+### Changed
+- Patchup for 3.8.1
+
+## 3.8.1 2023-03-30 <dave at tiredofit dot ca>
+### Changed
+- Cleanup Dockerfile
+- Fix issue with DB_ARCHIVE_TIME not firing correctly
+
+## 3.8.0 2023-03-27 <dave at tiredofit dot ca>
+### Added
+- Introduce DB_DUMP_TARGET_ARCHIVE which works with DB_ARCHIVE_TIME to move backups older than (x) minutes from DB_DUMP_TARGET to DB_DUMP_TARGET_ARCHIVE for use with external backup systems and custom exclude rules
+- Introduce CREATE_LATEST_SYMLINK which creates a symbolic link in DB_DUMP_TARGET of `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)`
+
+## 3.7.7 2023-03-20 <codemonium@github>
+### Changed
+- Simplify pg_isready usage
+
+## 3.7.6 2023-03-14 <toshy@github>
+### Changed
+- Remove EXTRA_OPT variable from MySQL/MariaDB check
+
+## 3.7.5 2023-03-02 <dave at tiredofit dot ca>
+### Added
+- Add support for Docker Swarm mode Secrets for BLOBXFER_STORAGE_ACCOUNT_*_FILE
+
+## 3.7.4 2023-02-22 <gbe0@github>
+### Changed
+- Fix when running in MANUAL_RUN_FOREVER mode looping
+
 ## 3.7.3 2022-12-20 <dave at tiredofit dot ca>
 ### Changed
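Taken together, the 3.8.x and 3.9.0 additions combine along these lines; a sketch only, with illustrative host, schema, and secret names (`DB_PASS_FILE` assumes the `_FILE` support added in 3.9.0):

```bash
# Back up one MariaDB schema, keep a stable "latest" symlink, and move
# dumps older than a day into an archive directory for an external backup tool.
docker run -d --name db-backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=mydb \
  -e DB_USER=backup \
  -e DB_PASS_FILE=/run/secrets/db_pass \
  -e CREATE_LATEST_SYMLINK=TRUE \
  -e DB_ARCHIVE_TIME=1440 \
  -e DB_DUMP_TARGET_ARCHIVE=/backup/archive \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```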

Dockerfile

@@ -13,9 +13,9 @@ ENV INFLUX2_VERSION=2.4.0 \
 ### Dependencies
 RUN source /assets/functions/00-container && \
     set -ex && \
-    apk update && \
-    apk upgrade && \
-    apk add -t .db-backup-build-deps \
+    package update && \
+    package upgrade && \
+    package install .db-backup-build-deps \
         build-base \
         bzip2-dev \
         git \
@@ -27,7 +27,7 @@ RUN source /assets/functions/00-container && \
         xz-dev \
         && \
     \
-    apk add --no-cache -t .db-backup-run-deps \
+    package install .db-backup-run-deps \
        aws-cli \
        bzip2 \
        influxdb \
@@ -75,12 +75,14 @@ RUN source /assets/functions/00-container && \
     \
     pip3 install blobxfer && \
     \
-    ### Cleanup
-    apk del .db-backup-build-deps && \
-    rm -rf /usr/src/* && \
-    rm -rf /*.apk && \
-    rm -rf /etc/logrotate.d/redis && \
-    rm -rf /root/.cache /tmp/* /var/cache/apk/*
+    package remove .db-backup-build-deps && \
+    package cleanup && \
+    rm -rf \
+        /*.apk \
+        /etc/logrotate.d/* \
+        /root/.cache \
+        /tmp/* \
+        /usr/src/*
-### S6 Setup
 COPY install /
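`package` here is not a standard command; it is a helper sourced from the base image's `/assets/functions/00-container`. Assuming it abstracts the distribution's package manager, the Alpine case plausibly maps as in this hypothetical sketch (not the base image's actual implementation):

```bash
# Hypothetical mapping on an Alpine base; the point of the wrapper is that
# the same Dockerfile text also works on the Debian variant of the base image.
package() {
    case "$1" in
        update)  apk update ;;
        upgrade) apk upgrade ;;
        install) shift; apk add --no-cache -t "$@" ;;  # first argument: virtual package name
        remove)  shift; apk del "$@" ;;
        cleanup) rm -rf /var/cache/apk/* ;;
    esac
}
```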

README.md

@@ -80,7 +80,13 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 Clone this repository and build the image with `docker build <arguments> (imagename) .`
 ### Prebuilt Images
-Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and is the recommended method of installation.
+Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup)
+Builds of the image are also available on the [Github Container Registry](https://github.com/tiredofit/docker-db-backup/pkgs/container/docker-db-backup)
+```
+docker pull ghcr.io/tiredofit/docker-db-backup:(imagetag)
+```
 The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md):
@@ -89,7 +95,7 @@ The following image tags are available along with their tagged release based on
 | latest | `:latest` |
 ```bash
-docker pull tiredofit/db-backup:(imagetag)
+docker pull docker.io/tiredofdit/db-backup:(imagetag)
 ```
 #### Multi Architecture
 Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architecures, type `docker manifest (image):(tag)`
@@ -117,7 +123,7 @@ The following directories are used for configuration and can be mapped for persi
 #### Base Images used
-This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) or [Debian Linux](https://hub.docker.com/r/tiredofit/debian) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`,`vim`.
+This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`.
 Be sure to view the following repositories to understand all the customizable options:
@@ -128,67 +134,68 @@ Be sure to view the following repositories to understand all the customizable op
 #### Container Options
 | Parameter | Description | Default |
 | --------- | ----------- | ------- |
 | `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
 | `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` |
 | `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
 | `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
 | `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
+| `CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
 | `PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | |
 | `POST_SCRIPT` | Fill this variable in with a command to execute post backing up | |
 | `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE` |
 ### Database Specific Options
-| Parameter | Description | Default |
-| --------- | ----------- | ------- |
-| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
-| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
-| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
-| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by seperating with commas eg `db1,db2` | |
-| `DB_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases seperated via commas from being backed up | |
-| `DB_USER` | username for the database(s) - Can use `root` for MySQL | |
-| `DB_PASS` | (optional if DB doesn't require it) password for the database | |
-| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
-| `INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2` series - AMD64 and ARM64 only for `2` | |
-| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | |
-| | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can overrde them by making your own entries |
+| Parameter | Description | Default | `_NAME` |
+| --------- | ----------- | ------- | ------- |
+| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | | |
+| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | | |
+| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | | x |
+| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by seperating with commas eg `db1,db2` | | x |
+| `DB_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases seperated via commas from being backed up | | x |
+| `DB_USER` | username for the database(s) - Can use `root` for MySQL | | x |
+| `DB_PASS` | (optional if DB doesn't require it) password for the database | | x |
+| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies | x |
+| `INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2` series - AMD64 and ARM64 only for `2` | | |
+| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | | x |
+| | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can overrde them by making your own entries | | |
 #### For Influx DB2:
 Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST` use syntax of `http(s)://db-name`
 ### Scheduling Options
 | Parameter | Description | Default |
 | --------- | ----------- | ------- |
 | `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
 | `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
 | | Absolute HHMM, e.g. `2330` or `0415` | |
 | | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
 | `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `/backup` |
+| `DB_DUMP_TARGET_ARCHIVE` | Optional Directory where the database dump archives are kept. | `${DB_DUMP_TARGET}/archive/` |
 | `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
+| `DB_ARCHIVE_TIME` | Value in minutes to move all files older than (x) from `DB_DUMP_TARGET` to `DB_DUMP_TARGET_ARCHIVE` - which is useful when pairing against an external backup system. | |
 - You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of backups that start with a `0` get converted into a different format which will not allow the timer to start at the correct time.
 ### Backup Options
-| Parameter | Description | Default |
-| --------- | ----------- | ------- |
-| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
-| `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` |
-| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
-| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
-| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` |
-| `ENABLE_CHECKSUM` | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` |
-| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` |
-| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | |
-| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` |
-| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` |
-| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` |
-| `MYSQL_ENABLE_TLS` | Enable TLS functionality for MySQL client | `FALSE` |
-| `MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` |
-| `MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` |
-| `MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` |
-| `MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | |
-| `MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | |
+| Parameter | Description | Default | `_NAME` |
+| --------- | ----------- | ------- | ------- |
+| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` | |
+| `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` | |
+| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` | |
+| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` | |
+| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` | |
+| `ENABLE_CHECKSUM` | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` | |
+| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` | |
+| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | | |
+| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` | |
+| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` | |
+| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` | |
+| `MYSQL_ENABLE_TLS` | Enable TLS functionality for MySQL client | `FALSE` | |
+| `MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` | |
+| `MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` | |
+| `MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` | x |
+| `MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | | x |
+| `MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | | x |
 - When using compression with MongoDB, only `GZ` compression is possible.
@@ -196,19 +203,19 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
 If `BACKUP_LOCATION` = `S3` then the following options are used.
-| Parameter | Description | Default |
-| --------- | ----------- | ------- |
-| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | |
-| `S3_KEY_ID` | S3 Key ID (Optional) | |
-| `S3_KEY_SECRET` | S3 Key Secret (Optional) | |
-| `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | |
-| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | |
-| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
-| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` |
-| `S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | |
-| `S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | |
-| _*OR*_ | | |
-| `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` |
+| Parameter | Description | Default | `_FILE` |
+| --------- | ----------- | ------- | ------- |
+| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | | x |
+| `S3_KEY_ID` | S3 Key ID (Optional) | | x |
+| `S3_KEY_SECRET` | S3 Key Secret (Optional) | | x |
+| `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | | x |
+| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | | x |
+| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | | x |
+| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` | x |
+| `S3_EXTRA_OPTS` | Add any extra options to the end of the `aws-cli` process execution | | x |
+| `S3_CERT_CA_FILE` | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | | x |
+| _*OR*_ | | | |
+| `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` | |
 - When `S3_KEY_ID` and/or `S3_KEY_SECRET` is not set, will try to use IAM role assigned (if any) for uploading the backup files to S3 bucket.
@@ -216,14 +223,13 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.
 Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer) to the Azure fileshare storage.
 If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
-| Parameter | Description | Default |
-| --------- | ----------- | ------- |
-| `BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | |
-| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | |
-| `BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` |
+| Parameter | Description | Default | `_NAME` |
+| --------- | ----------- | ------- | ------- |
+| `BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
+| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
+| `BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
 > This service uploads files from backup targed directory `DB_DUMP_TARGET`.
 > If the a cleanup configuration in `DB_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
@@ -240,7 +246,7 @@ docker exec -it (whatever your container name is) bash
 ### Manual Backups
 Manual Backups can be performed by entering the container and typing `backup-now`
-- Recently there was a request to have the container work with Kukbernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup by executing and then exit cleanly. An alternative way to running the script is to execute `/etc/services.available/10-db-backup/run`.
+- Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup by executing and then exit cleanly. An alternative way to running the script is to execute `/etc/services.available/10-db-backup/run`.
 ### Restoring Databases
 Entering in the container and executing `restore` will execute a menu based script to restore your backups - MariaDB, Postgres, and Mongo supported.
@@ -338,7 +344,7 @@ If for some reason your filesystem or host is not detecting it right, use the en
 These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
 ### Usage
 - The [Discussions board](../../discussions) is a great place for working with the community on tips and tricks of using this image.
-- Consider [sponsoring me](https://github.com/sponsors/tiredofit) personalized support.
+- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for personalized support
 ### Bugfixes
 - Please, submit a [Bug Report](issues/new) if something isn't working as expected. I'll do my best to issue a fix in short order.
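The `MODE=MANUAL` / `MANUAL_RUN_FOREVER=FALSE` combination described above lends itself to one-shot runs driven by an outside scheduler; a sketch with illustrative database settings:

```bash
# Performs a single backup and exits cleanly, e.g. from a Kubernetes CronJob
# or a host crontab, instead of keeping the container alive.
docker run --rm \
  -e MODE=MANUAL \
  -e MANUAL_RUN_FOREVER=FALSE \
  -e CONTAINER_ENABLE_SCHEDULING=FALSE \
  -e CONTAINER_ENABLE_MONITORING=FALSE \
  -e DB_TYPE=pgsql \
  -e DB_HOST=postgres \
  -e DB_NAME=mydb \
  -e DB_USER=backup \
  -e DB_PASS=secret \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```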

Container defaults (environment variables)

@@ -5,9 +5,11 @@ BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
 CHECKSUM=${CHECKSUM:-"MD5"}
 COMPRESSION=${COMPRESSION:-"ZSTD"}
 COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
+CREATE_LATEST_SYMLINK=${CREATE_LATEST_SYMLINK:-"TRUE"}
 DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
 DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
 DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
+DB_DUMP_TARGET_ARCHIVE=${DB_DUMP_TARGET_ARCHIVE:-"${DB_DUMP_TARGET}/archive/"}
 ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
 ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
 MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
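Because each default uses `${VAR:-...}` expansion, a value supplied at runtime always wins, and the new archive default derives from whatever `DB_DUMP_TARGET` resolves to:

```bash
# With no overrides:
DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}                                     # -> /backup
DB_DUMP_TARGET_ARCHIVE=${DB_DUMP_TARGET_ARCHIVE:-"${DB_DUMP_TARGET}/archive/"}  # -> /backup/archive/

# With -e DB_DUMP_TARGET=/data/dumps, the archive default follows along:
#   DB_DUMP_TARGET_ARCHIVE -> /data/dumps/archive/
```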

Backup functions script

@@ -2,12 +2,17 @@
 bootstrap_variables() {
     sanity_var DB_TYPE "Set appropriate DB_TYPE"
+    transform_file_var \
+                        DB_HOST \
+                        DB_PORT \
+                        DB_USER \
+                        DB_PASS
     case "${DB_TYPE,,}" in
         couch* )
             dbtype=couch
             DB_PORT=${DB_PORT:-5984}
-            file_env 'DB_USER'
-            file_env 'DB_PASS'
+            sanity_var DB_USER
+            sanity_var DB_PASS
         ;;
         influx* )
             dbtype=influx
@@ -15,31 +20,31 @@ bootstrap_variables() {
                 1) DB_PORT=${DB_PORT:-8088} ;;
                 2) DB_PORT=${DB_PORT:-8086} ;;
             esac
-            file_env 'DB_USER'
-            file_env 'DB_PASS'
+            sanity_var DB_USER
+            sanity_var DB_PASS
             sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
         ;;
         mongo* )
            dbtype=mongo
+            transform_file_var MONGO_CUSTOM_URI
            if [ -n "${MONGO_CUSTOM_URI}" ] ; then
-                mongo_uri_proto=$(echo ${MONGO_CUSTOM_URI} | grep :// | sed -e's,^\(.*://\).*,\1,g')
+                mongo_uri_proto=$(echo "${MONGO_CUSTOM_URI}" | grep :// | sed -e's,^\(.*://\).*,\1,g')
                 mongo_uri_scratch="${MONGO_CUSTOM_URI/${mongo_uri_proto}/}"
-                mongo_uri_username_password=$(echo ${mongo_uri_scratch} | grep @ | rev | cut -d@ -f2- | rev)
-                if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch=$(echo ${mongo_uri_scratch} | rev | cut -d@ -f1 | rev) ; fi
-                mongo_uri_port=$(echo ${mongo_uri_scratch} | grep : | rev | cut -d: -f2- | rev)
-                if [ -n "${mongo_uri_port}" ]; then mongo_uri_port=$(echo ${mongo_uri_scratch} | rev | cut -d: -f1 | cut -d/ -f2 | rev) ; fi
-                mongo_uri_hostname=$(echo ${mongo_uri_scratch} | cut -d/ -f1 | cut -d: -f1 )
-                mongo_uri_database=$(echo ${mongo_uri_scratch} | cut -d/ -f2 | cut -d? -f1 )
-                mongo_uri_options=$(echo ${mongo_uri_scratch} | cut -d/ -f2 | cut -d? -f2 )
+                mongo_uri_username_password="$(echo "${mongo_uri_scratch}" | grep @ | rev | cut -d@ -f2- | rev)"
+                if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch="$(echo "${mongo_uri_scratch}" | rev | cut -d@ -f1 | rev)" ; fi
+                mongo_uri_port="$(echo "${mongo_uri_scratch}" | grep : | rev | cut -d: -f2- | rev)"
+                if [ -n "${mongo_uri_port}" ]; then mongo_uri_port="$(echo "${mongo_uri_scratch}" | rev | cut -d: -f1 | cut -d/ -f2 | rev)" ; fi
+                mongo_uri_hostname="$(echo "${mongo_uri_scratch}" | cut -d/ -f1 | cut -d: -f1 )"
+                mongo_uri_database="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f1 )"
+                mongo_uri_options="$(echo "${mongo_uri_scratch}" | cut -d/ -f2 | cut -d? -f2 )"
                 DB_NAME=${DB_NAME:-"${mongo_uri_database,,}"}
                 DB_HOST=${DB_HOST:-"${mongo_uri_hostname,,}"}
             else
                 DB_PORT=${DB_PORT:-27017}
-                [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
-                [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
                 [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
                 [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
                 [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
+                transform_file_var DB_AUTH
                 [[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
             fi
         ;;
@@ -47,7 +52,6 @@ bootstrap_variables() {
             dbtype=mysql
             DB_PORT=${DB_PORT:-3306}
             sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
-            [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
             [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
             if var_true "${MYSQL_ENABLE_TLS}" ; then
                 if [ -n "${MYSQL_TLS_CA_FILE}" ] ; then
@@ -81,14 +85,12 @@ bootstrap_variables() {
         postgres* | "pgsql" )
             dbtype=pgsql
             DB_PORT=${DB_PORT:-5432}
-            [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
             [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
             sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
         ;;
         "redis" )
             dbtype=redis
             DB_PORT=${DB_PORT:-6379}
-            [[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
             [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
         ;;
         sqlite* )
@@ -100,15 +102,30 @@ bootstrap_variables() {
         ;;
     esac
-    if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] && [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
-        file_env 'S3_KEY_ID'
-        file_env 'S3_KEY_SECRET'
+    if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
+        transform_file_var \
+                            S3_BUCKET \
+                            S3_KEY_ID \
+                            S3_KEY_SECRET \
+                            S3_PATH \
+                            S3_REGION \
+                            S3_HOST \
+                            S3_PROTOCOL \
+                            S3_EXTRA_OPTS \
+                            S3_CERT_CA_FILE
+    fi
+    if [ "${BACKUP_LOCATION,,}" = "blobxfer" ] ; then
+        transform_file_var \
+                            BLOBXFER_STORAGE_ACCOUNT \
+                            BLOBXFER_STORAGE_KEY
     fi
 }
 backup_couch() {
     prepare_dbbackup
     target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
+    ltarget=couch_${DB_NAME}_${DB_HOST#*//}
     compression
     pre_dbbackup ${DB_NAME}
     print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
@@ -134,6 +151,7 @@ backup_influx() {
     prepare_dbbackup
     if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
     target=influx_${db}_${DB_HOST#*//}_${now}
+    ltarget=influx_${db}_${DB_HOST#*//}
     compression
     pre_dbbackup $db
     print_notice "Dumping Influx database: '${db}'"
@@ -143,6 +161,7 @@ backup_influx() {
     print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
     tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
     target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
+    ltarget=influx_${db}_${DB_HOST#*//}
     generate_checksum
     move_dbbackup
     post_dbbackup $db
@@ -153,6 +172,7 @@ backup_influx() {
     prepare_dbbackup
     if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
     target=influx2_${db}_${DB_HOST#*//}_${now}
+    ltarget=influx2_${db}_${DB_HOST#*//}
     compression
     pre_dbbackup $db
     print_notice "Dumping Influx2 database: '${db}'"
@@ -161,6 +181,7 @@ backup_influx() {
     check_exit_code $target_dir
     create_archive
     target=influx2_${db}_${DB_HOST#*//}_${now}.tar${extension}
+    ltarget=influx2_${db}_${DB_HOST#*//}
     generate_checksum
     move_dbbackup
     post_dbbackup $db
@@ -173,8 +194,10 @@ backup_mongo() {
     prepare_dbbackup
     if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
         target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
+        ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
     else
         target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
+        ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
         mongo_compression="--gzip"
         compression_string="and compressing with gzip"
     fi
@@ -196,6 +219,7 @@ backup_mongo() {
 backup_mssql() {
     prepare_dbbackup
     target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
+    ltarget=mssql_${DB_NAME,,}_${DB_HOST,,}
     compression
     pre_dbbackup "${DB_NAME}"
     print_notice "Dumping MSSQL database: '${DB_NAME}'"
@@ -235,6 +259,7 @@ backup_mysql() {
         for db in ${db_names} ; do
             prepare_dbbackup
             target=mysql_${db}_${DB_HOST,,}_${now}.sql
+            ltarget=mysql_${db}_${DB_HOST,,}
             compression
             pre_dbbackup $db
             print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
@@ -249,6 +274,7 @@ backup_mysql() {
         print_debug "Not splitting database dumps into their own files"
         prepare_dbbackup
         target=mysql_all_${DB_HOST,,}_${now}.sql
+        ltarget=mysql_all_${DB_HOST,,}
         compression
         pre_dbbackup all
         print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
@@ -284,6 +310,7 @@ backup_pgsql() {
         for db in ${db_names} ; do
             prepare_dbbackup
             target=pgsql_${db}_${DB_HOST,,}_${now}.sql
+            ltarget=pgsql_${db}_${DB_HOST,,}
             compression
             pre_dbbackup $db
             print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
@@ -298,6 +325,7 @@ backup_pgsql() {
         print_debug "Not splitting database dumps into their own files"
         prepare_dbbackup
         target=pgsql_all_${DB_HOST,,}_${now}.sql
+        ltarget=pgsql_${db}_${DB_HOST,,}
         compression
         pre_dbbackup all
         print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
@@ -322,6 +350,7 @@ backup_redis() {
     prepare_dbbackup
     print_notice "Dumping Redis - Flushing Redis Cache First"
     target=redis_all_${DB_HOST,,}_${now}.rdb
+    ltarget=redis_${DB_HOST,,}
     echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
     sleep 10
     try=5
@@ -352,6 +381,7 @@ backup_sqlite3() {
     db=$(basename "${DB_HOST}")
     db="${db%.*}"
     target=sqlite3_${db}_${now}.sqlite3
+    ltarget=sqlite3_${db}.sqlite3
     compression
     pre_dbbackup $db
     print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
@@ -416,7 +446,7 @@ check_availability() {
         "mysql" )
             counter=0
             export MYSQL_PWD=${DB_PASS}
-            while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" ${mysql_tls_args} ${EXTRA_OPTS} status > /dev/null 2>&1) ; do
+            while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
                 sleep 5
                 (( counter+=5 ))
                 print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
@@ -432,8 +462,7 @@ check_availability() {
         ;;
         "pgsql" )
             counter=0
-            export PGPASSWORD=${DB_PASS}
-            until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
+            until pg_isready --host=${DB_HOST} --port=${DB_PORT} -q
             do
                 sleep 5
                 (( counter+=5 ))
@@ -649,6 +678,13 @@ move_dbbackup() {
             mkdir -p "${DB_DUMP_TARGET}"
             mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
             mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
+            if var_true "${CREATE_LATEST_SYMLINK}" ; then
+                ln -sf "${DB_DUMP_TARGET}"/"${target}" "${DB_DUMP_TARGET}"/latest-"${ltarget}"
+            fi
+            if [ -n "${DB_ARCHIVE_TIME}" ] ; then
+                mkdir -p "${DB_DUMP_TARGET_ARCHIVE}"
+                find "${DB_DUMP_TARGET}"/ -maxdepth 1 -mmin +"${DB_ARCHIVE_TIME}" -iname "*" -exec mv {} "${DB_DUMP_TARGET_ARCHIVE}" \;
+            fi
         ;;
         "s3" | "minio" )
             print_debug "Moving backup to S3 Bucket"
@@ -703,6 +739,7 @@ prepare_dbbackup() {
     now=$(date +"%Y%m%d-%H%M%S")
     now_time=$(date +"%H:%M:%S")
     now_date=$(date +"%Y-%m-%d")
+    ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
     target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
 }
@@ -812,14 +849,6 @@ sanity_test() {
             sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
         ;;
     esac
-    if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] && [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
-        sanity_var S3_BUCKET "S3 Bucket"
-        sanity_var S3_PATH "S3 Path"
-        sanity_var S3_REGION "S3 Region"
-        file_env 'S3_KEY_ID'
-        file_env 'S3_KEY_SECRET'
-    fi
 }
 setup_mode() {
@@ -831,10 +860,8 @@ setup_mode() {
     if var_true "${MANUAL_RUN_FOREVER}" ; then
         mkdir -p /etc/services.d/99-run_forever
         cat <<EOF > /etc/services.d/99-run_forever/run
 #!/bin/bash
-while true
-do
+while true; do
     sleep 86400
 done
 EOF
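`transform_file_var`, which replaces the earlier `file_env` calls, comes from the upstream base-image function library and is not part of this diff. A minimal sketch of the behavior the new code relies on, assuming the conventional `VAR_FILE` pattern (hypothetical; the real helper may differ):

```bash
# For each variable name given: if VAR_FILE points at a readable file
# (e.g. a Docker or Swarm secret), load VAR from that file's contents.
transform_file_var() {
    local var file_var
    for var in "$@"; do
        file_var="${var}_FILE"
        if [ -n "${!file_var}" ] && [ -r "${!file_var}" ]; then
            export "${var}"="$(cat "${!file_var}")"
        fi
    done
}
```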

Restore script

@@ -37,7 +37,7 @@ else
     case "$1" in
         "-h" )
             cat <<EOF
-${IMAGE_NAME} Restore Tool
+${IMAGE_NAME} Restore Tool ${IMAGE_VERSION}
 (c) 2022 Dave Conroy (https://github.com/tiredofit)
 This script will assist you in recovering databases taken by the Docker image.