mirror of
https://github.com/tiredofit/docker-db-backup.git
synced 2025-12-22 13:44:08 +01:00
Compare commits
26 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e838ed0027 | ||
|
|
8329b4c065 | ||
|
|
dab1ac301a | ||
|
|
077201cd18 | ||
|
|
eeaf59dc6f | ||
|
|
88fe0d6411 | ||
|
|
366c4759a5 | ||
|
|
37f255ec99 | ||
|
|
efa9a678c0 | ||
|
|
68747a4aff | ||
|
|
cf736278bb | ||
|
|
1659e34fc7 | ||
|
|
a8df7a2c75 | ||
|
|
b5194dcce9 | ||
|
|
6fb947684a | ||
|
|
9287f4efeb | ||
|
|
eeb5b5a119 | ||
|
|
a83dfd1a0b | ||
|
|
8fb379b51a | ||
|
|
a90e52091d | ||
|
|
ac58b5cdf6 | ||
|
|
fcbe771793 | ||
|
|
168982ab53 | ||
|
|
e377fcb6ae | ||
|
|
50f27233a9 | ||
|
|
7ccbf23af6 |
116
.github/workflows/main.yml
vendored
116
.github/workflows/main.yml
vendored
@@ -1,111 +1,15 @@
|
|||||||
### Application Level Image CI
|
name: "build_image"
|
||||||
### Dave Conroy <dave at tiredofit dot ca>
|
|
||||||
|
|
||||||
name: 'build'
|
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
paths:
|
paths:
|
||||||
- '**'
|
- "**"
|
||||||
- '!README.md'
|
- "!README.md"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
docker:
|
build:
|
||||||
runs-on: ubuntu-latest
|
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
|
||||||
steps:
|
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
|
||||||
- name: Checkout
|
#uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
|
||||||
uses: actions/checkout@v3
|
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
|
||||||
|
secrets: inherit
|
||||||
- name: Prepare
|
|
||||||
id: prep
|
|
||||||
run: |
|
|
||||||
DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
|
|
||||||
if [[ $GITHUB_REF == refs/heads/* ]]; then
|
|
||||||
if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
|
|
||||||
BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
|
|
||||||
else
|
|
||||||
BRANCH=${GITHUB_REF#refs/heads/}
|
|
||||||
fi
|
|
||||||
|
|
||||||
case ${BRANCH} in
|
|
||||||
"main" | "master" )
|
|
||||||
BRANCHTAG="${DOCKER_IMAGE}:latest"
|
|
||||||
;;
|
|
||||||
"develop" )
|
|
||||||
BRANCHTAG="${DOCKER_IMAGE}:develop"
|
|
||||||
;;
|
|
||||||
* )
|
|
||||||
if [ -n "${{ secrets.LATEST }}" ] ; then
|
|
||||||
if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
|
|
||||||
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
|
|
||||||
else
|
|
||||||
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
if [[ $GITHUB_REF == refs/tags/* ]]; then
|
|
||||||
GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
|
|
||||||
TAGS=${BRANCHTAG},${GITTAG}
|
|
||||||
else
|
|
||||||
TAGS="${BRANCHTAG}${GITTAG}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo ::set-output name=tags::${TAGS}
|
|
||||||
echo ::set-output name=docker_image::${DOCKER_IMAGE}
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v2
|
|
||||||
with:
|
|
||||||
platforms: all
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
id: buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
|
|
||||||
- name: Login to DockerHub
|
|
||||||
if: github.event_name != 'pull_request'
|
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKER_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
|
||||||
|
|
||||||
- name: Label
|
|
||||||
id: Label
|
|
||||||
run: |
|
|
||||||
if [ -f "Dockerfile" ] ; then
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.image.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
|
|
||||||
if [ -f "CHANGELOG.md" ] ; then
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
|
|
||||||
mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
if [[ $GITHUB_REF == refs/tags/* ]]; then
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ $GITHUB_REF == refs/heads/* ]]; then
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Build
|
|
||||||
uses: docker/build-push-action@v3
|
|
||||||
with:
|
|
||||||
builder: ${{ steps.buildx.outputs.name }}
|
|
||||||
context: .
|
|
||||||
file: ./Dockerfile
|
|
||||||
platforms: linux/amd64,linux/arm/v7,linux/arm64
|
|
||||||
push: true
|
|
||||||
tags: ${{ steps.prep.outputs.tags }}
|
|
||||||
|
|||||||
111
.github/workflows/manual.yml
vendored
111
.github/workflows/manual.yml
vendored
@@ -1,6 +1,4 @@
|
|||||||
# Manual Workflow (Application)
|
name: "manual_build_image"
|
||||||
|
|
||||||
name: manual
|
|
||||||
|
|
||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
@@ -8,104 +6,11 @@ on:
|
|||||||
Manual Build:
|
Manual Build:
|
||||||
description: 'Manual Build'
|
description: 'Manual Build'
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
docker:
|
build:
|
||||||
runs-on: ubuntu-latest
|
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
|
||||||
steps:
|
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
|
||||||
- name: Checkout
|
#uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
|
||||||
uses: actions/checkout@v3
|
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
|
||||||
|
secrets: inherit
|
||||||
- name: Prepare
|
|
||||||
id: prep
|
|
||||||
run: |
|
|
||||||
DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
|
|
||||||
if [[ $GITHUB_REF == refs/heads/* ]]; then
|
|
||||||
if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
|
|
||||||
BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
|
|
||||||
else
|
|
||||||
BRANCH=${GITHUB_REF#refs/heads/}
|
|
||||||
fi
|
|
||||||
|
|
||||||
case ${BRANCH} in
|
|
||||||
"main" | "master" )
|
|
||||||
BRANCHTAG="${DOCKER_IMAGE}:latest"
|
|
||||||
;;
|
|
||||||
"develop" )
|
|
||||||
BRANCHTAG="${DOCKER_IMAGE}:develop"
|
|
||||||
;;
|
|
||||||
* )
|
|
||||||
if [ -n "${{ secrets.LATEST }}" ] ; then
|
|
||||||
if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
|
|
||||||
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
|
|
||||||
else
|
|
||||||
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
if [[ $GITHUB_REF == refs/tags/* ]]; then
|
|
||||||
GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
|
|
||||||
TAGS=${BRANCHTAG},${GITTAG}
|
|
||||||
else
|
|
||||||
TAGS="${BRANCHTAG}${GITTAG}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo ::set-output name=tags::${TAGS}
|
|
||||||
echo ::set-output name=docker_image::${DOCKER_IMAGE}
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v2
|
|
||||||
with:
|
|
||||||
platforms: all
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
id: buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
|
|
||||||
- name: Login to DockerHub
|
|
||||||
if: github.event_name != 'pull_request'
|
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKER_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
|
||||||
|
|
||||||
- name: Label
|
|
||||||
id: Label
|
|
||||||
run: |
|
|
||||||
if [ -f "Dockerfile" ] ; then
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
|
|
||||||
if [ -f "CHANGELOG.md" ] ; then
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
|
|
||||||
mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
|
|
||||||
fi
|
|
||||||
|
|
||||||
|
|
||||||
if [[ $GITHUB_REF == refs/tags/* ]]; then
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ $GITHUB_REF == refs/heads/* ]]; then
|
|
||||||
sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Build
|
|
||||||
uses: docker/build-push-action@v3
|
|
||||||
with:
|
|
||||||
builder: ${{ steps.buildx.outputs.name }}
|
|
||||||
context: .
|
|
||||||
file: ./Dockerfile
|
|
||||||
platforms: linux/amd64,linux/arm/v7,linux/arm64
|
|
||||||
push: true
|
|
||||||
tags: ${{ steps.prep.outputs.tags }}
|
|
||||||
|
|||||||
59
CHANGELOG.md
59
CHANGELOG.md
@@ -1,3 +1,62 @@
|
|||||||
|
## 3.8.1 2023-03-30 <dave at tiredofit dot ca>
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Cleanup Dockerfile
|
||||||
|
- Fix issue with DB_ARCHIVE_TIME not firing correctly
|
||||||
|
|
||||||
|
|
||||||
|
## 3.8.0 2023-03-27 <dave at tiredofit dot ca>
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Introduce DB_DUMP_TARGET_ARCHIVE which works with DB_ARCHIVE_TIME to move backups older than (x) minutes from DB_DUMP_TARGET to DB_DUMP_TARGET_ARCHIVE for use with external backup systems and custom exclude rules
|
||||||
|
- Introduce CREATE_LATEST_SYMLINK which creates a symbolic link in DB_DUMP_TARGET of `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)`
|
||||||
|
|
||||||
|
|
||||||
|
## 3.7.7 2023-03-20 <codemonium@github>
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Simplify pg_isready usage
|
||||||
|
|
||||||
|
|
||||||
|
## 3.7.6 2023-03-14 <toshy@github>
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Remove EXTRA_OPT variable from MySQL/MariaDB check
|
||||||
|
|
||||||
|
|
||||||
|
## 3.7.5 2023-03-02 <dave at tiredofit dot ca>
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Add support for Docker Swarm mode Secrets for BLOBXFER_STORAGE_ACCOUNT_*_FILE
|
||||||
|
|
||||||
|
|
||||||
|
## 3.7.4 2023-02-22 <gbe0@github>
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Fix when running in MANUAL_RUN_FOREVER mode looping
|
||||||
|
|
||||||
|
|
||||||
|
## 3.7.3 2022-12-20 <dave at tiredofit dot ca>
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Make S3_KEY_ID and S3_KEY_SECRET optional should IAM roles be used (Credit to alwynpan@github)
|
||||||
|
|
||||||
|
|
||||||
|
## 3.7.2 2022-12-19 <dave at tiredofit dot ca>
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Bugfix for 3.7.1
|
||||||
|
|
||||||
|
|
||||||
|
## 3.7.1 2022-12-19 <dave at tiredofit dot ca>
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Add MYSQL_ENABLE_TLS environment variable to switch on and off
|
||||||
|
|
||||||
|
### Reverted
|
||||||
|
- Set default for MYSQL_TLS_CA_FILE to accomodate for most use cases
|
||||||
|
|
||||||
|
|
||||||
## 3.7.0 2022-12-16 <dave at tiredofit dot ca>
|
## 3.7.0 2022-12-16 <dave at tiredofit dot ca>
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
|
|||||||
28
Dockerfile
28
Dockerfile
@@ -13,9 +13,9 @@ ENV INFLUX2_VERSION=2.4.0 \
|
|||||||
### Dependencies
|
### Dependencies
|
||||||
RUN source /assets/functions/00-container && \
|
RUN source /assets/functions/00-container && \
|
||||||
set -ex && \
|
set -ex && \
|
||||||
apk update && \
|
package update && \
|
||||||
apk upgrade && \
|
package upgrade && \
|
||||||
apk add -t .db-backup-build-deps \
|
package install .db-backup-build-deps \
|
||||||
build-base \
|
build-base \
|
||||||
bzip2-dev \
|
bzip2-dev \
|
||||||
git \
|
git \
|
||||||
@@ -27,7 +27,7 @@ RUN source /assets/functions/00-container && \
|
|||||||
xz-dev \
|
xz-dev \
|
||||||
&& \
|
&& \
|
||||||
\
|
\
|
||||||
apk add --no-cache -t .db-backup-run-deps \
|
package install .db-backup-run-deps \
|
||||||
aws-cli \
|
aws-cli \
|
||||||
bzip2 \
|
bzip2 \
|
||||||
influxdb \
|
influxdb \
|
||||||
@@ -47,14 +47,14 @@ RUN source /assets/functions/00-container && \
|
|||||||
zstd \
|
zstd \
|
||||||
&& \
|
&& \
|
||||||
\
|
\
|
||||||
apkArch="$(apk --print-arch)"; \
|
apkArch="$(package --print-arch)"; \
|
||||||
case "$apkArch" in \
|
case "$apkArch" in \
|
||||||
x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
|
x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
|
||||||
aarch64 ) influx2=true ; influx_arch=arm64 ;; \
|
aarch64 ) influx2=true ; influx_arch=arm64 ;; \
|
||||||
*) sleep 0.1 ;; \
|
*) sleep 0.1 ;; \
|
||||||
esac; \
|
esac; \
|
||||||
\
|
\
|
||||||
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSSQL_VERSION}_amd64.apk mssql-tools18_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
|
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_${MSSQL_VERSION}_amd64.package ; curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_${MSSQL_VERSION}_amd64.package ; echo y | package add --allow-untrusted msodbcsql18_${MSSQL_VERSION}_amd64.package mssql-tools18_${MSSQL_VERSION}_amd64.package ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
|
||||||
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
|
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
|
||||||
\
|
\
|
||||||
mkdir -p /usr/src/pbzip2 && \
|
mkdir -p /usr/src/pbzip2 && \
|
||||||
@@ -75,12 +75,14 @@ RUN source /assets/functions/00-container && \
|
|||||||
\
|
\
|
||||||
pip3 install blobxfer && \
|
pip3 install blobxfer && \
|
||||||
\
|
\
|
||||||
### Cleanup
|
package remove .db-backup-build-deps && \
|
||||||
apk del .db-backup-build-deps && \
|
package cleanup && \
|
||||||
rm -rf /usr/src/* && \
|
rm -rf \
|
||||||
rm -rf /*.apk && \
|
/*.package \
|
||||||
rm -rf /etc/logrotate.d/redis && \
|
/etc/logrotate.d/* \
|
||||||
rm -rf /root/.cache /tmp/* /var/cache/apk/*
|
/root/.cache \
|
||||||
|
/tmp/* \
|
||||||
|
/usr/src/*
|
||||||
|
|
||||||
|
|
||||||
### S6 Setup
|
|
||||||
COPY install /
|
COPY install /
|
||||||
|
|||||||
37
README.md
37
README.md
@@ -80,7 +80,13 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
|
|||||||
Clone this repository and build the image with `docker build <arguments> (imagename) .`
|
Clone this repository and build the image with `docker build <arguments> (imagename) .`
|
||||||
|
|
||||||
### Prebuilt Images
|
### Prebuilt Images
|
||||||
Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and is the recommended method of installation.
|
Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup)
|
||||||
|
|
||||||
|
Builds of the image are also available on the [Github Container Registry](https://github.com/tiredofit/docker-db-backup/pkgs/container/docker-db-backup)
|
||||||
|
|
||||||
|
```
|
||||||
|
docker pull ghcr.io/tiredofit/docker-db-backup:(imagetag)
|
||||||
|
```
|
||||||
|
|
||||||
The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md):
|
The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md):
|
||||||
|
|
||||||
@@ -89,7 +95,7 @@ The following image tags are available along with their tagged release based on
|
|||||||
| latest | `:latest` |
|
| latest | `:latest` |
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker pull tiredofit/db-backup:(imagetag)
|
docker pull docker.io/tiredofdit/db-backup:(imagetag)
|
||||||
```
|
```
|
||||||
#### Multi Architecture
|
#### Multi Architecture
|
||||||
Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architecures, type `docker manifest (image):(tag)`
|
Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architecures, type `docker manifest (image):(tag)`
|
||||||
@@ -117,7 +123,7 @@ The following directories are used for configuration and can be mapped for persi
|
|||||||
|
|
||||||
#### Base Images used
|
#### Base Images used
|
||||||
|
|
||||||
This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) or [Debian Linux](https://hub.docker.com/r/tiredofit/debian) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`,`vim`.
|
This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`.
|
||||||
|
|
||||||
Be sure to view the following repositories to understand all the customizable options:
|
Be sure to view the following repositories to understand all the customizable options:
|
||||||
|
|
||||||
@@ -128,12 +134,13 @@ Be sure to view the following repositories to understand all the customizable op
|
|||||||
#### Container Options
|
#### Container Options
|
||||||
|
|
||||||
| Parameter | Description | Default |
|
| Parameter | Description | Default |
|
||||||
| -------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
|
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------- |
|
||||||
| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
|
| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
|
||||||
| `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` |
|
| `MODE` | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means | `AUTO` |
|
||||||
| `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
|
| `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup | `TRUE` |
|
||||||
| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
|
| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
|
||||||
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
|
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | `FALSE` |
|
||||||
|
| `CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST) | `TRUE` |
|
||||||
| `PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | |
|
| `PRE_SCRIPT` | Fill this variable in with a command to execute pre backing up | |
|
||||||
| `POST_SCRIPT` | Fill this variable in with a command to execute post backing up | |
|
| `POST_SCRIPT` | Fill this variable in with a command to execute post backing up | |
|
||||||
| `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE` |
|
| `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE` |
|
||||||
@@ -158,20 +165,21 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
|
|||||||
|
|
||||||
### Scheduling Options
|
### Scheduling Options
|
||||||
| Parameter | Description | Default |
|
| Parameter | Description | Default |
|
||||||
| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- |
|
| ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- |
|
||||||
| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
|
| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
|
||||||
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
|
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
|
||||||
| | Absolute HHMM, e.g. `2330` or `0415` | |
|
| | Absolute HHMM, e.g. `2330` or `0415` | |
|
||||||
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
|
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
|
||||||
| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `/backup` |
|
| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `${DB_DUMP_TARGET}/archive/` |
|
||||||
|
| `DB_DUMP_TARGET_ARCHIVE` | Optional Directory where the database dumps archivs are kept. |
|
||||||
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
|
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
|
||||||
|
| `DB_ARCHIVE_TIME` | Value in minutes to move all files from `DB_DUMP_TARGET` to `DB_DUMP_TARGET_ARCHIVE` - which is useful when pairing against an external backup system. |
|
||||||
|
|
||||||
- You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of backups that start with a `0` get converted into a different format which will not allow the timer to start at the correct time.
|
- You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of backups that start with a `0` get converted into a different format which will not allow the timer to start at the correct time.
|
||||||
|
|
||||||
### Backup Options
|
### Backup Options
|
||||||
| Parameter | Description | Default |
|
| Parameter | Description | Default |
|
||||||
| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- |
|
| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- |
|
||||||
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
|
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
|
||||||
| `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` |
|
| `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` |
|
||||||
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
|
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
|
||||||
@@ -183,9 +191,10 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
|
|||||||
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` |
|
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` |
|
||||||
| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` |
|
| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` |
|
||||||
| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` |
|
| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` |
|
||||||
|
| `MYSQL_ENABLE_TLS` | Enable TLS functionality for MySQL client | `FALSE` |
|
||||||
| `MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` |
|
| `MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` |
|
||||||
| `MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` |
|
| `MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` |
|
||||||
| `MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS e.g. `/etc/ssl/cert.pem` should suffice for most non self signed setups | |
|
| `MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` |
|
||||||
| `MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | |
|
| `MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | |
|
||||||
| `MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | |
|
| `MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | |
|
||||||
|
|
||||||
@@ -198,8 +207,8 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.
|
|||||||
| Parameter | Description | Default |
|
| Parameter | Description | Default |
|
||||||
| --------------------- | ----------------------------------------------------------------------------------------- | ------- |
|
| --------------------- | ----------------------------------------------------------------------------------------- | ------- |
|
||||||
| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | |
|
| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | |
|
||||||
| `S3_KEY_ID` | S3 Key ID | |
|
| `S3_KEY_ID` | S3 Key ID (Optional) | |
|
||||||
| `S3_KEY_SECRET` | S3 Key Secret | |
|
| `S3_KEY_SECRET` | S3 Key Secret (Optional) | |
|
||||||
| `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | |
|
| `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | |
|
||||||
| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | |
|
| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | |
|
||||||
| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
|
| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
|
||||||
@@ -209,6 +218,8 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.
|
|||||||
| _*OR*_ | | |
|
| _*OR*_ | | |
|
||||||
| `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` |
|
| `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` |
|
||||||
|
|
||||||
|
- When `S3_KEY_ID` and/or `S3_KEY_SECRET` is not set, will try to use IAM role assigned (if any) for uploading the backup files to S3 bucket.
|
||||||
|
|
||||||
#### Upload to a Azure storage account by `blobxfer`
|
#### Upload to a Azure storage account by `blobxfer`
|
||||||
|
|
||||||
Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer) to the Azure fileshare storage.
|
Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer) to the Azure fileshare storage.
|
||||||
@@ -237,7 +248,7 @@ docker exec -it (whatever your container name is) bash
|
|||||||
### Manual Backups
|
### Manual Backups
|
||||||
Manual Backups can be performed by entering the container and typing `backup-now`
|
Manual Backups can be performed by entering the container and typing `backup-now`
|
||||||
|
|
||||||
- Recently there was a request to have the container work with Kukbernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup by executing and then exit cleanly. An alternative way to running the script is to execute `/etc/services.available/10-db-backup/run`.
|
- Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup by executing and then exit cleanly. An alternative way to running the script is to execute `/etc/services.available/10-db-backup/run`.
|
||||||
|
|
||||||
### Restoring Databases
|
### Restoring Databases
|
||||||
Entering in the container and executing `restore` will execute a menu based script to restore your backups - MariaDB, Postgres, and Mongo supported.
|
Entering in the container and executing `restore` will execute a menu based script to restore your backups - MariaDB, Postgres, and Mongo supported.
|
||||||
@@ -335,7 +346,7 @@ If for some reason your filesystem or host is not detecting it right, use the en
|
|||||||
These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
|
These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
|
||||||
### Usage
|
### Usage
|
||||||
- The [Discussions board](../../discussions) is a great place for working with the community on tips and tricks of using this image.
|
- The [Discussions board](../../discussions) is a great place for working with the community on tips and tricks of using this image.
|
||||||
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) personalized support.
|
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for personalized support
|
||||||
### Bugfixes
|
### Bugfixes
|
||||||
- Please, submit a [Bug Report](issues/new) if something isn't working as expected. I'll do my best to issue a fix in short order.
|
- Please, submit a [Bug Report](issues/new) if something isn't working as expected. I'll do my best to issue a fix in short order.
|
||||||
|
|
||||||
|
|||||||
@@ -5,16 +5,20 @@ BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
|
|||||||
CHECKSUM=${CHECKSUM:-"MD5"}
|
CHECKSUM=${CHECKSUM:-"MD5"}
|
||||||
COMPRESSION=${COMPRESSION:-"ZSTD"}
|
COMPRESSION=${COMPRESSION:-"ZSTD"}
|
||||||
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
|
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
|
||||||
|
CREATE_LATEST_SYMLINK=${CREATE_LATEST_SYMLINK:-"TRUE"}
|
||||||
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
|
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
|
||||||
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
|
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
|
||||||
DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
|
DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
|
||||||
|
DB_DUMP_TARGET_ARCHIVE=${DB_DUMP_TARGET_ARCHIVE:-"${DB_DUMP_TARGET}/archive/"}
|
||||||
ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
|
ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
|
||||||
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
|
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
|
||||||
MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
|
MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
|
||||||
MODE=${MODE:-"AUTO"}
|
MODE=${MODE:-"AUTO"}
|
||||||
|
MYSQL_ENABLE_TLS=${MYSQL_ENABLE_TLS:-"FALSE"}
|
||||||
MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
|
MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
|
||||||
MYSQL_SINGLE_TRANSACTION=${MYSQL_SINGLE_TRANSACTION:-"TRUE"}
|
MYSQL_SINGLE_TRANSACTION=${MYSQL_SINGLE_TRANSACTION:-"TRUE"}
|
||||||
MYSQL_STORED_PROCEDURES=${MYSQL_STORED_PROCEDURES:-"TRUE"}
|
MYSQL_STORED_PROCEDURES=${MYSQL_STORED_PROCEDURES:-"TRUE"}
|
||||||
|
MYSQL_TLS_CA_FILE=${MYSQL_TLS_CA_FILE:-"/etc/ssl/cert.pem"}
|
||||||
MYSQL_TLS_VERIFY=${MYSQL_TLS_VERIFY:-"FALSE"}
|
MYSQL_TLS_VERIFY=${MYSQL_TLS_VERIFY:-"FALSE"}
|
||||||
MYSQL_TLS_VERSION=${MYSQL_TLS_VERSION:-"TLSv1.1,TLSv1.2,TLSv1.3"}
|
MYSQL_TLS_VERSION=${MYSQL_TLS_VERSION:-"TLSv1.1,TLSv1.2,TLSv1.3"}
|
||||||
PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
|
PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
|
||||||
|
|||||||
@@ -49,25 +49,25 @@ bootstrap_variables() {
|
|||||||
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
|
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
|
||||||
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
|
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
|
||||||
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
|
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
|
||||||
|
if var_true "${MYSQL_ENABLE_TLS}" ; then
|
||||||
if [ -n "${MYSQL_TLS_CA_FILE}" ] ; then
|
if [ -n "${MYSQL_TLS_CA_FILE}" ] ; then
|
||||||
mysql_tls=TRUE
|
|
||||||
mysql_tls_args="--ssl_ca=${MYSQL_TLS_CA_FILE}"
|
mysql_tls_args="--ssl_ca=${MYSQL_TLS_CA_FILE}"
|
||||||
fi
|
fi
|
||||||
if [ -n "${MYSQL_TLS_CERT_FILE}" ] ; then
|
if [ -n "${MYSQL_TLS_CERT_FILE}" ] ; then
|
||||||
mysql_tls=TRUE
|
|
||||||
mysql_tls_args="${mysql_tls_args} --ssl_cert=${MYSQL_TLS_CERT_FILE}"
|
mysql_tls_args="${mysql_tls_args} --ssl_cert=${MYSQL_TLS_CERT_FILE}"
|
||||||
fi
|
fi
|
||||||
if [ -n "${MYSQL_TLS_KEY_FILE}" ] ; then
|
if [ -n "${MYSQL_TLS_KEY_FILE}" ] ; then
|
||||||
mysql_tls=TRUE
|
|
||||||
mysql_tls_args="${mysql_tls_args} --ssl_key=${MYSQL_TLS_KEY_FILE}"
|
mysql_tls_args="${mysql_tls_args} --ssl_key=${MYSQL_TLS_KEY_FILE}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if var_true "${TLS_VERIFY}" ; then
|
if var_true "${TLS_VERIFY}" ; then
|
||||||
mysql_tls=TRUE
|
|
||||||
mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
|
mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
|
||||||
fi
|
fi
|
||||||
if var_true "${mysql_tls}" ; then
|
|
||||||
|
if [ -n "${MYSQL_TLS_VERSION}" ] ; then
|
||||||
mysql_tls_args="${mysql_tls_args} --tls_version=${MYSQL_TLS_VERSION}"
|
mysql_tls_args="${mysql_tls_args} --tls_version=${MYSQL_TLS_VERSION}"
|
||||||
fi
|
fi
|
||||||
|
fi
|
||||||
;;
|
;;
|
||||||
"mssql" | "microsoftsql" )
|
"mssql" | "microsoftsql" )
|
||||||
apkArch="$(apk --print-arch)"; \
|
apkArch="$(apk --print-arch)"; \
|
||||||
@@ -100,15 +100,21 @@ bootstrap_variables() {
|
|||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
|
||||||
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
|
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] && [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
|
||||||
file_env 'S3_KEY_ID'
|
file_env 'S3_KEY_ID'
|
||||||
file_env 'S3_KEY_SECRET'
|
file_env 'S3_KEY_SECRET'
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ "${BACKUP_LOCATION,,}" = "blobxfer" ] && [ -n "${BLOBXFER_STORAGE_ACCOUNT_FILE}" ] && [ -n "${BLOBXFER_STORAGE_ACCOUNT_KEY_FILE}" ]; then
|
||||||
|
file_env 'BLOBXFER_STORAGE_ACCOUNT_FILE'
|
||||||
|
file_env 'BLOBXFER_STORAGE_ACCOUNT_KEY_FILE'
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
backup_couch() {
|
backup_couch() {
|
||||||
prepare_dbbackup
|
prepare_dbbackup
|
||||||
target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
|
target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
|
||||||
|
ltarget=couch_${DB_NAME}_${DB_HOST#*//}
|
||||||
compression
|
compression
|
||||||
pre_dbbackup ${DB_NAME}
|
pre_dbbackup ${DB_NAME}
|
||||||
print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
|
print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
|
||||||
@@ -134,6 +140,7 @@ backup_influx() {
|
|||||||
prepare_dbbackup
|
prepare_dbbackup
|
||||||
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
|
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
|
||||||
target=influx_${db}_${DB_HOST#*//}_${now}
|
target=influx_${db}_${DB_HOST#*//}_${now}
|
||||||
|
ltarget=influx_${db}_${DB_HOST#*//}
|
||||||
compression
|
compression
|
||||||
pre_dbbackup $db
|
pre_dbbackup $db
|
||||||
print_notice "Dumping Influx database: '${db}'"
|
print_notice "Dumping Influx database: '${db}'"
|
||||||
@@ -143,6 +150,7 @@ backup_influx() {
|
|||||||
print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
|
print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
|
||||||
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
|
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
|
||||||
target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
|
target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
|
||||||
|
ltarget=influx_${db}_${DB_HOST#*//}
|
||||||
generate_checksum
|
generate_checksum
|
||||||
move_dbbackup
|
move_dbbackup
|
||||||
post_dbbackup $db
|
post_dbbackup $db
|
||||||
@@ -153,6 +161,7 @@ backup_influx() {
|
|||||||
prepare_dbbackup
|
prepare_dbbackup
|
||||||
if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
|
if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
|
||||||
target=influx2_${db}_${DB_HOST#*//}_${now}
|
target=influx2_${db}_${DB_HOST#*//}_${now}
|
||||||
|
ltarget=influx2_${db}_${DB_HOST#*//}
|
||||||
compression
|
compression
|
||||||
pre_dbbackup $db
|
pre_dbbackup $db
|
||||||
print_notice "Dumping Influx2 database: '${db}'"
|
print_notice "Dumping Influx2 database: '${db}'"
|
||||||
@@ -173,8 +182,10 @@ backup_mongo() {
|
|||||||
prepare_dbbackup
|
prepare_dbbackup
|
||||||
if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
|
if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
|
||||||
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
|
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
|
||||||
|
ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
|
||||||
else
|
else
|
||||||
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
|
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
|
||||||
|
ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
|
||||||
mongo_compression="--gzip"
|
mongo_compression="--gzip"
|
||||||
compression_string="and compressing with gzip"
|
compression_string="and compressing with gzip"
|
||||||
fi
|
fi
|
||||||
@@ -196,6 +207,7 @@ backup_mongo() {
|
|||||||
backup_mssql() {
|
backup_mssql() {
|
||||||
prepare_dbbackup
|
prepare_dbbackup
|
||||||
target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
|
target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
|
||||||
|
ltarget=mssql_${DB_NAME,,}_${DB_HOST,,}
|
||||||
compression
|
compression
|
||||||
pre_dbbackup "${DB_NAME}"
|
pre_dbbackup "${DB_NAME}"
|
||||||
print_notice "Dumping MSSQL database: '${DB_NAME}'"
|
print_notice "Dumping MSSQL database: '${DB_NAME}'"
|
||||||
@@ -235,6 +247,7 @@ backup_mysql() {
|
|||||||
for db in ${db_names} ; do
|
for db in ${db_names} ; do
|
||||||
prepare_dbbackup
|
prepare_dbbackup
|
||||||
target=mysql_${db}_${DB_HOST,,}_${now}.sql
|
target=mysql_${db}_${DB_HOST,,}_${now}.sql
|
||||||
|
ltarget=mysql_${db}_${DB_HOST,,}
|
||||||
compression
|
compression
|
||||||
pre_dbbackup $db
|
pre_dbbackup $db
|
||||||
print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
|
print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
|
||||||
@@ -249,6 +262,7 @@ backup_mysql() {
|
|||||||
print_debug "Not splitting database dumps into their own files"
|
print_debug "Not splitting database dumps into their own files"
|
||||||
prepare_dbbackup
|
prepare_dbbackup
|
||||||
target=mysql_all_${DB_HOST,,}_${now}.sql
|
target=mysql_all_${DB_HOST,,}_${now}.sql
|
||||||
|
ltarget=mysql_all_${DB_HOST,,}
|
||||||
compression
|
compression
|
||||||
pre_dbbackup all
|
pre_dbbackup all
|
||||||
print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
|
print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
|
||||||
@@ -284,6 +298,7 @@ backup_pgsql() {
|
|||||||
for db in ${db_names} ; do
|
for db in ${db_names} ; do
|
||||||
prepare_dbbackup
|
prepare_dbbackup
|
||||||
target=pgsql_${db}_${DB_HOST,,}_${now}.sql
|
target=pgsql_${db}_${DB_HOST,,}_${now}.sql
|
||||||
|
ltarget=pgsql_${db}_${DB_HOST,,}
|
||||||
compression
|
compression
|
||||||
pre_dbbackup $db
|
pre_dbbackup $db
|
||||||
print_notice "Dumping PostgreSQL database: '${db}' ${compression_string}"
|
print_notice "Dumping PostgreSQL database: '${db}' ${compression_string}"
|
||||||
@@ -298,6 +313,7 @@ backup_pgsql() {
|
|||||||
print_debug "Not splitting database dumps into their own files"
|
print_debug "Not splitting database dumps into their own files"
|
||||||
prepare_dbbackup
|
prepare_dbbackup
|
||||||
target=pgsql_all_${DB_HOST,,}_${now}.sql
|
target=pgsql_all_${DB_HOST,,}_${now}.sql
|
||||||
|
ltarget=pgsql_all_${DB_HOST,,}
|
||||||
compression
|
compression
|
||||||
pre_dbbackup all
|
pre_dbbackup all
|
||||||
print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
|
print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
|
||||||
@@ -322,6 +338,7 @@ backup_redis() {
|
|||||||
prepare_dbbackup
|
prepare_dbbackup
|
||||||
print_notice "Dumping Redis - Flushing Redis Cache First"
|
print_notice "Dumping Redis - Flushing Redis Cache First"
|
||||||
target=redis_all_${DB_HOST,,}_${now}.rdb
|
target=redis_all_${DB_HOST,,}_${now}.rdb
|
||||||
|
ltarget=redis_${DB_HOST,,}
|
||||||
echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
|
echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
|
||||||
sleep 10
|
sleep 10
|
||||||
try=5
|
try=5
|
||||||
@@ -352,6 +369,7 @@ backup_sqlite3() {
|
|||||||
db=$(basename "${DB_HOST}")
|
db=$(basename "${DB_HOST}")
|
||||||
db="${db%.*}"
|
db="${db%.*}"
|
||||||
target=sqlite3_${db}_${now}.sqlite3
|
target=sqlite3_${db}_${now}.sqlite3
|
||||||
|
ltarget=sqlite3_${db}.sqlite3
|
||||||
compression
|
compression
|
||||||
pre_dbbackup $db
|
pre_dbbackup $db
|
||||||
print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
|
print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
|
||||||
@@ -416,7 +434,7 @@ check_availability() {
|
|||||||
"mysql" )
|
"mysql" )
|
||||||
counter=0
|
counter=0
|
||||||
export MYSQL_PWD=${DB_PASS}
|
export MYSQL_PWD=${DB_PASS}
|
||||||
while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" ${mysql_tls_args} ${EXTRA_OPTS} status > /dev/null 2>&1) ; do
|
while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
|
||||||
sleep 5
|
sleep 5
|
||||||
(( counter+=5 ))
|
(( counter+=5 ))
|
||||||
print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
|
print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
|
||||||
@@ -432,8 +450,7 @@ check_availability() {
|
|||||||
;;
|
;;
|
||||||
"pgsql" )
|
"pgsql" )
|
||||||
counter=0
|
counter=0
|
||||||
export PGPASSWORD=${DB_PASS}
|
until pg_isready --host=${DB_HOST} --port=${DB_PORT} -q
|
||||||
until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
|
|
||||||
do
|
do
|
||||||
sleep 5
|
sleep 5
|
||||||
(( counter+=5 ))
|
(( counter+=5 ))
|
||||||
@@ -587,7 +604,7 @@ compression() {
|
|||||||
|
|
||||||
create_archive() {
|
create_archive() {
|
||||||
if [ "${exit_code}" = "0" ] ; then
|
if [ "${exit_code}" = "0" ] ; then
|
||||||
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
|
print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
|
||||||
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
|
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
|
||||||
else
|
else
|
||||||
print_error "Skipping creating archive file because backup did not complete successfully"
|
print_error "Skipping creating archive file because backup did not complete successfully"
|
||||||
@@ -649,11 +666,23 @@ move_dbbackup() {
|
|||||||
mkdir -p "${DB_DUMP_TARGET}"
|
mkdir -p "${DB_DUMP_TARGET}"
|
||||||
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
|
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
|
||||||
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
|
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
|
||||||
|
if var_true "${CREATE_LATEST_SYMLINK}" ; then
|
||||||
|
ln -sf "${DB_DUMP_TARGET}"/"${target}" "${DB_DUMP_TARGET}"/latest-"${ltarget}"
|
||||||
|
fi
|
||||||
|
if [ -n "${DB_ARCHIVE_TIME}" ] ; then
|
||||||
|
mkdir -p "${DB_DUMP_TARGET_ARCHIVE}"
|
||||||
|
find "${DB_DUMP_TARGET}"/ -maxdepth 1 -mmin +"${DB_ARCHIVE_TIME}" -iname "*" -exec mv {} "${DB_DUMP_TARGET_ARCHIVE}" \;
|
||||||
|
find "${DB_DUMP_TARGET}"/ -maxdepth 1 -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
|
||||||
|
fi
|
||||||
;;
|
;;
|
||||||
"s3" | "minio" )
|
"s3" | "minio" )
|
||||||
print_debug "Moving backup to S3 Bucket"
|
print_debug "Moving backup to S3 Bucket"
|
||||||
|
if [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
|
||||||
export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
|
export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
|
||||||
export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
|
export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
|
||||||
|
else
|
||||||
|
print_debug "Variable S3_KEY_ID or S3_KEY_SECRET is not set. Please ensure sufficient IAM role is assigned."
|
||||||
|
fi
|
||||||
export AWS_DEFAULT_REGION=${S3_REGION}
|
export AWS_DEFAULT_REGION=${S3_REGION}
|
||||||
if [ -f "${S3_CERT_CA_FILE}" ] ; then
|
if [ -f "${S3_CERT_CA_FILE}" ] ; then
|
||||||
print_debug "Using Custom CA for S3 Backups"
|
print_debug "Using Custom CA for S3 Backups"
|
||||||
@@ -699,6 +728,7 @@ prepare_dbbackup() {
|
|||||||
now=$(date +"%Y%m%d-%H%M%S")
|
now=$(date +"%Y%m%d-%H%M%S")
|
||||||
now_time=$(date +"%H:%M:%S")
|
now_time=$(date +"%H:%M:%S")
|
||||||
now_date=$(date +"%Y-%m-%d")
|
now_date=$(date +"%Y-%m-%d")
|
||||||
|
ltarget=${dbtype}_${DB_NAME,,}_${DB_HOST,,}
|
||||||
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
|
target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -809,7 +839,7 @@ sanity_test() {
|
|||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
|
||||||
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
|
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] && [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
|
||||||
sanity_var S3_BUCKET "S3 Bucket"
|
sanity_var S3_BUCKET "S3 Bucket"
|
||||||
sanity_var S3_PATH "S3 Path"
|
sanity_var S3_PATH "S3 Path"
|
||||||
sanity_var S3_REGION "S3 Region"
|
sanity_var S3_REGION "S3 Region"
|
||||||
@@ -828,8 +858,7 @@ setup_mode() {
|
|||||||
mkdir -p /etc/services.d/99-run_forever
|
mkdir -p /etc/services.d/99-run_forever
|
||||||
cat <<EOF > /etc/services.d/99-run_forever/run
|
cat <<EOF > /etc/services.d/99-run_forever/run
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
while true
|
while true; do
|
||||||
do
|
|
||||||
sleep 86400
|
sleep 86400
|
||||||
done
|
done
|
||||||
EOF
|
EOF
|
||||||
|
|||||||
Reference in New Issue
Block a user