Compare commits


40 Commits

Author SHA1 Message Date
Dave Conroy
d7d4f1cc19 Merge branch 'master' of https://github.com/tiredofit/docker-db-backup 2021-02-19 08:33:48 -08:00
Dave Conroy
c8c9a80533 Release 2.6.0 - See CHANGELOG.md 2021-02-19 08:33:43 -08:00
Dave Conroy
018234b9bc Merge pull request #56 from tpansino/add-sqlite-support
Add sqlite support
2021-02-19 08:32:52 -08:00
Tom Pansino
912e60edd8 Exit on failed file checks 2021-02-18 00:35:06 -08:00
Tom Pansino
46fddb533c Add sqlite3 to README 2021-02-18 00:09:17 -08:00
Tom Pansino
e8a1859d1a Add initial sqlite3 support 2021-02-17 23:55:22 -08:00
Dave Conroy
30fe2f181c #55 - Fix xz parallel compression 2021-02-14 09:06:21 -08:00
Dave Conroy
f57ce461e9 GitHub CI 2021-01-25 17:05:25 -08:00
Dave Conroy
34aab69cc2 Release 2.5.0 - See CHANGELOG.md 2021-01-25 16:39:42 -08:00
Dave Conroy
1930358775 Multi Arch CI 2021-01-21 15:35:16 -08:00
Dave Conroy
f207f375cc Release 2.4.0 - See CHANGELOG.md 2020-12-07 15:27:20 -08:00
Dave Conroy
88b58bffc5 Release 2.3.2 - See CHANGELOG.md 2020-11-14 12:37:58 -08:00
Dave Conroy
738f7fad25 Release 2.3.1 - See CHANGELOG.md 2020-11-11 13:45:05 -08:00
Dave Conroy
8c4733bf7f Merge pull request #52 from bambi73/master
#51 Fix backup of multiple InfluxDB databases failure
2020-11-11 13:43:56 -08:00
Bambi125
be4d8c0747 #51 Fix backup of multiple InfluxDB databases failure 2020-11-11 22:38:04 +01:00
Dave Conroy
a13849df0a Release 2.3.0 - See CHANGELOG.md 2020-10-15 08:15:10 -07:00
Dave Conroy
cb5347afe5 Release 2.2.2 - See CHANGELOG.md 2020-09-22 21:14:37 -07:00
Dave Conroy
ca03c5369d Merge pull request #47 from tpansino/bug/46-fix-docker-secrets
Fix Docker Secrets injection from DB_USER_FILE/DB_PASS_FILE
2020-09-22 21:02:05 -07:00
Tom Pansino
3008d9125f Fix Docker Secrets injection from DB_USER_FILE/DB_PASS_FILE 2020-09-22 20:32:09 -07:00
Dave Conroy
19cf3d007f Release 2.2.1 - See CHANGELOG.md 2020-09-17 21:39:27 -07:00
Dave Conroy
0bbf142349 Merge pull request #45 from alwynpan/fix-backup-now-date-error-message
Fix backup now date error message
2020-09-17 21:38:10 -07:00
Yao (Alwyn) Pan
1bc357866f #42 Update README 2020-09-18 14:34:06 +10:00
Yao (Alwyn) Pan
b38ad7a5cc #44 Remove 'invalid date' error message when performing backup-now 2020-09-18 14:32:08 +10:00
Dave Conroy
8bc02ee6c8 Release 2.2.0 - See CHANGELOG.md 2020-09-14 07:07:44 -07:00
Dave Conroy
3e71c377c6 Merge pull request #43 from alwynpan/fix-optional-vars
#42 Make DB_USER and DB_PASS optional for some dbtypes; update alpine repo URI
2020-09-14 07:05:29 -07:00
Yao (Alwyn) Pan
76a857239f #42 Make DB_USER and DB_PASS optional for some dbtypes; update alpine repo URI 2020-09-14 19:22:57 +10:00
Dave Conroy
02880d6541 Release 5.1.1 - See CHANGELOG.md 2020-09-01 09:57:58 -07:00
Dave Conroy
564613f329 Merge pull request #41 from zicklag/patch-1
Fix POST_SCRIPT Environment Vairable Run
2020-09-01 09:55:37 -07:00
Zicklag
2606d3c4d5 Fix POST_SCRIPT Environment Vairable Run 2020-09-01 09:46:43 -05:00
Dave Conroy
51f0206e17 Release 2.1.0 - See CHANGELOG.md 2020-08-29 07:43:24 -07:00
Dave Conroy
8d7bea3315 Merge branch 'master' of https://github.com/tiredofit/docker-db-backup into master 2020-08-29 07:37:10 -07:00
Dave Conroy
30c56229cf Release 2.1.0 - See CHANGELOG.md 2020-08-29 07:37:03 -07:00
Dave Conroy
04594087ed Create FUNDING.yml 2020-06-24 17:08:09 -07:00
Dave Conroy
b57683e992 Update README.md 2020-06-17 08:53:22 -07:00
Dave Conroy
1323966e22 Update README.md 2020-06-17 08:21:12 -07:00
Dave Conroy
310edda88c Reduce size of temporarily files
Changed way backups are performed to reduce temporary files
Removed Rethink Support
Rework MongoDB compression
Remove function prefix from functions
Rename case on variables for easier reading
2020-06-17 08:15:34 -07:00
Dave Conroy
955a08a21b Release 1.23.0 - See CHANGELOG.md 2020-06-15 09:44:07 -07:00
Dave Conroy
bf97c3ab97 Update README.md 2020-06-10 05:48:03 -07:00
Dave Conroy
11969da1ea Release 1.22.0 - See CHANGELOG.md 2020-06-10 05:45:49 -07:00
Dave Conroy
7998156576 Release 1.21.3 - See CHANGELOG.md 2020-06-10 05:19:24 -07:00
8 changed files with 663 additions and 314 deletions

.github/FUNDING.yml (vendored, new file, 1 addition)

@@ -0,0 +1 @@
github: [tiredofit]

.github/workflows/main.yml (vendored, new file, 110 additions)

@@ -0,0 +1,110 @@
### Application Level Image CI
### Dave Conroy <dave at tiredofit dot ca>
name: 'Build Images'

on:
  push:
    paths:
      - '**'
      - '!README.md'

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Prepare
        id: prep
        run: |
          DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
          set -x
          if [[ $GITHUB_REF == refs/heads/* ]]; then
            if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
              BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
            else
              BRANCH=${GITHUB_REF#refs/heads/}
            fi

            case ${BRANCH} in
              "main" | "master" )
                BRANCHTAG="${DOCKER_IMAGE}:latest"
                ;;
              "develop" )
                BRANCHTAG="${DOCKER_IMAGE}:develop"
                ;;
              * )
                if [ -n "${{ secrets.LATEST }}" ] ; then
                  if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
                    BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
                  else
                    BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
                  fi
                else
                  BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
                fi
                ;;
            esac
          fi

          if [[ $GITHUB_REF == refs/tags/* ]]; then
            GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
          fi

          if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
            TAGS=${BRANCHTAG},${GITTAG}
          else
            TAGS="${BRANCHTAG}${GITTAG}"
          fi

          echo ::set-output name=tags::${TAGS}
          echo ::set-output name=docker_image::${DOCKER_IMAGE}

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
        with:
          platforms: all

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to DockerHub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Label
        id: Label
        run: |
          if [ -f "Dockerfile" ] ; then
            sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
            sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
            sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
            sed -i "/FROM .*/a LABEL tiredofit.image.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
            if [ -f "CHANGELOG.md" ] ; then
              sed -i "/FROM .*/a LABEL tiredofit.image.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
            fi
            if [[ $GITHUB_REF == refs/tags/* ]]; then
              sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
            fi
            if [[ $GITHUB_REF == refs/heads/* ]]; then
              sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
            fi
          fi

      - name: Build
        uses: docker/build-push-action@v2
        with:
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64
          push: true
          tags: ${{ steps.prep.outputs.tags }}
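For reference, a minimal dry run of the tag logic in the Prepare step; the repository name and ref here are assumptions chosen to match this repo, and the snippet is only an illustration, not part of the workflow:

```bash
#!/usr/bin/env bash
# Hypothetical dry run of the Prepare step's tag derivation, assuming a
# push to refs/heads/master of tiredofit/docker-db-backup.
GITHUB_REPOSITORY="tiredofit/docker-db-backup"
GITHUB_REF="refs/heads/master"

DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}   # strips "docker-" -> tiredofit/db-backup
if [[ $GITHUB_REF == refs/heads/* ]]; then
  BRANCH=${GITHUB_REF#refs/heads/}           # -> master
  case ${BRANCH} in
    "main" | "master" ) BRANCHTAG="${DOCKER_IMAGE}:latest" ;;
    "develop" )         BRANCHTAG="${DOCKER_IMAGE}:develop" ;;
    * )                 BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest" ;;
  esac
fi
echo "${BRANCHTAG}"                          # prints: tiredofit/db-backup:latest
```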

CHANGELOG.md

@@ -1,3 +1,112 @@
## 2.6.0 2021-02-19 <tpansino@github>

### Added
- SQLite support

## 2.5.1 2021-02-14 <dave at tiredofit dot ca>

### Changed
- Fix xz backups with `PARALLEL_COMPRESSION=TRUE`

## 2.5.0 2021-01-25 <dave at tiredofit dot ca>

### Added
- Multi Platform Build Variants (ARMv7, AMD64, AArch64)

### Changed
- Alpine 3.13 base
- Compile Pixz as opposed to relying on testing repository
- MSSQL support only available under AMD64. Container exits if any other platform is detected when MSSQL is set to be backed up.

## 2.4.0 2020-12-07 <dave at tiredofit dot ca>

### Added
- Switch back to packages for PostgreSQL (now 13.1)

## 2.3.2 2020-11-14 <dave at tiredofit dot ca>

### Changed
- Reapply S6-Overlay into filesystem, as the PostgreSQL build was removing S6 files due to edge containing the S6 overlay

## 2.3.1 2020-11-11 <bambi73@github>

### Fixed
- Multiple InfluxDB databases not being backed up correctly

## 2.3.0 2020-10-15 <dave at tiredofit dot ca>

### Added
- Microsoft SQL Server support (experimental)

### Changed
- Compiled PostgreSQL 13 from source to back up psql/13 hosts

## 2.2.2 2020-09-22 <tpansino@github>

### Fixed
- Patch for 2.2.0 release fixing Docker Secrets support, which was skipping the password check

## 2.2.1 2020-09-17 <alwynpan@github>

### Fixed
- On-demand/manual backup with `backup-now` was throwing errors about not being able to find a proper date

## 2.2.0 2020-09-14 <alwynpan@github>

### Fixed
- Allow MariaDB and MongoDB to be used with no username and password while still allowing Docker Secrets
- Changed source of Alpine package repositories

## 2.1.1 2020-09-01 <zicklag@github>

### Fixed
- Add eval to POST_SCRIPT execution

## 2.1.0 2020-08-29 <dave at tiredofit dot ca>

### Added
- Add Exit Code variable to be used for custom scripts - see README.md for placement
- Add POST_SCRIPT environment variable to execute a command instead of relying on a custom script

## 2.0.0 2020-06-17 <dave at tiredofit dot ca>

### Added
- Reworked compression routines to remove dependency on temporary files
- Changed the way that MongoDB compression works - only supports GZ going forward

### Changed
- Code cleanup (removed function prefixes, added verbosity)

### Reverted
- Removed Rethink support

## 1.23.0 2020-06-15 <dave at tiredofit dot ca>

### Added
- Add zstd compression support
- Add choice of compression level

## 1.22.0 2020-06-10 <dave at tiredofit dot ca>

### Added
- Added EXTRA_OPTS variable to all backup commands to pass extra arguments

## 1.21.3 2020-06-10 <dave at tiredofit dot ca>

### Changed
- Fix `backup-now` manual script due to services.available change

## 1.21.2 2020-06-08 <dave at tiredofit dot ca>

### Added

Dockerfile

@@ -1,27 +1,28 @@
-FROM tiredofit/alpine:edge
+FROM tiredofit/alpine:3.13
 LABEL maintainer="Dave Conroy (dave at tiredofit dot ca)"

 ### Set Environment Variables
-ENV ENABLE_CRON=FALSE \
+ENV MSSQL_VERSION=17.5.2.1-1 \
+    ENABLE_CRON=FALSE \
     ENABLE_SMTP=FALSE \
-    ENABLE_ZABBIX=FALSE \
+    ENABLE_ZABBIX=TRUE \
     ZABBIX_HOSTNAME=db-backup

 ### Dependencies
 RUN set -ex && \
-    echo "@testing http://nl.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
     apk update && \
     apk upgrade && \
     apk add -t .db-backup-build-deps \
        build-base \
        bzip2-dev \
        git \
+       libarchive-dev \
        xz-dev \
        && \
     \
-    apk add -t .db-backup-run-deps \
+    apk add --no-cache -t .db-backup-run-deps \
        bzip2 \
        influxdb \
+       libarchive \
        mariadb-client \
        mongodb-tools \
        libressl \
@@ -29,19 +30,34 @@ RUN set -ex && \
        postgresql \
        postgresql-client \
        redis \
+       sqlite \
        xz \
+       zstd \
        && \
     \
-    apk add \
-       pixz@testing \
-       && \
-    \
+    cd /usr/src && \
+    \
+    apkArch="$(apk --print-arch)"; \
+    case "$apkArch" in \
+        x86_64) mssql=true ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ;; \
+        *) echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ;; \
+    esac; \
     mkdir -p /usr/src/pbzip2 && \
-    curl -ssL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
+    curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
     cd /usr/src/pbzip2 && \
     make && \
     make install && \
-    \
+    mkdir -p /usr/src/pixz && \
+    curl -sSL https://github.com/vasi/pixz/releases/download/v1.0.7/pixz-1.0.7.tar.xz | tar xvfJ - --strip 1 -C /usr/src/pixz && \
+    cd /usr/src/pixz && \
+    ./configure \
+        --prefix=/usr \
+        --sysconfdir=/etc \
+        --localstatedir=/var \
+        && \
+    make && \
+    make install && \
+    \
     ### Cleanup
     apk del .db-backup-build-deps && \
     rm -rf /usr/src/* && \
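The CI workflow above builds this Dockerfile for four platforms. To reproduce a comparable multi-arch build locally, something along these lines should work, assuming Docker Buildx is available and you substitute your own registry tag:

```bash
# Sketch: local multi-arch build mirroring the CI's Build step (tag is a placeholder).
docker buildx create --use
docker buildx build \
  --platform linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64 \
  --tag registry.example.com/db-backup:local \
  --push \
  .
```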

README.md (153 lines changed)

@@ -1,23 +1,22 @@
 # hub.docker.com/r/tiredofit/db-backup

 [![Build Status](https://img.shields.io/docker/build/tiredofit/db-backup.svg)](https://hub.docker.com/r/tiredofit/db-backup)
 [![Docker Pulls](https://img.shields.io/docker/pulls/tiredofit/db-backup.svg)](https://hub.docker.com/r/tiredofit/db-backup)
 [![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg)](https://hub.docker.com/r/tiredofit/db-backup)
 [![Docker Layers](https://images.microbadger.com/badges/image/tiredofit/db-backup.svg)](https://microbadger.com/images/tiredofit/db-backup)

-# Introduction
+## Introduction
 This will build a container for backing up multiple type of DB Servers
-Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis, Rethink servers.
+Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.

 * dump to local filesystem or backup to S3 Compatible services
 * select database user and password
 * backup all databases
 * choose to have an MD5 sum after backup for verification
 * delete old backups after specific amount of time
-* choose compression type (none, gz, bz, xz)
+* choose compression type (none, gz, bz, xz, zstd)
 * connect to any container running on the same system
 * select how often to run a dump
 * select when to start the first dump, whether time of day or relative to container start time
@@ -27,40 +26,50 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis, Rethink s
 [Changelog](CHANGELOG.md)

-# Authors
+## Authors
 - [Dave Conroy](https://github.com/tiredofit)

-# Table of Contents
-- [Introduction](#introduction)
-- [Changelog](CHANGELOG.md)
-- [Prerequisites](#prerequisites)
-- [Installation](#installation)
-- [Quick Start](#quick-start)
-- [Configuration](#configuration)
-- [Data Volumes](#data-volumes)
-- [Environment Variables](#environmentvariables)
-- [Maintenance](#maintenance)
+## Table of Contents
+- [hub.docker.com/r/tiredofit/db-backup](#hubdockercomrtiredofitdb-backup)
+- [Introduction](#introduction)
+- [Authors](#authors)
+- [Table of Contents](#table-of-contents)
+- [Prerequisites](#prerequisites)
+- [Installation](#installation)
+- [Quick Start](#quick-start)
+- [Configuration](#configuration)
+- [Data-Volumes](#data-volumes)
+- [Environment Variables](#environment-variables)
+- [Maintenance](#maintenance)
 - [Shell Access](#shell-access)
 - [Custom Scripts](#custom-scripts)
+- [Example Post Script](#example-post-script)
+- [$1=EXIT_CODE (After running backup routine)](#1exit_code-after-running-backup-routine)
+- [$2=DB_TYPE (Type of Backup)](#2db_type-type-of-backup)
+- [$3=DB_HOST (Backup Host)](#3db_host-backup-host)
+- [#4=DB_NAME (Name of Database backed up](#4db_name-name-of-database-backed-up)
+- [$5=DATE (Date of Backup)](#5date-date-of-backup)
+- [$6=TIME (Time of Backup)](#6time-time-of-backup)
+- [$7=BACKUP_FILENAME (Filename of Backup)](#7backup_filename-filename-of-backup)
+- [$8=FILESIZE (Filesize of backup)](#8filesize-filesize-of-backup)
+- [$9=MD5_RESULT (MD5Sum if enabled)](#9md5_result-md5sum-if-enabled)

-# Prerequisites
+## Prerequisites
 You must have a working DB server or container available for this to work properly, it does not provide server functionality!

-# Installation
+## Installation
 Automated builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and is the recommended
 method of installation.

 ```bash
 docker pull tiredofit/db-backup:latest
 ```

-# Quick Start
+### Quick Start
 * The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a working [docker-compose.yml](examples/docker-compose.yml) that can be modified for development or production use.
@@ -69,63 +78,65 @@ docker pull tiredofit/db-backup:latest
 > **NOTE**: If you are using this with a docker-compose file along with a seperate SQL container, take care not to set the variables to backup immediately, more so have it delay execution for a minute, otherwise you will get a failed first backup.

-# Configuration
+## Configuration

-## Data-Volumes
+### Data-Volumes
 The following directories are used for configuration and can be mapped for persistent storage.

 | Directory | Description |
-|-----------|-------------|
+| ------------------------ | ----------------------------------------------------------------------------------- |
 | `/backup` | Backups |
 | `/assets/custom-scripts` | *Optional* Put custom scripts in this directory to execute after backup operations |

-## Environment Variables
+### Environment Variables
 Along with the Environment Variables from the [Base image](https://hub.docker.com/r/tiredofit/alpine), below is the complete list of available options that can be used to customize your installation.

-| Parameter | Description |
-|-----------|-------------|
-| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi - Default `FILESYSTEM` |
-| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, or none `NONE` - Default `GZ` |
-| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `rethink` |
-| `DB_HOST` | Server Hostname e.g. `mariadb` |
-| `DB_NAME` | Schema Name e.g. `database` |
-| `DB_USER` | username for the database - use `root` to backup all MySQL of them. |
-| `DB_PASS` | (optional if DB doesn't require it) password for the database |
-| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided |
-| `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. |
-| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats |
-| | Absolute HHMM, e.g. `2330` or `0415` |
-| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half |
-| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. |
-| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. |
-| `MD5` | Generate MD5 Sum in Directory, `TRUE` or `FALSE` - Default `TRUE` |
-| `PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` - Default `TRUE` |
-| `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create Seperate DB Backups instead of all in one. - Default `FALSE` |
+| Parameter | Description |
+| ---------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi - Default `FILESYSTEM` |
+| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` - Default `GZ` |
+| `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - Default `3` |
+| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` |
+| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` |
+| `DB_NAME` | Schema Name e.g. `database` |
+| `DB_USER` | username for the database - use `root` to backup all MySQL of them. |
+| `DB_PASS` | (optional if DB doesn't require it) password for the database |
+| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided |
+| `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. |
+| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats |
+| | Absolute HHMM, e.g. `2330` or `0415` |
+| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half |
+| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. |
+| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. |
+| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. "--extra-command" |
+| `MD5` | Generate MD5 Sum in Directory, `TRUE` or `FALSE` - Default `TRUE` |
+| `PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` - Default `TRUE` |
+| `POST_SCRIPT` | Fill this variable in with a command to execute post the script backing up |
+| `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create Seperate DB Backups instead of all in one. - Default `FALSE` |
+
+When using compression with MongoDB, only `GZ` compression is possible.

 **Backing Up to S3 Compatible Services**

 If `BACKUP_LOCATION` = `S3` then the following options are used.

 | Parameter | Description |
-|-----------|-------------|
+| --------------- | --------------------------------------------------------------------------------------- |
 | `S3_BUCKET` | S3 Bucket name e.g. 'mybucket' |
 | `S3_HOSTNAME` | Hostname of S3 Server e.g "s3.amazonaws.com" - You can also include a port if necessary |
 | `S3_KEY_ID` | S3 Key ID |
 | `S3_KEY_SECRET` | S3 Key Secret |
 | `S3_PATH` | S3 Pathname to save to e.g. '`backup`' |
 | `S3_PROTOCOL` | Use either `http` or `https` to access service - Default `https` |
 | `S3_URI_STYLE` | Choose either `VIRTUALHOST` or `PATH` style - Default `VIRTUALHOST` |

 ## Maintenance
 Manual Backups can be performed by entering the container and typing `backup-now`

-### Shell Access
+#### Shell Access
 For debugging and maintenance purposes you may want access the containers shell.
@@ -138,23 +149,25 @@ docker exec -it (whatever your container name is e.g.) db-backup bash
 If you want to execute a custom script at the end of backup, you can drop bash scripts with the extension of `.sh` in this directory. See the following example to utilize:

 ````bash
 $ cat post-script.sh
 ##!/bin/bash

 ## Example Post Script
-## $1=DB_TYPE (Type of Backup)
-## $2=DB_HOST (Backup Host)
-## #3=DB_NAME (Name of Database backed up
-## $4=DATE (Date of Backup)
-## $5=TIME (Time of Backup)
-## $6=BACKUP_FILENAME (Filename of Backup)
-## $7=FILESIZE (Filesize of backup)
-## $8=MD5_RESULT (MD5Sum if enabled)
+## $1=EXIT_CODE (After running backup routine)
+## $2=DB_TYPE (Type of Backup)
+## $3=DB_HOST (Backup Host)
+## #4=DB_NAME (Name of Database backed up
+## $5=DATE (Date of Backup)
+## $6=TIME (Time of Backup)
+## $7=BACKUP_FILENAME (Filename of Backup)
+## $8=FILESIZE (Filesize of backup)
+## $9=MD5_RESULT (MD5Sum if enabled)

-echo "${1} Backup Completed on ${2} for ${3} on ${4} ${5}. Filename: ${6} Size: ${7} bytes MD5: ${8}"
+echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ${6}. Filename: ${7} Size: ${8} bytes MD5: ${9}"
 ````

 Outputs the following on the console:

-`mysql Backup Completed on example-db for example on 2020-04-22 05:19:10. Filename: mysql_example_example-db_20200422-051910.sql.bz2 Size: 7795 bytes MD5: 952fbaafa30437494fdf3989a662cd40`
+`0 mysql Backup Completed on example-db for example on 2020-04-22 05:19:10. Filename: mysql_example_example-db_20200422-051910.sql.bz2 Size: 7795 bytes MD5: 952fbaafa30437494fdf3989a662cd40`

 If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`
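As a quick illustration of the variables documented above, a minimal run against a MariaDB host could look like the following sketch; every value is a placeholder, and the examples folder's docker-compose.yml remains the reference setup:

```bash
# Sketch: daily MariaDB backup, delayed one minute so the DB container can come up first.
docker run -d --name db-backup \
  -v /local/backups:/backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=database \
  -e DB_USER=root \
  -e DB_PASS=password \
  -e DB_DUMP_FREQ=1440 \
  -e DB_DUMP_BEGIN=+1 \
  -e COMPRESSION=GZ \
  tiredofit/db-backup:latest
```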

post-script.sh (example post script)

@@ -1,13 +1,14 @@
 ##!/bin/bash

 ## Example Post Script
-## $1=DB_TYPE (Type of Backup)
-## $2=DB_HOST (Backup Host)
-## #3=DB_NAME (Name of Database backed up
-## $4=DATE (Date of Backup)
-## $5=TIME (Time of Backup)
-## $6=BACKUP_FILENAME (Filename of Backup)
-## $7=FILESIZE (Filesize of backup)
-## $8=MD5_RESULT (MD5Sum if enabled)
+## $1=EXIT_CODE (After running backup routine)
+## $2=DB_TYPE (Type of Backup)
+## $3=DB_HOST (Backup Host)
+## #4=DB_NAME (Name of Database backed up
+## $5=DATE (Date of Backup)
+## $6=TIME (Time of Backup)
+## $7=BACKUP_FILENAME (Filename of Backup)
+## $8=FILESIZE (Filesize of backup)
+## $9=MD5_RESULT (MD5Sum if enabled)

-echo "${1} Backup Completed on ${2} for ${3} on ${4} ${5}. Filename: ${6} Size: ${7} bytes MD5: ${8}"
+echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ${6}. Filename: ${7} Size: ${8} bytes MD5: ${9}"
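To have the container pick up a script like this, it only needs to be readable under `/assets/custom-scripts`. A sketch, with host paths as placeholders:

```bash
# Sketch: mount the example post script into the directory scanned after each backup.
docker run -d --name db-backup \
  -v /local/backups:/backup \
  -v /local/scripts/post-script.sh:/assets/custom-scripts/post-script.sh:ro \
  tiredofit/db-backup:latest
```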

10-db-backup service run script

@@ -13,27 +13,76 @@ fi
 ### Sanity Test
 sanity_var DB_TYPE "Database Type"
 sanity_var DB_HOST "Database Host"
-file_env 'DB_USER'
-file_env 'DB_PASS'

+### Set the Database Type
+dbtype=${DB_TYPE}
+case "$dbtype" in
+    "couch" | "couchdb" | "COUCH" | "COUCHDB" )
+        dbtype=couch
+        dbport=${DB_PORT:-5984}
+        file_env 'DB_USER'
+        file_env 'DB_PASS'
+    ;;
+    "influx" | "influxdb" | "INFLUX" | "INFLUXDB" )
+        dbtype=influx
+        dbport=${DB_PORT:-8088}
+        file_env 'DB_USER'
+        file_env 'DB_PASS'
+    ;;
+    "mongo" | "mongodb" | "MONGO" | "MONGODB" )
+        dbtype=mongo
+        dbport=${DB_PORT:-27017}
+        [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
+        [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+    ;;
+    "mysql" | "MYSQL" | "mariadb" | "MARIADB")
+        dbtype=mysql
+        dbport=${DB_PORT:-3306}
+        [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+    ;;
+    "mssql" | "MSSQL" | "microsoftsql" | "MICROSOFTSQL")
+        apkArch="$(apk --print-arch)"; \
+        case "$apkArch" in
+            x86_64) mssql=true ;;
+            *) print_error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;;
+        esac
+        dbtype=mssql
+        dbport=${DB_PORT:-1433}
+    ;;
+    "postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" )
+        dbtype=pgsql
+        dbport=${DB_PORT:-5432}
+        [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+    ;;
+    "redis" | "REDIS" )
+        dbtype=redis
+        dbport=${DB_PORT:-6379}
+        [[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
+    ;;
+    "sqlite" | "sqlite3" | "SQLITE" | "SQLITE3" )
+        dbtype=sqlite3
+    ;;
+esac

 ### Set Defaults
 BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
 COMPRESSION=${COMPRESSION:-GZ}
+COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
 DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
 DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
 DB_DUMP_TARGET=${DB_DUMP_TARGET:-/backup}
-DBHOST=${DB_HOST}
-DBNAME=${DB_NAME}
-DBPASS=${DB_PASS}
-DBTYPE=${DB_TYPE}
-DBUSER=${DB_USER}
+dbhost=${DB_HOST}
+dbname=${DB_NAME}
+dbpass=${DB_PASS}
+dbuser=${DB_USER}
 MD5=${MD5:-TRUE}
 PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
 SIZE_VALUE=${SIZE_VALUE:-"bytes"}
 SPLIT_DB=${SPLIT_DB:-FALSE}
-TMPDIR=/tmp/backups
+tmpdir=/tmp/backups

-if [ "BACKUP_TYPE" = "S3" ] || [ "BACKUP_TYPE" = "s3" ] || [ "BACKUP_TYPE" = "MINIO" ] || [ "BACKUP_TYPE" = "minio" ] ; then
+if [ "$BACKUP_TYPE" = "S3" ] || [ "$BACKUP_TYPE" = "s3" ] || [ "$BACKUP_TYPE" = "MINIO" ] || [ "$BACKUP_TYPE" = "minio" ] ; then
     S3_PROTOCOL=${S3_PROTOCOL:-"https"}
     sanity_var S3_HOST "S3 Host"
     sanity_var S3_BUCKET "S3 Bucket"
@@ -43,7 +92,6 @@ if [ "BACKUP_TYPE" = "S3" ] || [ "BACKUP_TYPE" = "s3" ] || [ "BACKUP_TYPE" = "MI
     sanity_var S3_PATH "S3 Path"
     file_env 'S3_KEY_ID'
     file_env 'S3_KEY_SECRET'
 fi

 if [ "$1" = "NOW" ]; then
@@ -52,137 +100,138 @@ if [ "$1" = "NOW" ]; then
 fi

 ### Set Compression Options
-if var_true $PARALLEL_COMPRESSION ; then
-    BZIP="pbzip2"
-    GZIP="pigz"
-    XZIP="pixz"
+if var_true "$PARALLEL_COMPRESSION" ; then
+    bzip="pbzip2 -${COMPRESSION_LEVEL}"
+    gzip="pigz -${COMPRESSION_LEVEL}"
+    xzip="pixz -${COMPRESSION_LEVEL}"
+    zstd="zstd --rm -${COMPRESSION_LEVEL}"
 else
-    BZIP="bzip2"
-    GZIP="gzip"
-    XZIP="xz"
+    bzip="bzip2 -${COMPRESSION_LEVEL}"
+    gzip="gzip -${COMPRESSION_LEVEL}"
+    xzip="xz -${COMPRESSION_LEVEL} "
+    zstd="zstd --rm -${COMPRESSION_LEVEL}"
 fi

-### Set the Database Type
-case "$DBTYPE" in
-    "couch" | "couchdb" | "COUCH" | "COUCHDB" )
-        DBTYPE=couch
-        DBPORT=${DB_PORT:-5984}
-    ;;
-    "influx" | "influxdb" | "INFLUX" | "INFLUXDB" )
-        DBTYPE=influx
-        DBPORT=${DB_PORT:-8088}
-    ;;
-    "mongo" | "mongodb" | "MONGO" | "MONGODB" )
-        DBTYPE=mongo
-        DBPORT=${DB_PORT:-27017}
-        [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DBUSER}"
-        [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DBPASS}"
-        [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DBNAME}"
-    ;;
-    "mysql" | "MYSQL" | "mariadb" | "MARIADB")
-        DBTYPE=mysql
-        DBPORT=${DB_PORT:-3306}
-        [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DBPASS}
-    ;;
-    "postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" )
-        DBTYPE=pgsql
-        DBPORT=${DB_PORT:-5432}
-        [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DBPASS}"
-    ;;
-    "redis" | "REDIS" )
-        DBTYPE=redis
-        DBPORT=${DB_PORT:-6379}
-        [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DBPASS}"
-    ;;
-    "rethink" | "RETHINK" )
-        DBTYPE=rethink
-        DBPORT=${DB_PORT:-28015}
-        [[ ( -n "${DB_PASS}" ) ]] && echo $DB_PASS>/tmp/.rethink.auth; RETHINK_PASS_STR=" --password-file /tmp/.rethink.auth"
-        [[ ( -n "${DB_NAME}" ) ]] && RETHINK_DB_STR=" -e ${DBNAME}"
-    ;;
-esac
+### Set the Database Authentication Details
+case "$dbtype" in
+    "mongo" )
+        [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
+        [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
+        [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
+    ;;
+    "mysql" )
+        [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
+    ;;
+    "postgres" )
+        [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
+    ;;
+    "redis" )
+        [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
+    ;;
+esac

 ### Functions
-function backup_couch() {
-    TARGET=couch_${DBNAME}_${DBHOST}_${now}.txt
-    curl -X GET http://${DBHOST}:${DBPORT}/${DBNAME}/_all_docs?include_docs=true >${TMPDIR}/${TARGET}
-    generate_md5
+backup_couch() {
+    target=couch_${dbname}_${dbhost}_${now}.txt
     compression
+    curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true | $dumpoutput > ${tmpdir}/${target}
+    exit_code=$?
+    generate_md5
     move_backup
 }

-function backup_mysql() {
-    if var_true $SPLIT_DB ; then
-        DATABASES=`mysql -h ${DBHOST} -P $DBPORT -u$DBUSER --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema`
-        for db in $DATABASES; do
-            if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
-                echo "** [db-backup] Dumping database: $db"
-                TARGET=mysql_${db}_${DBHOST}_${now}.sql
-                mysqldump --max-allowed-packet=512M -h $DBHOST -P $DBPORT -u$DBUSER --databases $db > ${TMPDIR}/${TARGET}
-                generate_md5
-                compression
-                move_backup
-            fi
-        done
-    else
-        mysqldump --max-allowed-packet=512M -A -h $DBHOST -P $DBPORT -u$DBUSER > ${TMPDIR}/${TARGET}
-        generate_md5
-        compression
-        move_backup
-    fi
-}
-
-function backup_influx() {
+backup_influx() {
+    if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
+        :
+    else
+        print_notice "Compressing InfluxDB backup with gzip"
+        influx_compression="-portable"
+    fi
     for DB in $DB_NAME; do
-        influxd backup -database $DB -host ${DBHOST}:${DBPORT} ${TMPDIR}/${TARGET}
+        target=influx_${DB}_${dbhost}_${now}
+        influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${tmpdir}/${target}
+        exit_code=$?
         generate_md5
-        compression
         move_backup
     done
 }

-function backup_mongo() {
-    mongodump --out ${TMPDIR}/${TARGET} --host ${DBHOST} --port ${DBPORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
-    cd ${TMPDIR}
-    tar cf ${TARGET}.tar ${TARGET}/*
-    TARGET=${TARGET}.tar
+backup_mongo() {
+    if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
+        target=${dbtype}_${dbname}_${dbhost}_${now}.archive
+    else
+        print_notice "Compressing MongoDB backup with gzip"
+        target=${dbtype}_${dbname}_${dbhost}_${now}.archivegz
+        mongo_compression="--gzip"
+    fi
+    mongodump --archive=${tmpdir}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+    exit_code=$?
+    cd ${tmpdir}
     generate_md5
-    compression
     move_backup
 }

+backup_mssql() {
+    target=mssql_${dbname}_${dbhost}_${now}.bak
+    /opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${tmpdir}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+}
+
+backup_mysql() {
+    if var_true "$SPLIT_DB" ; then
+        DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema)
+        for db in $DATABASES; do
+            if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
+                print_notice "Dumping MariaDB database: $db"
+                target=mysql_${db}_${dbhost}_${now}.sql
+                compression
+                mysqldump --max-allowed-packet=512M -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} --databases $db | $dumpoutput > ${tmpdir}/${target}
+                exit_code=$?
+                generate_md5
+                move_backup
+            fi
+        done
+    else
+        compression
+        mysqldump --max-allowed-packet=512M -A -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
+        exit_code=$?
+        generate_md5
+        move_backup
+    fi
+}
+
-function backup_pgsql() {
+backup_pgsql() {
     if var_true $SPLIT_DB ; then
-        export PGPASSWORD=${DBPASS}
-        DATABASES=`psql -h $DBHOST -U $DBUSER -p ${DBPORT} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' `
+        export PGPASSWORD=${dbpass}
+        DATABASES=$(psql -h $dbhost -U $dbuser -p ${dbport} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
         for db in $DATABASES; do
             print_info "Dumping database: $db"
-            TARGET=pgsql_${db}_${DBHOST}_${now}.sql
-            pg_dump -h ${DBHOST} -p ${DBPORT} -U ${DBUSER} $db > ${TMPDIR}/${TARGET}
-            generate_md5
+            target=pgsql_${db}_${dbhost}_${now}.sql
             compression
+            pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
+            exit_code=$?
+            generate_md5
             move_backup
         done
     else
-        export PGPASSWORD=${DBPASS}
-        pg_dump -h ${DBHOST} -U ${DBUSER} -p ${DBPORT} ${DBNAME} > ${TMPDIR}/${TARGET}
-        generate_md5
+        export PGPASSWORD=${dbpass}
         compression
+        pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
+        exit_code=$?
+        generate_md5
         move_backup
     fi
 }

-function backup_redis() {
-    TARGET=redis_${db}_${DBHOST}_${now}.rdb
-    echo bgsave | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} --rdb ${TMPDIR}/${TARGET}
+backup_redis() {
+    target=redis_${db}_${dbhost}_${now}.rdb
+    echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${tmpdir}/${target} ${EXTRA_OPTS}
     print_info "Dumping Redis - Flushing Redis Cache First"
     sleep 10
     try=5
     while [ $try -gt 0 ] ; do
-        saved=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
-        ok=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
+        saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
+        ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
         if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
             print_info "Redis Backup Complete"
         fi
@@ -195,116 +244,150 @@ function backup_redis() {
     move_backup
 }

-function backup_rethink() {
-    TARGET=rethink_${db}_${DBHOST}_${now}.tar.gz
-    print_info "Dumping rethink Database: $db"
-    rethinkdb dump -f ${TMPDIR}/${TARGET} -c ${DBHOST}:${DBPORT} ${RETHINK_PASS_STR} ${RETHINK_DB_STR}
+backup_sqlite3() {
+    db=$(basename "$dbhost")
+    db="${db%.*}"
+    target=sqlite3_${db}_${now}.sqlite3
+    compression
+    print_info "Dumping sqlite3 database: ${dbhost}"
+    sqlite3 "${dbhost}" ".backup '${tmpdir}/backup.sqlite3'"
+    exit_code=$?
+    cat "${tmpdir}/backup.sqlite3" | $dumpoutput > "${tmpdir}/${target}"
+    generate_md5
     move_backup
 }

-function check_availability() {
+check_availability() {
     ### Set the Database Type
-    case "$DBTYPE" in
+    case "$dbtype" in
         "couch" )
             COUNTER=0
-            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
+            while ! (nc -z ${dbhost} ${dbport}) ; do
                 sleep 5
-                let COUNTER+=5
-                print_warn "CouchDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( COUNTER+=5 ))
+                print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
             done
         ;;
         "influx" )
             COUNTER=0
-            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
+            while ! (nc -z ${dbhost} ${dbport}) ; do
                 sleep 5
-                let COUNTER+=5
-                print_warn "InfluxDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( COUNTER+=5 ))
+                print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
            done
         ;;
         "mongo" )
             COUNTER=0
-            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
+            while ! (nc -z ${dbhost} ${dbport}) ; do
                 sleep 5
-                let COUNTER+=5
-                print_warn "Mongo Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( COUNTER+=5 ))
+                print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
             done
         ;;
         "mysql" )
             COUNTER=0
             while true; do
-                mysqlcmd='mysql -u'${DBUSER}' -P '${DBPORT}' -h '${DBHOST}' -p'${DBPASS}
-                out="`$mysqlcmd -e "SELECT COUNT(*) FROM information_schema.FILES;" 2>&1`"
+                mysqlcmd='mysql -u'${dbuser}' -P '${dbport}' -h '${dbhost}' -p'${dbpass}
+                out="$($mysqlcmd -e "SELECT COUNT(*) FROM information_schema.FILES;" 2>&1)"
                 echo "$out" | grep -E "COUNT|Enter" 2>&1 > /dev/null
                 if [ $? -eq 0 ]; then
                     :
                     break
                 fi
-                print_warn "MySQL/MariaDB Server "$DBHOST" is not accessible, retrying.. ($COUNTER seconds so far)"
+                print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
                 sleep 5
-                let COUNTER+=5
+                (( COUNTER+=5 ))
             done
         ;;
+        "mssql" )
+            COUNTER=0
+            while ! (nc -z ${dbhost} ${dbport}) ; do
+                sleep 5
+                (( COUNTER+=5 ))
+                print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+            done
+        ;;
         "pgsql" )
-            # Wait until mongo logs that it's ready (or timeout after 60s)
             COUNTER=0
-            export PGPASSWORD=${DBPASS}
-            until pg_isready --dbname=${DBNAME} --host=${DBHOST} --port=${DBPORT} --username=${DBUSER} -q
+            export PGPASSWORD=${dbpass}
+            until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
             do
                 sleep 5
-                let COUNTER+=5
-                print_warn "Postgres Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( COUNTER+=5 ))
+                print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
             done
         ;;
         "redis" )
             COUNTER=0
-            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
+            while ! (nc -z "${dbhost}" "${dbport}") ; do
                 sleep 5
-                let COUNTER+=5
-                print_warn "Redis Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( COUNTER+=5 ))
+                print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
             done
         ;;
-        "rethink" )
-            COUNTER=0
-            while ! (nc -z ${DBHOST} ${DBPORT}) ; do
-                sleep 5
-                let COUNTER+=5
-                print_warn "RethinkDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
-            done
-        ;;
+        "sqlite3" )
+            if [[ ! -e "${dbhost}" ]]; then
+                print_error "File '${dbhost}' does not exist."
+                exit_code=2
+                exit $exit_code
+            elif [[ ! -f "${dbhost}" ]]; then
+                print_error "File '${dbhost}' is not a file."
+                exit_code=2
+                exit $exit_code
+            elif [[ ! -r "${dbhost}" ]]; then
+                print_error "File '${dbhost}' is not readable."
+                exit_code=2
+                exit $exit_code
+            fi
+        ;;
     esac
 }

-function compression() {
+compression() {
     case "$COMPRESSION" in
         "GZ" | "gz" | "gzip" | "GZIP")
-            $GZIP ${TMPDIR}/${TARGET}
-            TARGET=${TARGET}.gz
+            print_notice "Compressing backup with gzip"
+            target=${target}.gz
+            dumpoutput="$gzip "
         ;;
         "BZ" | "bz" | "bzip2" | "BZIP2" | "bzip" | "BZIP" | "bz2" | "BZ2")
-            $BZIP ${TMPDIR}/${TARGET}
-            TARGET=${TARGET}.bz2
+            print_notice "Compressing backup with bzip2"
+            target=${target}.bz2
+            dumpoutput="$bzip "
         ;;
         "XZ" | "xz" | "XZIP" | "xzip" )
-            $XZIP ${TMPDIR}/${TARGET}
-            TARGET=${TARGET}.xz
+            print_notice "Compressing backup with xzip"
+            target=${target}.xz
+            dumpoutput="$xzip "
        ;;
+        "ZSTD" | "zstd" | "ZST" | "zst" )
+            print_notice "Compressing backup with zstd"
+            target=${target}.zst
+            dumpoutput="$zstd "
+        ;;
         "NONE" | "none" | "FALSE" | "false")
+            dumpoutput="cat "
         ;;
     esac
 }

-function generate_md5() {
-    if var_true $MD5 ; then
-        cd $TMPDIR
-        md5sum ${TARGET} > ${TARGET}.md5
-        MD5VALUE=$(md5sum ${TARGET} | awk '{ print $1}')
+generate_md5() {
+    if var_true "$MD5" ; then
+        print_notice "Generating MD5 for ${target}"
+        cd $tmpdir
+        md5sum "${target}" > "${target}".md5
+        MD5VALUE=$(md5sum "${target}" | awk '{ print $1}')
     fi
 }

-function move_backup() {
+move_backup() {
     case "$SIZE_VALUE" in
         "b" | "bytes" )
             SIZE_VALUE=1
         ;;
         "[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
             SIZE_VALUE="-h"
@@ -314,31 +397,33 @@ function move_backup() {
         ;;
     esac
     if [ "$SIZE_VALUE" = "1" ] ; then
-        FILESIZE=$(stat -c%s "${DB_DUMP_TARGET}/${TARGET}")
+        FILESIZE=$(stat -c%s "${tmpdir}/${target}")
+        print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
     else
-        FILESIZE=$(du -h "${DB_DUMP_TARGET}/${TARGET}" | awk '{ print $1}')
+        FILESIZE=$(du -h "${tmpdir}/${target}" | awk '{ print $1}')
+        print_notice "Backup of ${target} created with the size of ${FILESIZE}"
     fi

     case "${BACKUP_LOCATION}" in
         "FILE" | "file" | "filesystem" | "FILESYSTEM" )
-            mkdir -p ${DB_DUMP_TARGET}
-            mv ${TMPDIR}/*.md5 ${DB_DUMP_TARGET}/
-            mv ${TMPDIR}/${TARGET} ${DB_DUMP_TARGET}/${TARGET}
+            mkdir -p "${DB_DUMP_TARGET}"
+            mv ${tmpdir}/*.md5 "${DB_DUMP_TARGET}"/
+            mv ${tmpdir}/"${target}" "${DB_DUMP_TARGET}"/"${target}"
         ;;
         "S3" | "s3" | "MINIO" | "minio" )
             s3_content_type="application/octet-stream"
-            if [ "$S3_URI_STYLE" = "VIRTUALHOST" ] || [ "$S3_URI_STYLE" = "VHOST" ] [ "$S3_URI_STYLE" = "virtualhost" ] [ "$S3_URI_STYLE" = "vhost" ] ; then
+            if [ "$S3_URI_STYLE" = "VIRTUALHOST" ] || [ "$S3_URI_STYLE" = "VHOST" ] || [ "$S3_URI_STYLE" = "virtualhost" ] || [ "$S3_URI_STYLE" = "vhost" ] ; then
                 s3_url="${S3_BUCKET}.${S3_HOST}"
             else
                 s3_url="${S3_HOST}/${S3_BUCKET}"
             fi

-            if var_true $MD5 ; then
+            if var_true "$MD5" ; then
                 s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
-                s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}.md5" | base64)"
-                sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
-                print_debug "Uploading ${TARGET}.md5 to S3"
-                curl -T "${TMPDIR}/${TARGET}.md5" ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET}.md5 \
+                s3_md5="$(libressl md5 -binary < "${tmpdir}/${target}.md5" | base64)"
+                sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${target}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
+                print_debug "Uploading ${target}.md5 to S3"
+                curl -T "${tmpdir}/${target}.md5" "${S3_PROTOCOL}"://"${s3_url}"/"${S3_PATH}"/"${target}".md5 \
                     -H "Date: $date" \
                     -H "Authorization: AWS ${S3_KEY_ID}:$sig" \
                     -H "Content-Type: ${s3_content_type}" \
@@ -346,106 +431,120 @@ function move_backup() {
             fi

             s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
-            s3_md5="$(libressl md5 -binary < "${TMPDIR}/${TARGET}" | base64)"
-            sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${TARGET}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
-            print_debug "Uploading ${TARGET} to S3"
-            curl -T ${TMPDIR}/${TARGET} ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${TARGET} \
+            s3_md5="$(libressl md5 -binary < "${tmpdir}/${target}" | base64)"
+            sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${target}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
+            print_debug "Uploading ${target} to S3"
+            curl -T ${tmpdir}/"${target}" "${S3_PROTOCOL}"://"${s3_url}"/"${S3_PATH}"/"${target}" \
                 -H "Date: $s3_date" \
                 -H "Authorization: AWS ${S3_KEY_ID}:$sig" \
                 -H "Content-Type: ${s3_content_type}" \
                 -H "Content-MD5: ${s3_md5}"

-            rm -rf ${TMPDIR}/*.md5
-            rm -rf ${TMPDIR}/${TARGET}
+            rm -rf ${tmpdir}/*.md5
+            rm -rf ${tmpdir}/"${target}"
         ;;
     esac
 }

 ### Container Startup
-print_info "Initialized on `date`"
+print_debug "Backup routines Initialized on $(date)"

 ### Wait for Next time to start backup
-current_time=$(date +"%s")
-today=$(date +"%Y%m%d")
-
-if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
-    waittime=$(( ${BASH_REMATCH[1]} * 60 ))
-else
-    target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
-    if [[ "$target_time" < "$current_time" ]]; then
-        target_time=$(($target_time + 24*60*60))
-    fi
-    waittime=$(($target_time - $current_time))
-fi
-
-sleep $waittime
+if [ "$1" != "NOW" ]; then
+    current_time=$(date +"%s")
+    today=$(date +"%Y%m%d")
+
+    if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
+        waittime=$(( ${BASH_REMATCH[1]} * 60 ))
+    else
+        target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
+        if [[ "$target_time" < "$current_time" ]]; then
+            target_time=$(($target_time + 24*60*60))
+        fi
+        waittime=$(($target_time - $current_time))
+    fi
+    print_notice "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
+    sleep $waittime
+fi

 ### Commence Backup
 while true; do
     # make sure the directory exists
-    mkdir -p $TMPDIR
+    mkdir -p $tmpdir

     ### Define Target name
     now=$(date +"%Y%m%d-%H%M%S")
     now_time=$(date +"%H:%M:%S")
     now_date=$(date +"%Y-%m-%d")
-    TARGET=${DBTYPE}_${DBNAME}_${DBHOST}_${now}.sql
+    target=${dbtype}_${dbname}_${dbhost}_${now}.sql

     ### Take a Dump
-    case "$DBTYPE" in
+    case "$dbtype" in
         "couch" )
             check_availability
             backup_couch
         ;;
         "influx" )
             check_availability
             backup_influx
         ;;
+        "mssql" )
+            check_availability
+            backup_mssql
+        ;;
         "mysql" )
             check_availability
             backup_mysql
         ;;
         "mongo" )
             check_availability
             backup_mongo
         ;;
         "pgsql" )
             check_availability
             backup_pgsql
         ;;
         "redis" )
             check_availability
             backup_redis
         ;;
-        "rethink" )
+        "sqlite3" )
             check_availability
-            backup_rethink
+            backup_sqlite3
         ;;
     esac

     ### Zabbix
-    if var_true $ENABLE_ZABBIX ; then
-        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${TARGET}`
-        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${TARGET} +'%s'`
+    if var_true "$ENABLE_ZABBIX" ; then
+        print_notice "Sending Backup Statistics to Zabbix"
+        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
+        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
     fi

     ### Automatic Cleanup
     if [[ -n "$DB_CLEANUP_TIME" ]]; then
-        find $DB_DUMP_TARGET/ -mmin +$DB_CLEANUP_TIME -iname "*" -exec rm {} \;
+        print_notice "Cleaning up old backups"
+        find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
     fi

+    if [ -n "$POST_SCRIPT" ] ; then
+        print_notice "Found POST_SCRIPT environment variable. Executing"
+        eval "${POST_SCRIPT}"
+    fi
+
     ### Post Backup Custom Script Support
     if [ -d /assets/custom-scripts/ ] ; then
-        print_info "Found Custom Scripts to Execute"
+        print_notice "Found Custom Filesystem Scripts to Execute"
         for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
-            print_info "Running Script ${f}"
-            ## script DB_TYPE DB_HOST DB_NAME DATE BACKUP_FILENAME FILESIZE MD5_VALUE
-            chmod +x ${f}
-            ${f} "${DBTYPE}" "${DBHOST}" "${DBNAME}" "${now_date}" "${now_time}" "${TARGET}" "${FILESIZE}" "${MD5VALUE}"
+            print_notice "Running Script ${f}"
+            ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME DATE BACKUP_FILENAME FILESIZE MD5_VALUE
+            chmod +x "${f}"
+            ${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${now_date}" "${now_time}" "${target}" "${FILESIZE}" "${MD5VALUE}"
         done
     fi

     ### Go back to Sleep until next Backup time
     if var_true $MANUAL ; then
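The recurring `compression` / `$dumpoutput` pattern above is the 2.0.0 rework that removed temporary files: `compression` only selects a filter command and the final file name, and each backup function pipes its dump straight through that filter. A stripped-down sketch of the idea (a simplification for illustration, not the full script):

```bash
#!/usr/bin/env bash
# Minimal sketch of the streaming-compression pattern used above.
COMPRESSION=${COMPRESSION:-GZ}
tmpdir=/tmp/backups

compression() {
    # Pick the filter and adjust the target name; the dump itself runs later.
    case "$COMPRESSION" in
        GZ)   target=${target}.gz ; dumpoutput="gzip" ;;
        NONE) dumpoutput="cat" ;;
    esac
}

mkdir -p "$tmpdir"
target=mysql_example_$(date +"%Y%m%d-%H%M%S").sql
compression
# The dump is compressed in flight; nothing is written uncompressed first.
mysqldump -A | $dumpoutput > "${tmpdir}/${target}"
```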

backup-now (manual backup script)

@@ -1,4 +1,4 @@
 #!/usr/bin/with-contenv bash

 echo '** Performing Manual Backup'
-/etc/s6/services/10-db-backup/run NOW
+/etc/services.available/10-db-backup/run NOW
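With the corrected services.available path, a manual backup can still be triggered from the host as the README describes; for example:

```bash
# Run an immediate backup inside a running container (container name is a placeholder).
docker exec -it db-backup backup-now
```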