commit f1d875bceb84907cae3b2966b0f127dc62c5c315 Author: Dave Conroy Date: Wed Sep 13 18:29:01 2017 -0700 1.0 - Initial Relase diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..46eb37f --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,3 @@ +## 1.0 - 2017-09-14 - dave at tiredofit dot ca +* Initial Release + diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..c76bcab --- /dev/null +++ b/Dockerfile @@ -0,0 +1,25 @@ +FROM tiredofit/alpine:edge +LABEL maintainer="Dave Conroy (dave at tiredofit dot ca)" + +### Set Environment Variables + ENV ENABLE_SMTP=FALSE + +### Dependencies + RUN echo "@testing http://nl.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \ + apk update && \ + apk add \ + bzip2 \ + influxdb@testing \ + mongodb-tools \ + mysql-client \ + postgresql-client \ + redis \ + xz && \ + rm -rf /var/cache/apk/* + +### S6 Setup + ADD install/s6 /etc/s6 + +### Entrypoint Configuration + ENTRYPOINT ["/init"] + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..8084a12 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Dave Conroy + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..d00887e --- /dev/null +++ b/README.md @@ -0,0 +1,108 @@ +# tiredofit/db-backup + +# Introduction + +This will build a container for backing up multiple type of DB Servers + +Currently backs up InfluxDB, MySQL, MongoDB Postgres, Redis, Rethink servers. + +* dump to local filesystem +* select database user and password +* backup all databases +* choose to have an MD5 sum after backup for verification +* delete old backups after specific amount of time +* choose compression type (none, gz, bz, xz) +* connect to any container running on the same system +* select how often to run a dump +* select when to start the first dump, whether time of day or relative to container start time + +This Container uses Alpine:Edge as a base. + + +[Changelog](CHANGELOG.md) + +# Authors + +- [Dave Conroy](https://github.com/tiredofit) + +# Table of Contents + +- [Introduction](#introduction) + - [Changelog](CHANGELOG.md) +- [Prerequisites](#prerequisites) +- [Installation](#installation) +- [Quick Start](#quick-start) +- [Configuration](#configuration) + - [Data Volumes](#data-volumes) + - [Environment Variables](#environmentvariables) +- [Maintenance](#maintenance) + - [Shell Access](#shell-access) + +# Prerequisites + +You must have a working DB server or container available for this to work properly, it does not provide server functionality! + + +# Installation + +Automated builds of the image are available on [Docker Hub](https://hub.docker.com/tiredofit/db-backup) and is the recommended method of installation. 
+ + +```bash +docker pull tiredofit/db-backup +``` + +# Quick Start + +* The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a working [docker-compose.yml](examples/docker-compose.yml) that can be modified for development or production use. + +* Set various [environment variables](#environment-variables) to understand the capabilities of this image. +* Map [persistent storage](#data-volumes) for access to configuration and data files for backup. + +> **NOTE**: If you are using this with a docker-compose file along with a separate SQL container, take care not to set the variables to backup immediately; rather, have it delay execution for a minute, otherwise you will get a failed first backup. + +# Configuration + +## Data-Volumes + +The following directories are used for configuration and can be mapped for persistent storage. + +| Directory | Description | +|-----------|-------------| +| `/backups` | SQL Backups | + + +## Environment Variables + +Along with the Environment Variables from the [Base image](https://hub.docker.com/r/tiredofit/alpine), below is the complete list of available options that can be used to customize your installation. + + +| Parameter | Description | +|-----------|-------------| +| `DB_TYPE` | Type of DB Server to backup `influx` `mysql` `pgsql` `mongo` `redis` `rethink` +| `DB_HOST` | Server Hostname e.g. `mariadb` +| `DB_NAME` | Schema Name e.g. `database` +| `DB_USER` | username for the database - use `root` to back up all MySQL databases. +| `DB_PASS` | (optional if DB doesn't require it) password for the database +| `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. +| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats +| | Absolute HHMM, e.g. `2330` or `0415` +| | Relative +MM, i.e. how many minutes after starting the container, e.g. 
`+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half + | `DB_DUMP_DEBUG` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. + | `DB_DUMP_TARGET` | Where to put the dump file, should be a directory. Supports three formats | + | | Local If the value of `DB_DUMP_TARGET` starts with a `/` character, will dump to a local path, which should be volume-mounted. + | `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump frequency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. + | `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, or none `NONE` - (Default `GZ`) + | `MD5` | Generate MD5 Sum in Directory, `TRUE` or `FALSE` - Default `TRUE` + | `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create Separate DB Backups instead of all in one. - Default `FALSE` + + +## Maintenance +#### Shell Access + +For debugging and maintenance purposes you may want to access the container's shell. + +```bash +docker exec -it (whatever your container name is e.g.) 
db-backup bash
+```
+
 diff --git a/examples/docker-compose.yml b/examples/docker-compose.yml
new file mode 100644
index 0000000..1249362
--- /dev/null
+++ b/examples/docker-compose.yml
@@ -0,0 +1,38 @@
+version: '2'
+
+services:
+  example-db:
+    container_name: example-db
+    image: mariadb:latest
+    volumes:
+    - ./db:/var/lib/mysql
+    environment:
+    - MYSQL_ROOT_PASSWORD=examplerootpassword
+    - MYSQL_DATABASE=example
+    - MYSQL_USER=example
+    - MYSQL_PASSWORD=examplepassword
+    restart: always
+
+  example-db-backup:
+    container_name: example-db-backup
+    image: tiredofit/db-backup
+    links:
+    - example-db
+    volumes:
+    - ./backups:/backups
+    environment:
+    - DB_TYPE=mariadb
+    - DB_HOST=example-db
+    - DB_NAME=example
+    - DB_USER=example
+    # FIX: the run script reads DB_PASS (not DB_PASSWORD), and quotes inside a
+    # compose list-form value are stored literally as part of the password.
+    - DB_PASS=examplepassword
+    - DB_DUMP_FREQ=1440
+    - DB_DUMP_BEGIN=0000
+    - DB_CLEANUP_TIME=8640
+    - MD5=TRUE
+    - COMPRESSION=XZ
+    - SPLIT_DB=FALSE
+
+    restart: always
+
+
 diff --git a/install/s6/services/10-db-backup/run b/install/s6/services/10-db-backup/run
new file mode 100755
index 0000000..46c6bda
--- /dev/null
+++ b/install/s6/services/10-db-backup/run
@@ -0,0 +1,256 @@
+#!/usr/bin/with-contenv bash
+# db-backup scheduler: waits until DB_DUMP_BEGIN, then dumps the configured
+# database every DB_DUMP_FREQ minutes, optionally compresses the dump,
+# generates an MD5 sum, and prunes backups older than DB_CLEANUP_TIME.
+
+date >/dev/null
+
+### Set Debug Mode
+sleep 10
+if [ "$DEBUG_MODE" = "TRUE" ] || [ "$DEBUG_MODE" = "true" ]; then
+    set -x
+fi
+
+### Sanity Test
+# FIX: original used `[ !-n "DB_TYPE"]` - a syntax error (missing spaces)
+# that also tested a literal string instead of the variable's value.
+if [ -z "$DB_TYPE" ]; then
+    echo '** [db-backup] ERROR: No Database Type Selected! '
+    exit 1
+fi
+
+if [ -z "$DB_HOST" ]; then
+    echo '** [db-backup] ERROR: No Database Host Entered! '
+    exit 1
+fi
+
+### Set Defaults
+COMPRESSION=${COMPRESSION:-GZ}
+DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
+DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
+DB_DUMP_TARGET=${DB_DUMP_TARGET:-/backups}
+DBHOST=${DB_HOST}
+DBNAME=${DB_NAME}
+DBPASS=${DB_PASS}
+DBUSER=${DB_USER}
+DBTYPE=${DB_TYPE}
+MD5=${MD5:-TRUE}
+SPLIT_DB=${SPLIT_DB:-FALSE}
+TMPDIR=/tmp/backups
+
+### Set the Database Type
+# FIX: was `case "DBTYPE"` (a literal string) so no branch ever matched and
+# DBPORT / auth strings were never set.
+case "$DBTYPE" in
+    "influx" | "influxdb" | "INFLUX" | "INFLUXDB" )
+        DBTYPE=influx
+        DBPORT=${DB_PORT:-8088}
+        ;;
+    "mongo" | "mongodb" | "MONGO" | "MONGODB" )
+        DBTYPE=mongo
+        DBPORT=${DB_PORT:-27017}
+        [ -n "${DB_USER}" ] && MONGO_USER_STR=" --username ${DBUSER}"
+        [ -n "${DB_PASS}" ] && MONGO_PASS_STR=" --password ${DBPASS}"
+        [ -n "${DB_NAME}" ] && MONGO_DB_STR=" --db ${DBNAME}"
+        ;;
+    "mysql" | "MYSQL" | "mariadb" | "MARIADB" )
+        DBTYPE=mysql
+        DBPORT=${DB_PORT:-3306}
+        [ -n "${DB_PASS}" ] && MYSQL_PASS_STR=" -p${DBPASS}"
+        ;;
+    "postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" )
+        DBTYPE=pgsql
+        DBPORT=${DB_PORT:-5432}
+        # FIX: a "PGPASSWORD=x " prefix stored in a variable is NOT parsed as
+        # an assignment when expanded (bash would try to run it as a command);
+        # export the variable so psql/pg_dump pick it up.
+        [ -n "${DB_PASS}" ] && export PGPASSWORD=${DBPASS}
+        ;;
+    "redis" | "REDIS" )
+        DBTYPE=redis
+        DBPORT=${DB_PORT:-6379}
+        ;;
+    "rethink" | "RETHINK" )
+        DBTYPE=rethink
+        DBPORT=${DB_PORT:-28015}
+        # FIX: `[[ ... ]] && cmd; VAR=...` set RETHINK_PASS_STR even when no
+        # password was supplied (the `;` ends the && chain).
+        if [ -n "${DB_PASS}" ]; then
+            echo "${DB_PASS}" > /tmp/.rethink.auth
+            RETHINK_PASS_STR=" --password-file /tmp/.rethink.auth"
+        fi
+        [ -n "${DB_NAME}" ] && RETHINK_DB_STR=" -e ${DBNAME}"
+        ;;
+esac
+
+### Functions
+
+# Dump MySQL/MariaDB: one dump per database when SPLIT_DB=TRUE, otherwise a
+# single --all-databases dump into the TARGET set by the main loop.
+backup_mysql() {
+    if [ "$SPLIT_DB" = "TRUE" ] || [ "$SPLIT_DB" = "true" ]; then
+        DATABASES=$(mysql -h $DBHOST -u$DBUSER -p$DBPASS --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema)
+
+        for db in $DATABASES; do
+            if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]]; then
+                echo "** [db-backup] Dumping database: $db"
+                TARGET=mysql_${db}_${DBHOST}_${now}.sql
+                mysqldump --max-allowed-packet=512M -h $DBHOST -u$DBUSER ${MYSQL_PASS_STR} --databases $db > ${TMPDIR}/${TARGET}
+                generate_md5
+                compression
+                move_backup
+            fi
+        done
+    else
+        mysqldump --max-allowed-packet=512M -A -h $DBHOST -u$DBUSER ${MYSQL_PASS_STR} > ${TMPDIR}/${TARGET}
+        generate_md5
+        compression
+        move_backup
+    fi
+}
+
+# Dump one or more InfluxDB databases (DB_NAME may be space-separated).
+backup_influx() {
+    for DB in $DB_NAME; do
+        # FIX: was `-host {DBHOST}` (missing `$`); also give each database its
+        # own target name instead of reusing the previous iteration's TARGET.
+        TARGET=influx_${DB}_${DBHOST}_${now}
+        influxd backup -database $DB -host ${DBHOST} ${TMPDIR}/${TARGET}
+        generate_md5
+        compression
+        move_backup
+    done
+}
+
+# Dump MongoDB into a directory, then tar it so md5/compression/move can
+# treat it as a single file.
+backup_mongo() {
+    mongodump --out ${TMPDIR}/${TARGET} --host ${DBHOST} --port ${DBPORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+    cd ${TMPDIR}
+    tar cf ${TARGET}.tar ${TARGET}/*
+    TARGET=${TARGET}.tar
+    generate_md5
+    compression
+    move_backup
+}
+
+# Dump PostgreSQL: per-database when SPLIT_DB=TRUE, otherwise DB_NAME only.
+# Password (if any) is supplied via the exported PGPASSWORD.
+backup_pgsql() {
+    if [ "$SPLIT_DB" = "TRUE" ] || [ "$SPLIT_DB" = "true" ]; then
+        DATABASES=$(psql -h $DBHOST -p ${DBPORT} -U $DBUSER -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;')
+        for db in $DATABASES; do
+            echo "** [db-backup] Dumping database: $db"
+            TARGET=pgsql_${db}_${DBHOST}_${now}.sql
+            # FIX: was `-p ${DBPORT}-U` (missing space between options).
+            pg_dump -h ${DBHOST} -p ${DBPORT} -U ${DBUSER} $db > ${TMPDIR}/${TARGET}
+            generate_md5
+            compression
+            move_backup
+        done
+    else
+        # FIX: dumped `$db`, which is never set outside the split loop; use
+        # the configured DB_NAME instead.
+        pg_dump -h ${DBHOST} -p ${DBPORT} -U ${DBUSER} ${DBNAME} > ${TMPDIR}/${TARGET}
+        generate_md5
+        compression
+        move_backup
+    fi
+}
+
+# Dump Redis: trigger a BGSAVE, wait for it to complete (up to 5 retries),
+# then fetch the RDB snapshot.
+backup_redis() {
+    # FIX: original named the file with unset `${db}`.
+    TARGET=redis_${DBHOST}_${now}.rdb
+    echo "** [db-backup] Dumping Redis - Flushing Redis Cache First"
+    echo bgsave | redis-cli -h ${DBHOST} -p ${DBPORT}
+    sleep 10
+    try=5
+    while [ $try -gt 0 ]; do
+        saved=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
+        ok=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
+        if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
+            # FIX: was `${TMPDIR]` (mismatched brace), the --rdb fetch ran
+            # before the save finished, and the loop never `break`ed after a
+            # successful backup.
+            redis-cli -h ${DBHOST} -p ${DBPORT} --rdb ${TMPDIR}/${TARGET}
+            generate_md5
+            compression
+            move_backup
+            break
+        fi
+        try=$((try - 1))
+        echo "** [db-backup] Redis Busy - Waiting and retrying in 5 seconds"
+        sleep 5
+    done
+}
+
+# Dump RethinkDB; `rethinkdb dump` already produces a .tar.gz archive.
+backup_rethink() {
+    # FIX: original used unset `${db}`; use the configured DB_NAME.
+    TARGET=rethink_${DBNAME}_${DBHOST}_${now}.tar.gz
+    echo "** [db-backup] Dumping rethink Database: ${DBNAME}"
+    rethinkdb dump -f ${TMPDIR}/${TARGET} -c ${DBHOST}:${DBPORT} ${RETHINK_PASS_STR} ${RETHINK_DB_STR}
+    move_backup
+}
+
+# Compress ${TMPDIR}/${TARGET} in place and append the matching suffix.
+compression() {
+    case "$COMPRESSION" in
+        "GZ" | "gz" | "gzip" | "GZIP" )
+            gzip ${TMPDIR}/${TARGET}
+            TARGET=${TARGET}.gz
+            ;;
+        "BZ" | "bz" | "bzip2" | "BZIP2" | "bzip" | "BZIP" | "bz2" | "BZ2" )
+            bzip2 ${TMPDIR}/${TARGET}
+            TARGET=${TARGET}.bz2
+            ;;
+        "XZ" | "xz" | "XZIP" | "xzip" )
+            xz ${TMPDIR}/${TARGET}
+            TARGET=${TARGET}.xz
+            ;;
+        "NONE" | "none" | "FALSE" | "false" )
+            ;;
+    esac
+}
+
+# Write ${TARGET}.md5 next to the dump when MD5 generation is enabled.
+generate_md5() {
+    if [ "$MD5" = "TRUE" ] || [ "$MD5" = "true" ]; then
+        cd $TMPDIR
+        md5sum ${TARGET} > ${TARGET}.md5
+    fi
+}
+
+# Move the finished dump (and any md5 files) to the final target directory.
+move_backup() {
+    mkdir -p ${DB_DUMP_TARGET}
+    # FIX: guard the md5 move so it does not error when MD5=FALSE and no
+    # .md5 files exist.
+    if ls ${TMPDIR}/*.md5 >/dev/null 2>&1; then
+        mv ${TMPDIR}/*.md5 ${DB_DUMP_TARGET}/
+    fi
+    mv ${TMPDIR}/${TARGET} ${DB_DUMP_TARGET}/${TARGET}
+}
+
+
+### Container Startup
+# FIX: "at at" typo in the startup log line.
+echo '** [db-backup] Initialized at '$(date)
+
+### Wait for Next time to start backup
+current_time=$(date +"%s")
+today=$(date +"%Y%m%d")
+
+# DB_DUMP_BEGIN is either `+MM` (minutes after start) or absolute HHMM.
+if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
+    waittime=$(( ${BASH_REMATCH[1]} * 60 ))
+else
+    target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
+    # FIX: `<` inside [[ ]] is a STRING comparison; epoch seconds must be
+    # compared numerically.
+    if [[ "$target_time" -le "$current_time" ]]; then
+        target_time=$(($target_time + 24*60*60))
+    fi
+    waittime=$(($target_time - $current_time))
+fi
+
+sleep $waittime
+
+
+### Commence Backup
+while true; do
+    # make sure the directory exists
+    mkdir -p $TMPDIR
+
+    ### Define Target name
+    # FIX: this heading was missing its `#` and was executed as a command.
+    now=$(date +"%Y%m%d-%H%M%S")
+    TARGET=${DBTYPE}_${DBNAME}_${DBHOST}_${now}.sql
+
+    ### Take a Dump
+    # FIX: was `case "DBTYPE"` (literal, never matched) and dispatched to
+    # nonexistent `function_backup_*` names; the functions are `backup_*`.
+    case "$DBTYPE" in
+        "influx" )
+            backup_influx
+            ;;
+        "mysql" )
+            backup_mysql
+            ;;
+        "mongo" )
+            backup_mongo
+            ;;
+        "pgsql" )
+            backup_pgsql
+            ;;
+        "redis" )
+            backup_redis
+            ;;
+        "rethink" )
+            backup_rethink
+            ;;
+    esac
+
+    ### Automatic Cleanup
+    # FIX: `"$DBTYPE_$DBNAME_*.*"` expanded the undefined variables DBTYPE_
+    # and DBNAME_. Match on the type prefix so split-DB dumps (named
+    # <type>_<db>_...) are pruned as well.
+    if [[ -n "$DB_CLEANUP_TIME" ]]; then
+        find $DB_DUMP_TARGET/ -mmin +$DB_CLEANUP_TIME -iname "${DBTYPE}_*.*" -exec rm {} \;
+    fi
+
+    ### Go back to Sleep until next Backup time
+    sleep $(($DB_DUMP_FREQ*60))
+
+done