Compare commits

...

11 Commits
1.13 ... 1.16

Author SHA1 Message Date
Dave Conroy
78e7434a85 Check for host availability before backup 2019-06-17 14:54:29 -07:00
Dave Conroy
d1d093b87d Merge pull request #13 from spumer/master
Allow override MYSQL DB_PORT
2019-06-16 08:40:20 -07:00
spumer
ea00709aa1 Allow override MYSQL DB_PORT 2019-06-16 16:13:34 +05:00
Dave Conroy
219a6463f3 Merge pull request #12 from claudioaltamura/master
Update README
2019-05-29 07:46:30 -07:00
Claudio Altamura
0ed89369a9 changed DB_SERVER into DB_HOST 2019-05-29 13:26:09 +02:00
Dave Conroy
6bd534258e Add support to backup password protected Redis Hosts 2019-05-24 13:58:54 -07:00
Dave Conroy
48bea7aeee Merge pull request #11 from claudioaltamura/master
chg: added AUTH for redis
2019-05-24 13:55:59 -07:00
Claudio Altamura
c3179d58ba chg: added AUTH for redis 2019-05-22 13:03:57 +02:00
Dave Conroy
fcafa1753d Update Changelog for 1.14 2019-04-20 07:13:52 -07:00
Dave Conroy
d74a516967 Switch to locally installed MongoDB packages 2019-04-20 07:10:13 -07:00
Dave Conroy
b0a5fafc4c Update README.md 2019-03-09 07:32:13 -08:00
4 changed files with 149 additions and 46 deletions

View File

@@ -1,3 +1,16 @@
## 1.16 - 2019-06-16 - <dave at tiredofit dot ca>
* Check to see if Database Exists before performing backup
* Fix for MySQL/MariaDB custom ports - Credit to <spumer@github>
## 1.15 - 2019-05-24 - <claudioaltamura @ github>
* Added ability to backup password protected Redis Hosts
## 1.14 - 2019-04-20 - <dave at tiredofit dot ca>
* Switch to using locally built mongodb-tools from tiredofit/mongo-builder due to Alpine removing precompiled packages from repositories
## 1.13 - 2019-03-09 - <dave at tiredofit dot ca> ## 1.13 - 2019-03-09 - <dave at tiredofit dot ca>
* Fixed Postgres backup without SPLIT_DB enabled (credit MelwinKfr@github) * Fixed Postgres backup without SPLIT_DB enabled (credit MelwinKfr@github)
@@ -60,3 +73,4 @@
* Initial Release * Initial Release
* Alpine:Edge * Alpine:Edge

View File

@@ -1,6 +1,11 @@
FROM tiredofit/mongo-builder as mongo-packages
FROM tiredofit/alpine:edge FROM tiredofit/alpine:edge
LABEL maintainer="Dave Conroy (dave at tiredofit dot ca)" LABEL maintainer="Dave Conroy (dave at tiredofit dot ca)"
### Copy Mongo Packages
COPY --from=mongo-packages / /usr/src/apk
### Set Environment Variables ### Set Environment Variables
ENV ENABLE_CRON=FALSE \ ENV ENABLE_CRON=FALSE \
ENABLE_SMTP=FALSE ENABLE_SMTP=FALSE
@@ -17,9 +22,9 @@ LABEL maintainer="Dave Conroy (dave at tiredofit dot ca)"
xz-dev \ xz-dev \
&& \ && \
\ \
apk add --virtual .db-backup-run-deps \ apk add -t .db-backup-run-deps \
bzip2 \ bzip2 \
mongodb-tools \ influxdb \
mariadb-client \ mariadb-client \
libressl \ libressl \
pigz \ pigz \
@@ -28,22 +33,27 @@ LABEL maintainer="Dave Conroy (dave at tiredofit dot ca)"
redis \ redis \
xz \ xz \
&& \ && \
apk add \ apk add \
influxdb@testing \ pixz@testing \
pixz@testing \
&& \ && \
\ \
cd /usr/src && \ ## Locally Install Mongo Package
mkdir -p pbzip2 && \ cd /usr/src/apk && \
curl -ssL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \ apk add -t .db-backup-mongo-deps --allow-untrusted \
cd pbzip2 && \ mongodb-tools*.apk \
make && \ && \
make install && \ \
\ cd /usr/src && \
# Cleanup mkdir -p pbzip2 && \
rm -rf /usr/src/* && \ curl -ssL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
apk del .db-backup-build-deps && \ cd pbzip2 && \
rm -rf /tmp/* /var/cache/apk/* make && \
make install && \
\
### Cleanup
rm -rf /usr/src/* && \
apk del .db-backup-build-deps && \
rm -rf /tmp/* /var/cache/apk/*
### S6 Setup ### S6 Setup
ADD install / ADD install /

View File

@@ -1,4 +1,4 @@
# tiredofit/db-backup # hub.docker.com/r/tiredofit/db-backup
[![Build Status](https://img.shields.io/docker/build/tiredofit/db-backup.svg)](https://hub.docker.com/r/tiredofit/db-backup) [![Build Status](https://img.shields.io/docker/build/tiredofit/db-backup.svg)](https://hub.docker.com/r/tiredofit/db-backup)
@@ -13,7 +13,7 @@ This will build a container for backing up multiple type of DB Servers
Currently backs up CouchDB, InfluxDB, MySQL, MongoDB Postgres, Redis, Rethink servers. Currently backs up CouchDB, InfluxDB, MySQL, MongoDB Postgres, Redis, Rethink servers.
* dump to local filesystem * dump to local filesystem
* select database user and password * select database user and password
* backup all databases * backup all databases
* choose to have an MD5 sum after backup for verification * choose to have an MD5 sum after backup for verification
@@ -23,7 +23,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB Postgres, Redis, Rethink se
* select how often to run a dump * select how often to run a dump
* select when to start the first dump, whether time of day or relative to container start time * select when to start the first dump, whether time of day or relative to container start time
This Container uses Alpine:Edge as a base. * This Container uses a [customized Alpine Linux base](https://hub.docker.com/r/tiredofit/alpine) which includes [s6 overlay](https://github.com/just-containers/s6-overlay) enabled for PID 1 Init capabilities, [zabbix-agent](https://zabbix.org) based on `3.4` compiled for individual container monitoring, Cron also installed along with other tools (bash,curl, less, logrotate, nano, vim) for easier management. It also supports sending to external SMTP servers
[Changelog](CHANGELOG.md) [Changelog](CHANGELOG.md)
@@ -40,7 +40,7 @@ This Container uses Alpine:Edge as a base.
- [Quick Start](#quick-start) - [Quick Start](#quick-start)
- [Configuration](#configuration) - [Configuration](#configuration)
- [Data Volumes](#data-volumes) - [Data Volumes](#data-volumes)
- [Environment Variables](#environmentvariables) - [Environment Variables](#environmentvariables)
- [Maintenance](#maintenance) - [Maintenance](#maintenance)
- [Shell Access](#shell-access) - [Shell Access](#shell-access)
@@ -51,12 +51,12 @@ You must have a working DB server or container available for this to work proper
# Installation # Installation
Automated builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and is the recommended Automated builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and is the recommended
method of installation. method of installation.
```bash ```bash
docker pull tiredofit/db-backup docker pull tiredofit/db-backup:latest
``` ```
# Quick Start # Quick Start
@@ -77,7 +77,7 @@ The following directories are used for configuration and can be mapped for persi
| Directory | Description | | Directory | Description |
|-----------|-------------| |-----------|-------------|
| `/backup` | Backups | | `/backup` | Backups |
## Environment Variables ## Environment Variables
@@ -86,23 +86,22 @@ Along with the Environment Variables from the [Base image](https://hub.docker.co
| Parameter | Description | | Parameter | Description |
|-----------|-------------| |-----------|-------------|
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `rethink` | `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, or none `NONE` - Default `GZ`
| `DB_SERVER` | Server Hostname e.g. `mariadb` | `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `rethink`
| `DB_HOST` | Server Hostname e.g. `mariadb`
| `DB_NAME` | Schema Name e.g. `database` | `DB_NAME` | Schema Name e.g. `database`
| `DB_USER` | username for the database - use `root` to backup all MySQL of them. | `DB_USER` | username for the database - use `root` to backup all MySQL of them.
| `DB_PASS` | (optional if DB doesn't require it) password for the database | `DB_PASS` | (optional if DB doesn't require it) password for the database
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided
| `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. | `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day.
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats
| | Absolute HHMM, e.g. `2330` or `0415` | | Absolute HHMM, e.g. `2330` or `0415`
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half
| `DB_DUMP_DEBUG` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed.
| `DB_DUMP_TARGET` | Where to put the dump file, should be a directory. Supports three formats |
| | Local If the value of `DB_DUMP_TARGET` starts with a `/` character, will dump to a local path, which should be volume-mounted.
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump frequency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump frequency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything.
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, or none `NONE` - Default `GZ` | `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed.
| `MD5` | Generate MD5 Sum in Directory, `TRUE` or `FALSE` - Default `TRUE` | `MD5` | Generate MD5 Sum in Directory, `TRUE` or `FALSE` - Default `TRUE`
| `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create Separate DB Backups instead of all in one. - Default `FALSE` |
| `PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` - Default `TRUE` | | `PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` - Default `TRUE` |
| `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create Separate DB Backups instead of all in one. - Default `FALSE` |
## Maintenance ## Maintenance
@@ -111,7 +110,7 @@ Manual Backups can be performed by entering the container and typing `backup-now
#### Shell Access #### Shell Access
For debugging and maintenance purposes you may want access the containers shell. For debugging and maintenance purposes you may want access the containers shell.
```bash ```bash
docker exec -it (whatever your container name is e.g.) db-backup bash docker exec -it (whatever your container name is e.g.) db-backup bash

View File

@@ -84,6 +84,7 @@ fi
"redis" | "REDIS" ) "redis" | "REDIS" )
DBTYPE=redis DBTYPE=redis
DBPORT=${DB_PORT:-6379} DBPORT=${DB_PORT:-6379}
[[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DBPASS}"
;; ;;
"rethink" | "RETHINK" ) "rethink" | "RETHINK" )
DBTYPE=rethink DBTYPE=rethink
@@ -103,14 +104,14 @@ function backup_couch() {
} }
function backup_mysql() { function backup_mysql() {
if [ "$SPLIT_DB" = "TRUE" ] || [ "$SPLIT_DB" = "true" ]; then if [ "$SPLIT_DB" = "TRUE" ] || [ "$SPLIT_DB" = "true" ]; then
DATABASES=`mysql -h $DBHOST -u$DBUSER -p$DBPASS --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema` DATABASES=`mysql -h $DBHOST -P $DBPORT -u$DBUSER -p$DBPASS --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema`
for db in $DATABASES; do for db in $DATABASES; do
if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
echo "** [db-backup] Dumping database: $db" echo "** [db-backup] Dumping database: $db"
TARGET=mysql_${db}_${DBHOST}_${now}.sql TARGET=mysql_${db}_${DBHOST}_${now}.sql
mysqldump --max-allowed-packet=512M -h $DBHOST -u$DBUSER ${MYSQL_PASS_STR} --databases $db > ${TMPDIR}/${TARGET} mysqldump --max-allowed-packet=512M -h $DBHOST -P $DBPORT -u$DBUSER ${MYSQL_PASS_STR} --databases $db > ${TMPDIR}/${TARGET}
generate_md5 generate_md5
compression compression
move_backup move_backup
@@ -137,7 +138,7 @@ function backup_mongo() {
mongodump --out ${TMPDIR}/${TARGET} --host ${DBHOST} --port ${DBPORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS} mongodump --out ${TMPDIR}/${TARGET} --host ${DBHOST} --port ${DBPORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
cd ${TMPDIR} cd ${TMPDIR}
tar cf ${TARGET}.tar ${TARGET}/* tar cf ${TARGET}.tar ${TARGET}/*
TARGET=${TARGET}.tar TARGET=${TARGET}.tar
generate_md5 generate_md5
compression compression
move_backup move_backup
@@ -166,13 +167,13 @@ function backup_pgsql() {
function backup_redis() { function backup_redis() {
TARGET=redis_${db}_${DBHOST}_${now}.rdb TARGET=redis_${db}_${DBHOST}_${now}.rdb
echo bgsave | redis-cli -h ${DBHOST} -p ${DBPORT} --rdb ${TMPDIR}/${TARGET} echo bgsave | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} --rdb ${TMPDIR}/${TARGET}
echo "** [db-backup] Dumping Redis - Flushing Redis Cache First" echo "** [db-backup] Dumping Redis - Flushing Redis Cache First"
sleep 10 sleep 10
try=5 try=5
while [ $try -gt 0 ] ; do while [ $try -gt 0 ] ; do
saved=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} | awk '/rdb_bgsave_in_progress:0/{print "saved"}') saved=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} | awk '/rdb_last_bgsave_status:ok/{print "ok"}') ok=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
echo "** [db-backup] Redis Backup Complete" echo "** [db-backup] Redis Backup Complete"
fi fi
@@ -186,12 +187,84 @@ function backup_redis() {
} }
function backup_rethink() { function backup_rethink() {
TARGET=rethink_${db}_${DBHOST}_${now}.tar.gz TARGET=rethink_${db}_${DBHOST}_${now}.tar.gz
echo "** [db-backup] Dumping rethink Database: $db" echo "** [db-backup] Dumping rethink Database: $db"
rethinkdb dump -f ${TMPDIR}/${TARGET} -c ${DBHOST}:${DBPORT} ${RETHINK_PASS_STR} ${RETHINK_DB_STR} rethinkdb dump -f ${TMPDIR}/${TARGET} -c ${DBHOST}:${DBPORT} ${RETHINK_PASS_STR} ${RETHINK_DB_STR}
move_backup move_backup
} }
function check_availability() {
### Set the Database Type
case "$DBTYPE" in
"couch" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
sleep 5
let COUNTER+=5
echo "** [db-backup] CouchDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"influx" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
sleep 5
let COUNTER+=5
echo "** [db-backup] InfluxDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"mongo" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
sleep 5
let COUNTER+=5
echo "** [db-backup] Mongo Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"mysql" )
COUNTER=0
while true; do
mysqlcmd='mysql -u'${DBUSER}' -P '${DBPORT}' -h '${DBHOST}' -p'${DBPASS}
out="`$mysqlcmd -e "SELECT COUNT(*) FROM information_schema.FILES;" 2>&1`"
echo "$out" | grep -E "COUNT|Enter" 2>&1 > /dev/null
if [ $? -eq 0 ]; then
:
break
fi
echo "** [db-backup] MySQL/MariaDB Server "$DBHOST" is not accessible, retrying.. ($COUNTER seconds so far)"
sleep 5
let COUNTER+=5
done
;;
"pgsql" )
# Wait until mongo logs that it's ready (or timeout after 60s)
COUNTER=0
export PGPASSWORD=${DBPASS}
until pg_isready --dbname=${DBNAME} --host=${DBHOST} --port=${DBPORT} --username=${DBUSER} -q
do
sleep 5
let COUNTER+=5
echo "** [db-backup] Postgres Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"redis" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
sleep 5
let COUNTER+=5
echo "** [db-backup] Redis Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"rethink" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
sleep 5
let COUNTER+=5
echo "** [db-backup] RethinkDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
esac
}
function compression() { function compression() {
case "$COMPRESSION" in case "$COMPRESSION" in
"GZ" | "gz" | "gzip" | "GZIP") "GZ" | "gz" | "gzip" | "GZIP")
@@ -212,7 +285,7 @@ function compression() {
} }
function generate_md5() { function generate_md5() {
if [ "$MD5" = "TRUE" ] || [ "$MD5" = "true" ] ; then if [ "$MD5" = "TRUE" ] || [ "$MD5" = "true" ] ; then
cd $TMPDIR cd $TMPDIR
md5sum ${TARGET} > ${TARGET}.md5 md5sum ${TARGET} > ${TARGET}.md5
fi fi
@@ -225,13 +298,13 @@ function move_backup() {
} }
### Container Startup ### Container Startup
echo '** [db-backup] Initialized at at '$(date) echo '** [db-backup] Initialized at at '$(date)
### Wait for Next time to start backup ### Wait for Next time to start backup
current_time=$(date +"%s") current_time=$(date +"%s")
today=$(date +"%Y%m%d") today=$(date +"%Y%m%d")
if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
waittime=$(( ${BASH_REMATCH[1]} * 60 )) waittime=$(( ${BASH_REMATCH[1]} * 60 ))
else else
@@ -257,34 +330,41 @@ echo '** [db-backup] Initialized at at '$(date)
### Take a Dump ### Take a Dump
case "$DBTYPE" in case "$DBTYPE" in
"couch" ) "couch" )
check_availability
backup_couch backup_couch
;; ;;
"influx" ) "influx" )
check_availability
backup_influx backup_influx
;; ;;
"mysql" ) "mysql" )
check_availability
backup_mysql backup_mysql
;; ;;
"mongo" ) "mongo" )
check_availability
backup_mongo backup_mongo
;; ;;
"pgsql" ) "pgsql" )
check_availability
backup_pgsql backup_pgsql
;; ;;
"redis" ) "redis" )
check_availability
backup_redis backup_redis
;; ;;
"rethink" ) "rethink" )
check_availability
backup_rethink backup_rethink
;; ;;
esac esac
### Zabbix ### Zabbix
if [ "$ENABLE_ZABBIX" = "TRUE" ] || [ "$ENABLE_ZABBIX" = "true" ]; then if [ "$ENABLE_ZABBIX" = "TRUE" ] || [ "$ENABLE_ZABBIX" = "true" ]; then
zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${TARGET}` zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${TARGET}`
zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${TARGET} +'%s'` zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${TARGET} +'%s'`
fi fi
### Automatic Cleanup ### Automatic Cleanup
if [[ -n "$DB_CLEANUP_TIME" ]]; then if [[ -n "$DB_CLEANUP_TIME" ]]; then
find $DB_DUMP_TARGET/ -mmin +$DB_CLEANUP_TIME -iname "$DBTYPE_$DBNAME_*.*" -exec rm {} \; find $DB_DUMP_TARGET/ -mmin +$DB_CLEANUP_TIME -iname "$DBTYPE_$DBNAME_*.*" -exec rm {} \;