Compare commits

...

24 Commits
1.15 ... 1.20.0

Author SHA1 Message Date
Dave Conroy
4f103a5b36 Release 1.20.0 - See CHANGELOG.md 2020-04-22 14:19:20 -07:00
Dave Conroy
0472cba83d Update README.md 2020-04-22 05:36:48 -07:00
Dave Conroy
fe6fab857f Merge branch 'master' of https://github.com/tiredofit/docker-db-backup 2020-04-22 05:36:08 -07:00
Dave Conroy
6113bf64b2 Update README.md 2020-04-22 05:36:03 -07:00
Dave Conroy
95d1129a12 Merge pull request #22 from pascalberger/patch-1
Fix typo
2020-04-22 05:34:48 -07:00
Dave Conroy
f8bab5f045 Release 1.19.0 - See CHANGELOG.md 2020-04-22 05:21:02 -07:00
Dave Conroy
71802d2a28 Release 1.18.2 - See CHANGELOG.md 2020-04-08 12:08:42 -07:00
Dave Conroy
c96a2179b5 Merge pull request #27 from hyun007/master
changed mysql password to env variable
2020-04-08 12:04:55 -07:00
Hyun Jo
42d3aa0fef changed mysql password to env variable 2020-03-19 20:17:49 -04:00
Dave Conroy
e6009e7a1e Release 1.18.1 - See CHANGELOG.md 2020-03-14 07:59:31 -07:00
Pascal Berger
b5466d5b97 Fix typo 2020-03-01 19:43:49 +01:00
Dave Conroy
06b6e685c7 Support new tiredofit/alpine base image 2019-12-30 07:40:13 -08:00
Dave Conroy
d7bdcbd0dc Release 1.17.0 - See CHANGELOG.md 2019-12-09 13:58:11 -08:00
Dave Conroy
62ee7ad3dc Update README.md 2019-10-19 09:09:25 -07:00
Dave Conroy
65c879172f Fix docker-compose.yml example 2019-10-19 09:08:20 -07:00
Dave Conroy
33c942551a Merge pull request #18 from alwynpan/1.16.1
Fix couchdb backup endpoint; Set ENABLE_ZABBIX to FALSE by default
2019-10-01 18:22:55 -07:00
Yao (Alwyn) Pan
36e4d9a2a2 Fix couchdb backup endpoint; Set ENABLE_ZABBIX to FALSE by default 2019-10-02 10:27:26 +10:00
Dave Conroy
aa1c8b3591 Merge pull request #16 from tito/tito-patch-1
Fix usage of DB_PORT for single mariadb database
2019-09-16 07:12:54 -07:00
Mathieu Virbel
243bbb9709 Fix usage of DB_PORT for single mariadb database
Fixes #15
2019-09-16 12:32:16 +02:00
Dave Conroy
78e7434a85 Check for host availability before backup 2019-06-17 14:54:29 -07:00
Dave Conroy
d1d093b87d Merge pull request #13 from spumer/master
Allow override MYSQL DB_PORT
2019-06-16 08:40:20 -07:00
spumer
ea00709aa1 Allow override MYSQL DB_PORT 2019-06-16 16:13:34 +05:00
Dave Conroy
219a6463f3 Merge pull request #12 from claudioaltamura/master
Update README
2019-05-29 07:46:30 -07:00
Claudio Altamura
0ed89369a9 changed DB_SERVER into DB_HOST 2019-05-29 13:26:09 +02:00
7 changed files with 288 additions and 92 deletions

View File

@@ -1,3 +1,62 @@
## 1.20.0 2020-04-22 <dave at tiredofit dot ca>
### Added
- Docker Secrets Support for DB_USER and DB_PASS variables
## 1.19.0 2020-04-22 <dave at tiredofit dot ca>
### Added
- Custom Script support to execute upon completion of backup
## 1.18.2 2020-04-08 <hyun007 @ github>
### Changed
- Rework to allow passwords with spaces in them for MariaDB / MySQL
## 1.18.1 2020-03-14 <dave at tiredofit dot ca>
### Changed
- Allow for passwords with spaces in them for MariaDB / MySQL
## 1.18.0 2019-12-29 <dave at tiredofit dot ca>
### Added
- Update image to support new tiredofit/alpine base images
## 1.17.3 2019-12-12 <dave at tiredofit dot ca>
### Changed
- Quiet down Zabbix Agent
## 1.17.2 2019-12-12 <dave at tiredofit dot ca>
### Changed
- Re Enable ZABBIX
## 1.17.1 2019-12-10 <dave at tiredofit dot ca>
### Changed
- Fix spelling mistake in container initialization
## 1.17.0 2019-12-09 <dave at tiredofit dot ca>
### Changed
- Stop compiling mongodb-tools as it is back in Alpine:edge repositories
- Cleanup Code
## 1.16 - 2019-06-16 - <dave at tiredofit dot ca>
* Check to see if Database Exists before performing backup
* Fix for MySQL/MariaDB custom ports - Credit to <spumer@github>
## 1.15 - 2019-05-24 - <claudioaltamura @ github> ## 1.15 - 2019-05-24 - <claudioaltamura @ github>
* Added ability to backup password protected Redis Hosts * Added ability to backup password protected Redis Hosts

View File

@@ -1,59 +1,51 @@
FROM tiredofit/mongo-builder as mongo-packages
FROM tiredofit/alpine:edge FROM tiredofit/alpine:edge
LABEL maintainer="Dave Conroy (dave at tiredofit dot ca)" LABEL maintainer="Dave Conroy (dave at tiredofit dot ca)"
### Copy Mongo Packages
COPY --from=mongo-packages / /usr/src/apk
### Set Environment Variables ### Set Environment Variables
ENV ENABLE_CRON=FALSE \ ENV ENABLE_CRON=FALSE \
ENABLE_SMTP=FALSE ENABLE_SMTP=FALSE \
ENABLE_ZABBIX=FALSE \
ZABBIX_HOSTNAME=db-backup
### Dependencies ### Dependencies
RUN set -ex && \ RUN set -ex && \
echo "@testing http://nl.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \ echo "@testing http://nl.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
apk update && \ apk update && \
apk upgrade && \ apk upgrade && \
apk add --virtual .db-backup-build-deps \ apk add -t .db-backup-build-deps \
build-base \ build-base \
bzip2-dev \ bzip2-dev \
git \ git \
xz-dev \ xz-dev \
&& \ && \
\ \
apk add -t .db-backup-run-deps \ apk add -t .db-backup-run-deps \
bzip2 \ bzip2 \
influxdb \ influxdb \
mariadb-client \ mariadb-client \
libressl \ mongodb-tools \
pigz \ libressl \
postgresql \ pigz \
postgresql-client \ postgresql \
redis \ postgresql-client \
xz \ redis \
&& \ xz \
apk add \ && \
pixz@testing \ \
&& \ apk add \
pixz@testing \
## Locally Install Mongo Package && \
cd /usr/src/apk && \ \
apk add -t .db-backup-mongo-deps --allow-untrusted \ mkdir -p /usr/src/pbzip2 && \
mongodb-tools*.apk \ curl -ssL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
&& \ cd /usr/src/pbzip2 && \
\ make && \
cd /usr/src && \ make install && \
mkdir -p pbzip2 && \ \
curl -ssL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
cd pbzip2 && \
make && \
make install && \
\
### Cleanup ### Cleanup
rm -rf /usr/src/* && \ apk del .db-backup-build-deps && \
apk del .db-backup-build-deps && \ rm -rf /usr/src/* && \
rm -rf /tmp/* /var/cache/apk/* rm -rf /tmp/* /var/cache/apk/*
### S6 Setup ### S6 Setup
ADD install / ADD install /

View File

@@ -1,6 +1,6 @@
The MIT License (MIT) The MIT License (MIT)
Copyright (c) 2016 Dave Conroy Copyright (c) 2020 Dave Conroy
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

View File

@@ -1,4 +1,4 @@
# tiredofit/db-backup # hub.docker.com/r/tiredofit/db-backup
[![Build Status](https://img.shields.io/docker/build/tiredofit/db-backup.svg)](https://hub.docker.com/r/tiredofit/db-backup) [![Build Status](https://img.shields.io/docker/build/tiredofit/db-backup.svg)](https://hub.docker.com/r/tiredofit/db-backup)
@@ -6,14 +6,13 @@
[![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg)](https://hub.docker.com/r/tiredofit/db-backup) [![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg)](https://hub.docker.com/r/tiredofit/db-backup)
[![Docker Layers](https://images.microbadger.com/badges/image/tiredofit/db-backup.svg)](https://microbadger.com/images/tiredofit/db-backup) [![Docker Layers](https://images.microbadger.com/badges/image/tiredofit/db-backup.svg)](https://microbadger.com/images/tiredofit/db-backup)
# Introduction # Introduction
This will build a container for backing up multiple type of DB Servers This will build a container for backing up multiple type of DB Servers
Currently backs up CouchDB, InfluxDB, MySQL, MongoDB Postgres, Redis, Rethink servers. Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis, Rethink servers.
* dump to local filesystem * dump to local filesystem
* select database user and password * select database user and password
* backup all databases * backup all databases
* choose to have an MD5 sum after backup for verification * choose to have an MD5 sum after backup for verification
@@ -22,8 +21,9 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB Postgres, Redis, Rethink se
* connect to any container running on the same system * connect to any container running on the same system
* select how often to run a dump * select how often to run a dump
* select when to start the first dump, whether time of day or relative to container start time * select when to start the first dump, whether time of day or relative to container start time
* Execute script after backup for monitoring/alerting purposes
This Container uses Alpine:Edge as a base. * This Container uses a [customized Alpine Linux base](https://hub.docker.com/r/tiredofit/alpine) which includes [s6 overlay](https://github.com/just-containers/s6-overlay) enabled for PID 1 Init capabilities, [zabbix-agent](https://zabbix.org) for individual container monitoring, Cron also installed along with other tools (bash,curl, less, logrotate, nano, vim) for easier management. It also supports sending to external SMTP servers.
[Changelog](CHANGELOG.md) [Changelog](CHANGELOG.md)
@@ -40,9 +40,10 @@ This Container uses Alpine:Edge as a base.
- [Quick Start](#quick-start) - [Quick Start](#quick-start)
- [Configuration](#configuration) - [Configuration](#configuration)
- [Data Volumes](#data-volumes) - [Data Volumes](#data-volumes)
- [Environment Variables](#environmentvariables) - [Environment Variables](#environmentvariables)
- [Maintenance](#maintenance) - [Maintenance](#maintenance)
- [Shell Access](#shell-access) - [Shell Access](#shell-access)
- [Custom Scripts](#custom-scripts)
# Prerequisites # Prerequisites
@@ -51,12 +52,12 @@ You must have a working DB server or container available for this to work proper
# Installation # Installation
Automated builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and is the recommended Automated builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and is the recommended
method of installation. method of installation.
```bash ```bash
docker pull tiredofit/db-backup docker pull tiredofit/db-backup:latest
``` ```
# Quick Start # Quick Start
@@ -77,7 +78,8 @@ The following directories are used for configuration and can be mapped for persi
| Directory | Description | | Directory | Description |
|-----------|-------------| |-----------|-------------|
| `/backup` | Backups | | `/backup` | Backups |
| `/assets/custom-scripts` | *Optional* Put custom scripts in this directory to execute after backup operations |
## Environment Variables ## Environment Variables
@@ -87,11 +89,12 @@ Along with the Environment Variables from the [Base image](https://hub.docker.co
| Parameter | Description | | Parameter | Description |
|-----------|-------------| |-----------|-------------|
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, or none `NONE` - Default `GZ` | `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, or none `NONE` - Default `GZ`
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `rethink` | `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `rethink`
| `DB_SERVER` | Server Hostname e.g. `mariadb` | `DB_HOST` | Server Hostname e.g. `mariadb`
| `DB_NAME` | Schema Name e.g. `database` | `DB_NAME` | Schema Name e.g. `database`
| `DB_USER` | username for the database - use `root` to backup all MySQL of them. | `DB_USER` | username for the database - use `root` to backup all MySQL of them.
| `DB_PASS` | (optional if DB doesn't require it) password for the database | `DB_PASS` | (optional if DB doesn't require it) password for the database
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided
| `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. | `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day.
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats
| | Absolute HHMM, e.g. `2330` or `0415` | | Absolute HHMM, e.g. `2330` or `0415`
@@ -105,13 +108,39 @@ Along with the Environment Variables from the [Base image](https://hub.docker.co
## Maintenance ## Maintenance
Manual Backups can be perforemd by entering the container and typing `backup-now` Manual Backups can be performed by entering the container and typing `backup-now`
#### Shell Access #### Shell Access
For debugging and maintenance purposes you may want access the containers shell. For debugging and maintenance purposes you may want access the containers shell.
```bash ```bash
docker exec -it (whatever your container name is e.g.) db-backup bash docker exec -it (whatever your container name is e.g.) db-backup bash
``` ```
#### Custom Scripts
If you want to execute a custom script at the end of backup, you can drop bash scripts with the extension of `.sh` in this directory. See the following example to utilize:
````bash
$ cat post-script.sh
##!/bin/bash
## Example Post Script
## $1=DB_TYPE (Type of Backup)
## $2=DB_HOST (Backup Host)
## $3=DB_NAME (Name of Database backed up)
## $4=DATE (Date of Backup)
## $5=TIME (Time of Backup)
## $6=BACKUP_FILENAME (Filename of Backup)
## $7=FILESIZE (Filesize of backup)
## $8=MD5_RESULT (MD5Sum if enabled)
echo "${1} Backup Completed on ${2} for ${3} on ${4} ${5}. Filename: ${6} Size: ${7} bytes MD5: ${8}"
````
Outputs the following on the console:
`mysql Backup Completed on example-db for example on 2020-04-22 05:19:10. Filename: mysql_example_example-db_20200422-051910.sql.bz2 Size: 7795 bytes MD5: 952fbaafa30437494fdf3989a662cd40`
If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`

View File

@@ -6,6 +6,7 @@ services:
image: mariadb:latest image: mariadb:latest
volumes: volumes:
- ./db:/var/lib/mysql - ./db:/var/lib/mysql
- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment: environment:
- MYSQL_ROOT_PASSWORD=examplerootpassword - MYSQL_ROOT_PASSWORD=examplerootpassword
- MYSQL_DATABASE=example - MYSQL_DATABASE=example
@@ -19,7 +20,7 @@ services:
links: links:
- example-db - example-db
volumes: volumes:
- ./backups:/backups - ./backups:/backup
environment: environment:
- DB_TYPE=mariadb - DB_TYPE=mariadb
- DB_HOST=example-db - DB_HOST=example-db
@@ -32,7 +33,6 @@ services:
- MD5=TRUE - MD5=TRUE
- COMPRESSION=XZ - COMPRESSION=XZ
- SPLIT_DB=FALSE - SPLIT_DB=FALSE
restart: always restart: always

13
examples/post-script.sh Executable file
View File

@@ -0,0 +1,13 @@
##!/bin/bash
## Example Post Script
## $1=DB_TYPE (Type of Backup)
## $2=DB_HOST (Backup Host)
## $3=DB_NAME (Name of Database backed up)
## $4=DATE (Date of Backup)
## $5=TIME (Time of Backup)
## $6=BACKUP_FILENAME (Filename of Backup)
## $7=FILESIZE (Filesize of backup)
## $8=MD5_RESULT (MD5Sum if enabled)
echo "${1} Backup Completed on ${2} for ${3} on ${4} ${5}. Filename: ${6} Size: ${7} bytes MD5: ${8}"

View File

@@ -1,26 +1,19 @@
#!/usr/bin/with-contenv bash #!/usr/bin/with-contenv bash
for s in /assets/functions/*; do source $s; done
PROCESS_NAME="db-backup"
date >/dev/null date >/dev/null
if [ "$1" != "NOW" ]; then if [ "$1" != "NOW" ]; then
sleep 10 sleep 10
fi fi
### Set Debug Mode
if [ "$DEBUG_MODE" = "TRUE" ] || [ "$DEBUG_MODE" = "true" ]; then
set -x
fi
### Sanity Test ### Sanity Test
if [ ! -n "$DB_TYPE" ]; then sanity_var DB_TYPE "Database Type"
echo '** [db-backup] ERROR: No Database Type Selected! ' sanity_var DB_HOST "Database Host"
exit 1 file_env 'DB_USER'
fi file_env 'DB_PASS'
if [ ! -n "$DB_HOST" ]; then
echo '** [db-backup] ERROR: No Database Host Entered! '
exit 1
fi
### Set Defaults ### Set Defaults
COMPRESSION=${COMPRESSION:-GZ} COMPRESSION=${COMPRESSION:-GZ}
@@ -34,6 +27,7 @@ DBPASS=${DB_PASS}
DBUSER=${DB_USER} DBUSER=${DB_USER}
DBTYPE=${DB_TYPE} DBTYPE=${DB_TYPE}
MD5=${MD5:-TRUE} MD5=${MD5:-TRUE}
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SPLIT_DB=${SPLIT_DB:-FALSE} SPLIT_DB=${SPLIT_DB:-FALSE}
TMPDIR=/tmp/backups TMPDIR=/tmp/backups
@@ -74,7 +68,7 @@ fi
"mysql" | "MYSQL" | "mariadb" | "MARIADB") "mysql" | "MYSQL" | "mariadb" | "MARIADB")
DBTYPE=mysql DBTYPE=mysql
DBPORT=${DB_PORT:-3306} DBPORT=${DB_PORT:-3306}
[[ ( -n "${DB_PASS}" ) ]] && MYSQL_PASS_STR=" -p${DBPASS}" [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DBPASS}
;; ;;
"postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" ) "postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" )
DBTYPE=pgsql DBTYPE=pgsql
@@ -97,7 +91,7 @@ fi
### Functions ### Functions
function backup_couch() { function backup_couch() {
TARGET=couch_${DBNAME}_${DBHOST}_${now}.txt TARGET=couch_${DBNAME}_${DBHOST}_${now}.txt
curl -X GET http://${DBHOST}:${DBPORT}/${DBNAME}/ all docs? include docs=true >${TMPDIR}/${TARGET} curl -X GET http://${DBHOST}:${DBPORT}/${DBNAME}/_all_docs?include_docs=true >${TMPDIR}/${TARGET}
generate_md5 generate_md5
compression compression
move_backup move_backup
@@ -105,20 +99,20 @@ function backup_couch() {
function backup_mysql() { function backup_mysql() {
if [ "$SPLIT_DB" = "TRUE" ] || [ "$SPLIT_DB" = "true" ]; then if [ "$SPLIT_DB" = "TRUE" ] || [ "$SPLIT_DB" = "true" ]; then
DATABASES=`mysql -h $DBHOST -u$DBUSER -p$DBPASS --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema` DATABASES=`mysql -h ${DBHOST} -P $DBPORT -u$DBUSER --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema`
for db in $DATABASES; do for db in $DATABASES; do
if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
echo "** [db-backup] Dumping database: $db" echo "** [db-backup] Dumping database: $db"
TARGET=mysql_${db}_${DBHOST}_${now}.sql TARGET=mysql_${db}_${DBHOST}_${now}.sql
mysqldump --max-allowed-packet=512M -h $DBHOST -u$DBUSER ${MYSQL_PASS_STR} --databases $db > ${TMPDIR}/${TARGET} mysqldump --max-allowed-packet=512M -h $DBHOST -P $DBPORT -u$DBUSER --databases $db > ${TMPDIR}/${TARGET}
generate_md5 generate_md5
compression compression
move_backup move_backup
fi fi
done done
else else
mysqldump --max-allowed-packet=512M -A -h $DBHOST -u$DBUSER ${MYSQL_PASS_STR} > ${TMPDIR}/${TARGET} mysqldump --max-allowed-packet=512M -A -h $DBHOST -P $DBPORT -u$DBUSER > ${TMPDIR}/${TARGET}
generate_md5 generate_md5
compression compression
move_backup move_backup
@@ -149,7 +143,7 @@ function backup_pgsql() {
export PGPASSWORD=${DBPASS} export PGPASSWORD=${DBPASS}
DATABASES=`psql -h $DBHOST -U $DBUSER -p ${DBPORT} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' ` DATABASES=`psql -h $DBHOST -U $DBUSER -p ${DBPORT} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' `
for db in $DATABASES; do for db in $DATABASES; do
echo "** [db-backup] Dumping database: $db" print_info "Dumping database: $db"
TARGET=pgsql_${db}_${DBHOST}_${now}.sql TARGET=pgsql_${db}_${DBHOST}_${now}.sql
pg_dump -h ${DBHOST} -p ${DBPORT} -U ${DBUSER} $db > ${TMPDIR}/${TARGET} pg_dump -h ${DBHOST} -p ${DBPORT} -U ${DBUSER} $db > ${TMPDIR}/${TARGET}
generate_md5 generate_md5
@@ -168,17 +162,17 @@ function backup_pgsql() {
function backup_redis() { function backup_redis() {
TARGET=redis_${db}_${DBHOST}_${now}.rdb TARGET=redis_${db}_${DBHOST}_${now}.rdb
echo bgsave | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} --rdb ${TMPDIR}/${TARGET} echo bgsave | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} --rdb ${TMPDIR}/${TARGET}
echo "** [db-backup] Dumping Redis - Flushing Redis Cache First" print_info "Dumping Redis - Flushing Redis Cache First"
sleep 10 sleep 10
try=5 try=5
while [ $try -gt 0 ] ; do while [ $try -gt 0 ] ; do
saved=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}') saved=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}') ok=$(echo 'info Persistence' | redis-cli -h ${DBHOST} -p ${DBPORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
echo "** [db-backup] Redis Backup Complete" print_info "Redis Backup Complete"
fi fi
try=$((try - 1)) try=$((try - 1))
echo "** [db-backup] Redis Busy - Waiting and retrying in 5 seconds" print_info "Redis Busy - Waiting and retrying in 5 seconds"
sleep 5 sleep 5
done done
generate_md5 generate_md5
@@ -188,11 +182,83 @@ function backup_redis() {
function backup_rethink() { function backup_rethink() {
TARGET=rethink_${db}_${DBHOST}_${now}.tar.gz TARGET=rethink_${db}_${DBHOST}_${now}.tar.gz
echo "** [db-backup] Dumping rethink Database: $db" print_info "Dumping rethink Database: $db"
rethinkdb dump -f ${TMPDIR}/${TARGET} -c ${DBHOST}:${DBPORT} ${RETHINK_PASS_STR} ${RETHINK_DB_STR} rethinkdb dump -f ${TMPDIR}/${TARGET} -c ${DBHOST}:${DBPORT} ${RETHINK_PASS_STR} ${RETHINK_DB_STR}
move_backup move_backup
} }
function check_availability() {
### Set the Database Type
case "$DBTYPE" in
"couch" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
sleep 5
let COUNTER+=5
print_warn "CouchDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"influx" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
sleep 5
let COUNTER+=5
print_warn "InfluxDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"mongo" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
sleep 5
let COUNTER+=5
print_warn "Mongo Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"mysql" )
COUNTER=0
while true; do
mysqlcmd='mysql -u'${DBUSER}' -P '${DBPORT}' -h '${DBHOST}' -p'${DBPASS}
out="`$mysqlcmd -e "SELECT COUNT(*) FROM information_schema.FILES;" 2>&1`"
echo "$out" | grep -E "COUNT|Enter" 2>&1 > /dev/null
if [ $? -eq 0 ]; then
:
break
fi
print_warn "MySQL/MariaDB Server "$DBHOST" is not accessible, retrying.. ($COUNTER seconds so far)"
sleep 5
let COUNTER+=5
done
;;
"pgsql" )
# Wait until postgres reports it's ready
COUNTER=0
export PGPASSWORD=${DBPASS}
until pg_isready --dbname=${DBNAME} --host=${DBHOST} --port=${DBPORT} --username=${DBUSER} -q
do
sleep 5
let COUNTER+=5
print_warn "Postgres Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"redis" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
sleep 5
let COUNTER+=5
print_warn "Redis Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"rethink" )
COUNTER=0
while ! (nc -z ${DBHOST} ${DBPORT}) ; do
sleep 5
let COUNTER+=5
print_warn "RethinkDB Host '"$DBHOST"' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
esac
}
function compression() { function compression() {
case "$COMPRESSION" in case "$COMPRESSION" in
"GZ" | "gz" | "gzip" | "GZIP") "GZ" | "gz" | "gzip" | "GZIP")
@@ -216,6 +282,7 @@ function generate_md5() {
if [ "$MD5" = "TRUE" ] || [ "$MD5" = "true" ] ; then if [ "$MD5" = "TRUE" ] || [ "$MD5" = "true" ] ; then
cd $TMPDIR cd $TMPDIR
md5sum ${TARGET} > ${TARGET}.md5 md5sum ${TARGET} > ${TARGET}.md5
MD5VALUE=$(md5sum ${TARGET} | awk '{ print $1}')
fi fi
} }
@@ -223,11 +290,27 @@ function move_backup() {
mkdir -p ${DB_DUMP_TARGET} mkdir -p ${DB_DUMP_TARGET}
mv ${TMPDIR}/*.md5 ${DB_DUMP_TARGET}/ mv ${TMPDIR}/*.md5 ${DB_DUMP_TARGET}/
mv ${TMPDIR}/${TARGET} ${DB_DUMP_TARGET}/${TARGET} mv ${TMPDIR}/${TARGET} ${DB_DUMP_TARGET}/${TARGET}
case "$SIZE_VALUE" in
"b" | "bytes" )
SIZE_VALUE=1
;;
"[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
SIZE_VALUE="-h"
;;
*)
SIZE_VALUE=1
;;
esac
if [ "$SIZE_VALUE" = "1" ] ; then
FILESIZE=$(stat -c%s "${DB_DUMP_TARGET}/${TARGET}")
else
FILESIZE=$(du -h "${DB_DUMP_TARGET}/${TARGET}" | awk '{ print $1}')
fi
} }
### Container Startup ### Container Startup
echo '** [db-backup] Initialized at at '$(date) print_info "Initialized on `date`"
### Wait for Next time to start backup ### Wait for Next time to start backup
current_time=$(date +"%s") current_time=$(date +"%s")
@@ -253,37 +336,46 @@ echo '** [db-backup] Initialized at at '$(date)
### Define Target name ### Define Target name
now=$(date +"%Y%m%d-%H%M%S") now=$(date +"%Y%m%d-%H%M%S")
now_time=$(date +"%H:%M:%S")
now_date=$(date +"%Y-%m-%d")
TARGET=${DBTYPE}_${DBNAME}_${DBHOST}_${now}.sql TARGET=${DBTYPE}_${DBNAME}_${DBHOST}_${now}.sql
### Take a Dump ### Take a Dump
case "$DBTYPE" in case "$DBTYPE" in
"couch" ) "couch" )
check_availability
backup_couch backup_couch
;; ;;
"influx" ) "influx" )
check_availability
backup_influx backup_influx
;; ;;
"mysql" ) "mysql" )
check_availability
backup_mysql backup_mysql
;; ;;
"mongo" ) "mongo" )
check_availability
backup_mongo backup_mongo
;; ;;
"pgsql" ) "pgsql" )
check_availability
backup_pgsql backup_pgsql
;; ;;
"redis" ) "redis" )
check_availability
backup_redis backup_redis
;; ;;
"rethink" ) "rethink" )
check_availability
backup_rethink backup_rethink
;; ;;
esac esac
### Zabbix ### Zabbix
if [ "$ENABLE_ZABBIX" = "TRUE" ] || [ "$ENABLE_ZABBIX" = "true" ]; then if [ "$ENABLE_ZABBIX" = "TRUE" ] || [ "$ENABLE_ZABBIX" = "true" ]; then
zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${TARGET}` silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${TARGET}`
zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${TARGET} +'%s'` silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${TARGET} +'%s'`
fi fi
### Automatic Cleanup ### Automatic Cleanup
@@ -291,6 +383,17 @@ echo '** [db-backup] Initialized at at '$(date)
find $DB_DUMP_TARGET/ -mmin +$DB_CLEANUP_TIME -iname "$DBTYPE_$DBNAME_*.*" -exec rm {} \; find $DB_DUMP_TARGET/ -mmin +$DB_CLEANUP_TIME -iname "$DBTYPE_$DBNAME_*.*" -exec rm {} \;
fi fi
### Post Backup Custom Script Support
if [ -d /assets/custom-scripts/ ] ; then
print_info "Found Custom Scripts to Execute"
for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
print_info "Running Script ${f}"
## script DB_TYPE DB_HOST DB_NAME DATE TIME BACKUP_FILENAME FILESIZE MD5_VALUE
chmod +x ${f}
${f} "${DBTYPE}" "${DBHOST}" "${DBNAME}" "${now_date}" "${now_time}" "${TARGET}" "${FILESIZE}" "${MD5VALUE}"
done
fi
### Go back to Sleep until next Backup time ### Go back to Sleep until next Backup time
if [ "$MANUAL" = "TRUE" ]; then if [ "$MANUAL" = "TRUE" ]; then
exit 1; exit 1;