Compare commits


26 Commits
2.1.1 ... 2.6.0

Author | SHA1 | Message | Date
Dave Conroy | d7d4f1cc19 | Merge branch 'master' of https://github.com/tiredofit/docker-db-backup | 2021-02-19 08:33:48 -08:00
Dave Conroy | c8c9a80533 | Release 2.6.0 - See CHANGELOG.md | 2021-02-19 08:33:43 -08:00
Dave Conroy | 018234b9bc | Merge pull request #56 from tpansino/add-sqlite-support (Add sqlite support) | 2021-02-19 08:32:52 -08:00
Tom Pansino | 912e60edd8 | Exit on failed file checks | 2021-02-18 00:35:06 -08:00
Tom Pansino | 46fddb533c | Add sqlite3 to README | 2021-02-18 00:09:17 -08:00
Tom Pansino | e8a1859d1a | Add initial sqlite3 support | 2021-02-17 23:55:22 -08:00
Dave Conroy | 30fe2f181c | #55 - Fix xz parallel compression | 2021-02-14 09:06:21 -08:00
Dave Conroy | f57ce461e9 | GitHub CI | 2021-01-25 17:05:25 -08:00
Dave Conroy | 34aab69cc2 | Release 2.5.0 - See CHANGELOG.md | 2021-01-25 16:39:42 -08:00
Dave Conroy | 1930358775 | Multi Arch CI | 2021-01-21 15:35:16 -08:00
Dave Conroy | f207f375cc | Release 2.4.0 - See CHANGELOG.md | 2020-12-07 15:27:20 -08:00
Dave Conroy | 88b58bffc5 | Release 2.3.2 - See CHANGELOG.md | 2020-11-14 12:37:58 -08:00
Dave Conroy | 738f7fad25 | Release 2.3.1 - See CHANGELOG.md | 2020-11-11 13:45:05 -08:00
Dave Conroy | 8c4733bf7f | Merge pull request #52 from bambi73/master (#51 Fix backup of multiple InfluxDB databases failure) | 2020-11-11 13:43:56 -08:00
Bambi125 | be4d8c0747 | #51 Fix backup of multiple InfluxDB databases failure | 2020-11-11 22:38:04 +01:00
Dave Conroy | a13849df0a | Release 2.3.0 - See CHANGELOG.md | 2020-10-15 08:15:10 -07:00
Dave Conroy | cb5347afe5 | Release 2.2.2 - See CHANGELOG.md | 2020-09-22 21:14:37 -07:00
Dave Conroy | ca03c5369d | Merge pull request #47 from tpansino/bug/46-fix-docker-secrets (Fix Docker Secrets injection from DB_USER_FILE/DB_PASS_FILE) | 2020-09-22 21:02:05 -07:00
Tom Pansino | 3008d9125f | Fix Docker Secrets injection from DB_USER_FILE/DB_PASS_FILE | 2020-09-22 20:32:09 -07:00
Dave Conroy | 19cf3d007f | Release 2.2.1 - See CHANGELOG.md | 2020-09-17 21:39:27 -07:00
Dave Conroy | 0bbf142349 | Merge pull request #45 from alwynpan/fix-backup-now-date-error-message (Fix backup now date error message) | 2020-09-17 21:38:10 -07:00
Yao (Alwyn) Pan | 1bc357866f | #42 Update README | 2020-09-18 14:34:06 +10:00
Yao (Alwyn) Pan | b38ad7a5cc | #44 Remove 'invalid date' error message when performing backup-now | 2020-09-18 14:32:08 +10:00
Dave Conroy | 8bc02ee6c8 | Release 2.2.0 - See CHANGELOG.md | 2020-09-14 07:07:44 -07:00
Dave Conroy | 3e71c377c6 | Merge pull request #43 from alwynpan/fix-optional-vars (#42 Make DB_USER and DB_PASS optional for some dbtypes; update alpine repo URI) | 2020-09-14 07:05:29 -07:00
Yao (Alwyn) Pan | 76a857239f | #42 Make DB_USER and DB_PASS optional for some dbtypes; update alpine repo URI | 2020-09-14 19:22:57 +10:00
5 changed files with 389 additions and 115 deletions

.github/workflows/main.yml (new file, 110 additions)

@@ -0,0 +1,110 @@
### Application Level Image CI
### Dave Conroy <dave at tiredofit dot ca>
name: 'Build Images'

on:
  push:
    paths:
      - '**'
      - '!README.md'

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Prepare
        id: prep
        run: |
          DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
          set -x
          if [[ $GITHUB_REF == refs/heads/* ]]; then
            if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
              BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
            else
              BRANCH=${GITHUB_REF#refs/heads/}
            fi

            case ${BRANCH} in
              "main" | "master" )
                BRANCHTAG="${DOCKER_IMAGE}:latest"
                ;;
              "develop" )
                BRANCHTAG="${DOCKER_IMAGE}:develop"
                ;;
              * )
                if [ -n "${{ secrets.LATEST }}" ] ; then
                  if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
                    BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
                  else
                    BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
                  fi
                else
                  BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
                fi
                ;;
            esac
          fi

          if [[ $GITHUB_REF == refs/tags/* ]]; then
            GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
          fi

          if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
            TAGS=${BRANCHTAG},${GITTAG}
          else
            TAGS="${BRANCHTAG}${GITTAG}"
          fi

          echo ::set-output name=tags::${TAGS}
          echo ::set-output name=docker_image::${DOCKER_IMAGE}

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
        with:
          platforms: all

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to DockerHub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Label
        id: Label
        run: |
          if [ -f "Dockerfile" ] ; then
            sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
            sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
            sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
            sed -i "/FROM .*/a LABEL tiredofit.image.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
            if [ -f "CHANGELOG.md" ] ; then
              sed -i "/FROM .*/a LABEL tiredofit.image.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
            fi
            if [[ $GITHUB_REF == refs/tags/* ]]; then
              sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
            fi
            if [[ $GITHUB_REF == refs/heads/* ]]; then
              sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
            fi
          fi

      - name: Build
        uses: docker/build-push-action@v2
        with:
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64
          push: true
          tags: ${{ steps.prep.outputs.tags }}
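
As a side note on the Prepare step above: its shell logic derives the Docker Hub tags from the Git ref. A minimal stand-alone sketch of that derivation follows; the repository and ref values are hard-coded assumptions for illustration, not output from a real CI run.

#!/usr/bin/env bash
# Hypothetical, self-contained sketch of the tag derivation used in the Prepare step.
# GITHUB_REPOSITORY and GITHUB_REF are example values, not taken from an actual workflow run.
GITHUB_REPOSITORY="tiredofit/docker-db-backup"
GITHUB_REF="refs/tags/2.6.0"

DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}          # -> tiredofit/db-backup
if [[ $GITHUB_REF == refs/tags/* ]]; then
  TAGS="${DOCKER_IMAGE}:${GITHUB_REF#refs/tags/}"   # -> tiredofit/db-backup:2.6.0
fi
echo "${TAGS}"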

CHANGELOG.md

@@ -1,3 +1,67 @@
## 2.6.0 2021-02-19 <tpansino@github>
### Added
- SQLite support
## 2.5.1 2021-02-14 <dave at tiredofit dot ca>
### Changed
- Fix xz backups with `PARALLEL_COMPRESSION=TRUE`
## 2.5.0 2021-01-25 <dave at tiredofit dot ca>
### Added
- Multi Platform Build Variants (ARMv7, AMD64, AArch64)
### Changed
- Alpine 3.13 Base
- Compile Pixz as opposed to relying on testing repository
- MSSQL Support only available under AMD64. Container exits if any other platform detected when MSSQL set to be backed up.
## 2.4.0 2020-12-07 <dave at tiredofit dot ca>
### Added
- Switch back to packages for Postgresql (now 13.1)
## 2.3.2 2020-11-14 <dave at tiredofit dot ca>
### Changed
- Reapply S6-Overlay into filesystem as Postgresql build is removing S6 files due to edge containing S6 overlay
## 2.3.1 2020-11-11 <bambi73@github>
### Fixed
- Multiple InfluxDB databases not being backed up correctly
## 2.3.0 2020-10-15 <dave at tiredofit dot ca>
### Added
- Microsoft SQL Server support (experimental)
### Changed
- Compiled Postgresql 13 from source to backup psql/13 hosts
## 2.2.2 2020-09-22 <tpansino@github>
### Fixed
- Patch for 2.2.0 release fixing Docker Secrets Support. Was skipping password check.
## 2.2.1 2020-09-17 <alwynpan@github>
### Fixed
- On-demand/manual backup with `backup-now` was throwing 'invalid date' errors
## 2.2.0 2020-09-14 <alwynpan@github>
### Fixed
- Allow MariaDB and MongoDB to be used with no username and password while still supporting Docker Secrets
- Changed source of Alpine package repositories
## 2.1.1 2020-09-01 <zicklag@github>
### Fixed
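
For context on the `backup-now` command referenced in the 2.2.1 entry above: it triggers an immediate, on-demand backup inside a running container. A minimal usage sketch, assuming a container named `db-backup` and that `backup-now` is available on the container's PATH (both assumptions for illustration):

# Trigger an immediate backup inside an already-running container (illustrative name).
docker exec db-backup backup-now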

Dockerfile

@@ -1,27 +1,28 @@
-FROM tiredofit/alpine:edge
+FROM tiredofit/alpine:3.13
 LABEL maintainer="Dave Conroy (dave at tiredofit dot ca)"
 ### Set Environment Variables
-ENV ENABLE_CRON=FALSE \
+ENV MSSQL_VERSION=17.5.2.1-1 \
+    ENABLE_CRON=FALSE \
     ENABLE_SMTP=FALSE \
     ENABLE_ZABBIX=TRUE \
     ZABBIX_HOSTNAME=db-backup
 ### Dependencies
 RUN set -ex && \
-    echo "@testing http://nl.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories && \
     apk update && \
     apk upgrade && \
     apk add -t .db-backup-build-deps \
         build-base \
         bzip2-dev \
         git \
+        libarchive-dev \
         xz-dev \
         && \
     \
-    apk add -t .db-backup-run-deps \
+    apk add --no-cache -t .db-backup-run-deps \
         bzip2 \
         influxdb \
+        libarchive \
         mariadb-client \
         mongodb-tools \
         libressl \
@@ -29,19 +30,33 @@ RUN set -ex && \
         postgresql \
         postgresql-client \
         redis \
+        sqlite \
         xz \
         zstd \
         && \
     \
-    apk add \
-        pixz@testing \
-        && \
+    cd /usr/src && \
     \
+    apkArch="$(apk --print-arch)"; \
+    case "$apkArch" in \
+        x86_64) mssql=true ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ;; \
+        *) echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ;; \
+    esac; \
     mkdir -p /usr/src/pbzip2 && \
-    curl -ssL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
+    curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
     cd /usr/src/pbzip2 && \
     make && \
     make install && \
+    mkdir -p /usr/src/pixz && \
+    curl -sSL https://github.com/vasi/pixz/releases/download/v1.0.7/pixz-1.0.7.tar.xz | tar xvfJ - --strip 1 -C /usr/src/pixz && \
+    cd /usr/src/pixz && \
+    ./configure \
+        --prefix=/usr \
+        --sysconfdir=/etc \
+        --localstatedir=/var \
+        && \
+    make && \
+    make install && \
     \
     ### Cleanup
     apk del .db-backup-build-deps && \

README.md

@@ -32,6 +32,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 ## Table of Contents
+- [hub.docker.com/r/tiredofit/db-backup](#hubdockercomrtiredofitdb-backup)
 - [Introduction](#introduction)
 - [Authors](#authors)
 - [Table of Contents](#table-of-contents)
@@ -90,8 +91,6 @@ The following directories are used for configuration and can be mapped for persi
 ### Environment Variables
-*If you are trying to backup a database that doesn't have a user or a password (you should!) make sure you set `CONTAINER_ENABLE_DOCKER_SECRETS=FALSE`*
 Along with the Environment Variables from the [Base image](https://hub.docker.com/r/tiredofit/alpine), below is the complete list of available options that can be used to customize your installation.
 | Parameter | Description |
@@ -99,8 +98,8 @@ Along with the Environment Variables from the [Base image](https://hub.docker.co
 | `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi - Default `FILESYSTEM` |
 | `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` - Default `GZ` |
 | `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - Default `3` |
-| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` |
+| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` |
-| `DB_HOST` | Server Hostname e.g. `mariadb` |
+| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` |
 | `DB_NAME` | Schema Name e.g. `database` |
 | `DB_USER` | username for the database - use `root` to backup all MySQL of them. |
 | `DB_PASS` | (optional if DB doesn't require it) password for the database |
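
To illustrate the new `sqlite3` options in the table above, here is a minimal, hypothetical `docker run` invocation. The host paths, mount points, and container name are assumptions for illustration; only variables documented here are used, and backups land in `/backup` (the documented `DB_DUMP_TARGET` default).

# Hypothetical example: back up a SQLite database file mounted into the container.
# Host paths, mount points, and the container name are illustrative assumptions;
# for sqlite3 the DB_HOST variable holds the full path to the database file.
docker run -d --name db-backup \
  -e DB_TYPE=sqlite3 \
  -e DB_HOST=/db/data.sqlite3 \
  -e COMPRESSION=GZ \
  -v /srv/app/data.sqlite3:/db/data.sqlite3 \
  -v /srv/backups:/backup \
  tiredofit/db-backup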

(backup script)

@@ -14,8 +14,56 @@ fi
 sanity_var DB_TYPE "Database Type"
 sanity_var DB_HOST "Database Host"
-file_env 'DB_USER'
-file_env 'DB_PASS'
+### Set the Database Type
+dbtype=${DB_TYPE}
+case "$dbtype" in
+    "couch" | "couchdb" | "COUCH" | "COUCHDB" )
+        dbtype=couch
+        dbport=${DB_PORT:-5984}
+        file_env 'DB_USER'
+        file_env 'DB_PASS'
+    ;;
+    "influx" | "influxdb" | "INFLUX" | "INFLUXDB" )
+        dbtype=influx
+        dbport=${DB_PORT:-8088}
+        file_env 'DB_USER'
+        file_env 'DB_PASS'
+    ;;
+    "mongo" | "mongodb" | "MONGO" | "MONGODB" )
+        dbtype=mongo
+        dbport=${DB_PORT:-27017}
+        [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
+        [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+    ;;
+    "mysql" | "MYSQL" | "mariadb" | "MARIADB")
+        dbtype=mysql
+        dbport=${DB_PORT:-3306}
+        [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+    ;;
+    "mssql" | "MSSQL" | "microsoftsql" | "MICROSOFTSQL")
+        apkArch="$(apk --print-arch)"; \
+        case "$apkArch" in
+            x86_64) mssql=true ;;
+            *) print_error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;;
+        esac
+        dbtype=mssql
+        dbport=${DB_PORT:-1433}
+    ;;
+    "postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" )
+        dbtype=pgsql
+        dbport=${DB_PORT:-5432}
+        [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+    ;;
+    "redis" | "REDIS" )
+        dbtype=redis
+        dbport=${DB_PORT:-6379}
+        [[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
+    ;;
+    "sqlite" | "sqlite3" | "SQLITE" | "SQLITE3" )
+        dbtype=sqlite3
+    ;;
+esac

 ### Set Defaults
 BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
@@ -27,7 +75,6 @@ DB_DUMP_TARGET=${DB_DUMP_TARGET:-/backup}
 dbhost=${DB_HOST}
 dbname=${DB_NAME}
 dbpass=${DB_PASS}
-dbtype=${DB_TYPE}
 dbuser=${DB_USER}
 MD5=${MD5:-TRUE}
 PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
@@ -65,36 +112,20 @@ else
     zstd="zstd --rm -${COMPRESSION_LEVEL}"
 fi
-### Set the Database Type
+### Set the Database Authentication Details
 case "$dbtype" in
-    "couch" | "couchdb" | "COUCH" | "COUCHDB" )
-        dbtype=couch
-        dbport=${DB_PORT:-5984}
-    ;;
-    "influx" | "influxdb" | "INFLUX" | "INFLUXDB" )
-        dbtype=influx
-        dbport=${DB_PORT:-8088}
-    ;;
-    "mongo" | "mongodb" | "MONGO" | "MONGODB" )
-        dbtype=mongo
-        dbport=${DB_PORT:-27017}
+    "mongo" )
         [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
         [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
         [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
     ;;
-    "mysql" | "MYSQL" | "mariadb" | "MARIADB")
-        dbtype=mysql
-        dbport=${DB_PORT:-3306}
+    "mysql" )
         [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
     ;;
-    "postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" )
-        dbtype=pgsql
-        dbport=${DB_PORT:-5432}
+    "postgres" )
         [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
     ;;
-    "redis" | "REDIS" )
-        dbtype=redis
-        dbport=${DB_PORT:-6379}
+    "redis" )
         [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
     ;;
 esac
@@ -109,6 +140,42 @@ backup_couch() {
     move_backup
 }

+backup_influx() {
+    if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
+        :
+    else
+        print_notice "Compressing InfluxDB backup with gzip"
+        influx_compression="-portable"
+    fi
+    for DB in $DB_NAME; do
+        target=influx_${DB}_${dbhost}_${now}
+        influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${tmpdir}/${target}
+        exit_code=$?
+        generate_md5
+        move_backup
+    done
+}
+
+backup_mongo() {
+    if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
+        target=${dbtype}_${dbname}_${dbhost}_${now}.archive
+    else
+        print_notice "Compressing MongoDB backup with gzip"
+        target=${dbtype}_${dbname}_${dbhost}_${now}.archivegz
+        mongo_compression="--gzip"
+    fi
+    mongodump --archive=${tmpdir}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+    exit_code=$?
+    cd ${tmpdir}
+    generate_md5
+    move_backup
+}
+
+backup_mssql() {
+    target=mssql_${dbname}_${dbhost}_${now}.bak
+    /opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} -Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${tmpdir}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+}
+
 backup_mysql() {
     if var_true "$SPLIT_DB" ; then
         DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema)
@@ -133,36 +200,6 @@ backup_mysql() {
     fi
 }

-backup_influx() {
-    if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
-        :
-    else
-        print_notice "Compressing InfluxDB backup with gzip"
-        influx_compression="-portable"
-    fi
-    for DB in $DB_NAME; do
-        influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${tmpdir}/${target}
-        exit_code=$?
-        generate_md5
-        move_backup
-    done
-}
-
-backup_mongo() {
-    if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then
-        target=${dbtype}_${dbname}_${dbhost}_${now}.archive
-    else
-        print_notice "Compressing MongoDB backup with gzip"
-        target=${dbtype}_${dbname}_${dbhost}_${now}.archivegz
-        mongo_compression="--gzip"
-    fi
-    mongodump --archive=${tmpdir}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
-    exit_code=$?
-    cd ${tmpdir}
-    generate_md5
-    move_backup
-}
-
 backup_pgsql() {
     if var_true $SPLIT_DB ; then
         export PGPASSWORD=${dbpass}
@@ -207,6 +244,22 @@ backup_redis() {
     move_backup
 }

+backup_sqlite3() {
+    db=$(basename "$dbhost")
+    db="${db%.*}"
+    target=sqlite3_${db}_${now}.sqlite3
+    compression
+    print_info "Dumping sqlite3 database: ${dbhost}"
+    sqlite3 "${dbhost}" ".backup '${tmpdir}/backup.sqlite3'"
+    exit_code=$?
+    cat "${tmpdir}/backup.sqlite3" | $dumpoutput > "${tmpdir}/${target}"
+    generate_md5
+    move_backup
+}
+
 check_availability() {
     ### Set the Database Type
     case "$dbtype" in
@@ -249,6 +302,14 @@ check_availability() {
             (( COUNTER+=5 ))
         done
     ;;
+    "mssql" )
+        COUNTER=0
+        while ! (nc -z ${dbhost} ${dbport}) ; do
+            sleep 5
+            (( COUNTER+=5 ))
+            print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+        done
+    ;;
     "pgsql" )
         COUNTER=0
         export PGPASSWORD=${dbpass}
@@ -267,6 +328,21 @@ check_availability() {
print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done done
;; ;;
"sqlite3" )
if [[ ! -e "${dbhost}" ]]; then
print_error "File '${dbhost}' does not exist."
exit_code=2
exit $exit_code
elif [[ ! -f "${dbhost}" ]]; then
print_error "File '${dbhost}' is not a file."
exit_code=2
exit $exit_code
elif [[ ! -r "${dbhost}" ]]; then
print_error "File '${dbhost}' is not readable."
exit_code=2
exit $exit_code
fi
;;
esac esac
} }
@@ -375,6 +451,7 @@ move_backup() {
 print_debug "Backup routines Initialized on $(date)"
 ### Wait for Next time to start backup
+if [ "$1" != "NOW" ]; then
 current_time=$(date +"%s")
 today=$(date +"%Y%m%d")
@@ -390,6 +467,7 @@ print_debug "Backup routines Initialized on $(date)"
 print_notice "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
 sleep $waittime
+fi
 ### Commence Backup
@@ -413,6 +491,10 @@ print_debug "Backup routines Initialized on $(date)"
         check_availability
         backup_influx
     ;;
+    "mssql" )
+        check_availability
+        backup_mssql
+    ;;
     "mysql" )
         check_availability
         backup_mysql
@@ -429,6 +511,10 @@ print_debug "Backup routines Initialized on $(date)"
         check_availability
         backup_redis
     ;;
+    "sqlite3" )
+        check_availability
+        backup_sqlite3
+    ;;
     esac
 ### Zabbix