Compare commits


2 Commits
3.7.1 ... 3.5.3

Author SHA1 Message Date
dave@tiredofit.ca 9159783691 Release 3.5.3 - See CHANGELOG.md 2022-10-12 08:42:05 -07:00
dave@tiredofit.ca 7e5e9d308d Release 3.5.3 - See CHANGELOG.md 2022-10-12 08:39:31 -07:00
7 changed files with 56 additions and 140 deletions

View File

@@ -19,6 +19,7 @@ jobs:
id: prep
run: |
DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
set -x
if [[ $GITHUB_REF == refs/heads/* ]]; then
if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"

View File

@@ -19,6 +19,7 @@ jobs:
id: prep
run: |
DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
set -x
if [[ $GITHUB_REF == refs/heads/* ]]; then
if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"

View File

@@ -1,52 +1,3 @@
## 3.7.1 2022-12-19 <dave at tiredofit dot ca>
### Changed
- Add MYSQL_ENABLE_TLS environment variable to switch TLS on and off
### Reverted
- Set default for MYSQL_TLS_CA_FILE to accommodate most use cases
## 3.7.0 2022-12-16 <dave at tiredofit dot ca>
### Added
- Introduce support for connecting via TLS to MySQL / MariaDB Hosts with MYSQL_TLS_* variables - See README for more details
### Changed
- Fix for cleaning up filesystems that are syncing to Azure via blobxfer
## 3.6.1 2022-11-23 <dave at tiredofit dot ca>
### Added
- Switch to Alpine 3.17 base
- Switch to OpenSSL instead of LibreSSL
## 3.6.0 2022-11-21 <dave at tiredofit dot ca>
### Added
- Postgresql 15 Support
## 3.5.6 2022-11-15 <dave at tiredofit dot ca>
### Changed
- Add failure if DB_TYPE empty or malformed
## 3.5.5 2022-10-18 <dave at tiredofit dot ca>
### Changed
- Fix for S3 backups and trailing slashes (@greena13)
## 3.5.4 2022-10-13 <dave at tiredofit dot ca>
### Changed
- Fix for Influx DB 1 backups when compression enabled
## 3.5.3 2022-10-12 <dave at tiredofit dot ca>
### Changed

View File

@@ -1,7 +1,8 @@
FROM docker.io/tiredofit/alpine:3.17
FROM docker.io/tiredofit/alpine:3.16
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables
ENV INFLUX2_VERSION=2.4.0 \
MSSQL_VERSION=18.0.1.1-1 \
CONTAINER_ENABLE_MESSAGING=FALSE \
@@ -11,8 +12,7 @@ ENV INFLUX2_VERSION=2.4.0 \
IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
### Dependencies
RUN source /assets/functions/00-container && \
set -ex && \
RUN set -ex && \
apk update && \
apk upgrade && \
apk add -t .db-backup-build-deps \
@@ -20,7 +20,7 @@ RUN source /assets/functions/00-container && \
bzip2-dev \
git \
libarchive-dev \
openssl-dev \
libressl-dev \
libffi-dev \
python3-dev \
py3-pip \
@@ -35,10 +35,10 @@ RUN source /assets/functions/00-container && \
mariadb-client \
mariadb-connector-c \
mongodb-tools \
openssl \
libressl \
pigz \
postgresql15 \
postgresql15-client \
postgresql \
postgresql-client \
pv \
py3-cryptography \
redis \
@@ -83,4 +83,4 @@ RUN source /assets/functions/00-container && \
rm -rf /root/.cache /tmp/* /var/cache/apk/*
### S6 Setup
COPY install /
ADD install /

View File

@@ -1,7 +1,7 @@
# github.com/tiredofit/docker-db-backup
[![GitHub release](https://img.shields.io/github/v/tag/tiredofit/docker-db-backup?style=flat-square)](https://github.com/tiredofit/docker-db-backup/releases/latest)
[![Build Status](https://img.shields.io/github/actions/workflow/status/tiredofit/docker-db-backup/main.yml?branch=main&style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions)
[![Build Status](https://img.shields.io/github/workflow/status/tiredofit/docker-db-backup/build?style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions?query=workflow%3Abuild)
[![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
[![Docker Pulls](https://img.shields.io/docker/pulls/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
[![Become a sponsor](https://img.shields.io/badge/sponsor-tiredofit-181717.svg?logo=github&style=flat-square)](https://github.com/sponsors/tiredofit)
@@ -53,7 +53,6 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
- [Scheduling Options](#scheduling-options)
- [Backup Options](#backup-options)
- [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
- [Upload to an Azure storage account by `blobxfer`](#upload-to-a-azure-storage-account-by-blobxfer)
- [Maintenance](#maintenance)
- [Shell Access](#shell-access)
- [Manual Backups](#manual-backups)
@@ -140,7 +139,7 @@ Be sure to view the following repositories to understand all the customizable op
### Database Specific Options
| Parameter | Description | Default |
| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- |
| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------- |
| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
@@ -171,7 +170,7 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
### Backup Options
| Parameter | Description | Default |
| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- |
| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | -------------- |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
| `COMPRESSION_LEVEL` | Numerical value of what level of compression to use; most allow `1` to `9`, except for `ZSTD` which allows `1` to `19` | `3` |
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
@@ -183,12 +182,6 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` |
| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` |
| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` |
| `MYSQL_ENABLE_TLS` | Enable TLS functionality for MySQL client | `FALSE` |
| `MYSQL_TLS_VERIFY` | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` |
| `MYSQL_TLS_VERSION` | What TLS `v1.1` `v1.2` `v1.3` version to utilize | `TLSv1.1,TLSv1.2,TLSv1.3` |
| `MYSQL_TLS_CA_FILE` | Filename to load custom CA certificate for connecting via TLS | `/etc/ssl/cert.pem` |
| `MYSQL_TLS_CERT_FILE` | Filename to load client certificate for connecting via TLS | |
| `MYSQL_TLS_KEY_FILE` | Filename to load client key for connecting via TLS | |
- When using compression with MongoDB, only `GZ` compression is possible.
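The `MYSQL_TLS_*` rows above exist only on the 3.7.x side of this comparison and are removed when moving to 3.5.3. A hedged sketch of how they would be supplied to the container, with hypothetical host, credentials, and mount path:

```bash
# Hedged sketch: supplying the 3.7.x-era MYSQL_TLS_* variables at run time.
# Host name, credentials, and the local backup directory are hypothetical examples.
docker run -d --name db-backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=mydb \
  -e DB_USER=backup \
  -e DB_PASS=secret \
  -e MYSQL_ENABLE_TLS=TRUE \
  -e MYSQL_TLS_CA_FILE=/etc/ssl/cert.pem \
  -e MYSQL_TLS_VERIFY=FALSE \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```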
@@ -197,11 +190,11 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
If `BACKUP_LOCATION` = `S3` then the following options are used.
| Parameter | Description | Default |
| --------------------- | ----------------------------------------------------------------------------------------- | ------- |
|-----------------------|-------------------------------------------------------------------------------------------|---------|
| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | |
| `S3_KEY_ID` | S3 Key ID | |
| `S3_KEY_SECRET` | S3 Key Secret | |
| `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | |
| `S3_PATH` | S3 Pathname to save to (must end in a trailing slash e.g. '`backup/`') | |
| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | |
| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` |
@@ -218,7 +211,7 @@ Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer
If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
| Parameter | Description | Default |
| ------------------------------ | ------------------------------------------- | ------------------- |
| ------------------------------- | ------------------------------------------------------------------------ | -------------------- |
| `BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | |
| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | |
| `BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` |
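A hedged sketch of the S3 settings above written as an env-file; bucket, keys, and database values are placeholders, and note that the documented `S3_PATH` convention differs between the two sides of this comparison (trailing slash in 3.5.3, none in 3.7.x):

```bash
# Hedged sketch: S3 backup configuration as an env-file. Placeholders throughout;
# host/region examples mirror the table above.
cat > db-backup.env <<'EOF'
DB_TYPE=pgsql
DB_HOST=postgres
DB_NAME=mydb
DB_USER=backup
DB_PASS=secret
BACKUP_LOCATION=S3
S3_BUCKET=mybucket
S3_KEY_ID=AKIAEXAMPLE
S3_KEY_SECRET=examplesecret
S3_PATH=backup/
S3_REGION=ap-northeast-2
S3_HOST=minio:8080
S3_PROTOCOL=https
EOF

docker run -d --env-file db-backup.env tiredofit/db-backup
```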

View File

@@ -12,13 +12,9 @@ ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
MODE=${MODE:-"AUTO"}
MYSQL_ENABLE_TLS=${MYSQL_ENABLE_TLS:-"FALSE"}
MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
MYSQL_SINGLE_TRANSACTION=${MYSQL_SINGLE_TRANSACTION:-"TRUE"}
MYSQL_STORED_PROCEDURES=${MYSQL_STORED_PROCEDURES:-"TRUE"}
MYSQL_TLS_CA_FILE=${MYSQL_TLS_CA_FILE:-"/etc/ssl/cert/pem"}
MYSQL_TLS_VERIFY=${MYSQL_TLS_VERIFY:-"FALSE"}
MYSQL_TLS_VERSION=${MYSQL_TLS_VERSION:-"TLSv1.1,TLSv1.2,TLSv1.3"}
PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
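For readers unfamiliar with the pattern in this defaults file, a small self-contained illustration of the `${VAR:-"default"}` idiom it relies on (defaults taken from the lines above):

```bash
# Illustration of the ${VAR:-"default"} idiom used throughout this file:
# an unset or empty variable falls back to the default; a caller-supplied value wins.
unset MANUAL_RUN_FOREVER
MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
echo "${MANUAL_RUN_FOREVER}"   # -> TRUE (fallback applied)

MODE="MANUAL"
MODE=${MODE:-"AUTO"}
echo "${MODE}"                 # -> MANUAL (existing value preserved)
```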

View File

@@ -1,7 +1,6 @@
#!/command/with-contenv bash
bootstrap_variables() {
sanity_var DB_TYPE "Set appropriate DB_TYPE"
case "${DB_TYPE,,}" in
couch* )
dbtype=couch
@@ -46,26 +45,9 @@ bootstrap_variables() {
"mysql" | "mariadb" )
dbtype=mysql
DB_PORT=${DB_PORT:-3306}
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
[[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
if var_true "${MYSQL_ENABLE_TLS}" ; then
if [ -n "${MYSQL_TLS_CA_FILE}" ] ; then
mysql_tls_args="--ssl_ca=${MYSQL_TLS_CA_FILE}"
fi
if [ -n "${MYSQL_TLS_CERT_FILE}" ] ; then
mysql_tls_args="${mysql_tls_args} --ssl_cert=${MYSQL_TLS_CERT_FILE}"
fi
if [ -n "${MYSQL_TLS_KEY_FILE}" ] ; then
mysql_tls_args="${mysql_tls_args} --ssl_key=${MYSQL_TLS_KEY_FILE}"
fi
if var_true "${TLS_VERIFY}" ; then
mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
fi
if [ -n "${MYSQL_TLS_VERSION}" ; then
mysql_tls_args="${mysql_tls_args} --tls_version=${MYSQL_TLS_VERSION}"
fi
fi
sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
;;
"mssql" | "microsoftsql" )
apkArch="$(apk --print-arch)"; \
@@ -92,10 +74,6 @@ bootstrap_variables() {
sqlite* )
dbtype=sqlite3
;;
* )
print_error "I don't recognize 'DB_TYPE=${DB_TYPE}' - Exiting.."
exit 99
;;
esac
if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
@@ -138,7 +116,7 @@ backup_influx() {
influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
generate_checksum
@@ -215,7 +193,7 @@ backup_mysql() {
if [ "${DB_NAME,,}" = "all" ] ; then
print_debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER ${mysql_tls_args} ${EXTRA_OPTS} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
@@ -236,7 +214,7 @@ backup_mysql() {
compression
pre_dbbackup $db
print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
generate_checksum
@@ -250,7 +228,7 @@ backup_mysql() {
compression
pre_dbbackup all
print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code $target
generate_checksum
@@ -414,7 +392,7 @@ check_availability() {
"mysql" )
counter=0
export MYSQL_PWD=${DB_PASS}
while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" ${mysql_tls_args} ${EXTRA_OPTS} status > /dev/null 2>&1) ; do
while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
sleep 5
(( counter+=5 ))
print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
@@ -482,21 +460,18 @@ cleanup_old_data() {
if [ -n "${DB_CLEANUP_TIME}" ]; then
if [ "${master_exit_code}" != 1 ]; then
case "${BACKUP_LOCATION,,}" in
"blobxfer" )
print_info "Cleaning up old backups on filesystem"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
print_info "Syncing changes via blobxfer"
silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} --delete --delete-only
;;
"file" | "filesystem" )
print_info "Cleaning up old backups on filesystem"
mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
if [ "${BACKUP_LOCATION,,}" = "blobxfer" ] ; then
print_info "Syncing changes via blobxfer"
silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} --delete --delete-only
fi
;;
"s3" | "minio" )
print_info "Cleaning up old backups on S3 storage"
aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | grep " PRE " -v | while read -r s3_file; do
aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | grep " PRE " -v | while read -r s3_file; do
s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
s3_createdate=$(date -d "$s3_createdate" "+%s")
s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
@@ -504,7 +479,7 @@ cleanup_old_data() {
s3_filename=$(echo $s3_file | awk {'print $4'})
if [ "$s3_filename" != "" ] ; then
print_debug "Deleting $s3_filename"
aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
fi
@@ -517,7 +492,6 @@ cleanup_old_data() {
fi
}
compression() {
if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
PARALLEL_COMPRESSION_THREADS=1