diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1f2ae60..a2795b8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,12 @@
+## 3.7.0 2022-12-16
+
+   ### Added
+      - Introduce support for connecting via TLS to MySQL / MariaDB Hosts with MYSQL_TLS_* variables - See README for more details
+
+   ### Changed
+      - Fix for cleaning up filesystems that are syncing to Azure via blobxfer
+
+
 ## 3.6.1 2022-11-23
 
    ### Added
diff --git a/Dockerfile b/Dockerfile
index 904f0cd..e6d9ef7 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -10,104 +10,8 @@ ENV INFLUX2_VERSION=2.4.0 \
     IMAGE_NAME="tiredofit/db-backup" \
     IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
 
-ENV LANG=en_US.utf8 \
-    PG_MAJOR=15 \
-    PG_VERSION=15.1 \
-    PGDATA=/var/lib/postgresql/data
-
-### Create User Accounts
-RUN set -ex && \
-    addgroup -g 70 postgres && \
-    adduser -S -D -H -h /var/lib/postgresql -s /bin/sh -G postgres -u 70 postgres && \
-    mkdir -p /var/lib/postgresql && \
-    chown -R postgres:postgres /var/lib/postgresql && \
-    \
-### Install Dependencies
-    apk update && \
-    apk upgrade && \
-    apk add \
-        openssl \
-        && \
-    \
-    apk add --no-cache --virtual .postgres-build-deps \
-        bison \
-        build-base \
-        coreutils \
-        dpkg-dev \
-        dpkg \
-        flex \
-        gcc \
-        icu-dev \
-        libc-dev \
-        libedit-dev \
-        libxml2-dev \
-        libxslt-dev \
-        linux-headers \
-        make \
-        openssl-dev \
-        perl-utils \
-        perl-ipc-run \
-        util-linux-dev \
-        wget \
-        zlib-dev \
-        && \
-    \
-### Build Postgresql
-    mkdir -p /usr/src/postgresql && \
-    curl -sSL "https://ftp.postgresql.org/pub/source/v$PG_VERSION/postgresql-$PG_VERSION.tar.bz2" | tar xvfj - --strip 1 -C /usr/src/postgresql && \
-    cd /usr/src/postgresql && \
-# update "DEFAULT_PGSOCKET_DIR" to "/var/run/postgresql" (matching Debian)
-# see https://anonscm.debian.org/git/pkg-postgresql/postgresql.git/tree/debian/patches/51-default-sockets-in-var.patch?id=8b539fcb3e093a521c095e70bdfa76887217b89f
-    awk '$1 == "#define" && $2 == "DEFAULT_PGSOCKET_DIR" && $3 == "\"/tmp\"" { $3 = "\"/var/run/postgresql\""; print; next } { print }' src/include/pg_config_manual.h > src/include/pg_config_manual.h.new && \
-    grep '/var/run/postgresql' src/include/pg_config_manual.h.new && \
-    mv src/include/pg_config_manual.h.new src/include/pg_config_manual.h && \
-    gnuArch="$(dpkg-architecture --query DEB_BUILD_GNU_TYPE)" && \
-# explicitly update autoconf config.guess and config.sub so they support more arches/libcs
-    wget --inet4-only -O config/config.guess 'https://git.savannah.gnu.org/cgit/config.git/plain/config.guess?id=7d3d27baf8107b630586c962c057e22149653deb' && \
-    wget --inet4-only -O config/config.sub 'https://git.savannah.gnu.org/cgit/config.git/plain/config.sub?id=7d3d27baf8107b630586c962c057e22149653deb' && \
-    ./configure \
-        --build="$gnuArch" \
-        --enable-integer-datetimes \
-        --enable-thread-safety \
-        --enable-tap-tests \
-        --disable-rpath \
-        --with-uuid=e2fs \
-        --with-gnu-ld \
-        --with-pgport=5432 \
-        --with-system-tzdata=/usr/share/zoneinfo \
-        --prefix=/usr/local \
-        --with-includes=/usr/local/include \
-        --with-libraries=/usr/local/lib \
-        --with-openssl \
-        --with-libxml \
-        --with-libxslt \
-        --with-icu \
-        && \
-    \
-    make -j "$(nproc)" world && \
-    make install-world && \
-    make -C contrib install && \
-    runDeps="$( \
-        scanelf --needed --nobanner --format '%n#p' --recursive /usr/local \
-            | tr ',' '\n' \
-            | sort -u \
-            | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
-    )" && \
-    apk add -t .postgres-additional-deps \
-        $runDeps \
-        && \
-    \
-### Cleanup
-    apk del .postgres-build-deps && \
-    cd / && \
-    rm -rf \
-        /usr/src/postgresql \
-        /usr/local/share/doc \
-        /usr/local/share/man && \
-    find /usr/local -name '*.a' -delete && \
-    rm -rf /var/cache/apk/* && \
-    \
 ### Dependencies
+RUN source /assets/functions/00-container && \
     set -ex && \
     apk update && \
     apk upgrade && \
@@ -133,8 +37,8 @@ RUN set -ex && \
         mongodb-tools \
         openssl \
         pigz \
-        #postgresql \
-        #postgresql-client \
+        postgresql15 \
+        postgresql15-client \
         pv \
         py3-cryptography \
         redis \
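With the bundled PostgreSQL now coming from Alpine's `postgresql15` / `postgresql15-client` packages instead of a source build, the shipped client can be sanity-checked quickly. This is a minimal sketch and assumes the image has been built and tagged locally as `tiredofit/db-backup`:

```bash
# Check which pg_dump/psql the image now ships (packaged PostgreSQL 15 client).
# Overriding the entrypoint avoids starting the normal backup services.
docker run --rm --entrypoint pg_dump tiredofit/db-backup --version
docker run --rm --entrypoint psql tiredofit/db-backup --version
```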
diff --git a/README.md b/README.md
index 30395bc..e63ae06 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
 # github.com/tiredofit/docker-db-backup
 
 [![GitHub release](https://img.shields.io/github/v/tag/tiredofit/docker-db-backup?style=flat-square)](https://github.com/tiredofit/docker-db-backup/releases/latest)
-[![Build Status](https://img.shields.io/github/workflow/status/tiredofit/docker-db-backup/build?style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions?query=workflow%3Abuild)
+[![Build Status](https://img.shields.io/github/actions/workflow/status/tiredofit/docker-db-backup/main.yml?branch=main&style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions)
 [![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
 [![Docker Pulls](https://img.shields.io/docker/pulls/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
 [![Become a sponsor](https://img.shields.io/badge/sponsor-tiredofit-181717.svg?logo=github&style=flat-square)](https://github.com/sponsors/tiredofit)
@@ -53,6 +53,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
   - [Scheduling Options](#scheduling-options)
   - [Backup Options](#backup-options)
   - [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
+  - [Upload to a Azure storage account by `blobxfer`](#upload-to-a-azure-storage-account-by-blobxfer)
   - [Maintenance](#maintenance)
   - [Shell Access](#shell-access)
   - [Manual Backups](#manual-backups)
@@ -138,18 +139,18 @@ Be sure to view the following repositories to understand all the customizable op
 | `SPLIT_DB`         | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE` |
 
 ### Database Specific Options
-| Parameter          | Description | Default   |
-| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------- |
-| `DB_AUTH`          | (Mongo Only - Optional) Authentication Database | |
-| `DB_TYPE`          | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
-| `DB_HOST`          | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
-| `DB_NAME`          | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by seperating with commas eg `db1,db2` | |
-| `DB_NAME_EXCLUDE`  | If using `ALL` - use this as to exclude databases seperated via commas from being backed up | |
-| `DB_USER`          | username for the database(s) - Can use `root` for MySQL | |
-| `DB_PASS`          | (optional if DB doesn't require it) password for the database | |
-| `DB_PORT`          | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
-| `INFLUX_VERSION`   | What Version of Influx are you backing up from `1`.x or `2` series - AMD64 and ARM64 only for `2` | |
-| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | |
+| Parameter          | Description | Default |
+| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- |
+| `DB_AUTH`          | (Mongo Only - Optional) Authentication Database | |
+| `DB_TYPE`          | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
+| `DB_HOST`          | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
+| `DB_NAME`          | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas e.g. `db1,db2` | |
+| `DB_NAME_EXCLUDE`  | If using `ALL` - use this to exclude databases separated via commas from being backed up | |
+| `DB_USER`          | username for the database(s) - Can use `root` for MySQL | |
+| `DB_PASS`          | (optional if DB doesn't require it) password for the database | |
+| `DB_PORT`          | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
+| `INFLUX_VERSION`   | What Version of Influx are you backing up from `1`.x or `2` series - AMD64 and ARM64 only for `2` | |
+| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | |
 |                    | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can overrde them by making your own entries |
 
 #### For Influx DB2:
@@ -169,19 +170,24 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
 - You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of backups that start with a `0` get converted into a different format which will not allow the timer to start at the correct time.
 
 ### Backup Options
-| Parameter                      | Description | Default        |
-| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | -------------- |
-| `COMPRESSION`                  | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
-| `COMPRESSION_LEVEL`            | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` |
-| `ENABLE_PARALLEL_COMPRESSION`  | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
-| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
-| `GZ_RSYNCABLE`                 | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` |
-| `ENABLE_CHECKSUM`              | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` |
-| `CHECKSUM`                     | Either `MD5` or `SHA1` | `MD5` |
-| `EXTRA_OPTS`                   | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | |
-| `MYSQL_MAX_ALLOWED_PACKET`     | Max allowed packet if backing up MySQL / MariaDB | `512M` |
-| `MYSQL_SINGLE_TRANSACTION`     | Backup in a single transaction with MySQL / MariaDB | `TRUE` |
-| `MYSQL_STORED_PROCEDURES`      | Backup stored procedures with MySQL / MariaDB | `TRUE` |
+| Parameter                      | Description | Default                   |
+| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- |
+| `COMPRESSION`                  | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` |
+| `COMPRESSION_LEVEL`            | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` | `3` |
+| `ENABLE_PARALLEL_COMPRESSION`  | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
+| `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` |
+| `GZ_RSYNCABLE`                 | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` |
+| `ENABLE_CHECKSUM`              | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` |
+| `CHECKSUM`                     | Either `MD5` or `SHA1` | `MD5` |
+| `EXTRA_OPTS`                   | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | |
+| `MYSQL_MAX_ALLOWED_PACKET`     | Max allowed packet if backing up MySQL / MariaDB | `512M` |
+| `MYSQL_SINGLE_TRANSACTION`     | Backup in a single transaction with MySQL / MariaDB | `TRUE` |
+| `MYSQL_STORED_PROCEDURES`      | Backup stored procedures with MySQL / MariaDB | `TRUE` |
+| `MYSQL_TLS_VERIFY`             | (optional) If using TLS (by means of MYSQL_TLS_* variables) verify remote host | `FALSE` |
+| `MYSQL_TLS_VERSION`            | Which TLS version(s) to use: `TLSv1.1` `TLSv1.2` `TLSv1.3` | `TLSv1.1,TLSv1.2,TLSv1.3` |
+| `MYSQL_TLS_CA_FILE`            | Filename to load custom CA certificate for connecting via TLS e.g. `/etc/ssl/cert.pem` should suffice for most non self-signed setups | |
+| `MYSQL_TLS_CERT_FILE`          | Filename to load client certificate for connecting via TLS | |
+| `MYSQL_TLS_KEY_FILE`           | Filename to load client key for connecting via TLS | |
 
 - When using compression with MongoDB, only `GZ` compression is possible.
 
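To show how the new `MYSQL_TLS_*` options above fit together, here is a hedged usage sketch; the container name, MariaDB host, mounted volumes and certificate paths are placeholders, and the certificates must be mounted into the container:

```bash
# Illustrative only: back up a MariaDB host over TLS with the options documented above.
docker run -d --name db-backup \
  -v /srv/certs:/certs:ro \
  -v /srv/backups:/backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=ALL \
  -e DB_USER=root \
  -e DB_PASS=password \
  -e MYSQL_TLS_CA_FILE=/certs/ca.pem \
  -e MYSQL_TLS_CERT_FILE=/certs/client-cert.pem \
  -e MYSQL_TLS_KEY_FILE=/certs/client-key.pem \
  -e MYSQL_TLS_VERIFY=TRUE \
  -e MYSQL_TLS_VERSION=TLSv1.2,TLSv1.3 \
  tiredofit/db-backup
```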
@@ -189,19 +195,19 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
 
 If `BACKUP_LOCATION` = `S3` then the following options are used.
 
-| Parameter             | Description | Default |
-|-----------------------|------------------------------------------------------------------------------------------|---------|
-| `S3_BUCKET`           | S3 Bucket name e.g. `mybucket` | |
-| `S3_KEY_ID`           | S3 Key ID | |
-| `S3_KEY_SECRET`       | S3 Key Secret | |
-| `S3_PATH`             | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | |
-| `S3_REGION`           | Define region in which bucket is defined. Example: `ap-northeast-2` | |
-| `S3_HOST`             | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
-| `S3_PROTOCOL`         | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` |
-| `S3_EXTRA_OPTS`       | Add any extra options to the end of the `aws-cli` process execution | |
+| Parameter             | Description | Default |
+| --------------------- | ----------------------------------------------------------------------------------------- | ------- |
+| `S3_BUCKET`           | S3 Bucket name e.g. `mybucket` | |
+| `S3_KEY_ID`           | S3 Key ID | |
+| `S3_KEY_SECRET`       | S3 Key Secret | |
+| `S3_PATH`             | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | |
+| `S3_REGION`           | Define region in which bucket is defined. Example: `ap-northeast-2` | |
+| `S3_HOST`             | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
+| `S3_PROTOCOL`         | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` |
+| `S3_EXTRA_OPTS`       | Add any extra options to the end of the `aws-cli` process execution | |
 | `S3_CERT_CA_FILE`     | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem` | |
-| _*OR*_                | | |
-| `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` |
+| _*OR*_                | | |
+| `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` |
 
 #### Upload to a Azure storage account by `blobxfer`
 
@@ -210,13 +216,13 @@ Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer
 
 If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
 
-| Parameter                       | Description | Default              |
-| ------------------------------- | ------------------------------------------------------------------------ | -------------------- |
-| `BLOBXFER_STORAGE_ACCOUNT`      | Microsoft Azure Cloud storage account name. | |
-| `BLOBXFER_STORAGE_ACCOUNT_KEY`  | Microsoft Azure Cloud storage account key. | |
-| `BLOBXFER_REMOTE_PATH`          | Remote Azure path | `/docker-db-backup` |
+| Parameter                      | Description | Default             |
+| ------------------------------ | -------------------------------------------- | ------------------- |
+| `BLOBXFER_STORAGE_ACCOUNT`     | Microsoft Azure Cloud storage account name. | |
+| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | |
+| `BLOBXFER_REMOTE_PATH`         | Remote Azure path | `/docker-db-backup` |
 
-> This service uploads files from backup targed directory `DB_DUMP_TARGET`.
+> This service uploads files from backup target directory `DB_DUMP_TARGET`.
 > If the a cleanup configuration in `DB_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
 
 ## Maintenance
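For the blobxfer target described above, a minimal environment might look like the following; the storage account, key and database details are placeholders:

```bash
# Illustrative only: upload finished dumps to an Azure storage account via blobxfer.
# DB_CLEANUP_TIME (minutes) also prunes the remote path, per the cleanup fix in this release.
docker run -d --name db-backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=mydb \
  -e DB_USER=backup \
  -e DB_PASS=password \
  -e BACKUP_LOCATION=blobxfer \
  -e BLOBXFER_STORAGE_ACCOUNT=mystorageaccount \
  -e BLOBXFER_STORAGE_ACCOUNT_KEY='<storage-account-key>' \
  -e BLOBXFER_REMOTE_PATH=/docker-db-backup \
  -e DB_CLEANUP_TIME=10080 \
  tiredofit/db-backup
```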
diff --git a/install/assets/defaults/10-db-backup b/install/assets/defaults/10-db-backup
index 324940d..3430944 100644
--- a/install/assets/defaults/10-db-backup
+++ b/install/assets/defaults/10-db-backup
@@ -15,6 +15,8 @@ MODE=${MODE:-"AUTO"}
 MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
 MYSQL_SINGLE_TRANSACTION=${MYSQL_SINGLE_TRANSACTION:-"TRUE"}
 MYSQL_STORED_PROCEDURES=${MYSQL_STORED_PROCEDURES:-"TRUE"}
+MYSQL_TLS_VERIFY=${MYSQL_TLS_VERIFY:-"FALSE"}
+MYSQL_TLS_VERSION=${MYSQL_TLS_VERSION:-"TLSv1.1,TLSv1.2,TLSv1.3"}
 PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
 S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
 S3_PROTOCOL=${S3_PROTOCOL:-"https"}
diff --git a/install/assets/functions/10-db-backup b/install/assets/functions/10-db-backup
index 1ebcf20..c4a2e02 100644
--- a/install/assets/functions/10-db-backup
+++ b/install/assets/functions/10-db-backup
@@ -46,9 +46,28 @@ bootstrap_variables() {
         "mysql" | "mariadb" )
             dbtype=mysql
             DB_PORT=${DB_PORT:-3306}
+            sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
             [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
             [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
-            sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
+            if [ -n "${MYSQL_TLS_CA_FILE}" ] ; then
+                mysql_tls=TRUE
+                mysql_tls_args="--ssl_ca=${MYSQL_TLS_CA_FILE}"
+            fi
+            if [ -n "${MYSQL_TLS_CERT_FILE}" ] ; then
+                mysql_tls=TRUE
+                mysql_tls_args="${mysql_tls_args} --ssl_cert=${MYSQL_TLS_CERT_FILE}"
+            fi
+            if [ -n "${MYSQL_TLS_KEY_FILE}" ] ; then
+                mysql_tls=TRUE
+                mysql_tls_args="${mysql_tls_args} --ssl_key=${MYSQL_TLS_KEY_FILE}"
+            fi
+            if var_true "${MYSQL_TLS_VERIFY}" ; then
+                mysql_tls=TRUE
+                mysql_tls_args="${mysql_tls_args} --ssl-verify-server-cert"
+            fi
+            if var_true "${mysql_tls}" ; then
+                mysql_tls_args="${mysql_tls_args} --tls_version=${MYSQL_TLS_VERSION}"
+            fi
         ;;
         "mssql" | "microsoftsql" )
             apkArch="$(apk --print-arch)"; \
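To make the effect of the block above concrete (values are hypothetical, not part of the patch): with a CA file set and verification enabled, `mysql_tls_args` ends up carrying the extra client flags that the later hunks append to every `mysql`, `mysqldump` and `mysqladmin` call:

```bash
# Given e.g. MYSQL_TLS_CA_FILE=/certs/ca.pem, MYSQL_TLS_VERIFY=TRUE and the default
# MYSQL_TLS_VERSION, the logic above assembles roughly:
mysql_tls_args="--ssl_ca=/certs/ca.pem --ssl-verify-server-cert --tls_version=TLSv1.1,TLSv1.2,TLSv1.3"

# ...which then rides along on the availability check and the dumps, e.g.:
mysqladmin -u"root" -P"3306" -h"mariadb" ${mysql_tls_args} status
```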
@@ -198,7 +217,7 @@ backup_mysql() {
 
     if [ "${DB_NAME,,}" = "all" ] ; then
         print_debug "Preparing to back up everything except for information_schema and _* prefixes"
-        db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
+        db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER ${mysql_tls_args} ${EXTRA_OPTS} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
         if [ -n "${DB_NAME_EXCLUDE}" ] ; then
             db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
             for db_exclude in ${db_names_exclusions} ; do
@@ -219,7 +238,7 @@
             compression
             pre_dbbackup $db
             print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
-            mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+            mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
             exit_code=$?
             check_exit_code $target
             generate_checksum
@@ -233,7 +252,7 @@
         compression
         pre_dbbackup all
         print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
-        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
         exit_code=$?
         check_exit_code $target
         generate_checksum
@@ -397,7 +416,7 @@ check_availability() {
         "mysql" )
             counter=0
             export MYSQL_PWD=${DB_PASS}
-            while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
+            while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" ${mysql_tls_args} ${EXTRA_OPTS} status > /dev/null 2>&1) ; do
                 sleep 5
                 (( counter+=5 ))
                 print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
@@ -465,14 +484,17 @@ cleanup_old_data() {
     if [ -n "${DB_CLEANUP_TIME}" ]; then
         if [ "${master_exit_code}" != 1 ]; then
             case "${BACKUP_LOCATION,,}" in
+                "blobxfer" )
+                    print_info "Cleaning up old backups on filesystem"
+                    mkdir -p "${DB_DUMP_TARGET}"
+                    find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+                    print_info "Syncing changes via blobxfer"
+                    silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} --delete --delete-only
+                ;;
                 "file" | "filesystem" )
                     print_info "Cleaning up old backups on filesystem"
                     mkdir -p "${DB_DUMP_TARGET}"
                     find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
-                    if [ "${BACKUP_LOCATION,,}" = "blobxfer" ] ; then
-                        print_info "Syncing changes via blobxfer"
-                        silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} --delete --delete-only
-                    fi
                 ;;
                 "s3" | "minio" )
                     print_info "Cleaning up old backups on S3 storage"
@@ -484,7 +506,7 @@
                     s3_filename=$(echo $s3_file | awk {'print $4'})
                     if [ "$s3_filename" != "" ] ; then
                         print_debug "Deleting $s3_filename"
-                        silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+                        aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
                     fi
                 fi
 
@@ -497,6 +519,7 @@
     fi
 }
 
+
 compression() {
     if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
         PARALLEL_COMPRESSION_THREADS=1
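As a closing illustration of the new `blobxfer` cleanup branch (a sketch only; values are examples): `DB_CLEANUP_TIME` is expressed in minutes and feeds directly into `find -mmin`, after which the deletions are mirrored to the remote path:

```bash
# Roughly what cleanup_old_data now does for BACKUP_LOCATION=blobxfer
# with DB_CLEANUP_TIME=1440 (remove anything older than 24 hours).
DB_DUMP_TARGET=/backup
DB_CLEANUP_TIME=1440
BLOBXFER_REMOTE_PATH=/docker-db-backup

mkdir -p "${DB_DUMP_TARGET}"
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
blobxfer upload --mode file --remote-path "${BLOBXFER_REMOTE_PATH}" \
    --local-path "${DB_DUMP_TARGET}" --delete --delete-only
```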