From c23d7991fe23fe74f79821eabc27472c514fbca6 Mon Sep 17 00:00:00 2001
From: Dave Conroy
Date: Fri, 1 Apr 2022 18:41:28 -0700
Subject: [PATCH] Release 3.2.0 - See CHANGELOG.md

---
 CHANGELOG.md                          |  14 +-
 Dockerfile                            |  13 +-
 README.md                             |   6 +
 install/assets/functions/10-db-backup | 237 ++++++++++++++++----------
 4 files changed, 178 insertions(+), 92 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 76f2058..545e77e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,15 @@
+## 3.2.0 2022-03-31
+
+   ### Added
+   - Install InfluxDB2 Client alongside version 1 (amd64 and arm64)
+   - Introduce InfluxDB 2 backup support
+   - Introduce multiple compression types other than Gzip for Influx 1/2
+   - Introduce compression for MSSQL backups
+
+   ### Changed
+   - Testing for Host availability for CouchDB and InfluxDB
+
+
 ## 3.1.3 2022-03-30
 
    ### Changed
@@ -259,7 +271,7 @@
 
 ## 2.9.2 2021-10-22
 
-   
+
    ### Fixed
    - Fix compression failing on Redis backup
 
diff --git a/Dockerfile b/Dockerfile
index f5ab3d9..57b89eb 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,7 +2,9 @@ FROM docker.io/tiredofit/alpine:3.15
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"
 
 ### Set Environment Variables
-ENV MSSQL_VERSION=17.8.1.1-1 \
+
+ENV INFLUX2_VERSION=2.2.1 \
+    MSSQL_VERSION=17.8.1.1-1 \
     CONTAINER_ENABLE_MESSAGING=FALSE \
     CONTAINER_ENABLE_MONITORING=TRUE \
     CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
@@ -44,9 +46,14 @@ RUN set -ex && \
     \
     apkArch="$(apk --print-arch)"; \
     case "$apkArch" in \
-        x86_64) mssql=true ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ;; \
-        *) echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ;; \
+        x86_64) mssql=true ; influx2=true ; influx_arch=amd64 ;; \
+        aarch64 ) influx2=true ; influx_arch=arm64 ;; \
+        *) : ;; \
     esac; \
+    \
+    if [ "$mssql" = "true" ] ; then curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
+    if [ "$influx2" = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "InfluxDB 2 client not available for this architecture, skipping installation" ; fi ; \
+    \
     mkdir -p /usr/src/pbzip2 && \
     curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
     cd /usr/src/pbzip2 && \
diff --git a/README.md b/README.md
index d21eca6..7485fd7 100644
--- a/README.md
+++ b/README.md
@@ -50,6 +50,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
   - [Base Images used](#base-images-used)
 - [Container Options](#container-options)
   - [Database Specific Options](#database-specific-options)
+    - [For Influx DB2:](#for-influx-db2)
   - [Scheduling Options](#scheduling-options)
   - [Backup Options](#backup-options)
   - [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
@@ -143,6 +144,11 @@ Be sure to view the following repositories to understand all the customizable op
 | `DB_USER` | username for the database(s) - Can use `root` for MySQL | |
 | `DB_PASS` | (optional if DB doesn't require it) password for the database | |
 | `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
+| `INFLUX_VERSION` | Which InfluxDB version you are backing up, `1` or `2` - version `2` is supported on AMD64 and ARM64 only | |
+
+#### For Influx DB2:
+Your organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to back up the entire set of databases. For `DB_HOST`, use the syntax `http(s)://db-name`.
+
 ### Scheduling Options
 | Parameter | Description | Default |
 | --------- | ----------- | ------- |
diff --git a/install/assets/functions/10-db-backup b/install/assets/functions/10-db-backup
index 9cb35d1..c0e7605 100755
--- a/install/assets/functions/10-db-backup
+++ b/install/assets/functions/10-db-backup
@@ -13,6 +13,7 @@ bootstrap_variables() {
             dbport=${DB_PORT:-8088}
             file_env 'DB_USER'
             file_env 'DB_PASS'
+            sanity_var INFLUX_VERSION "Which InfluxDB version you are backing up from, '1' or '2'"
         ;;
         mongo* )
             dbtype=mongo
@@ -81,7 +82,7 @@ backup_couch() {
     target=couch_${dbname}_${dbhost}_${now}.txt
     compression
     print_notice "Dumping CouchDB database: '${dbname}' ${compression_string}"
-    curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true | $compress_cmd > ${TEMP_LOCATION}/${target}
+    curl -sSL -X GET ${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true | $compress_cmd > ${TEMP_LOCATION}/${target}
     exit_code=$?
     check_exit_code $target
     generate_checksum
@@ -90,24 +91,59 @@
 }
 
 backup_influx() {
-
-    if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
-        :
+    if [ "${dbname,,}" = "all" ] ; then
+        print_debug "Preparing to back up everything"
+        db_names=justbackupeverything
     else
-        influx_compression="-portable"
-        compression_string=" and compressing with gzip"
+        db_names=$(echo "${dbname}" | tr ',' '\n')
+    fi
+
+    case "${INFLUX_VERSION,,}" in
+        1 )
+            for db in ${db_names}; do
+                pre_dbbackup
+                if [ "${db}" != "justbackupeverything" ] ; then database="-database $db" ; else db=all ; fi
+                target=influx_${db}_${dbhost}_${now}
+                compression
+                print_notice "Dumping Influx database: '${db}'"
+                influxd backup ${influx_compression} ${database} -host ${dbhost}:${dbport} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
+                exit_code=$?
+                check_exit_code $target_dir
+                print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
+                tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
+                target=influx_${db}_${dbhost}_${now}.tar${extension}
+                generate_checksum
+                move_dbbackup
+                post_dbbackup $db
+            done
+        ;;
+        2 )
+            for db in ${db_names}; do
+                pre_dbbackup
+                if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
+                target=influx2_${db}_${dbhost}_${now}
+                compression
+                print_notice "Dumping Influx2 database: '${db}'"
+                influx backup --org ${dbuser} ${bucket} --host ${dbhost}:${dbport} --token ${dbpass} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
+                exit_code=$?
+                check_exit_code $target_dir
+                create_archive
+                target=influx2_${db}_${dbhost}_${now}.tar${extension}
+                generate_checksum
+                move_dbbackup
+                post_dbbackup $db
+            done
+        ;;
+    esac
+}
+
+create_archive() {
+    if [ "${exit_code}" = "0" ] ; then
+        print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
+        tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
+    else
+        print_warn "Skipping creating archive file because backup did not complete successfully"
     fi
-
-    for db in ${DB_NAME}; do
-        pre_dbbackup
-        target=influx_${db}_${dbhost}_${now}
-        print_notice "Dumping Influx database: '${db}' ${compression_string}"
-        influxd backup ${influx_compression} -database $db -host ${dbhost}:${dbport} ${TEMP_LOCATION}/${target}
-        exit_code=$?
-        check_exit_code $target
-        generate_checksum
-        move_dbbackup
-        post_dbbackup $db
-    done
 }
 
 backup_mongo() {
@@ -123,7 +159,6 @@ backup_mongo() {
     mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
     exit_code=$?
     check_exit_code $target
-    cd "${TEMP_LOCATION}"
     generate_checksum
     move_dbbackup
     post_dbbackup
@@ -132,6 +167,7 @@
 backup_mssql() {
     pre_dbbackup
     target=mssql_${dbname}_${dbhost}_${now}.bak
+    compression
     print_notice "Dumping MSSQL database: '${dbname}'"
     /opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} -Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
     exit_code=$?
@@ -284,7 +320,7 @@ backup_sqlite3() {
     sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
     exit_code=$?
     check_exit_code $target
-    cat "${TEMP_LOCATION}"/backup.sqlite3 | $compress_cmd > "${TEMP_LOCATION}/${target}"
+    cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
     generate_checksum
     move_dbbackup
     post_dbbackup $db
@@ -294,62 +330,68 @@
 }
 
 check_availability() {
 ### Set the Database Type
     case "$dbtype" in
         "couch" )
-            COUNTER=0
-            while ! (nc -z ${dbhost} ${dbport}) ; do
+            counter=0
+            code_received=0
+            while [ "${code_received}" != "200" ]; do
+                code_received=$(curl -XGET -sSL -o /dev/null -L -w '%{http_code}' ${dbhost}:${dbport})
+                if [ "${code_received}" = "200" ] ; then break ; fi
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "influx" )
-            COUNTER=0
-            while ! (nc -z ${dbhost} ${dbport}) ; do
+            counter=0
+            code_received=0
+            while [ "${code_received}" != "200" ]; do
+                code_received=$(curl -XGET -sSL -o /dev/null -w '%{http_code}' ${dbhost}:${dbport}/health)
+                if [ "${code_received}" = "200" ] ; then break ; fi
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "mongo" )
-            COUNTER=0
+            counter=0
             while ! (nc -z ${dbhost} ${dbport}) ; do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
             done
        ;;
         "mysql" )
-            COUNTER=0
+            counter=0
             export MYSQL_PWD=${dbpass}
             while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)"
+                (( counter+=5 ))
+                print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${counter} seconds so far)"
             done
         ;;
         "mssql" )
-            COUNTER=0
+            counter=0
             while ! (nc -z ${dbhost} ${dbport}) ; do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "pgsql" )
-            COUNTER=0
+            counter=0
             export PGPASSWORD=${dbpass}
             until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
             do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "redis" )
-            COUNTER=0
+            counter=0
             while ! (nc -z "${dbhost}" "${dbport}") ; do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "sqlite3" )
@@ -405,26 +447,41 @@ compression() {
         gz* )
             compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
             compression_type="gzip"
+            extension=".gz"
+            dir_compress_cmd=${compress_cmd}
+            target_dir=${target}
             target=${target}.gz
         ;;
         bz* )
             compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
             compression_type="bzip2"
+            dir_compress_cmd=${compress_cmd}
+            extension=".bz2"
+            target_dir=${target}
             target=${target}.bz2
         ;;
         xz* )
             compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
             compression_type="xzip"
+            dir_compress_cmd=${compress_cmd}
+            extension=".xz"
+            target_dir=${target}
             target=${target}.xz
         ;;
         zst* )
             compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
             compression_type="zstd"
+            dir_compress_cmd=${compress_cmd}
+            extension=".zst"
+            target_dir=${target}
             target=${target}.zst
         ;;
         "none" | "false")
             compress_cmd="cat "
             compression_type="none"
+            dir_compress_cmd="cat"
+            extension=""
+            target_dir=${target}
         ;;
     esac
@@ -470,55 +525,61 @@ generate_checksum() {
 }
 
 move_dbbackup() {
-    case "$SIZE_VALUE" in
-        "b" | "bytes" )
-            SIZE_VALUE=1
-        ;;
-        "[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
-            SIZE_VALUE="-h"
-        ;;
-        *)
-            SIZE_VALUE=1
-        ;;
-    esac
-    if [ "$SIZE_VALUE" = "1" ] ; then
-        FILESIZE=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
-        print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
+    if [ "${exit_code}" = "0" ] ; then
+        case "${SIZE_VALUE,,}" in
+            "b" | "bytes" )
+                SIZE_VALUE=1
+            ;;
+            "k" | "kb" | "kilobytes" | "m" | "mb" | "megabytes" )
+                SIZE_VALUE="-h"
+            ;;
+            *)
+                SIZE_VALUE=1
+            ;;
+        esac
+        if [ "$SIZE_VALUE" = "1" ] ; then
+            filesize=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
+            print_notice "Backup of ${target} created with the size of ${filesize} bytes"
+        else
+            filesize=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
+            print_notice "Backup of ${target} created with the size of ${filesize}"
+        fi
+
+        case "${BACKUP_LOCATION,,}" in
+            "file" | "filesystem" )
+                print_debug "Moving backup to filesystem"
+                mkdir -p "${DB_DUMP_TARGET}"
+                mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
+                mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
+            ;;
+            "s3" | "minio" )
+                print_debug "Moving backup to S3 Bucket"
+                export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
+                export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
+                export AWS_DEFAULT_REGION=${S3_REGION}
+                if [ -f "${S3_CERT_CA_FILE}" ] ; then
+                    print_debug "Using Custom CA for S3 Backups"
+                    s3_ca_cert="--ca-bundle ${S3_CERT_CA_FILE}"
+                fi
+                if var_true "${S3_CERT_SKIP_VERIFY}" ; then
+                    print_debug "Skipping SSL verification for HTTPS S3 Hosts"
+                    s3_ssl="--no-verify-ssl"
+                fi
+
+                [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
+
+                aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+                unset s3_ssl
+                unset s3_ca_cert
+                rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
+                rm -rf "${TEMP_LOCATION}"/"${target}"
+            ;;
+        esac
     else
-        FILESIZE=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
-        print_notice "Backup of ${target} created with the size of ${FILESIZE}"
+        print_warn "Skipping moving DB Backup to final location because backup did not complete successfully"
     fi
 
-    case "${BACKUP_LOCATION,,}" in
-        "file" | "filesystem" )
-            print_debug "Moving backup to filesystem"
-            mkdir -p "${DB_DUMP_TARGET}"
"${DB_DUMP_TARGET}" - mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/ - mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}" - ;; - "s3" | "minio" ) - print_debug "Moving backup to S3 Bucket" - export AWS_ACCESS_KEY_ID=${S3_KEY_ID} - export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET} - export AWS_DEFAULT_REGION=${S3_REGION} - if [ -f "${S3_CERT_CA_FILE}" ] ; then - print_debug "Using Custom CA for S3 Backups" - s3_ca_cert="--ca-bundle ${S3_CERT_CA_FILE}" - fi - if var_true "${S3_CERT_SKIP_VERIFY}" ; then - print_debug "Skipping SSL verification for HTTPS S3 Hosts" - s3_ssl="--no-verify-ssl" - fi - - [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}" - - aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} - unset s3_ssl - unset s3_ca_cert - rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}" - rm -rf "${TEMP_LOCATION}"/"${target}" - ;; - esac + rm -rf "${TEMP_LOCATION}"/* } pre_dbbackup() {