Compare commits

...

38 Commits
4.1.0 ... main

Author SHA1 Message Date
dave@tiredofit.ca
016c5c1a23 Release 4.1.21 - See CHANGELOG.md
2025-08-08 09:26:13 -07:00
Dave Conroy
de8a952825 Merge pull request #421 from tlex/main 2025-08-08 09:24:55 -07:00
Alex Thomae
c7912d355e fix: tabs used instead of spaces 2025-08-08 07:37:56 +02:00
Alex Thomae
15902829c0 #420: fix exit_code got changed to exitcode 2025-08-08 07:37:29 +02:00
dave@tiredofit.ca
2c8f40e37c Release 4.1.20 - See CHANGELOG.md 2025-07-23 08:55:52 -07:00
Dave Conroy
c360150117 Merge pull request #418 from alteriks/main 2025-07-23 08:54:36 -07:00
Krzysztof Dajka
7c32879e80 fix: exitcode masking db errors 2025-07-22 16:56:53 +02:00
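For context on the masking: `$?` after a pipeline only reflects its last command, while the fix further down in this diff sums the `PIPESTATUS` entries instead. A minimal bash sketch of the difference (illustrative only, not the project's code):

```bash
# Illustrative only: $? after a pipeline is the status of its *last* command,
# so a failing dump piped into a compressor looks like a success.
false | gzip > /dev/null
echo "$?"                                   # prints 0, the failure is masked

# Summing PIPESTATUS (as the diff below does) is non-zero if any stage failed.
false | gzip > /dev/null
echo "$((PIPESTATUS[0] + PIPESTATUS[1]))"   # prints 1
```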
dave@tiredofit.ca
a475f7d0f3 Release 4.1.19 - See CHANGELOG.md
2025-05-28 08:05:58 -07:00
dave@tiredofit.ca
399727cd37 Release 4.1.18 - See CHANGELOG.md
2025-05-12 08:33:53 -07:00
Dave Conroy
f58de70dc4 Merge pull request #412 from logicoa/logicoa-mongodb-wildcard-drop-order
fix: wildcard case order
2025-05-12 08:33:01 -07:00
logicoa
5ab0cbe7c5 fix: wildcard case order
MongoDB restore always dropped the schema, irrespective of the flag variant, because the wildcard was the first option in the case statement.
2025-05-12 11:18:43 +02:00
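A minimal bash sketch of the case-ordering issue described above (hypothetical flag name, not the project's actual restore code):

```bash
# Hypothetical flag name, for illustration only.
drop_before_restore="false"

# Wildcard listed first: it matches everything, so the drop branch always runs.
case "${drop_before_restore}" in
    * )     echo "dropping database before restore" ;;   # always taken
    false ) echo "restoring without dropping" ;;          # unreachable
esac

# Wildcard moved last: specific values match first, the wildcard is the fallback.
case "${drop_before_restore}" in
    false ) echo "restoring without dropping" ;;
    * )     echo "dropping database before restore" ;;
esac
```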
dave@tiredofit.ca
9d5406b6a9 Release 4.1.17 - See CHANGELOG.md
2025-04-17 21:08:25 -07:00
dave@tiredofit.ca
53760fbe4d Release 4.1.16 - See CHANGELOG.md 2025-02-21 07:39:11 -08:00
Dave Conroy
a72b562c70 Merge pull request #402 from fermion2020/main
Update restore script
2025-02-21 07:38:17 -08:00
Ihor Kolos
fc586c204f Update restore script
Switch the mysql command to mariadb to resolve the deprecation warning.
Fix the restore issue caused by missing SSL configuration (error message: "TLS/SSL error: SSL is required, but the server does not support it").
2025-02-20 14:31:23 -06:00
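For reference, the MariaDB and MySQL clients take different TLS switches, which is what this restore change and the later client split account for. A hedged sketch with placeholder connection details (passwords omitted):

```bash
# Placeholder host/user/database values, illustrative only.
mariadb -h db-host -P 3306 -u backup --ssl               mydb   # MariaDB client: enable TLS
mariadb -h db-host -P 3306 -u backup --disable-ssl       mydb   # MariaDB client: disable TLS
mysql   -h db-host -P 3306 -u backup --ssl-mode=REQUIRED mydb   # MySQL client: require TLS
mysql   -h db-host -P 3306 -u backup --ssl-mode=DISABLED mydb   # MySQL client: disable TLS
```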
dave@tiredofit.ca
e9ed8d1a72 Release 4.1.15 - See CHANGELOG.md
2025-01-29 08:01:35 -08:00
Dave Conroy
78ac4a4a81 Add username/password check and append auth details to a couch db connectivity check 2025-01-29 07:59:49 -08:00
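The connectivity check referred to here is an HTTP probe that waits for a 200 response; with credentials configured, they are passed to curl via `-u`. A minimal sketch with placeholder values:

```bash
# Placeholder credentials and host, illustrative only.
code=$(curl -XGET -sSL -o /dev/null -w '%{http_code}' \
        -u "backup_user:backup_pass" http://couchdb:5984)
if [ "${code}" = "200" ]; then echo "CouchDB reachable"; else echo "waiting (got ${code})"; fi
```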
Dave Conroy
949aafdbe1 fix - zyclonite being attached to compression for couchdb 2025-01-29 07:53:39 -08:00
dave@tiredofit.ca
7a94472055 Release 4.1.14 - See CHANGELOG.md
2025-01-21 12:59:35 -08:00
dave@tiredofit.ca
23aeaf58a2 Release 4.1.13 - See CHANGELOG.md
2025-01-21 09:30:06 -08:00
Dave Conroy
b88816337f Separate TLS configuration for MariaDB and MySQL 2025-01-21 09:29:29 -08:00
Dave Conroy
ac8181b3b5 Update MySQL client to 8.4.4 2025-01-21 08:33:22 -08:00
Dave Conroy
c75c41a34d Update AWS CLI to 1.37.2 2025-01-21 08:32:52 -08:00
dave@tiredofit.ca
244e411e76 Release 4.1.12 - See CHANGELOG.md 2024-12-13 07:51:35 -08:00
dave@tiredofit.ca
e69ac23898 Release 4.1.11 - See CHANGELOG.md 2024-12-13 07:40:04 -08:00
dave@tiredofit.ca
261951045f Release 4.1.10 - See CHANGELOG.md 2024-12-12 08:38:57 -08:00
dave@tiredofit.ca
67f4326d0b Release 4.1.9 - See CHANGELOG.md 2024-11-07 11:16:32 -08:00
dave@tiredofit.ca
2cd62b8732 Release 4.1.8 - See CHANGELOG.md 2024-10-29 18:58:34 -07:00
dave@tiredofit.ca
0d2b3ccc8c Release 4.1.4 - See CHANGELOG.md 2024-08-13 16:34:44 -07:00
Dave Conroy
90f53a7f00 Merge pull request #358 from ToshY/docs/blobxfer-mode
[docs] fixed blobxfer mode correct parameter name
2024-07-31 13:07:30 -07:00
ToshY
c5f89da681 fixed blobxfermode correct parameter name 2024-07-31 08:11:32 +00:00
dave@tiredofit.ca
753a780204 Release 4.1.3 - See CHANGELOG.md 2024-07-05 12:06:15 -07:00
dave@tiredofit.ca
7c07253428 Release 4.1.2 - See CHANGELOG.md 2024-07-02 16:15:22 -07:00
Dave Conroy
0fdb447706 Merge pull request #354 from effectivelywild/main
Resolve multiple issues using Azure blobs for remote storage
2024-07-02 16:13:41 -07:00
Frank Muise
0d23c2645c Add --no-overwrite to blobxfer download 2024-06-30 16:28:16 -04:00
Frank Muise
4786ea9c7f Update log entry for blob sync 2024-06-30 14:56:50 -04:00
Frank Muise
a26dba947b Fix issues with Azure blobs 2024-06-30 14:53:31 -04:00
dave@tiredofit.ca
b9fa7d18b1 Release 4.1.1 - See CHANGELOG.md 2024-06-19 15:41:45 -07:00
7 changed files with 349 additions and 55 deletions

View File

@@ -1,3 +1,134 @@
## 4.1.21 2025-08-08 <dave at tiredofit dot ca>
### Changed
- Fix for 4.1.20 release inadvertently failing backups (credit tlex@github)
## 4.1.20 2025-07-23 <dave at tiredofit dot ca>
### Changed
- Fix backup error checking routines #417 - credit alteriks@github
## 4.1.19 2025-05-28 <dave at tiredofit dot ca>
### Changed
- Force overwrite manual scripts as opposed to append (#414)
## 4.1.18 2025-05-12 <dave at tiredofit dot ca>
### Changed
- Fix MongoDB restore so it no longer drops the DB before each restore unless explicitly told to (credit logicoa@github)
## 4.1.17 2025-04-17 <dave at tiredofit dot ca>
### Changed
- Fix issue with Postgres database cleanup when ALL databases are backed up as one file (SPLIT_DB=FALSE)
## 4.1.16 2025-02-21 <dave at tiredofit dot ca>
### Added
- Update to tiredofit/alpine:7.10.28
- Support TLS connectivity with restore script (credit fermion2020@github)
## 4.1.15 2025-01-29 <dave at tiredofit dot ca>
### Added
- Add username and password support when checking for connectivity to couchdb (credit: JvSomeren)
### Changed
- Fix issue with couchdb compression routines
## 4.1.14 2025-01-21 <dave at tiredofit dot ca>
### Changed
- Downgrade AWS Client to 1.36.40 for the time being due to incompatibilities with providers on 1.37.x
## 4.1.13 2025-01-21 <dave at tiredofit dot ca>
### Added
- Update MySQL client to 8.4.4
- Update AWS Client to 1.37.2
### Changed
- Separate MySQL and MariaDB TLS configuration for arguments that have deviated
## 4.1.12 2024-12-13 <dave at tiredofit dot ca>
### Changed
- Fix for 4.1.11
## 4.1.11 2024-12-13 <dave at tiredofit dot ca>
### Changed
- Fix when backing up 'ALL' databases with MariaDB
## 4.1.10 2024-12-12 <dave at tiredofit dot ca>
### Added
- Use tiredofit/alpine:3.21-7.10.27 base
- Use the actual binary name when dumping mariadb and mysql databases
- Silence warnings appearing due to filenames and SSL warnings regarding MariaDB / MySQL
## 4.1.9 2024-11-07 <dave at tiredofit dot ca>
### Added
- Pin to tiredofit/alpine:edge-7.10.19
- MySQL 8.4.3 client
- MSSQL and MSODBC 18.4.1.1-1
- Mysql 11.x Support
- Influx2 Client 2.7.5
- AWS Client 1.35.13
- Postgresql 17.x Support
## 4.1.8 2024-10-29 <dave at tiredofit dot ca>
Rebuild using 4.1.4 sources - ignore any versions of 4.1.5-4.1.7
### Added
## 4.1.4 2024-08-13 <dave at tiredofit dot ca>
Please note that if you are using encryption with a passphrase, you may have encountered issues with manual decryption; this release fixes that.
If you try to manually decrypt and your passphrase fails, try wrapping it in single (') or double (") quotes.
### Changed
- Fix for stray quotes appearing inside of ENCRYPT_PASSPHRASE variables
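The quoting advice above concerns the shell, not the cipher: an unquoted passphrase containing spaces or characters such as `$` or `!` is mangled before it ever reaches the decryption tool. A hedged illustration only (the exact tool and flags depend on how the backup was encrypted; GPG symmetric decryption stands in here):

```bash
# Illustrative only: the actual tool/flags depend on how the backup was encrypted.
# Single quotes pass the passphrase through verbatim instead of letting the
# shell expand or split it.
gpg --batch --pinentry-mode loopback --passphrase 'my pa$$word!' \
    --decrypt db_backup.sql.zst.gpg > db_backup.sql.zst
```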
## 4.1.3 2024-07-05 <dave at tiredofit dot ca>
### Changed
- Rebuild to support tiredofit/alpine:7.10.0
## 4.1.2 2024-07-02 <effectivelywild@github>
### Added
- Add support for Azure Blob containers
- Fix timestamps when comparing previous backups
- Resolve unnecessary read operations in Azure
- Resolve issues with backup cleanup operations in Azure
## 4.1.1 2024-06-19 <dave at tiredofit dot ca>
### Changed
- Fix issue where postgresql globals were not being deleted when backing up ALL (#352)
## 4.1.0 2024-05-25 <dave at tiredofit dot ca>
Note that arm/v7 builds have been removed from this release going forward

View File

@@ -1,23 +1,21 @@
ARG DISTRO=alpine
ARG DISTRO_VARIANT=3.20
ARG DISTRO_VARIANT=3.21-7.10.28
FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables
ENV INFLUX1_CLIENT_VERSION=1.8.0 \
INFLUX2_CLIENT_VERSION=2.7.3 \
MSODBC_VERSION=18.3.2.1-1 \
MSSQL_VERSION=18.3.1.1-1 \
MYSQL_VERSION=mysql-8.4.0 \
INFLUX2_CLIENT_VERSION=2.7.5 \
MSODBC_VERSION=18.4.1.1-1 \
MSSQL_VERSION=18.4.1.1-1 \
MYSQL_VERSION=mysql-8.4.4 \
MYSQL_REPO_URL=https://github.com/mysql/mysql-server \
AWS_CLI_VERSION=1.32.113 \
AWS_CLI_VERSION=1.36.40 \
CONTAINER_ENABLE_MESSAGING=TRUE \
CONTAINER_ENABLE_MONITORING=TRUE \
IMAGE_NAME="tiredofit/db-backup" \
IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
### Dependencies
RUN source /assets/functions/00-container && \
set -ex && \
addgroup -S -g 10000 dbbackup && \
@@ -33,6 +31,7 @@ RUN source /assets/functions/00-container && \
git \
go \
libarchive-dev \
libtirpc-dev \
openssl-dev \
libffi-dev \
ncurses-dev \
@@ -48,6 +47,7 @@ RUN source /assets/functions/00-container && \
gpg-agent \
groff \
libarchive \
libtirpc \
mariadb-client \
mariadb-connector-c \
mongodb-tools \
@@ -55,8 +55,8 @@ RUN source /assets/functions/00-container && \
openssl \
pigz \
pixz \
postgresql16 \
postgresql16-client \
postgresql17 \
postgresql17-client \
pv \
py3-botocore \
py3-colorama \
@@ -75,15 +75,28 @@ RUN source /assets/functions/00-container && \
zstd \
&& \
\
apkArch="$(uname -m)"; \
case "$apkArch" in \
x86_64) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
arm64 | aarch64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
case "$(uname -m)" in \
"x86_64" ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
"arm64" | "aarch64" ) mssql=true ; mssql_arch=arm64; influx2=true ; influx_arch=arm64 ;; \
*) sleep 0.1 ;; \
esac; \
\
if [[ $mssql = "true" ]] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
if [[ $influx2 = "true" ]] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
if [ "${mssql,,}" = "true" ] ; then \
curl -sSLO https://download.microsoft.com/download/7/6/d/76de322a-d860-4894-9945-f0cc5d6a45f8/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; \
curl -sSLO https://download.microsoft.com/download/7/6/d/76de322a-d860-4894-9945-f0cc5d6a45f8/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; \
echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; \
else \
echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; \
fi; \
\
if [ "${influx2,,}" = "true" ] ; then \
curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; \
chmod +x /usr/src/influx ; \
mv /usr/src/influx /usr/sbin/ ; \
else \
echo >&2 "Unable to build Influx 2 on this system" ; \
fi ; \
\
clone_git_repo https://github.com/influxdata/influxdb "${INFLUX1_CLIENT_VERSION}" && \
go build -o /usr/sbin/influxd ./cmd/influxd && \
strip /usr/sbin/influxd && \

View File

@@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2023 Dave Conroy
Copyright (c) 2025 Dave Conroy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -356,11 +356,14 @@ If `DEFAULT_BACKUP_LOCATION` = `S3` then the following options are used:
If `DEFAULT_BACKUP_LOCATION` = `blobxfer` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| -------------------------------------- | ------------------------------------------- | ------------------- | ------- |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DEFAULT_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
| Parameter | Description | Default | `_FILE` |
| -------------------------------------- | ------------------------------------------------------------------- | ------------------- | ------- |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DEFAULT_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
| `DEFAULT_BLOBXFER_MODE` | Azure Storage mode e.g. `auto`, `file`, `append`, `block` or `page` | `auto` | x |
- When `DEFAULT_BLOBXFER_MODE` is set to `auto` it will use blob containers by default. If the `DEFAULT_BLOBXFER_REMOTE_PATH` path does not exist, a blob container with that name will be created.
> This service uploads files from the backup target directory `DEFAULT_FILESYSTEM_PATH`.
> If a cleanup configuration in `DEFAULT_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
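Putting the parameters above together, a hypothetical environment snippet for an Azure blob target might look like this (account name and key are placeholders):

```bash
# Hypothetical values, substitute your own storage account details.
DEFAULT_BACKUP_LOCATION=blobxfer
DEFAULT_BLOBXFER_STORAGE_ACCOUNT=mystorageaccount
DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY=<storage-account-key>
DEFAULT_BLOBXFER_REMOTE_PATH=/docker-db-backup   # created as a blob container if missing
DEFAULT_BLOBXFER_MODE=auto                       # auto | file | append | block | page
```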
@@ -636,11 +639,14 @@ If `DB01_BACKUP_LOCATION` = `S3` then the following options are used:
If `DB01_BACKUP_LOCATION` = `blobxfer` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| ----------------------------------- | ------------------------------------------- | ------------------- | ------- |
| `DB01_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DB01_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
| Parameter | Description | Default | `_FILE` |
| -------------------------------------- | ------------------------------------------------------------------- | ------------------- | ------- |
| `DB01_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DB01_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
| `DB01_BLOBXFER_REMOTE_MODE` | Azure Storage mode e.g. `auto`, `file`, `append`, `block` or `page` | `auto` | x |
- When `DEFAULT_BLOBXFER_MODE` is set to `auto` it will use blob containers by default. If the `DEFAULT_BLOBXFER_REMOTE_PATH` path does not exist, a blob container with that name will be created.
> This service uploads files from backup directory `DB01_BACKUP_FILESYSTEM_PATH`.
> If a cleanup configuration in `DB01_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.

View File

@@ -8,6 +8,7 @@ DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
DEFAULT_BACKUP_LOCATION=${DEFAULT_BACKUP_LOCATION:-"FILESYSTEM"}
DEFAULT_BLOBXFER_REMOTE_PATH=${DEFAULT_BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
DEFAULT_BLOBXFER_MODE=${DEFAULT_BLOBXFER_MODE:-"auto"}
DEFAULT_CHECKSUM=${DEFAULT_CHECKSUM:-"MD5"}
DEFAULT_COMPRESSION=${DEFAULT_COMPRESSION:-"ZSTD"}
DEFAULT_COMPRESSION_LEVEL=${DEFAULT_COMPRESSION_LEVEL:-"3"}

View File

@@ -66,6 +66,7 @@ bootstrap_variables() {
DEFAULT_BLOBXFER_STORAGE_ACCOUNT \
DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY \
DEFAULT_BLOBXFER_REMOTE_PATH \
DEFAULT_BLOBXFER_MODE \
DB"${backup_instance_number}"_AUTH \
DB"${backup_instance_number}"_TYPE \
DB"${backup_instance_number}"_HOST \
@@ -93,6 +94,7 @@ bootstrap_variables() {
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT \
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \
DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \
DB"${backup_instance_number}"_BLOBXFER_MODE \
BLOBXFER_STORAGE_ACCOUNT \
BLOBXFER_STORAGE_ACCOUNT_KEY \
DB_HOST \
@@ -163,6 +165,11 @@ bootstrap_variables() {
sed -i "s|_PASS='\(.*\)'|_PASS=\1|g" "${backup_instance_vars}"
fi
if grep -qo ".*_PASSPHRASE='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _PASSPHRASE variable with quotes"
sed -i "s|_PASSPHRASE='\(.*\)'|_PASSPHRASE=\1|g" "${backup_instance_vars}"
fi
if grep -qo "MONGO_CUSTOM_URI='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _MONGO_CUSTOM_URI variable with quotes"
sed -i "s|MONGO_CUSTOM_URI='\(.*\)'|MONGO_CUSTOM_URI=\1|g" "${backup_instance_vars}"
@@ -199,6 +206,7 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_MODE backup_job_blobxfer_mode
transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum
transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time
transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression
@@ -405,9 +413,11 @@ EOF
case "${backup_job_mysql_client,,}" in
mariadb )
_mysql_prefix=/usr/bin/
_mysql_bin_prefix=mariadb-
;;
mysql )
_mysql_prefix=/opt/mysql/bin/
_mysql_bin_prefix=mysql
;;
* )
print_error "I don't understand '${backup_job_mysql_client,,}' as a client. Exiting.."
@@ -419,6 +429,14 @@ EOF
if [ -n "${backup_job_db_pass}" ] ; then export MYSQL_PWD=${backup_job_db_pass} ; fi
if var_true "${backup_job_mysql_enable_tls}" ; then
case "${backup_job_mysql_client,,}" in
mariadb )
mysql_tls_args="--ssl"
;;
mysql )
mysql_tls_args="--ssl-mode=REQUIRED"
;;
esac
if [ -n "${backup_job_mysql_tls_ca_file}" ] ; then
mysql_tls_args="--ssl_ca=${backup_job_mysql_tls_ca_file}"
fi
@@ -430,12 +448,28 @@ EOF
fi
if var_true "${backup_job_mysql_tls_verify}" ; then
mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
case "${backup_job_mysql_client,,}" in
mariadb )
mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
;;
mysql )
mysql_tls_args="${mysql_tls_args} --ssl-mode=VERIFY_CA"
;;
esac
fi
if [ -n "${backup_job_mysql_tls_version}" ] ; then
mysql_tls_args="${mysql_tls_args} --tls_version=${backup_job_mysql_tls_version}"
fi
else
case "${backup_job_mysql_client,,}" in
mariadb )
mysql_tls_args="--disable-ssl"
;;
mysql )
mysql_tls_args="--ssl-mode=DISABLED"
;;
esac
fi
;;
"mssql" | "microsoftsql" )
@@ -498,7 +532,7 @@ backup_couch() {
prepare_dbbackup
backup_job_filename=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt
backup_job_filename_base=couch_${backup_job_db_name}_${backup_job_db_host#*//}
compressionzyclonite
compression
if var_true "${DEBUG_BACKUP_COUCH}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
@@ -525,11 +559,11 @@ backup_influx() {
print_debug "[backup_influx] Influx DB Version 1 selected"
for db in ${db_names}; do
prepare_dbbackup
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
pre_dbbackup "${db}"
write_log notice "Dumping Influx database: '${db}'"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
@@ -654,7 +688,6 @@ backup_mssql() {
compression
pre_dbbackup all
run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"
file_encryption
timer backup finish
generate_checksum
@@ -680,7 +713,7 @@ backup_mysql() {
if [ "${backup_job_db_name,,}" = "all" ] ; then
write_log debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(run_as_user ${_mysql_prefix}mysql -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
db_names=$(run_as_user ${_mysql_prefix}${_mysql_bin_prefix/-/} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${backup_job_db_name_exclude}" ] ; then
db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
@@ -703,8 +736,8 @@ backup_mysql() {
pre_dbbackup "${db}"
write_log notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
run_as_user ${play_fair} ${_mysql_prefix}mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
run_as_user ${play_fair} ${_mysql_prefix}${_mysql_bin_prefix}dump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
@@ -724,8 +757,8 @@ backup_mysql() {
pre_dbbackup all
write_log notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
run_as_user ${play_fair} ${_mysql_prefix}mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
run_as_user ${play_fair} ${_mysql_prefix}${_mysql_bin_prefix}dump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
@@ -742,12 +775,13 @@ backup_pgsql() {
backup_pgsql_globals() {
prepare_dbbackup
backup_job_filename=pgsql_globals_${backup_job_db_host,,}_${now}.sql
backup_job_global_base=pgsql_globals_${backup_job_db_host,,}
compression
pre_dbbackup "globals"
print_notice "Dumping PostgresSQL globals: with 'pg_dumpall -g' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code "${backup_job_filename}"
timer backup finish
@@ -796,7 +830,7 @@ backup_pgsql() {
write_log notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
@@ -812,7 +846,11 @@ backup_pgsql() {
write_log debug "Not splitting database dumps into their own files"
prepare_dbbackup
backup_job_filename=pgsql_all_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,}
if [ "${backup_job_db_name,,}" = "all" ] ; then
backup_job_filename_base=pgsql_all_${backup_job_db_host,,}
else
backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,}
fi
compression
pre_dbbackup all
write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
@@ -826,7 +864,7 @@ backup_pgsql() {
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
timer backup finish
@@ -909,14 +947,16 @@ backup_sqlite3() {
check_availability() {
if var_true "${DEBUG_CHECK_AVAILABILITY}" ; then debug on; fi
### Set the Database Type
if var_false "${backup_job_skip_availability_check}" ; then
case "${dbtype}" in
"couch" )
counter=0
code_received=0
if [ -n "${backup_job_db_user}" ] && [ -n ${backup_job_db_pass} ]; then
_ca_couch_auth="-u ${backup_job_db_user}:${backup_job_db_pass}"
fi
while [ "${code_received}" != "200" ]; do
code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${backup_job_db_host}:${backup_job_db_port})
code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${_ca_couch_auth} ${backup_job_db_host}:${backup_job_db_port})
if [ "${code_received}" = "200" ] ; then break ; fi
sleep 5
(( counter+=5 ))
@@ -960,7 +1000,7 @@ check_availability() {
"mysql" )
counter=0
export MYSQL_PWD=${backup_job_db_pass}
while ! (run_as_user mysqladmin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
while ! (run_as_user ${_mysql_prefix}${_mysql_bin_prefix}admin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
sleep 5
(( counter+=5 ))
write_log warn "MySQL/MariaDB Server '${backup_job_db_host}' is not accessible, retrying.. (${counter} seconds so far)"
@@ -1063,17 +1103,24 @@ cleanup_old_data() {
write_log info "Cleaning up old backups on filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}"
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
if var_true "${_postgres_backup_globals}"; then
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \;
fi
if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
else
write_log info "Syncing changes via blobxfer"
silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
silent run_as_user blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
fi
;;
"file" | "filesystem" )
write_log info "Cleaning up old backups on filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}"
run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
if var_true "${_postgres_backup_globals}"; then
run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \;
fi
;;
"s3" | "minio" )
write_log info "Cleaning up old backups on S3 storage"
@@ -1200,7 +1247,7 @@ create_schedulers() {
cp -R /assets/dbbackup/template-dbbackup /etc/services.available/dbbackup-"${instance}"
sed -i "s|{{BACKUP_NUMBER}}|${instance}|g" /etc/services.available/dbbackup-"${instance}"/run
if [ "${MODE,,}" = "manual" ] ; then service_stop dbbackup-"${instance}" ; fi
cat <<EOF >> /usr/bin/backup"${instance}"-now
cat <<EOF > /usr/bin/backup"${instance}"-now
#!/bin/bash
source /assets/functions/00-container
PROCESS_NAME=db-backup${instance}
@@ -1220,7 +1267,7 @@ EOF
EOF
else
echo "/usr/bin/backup${instance}-now now" >> /usr/bin/backup-now
echo "/usr/bin/backup${instance}-now now" > /usr/bin/backup-now
fi
instance=$(echo "${instance} +1" | bc)
@@ -1627,8 +1674,8 @@ EOF
if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
else
write_log info "Synchronize local storage from S3 Bucket with blobxfer"
${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete
write_log info "Synchronize local storage from blob container with blobxfer"
${play_fair} blobxfer download --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --restore-file-lmt --delete
write_log info "Moving backup to external storage with blobxfer"
mkdir -p "${backup_job_filesystem_path}"
@@ -1636,7 +1683,7 @@ EOF
run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
silent run_as_user ${play_fair} blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
move_exit_code=$?
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}" ; fi
@@ -1813,7 +1860,7 @@ setup_mode() {
cat <<EOF > /etc/services.d/99-run_forever/run
#!/bin/bash
while true; do
sleep 86400
sleep 86400
done
EOF
chmod +x /etc/services.d/99-run_forever/run

View File

@@ -912,6 +912,83 @@ get_filename() {
r_filename=${opt}
}
get_ssl() {
if grep -q "^DB${detected_host_num}_MYSQL_ENABLE_TLS=" "${restore_vars}" ; then
detected_ssl_value=$(grep "^DB${detected_host_num}_MYSQL_ENABLE_TLS=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
fi
if [[ -z "${detected_ssl_value}" ]]; then
print_debug "Parsed SSL Variant: 1 - No Env Variable Found"
default_ssl="false" # Default if no env variable
q_ssl_variant=1
q_ssl_menu_opt_default="| (${cwh}N${cdgy}) * "
q_ssl_menu="" #No menu option
else
print_debug "Parsed SSL Variant: 2 - Env Variable DB${detected_host_num}_MYSQL_ENABLE_TLS = '${detected_ssl_value}'"
default_ssl="${detected_ssl_value,,}"
q_ssl_variant=2
q_ssl_menu="E ) Environment Variable DB${detected_host_num}_MYSQL_ENABLE_TLS: '${detected_ssl_value}'"
q_ssl_menu_opt_default="| (${cwh}E${cdgy}) * "
fi
cat <<EOF
Do you wish to use SSL for the connection?
${q_ssl_menu}
Y ) Yes
N ) No
Q ) Quit
EOF
r_ssl=""
case "${q_ssl_variant}" in
1) # No env variable, ask directly
while true; do
read -r -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N\*${cdgy}\) : ${cwh}${coff}) " q_ssl
case "${q_ssl,,}" in
y*)
r_ssl="true"
break
;;
n* | "")
r_ssl="false"
break
;;
q*)
print_info "Quitting Script"
exit 1
;;
esac
done
;;
2) # Env variable exists, offer it as an option
while true; do
read -r -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E\*${cdgy}\) \| \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) : ${cwh}${coff}) " q_ssl
case "${q_ssl,,}" in
e* | "") # Default to env variable if just enter is pressed.
r_ssl="${detected_ssl_value}"
break
;;
y*)
r_ssl="true"
break
;;
n*)
r_ssl="false"
break
;;
q*)
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
#### SCRIPT START
trap control_c INT
bootstrap_variables restore_init
@@ -984,6 +1061,20 @@ else
fi
print_debug "Database Port '${r_dbport}'"
## Question SSL connection
if [[ "${r_dbtype,,}" == "mariadb" || "${r_dbtype,,}" == "mysql" ]]; then
if [ -n "${8}" ]; then
r_ssl="${8}"
else
get_ssl
fi
print_debug "SSL enable: '${r_ssl}'"
else
r_ssl="false"
print_debug "SSL disabled for ${r_dbtype}"
fi
## Parse Extension
case "${r_filename##*.}" in
bz* )
@@ -1013,8 +1104,13 @@ esac
## Perform a restore
case "${r_dbtype}" in
mariadb | mysql )
if [[ "${r_ssl,,}" == "false" ]]; then
mysql_ssl_option="--disable-ssl"
else
mysql_ssl_option=""
fi
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
pv ${r_filename} | ${decompress_cmd}cat | mysql -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${r_dbname}
pv ${r_filename} | ${decompress_cmd}cat | mariadb -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${mysql_ssl_option} ${r_dbname}
exit_code=$?
;;
pgsql | postgres* )
@@ -1036,9 +1132,6 @@ EOF
echo -e "${coff}"
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) \| \(${cwh}Q${cdgy}\) : ${cwh}${coff})" q_menu_mongo_dropdb
case "${q_menu_mongo_dropdb,,}" in
"y" | "yes" | * )
mongo_dropdb="--drop"
;;
"n" | "update" )
unset mongo_dropdb
;;
@@ -1046,6 +1139,9 @@ EOF
print_info "Quitting Script"
exit 1
;;
"y" | "yes" | * )
mongo_dropdb="--drop"
;;
esac
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"