Mirror of https://github.com/tiredofit/docker-db-backup.git (synced 2025-12-21 21:33:28 +01:00)
Compare commits
37 Commits
| SHA1 |
| --- |
| 016c5c1a23 |
| de8a952825 |
| c7912d355e |
| 15902829c0 |
| 2c8f40e37c |
| c360150117 |
| 7c32879e80 |
| a475f7d0f3 |
| 399727cd37 |
| f58de70dc4 |
| 5ab0cbe7c5 |
| 9d5406b6a9 |
| 53760fbe4d |
| a72b562c70 |
| fc586c204f |
| e9ed8d1a72 |
| 78ac4a4a81 |
| 949aafdbe1 |
| 7a94472055 |
| 23aeaf58a2 |
| b88816337f |
| ac8181b3b5 |
| c75c41a34d |
| 244e411e76 |
| e69ac23898 |
| 261951045f |
| 67f4326d0b |
| 2cd62b8732 |
| 0d2b3ccc8c |
| 90f53a7f00 |
| c5f89da681 |
| 753a780204 |
| 7c07253428 |
| 0fdb447706 |
| 0d23c2645c |
| 4786ea9c7f |
| a26dba947b |
CHANGELOG.md (125 lines changed)
@@ -1,3 +1,128 @@

## 4.1.21 2025-08-08 <dave at tiredofit dot ca>

### Changed

- Fix for the 4.1.20 release inadvertently failing backups (credit tlex@github)

## 4.1.20 2025-07-23 <dave at tiredofit dot ca>

### Changed

- Fix backup error checking routines #417 (credit alteriks@github)

## 4.1.19 2025-05-28 <dave at tiredofit dot ca>

### Changed

- Force overwrite of manual scripts as opposed to appending (#414)

## 4.1.18 2025-05-12 <dave at tiredofit dot ca>

### Changed

- Fix MongoDB restore so that it no longer drops the DB before each restore unless explicitly told to (credit logicoa@github)

## 4.1.17 2025-04-17 <dave at tiredofit dot ca>

### Changed

- Fix issue with Postgres database cleanup when ALL databases are backed up as one file (SPLIT_DB=FALSE)

## 4.1.16 2025-02-21 <dave at tiredofit dot ca>

### Added

- Update to tiredofit/alpine:7.10.28
- Support TLS connectivity with the restore script (credit fermion2020@github)

## 4.1.15 2025-01-29 <dave at tiredofit dot ca>

### Added

- Add username and password support when checking for connectivity to CouchDB (credit: JvSomeren)

### Changed

- Fix issue with CouchDB compression routines

## 4.1.14 2025-01-21 <dave at tiredofit dot ca>

### Changed

- Downgrade AWS Client to 1.36.40 for the time being due to incompatibilities between 1.37.x and some providers

## 4.1.13 2025-01-21 <dave at tiredofit dot ca>

### Added

- Update MySQL client to 8.4.4
- Update AWS Client to 1.37.2

### Changed

- Separate MySQL and MariaDB TLS configuration for arguments that have deviated

## 4.1.12 2024-12-13 <dave at tiredofit dot ca>

### Changed

- Fix for 4.1.11

## 4.1.11 2024-12-13 <dave at tiredofit dot ca>

### Changed

- Fix when backing up 'ALL' databases with MariaDB

## 4.1.10 2024-12-12 <dave at tiredofit dot ca>

### Added

- Use tiredofit/alpine:3.21-7.10.27 base
- Use the actual binary name when dumping MariaDB and MySQL databases
- Silence warnings appearing due to filenames and SSL warnings with MariaDB / MySQL

## 4.1.9 2024-11-07 <dave at tiredofit dot ca>

### Added

- Pin to tiredofit/alpine:edge-7.10.19
- MySQL 8.4.3 client
- MSSQL and MSODBC 18.4.1.1-1
- Mysql 11.x Support
- Influx2 Client 2.7.5
- AWS Client 1.35.13
- Postgresql 17.x Support

## 4.1.8 2024-10-29 <dave at tiredofit dot ca>

Rebuild using 4.1.4 sources; ignore any versions 4.1.5 through 4.1.7.

### Added

## 4.1.4 2024-08-13 <dave at tiredofit dot ca>

Please note that if you are using encryption with a passphrase, you may have been encountering issues with manual decryption. This release fixes that.
If you try to decrypt manually and your passphrase fails, try wrapping it in single (') or double (") quotes.

### Changed

- Fix for stray quotes appearing inside ENCRYPT_PASSPHRASE variables

## 4.1.3 2024-07-05 <dave at tiredofit dot ca>

### Changed

- Rebuild to support tiredofit/alpine:7.10.0

## 4.1.2 2024-07-02 <effectivelywild@github>

### Added

- Add support for Azure Blob containers
- Fix timestamps when comparing previous backups
- Resolve unnecessary read operations in Azure
- Resolve issues with backup cleanup operations in Azure

## 4.1.1 2024-06-19 <dave at tiredofit dot ca>

### Changed
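The 4.1.4 note above comes down to shell quoting: an unquoted passphrase with spaces or metacharacters gets mangled before it ever reaches the decryption tool. A minimal sketch of a manual decrypt, assuming the backup was passphrase-encrypted with an openssl-compatible cipher (the exact cipher, KDF and file naming depend on the image version, so treat every specific below as an assumption rather than the project's documented command):

```bash
#!/usr/bin/env bash
# Hypothetical manual decryption of a passphrase-encrypted backup.
# The cipher and KDF flags are assumptions; check the project documentation
# for the parameters your image version actually uses.
encrypted="pgsql_all_db_20240813-000000.sql.zst.enc"   # hypothetical filename
decrypted="${encrypted%.enc}"

# Single quotes keep spaces and shell metacharacters in the passphrase intact,
# which is exactly what the 4.1.4 release note recommends.
openssl enc -d -aes-256-cbc -pbkdf2 \
  -pass 'pass:my passphrase with $pecial characters' \
  -in "${encrypted}" -out "${decrypted}"
```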
Dockerfile (45 lines changed)
@@ -1,23 +1,21 @@
ARG DISTRO=alpine
ARG DISTRO_VARIANT=3.20
ARG DISTRO_VARIANT=3.21-7.10.28

FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
LABEL maintainer="Dave Conroy (github.com/tiredofit)"

### Set Environment Variables
ENV INFLUX1_CLIENT_VERSION=1.8.0 \
    INFLUX2_CLIENT_VERSION=2.7.3 \
    MSODBC_VERSION=18.3.2.1-1 \
    MSSQL_VERSION=18.3.1.1-1 \
    MYSQL_VERSION=mysql-8.4.0 \
    INFLUX2_CLIENT_VERSION=2.7.5 \
    MSODBC_VERSION=18.4.1.1-1 \
    MSSQL_VERSION=18.4.1.1-1 \
    MYSQL_VERSION=mysql-8.4.4 \
    MYSQL_REPO_URL=https://github.com/mysql/mysql-server \
    AWS_CLI_VERSION=1.32.113 \
    AWS_CLI_VERSION=1.36.40 \
    CONTAINER_ENABLE_MESSAGING=TRUE \
    CONTAINER_ENABLE_MONITORING=TRUE \
    IMAGE_NAME="tiredofit/db-backup" \
    IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"

### Dependencies
RUN source /assets/functions/00-container && \
    set -ex && \
    addgroup -S -g 10000 dbbackup && \

@@ -33,6 +31,7 @@ RUN source /assets/functions/00-container && \
    git \
    go \
    libarchive-dev \
    libtirpc-dev \
    openssl-dev \
    libffi-dev \
    ncurses-dev \

@@ -48,6 +47,7 @@ RUN source /assets/functions/00-container && \
    gpg-agent \
    groff \
    libarchive \
    libtirpc \
    mariadb-client \
    mariadb-connector-c \
    mongodb-tools \

@@ -55,8 +55,8 @@ RUN source /assets/functions/00-container && \
    openssl \
    pigz \
    pixz \
    postgresql16 \
    postgresql16-client \
    postgresql17 \
    postgresql17-client \
    pv \
    py3-botocore \
    py3-colorama \

@@ -75,15 +75,28 @@ RUN source /assets/functions/00-container && \
    zstd \
    && \
    \
    apkArch="$(uname -m)"; \
    case "$apkArch" in \
        x86_64) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
        arm64 | aarch64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
    case "$(uname -m)" in \
        "x86_64" ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
        "arm64" | "aarch64" ) mssql=true ; mssql_arch=arm64; influx2=true ; influx_arch=arm64 ;; \
        *) sleep 0.1 ;; \
    esac; \
    \
    if [[ $mssql = "true" ]] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
    if [[ $influx2 = "true" ]] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
    if [ "${mssql,,}" = "true" ] ; then \
        curl -sSLO https://download.microsoft.com/download/7/6/d/76de322a-d860-4894-9945-f0cc5d6a45f8/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; \
        curl -sSLO https://download.microsoft.com/download/7/6/d/76de322a-d860-4894-9945-f0cc5d6a45f8/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; \
        echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; \
    else \
        echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; \
    fi; \
    \
    if [ "${influx2,,}" = "true" ] ; then \
        curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; \
        chmod +x /usr/src/influx ; \
        mv /usr/src/influx /usr/sbin/ ; \
    else \
        echo >&2 "Unable to build Influx 2 on this system" ; \
    fi ; \
    \
    clone_git_repo https://github.com/influxdata/influxdb "${INFLUX1_CLIENT_VERSION}" && \
    go build -o /usr/sbin/influxd ./cmd/influxd && \
    strip /usr/sbin/influxd && \
LICENSE (2 lines changed)
@@ -1,6 +1,6 @@
The MIT License (MIT)

Copyright (c) 2023 Dave Conroy
Copyright (c) 2025 Dave Conroy

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
README.md (26 lines changed)
@@ -356,11 +356,14 @@ If `DEFAULT_BACKUP_LOCATION` = `S3` then the following options are used:

If `DEFAULT_BACKUP_LOCATION` = `blobxfer` then the following options are used:

| Parameter                              | Description                                 | Default             | `_FILE` |
| -------------------------------------- | ------------------------------------------- | ------------------- | ------- |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT`     | Microsoft Azure Cloud storage account name. |                     | x       |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key.  |                     | x       |
| `DEFAULT_BLOBXFER_REMOTE_PATH`         | Remote Azure path                           | `/docker-db-backup` | x       |
| Parameter                              | Description                                                          | Default             | `_FILE` |
| -------------------------------------- | -------------------------------------------------------------------- | ------------------- | ------- |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT`     | Microsoft Azure Cloud storage account name.                          |                     | x       |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key.                           |                     | x       |
| `DEFAULT_BLOBXFER_REMOTE_PATH`         | Remote Azure path                                                    | `/docker-db-backup` | x       |
| `DEFAULT_BLOBXFER_MODE`                | Azure Storage mode, e.g. `auto`, `file`, `append`, `block` or `page` | `auto`              | x       |

- When `DEFAULT_BLOBXFER_MODE` is set to `auto` it will use blob containers by default. If the `DEFAULT_BLOBXFER_REMOTE_PATH` path does not exist, a blob container with that name will be created.

> This service uploads files from the backup target directory `DEFAULT_FILESYSTEM_PATH`.
> If a cleanup configuration in `DEFAULT_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
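As a usage sketch for the table above: the `DEFAULT_BACKUP_LOCATION` and `DEFAULT_BLOBXFER_*` names are the ones documented in this section, while the `DB01_*` connection values are illustrative placeholders following the project's per-job naming, so adjust them for your own database:

```bash
# Minimal sketch: ship backups to an Azure blob container via blobxfer.
# DEFAULT_BACKUP_LOCATION and the DEFAULT_BLOBXFER_* names come from the table
# above; the DB01_* values are placeholders for your own database job.
docker run -d --name db-backup \
  -e DEFAULT_BACKUP_LOCATION=blobxfer \
  -e DEFAULT_BLOBXFER_STORAGE_ACCOUNT=mystorageaccount \
  -e DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY='<storage-account-key>' \
  -e DEFAULT_BLOBXFER_REMOTE_PATH=/docker-db-backup \
  -e DEFAULT_BLOBXFER_MODE=auto \
  -e DB01_TYPE=mariadb \
  -e DB01_HOST=mariadb \
  -e DB01_NAME=appdb \
  -e DB01_USER=backup \
  -e DB01_PASS=secret \
  tiredofit/db-backup
```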
@@ -636,11 +639,14 @@ If `DB01_BACKUP_LOCATION` = `S3` then the following options are used:

If `DB01_BACKUP_LOCATION` = `blobxfer` then the following options are used:

| Parameter                           | Description                                 | Default             | `_FILE` |
| ----------------------------------- | ------------------------------------------- | ------------------- | ------- |
| `DB01_BLOBXFER_STORAGE_ACCOUNT`     | Microsoft Azure Cloud storage account name. |                     | x       |
| `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key.  |                     | x       |
| `DB01_BLOBXFER_REMOTE_PATH`         | Remote Azure path                           | `/docker-db-backup` | x       |
| Parameter                           | Description                                                          | Default             | `_FILE` |
| ----------------------------------- | -------------------------------------------------------------------- | ------------------- | ------- |
| `DB01_BLOBXFER_STORAGE_ACCOUNT`     | Microsoft Azure Cloud storage account name.                          |                     | x       |
| `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key.                           |                     | x       |
| `DB01_BLOBXFER_REMOTE_PATH`         | Remote Azure path                                                    | `/docker-db-backup` | x       |
| `DB01_BLOBXFER_REMOTE_MODE`         | Azure Storage mode, e.g. `auto`, `file`, `append`, `block` or `page` | `auto`              | x       |

- When `DEFAULT_BLOBXFER_MODE` is set to `auto` it will use blob containers by default. If the `DEFAULT_BLOBXFER_REMOTE_PATH` path does not exist, a blob container with that name will be created.

> This service uploads files from the backup directory `DB01_BACKUP_FILESYSTEM_PATH`.
> If a cleanup configuration in `DB01_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
@@ -8,6 +8,7 @@ DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
DEFAULT_BACKUP_LOCATION=${DEFAULT_BACKUP_LOCATION:-"FILESYSTEM"}
DEFAULT_BLOBXFER_REMOTE_PATH=${DEFAULT_BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
DEFAULT_BLOBXFER_MODE=${DEFAULT_BLOBXFER_MODE:-"auto"}
DEFAULT_CHECKSUM=${DEFAULT_CHECKSUM:-"MD5"}
DEFAULT_COMPRESSION=${DEFAULT_COMPRESSION:-"ZSTD"}
DEFAULT_COMPRESSION_LEVEL=${DEFAULT_COMPRESSION_LEVEL:-"3"}
@@ -66,6 +66,7 @@ bootstrap_variables() {
        DEFAULT_BLOBXFER_STORAGE_ACCOUNT \
        DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY \
        DEFAULT_BLOBXFER_REMOTE_PATH \
        DEFAULT_BLOBXFER_MODE \
        DB"${backup_instance_number}"_AUTH \
        DB"${backup_instance_number}"_TYPE \
        DB"${backup_instance_number}"_HOST \

@@ -93,6 +94,7 @@ bootstrap_variables() {
        DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT \
        DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \
        DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \
        DB"${backup_instance_number}"_BLOBXFER_MODE \
        BLOBXFER_STORAGE_ACCOUNT \
        BLOBXFER_STORAGE_ACCOUNT_KEY \
        DB_HOST \

@@ -163,6 +165,11 @@ bootstrap_variables() {
        sed -i "s|_PASS='\(.*\)'|_PASS=\1|g" "${backup_instance_vars}"
    fi

    if grep -qo ".*_PASSPHRASE='.*'" "${backup_instance_vars}"; then
        print_debug "[bootstrap_variables] [backup_init] Found _PASSPHRASE variable with quotes"
        sed -i "s|_PASSPHRASE='\(.*\)'|_PASSPHRASE=\1|g" "${backup_instance_vars}"
    fi

    if grep -qo "MONGO_CUSTOM_URI='.*'" "${backup_instance_vars}"; then
        print_debug "[bootstrap_variables] [backup_init] Found _MONGO_CUSTOM_URI variable with quotes"
        sed -i "s|MONGO_CUSTOM_URI='\(.*\)'|MONGO_CUSTOM_URI=\1|g" "${backup_instance_vars}"

@@ -199,6 +206,7 @@ bootstrap_variables() {
        transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path
        transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account
        transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key
        transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_MODE backup_job_blobxfer_mode
        transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum
        transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time
        transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression
@@ -405,9 +413,11 @@ EOF
        case "${backup_job_mysql_client,,}" in
            mariadb )
                _mysql_prefix=/usr/bin/
                _mysql_bin_prefix=mariadb-
                ;;
            mysql )
                _mysql_prefix=/opt/mysql/bin/
                _mysql_bin_prefix=mysql
                ;;
            * )
                print_error "I don't understand '${backup_job_mysql_client,,}' as a client. Exiting.."

@@ -419,6 +429,14 @@ EOF

        if [ -n "${backup_job_db_pass}" ] ; then export MYSQL_PWD=${backup_job_db_pass} ; fi
        if var_true "${backup_job_mysql_enable_tls}" ; then
            case "${backup_job_mysql_client,,}" in
                mariadb )
                    mysql_tls_args="--ssl"
                    ;;
                mysql )
                    mysql_tls_args="--ssl-mode=REQUIRED"
                    ;;
            esac
            if [ -n "${backup_job_mysql_tls_ca_file}" ] ; then
                mysql_tls_args="--ssl_ca=${backup_job_mysql_tls_ca_file}"
            fi

@@ -430,12 +448,28 @@ EOF
            fi

            if var_true "${backup_job_mysql_tls_verify}" ; then
                mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
                case "${backup_job_mysql_client,,}" in
                    mariadb )
                        mysql_tls_args="${mysql_tls_args} --sslverify-server-cert"
                        ;;
                    mysql )
                        mysql_tls_args="${mysql_tls_args} --ssl-mode=VERIFY_CA"
                        ;;
                esac
            fi

            if [ -n "${backup_job_mysql_tls_version}" ] ; then
                mysql_tls_args="${mysql_tls_args} --tls_version=${backup_job_mysql_tls_version}"
            fi
        else
            case "${backup_job_mysql_client,,}" in
                mariadb )
                    mysql_tls_args="--disable-ssl"
                    ;;
                mysql )
                    mysql_tls_args="--ssl-mode=DISABLED"
                    ;;
            esac
        fi
        ;;
    "mssql" | "microsoftsql" )
@@ -498,7 +532,7 @@ backup_couch() {
    prepare_dbbackup
    backup_job_filename=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt
    backup_job_filename_base=couch_${backup_job_db_name}_${backup_job_db_host#*//}
    compressionzyclonite
    compression
    if var_true "${DEBUG_BACKUP_COUCH}" ; then debug off; fi
    check_exit_code backup "${backup_job_filename}"
    timer backup finish

@@ -654,7 +688,6 @@ backup_mssql() {
    compression
    pre_dbbackup all
    run_as_user ${compress_cmd} "${temporary_directory}/${backup_job_filename_original}"

    file_encryption
    timer backup finish
    generate_checksum
@@ -680,7 +713,7 @@ backup_mysql() {

    if [ "${backup_job_db_name,,}" = "all" ] ; then
        write_log debug "Preparing to back up everything except for information_schema and _* prefixes"
        db_names=$(run_as_user ${_mysql_prefix}mysql -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
        db_names=$(run_as_user ${_mysql_prefix}${_mysql_bin_prefix/-/} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
        if [ -n "${backup_job_db_name_exclude}" ] ; then
            db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n')
            for db_exclude in ${db_names_exclusions} ; do

@@ -703,8 +736,8 @@ backup_mysql() {
            pre_dbbackup "${db}"
            write_log notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
            if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
            run_as_user ${play_fair} ${_mysql_prefix}mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
            exit_code=$?
            run_as_user ${play_fair} ${_mysql_prefix}${_mysql_bin_prefix}dump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
            exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
            if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
            check_exit_code backup "${backup_job_filename}"
            timer backup finish

@@ -724,8 +757,8 @@ backup_mysql() {
        pre_dbbackup all
        write_log notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
        if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
        run_as_user ${play_fair} ${_mysql_prefix}mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
        exit_code=$?
        run_as_user ${play_fair} ${_mysql_prefix}${_mysql_bin_prefix}dump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
        exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
        if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
        check_exit_code backup "${backup_job_filename}"
        timer backup finish
@@ -748,7 +781,7 @@ backup_pgsql() {
        print_notice "Dumping PostgresSQL globals: with 'pg_dumpall -g' ${compression_string}"
        if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
        run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
        exit_code=$?
        exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
        if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
        check_exit_code "${backup_job_filename}"
        timer backup finish

@@ -797,7 +830,7 @@ backup_pgsql() {
            write_log notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
            if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
            run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
            exit_code=$?
            exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
            if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
            check_exit_code backup "${backup_job_filename}"
            timer backup finish

@@ -813,7 +846,11 @@ backup_pgsql() {
        write_log debug "Not splitting database dumps into their own files"
        prepare_dbbackup
        backup_job_filename=pgsql_all_${backup_job_db_host,,}_${now}.sql
        backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,}
        if [ "${backup_job_db_name,,}" = "all" ] ; then
            backup_job_filename_base=pgsql_all_${backup_job_db_host,,}
        else
            backup_job_filename_base=pgsql_${db}_${backup_job_db_host,,}
        fi
        compression
        pre_dbbackup all
        write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"

@@ -827,7 +864,7 @@ backup_pgsql() {
            pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
        done
        run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
        exit_code=$?
        exit_code=$((PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2]))
        if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
        check_exit_code backup "${backup_job_filename}"
        timer backup finish
@@ -910,14 +947,16 @@ backup_sqlite3() {

check_availability() {
    if var_true "${DEBUG_CHECK_AVAILABILITY}" ; then debug on; fi
    ### Set the Database Type
    if var_false "${backup_job_skip_availability_check}" ; then
        case "${dbtype}" in
            "couch" )
                counter=0
                code_received=0
                if [ -n "${backup_job_db_user}" ] && [ -n ${backup_job_db_pass} ]; then
                    _ca_couch_auth="-u ${backup_job_db_user}:${backup_job_db_pass}"
                fi
                while [ "${code_received}" != "200" ]; do
                    code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${backup_job_db_host}:${backup_job_db_port})
                    code_received=$(run_as_user curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${_ca_couch_auth} ${backup_job_db_host}:${backup_job_db_port})
                    if [ "${code_received}" = "200" ] ; then break ; fi
                    sleep 5
                    (( counter+=5 ))

@@ -961,7 +1000,7 @@ check_availability() {
            "mysql" )
                counter=0
                export MYSQL_PWD=${backup_job_db_pass}
                while ! (run_as_user mysqladmin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
                while ! (run_as_user ${_mysql_prefix}${_mysql_bin_prefix}admin -u"${backup_job_db_user}" -P"${backup_job_db_port}" -h"${backup_job_db_host}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
                    sleep 5
                    (( counter+=5 ))
                    write_log warn "MySQL/MariaDB Server '${backup_job_db_host}' is not accessible, retrying.. (${counter} seconds so far)"

@@ -1072,7 +1111,7 @@ cleanup_old_data() {
                write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
            else
                write_log info "Syncing changes via blobxfer"
                silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
                silent run_as_user blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
            fi
            ;;
        "file" | "filesystem" )
@@ -1208,7 +1247,7 @@ create_schedulers() {
        cp -R /assets/dbbackup/template-dbbackup /etc/services.available/dbbackup-"${instance}"
        sed -i "s|{{BACKUP_NUMBER}}|${instance}|g" /etc/services.available/dbbackup-"${instance}"/run
        if [ "${MODE,,}" = "manual" ] ; then service_stop dbbackup-"${instance}" ; fi
        cat <<EOF >> /usr/bin/backup"${instance}"-now
        cat <<EOF > /usr/bin/backup"${instance}"-now
#!/bin/bash
source /assets/functions/00-container
PROCESS_NAME=db-backup${instance}

@@ -1228,7 +1267,7 @@ EOF

EOF
        else
            echo "/usr/bin/backup${instance}-now now" >> /usr/bin/backup-now
            echo "/usr/bin/backup${instance}-now now" > /usr/bin/backup-now
        fi

        instance=$(echo "${instance} +1" | bc)

@@ -1635,8 +1674,8 @@ EOF
            if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
                write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
            else
                write_log info "Synchronize local storage from S3 Bucket with blobxfer"
                ${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete
                write_log info "Synchronize local storage from blob container with blobxfer"
                ${play_fair} blobxfer download --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --restore-file-lmt --delete

                write_log info "Moving backup to external storage with blobxfer"
                mkdir -p "${backup_job_filesystem_path}"

@@ -1644,7 +1683,7 @@ EOF

                run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"

                silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
                silent run_as_user ${play_fair} blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
                move_exit_code=$?

                if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}" ; fi

@@ -1821,7 +1860,7 @@ setup_mode() {
        cat <<EOF > /etc/services.d/99-run_forever/run
#!/bin/bash
while true; do
    sleep 86400
    sleep 86400
done
EOF
        chmod +x /etc/services.d/99-run_forever/run
@@ -912,6 +912,83 @@ get_filename() {
    r_filename=${opt}
}

get_ssl() {
    if grep -q "^DB${detected_host_num}_MYSQL_ENABLE_TLS=" "${restore_vars}" ; then
        detected_ssl_value=$(grep "^DB${detected_host_num}_MYSQL_ENABLE_TLS=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
    fi

    if [[ -z "${detected_ssl_value}" ]]; then
        print_debug "Parsed SSL Variant: 1 - No Env Variable Found"
        default_ssl="false" # Default if no env variable
        q_ssl_variant=1
        q_ssl_menu_opt_default="| (${cwh}N${cdgy}) * "
        q_ssl_menu="" #No menu option
    else
        print_debug "Parsed SSL Variant: 2 - Env Variable DB${detected_host_num}_MYSQL_ENABLE_TLS = '${detected_ssl_value}'"
        default_ssl="${detected_ssl_value,,}"
        q_ssl_variant=2
        q_ssl_menu="E ) Environment Variable DB${detected_host_num}_MYSQL_ENABLE_TLS: '${detected_ssl_value}'"
        q_ssl_menu_opt_default="| (${cwh}E${cdgy}) * "
    fi

    cat <<EOF

Do you wish to use SSL for the connection?
${q_ssl_menu}

Y ) Yes
N ) No
Q ) Quit

EOF

    r_ssl=""
    case "${q_ssl_variant}" in
        1) # No env variable, ask directly
            while true; do
                read -r -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N\*${cdgy}\) : ${cwh}${coff}) " q_ssl
                case "${q_ssl,,}" in
                    y*)
                        r_ssl="true"
                        break
                        ;;
                    n* | "")
                        r_ssl="false"
                        break
                        ;;
                    q*)
                        print_info "Quitting Script"
                        exit 1
                        ;;
                esac
            done
            ;;
        2) # Env variable exists, offer it as an option
            while true; do
                read -r -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E\*${cdgy}\) \| \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) : ${cwh}${coff}) " q_ssl
                case "${q_ssl,,}" in
                    e* | "") # Default to env variable if just enter is pressed.
                        r_ssl="${detected_ssl_value}"
                        break
                        ;;
                    y*)
                        r_ssl="true"
                        break
                        ;;
                    n*)
                        r_ssl="false"
                        break
                        ;;
                    q*)
                        print_info "Quitting Script"
                        exit 1
                        ;;
                esac
            done
            ;;
    esac
}

#### SCRIPT START
trap control_c INT
bootstrap_variables restore_init
@@ -984,6 +1061,20 @@ else
fi
print_debug "Database Port '${r_dbport}'"

## Question SSL connection
if [[ "${r_dbtype,,}" == "mariadb" || "${r_dbtype,,}" == "mysql" ]]; then
    if [ -n "${8}" ]; then
        r_ssl="${8}"
    else
        get_ssl
    fi
    print_debug "SSL enable: '${r_ssl}'"
else
    r_ssl="false"
    print_debug "SSL disabled for ${r_dbtype}"
fi

## Parse Extension
case "${r_filename##*.}" in
    bz* )

@@ -1013,8 +1104,13 @@ esac

## Perform a restore
case "${r_dbtype}" in
    mariadb | mysql )
        if [[ "${r_ssl,,}" == "false" ]]; then
            mysql_ssl_option="--disable-ssl"
        else
            mysql_ssl_option=""
        fi
        print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
        pv ${r_filename} | ${decompress_cmd}cat | mysql -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${r_dbname}
        pv ${r_filename} | ${decompress_cmd}cat | mariadb -u${r_dbuser} -p${r_dbpass} -P${r_dbport} -h${r_dbhost} ${mysql_ssl_option} ${r_dbname}
        exit_code=$?
        ;;
    pgsql | postgres* )

@@ -1036,9 +1132,6 @@ EOF
    echo -e "${coff}"
    read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) \| \(${cwh}Q${cdgy}\) : ${cwh}${coff})" q_menu_mongo_dropdb
    case "${q_menu_mongo_dropdb,,}" in
        "y" | "yes" | * )
            mongo_dropdb="--drop"
            ;;
        "n" | "update" )
            unset mongo_dropdb
            ;;

@@ -1046,6 +1139,9 @@ EOF
            print_info "Quitting Script"
            exit 1
            ;;
        "y" | "yes" | * )
            mongo_dropdb="--drop"
            ;;
    esac

    print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
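A note on the exit-code change that recurs in the backup_mysql and backup_pgsql hunks above: after a pipeline, `$?` only reflects the last command (the trailing `tee`), so a dump that failed earlier in the pipeline was previously reported as a success. Summing the bash `PIPESTATUS` entries surfaces a failure in any stage. A minimal standalone sketch of the pattern (illustrative, not the project's code):

```bash
#!/usr/bin/env bash
# Show why $? alone misses failures inside a pipeline, and how the bash-only
# PIPESTATUS array exposes the exit status of every stage.

false | gzip -c | tee /dev/null > /dev/null
echo "exit status of last stage only: $?"   # prints 0: tee succeeded

false | gzip -c | tee /dev/null > /dev/null
combined=$(( PIPESTATUS[0] + PIPESTATUS[1] + PIPESTATUS[2] ))   # read immediately; any command resets it
echo "sum of all stages: ${combined}"       # non-zero: the failure in the first stage is detected
```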