Compare commits

...

20 Commits

Author SHA1 Message Date
dave@tiredofit.ca
753a780204 Release 4.1.3 - See CHANGELOG.md 2024-07-05 12:06:15 -07:00
dave@tiredofit.ca
7c07253428 Release 4.1.2 - See CHANGELOG.md 2024-07-02 16:15:22 -07:00
Dave Conroy
0fdb447706 Merge pull request #354 from effectivelywild/main
Resolve multiple issues using Azure blobs for remote storage
2024-07-02 16:13:41 -07:00
Frank Muise
0d23c2645c Add --no-overwrite to blobxfer download 2024-06-30 16:28:16 -04:00
Frank Muise
4786ea9c7f Update log entry for blob sync 2024-06-30 14:56:50 -04:00
Frank Muise
a26dba947b Fix issues with Azure blobs 2024-06-30 14:53:31 -04:00
dave@tiredofit.ca
b9fa7d18b1 Release 4.1.1 - See CHANGELOG.md 2024-06-19 15:41:45 -07:00
dave@tiredofit.ca
626d276c68 Release 4.1.0 - See CHANGELOG.md 2024-05-25 12:48:58 -07:00
dave@tiredofit.ca
f7f72ba2c1 Release 4.0.35 - See CHANGELOG.md 2024-01-14 20:22:08 -08:00
Dave Conroy
2f05d76f4e README weirdness 2024-01-03 17:33:52 -08:00
Dave Conroy
c9a634ff25 Convert > to - in README 2024-01-03 17:21:01 -08:00
dave@tiredofit.ca
0ce21e8f43 Release 4.0.34 - See CHANGELOG.md 2024-01-02 14:01:28 -08:00
Dave Conroy
a700eb0fef Merge pull request #315 from ToshY/docs/latest-symlink-format
[Docs] Updated `DEFAULT_CREATE_LATEST_SYMLINK` description format
2024-01-02 13:21:59 -08:00
Dave Conroy
7baa3774c7 Merge pull request #318 from devmethodgit/main
Fix environment variables in examples
2024-01-02 13:21:43 -08:00
Dave Conroy
341e4d12ea Update case statement to support arm64|aarch64 2024-01-02 13:21:07 -08:00
Dave Conroy
5c51bbcb7e Wrap if statement in double brackets 2024-01-02 12:54:57 -08:00
@vladimirzyuzin
24d9a9a937 Fix environment variables 2023-12-30 20:58:26 +03:00
ToshY
591b8d6dbd updated create latest symlink description format 2023-12-26 12:24:52 +00:00
dave@tiredofit.ca
a5b15b4412 Release 4.0.33 - See CHANGELOG.md 2023-12-18 07:58:54 -08:00
dave@tiredofit.ca
6692cf9834 Release 4.0.32 - See CHANGELOG.md 2023-12-15 15:32:32 -08:00
10 changed files with 177 additions and 75 deletions

View File

@@ -8,7 +8,7 @@ on:
jobs:
build:
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
secrets: inherit

View File

@@ -9,7 +9,7 @@ on:
jobs:
build:
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
secrets: inherit

View File

@@ -1,3 +1,61 @@
## 4.1.3 2024-07-05 <dave at tiredofit dot ca>
### Changed
- Rebuild to support tiredofit/alpine:7.10.0
## 4.1.2 2024-07-02 <effectivelywild@github>
### Added
- Add support for Azure Blob containers
- Fix timestamps when comparing previous backups
- Resolve unnecessary read operations in Azure
- Resolve issues with backup cleanup operations in Azure
## 4.1.1 2024-06-19 <dave at tiredofit dot ca>
### Changed
- Fix issue where PostgreSQL globals were not being deleted when backing up ALL (#352)
## 4.1.0 2024-05-25 <dave at tiredofit dot ca>
Note that arm/v7 builds have been removed as of this release
### Added
- Introduce DEFAULT/DBXX_MYSQL_CLIENT option to choose between the mariadb and mysql clients for dumps, to resolve incompatibility issues
- Alpine 3.20 Base
- MariaDB 10.11.8 Client
- AWS Client 1.32.113
- MySQL Client 8.4.0
## 4.0.35 2024-01-14 <dave at tiredofit dot ca>
### Changed
- Fix issue with email notifications not being able to add a `from` statement
## 4.0.34 2024-01-02 <dave at tiredofit dot ca>
### Changed
- Change the way architectures are detected to re-enable backups with MSSQL and Influx2
## 4.0.33 2023-12-18 <dave at tiredofit dot ca>
### Changed
- Allow _OPTS variables to contain spaces
- Switch references of _DUMP_OPTS to _BACKUP_OPTS
## 4.0.32 2023-12-15 <dave at tiredofit dot ca>
### Changed
- Fix issue with directories not properly being backed up (InfluxDB)
## 4.0.31 2023-12-12 <dave at tiredofit dot ca>
### Changed

View File

@@ -1,5 +1,5 @@
ARG DISTRO=alpine
ARG DISTRO_VARIANT=3.19
ARG DISTRO_VARIANT=3.20
FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
@@ -9,7 +9,9 @@ ENV INFLUX1_CLIENT_VERSION=1.8.0 \
INFLUX2_CLIENT_VERSION=2.7.3 \
MSODBC_VERSION=18.3.2.1-1 \
MSSQL_VERSION=18.3.1.1-1 \
AWS_CLI_VERSION=1.31.5 \
MYSQL_VERSION=mysql-8.4.0 \
MYSQL_REPO_URL=https://github.com/mysql/mysql-server \
AWS_CLI_VERSION=1.32.113 \
CONTAINER_ENABLE_MESSAGING=TRUE \
CONTAINER_ENABLE_MONITORING=TRUE \
IMAGE_NAME="tiredofit/db-backup" \
@@ -27,11 +29,13 @@ RUN source /assets/functions/00-container && \
build-base \
bzip2-dev \
cargo \
cmake \
git \
go \
libarchive-dev \
openssl-dev \
libffi-dev \
ncurses-dev \
python3-dev \
py3-pip \
xz-dev \
@@ -47,8 +51,10 @@ RUN source /assets/functions/00-container && \
mariadb-client \
mariadb-connector-c \
mongodb-tools \
ncurses \
openssl \
pigz \
pixz \
postgresql16 \
postgresql16-client \
pv \
@@ -72,33 +78,33 @@ RUN source /assets/functions/00-container && \
apkArch="$(uname -m)"; \
case "$apkArch" in \
x86_64) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
arm64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
arm64 | aarch64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
*) sleep 0.1 ;; \
esac; \
\
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
if [[ $mssql = "true" ]] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
if [[ $influx2 = "true" ]] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
clone_git_repo https://github.com/influxdata/influxdb "${INFLUX1_CLIENT_VERSION}" && \
go build -o /usr/sbin/influxd ./cmd/influxd && \
strip /usr/sbin/influxd && \
\
clone_git_repo "${MYSQL_REPO_URL}" "${MYSQL_VERSION}" && \
cmake \
-DCMAKE_BUILD_TYPE=MinSizeRel \
-DCMAKE_INSTALL_PREFIX=/opt/mysql \
-DFORCE_INSOURCE_BUILD=1 \
-DWITHOUT_SERVER:BOOL=ON \
&& \
make -j$(nproc) install && \
\
pip3 install --break-system-packages awscli==${AWS_CLI_VERSION} && \
pip3 install --break-system-packages blobxfer && \
\
mkdir -p /usr/src/pbzip2 && \
curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
cd /usr/src/pbzip2 && \
make && \
make install && \
mkdir -p /usr/src/pixz && \
curl -sSL https://github.com/vasi/pixz/releases/download/v1.0.7/pixz-1.0.7.tar.xz | tar xvfJ - --strip 1 -C /usr/src/pixz && \
cd /usr/src/pixz && \
./configure \
--prefix=/usr \
--sysconfdir=/etc \
--localstatedir=/var \
&& \
make && \
make install && \
\
pip3 install --break-system-packages awscli==${AWS_CLI_VERSION} && \
pip3 install --break-system-packages blobxfer && \
\
package remove .db-backup-build-deps && \
package cleanup && \
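With the hunk above, the image carries two dump clients side by side: the distro `mariadb-client` in `/usr/bin` and the source-built MySQL 8.4 client installed under `/opt/mysql` (per `-DCMAKE_INSTALL_PREFIX=/opt/mysql`). A quick smoke test of a freshly built image might look like this sketch; the image tag is an assumption:

```shell
# Hypothetical check that both clients are present (tag is illustrative)
docker run --rm tiredofit/db-backup:latest /usr/bin/mysqldump --version
docker run --rm tiredofit/db-backup:latest /opt/mysql/bin/mysqldump --version
```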

View File

@@ -267,6 +267,7 @@ Encryption occurs after compression and the encrypted filename will have a `.gpg
| `DEFAULT_EXTRA_BACKUP_OPTS` | Pass extra arguments to the backup command only; add them here, e.g. `--extra-command` | | |
| `DEFAULT_EXTRA_ENUMERATION_OPTS` | Pass extra arguments to the database enumeration command only; add them here, e.g. `--extra-command` | | |
| `DEFAULT_EXTRA_OPTS` | Pass extra arguments to both the backup and database enumeration commands; add them here, e.g. `--extra-command` | | |
| `DEFAULT_MYSQL_CLIENT` | Choose between the `mariadb` and `mysql` clients to perform dump operations (for compatibility purposes) | `mariadb` | |
| `DEFAULT_MYSQL_EVENTS` | Backup Events | `TRUE` | |
| `DEFAULT_MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet | `512M` | |
| `DEFAULT_MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction | `TRUE` | |
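The new `DEFAULT_MYSQL_CLIENT` row above pairs with the `_mysql_prefix` case statement added to the backup script further down in this diff: `mariadb` resolves to `/usr/bin/`, `mysql` to `/opt/mysql/bin/`. A minimal sketch of selecting the MySQL client for one job; host name and credentials are placeholders:

```shell
# Hypothetical invocation: DB01 dumps with the bundled Oracle MySQL client,
# while DEFAULT_MYSQL_CLIENT (unset here) leaves other jobs on mariadb.
docker run -d \
  -e DB01_TYPE=mysql \
  -e DB01_HOST=mysql-host \
  -e DB01_NAME=example \
  -e DB01_USER=example \
  -e DB01_PASS=examplepassword \
  -e DB01_MYSQL_CLIENT=mysql \
  tiredofit/db-backup
```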
@@ -325,7 +326,7 @@ If `DEFAULT_BACKUP_LOCATION` = `FILESYSTEM` then the following options are used:
| Variable | Description | Default |
| ------------------------------------ | ----------------------------------------------------------------------------------------------------- | ------------------------------------- |
| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)_(DB_NAME)_(DB_HOST)` | `TRUE` |
| `DEFAULT_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
| `DEFAULT_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
| `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional directory where the database dump archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` |
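The corrected row above documents underscores rather than dashes between the name components. Illustratively (the dump filename is an assumption; the link name follows the documented format):

```shell
# Assumed listing with DEFAULT_CREATE_LATEST_SYMLINK=TRUE for a mysql dump of
# database "example" on host "db01" ("..." elides permission/size columns):
$ ls -og /backup
lrwxrwxrwx ... latest-mysql_example_db01 -> mysql_example_db01_20240102-140128.sql.zst
-rw------- ... mysql_example_db01_20240102-140128.sql.zst
```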
@@ -356,10 +357,13 @@ If `DEFAULT_BACKUP_LOCATION` = `S3` then the following options are used:
If `DEFAULT_BACKUP_LOCATION` = `blobxfer` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| -------------------------------------- | ------------------------------------------- | ------------------- | ------- |
| -------------------------------------- | ------------------------------------------------------------------- | ------------------- | ------- |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DEFAULT_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
| `DEFAULT_BLOBXFER_REMOTE_MODE` | Azure Storage mode e.g. `auto`, `file`, `append`, `block` or `page` | `auto` | x |
- When `DEFAULT_BLOBXFER_REMOTE_MODE` is set to `auto`, blob containers are used by default. If the `DEFAULT_BLOBXFER_REMOTE_PATH` path does not exist, a blob container with that name will be created.
> This service uploads files from the backup target directory `DEFAULT_FILESYSTEM_PATH`.
> If a cleanup configuration in `DEFAULT_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
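A minimal sketch of wiring the options above to an Azure blob container; account name, key, and image tag are placeholders:

```shell
docker run -d \
  -e DEFAULT_BACKUP_LOCATION=blobxfer \
  -e DEFAULT_BLOBXFER_STORAGE_ACCOUNT=examplestorage \
  -e DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY=exampleKey== \
  -e DEFAULT_BLOBXFER_REMOTE_PATH=/docker-db-backup \
  -e DEFAULT_BLOBXFER_REMOTE_MODE=auto \
  tiredofit/db-backup
# With REMOTE_MODE=auto, a blob container named docker-db-backup is created if
# the remote path does not already exist (per the note above).
```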
@@ -636,10 +640,13 @@ If `DB01_BACKUP_LOCATION` = `S3` then the following options are used:
If `DB01_BACKUP_LOCATION` = `blobxfer` then the following options are used:
| Parameter | Description | Default | `_FILE` |
| ----------------------------------- | ------------------------------------------- | ------------------- | ------- |
| -------------------------------------- | ------------------------------------------------------------------- | ------------------- | ------- |
| `DB01_BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | x |
| `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DB01_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
| `DB01_BLOBXFER_REMOTE_MODE` | Azure Storage mode e.g. `auto`, `file`, `append`, `block` or `page` | `auto` | x |
- When `DB01_BLOBXFER_REMOTE_MODE` is set to `auto`, blob containers are used by default. If the `DB01_BLOBXFER_REMOTE_PATH` path does not exist, a blob container with that name will be created.
> This service uploads files from the backup directory `DB01_BACKUP_FILESYSTEM_PATH`.
> If a cleanup configuration in `DB01_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.

View File

@@ -31,8 +31,8 @@ services:
- BACKUP_JOB_CONCURRENCY=1 # Only run one job at a time
- DEFAULT_CHECKSUM=NONE # Don't create checksums
- DEFAULT_COMPRESSION=ZSTD # Compress all with ZSTD
- DEFAULT_DUMP_INTERVAL=1440 # Backup every 1440 minutes
- DEFAULT_DUMP_BEGIN=0000 # Start backing up at midnight
- DEFAULT_BACKUP_INTERVAL=1440 # Backup every 1440 minutes
- DEFAULT_BACKUP_BEGIN=0000 # Start backing up at midnight
- DEFAULT_CLEANUP_TIME=8640 # Cleanup backups after 6 days (8640 minutes)
- DB01_TYPE=mariadb
@@ -40,8 +40,8 @@ services:
- DB01_NAME=example
- DB01_USER=example
- DB01_PASS=examplepassword
- DB01_DUMP_INTERVAL=30 # (override) Backup every 30 minutes
- DB01_DUMP_BEGIN=+1 # (override) Backup starts immediately
- DB01_BACKUP_INTERVAL=30 # (override) Backup every 30 minutes
- DB01_BACKUP_BEGIN=+1 # (override) Backup starts immediately
- DB01_CLEANUP_TIME=180 # (override) Clean up backups older than 180 minutes
- DB01_CHECKSUM=SHA1 # (override) Create a SHA1 checksum
- DB01_COMPRESSION=GZ # (override) Compress with GZIP
@@ -51,8 +51,8 @@ services:
#- DB02_NAME=example
#- DB02_USER=example
#- DB02_PASS=examplepassword
#- DB02_DUMP_INTERVAL=60 # (override) Backup every 60 minutes
#- DB02_DUMP_BEGIN=+10 # (override) Backup starts in ten minutes
#- DB02_BACKUP_INTERVAL=60 # (override) Backup every 60 minutes
#- DB02_BACKUP_BEGIN=+10 # (override) Backup starts in ten minutes
#- DB02_CLEANUP_TIME=240 # (override) Clean up backups older than 240 minutes
#- DB02_CHECKSUM=MD5 # (override) Create an MD5 checksum
#- DB02_COMPRESSION=BZ # (override) Compress with BZIP
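Reading the renamed variables above: `_BACKUP_INTERVAL` is the number of minutes between runs, and `_BACKUP_BEGIN` accepts either an `HHMM` wall-clock time or a `+N` offset in minutes after container start. A compact sketch combining both forms (values are illustrative):

```shell
docker run -d \
  -e DEFAULT_BACKUP_INTERVAL=1440 \
  -e DEFAULT_BACKUP_BEGIN=0000 \
  -e DB01_BACKUP_INTERVAL=30 \
  -e DB01_BACKUP_BEGIN=+1 \
  tiredofit/db-backup
# Global default: daily at midnight; DB01 override: every 30 minutes,
# first run one minute after the container starts.
```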

View File

@@ -45,7 +45,7 @@ services:
- DB01_NAME=test1 # Create this database
- DB01_USER=sa
- DB01_PASS=5hQa0utRFBpIY3yhoIyE
- DB01_DUMP_INTERVAL=5 # backup every 5 minutes
- DB01_BACKUP_INTERVAL=5 # backup every 5 minutes
# - DB01_DUMP_BEGIN=0000 # backup starts at midnight; when unset, backups start immediately
- DB01_CLEANUP_TIME=60 # clean up backups older than 60 minutes
- DB01_CHECKSUM=SHA1 # Set Checksum to be SHA1

View File

@@ -46,7 +46,7 @@ services:
- DB01_NAME=test1
- DB01_USER=sa
- DB01_PASS=5hQa0utRFBpIY3yhoIyE
- DB01_DUMP_INTERVAL=1 # backup every minute
- DB01_BACKUP_INTERVAL=1 # backup every minute
# - DB01_DUMP_BEGIN=0000 # backup starts at midnight; when unset, backups start immediately
- DB01_CLEANUP_TIME=5 # clean up backups older than 5 minutes
- DB01_CHECKSUM=NONE

View File

@@ -6,9 +6,9 @@ DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
DEFAULT_BACKUP_LOCATION=${DEFAULT_BACKUP_LOCATION:-"FILESYSTEM"}
DEFAULT_BLOBXFER_REMOTE_PATH=${DEFAULT_BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
DEFAULT_BLOBXFER_MODE=${DEFAULT_BLOBXFER_MODE:-"auto"}
DEFAULT_CHECKSUM=${DEFAULT_CHECKSUM:-"MD5"}
DEFAULT_COMPRESSION=${DEFAULT_COMPRESSION:-"ZSTD"}
DEFAULT_COMPRESSION_LEVEL=${DEFAULT_COMPRESSION_LEVEL:-"3"}
@@ -20,6 +20,7 @@ DEFAULT_FILESYSTEM_PATH_PERMISSION=${DEFAULT_FILESYSTEM_PATH_PERMISSION:-"700"}
DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"}
DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"}
DEFAULT_MYSQL_CLIENT=${DEFAULT_MYSQL_CLIENT:-"mariadb"}
DEFAULT_MYSQL_ENABLE_TLS=${DEFAULT_MYSQL_ENABLE_TLS:-"FALSE"}
DEFAULT_MYSQL_EVENTS=${DEFAULT_MYSQL_EVENTS:-"TRUE"}
DEFAULT_MYSQL_MAX_ALLOWED_PACKET=${DEFAULT_MYSQL_MAX_ALLOWED_PACKET:-"512M"}

View File

@@ -66,6 +66,7 @@ bootstrap_variables() {
DEFAULT_BLOBXFER_STORAGE_ACCOUNT \
DEFAULT_BLOBXFER_STORAGE_ACCOUNT_KEY \
DEFAULT_BLOBXFER_REMOTE_PATH \
DEFAULT_BLOBXFER_MODE \
DB"${backup_instance_number}"_AUTH \
DB"${backup_instance_number}"_TYPE \
DB"${backup_instance_number}"_HOST \
@@ -93,6 +94,7 @@ bootstrap_variables() {
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT \
DB"${backup_instance_number}"_BLOBXFER_STORAGE_ACCOUNT_KEY \
DB"${backup_instance_number}"_BLOBXFER_REMOTE_PATH \
DB"${backup_instance_number}"_BLOBXFER_MODE \
BLOBXFER_STORAGE_ACCOUNT \
BLOBXFER_STORAGE_ACCOUNT_KEY \
DB_HOST \
@@ -168,6 +170,11 @@ bootstrap_variables() {
sed -i "s|MONGO_CUSTOM_URI='\(.*\)'|MONGO_CUSTOM_URI=\1|g" "${backup_instance_vars}"
fi
if grep -qo ".*_OPTS='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _OPTS variable with quotes"
sed -i "s|_OPTS='\(.*\)'|_OPTS=\1|g" "${backup_instance_vars}"
fi
transform_backup_instance_variable() {
if grep -q "^DB${1}_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
@@ -194,6 +201,7 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_REMOTE_PATH backup_job_blobxfer_remote_path
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT backup_job_blobxfer_storage_account
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_STORAGE_ACCOUNT_KEY backup_job_blobxfer_storage_account_key
transform_backup_instance_variable "${backup_instance_number}" BLOBXFER_MODE backup_job_blobxfer_mode
transform_backup_instance_variable "${backup_instance_number}" CHECKSUM backup_job_checksum
transform_backup_instance_variable "${backup_instance_number}" CLEANUP_TIME backup_job_cleanup_time
transform_backup_instance_variable "${backup_instance_number}" COMPRESSION backup_job_compression
@@ -204,7 +212,7 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PASSPHRASE backup_job_encrypt_passphrase
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PRIVATE_KEY backup_job_encrypt_private_key
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBLIC_KEY backup_job_encrypt_public_key
transform_backup_instance_variable "${backup_instance_number}" EXTRA_DUMP_OPTS backup_job_extra_dump_opts
transform_backup_instance_variable "${backup_instance_number}" EXTRA_BACKUP_OPTS backup_job_extra_backup_opts
transform_backup_instance_variable "${backup_instance_number}" EXTRA_ENUMERATION_OPTS backup_job_extra_enumeration_opts
transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts
transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path
@@ -216,6 +224,7 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" INFLUX_VERSION backup_job_influx_version
transform_backup_instance_variable "${backup_instance_number}" LOG_LEVEL backup_job_log_level
transform_backup_instance_variable "${backup_instance_number}" MONGO_CUSTOM_URI backup_job_mongo_custom_uri
transform_backup_instance_variable "${backup_instance_number}" MYSQL_CLIENT backup_job_mysql_client
transform_backup_instance_variable "${backup_instance_number}" MYSQL_ENABLE_TLS backup_job_mysql_enable_tls
transform_backup_instance_variable "${backup_instance_number}" MYSQL_EVENTS backup_job_mysql_events
transform_backup_instance_variable "${backup_instance_number}" MYSQL_MAX_ALLOWED_PACKET backup_job_mysql_max_allowed_packet
@@ -396,6 +405,20 @@ EOF
dbtype=mysql
backup_job_db_port=${backup_job_db_port:-3306}
check_var backup_job_db_name DB"${v_instance}"_NAME "database name. Separate multiple with commas"
case "${backup_job_mysql_client,,}" in
mariadb )
_mysql_prefix=/usr/bin/
;;
mysql )
_mysql_prefix=/opt/mysql/bin/
;;
* )
print_error "I don't understand '${backup_job_mysql_client,,}' as a client. Exiting.."
exit 99
;;
esac
print_debug "Using '${backup_job_mysql_client,,}' as client"
if [ -n "${backup_job_db_pass}" ] ; then export MYSQL_PWD=${backup_job_db_pass} ; fi
if var_true "${backup_job_mysql_enable_tls}" ; then
@@ -510,15 +533,14 @@ backup_influx() {
backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
compression
pre_dbbackup "${db}"
write_log notice "Dumping Influx database: '${db}'"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} "${temporary_directory}"/"${backup_job_filename}"
run_as_user influxd backup ${influx_compression} ${bucket} -portable -host ${backup_job_db_host}:${backup_job_db_port} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} "${temporary_directory}"/"${backup_job_filename}"
exit_code=$?
check_exit_code backup "${backup_job_filename}"
compression
create_archive
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
timer backup finish
file_encryption
@@ -538,15 +560,14 @@ backup_influx() {
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
compression
pre_dbbackup "${db}"
write_log notice "Dumping Influx2 database: '${db}'"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --compression none "${temporary_directory}"/"${backup_job_filename}"
run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --compression none "${temporary_directory}"/"${backup_job_filename}"
exit_code=$?
check_exit_code backup "${backup_job_filename}"
compression
create_archive
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
timer backup finish
file_encryption
@@ -573,9 +594,9 @@ backup_mongo() {
compression_string="and compressing with gzip"
fi
if [ -n "${backup_job_mongo_custom_uri}" ] ; then
mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
mongo_backup_parameter="--uri=${backup_job_mongo_custom_uri} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}"
else
mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}"
fi
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
pre_dbbackup "${backup_job_db_name}"
@@ -662,7 +683,7 @@ backup_mysql() {
if [ "${backup_job_db_name,,}" = "all" ] ; then
write_log debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(run_as_user mysql -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
db_names=$(run_as_user ${_mysql_prefix}mysql -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_enumeration_opts} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${backup_job_db_name_exclude}" ] ; then
db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
@@ -679,13 +700,13 @@ backup_mysql() {
if var_true "${backup_job_split_db}" ; then
for db in ${db_names} ; do
prepare_dbbackup
backup_job_filename=mysql_${db}_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=mysql_${db}_${backup_job_db_host,,}
backup_job_filename=${backup_job_mysql_client,,}_${db}_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=${backup_job_mysql_client,,}_${db}_${backup_job_db_host,,}
compression
pre_dbbackup "${db}"
write_log notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
run_as_user ${play_fair} ${_mysql_prefix}mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} $db | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
@@ -700,13 +721,13 @@ backup_mysql() {
else
write_log debug "Not splitting database dumps into their own files"
prepare_dbbackup
backup_job_filename=mysql_all_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=mysql_all_${backup_job_db_host,,}
backup_job_filename=${backup_job_mysql_client,,}_all_${backup_job_db_host,,}_${now}.sql
backup_job_filename_base=${backup_job_mysql_client,,}_all_${backup_job_db_host,,}
compression
pre_dbbackup all
write_log notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
run_as_user ${play_fair} mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
run_as_user ${play_fair} ${_mysql_prefix}mysqldump --max-allowed-packet=${backup_job_mysql_max_allowed_packet} -h ${backup_job_db_host} -P ${backup_job_db_port} -u${backup_job_db_user} ${events} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} --databases $(echo ${db_names} | xargs) | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
@@ -724,11 +745,12 @@ backup_pgsql() {
backup_pgsql_globals() {
prepare_dbbackup
backup_job_filename=pgsql_globals_${backup_job_db_host,,}_${now}.sql
backup_job_global_base=pgsql_globals_${backup_job_db_host,,}
compression
pre_dbbackup "globals"
print_notice "Dumping PostgresSQL globals: with 'pg_dumpall -g' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
run_as_user ${play_fair} pg_dumpall -h "${backup_job_db_host}" -U "${backup_job_db_user}" -p "${backup_job_db_port}" -g ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code "${backup_job_filename}"
@@ -777,7 +799,7 @@ backup_pgsql() {
pre_dbbackup "${db}"
write_log notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
run_as_user ${play_fair} pg_dump -h "${backup_job_db_host}" -p "${backup_job_db_port}" -U "${backup_job_db_user}" $db ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
@@ -807,7 +829,7 @@ backup_pgsql() {
for x_db_name in ${tmp_db_names} ; do
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_backup_opts} | ${compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}"
@@ -828,7 +850,7 @@ backup_redis() {
backup_job_filename=redis_all_${backup_job_db_host,,}_${now}.rdb
backup_job_filename_base=redis_${backup_job_db_host,,}
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${temporary_directory}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}
echo bgsave | silent run_as_user ${play_fair} redis-cli -h ${backup_job_db_host} -p ${backup_job_db_port} ${REDIS_PASS_STR} --rdb ${temporary_directory}/${backup_job_filename} ${backup_job_extra_opts} ${backup_job_extra_backup_opts}
sleep 10
try=5
while [ $try -gt 0 ] ; do
@@ -1045,17 +1067,24 @@ cleanup_old_data() {
write_log info "Cleaning up old backups on filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}"
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
if var_true "${_postgres_backup_globals}"; then
find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \;
fi
if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
else
write_log info "Syncing changes via blobxfer"
silent run_as_user blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
silent run_as_user blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete --delete-only
fi
;;
"file" | "filesystem" )
write_log info "Cleaning up old backups on filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}"
run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_filename_base}*" -exec rm -f {} \;
if var_true "${_postgres_backup_globals}"; then
run_as_user find "${backup_job_filesystem_path}"/ -type f -mmin +"${backup_job_cleanup_time}" -iname "${backup_job_global_base}*" -exec rm -f {} \;
fi
;;
"s3" | "minio" )
write_log info "Cleaning up old backups on S3 storage"
@@ -1156,8 +1185,9 @@ create_archive() {
if var_true "${DEBUG_CREATE_ARCHIVE}" ; then debug on; fi
if [ "${exit_code}" = "0" ] ; then
write_log notice "Creating archive file of '${backup_job_filename}' with tar ${compression_string}"
run_as_user tar cf - "${temporary_directory}"/"${backup_job_filename}" | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename}".tar"${extension}" > /dev/null
rm -rf "${temporary_directory}"/"${backup_job_filename}"
run_as_user tar cf - "${temporary_directory}"/"${backup_job_filename_dir}" | ${dir_compress_cmd} | run_as_user tee "${temporary_directory}"/"${backup_job_filename_dir}".tar"${extension}" > /dev/null
backup_job_filename="${backup_job_filename_dir}".tar"${extension}"
rm -rf "${temporary_directory}"/"${backup_job_filename_dir}"
else
write_log error "Skipping creating archive file because backup did not complete successfully"
fi
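Note the behavioral detail in the replacement lines above: after tarring `${backup_job_filename_dir}`, `backup_job_filename` is reassigned to the `.tar${extension}` name, so the later checksum, encryption, and move steps operate on the archive rather than the deleted directory. Illustratively (names and extension are assumptions):

```shell
# Assumed filename flow with ZSTD compression selected:
backup_job_filename_dir=influx2_example_db01_20240705-120615
# tar cf - ... | zstd | tee ${temporary_directory}/${backup_job_filename_dir}.tar.zst
backup_job_filename="${backup_job_filename_dir}.tar.zst"   # downstream steps use this
```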
@@ -1384,7 +1414,7 @@ notify() {
if [ -z "${SMTP_HOST}" ] ; then write_log error "[notifications] No SMTP_HOST variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
if [ -z "${SMTP_PORT}" ] ; then write_log error "[notifications] No SMTP_PORT variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
if var_nottrue "${skip_mail}" ; then
if ! grep -q ^from /etc/msmptrc ; then
if ! grep -q ^from /etc/msmtprc ; then
echo "from ${MAIL_FROM}" >> /etc/msmtprc
fi
mail_recipients=$(echo "${MAIL_TO}" | tr "," "\n")
@@ -1608,8 +1638,8 @@ EOF
if [ -z "${backup_job_blobxfer_storage_account}" ] || [ -z "${backup_job_blobxfer_storage_account_key}" ]; then
write_log warn "Variable _BLOBXFER_STORAGE_ACCOUNT or _BLOBXFER_STORAGE_ACCOUNT_KEY is not set. Skipping blobxfer functions"
else
write_log info "Synchronize local storage from S3 Bucket with blobxfer"
${play_fair} blobxfer download --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --delete
write_log info "Synchronize local storage from blob container with blobxfer"
${play_fair} blobxfer download --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path} --restore-file-lmt --delete
write_log info "Moving backup to external storage with blobxfer"
mkdir -p "${backup_job_filesystem_path}"
@@ -1617,7 +1647,7 @@ EOF
run_as_user mv "${temporary_directory}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
silent run_as_user ${play_fair} blobxfer upload --mode file --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
silent run_as_user ${play_fair} blobxfer upload --no-overwrite --mode ${backup_job_blobxfer_mode} --remote-path ${backup_job_blobxfer_remote_path} --storage-account ${backup_job_blobxfer_storage_account} --storage-account-key ${backup_job_blobxfer_storage_account_key} --local-path ${backup_job_filesystem_path}
move_exit_code=$?
if [ "${backup_job_checksum}" != "none" ] ; then run_as_user rm -rf "${temporary_directory}"/"${backup_job_filename}"."${checksum_extension}" ; fi