Merge branch 'master' into develop

Dave Conroy
2022-10-10 12:01:06 -07:00
committed by GitHub
10 changed files with 201 additions and 15 deletions

.gitattributes (new file)

@@ -0,0 +1,2 @@
# Declare files that will always have LF line endings on checkout.
*.* text eol=lf

Dockerfile

@@ -36,6 +36,7 @@ RUN set -ex && \
     postgresql \
     postgresql-client \
     pv \
+    py3-cryptography \
     redis \
     sqlite \
     xz \

@@ -68,6 +69,9 @@ RUN set -ex && \
     make && \
     make install && \
     \
+    apk add gcc build-base libressl-dev libffi-dev python3-dev py3-pip && \
+    pip3 install blobxfer && \
+    \
     ### Cleanup
     apk del .db-backup-build-deps && \
     rm -rf /usr/src/* && \
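The added `apk`/`pip3` steps install the `blobxfer` CLI into the image. A quick smoke test of a local build might look like this (a minimal sketch; the `db-backup-dev` tag is illustrative, not part of this commit):

```bash
# Build the image from the repository root, then confirm the blobxfer
# CLI installed by the new pip3 step is available on the PATH.
docker build -t db-backup-dev .          # illustrative tag
docker run --rm db-backup-dev blobxfer --version
```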

README.md

@@ -14,7 +14,7 @@ This will build a container for backing up multiple types of DB Servers

 Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.

-* dump to local filesystem or backup to S3 Compatible services
+* dump to local filesystem or backup to S3 Compatible services, and Azure.
 * select database user and password
 * backup all databases, single, or multiple databases
 * backup all to seperate files or one singular file

@@ -102,6 +102,7 @@ Images are built primarily for `amd64` architecture, and may also include builds

 * Set various [environment variables](#environment-variables) to understand the capabilities of this image.
 * Map [persistent storage](#data-volumes) for access to configuration and data files for backup.
 * Make [networking ports](#networking) available for public access if necessary

 ### Persistent Storage

 The following directories are used for configuration and can be mapped for persistent storage.

@@ -189,11 +190,11 @@ Your Organization will be mapped to `DB_USER` and your root token will need to be

 If `BACKUP_LOCATION` = `S3` then the following options are used.

 | Parameter             | Description                                                                        | Default |
-| --------------------- | ---------------------------------------------------------------------------------- | ------- |
+|-----------------------|------------------------------------------------------------------------------------|---------|
 | `S3_BUCKET`           | S3 Bucket name e.g. `mybucket`                                                     |         |
 | `S3_KEY_ID`           | S3 Key ID                                                                          |         |
 | `S3_KEY_SECRET`       | S3 Key Secret                                                                      |         |
-| `S3_PATH`             | S3 Pathname to save to e.g. '`backup`'                                             |         |
+| `S3_PATH`             | S3 Pathname to save to (must end in a trailing slash e.g. '`backup/`')             |         |
 | `S3_REGION`           | Define region in which bucket is defined. Example: `ap-northeast-2`                |         |
 | `S3_HOST`             | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS.  |         |
 | `S3_PROTOCOL`         | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`.   | `https` |

@@ -202,6 +203,22 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.

 | _*OR*_                |                                                          |        |
 | `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting  | `TRUE` |

+#### Upload to an Azure storage account with `blobxfer`
+
+Supports uploading backup files with [blobxfer](https://github.com/Azure/blobxfer) to Azure file share storage.
+
+If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
+
+| Parameter                      | Description                                 | Default             |
+|--------------------------------|---------------------------------------------|---------------------|
+| `BLOBXFER_STORAGE_ACCOUNT`     | Microsoft Azure Cloud storage account name. |                     |
+| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key.  |                     |
+| `BLOBXFER_REMOTE_PATH`         | Remote Azure path                           | `/docker-db-backup` |
+
+> This service uploads files from the backup target directory `DB_DUMP_TARGET`.
+> If a cleanup configuration is defined in `DB_CLEANUP_TIME`, the remote directory on Azure storage will also be cleaned automatically.

 ## Maintenance

 ### Shell Access
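For context, a minimal invocation of the image against Azure might combine the variables documented above like this (a sketch only; host names and credentials are placeholders):

```bash
# Hedged sketch: run a backup container that ships dumps to an Azure
# file share via blobxfer. Variable names come from the tables above;
# the concrete values are placeholders.
docker run -d --name db-backup \
  -e DB_TYPE=mariadb \
  -e DB_HOST=mariadb-host \
  -e DB_USER=backup \
  -e DB_PASS=backup-password \
  -e BACKUP_LOCATION=blobxfer \
  -e BLOBXFER_STORAGE_ACCOUNT=mystorageaccount \
  -e BLOBXFER_STORAGE_ACCOUNT_KEY='base64-account-key==' \
  -e BLOBXFER_REMOTE_PATH=/docker-db-backup \
  tiredofit/db-backup
```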

examples/.gitignore (new file)

@@ -0,0 +1,5 @@
# See http://help.github.com/ignore-files/ for more about ignoring files.
# Example container mounted folders
**/backups/
**/db/


@@ -0,0 +1,69 @@
#
# Example for Microsoft SQL Server
# upload with blobxfer to azure storage
#
version: '2'

networks:
  example-mssql-blobxfer-net:
    name: example-mssql-blobxfer-net

services:
  example-mssql-s3-db:
    hostname: example-db-host
    image: mcr.microsoft.com/mssql/server:2019-latest
    container_name: example-mssql-s3-db
    restart: unless-stopped
    ports:
      - "127.0.0.1:11433:1433"
    networks:
      example-mssql-blobxfer-net:
    volumes:
      - ./tmp/backups:/tmp/backups # shared tmp backup directory
    environment:
      ACCEPT_EULA: "Y"
      MSSQL_SA_PASSWORD: 5hQa0utRFBpIY3yhoIyE
      MSSQL_PID: Express

  example-mssql-blobxfer-db-backup:
    container_name: example-mssql-blobxfer-db-backup
    # To build and use an image from the current source, run:
    #   docker build -t tiredofit/db-backup-mssql-blobxfer .
    # Otherwise replace the image below with the published one:
    # image: tiredofit/db-backup
    image: tiredofit/db-backup-mssql-blobxfer
    links:
      - example-mssql-s3-db
    volumes:
      - ./backups:/backup
      - ./tmp/backups:/tmp/backups # shared tmp backup directory
      #- ./post-script.sh:/assets/custom-scripts/post-script.sh
    environment:
      # - DEBUG_MODE=TRUE
      - DB_TYPE=mssql
      - DB_HOST=example-db-host
      # - DB_PORT=1488
      # - DB_NAME=ALL # [ALL] does not work on SQL Server;
      # create a database named `test1` manually first
      - DB_NAME=test1 # back up this database
      - DB_USER=sa
      - DB_PASS=5hQa0utRFBpIY3yhoIyE
      - DB_DUMP_FREQ=1 # backup every minute
      # - DB_DUMP_BEGIN=0000 # backup starts immediately
      - DB_CLEANUP_TIME=3 # clean up backups older than 3 minutes
      - ENABLE_CHECKSUM=TRUE
      - CHECKSUM=SHA1
      - COMPRESSION=GZ
      - SPLIT_DB=FALSE
      - CONTAINER_ENABLE_MONITORING=FALSE
      # === Azure blobxfer ===
      - BACKUP_LOCATION=blobxfer
      # Add your Azure storage account name here
      - BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name}
      # Add your Azure storage account key here
      - BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
      - BLOBXFER_REMOTE_PATH=docker-db-backup
    restart: always
    networks:
      example-mssql-blobxfer-net:
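To try this example, fill in the two `{TODO ...}` values first; then, from the directory containing the compose file, something like the following should work (assuming a recent Docker Compose):

```bash
# Start the stack and follow the backup container's log output to watch
# the dump/upload cycle (DB_DUMP_FREQ=1 backs up every minute).
docker compose up -d
docker compose logs -f example-mssql-blobxfer-db-backup
```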


@@ -0,0 +1,61 @@
#
# Example for Microsoft SQL Server
#
version: '2'

networks:
  example-mssql-net:
    name: example-mssql-net

services:
  example-mssql-db:
    hostname: example-db-host
    image: mcr.microsoft.com/mssql/server:2019-latest
    container_name: example-mssql-db
    restart: unless-stopped
    ports:
      - "127.0.0.1:11433:1433"
    networks:
      example-mssql-net:
    volumes:
      - ./tmp/backups:/tmp/backups # shared tmp backup directory
    environment:
      ACCEPT_EULA: "Y"
      MSSQL_SA_PASSWORD: 5hQa0utRFBpIY3yhoIyE
      MSSQL_PID: Express

  example-mssql-db-backup:
    container_name: example-mssql-db-backup
    # To build and use an image from the current source, run:
    #   docker build -t tiredofit/db-backup-mssql .
    # Otherwise replace the image below with the published one:
    # image: tiredofit/db-backup
    image: tiredofit/db-backup-mssql
    links:
      - example-mssql-db
    volumes:
      - ./backups:/backup
      - ./tmp/backups:/tmp/backups # shared tmp backup directory
      #- ./post-script.sh:/assets/custom-scripts/post-script.sh
    environment:
      # - DEBUG_MODE=TRUE
      - DB_TYPE=mssql
      - DB_HOST=example-db-host
      # - DB_PORT=1488
      # - DB_NAME=ALL # [ALL] does not work on SQL Server;
      # create a database named `test1` manually first
      - DB_NAME=test1
      - DB_USER=sa
      - DB_PASS=5hQa0utRFBpIY3yhoIyE
      - DB_DUMP_FREQ=1 # backup every minute
      # - DB_DUMP_BEGIN=0000 # backup starts immediately
      - DB_CLEANUP_TIME=5 # clean up backups older than 5 minutes
      - ENABLE_CHECKSUM=FALSE
      - CHECKSUM=SHA1
      - COMPRESSION=GZ
      - SPLIT_DB=FALSE
      - CONTAINER_ENABLE_MONITORING=FALSE
    restart: always
    networks:
      example-mssql-net:


@@ -1,9 +1,16 @@
 version: '2'

+networks:
+  example-db-network:
+    name: example-db-network
+
 services:
   example-db:
+    hostname: example-db-host
     container_name: example-db
     image: mariadb:latest
+    ports:
+      - 13306:3306
     volumes:
       - ./db:/var/lib/mysql
     environment:

@@ -12,6 +19,8 @@ services:
       - MYSQL_USER=example
       - MYSQL_PASSWORD=examplepassword
     restart: always
+    networks:
+      - example-db-network

   example-db-backup:
     container_name: example-db-backup

@@ -22,17 +31,21 @@ services:
       - ./backups:/backup
       #- ./post-script.sh:/assets/custom-scripts/post-script.sh
     environment:
+      # - DEBUG_MODE=TRUE
       - DB_TYPE=mariadb
-      - DB_HOST=example-db
+      - DB_HOST=example-db-host
       - DB_NAME=example
       - DB_USER=example
-      - DB_PASS="examplepassword"
+      - DB_PASS=examplepassword
-      - DB_DUMP_FREQ=1440
+      - DB_DUMP_FREQ=1 # backup every minute
-      - DB_DUMP_BEGIN=0000
+      # - DB_DUMP_BEGIN=0000 # backup starts immediately
-      - DB_CLEANUP_TIME=8640
+      - DB_CLEANUP_TIME=5 # clean up backups older than 5 minutes
       - CHECKSUM=SHA1
-      - COMPRESSION=ZSTD
+      - COMPRESSION=GZ
       - SPLIT_DB=FALSE
+      - CONTAINER_ENABLE_MONITORING=FALSE
     restart: always
+    networks:
+      - example-db-network
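With the new example values (`DB_DUMP_FREQ=1`, `DB_CLEANUP_TIME=5`), the mapped `./backups` directory should hold roughly five minutes of dumps at any time. A quick way to watch the rotation (a sketch, run next to the compose file):

```bash
# Observe new dumps appearing every minute and old ones being pruned
# once they pass the 5-minute DB_CLEANUP_TIME threshold.
watch -n 30 'ls -l ./backups'
```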


install/assets/defaults/10-db-backup (Executable file → Normal file)

@@ -1,6 +1,7 @@
 #!/command/with-contenv bash

 BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
+BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
 CHECKSUM=${CHECKSUM:-"MD5"}
 COMPRESSION=${COMPRESSION:-"ZSTD"}
 COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}

@@ -23,4 +24,3 @@ SIZE_VALUE=${SIZE_VALUE:-"bytes"}
 SKIP_AVAILABILITY_CHECK=${SKIP_AVAILABILITY_CHECK:-"FALSE"}
 SPLIT_DB=${SPLIT_DB:-"TRUE"}
 TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
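These defaults rely on bash parameter expansion, so any value supplied via the container environment wins over the fallback; a standalone illustration:

```bash
# ${VAR:-default} keeps an existing value and only falls back when the
# variable is unset or empty.
unset BLOBXFER_REMOTE_PATH
echo "${BLOBXFER_REMOTE_PATH:-/docker-db-backup}"   # -> /docker-db-backup

BLOBXFER_REMOTE_PATH=/my-backups
echo "${BLOBXFER_REMOTE_PATH:-/docker-db-backup}"   # -> /my-backups
```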

install/assets/functions/10-db-backup (Executable file → Normal file)

@@ -175,7 +175,7 @@ backup_mssql() {
         compression
         pre_dbbackup "${DB_NAME}"
         print_notice "Dumping MSSQL database: '${DB_NAME}'"
-        silent /opt/mssql-tools/bin/sqlcmd -E -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} Q "BACKUP DATABASE \[${DB_NAME}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+        silent /opt/mssql-tools18/bin/sqlcmd -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE [${DB_NAME}] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
         exit_code=$?
         check_exit_code $target
         generate_checksum

@@ -460,14 +460,17 @@ cleanup_old_data() {
     if [ -n "${DB_CLEANUP_TIME}" ]; then
         if [ "${master_exit_code}" != 1 ]; then
             case "${BACKUP_LOCATION,,}" in
-                "file" | "filesystem" )
+                "file" | "filesystem" | "blobxfer" )
                     print_info "Cleaning up old backups"
                     mkdir -p "${DB_DUMP_TARGET}"
                     find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+                    print_info "Cleaning up old backups on Azure storage with blobxfer"
+                    blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} --delete --delete-only
                 ;;
                 "s3" | "minio" )
                     print_info "Cleaning up old backups"
-                    aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | while read -r s3_file; do
+                    aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | grep " PRE " -v | while read -r s3_file; do
                         s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
                         s3_createdate=$(date -d "$s3_createdate" "+%s")
                         s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))

@@ -475,7 +478,7 @@ cleanup_old_data() {
                         s3_filename=$(echo $s3_file | awk {'print $4'})
                         if [ "$s3_filename" != "" ] ; then
                             print_debug "Deleting $s3_filename"
-                            silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+                            silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
                         fi
                     fi

@@ -639,6 +642,18 @@ move_dbbackup() {
                 silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
             fi
+            rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
+            rm -rf "${TEMP_LOCATION}"/"${target}"
+        ;;
+        "blobxfer" )
+            print_info "Moving backup to Azure storage with blobxfer"
+            mkdir -p "${DB_DUMP_TARGET}"
+            mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
+            mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
+            blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET}
             rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
             rm -rf "${TEMP_LOCATION}"/"${target}"
         ;;
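Taken together, the new `blobxfer` branches boil down to two CLI calls against the dump target directory; roughly (a sketch, assuming `BLOBXFER_STORAGE_ACCOUNT` and `BLOBXFER_STORAGE_ACCOUNT_KEY` are exported, which is where blobxfer reads its credentials from):

```bash
# move_dbbackup: mirror the local dump directory up to the Azure share.
blobxfer upload --mode file \
  --remote-path "${BLOBXFER_REMOTE_PATH}" \
  --local-path "${DB_DUMP_TARGET}"

# cleanup_old_data: after find(1) prunes expired local files, sync the
# deletions to the remote side without re-uploading anything.
blobxfer upload --mode file \
  --remote-path "${BLOBXFER_REMOTE_PATH}" \
  --local-path "${DB_DUMP_TARGET}" \
  --delete --delete-only
```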