diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..59c288a --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# Declare files that will always have LF line endings on checkout. +*.* text eol=lf \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 78fbb33..b7ebda0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,6 +36,7 @@ RUN set -ex && \ postgresql \ postgresql-client \ pv \ + py3-cryptography \ redis \ sqlite \ xz \ @@ -68,6 +69,9 @@ RUN set -ex && \ make && \ make install && \ \ + apk add gcc build-base libressl-dev libffi-dev python3-dev py3-pip && \ + pip3 install blobxfer && \ + \ ### Cleanup apk del .db-backup-build-deps && \ rm -rf /usr/src/* && \ diff --git a/README.md b/README.md index 4cfa835..6def421 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ This will build a container for backing up multiple types of DB Servers Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers. -* dump to local filesystem or backup to S3 Compatible services +* dump to local filesystem or backup to S3 Compatible services, and Azure. * select database user and password * backup all databases, single, or multiple databases * backup all to seperate files or one singular file @@ -102,6 +102,7 @@ Images are built primarily for `amd64` architecture, and may also include builds * Set various [environment variables](#environment-variables) to understand the capabilities of this image. * Map [persistent storage](#data-volumes) for access to configuration and data files for backup. * Make [networking ports](#networking) available for public access if necessary + ### Persistent Storage The following directories are used for configuration and can be mapped for persistent storage. @@ -189,11 +190,11 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b If `BACKUP_LOCATION` = `S3` then the following options are used. 
| Parameter | Description | Default | -| --------------------- | ----------------------------------------------------------------------------------------- | ------- | +|-----------------------|-------------------------------------------------------------------------------------------|---------| | `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | | | `S3_KEY_ID` | S3 Key ID | | | `S3_KEY_SECRET` | S3 Key Secret | | -| `S3_PATH` | S3 Pathname to save to e.g. '`backup`' | | +| `S3_PATH` | S3 Pathname to save to (must end in a trailing slash e.g. '`backup/`') | | | `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | | | `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | | | `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | `https` | @@ -202,6 +203,22 @@ If `BACKUP_LOCATION` = `S3` then the following options are used. | _*OR*_ | | | | `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` | +#### Upload to an Azure storage account with `blobxfer` + +Supports uploading backup files with [blobxfer](https://github.com/Azure/blobxfer) to Azure File Storage. + + +If `BACKUP_LOCATION` = `blobxfer` then the following options are used. + +| Parameter | Description | Default | +| ------------------------------- | ------------------------------------------------------------------------ | -------------------- | +| `BLOBXFER_STORAGE_ACCOUNT` | Microsoft Azure Cloud storage account name. | | +| `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | +| `BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | + +> This service uploads files from the backup target directory `DB_DUMP_TARGET`. +> If a cleanup configuration in `DB_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically. 
+ ## Maintenance ### Shell Access diff --git a/examples/.gitignore b/examples/.gitignore new file mode 100644 index 0000000..6180330 --- /dev/null +++ b/examples/.gitignore @@ -0,0 +1,5 @@ +# See http://help.github.com/ignore-files/ for more about ignoring files. + +# Example container mounted folders +**/backups/ +**/db/ \ No newline at end of file diff --git a/examples/mssql-blobxfer/docker-compose.yml b/examples/mssql-blobxfer/docker-compose.yml new file mode 100644 index 0000000..efe648f --- /dev/null +++ b/examples/mssql-blobxfer/docker-compose.yml @@ -0,0 +1,69 @@ +# +# Example for Microsoft SQL Server +# upload with blobxfer to azure storage +# + +version: '2' + +networks: + example-mssql-blobxfer-net: + name: example-mssql-blobxfer-net + +services: + example-mssql-s3-db: + hostname: example-db-host + image: mcr.microsoft.com/mssql/server:2019-latest + container_name: example-mssql-s3-db + restart: unless-stopped + ports: + - "127.0.0.1:11433:1433" + networks: + example-mssql-blobxfer-net: + volumes: + - ./tmp/backups:/tmp/backups # shared tmp backup directory + environment: + ACCEPT_EULA: Y + MSSQL_SA_PASSWORD: 5hQa0utRFBpIY3yhoIyE + MSSQL_PID: Express + + example-mssql-blobxfer-db-backup: + container_name: example-mssql-blobxfer-db-backup + # if you want to build and use image from current source + # execute in terminal --> docker build -t tiredofit/db-backup-mssql-blobxfer . + # replace --> image: tiredofit/db-backup-mssql + # image: tiredofit/db-backup + image: tiredofit/db-backup-mssql-blobxfer + links: + - example-mssql-s3-db + volumes: + - ./backups:/backup + - ./tmp/backups:/tmp/backups # shared tmp backup directory + #- ./post-script.sh:/assets/custom-scripts/post-script.sh + environment: + # - DEBUG_MODE=TRUE + - DB_TYPE=mssql + - DB_HOST=example-db-host + # - DB_PORT=1488 + # - DB_NAME=ALL # [ALL] not working on sql server. 
+ # create database with name `test1` manually first + - DB_NAME=test1 # Create this database + - DB_USER=sa + - DB_PASS=5hQa0utRFBpIY3yhoIyE + - DB_DUMP_FREQ=1 # backup every minute + # - DB_DUMP_BEGIN=0000 # backup starts immediately + - DB_CLEANUP_TIME=3 # clean backups older than 3 minutes + - ENABLE_CHECKSUM=TRUE + - CHECKSUM=SHA1 + - COMPRESSION=GZ + - SPLIT_DB=FALSE + - CONTAINER_ENABLE_MONITORING=FALSE + # === Azure blobxfer === + - BACKUP_LOCATION=blobxfer + # Add your Azure storage account name here + - BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name} + # Add your Azure storage account key here + - BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key} + - BLOBXFER_REMOTE_PATH=docker-db-backup + restart: always + networks: + example-mssql-blobxfer-net: \ No newline at end of file diff --git a/examples/mssql/docker-compose.yml b/examples/mssql/docker-compose.yml new file mode 100644 index 0000000..4e9554c --- /dev/null +++ b/examples/mssql/docker-compose.yml @@ -0,0 +1,61 @@ +# +# Example for Microsoft SQL Server +# + +version: '2' + +networks: + example-mssql-net: + name: example-mssql-net + +services: + example-mssql-db: + hostname: example-db-host + image: mcr.microsoft.com/mssql/server:2019-latest + container_name: example-mssql-db + restart: unless-stopped + ports: + - "127.0.0.1:11433:1433" + networks: + example-mssql-net: + volumes: + - ./tmp/backups:/tmp/backups # shared tmp backup directory + environment: + ACCEPT_EULA: Y + MSSQL_SA_PASSWORD: 5hQa0utRFBpIY3yhoIyE + MSSQL_PID: Express + + example-mssql-db-backup: + container_name: example-mssql-db-backup + # if you want to build and use image from current source + # execute in terminal --> docker build -t tiredofit/db-backup-mssql . 
# replace --> image: tiredofit/db-backup-mssql + # image: tiredofit/db-backup + image: tiredofit/db-backup-mssql + links: + - example-mssql-db + volumes: + - ./backups:/backup + - ./tmp/backups:/tmp/backups # shared tmp backup directory + #- ./post-script.sh:/assets/custom-scripts/post-script.sh + environment: + # - DEBUG_MODE=TRUE + - DB_TYPE=mssql + - DB_HOST=example-db-host + # - DB_PORT=1488 + # - DB_NAME=ALL # [ALL] not working on sql server. + # create database with name `test1` manually first + - DB_NAME=test1 + - DB_USER=sa + - DB_PASS=5hQa0utRFBpIY3yhoIyE + - DB_DUMP_FREQ=1 # backup every minute + # - DB_DUMP_BEGIN=0000 # backup starts immediately + - DB_CLEANUP_TIME=5 # clean backups older than 5 minutes + - ENABLE_CHECKSUM=FALSE + - CHECKSUM=SHA1 + - COMPRESSION=GZ + - SPLIT_DB=FALSE + - CONTAINER_ENABLE_MONITORING=FALSE + restart: always + networks: + example-mssql-net: \ No newline at end of file diff --git a/examples/docker-compose.yml b/examples/mysql/docker-compose.yml old mode 100755 new mode 100644 similarity index 55% rename from examples/docker-compose.yml rename to examples/mysql/docker-compose.yml index 66a93ed..f65b18b --- a/examples/docker-compose.yml +++ b/examples/mysql/docker-compose.yml @@ -1,9 +1,16 @@ version: '2' +networks: + example-db-network: + name: example-db-network + services: example-db: + hostname: example-db-host container_name: example-db image: mariadb:latest + ports: + - 13306:3306 volumes: - ./db:/var/lib/mysql environment: @@ -12,6 +19,8 @@ services: - MYSQL_USER=example - MYSQL_PASSWORD=examplepassword restart: always + networks: + - example-db-network example-db-backup: container_name: example-db-backup @@ -22,17 +31,21 @@ services: - ./backups:/backup #- ./post-script.sh:/assets/custom-scripts/post-script.sh environment: + # - DEBUG_MODE=TRUE - DB_TYPE=mariadb - - DB_HOST=example-db + - DB_HOST=example-db-host - DB_NAME=example - DB_USER=example - - DB_PASS="examplepassword" - - DB_DUMP_FREQ=1440 - - 
DB_DUMP_BEGIN=0000 - - DB_CLEANUP_TIME=8640 + - DB_PASS=examplepassword + - DB_DUMP_FREQ=1 # backup every minute + # - DB_DUMP_BEGIN=0000 # backup starts immediately + - DB_CLEANUP_TIME=5 # clean backups they are older than 5 minute - CHECKSUM=SHA1 - - COMPRESSION=ZSTD + - COMPRESSION=GZ - SPLIT_DB=FALSE + - CONTAINER_ENABLE_MONITORING=FALSE restart: always + networks: + - example-db-network diff --git a/examples/post-script.sh b/examples/mysql/post-script.sh old mode 100755 new mode 100644 similarity index 100% rename from examples/post-script.sh rename to examples/mysql/post-script.sh diff --git a/install/assets/defaults/10-db-backup b/install/assets/defaults/10-db-backup old mode 100755 new mode 100644 index fd35b46..324940d --- a/install/assets/defaults/10-db-backup +++ b/install/assets/defaults/10-db-backup @@ -1,6 +1,7 @@ #!/command/with-contenv bash BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"} +BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"} CHECKSUM=${CHECKSUM:-"MD5"} COMPRESSION=${COMPRESSION:-"ZSTD"} COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"} @@ -22,5 +23,4 @@ SCRIPT_LOCATION_POST=${SCRIPT_LOCATION_POST:-"/assets/scripts/post/"} SIZE_VALUE=${SIZE_VALUE:-"bytes"} SKIP_AVAILABILITY_CHECK=${SKIP_AVAILABILITY_CHECK:-"FALSE"} SPLIT_DB=${SPLIT_DB:-"TRUE"} -TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"} - +TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"} \ No newline at end of file diff --git a/install/assets/functions/10-db-backup b/install/assets/functions/10-db-backup old mode 100755 new mode 100644 index 3a30f2d..e7a5244 --- a/install/assets/functions/10-db-backup +++ b/install/assets/functions/10-db-backup @@ -175,7 +175,7 @@ backup_mssql() { compression pre_dbbackup "${DB_NAME}" print_notice "Dumping MSSQL database: '${DB_NAME}'" - silent /opt/mssql-tools/bin/sqlcmd -E -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} –Q "BACKUP DATABASE \[${DB_NAME}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = 
'${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10" + silent /opt/mssql-tools18/bin/sqlcmd -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE [${DB_NAME}] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10" exit_code=$? check_exit_code $target generate_checksum @@ -460,14 +460,17 @@ cleanup_old_data() { if [ -n "${DB_CLEANUP_TIME}" ]; then if [ "${master_exit_code}" != 1 ]; then case "${BACKUP_LOCATION,,}" in - "file" | "filesystem" ) + "file" | "filesystem" | "blobxfer" ) print_info "Cleaning up old backups" mkdir -p "${DB_DUMP_TARGET}" find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \; + + print_info "Cleaning up old backups on S3 storage with blobxfer" + blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} --delete --delete-only ;; "s3" | "minio" ) print_info "Cleaning up old backups" - aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | while read -r s3_file; do + aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | grep " PRE " -v | while read -r s3_file; do s3_createdate=$(echo $s3_file | awk {'print $1" "$2'}) s3_createdate=$(date -d "$s3_createdate" "+%s") s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 ))) @@ -475,7 +478,7 @@ cleanup_old_data() { s3_filename=$(echo $s3_file | awk {'print $4'}) if [ "$s3_filename" != "" ] ; then print_debug "Deleting $s3_filename" - silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} + silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} fi fi @@ -639,6 +642,18 @@ move_dbbackup() { silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp 
${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} fi + rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}" + rm -rf "${TEMP_LOCATION}"/"${target}" + ;; + "blobxfer" ) + print_info "Moving backup to S3 Bucket with blobxfer" + + mkdir -p "${DB_DUMP_TARGET}" + mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/ + mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}" + + blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET} + rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}" rm -rf "${TEMP_LOCATION}"/"${target}" ;;