Mirror of https://github.com/tiredofit/docker-db-backup.git (synced 2025-12-21 21:33:28 +01:00)

Compare commits: 12 commits
| Author | SHA1 | Date |
| --- | --- | --- |
| | 1e46996812 | |
| | e71334564f | |
| | f83f97bf76 | |
| | 94a8e45af6 | |
| | 9d90e37339 | |
| | 085b7cd6ce | |
| | 12484bb3f3 | |
| | 8fc2721dd4 | |
| | 68174c061f | |
| | fd1d95090e | |
| | 7befba0d96 | |
| | 583253fce7 | |
.github/workflows/main.yml (vendored): 4 changes

@@ -8,8 +8,8 @@ on:
 
 jobs:
   build:
-    uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
     #uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
-    #uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
+    uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
     secrets: inherit
.github/workflows/manual.yml (vendored): 4 changes

@@ -9,8 +9,8 @@ on:
 
 jobs:
   build:
-    uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
     #uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
-    #uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
+    uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
     secrets: inherit
CHANGELOG.md: 26 changes

@@ -1,3 +1,29 @@
+## 3.9.4 2023-06-13 <dave at tiredofit dot ca>
+
+### Added
+- Add ability to use --rsyncable argument to zstd archives
+
+
+## 3.9.3 2023-06-05 <dave at tiredofit dot ca>
+
+### Added
+- Add notification if blobxfer/s3 upload fails (credit @alwynpan)
+- Add zip package
+
+
+## 3.9.2 2023-05-10 <dave at tiredofit dot ca>
+
+### Changed
+- Alpine 3.18 base
+
+
+## 3.9.1 2023-05-03 <dave at tiredofit dot ca>
+
+### Changed
+- Properly allow multiple _FILE environment variables to execute, solving an issue with MySQL backups
+- Fix _FILE functionality for the DB_NAME variable
+
+
 ## 3.9.0 2023-04-26 <dave at tiredofit dot ca>
 
 ### Added
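The 3.9.4 `--rsyncable` entry is about keeping compressed dumps rsync-friendly: the compressor periodically resets its state, so a small change in the input only changes a small region of the archive. A minimal sketch of the effect outside the container (the paths and the remote host are examples, not part of the image):

```bash
# Compress a dump as an "rsyncable" archive; a worker thread count is given
# explicitly, mirroring the -T flag the image passes to zstd.
zstd -q -3 -T2 --rsyncable /tmp/backups/db1.sql

# When the dump is re-created and re-compressed later, rsync only transfers
# the changed regions of the .zst file instead of the whole archive.
rsync -av /tmp/backups/db1.sql.zst backuphost:/srv/backup-archive/
```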
Dockerfile

@@ -1,4 +1,7 @@
-FROM docker.io/tiredofit/alpine:3.17
+ARG DISTRO=alpine
+ARG DISTRO_VARIANT=3.18
+
+FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"
 
 ### Set Environment Variables

@@ -44,6 +47,7 @@ RUN source /assets/functions/00-container && \
             redis \
             sqlite \
             xz \
+            zip \
             zstd \
             && \
     \
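With the `ARG`-based `FROM`, the base distro and version can be overridden at build time without editing the Dockerfile. A brief sketch (the tag and values are only illustrations):

```bash
# Build against a different base variant by overriding the build arguments
# declared before FROM.
docker build \
  --build-arg DISTRO=alpine \
  --build-arg DISTRO_VARIANT=3.18 \
  -t db-backup:local .
```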
README.md: 35 changes

@@ -135,7 +135,7 @@ Be sure to view the following repositories to understand all the customizable options
 
 | Parameter            | Description                                                                                                                       | Default         |
 | -------------------- | --------------------------------------------------------------------------------------------------------------------------------- | --------------- |
-| `BACKUP_LOCATION`    | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi                                                         | `FILESYSTEM`    |
+| `BACKUP_LOCATION`    | Backup to `FILESYSTEM`, `blobxfer` or `S3` compatible services like S3, Minio, Wasabi                                             | `FILESYSTEM`    |
 | `MODE`               | `AUTO` mode to use internal scheduling routines or `MANUAL` to simply use this as manual backups only executed by your own means  | `AUTO`          |
 | `MANUAL_RUN_FOREVER` | `TRUE` or `FALSE` if you wish to try to make the container exit after the backup                                                  | `TRUE`          |
 | `TEMP_LOCATION`      | Perform Backups and Compression in this temporary directory                                                                       | `/tmp/backups/` |
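A minimal sketch of how the options above combine into a container invocation. Only variables from the tables in this README are used; the image reference, hostnames, credentials, and the `/backup` mount point (taken from the `sqlite3` path example in the next table) are placeholders:

```bash
# Scheduled (AUTO) backups of two MariaDB schemas to the local filesystem.
docker run -d --name db-backup \
  -e MODE=AUTO \
  -e BACKUP_LOCATION=FILESYSTEM \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=db1,db2 \
  -e DB_USER=root \
  -e DB_PASS=examplepassword \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```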
@@ -146,18 +146,19 @@ Be sure to view the following repositories to understand all the customizable options
 | `SPLIT_DB`          | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only)                                                          | `TRUE`  |
 
 ### Database Specific Options
-| Parameter          | Description                                                                                                                                     | Default | `_NAME` |
+| Parameter          | Description                                                                                                                                     | Default | `_FILE` |
 | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------- | ------- |
 | `DB_AUTH`          | (Mongo Only - Optional) Authentication Database                                                                                                 |         |         |
 | `DB_TYPE`          | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3`                                                          |         |         |
 | `DB_HOST`          | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3`                                                   |         | x       |
 | `DB_NAME`          | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas e.g. `db1,db2`   |         | x       |
 | `DB_NAME_EXCLUDE`  | If using `ALL`, use this to exclude databases (separated by commas) from being backed up                                                        |         | x       |
 | `DB_USER`          | Username for the database(s) - can use `root` for MySQL                                                                                         |         | x       |
 | `DB_PASS`          | (optional if DB doesn't require it) Password for the database                                                                                   |         | x       |
 | `DB_PORT`          | (optional) Set port to connect to DB_HOST. Defaults are provided                                                                                | varies  | x       |
 | `INFLUX_VERSION`   | Which version of Influx you are backing up from: the `1`.x or `2` series - AMD64 and ARM64 only for `2`                                         |         |         |
 | `MONGO_CUSTOM_URI` | If you wish to override the MongoDB connection string, enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net`              |         | x       |
 |                    | This environment variable will be parsed to populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can override them by making your own entries |         |         |
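The `_FILE` column marks variables that can also be supplied as `<VARIABLE>_FILE` pointing at a file (for example a Docker secret); the container reads the file contents into the plain variable, which is what the `transform_file_var` calls in the script diff further below handle. A hedged sketch, with example paths and credentials:

```bash
# Keep the database password out of `docker inspect` output by mounting it as
# a file and pointing DB_PASS_FILE at it.
mkdir -p ./secrets
printf '%s' 'examplepassword' > ./secrets/db_pass

docker run -d --name db-backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=db1 \
  -e DB_USER=root \
  -e DB_PASS_FILE=/run/secrets/db_pass \
  -v "$(pwd)/secrets/db_pass:/run/secrets/db_pass:ro" \
  tiredofit/db-backup
```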
 #### For Influx DB2:
 
 Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST`, use the syntax `http(s)://db-name`.
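Put concretely, an InfluxDB 2.x backup maps the organization and root token onto the generic variables; everything below other than the placeholder names follows the mapping just described:

```bash
# Organization -> DB_USER, root token -> DB_PASS, DB_HOST in http(s):// form.
docker run -d --name influx-backup \
  -e DB_TYPE=influx \
  -e INFLUX_VERSION=2 \
  -e DB_HOST=http://influxdb \
  -e DB_NAME=ALL \
  -e DB_USER=example-org \
  -e DB_PASS=example-root-token \
  tiredofit/db-backup
```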
@@ -170,14 +171,14 @@ Your Organization will be mapped to `DB_USER` and your root token will need to be ...
 |                          | Absolute HHMM, e.g. `2330` or `0415`                                                                                                           | |
 |                          | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
 | `DB_DUMP_TARGET`         | Directory where the database dumps are kept.                                                                                                   | `${DB_DUMP_TARGET}/archive/` |
-| `DB_DUMP_TARGET_ARCHIVE` | Optional directory where the database dump archives are kept.                                                                                  |
+| `DB_DUMP_TARGET_ARCHIVE` | Optional directory where the database dump archives are kept.                                                                                  | |
 | `DB_CLEANUP_TIME`        | Value in minutes to delete old backups (only fired when the dump frequency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
-| `DB_ARCHIVE_TIME`        | Value in minutes to move all files older than (x) from `DB_DUMP_TARGET` to `DB_DUMP_TARGET_ARCHIVE` - useful when pairing with an external backup system. |
+| `DB_ARCHIVE_TIME`        | Value in minutes to move all files older than (x) from `DB_DUMP_TARGET` to `DB_DUMP_TARGET_ARCHIVE` - useful when pairing with an external backup system. | |
 
 - You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to parse properly. There have been reports of values that start with a `0` being converted into a different format, which will not allow the timer to start at the correct time.
 
 ### Backup Options
-| Parameter                      | Description                                                                                                                   | Default | `_NAME` |
+| Parameter                      | Description                                                                                                                   | Default | `_FILE` |
 | ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------ | ------- | ------- |
 | `COMPRESSION`                  | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE`                                                         | `ZSTD`  |         |
 | `COMPRESSION_LEVEL`            | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows `1` to `19`           | `3`     |         |
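As a rough mental model (a sketch, not the image's actual code), `DB_ARCHIVE_TIME` behaves like a periodic find-and-move of anything older than the threshold from the dump target into the archive target:

```bash
# Sketch only: move dumps older than 1440 minutes (one day) out of the main
# dump target into the archive directory, assuming both variables are set.
find "${DB_DUMP_TARGET}" -maxdepth 1 -type f -mmin +1440 \
  -exec mv {} "${DB_DUMP_TARGET_ARCHIVE}/" \;
```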
@@ -225,7 +226,7 @@ Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer)
 
 If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
 
-| Parameter                      | Description                                 | Default | `_NAME` |
+| Parameter                      | Description                                 | Default | `_FILE` |
 | ------------------------------ | ------------------------------------------- | ------- | ------- |
 | `BLOBXFER_STORAGE_ACCOUNT`     | Microsoft Azure Cloud storage account name. |         | x       |
 | `BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key.  |         | x       |
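A hedged sketch wiring the two options above into a run; the account name and key are placeholders, and the database variables are the same ones documented earlier:

```bash
# Ship finished dumps to Azure storage via blobxfer instead of keeping them
# only on the local filesystem.
docker run -d --name db-backup \
  -e BACKUP_LOCATION=blobxfer \
  -e BLOBXFER_STORAGE_ACCOUNT=examplestorageaccount \
  -e BLOBXFER_STORAGE_ACCOUNT_KEY=exampleBase64Key== \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=db1 \
  -e DB_USER=root \
  -e DB_PASS=examplepassword \
  tiredofit/db-backup
```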
@@ -4,9 +4,11 @@ bootstrap_variables() {
     sanity_var DB_TYPE "Set appropriate DB_TYPE"
     transform_file_var \
         DB_HOST \
         DB_NAME \
+        DB_PORT \
+        DB_USER \
         DB_PASS
 
     case "${DB_TYPE,,}" in
         couch* )
             dbtype=couch
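`transform_file_var` comes from the tiredofit base image and is not defined in this file. A minimal sketch of the behaviour the diff relies on, assuming the usual `<VAR>_FILE` convention:

```bash
# Sketch only: for each variable name passed in, if <NAME>_FILE is set and
# readable, load that file's contents into <NAME> so later code can use the
# plain variable directly. The real helper lives in the base image.
transform_file_var() {
  local var file_var
  for var in "$@"; do
    file_var="${var}_FILE"
    if [ -n "${!file_var:-}" ] && [ -r "${!file_var}" ]; then
      export "${var}=$(cat "${!file_var}")"
    fi
  done
}
```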
@@ -52,7 +54,8 @@
             dbtype=mysql
             DB_PORT=${DB_PORT:-3306}
             sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
-            [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
+            transform_file_var DB_PASS
+            if [ -n "${DB_PASS}" ] ; then export MYSQL_PWD=${DB_PASS} ; fi
             if var_true "${MYSQL_ENABLE_TLS}" ; then
                 if [ -n "${MYSQL_TLS_CA_FILE}" ] ; then
                     mysql_tls_args="--ssl_ca=${MYSQL_TLS_CA_FILE}"
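The `MYSQL_PWD` export is what lets the later `mysqldump` and `mysqladmin` calls run without a `-p` flag, so the password never appears on the command line or in the process list. Outside the container the same pattern looks like this (host and credentials are placeholders):

```bash
# The MySQL client tools read the password from the MYSQL_PWD environment variable.
export MYSQL_PWD='examplepassword'
mysqladmin -u root -h mariadb -P 3306 status
mysqldump  -u root -h mariadb -P 3306 --databases db1 > db1.sql
```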
@@ -134,6 +137,7 @@ backup_couch() {
     check_exit_code $target
     generate_checksum
     move_dbbackup
+    check_exit_code "move backup file"
     post_dbbackup ${DB_NAME}
 }
 
@@ -164,6 +168,7 @@ backup_influx() {
             ltarget=influx_${db}_${DB_HOST#*//}
             generate_checksum
             move_dbbackup
+            check_exit_code "move backup file"
             post_dbbackup $db
         done
         ;;
@@ -184,6 +189,7 @@ backup_influx() {
             ltarget=influx2_${db}_${DB_HOST#*//}
             generate_checksum
             move_dbbackup
+            check_exit_code "move backup file"
             post_dbbackup $db
         done
         ;;
@@ -213,6 +219,7 @@ backup_mongo() {
     check_exit_code $target
     generate_checksum
     move_dbbackup
+    check_exit_code "move backup file"
     post_dbbackup "${DB_NAME}"
 }
 
@@ -228,6 +235,7 @@ backup_mssql() {
     check_exit_code $target
     generate_checksum
     move_dbbackup
+    check_exit_code "move backup file"
     post_dbbackup $DB_NAME
 }
 
@@ -268,6 +276,7 @@ backup_mysql() {
             check_exit_code $target
             generate_checksum
             move_dbbackup
+            check_exit_code "move backup file"
             post_dbbackup $db
         done
     else
@@ -283,6 +292,7 @@ backup_mysql() {
         check_exit_code $target
         generate_checksum
         move_dbbackup
+        check_exit_code "move backup file"
         post_dbbackup all
     fi
 }
@@ -319,6 +329,7 @@ backup_pgsql() {
             check_exit_code $target
             generate_checksum
             move_dbbackup
+            check_exit_code "move backup file"
             post_dbbackup $db
         done
     else
@@ -342,6 +353,7 @@ backup_pgsql() {
         check_exit_code $target
         generate_checksum
         move_dbbackup
+        check_exit_code "move backup file"
         post_dbbackup all
     fi
 }
@@ -373,6 +385,7 @@ backup_redis() {
     check_exit_code $target
     generate_checksum
     move_dbbackup
+    check_exit_code "move backup file"
     post_dbbackup all
 }
 
@@ -391,6 +404,7 @@ backup_sqlite3() {
     cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
     generate_checksum
     move_dbbackup
+    check_exit_code "move backup file"
     post_dbbackup $db
 }
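Every hunk above adds the same `check_exit_code "move backup file"` call after `move_dbbackup`, so a failed move or upload is no longer silent. The helper itself is defined elsewhere in the script; a hypothetical stand-in for the contract these call sites assume:

```bash
# Hypothetical stand-in, not the real helper: report when the step that just
# ran (tracked via $exit_code set by the caller) did not succeed.
check_exit_code() {
  local step="$1"
  if [ "${exit_code:-0}" -ne 0 ]; then
    echo "[WARN] Step '${step}' failed with exit code ${exit_code}"
    # a notification hook would fire here
  fi
}
```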
@@ -445,6 +459,7 @@ check_availability() {
             ;;
         "mysql" )
             counter=0
+            transform_file_var DB_PASS
             export MYSQL_PWD=${DB_PASS}
             while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" ${mysql_tls_args} status > /dev/null 2>&1) ; do
                 sleep 5
@@ -550,15 +565,16 @@ cleanup_old_data() {
 
 
 compression() {
     if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
         PARALLEL_COMPRESSION_THREADS=1
     fi
+
+    if var_true "${GZ_RSYNCABLE}" ; then
+        gz_rsyncable=--rsyncable
+    fi
 
     case "${COMPRESSION,,}" in
         gz* )
-            if var_true "${GZ_RSYNCABLE}" ; then
-                gz_rsyncable=--rsyncable
-            fi
             compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} ${gz_rsyncable}"
             compression_type="gzip"
             extension=".gz"

@@ -583,7 +599,7 @@ compression() {
             target=${target}.xz
             ;;
         zst* )
-            compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
+            compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} ${gz_rsyncable}"
             compression_type="zstd"
             dir_compress_cmd=${compress_cmd}
             extension=".zst"
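With the `GZ_RSYNCABLE` check hoisted out of the `gz*` branch, setting `GZ_RSYNCABLE=TRUE` now affects zstd archives as well. A hedged run example using only variables visible in this view (image reference and credentials are placeholders):

```bash
# Produce rsync-friendly zstd archives.
docker run -d --name db-backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=db1 \
  -e DB_USER=root \
  -e DB_PASS=examplepassword \
  -e COMPRESSION=ZSTD \
  -e COMPRESSION_LEVEL=3 \
  -e GZ_RSYNCABLE=TRUE \
  tiredofit/db-backup
```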
@@ -707,6 +723,7 @@ move_dbbackup() {
         [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
 
         silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+        exit_code=$?
         if var_true "${ENABLE_CHECKSUM}" ; then
             silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
         fi

@@ -722,6 +739,7 @@ move_dbbackup() {
             mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
 
             silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET}
+            exit_code=$?
 
             rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
             rm -rf "${TEMP_LOCATION}"/"${target}"
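The added `exit_code=$?` lines feed the 3.9.3 "notify if blobxfer/s3 upload fails" behaviour: the status of the upload is captured immediately so a later check can react to it. A condensed sketch of the pattern, with a stand-in for the image's `silent` helper and placeholder bucket and paths:

```bash
# Stand-in for the image's helper: run a command, discard its output, keep its
# exit status.
silent() { "$@" > /dev/null 2>&1; }

silent aws s3 cp /tmp/backups/db1.sql.zst s3://example-bucket/backups/db1.sql.zst
exit_code=$?

if [ "${exit_code}" -ne 0 ]; then
  echo "[ERROR] S3 upload failed with exit code ${exit_code}"
  # a notification hook would be triggered here
fi
```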