Mirror of https://github.com/tiredofit/docker-db-backup.git (synced 2025-12-22 05:33:53 +01:00)
Compare commits (18 commits):

83693d35b2
52b726c821
5c43b3c907
005e7f6e47
7d7cb9587d
d1713fe3f0
d1e98d9c4b
0920b671cb
28ed6c3bb8
c1bdf26598
5a4cac2cee
c04eec7661
32f1959a07
d384d5a529
56ab68dd71
9a1a5efbd9
df5532c128
2ecd313778
.dockerignore (new file)
@@ -0,0 +1 @@
+examples/
.github/dependabot.yml (new file)
@@ -0,0 +1,7 @@
+version: 2
+updates:
+  # Maintain dependencies for GitHub Actions
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"
.github/workflows/main.yml
@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

       - name: Prepare
         id: prep
@@ -63,17 +63,17 @@ jobs:
           echo ::set-output name=docker_image::${DOCKER_IMAGE}

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
         with:
           platforms: all

       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2

       - name: Login to DockerHub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
@@ -102,7 +102,7 @@ jobs:
           fi

       - name: Build
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           builder: ${{ steps.buildx.outputs.name }}
           context: .
.github/workflows/manual.yml
@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

      - name: Prepare
        id: prep
@@ -63,17 +63,17 @@ jobs:
           echo ::set-output name=docker_image::${DOCKER_IMAGE}

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v2
         with:
           platforms: all

       - name: Set up Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v2

       - name: Login to DockerHub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
@@ -102,7 +102,7 @@ jobs:
           fi

       - name: Build
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v3
         with:
           builder: ${{ steps.buildx.outputs.name }}
           context: .
CHANGELOG.md
@@ -1,3 +1,62 @@
+## 3.3.4 2022-06-03 <rozdzynski@github>
+
+### Fixed
+- S3 backups failing with special characters in filename
+
+
+## 3.3.3 2022-05-24 <dave at tiredofit dot ca>
+
+### Added
+- Alpine 3.16 base
+
+
+## 3.3.2 2022-05-02 <dave at tiredofit dot ca>
+
+### Added
+- Add POST_SCRIPT_SKIP_X_VERIFY environment variables to allow for more host compatibility for post scripts
+
+
+## 3.3.1 2022-04-30 <dave at tiredofit dot ca>
+
+### Changed
+- Compressing silently was causing 0 byte backups
+
+
+## 3.3.0 2022-04-30 <dave at tiredofit dot ca>
+
+### Added
+- Ability to auto clean old S3 / Minio Hosts like what occurs on filesysten
+- Alert user how to turn off Zabbix Monitoring if fails
+- Allow Zabbix Monitoring to work with S3
+- Silence some more compression statements
+### Changed
+- Fix for Redis not backing up properly
+- Start sending checksums for S3 Outputs
+- Cleanup some code functions
+- FIx Container Log Level always in DEBUG
+
+
+## 3.2.4 2022-04-21 <dave at tiredofit dot ca>
+
+### Changed
+- Add -portable flag when backing up Influx
+
+
+## 3.2.3 2022-04-21 <dave at tiredofit dot ca>
+
+### Changed
+- Fix for bucket / db name InfluxDB 1.xx
+- Minor aesthetics, spacing, spelling
+
+
+## 3.2.2 2022-04-21 <dave at tiredofit dot ca>
+
+### Changed
+- Restore script properly parses DB_PORT entry
+- Influx Database ready performs different checks dependent on V1/V2
+- Stop using weird database lowercase variables unneccessarily breaking Docker Secrets
+
+
 ## 3.2.1 2022-04-03 <dave at tiredofit dot ca>

 ### Changed
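The 3.3.0 entry about auto-cleaning old S3/Minio backups maps to the `cleanup_old_data` changes further down. A minimal sketch of how the relevant variables fit together, assuming the image's `BACKUP_LOCATION`, `S3_*`, and `DB_CLEANUP_TIME` semantics as shown in this diff (host, bucket, and credential values here are illustrative):

```bash
# Illustrative environment: store backups in S3 and prune copies older than
# 7 days. DB_CLEANUP_TIME is in minutes, matching the find -mmin usage and
# the epoch arithmetic in cleanup_old_data below.
docker run -d \
  -e DB_TYPE=mysql \
  -e DB_HOST=db \
  -e DB_NAME=mydb \
  -e DB_USER=backup \
  -e DB_PASS=secret \
  -e BACKUP_LOCATION=S3 \
  -e S3_BUCKET=my-backups \
  -e S3_PATH=db \
  -e DB_CLEANUP_TIME=10080 \
  tiredofit/db-backup
```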
Dockerfile
@@ -1,4 +1,4 @@
-FROM docker.io/tiredofit/alpine:3.15
+FROM docker.io/tiredofit/alpine:3.16
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"

 ### Set Environment Variables
README.md
@@ -261,6 +261,9 @@ Outputs the following on the console:

 If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`

+You must make your scripts executible otherwise there is an internal check that will skip trying to run it otherwise.
+If for some reason your filesystem or host is not detecting it right, use the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass.
+
 ## Support

 These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
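The two README lines added above are easier to follow with a concrete post script. A minimal sketch, assuming the argument order that `post_dbbackup` passes (EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE, as documented in the function diff further down); the script path and its output are made up:

```bash
#!/bin/bash
# /assets/scripts/notify.sh - hypothetical POST_SCRIPT target.
# Arguments arrive in the order post_dbbackup passes them:
#   1=exit code, 2=db type, 3=db host, 4=db name, 5=start epoch,
#   6=finish epoch, 7=duration (s), 8=backup filename, 9=file size, 10=checksum
exit_code="$1" dbtype="$2" dbhost="$3" dbname="$4"
target="$8" filesize="$9" checksum="${10}"

if [ "${exit_code}" = "0" ]; then
    echo "Backup of ${dbtype}/${dbname} on ${dbhost} OK: ${target} (${filesize} bytes, checksum ${checksum})"
else
    echo "Backup of ${dbtype}/${dbname} on ${dbhost} FAILED (exit ${exit_code})" >&2
fi
```

Remember to `chmod +x` the script, or set `POST_SCRIPT_SKIP_X_VERIFY=TRUE` if the executable bit cannot be detected on the mounted filesystem.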
/assets/defaults/10-db-backup
@@ -20,8 +20,3 @@ S3_PROTOCOL=${S3_PROTOCOL:-"https"}
 SIZE_VALUE=${SIZE_VALUE:-"bytes"}
 SPLIT_DB=${SPLIT_DB:-"TRUE"}
 TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
-dbhost=${DB_HOST}
-dbname=${DB_NAME}
-dbpass=${DB_PASS}
-dbtype=${DB_TYPE}
-dbuser=${DB_USER}
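Dropping these lowercase mirror variables is what the 3.2.2 changelog entry means by no longer breaking Docker Secrets: the `_FILE` variants are now read via `file_env` against the canonical uppercase names. A sketch of the intended usage, assuming a swarm-managed secret (service and secret names are illustrative):

```bash
# Illustrative: supply the database password through a Docker secret.
# file_env 'DB_PASS' (see bootstrap_variables below) reads DB_PASS_FILE
# and exports its contents as DB_PASS.
docker service create \
  --name db-backup \
  --secret db_pass \
  -e DB_TYPE=postgres \
  -e DB_HOST=db \
  -e DB_NAME=mydb \
  -e DB_USER=backup \
  -e DB_PASS_FILE=/run/secrets/db_pass \
  tiredofit/db-backup
```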
/assets/functions/10-db-backup
@@ -1,29 +1,29 @@
 #!/command/with-contenv bash

 bootstrap_variables() {
-    case "${dbtype,,}" in
+    case "${DB_TYPE,,}" in
         couch* )
             dbtype=couch
-            dbport=${DB_PORT:-5984}
+            DB_PORT=${DB_PORT:-5984}
             file_env 'DB_USER'
             file_env 'DB_PASS'
         ;;
         influx* )
             dbtype=influx
-            dbport=${DB_PORT:-8088}
+            DB_PORT=${DB_PORT:-8088}
             file_env 'DB_USER'
             file_env 'DB_PASS'
             sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
         ;;
         mongo* )
             dbtype=mongo
-            dbport=${DB_PORT:-27017}
+            DB_PORT=${DB_PORT:-27017}
             [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
             [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
         ;;
         "mysql" | "mariadb" )
             dbtype=mysql
-            dbport=${DB_PORT:-3306}
+            DB_PORT=${DB_PORT:-3306}
             [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
             sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
         ;;
@@ -34,17 +34,17 @@ bootstrap_variables() {
                 *) print_error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;;
             esac
             dbtype=mssql
-            dbport=${DB_PORT:-1433}
+            DB_PORT=${DB_PORT:-1433}
         ;;
         postgres* | "pgsql" )
             dbtype=pgsql
-            dbport=${DB_PORT:-5432}
+            DB_PORT=${DB_PORT:-5432}
             [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
             sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
         ;;
         "redis" )
             dbtype=redis
-            dbport=${DB_PORT:-6379}
+            DB_PORT=${DB_PORT:-6379}
             [[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
         ;;
         sqlite* )
@@ -60,58 +60,58 @@ bootstrap_variables() {
     ### Set the Database Authentication Details
     case "$dbtype" in
         "mongo" )
-            [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
-            [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
-            [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
+            [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
+            [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
+            [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
             [[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
         ;;
         "mysql" )
-            [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
+            [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
         ;;
         "postgres" )
-            [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
+            [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
         ;;
         "redis" )
-            [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
+            [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
         ;;
     esac
 }

 backup_couch() {
     pre_dbbackup
-    target=couch_${dbname}_${dbhost#*//}_${now}.txt
+    target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
     compression
-    print_notice "Dumping CouchDB database: '${dbname}' ${compression_string}"
-    curl -sSL -X GET ${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+    print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
+    curl -sSL -X GET ${DB_HOST}:${DB_PORT}/${DB_NAME}/_all_docs?include_docs=true ${compress_cmd} | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
     exit_code=$?
     check_exit_code $target
     generate_checksum
     move_dbbackup
-    post_dbbackup $dbname
+    post_dbbackup ${DB_NAME}
 }

 backup_influx() {
-    if [ "${dbname,,}" = "all" ] ; then
+    if [ "${DB_NAME,,}" = "all" ] ; then
         print_debug "Preparing to back up everything"
         db_names=justbackupeverything
     else
-        db_names=$(echo "${dbname}" | tr ',' '\n')
+        db_names=$(echo "${DB_NAME}" | tr ',' '\n')
     fi

     case "${INFLUX_VERSION,,}" in
         1 )
             for db in ${db_names}; do
                 pre_dbbackup
-                if [ "${db}" != "justbackupeverything" ] ; then bucket="-bucket $db" ; else db=all ; fi
-                target=influx_${db}_${dbhost#*//}_${now}
+                if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
+                target=influx_${db}_${DB_HOST#*//}_${now}
                 compression
                 print_notice "Dumping Influx database: '${db}'"
-                influxd backup ${influx_compression} ${bucket} -host ${dbhost}:${dbport} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
+                influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
                 exit_code=$?
                 check_exit_code $target_dir
                 print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
                 tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
-                target=influx_${db}_${dbhost#*//}_${now}.tar${extension}
+                target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
                 generate_checksum
                 move_dbbackup
                 post_dbbackup $db
@@ -121,14 +121,14 @@ backup_influx() {
             for db in ${db_names}; do
                 pre_dbbackup
                 if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
-                target=influx2_${db}_${dbhost#*//}_${now}
+                target=influx2_${db}_${DB_HOST#*//}_${now}
                 compression
                 print_notice "Dumping Influx2 database: '${db}'"
-                influx backup --org ${dbuser} ${bucket} --host ${dbhost}:${dbport} --token ${dbpass} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
+                influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
                 exit_code=$?
                 check_exit_code $target_dir
                 create_archive
-                target=influx2_${db}_${dbhost#*//}_${now}.tar${extension}
+                target=influx2_${db}_${DB_HOST#*//}_${now}.tar${extension}
                 generate_checksum
                 move_dbbackup
                 post_dbbackup $db
@@ -140,14 +140,14 @@ backup_influx() {
 backup_mongo() {
     pre_dbbackup
     if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
-        target=${dbtype}_${dbname}_${dbhost}_${now}.archive
+        target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
     else
-        target=${dbtype}_${dbname}_${dbhost}_${now}.archive.gz
+        target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
         mongo_compression="--gzip"
         compression_string="and compressing with gzip"
     fi
     print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
-    mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+    mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
     exit_code=$?
     check_exit_code $target
     generate_checksum
@@ -157,15 +157,15 @@ backup_mongo() {

 backup_mssql() {
     pre_dbbackup
-    target=mssql_${dbname}_${dbhost}_${now}.bak
+    target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
     compression
-    print_notice "Dumping MSSQL database: '${dbname}'"
-    /opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} -Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+    print_notice "Dumping MSSQL database: '${DB_NAME}'"
+    /opt/mssql-tools/bin/sqlcmd -E -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE \[${DB_NAME}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
     exit_code=$?
     check_exit_code $target
     generate_checksum
     move_dbbackup
-    post_dbbackup $dbname
+    post_dbbackup $DB_NAME
 }

 backup_mysql() {
@@ -176,9 +176,9 @@ backup_mysql() {
         stored_procedures="--routines"
     fi

-    if [ "${dbname,,}" = "all" ] ; then
+    if [ "${DB_NAME,,}" = "all" ] ; then
         print_debug "Preparing to back up everything except for information_schema and _* prefixes"
-        db_names=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
+        db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
         if [ -n "${DB_NAME_EXCLUDE}" ] ; then
             db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
             for db_exclude in ${db_names_exclusions} ; do
@@ -187,7 +187,7 @@ backup_mysql() {
             done
         fi
     else
-        db_names=$(echo "${dbname}" | tr ',' '\n')
+        db_names=$(echo "${DB_NAME}" | tr ',' '\n')
     fi

     print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
@@ -195,10 +195,10 @@ backup_mysql() {
     if var_true "${SPLIT_DB}" ; then
         for db in ${db_names} ; do
             pre_dbbackup
-            target=mysql_${db}_${dbhost}_${now}.sql
+            target=mysql_${db}_${DB_HOST,,}_${now}.sql
             compression
             print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
-            mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+            mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
             exit_code=$?
             check_exit_code $target
             generate_checksum
@@ -208,10 +208,10 @@ backup_mysql() {
     else
         print_debug "Not splitting database dumps into their own files"
         pre_dbbackup
-        target=mysql_all_${dbhost}_${now}.sql
+        target=mysql_all_${DB_HOST,,}_${now}.sql
         compression
         print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
-        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
         exit_code=$?
         check_exit_code $target
         generate_checksum
@@ -221,11 +221,11 @@ backup_mysql() {
 }

 backup_pgsql() {
-    export PGPASSWORD=${dbpass}
+    export PGPASSWORD=${DB_PASS}
     authdb=${DB_USER}
-    if [ "${dbname,,}" = "all" ] ; then
+    if [ "${DB_NAME,,}" = "all" ] ; then
         print_debug "Preparing to back up all databases"
-        db_names=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+        db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
         if [ -n "${DB_NAME_EXCLUDE}" ] ; then
             db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
             for db_exclude in ${db_names_exclusions} ; do
@@ -234,7 +234,7 @@ backup_pgsql() {
             done
         fi
     else
-        db_names=$(echo "${dbname}" | tr ',' '\n')
+        db_names=$(echo "${DB_NAME}" | tr ',' '\n')
     fi

     print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
@@ -242,10 +242,10 @@ backup_pgsql() {
     if var_true "${SPLIT_DB}" ; then
         for db in ${db_names} ; do
             pre_dbbackup
-            target=pgsql_${db}_${dbhost}_${now}.sql
+            target=pgsql_${db}_${DB_HOST,,}_${now}.sql
             compression
             print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
-            pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+            pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
             exit_code=$?
             check_exit_code $target
             generate_checksum
@@ -255,10 +255,10 @@ backup_pgsql() {
     else
         print_debug "Not splitting database dumps into their own files"
         pre_dbbackup
-        target=pgsql_all_${dbhost}_${now}.sql
+        target=pgsql_all_${DB_HOST,,}_${now}.sql
         compression
         print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
-        tmp_db_names=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+        tmp_db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
         for r_db_name in $(echo $db_names | xargs); do
             tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
         done
@@ -266,7 +266,7 @@ backup_pgsql() {
         for x_db_name in ${tmp_db_names} ; do
             pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
         done
-        pg_dumpall -h ${dbhost} -U ${dbuser} -p ${dbport} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+        pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
         exit_code=$?
         check_exit_code $target
         generate_checksum
@@ -278,15 +278,16 @@ backup_pgsql() {
 backup_redis() {
     pre_dbbackup
     print_notice "Dumping Redis - Flushing Redis Cache First"
-    target=redis_all_${dbhost}_${now}.rdb
-    echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
+    target=redis_all_${DB_HOST,,}_${now}.rdb
+    echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
     sleep 10
     try=5
     while [ $try -gt 0 ] ; do
-        saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
-        ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
+        saved=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
+        ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
         if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
             print_notice "Redis Backup Complete"
             exit_code=0
             break
         fi
         try=$((try - 1))
@@ -296,6 +297,7 @@ backup_redis() {
     target_original=${target}
     compression
     $compress_cmd "${TEMP_LOCATION}/${target_original}"
+    check_exit_code $target
     generate_checksum
     move_dbbackup
     post_dbbackup all
@@ -303,12 +305,12 @@ backup_redis() {

 backup_sqlite3() {
     pre_dbbackup
-    db=$(basename "$dbhost")
+    db=$(basename "${DB_HOST}")
     db="${db%.*}"
     target=sqlite3_${db}_${now}.sqlite3
     compression
-    print_notice "Dumping sqlite3 database: '${dbhost}' ${compression_string}"
-    sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
+    print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
+    sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
     exit_code=$?
     check_exit_code $target
     cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
@@ -324,78 +326,89 @@ check_availability() {
             counter=0
             code_received=0
             while [ "${code_received}" != "200" ]; do
-                code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${dbhost}:${dbport})
+                code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${DB_HOST}:${DB_PORT})
                 if [ "${code_received}" = "200" ] ; then break ; fi
                 sleep 5
                 (( counter+=5 ))
-                print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
+                print_warn "CouchDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "influx" )
             counter=0
-            code_received=0
-            while [ "${code_received}" != "200" ]; do
-                code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${dbhost}:${dbport}/health)
-                if [ "${code_received}" = "200" ] ; then break ; fi
-                sleep 5
-                (( counter+=5 ))
-                print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
-            done
+            case "${INFLUX_VERSION,,}" in
+                1 )
+                    while ! (nc -z ${DB_HOST#*//} ${DB_PORT}) ; do
+                        sleep 5
+                        (( counter+=5 ))
+                        print_warn "InfluxDB Host '${DB_HOST#*//}' is not accessible, retrying.. ($counter seconds so far)"
+                    done
+                ;;
+                2 )
+                    code_received=0
+                    while [ "${code_received}" != "200" ]; do
+                        code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${DB_HOST}:${DB_PORT}/health)
+                        if [ "${code_received}" = "200" ] ; then break ; fi
+                        sleep 5
+                        (( counter+=5 ))
+                        print_warn "InfluxDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
+                    done
+                ;;
+            esac
         ;;
         "mongo" )
             counter=0
-            while ! (nc -z ${dbhost} ${dbport}) ; do
+            while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
                 sleep 5
                 (( counter+=5 ))
-                print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
+                print_warn "Mongo Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "mysql" )
             counter=0
-            export MYSQL_PWD=${dbpass}
-            while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do
+            export MYSQL_PWD=${DB_PASS}
+            while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
                 sleep 5
                 (( counter+=5 ))
-                print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)"
+                print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
             done
         ;;
         "mssql" )
             counter=0
-            while ! (nc -z ${dbhost} ${dbport}) ; do
+            while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
                 sleep 5
                 (( counter+=5 ))
-                print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
+                print_warn "MSSQL Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "pgsql" )
             counter=0
-            export PGPASSWORD=${dbpass}
-            until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
+            export PGPASSWORD=${DB_PASS}
+            until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
             do
                 sleep 5
                 (( counter+=5 ))
-                print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
+                print_warn "Postgres Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "redis" )
             counter=0
-            while ! (nc -z "${dbhost}" "${dbport}") ; do
+            while ! (nc -z "${DB_HOST}" "${DB_PORT}") ; do
                 sleep 5
                 (( counter+=5 ))
-                print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($counter seconds so far)"
+                print_warn "Redis Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "sqlite3" )
-            if [[ ! -e "${dbhost}" ]]; then
-                print_error "File '${dbhost}' does not exist."
+            if [[ ! -e "${DB_HOST}" ]]; then
+                print_error "File '${DB_HOST}' does not exist."
                 exit_code=2
                 exit $exit_code
-            elif [[ ! -f "${dbhost}" ]]; then
-                print_error "File '${dbhost}' is not a file."
+            elif [[ ! -f "${DB_HOST}" ]]; then
+                print_error "File '${DB_HOST}' is not a file."
                 exit_code=2
                 exit $exit_code
-            elif [[ ! -r "${dbhost}" ]]; then
-                print_error "File '${dbhost}' is not readable."
+            elif [[ ! -r "${DB_HOST}" ]]; then
+                print_error "File '${DB_HOST}' is not readable."
                 exit_code=2
                 exit $exit_code
             fi
@@ -419,11 +432,31 @@ check_exit_code() {
 cleanup_old_data() {
     if [ -n "${DB_CLEANUP_TIME}" ]; then
         if [ "${master_exit_code}" != 1 ]; then
-            print_info "Cleaning up old backups"
-            mkdir -p "${DB_DUMP_TARGET}"
-            find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+            case "${BACKUP_LOCATION,,}" in
+                "file" | "filesystem" )
+                    print_info "Cleaning up old backups"
+                    mkdir -p "${DB_DUMP_TARGET}"
+                    find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+                ;;
+                "s3" | "minio" )
+                    print_info "Cleaning up old backups"
+                    aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | while read -r s3_file; do
+                        s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
+                        s3_createdate=$(date -d "$s3_createdate" "+%s")
+                        s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
+                        if [[ $s3_createdate -le $s3_olderthan ]] ; then
+                            s3_filename=$(echo $s3_file | awk {'print $4'})
+                            if [ "$s3_filename" != "" ] ; then
+                                print_debug "Deleting $s3_filename"
+                                silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+                            fi
+                        fi
+
+                    done
+                ;;
+            esac
         else
-            print_info "Skipping Cleaning up old backups because there were errors in backing up"
+            print_error "Skipping Cleaning up old backups because there were errors in backing up"
         fi
     fi
 }
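The new S3 branch above prunes by comparing epoch seconds: each `aws s3 ls` row begins with a `date time` pair, which is converted to an epoch and tested against now minus `DB_CLEANUP_TIME` minutes. A standalone sketch of the same arithmetic (the timestamp is illustrative, and `date -d` behaves as the GNU/busybox variant the script itself relies on):

```bash
# Worked example of the cutoff arithmetic used in cleanup_old_data.
DB_CLEANUP_TIME=10080                                   # minutes (7 days)
s3_createdate=$(date -d "2022-05-01 12:00:00" "+%s")    # epoch of a listed object
s3_olderthan=$(( $(date +%s) - DB_CLEANUP_TIME * 60 ))  # cutoff epoch
if [ "${s3_createdate}" -le "${s3_olderthan}" ]; then
    echo "object is older than ${DB_CLEANUP_TIME} minutes - would be deleted"
fi
```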
@@ -435,7 +468,7 @@ compression() {

     case "${COMPRESSION,,}" in
         gz* )
-            compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
+            compress_cmd="pigz -q -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
             compression_type="gzip"
             extension=".gz"
             dir_compress_cmd=${compress_cmd}
@@ -443,7 +476,7 @@ compression() {
             target=${target}.gz
         ;;
         bz* )
-            compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
+            compress_cmd="pbzip2 -q -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
             compression_type="bzip2"
             dir_compress_cmd=${compress_cmd}
             extension=".bz2"
@@ -459,7 +492,7 @@ compression() {
             target=${target}.xz
         ;;
         zst* )
-            compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS}"
+            compress_cmd="zstd -q -q --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
             compression_type="zstd"
             dir_compress_cmd=${compress_cmd}
             extension=".zst"
@@ -467,8 +500,9 @@ compression() {
             target=${target}.zst
         ;;
         "none" | "false")
+            compress_cmd="cat "
             compression_type="none"
-            dir_compress_cmd="cat"
+            dir_compress_cmd="cat "
             target_dir=${target}
         ;;
     esac
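All four compressors now carry their quiet flags while still writing the compressed stream to stdout, which is what keeps the `dump | $compress_cmd > file` pipelines intact (per the 3.3.1 entry, silencing the whole pipeline instead had produced 0-byte backups). A quick illustration of the distinction, with a made-up dump command:

```bash
# Diagnostics are suppressed by -q, but stdout still carries the data:
some_dump_command | pigz -q -6 -p 2 > /tmp/backups/dump.sql.gz

# By contrast, discarding the pipeline's stdout to silence it would also
# discard the compressed data and leave a 0-byte file behind.
```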
@@ -494,12 +528,12 @@ create_archive() {
         print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
         tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
     else
-        print_warn "Skipping creating archive file because backup did not complete successfully"
+        print_error "Skipping creating archive file because backup did not complete successfully"
     fi
 }

 generate_checksum() {
-    if var_true "${ENABLE_CHECKSUM}" ;then
+    if var_true "${ENABLE_CHECKSUM}" ; then
         if [ "${exit_code}" = "0" ] ; then
             case "${CHECKSUM,,}" in
                 "md5" )
@@ -518,13 +552,16 @@ generate_checksum() {
             checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
             print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
         else
-            print_warn "Skipping Checksum creation because backup did not complete successfully"
+            print_error "Skipping Checksum creation because backup did not complete successfully"
         fi
     fi
 }

 move_dbbackup() {
     if [ "${exit_code}" = "0" ] ; then
+        dbbackup_size="$(stat -c%s "${TEMP_LOCATION}"/"${target}")"
+        dbbackup_date="$(date -r "${TEMP_LOCATION}"/"${target}" +'%s')"
+
         case "${SIZE_VALUE,,}" in
             "b" | "bytes" )
                 SIZE_VALUE=1
@@ -567,15 +604,17 @@ move_dbbackup() {

             [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"

-            aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
-            unset s3_ssl
-            unset s3_ca_cert
+            silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+            if var_true "${ENABLE_CHECKSUM}" ; then
+                silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+            fi
+
             rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
             rm -rf "${TEMP_LOCATION}"/"${target}"
         ;;
     esac
     else
-        print_warn "Skipping moving DB Backup to final location because backup did not complete successfully"
+        print_error "Skipping moving DB Backup to final location because backup did not complete successfully"
     fi

     rm -rf "${TEMP_LOCATION}"/*
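`PARAM_AWS_ENDPOINT_URL` is what points the `aws` CLI at a non-AWS endpoint such as Minio. A sketch of the variables that feed it, using a made-up host:

```bash
# Illustrative Minio-style configuration; move_dbbackup turns S3_HOST into
# "--endpoint-url ${S3_PROTOCOL}://${S3_HOST}" for every aws s3 call.
S3_PROTOCOL=https            # default from /assets/defaults/10-db-backup
S3_HOST=minio.example.com    # hypothetical endpoint
S3_BUCKET=my-backups
S3_PATH=db
# Resulting copy command (simplified):
#   aws --endpoint-url https://minio.example.com s3 cp /tmp/backups/<target> s3://my-backups/db/<target>
```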
@@ -586,7 +625,7 @@ pre_dbbackup() {
     now=$(date +"%Y%m%d-%H%M%S")
     now_time=$(date +"%H:%M:%S")
     now_date=$(date +"%Y-%m-%d")
-    target=${dbtype}_${dbname}_${dbhost}_${now}.sql
+    target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
 }

 post_dbbackup() {
@@ -595,45 +634,54 @@ post_dbbackup() {

     if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
         print_notice "Sending Backup Statistics to Zabbix"
-        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
-        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
+        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "${dbbackup_size}"
+        silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "${dbbackup_date}"
         silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
         silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
+        if [ "$?" != "0" ] ; then print_error "Error sending statistics, consider disabling with 'CONTAINER_ENABLE_MONITORING=FALSE'" ; fi
     fi

     ### Post Script Support
     if [ -n "${POST_SCRIPT}" ] ; then
-        if [ -x "${POST_SCRIPT}" ] ; then
-            print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
-            eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${dbhost}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+        if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
+            eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
         else
-            print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
+            if [ -x "${POST_SCRIPT}" ] ; then
+                print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}"
+                eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+            else
+                print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executible!"
+            fi
         fi
     fi

     ### Post Backup Custom Script Support
     if [ -d "/assets/custom-scripts/" ] ; then
         for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
-            if [ -x "${f}" ] ; then
-                print_notice "Executing post backup custom script : '${f}'"
-                ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
-                ${f} "${exit_code}" "${dbtype}" "${dbhost}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+            if var_true "${POST_SCRIPT_SKIP_X_VERIFY}" ; then
+                ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
             else
-                print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
+                if [ -x "${f}" ] ; then
+                    print_notice "Executing post backup custom script : '${f}'"
+                    ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
+                    ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+                else
+                    print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
+                fi
             fi
         done
     fi

     print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
+    unset s3_ssl
+    unset s3_ca_cert
 }

 sanity_test() {
     sanity_var DB_TYPE "Database Type"
     sanity_var DB_HOST "Database Host"
     file_env 'DB_USER'
     file_env 'DB_PASS'

-    case "${dbtype,,}" in
+    case "${DB_TYPE,,}" in
         "mysql" | "mariadb" )
             sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
         ;;
(backup service run script)
@@ -4,7 +4,6 @@ source /assets/functions/00-container
 source /assets/functions/10-db-backup
 source /assets/defaults/10-db-backup
 PROCESS_NAME="db-backup"
-CONTAINER_LOG_LEVEL=DEBUG

 bootstrap_variables

@@ -80,7 +79,7 @@ while true; do
     cleanup_old_data

     if var_true "${manual}" ; then
-        print_debug "Exitting due to manual mode"
+        print_debug "Exiting due to manual mode"
         exit ${master_exit_code};
     else
         print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "
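The sleep computation subtracts the time the backup itself took from the configured interval, so wake-ups stay on schedule. A worked example, with `DB_DUMP_FREQ` in minutes as the script uses it:

```bash
# If DB_DUMP_FREQ=1440 (daily) and the run took 125 seconds:
DB_DUMP_FREQ=1440
backup_total_time=125
sleep_for=$(( DB_DUMP_FREQ * 60 - backup_total_time ))   # 86400 - 125 = 86275
echo "Sleeping ${sleep_for} seconds"
```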
(restore script)
@@ -110,7 +110,7 @@ get_dbhost() {
         print_debug "Parsed DBHost: ${p_dbhost}"
     fi

-    if [ -z "${dbhost}" ] && [ -z "${parsed_host}" ]; then
+    if [ -z "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
         print_debug "Parsed DBHost Variant: 1 - No Env, No Parsed Filename"
         q_dbhost_variant=1
         q_dbhost_menu=$(cat <<EOF
@@ -119,7 +119,7 @@ EOF
 )
     fi

-    if [ -n "${dbhost}" ] && [ -z "${parsed_host}" ]; then
+    if [ -n "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
         print_debug "Parsed DBHost Variant: 2 - Env, No Parsed Filename"
         q_dbhost_variant=2
         q_dbhost_menu=$(cat <<EOF
@@ -130,7 +130,7 @@ EOF
 )
     fi

-    if [ -z "${dbhost}" ] && [ -n "${parsed_host}" ]; then
+    if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
         print_debug "Parsed DBHostpho Variant: 3 - No Env, Parsed Filename"
         q_dbhost_variant=3
         q_dbhost_menu=$(cat <<EOF
@@ -141,7 +141,7 @@ EOF
 )
     fi

-    if [ -n "${dbhost}" ] && [ -n "${parsed_host}" ]; then
+    if [ -n "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
         print_debug "Parsed DBHost Variant: 4 - Env, Parsed Filename"
         q_dbhost_variant=4
         q_dbhost_menu=$(cat <<EOF
@@ -188,7 +188,7 @@ EOF
             break
         ;;
         e* | "" )
-            r_dbhost=${dbhost}
+            r_dbhost=${DB_HOST}
             break
         ;;
         q* )
@@ -241,7 +241,7 @@ EOF
             break
         ;;
         e* | "" )
-            r_dbhost=${dbhost}
+            r_dbhost=${DB_HOST}
             break
         ;;
         f* )
@@ -280,7 +280,7 @@ get_dbtype() {
         ;;
     esac

-    if [ -z "${dbtype}" ] && [ -z "${parsed_type}" ]; then
+    if [ -z "${DB_TYPE}" ] && [ -z "${parsed_type}" ]; then
         print_debug "Parsed DBType Variant: 1 - No Env, No Parsed Filename"
         q_dbtype_variant=1
         q_dbtype_menu=$(cat <<EOF
@@ -289,7 +289,7 @@ EOF
 )
     fi

-    if [ -n "${dbtype}" ] && [ -z "${parsed_type}" ]; then
+    if [ -n "${DB_TYPE}" ] && [ -z "${parsed_type}" ]; then
         print_debug "Parsed DBType Variant: 2 - Env, No Parsed Filename"
         q_dbtype_variant=2
         q_dbtype_menu=$(cat <<EOF
@@ -299,7 +299,7 @@ EOF
 )
     fi

-    if [ -z "${dbtype}" ] && [ -n "${parsed_type}" ]; then
+    if [ -z "${DB_TYPE}" ] && [ -n "${parsed_type}" ]; then
         print_debug "Parsed DBType Variant: 3 - No Env, Parsed Filename"
         q_dbtype_variant=3
         q_dbtype_menu=$(cat <<EOF
@@ -309,7 +309,7 @@ EOF
 )
     fi

-    if [ -n "${dbtype}" ] && [ -n "${parsed_type}" ]; then
+    if [ -n "${DB_TYPE}" ] && [ -n "${parsed_type}" ]; then
         print_debug "Parsed DBType Variant: 4 - Env, Parsed Filename"
         q_dbtype_variant=4
         q_dbtype_menu=$(cat <<EOF
@@ -449,7 +449,7 @@ get_dbname() {
         print_debug "Parsed DBName: ${p_dbhost}"
     fi

-    if [ -z "${dbname}" ] && [ -z "${parsed_name}" ]; then
+    if [ -z "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
         print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename"
         q_dbname_variant=1
         q_dbname_menu=$(cat <<EOF
@@ -458,7 +458,7 @@ EOF
 )
     fi

-    if [ -n "${dbname}" ] && [ -z "${parsed_name}" ]; then
+    if [ -n "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
         print_debug "Parsed DBName Variant: 2 - Env, No Parsed Filename"
         q_dbname_variant=2
         q_dbname_menu=$(cat <<EOF
@@ -469,7 +469,7 @@ EOF
 )
     fi

-    if [ -z "${dbname}" ] && [ -n "${parsed_name}" ]; then
+    if [ -z "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
         print_debug "Parsed DBName Variant: 3 - No Env, Parsed Filename"
         q_dbname_variant=3
         q_dbname_menu=$(cat <<EOF
@@ -480,7 +480,7 @@ EOF
 )
     fi

-    if [ -n "${dbname}" ] && [ -n "${parsed_name}" ]; then
+    if [ -n "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
         print_debug "Parsed DBname Variant: 4 - Env, Parsed Filename"
         q_dbname_variant=4
         q_dbname_menu=$(cat <<EOF
@@ -527,7 +527,7 @@ EOF
             break
         ;;
         e* | "" )
-            r_dbname=${dbname}
+            r_dbname=${DB_NAME}
             break
         ;;
         q* )
@@ -580,7 +580,7 @@ EOF
             break
         ;;
         e* | "" )
-            r_dbname=${dbname}
+            r_dbname=${DB_NAME}
             break
         ;;
         f* )
@@ -598,7 +598,7 @@ EOF
 }

 get_dbport() {
-    if [ -z "${dbport}" ] ; then
+    if [ -z "${DB_PORT}" ] ; then
         print_debug "Parsed DBPort Variant: 1 - No Env"
         q_dbport_variant=1
         q_dbport_menu=$(cat <<EOF
@@ -607,20 +607,20 @@ EOF
 )
     fi

-    if [ -n "${dbport}" ] ; then
+    if [ -n "${DB_PORT}" ] ; then
         print_debug "Parsed DBPort Variant: 2 - Env"
         q_dbport_variant=2
         q_dbport_menu=$(cat <<EOF

 C ) Custom Entered Database Port
-E ) Environment Variable DB_PORT: '${dbport}'
+E ) Environment Variable DB_PORT: '${DB_PORT}'
 EOF
 )
     fi

     cat << EOF

-What Database Port do you wish to use?
+What Database Port do you wish to use? MySQL/MariaDB typcially listens on port 3306. Postrgresql port 5432. MongoDB 27017
 ${q_dbport_menu}
 Q ) Quit

@@ -655,7 +655,7 @@ EOF
             break
         ;;
         e* | "" )
-            r_dbport=${dbport}
+            r_dbport=${DB_PORT}
             break
         ;;
         q* )
@@ -669,7 +669,7 @@ EOF
 }

 get_dbuser() {
-    if [ -z "${dbuser}" ] ; then
+    if [ -z "${DB_USER}" ] ; then
         print_debug "Parsed DBUser Variant: 1 - No Env"
         q_dbuser_variant=1
         q_dbuser_menu=$(cat <<EOF
@@ -678,7 +678,7 @@ EOF
 )
     fi

-    if [ -n "${dbuser}" ] ; then
+    if [ -n "${DB_USER}" ] ; then
         print_debug "Parsed DBUser Variant: 2 - Env"
         q_dbuser_variant=2
         q_dbuser_menu=$(cat <<EOF
@@ -724,7 +724,7 @@ EOF
             break
         ;;
         e* | "" )
-            r_dbuser=${dbuser}
+            r_dbuser=${DB_USER}
             break
         ;;
         q* )
@@ -738,7 +738,7 @@ EOF
 }

 get_dbpass() {
-    if [ -z "${dbpass}" ] ; then
+    if [ -z "${DB_PASS}" ] ; then
         print_debug "Parsed DBPass Variant: 1 - No Env"
         q_dbpass_variant=1
         q_dbpass_menu=$(cat <<EOF
@@ -747,7 +747,7 @@ EOF
 )
     fi

-    if [ -n "${dbpass}" ] ; then
+    if [ -n "${DB_PASS}" ] ; then
         print_debug "Parsed DBPass Variant: 2 - Env"
         q_dbpass_variant=2
         q_dbpass_menu=$(cat <<EOF
@@ -793,7 +793,7 @@ EOF
             break
         ;;
         e* | "" )
-            r_dbpass=${dbpass}
+            r_dbpass=${DB_PASS}
             break
         ;;
         q* )
@@ -809,7 +809,7 @@ EOF
 #### SCRIPT START
 cat << EOF

-## ${IMAGE_NAME} Restore Script Version 1.0.1
+## ${IMAGE_NAME} Restore Script
 ## Visit ${IMAGE_REPO_URL}
 ## ####################################################
