Compare commits

17 Commits
3.1.2 ... 3.2.5

| Author | SHA1 | Message | Date |
| ------ | ---- | ------- | ---- |
| Dave Conroy | 28ed6c3bb8 | Release 3.2.5 - See CHANGELOG.md | 2022-04-23 14:11:29 -07:00 |
| Dave Conroy | c1bdf26598 | Release 3.2.4 - See CHANGELOG.md | 2022-04-21 16:04:43 -07:00 |
| Dave Conroy | 5a4cac2cee | Release 3.2.3 - See CHANGELOG.md | 2022-04-21 15:46:27 -07:00 |
| Dave Conroy | c04eec7661 | Add space after compress_cmd | 2022-04-21 14:19:09 -07:00 |
| Dave Conroy | 32f1959a07 | Merge pull request #120 from joergmschulz/patch-1 (small typo / exiting instead of exitting) | 2022-04-21 14:18:43 -07:00 |
| joergmschulz | d384d5a529 | small typo / exiting instead of exitting | 2022-04-21 23:16:02 +02:00 |
| Dave Conroy | 56ab68dd71 | Release 3.2.2 - See CHANGELOG.md | 2022-04-21 12:14:17 -07:00 |
| Dave Conroy | 9a1a5efbd9 | Do a different DB Ready check for Influx 1 vs 2 | 2022-04-21 12:12:57 -07:00 |
| Dave Conroy | df5532c128 | Fix blank database size when compression type=NONE | 2022-04-21 12:08:51 -07:00 |
| Dave Conroy | 2ecd313778 | Change database variables around | 2022-04-21 12:08:27 -07:00 |
| Dave Conroy | 55cfe5ab02 | Release 3.2.1 - See CHANGELOG.md | 2022-04-03 10:28:07 -07:00 |
| Dave Conroy | ae2e2c38e2 | Sanitize DB_HOST for URLs | 2022-04-02 07:37:34 -07:00 |
| Dave Conroy | c23d7991fe | Release 3.2.0 - See CHANGELOG.md | 2022-04-01 18:41:58 -07:00 |
| Dave Conroy | 3f58cfd284 | Release 3.1.3 - See CHANGELOG.md | 2022-03-30 10:46:16 -07:00 |
| Dave Conroy | 2d01f5e692 | Fix for MARIADB not sending DB name for post script | 2022-03-30 10:45:27 -07:00 |
| Dave Conroy | dbd0a03b0d | SPLIT_DB is supposed to be TRUE | 2022-03-30 10:43:22 -07:00 |
| Dave Conroy | 6527f4ff63 | Add sanity checks for Post scripts to be executible | 2022-03-30 10:37:58 -07:00 |
7 changed files with 379 additions and 242 deletions

View File

@@ -1,3 +1,56 @@
+## 3.2.5 2022-04-23 <dave at tiredofit dot ca>
+### Changed
+- Fix for restore still not working with DB_PORT variable
+## 3.2.4 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Add -portable flag when backing up Influx
+## 3.2.3 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Fix for bucket / db name InfluxDB 1.xx
+- Minor aesthetics, spacing, spelling
+## 3.2.2 2022-04-21 <dave at tiredofit dot ca>
+### Changed
+- Restore script properly parses DB_PORT entry
+- Influx database-ready check performs different checks depending on v1/v2
+- Stop using weird lowercase database variables that unnecessarily break Docker Secrets
+## 3.2.1 2022-04-03 <dave at tiredofit dot ca>
+### Changed
+- Fix a variety of issues with 3.2.0 relating to InfluxDB
+## 3.2.0 2022-03-31 <dave at tiredofit dot ca>
+### Added
+- Install InfluxDB2 client alongside version 1 (amd64 and arm64)
+- Introduce InfluxDB 2 backup support
+- Introduce multiple compression types other than Gzip for Influx 1/2
+- Introduce compression for MSSQL backups
+### Changed
+- Testing for host availability for CouchDB and InfluxDB
+## 3.1.3 2022-03-30 <dave at tiredofit dot ca>
+### Changed
+- Fix for MariaDB not sending database name to post script
+- Check for executable bit on post scripts both via environment variable and /assets/custom
+- SPLIT_DB defaulted to TRUE
 ## 3.1.2 2022-03-29 <dave at tiredofit dot ca>
 ### Changed

View File

@@ -2,7 +2,9 @@ FROM docker.io/tiredofit/alpine:3.15
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"
 ### Set Environment Variables
-ENV MSSQL_VERSION=17.8.1.1-1 \
+ENV INFLUX2_VERSION=2.2.1 \
+    MSSQL_VERSION=17.8.1.1-1 \
     CONTAINER_ENABLE_MESSAGING=FALSE \
     CONTAINER_ENABLE_MONITORING=TRUE \
     CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
@@ -44,9 +46,14 @@ RUN set -ex && \
     \
     apkArch="$(apk --print-arch)"; \
     case "$apkArch" in \
-        x86_64) mssql=true ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ;; \
-        *) echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ;; \
+        x86_64) mssql=true ; influx2=true ; influx_arch=amd64 ;; \
+        aarch64 ) influx2=true ; influx_arch=arm64 ;; \
+        *) sleep 0.1 ;; \
     esac; \
+    \
+    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/msodbcsql17_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/e/4/e/e4e67866-dffd-428c-aac7-8d28ddafb39b/mssql-tools_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql17_${MSSQL_VERSION}_amd64.apk mssql-tools_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
+    if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
+    \
     mkdir -p /usr/src/pbzip2 && \
     curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
     cd /usr/src/pbzip2 && \
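The new RUN logic separates architecture detection from installation: the case statement only sets flags (mssql, influx2, influx_arch), and the downloads happen afterwards in flag-guarded blocks. A standalone sketch of that pattern, with echo placeholders standing in for the real curl/apk steps:

```bash
#!/bin/sh
# Sketch of the detect-once, install-by-flag pattern from the Dockerfile above.
# The echo bodies are placeholders, not the image's actual install commands.
set -e

apkArch="$(apk --print-arch 2>/dev/null || uname -m)"   # uname fallback off-Alpine
mssql=false
influx2=false

case "$apkArch" in
    x86_64)  mssql=true ; influx2=true ; influx_arch=amd64 ;;
    aarch64) influx2=true ; influx_arch=arm64 ;;
    *)       echo >&2 "No optional client packages for ${apkArch}" ;;
esac

if [ "$mssql" = "true" ] ; then
    echo "would install msodbcsql17 + mssql-tools (amd64 only)"
fi
if [ "$influx2" = "true" ] ; then
    echo "would fetch influxdb2-client for linux-${influx_arch}"
fi
```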

View File

@@ -50,6 +50,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 - [Base Images used](#base-images-used)
 - [Container Options](#container-options)
 - [Database Specific Options](#database-specific-options)
+  - [For Influx DB2:](#for-influx-db2)
 - [Scheduling Options](#scheduling-options)
 - [Backup Options](#backup-options)
 - [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
@@ -143,6 +144,11 @@ Be sure to view the following repositories to understand all the customizable op
 | `DB_USER` | username for the database(s) - Can use `root` for MySQL | |
 | `DB_PASS` | (optional if DB doesn't require it) password for the database | |
 | `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
+| `INFLUX_VERSION` | Which InfluxDB version you are backing up from: the `1.x` or `2.x` series - AMD64 and ARM64 only for `2` | |
+
+#### For Influx DB2:
+
+Your organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to back up the entire set of databases. For `DB_HOST`, use the syntax `http(s)://db-name`.
 ### Scheduling Options
 | Parameter | Description | Default |
 | --------- | ----------- | ------- |
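To make the new InfluxDB 2 mapping concrete, here is a hypothetical environment block following the rules above; the host, organization, and token values are invented, and 8086 is InfluxDB 2's usual HTTP API port rather than something documented in this diff:

```bash
# Hypothetical settings for an InfluxDB 2 backup (values are examples only):
DB_TYPE=influx
INFLUX_VERSION=2            # selects the v2 client path added in 3.2.0
DB_HOST=http://influxdb     # the README asks for the http(s):// prefix here
DB_PORT=8086                # InfluxDB 2's default HTTP API port (assumption)
DB_USER=example-org         # organization is mapped to DB_USER
DB_PASS=example-root-token  # root token is mapped to DB_PASS
DB_NAME=ALL                 # back up every bucket
```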

View File

@@ -18,10 +18,5 @@ PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
 S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
 S3_PROTOCOL=${S3_PROTOCOL:-"https"}
 SIZE_VALUE=${SIZE_VALUE:-"bytes"}
-SPLIT_DB=${SPLIT_DB:-"FALSE"}
+SPLIT_DB=${SPLIT_DB:-"TRUE"}
 TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
-dbhost=${DB_HOST}
-dbname=${DB_NAME}
-dbpass=${DB_PASS}
-dbtype=${DB_TYPE}
-dbuser=${DB_USER}
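The defaults file works entirely through the shell's `${VAR:-default}` expansion, which is also why the SPLIT_DB change only takes effect when the user has not set the variable themselves. A quick demonstration:

```bash
#!/usr/bin/env bash
# ${VAR:-default} substitutes the default only when VAR is unset or empty.
unset SPLIT_DB
SPLIT_DB=${SPLIT_DB:-"TRUE"} ; echo "unset    -> ${SPLIT_DB}"   # TRUE
SPLIT_DB=FALSE
SPLIT_DB=${SPLIT_DB:-"TRUE"} ; echo "user set -> ${SPLIT_DB}"   # FALSE
```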

View File

@@ -1,28 +1,29 @@
 #!/command/with-contenv bash
 bootstrap_variables() {
-    case "${dbtype,,}" in
+    case "${DB_TYPE,,}" in
         couch* )
             dbtype=couch
-            dbport=${DB_PORT:-5984}
+            DB_PORT=${DB_PORT:-5984}
             file_env 'DB_USER'
             file_env 'DB_PASS'
         ;;
         influx* )
             dbtype=influx
-            dbport=${DB_PORT:-8088}
+            DB_PORT=${DB_PORT:-8088}
             file_env 'DB_USER'
             file_env 'DB_PASS'
+            sanity_var INFLUX_VERSION "What InfluxDB version you are backing up from '1' or '2'"
         ;;
         mongo* )
             dbtype=mongo
-            dbport=${DB_PORT:-27017}
+            DB_PORT=${DB_PORT:-27017}
             [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
             [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
         ;;
         "mysql" | "mariadb" )
             dbtype=mysql
-            dbport=${DB_PORT:-3306}
+            DB_PORT=${DB_PORT:-3306}
             [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
             sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
         ;;
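The dispatch above leans on two bash features: `${DB_TYPE,,}` lowercases the value (bash 4+), and glob patterns such as `couch*` match any spelling that starts that way. A minimal sketch of that normalization, with the real port and credential handling stubbed out:

```bash
#!/usr/bin/env bash
# Sketch of the type matching used by bootstrap_variables(): ${1,,}
# lowercases the input, and the glob patterns accept variants such as
# "CouchDB", "influxdb2", or "postgresql".
normalize_dbtype() {
    case "${1,,}" in
        couch* )              echo "couch  (default port 5984)"  ;;
        influx* )             echo "influx (default port 8088)"  ;;
        mongo* )              echo "mongo  (default port 27017)" ;;
        "mysql" | "mariadb" ) echo "mysql  (default port 3306)"  ;;
        postgres* | "pgsql" ) echo "pgsql  (default port 5432)"  ;;
        * )                   echo "unknown" ;;
    esac
}
normalize_dbtype "CouchDB"     # -> couch
normalize_dbtype "MariaDB"     # -> mysql
normalize_dbtype "postgresql"  # -> pgsql
```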
@@ -33,17 +34,17 @@ bootstrap_variables() {
                 *) print_error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;;
             esac
             dbtype=mssql
-            dbport=${DB_PORT:-1433}
+            DB_PORT=${DB_PORT:-1433}
         ;;
         postgres* | "pgsql" )
             dbtype=pgsql
-            dbport=${DB_PORT:-5432}
+            DB_PORT=${DB_PORT:-5432}
             [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
             sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
         ;;
         "redis" )
             dbtype=redis
-            dbport=${DB_PORT:-6379}
+            DB_PORT=${DB_PORT:-6379}
             [[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
         ;;
         sqlite* )
@@ -59,71 +60,96 @@ bootstrap_variables() {
     ### Set the Database Authentication Details
     case "$dbtype" in
         "mongo" )
-            [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
-            [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
-            [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
+            [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
+            [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
+            [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
             [[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
         ;;
         "mysql" )
-            [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
+            [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
         ;;
         "postgres" )
-            [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
+            [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
         ;;
         "redis" )
-            [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
+            [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
         ;;
     esac
 }
 backup_couch() {
     pre_dbbackup
-    target=couch_${dbname}_${dbhost}_${now}.txt
+    target=couch_${DB_NAME}_${DB_HOST#*//}_${now}.txt
     compression
-    print_notice "Dumping CouchDB database: '${dbname}' ${compression_string}"
-    curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true | $compress_cmd > ${TEMP_LOCATION}/${target}
+    print_notice "Dumping CouchDB database: '${DB_NAME}' ${compression_string}"
+    curl -sSL -X GET ${DB_HOST}:${DB_PORT}/${DB_NAME}/_all_docs?include_docs=true | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
     exit_code=$?
     check_exit_code $target
     generate_checksum
     move_dbbackup
-    post_dbbackup $dbname
+    post_dbbackup ${DB_NAME}
 }
 backup_influx() {
-    if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
-        :
+    if [ "${DB_NAME,,}" = "all" ] ; then
+        print_debug "Preparing to back up everything"
+        db_names=justbackupeverything
     else
-        influx_compression="-portable"
-        compression_string=" and compressing with gzip"
+        db_names=$(echo "${DB_NAME}" | tr ',' '\n')
     fi
-    for db in ${DB_NAME}; do
-        pre_dbbackup
-        target=influx_${db}_${dbhost}_${now}
-        print_notice "Dumping Influx database: '${db}' ${compression_string}"
-        influxd backup ${influx_compression} -database $db -host ${dbhost}:${dbport} ${TEMP_LOCATION}/${target}
-        exit_code=$?
-        check_exit_code $target
-        generate_checksum
-        move_dbbackup
-        post_dbbackup $db
-    done
+    case "${INFLUX_VERSION,,}" in
+        1 )
+            for db in ${db_names}; do
+                pre_dbbackup
+                if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
+                target=influx_${db}_${DB_HOST#*//}_${now}
+                compression
+                print_notice "Dumping Influx database: '${db}'"
+                influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
+                exit_code=$?
+                check_exit_code $target_dir
+                print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
+                tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
+                target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
+                generate_checksum
+                move_dbbackup
+                post_dbbackup $db
+            done
+        ;;
+        2 )
+            for db in ${db_names}; do
+                pre_dbbackup
+                if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
+                target=influx2_${db}_${DB_HOST#*//}_${now}
+                compression
+                print_notice "Dumping Influx2 database: '${db}'"
+                influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
+                exit_code=$?
+                check_exit_code $target_dir
+                create_archive
+                target=influx2_${db}_${DB_HOST#*//}_${now}.tar${extension}
+                generate_checksum
+                move_dbbackup
+                post_dbbackup $db
+            done
+        ;;
+    esac
 }
 backup_mongo() {
     pre_dbbackup
     if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
-        target=${dbtype}_${dbname}_${dbhost}_${now}.archive
+        target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive
     else
-        target=${dbtype}_${dbname}_${dbhost}_${now}.archive.gz
+        target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.archive.gz
         mongo_compression="--gzip"
         compression_string="and compressing with gzip"
     fi
     print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
-    mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+    mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
     exit_code=$?
     check_exit_code $target
+    cd "${TEMP_LOCATION}"
     generate_checksum
     move_dbbackup
     post_dbbackup
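Unlike the single-file dumps, both Influx branches write a directory and then stream it through tar into the selected compressor, which is the same pipeline the new create_archive() helper wraps later in this diff. A self-contained sketch, with gzip standing in for the image's pigz/pbzip2/pixz/zstd selection:

```bash
#!/usr/bin/env bash
# Directory-to-archive pipeline used by the Influx branches: dump into a
# directory, then tar it and pipe through the chosen stream compressor.
set -e
TEMP_LOCATION=$(mktemp -d)
target_dir=influx_demo_$(date +"%Y%m%d-%H%M%S")
dir_compress_cmd="gzip -6"      # stand-in for the compression() selection
extension=".gz"

mkdir -p "${TEMP_LOCATION}/${target_dir}"
echo "pretend this is a portable backup" > "${TEMP_LOCATION}/${target_dir}/shard.dat"

tar cf - "${TEMP_LOCATION}/${target_dir}" | ${dir_compress_cmd} \
    > "${TEMP_LOCATION}/${target_dir}.tar${extension}"
ls -l "${TEMP_LOCATION}/${target_dir}.tar${extension}"
```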
@@ -131,14 +157,15 @@ backup_mongo() {
 backup_mssql() {
     pre_dbbackup
-    target=mssql_${dbname}_${dbhost}_${now}.bak
-    print_notice "Dumping MSSQL database: '${dbname}'"
-    /opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+    target=mssql_${DB_NAME,,}_${DB_HOST,,}_${now}.bak
+    compression
+    print_notice "Dumping MSSQL database: '${DB_NAME}'"
+    /opt/mssql-tools/bin/sqlcmd -E -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} Q "BACKUP DATABASE \[${DB_NAME}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
     exit_code=$?
     check_exit_code $target
     generate_checksum
     move_dbbackup
-    post_dbbackup $dbname
+    post_dbbackup $DB_NAME
 }
 backup_mysql() {
@@ -149,9 +176,9 @@ backup_mysql() {
         stored_procedures="--routines"
     fi
-    if [ "${dbname,,}" = "all" ] ; then
+    if [ "${DB_NAME,,}" = "all" ] ; then
         print_debug "Preparing to back up everything except for information_schema and _* prefixes"
-        db_names=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
+        db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
         if [ -n "${DB_NAME_EXCLUDE}" ] ; then
             db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
             for db_exclude in ${db_names_exclusions} ; do
@@ -160,7 +187,7 @@ backup_mysql() {
             done
         fi
     else
-        db_names=$(echo "${dbname}" | tr ',' '\n')
+        db_names=$(echo "${DB_NAME}" | tr ',' '\n')
     fi
     print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
@@ -168,57 +195,10 @@ backup_mysql() {
     if var_true "${SPLIT_DB}" ; then
         for db in ${db_names} ; do
             pre_dbbackup
-            target=mysql_${db}_${dbhost}_${now}.sql
+            target=mysql_${db}_${DB_HOST,,}_${now}.sql
             compression
             print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
-            mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
-            exit_code=$?
-            check_exit_code $target
-            generate_checksum
-            move_dbbackup
-            post_dbbackup
-        done
-    else
-        print_debug "Not splitting database dumps into their own files"
-        pre_dbbackup
-        target=mysql_all_${dbhost}_${now}.sql
-        compression
-        print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
-        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
-        exit_code=$?
-        check_exit_code $target
-        generate_checksum
-        move_dbbackup
-        post_dbbackup all
-    fi
-}
-backup_pgsql() {
-    export PGPASSWORD=${dbpass}
-    authdb=${DB_USER}
-    if [ "${dbname,,}" = "all" ] ; then
-        print_debug "Preparing to back up all databases"
-        db_names=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
-        if [ -n "${DB_NAME_EXCLUDE}" ] ; then
-            db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
-            for db_exclude in ${db_names_exclusions} ; do
-                print_debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
-                db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
-            done
-        fi
-    else
-        db_names=$(echo "${dbname}" | tr ',' '\n')
-    fi
-    print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
-    if var_true "${SPLIT_DB}" ; then
-        for db in ${db_names} ; do
-            pre_dbbackup
-            target=pgsql_${db}_${dbhost}_${now}.sql
-            compression
-            print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
-            pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+            mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
             exit_code=$?
             check_exit_code $target
             generate_checksum
@@ -228,10 +208,57 @@ backup_pgsql() {
     else
         print_debug "Not splitting database dumps into their own files"
         pre_dbbackup
-        target=pgsql_all_${dbhost}_${now}.sql
+        target=mysql_all_${DB_HOST,,}_${now}.sql
+        compression
+        print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
+        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+        exit_code=$?
+        check_exit_code $target
+        generate_checksum
+        move_dbbackup
+        post_dbbackup all
+    fi
+}
+backup_pgsql() {
+    export PGPASSWORD=${DB_PASS}
+    authdb=${DB_USER}
+    if [ "${DB_NAME,,}" = "all" ] ; then
+        print_debug "Preparing to back up all databases"
+        db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+        if [ -n "${DB_NAME_EXCLUDE}" ] ; then
+            db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
+            for db_exclude in ${db_names_exclusions} ; do
+                print_debug "Excluding '${db_exclude}' from ALL DB_NAME backups"
+                db_names=$(echo "$db_names" | sed "/${db_exclude}/d" )
+            done
+        fi
+    else
+        db_names=$(echo "${DB_NAME}" | tr ',' '\n')
+    fi
+    print_debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
+    if var_true "${SPLIT_DB}" ; then
+        for db in ${db_names} ; do
+            pre_dbbackup
+            target=pgsql_${db}_${DB_HOST,,}_${now}.sql
+            compression
+            print_notice "Dumping PostgreSQL database: '${db}' ${compression_string}"
+            pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+            exit_code=$?
+            check_exit_code $target
+            generate_checksum
+            move_dbbackup
+            post_dbbackup $db
+        done
+    else
+        print_debug "Not splitting database dumps into their own files"
+        pre_dbbackup
+        target=pgsql_all_${DB_HOST,,}_${now}.sql
         compression
         print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
-        tmp_db_names=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+        tmp_db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
         for r_db_name in $(echo $db_names | xargs); do
             tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
         done
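The non-split Postgres path has to express "everything except what was requested" for pg_dumpall, so it subtracts the requested names from the catalog list and turns the remainder into repeated --exclude-database flags. A sketch with made-up database names, where tmp_db_names stands in for the psql catalog query:

```bash
#!/usr/bin/env bash
# Building pg_dumpall's exclusion list, as in backup_pgsql() above.
tmp_db_names="postgres app analytics sessions"   # stand-in for the catalog query
db_names="app analytics"                         # databases the user asked for

for r_db_name in ${db_names}; do                 # subtract the requested names
    tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g")
done
pgexclude_arg=""
for x_db_name in ${tmp_db_names} ; do            # flag everything that is left
    pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
echo "pg_dumpall ${pgexclude_arg}"
# -> pg_dumpall --exclude-database=postgres --exclude-database=sessions
```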
@@ -251,13 +278,13 @@ backup_pgsql() {
 backup_redis() {
     pre_dbbackup
     print_notice "Dumping Redis - Flushing Redis Cache First"
-    target=redis_all_${dbhost}_${now}.rdb
-    echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
+    target=redis_all_${DB_HOST,,}_${now}.rdb
+    echo bgsave | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
     sleep 10
     try=5
     while [ $try -gt 0 ] ; do
-        saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
-        ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
+        saved=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
+        ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
         if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
             print_notice "Redis Backup Complete"
             break
@@ -276,15 +303,15 @@ backup_redis() {
 backup_sqlite3() {
     pre_dbbackup
-    db=$(basename "$dbhost")
+    db=$(basename "${DB_HOST}")
     db="${db%.*}"
     target=sqlite3_${db}_${now}.sqlite3
     compression
-    print_notice "Dumping sqlite3 database: '${dbhost}' ${compression_string}"
-    sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
+    print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
+    sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
     exit_code=$?
     check_exit_code $target
-    cat "${TEMP_LOCATION}"/backup.sqlite3 | $compress_cmd > "${TEMP_LOCATION}/${target}"
+    cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
     generate_checksum
     move_dbbackup
     post_dbbackup $db
@@ -294,75 +321,92 @@ check_availability() {
     ### Set the Database Type
     case "$dbtype" in
         "couch" )
-            COUNTER=0
-            while ! (nc -z ${dbhost} ${dbport}) ; do
+            counter=0
+            code_received=0
+            while [ "${code_received}" != "200" ]; do
+                code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${DB_HOST}:${DB_PORT})
+                if [ "${code_received}" = "200" ] ; then break ; fi
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "CouchDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "influx" )
-            COUNTER=0
-            while ! (nc -z ${dbhost} ${dbport}) ; do
-                sleep 5
-                (( COUNTER+=5 ))
-                print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
-            done
+            counter=0
+            case "${INFLUX_VERSION,,}" in
+                1 )
+                    while ! (nc -z ${DB_HOST#*//} ${DB_PORT}) ; do
+                        sleep 5
+                        (( counter+=5 ))
+                        print_warn "InfluxDB Host '${DB_HOST#*//}' is not accessible, retrying.. ($counter seconds so far)"
+                    done
+                ;;
+                2 )
+                    code_received=0
+                    while [ "${code_received}" != "200" ]; do
+                        code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${DB_HOST}:${DB_PORT}/health)
+                        if [ "${code_received}" = "200" ] ; then break ; fi
+                        sleep 5
+                        (( counter+=5 ))
+                        print_warn "InfluxDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
+                    done
+                ;;
+            esac
         ;;
         "mongo" )
-            COUNTER=0
-            while ! (nc -z ${dbhost} ${dbport}) ; do
+            counter=0
+            while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "Mongo Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "mysql" )
-            COUNTER=0
-            export MYSQL_PWD=${dbpass}
-            while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do
+            counter=0
+            export MYSQL_PWD=${DB_PASS}
+            while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)"
+                (( counter+=5 ))
+                print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
             done
         ;;
         "mssql" )
-            COUNTER=0
-            while ! (nc -z ${dbhost} ${dbport}) ; do
+            counter=0
+            while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "MSSQL Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "pgsql" )
-            COUNTER=0
-            export PGPASSWORD=${dbpass}
-            until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
+            counter=0
+            export PGPASSWORD=${DB_PASS}
+            until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
             do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "Postgres Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "redis" )
-            COUNTER=0
-            while ! (nc -z "${dbhost}" "${dbport}") ; do
+            counter=0
+            while ! (nc -z "${DB_HOST}" "${DB_PORT}") ; do
                 sleep 5
-                (( COUNTER+=5 ))
-                print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
+                (( counter+=5 ))
+                print_warn "Redis Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
             done
         ;;
         "sqlite3" )
-            if [[ ! -e "${dbhost}" ]]; then
-                print_error "File '${dbhost}' does not exist."
+            if [[ ! -e "${DB_HOST}" ]]; then
+                print_error "File '${DB_HOST}' does not exist."
                 exit_code=2
                 exit $exit_code
-            elif [[ ! -f "${dbhost}" ]]; then
-                print_error "File '${dbhost}' is not a file."
+            elif [[ ! -f "${DB_HOST}" ]]; then
+                print_error "File '${DB_HOST}' is not a file."
                 exit_code=2
                 exit $exit_code
-            elif [[ ! -r "${dbhost}" ]]; then
-                print_error "File '${dbhost}' is not readable."
+            elif [[ ! -r "${DB_HOST}" ]]; then
+                print_error "File '${DB_HOST}' is not readable."
                 exit_code=2
                 exit $exit_code
             fi
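check_availability() now uses two probe styles: nc -z for plain TCP ports, and a curl HTTP-status loop for CouchDB and InfluxDB 2, whose /health endpoint answers 200 when ready. Both reduced to standalone helpers (host and URL values are examples):

```bash
#!/usr/bin/env bash
# The two readiness probes from check_availability(), as helper functions.
wait_for_tcp() {    # usage: wait_for_tcp HOST PORT
    local counter=0
    while ! nc -z "$1" "$2" ; do
        sleep 5
        (( counter+=5 ))
        echo "WARN: $1:$2 not accessible, retrying.. (${counter} seconds so far)"
    done
}

wait_for_http_200() {    # usage: wait_for_http_200 URL
    local code_received=0 counter=0
    while [ "${code_received}" != "200" ]; do
        code_received=$(curl -XGET -sSL -o /dev/null -w '%{http_code}' "$1")
        [ "${code_received}" = "200" ] && break
        sleep 5
        (( counter+=5 ))
        echo "WARN: $1 answered ${code_received}, retrying.. (${counter} seconds so far)"
    done
}

wait_for_tcp localhost 6379                       # e.g. Redis
wait_for_http_200 http://localhost:8086/health    # e.g. InfluxDB 2
```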
@@ -395,7 +439,6 @@ cleanup_old_data() {
     fi
 }
 compression() {
-
     if var_false "${ENABLE_PARALLEL_COMPRESSION}" ; then
         PARALLEL_COMPRESSION_THREADS=1
@@ -405,26 +448,40 @@ compression() {
         gz* )
             compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
             compression_type="gzip"
+            extension=".gz"
+            dir_compress_cmd=${compress_cmd}
+            target_dir=${target}
             target=${target}.gz
         ;;
         bz* )
             compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
             compression_type="bzip2"
+            dir_compress_cmd=${compress_cmd}
+            extension=".bz2"
+            target_dir=${target}
             target=${target}.bz2
         ;;
         xz* )
             compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
             compression_type="xzip"
+            dir_compress_cmd=${compress_cmd}
+            extension=".xz"
+            target_dir=${target}
             target=${target}.xz
         ;;
         zst* )
             compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
             compression_type="zstd"
+            dir_compress_cmd=${compress_cmd}
+            extension=".zst"
+            target_dir=${target}
             target=${target}.zst
         ;;
         "none" | "false")
             compress_cmd="cat "
             compression_type="none"
+            dir_compress_cmd="cat "
+            target_dir=${target}
         ;;
     esac
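Each compression branch now records four things: a stream command for single files (compress_cmd), the same command reused for directory archives (dir_compress_cmd), the filename extension, and the pre-extension target_dir name the Influx/tar path needs. A condensed sketch of that dispatch; the COMPRESSION selector variable name is an assumption, since the actual case line sits outside this hunk:

```bash
#!/usr/bin/env bash
# Condensed compression dispatch. The selector variable name (COMPRESSION)
# is assumed; the real case line is not shown in the hunk above.
COMPRESSION=${COMPRESSION:-"GZ"}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-3}
PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
target="backup.sql"

case "${COMPRESSION,,}" in
    gz* )  compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} " ; extension=".gz"  ;;
    bz* )  compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} " ; extension=".bz2" ;;
    xz* )  compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} " ; extension=".xz"  ;;
    zst* ) compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} " ; extension=".zst" ;;
    * )    compress_cmd="cat " ; extension="" ;;
esac
dir_compress_cmd=${compress_cmd}
target_dir=${target}
target=${target}${extension}
echo "single file: dump | ${compress_cmd}> ${target}"
echo "directory:   tar cf - ${target_dir} | ${dir_compress_cmd}> ${target_dir}.tar${extension}"
```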
@@ -444,6 +501,15 @@ compression() {
     esac
 }
+create_archive() {
+    if [ "${exit_code}" = "0" ] ; then
+        print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
+        tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
+    else
+        print_warn "Skipping creating archive file because backup did not complete successfully"
+    fi
+}
 generate_checksum() {
     if var_true "${ENABLE_CHECKSUM}" ; then
         if [ "${exit_code}" = "0" ] ; then
@@ -470,7 +536,8 @@ generate_checksum() {
 }
 move_dbbackup() {
-    case "$SIZE_VALUE" in
+    if [ "${exit_code}" = "0" ] ; then
+    case "${SIZE_VALUE,,}" in
         "b" | "bytes" )
             SIZE_VALUE=1
         ;;
@@ -482,11 +549,11 @@ move_dbbackup() {
         ;;
     esac
     if [ "$SIZE_VALUE" = "1" ] ; then
-        FILESIZE=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
-        print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
+        filesize=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
+        print_notice "Backup of ${target} created with the size of ${filesize} bytes"
     else
-        FILESIZE=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
-        print_notice "Backup of ${target} created with the size of ${FILESIZE}"
+        filesize=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
+        print_notice "Backup of ${target} created with the size of ${filesize}"
     fi
     case "${BACKUP_LOCATION,,}" in
@@ -519,6 +586,11 @@ move_dbbackup() {
             rm -rf "${TEMP_LOCATION}"/"${target}"
         ;;
     esac
+    else
+        print_warn "Skipping moving DB Backup to final location because backup did not complete successfully"
+    fi
+    rm -rf "${TEMP_LOCATION}"/*
 }
 pre_dbbackup() {
@@ -526,7 +598,7 @@ pre_dbbackup() {
     now=$(date +"%Y%m%d-%H%M%S")
     now_time=$(date +"%H:%M:%S")
     now_date=$(date +"%Y-%m-%d")
-    target=${dbtype}_${dbname}_${dbhost}_${now}.sql
+    target=${dbtype}_${DB_NAME,,}_${DB_HOST,,}_${now}.sql
 }
 post_dbbackup() {
@@ -543,17 +615,24 @@ post_dbbackup() {
     ### Post Script Support
     if [ -n "${POST_SCRIPT}" ] ; then
+        if [ -x "${POST_SCRIPT}" ] ; then
             print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}'"
-        eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${dbhost}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+            eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+        else
+            print_error "Can't execute POST_SCRIPT environment variable '${POST_SCRIPT}' as its filesystem bit is not executable!"
+        fi
     fi
     ### Post Backup Custom Script Support
     if [ -d "/assets/custom-scripts/" ] ; then
+        print_notice "Found Post Backup Custom Script to execute"
         for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
-            print_notice "Running Script: '${f}'"
+            if [ -x "${f}" ] ; then
+                print_notice "Executing post backup custom script : '${f}'"
                 ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
-            ${f} "${exit_code}" "${dbtype}" "${dbhost}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+                ${f} "${exit_code}" "${dbtype}" "${DB_HOST}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+            else
+                print_error "Can't run post backup custom script: '${f}' as its filesystem bit is not executable!"
+            fi
         done
     fi
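The new guards above are plain `-x` tests: a hook only runs if its executable bit is set, and the positional arguments follow the order documented in the inline comment. A runnable reproduction with a throwaway hook script and dummy argument values:

```bash
#!/usr/bin/env bash
# Reproduces the executable-bit guard around post-backup hooks. Argument
# order follows the comment in the diff: EXIT_CODE DB_TYPE DB_HOST DB_NAME
# STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE.
hook=$(mktemp /tmp/hook-XXXXXX.sh)
printf '#!/bin/sh\necho "hook: exit=$1 type=$2 host=$3 db=$4 file=$8"\n' > "$hook"

run_hook() {
    if [ -x "$1" ] ; then
        "$1" 0 mysql db app 1650000000 1650000060 60 backup.sql 1024 deadbeef
    else
        echo "ERROR: '$1' is not executable - skipping (chmod +x to enable)"
    fi
}

run_hook "$hook"     # skipped: mktemp creates the file without the execute bit
chmod +x "$hook"
run_hook "$hook"     # now runs
rm -f "$hook"
```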
@@ -563,10 +642,8 @@ post_dbbackup() {
 sanity_test() {
     sanity_var DB_TYPE "Database Type"
     sanity_var DB_HOST "Database Host"
-    file_env 'DB_USER'
-    file_env 'DB_PASS'
-    case "${dbtype,,}" in
+    case "${DB_TYPE,,}" in
         "mysql" | "mariadb" )
             sanity_var DB_NAME "Database Name to backup. Multiple separated by commas"
         ;;

View File

@@ -80,7 +80,7 @@ while true; do
     cleanup_old_data
     if var_true "${manual}" ; then
-        print_debug "Exitting due to manual mode"
+        print_debug "Exiting due to manual mode"
         exit ${master_exit_code};
     else
         print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "

View File

@@ -66,7 +66,6 @@ EOF
             exit 0
         ;;
         "-i" )
-            echo "interactive mode"
             interactive_mode=true
         ;;
         * )
@@ -110,7 +109,7 @@ get_dbhost() {
         print_debug "Parsed DBHost: ${p_dbhost}"
     fi
-    if [ -z "${dbhost}" ] && [ -z "${parsed_host}" ]; then
+    if [ -z "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
         print_debug "Parsed DBHost Variant: 1 - No Env, No Parsed Filename"
         q_dbhost_variant=1
         q_dbhost_menu=$(cat <<EOF
@@ -119,7 +118,7 @@ EOF
     )
     fi
-    if [ -n "${dbhost}" ] && [ -z "${parsed_host}" ]; then
+    if [ -n "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
         print_debug "Parsed DBHost Variant: 2 - Env, No Parsed Filename"
         q_dbhost_variant=2
         q_dbhost_menu=$(cat <<EOF
@@ -130,7 +129,7 @@ EOF
     )
     fi
-    if [ -z "${dbhost}" ] && [ -n "${parsed_host}" ]; then
+    if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
         print_debug "Parsed DBHost Variant: 3 - No Env, Parsed Filename"
         q_dbhost_variant=3
         q_dbhost_menu=$(cat <<EOF
@@ -141,7 +140,7 @@ EOF
     )
     fi
-    if [ -n "${dbhost}" ] && [ -n "${parsed_host}" ]; then
+    if [ -n "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
         print_debug "Parsed DBHost Variant: 4 - Env, Parsed Filename"
         q_dbhost_variant=4
         q_dbhost_menu=$(cat <<EOF
@@ -188,7 +187,7 @@ EOF
                     break
                 ;;
                 e* | "" )
-                    r_dbhost=${dbhost}
+                    r_dbhost=${DB_HOST}
                     break
                 ;;
                 q* )
@@ -241,7 +240,7 @@ EOF
                     break
                 ;;
                 e* | "" )
-                    r_dbhost=${dbhost}
+                    r_dbhost=${DB_HOST}
                     break
                 ;;
                 f* )
@@ -280,7 +279,7 @@ get_dbtype() {
         ;;
     esac
-    if [ -z "${dbtype}" ] && [ -z "${parsed_type}" ]; then
+    if [ -z "${DB_TYPE}" ] && [ -z "${parsed_type}" ]; then
         print_debug "Parsed DBType Variant: 1 - No Env, No Parsed Filename"
         q_dbtype_variant=1
         q_dbtype_menu=$(cat <<EOF
@@ -289,7 +288,7 @@ EOF
     )
     fi
-    if [ -n "${dbtype}" ] && [ -z "${parsed_type}" ]; then
+    if [ -n "${DB_TYPE}" ] && [ -z "${parsed_type}" ]; then
         print_debug "Parsed DBType Variant: 2 - Env, No Parsed Filename"
         q_dbtype_variant=2
         q_dbtype_menu=$(cat <<EOF
@@ -299,7 +298,7 @@ EOF
     )
     fi
-    if [ -z "${dbtype}" ] && [ -n "${parsed_type}" ]; then
+    if [ -z "${DB_TYPE}" ] && [ -n "${parsed_type}" ]; then
         print_debug "Parsed DBType Variant: 3 - No Env, Parsed Filename"
         q_dbtype_variant=3
         q_dbtype_menu=$(cat <<EOF
@@ -309,7 +308,7 @@ EOF
     )
     fi
-    if [ -n "${dbtype}" ] && [ -n "${parsed_type}" ]; then
+    if [ -n "${DB_TYPE}" ] && [ -n "${parsed_type}" ]; then
         print_debug "Parsed DBType Variant: 4 - Env, Parsed Filename"
         q_dbtype_variant=4
         q_dbtype_menu=$(cat <<EOF
@@ -449,7 +448,7 @@ get_dbname() {
         print_debug "Parsed DBName: ${p_dbhost}"
     fi
-    if [ -z "${dbname}" ] && [ -z "${parsed_name}" ]; then
+    if [ -z "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
         print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename"
         q_dbname_variant=1
         q_dbname_menu=$(cat <<EOF
@@ -458,7 +457,7 @@ EOF
     )
     fi
-    if [ -n "${dbname}" ] && [ -z "${parsed_name}" ]; then
+    if [ -n "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
         print_debug "Parsed DBName Variant: 2 - Env, No Parsed Filename"
         q_dbname_variant=2
         q_dbname_menu=$(cat <<EOF
@@ -469,7 +468,7 @@ EOF
     )
     fi
-    if [ -z "${dbname}" ] && [ -n "${parsed_name}" ]; then
+    if [ -z "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
         print_debug "Parsed DBName Variant: 3 - No Env, Parsed Filename"
         q_dbname_variant=3
         q_dbname_menu=$(cat <<EOF
@@ -480,7 +479,7 @@ EOF
     )
     fi
-    if [ -n "${dbname}" ] && [ -n "${parsed_name}" ]; then
+    if [ -n "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
         print_debug "Parsed DBName Variant: 4 - Env, Parsed Filename"
         q_dbname_variant=4
         q_dbname_menu=$(cat <<EOF
@@ -527,7 +526,7 @@ EOF
                     break
                 ;;
                 e* | "" )
-                    r_dbname=${dbname}
+                    r_dbname=${DB_NAME}
                     break
                 ;;
                 q* )
@@ -580,7 +579,7 @@ EOF
                     break
                 ;;
                 e* | "" )
-                    r_dbname=${dbname}
+                    r_dbname=${DB_NAME}
                     break
                 ;;
                 f* )
@@ -598,7 +597,7 @@ EOF
 }
 get_dbport() {
-    if [ -z "${dbport}" ] ; then
+    if [ -z "${DB_PORT}" ] ; then
         print_debug "Parsed DBPort Variant: 1 - No Env"
         q_dbport_variant=1
         q_dbport_menu=$(cat <<EOF
@@ -607,20 +606,20 @@ EOF
     )
     fi
-    if [ -n "${dbport}" ] ; then
+    if [ -n "${DB_PORT}" ] ; then
         print_debug "Parsed DBPort Variant: 2 - Env"
         q_dbport_variant=2
         q_dbport_menu=$(cat <<EOF
 C ) Custom Entered Database Port
-E ) Environment Variable DB_PORT: '${dbport}'
+E ) Environment Variable DB_PORT: '${DB_PORT}'
 EOF
     )
     fi
     cat << EOF
-What Database Port do you wish to use?
+What Database Port do you wish to use? MySQL/MariaDB typically listens on port 3306, PostgreSQL on 5432, MongoDB on 27017.
 ${q_dbport_menu}
 Q ) Quit
@@ -641,7 +640,7 @@ EOF
         2 )
             while true; do
                 read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
-                case "${q_dbname_menu,,}" in
+                case "${q_dbport_menu,,}" in
                     c* )
                         counter=1
                         q_dbport=" "
@@ -655,7 +654,7 @@ EOF
                         break
                     ;;
                     e* | "" )
-                        r_dbport=${dbport}
+                        r_dbport=${DB_PORT}
                         break
                     ;;
                     q* )
@@ -669,7 +668,7 @@ EOF
 }
 get_dbuser() {
-    if [ -z "${dbuser}" ] ; then
+    if [ -z "${DB_USER}" ] ; then
         print_debug "Parsed DBUser Variant: 1 - No Env"
         q_dbuser_variant=1
         q_dbuser_menu=$(cat <<EOF
@@ -678,7 +677,7 @@ EOF
     )
     fi
-    if [ -n "${dbuser}" ] ; then
+    if [ -n "${DB_USER}" ] ; then
         print_debug "Parsed DBUser Variant: 2 - Env"
         q_dbuser_variant=2
         q_dbuser_menu=$(cat <<EOF
@@ -724,7 +723,7 @@ EOF
                     break
                 ;;
                 e* | "" )
-                    r_dbuser=${dbuser}
+                    r_dbuser=${DB_USER}
                     break
                 ;;
                 q* )
@@ -738,7 +737,7 @@ EOF
 }
 get_dbpass() {
-    if [ -z "${dbpass}" ] ; then
+    if [ -z "${DB_PASS}" ] ; then
         print_debug "Parsed DBPass Variant: 1 - No Env"
         q_dbpass_variant=1
         q_dbpass_menu=$(cat <<EOF
@@ -747,7 +746,7 @@ EOF
     )
     fi
-    if [ -n "${dbpass}" ] ; then
+    if [ -n "${DB_PASS}" ] ; then
         print_debug "Parsed DBPass Variant: 2 - Env"
         q_dbpass_variant=2
         q_dbpass_menu=$(cat <<EOF
@@ -793,7 +792,7 @@ EOF
                     break
                 ;;
                 e* | "" )
-                    r_dbpass=${dbpass}
+                    r_dbpass=${DB_PASS}
                     break
                 ;;
                 q* )
@@ -809,7 +808,7 @@ EOF
 #### SCRIPT START
 cat << EOF
-## ${IMAGE_NAME} Restore Script Version 1.0.1
+## ${IMAGE_NAME} Restore Script
 ## Visit ${IMAGE_REPO_URL}
 ## ####################################################