diff --git a/CHANGELOG.md b/CHANGELOG.md
index 75cc7ee..fbc91e2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,8 @@
+## develop 2022-09-20
+
+   ### Added
+      - MONGO_CUSTOM_URI support to bypass the separate connection environment variables - parses the URI and populates DB_HOST and DB_NAME for file naming. Can be overridden
+
 ## 3.4.2 2022-09-19
 
    ### Changed
diff --git a/README.md b/README.md
index ab189f8..6def421 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,6 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 - [About](#about)
 - [Maintainer](#maintainer)
 - [Table of Contents](#table-of-contents)
-- [Persistent Storage](#persistent-storage)
 - [Prerequisites and Assumptions](#prerequisites-and-assumptions)
 - [Installation](#installation)
   - [Build from Source](#build-from-source)
@@ -45,7 +44,7 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
   - [Multi Architecture](#multi-architecture)
 - [Configuration](#configuration)
   - [Quick Start](#quick-start)
-  - [Persistent Storage](#persistent-storage-1)
+  - [Persistent Storage](#persistent-storage)
   - [Environment Variables](#environment-variables)
     - [Base Images used](#base-images-used)
     - [Container Options](#container-options)
@@ -70,7 +69,6 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 - [License](#license)
 
 > **NOTE**: If you are using this with a docker-compose file along with a seperate SQL container, take care not to set the variables to backup immediately, more so have it delay execution for a minute, otherwise you will get a failed first backup.
-### Persistent Storage
 
 ## Prerequisites and Assumptions
 * You must have a working connection to one of the supported DB Servers and appropriate credentials
@@ -140,32 +138,32 @@ Be sure to view the following repositories to understand all the customizable op
 | `SPLIT_DB`        | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE`    |
 
 ### Database Specific Options
-| Parameter         | Description                                                                                                                                  | Default   |
-| ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------- | --------- |
-| `DB_AUTH`         | (Mongo Only - Optional) Authentication Database                                                                                              |           |
-| `DB_TYPE`         | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3`                                                      |           |
-| `DB_HOST`         | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3`                                               |           |
-| `DB_NAME`         | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by seperating with commas eg `db1,db2` |           |
-| `DB_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases seperated via commas from being backed up                                                 |           |
-| `DB_USER`         | username for the database(s) - Can use `root` for MySQL                                                                                     |           |
-| `DB_PASS`         | (optional if DB doesn't require it) password for the database                                                                               |           |
-| `DB_PORT`         | (optional) Set port to connect to DB_HOST. Defaults are provided                                                                            | varies    |
-| `INFLUX_VERSION`  | What Version of Influx are you backing up from `1`.x or `2` series - AMD64 and ARM64 only for `2`                                           |           |
-| `MONGO_HOST_TYPE` | Connect to regular `mongodb` or `atlas`                                                                                                     | `mongodb` |
-|                   | You can also skip this and override the uri prefix with `MONGO_URI_PREFIX=mongodb+srv://` or whatever you would like                        |           |
+| Parameter          | Description                                                                                                                                    | Default   |
+| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | --------- |
+| `DB_AUTH`          | (Mongo Only - Optional) Authentication Database                                                                                                |           |
+| `DB_TYPE`          | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3`                                                         |           |
+| `DB_HOST`          | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3`                                                  |           |
+| `DB_NAME`          | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas e.g. `db1,db2`  |           |
+| `DB_NAME_EXCLUDE`  | If using `ALL` - databases to exclude from the backup, separated by commas                                                                     |           |
+| `DB_USER`          | Username for the database(s) - Can use `root` for MySQL                                                                                        |           |
+| `DB_PASS`          | (optional if DB doesn't require it) Password for the database                                                                                  |           |
+| `DB_PORT`          | (optional) Set port to connect to DB_HOST. Defaults are provided                                                                               | varies    |
+| `INFLUX_VERSION`   | Which major version of InfluxDB you are backing up: `1`.x or `2` series - AMD64 and ARM64 only for `2`                                         |           |
+| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB connection string, enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net`             |           |
+|                    | This environment variable is parsed to populate the `DB_NAME` and `DB_HOST` variables used to build your backup filenames. You can override them by making your own entries - see the example below |           |
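+
+For example, a minimal `docker run` sketch (the image tag, credentials and cluster hostname below are placeholders - adjust them to your own setup):
+
+```bash
+docker run -d --name db-backup \
+  -e DB_TYPE=mongo \
+  -e MONGO_CUSTOM_URI="mongodb+srv://backupuser:backuppass@cluster0.example.mongodb.net" \
+  -v ./backup:/backup \
+  tiredofit/db-backup
+```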
 
 #### For Influx DB2:
 Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST` use syntax of `http(s)://db-name`
 
 ### Scheduling Options
-| Parameter          | Description                                                                                                                                                                                          | Default   |
-|--------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------|
-| `DB_DUMP_FREQ`     | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day.                                                                                               | `1440`    |
-| `DB_DUMP_BEGIN`    | What time to do the first dump. Defaults to immediate. Must be in one of two formats                                                                                                                |           |
-|                    | Absolute HHMM, e.g. `2330` or `0415`                                                                                                                                                                |           |
-|                    | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half                                                      |           |
-| `DB_DUMP_TARGET`   | Directory where the database dumps are kept.                                                                                                                                                        | `/backup` |
-| `DB_CLEANUP_TIME`  | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything.  | `FALSE`   |
+| Parameter         | Description                                                                                                                                                                                               | Default   |
+| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- |
+| `DB_DUMP_FREQ`    | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day.                                                                                                    | `1440`    |
+| `DB_DUMP_BEGIN`   | What time to do the first dump. Defaults to immediate. Must be in one of two formats                                                                                                                     |           |
+|                   | Absolute HHMM, e.g. `2330` or `0415`                                                                                                                                                                      |           |
+|                   | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` (in an hour and a half)                                                          |           |
+| `DB_DUMP_TARGET`  | Directory where the database dumps are kept.                                                                                                                                                              | `/backup` |
+| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when the dump frequency fires). 1440 would delete anything more than 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE`   |
 
 - You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of backups that start with a `0` get converted into a different format which will not allow the timer to start at the correct time.
diff --git a/install/assets/defaults/10-db-backup b/install/assets/defaults/10-db-backup
old mode 100755
new mode 100644
index 2e318de..324940d
--- a/install/assets/defaults/10-db-backup
+++ b/install/assets/defaults/10-db-backup
@@ -1,6 +1,7 @@
 #!/command/with-contenv bash
 
 BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
+BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
 CHECKSUM=${CHECKSUM:-"MD5"}
 COMPRESSION=${COMPRESSION:-"ZSTD"}
 COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
@@ -20,6 +21,6 @@ S3_PROTOCOL=${S3_PROTOCOL:-"https"}
 SCRIPT_LOCATION_PRE=${SCRIPT_LOCATION_PRE:-"/assets/scripts/pre/"}
 SCRIPT_LOCATION_POST=${SCRIPT_LOCATION_POST:-"/assets/scripts/post/"}
 SIZE_VALUE=${SIZE_VALUE:-"bytes"}
+SKIP_AVAILABILITY_CHECK=${SKIP_AVAILABILITY_CHECK:-"FALSE"}
 SPLIT_DB=${SPLIT_DB:-"TRUE"}
-TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
-BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
+TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
\ No newline at end of file
diff --git a/install/assets/functions/10-db-backup b/install/assets/functions/10-db-backup
old mode 100755
new mode 100644
index b9643af..e7a5244
--- a/install/assets/functions/10-db-backup
+++ b/install/assets/functions/10-db-backup
@@ -20,14 +20,33 @@ bootstrap_variables() {
             ;;
         mongo* )
             dbtype=mongo
-            DB_PORT=${DB_PORT:-27017}
-            [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
-            [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+            if [ -n "${MONGO_CUSTOM_URI}" ] ; then
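+                # Parse the custom URI piece by piece: strip the scheme, then any user:pass@
+                # credentials, keep what is left of the first '/' as the host (and optional port),
+                # and what follows the first '/' up to any '?options' as the database name.
+                # The lowercased results only seed DB_HOST/DB_NAME for backup file naming;
+                # values already present in the environment win via the ${VAR:-...} expansions below.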
+                mongo_uri_proto=$(echo ${MONGO_CUSTOM_URI} | grep :// | sed -e's,^\(.*://\).*,\1,g')
+                mongo_uri_scratch="${MONGO_CUSTOM_URI/${mongo_uri_proto}/}"
+                mongo_uri_username_password=$(echo ${mongo_uri_scratch} | grep @ | rev | cut -d@ -f2- | rev)
+                if [ -n "${mongo_uri_username_password}" ]; then mongo_uri_scratch=$(echo ${mongo_uri_scratch} | rev | cut -d@ -f1 | rev) ; fi
+                mongo_uri_port=$(echo ${mongo_uri_scratch} | grep : | rev | cut -d: -f2- | rev)
+                if [ -n "${mongo_uri_port}" ]; then mongo_uri_port=$(echo ${mongo_uri_scratch} | rev | cut -d: -f1 | cut -d/ -f2 | rev) ; fi
+                mongo_uri_hostname=$(echo ${mongo_uri_scratch} | cut -d/ -f1 | cut -d: -f1 )
+                mongo_uri_database=$(echo ${mongo_uri_scratch} | cut -d/ -f2 | cut -d? -f1 )
+                mongo_uri_options=$(echo ${mongo_uri_scratch} | cut -d/ -f2 | cut -d? -f2 )
+                DB_NAME=${DB_NAME:-"${mongo_uri_database,,}"}
+                DB_HOST=${DB_HOST:-"${mongo_uri_hostname,,}"}
+            else
+                DB_PORT=${DB_PORT:-27017}
+                [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
+                [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+                [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
+                [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
+                [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
+                [[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
+            fi
             ;;
         "mysql" | "mariadb" )
             dbtype=mysql
             DB_PORT=${DB_PORT:-3306}
             [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+            [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
             sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
             ;;
         "mssql" | "microsoftsql" )
@@ -43,12 +62,14 @@
             dbtype=pgsql
             DB_PORT=${DB_PORT:-5432}
             [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+            [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
             sanity_var DB_NAME "Database Name to backup. Multiple seperated by commas"
             ;;
         "redis" )
             dbtype=redis
             DB_PORT=${DB_PORT:-6379}
             [[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS'
+            [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
             ;;
         sqlite* )
             dbtype=sqlite3
@@ -59,25 +80,6 @@
         file_env 'S3_KEY_ID'
         file_env 'S3_KEY_SECRET'
     fi
-
-    ### Set the Database Authentication Details
-    case "$dbtype" in
-        "mongo" )
-            [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${DB_USER}"
-            [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${DB_PASS}"
-            [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${DB_NAME}"
-            [[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
-            ;;
-        "mysql" )
-            [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${DB_PASS}
-            ;;
-        "postgres" )
-            [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${DB_PASS}"
-            ;;
-        "redis" )
-            [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${DB_PASS}"
-            ;;
-    esac
 }
 
 backup_couch() {
@@ -152,19 +154,19 @@ backup_mongo() {
         mongo_compression="--gzip"
         compression_string="and compressing with gzip"
     fi
-    if [ "${MONGO_HOST_TYPE,,}" = "atlas" ] ; then
-        MONGO_URI_PREFIX=${MONGO_URI_PREFIX:-"mongodb+srv://"}
+    if [ -n "${MONGO_CUSTOM_URI}" ] ; then
+        mongo_backup_parameter="--uri=${MONGO_CUSTOM_URI} ${EXTRA_OPTS}"
     else
-        MONGO_URI_PREFIX=${MONGO_URI_PREFIX:-"mongodb://"}
+        mongo_backup_parameter="--host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}"
     fi
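+    # mongo_backup_parameter now carries either the complete --uri connection string or the
+    # individual --host/--port/credential arguments assembled from the DB_* variables above.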
     pre_dbbackup "${DB_NAME}"
     print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
-    mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --uri="${MONGO_URI_PREFIX}${DB_HOST}:${DB_PORT}" ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+    silent mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} ${mongo_backup_parameter}
     exit_code=$?
     check_exit_code $target
     generate_checksum
     move_dbbackup
-    post_dbbackup
+    post_dbbackup "${DB_NAME}"
 }
 
 backup_mssql() {
@@ -173,7 +175,7 @@
     compression
     pre_dbbackup "${DB_NAME}"
     print_notice "Dumping MSSQL database: '${DB_NAME}'"
-    /opt/mssql-tools18/bin/sqlcmd -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE [${DB_NAME}] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+    silent /opt/mssql-tools18/bin/sqlcmd -C -S ${DB_HOST}\,${DB_PORT} -U ${DB_USER} -P ${DB_PASS} -Q "BACKUP DATABASE [${DB_NAME}] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${DB_NAME}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
     exit_code=$?
     check_exit_code $target
     generate_checksum
@@ -212,7 +214,7 @@ backup_mysql() {
             compression
             pre_dbbackup $db
             print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
-            mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+            silent mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
             exit_code=$?
             check_exit_code $target
             generate_checksum
@@ -226,7 +228,7 @@
         compression
         pre_dbbackup all
         print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
-        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+        silent mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
         exit_code=$?
         check_exit_code $target
         generate_checksum
@@ -261,7 +263,7 @@
             compression
             pre_dbbackup $db
             print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
-            pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+            silent pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
             exit_code=$?
             check_exit_code $target
             generate_checksum
@@ -283,7 +285,7 @@
         for x_db_name in ${tmp_db_names} ; do
             pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
         done
-        pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+        silent pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
         exit_code=$?
         check_exit_code $target
         generate_checksum
@@ -329,7 +331,7 @@
     compression
     pre_dbbackup $db
     print_notice "Dumping sqlite3 database: '${DB_HOST}' ${compression_string}"
-    sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
+    silent sqlite3 "${DB_HOST}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
     exit_code=$?
     check_exit_code $target
     cat "${TEMP_LOCATION}"/backup.sqlite3 | ${dir_compress_cmd} > "${TEMP_LOCATION}/${target}"
@@ -340,101 +342,105 @@ backup_sqlite3() {
 
 check_availability() {
     ### Set the Database Type
-    case "$dbtype" in
-        "couch" )
-            counter=0
-            code_received=0
-            while [ "${code_received}" != "200" ]; do
-                code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${DB_HOST}:${DB_PORT})
-                if [ "${code_received}" = "200" ] ; then break ; fi
-                sleep 5
-                (( counter+=5 ))
-                print_warn "CouchDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
-            done
-            ;;
-        "influx" )
-            counter=0
-            case "${INFLUX_VERSION,,}" in
-                1 )
-                    while ! (nc -z ${DB_HOST#*//} ${DB_PORT}) ; do
+    if var_false "${SKIP_AVAILABILITY_CHECK}" ; then
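+        # All of the reachability probes below are skipped when SKIP_AVAILABILITY_CHECK=TRUE
+        # (the FALSE default comes from install/assets/defaults/10-db-backup).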
+        case "$dbtype" in
+            "couch" )
+                counter=0
+                code_received=0
+                while [ "${code_received}" != "200" ]; do
+                    code_received=$(curl -XGET -sSL -o /dev/null -L -w ''%{http_code}'' ${DB_HOST}:${DB_PORT})
+                    if [ "${code_received}" = "200" ] ; then break ; fi
+                    sleep 5
+                    (( counter+=5 ))
+                    print_warn "CouchDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
+                done
+                ;;
+            "influx" )
+                counter=0
+                case "${INFLUX_VERSION,,}" in
+                    1 )
+                        while ! (nc -z ${DB_HOST#*//} ${DB_PORT}) ; do
+                            sleep 5
+                            (( counter+=5 ))
+                            print_warn "InfluxDB Host '${DB_HOST#*//}' is not accessible, retrying.. ($counter seconds so far)"
+                        done
+                        ;;
+                    2 )
+                        code_received=0
+                        while [ "${code_received}" != "200" ]; do
+                            code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${DB_HOST}:${DB_PORT}/health)
+                            if [ "${code_received}" = "200" ] ; then break ; fi
+                            sleep 5
+                            (( counter+=5 ))
+                            print_warn "InfluxDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
+                        done
+                        ;;
+                esac
+                ;;
+            "mongo" )
+                if [ -n "${MONGO_CUSTOM_URI}" ] ; then
+                    print_debug "Skipping Connectivity Check"
+                else
+                    counter=0
+                    while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
                         sleep 5
                         (( counter+=5 ))
-                        print_warn "InfluxDB Host '${DB_HOST#*//}' is not accessible, retrying.. ($counter seconds so far)"
+                        print_warn "Mongo Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
                     done
-                    ;;
-                2 )
-                    code_received=0
-                    while [ "${code_received}" != "200" ]; do
-                        code_received=$(curl -XGET -sSL -o /dev/null -w ''%{http_code}'' ${DB_HOST}:${DB_PORT}/health)
-                        if [ "${code_received}" = "200" ] ; then break ; fi
-                        sleep 5
-                        (( counter+=5 ))
-                        print_warn "InfluxDB Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
-                    done
-                    ;;
-            esac
-            ;;
-        "mongo" )
-            if [ "${MONGO_HOST_TYPE,,}" != "atlas" ] ; then
+                fi
+                ;;
+            "mysql" )
+                counter=0
+                export MYSQL_PWD=${DB_PASS}
+                while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
+                    sleep 5
+                    (( counter+=5 ))
+                    print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
+                done
+                ;;
+            "mssql" )
                 counter=0
                 while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
                     sleep 5
                     (( counter+=5 ))
-                print_warn "Mongo Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
+                    print_warn "MSSQL Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
                 done
-            fi
-            ;;
-        "mysql" )
-            counter=0
-            export MYSQL_PWD=${DB_PASS}
-            while ! (mysqladmin -u"${DB_USER}" -P"${DB_PORT}" -h"${DB_HOST}" status > /dev/null 2>&1) ; do
-                sleep 5
-                (( counter+=5 ))
-                print_warn "MySQL/MariaDB Server '${DB_HOST}' is not accessible, retrying.. (${counter} seconds so far)"
-            done
-            ;;
-        "mssql" )
-            counter=0
-            while ! (nc -z ${DB_HOST} ${DB_PORT}) ; do
-                sleep 5
-                (( counter+=5 ))
-                print_warn "MSSQL Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
-            done
-            ;;
-        "pgsql" )
-            counter=0
-            export PGPASSWORD=${DB_PASS}
-            until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
-            do
-                sleep 5
-                (( counter+=5 ))
-                print_warn "Postgres Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
-            done
-            ;;
-        "redis" )
-            counter=0
-            while ! (nc -z "${DB_HOST}" "${DB_PORT}") ; do
-                sleep 5
-                (( counter+=5 ))
-                print_warn "Redis Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
-            done
-            ;;
-        "sqlite3" )
-            if [[ ! -e "${DB_HOST}" ]]; then
-                print_error "File '${DB_HOST}' does not exist."
-                exit_code=2
-                exit $exit_code
-            elif [[ ! -f "${DB_HOST}" ]]; then
-                print_error "File '${DB_HOST}' is not a file."
-                exit_code=2
-                exit $exit_code
-            elif [[ ! -r "${DB_HOST}" ]]; then
-                print_error "File '${DB_HOST}' is not readable."
-                exit_code=2
-                exit $exit_code
-            fi
-            ;;
-    esac
+                ;;
+            "pgsql" )
+                counter=0
+                export PGPASSWORD=${DB_PASS}
+                until pg_isready --dbname=${DB_NAME} --host=${DB_HOST} --port=${DB_PORT} --username=${DB_USER} -q
+                do
+                    sleep 5
+                    (( counter+=5 ))
+                    print_warn "Postgres Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
+                done
+                ;;
+            "redis" )
+                counter=0
+                while ! (nc -z "${DB_HOST}" "${DB_PORT}") ; do
+                    sleep 5
+                    (( counter+=5 ))
+                    print_warn "Redis Host '${DB_HOST}' is not accessible, retrying.. ($counter seconds so far)"
+                done
+                ;;
+            "sqlite3" )
+                if [[ ! -e "${DB_HOST}" ]]; then
+                    print_error "File '${DB_HOST}' does not exist."
+                    exit_code=2
+                    exit $exit_code
+                elif [[ ! -f "${DB_HOST}" ]]; then
+                    print_error "File '${DB_HOST}' is not a file."
+                    exit_code=2
+                    exit $exit_code
+                elif [[ ! -r "${DB_HOST}" ]]; then
+                    print_error "File '${DB_HOST}' is not readable."
+                    exit_code=2
+                    exit $exit_code
+                fi
+                ;;
+        esac
+    fi
 }
 
 check_exit_code() {
diff --git a/install/etc/cont-init.d/10-db-backup b/install/etc/cont-init.d/10-db-backup
index 4c30eed..46a75d4 100755
--- a/install/etc/cont-init.d/10-db-backup
+++ b/install/etc/cont-init.d/10-db-backup
@@ -6,6 +6,7 @@ prepare_service 03-monitoring
 PROCESS_NAME="db-backup"
 
 output_off
+bootstrap_variables
 sanity_test
 setup_mode
 create_zabbix dbbackup