Compare commits

...

35 Commits

Author SHA1 Message Date
dave@tiredofit.ca
dfa94ecab7 Release 4.0.16 - See CHANGELOG.md 2023-11-17 08:07:54 -08:00
Dave Conroy
eaea6dc348 Update README.md 2023-11-16 09:38:18 -08:00
dave@tiredofit.ca
34abe88159 Release 4.0.15 - See CHANGELOG.md 2023-11-16 09:35:56 -08:00
Dave Conroy
5ffbeeb163 Merge pull request #280 from joergmschulz/patch-1
warn instead of warning
2023-11-14 07:14:55 -08:00
joergmschulz
c82cee80f8 warn instead of warning
see #279
2023-11-14 08:53:38 +01:00
dave@tiredofit.ca
ab059ccdf1 Release 4.0.14 - See CHANGELOG.md 2023-11-13 15:16:36 -08:00
dave@tiredofit.ca
1e8ccf4d56 Release 4.0.13 - See CHANGELOG.md 2023-11-12 17:07:07 -08:00
dave@tiredofit.ca
65c40cac0a Release 4.0.12 - See CHANGELOG.md 2023-11-12 09:03:01 -08:00
dave@tiredofit.ca
a9f2d51ff9 Release 4.0.11 - See CHANGELOG.md 2023-11-11 13:43:57 -08:00
dave@tiredofit.ca
7f455abc1a Release 4.0.10 - See CHANGELOG.md 2023-11-11 09:34:11 -08:00
dave@tiredofit.ca
c16add4525 Release 4.0.9 - See CHANGELOG.md 2023-11-11 09:16:02 -08:00
Dave Conroy
d5769b1588 Fix Ctrl-C Backup Concurrency 2023-11-11 08:48:59 -08:00
dave@tiredofit.ca
0b2c7836cf Release 4.0.8 - See CHANGELOG.md 2023-11-11 08:32:58 -08:00
Dave Conroy
535e011740 Add safety net to debug() SHOW_OUTPUT 2023-11-11 07:28:38 -08:00
Dave Conroy
5a391b908a Fix debug() duplicate variable 2023-11-11 07:23:13 -08:00
dave@tiredofit.ca
fddca646c8 Release 4.0.7 - See CHANGELOG.md 2023-11-11 07:15:00 -08:00
Dave Conroy
68f954c59b Fix SQLite3 Backups and clean up temp directory properly 2023-11-11 07:12:29 -08:00
Dave Conroy
0ab0a6d182 sqlit3 scheduler process name remove slashes 2023-11-11 06:48:39 -08:00
Dave Conroy
f6bf2993f7 Add seperate persmissions for _FILESYSTEM_PATH 2023-11-11 06:36:26 -08:00
dave@tiredofit.ca
5cf00a8b8e Release 4.0.6 - See CHANGELOG.md 2023-11-10 17:53:47 -08:00
dave@tiredofit.ca
2bc730013e Release 4.0.5 - See CHANGELOG.md 2023-11-10 07:25:25 -08:00
Dave Conroy
d628ed8ff4 Expand upon DEBUG_ statements to give more detail 2023-11-10 07:24:31 -08:00
Dave Conroy
d7399667a1 Update _FILESYSTEM_PERMISSIONS from 700 to 600 and add undocumented DBBACKUP_USER|GROUP variable 2023-11-10 07:16:56 -08:00
dave@tiredofit.ca
9caec737e0 Release 4.0.4 - See CHANGELOG.md 2023-11-09 11:49:26 -08:00
Dave Conroy
87a803512d Merge pull request #269 from tiredofit/4.x
New Restore Script
2023-11-09 11:48:19 -08:00
Dave Conroy
c6a8fb0ae0 Merge branch 'main' into 4.x 2023-11-09 11:48:08 -08:00
Dave Conroy
8fafdeb45c Restore - Support multiple DB Hosts 2023-11-09 11:46:04 -08:00
Dave Conroy
4a3a79d328 restore - we're actually using FILESYSTEM_PATH 2023-11-09 09:21:19 -08:00
dave@tiredofit.ca
bad5057bcf Release 4.0.3 - See CHANGELOG.md 2023-11-09 09:20:27 -08:00
Dave Conroy
d2acfc4a88 restore - dont browse .gpg files 2023-11-09 09:19:39 -08:00
Dave Conroy
3d794a819f Commence work on restore scripts 2023-11-09 09:19:13 -08:00
dave@tiredofit.ca
aaf6309cc4 Release 4.0.2 - See CHANGELOG.md 2023-11-09 08:09:59 -08:00
dave@tiredofit.ca
55d2067b43 Release 4.0.1 - See CHANGELOG.md 2023-11-09 08:04:05 -08:00
Dave Conroy
0d56a26f0f restore - remove reference to DB_DUMP_TARGET and instead use DEFAULT_BACKUP_PATH 2023-11-09 08:03:39 -08:00
Dave Conroy
635411bdd5 Update README.md 2023-11-08 22:37:39 -08:00
7 changed files with 846 additions and 437 deletions

View File

@@ -1,3 +1,115 @@
## 4.0.16 2023-11-17 <dave at tiredofit dot ca>
### Changed
- Switch to using msmtp instead of s-mail for notify()
## 4.0.15 2023-11-16 <dave at tiredofit dot ca>
### Changed
- Fix cleanup of old backups
## 4.0.14 2023-11-13 <dave at tiredofit dot ca>
### Changed
- Bugfix when PRE/POST scripts found not giving legacy warning
- Run pre / post scripts as root
## 4.0.13 2023-11-12 <dave at tiredofit dot ca>
### Changed
- Check for any quotes if using MONGO_CUSTOM_URI and remove
## 4.0.12 2023-11-12 <dave at tiredofit dot ca>
### Changed
- Allow creating schedulers if _MONGO_CUSTOM_URI is set and _DB_HOST blank
## 4.0.11 2023-11-11 <dave at tiredofit dot ca>
### Changed
- Resolve issue with backing up ALL databases with PGSQL and MySQL
## 4.0.10 2023-11-11 <dave at tiredofit dot ca>
### Changed
- Change environment variable parsing routines to properly accommodate passwords containing '=='
## 4.0.9 2023-11-11 <dave at tiredofit dot ca>
### Changed
- Fix issue with quotes being wrapped around _PASS variables
## 4.0.8 2023-11-11 <dave at tiredofit dot ca>
### Changed
- Tidy up file_encryption() routines
- Change environment variable _ENCRYPT_PUBKEY to _ENCRYPT_PUBLIC_KEY
- Add new environment variable _ENCRYPT_PRIVATE_KEY
## 4.0.7 2023-11-11 <dave at tiredofit dot ca>
### Added
- Add separate permissions for _FILESYSTEM_PATH
### Changed
- More output and debugging additions
- SQLite3 now backs up without running into file permission/access problems
- Cleanup old sqlite backups from temp directory
- Handle multiple SQLite3 backups concurrently
## 4.0.6 2023-11-10 <dave at tiredofit dot ca>
### Added
- Add additional DEBUG_ statements
### Changed
- Fix issue with Influx DB not properly detecting the correct version
## 4.0.5 2023-11-10 <dave at tiredofit dot ca>
### Added
- Add undocumented DBBACKUP_USER|GROUP environment variables for troubleshooting permissions
- Add more verbosity when using DEBUG_ statements
### Changed
- Change _FILESYSTEM_PERMISSION to 600 from 700
## 4.0.4 2023-11-09 <dave at tiredofit dot ca>
### Added
- Add support for restoring from different DB_ variables in restore script
## 4.0.3 2023-11-09 <dave at tiredofit dot ca>
### Changed
- Resolve issue with _MYSQL_TLS_CERT_FILE not being read
## 4.0.2 2023-11-09 <dave at tiredofit dot ca>
### Changed
- Properly use custom _S3_HOST variables
## 4.0.1 2023-11-09 <dave at tiredofit dot ca>
### Changed
- Restore - Stop using DB_DUMP_TARGET and instead browse using DEFAULT_BACKUP_PATH
## 4.0.0 2023-11-08 <dave at tiredofit dot ca> ## 4.0.0 2023-11-08 <dave at tiredofit dot ca>
This is the fourth major release to the DB Backup image which started as a basic MySQL backup service in early 2017. With each major release brings enhancements, bugfixes, removals along with breaking changes and this one is no different. This is the fourth major release to the DB Backup image which started as a basic MySQL backup service in early 2017. With each major release brings enhancements, bugfixes, removals along with breaking changes and this one is no different.

View File

@@ -10,7 +10,7 @@ ENV INFLUX1_CLIENT_VERSION=1.8.0 \
MSODBC_VERSION=18.3.2.1-1 \ MSODBC_VERSION=18.3.2.1-1 \
MSSQL_VERSION=18.3.1.1-1 \ MSSQL_VERSION=18.3.1.1-1 \
AWS_CLI_VERSION=1.29.78 \ AWS_CLI_VERSION=1.29.78 \
CONTAINER_ENABLE_MESSAGING=FALSE \ CONTAINER_ENABLE_MESSAGING=TRUE \
CONTAINER_ENABLE_MONITORING=TRUE \ CONTAINER_ENABLE_MONITORING=TRUE \
IMAGE_NAME="tiredofit/db-backup" \ IMAGE_NAME="tiredofit/db-backup" \
IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/" IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"

View File

@@ -212,12 +212,15 @@ If these are set and no other defaults or variables are set explicitly, they wil
##### Encryption Options ##### Encryption Options
| Variable | Description | Default | Encryption occurs after compression and the encrypted filename will have a `.gpg` suffix
| ---------------------------- | ------------------------------------------- | ------- |
| `DEFAULT_ENCRYPT` | Encrypt file after backing up with GPG | `FALSE` | | Variable | Description | Default | `_FILE` |
| `DEFAULT_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG | | | ----------------------------- | -------------------------------------------- | ------- | ------- |
| *or* | | | | `DEFAULT_ENCRYPT` | Encrypt file after backing up with GPG | `FALSE` | |
| `DEFAULT_ENCRYPT_PUBKEY` | Path of public key to encrypt file with GPG | | | `DEFAULT_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG | | x |
| *or* | | | |
| `DEFAULT_ENCRYPT_PUBLIC_KEY` | Path of public key to encrypt file with GPG | | x |
| `DEFAULT_ENCRYPT_PRIVATE_KEY` | Path of private key to encrypt file with GPG | | x |
##### Scheduling Options ##### Scheduling Options
@@ -229,7 +232,7 @@ If these are set and no other defaults or variables are set explicitly, they wil
| | Absolute HHMM, e.g. `2330` or `0415` | | | | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | | | | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
| | Full datestamp e.g. `2023-12-21 23:30:00` | | | | Full datestamp e.g. `2023-12-21 23:30:00` | |
| | Cron expression e.g. `30 23 * * *` - [Understand the format](https://en.wikipedia.org/wiki/ Cron) - *BACKUP_INTERVAL is ignored* | | | | Cron expression e.g. `30 23 * * *` [Understand the format](https://en.wikipedia.org/wiki/Cron) - *BACKUP_INTERVAL is ignored* | |
| `DEFAULT_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when backup interval executes) | `FALSE` | | `DEFAULT_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when backup interval executes) | `FALSE` |
| | 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | | | | 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | |
| `DEFAULT_ARCHIVE_TIME` | Value in minutes to move all files older than (x) from | | | `DEFAULT_ARCHIVE_TIME` | Value in minutes to move all files older than (x) from | |
@@ -320,12 +323,13 @@ Options that are related to the value of `DEFAULT_BACKUP_LOCATION`
If `DEFAULT_BACKUP_LOCATION` = `FILESYSTEM` then the following options are used: If `DEFAULT_BACKUP_LOCATION` = `FILESYSTEM` then the following options are used:
| Variable | Description | Default | | Variable | Description | Default |
| --------------------------------- | ----------------------------------------------------------------------------------------------------- | ------------------------------------- | | ------------------------------------ | ----------------------------------------------------------------------------------------------------- | ------------------------------------- |
| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` | | `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `DEFAULT_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` | | `DEFAULT_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
| `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` | | `DEFAULT_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
| `DEFAULT_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `700` | | `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` |
| `DEFAULT_FILESYSTEM_PERMISSION` | Permissions to apply to files. | `600` |
###### S3 ###### S3
@@ -390,11 +394,11 @@ echo "${1} Backup Starting on ${2} for ${3} at ${4}. Filename: ${5}"
``` ```
## script DBXX_TYPE DBXX_HOST DBXX_NAME STARTEPOCH BACKUP_FILENAME ## script DBXX_TYPE DBXX_HOST DBXX_NAME STARTEPOCH BACKUP_FILENAME
${f} "${backup_job_db_type}" "${backup_job_db_host}" "${backup_job_db_name}" "${backup_routines_start_time}" "${target}" ${f} "${backup_job_db_type}" "${backup_job_db_host}" "${backup_job_db_name}" "${backup_routines_start_time}" "${backup_job_file}"
Outputs the following on the console: Outputs the following on the console:
`mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_202200315-000000.sql.bz2 `mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_202200315-000000.sql.bz2`
###### Post backup ###### Post backup
@@ -421,7 +425,7 @@ echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a dura
``` ```
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${backup_job_db_name}" "${backup_routines_start_time}" "${backup_routines_finish_time}" "${backup_routines_total_time}" "${target}" "${FILESIZE}" "${checksum_value}" "${move_exit_code} ${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${backup_job_db_name}" "${backup_routines_start_time}" "${backup_routines_finish_time}" "${backup_routines_total_time}" "${backup_job_file}" "${filesize}" "${checksum_value}" "${move_exit_code}
Outputs the following on the console: Outputs the following on the console:
@@ -471,12 +475,16 @@ Otherwise, override them per backup job. Additional backup jobs can be scheduled
##### Encryption Options ##### Encryption Options
| Variable | Description | Default | Encryption will occur after compression and the resulting filename will have a `.gpg` suffix
| ------------------------- | ------------------------------------------- | ------- |
| `DB01_ENCRYPT` | Encrypt file after backing up with GPG | `FALSE` |
| `DB01_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG | | | Variable | Description | Default | `_FILE` |
| *or* | | | | -------------------------- | -------------------------------------------- | ------- | ------- |
| `DB01_ENCRYPT_PUBKEY` | Path of public key to encrypt file with GPG | | | `DB01_ENCRYPT` | Encrypt file after backing up with GPG | `FALSE` | |
| `DB01_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG | | x |
| *or* | | | |
| `DB01_ENCRYPT_PUBLIC_KEY` | Path of public key to encrypt file with GPG | | x |
| `DB01_ENCRYPT_PRIVATE_KEY` | Path of private key to encrypt file with GPG | | x |
##### Scheduling Options ##### Scheduling Options
@@ -488,7 +496,7 @@ Otherwise, override them per backup job. Additional backup jobs can be scheduled
| | Absolute HHMM, e.g. `2330` or `0415` | | | | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | | | | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
| | Full datestamp e.g. `2023-12-21 23:30:00` | | | | Full datestamp e.g. `2023-12-21 23:30:00` | |
| | Cron expression e.g. `30 23 * * *` - [Understand the format](https://en.wikipedia.org/wiki/ Cron) - *BACKUP_INTERVAL is ignored* | | | | Cron expression e.g. `30 23 * * *` [Understand the format](https://en.wikipedia.org/wiki/Cron) - *BACKUP_INTERVAL is ignored* | |
| `DB01_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when backup interval executes) | `FALSE` | | `DB01_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when backup interval executes) | `FALSE` |
| | 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | | | | 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | |
| `DB01_ARCHIVE_TIME` | Value in minutes to move all files older than (x) from `DB01_BACKUP_FILESYSTEM_PATH` | | | `DB01_ARCHIVE_TIME` | Value in minutes to move all files older than (x) from `DB01_BACKUP_FILESYSTEM_PATH` | |
@@ -593,12 +601,13 @@ Options that are related to the value of `DB01_BACKUP_LOCATION`
If `DB01_BACKUP_LOCATION` = `FILESYSTEM` then the following options are used: If `DB01_BACKUP_LOCATION` = `FILESYSTEM` then the following options are used:
| Variable | Description | Default | | Variable | Description | Default |
| ------------------------------ | ----------------------------------------------------------------------------------------------------- | --------------------------------- | | --------------------------------- | ----------------------------------------------------------------------------------------------------- | ---------------------------------- |
| `DB01_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` | | `DB01_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `DB01_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` | | `DB01_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
| `DB01_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DB01_FILESYSTEM_PATH/archive/` | | `DB01_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
| `DB01_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `700` | | `DB01_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DB01_FILESYSTEM_PATH}/archive/` |
| `DB01_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `600` |
###### S3 ###### S3
@@ -630,7 +639,7 @@ If `DB01_BACKUP_LOCATION` = `blobxfer` then the following options are used:.
| `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x | | `DB01_BLOBXFER_STORAGE_ACCOUNT_KEY` | Microsoft Azure Cloud storage account key. | | x |
| `DB01_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x | | `DB01_BLOBXFER_REMOTE_PATH` | Remote Azure path | `/docker-db-backup` | x |
> This service uploads files from backup target directory `DB01_BACKUP_FILESYSTEM_PATH`. > This service uploads files from backup directory `DB01_BACKUP_FILESYSTEM_PATH`.
> If the a cleanup configuration in `DB01_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically. > If the a cleanup configuration in `DB01_CLEANUP_TIME` is defined, the remote directory on Azure storage will also be cleaned automatically.
##### Hooks ##### Hooks
@@ -663,11 +672,11 @@ echo "${1} Backup Starting on ${2} for ${3} at ${4}. Filename: ${5}"
``` ```
## script DB01_TYPE DB01_HOST DB01_NAME STARTEPOCH BACKUP_FILENAME ## script DB01_TYPE DB01_HOST DB01_NAME STARTEPOCH BACKUP_FILENAME
${f} "${backup_job_db_type}" "${backup_job_db_host}" "${backup_job_db_name}" "${backup_routines_start_time}" "${target}" ${f} "${backup_job_db_type}" "${backup_job_db_host}" "${backup_job_db_name}" "${backup_routines_start_time}" "${backup_job_filename}"
Outputs the following on the console: Outputs the following on the console:
`mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_202200315-000000.sql.bz2 `mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_202200315-000000.sql.bz2`
###### Post backup ###### Post backup
@@ -694,7 +703,7 @@ echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a dura
``` ```
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_routines_start_time}" "${backup_routines_finish_time}" "${backup_routines_total_time}" "${target}" "${FILESIZE}" "${checksum_value}" "${move_exit_code} ${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_routines_start_time}" "${backup_routines_finish_time}" "${backup_routines_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}
Outputs the following on the console: Outputs the following on the console:
@@ -733,6 +742,9 @@ $5 body/error message
##### Email Notifications ##### Email Notifications
See more details in the base image listed above for more mail environment variables.
| Parameter | Description | Default | `_FILE` | | Parameter | Description | Default | `_FILE` |
| ----------- | ----------------------------------------------------------------------------------------- | ------- | ------- | | ----------- | ----------------------------------------------------------------------------------------- | ------- | ------- |
| `MAIL_FROM` | What email address to send mail from for errors | | | | `MAIL_FROM` | What email address to send mail from for errors | | |

View File

@@ -8,8 +8,11 @@ source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup source /assets/defaults/10-db-backup
bootstrap_variables backup_init {{BACKUP_NUMBER}} bootstrap_variables backup_init {{BACKUP_NUMBER}}
bootstrap_variables parse_variables {{BACKUP_NUMBER}} bootstrap_variables parse_variables {{BACKUP_NUMBER}}
PROCESS_NAME="{{BACKUP_NUMBER}}-${backup_job_db_host}__${backup_job_db_name}" if [ -z "${backup_job_db_name}" ]; then
PROCESS_NAME="{{BACKUP_NUMBER}}${backup_job_db_host//\//_}"
else
PROCESS_NAME="{{BACKUP_NUMBER}}-${backup_job_db_host//\//_}__${backup_job_db_name}"
fi
trap ctrl_c INT trap ctrl_c INT
@@ -84,6 +87,7 @@ while true; do
fi fi
symlink_log symlink_log
cleanup_old_data
if var_false "${persist}" ; then if var_false "${persist}" ; then
print_debug "Exiting due to manual mode" print_debug "Exiting due to manual mode"

View File

@@ -1,6 +1,8 @@
#!/command/with-contenv bash #!/command/with-contenv bash
BACKUP_JOB_CONCURRENCY=${BACKUP_JOB_CONCURRENCY:-"1"} BACKUP_JOB_CONCURRENCY=${BACKUP_JOB_CONCURRENCY:-"1"}
DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0} DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440} DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440} DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
@@ -13,7 +15,8 @@ DEFAULT_CREATE_LATEST_SYMLINK=${DEFAULT_CREATE_LATEST_SYMLINK:-"TRUE"}
DEFAULT_ENABLE_PARALLEL_COMPRESSION=${DEFAULT_ENABLE_PARALLEL_COMPRESSION:-"TRUE"} DEFAULT_ENABLE_PARALLEL_COMPRESSION=${DEFAULT_ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
DEFAULT_ENCRYPT=${DEFAULT_ENCRYPT:-"FALSE"} DEFAULT_ENCRYPT=${DEFAULT_ENCRYPT:-"FALSE"}
DEFAULT_FILESYSTEM_PATH=${DEFAULT_FILESYSTEM_PATH:-"/backup"} DEFAULT_FILESYSTEM_PATH=${DEFAULT_FILESYSTEM_PATH:-"/backup"}
DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"700"} DEFAULT_FILESYSTEM_PATH_PERMISSION=${DEFAULT_FILESYSTEM_PATH_PERMISSION:-"700"}
DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"} DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"}
DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"} DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"}
DEFAULT_MYSQL_ENABLE_TLS=${DEFAULT_MYSQL_ENABLE_TLS:-"FALSE"} DEFAULT_MYSQL_ENABLE_TLS=${DEFAULT_MYSQL_ENABLE_TLS:-"FALSE"}

View File

@@ -5,26 +5,26 @@ bootstrap_filesystem() {
if [ ! -d "${backup_job_filesystem_path}" ]; then if [ ! -d "${backup_job_filesystem_path}" ]; then
mkdir -p "${backup_job_filesystem_path}" mkdir -p "${backup_job_filesystem_path}"
fi fi
if [ "$(stat -c %U "${backup_job_filesystem_path}")" != "dbbackup" ] ; then chown -R dbbackup:dbbackup "${backup_job_filesystem_path}" ; fi if [ "$(stat -c %U "${backup_job_filesystem_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_path}" ; fi
if [ "$(stat -c %a "${backup_job_filesystem_path}")" != "${backup_job_filesystem_permission}" ] ; then chmod -R "${backup_job_filesystem_permission}" "${backup_job_filesystem_path}" ; fi if [ "$(stat -c %a "${backup_job_filesystem_path}")" != "${backup_job_filesystem_path_permission}" ] ; then chmod "${backup_job_filesystem_path_permission}" "${backup_job_filesystem_path}" ; fi
if [ -d "${backup_job_filesystem_archive_path}" ]; then if [ -d "${backup_job_filesystem_archive_path}" ]; then
if [ "$(stat -c %U "${backup_job_filesystem_archive_path}")" != "dbbackup" ] ; then chown -R dbbackup:dbbackup "${backup_job_filesystem_archive_path}" ; fi if [ "$(stat -c %U "${backup_job_filesystem_archive_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_archive_path}" ; fi
if [ "$(stat -c %a "${backup_job_filesystem_archive_path}")" != "${backup_job_filesystem_permission}" ] ; then chmod -R "${backup_job_filesystem_permission}" "${backup_job_filesystem_archive_path}" ; fi if [ "$(stat -c %a "${backup_job_filesystem_archive_path}")" != "${backup_job_filesystem_path_permission}" ] ; then chmod "${backup_job_filesystem_path_permission}" "${backup_job_filesystem_archive_path}" ; fi
fi fi
if [ ! -d "${LOG_PATH}" ]; then if [ ! -d "${LOG_PATH}" ]; then
mkdir -p "${LOG_PATH}" mkdir -p "${LOG_PATH}"
fi fi
if [ "$(stat -c %U "${LOG_PATH}")" != "dbbackup" ] ; then chown dbbackup:dbbackup "${LOG_PATH}" ; fi if [ "$(stat -c %U "${LOG_PATH}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${LOG_PATH}" ; fi
if [ ! -d "${LOG_PATH}"/"$(date +'%Y%m%d')" ]; then run_as_user mkdir -p "${LOG_PATH}"/"$(date +'%Y%m%d')"; fi if [ ! -d "${LOG_PATH}"/"$(date +'%Y%m%d')" ]; then run_as_user mkdir -p "${LOG_PATH}"/"$(date +'%Y%m%d')"; fi
if [ "$(stat -c %a "${LOG_PATH}")" != "755" ] ; then chmod -R 755 "${LOG_PATH}" ; fi if [ "$(stat -c %a "${LOG_PATH}")" != "755" ] ; then chmod -R 755 "${LOG_PATH}" ; fi
if [ ! -d "${TEMP_PATH}" ]; then if [ ! -d "${TEMP_PATH}" ]; then
mkdir -p "${TEMP_PATH}" mkdir -p "${TEMP_PATH}"
fi fi
if [ "$(stat -c %U "${TEMP_PATH}")" != "dbbackup" ] ; then chown -R dbbackup:dbbackup "${TEMP_PATH}" ; fi if [ "$(stat -c %U "${TEMP_PATH}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${TEMP_PATH}" ; fi
if var_true "${DEBUG_BOOTSTRAP_FILESYSTEM}" ; then debug off; fi if var_true "${DEBUG_BOOTSTRAP_FILESYSTEM}" ; then debug off; fi
} }
@@ -48,10 +48,11 @@ bootstrap_variables() {
DEFAULT_USER \ DEFAULT_USER \
DEFAULT_PASS \ DEFAULT_PASS \
DEFAULT_ENCRYPT_PASSPHRASE \ DEFAULT_ENCRYPT_PASSPHRASE \
DEFAULT_ENCRYPT_PUBKEY \ DEFAULT_ENCRYPT_PUBLIC_KEY \
DEFAULT_ENCRYPT_PRIVATE_KEY \
DEFAULT_MONGO_CUSTOM_URI \ DEFAULT_MONGO_CUSTOM_URI \
DEFAULT_MYSQL_TLS_CA_FILE \ DEFAULT_MYSQL_TLS_CA_FILE \
DEFAULT_MYSQL_TLS_backup_job_filenameCERT_FILE \ DEFAULT_MYSQL_TLS_CERT_FILE \
DEFAULT_MYSQL_TLS_KEY_FILE \ DEFAULT_MYSQL_TLS_KEY_FILE \
DEFAULT_S3_BUCKET \ DEFAULT_S3_BUCKET \
DEFAULT_S3_KEY_ID \ DEFAULT_S3_KEY_ID \
@@ -74,7 +75,8 @@ bootstrap_variables() {
DB"${backup_instance_number}"_USER \ DB"${backup_instance_number}"_USER \
DB"${backup_instance_number}"_PASS \ DB"${backup_instance_number}"_PASS \
DB"${backup_instance_number}"_ENCRYPT_PASSPHRASE \ DB"${backup_instance_number}"_ENCRYPT_PASSPHRASE \
DB"${backup_instance_number}"_ENCRYPT_PUBKEY \ DB"${backup_instance_number}"_ENCRYPT_PUBLIC_KEY \
DB"${backup_instance_number}"_ENCRYPT_PRIVATE_KEY \
DB"${backup_instance_number}"_MONGO_CUSTOM_URI \ DB"${backup_instance_number}"_MONGO_CUSTOM_URI \
DB"${backup_instance_number}"_MYSQL_TLS_CA_FILE \ DB"${backup_instance_number}"_MYSQL_TLS_CA_FILE \
DB"${backup_instance_number}"_MYSQL_TLS_CERT_FILE \ DB"${backup_instance_number}"_MYSQL_TLS_CERT_FILE \
@@ -151,18 +153,28 @@ bootstrap_variables() {
fi fi
## ##
if grep -qo ".*_PASS='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _PASS variable with quotes"
sed -i "s|_PASS='\(.*\)'|_PASS=\1|g" "${backup_instance_vars}"
fi
if grep -qo "MONGO_CUSTOM_URI='.*'" "${backup_instance_vars}"; then
print_debug "[bootstrap_variables] [backup_init] Found _MONGO_CUSTOM_URI variable with quotes"
sed -i "s|MONGO_CUSTOM_URI='\(.*\)'|MONGO_CUSTOM_URI=\1|g" "${backup_instance_vars}"
fi
transform_backup_instance_variable() { transform_backup_instance_variable() {
if grep -q "^DB${1}_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then if grep -q "^DB${1}_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
elif grep -q "^DB_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then elif grep -q "^DB_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
# Allow old legacy work, perhaps remove old DB_ functionality in future? This should allow for seamless upgrades # Allow old legacy work, perhaps remove old DB_ functionality in future? This should allow for seamless upgrades
#print_warn "Legacy Variable 'DB_${2}'' detected - Please upgrade your variables as they will be removed in version 4.3.0" #print_warn "Legacy Variable 'DB_${2}'' detected - Please upgrade your variables as they will be removed in version 4.3.0"
export "$3"="$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2)" export "$3"="$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
elif grep -q "^${2}=" "${backup_instance_vars}" && [ "$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then elif grep -q "^${2}=" "${backup_instance_vars}" && [ "$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
print_warn "Legacy unsupported variable '${2}' detected - Please upgrade your variables as they will be removed in version 4.3.0" print_warn "Legacy unsupported variable '${2}' detected - Please upgrade your variables as they will be removed in version 4.3.0"
export "$3"="$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2)" export "$3"="$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
elif grep -q "^DEFAULT_${2}=" "${backup_instance_vars}" && [ "$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then elif grep -q "^DEFAULT_${2}=" "${backup_instance_vars}" && [ "$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
export "$3"="$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2)" export "$3"="$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
fi fi
} }
@@ -185,12 +197,14 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" ENABLE_PARALLEL_COMPRESSION backup_job_parallel_compression transform_backup_instance_variable "${backup_instance_number}" ENABLE_PARALLEL_COMPRESSION backup_job_parallel_compression
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT backup_job_encrypt transform_backup_instance_variable "${backup_instance_number}" ENCRYPT backup_job_encrypt
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PASSPHRASE backup_job_encrypt_passphrase transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PASSPHRASE backup_job_encrypt_passphrase
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBKEY backup_job_encrypt_pubkey transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PRIVATE_KEY backup_job_encrypt_private_key
transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBLIC_KEY backup_job_encrypt_public_key
transform_backup_instance_variable "${backup_instance_number}" EXTRA_DUMP_OPTS backup_job_extra_dump_opts transform_backup_instance_variable "${backup_instance_number}" EXTRA_DUMP_OPTS backup_job_extra_dump_opts
transform_backup_instance_variable "${backup_instance_number}" EXTRA_ENUMERATION_OPTS backup_job_extra_enumeration_opts transform_backup_instance_variable "${backup_instance_number}" EXTRA_ENUMERATION_OPTS backup_job_extra_enumeration_opts
transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts
transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path
transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH backup_job_filesystem_path transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH backup_job_filesystem_path
transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH_PERMISSION backup_job_filesystem_path_permission
transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PERMISSION backup_job_filesystem_permission transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PERMISSION backup_job_filesystem_permission
transform_backup_instance_variable "${backup_instance_number}" GZ_RSYNCABLE backup_job_gz_rsyncable transform_backup_instance_variable "${backup_instance_number}" GZ_RSYNCABLE backup_job_gz_rsyncable
transform_backup_instance_variable "${backup_instance_number}" HOST backup_job_db_host transform_backup_instance_variable "${backup_instance_number}" HOST backup_job_db_host
@@ -237,21 +251,84 @@ bootstrap_variables() {
transform_backup_instance_variable "${backup_instance_number}" USER backup_job_db_user transform_backup_instance_variable "${backup_instance_number}" USER backup_job_db_user
backup_job_backup_begin=$(echo "${backup_job_backup_begin}" | sed -e "s|'||g" -e 's|"||g') backup_job_backup_begin=$(echo "${backup_job_backup_begin}" | sed -e "s|'||g" -e 's|"||g')
if var_true "${DEBUG_BACKUP_INSTANCE_VARIABLE}" ; then cat <<EOF
## BEGIN Variable Dump $(TZ=${TIMEZONE} date)
$(cat ${backup_instance_vars})
## END
EOF
fi
rm -rf "${backup_instance_vars}" rm -rf "${backup_instance_vars}"
} }
upgrade_lonely_variables() { restore_init() {
upgrade_lonely_variables_tmp=$(mktemp) restore_vars=$(mktemp)
set -o posix; set | grep "^$1" | grep -v "^$1[0-9]._" > "${upgrade_lonely_variables_tmp}" set -o posix ; set | grep -oE "^restore_job_.*=" | grep -oE ".*=" | sed "/--/d" > "${restore_vars}"
while read -r exist_var ; do while read -r restore_var ; do
if [ ! -v "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2)" ] ; then unset "$(echo "${restore_var}" | cut -d = -f 1)"
export "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2-9)"="$(echo "${exist_var}" | cut -d = -f2)" done < "${restore_vars}"
else
print_error "Variable ${1}01_$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2) already exists, fix your variables and start again.." if [ -n "${DB_NAME}" ] && [ -z "${DB01_NAME}" ] ; then export DB01_NAME="${DB_NAME}" ; unset DB_NAME ; fi
exit 1 if [ -n "${DB_USER}" ] && [ -z "${DB01_USER}" ] ; then export DB01_USER="${DB_USER}" ; unset DB_USER ; fi
fi if [ -n "${DB_PASS}" ] && [ -z "${DB01_PASS}" ] ; then export DB01_PASS="${DB_PASS}" ; unset DB_PASS ; fi
done < "${upgrade_lonely_variables_tmp}" if [ -n "${DB_TYPE}" ] && [ -z "${DB01_TYPE}" ] ; then export DB01_TYPE="${DB_TYPE}" ; unset DB_TYPE ; fi
rm -rf "${upgrade_lonely_variables_tmp}" if [ -n "${DB_PORT}" ] && [ -z "${DB01_PORT}" ] ; then export DB01_PORT="${DB_PORT}" ; unset DB_PORT ; fi
if [ -n "${DB_HOST}" ] && [ -z "${DB01_HOST}" ] ; then export DB01_HOST="${DB_HOST}" ; unset DB_HOST ; fi
if [ -n "${DB_AUTH}" ] && [ -z "${DB01_AUTH}" ] ; then export DB01_AUTH="${DB_AUTH}" ; unset DB_AUTH ; fi
if [ -n "${MONGO_CUSTOM_URI}" ] && [ -z "${DB01_MONGO_CUSTOM_URI}" ] ; then export DB01_MONGO_CUSTOM_URI="${DB_MONGO_CUSTOM_URI}" ; unset MONGO_CUSTOM_URI ; fi
if [ -n "${MYSQL_TLS_CA_FILE}" ] && [ -z "${DB01_MYSQL_TLS_CA_FILE}" ] ; then export DB01_MYSQL_TLS_CA_FILE="${MYSQL_TLS_CA_FILE}" ; unset MYSQL_TLS_CA_FILE ; fi
if [ -n "${MYSQL_TLS_CERT_FILE}" ] && [ -z "${DB01_MYSQL_TLS_CERT_FILE}" ] ; then export DB01_MYSQL_TLS_CERT_FILE="${MYSQL_TLS_CERT_FILE}" ; unset MYSQL_TLS_CERT_FILE ; fi
if [ -n "${MYSQL_TLS_KEY_FILE}" ] && [ -z "${DB01_MYSQL_TLS_KEY_FILE}" ] ; then export DB01_MYSQL_TLS_KEY_FILE="${MYSQL_TLS_KEY_FILE}" ; unset MYSQL_TLS_KEY_FILE ; fi
transform_file_var \
DEFAULT_AUTH \
DEFAULT_HOST \
DEFAULT_NAME \
DEFAULT_PASS \
DEFAULT_PORT \
DEFAULT_TYPE \
DEFAULT_USER \
DEFAULT_MONGO_CUSTOM_URI \
DEFAULT_MYSQL_TLS_CA_FILE \
DEFAULT_MYSQL_TLS_CERT_FILE \
DEFAULT_MYSQL_TLS_KEY_FILE
set -o posix ; set | grep -E "^DEFAULT_" > "${restore_vars}"
restore_instances=$(printenv | sort | grep -c '^DB.*_HOST')
for (( restore_instance_number = 01; restore_instance_number <= restore_instances; restore_instance_number++ )) ; do
restore_instance_number=$(printf "%02d" $restore_instance_number)
transform_file_var \
DB"${restore_instance_number}"_AUTH \
DB"${restore_instance_number}"_HOST \
DB"${restore_instance_number}"_NAME \
DB"${restore_instance_number}"_PASS \
DB"${restore_instance_number}"_PORT \
DB"${restore_instance_number}"_TYPE \
DB"${restore_instance_number}"_USER \
DB"${restore_instance_number}"_MONGO_CUSTOM_URI \
DB"${restore_instance_number}"_MYSQL_TLS_CA_FILE \
DB"${restore_instance_number}"_MYSQL_TLS_CERT_FILE \
DB"${restore_instance_number}"_MYSQL_TLS_KEY_FILE
set -o posix ; set | grep -E "^DB${restore_instance_number}_" >> "${restore_vars}"
done
if [ -n "${DB_DUMP_TARGET}" ]; then
print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET' detected - Please upgrade your variables as they will be removed in version 4.3.0"
sed -i "/DEFAULT_FILESYSTEM_PATH=/d" "${restore_vars}"
echo "DEFAULT_FILESYSTEM_PATH=${DB_DUMP_TARGET}" >> "${restore_vars}"
fi
if [ -n "${DB_DUMP_TARGET_ARCHIVE}" ]; then
print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET_ACRHIVE' detected - Please upgrade your variables as they will be removed in version 4.3.0"
sed -i "/DEFAULT_FILESYSTEM_ARCHIVE_PATH=/d" "${restore_vars}"
echo "DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DB_DUMP_TARGET_ARCHIVE}" >> "${restore_vars}"
fi
echo "RESTORE_VARS is ${restore_vars}"
} }
parse_variables() { parse_variables() {
@@ -260,7 +337,7 @@ bootstrap_variables() {
## Check is Variable is Defined ## Check is Variable is Defined
## Usage: check_var transformed_varname real_varname "Description" ## Usage: check_var transformed_varname real_varname "Description"
output_off output_off
print_debug "Looking for existence of $2 environment variable" print_debug "[parse_variables] Looking for existence of $2 environment variable"
if [ ! -v "$1" ]; then if [ ! -v "$1" ]; then
print_error "No '$3' Entered! - Set '\$$2' environment variable - Halting Backup Number ${v_instance}" print_error "No '$3' Entered! - Set '\$$2' environment variable - Halting Backup Number ${v_instance}"
s6-svc -d /var/run/s6/legacy-services/dbbackup-"${v_instance}" s6-svc -d /var/run/s6/legacy-services/dbbackup-"${v_instance}"
@@ -368,9 +445,24 @@ bootstrap_variables() {
if var_true "${backup_job_resource_optimized}" ; then play_fair="nice -19 ionice -c2" ; fi if var_true "${backup_job_resource_optimized}" ; then play_fair="nice -19 ionice -c2" ; fi
} }
upgrade_lonely_variables() {
upgrade_lonely_variables_tmp=$(mktemp)
set -o posix; set | grep "^$1" | grep -v "^$1[0-9]._" > "${upgrade_lonely_variables_tmp}"
while read -r exist_var ; do
if [ ! -v "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2)" ] ; then
export "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2-9)"="$(echo "${exist_var}" | cut -d = -f2)"
else
print_error "Variable ${1}01_$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2) already exists, fix your variables and start again.."
exit 1
fi
done < "${upgrade_lonely_variables_tmp}"
rm -rf "${upgrade_lonely_variables_tmp}"
}
case "${1}" in case "${1}" in
backup_init ) backup_init "$2" ;; backup_init ) backup_init "$2" ;;
parse_variables) parse_variables "$2" ;; parse_variables) parse_variables "$2" ;;
restore_init) restore_init ;;
upgrade ) upgrade_lonely_variables "$2" ;; upgrade ) upgrade_lonely_variables "$2" ;;
esac esac
@@ -381,12 +473,7 @@ backup_couch() {
prepare_dbbackup prepare_dbbackup
backup_job_filename=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt backup_job_filename=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt
backup_job_filename_base=couch_${backup_job_db_name}_${backup_job_db_host#*//} backup_job_filename_base=couch_${backup_job_db_name}_${backup_job_db_host#*//}
compression compressionzyclonite
pre_dbbackup ${backup_job_db_name}
write_log notice "Dumping CouchDB database: '${backup_job_db_name}' ${compression_string}"
if var_true "${DEBUG_BACKUP_COUCH}" ; then debug on; fi
run_as_user curl -sSL -X GET ${backup_job_db_host}:${backup_job_db_port}/${backup_job_db_name}/_all_docs?include_docs=true | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_COUCH}" ; then debug off; fi if var_true "${DEBUG_BACKUP_COUCH}" ; then debug off; fi
check_exit_code backup "${backup_job_filename}" check_exit_code backup "${backup_job_filename}"
timer backup finish timer backup finish
@@ -398,20 +485,25 @@ backup_couch() {
} }
backup_influx() { backup_influx() {
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${backup_job_db_name,,}" = "all" ] ; then if [ "${backup_job_db_name,,}" = "all" ] ; then
write_log debug "Preparing to back up everything" write_log debug "[backup_influx] Preparing to back up everything"
db_names=justbackupeverything db_names=justbackupeverything
else else
db_names=$(echo "${backup_job_db_name}" | tr ',' '\n') db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
fi fi
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
case "${backup_job_db_influx_version,,}" in case "${backup_job_influx_version,,}" in
1 ) 1 )
print_debug "[backup_influx] Influx DB Version 1 selected"
for db in ${db_names}; do for db in ${db_names}; do
prepare_dbbackup prepare_dbbackup
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now} backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx_${db}_${backup_job_db_host#*//} backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
compression compression
pre_dbbackup "${db}" pre_dbbackup "${db}"
write_log notice "Dumping Influx database: '${db}'" write_log notice "Dumping Influx database: '${db}'"
@@ -433,9 +525,12 @@ backup_influx() {
done done
;; ;;
2 ) 2 )
print_debug "[backup_influx] Influx DB Version 2 selected"
for db in ${db_names}; do for db in ${db_names}; do
prepare_dbbackup prepare_dbbackup
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now} backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//} backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
compression compression
@@ -445,10 +540,10 @@ backup_influx() {
run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --compression none "${TEMP_PATH}"/"${backup_job_filename_dir}" run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --compression none "${TEMP_PATH}"/"${backup_job_filename_dir}"
exit_code=$? exit_code=$?
check_exit_code backup "${backup_job_filename_dir}" check_exit_code backup "${backup_job_filename_dir}"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
create_archive create_archive
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension} backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//} backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
timer backup finish timer backup finish
file_encryption file_encryption
generate_checksum generate_checksum
@@ -462,6 +557,7 @@ backup_influx() {
backup_mongo() { backup_mongo() {
prepare_dbbackup prepare_dbbackup
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi
if [ "${backup_job_compression,,}" = "none" ] ; then if [ "${backup_job_compression,,}" = "none" ] ; then
backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.archive backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.archive
backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,} backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}
@@ -476,6 +572,7 @@ backup_mongo() {
else else
mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}" mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
fi fi
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
pre_dbbackup "${backup_job_db_name}" pre_dbbackup "${backup_job_db_name}"
write_log notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}" write_log notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi
@@ -507,8 +604,10 @@ backup_mssql() {
backup_job_filename_original=${backup_job_filename} backup_job_filename_original=${backup_job_filename}
compression compression
pre_dbbackup all pre_dbbackup all
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}" run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
check_exit_code backup "${backup_job_filename}" check_exit_code backup "${backup_job_filename}"
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
timer backup finish timer backup finish
file_encryption file_encryption
generate_checksum generate_checksum
@@ -542,7 +641,7 @@ backup_mssql() {
} }
backup_mysql() { backup_mysql() {
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
if var_true "${backup_job_mysql_events}" ; then if var_true "${backup_job_mysql_events}" ; then
events="--events" events="--events"
fi fi
@@ -566,7 +665,7 @@ backup_mysql() {
else else
db_names=$(echo "${backup_job_db_name}" | tr ',' '\n') db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
fi fi
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')" write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${backup_job_split_db}" ; then if var_true "${backup_job_split_db}" ; then
@@ -631,6 +730,7 @@ backup_pgsql() {
post_dbbackup "globals" post_dbbackup "globals"
} }
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
export PGPASSWORD=${backup_job_db_pass} export PGPASSWORD=${backup_job_db_pass}
if [ -n "${backup_job_db_auth}" ] ; then if [ -n "${backup_job_db_auth}" ] ; then
authdb=${backup_job_db_auth} authdb=${backup_job_db_auth}
@@ -639,7 +739,7 @@ backup_pgsql() {
fi fi
if [ "${backup_job_db_name,,}" = "all" ] ; then if [ "${backup_job_db_name,,}" = "all" ] ; then
write_log debug "Preparing to back up all databases" write_log debug "Preparing to back up all databases"
db_names=$(run_as_user psql -h ${backup_job_db_host} -U ${backup_job_db_user} -p ${backup_job_db_port} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' ) db_names=$(psql -h ${backup_job_db_host} -U ${backup_job_db_user} -p ${backup_job_db_port} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
if [ -n "${backup_job_db_name_exclude}" ] ; then if [ -n "${backup_job_db_name_exclude}" ] ; then
db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n') db_names_exclusions=$(echo "${backup_job_db_name_exclude}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do for db_exclude in ${db_names_exclusions} ; do
@@ -654,7 +754,7 @@ backup_pgsql() {
fi fi
if var_false "${_postgres_backup_globals}" && var_true "${backup_job_backup_pgsql_globals}" ; then _postgres_backup_globals=true; fi if var_false "${_postgres_backup_globals}" && var_true "${backup_job_backup_pgsql_globals}" ; then _postgres_backup_globals=true; fi
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')" write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${backup_job_split_db}" ; then if var_true "${backup_job_split_db}" ; then
@@ -686,7 +786,8 @@ backup_pgsql() {
compression compression
pre_dbbackup all pre_dbbackup all
write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}" write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
tmp_db_names=$(run_as_user psql -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' ) if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
tmp_db_names=$(psql -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
for r_db_name in $(echo $db_names | xargs); do for r_db_name in $(echo $db_names | xargs); do
tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" ) tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
done done
@@ -694,7 +795,6 @@ backup_pgsql() {
for x_db_name in ${tmp_db_names} ; do for x_db_name in ${tmp_db_names} ; do
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name}) pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done done
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
exit_code=$? exit_code=$?
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
@@ -731,11 +831,13 @@ backup_redis() {
sleep 5 sleep 5
done done
backup_job_filename_original=${backup_job_filename} backup_job_filename_original=${backup_job_filename}
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi
compression compression
pre_dbbackup all pre_dbbackup all
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
timer backup finish
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi
timer backup finish
check_exit_code backup "${backup_job_filename}" check_exit_code backup "${backup_job_filename}"
file_encryption file_encryption
generate_checksum generate_checksum
@@ -750,16 +852,21 @@ backup_sqlite3() {
db="${db%.*}" db="${db%.*}"
backup_job_filename=sqlite3_${db}_${now}.sqlite3 backup_job_filename=sqlite3_${db}_${now}.sqlite3
backup_job_filename_base=sqlite3_${db}.sqlite3 backup_job_filename_base=sqlite3_${db}.sqlite3
compression
pre_dbbackup "${db}" pre_dbbackup "${db}"
write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}" write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}"
if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug on; fi if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug on; fi
silent run_as_user ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${TEMP_PATH}/backup.sqlite3'" silent ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${TEMP_PATH}/backup_${now}.sqlite3'"
exit_code=$? exit_code=$?
check_exit_code backup "${backup_job_filename}" check_exit_code backup "${backup_job_filename}"
run_as_user ${play_fair} cat "${TEMP_PATH}"/backup.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}/${backup_job_filename}" > /dev/null if [ ! -f "${TEMP_PATH}"/backup_${now}.sqlite3 ] ; then
timer backup finish print_error "SQLite3 backup failed! Exitting"
return 1
fi
compression
run_as_user ${play_fair} cat "${TEMP_PATH}"/backup_${now}.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}/${backup_job_filename}" > /dev/null
rm -rf "${TEMP_PATH}"/backup_${now}.sqlite3
if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug off; fi if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug off; fi
timer backup finish
file_encryption file_encryption
generate_checksum generate_checksum
move_dbbackup move_dbbackup
@@ -966,6 +1073,7 @@ compression() {
case "${backup_job_compression,,}" in case "${backup_job_compression,,}" in
bz* ) bz* )
print_debug "[compression] Selected BZIP"
compress_cmd="${play_fair} pbzip2 -q -${backup_job_compression_level} -p${backup_job_parallel_compression_threads} " compress_cmd="${play_fair} pbzip2 -q -${backup_job_compression_level} -p${backup_job_parallel_compression_threads} "
compression_type="bzip2" compression_type="bzip2"
dir_compress_cmd=${compress_cmd} dir_compress_cmd=${compress_cmd}
@@ -974,6 +1082,7 @@ compression() {
backup_job_filename=${backup_job_filename}.bz2 backup_job_filename=${backup_job_filename}.bz2
;; ;;
gz* ) gz* )
print_debug "[compression] Selected GZIP"
compress_cmd="${play_fair} pigz -q -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} ${gz_rsyncable}" compress_cmd="${play_fair} pigz -q -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} ${gz_rsyncable}"
compression_type="gzip" compression_type="gzip"
extension=".gz" extension=".gz"
@@ -982,6 +1091,7 @@ compression() {
backup_job_filename=${backup_job_filename}.gz backup_job_filename=${backup_job_filename}.gz
;; ;;
xz* ) xz* )
print_debug "[compression] Selected XZIP"
compress_cmd="${play_fair} pixz -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} " compress_cmd="${play_fair} pixz -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} "
compression_type="xzip" compression_type="xzip"
dir_compress_cmd=${compress_cmd} dir_compress_cmd=${compress_cmd}
@@ -990,6 +1100,7 @@ compression() {
backup_job_filename=${backup_job_filename}.xz backup_job_filename=${backup_job_filename}.xz
;; ;;
zst* ) zst* )
print_debug "[compression] Selected ZSTD"
compress_cmd="${play_fair} zstd -q -q --rm -${backup_job_compression_level} -T${backup_job_parallel_compression_threads} ${gz_rsyncable}" compress_cmd="${play_fair} zstd -q -q --rm -${backup_job_compression_level} -T${backup_job_parallel_compression_threads} ${gz_rsyncable}"
compression_type="zstd" compression_type="zstd"
dir_compress_cmd=${compress_cmd} dir_compress_cmd=${compress_cmd}
@@ -1035,12 +1146,14 @@ create_archive() {
create_schedulers() { create_schedulers() {
if var_true "${DEBUG_CREATE_SCHEDULERS}" ; then debug on; fi if var_true "${DEBUG_CREATE_SCHEDULERS}" ; then debug on; fi
backup() { backup() {
bootstrap_variables upgrade BACKUP local backup_instances=$(set -o posix ; set | grep -Pc "^(DB[0-9]._HOST=|.*MONGO_CUSTOM_URI=)")
local backup_instances=$(printenv | sort | grep -c "^DB[0-9]._HOST") print_debug "[create_schedulers] Found '${backup_instances}' DB_HOST instances"
if [ -n "${DB_HOST}" ] && [ "${backup_instances}" ]; then if [ -n "${DB_HOST}" ] && [ "${backup_instances}" ]; then
backup_instances=1; backup_instances=1;
print_debug "Detected using old DB_ variables" print_debug "[create_schedulers] Detected using old DB_ variables"
fi fi
for (( instance = 01; instance <= backup_instances; )) ; do for (( instance = 01; instance <= backup_instances; )) ; do
@@ -1081,7 +1194,7 @@ EOF
} }
ctrl_c() { ctrl_c() {
sed -i "/^{{BACKUP_NUMBER}}/d" /tmp/.container/db-backup-backups sed -i "/^${backup_instance_number}/d" /tmp/.container/db-backup-backups
symlink_log symlink_log
print_warn "User aborted" print_warn "User aborted"
exit exit
@@ -1096,7 +1209,11 @@ db_backup_container_init() {
debug() { debug() {
case "${1}" in case "${1}" in
off) off)
backup_job_log_level=$_original_job_log_level}
CONTAINER_LOG_LEVEL=${_original_container_log_level}
DEBUG_MODE=${_original_debug_mode} DEBUG_MODE=${_original_debug_mode}
SHOW_OUTPUT=${_original_show_output}
if var_true "${DEBUG_MODE}" ; then if var_true "${DEBUG_MODE}" ; then
set -x set -x
else else
@@ -1104,9 +1221,25 @@ debug() {
fi fi
;; ;;
on) on)
if [ -z "${_original_container_log_level}" ]; then
_original_container_log_level="${CONTAINER_LOG_LEVEL}"
fi
if [ -z "${_original_job_log_level}" ]; then
_original_job_log_level="${backup_job_log_level}"
fi
if [ -z "${_original_debug_mode}" ]; then if [ -z "${_original_debug_mode}" ]; then
_original_debug_mode="${DEBUG_MODE}" _original_debug_mode="${DEBUG_MODE}"
fi fi
if [ -z "${_original_show_output}" ]; then
_original_show_output="${SHOW_OUTPUT}"
if ! [[ "${_original_show_output,,}" =~ true|false ]]; then
__original_show_output="FALSE"
fi
fi
backup_job_log_level=DEBUG
CONTAINER_LOG_LEVEL=DEBUG
SHOW_OUTPUT=TRUE
set -x set -x
;; ;;
esac esac
@@ -1116,27 +1249,33 @@ file_encryption() {
if var_true "${DEBUG_FILE_ENCRYPTION}" ; then debug on; fi if var_true "${DEBUG_FILE_ENCRYPTION}" ; then debug on; fi
if var_true "${backup_job_encrypt}" ; then if var_true "${backup_job_encrypt}" ; then
if [ "${exit_code}" = "0" ] ; then if [ "${exit_code}" = "0" ] ; then
print_debug "Encrypting" print_debug "[file_encryption] Encrypting"
output_off output_off
if [ -n "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_pubkey}" ]; then if [ -n "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ]; then
print_error "Can't encrypt as both ENCRYPT_PASSPHRASE and ENCRYPT_PUBKEY exist!" print_error "Can't encrypt as both ENCRYPT_PASSPHRASE and ENCRYPT_PUBKEY exist!"
return return
elif [ -n "${backup_job_encrypt_passphrase}" ] && [ -z "${backup_job_encrypt_pubkey}" ]; then elif [ -n "${backup_job_encrypt_passphrase}" ] && [ -z "${backup_job_encrypt_public_key}" ]; then
print_notice "Encrypting with GPG Passphrase" print_notice "Encrypting with GPG Passphrase"
encrypt_routines_start_time=$(date +'%s') encrypt_routines_start_time=$(date +'%s')
encrypt_tmp_dir=$(run_as_user mktemp -d) encrypt_tmp_dir=$(run_as_user mktemp -d)
echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${TEMP_PATH}"/"${backup_job_filename}" echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${TEMP_PATH}"/"${backup_job_filename}"
rm -rf "${encrypt_tmp_dir}" rm -rf "${encrypt_tmp_dir}"
elif [ -z "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_pubkey}" ]; then elif [ -z "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ] && [ -n "${backup_job_encrypt_private_key}" ]; then
if [ -f "${backup_job_encrypt_pubkey}" ]; then if [ -f "${backup_job_encrypt_private_key}" ]; then
encrypt_routines_start_time=$(date +'%s') encrypt_routines_start_time=$(date +'%s')
print_notice "Encrypting with GPG Public Key" print_notice "Encrypting with GPG Private Key"
encrypt_tmp_dir=$(run_as_user mktemp -d) encrypt_tmp_dir=$(run_as_user mktemp -d)
silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --recipient-file "${backup_job_encrypt_pubkey}" -c "${TEMP_PATH}"/"${backup_job_filename}" cat "${backup_job_encrypt_private_key}" | run_as_user tee "${encrypt_tmp_dir}"/private_key.asc > /dev/null
print_debug "[file_encryption] [key] Importing Private Key"
silent run_as_user gpg --home ${encrypt_tmp_dir} --batch --import "${encrypt_tmp_dir}"/private_key.asc
print_debug "[file_encryption] [key] Encrypting to Public Key"
cat "${backup_job_encrypt_public_key}" | run_as_user tee "${encrypt_tmp_dir}"/public_key.asc > /dev/null
silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --encrypt --recipient-file "${encrypt_tmp_dir}"/public_key.asc "${TEMP_PATH}"/"${backup_job_filename}"
rm -rf "${encrypt_tmp_dir}" rm -rf "${encrypt_tmp_dir}"
fi fi
fi fi
if [ -f "${TEMP_PATH}"/"${backup_job_filename}".gpg ]; then if [ -f "${TEMP_PATH}"/"${backup_job_filename}".gpg ]; then
print_debug "[file_encryption] Deleting original file"
rm -rf "${TEMP_PATH:?}"/"${backup_job_filename:?}" rm -rf "${TEMP_PATH:?}"/"${backup_job_filename:?}"
backup_job_filename="${backup_job_filename}.gpg" backup_job_filename="${backup_job_filename}.gpg"
@@ -1146,6 +1285,9 @@ file_encryption() {
- dbbackup.backup.encrypt.duration.[${backup_job_db_host}.${backup_job_db_name}] ${encrypt_routines_total_time} - dbbackup.backup.encrypt.duration.[${backup_job_db_host}.${backup_job_db_name}] ${encrypt_routines_total_time}
EOF EOF
) )
else
print_error "Encryption failed! Could not detect encrypted file"
return 99
fi fi
else else
write_log error "Skipping encryption because backup did not complete successfully" write_log error "Skipping encryption because backup did not complete successfully"
@@ -1221,13 +1363,15 @@ notify() {
if [ -z "${SMTP_HOST}" ] ; then write_log error "[notifications] No SMTP_HOST variable set - Skipping sending Email notifications" ; skip_mail=true ; fi if [ -z "${SMTP_HOST}" ] ; then write_log error "[notifications] No SMTP_HOST variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
if [ -z "${SMTP_PORT}" ] ; then write_log error "[notifications] No SMTP_PORT variable set - Skipping sending Email notifications" ; skip_mail=true ; fi if [ -z "${SMTP_PORT}" ] ; then write_log error "[notifications] No SMTP_PORT variable set - Skipping sending Email notifications" ; skip_mail=true ; fi
if var_nottrue "${skip_mail}" ; then if var_nottrue "${skip_mail}" ; then
# Ensure msmtp has a "from" address configured before sending mail.
# BUGFIX: the guard previously tested /etc/msmptrc (typo) while the echo
# appended to /etc/msmtprc, so the check never matched and a duplicate
# "from ..." line was appended to the config on every notification.
if ! grep -q "^from" /etc/msmtprc ; then
    echo "from ${MAIL_FROM}" >> /etc/msmtprc
fi
mail_recipients=$(echo "${MAIL_TO}" | tr "," "\n") mail_recipients=$(echo "${MAIL_TO}" | tr "," "\n")
for mail_recipient in $mail_recipients ; do for mail_recipient in $mail_recipients ; do
cat <<EOF | s-nail -v \ cat <<EOF | msmtp -t "${mail_recipient}" -c /etc/msmtprc
-r "${MAIL_FROM}" \ To: ${mail_recipient}
-s "[db-backup] [${DOMAIN}] ${3}" \ Subject: [db-backup] [${DOMAIN}] ${3}
-S smtp="${SMTP_HOST}":"${SMTP_PORT}" \
"${mail_recipient}"
Time: ${1} Time: ${1}
Log File: {2} Log File: {2}
Error Code: ${3} Error Code: ${3}
@@ -1312,23 +1456,23 @@ EOF
for notification_type in $notification_types ; do for notification_type in $notification_types ; do
case "${notification_type,,}" in case "${notification_type,,}" in
"custom" ) "custom" )
print_debug "Sending Notification via custom" print_debug "[notify] Sending Notification via custom"
notification_custom "${1}" "${2}" "${3}" "${4}" "${5}" notification_custom "${1}" "${2}" "${3}" "${4}" "${5}"
;; ;;
"email" | "mail" ) "email" | "mail" )
print_debug "Sending Notification via email" print_debug "[notify] Sending Notification via email"
notification_email "${1}" "${2}" "${3}" "${4}" "${5}" notification_email "${1}" "${2}" "${3}" "${4}" "${5}"
;; ;;
"matrix" ) "matrix" )
print_debug "Sending Notification via Matrix" print_debug "[notify] Sending Notification via Matrix"
notification_matrix "${1}" "${2}" "${3}" "${4}" "${5}" notification_matrix "${1}" "${2}" "${3}" "${4}" "${5}"
;; ;;
"mattermost" ) "mattermost" )
print_debug "Sending Notification via Mattermost" print_debug "[notify] Sending Notification via Mattermost"
notification_mattermost "${1}" "${2}" "${3}" "${4}" "${5}" notification_mattermost "${1}" "${2}" "${3}" "${4}" "${5}"
;; ;;
"rocketchat" ) "rocketchat" )
print_debug "Sending Notification via Rocketchat" print_debug "[notify] Sending Notification via Rocketchat"
notification_rocketchat "${1}" "${2}" "${3}" "${4}" "${5}" notification_rocketchat "${1}" "${2}" "${3}" "${4}" "${5}"
;; ;;
* ) * )
@@ -1371,8 +1515,37 @@ move_dbbackup() {
write_log debug "Moving backup to filesystem" write_log debug "Moving backup to filesystem"
run_as_user mkdir -p "${backup_job_filesystem_path}" run_as_user mkdir -p "${backup_job_filesystem_path}"
if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${TEMP_PATH}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${TEMP_PATH}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi
if var_true "${DEBUG_MOVE_DBBACKUP}"; then
cat <<EOF
## BEGIN Before Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
##
$(ls -l "${TEMP_PATH}"/*)
## END
EOF
fi
run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}" run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
move_exit_code=$? move_exit_code=$?
if var_true "${DEBUG_MOVE_DBBACKUP}"; then
cat <<EOF
## BEGIN After Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
##
$(ls -l "${TEMP_PATH}"/*)
## END
## BEGIN After Moving file to _FILESYSTEM_PATH $(TZ=${TIMEZONE} date)
##
$(ls -l "${backup_job_filesystem_path}"/*)
## END
EOF
fi
if var_true "${backup_job_create_latest_symlink}" ; then if var_true "${backup_job_create_latest_symlink}" ; then
run_as_user ln -sfr "${backup_job_filesystem_path}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/latest-"${backup_job_filename_base}" run_as_user ln -sfr "${backup_job_filesystem_path}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/latest-"${backup_job_filename_base}"
fi fi
@@ -1399,7 +1572,7 @@ move_dbbackup() {
s3_ssl="--no-verify-ssl" s3_ssl="--no-verify-ssl"
fi fi
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${backup_job_s3_protocol}://${backup_job_s3_host}" [[ ( -n "${backup_job_s3_host}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${backup_job_s3_protocol}://${backup_job_s3_host}"
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_PATH}/${backup_job_filename} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${backup_job_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts} silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_PATH}/${backup_job_filename} s3://${backup_job_s3_bucket}/${backup_job_s3_path}/${backup_job_filename} ${s3_ssl} ${s3_ca_cert} ${backup_job_s3_extra_opts}
move_exit_code=$? move_exit_code=$?
@@ -1460,20 +1633,20 @@ pre_dbbackup() {
### Pre Backup Custom Script Support ### Pre Backup Custom Script Support
if [ -d "/assets/custom-scripts/pre" ] && dir_notempty "/assets/custom-scripts/pre" ; then if [ -d "/assets/custom-scripts/pre" ] && dir_notempty "/assets/custom-scripts/pre" ; then
write_log warning "Found Custom Post Scripts in /assets/custom-scripts/pre - Automatically moving them to '${backup_job_script_location_pre}'" write_log warn "Found Custom Post Scripts in /assets/custom-scripts/pre - Automatically moving them to '${backup_job_script_location_pre}'"
run_as_user mkdir -p "${backup_job_script_location_pre}" mkdir -p "${backup_job_script_location_pre}"
silent run_as_user cp /assets/custom-scripts/pre/* "${backup_job_script_location_pre}" silent cp -aR /assets/custom-scripts/pre/* "${backup_job_script_location_pre}"
fi fi
if [ -d "${backup_job_script_location_pre}" ] && dir_notempty "${backup_job_script_location_pre}" ; then if [ -d "${backup_job_script_location_pre}" ] && dir_notempty "${backup_job_script_location_pre}" ; then
for f in $(find ${backup_job_script_location_pre} -name \*.sh -type f); do for f in $(find ${backup_job_script_location_pre} -name \*.sh -type f); do
if var_true "${backup_job_pre_script_x_verify}" ; then if var_true "${backup_job_pre_script_x_verify}" ; then
run_as_user ${f} "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${backup_job_filename}" ${f} "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${backup_job_filename}"
else else
if [ -x "${f}" ] ; then if [ -x "${f}" ] ; then
write_log notice "Executing pre backup custom script : '${f}'" write_log notice "Executing pre backup custom script : '${f}'"
## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME ## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
run_as_user ${f} "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${backup_job_filename}" ${f} "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${backup_job_filename}"
else else
write_log error "Can't run pre backup custom script: '${f}' as its filesystem bit is not executible!" write_log error "Can't run pre backup custom script: '${f}' as its filesystem bit is not executible!"
fi fi
@@ -1516,11 +1689,11 @@ EOZP
### Post Script Support ### Post Script Support
if [ -n "${backup_job_post_script}" ] ; then if [ -n "${backup_job_post_script}" ] ; then
if var_true "${backup_job_post_script_x_verify}" ; then if var_true "${backup_job_post_script_x_verify}" ; then
run_as_user eval "${backup_job_post_script}" "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}" eval "${backup_job_post_script}" "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else else
if [ -x "${backup_job_post_script}" ] ; then if [ -x "${backup_job_post_script}" ] ; then
write_log notice "Found POST_SCRIPT environment variable. Executing '${backup_job_post_script}" write_log notice "Found POST_SCRIPT environment variable. Executing '${backup_job_post_script}"
run_as_user eval "${backup_job_post_script}" "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}" eval "${backup_job_post_script}" "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else else
write_log error "Can't execute POST_SCRIPT environment variable '${backup_job_post_script}' as its filesystem bit is not executible!" write_log error "Can't execute POST_SCRIPT environment variable '${backup_job_post_script}' as its filesystem bit is not executible!"
fi fi
@@ -1529,20 +1702,20 @@ EOZP
### Post Backup Custom Script Support ### Post Backup Custom Script Support
if [ -d "/assets/custom-scripts/" ] && dir_notempty "/assets/custom-scripts" ; then if [ -d "/assets/custom-scripts/" ] && dir_notempty "/assets/custom-scripts" ; then
write_log warning "Found Custom Post Scripts in /assets/custom-scripts/ - Automatically moving them to '${backup_job_script_location_post}'" write_log warn "Found Custom Post Scripts in /assets/custom-scripts/ - Automatically moving them to '${backup_job_script_location_post}'"
run_as_user mkdir -p "${backup_job_script_location_post}" mkdir -p "${backup_job_script_location_post}"
silent run_as_user cp /assets/custom-scripts/* "${backup_job_script_location_post}" cp -aR /assets/custom-scripts/* "${backup_job_script_location_post}"
fi fi
if [ -d "${backup_job_script_location_post}" ] && dir_notempty "${backup_job_script_location_post}" ; then if [ -d "${backup_job_script_location_post}" ] && dir_notempty "${backup_job_script_location_post}" ; then
for f in $(run_as_user find "${backup_job_script_location_post}" -name \*.sh -type f); do for f in $(run_as_user find "${backup_job_script_location_post}" -name \*.sh -type f); do
if var_true "${backup_job_post_script_x_verify}" ; then if var_true "${backup_job_post_script_x_verify}" ; then
run_as_user ${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}" ${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else else
if [ -x "${f}" ] ; then if [ -x "${f}" ] ; then
write_log notice "Executing post backup custom script : '${f}'" write_log notice "Executing post backup custom script : '${f}'"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
run_as_user ${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}" ${f} "${exit_code}" "${dbtype}" "${backup_job_db_host}" "${1}" "${dbbackup_start_time}" "${dbbackup_finish_time}" "${dbbackup_total_time}" "${backup_job_filename}" "${filesize}" "${checksum_value}" "${move_exit_code}"
else else
write_log error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!" write_log error "Can't run post backup custom script: '${f}' as its filesystem bit is not executible!"
fi fi
@@ -1578,7 +1751,7 @@ process_limiter() {
} }
run_as_user() { run_as_user() {
s6-setuidgid dbbackup $@ sudo -Eu "${DBBACKUP_USER}" "$@"
} }
setup_mode() { setup_mode() {
@@ -1811,18 +1984,18 @@ timer() {
;; ;;
datetime) datetime)
time_begin=$(date -d "${backup_job_backup_begin}" +%s) time_begin=$(date -d "${backup_job_backup_begin}" +%s)
print_debug "BACKUP_BEGIN time = ${time_begin}" print_debug "[timer] [datetime] BACKUP_BEGIN time = ${time_begin}"
time_wait=$(( time_begin - time_current )) time_wait=$(( time_begin - time_current ))
print_debug "Difference in seconds: ${time_wait}" print_debug "[timer] [datetime] Difference in seconds: ${time_wait}"
if (( ${time_wait} < 0 )); then if (( ${time_wait} < 0 )); then
time_wait=$(( (${time_wait} + (${backup_job_backup_interval} - 1)) / (${backup_job_backup_interval} * 60) )) time_wait=$(( (${time_wait} + (${backup_job_backup_interval} - 1)) / (${backup_job_backup_interval} * 60) ))
time_wait=$(( ${time_wait} * -1 )) time_wait=$(( ${time_wait} * -1 ))
print_debug "Difference in seconds (rounded) time_wait is in the past : ${time_wait}" print_debug "[timer] [datetime] Difference in seconds (rounded) time_wait is in the past : ${time_wait}"
fi fi
time_future=$(( time_current + time_wait )) time_future=$(( time_current + time_wait ))
print_debug "Future execution time = ${time_future}" print_debug "[timer] [datetime] Future execution time = ${time_future}"
;; ;;
job) job)
case "${2}" in case "${2}" in

View File

@@ -29,7 +29,6 @@ bdgy="\e[100m" # Background Color Dark Gray
blr="\e[101m" # Background Color Light Red blr="\e[101m" # Background Color Light Red
boff="\e[49m" # Background Color Off boff="\e[49m" # Background Color Off
bootstrap_variables
if [ -z "${1}" ] ; then if [ -z "${1}" ] ; then
interactive_mode=true interactive_mode=true
@@ -75,10 +74,17 @@ EOF
esac esac
fi fi
# SIGINT (Ctrl-C) trap handler: clean up the temporary restore-variables
# file if it exists, notify the user, then exit the script.
control_c() {
    if [ -f "${restore_vars}" ] ; then
        rm -rf "${restore_vars}"
    fi
    print_warn "User aborted"
    exit
}
get_filename() { get_filename() {
COLUMNS=12 COLUMNS=12
prompt="Please select a file to restore:" prompt="Please select a file to restore:"
options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | sort -z | xargs -0) ) options=( $(find "${DEFAULT_FILESYSTEM_PATH}" -type f -maxdepth 2 -not -name '*.md5' -not -name '*.sha1' -print0 | sort -z | xargs -0) )
PS3="$prompt " PS3="$prompt "
select opt in "${options[@]}" "Custom" "Quit" ; do select opt in "${options[@]}" "Custom" "Quit" ; do
if (( REPLY == 2 + ${#options[@]} )) ; then if (( REPLY == 2 + ${#options[@]} )) ; then
@@ -104,13 +110,17 @@ get_filename() {
get_dbhost() { get_dbhost() {
p_dbhost=$(basename -- "${r_filename}" | cut -d _ -f 3) p_dbhost=$(basename -- "${r_filename}" | cut -d _ -f 3)
if [ -n "${p_dbhost}" ]; then if [ -n "${p_dbhost}" ]; then
parsed_host=true parsed_host=true
print_debug "Parsed DBHost: ${p_dbhost}" print_debug "Parsed DBHost: ${p_dbhost}"
if grep -q "${p_dbhost}" "${restore_vars}" ; then
detected_host_num=$(grep "${p_dbhost}" "${restore_vars}" | head -n1 | cut -c 3,4)
detected_host_value=$(grep "${p_dbhost}" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
fi
fi fi
if [ -z "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then if [ -z "${detected_host_value}" ] && [ -z "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 1 - No Env, No Parsed Filename" print_debug "Parsed DBHost Variant: 1 - No Env, No Parsed Filename"
q_dbhost_variant=1 q_dbhost_variant=1
q_dbhost_menu=$(cat <<EOF q_dbhost_menu=$(cat <<EOF
@@ -119,18 +129,18 @@ EOF
) )
fi fi
if [ -n "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then if [ -n "${detected_host_value}" ] && [ -z "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 2 - Env, No Parsed Filename" print_debug "Parsed DBHost Variant: 2 - Env, No Parsed Filename"
q_dbhost_variant=2 q_dbhost_variant=2
q_dbhost_menu=$(cat <<EOF q_dbhost_menu=$(cat <<EOF
C ) Custom Entered Hostname C ) Custom Entered Hostname
E ) Environment Variable DB_HOST: '${DB_HOST}' E ) Environment Variable DB${detected_host_num}_HOST: '${detected_host_value}'
EOF EOF
) )
fi fi
if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then if [ -z "${detected_host_value}" ] && [ -n "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 3 - No Env, Parsed Filename" print_debug "Parsed DBHost Variant: 3 - No Env, Parsed Filename"
q_dbhost_variant=3 q_dbhost_variant=3
q_dbhost_menu=$(cat <<EOF q_dbhost_menu=$(cat <<EOF
@@ -141,13 +151,13 @@ EOF
) )
fi fi
if [ -n "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then if [ -n "${detected_host_value}" ] && [ -n "${parsed_host}" ]; then
print_debug "Parsed DBHost Variant: 4 - Env, Parsed Filename" print_debug "Parsed DBHost Variant: 4 - Env, Parsed Filename"
q_dbhost_variant=4 q_dbhost_variant=4
q_dbhost_menu=$(cat <<EOF q_dbhost_menu=$(cat <<EOF
C ) Custom Entered Hostname C ) Custom Entered Hostname
E ) Environment Variable DB_HOST: '${DB_HOST}' E ) Environment Variable DB${detected_host_num}_HOST: '${detected_host_value}'
F ) Parsed Filename Host: '${p_dbhost}' F ) Parsed Filename Host: '${p_dbhost}'
EOF EOF
) )
@@ -174,7 +184,7 @@ EOF
;; ;;
2 ) 2 )
while true; do while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
case "${q_dbhost_menu,,}" in case "${q_dbhost_menu,,}" in
c* ) c* )
counter=1 counter=1
@@ -188,7 +198,7 @@ EOF
break break
;; ;;
e* | "" ) e* | "" )
r_dbhost=${DB_HOST} r_dbhost=${detected_host_value}
break break
;; ;;
q* ) q* )
@@ -200,7 +210,7 @@ EOF
;; ;;
3 ) 3 )
while true; do while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F\*${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
case "${q_dbhost_menu,,}" in case "${q_dbhost_menu,,}" in
c* ) c* )
counter=1 counter=1
@@ -227,7 +237,7 @@ EOF
4 ) 4 )
while true; do while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
case "${q_dbhost_menu,,}" in case "${q_dbhost_menu,,}" in
c* ) c* )
counter=1 counter=1
@@ -241,7 +251,7 @@ EOF
break break
;; ;;
e* | "" ) e* | "" )
r_dbhost=${DB_HOST} r_dbhost=${detected_host_value}
break break
;; ;;
f* ) f* )
@@ -258,6 +268,337 @@ EOF
esac esac
} }
# Interactively determine the database name to restore to (sets r_dbname).
# Candidate sources, depending on availability:
#   - name parsed from the backup filename (field 2 of "_"-separated name)
#   - DB<num>_NAME entry in ${restore_vars} matching the parsed name
#   - custom user entry
# Globals read : r_filename, detected_host_num, restore_vars, color vars
# Globals set  : p_dbname, parsed_name, detected_name_value, r_dbname
get_dbname() {
    p_dbname=$(basename -- "${r_filename}" | cut -d _ -f 2)
    if [ -n "${p_dbname}" ]; then
        parsed_name=true
        print_debug "Parsed DBName: ${p_dbname}"
    fi
    # BUGFIX: the assignment previously used 'grep -q', which suppresses all
    # output, so detected_name_value was always empty and the environment
    # variable option could never be detected or selected.
    if grep -q "^DB${detected_host_num}_NAME=${p_dbname}" "${restore_vars}" ; then
        detected_name_value=$(grep "^DB${detected_host_num}_NAME=${p_dbname}" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
    fi
    # Variant selection: which combination of env value / parsed filename is
    # available decides which menu the user sees below.
    if [ -z "${detected_name_value}" ] && [ -z "${parsed_name}" ]; then
        print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename"
        q_dbname_variant=1
        q_dbname_menu=$(cat <<EOF
EOF
)
    fi
    if [ -n "${detected_name_value}" ] && [ -z "${parsed_name}" ]; then
        print_debug "Parsed DBName Variant: 2 - Env, No Parsed Filename"
        q_dbname_variant=2
        q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
E ) Environment Variable DB${detected_host_num}_NAME: '${detected_name_value}'
EOF
)
    fi
    if [ -z "${detected_name_value}" ] && [ -n "${parsed_name}" ]; then
        print_debug "Parsed DBName Variant: 3 - No Env, Parsed Filename"
        q_dbname_variant=3
        q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
F ) Parsed Filename DB Name: '${p_dbname}'
EOF
)
    fi
    if [ -n "${detected_name_value}" ] && [ -n "${parsed_name}" ]; then
        print_debug "Parsed DBname Variant: 4 - Env, Parsed Filename"
        q_dbname_variant=4
        q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
E ) Environment Variable DB${detected_host_num}_NAME: '${detected_name_value}'
F ) Parsed Filename DB Name: '${p_dbname}'
EOF
)
    fi
    cat << EOF
What Database Name do you want to restore to?
${q_dbname_menu}
Q ) Quit
EOF
    case "${q_dbname_variant}" in
        1 )
            # No candidates: prompt until a space-free name is entered.
            counter=1
            q_dbname=" "
            while [[ $q_dbname = *" "* ]]; do
                if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
                read -e -p "$(echo -e ${clg}** ${cdgy}What DB Name do you want to restore to:\ ${coff})" q_dbname
                (( counter+=1 ))
            done
            r_dbname=${q_dbname}
            ;;
        2 )
            # Custom or env value (env is the default on empty input).
            while true; do
                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
                case "${q_dbname_menu,,}" in
                    c* )
                        counter=1
                        q_dbname=" "
                        while [[ $q_dbname = *" "* ]]; do
                            if [ $counter -gt 1 ] ; then print_error "DB Names can't have spaces in them, please re-enter." ; fi ;
                            read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
                            (( counter+=1 ))
                        done
                        r_dbname=${q_dbname}
                        break
                        ;;
                    e* | "" )
                        r_dbname=${detected_name_value}
                        break
                        ;;
                    q* )
                        print_info "Quitting Script"
                        exit 1
                        ;;
                esac
            done
            ;;
        3 )
            # Custom or parsed-filename value (parsed is the default).
            while true; do
                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F\*${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
                case "${q_dbname_menu,,}" in
                    c* )
                        counter=1
                        q_dbname=" "
                        while [[ $q_dbname = *" "* ]]; do
                            if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
                            read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
                            (( counter+=1 ))
                        done
                        r_dbname=${q_dbname}
                        break
                        ;;
                    f* | "" )
                        r_dbname=${p_dbname}
                        break
                        ;;
                    q* )
                        print_info "Quitting Script"
                        exit 1
                        ;;
                esac
            done
            ;;
        4 )
            # Custom, env (default), or parsed-filename value.
            while true; do
                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
                case "${q_dbname_menu,,}" in
                    c* )
                        counter=1
                        q_dbname=" "
                        while [[ $q_dbname = *" "* ]]; do
                            if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
                            read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
                            (( counter+=1 ))
                        done
                        r_dbname=${q_dbname}
                        break
                        ;;
                    e* | "" )
                        r_dbname=${detected_name_value}
                        break
                        ;;
                    f* )
                        r_dbname=${p_dbname}
                        break
                        ;;
                    q* )
                        print_info "Quitting Script"
                        exit 1
                        ;;
                esac
            done
            ;;
    esac
}
# Interactively determine the database password for the restore (sets r_dbpass).
# Sources: DB<num>_PASS entry in ${restore_vars}, or a custom user entry.
# Globals read : detected_host_num, restore_vars, color vars (clg/cdgy/cwh/coff)
# Globals set  : detected_pass_value, q_dbpass_variant, q_dbpass_menu, r_dbpass
get_dbpass() {
# Pull the password for the previously-detected host number from the
# captured environment variables file, if present.
if grep -q "^DB${detected_host_num}_PASS=" "${restore_vars}" ; then
detected_pass_value=$(grep "^DB${detected_host_num}_PASS=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
fi
# Variant 1: no env password found - only custom entry is possible.
if [ -z "${detected_pass_value}" ] ; then
print_debug "Parsed DBPass Variant: 1 - No Env"
q_dbpass_variant=1
q_dbpass_menu=$(cat <<EOF
EOF
)
fi
# Variant 2: env password found - offer custom entry or the env value
# (value itself is intentionally not echoed in the menu).
if [ -n "${detected_pass_value}" ] ; then
print_debug "Parsed DBPass Variant: 2 - Env"
q_dbpass_variant=2
q_dbpass_menu=$(cat <<EOF
C ) Custom Entered Database Password
E ) Environment Variable DB${detected_host_num}_PASS
EOF
)
fi
cat << EOF
What Database Password will be used to restore?
${q_dbpass_menu}
Q ) Quit
EOF
case "${q_dbpass_variant}" in
1 )
# Prompt until a space-free password is entered.
counter=1
q_dbpass=" "
while [[ $q_dbpass = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
(( counter+=1 ))
done
r_dbpass=${q_dbpass}
;;
2 )
# Custom (c), env value (e, also the default on empty input), or quit (q).
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) : ${cwh}${coff}) " q_dbpass_menu
case "${q_dbpass_menu,,}" in
c* )
counter=1
q_dbpass=" "
while [[ $q_dbpass = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
(( counter+=1 ))
done
r_dbpass=${q_dbpass}
break
;;
e* | "" )
r_dbpass=${detected_pass_value}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
# Interactively determine the database port for the restore (sets r_dbport).
# Sources: DB<num>_PORT entry in ${restore_vars}, the DEFAULT_PORT for the
# chosen database type (set earlier by get_dbtype), or a custom user entry.
# Globals read : detected_host_num, restore_vars, r_dbtype, DEFAULT_PORT,
#                color vars (clg/cdgy/cwh/coff)
# Globals set  : detected_port_value, q_dbport_variant, q_dbport_menu, r_dbport
get_dbport() {
# Pull the port for the previously-detected host number from the captured
# environment variables file, if present.
if grep -q "^DB${detected_host_num}_PORT=" "${restore_vars}" ; then
detected_port_value=$(grep "^DB${detected_host_num}_PORT=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
fi
# Variant 1: no env port found - offer custom entry or the type default.
if [ -z "${detected_port_value}" ] ; then
print_debug "Parsed DBPort Variant: 1 - No Env"
q_dbport_variant=1
# NOTE(review): q_dbport_menu_opt_default does not appear to be consumed
# within this function - verify whether it is used elsewhere.
q_dbport_menu_opt_default="| (${cwh}D${cdgy}) * "
q_dbport_menu=$(cat <<EOF
C ) Custom Entered Database Port
D ) Default Port for Database type '${r_dbtype}': '${DEFAULT_PORT}'
EOF
)
fi
# Variant 2: env port found - offer custom entry, type default, or env value.
if [ -n "${detected_port_value}" ] ; then
print_debug "Parsed DBPort Variant: 2 - Env"
q_dbport_variant=2
q_dbport_menu=$(cat <<EOF
C ) Custom Entered Database Port
D ) Default Port for Database type '${r_dbtype}': '${DEFAULT_PORT}'
E ) Environment Variable DB${detected_host_num}_PORT: '${detected_port_value}'
EOF
)
fi
cat << EOF
What Database Port do you wish to use? MySQL/MariaDB typcially listens on port 3306. Postrgresql port 5432. MongoDB 27017
${q_dbport_menu}
Q ) Quit
EOF
case "${q_dbport_variant}" in
1 )
# Custom (c), default port (d, also the default on empty input), or quit (q).
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}D\*${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
case "${q_dbport_menu,,}" in
c* )
# Prompt until the entry is purely numeric.
counter=1
q_dbport=" "
q_dbportre='^[0-9]+$'
while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
(( counter+=1 ))
done
r_dbport=${q_dbport}
break
;;
d* | "" )
r_dbport=${DEFAULT_PORT}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
2 )
# Custom (c), default port (d), env value (e, default on empty input), or quit (q).
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}D${cdgy}\) \| \(${cwh}E\*${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
case "${q_dbport_menu,,}" in
c* )
# Prompt until the entry is purely numeric.
counter=1
q_dbport=" "
q_dbportre='^[0-9]+$'
while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
(( counter+=1 ))
done
r_dbport=${q_dbport}
break
;;
d* )
r_dbport=${DEFAULT_PORT}
break
;;
e* | "" )
r_dbport=${detected_port_value}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
}
get_dbtype() { get_dbtype() {
p_dbtype=$(basename -- "${r_filename}" | cut -d _ -f 1) p_dbtype=$(basename -- "${r_filename}" | cut -d _ -f 1)
@@ -265,14 +606,17 @@ get_dbtype() {
case "${p_dbtype}" in case "${p_dbtype}" in
mongo* ) mongo* )
parsed_type=true parsed_type=true
DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
print_debug "Parsed DBType: MongoDB" print_debug "Parsed DBType: MongoDB"
;; ;;
mariadb | mysql ) mariadb | mysql )
parsed_type=true parsed_type=true
DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
print_debug "Parsed DBType: MariaDB/MySQL" print_debug "Parsed DBType: MariaDB/MySQL"
;; ;;
pgsql | postgres* ) pgsql | postgres* )
parsed_type=true parsed_type=true
DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
print_debug "Parsed DBType: Postgresql" print_debug "Parsed DBType: Postgresql"
;; ;;
* ) * )
@@ -339,14 +683,17 @@ EOF
case "${q_dbtype,,}" in case "${q_dbtype,,}" in
m* ) m* )
r_dbtype=mysql r_dbtype=mysql
DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
break break
;; ;;
o* ) o* )
r_dbtype=mongo r_dbtype=mongo
DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
break break
;; ;;
p* ) p* )
r_dbtype=postgresql r_dbtype=postgresql
DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
break break
;; ;;
q* ) q* )
@@ -366,14 +713,17 @@ EOF
;; ;;
m* ) m* )
r_dbtype=mysql r_dbtype=mysql
DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
break break
;; ;;
o* ) o* )
r_dbtype=mongo r_dbtype=mongo
DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
break break
;; ;;
p* ) p* )
r_dbtype=postgresql r_dbtype=postgresql
DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
break break
;; ;;
q* ) q* )
@@ -385,22 +735,36 @@ EOF
;; ;;
3 ) 3 )
while true; do while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \(Default\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in case "${q_dbtype,,}" in
f* | "" ) f* | "" )
r_dbtype=${p_dbtype} r_dbtype=${p_dbtype}
case "${r_dbtype}" in
mongo )
DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
;;
mysql )
DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
;;
pgsql )
DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
;;
esac
break break
;; ;;
m* ) m* )
r_dbtype=mysql r_dbtype=mysql
DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
break break
;; ;;
o* ) o* )
r_dbtype=mongo r_dbtype=mongo
DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
break break
;; ;;
p* ) p* )
r_dbtype=postgresql r_dbtype=postgresql
DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
break break
;; ;;
q* ) q* )
@@ -425,14 +789,17 @@ EOF
;; ;;
m* ) m* )
r_dbtype=mysql r_dbtype=mysql
DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
break break
;; ;;
o* ) o* )
r_dbtype=mongo r_dbtype=mongo
DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
break break
;; ;;
p* ) p* )
r_dbtype=postgresql r_dbtype=postgresql
DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
break break
;; ;;
q* ) q* )
@@ -445,235 +812,12 @@ EOF
esac esac
} }
# Interactively determine which database name to restore into.
#
# Two candidate sources are considered:
#   - the DB_NAME environment variable
#   - a name parsed from the backup filename (second '_'-separated field
#     of r_filename; assumes the <type>_<dbname>_<host>_... naming scheme
#     used by the backup writer — TODO confirm against backup side)
# Depending on which sources are available, one of four menu variants is
# shown and the user picks (or types) the name.
#
# Globals read:  r_filename, DB_NAME, color vars (clg/cdgy/cwh/coff)
# Globals set:   r_dbname (chosen database name), p_dbname, parsed_name
# Calls:         print_debug, print_error, print_info (defined elsewhere)
get_dbname() {
    p_dbname=$(basename -- "${r_filename}" | cut -d _ -f 2)
    if [ -n "${p_dbname}" ]; then
        parsed_name=true
        # BUGFIX: previously logged ${p_dbhost} (a different parser's
        # variable) instead of the name that was just parsed.
        print_debug "Parsed DBName: ${p_dbname}"
    fi

    # Variant selection: 1 = neither source, 2 = env only,
    # 3 = parsed filename only, 4 = both.
    if [ -z "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
        print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename"
        q_dbname_variant=1
        # No choices to offer — menu is intentionally empty.
        q_dbname_menu=$(cat <<EOF
EOF
        )
    fi
    if [ -n "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
        print_debug "Parsed DBName Variant: 2 - Env, No Parsed Filename"
        q_dbname_variant=2
        q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
E ) Environment Variable DB_NAME: '${DB_NAME}'
EOF
        )
    fi
    if [ -z "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
        print_debug "Parsed DBName Variant: 3 - No Env, Parsed Filename"
        q_dbname_variant=3
        q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
F ) Parsed Filename DB Name: '${p_dbname}'
EOF
        )
    fi
    if [ -n "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
        print_debug "Parsed DBname Variant: 4 - Env, Parsed Filename"
        q_dbname_variant=4
        q_dbname_menu=$(cat <<EOF
C ) Custom Entered Database Name
E ) Environment Variable DB_NAME: '${DB_NAME}'
F ) Parsed Filename DB Name: '${p_dbname}'
EOF
        )
    fi

    cat << EOF
What Database Name do you want to restore to?

${q_dbname_menu}
Q ) Quit
EOF

    case "${q_dbname_variant}" in
        1 )
            # No menu — prompt until a name with no spaces is entered.
            counter=1
            q_dbname=" "
            while [[ $q_dbname = *" "* ]]; do
                if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
                read -e -p "$(echo -e ${clg}** ${cdgy}What DB Name do you want to restore to:\ ${coff})" q_dbname
                (( counter+=1 ))
            done
            r_dbname=${q_dbname}
        ;;
        2 )
            # Choose between custom entry and the environment variable.
            while true; do
                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
                case "${q_dbname_menu,,}" in
                    c* )
                        counter=1
                        q_dbname=" "
                        while [[ $q_dbname = *" "* ]]; do
                            if [ $counter -gt 1 ] ; then print_error "DB Names can't have spaces in them, please re-enter." ; fi ;
                            read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
                            (( counter+=1 ))
                        done
                        r_dbname=${q_dbname}
                        break
                    ;;
                    e* | "" )
                        # Empty input defaults to the environment variable.
                        r_dbname=${DB_NAME}
                        break
                    ;;
                    q* )
                        print_info "Quitting Script"
                        exit 1
                    ;;
                esac
            done
        ;;
        3 )
            # Choose between custom entry and the filename-parsed name.
            while true; do
                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
                case "${q_dbname_menu,,}" in
                    c* )
                        counter=1
                        q_dbname=" "
                        while [[ $q_dbname = *" "* ]]; do
                            if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
                            read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
                            (( counter+=1 ))
                        done
                        r_dbname=${q_dbname}
                        break
                    ;;
                    f* | "" )
                        # Empty input defaults to the parsed filename value.
                        r_dbname=${p_dbname}
                        break
                    ;;
                    q* )
                        print_info "Quitting Script"
                        exit 1
                    ;;
                esac
            done
        ;;
        4 )
            # All three sources available: custom / env / parsed filename.
            while true; do
                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
                case "${q_dbname_menu,,}" in
                    c* )
                        counter=1
                        q_dbname=" "
                        while [[ $q_dbname = *" "* ]]; do
                            if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
                            read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
                            (( counter+=1 ))
                        done
                        r_dbname=${q_dbname}
                        break
                    ;;
                    e* | "" )
                        # Empty input defaults to the environment variable.
                        r_dbname=${DB_NAME}
                        break
                    ;;
                    f* )
                        r_dbname=${p_dbname}
                        break
                    ;;
                    q* )
                        print_info "Quitting Script"
                        exit 1
                    ;;
                esac
            done
        ;;
    esac
}
# Interactively determine the database port to connect to for the restore.
#
# If DB_PORT is set in the environment the user may accept it or enter a
# custom port; otherwise a numeric port is prompted for directly.  Input is
# validated against ^[0-9]+$ and re-prompted until it matches.
#
# Globals read:  DB_PORT, color vars (clg/cdgy/cwh/coff)
# Globals set:   r_dbport (chosen port)
# Calls:         print_debug, print_error, print_info (defined elsewhere)
get_dbport() {
    # Variant selection: 1 = no env var, 2 = DB_PORT present.
    if [ -z "${DB_PORT}" ] ; then
        print_debug "Parsed DBPort Variant: 1 - No Env"
        q_dbport_variant=1
        # No choices to offer — menu is intentionally empty.
        q_dbport_menu=$(cat <<EOF
EOF
        )
    fi
    if [ -n "${DB_PORT}" ] ; then
        print_debug "Parsed DBPort Variant: 2 - Env"
        q_dbport_variant=2
        q_dbport_menu=$(cat <<EOF
C ) Custom Entered Database Port
E ) Environment Variable DB_PORT: '${DB_PORT}'
EOF
        )
    fi

    # BUGFIX: corrected typos in the user-facing text
    # ("typcially" -> "typically", "Postrgresql" -> "PostgreSQL").
    cat << EOF
What Database Port do you wish to use? MySQL/MariaDB typically listens on port 3306. PostgreSQL port 5432. MongoDB 27017

${q_dbport_menu}
Q ) Quit
EOF

    case "${q_dbport_variant}" in
        1 )
            # Prompt until the input is all digits.
            counter=1
            q_dbport=" "
            q_dbportre='^[0-9]+$'
            while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
                if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
                read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
                (( counter+=1 ))
            done
            r_dbport=${q_dbport}
        ;;
        2 )
            # Choose between custom entry and the environment variable.
            while true; do
                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
                case "${q_dbport_menu,,}" in
                    c* )
                        counter=1
                        q_dbport=" "
                        q_dbportre='^[0-9]+$'
                        while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
                            if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
                            read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
                            (( counter+=1 ))
                        done
                        r_dbport=${q_dbport}
                        break
                    ;;
                    e* | "" )
                        # Empty input defaults to the environment variable.
                        r_dbport=${DB_PORT}
                        break
                    ;;
                    q* )
                        print_info "Quitting Script"
                        exit 1
                    ;;
                esac
            done
        ;;
    esac
}
get_dbuser() { get_dbuser() {
if [ -z "${DB_USER}" ] ; then if grep -q "^DB${detected_host_num}_USER=" "${restore_vars}" ; then
detected_user_value=$(grep "^DB${detected_host_num}_USER=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
fi
if [ -z "${detected_user_value}" ] ; then
print_debug "Parsed DBUser Variant: 1 - No Env" print_debug "Parsed DBUser Variant: 1 - No Env"
q_dbuser_variant=1 q_dbuser_variant=1
q_dbuser_menu=$(cat <<EOF q_dbuser_menu=$(cat <<EOF
@@ -682,13 +826,13 @@ EOF
) )
fi fi
if [ -n "${DB_USER}" ] ; then if [ -n "${detected_user_value}" ] ; then
print_debug "Parsed DBUser Variant: 2 - Env" print_debug "Parsed DBUser Variant: 2 - Env"
q_dbuser_variant=2 q_dbuser_variant=2
q_dbuser_menu=$(cat <<EOF q_dbuser_menu=$(cat <<EOF
C ) Custom Entered Database User C ) Custom Entered Database User
E ) Environment Variable DB_USER: '${DB_USER}' E ) Environment Variable DB${detected_host_num}_USER: '${detected_user_value}'
EOF EOF
) )
fi fi
@@ -728,7 +872,7 @@ EOF
break break
;; ;;
e* | "" ) e* | "" )
r_dbuser=${DB_USER} r_dbuser=${detected_user_value}
break break
;; ;;
q* ) q* )
@@ -741,76 +885,37 @@ EOF
esac esac
} }
get_dbpass() { get_filename() {
if [ -z "${DB_PASS}" ] ; then COLUMNS=12
print_debug "Parsed DBPass Variant: 1 - No Env" prompt="Please select a file to restore:"
q_dbpass_variant=1 options=( $(find "${DEFAULT_FILESYSTEM_PATH}" -type f -maxdepth 2 -not -name '*.md5' -not -name '*.sha1' -not -name '*.gpg' -print0 | sort -z | xargs -0) )
q_dbpass_menu=$(cat <<EOF PS3="$prompt "
select opt in "${options[@]}" "Custom" "Quit" ; do
EOF if (( REPLY == 2 + ${#options[@]} )) ; then
) echo "Bye!"
fi exit 2
elif (( REPLY == 1 + ${#options[@]} )) ; then
if [ -n "${DB_PASS}" ] ; then while [ ! -f "${opt}" ] ; do
print_debug "Parsed DBPass Variant: 2 - Env" read -p "What path and filename to restore: " opt
q_dbpass_variant=2 if [ ! -f "${opt}" ] ; then
q_dbpass_menu=$(cat <<EOF print_error "File not found. Please retry.."
fi
C ) Custom Entered Database Password
E ) Environment Variable DB_PASS
EOF
)
fi
cat << EOF
What Database Password will be used to restore?
${q_dbpass_menu}
Q ) Quit
EOF
case "${q_dbpass_variant}" in
1 )
counter=1
q_dbpass=" "
while [[ $q_dbpass = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
(( counter+=1 ))
done done
r_dbpass=${q_dbpass} break
;; elif (( REPLY > 0 && REPLY <= ${#options[@]} )) ; then
2 ) break
while true; do else
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbpass_menu echo "Invalid option. Try another one."
case "${q_dbpass_menu,,}" in fi
c* ) done
counter=1 COLUMNS=$oldcolumns
q_dbpass=" " r_filename=${opt}
while [[ $q_dbpass = *" "* ]]; do
if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
(( counter+=1 ))
done
r_dbpass=${q_dbpass}
break
;;
e* | "" )
r_dbpass=${DB_PASS}
break
;;
q* )
print_info "Quitting Script"
exit 1
;;
esac
done
;;
esac
} }
#### SCRIPT START #### SCRIPT START
trap control_c INT
bootstrap_variables restore_init
cat << EOF cat << EOF
## ${IMAGE_NAME} Restore Script ## ${IMAGE_NAME} Restore Script