Mirror of https://github.com/tiredofit/docker-db-backup.git (synced 2025-12-22 13:44:08 +01:00)

Compare commits (22 commits)
| Author | SHA1 | Date |
| ------ | ---- | ---- |
|  | 7f455abc1a |  |
|  | c16add4525 |  |
|  | d5769b1588 |  |
|  | 0b2c7836cf |  |
|  | 535e011740 |  |
|  | 5a391b908a |  |
|  | fddca646c8 |  |
|  | 68f954c59b |  |
|  | 0ab0a6d182 |  |
|  | f6bf2993f7 |  |
|  | 5cf00a8b8e |  |
|  | 2bc730013e |  |
|  | d628ed8ff4 |  |
|  | d7399667a1 |  |
|  | 9caec737e0 |  |
|  | 87a803512d |  |
|  | c6a8fb0ae0 |  |
|  | 8fafdeb45c |  |
|  | 4a3a79d328 |  |
|  | bad5057bcf |  |
|  | d2acfc4a88 |  |
|  | 3d794a819f |  |
CHANGELOG.md (63 changed lines)

@@ -1,3 +1,66 @@
+## 4.0.10 2023-11-11 <dave at tiredofit dot ca>
+
+### Changed
+
+- Change environment variable parsing routines to properly accomodate for Passwords containing '=='
+
+
+## 4.0.9 2023-11-11 <dave at tiredofit dot ca>
+
+### Changed
+
+- Fix issue with quotes being wrapped around _PASS variables
+
+
+## 4.0.8 2023-11-11 <dave at tiredofit dot ca>
+
+### Changed
+
+- Tidy up file_encryption() routines
+- Change environment variable _ENCRYPT_PUBKEY to _ENCRYPT_PUBLIC_KEY
+- Add new environment variable _ENCRYPT_PRIVATE_KEY
+
+
+## 4.0.7 2023-11-11 <dave at tiredofit dot ca>
+
+### Added
+
+- Add seperate permissions for _FILESYSTEM_PATH
+
+### Changed
+
+- More output and debugging additions
+- SQLite3 now backs up without running into file permission/access problems
+- Cleanup old sqlite backups from temp directory
+- Handle multiple SQLite3 backups concurrently
+
+
+## 4.0.6 2023-11-10 <dave at tiredofit dot ca>
+
+### Added
+
+- Add additional DEBUG_ statements
+
+### Changed
+
+- Fix issue with Influx DB not properly detecting the correct version
+
+
+## 4.0.5 2023-11-10 <dave at tiredofit dot ca>
+
+### Added
+
+- Add undocumented DBBACKUP_USER|GROUP environment variables for troubleshooting permissions
+- Add more verbosity when using DEBUG_ statements
+
+### Changed
+
+- Change _FILESYSTEM_PERMISSION to 600 from 700
+
+
+## 4.0.4 2023-11-09 <dave at tiredofit dot ca>
+
+### Added
+
+- Add support for restoring from different DB_ variables in restore script
+
+
+## 4.0.3 2023-11-09 <dave at tiredofit dot ca>
+
+### Changed
+
+- Resolve issue with _MYSQL_TLS_CERT_FILE not being read
+
+
 ## 4.0.2 2023-11-09 <dave at tiredofit dot ca>
 
 ### Changed
README.md (55 changed lines)

@@ -214,12 +214,13 @@ If these are set and no other defaults or variables are set explicitly, they wil
 
 Encryption occurs after compression and the encrypted filename will have a `.gpg` suffix
 
-| Variable | Description | Default |
-| ---------------------------- | ------------------------------------------- | ------- |
-| `DEFAULT_ENCRYPT` | Encrypt file after backing up with GPG | `FALSE` |
-| `DEFAULT_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG | |
-| *or* | | |
-| `DEFAULT_ENCRYPT_PUBKEY` | Path of public key to encrypt file with GPG | |
+| Variable | Description | Default | `_FILE` |
+| ----------------------------- | -------------------------------------------- | ------- | ------- |
+| `DEFAULT_ENCRYPT` | Encrypt file after backing up with GPG | `FALSE` | |
+| `DEFAULT_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG | | x |
+| *or* | | | |
+| `DEFAULT_ENCRYPT_PUBLIC_KEY` | Path of public key to encrypt file with GPG | | x |
+| `DEFAULT_ENCRYPT_PRIVATE_KEY` | Path of private key to encrypt file with GPG | | x |
 
 ##### Scheduling Options
 
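The `_FILE` column is new in this table; it appears to flag variables whose value can also be read from a file via a matching `*_FILE` variable (useful for secrets). Below is a minimal sketch of wiring up the renamed encryption variables; the image name, mount paths, and the omission of the usual DB01_* connection settings are assumptions for illustration only:

```bash
docker run -d \
  -e DEFAULT_ENCRYPT=TRUE \
  -e DEFAULT_ENCRYPT_PUBLIC_KEY=/certs/backup_public_key.asc \
  -e DEFAULT_ENCRYPT_PRIVATE_KEY=/certs/backup_private_key.asc \
  -v ./certs:/certs:ro \
  -v ./backups:/backup \
  tiredofit/db-backup   # image name assumed; DB01_* connection settings omitted here
```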
@@ -322,12 +323,13 @@ Options that are related to the value of `DEFAULT_BACKUP_LOCATION`
 
 If `DEFAULT_BACKUP_LOCTION` = `FILESYSTEM` then the following options are used:
 
 | Variable | Description | Default |
-| --------------------------------- | ----------------------------------------------------------------------------------------------------- | ------------------------------------- |
+| ------------------------------------ | ----------------------------------------------------------------------------------------------------- | ------------------------------------- |
 | `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
 | `DEFAULT_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
-| `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` |
-| `DEFAULT_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `700` |
+| `DEFAULT_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
+| `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` |
+| `DEFAULT_FILESYSTEM_PERMISSION` | Permissions to apply to files. | `600` |
 
 ###### S3
 
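In other words, with the defaults above the backup directory and the dump files now receive different modes. A rough sketch of the intended result (values taken from the table, file name made up):

```bash
# Directory mode comes from DEFAULT_FILESYSTEM_PATH_PERMISSION (700),
# file mode from DEFAULT_FILESYSTEM_PERMISSION (600).
chmod 700 /backup
chmod 600 /backup/mysql_app_db01_20231111.sql.gz   # hypothetical dump file
```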
@@ -475,12 +477,14 @@ Otherwise, override them per backup job. Additional backup jobs can be scheduled
 
 Encryption will occur after compression and the resulting filename will have a `.gpg` suffix
 
-| Variable | Description | Default |
-| ------------------------- | ------------------------------------------- | ------- |
-| `DB01_ENCRYPT` | Encrypt file after backing up with GPG | `FALSE` |
-| `DB01_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG | |
-| *or* | | |
-| `DB01_ENCRYPT_PUBKEY` | Path of public key to encrypt file with GPG | |
+| Variable | Description | Default | `_FILE` |
+| -------------------------- | -------------------------------------------- | ------- | ------- |
+| `DB01_ENCRYPT` | Encrypt file after backing up with GPG | `FALSE` | |
+| `DB01_ENCRYPT_PASSPHRASE` | Passphrase to encrypt file with GPG | | x |
+| *or* | | | |
+| `DB01_ENCRYPT_PUBLIC_KEY` | Path of public key to encrypt file with GPG | | x |
+| `DB01_ENCRYPT_PRIVATE_KEY` | Path of private key to encrypt file with GPG | | x |
 
 ##### Scheduling Options
 

@@ -492,7 +496,7 @@ Encryption will occur after compression and the resulting filename will have a `
 | | Absolute HHMM, e.g. `2330` or `0415` | |
 | | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
 | | Full datestamp e.g. `2023-12-21 23:30:00` | |
 | | Cron expression e.g. `30 23 * * *` [Understand the format](https://en.wikipedia.org/wiki/Cron) - *BACKUP_INTERVAL is ignored* | |
 | `DB01_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when backup interval executes) | `FALSE` |
 | | 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | |
 | `DB01_ARCHIVE_TIME` | Value in minutes to move all files files older than (x) from `DB01_BACKUP_FILESYSTEM_PATH` | |

@@ -597,12 +601,13 @@ Options that are related to the value of `DB01_BACKUP_LOCATION`
 
 If `DB01_BACKUP_LOCTION` = `FILESYSTEM` then the following options are used:
 
 | Variable | Description | Default |
-| ------------------------------ | ----------------------------------------------------------------------------------------------------- | --------------------------------- |
+| --------------------------------- | ----------------------------------------------------------------------------------------------------- | --------------------------------- |
 | `DB01_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
 | `DB01_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
-| `DB01_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DB01_FILESYSTEM_PATH/archive/` |
-| `DB01_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `700` |
+| `DB01_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
+| `DB01_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DB01_FILESYSTEM_PATH/archive/` |
+| `DB01_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `600` |
 
 ###### S3
 
@@ -8,8 +8,11 @@ source /assets/functions/10-db-backup
 source /assets/defaults/10-db-backup
 bootstrap_variables backup_init {{BACKUP_NUMBER}}
 bootstrap_variables parse_variables {{BACKUP_NUMBER}}
-PROCESS_NAME="{{BACKUP_NUMBER}}-${backup_job_db_host}__${backup_job_db_name}"
+if [ -z "${backup_job_db_name}" ]; then
+    PROCESS_NAME="{{BACKUP_NUMBER}}${backup_job_db_host//\//_}"
+else
+    PROCESS_NAME="{{BACKUP_NUMBER}}-${backup_job_db_host//\//_}__${backup_job_db_name}"
+fi
 
 trap ctrl_c INT
 
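The `${backup_job_db_host//\//_}` expansion replaces every `/` in the host value with `_`, which keeps the process name sane when the "host" is actually a file path (as with SQLite3). A quick, self-contained illustration with a made-up path:

```bash
backup_job_db_host="/data/app/database.sqlite3"   # hypothetical sqlite3 'host'
echo "01${backup_job_db_host//\//_}"              # prints: 01_data_app_database.sqlite3
```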
@@ -1,6 +1,8 @@
 #!/command/with-contenv bash
 
 BACKUP_JOB_CONCURRENCY=${BACKUP_JOB_CONCURRENCY:-"1"}
+DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
+DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
 DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
 DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
 DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}

@@ -13,7 +15,8 @@ DEFAULT_CREATE_LATEST_SYMLINK=${DEFAULT_CREATE_LATEST_SYMLINK:-"TRUE"}
 DEFAULT_ENABLE_PARALLEL_COMPRESSION=${DEFAULT_ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
 DEFAULT_ENCRYPT=${DEFAULT_ENCRYPT:-"FALSE"}
 DEFAULT_FILESYSTEM_PATH=${DEFAULT_FILESYSTEM_PATH:-"/backup"}
-DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"700"}
+DEFAULT_FILESYSTEM_PATH_PERMISSION=${DEFAULT_FILESYSTEM_PATH_PERMISSION:-"700"}
+DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
 DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"}
 DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"}
 DEFAULT_MYSQL_ENABLE_TLS=${DEFAULT_MYSQL_ENABLE_TLS:-"FALSE"}
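The ordering comment on `DBBACKUP_GROUP` matters because `${VAR:-default}` is expanded at assignment time; the group can only default to an overridden user if the user line has already been evaluated. A standalone sketch:

```bash
# With no overrides both resolve to "dbbackup"; exporting DBBACKUP_USER=backupsvc
# before this point would make the group default to "backupsvc" as well.
DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"}
echo "${DBBACKUP_USER}:${DBBACKUP_GROUP}"
```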
@@ -5,26 +5,26 @@ bootstrap_filesystem() {
     if [ ! -d "${backup_job_filesystem_path}" ]; then
         mkdir -p "${backup_job_filesystem_path}"
     fi
-    if [ "$(stat -c %U "${backup_job_filesystem_path}")" != "dbbackup" ] ; then chown -R dbbackup:dbbackup "${backup_job_filesystem_path}" ; fi
-    if [ "$(stat -c %a "${backup_job_filesystem_path}")" != "${backup_job_filesystem_permission}" ] ; then chmod -R "${backup_job_filesystem_permission}" "${backup_job_filesystem_path}" ; fi
+    if [ "$(stat -c %U "${backup_job_filesystem_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_path}" ; fi
+    if [ "$(stat -c %a "${backup_job_filesystem_path}")" != "${backup_job_filesystem_path_permission}" ] ; then chmod "${backup_job_filesystem_path_permission}" "${backup_job_filesystem_path}" ; fi
 
     if [ -d "${backup_job_filesystem_archive_path}" ]; then
-        if [ "$(stat -c %U "${backup_job_filesystem_archive_path}")" != "dbbackup" ] ; then chown -R dbbackup:dbbackup "${backup_job_filesystem_archive_path}" ; fi
-        if [ "$(stat -c %a "${backup_job_filesystem_archive_path}")" != "${backup_job_filesystem_permission}" ] ; then chmod -R "${backup_job_filesystem_permission}" "${backup_job_filesystem_archive_path}" ; fi
+        if [ "$(stat -c %U "${backup_job_filesystem_archive_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_archive_path}" ; fi
+        if [ "$(stat -c %a "${backup_job_filesystem_archive_path}")" != "${backup_job_filesystem_path_permission}" ] ; then chmod "${backup_job_filesystem_path_permission}" "${backup_job_filesystem_archive_path}" ; fi
     fi
 
     if [ ! -d "${LOG_PATH}" ]; then
         mkdir -p "${LOG_PATH}"
     fi
 
-    if [ "$(stat -c %U "${LOG_PATH}")" != "dbbackup" ] ; then chown dbbackup:dbbackup "${LOG_PATH}" ; fi
+    if [ "$(stat -c %U "${LOG_PATH}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${LOG_PATH}" ; fi
     if [ ! -d "${LOG_PATH}"/"$(date +'%Y%m%d')" ]; then run_as_user mkdir -p "${LOG_PATH}"/"$(date +'%Y%m%d')"; fi
     if [ "$(stat -c %a "${LOG_PATH}")" != "755" ] ; then chmod -R 755 "${LOG_PATH}" ; fi
 
     if [ ! -d "${TEMP_PATH}" ]; then
         mkdir -p "${TEMP_PATH}"
     fi
-    if [ "$(stat -c %U "${TEMP_PATH}")" != "dbbackup" ] ; then chown -R dbbackup:dbbackup "${TEMP_PATH}" ; fi
+    if [ "$(stat -c %U "${TEMP_PATH}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${TEMP_PATH}" ; fi
     if var_true "${DEBUG_BOOTSTRAP_FILESYSTEM}" ; then debug off; fi
 }
 
@@ -48,10 +48,11 @@ bootstrap_variables() {
             DEFAULT_USER \
             DEFAULT_PASS \
             DEFAULT_ENCRYPT_PASSPHRASE \
-            DEFAULT_ENCRYPT_PUBKEY \
+            DEFAULT_ENCRYPT_PUBLIC_KEY \
+            DEFAULT_ENCRYPT_PRIVATE_KEY \
             DEFAULT_MONGO_CUSTOM_URI \
             DEFAULT_MYSQL_TLS_CA_FILE \
-            DEFAULT_MYSQL_TLS_backup_job_filenameCERT_FILE \
+            DEFAULT_MYSQL_TLS_CERT_FILE \
             DEFAULT_MYSQL_TLS_KEY_FILE \
             DEFAULT_S3_BUCKET \
             DEFAULT_S3_KEY_ID \

@@ -74,7 +75,8 @@ bootstrap_variables() {
             DB"${backup_instance_number}"_USER \
             DB"${backup_instance_number}"_PASS \
             DB"${backup_instance_number}"_ENCRYPT_PASSPHRASE \
-            DB"${backup_instance_number}"_ENCRYPT_PUBKEY \
+            DB"${backup_instance_number}"_ENCRYPT_PUBLIC_KEY \
+            DB"${backup_instance_number}"_ENCRYPT_PRIVATE_KEY \
             DB"${backup_instance_number}"_MONGO_CUSTOM_URI \
             DB"${backup_instance_number}"_MYSQL_TLS_CA_FILE \
             DB"${backup_instance_number}"_MYSQL_TLS_CERT_FILE \
@@ -151,18 +153,23 @@ bootstrap_variables() {
         fi
         ##
 
+        if grep -qo ".*_PASS='.*'" "${backup_instance_vars}"; then
+            print_debug "[bootstrap_variables] [backup_init] Found _PASS variable with quotes"
+            sed -i "s|_PASS='\(.*\)'|_PASS=\1|g" "${backup_instance_vars}"
+        fi
+
         transform_backup_instance_variable() {
             if grep -q "^DB${1}_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
-                export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2)"
+                export "$3"="$(grep "^DB${1}_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
             elif grep -q "^DB_${2}=" "${backup_instance_vars}" && [ "$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
                 # Allow old legacy work, perhaps remove old DB_ functionality in future? This should allow for seamless upgrades
                 #print_warn "Legacy Variable 'DB_${2}'' detected - Please upgrade your variables as they will be removed in version 4.3.0"
-                export "$3"="$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2)"
+                export "$3"="$(grep "^DB_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
             elif grep -q "^${2}=" "${backup_instance_vars}" && [ "$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
                 print_warn "Legacy unsupported variable '${2}' detected - Please upgrade your variables as they will be removed in version 4.3.0"
-                export "$3"="$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2)"
+                export "$3"="$(grep "^${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
             elif grep -q "^DEFAULT_${2}=" "${backup_instance_vars}" && [ "$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2)" != "unset" ]; then
-                export "$3"="$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2)"
+                export "$3"="$(grep "^DEFAULT_${2}=" "${backup_instance_vars}" | cut -d = -f2-)"
             fi
         }
 
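The `-f2` to `-f2-` change is what the 4.0.10 changelog entry refers to: `cut -d = -f2` returns only the second `=`-delimited field, so a value that itself contains `=` (for example a base64-style password ending in `==`) gets truncated, while `-f2-` keeps everything after the first `=`. A quick illustration with a made-up value:

```bash
line="DB01_PASS=c2VjcmV0cGFzcw=="        # hypothetical entry from the temp vars file
echo "${line}" | cut -d = -f2            # c2VjcmV0cGFzcw     (truncated at the next '=')
echo "${line}" | cut -d = -f2-           # c2VjcmV0cGFzcw==   (full value preserved)
```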
@@ -185,12 +192,14 @@ bootstrap_variables() {
         transform_backup_instance_variable "${backup_instance_number}" ENABLE_PARALLEL_COMPRESSION backup_job_parallel_compression
         transform_backup_instance_variable "${backup_instance_number}" ENCRYPT backup_job_encrypt
         transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PASSPHRASE backup_job_encrypt_passphrase
-        transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBKEY backup_job_encrypt_pubkey
+        transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PRIVATE_KEY backup_job_encrypt_private_key
+        transform_backup_instance_variable "${backup_instance_number}" ENCRYPT_PUBLIC_KEY backup_job_encrypt_public_key
         transform_backup_instance_variable "${backup_instance_number}" EXTRA_DUMP_OPTS backup_job_extra_dump_opts
         transform_backup_instance_variable "${backup_instance_number}" EXTRA_ENUMERATION_OPTS backup_job_extra_enumeration_opts
         transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts
         transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path
         transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH backup_job_filesystem_path
+        transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH_PERMISSION backup_job_filesystem_path_permission
         transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PERMISSION backup_job_filesystem_permission
         transform_backup_instance_variable "${backup_instance_number}" GZ_RSYNCABLE backup_job_gz_rsyncable
         transform_backup_instance_variable "${backup_instance_number}" HOST backup_job_db_host
@@ -237,21 +246,84 @@ bootstrap_variables() {
         transform_backup_instance_variable "${backup_instance_number}" USER backup_job_db_user
 
         backup_job_backup_begin=$(echo "${backup_job_backup_begin}" | sed -e "s|'||g" -e 's|"||g')
+        if var_true "${DEBUG_BACKUP_INSTANCE_VARIABLE}" ; then cat <<EOF
+## BEGIN Variable Dump $(TZ=${TIMEZONE} date)
+
+$(cat ${backup_instance_vars})
+
+## END
+EOF
+        fi
         rm -rf "${backup_instance_vars}"
     }
 
-    upgrade_lonely_variables() {
-        upgrade_lonely_variables_tmp=$(mktemp)
-        set -o posix; set | grep "^$1" | grep -v "^$1[0-9]._" > "${upgrade_lonely_variables_tmp}"
-        while read -r exist_var ; do
-            if [ ! -v "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2)" ] ; then
-                export "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2-9)"="$(echo "${exist_var}" | cut -d = -f2)"
-            else
-                print_error "Variable ${1}01_$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2) already exists, fix your variables and start again.."
-                exit 1
-            fi
-        done < "${upgrade_lonely_variables_tmp}"
-        rm -rf "${upgrade_lonely_variables_tmp}"
+    restore_init() {
+        restore_vars=$(mktemp)
+        set -o posix ; set | grep -oE "^restore_job_.*=" | grep -oE ".*=" | sed "/--/d" > "${restore_vars}"
+        while read -r restore_var ; do
+            unset "$(echo "${restore_var}" | cut -d = -f 1)"
+        done < "${restore_vars}"
+
+        if [ -n "${DB_NAME}" ] && [ -z "${DB01_NAME}" ] ; then export DB01_NAME="${DB_NAME}" ; unset DB_NAME ; fi
+        if [ -n "${DB_USER}" ] && [ -z "${DB01_USER}" ] ; then export DB01_USER="${DB_USER}" ; unset DB_USER ; fi
+        if [ -n "${DB_PASS}" ] && [ -z "${DB01_PASS}" ] ; then export DB01_PASS="${DB_PASS}" ; unset DB_PASS ; fi
+        if [ -n "${DB_TYPE}" ] && [ -z "${DB01_TYPE}" ] ; then export DB01_TYPE="${DB_TYPE}" ; unset DB_TYPE ; fi
+        if [ -n "${DB_PORT}" ] && [ -z "${DB01_PORT}" ] ; then export DB01_PORT="${DB_PORT}" ; unset DB_PORT ; fi
+        if [ -n "${DB_HOST}" ] && [ -z "${DB01_HOST}" ] ; then export DB01_HOST="${DB_HOST}" ; unset DB_HOST ; fi
+        if [ -n "${DB_AUTH}" ] && [ -z "${DB01_AUTH}" ] ; then export DB01_AUTH="${DB_AUTH}" ; unset DB_AUTH ; fi
+        if [ -n "${MONGO_CUSTOM_URI}" ] && [ -z "${DB01_MONGO_CUSTOM_URI}" ] ; then export DB01_MONGO_CUSTOM_URI="${DB_MONGO_CUSTOM_URI}" ; unset MONGO_CUSTOM_URI ; fi
+        if [ -n "${MYSQL_TLS_CA_FILE}" ] && [ -z "${DB01_MYSQL_TLS_CA_FILE}" ] ; then export DB01_MYSQL_TLS_CA_FILE="${MYSQL_TLS_CA_FILE}" ; unset MYSQL_TLS_CA_FILE ; fi
+        if [ -n "${MYSQL_TLS_CERT_FILE}" ] && [ -z "${DB01_MYSQL_TLS_CERT_FILE}" ] ; then export DB01_MYSQL_TLS_CERT_FILE="${MYSQL_TLS_CERT_FILE}" ; unset MYSQL_TLS_CERT_FILE ; fi
+        if [ -n "${MYSQL_TLS_KEY_FILE}" ] && [ -z "${DB01_MYSQL_TLS_KEY_FILE}" ] ; then export DB01_MYSQL_TLS_KEY_FILE="${MYSQL_TLS_KEY_FILE}" ; unset MYSQL_TLS_KEY_FILE ; fi
+
+        transform_file_var \
+            DEFAULT_AUTH \
+            DEFAULT_HOST \
+            DEFAULT_NAME \
+            DEFAULT_PASS \
+            DEFAULT_PORT \
+            DEFAULT_TYPE \
+            DEFAULT_USER \
+            DEFAULT_MONGO_CUSTOM_URI \
+            DEFAULT_MYSQL_TLS_CA_FILE \
+            DEFAULT_MYSQL_TLS_CERT_FILE \
+            DEFAULT_MYSQL_TLS_KEY_FILE
+
+        set -o posix ; set | grep -E "^DEFAULT_" > "${restore_vars}"
+
+        restore_instances=$(printenv | sort | grep -c '^DB.*_HOST')
+
+        for (( restore_instance_number = 01; restore_instance_number <= restore_instances; restore_instance_number++ )) ; do
+            restore_instance_number=$(printf "%02d" $restore_instance_number)
+            transform_file_var \
+                DB"${restore_instance_number}"_AUTH \
+                DB"${restore_instance_number}"_HOST \
+                DB"${restore_instance_number}"_NAME \
+                DB"${restore_instance_number}"_PASS \
+                DB"${restore_instance_number}"_PORT \
+                DB"${restore_instance_number}"_TYPE \
+                DB"${restore_instance_number}"_USER \
+                DB"${restore_instance_number}"_MONGO_CUSTOM_URI \
+                DB"${restore_instance_number}"_MYSQL_TLS_CA_FILE \
+                DB"${restore_instance_number}"_MYSQL_TLS_CERT_FILE \
+                DB"${restore_instance_number}"_MYSQL_TLS_KEY_FILE
+
+            set -o posix ; set | grep -E "^DB${restore_instance_number}_" >> "${restore_vars}"
+        done
+
+        if [ -n "${DB_DUMP_TARGET}" ]; then
+            print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET' detected - Please upgrade your variables as they will be removed in version 4.3.0"
+            sed -i "/DEFAULT_FILESYSTEM_PATH=/d" "${restore_vars}"
+            echo "DEFAULT_FILESYSTEM_PATH=${DB_DUMP_TARGET}" >> "${restore_vars}"
+        fi
+
+        if [ -n "${DB_DUMP_TARGET_ARCHIVE}" ]; then
+            print_warn "Deprecated and unsupported variable 'DB_DUMP_TARGET_ACRHIVE' detected - Please upgrade your variables as they will be removed in version 4.3.0"
+            sed -i "/DEFAULT_FILESYSTEM_ARCHIVE_PATH=/d" "${restore_vars}"
+            echo "DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DB_DUMP_TARGET_ARCHIVE}" >> "${restore_vars}"
+        fi
+
+        echo "RESTORE_VARS is ${restore_vars}"
     }
 
     parse_variables() {
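Both the backup and restore loops derive the per-instance variable prefix (DB01_, DB02_, ...) from a zero-padded counter, which is what the `printf "%02d"` call supplies. A trivial illustration:

```bash
printf "DB%02d_HOST\n" 1    # DB01_HOST
printf "DB%02d_HOST\n" 12   # DB12_HOST
```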
@@ -260,7 +332,7 @@ bootstrap_variables() {
         ## Check is Variable is Defined
         ## Usage: check_var transformed_varname real_varname "Description"
         output_off
-        print_debug "Looking for existence of $2 environment variable"
+        print_debug "[parse_variables] Looking for existence of $2 environment variable"
         if [ ! -v "$1" ]; then
             print_error "No '$3' Entered! - Set '\$$2' environment variable - Halting Backup Number ${v_instance}"
             s6-svc -d /var/run/s6/legacy-services/dbbackup-"${v_instance}"

@@ -368,9 +440,24 @@ bootstrap_variables() {
     if var_true "${backup_job_resource_optimized}" ; then play_fair="nice -19 ionice -c2" ; fi
 }
 
+    upgrade_lonely_variables() {
+        upgrade_lonely_variables_tmp=$(mktemp)
+        set -o posix; set | grep "^$1" | grep -v "^$1[0-9]._" > "${upgrade_lonely_variables_tmp}"
+        while read -r exist_var ; do
+            if [ ! -v "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2)" ] ; then
+                export "${1}"01_"$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2-9)"="$(echo "${exist_var}" | cut -d = -f2)"
+            else
+                print_error "Variable ${1}01_$(echo "${exist_var}" | cut -d = -f1 | cut -d _ -f2) already exists, fix your variables and start again.."
+                exit 1
+            fi
+        done < "${upgrade_lonely_variables_tmp}"
+        rm -rf "${upgrade_lonely_variables_tmp}"
+    }
+
     case "${1}" in
         backup_init ) backup_init "$2" ;;
         parse_variables) parse_variables "$2" ;;
+        restore_init) restore_init ;;
         upgrade ) upgrade_lonely_variables "$2" ;;
     esac
 
@@ -381,12 +468,7 @@ backup_couch() {
     prepare_dbbackup
     backup_job_filename=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt
    backup_job_filename_base=couch_${backup_job_db_name}_${backup_job_db_host#*//}
     compression
-    pre_dbbackup ${backup_job_db_name}
-    write_log notice "Dumping CouchDB database: '${backup_job_db_name}' ${compression_string}"
-    if var_true "${DEBUG_BACKUP_COUCH}" ; then debug on; fi
-    run_as_user curl -sSL -X GET ${backup_job_db_host}:${backup_job_db_port}/${backup_job_db_name}/_all_docs?include_docs=true | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
-    exit_code=$?
     if var_true "${DEBUG_BACKUP_COUCH}" ; then debug off; fi
     check_exit_code backup "${backup_job_filename}"
     timer backup finish
@@ -398,20 +480,25 @@ backup_couch() {
 }
 
 backup_influx() {
+    if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
     if [ "${backup_job_db_name,,}" = "all" ] ; then
-        write_log debug "Preparing to back up everything"
+        write_log debug "[backup_influx] Preparing to back up everything"
         db_names=justbackupeverything
     else
         db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
     fi
+    if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
 
-    case "${backup_job_db_influx_version,,}" in
+    case "${backup_job_influx_version,,}" in
         1 )
+            print_debug "[backup_influx] Influx DB Version 1 selected"
             for db in ${db_names}; do
                 prepare_dbbackup
+                if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
                 if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
                 backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}
                 backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
+                if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
                 compression
                 pre_dbbackup "${db}"
                 write_log notice "Dumping Influx database: '${db}'"

@@ -433,9 +520,12 @@ backup_influx() {
             done
         ;;
         2 )
+            print_debug "[backup_influx] Influx DB Version 2 selected"
             for db in ${db_names}; do
                 prepare_dbbackup
+                if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
                 if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
+                if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
                 backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}
                 backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
                 compression

@@ -445,10 +535,10 @@ backup_influx() {
                 run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --compression none "${TEMP_PATH}"/"${backup_job_filename_dir}"
                 exit_code=$?
                 check_exit_code backup "${backup_job_filename_dir}"
+                if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
                 create_archive
                 backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
                 backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
-                if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
                 timer backup finish
                 file_encryption
                 generate_checksum
@@ -462,6 +552,7 @@ backup_influx() {
 
 backup_mongo() {
     prepare_dbbackup
+    if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi
     if [ "${backup_job_compression,,}" = "none" ] ; then
         backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.archive
         backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}

@@ -476,6 +567,7 @@ backup_mongo() {
     else
         mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
     fi
+    if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
     pre_dbbackup "${backup_job_db_name}"
     write_log notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
     if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi

@@ -507,8 +599,10 @@ backup_mssql() {
     backup_job_filename_original=${backup_job_filename}
     compression
     pre_dbbackup all
+    if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
     run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
     check_exit_code backup "${backup_job_filename}"
+    if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
     timer backup finish
     file_encryption
     generate_checksum
@@ -542,7 +636,7 @@ backup_mssql() {
 }
 
 backup_mysql() {
+    if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
     if var_true "${backup_job_mysql_events}" ; then
         events="--events"
     fi

@@ -566,7 +660,7 @@ backup_mysql() {
     else
         db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
     fi
+    if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
     write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
 
     if var_true "${backup_job_split_db}" ; then
@@ -631,6 +725,7 @@ backup_pgsql() {
         post_dbbackup "globals"
     }
 
+    if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
     export PGPASSWORD=${backup_job_db_pass}
     if [ -n "${backup_job_db_auth}" ] ; then
         authdb=${backup_job_db_auth}

@@ -654,7 +749,7 @@ backup_pgsql() {
     fi
 
     if var_false "${_postgres_backup_globals}" && var_true "${backup_job_backup_pgsql_globals}" ; then _postgres_backup_globals=true; fi
+    if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
     write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
 
     if var_true "${backup_job_split_db}" ; then

@@ -686,6 +781,7 @@ backup_pgsql() {
         compression
         pre_dbbackup all
         write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
+        if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
         tmp_db_names=$(run_as_user psql -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
         for r_db_name in $(echo $db_names | xargs); do
             tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )

@@ -694,7 +790,6 @@ backup_pgsql() {
         for x_db_name in ${tmp_db_names} ; do
             pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
         done
-        if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
         run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
         exit_code=$?
         if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
@@ -731,11 +826,13 @@ backup_redis() {
         sleep 5
     done
     backup_job_filename_original=${backup_job_filename}
+    if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi
     compression
     pre_dbbackup all
-    run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
-    timer backup finish
     if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
+    run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
+    if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi
+    timer backup finish
     check_exit_code backup "${backup_job_filename}"
     file_encryption
     generate_checksum
@@ -750,16 +847,21 @@ backup_sqlite3() {
     db="${db%.*}"
     backup_job_filename=sqlite3_${db}_${now}.sqlite3
     backup_job_filename_base=sqlite3_${db}.sqlite3
-    compression
     pre_dbbackup "${db}"
     write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}"
     if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug on; fi
-    silent run_as_user ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${TEMP_PATH}/backup.sqlite3'"
+    silent ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${TEMP_PATH}/backup_${now}.sqlite3'"
     exit_code=$?
     check_exit_code backup "${backup_job_filename}"
-    run_as_user ${play_fair} cat "${TEMP_PATH}"/backup.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}/${backup_job_filename}" > /dev/null
-    timer backup finish
+    if [ ! -f "${TEMP_PATH}"/backup_${now}.sqlite3 ] ; then
+        print_error "SQLite3 backup failed! Exitting"
+        return 1
+    fi
+    compression
+    run_as_user ${play_fair} cat "${TEMP_PATH}"/backup_${now}.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}/${backup_job_filename}" > /dev/null
+    rm -rf "${TEMP_PATH}"/backup_${now}.sqlite3
     if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug off; fi
+    timer backup finish
     file_encryption
     generate_checksum
     move_dbbackup
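For context, the `.backup` dot command used above is SQLite's online backup mechanism: it copies a consistent snapshot of the database even while it is being written to. A standalone sketch of the same idea outside the container (paths are made up):

```bash
# Snapshot a live SQLite database, then compress the copy.
sqlite3 /data/app.sqlite3 ".backup '/tmp/app_$(date +%Y%m%d%H%M%S).sqlite3'"
gzip -9 /tmp/app_*.sqlite3
```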
@@ -966,6 +1068,7 @@ compression() {
 
     case "${backup_job_compression,,}" in
         bz* )
+            print_debug "[compression] Selected BZIP"
             compress_cmd="${play_fair} pbzip2 -q -${backup_job_compression_level} -p${backup_job_parallel_compression_threads} "
             compression_type="bzip2"
             dir_compress_cmd=${compress_cmd}

@@ -974,6 +1077,7 @@ compression() {
             backup_job_filename=${backup_job_filename}.bz2
         ;;
         gz* )
+            print_debug "[compression] Selected GZIP"
             compress_cmd="${play_fair} pigz -q -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} ${gz_rsyncable}"
             compression_type="gzip"
             extension=".gz"

@@ -982,6 +1086,7 @@ compression() {
             backup_job_filename=${backup_job_filename}.gz
         ;;
         xz* )
+            print_debug "[compression] Selected XZIP"
             compress_cmd="${play_fair} pixz -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} "
             compression_type="xzip"
             dir_compress_cmd=${compress_cmd}

@@ -990,6 +1095,7 @@ compression() {
             backup_job_filename=${backup_job_filename}.xz
         ;;
         zst* )
+            print_debug "[compression] Selected ZSTD"
             compress_cmd="${play_fair} zstd -q -q --rm -${backup_job_compression_level} -T${backup_job_parallel_compression_threads} ${gz_rsyncable}"
             compression_type="zstd"
             dir_compress_cmd=${compress_cmd}
@@ -1038,9 +1144,10 @@ create_schedulers() {
     backup() {
         bootstrap_variables upgrade BACKUP
         local backup_instances=$(printenv | sort | grep -c "^DB[0-9]._HOST")
+        print_debug "[create_schedulers] Found '${backup_instances}' DB_HOST instances"
         if [ -n "${DB_HOST}" ] && [ "${backup_instances}" ]; then
             backup_instances=1;
-            print_debug "Detected using old DB_ variables"
+            print_debug "[create_schedulers] Detected using old DB_ variables"
         fi
 
         for (( instance = 01; instance <= backup_instances; )) ; do

@@ -1081,7 +1188,7 @@ EOF
 }
 
 ctrl_c() {
-    sed -i "/^{{BACKUP_NUMBER}}/d" /tmp/.container/db-backup-backups
+    sed -i "/^${backup_instance_number}/d" /tmp/.container/db-backup-backups
     symlink_log
     print_warn "User aborted"
     exit
@@ -1096,7 +1203,11 @@ db_backup_container_init() {
 debug() {
     case "${1}" in
         off)
+            backup_job_log_level=${_original_job_log_level}
+            CONTAINER_LOG_LEVEL=${_original_container_log_level}
             DEBUG_MODE=${_original_debug_mode}
+            SHOW_OUTPUT=${_original_show_output}
 
             if var_true "${DEBUG_MODE}" ; then
                 set -x
             else

@@ -1104,9 +1215,25 @@ debug() {
             fi
         ;;
         on)
+            if [ -z "${_original_container_log_level}" ]; then
+                _original_container_log_level="${CONTAINER_LOG_LEVEL}"
+            fi
+            if [ -z "${_original_job_log_level}" ]; then
+                _original_job_log_level="${backup_job_log_level}"
+            fi
+
             if [ -z "${_original_debug_mode}" ]; then
                 _original_debug_mode="${DEBUG_MODE}"
             fi
+            if [ -z "${_original_show_output}" ]; then
+                _original_show_output="${SHOW_OUTPUT}"
+                if ! [[ "${_original_show_output,,}" =~ true|false ]]; then
+                    __original_show_output="FALSE"
+                fi
+            fi
+            backup_job_log_level=DEBUG
+            CONTAINER_LOG_LEVEL=DEBUG
+            SHOW_OUTPUT=TRUE
             set -x
         ;;
     esac
@@ -1116,27 +1243,33 @@ file_encryption() {
|
|||||||
if var_true "${DEBUG_FILE_ENCRYPTION}" ; then debug on; fi
|
if var_true "${DEBUG_FILE_ENCRYPTION}" ; then debug on; fi
|
||||||
if var_true "${backup_job_encrypt}" ; then
|
 if var_true "${backup_job_encrypt}" ; then
 if [ "${exit_code}" = "0" ] ; then
-print_debug "Encrypting"
+print_debug "[file_encryption] Encrypting"
 output_off
-if [ -n "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_pubkey}" ]; then
+if [ -n "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ]; then
 print_error "Can't encrypt as both ENCRYPT_PASSPHRASE and ENCRYPT_PUBKEY exist!"
 return
-elif [ -n "${backup_job_encrypt_passphrase}" ] && [ -z "${backup_job_encrypt_pubkey}" ]; then
+elif [ -n "${backup_job_encrypt_passphrase}" ] && [ -z "${backup_job_encrypt_public_key}" ]; then
 print_notice "Encrypting with GPG Passphrase"
 encrypt_routines_start_time=$(date +'%s')
 encrypt_tmp_dir=$(run_as_user mktemp -d)
 echo "${backup_job_encrypt_passphrase}" | silent run_as_user ${play_fair} gpg --batch --home ${encrypt_tmp_dir} --yes --passphrase-fd 0 -c "${TEMP_PATH}"/"${backup_job_filename}"
 rm -rf "${encrypt_tmp_dir}"
-elif [ -z "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_pubkey}" ]; then
+elif [ -z "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_public_key}" ] && [ -n "${backup_job_encrypt_private_key}" ]; then
-if [ -f "${backup_job_encrypt_pubkey}" ]; then
+if [ -f "${backup_job_encrypt_private_key}" ]; then
 encrypt_routines_start_time=$(date +'%s')
-print_notice "Encrypting with GPG Public Key"
+print_notice "Encrypting with GPG Private Key"
 encrypt_tmp_dir=$(run_as_user mktemp -d)
-silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --recipient-file "${backup_job_encrypt_pubkey}" -c "${TEMP_PATH}"/"${backup_job_filename}"
+cat "${backup_job_encrypt_private_key}" | run_as_user tee "${encrypt_tmp_dir}"/private_key.asc > /dev/null
+print_debug "[file_encryption] [key] Importing Private Key"
+silent run_as_user gpg --home ${encrypt_tmp_dir} --batch --import "${encrypt_tmp_dir}"/private_key.asc
+print_debug "[file_encryption] [key] Encrypting to Public Key"
+cat "${backup_job_encrypt_public_key}" | run_as_user tee "${encrypt_tmp_dir}"/public_key.asc > /dev/null
+silent run_as_user ${play_fair} gpg --batch --yes --home "${encrypt_tmp_dir}" --encrypt --recipient-file "${encrypt_tmp_dir}"/public_key.asc "${TEMP_PATH}"/"${backup_job_filename}"
 rm -rf "${encrypt_tmp_dir}"
 fi
 fi
 if [ -f "${TEMP_PATH}"/"${backup_job_filename}".gpg ]; then
+print_debug "[file_encryption] Deleting original file"
 rm -rf "${TEMP_PATH:?}"/"${backup_job_filename:?}"
 backup_job_filename="${backup_job_filename}.gpg"

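The hunk above changes the key-based path: instead of encrypting straight against the old `_ENCRYPT_PUBKEY` file, the job now copies the private key into a throwaway GPG home, imports it, and encrypts to the new `_ENCRYPT_PUBLIC_KEY`. As a hedged sketch only (key IDs, file paths, the `DB01_` prefix and the example backup filename below are assumptions, not taken from this diff), the key pair could be prepared and a finished backup verified from the host roughly like this:

```bash
# Hedged sketch: identities, paths and variable prefixes are illustrative.
gpg --batch --pinentry-mode loopback --passphrase '' \
    --quick-generate-key "dbbackup@example.com" default default never
gpg --armor --export dbbackup@example.com             > backup_public_key.asc
gpg --armor --export-secret-keys dbbackup@example.com > backup_private_key.asc

# Mount both files into the container and point the backup job at them, e.g.
#   DB01_ENCRYPT_PUBLIC_KEY=/keys/backup_public_key.asc
#   DB01_ENCRYPT_PRIVATE_KEY=/keys/backup_private_key.asc

# After a run, confirm that an encrypted dump (.gpg suffix) decrypts cleanly:
gpg --import backup_private_key.asc
gpg --decrypt some_backup_file.sql.gz.gpg > some_backup_file.sql.gz
```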
@@ -1146,6 +1279,9 @@ file_encryption() {
 - dbbackup.backup.encrypt.duration.[${backup_job_db_host}.${backup_job_db_name}] ${encrypt_routines_total_time}
 EOF
 )
+else
+print_error "Encryption failed! Could not detect encrypted file"
+return 99
 fi
 else
 write_log error "Skipping encryption because backup did not complete successfully"

@@ -1312,23 +1448,23 @@ EOF
 for notification_type in $notification_types ; do
 case "${notification_type,,}" in
 "custom" )
-print_debug "Sending Notification via custom"
+print_debug "[notify] Sending Notification via custom"
 notification_custom "${1}" "${2}" "${3}" "${4}" "${5}"
 ;;
 "email" | "mail" )
-print_debug "Sending Notification via email"
+print_debug "[notify] Sending Notification via email"
 notification_email "${1}" "${2}" "${3}" "${4}" "${5}"
 ;;
 "matrix" )
-print_debug "Sending Notification via Matrix"
+print_debug "[notify] Sending Notification via Matrix"
 notification_matrix "${1}" "${2}" "${3}" "${4}" "${5}"
 ;;
 "mattermost" )
-print_debug "Sending Notification via Mattermost"
+print_debug "[notify] Sending Notification via Mattermost"
 notification_mattermost "${1}" "${2}" "${3}" "${4}" "${5}"
 ;;
 "rocketchat" )
-print_debug "Sending Notification via Rocketchat"
+print_debug "[notify] Sending Notification via Rocketchat"
 notification_rocketchat "${1}" "${2}" "${3}" "${4}" "${5}"
 ;;
 * )

@@ -1371,8 +1507,37 @@ move_dbbackup() {
 write_log debug "Moving backup to filesystem"
 run_as_user mkdir -p "${backup_job_filesystem_path}"
 if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${TEMP_PATH}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi
+if var_true "${DEBUG_MOVE_DBBACKUP}"; then
+cat <<EOF
+## BEGIN Before Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
+##
+
+$(ls -l "${TEMP_PATH}"/*)
+
+## END
+EOF
+fi
 run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
 move_exit_code=$?
+if var_true "${DEBUG_MOVE_DBBACKUP}"; then
+cat <<EOF
+## BEGIN After Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
+##
+
+$(ls -l "${TEMP_PATH}"/*)
+
+## END
+
+## BEGIN After Moving file to _FILESYSTEM_PATH $(TZ=${TIMEZONE} date)
+##
+
+$(ls -l "${backup_job_filesystem_path}"/*)
+
+## END
+
+EOF
+fi
+
 if var_true "${backup_job_create_latest_symlink}" ; then
 run_as_user ln -sfr "${backup_job_filesystem_path}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/latest-"${backup_job_filename_base}"
 fi
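The new directory listings above only fire when `DEBUG_MOVE_DBBACKUP` is truthy. A hedged example of switching it on for a one-off run (the service name and compose layout are assumptions, not documented in this diff):

```bash
# Assumes an existing docker-compose service named 'db-backup'; adjust to taste.
docker compose stop db-backup
docker compose run --rm -e DEBUG_MOVE_DBBACKUP=TRUE db-backup
# The heredocs above then print listings of TEMP_PATH before and after the move,
# plus the destination _FILESYSTEM_PATH afterwards.
```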
@@ -1578,7 +1743,7 @@ process_limiter() {
 }

 run_as_user() {
-s6-setuidgid dbbackup $@
+sudo -Eu "${DBBACKUP_USER}" $@
 }

 setup_mode() {
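`run_as_user` now defers to `sudo` with a configurable account instead of the hard-coded `dbbackup` user that `s6-setuidgid` targeted. A minimal sketch of what the new wrapper does, assuming the image's sudoers policy allows `-E` and that `dbbackup` stands in for `${DBBACKUP_USER}`:

```bash
# Sketch only; the account name and exported variables are examples.
DBBACKUP_USER=dbbackup
export TEMP_PATH=/tmp/backups

run_as_user() {
  # -u picks the target account, -E asks sudo to keep the caller's exported
  # environment, which the backup routines read (TEMP_PATH and friends).
  sudo -Eu "${DBBACKUP_USER}" "$@"
}

run_as_user sh -c 'echo "running as $(id -un) with TEMP_PATH=${TEMP_PATH}"'
```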
@@ -1811,18 +1976,18 @@ timer() {
 ;;
 datetime)
 time_begin=$(date -d "${backup_job_backup_begin}" +%s)
-print_debug "BACKUP_BEGIN time = ${time_begin}"
+print_debug "[timer] [datetime] BACKUP_BEGIN time = ${time_begin}"
 time_wait=$(( time_begin - time_current ))
-print_debug "Difference in seconds: ${time_wait}"
+print_debug "[timer] [datetime] Difference in seconds: ${time_wait}"

 if (( ${time_wait} < 0 )); then
 time_wait=$(( (${time_wait} + (${backup_job_backup_interval} - 1)) / (${backup_job_backup_interval} * 60) ))
 time_wait=$(( ${time_wait} * -1 ))
-print_debug "Difference in seconds (rounded) time_wait is in the past : ${time_wait}"
+print_debug "[timer] [datetime] Difference in seconds (rounded) time_wait is in the past : ${time_wait}"
 fi

 time_future=$(( time_current + time_wait ))
-print_debug "Future execution time = ${time_future}"
+print_debug "[timer] [datetime] Future execution time = ${time_future}"
 ;;
 job)
 case "${2}" in
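Only the debug labels change in this hunk; the arithmetic is untouched. For readers following the `datetime` branch, here is a mechanical walk-through of that arithmetic with invented numbers (GNU `date` assumed; the variable names mirror the script, the values do not):

```bash
# Invented inputs: the configured start time lies three days in the past.
time_current=$(date -d "2023-11-11 12:00:00" +%s)
time_begin=$(date -d "2023-11-08 12:00:00" +%s)
backup_job_backup_interval=60                     # minutes

time_wait=$(( time_begin - time_current ))        # -259200 seconds
if (( time_wait < 0 )); then
  time_wait=$(( (time_wait + (backup_job_backup_interval - 1)) / (backup_job_backup_interval * 60) ))
  time_wait=$(( time_wait * -1 ))                 # 71 with these inputs; bash integer division truncates
fi
time_future=$(( time_current + time_wait ))
echo "time_wait=${time_wait} time_future=${time_future}"
```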
@@ -74,10 +74,17 @@ EOF
 esac
 fi

+
+control_c() {
+if [ -f "${restore_vars}" ] ; then rm -rf "${restore_vars}" ; fi
+print_warn "User aborted"
+exit
+}
+
 get_filename() {
 COLUMNS=12
 prompt="Please select a file to restore:"
-options=( $(find "${DEFAULT_BACKUP_PATH}" -type f -maxdepth 2 -not -name '*.md5' -not -name '*.sha1' -print0 | sort -z | xargs -0) )
+options=( $(find "${DEFAULT_FILESYSTEM_PATH}" -type f -maxdepth 2 -not -name '*.md5' -not -name '*.sha1' -print0 | sort -z | xargs -0) )
 PS3="$prompt "
 select opt in "${options[@]}" "Custom" "Quit" ; do
 if (( REPLY == 2 + ${#options[@]} )) ; then

@@ -103,13 +110,17 @@ get_filename() {

 get_dbhost() {
 p_dbhost=$(basename -- "${r_filename}" | cut -d _ -f 3)

 if [ -n "${p_dbhost}" ]; then
 parsed_host=true
 print_debug "Parsed DBHost: ${p_dbhost}"
+
+if grep -q "${p_dbhost}" "${restore_vars}" ; then
+detected_host_num=$(grep "${p_dbhost}" "${restore_vars}" | head -n1 | cut -c 3,4)
+detected_host_value=$(grep "${p_dbhost}" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
+fi
 fi

-if [ -z "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
+if [ -z "${detected_host_value}" ] && [ -z "${parsed_host}" ]; then
 print_debug "Parsed DBHost Variant: 1 - No Env, No Parsed Filename"
 q_dbhost_variant=1
 q_dbhost_menu=$(cat <<EOF

@@ -118,18 +129,18 @@ EOF
 )
 fi

-if [ -n "${DB_HOST}" ] && [ -z "${parsed_host}" ]; then
+if [ -n "${detected_host_value}" ] && [ -z "${parsed_host}" ]; then
 print_debug "Parsed DBHost Variant: 2 - Env, No Parsed Filename"
 q_dbhost_variant=2
 q_dbhost_menu=$(cat <<EOF

 C ) Custom Entered Hostname
-E ) Environment Variable DB_HOST: '${DB_HOST}'
+E ) Environment Variable DB${detected_host_num}_HOST: '${detected_host_value}'
 EOF
 )
 fi

-if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
+if [ -z "${detected_host_value}" ] && [ -n "${parsed_host}" ]; then
 print_debug "Parsed DBHost Variant: 3 - No Env, Parsed Filename"
 q_dbhost_variant=3
 q_dbhost_menu=$(cat <<EOF

@@ -140,13 +151,13 @@ EOF
 )
 fi

-if [ -n "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
+if [ -n "${detected_host_value}" ] && [ -n "${parsed_host}" ]; then
 print_debug "Parsed DBHost Variant: 4 - Env, Parsed Filename"
 q_dbhost_variant=4
 q_dbhost_menu=$(cat <<EOF

 C ) Custom Entered Hostname
-E ) Environment Variable DB_HOST: '${DB_HOST}'
+E ) Environment Variable DB${detected_host_num}_HOST: '${detected_host_value}'
 F ) Parsed Filename Host: '${p_dbhost}'
 EOF
 )

@@ -173,7 +184,7 @@ EOF
 ;;
 2 )
 while true; do
-read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
 case "${q_dbhost_menu,,}" in
 c* )
 counter=1

@@ -187,7 +198,7 @@ EOF
 break
 ;;
 e* | "" )
-r_dbhost=${DB_HOST}
+r_dbhost=${detected_host_value}
 break
 ;;
 q* )

@@ -199,7 +210,7 @@ EOF
 ;;
 3 )
 while true; do
-read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F\*${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
 case "${q_dbhost_menu,,}" in
 c* )
 counter=1

@@ -226,7 +237,7 @@ EOF

 4 )
 while true; do
-read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbhost_menu
 case "${q_dbhost_menu,,}" in
 c* )
 counter=1

@@ -240,7 +251,7 @@ EOF
 break
 ;;
 e* | "" )
-r_dbhost=${DB_HOST}
+r_dbhost=${detected_host_value}
 break
 ;;
 f* )
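The restore script now looks hostnames up in a `${restore_vars}` dump of the container's `DB<number>_` variables rather than the single `DB_HOST` value. A hedged illustration of the `grep`/`cut` lookup above, with an invented `restore_vars` file and hostname:

```bash
# Illustration only: the file contents and the 'db01' hostname are made up.
restore_vars=$(mktemp)
cat > "${restore_vars}" <<'EOF'
DB01_HOST=db01
DB01_NAME=wordpress
DB01_USER=wpuser
DB01_PORT=3306
EOF

p_dbhost=db01
detected_host_num=$(grep "${p_dbhost}" "${restore_vars}" | head -n1 | cut -c 3,4)         # "01"
detected_host_value=$(grep "${p_dbhost}" "${restore_vars}" | head -n1 | cut -d '=' -f 2)  # "db01"
echo "DB${detected_host_num}_NAME -> $(grep "^DB${detected_host_num}_NAME=" "${restore_vars}" | cut -d '=' -f 2)"
rm -f "${restore_vars}"
```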
@@ -257,6 +268,337 @@ EOF
 esac
 }

+get_dbname() {
+p_dbname=$(basename -- "${r_filename}" | cut -d _ -f 2)
+
+if [ -n "${p_dbname}" ]; then
+parsed_name=true
+print_debug "Parsed DBName: ${p_dbname}"
+fi
+
+if grep -q "^DB${detected_host_num}_NAME=${p_dbname}" "${restore_vars}" ; then
+detected_name_value=$(grep -q "^DB${detected_host_num}_NAME=${p_dbname}" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
+fi
+
+if [ -z "${detected_name_value}" ] && [ -z "${parsed_name}" ]; then
+print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename"
+q_dbname_variant=1
+q_dbname_menu=$(cat <<EOF
+
+EOF
+)
+fi
+
+if [ -n "${detected_name_value}" ] && [ -z "${parsed_name}" ]; then
+print_debug "Parsed DBName Variant: 2 - Env, No Parsed Filename"
+q_dbname_variant=2
+q_dbname_menu=$(cat <<EOF
+
+C ) Custom Entered Database Name
+E ) Environment Variable DB${detected_host_num}_NAME: '${detected_name_value}'
+EOF
+)
+fi
+
+if [ -z "${detected_name_value}" ] && [ -n "${parsed_name}" ]; then
+print_debug "Parsed DBName Variant: 3 - No Env, Parsed Filename"
+q_dbname_variant=3
+q_dbname_menu=$(cat <<EOF
+
+C ) Custom Entered Database Name
+F ) Parsed Filename DB Name: '${p_dbname}'
+EOF
+)
+fi
+
+if [ -n "${detected_name_value}" ] && [ -n "${parsed_name}" ]; then
+print_debug "Parsed DBname Variant: 4 - Env, Parsed Filename"
+q_dbname_variant=4
+q_dbname_menu=$(cat <<EOF
+
+C ) Custom Entered Database Name
+E ) Environment Variable DB${detected_host_num}_NAME: '${detected_name_value}'
+F ) Parsed Filename DB Name: '${p_dbname}'
+EOF
+)
+fi
+
+cat << EOF
+
+What Database Name do you want to restore to?
+${q_dbname_menu}
+Q ) Quit
+
+EOF
+
+case "${q_dbname_variant}" in
+1 )
+counter=1
+q_dbname=" "
+while [[ $q_dbname = *" "* ]]; do
+if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
+read -e -p "$(echo -e ${clg}** ${cdgy}What DB Name do you want to restore to:\ ${coff})" q_dbname
+(( counter+=1 ))
+done
+r_dbname=${q_dbname}
+;;
+2 )
+while true; do
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
+case "${q_dbname_menu,,}" in
+c* )
+counter=1
+q_dbname=" "
+while [[ $q_dbname = *" "* ]]; do
+if [ $counter -gt 1 ] ; then print_error "DB Names can't have spaces in them, please re-enter." ; fi ;
+read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
+(( counter+=1 ))
+done
+r_dbname=${q_dbname}
+break
+;;
+e* | "" )
+r_dbname=${detected_name_value}
+break
+;;
+q* )
+print_info "Quitting Script"
+exit 1
+;;
+esac
+done
+;;
+3 )
+while true; do
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F\*${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
+case "${q_dbname_menu,,}" in
+c* )
+counter=1
+q_dbname=" "
+while [[ $q_dbname = *" "* ]]; do
+if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
+read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
+(( counter+=1 ))
+done
+r_dbname=${q_dbname}
+break
+;;
+f* | "" )
+r_dbname=${p_dbname}
+break
+;;
+q* )
+print_info "Quitting Script"
+exit 1
+;;
+esac
+done
+;;
+
+4 )
+while true; do
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
+case "${q_dbname_menu,,}" in
+c* )
+counter=1
+q_dbname=" "
+while [[ $q_dbname = *" "* ]]; do
+if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
+read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
+(( counter+=1 ))
+done
+r_dbname=${q_dbname}
+break
+;;
+e* | "" )
+r_dbname=${detected_name_value}
+break
+;;
+f* )
+r_dbname=${p_dbname}
+break
+;;
+q* )
+print_info "Quitting Script"
+exit 1
+;;
+esac
+done
+;;
+esac
+}
+
+get_dbpass() {
+if grep -q "^DB${detected_host_num}_PASS=" "${restore_vars}" ; then
+detected_pass_value=$(grep "^DB${detected_host_num}_PASS=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
+fi
+
+if [ -z "${detected_pass_value}" ] ; then
+print_debug "Parsed DBPass Variant: 1 - No Env"
+q_dbpass_variant=1
+q_dbpass_menu=$(cat <<EOF
+
+EOF
+)
+fi
+
+if [ -n "${detected_pass_value}" ] ; then
+print_debug "Parsed DBPass Variant: 2 - Env"
+q_dbpass_variant=2
+q_dbpass_menu=$(cat <<EOF
+
+C ) Custom Entered Database Password
+E ) Environment Variable DB${detected_host_num}_PASS
+EOF
+)
+fi
+
+cat << EOF
+
+What Database Password will be used to restore?
+${q_dbpass_menu}
+Q ) Quit
+
+EOF
+
+case "${q_dbpass_variant}" in
+1 )
+counter=1
+q_dbpass=" "
+while [[ $q_dbpass = *" "* ]]; do
+if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
+read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
+(( counter+=1 ))
+done
+r_dbpass=${q_dbpass}
+;;
+2 )
+while true; do
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E\*${cdgy}\) : ${cwh}${coff}) " q_dbpass_menu
+case "${q_dbpass_menu,,}" in
+c* )
+counter=1
+q_dbpass=" "
+while [[ $q_dbpass = *" "* ]]; do
+if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
+read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
+(( counter+=1 ))
+done
+r_dbpass=${q_dbpass}
+break
+;;
+e* | "" )
+r_dbpass=${detected_pass_value}
+break
+;;
+q* )
+print_info "Quitting Script"
+exit 1
+;;
+esac
+done
+;;
+esac
+}
+
+get_dbport() {
+if grep -q "^DB${detected_host_num}_PORT=" "${restore_vars}" ; then
+detected_port_value=$(grep "^DB${detected_host_num}_PORT=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
+fi
+
+if [ -z "${detected_port_value}" ] ; then
+print_debug "Parsed DBPort Variant: 1 - No Env"
+q_dbport_variant=1
+q_dbport_menu_opt_default="| (${cwh}D${cdgy}) * "
+q_dbport_menu=$(cat <<EOF
+
+C ) Custom Entered Database Port
+D ) Default Port for Database type '${r_dbtype}': '${DEFAULT_PORT}'
+EOF
+)
+fi
+
+if [ -n "${detected_port_value}" ] ; then
+print_debug "Parsed DBPort Variant: 2 - Env"
+q_dbport_variant=2
+q_dbport_menu=$(cat <<EOF
+
+C ) Custom Entered Database Port
+D ) Default Port for Database type '${r_dbtype}': '${DEFAULT_PORT}'
+E ) Environment Variable DB${detected_host_num}_PORT: '${detected_port_value}'
+EOF
+)
+fi
+
+cat << EOF
+
+What Database Port do you wish to use? MySQL/MariaDB typcially listens on port 3306. Postrgresql port 5432. MongoDB 27017
+${q_dbport_menu}
+Q ) Quit
+
+EOF
+
+case "${q_dbport_variant}" in
+1 )
+while true; do
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}D\*${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
+case "${q_dbport_menu,,}" in
+c* )
+counter=1
+q_dbport=" "
+q_dbportre='^[0-9]+$'
+while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
+if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
+read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
+(( counter+=1 ))
+done
+r_dbport=${q_dbport}
+break
+;;
+d* | "" )
+r_dbport=${DEFAULT_PORT}
+break
+;;
+q* )
+print_info "Quitting Script"
+exit 1
+;;
+esac
+done
+;;
+2 )
+while true; do
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}D${cdgy}\) \| \(${cwh}E\*${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
+case "${q_dbport_menu,,}" in
+c* )
+counter=1
+q_dbport=" "
+q_dbportre='^[0-9]+$'
+while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
+if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
+read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
+(( counter+=1 ))
+done
+r_dbport=${q_dbport}
+break
+;;
+d* )
+r_dbport=${DEFAULT_PORT}
+break
+;;
+e* | "" )
+r_dbport=${detected_port_value}
+break
+;;
+q* )
+print_info "Quitting Script"
+exit 1
+;;
+esac
+done
+;;
+esac
+}

 get_dbtype() {
 p_dbtype=$(basename -- "${r_filename}" | cut -d _ -f 1)
@@ -264,14 +606,17 @@ get_dbtype() {
 case "${p_dbtype}" in
 mongo* )
 parsed_type=true
+DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
 print_debug "Parsed DBType: MongoDB"
 ;;
 mariadb | mysql )
 parsed_type=true
+DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
 print_debug "Parsed DBType: MariaDB/MySQL"
 ;;
 pgsql | postgres* )
 parsed_type=true
+DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
 print_debug "Parsed DBType: Postgresql"
 ;;
 * )

@@ -338,14 +683,17 @@ EOF
 case "${q_dbtype,,}" in
 m* )
 r_dbtype=mysql
+DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
 break
 ;;
 o* )
 r_dbtype=mongo
+DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
 break
 ;;
 p* )
 r_dbtype=postgresql
+DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
 break
 ;;
 q* )

@@ -365,14 +713,17 @@ EOF
 ;;
 m* )
 r_dbtype=mysql
+DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
 break
 ;;
 o* )
 r_dbtype=mongo
+DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
 break
 ;;
 p* )
 r_dbtype=postgresql
+DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
 break
 ;;
 q* )

@@ -384,22 +735,36 @@ EOF
 ;;
 3 )
 while true; do
-read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
+read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \(Default\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
 case "${q_dbtype,,}" in
 f* | "" )
 r_dbtype=${p_dbtype}
+case "${r_dbtype}" in
+mongo )
+DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
+;;
+mysql )
+DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
+;;
+pgsql )
+DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
+;;
+esac
 break
 ;;
 m* )
 r_dbtype=mysql
+DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
 break
 ;;
 o* )
 r_dbtype=mongo
+DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
 break
 ;;
 p* )
 r_dbtype=postgresql
+DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
 break
 ;;
 q* )

@@ -424,14 +789,17 @@ EOF
 ;;
 m* )
 r_dbtype=mysql
+DEFAULT_PORT=${DEFAULT_PORT:-"3306"}
 break
 ;;
 o* )
 r_dbtype=mongo
+DEFAULT_PORT=${DEFAULT_PORT:-"27017"}
 break
 ;;
 p* )
 r_dbtype=postgresql
+DEFAULT_PORT=${DEFAULT_PORT:-"5432"}
 break
 ;;
 q* )
@@ -444,235 +812,12 @@ EOF
 esac
 }

-get_dbname() {
-p_dbname=$(basename -- "${r_filename}" | cut -d _ -f 2)
-
-if [ -n "${p_dbname}" ]; then
-parsed_name=true
-print_debug "Parsed DBName: ${p_dbhost}"
-fi
-
-if [ -z "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
-print_debug "Parsed DBName Variant: 1 - No Env, No Parsed Filename"
-q_dbname_variant=1
-q_dbname_menu=$(cat <<EOF
-
-EOF
-)
-fi
-
-if [ -n "${DB_NAME}" ] && [ -z "${parsed_name}" ]; then
-print_debug "Parsed DBName Variant: 2 - Env, No Parsed Filename"
-q_dbname_variant=2
-q_dbname_menu=$(cat <<EOF
-
-C ) Custom Entered Database Name
-E ) Environment Variable DB_NAME: '${DB_NAME}'
-EOF
-)
-fi
-
-if [ -z "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
-print_debug "Parsed DBName Variant: 3 - No Env, Parsed Filename"
-q_dbname_variant=3
-q_dbname_menu=$(cat <<EOF
-
-C ) Custom Entered Database Name
-F ) Parsed Filename DB Name: '${p_dbname}'
-EOF
-)
-fi
-
-if [ -n "${DB_NAME}" ] && [ -n "${parsed_name}" ]; then
-print_debug "Parsed DBname Variant: 4 - Env, Parsed Filename"
-q_dbname_variant=4
-q_dbname_menu=$(cat <<EOF
-
-C ) Custom Entered Database Name
-E ) Environment Variable DB_NAME: '${DB_NAME}'
-F ) Parsed Filename DB Name: '${p_dbname}'
-EOF
-)
-fi
-
-cat << EOF
-
-What Database Name do you want to restore to?
-${q_dbname_menu}
-Q ) Quit
-
-EOF
-
-case "${q_dbname_variant}" in
-1 )
-counter=1
-q_dbname=" "
-while [[ $q_dbname = *" "* ]]; do
-if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
-read -e -p "$(echo -e ${clg}** ${cdgy}What DB Name do you want to restore to:\ ${coff})" q_dbname
-(( counter+=1 ))
-done
-r_dbname=${q_dbname}
-;;
-2 )
-while true; do
-read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
-case "${q_dbname_menu,,}" in
-c* )
-counter=1
-q_dbname=" "
-while [[ $q_dbname = *" "* ]]; do
-if [ $counter -gt 1 ] ; then print_error "DB Names can't have spaces in them, please re-enter." ; fi ;
-read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
-(( counter+=1 ))
-done
-r_dbname=${q_dbname}
-break
-;;
-e* | "" )
-r_dbname=${DB_NAME}
-break
-;;
-q* )
-print_info "Quitting Script"
-exit 1
-;;
-esac
-done
-;;
-3 )
-while true; do
-read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
-case "${q_dbname_menu,,}" in
-c* )
-counter=1
-q_dbname=" "
-while [[ $q_dbname = *" "* ]]; do
-if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
-read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
-(( counter+=1 ))
-done
-r_dbname=${q_dbname}
-break
-;;
-f* | "" )
-r_dbname=${p_dbname}
-break
-;;
-q* )
-print_info "Quitting Script"
-exit 1
-;;
-esac
-done
-;;
-
-4 )
-while true; do
-read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) : ${cwh}${coff}) " q_dbname_menu
-case "${q_dbname_menu,,}" in
-c* )
-counter=1
-q_dbname=" "
-while [[ $q_dbname = *" "* ]]; do
-if [ $counter -gt 1 ] ; then print_error "DB names can't have spaces in them, please re-enter." ; fi ;
-read -e -p "$(echo -e ${clg}** ${cdgy}What DB name do you want to restore to:\ ${coff})" q_dbname
-(( counter+=1 ))
-done
-r_dbname=${q_dbname}
-break
-;;
-e* | "" )
-r_dbname=${DB_NAME}
-break
-;;
-f* )
-r_dbname=${p_dbname}
-break
-;;
-q* )
-print_info "Quitting Script"
-exit 1
-;;
-esac
-done
-;;
-esac
-}
-
-get_dbport() {
-if [ -z "${DB_PORT}" ] ; then
-print_debug "Parsed DBPort Variant: 1 - No Env"
-q_dbport_variant=1
-q_dbport_menu=$(cat <<EOF
-
-EOF
-)
-fi
-
-if [ -n "${DB_PORT}" ] ; then
-print_debug "Parsed DBPort Variant: 2 - Env"
-q_dbport_variant=2
-q_dbport_menu=$(cat <<EOF
-
-C ) Custom Entered Database Port
-E ) Environment Variable DB_PORT: '${DB_PORT}'
-EOF
-)
-fi
-
-cat << EOF
-
-What Database Port do you wish to use? MySQL/MariaDB typcially listens on port 3306. Postrgresql port 5432. MongoDB 27017
-${q_dbport_menu}
-Q ) Quit
-
-EOF
-
-case "${q_dbport_variant}" in
-1 )
-counter=1
-q_dbport=" "
-q_dbportre='^[0-9]+$'
-while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
-if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
-read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
-(( counter+=1 ))
-done
-r_dbport=${q_dbport}
-;;
-2 )
-while true; do
-read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbport_menu
-case "${q_dbport_menu,,}" in
-c* )
-counter=1
-q_dbport=" "
-q_dbportre='^[0-9]+$'
-while ! [[ $q_dbport =~ ${q_dbportre} ]]; do
-if [ $counter -gt 1 ] ; then print_error "Must be a port number, please re-enter." ; fi ;
-read -e -p "$(echo -e ${clg}** ${cdgy}What DB Port do you want to use:\ ${coff})" q_dbport
-(( counter+=1 ))
-done
-r_dbport=${q_dbport}
-break
-;;
-e* | "" )
-r_dbport=${DB_PORT}
-break
-;;
-q* )
-print_info "Quitting Script"
-exit 1
-;;
-esac
-done
-;;
-esac
-}
-
 get_dbuser() {
-if [ -z "${DB_USER}" ] ; then
+if grep -q "^DB${detected_host_num}_USER=" "${restore_vars}" ; then
+detected_user_value=$(grep "^DB${detected_host_num}_USER=" "${restore_vars}" | head -n1 | cut -d '=' -f 2)
+fi
+
+if [ -z "${detected_user_value}" ] ; then
 print_debug "Parsed DBUser Variant: 1 - No Env"
 q_dbuser_variant=1
 q_dbuser_menu=$(cat <<EOF

@@ -681,13 +826,13 @@ EOF
 )
 fi

-if [ -n "${DB_USER}" ] ; then
+if [ -n "${detected_user_value}" ] ; then
 print_debug "Parsed DBUser Variant: 2 - Env"
 q_dbuser_variant=2
 q_dbuser_menu=$(cat <<EOF

 C ) Custom Entered Database User
-E ) Environment Variable DB_USER: '${DB_USER}'
+E ) Environment Variable DB${detected_host_num}_USER: '${detected_user_value}'
 EOF
 )
 fi

@@ -727,7 +872,7 @@ EOF
 break
 ;;
 e* | "" )
-r_dbuser=${DB_USER}
+r_dbuser=${detected_user_value}
 break
 ;;
 q* )

@@ -740,76 +885,37 @@ EOF
 esac
 }

-get_dbpass() {
-if [ -z "${DB_PASS}" ] ; then
-print_debug "Parsed DBPass Variant: 1 - No Env"
-q_dbpass_variant=1
-q_dbpass_menu=$(cat <<EOF
-
-EOF
-)
-fi
-
-if [ -n "${DB_PASS}" ] ; then
-print_debug "Parsed DBPass Variant: 2 - Env"
-q_dbpass_variant=2
-q_dbpass_menu=$(cat <<EOF
-
-C ) Custom Entered Database Password
-E ) Environment Variable DB_PASS
-EOF
-)
-fi
-
-cat << EOF
-
-What Database Password will be used to restore?
-${q_dbpass_menu}
-Q ) Quit
-
-EOF
-
-case "${q_dbpass_variant}" in
-1 )
-counter=1
-q_dbpass=" "
-while [[ $q_dbpass = *" "* ]]; do
-if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
-read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
-(( counter+=1 ))
-done
-r_dbpass=${q_dbpass}
-;;
-2 )
-while true; do
-read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}C${cdgy}\) \| \(${cwh}E${cdgy}\) : ${cwh}${coff}) " q_dbpass_menu
-case "${q_dbpass_menu,,}" in
-c* )
-counter=1
-q_dbpass=" "
-while [[ $q_dbpass = *" "* ]]; do
-if [ $counter -gt 1 ] ; then print_error "DB Passwords can't have spaces in them, please re-enter." ; fi ;
-read -e -p "$(echo -e ${clg}** ${cdgy}What DB Password do you wish to use:\ ${coff})" q_dbpass
-(( counter+=1 ))
-done
-r_dbpass=${q_dbpass}
-break
-;;
-e* | "" )
-r_dbpass=${DB_PASS}
-break
-;;
-q* )
-print_info "Quitting Script"
-exit 1
-;;
-esac
-done
-;;
-esac
+get_filename() {
+COLUMNS=12
+prompt="Please select a file to restore:"
+options=( $(find "${DEFAULT_FILESYSTEM_PATH}" -type f -maxdepth 2 -not -name '*.md5' -not -name '*.sha1' -not -name '*.gpg' -print0 | sort -z | xargs -0) )
+PS3="$prompt "
+select opt in "${options[@]}" "Custom" "Quit" ; do
+if (( REPLY == 2 + ${#options[@]} )) ; then
+echo "Bye!"
+exit 2
+elif (( REPLY == 1 + ${#options[@]} )) ; then
+while [ ! -f "${opt}" ] ; do
+read -p "What path and filename to restore: " opt
+if [ ! -f "${opt}" ] ; then
+print_error "File not found. Please retry.."
+fi
+done
+break
+elif (( REPLY > 0 && REPLY <= ${#options[@]} )) ; then
+break
+else
+echo "Invalid option. Try another one."
+fi
+done
+COLUMNS=$oldcolumns
+r_filename=${opt}
 }

 #### SCRIPT START
+trap control_c INT
+bootstrap_variables restore_init

 cat << EOF

 ## ${IMAGE_NAME} Restore Script