Mirror of https://github.com/tiredofit/docker-db-backup.git (synced 2025-12-21 21:33:28 +01:00)

Compare commits (20 commits)
- edeadade4d
- 31b256b02d
- d5cacdb32a
- 238b4d852c
- 8d6e72eead
- a9037f97ac
- ebcd4fcde4
- adf52c1160
- 1eee4a49d7
- e3faab5c36
- 768d5e60fe
- e3e0d7ed67
- db808d25c7
- cb5b49b90b
- 48a1ff8bbe
- 8b1308ffd1
- 3ab3f67be9
- cd1899d849
- 663667dbff
- 36506091be
CHANGELOG.md (70 lines changed)

@@ -1,3 +1,73 @@
## 3.11.1 2023-10-23 <dave at tiredofit dot ca>

### Added

- Switch to tiredofit/alpine:edge for newer packages
- PostgreSQL 16 support

### Changed

- Add --break-system-packages flag to pip when installing blobxfer
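The reasoning behind that flag (an inference from Alpine edge's Python packaging, not stated in the changelog): newer Alpine releases mark the system interpreter as externally managed under PEP 668, so a plain `pip3 install` refuses to write into system site-packages during the image build.

```sh
# Without the flag, pip on Alpine edge aborts with an "externally-managed-environment"
# error; --break-system-packages overrides that check inside the image build.
pip3 install --break-system-packages blobxfer
```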
## 3.11.0 2023-10-12 <dave at tiredofit dot ca>

### Added

- Introduce aarch64 (ARMv8) support for Microsoft SQL Server backups
- Microsoft ODBC Driver 18.3.2.1-1
- Microsoft SQL Client 18.3.1.1-1
## 3.10.5 2023-10-11 <dave at tiredofit dot ca>

### Added

- Add option to drop existing data from MongoDB restore

### Changed

- Fix inability to manually select MongoDB as the database type when restoring
## 3.10.4 2023-10-11 <thomas-negrault@github>

### Changed

- Use authentication database for MongoDB restores

## 3.10.3 2023-10-11 <thomas-negrault@github>

### Changed

- Change sorting for restore script
## 3.10.2 2023-09-14 <pimjansen@github>

### Changed

- Update wording when sending files to blobxfer
- Remove the --databases flag when backing up a single MySQL/MariaDB database, which omits the `USE <db_name>` statement from the dump and allows for more flexible restores
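To illustrate the effect (a minimal sketch; the host, user, and database names are placeholders rather than values used by this image):

```sh
# Without --databases, mysqldump emits no "CREATE DATABASE"/"USE `app_db`" statements,
# so the dump can be restored into a schema with a different name.
mysqldump -h mariadb -u backup -p"${DB_PASS}" app_db > app_db.sql

# Restore the same dump into a differently named database:
mysql -h mariadb -u backup -p"${DB_PASS}" -e "CREATE DATABASE IF NOT EXISTS app_db_copy"
mysql -h mariadb -u backup -p"${DB_PASS}" app_db_copy < app_db.sql
```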
## 3.10.1 2023-09-13 <dave at tiredofit dot ca>

### Changed

- Bugfix to 3.10.0: syntax error caused by an unbraced variable

## 3.10.0 2023-09-13 <jcdirks@github>

### Added

- Add EXTRA_DUMP_OPTS and EXTRA_ENUMERATION_OPTS so that different arguments can be passed when enumerating databases versus performing the actual backup
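A small configuration sketch (the option values are illustrative assumptions, not defaults): with `DB_NAME=ALL` on MySQL/MariaDB, the enumeration options apply to the `SHOW DATABASES` query and the dump options only to `mysqldump` itself, while `EXTRA_OPTS` continues to apply to both.

```sh
# Environment passed to the backup container (values are examples only):
EXTRA_ENUMERATION_OPTS="--connect-timeout=10"   # only used when listing databases
EXTRA_DUMP_OPTS="--no-tablespaces"              # only used for the actual mysqldump run
```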
## 3.9.12 2023-09-04 <dave at tiredofit dot ca>

### Changed

- Perform additional checks when ENABLE_CHECKSUM=FALSE and skip the checksum-related actions for S3/BlobXfer

## 3.9.11 2023-08-24 <dave at tiredofit dot ca>

### Changed

- AWS CLI 2.13.9

## 3.9.10 2023-08-23 <dave at tiredofit dot ca>

### Changed
Dockerfile (23 lines changed)
```diff
@@ -1,5 +1,5 @@
 ARG DISTRO=alpine
-ARG DISTRO_VARIANT=3.18
+ARG DISTRO_VARIANT=edge

 FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"
@@ -7,7 +7,8 @@ LABEL maintainer="Dave Conroy (github.com/tiredofit)"
 ### Set Environment Variables
 ENV INFLUX_VERSION=1.8.0 \
     INFLUX2_VERSION=2.4.0 \
-    MSSQL_VERSION=18.0.1.1-1 \
+    MSODBC_VERSION=18.3.2.1-1 \
+    MSSQL_VERSION=18.3.1.1-1 \
     AWS_CLI_VERSION=1.25.97 \
     CONTAINER_ENABLE_MESSAGING=FALSE \
     CONTAINER_ENABLE_MONITORING=TRUE \
@@ -30,7 +31,6 @@ RUN source /assets/functions/00-container && \
             openssl-dev \
             libffi-dev \
             python3-dev \
-            py3-setuptools \
             py3-pip \
             xz-dev \
             && \
@@ -44,8 +44,8 @@ RUN source /assets/functions/00-container && \
             mongodb-tools \
             openssl \
             pigz \
-            postgresql15 \
-            postgresql15-client \
+            postgresql16 \
+            postgresql16-client \
             pv \
             py3-botocore \
             py3-colorama \
@@ -53,6 +53,7 @@ RUN source /assets/functions/00-container && \
             py3-docutils \
             py3-jmespath \
             py3-rsa \
+            py3-setuptools \
             py3-s3transfer \
             py3-yaml \
             python3 \
@@ -63,14 +64,14 @@ RUN source /assets/functions/00-container && \
             zstd \
             && \
     \
-    apkArch="$(apk --print-arch)"; \
+    apkArch="$(uname -m)"; \
     case "$apkArch" in \
-        x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
-        aarch64 ) influx2=true ; influx_arch=arm64 ;; \
-        *) sleep 0.1 ;; \
+        x86_64) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
+        arm64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
+        *) sleep 0.1 ;; \
     esac; \
     \
-    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSSQL_VERSION}_amd64.apk mssql-tools18_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
+    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; ls -l ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
     if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
     clone_git_repo https://github.com/aws/aws-cli "${AWS_CLI_VERSION}" && \
     python3 setup.py install --prefix=/usr && \
@@ -93,7 +94,7 @@ RUN source /assets/functions/00-container && \
     make && \
     make install && \
     \
-    pip3 install blobxfer && \
+    pip3 install --break-system-packages blobxfer && \
     \
     package remove .db-backup-build-deps && \
     package cleanup && \
```
README.md (10 lines changed)
```diff
@@ -12,7 +12,7 @@

 This will build a container for backing up multiple types of DB Servers

-Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
+Currently backs up CouchDB, InfluxDB, MySQL, Microsoft SQL, MongoDB, Postgres, Redis servers.

 * dump to local filesystem or backup to S3 Compatible services, and Azure.
 * select database user and password
```
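A minimal usage sketch to go with the feature list (the image tag, host name, credentials, and mount path are illustrative assumptions, not values taken from this README excerpt):

```sh
# Back up a single MariaDB schema to a locally mounted directory.
docker run -d --name db-backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=app_db \
  -e DB_USER=backup \
  -e DB_PASS=secret \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```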
```diff
@@ -149,7 +149,7 @@ Be sure to view the following repositories to understand all the customizable op
 | Parameter         | Description                                                                                                                                   | Default | `_FILE` |
 | ----------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ------- | ------- |
 | `DB_AUTH`         | (Mongo Only - Optional) Authentication Database                                                                                               |         |         |
-| `DB_TYPE`         | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3`                                                        |         |         |
+| `DB_TYPE`         | Type of DB Server to backup `couch` `influx` `mysql` `mssql` `pgsql` `mongo` `redis` `sqlite3`                                                |         |         |
 | `DB_HOST`         | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3`                                                 |         | x       |
 | `DB_NAME`         | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas e.g. `db1,db2` |         | x       |
 | `DB_NAME_EXCLUDE` | If using `ALL`, exclude these databases (separated by commas) from being backed up                                                            |         | x       |
```
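For example, backing up everything a PostgreSQL user can see while skipping a couple of databases (a sketch; the names and image tag are illustrative):

```sh
docker run -d \
  -e DB_TYPE=pgsql \
  -e DB_HOST=postgres \
  -e DB_USER=backup \
  -e DB_PASS=secret \
  -e DB_NAME=ALL \
  -e DB_NAME_EXCLUDE=postgres,template1 \
  tiredofit/db-backup
```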
```diff
@@ -179,7 +179,7 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b

 ### Backup Options
 | Parameter                      | Description                                                                                                                  | Default | `_FILE` |
-| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ------- |
+|--------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------|---------|
 | `COMPRESSION`                  | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE`                                                      | `ZSTD`  |         |
 | `COMPRESSION_LEVEL`            | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19`    | `3`     |         |
 | `ENABLE_PARALLEL_COMPRESSION`  | Use multiple cores when compressing backups `TRUE` or `FALSE`                                                                | `TRUE`  |         |
@@ -187,7 +187,9 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
 | `GZ_RSYNCABLE`                 | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE`                   | `FALSE` |         |
 | `ENABLE_CHECKSUM`              | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE`                                                                | `TRUE`  |         |
 | `CHECKSUM`                     | Either `MD5` or `SHA1`                                                                                                       | `MD5`   |         |
-| `EXTRA_OPTS`                   | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command`                             |         |         |
+| `EXTRA_OPTS`                   | If you need to pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command`     |         |         |
+| `EXTRA_DUMP_OPTS`              | If you need to pass extra arguments to the backup command only, add them here e.g. `--extra-command`                         |         |         |
+| `EXTRA_ENUMERATION_OPTS`       | If you need to pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command`           |         |         |
 | `MYSQL_MAX_ALLOWED_PACKET`     | Max allowed packet if backing up MySQL / MariaDB                                                                             | `512M`  |         |
 | `MYSQL_SINGLE_TRANSACTION`     | Backup in a single transaction with MySQL / MariaDB                                                                          | `TRUE`  |         |
 | `MYSQL_STORED_PROCEDURES`      | Backup stored procedures with MySQL / MariaDB                                                                                | `TRUE`  |         |
```
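A small verification sketch for the checksum options above (the dump file name is made up for illustration; the image's real naming scheme may differ):

```sh
# With ENABLE_CHECKSUM=TRUE and CHECKSUM=MD5 (the defaults), every dump gets a sidecar
# .md5 file; after copying a backup elsewhere it can be verified like this:
cd /backup
md5sum -c mysql_app_db_mariadb_20231023-000000.sql.zst.md5

# CHECKSUM=SHA1 writes .sha1 files instead, verified with sha1sum -c.
```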
```diff
@@ -159,7 +159,7 @@ backup_influx() {
         compression
         pre_dbbackup $db
         print_notice "Dumping Influx database: '${db}'"
-        influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
+        influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
         exit_code=$?
         check_exit_code backup $target_dir
         print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
@@ -181,7 +181,7 @@ backup_influx() {
         compression
         pre_dbbackup $db
         print_notice "Dumping Influx2 database: '${db}'"
-        influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
+        influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
         exit_code=$?
         check_exit_code backup $target_dir
         create_archive
@@ -208,9 +208,9 @@ backup_mongo() {
         compression_string="and compressing with gzip"
     fi
     if [ -n "${MONGO_CUSTOM_URI}" ] ; then
-        mongo_backup_parameter="--uri=${MONGO_CUSTOM_URI} ${EXTRA_OPTS}"
+        mongo_backup_parameter="--uri=${MONGO_CUSTOM_URI} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS}"
     else
-        mongo_backup_parameter="--host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}"
+        mongo_backup_parameter="--host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS}"
     fi
     pre_dbbackup "${DB_NAME}"
     print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
@@ -249,7 +249,7 @@ backup_mysql() {

     if [ "${DB_NAME,,}" = "all" ] ; then
         print_debug "Preparing to back up everything except for information_schema and _* prefixes"
-        db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER ${mysql_tls_args} ${EXTRA_OPTS} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
+        db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER ${mysql_tls_args} ${EXTRA_OPTS} ${EXTRA_ENUMERATION_OPTS} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
         if [ -n "${DB_NAME_EXCLUDE}" ] ; then
             db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
             for db_exclude in ${db_names_exclusions} ; do
@@ -271,7 +271,7 @@ backup_mysql() {
         compression
         pre_dbbackup $db
         print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
-        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
         exit_code=$?
         check_exit_code backup $target
         generate_checksum
@@ -287,7 +287,7 @@ backup_mysql() {
         compression
         pre_dbbackup all
         print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
-        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
         exit_code=$?
         check_exit_code backup $target
         generate_checksum
@@ -324,7 +324,7 @@ backup_pgsql() {
         compression
         pre_dbbackup $db
         print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
-        pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+        pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
         exit_code=$?
         check_exit_code backup $target
         generate_checksum
@@ -348,7 +348,7 @@ backup_pgsql() {
         for x_db_name in ${tmp_db_names} ; do
             pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
         done
-        pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+        pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
         exit_code=$?
         check_exit_code backup $target
         generate_checksum
@@ -363,7 +363,7 @@ backup_redis() {
     print_notice "Dumping Redis - Flushing Redis Cache First"
     target=redis_all_${DB_HOST,,}_${now}.rdb
     ltarget=redis_${DB_HOST,,}
-    echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
+    echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS}
     sleep 10
     try=5
     while [ $try -gt 0 ] ; do
@@ -744,20 +744,21 @@ move_dbbackup() {
                 silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
             fi

-            rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
+            if var_true "${ENABLE_CHECKSUM}" ; then rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"; fi
            rm -rf "${TEMP_LOCATION}"/"${target}"
            ;;
        "blobxfer" )
-            print_info "Moving backup to S3 Bucket with blobxfer"
+            print_info "Moving backup to external storage with blobxfer"

            mkdir -p "${DB_DUMP_TARGET}"
-            mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
+            if var_true "${ENABLE_CHECKSUM}" ; then mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/; fi

            mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"

            silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET}
            move_exit_code=$?

-            rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
+            if var_true "${ENABLE_CHECKSUM}" ; then rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}" ; fi
            rm -rf "${TEMP_LOCATION}"/"${target}"
            ;;
    esac
```
```diff
@@ -78,7 +78,7 @@ fi
 get_filename() {
     COLUMNS=12
     prompt="Please select a file to restore:"
-    options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
+    options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | sort -z | xargs -0) )
     PS3="$prompt "
     select opt in "${options[@]}" "Custom" "Quit" ; do
         if (( REPLY == 2 + ${#options[@]} )) ; then
```
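A quick sketch of what the added `sort -z` buys (the directory here is illustrative): the pipeline stays NUL-delimited end to end, so file names containing spaces survive, and the restore menu now lists candidates in a stable sorted order instead of raw `find` order.

```sh
# NUL-delimited listing, sorted, then printed one entry per line.
find /backup -maxdepth 1 -type f -print0 | sort -z | xargs -0 -n1 echo
```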
```diff
@@ -131,7 +131,7 @@ EOF
     fi

     if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
-        print_debug "Parsed DBHostpho Variant: 3 - No Env, Parsed Filename"
+        print_debug "Parsed DBHost Variant: 3 - No Env, Parsed Filename"
         q_dbhost_variant=3
         q_dbhost_menu=$(cat <<EOF

@@ -335,7 +335,7 @@ EOF
     case "${q_dbtype_variant}" in
         1 )
             while true; do
-                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}M${cdgy}\) | \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
+                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
                 case "${q_dbtype,,}" in
                     m* )
                         r_dbtype=mysql
@@ -358,7 +358,7 @@ EOF
             ;;
         2 )
             while true; do
-                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
+                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
                 case "${q_dbtype,,}" in
                     e* | "" )
                         r_dbtype=${DB_TYPE}
@@ -385,7 +385,7 @@ EOF
             ;;
         3 )
             while true; do
-                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
+                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
                 case "${q_dbtype,,}" in
                     f* | "" )
                         r_dbtype=${p_dbtype}
@@ -413,7 +413,7 @@ EOF

         4 )
             while true; do
-                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
+                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
                 case "${q_dbtype,,}" in
                     e* | "" )
                         r_dbtype=${DB_TYPE}
@@ -427,6 +427,10 @@ EOF
                         r_dbtype=mysql
                         break
                         ;;
+                    o* )
+                        r_dbtype=mongo
+                        break
+                        ;;
                     p* )
                         r_dbtype=postgresql
                         break
```
```diff
@@ -915,6 +919,30 @@ case "${r_dbtype}" in
             exit_code=$?
             ;;
         mongo )
+            cat << EOF
+
+Do you wish to drop any existing data before restoring?
+    Y ) Yes
+    N ) No
+    Q ) Quit
+
+EOF
+
+            echo -e "${coff}"
+            read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) \| \(${cwh}Q${cdgy}\) : ${cwh}${coff})" q_menu_mongo_dropdb
+            case "${q_menu_mongo_dropdb,,}" in
+                "y" | "yes" | * )
+                    mongo_dropdb="--drop"
+                    ;;
+                "n" | "update" )
+                    unset mongo_dropdb
+                    ;;
+                "q" | "exit" )
+                    print_info "Quitting Script"
+                    exit 1
+                    ;;
+            esac
+
             print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
             if [ "${ENABLE_COMPRESSION,,}" != "none" ] && [ "${ENABLE_COMPRESSION,,}" != "false" ] ; then
                 mongo_compression="--gzip"
@@ -925,7 +953,11 @@ case "${r_dbtype}" in
             if [ -n "${r_dbpass}" ] ; then
                 mongo_pass="-p=${r_dbpass}"
             fi
-            mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
+            if [ -n "${DB_AUTH}" ] ; then
+                mongo_auth_database="--authenticationDatabase=${DB_AUTH}"
+            fi
+
+            mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_dropdb} ${mongo_user} ${mongo_pass} --archive=${r_filename} ${mongo_auth_database}
             exit_code=$?
             ;;
         * )
@@ -939,4 +971,4 @@ if [ "${exit_code}" = 0 ] ; then
     print_info "Restore complete!"
 else
     print_error "Restore reported errors"
 fi
 fi
```
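Tying the restore-side changes together, a usage sketch (the `restore` command name inside the container and the `admin` value are assumptions for illustration):

```sh
# Assumes the container was started with DB_TYPE=mongo and DB_AUTH=admin in its environment.
# The interactive script prompts whether to drop existing data (adding --drop) and passes
# DB_AUTH to mongorestore as --authenticationDatabase, per the hunks above.
docker exec -it db-backup restore
```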