Compare commits

...

17 Commits

Author SHA1 Message Date
dave@tiredofit.ca
d5cacdb32a Release 3.10.5 - See CHANGELOG.md 2023-10-11 15:44:26 -07:00
dave@tiredofit.ca
238b4d852c Release 3.10.4 - See CHANGELOG.md 2023-10-11 15:17:54 -07:00
Dave Conroy
8d6e72eead Merge pull request #258 from thomas-negrault/fix/mongo-restore-authentication-database
Use authentification database in mongorestore
2023-10-11 15:16:42 -07:00
Thomas Negrault
a9037f97ac Use authentification database in mongorestore 2023-10-11 22:57:29 +02:00
dave@tiredofit.ca
ebcd4fcde4 Release 3.10.3 - See CHANGELOG.md 2023-10-11 12:31:25 -07:00
Dave Conroy
adf52c1160 Merge pull request #257 from thomas-negrault/fix/alphabetical-filenames-sorting
Sort filenames alphabetically when using the restore command
2023-10-11 12:30:26 -07:00
Thomas Negrault
1eee4a49d7 Sort filenames alphabetically when using the restore command 2023-10-11 18:41:05 +02:00
dave@tiredofit.ca
e3faab5c36 Release 3.10.2 - See CHANGELOG.md 2023-09-14 08:13:56 -07:00
Dave Conroy
768d5e60fe Merge pull request #252 from pimjansen/feature/name-typo
Updated name where it is not writing to s3
2023-09-14 08:12:39 -07:00
Dave Conroy
e3e0d7ed67 Merge pull request #251 from pimjansen/feature/split-db-use
Remove the --database flag for a single db dump
2023-09-14 08:03:41 -07:00
Pim Jansen
db808d25c7 Updated name where it is not writing to s3 2023-09-14 10:39:34 +02:00
Pim Jansen
cb5b49b90b Remove the --database flag for a single db dump which ensures there is no use statement in the dump 2023-09-14 10:36:55 +02:00
dave@tiredofit.ca
48a1ff8bbe Release 3.10.1 - See CHANGELOG.md 2023-09-13 22:37:21 -07:00
dave@tiredofit.ca
8b1308ffd1 Release 3.10.0 - See CHANGELOG.md 2023-09-13 08:32:22 -07:00
Dave Conroy
3ab3f67be9 Merge pull request #248 from jcdirks/#247-env-variable-for-additional-arguments-to-the-dump-command-only
add env variables EXTRA_DUMP_OPTS and EXTRA_ENUMERATION_OPTS
2023-09-13 08:31:13 -07:00
Jan-Claas Dirks
cd1899d849 add env variables EXTRA_DUMP_OPTS and EXTRA_ENUMERATION_OPTS 2023-09-13 10:00:41 +02:00
dave@tiredofit.ca
663667dbff Release 3.9.12 - See CHANGELOG.md 2023-09-04 08:32:05 -07:00
4 changed files with 105 additions and 24 deletions

View File

@@ -1,3 +1,49 @@
## 3.10.5 2023-10-11 <dave at tiredofit dot ca>
### Added
- Add option to drop existing data before a MongoDB restore
### Changed
- Fix issue where MongoDB could not be selected manually as the database type to restore
## 3.10.4 2023-10-11 <thomas-negrault@github>
### Changed
- Use authentication database for MongoDB restores
## 3.10.3 2023-10-11 <thomas-negrault@github>
### Changed
- Sort filenames alphabetically in the restore script's file selection
## 3.10.2 2023-09-14 <pimjansen@github>
### Changed
- Update wording when sending files to blobxfer
- Remove --databases flag when backing up a single MySQL/MariaDB database, which omits the "USE <db_name>" statement from the dump and allows restores into a differently named database (see the mysqldump sketch below)
## 3.10.1 2023-09-13 <dave at tiredofit dot ca>
### Changed
- Bugfix for a 3.10.0 syntax error caused by an unbraced variable
## 3.10.0 2023-09-13 <jcdirks@github>
### Added
- Add EXTRA_DUMP_OPTS and EXTRA_ENUMERATION_OPTS to pass different arguments when enumerating databases versus performing the actual backup
## 3.9.12 2023-09-04 <dave at tiredofit dot ca>
### Changed
- Perform additional checks when ENABLE_CHECKSUM=FALSE and skip checksum file handling for S3/BlobXfer
## 3.9.11 2023-08-24 <dave at tiredofit dot ca>
### Changed
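
The 3.10.2 removal of the --databases flag is easiest to see in the dump output itself. A minimal illustration (database name, host and credentials are hypothetical, and the shown output is abridged):

```bash
# With --databases, mysqldump pins the dump to the source database:
mysqldump -h db -u backup -p --databases mydb | head
#   CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mydb` ...;
#   USE `mydb`;
#   ... table definitions and data ...

# Without --databases (the behaviour as of 3.10.2), no USE statement is written,
# so the same dump can be restored into a differently named database:
mysqldump -h db -u backup -p mydb > mydb.sql
mysql -h db -u backup -p some_other_db < mydb.sql
```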

View File

@@ -179,7 +179,7 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
### Backup Options
| Parameter | Description | Default | `_FILE` |
| ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ------- |
|--------------------------------|------------------------------------------------------------------------------------------------------------------------------|---------------------------|---------|
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` | |
| `COMPRESSION_LEVEL`            | Numerical value for the compression level to use; most allow `1` to `9`, except `ZSTD` which allows `1` to `19`               | `3`                       |         |
| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` | |
@@ -187,7 +187,9 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
| `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` | |
| `ENABLE_CHECKSUM`              | Generate either an MD5 or SHA1 checksum file in the backup directory, `TRUE` or `FALSE`                                        | `TRUE`                    |         |
| `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` | |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | | |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
| `EXTRA_DUMP_OPTS` | If you need to pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
| `EXTRA_ENUMERATION_OPTS` | If you need to pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` | |
| `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` | |
| `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` | |
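
Taken together, the table above distinguishes three levels of extra arguments. A minimal sketch of how they might be combined when running the image (the image name and the specific option values are illustrative assumptions, not taken from this diff):

```bash
# EXTRA_OPTS              -> appended to both the enumeration and the dump commands
# EXTRA_DUMP_OPTS         -> appended to the dump command (e.g. mysqldump) only
# EXTRA_ENUMERATION_OPTS  -> appended to the database enumeration query only
docker run -d --name db-backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=db \
  -e DB_USER=backup \
  -e DB_PASS=backup_password \
  -e EXTRA_OPTS="--skip-ssl" \
  -e EXTRA_DUMP_OPTS="--hex-blob" \
  -e EXTRA_ENUMERATION_OPTS="--connect-timeout=5" \
  tiredofit/db-backup
```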

View File

@@ -159,7 +159,7 @@ backup_influx() {
compression
pre_dbbackup $db
print_notice "Dumping Influx database: '${db}'"
influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code backup $target_dir
print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
@@ -181,7 +181,7 @@ backup_influx() {
compression
pre_dbbackup $db
print_notice "Dumping Influx2 database: '${db}'"
influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
exit_code=$?
check_exit_code backup $target_dir
create_archive
@@ -208,9 +208,9 @@ backup_mongo() {
compression_string="and compressing with gzip"
fi
if [ -n "${MONGO_CUSTOM_URI}" ] ; then
mongo_backup_parameter="--uri=${MONGO_CUSTOM_URI} ${EXTRA_OPTS}"
mongo_backup_parameter="--uri=${MONGO_CUSTOM_URI} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS}"
else
mongo_backup_parameter="--host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}"
mongo_backup_parameter="--host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS}"
fi
pre_dbbackup "${DB_NAME}"
print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
@@ -249,7 +249,7 @@ backup_mysql() {
if [ "${DB_NAME,,}" = "all" ] ; then
print_debug "Preparing to back up everything except for information_schema and _* prefixes"
db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER ${mysql_tls_args} ${EXTRA_OPTS} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER ${mysql_tls_args} ${EXTRA_OPTS} ${EXTRA_ENUMERATION_OPTS} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
if [ -n "${DB_NAME_EXCLUDE}" ] ; then
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
@@ -271,7 +271,7 @@ backup_mysql() {
compression
pre_dbbackup $db
print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code backup $target
generate_checksum
@@ -287,7 +287,7 @@ backup_mysql() {
compression
pre_dbbackup all
print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
exit_code=$?
check_exit_code backup $target
generate_checksum
@@ -324,7 +324,7 @@ backup_pgsql() {
compression
pre_dbbackup $db
print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code backup $target
generate_checksum
@@ -348,7 +348,7 @@ backup_pgsql() {
for x_db_name in ${tmp_db_names} ; do
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
exit_code=$?
check_exit_code backup $target
generate_checksum
@@ -363,7 +363,7 @@ backup_redis() {
print_notice "Dumping Redis - Flushing Redis Cache First"
target=redis_all_${DB_HOST,,}_${now}.rdb
ltarget=redis_${DB_HOST,,}
echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS}
sleep 10
try=5
while [ $try -gt 0 ] ; do
@@ -744,20 +744,21 @@ move_dbbackup() {
silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
if var_true "${ENABLE_CHECKSUM}" ; then rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"; fi
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
"blobxfer" )
print_info "Moving backup to S3 Bucket with blobxfer"
print_info "Moving backup to external storage with blobxfer"
mkdir -p "${DB_DUMP_TARGET}"
mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
if var_true "${ENABLE_CHECKSUM}" ; then mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/; fi
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET}
move_exit_code=$?
rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
if var_true "${ENABLE_CHECKSUM}" ; then rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}" ; fi
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
esac

View File

@@ -78,7 +78,7 @@ fi
get_filename() {
COLUMNS=12
prompt="Please select a file to restore:"
options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | sort -z | xargs -0) )
PS3="$prompt "
select opt in "${options[@]}" "Custom" "Quit" ; do
if (( REPLY == 2 + ${#options[@]} )) ; then
@@ -131,7 +131,7 @@ EOF
fi
if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
print_debug "Parsed DBHostpho Variant: 3 - No Env, Parsed Filename"
print_debug "Parsed DBHost Variant: 3 - No Env, Parsed Filename"
q_dbhost_variant=3
q_dbhost_menu=$(cat <<EOF
@@ -335,7 +335,7 @@ EOF
case "${q_dbtype_variant}" in
1 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}M${cdgy}\) | \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
m* )
r_dbtype=mysql
@@ -358,7 +358,7 @@ EOF
;;
2 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
e* | "" )
r_dbtype=${DB_TYPE}
@@ -385,7 +385,7 @@ EOF
;;
3 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
f* | "" )
r_dbtype=${p_dbtype}
@@ -413,7 +413,7 @@ EOF
4 )
while true; do
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
case "${q_dbtype,,}" in
e* | "" )
r_dbtype=${DB_TYPE}
@@ -427,6 +427,10 @@ EOF
r_dbtype=mysql
break
;;
o* )
r_dbtype=mongo
break
;;
p* )
r_dbtype=postgresql
break
@@ -915,6 +919,30 @@ case "${r_dbtype}" in
exit_code=$?
;;
mongo )
cat << EOF
Do you wish to drop any existing data before restoring?
Y ) Yes
N ) No
Q ) Quit
EOF
echo -e "${coff}"
read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) \| \(${cwh}Q${cdgy}\) : ${cwh}${coff})" q_menu_mongo_dropdb
case "${q_menu_mongo_dropdb,,}" in
"y" | "yes" | * )
mongo_dropdb="--drop"
;;
"n" | "update" )
unset mongo_dropdb
;;
"q" | "exit" )
print_info "Quitting Script"
exit 1
;;
esac
print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
if [ "${ENABLE_COMPRESSION,,}" != "none" ] && [ "${ENABLE_COMPRESSION,,}" != "false" ] ; then
mongo_compression="--gzip"
@@ -925,7 +953,11 @@ case "${r_dbtype}" in
if [ -n "${r_dbpass}" ] ; then
mongo_pass="-p=${r_dbpass}"
fi
mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
if [ -n "${DB_AUTH}" ] ; then
mongo_auth_database="--authenticationDatabase=${DB_AUTH}"
fi
mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_dropdb} ${mongo_user} ${mongo_pass} --archive=${r_filename} ${mongo_auth_database}
exit_code=$?
;;
* )
@@ -939,4 +971,4 @@ if [ "${exit_code}" = 0 ] ; then
print_info "Restore complete!"
else
print_error "Restore reported errors"
fi
fi
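
For reference, with the drop prompt and authentication-database handling added above, the assembled mongorestore call expands to something like the following. Host, credentials, database and archive name are placeholders, flag forms mirror the script's `-d=`/`-h=`/`-p=` style, and this assumes the user answered "Y" to the drop prompt, compression is enabled, and DB_AUTH=admin:

```bash
mongorestore --gzip \
  -d=mydb \
  -h=mongo.example.com \
  --port=27017 \
  --drop \
  -u=backup_user \
  -p=backup_password \
  --archive=/backup/mongo_backup.archive.gz \
  --authenticationDatabase=admin
```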