Compare commits


5 Commits
4.x ... 4.0.5

| Author | SHA1 | Message | Date |
| ------ | ---- | ------- | ---- |
| dave@tiredofit.ca | 2bc730013e | Release 4.0.5 - See CHANGELOG.md | 2023-11-10 07:25:25 -08:00 |
| Dave Conroy | d628ed8ff4 | Expand upon DEBUG_ statements to give more detail | 2023-11-10 07:24:31 -08:00 |
| Dave Conroy | d7399667a1 | Update _FILESYSTEM_PERMISSIONS from 700 to 600 and add undocumented DBBACKUP_USER\|GROUP variable | 2023-11-10 07:16:56 -08:00 |
| dave@tiredofit.ca | 9caec737e0 | Release 4.0.4 - See CHANGELOG.md | 2023-11-09 11:49:26 -08:00 |
| Dave Conroy | 87a803512d | Merge pull request #269 from tiredofit/4.x (New Restore Script) | 2023-11-09 11:48:19 -08:00 |
4 changed files with 46 additions and 15 deletions

View File

@@ -1,3 +1,19 @@
## 4.0.5 2023-11-10 <dave at tiredofit dot ca>
### Added
- Add undocumented DBBACKUP_USER|GROUP environment variables for troubleshooting permissions
- Add more verbosity when using DEBUG_ statements
### Changed
- Change _FILESYSTEM_PERMISSION to 600 from 700
## 4.0.4 2023-11-09 <dave at tiredofit dot ca>
### Added
- Add support for restoring from different DB_ variables in restore script
## 4.0.3 2023-11-09 <dave at tiredofit dot ca>
### Changed

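For troubleshooting the ownership issues these variables target, they can be overridden like any other container environment variable. A minimal sketch, assuming the image is started with `docker run` (the image reference and values below are assumptions, not part of this diff; the named account must already exist inside the container):

```bash
# Illustrative only: force ownership handling to root while debugging a
# permission problem, and relax the new 600 default on written files.
docker run -d \
  -e DBBACKUP_USER=root \
  -e DBBACKUP_GROUP=root \
  -e DEFAULT_FILESYSTEM_PERMISSION=640 \
  -v /srv/db-backups:/backup \
  tiredofit/db-backup:latest
```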
View File

@@ -327,7 +327,7 @@ If `DEFAULT_BACKUP_LOCTION` = `FILESYSTEM` then the following options are used:
| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `DEFAULT_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
| `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` |
| `DEFAULT_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `700` |
| `DEFAULT_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `600` |
###### S3
@@ -602,7 +602,7 @@ If `DB01_BACKUP_LOCTION` = `FILESYSTEM` then the following options are used:
| `DB01_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `DB01_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
| `DB01_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DB01_FILESYSTEM_PATH/archive/` |
| `DB01_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `700` |
| `DB01_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `600` |
###### S3

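As a usage sketch, the filesystem options listed above can be collected in an env file for a single job; the values are examples only, and `600` is simply the new shipped default:

```bash
# db01.env - illustrative values for the DB01_ filesystem options listed above
DB01_FILESYSTEM_PATH=/backup
DB01_FILESYSTEM_ARCHIVE_PATH=/backup/archive/
DB01_FILESYSTEM_PERMISSION=600
DB01_CREATE_LATEST_SYMLINK=TRUE
```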
View File

@@ -1,6 +1,8 @@
#!/command/with-contenv bash
BACKUP_JOB_CONCURRENCY=${BACKUP_JOB_CONCURRENCY:-"1"}
DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
DBBACKUP_GROUP=${DBBACKUP_USER:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}
@@ -13,7 +15,7 @@ DEFAULT_CREATE_LATEST_SYMLINK=${DEFAULT_CREATE_LATEST_SYMLINK:-"TRUE"}
DEFAULT_ENABLE_PARALLEL_COMPRESSION=${DEFAULT_ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
DEFAULT_ENCRYPT=${DEFAULT_ENCRYPT:-"FALSE"}
DEFAULT_FILESYSTEM_PATH=${DEFAULT_FILESYSTEM_PATH:-"/backup"}
DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"700"}
DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"}
DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"}
DEFAULT_MYSQL_ENABLE_TLS=${DEFAULT_MYSQL_ENABLE_TLS:-"FALSE"}
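Every line in this defaults file relies on bash's `${VAR:-default}` expansion, so a value passed into the container environment always wins over the shipped default (now `600` for the filesystem permission). A minimal standalone sketch of the pattern (the script name in the comments is illustrative):

```bash
#!/usr/bin/env bash
# ${VAR:-default} keeps a non-empty value already present in the environment
# and falls back to the literal on the right only when the variable is unset or empty.
DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
echo "permission in effect: ${DEFAULT_FILESYSTEM_PERMISSION}"

# DEFAULT_FILESYSTEM_PERMISSION=640 ./defaults-demo.sh   -> permission in effect: 640
# ./defaults-demo.sh                                      -> permission in effect: 600
```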

View File

@@ -5,11 +5,11 @@ bootstrap_filesystem() {
if [ ! -d "${backup_job_filesystem_path}" ]; then
mkdir -p "${backup_job_filesystem_path}"
fi
if [ "$(stat -c %U "${backup_job_filesystem_path}")" != "dbbackup" ] ; then chown -R dbbackup:dbbackup "${backup_job_filesystem_path}" ; fi
if [ "$(stat -c %U "${backup_job_filesystem_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_path}" ; fi
if [ "$(stat -c %a "${backup_job_filesystem_path}")" != "${backup_job_filesystem_permission}" ] ; then chmod -R "${backup_job_filesystem_permission}" "${backup_job_filesystem_path}" ; fi
if [ -d "${backup_job_filesystem_archive_path}" ]; then
if [ "$(stat -c %U "${backup_job_filesystem_archive_path}")" != "dbbackup" ] ; then chown -R dbbackup:dbbackup "${backup_job_filesystem_archive_path}" ; fi
if [ "$(stat -c %U "${backup_job_filesystem_archive_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_archive_path}" ; fi
if [ "$(stat -c %a "${backup_job_filesystem_archive_path}")" != "${backup_job_filesystem_permission}" ] ; then chmod -R "${backup_job_filesystem_permission}" "${backup_job_filesystem_archive_path}" ; fi
fi
@@ -17,14 +17,14 @@ bootstrap_filesystem() {
mkdir -p "${LOG_PATH}"
fi
if [ "$(stat -c %U "${LOG_PATH}")" != "dbbackup" ] ; then chown dbbackup:dbbackup "${LOG_PATH}" ; fi
if [ "$(stat -c %U "${LOG_PATH}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${LOG_PATH}" ; fi
if [ ! -d "${LOG_PATH}"/"$(date +'%Y%m%d')" ]; then run_as_user mkdir -p "${LOG_PATH}"/"$(date +'%Y%m%d')"; fi
if [ "$(stat -c %a "${LOG_PATH}")" != "755" ] ; then chmod -R 755 "${LOG_PATH}" ; fi
if [ ! -d "${TEMP_PATH}" ]; then
mkdir -p "${TEMP_PATH}"
fi
if [ "$(stat -c %U "${TEMP_PATH}")" != "dbbackup" ] ; then chown -R dbbackup:dbbackup "${TEMP_PATH}" ; fi
if [ "$(stat -c %U "${TEMP_PATH}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${TEMP_PATH}" ; fi
if var_true "${DEBUG_BOOTSTRAP_FILESYSTEM}" ; then debug off; fi
}
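The recurring pattern in `bootstrap_filesystem` is to read the current owner with `stat -c %U` (and the mode with `stat -c %a`) and only `chown`/`chmod` when the value differs from the configured one, which now comes from `DBBACKUP_USER`/`DBBACKUP_GROUP` instead of a hard-coded `dbbackup`. A standalone sketch of that check (the path argument and the reuse of `DEFAULT_FILESYSTEM_PERMISSION` here are assumptions for illustration):

```bash
#!/usr/bin/env bash
# Sketch only: restates the ownership/permission check used above.
DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"}
backup_path=${1:-/backup}

mkdir -p "${backup_path}"

# chown only when the current owner is not the configured user
if [ "$(stat -c %U "${backup_path}")" != "${DBBACKUP_USER}" ]; then
    chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_path}"
fi

# chmod only when the current mode is not the configured one (600 by default)
permission=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
if [ "$(stat -c %a "${backup_path}")" != "${permission}" ]; then
    chmod -R "${permission}" "${backup_path}"
fi
```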
@@ -468,20 +468,24 @@ backup_couch() {
}
backup_influx() {
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${backup_job_db_name,,}" = "all" ] ; then
write_log debug "Preparing to back up everything"
db_names=justbackupeverything
else
db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
fi
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
case "${backup_job_db_influx_version,,}" in
1 )
for db in ${db_names}; do
prepare_dbbackup
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${db}" != "justbackupeverything" ] ; then bucket="-db ${db}" ; else db=all ; fi
backup_job_filename=influx_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx_${db}_${backup_job_db_host#*//}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
compression
pre_dbbackup "${db}"
write_log notice "Dumping Influx database: '${db}'"
@@ -505,7 +509,9 @@ backup_influx() {
2 )
for db in ${db_names}; do
prepare_dbbackup
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
if [ "${db}" != "justbackupeverything" ] ; then bucket="--bucket $db" ; else db=all ; fi
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}
backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
compression
@@ -515,10 +521,10 @@ backup_influx() {
run_as_user influx backup --org ${backup_job_db_user} ${bucket} --host ${backup_job_db_host}:${backup_job_db_port} --token ${backup_job_db_pass} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} --compression none "${TEMP_PATH}"/"${backup_job_filename_dir}"
exit_code=$?
check_exit_code backup "${backup_job_filename_dir}"
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
create_archive
backup_job_filename=influx2_${db}_${backup_job_db_host#*//}_${now}.tar${extension}
backup_job_filename_base=influx2_${db}_${backup_job_db_host#*//}
if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi
timer backup finish
file_encryption
generate_checksum
@@ -532,6 +538,7 @@ backup_influx() {
backup_mongo() {
prepare_dbbackup
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi
if [ "${backup_job_compression,,}" = "none" ] ; then
backup_job_filename=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}_${now}.archive
backup_job_filename_base=${dbtype}_${backup_job_db_name,,}_${backup_job_db_host,,}
@@ -546,6 +553,7 @@ backup_mongo() {
else
mongo_backup_parameter="--host ${backup_job_db_host} --port ${backup_job_db_port} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${backup_job_extra_opts} ${backup_job_extra_dump_opts}"
fi
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug off; fi
pre_dbbackup "${backup_job_db_name}"
write_log notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
if var_true "${DEBUG_BACKUP_MONGO}" ; then debug on; fi
@@ -577,8 +585,10 @@ backup_mssql() {
backup_job_filename_original=${backup_job_filename}
compression
pre_dbbackup all
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug on; fi
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
check_exit_code backup "${backup_job_filename}"
if var_true "${DEBUG_BACKUP_MSSQL}" ; then debug off; fi
timer backup finish
file_encryption
generate_checksum
@@ -612,7 +622,7 @@ backup_mssql() {
}
backup_mysql() {
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug on; fi
if var_true "${backup_job_mysql_events}" ; then
events="--events"
fi
@@ -636,7 +646,7 @@ backup_mysql() {
else
db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
fi
if var_true "${DEBUG_BACKUP_MYSQL}" ; then debug off; fi
write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${backup_job_split_db}" ; then
@@ -701,6 +711,7 @@ backup_pgsql() {
post_dbbackup "globals"
}
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
export PGPASSWORD=${backup_job_db_pass}
if [ -n "${backup_job_db_auth}" ] ; then
authdb=${backup_job_db_auth}
@@ -724,7 +735,7 @@ backup_pgsql() {
fi
if var_false "${_postgres_backup_globals}" && var_true "${backup_job_backup_pgsql_globals}" ; then _postgres_backup_globals=true; fi
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
write_log debug "Databases Found: $(echo ${db_names} | xargs | tr ' ' ',')"
if var_true "${backup_job_split_db}" ; then
@@ -756,6 +767,7 @@ backup_pgsql() {
compression
pre_dbbackup all
write_log notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
tmp_db_names=$(run_as_user psql -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
for r_db_name in $(echo $db_names | xargs); do
tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
@@ -764,7 +776,6 @@ backup_pgsql() {
for x_db_name in ${tmp_db_names} ; do
pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug on; fi
run_as_user ${play_fair} pg_dumpall -h ${backup_job_db_host} -p ${backup_job_db_port} -U ${backup_job_db_user} ${pgexclude_arg} ${backup_job_extra_opts} ${backup_job_extra_dump_opts} | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
exit_code=$?
if var_true "${DEBUG_BACKUP_PGSQL}" ; then debug off; fi
@@ -801,11 +812,13 @@ backup_redis() {
sleep 5
done
backup_job_filename_original=${backup_job_filename}
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi
compression
pre_dbbackup all
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
timer backup finish
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug on; fi
run_as_user ${compress_cmd} "${TEMP_PATH}/${backup_job_filename_original}"
if var_true "${DEBUG_BACKUP_REDIS}" ; then debug off; fi
timer backup finish
check_exit_code backup "${backup_job_filename}"
file_encryption
generate_checksum
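The bulk of this change brackets each backup_* function's setup with the same toggle: `debug on` while the matching `DEBUG_BACKUP_*` variable is truthy, `debug off` once the command line has been assembled. A minimal sketch of the pattern; `var_true`, `debug`, and `write_log` are the image's own helpers, so the stubs below are stand-ins only:

```bash
#!/usr/bin/env bash
# Stand-in stubs so the pattern runs on its own; the real helpers ship with the image.
var_true()  { [ "${1,,}" = "true" ]; }
debug()     { [ "$1" = "on" ] && set -x || set +x; }
write_log() { echo "[$1] ${*:2}"; }

backup_example() {
    # enable shell tracing only while this job's debug flag is set
    if var_true "${DEBUG_BACKUP_EXAMPLE}" ; then debug on; fi
    write_log debug "Preparing to back up everything"
    if var_true "${DEBUG_BACKUP_EXAMPLE}" ; then debug off; fi
}

DEBUG_BACKUP_EXAMPLE=TRUE backup_example
```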
@@ -1648,7 +1661,7 @@ process_limiter() {
}
run_as_user() {
s6-setuidgid dbbackup $@
s6-setuidgid "${DBBACKUP_USER}" $@
}
setup_mode() {
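`run_as_user` simply prefixes the command with `s6-setuidgid`, which switches to the named account before executing it; that account now comes from `DBBACKUP_USER` rather than the hard-coded `dbbackup`. A hedged sketch of the same wrapper (quoting `"$@"` is an illustrative hardening for arguments containing spaces; the diff itself keeps the unquoted `$@`):

```bash
# Sketch only: assumes s6-overlay's s6-setuidgid is on PATH, as in the image.
run_as_user() {
    s6-setuidgid "${DBBACKUP_USER:-dbbackup}" "$@"
}

# e.g. run_as_user mkdir -p "${LOG_PATH}/$(date +'%Y%m%d')"
```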