Mirror of https://github.com/tiredofit/docker-db-backup.git (synced 2025-12-22 13:44:08 +01:00)
Compare commits (5 commits):

- fddca646c8
- 68f954c59b
- 0ab0a6d182
- f6bf2993f7
- 5cf00a8b8e
CHANGELOG.md
@@ -1,3 +1,24 @@
## 4.0.7 2023-11-11 <dave at tiredofit dot ca>

### Added
- Add separate permissions for _FILESYSTEM_PATH

### Changed
- More output and debugging additions
- SQLite3 now backs up without running into file permission/access problems
- Cleanup old sqlite backups from temp directory
- Handle multiple SQLite3 backups concurrently


## 4.0.6 2023-11-10 <dave at tiredofit dot ca>

### Added
- Add additional DEBUG_ statements

### Changed
- Fix issue with Influx DB not properly detecting the correct version


## 4.0.5 2023-11-10 <dave at tiredofit dot ca>

### Added

@@ -323,11 +323,12 @@ Options that are related to the value of `DEFAULT_BACKUP_LOCATION`
If `DEFAULT_BACKUP_LOCATION` = `FILESYSTEM` then the following options are used:

| Variable | Description | Default |
| --------------------------------- | ----------------------------------------------------------------------------------------------------- | ------------------------------------- |
| ------------------------------------ | ----------------------------------------------------------------------------------------------------- | ------------------------------------- |
| `DEFAULT_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `DEFAULT_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
| `DEFAULT_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
| `DEFAULT_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DEFAULT_FILESYSTEM_PATH}/archive/` |
| `DEFAULT_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `600` |
| `DEFAULT_FILESYSTEM_PERMISSION` | Permissions to apply to files. | `600` |

###### S3

@@ -598,9 +599,10 @@ Options that are related to the value of `DB01_BACKUP_LOCATION`
If `DB01_BACKUP_LOCATION` = `FILESYSTEM` then the following options are used:

| Variable | Description | Default |
| ------------------------------ | ----------------------------------------------------------------------------------------------------- | --------------------------------- |
| --------------------------------- | ----------------------------------------------------------------------------------------------------- | --------------------------------- |
| `DB01_CREATE_LATEST_SYMLINK` | Create a symbolic link pointing to last backup in this format: `latest-(DB_TYPE)-(DB_NAME)-(DB_HOST)` | `TRUE` |
| `DB01_FILESYSTEM_PATH` | Directory where the database dumps are kept. | `/backup` |
| `DB01_FILESYSTEM_PATH_PERMISSION` | Permissions to apply to backup directory | `700` |
| `DB01_FILESYSTEM_ARCHIVE_PATH` | Optional Directory where the database dumps archives are kept | `${DB01_FILESYSTEM_PATH}/archive/` |
| `DB01_FILESYSTEM_PERMISSION` | Directory and File permissions to apply to files. | `600` |

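The two README hunks above document the new `DEFAULT_FILESYSTEM_PATH_PERMISSION` / `DB01_FILESYSTEM_PATH_PERMISSION` variables that 4.0.7 adds so the backup directory can carry a stricter mode than the dump files. A minimal sketch of setting them for a backup job; the variable names and defaults come from the tables above, everything else is illustrative:

# Sketch only: export the filesystem-related variables a backup job reads.
export DEFAULT_FILESYSTEM_PATH="/backup"                                      # where dumps land
export DEFAULT_FILESYSTEM_PATH_PERMISSION="700"                               # new in 4.0.7: mode for the directory itself
export DEFAULT_FILESYSTEM_PERMISSION="600"                                    # mode for the dump files
export DEFAULT_FILESYSTEM_ARCHIVE_PATH="${DEFAULT_FILESYSTEM_PATH}/archive/"  # optional archive location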
@@ -8,8 +8,11 @@ source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
bootstrap_variables backup_init {{BACKUP_NUMBER}}
bootstrap_variables parse_variables {{BACKUP_NUMBER}}
PROCESS_NAME="{{BACKUP_NUMBER}}-${backup_job_db_host}__${backup_job_db_name}"

if [ -z "${backup_job_db_name}" ]; then
    PROCESS_NAME="{{BACKUP_NUMBER}}${backup_job_db_host//\//_}"
else
    PROCESS_NAME="{{BACKUP_NUMBER}}-${backup_job_db_host//\//_}__${backup_job_db_name}"
fi

trap ctrl_c INT

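The new PROCESS_NAME logic above distinguishes jobs that have no database name (for example a SQLite3 file path used as the host) and flattens any slashes with bash pattern substitution. A standalone sketch of that expansion, with a made-up host value:

#!/usr/bin/env bash
# ${var//\//_} replaces every "/" in the value with "_".
backup_job_db_host="/data/app/library.sqlite3"   # hypothetical SQLite3 "host"
backup_job_db_name=""

if [ -z "${backup_job_db_name}" ]; then
    PROCESS_NAME="01${backup_job_db_host//\//_}"
else
    PROCESS_NAME="01-${backup_job_db_host//\//_}__${backup_job_db_name}"
fi

echo "${PROCESS_NAME}"    # prints: 01_data_app_library.sqlite3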
@@ -2,7 +2,7 @@

BACKUP_JOB_CONCURRENCY=${BACKUP_JOB_CONCURRENCY:-"1"}
DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"}
DBBACKUP_GROUP=${DBBACKUP_USER:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER
DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440}

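The one-line change above fixes a default-expansion slip: the old right-hand side expands `DBBACKUP_USER` (falling back to itself), so any `DBBACKUP_GROUP` supplied by the operator was ignored, while the corrected line expands `DBBACKUP_GROUP` and only falls back to the user name when the group is unset. A quick sketch of the difference:

#!/usr/bin/env bash
DBBACKUP_USER="dbbackup"
DBBACKUP_GROUP="backupadmins"                 # operator-supplied group (illustrative)

old=${DBBACKUP_USER:-"${DBBACKUP_USER}"}      # old expression: always yields "dbbackup"
new=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"}     # fixed expression: "backupadmins", or "dbbackup" when unset

echo "old=${old} new=${new}"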
@@ -15,6 +15,7 @@ DEFAULT_CREATE_LATEST_SYMLINK=${DEFAULT_CREATE_LATEST_SYMLINK:-"TRUE"}
DEFAULT_ENABLE_PARALLEL_COMPRESSION=${DEFAULT_ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
DEFAULT_ENCRYPT=${DEFAULT_ENCRYPT:-"FALSE"}
DEFAULT_FILESYSTEM_PATH=${DEFAULT_FILESYSTEM_PATH:-"/backup"}
DEFAULT_FILESYSTEM_PATH_PERMISSION=${DEFAULT_FILESYSTEM_PATH_PERMISSION:-"700"}
DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"}
DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"}

@@ -6,11 +6,11 @@ bootstrap_filesystem() {
        mkdir -p "${backup_job_filesystem_path}"
    fi
    if [ "$(stat -c %U "${backup_job_filesystem_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_path}" ; fi
    if [ "$(stat -c %a "${backup_job_filesystem_path}")" != "${backup_job_filesystem_permission}" ] ; then chmod -R "${backup_job_filesystem_permission}" "${backup_job_filesystem_path}" ; fi
    if [ "$(stat -c %a "${backup_job_filesystem_path}")" != "${backup_job_filesystem_path_permission}" ] ; then chmod "${backup_job_filesystem_path_permission}" "${backup_job_filesystem_path}" ; fi

    if [ -d "${backup_job_filesystem_archive_path}" ]; then
        if [ "$(stat -c %U "${backup_job_filesystem_archive_path}")" != "${DBBACKUP_USER}" ] ; then chown -R "${DBBACKUP_USER}":"${DBBACKUP_GROUP}" "${backup_job_filesystem_archive_path}" ; fi
        if [ "$(stat -c %a "${backup_job_filesystem_archive_path}")" != "${backup_job_filesystem_permission}" ] ; then chmod -R "${backup_job_filesystem_permission}" "${backup_job_filesystem_archive_path}" ; fi
        if [ "$(stat -c %a "${backup_job_filesystem_archive_path}")" != "${backup_job_filesystem_path_permission}" ] ; then chmod "${backup_job_filesystem_path_permission}" "${backup_job_filesystem_archive_path}" ; fi
    fi

    if [ ! -d "${LOG_PATH}" ]; then

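The changed lines split directory handling from file handling: the recursive chmod keyed on the file permission is replaced by a plain chmod keyed on the new *_FILESYSTEM_PATH_PERMISSION, so the directory can sit at 700 while dumps stay at 600. A reduced sketch of the stat-guarded pattern, with illustrative path, owner, and mode:

#!/usr/bin/env bash
# Only touch ownership/mode when they differ from the desired values (illustrative values).
target_dir="/tmp/backup-demo"
owner="$(id -un)"
dir_mode="700"

mkdir -p "${target_dir}"
if [ "$(stat -c %U "${target_dir}")" != "${owner}" ]; then chown "${owner}" "${target_dir}"; fi
if [ "$(stat -c %a "${target_dir}")" != "${dir_mode}" ]; then chmod "${dir_mode}" "${target_dir}"; fi
stat -c '%U %a %n' "${target_dir}"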
@@ -191,6 +191,7 @@ bootstrap_variables() {
    transform_backup_instance_variable "${backup_instance_number}" EXTRA_OPTS backup_job_extra_opts
    transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_ARCHIVE_PATH backup_job_filesystem_archive_path
    transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH backup_job_filesystem_path
    transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PATH_PERMISSION backup_job_filesystem_path_permission
    transform_backup_instance_variable "${backup_instance_number}" FILESYSTEM_PERMISSION backup_job_filesystem_permission
    transform_backup_instance_variable "${backup_instance_number}" GZ_RSYNCABLE backup_job_gz_rsyncable
    transform_backup_instance_variable "${backup_instance_number}" HOST backup_job_db_host

@@ -237,6 +238,14 @@ bootstrap_variables() {
    transform_backup_instance_variable "${backup_instance_number}" USER backup_job_db_user

    backup_job_backup_begin=$(echo "${backup_job_backup_begin}" | sed -e "s|'||g" -e 's|"||g')
    if var_true "${DEBUG_BACKUP_INSTANCE_VARIABLE}" ; then cat <<EOF
## BEGIN Variable Dump $(TZ=${TIMEZONE} date)

$(cat ${backup_instance_vars})

## END
EOF
    fi
    rm -rf "${backup_instance_vars}"
}

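The added block dumps the per-instance variables to the log when `DEBUG_BACKUP_INSTANCE_VARIABLE` is truthy (`var_true` is a helper provided by the image's base scripts). A self-contained sketch of the same gated heredoc dump, with a plain case test standing in for `var_true` and a made-up flag name and temp file:

#!/usr/bin/env bash
DEBUG_DUMP="${DEBUG_DUMP:-TRUE}"                 # hypothetical flag name, defaulted on for the demo
vars_file="$(mktemp)"
printenv | grep '^DB01_' > "${vars_file}" || true

case "${DEBUG_DUMP,,}" in
    true|1|yes)
        cat <<EOF
## BEGIN Variable Dump $(date)

$(cat "${vars_file}")

## END
EOF
        ;;
esac
rm -f "${vars_file}"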
@@ -315,7 +324,7 @@ bootstrap_variables() {
## Check if Variable is Defined
## Usage: check_var transformed_varname real_varname "Description"
    output_off
    print_debug "Looking for existence of $2 environment variable"
    print_debug "[parse_variables] Looking for existence of $2 environment variable"
    if [ ! -v "$1" ]; then
        print_error "No '$3' Entered! - Set '\$$2' environment variable - Halting Backup Number ${v_instance}"
        s6-svc -d /var/run/s6/legacy-services/dbbackup-"${v_instance}"

@@ -451,12 +460,7 @@ backup_couch() {
    prepare_dbbackup
    backup_job_filename=couch_${backup_job_db_name}_${backup_job_db_host#*//}_${now}.txt
    backup_job_filename_base=couch_${backup_job_db_name}_${backup_job_db_host#*//}
    compression
    pre_dbbackup ${backup_job_db_name}
    write_log notice "Dumping CouchDB database: '${backup_job_db_name}' ${compression_string}"
    if var_true "${DEBUG_BACKUP_COUCH}" ; then debug on; fi
    run_as_user curl -sSL -X GET ${backup_job_db_host}:${backup_job_db_port}/${backup_job_db_name}/_all_docs?include_docs=true | ${compress_cmd} | run_as_user tee "${TEMP_PATH}"/"${backup_job_filename}" > /dev/null
    exit_code=$?
    compression
    if var_true "${DEBUG_BACKUP_COUCH}" ; then debug off; fi
    check_exit_code backup "${backup_job_filename}"
    timer backup finish

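The CouchDB dump above streams the curl output through the configured compressor into the temp file via tee, so nothing is ever written uncompressed. A minimal standalone version of that pipeline, with gzip standing in for ${compress_cmd} and made-up connection details:

#!/usr/bin/env bash
set -o pipefail
# Illustrative values only; the real job derives these from the backup instance variables.
db_host="http://couchdb.example.com"
db_port="5984"
db_name="mydb"
temp_path="/tmp"
outfile="couch_${db_name}_$(date +%Y%m%d-%H%M%S).txt.gz"

curl -sSL -X GET "${db_host}:${db_port}/${db_name}/_all_docs?include_docs=true" \
    | gzip -9 \
    | tee "${temp_path}/${outfile}" > /dev/null
echo "pipeline exit code: $?"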
@@ -470,15 +474,16 @@ backup_couch() {
backup_influx() {
    if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi
    if [ "${backup_job_db_name,,}" = "all" ] ; then
        write_log debug "Preparing to back up everything"
        write_log debug "[backup_influx] Preparing to back up everything"
        db_names=justbackupeverything
    else
        db_names=$(echo "${backup_job_db_name}" | tr ',' '\n')
    fi
    if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug off; fi

    case "${backup_job_db_influx_version,,}" in
    case "${backup_job_influx_version,,}" in
        1 )
            print_debug "[backup_influx] Influx DB Version 1 selected"
            for db in ${db_names}; do
                prepare_dbbackup
                if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi

@@ -507,6 +512,7 @@ backup_influx() {
            done
            ;;
        2 )
            print_debug "[backup_influx] Influx DB Version 2 selected"
            for db in ${db_names}; do
                prepare_dbbackup
                if var_true "${DEBUG_BACKUP_INFLUX}" ; then debug on; fi

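The case statement here matches on the detected Influx version after lowercasing it with `${var,,}`, so the per-version branches are chosen case-insensitively; the added print_debug lines then say which branch was taken. A tiny demonstration of that expansion, with a made-up value:

#!/usr/bin/env bash
# ${var,,} lowercases the whole value (bash 4+), making the case labels effectively case-insensitive.
backup_job_influx_version="2"        # hypothetical detected value
case "${backup_job_influx_version,,}" in
    1 ) echo "taking the InfluxDB version 1 branch" ;;
    2 ) echo "taking the InfluxDB version 2 branch" ;;
    * ) echo "unknown InfluxDB version" ;;
esac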
@@ -833,16 +839,21 @@ backup_sqlite3() {
        db="${db%.*}"
        backup_job_filename=sqlite3_${db}_${now}.sqlite3
        backup_job_filename_base=sqlite3_${db}.sqlite3
        compression
        pre_dbbackup "${db}"
        write_log notice "Dumping sqlite3 database: '${backup_job_db_host}' ${compression_string}"
        if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug on; fi
        silent run_as_user ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${TEMP_PATH}/backup.sqlite3'"
        silent ${play_fair} sqlite3 "${backup_job_db_host}" ".backup '${TEMP_PATH}/backup_${now}.sqlite3'"
        exit_code=$?
        check_exit_code backup "${backup_job_filename}"
        run_as_user ${play_fair} cat "${TEMP_PATH}"/backup.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}/${backup_job_filename}" > /dev/null
        timer backup finish
        if [ ! -f "${TEMP_PATH}"/backup_${now}.sqlite3 ] ; then
            print_error "SQLite3 backup failed! Exitting"
            return 1
        fi
        compression
        run_as_user ${play_fair} cat "${TEMP_PATH}"/backup_${now}.sqlite3 | ${dir_compress_cmd} | run_as_user tee "${TEMP_PATH}/${backup_job_filename}" > /dev/null
        rm -rf "${TEMP_PATH}"/backup_${now}.sqlite3
        if var_true "${DEBUG_BACKUP_SQLITE3}" ; then debug off; fi
        timer backup finish
        file_encryption
        generate_checksum
        move_dbbackup

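The reworked SQLite3 path first snapshots the database with sqlite3's .backup command into a temp file named with the current timestamp, bails out if that file never appeared, then compresses it and deletes the uncompressed copy; the unique temp name is what lets several SQLite3 jobs run at once. A reduced sketch of that flow, with illustrative paths and gzip standing in for the configured compressor:

#!/usr/bin/env bash
set -euo pipefail
# Illustrative paths; the real job derives these from the backup instance variables.
db_file="/data/app.sqlite3"
temp_path="/tmp"
now="$(date +%Y%m%d-%H%M%S)"
outfile="sqlite3_app_${now}.sqlite3.gz"

# 1. Take a consistent snapshot with sqlite3's online backup command.
sqlite3 "${db_file}" ".backup '${temp_path}/backup_${now}.sqlite3'"

# 2. Refuse to continue if the snapshot was not written.
if [ ! -f "${temp_path}/backup_${now}.sqlite3" ]; then
    echo "SQLite3 backup failed" >&2
    exit 1
fi

# 3. Compress the snapshot and clean up the uncompressed copy.
gzip -c "${temp_path}/backup_${now}.sqlite3" > "${temp_path}/${outfile}"
rm -f "${temp_path}/backup_${now}.sqlite3"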
@@ -1049,6 +1060,7 @@ compression() {

    case "${backup_job_compression,,}" in
        bz* )
            print_debug "[compression] Selected BZIP"
            compress_cmd="${play_fair} pbzip2 -q -${backup_job_compression_level} -p${backup_job_parallel_compression_threads} "
            compression_type="bzip2"
            dir_compress_cmd=${compress_cmd}

@@ -1057,6 +1069,7 @@ compression() {
            backup_job_filename=${backup_job_filename}.bz2
            ;;
        gz* )
            print_debug "[compression] Selected GZIP"
            compress_cmd="${play_fair} pigz -q -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} ${gz_rsyncable}"
            compression_type="gzip"
            extension=".gz"

@@ -1065,6 +1078,7 @@ compression() {
            backup_job_filename=${backup_job_filename}.gz
            ;;
        xz* )
            print_debug "[compression] Selected XZIP"
            compress_cmd="${play_fair} pixz -${backup_job_compression_level} -p ${backup_job_parallel_compression_threads} "
            compression_type="xzip"
            dir_compress_cmd=${compress_cmd}

@@ -1073,6 +1087,7 @@ compression() {
            backup_job_filename=${backup_job_filename}.xz
            ;;
        zst* )
            print_debug "[compression] Selected ZSTD"
            compress_cmd="${play_fair} zstd -q -q --rm -${backup_job_compression_level} -T${backup_job_parallel_compression_threads} ${gz_rsyncable}"
            compression_type="zstd"
            dir_compress_cmd=${compress_cmd}

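Each branch above only assembles a compressor command line (pbzip2, pigz, pixz, or zstd with the configured level and thread count) plus an extension; the dump itself is piped through ${compress_cmd} elsewhere. A reduced sketch of building and reusing such a command, with gzip standing in for the parallel compressors and made-up values:

#!/usr/bin/env bash
set -o pipefail
# Build the compressor invocation from configuration, then reuse it in a pipeline.
compression_level="6"
compress_cmd="gzip -${compression_level}"          # stand-in; the image prefers pbzip2/pigz/pixz/zstd
outfile="/tmp/dump_$(date +%s).sql.gz"

printf 'CREATE TABLE demo (id INTEGER);\n' \
    | ${compress_cmd} \
    | tee "${outfile}" > /dev/null
ls -l "${outfile}"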
@@ -1121,9 +1136,10 @@ create_schedulers() {
backup() {
    bootstrap_variables upgrade BACKUP
    local backup_instances=$(printenv | sort | grep -c "^DB[0-9]._HOST")
    print_debug "[create_schedulers] Found '${backup_instances}' DB_HOST instances"
    if [ -n "${DB_HOST}" ] && [ "${backup_instances}" ]; then
        backup_instances=1;
        print_debug "Detected using old DB_ variables"
        print_debug "[create_schedulers] Detected using old DB_ variables"
    fi

    for (( instance = 01; instance <= backup_instances; )) ; do

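create_schedulers counts the DBxx_HOST style variables in the environment to decide how many backup instances to schedule, falling back to a single instance when the legacy DB_HOST form is present. A small sketch of that detection using the same grep pattern; the exported hosts are made up:

#!/usr/bin/env bash
# Made-up environment for the demonstration.
export DB01_HOST="postgres1.example.com"
export DB02_HOST="postgres2.example.com"

backup_instances=$(printenv | sort | grep -c "^DB[0-9]._HOST")
echo "found ${backup_instances} backup instance(s)"    # prints: found 2 backup instance(s)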
@@ -1179,7 +1195,11 @@ db_backup_container_init() {
debug() {
    case "${1}" in
        off)
            backup_job_log_level=${_original_job_log_level}
            CONTAINER_LOG_LEVEL=${_original_container_log_level}
            DEBUG_MODE=${_original_debug_mode}
            SHOW_OUTPUT=${_original_show_output}

            if var_true "${DEBUG_MODE}" ; then
                set -x
            else

@@ -1187,9 +1207,22 @@ debug() {
            fi
            ;;
        on)
            if [ -z "${_original_container_log_level}" ]; then
                _original_container_log_level="${CONTAINER_LOG_LEVEL}"
            fi
            if [ -z "${_original_job_log_level}" ]; then
                _original_job_log_level="${backup_job_log_level}"
            fi

            if [ -z "${_original_debug_mode}" ]; then
                _original_debug_mode="${DEBUG_MODE}"
            fi
            if [ -z "${_original_show_output}" ]; then
                _original_show_output="${SHOW_OUTPUT}"
            fi
            backup_job_log_level=DEBUG
            CONTAINER_LOG_LEVEL=DEBUG
            SHOW_OUTPUT=TRUE
            set -x
            ;;
    esac

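debug on snapshots the current log level, debug mode, and output settings into _original_* variables before forcing everything to DEBUG, and debug off restores them; guarding each snapshot with [ -z ... ] means nested "debug on" calls do not overwrite the saved values. A trimmed sketch of that save/restore pattern (the real function also restores DEBUG_MODE and SHOW_OUTPUT):

#!/usr/bin/env bash
CONTAINER_LOG_LEVEL="notice"

debug() {
    case "${1}" in
        on)
            # Only snapshot the first time, so nested calls keep the original value.
            if [ -z "${_original_container_log_level:-}" ]; then
                _original_container_log_level="${CONTAINER_LOG_LEVEL}"
            fi
            CONTAINER_LOG_LEVEL=DEBUG
            set -x
            ;;
        off)
            CONTAINER_LOG_LEVEL="${_original_container_log_level}"
            set +x
            ;;
    esac
}

debug on
: "doing verbose work at level ${CONTAINER_LOG_LEVEL}"
debug off
echo "restored level: ${CONTAINER_LOG_LEVEL}"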
@@ -1199,7 +1232,7 @@ file_encryption() {
    if var_true "${DEBUG_FILE_ENCRYPTION}" ; then debug on; fi
    if var_true "${backup_job_encrypt}" ; then
        if [ "${exit_code}" = "0" ] ; then
            print_debug "Encrypting"
            print_debug "[file_encryption] Encrypting"
            output_off
            if [ -n "${backup_job_encrypt_passphrase}" ] && [ -n "${backup_job_encrypt_pubkey}" ]; then
                print_error "Can't encrypt as both ENCRYPT_PASSPHRASE and ENCRYPT_PUBKEY exist!"

@@ -1220,6 +1253,7 @@ file_encryption() {
            fi
        fi
        if [ -f "${TEMP_PATH}"/"${backup_job_filename}".gpg ]; then
            print_debug "[file_encryption] Deleting original file"
            rm -rf "${TEMP_PATH:?}"/"${backup_job_filename:?}"
            backup_job_filename="${backup_job_filename}.gpg"

@@ -1395,23 +1429,23 @@ EOF
    for notification_type in $notification_types ; do
        case "${notification_type,,}" in
            "custom" )
                print_debug "Sending Notification via custom"
                print_debug "[notify] Sending Notification via custom"
                notification_custom "${1}" "${2}" "${3}" "${4}" "${5}"
                ;;
            "email" | "mail" )
                print_debug "Sending Notification via email"
                print_debug "[notify] Sending Notification via email"
                notification_email "${1}" "${2}" "${3}" "${4}" "${5}"
                ;;
            "matrix" )
                print_debug "Sending Notification via Matrix"
                print_debug "[notify] Sending Notification via Matrix"
                notification_matrix "${1}" "${2}" "${3}" "${4}" "${5}"
                ;;
            "mattermost" )
                print_debug "Sending Notification via Mattermost"
                print_debug "[notify] Sending Notification via Mattermost"
                notification_mattermost "${1}" "${2}" "${3}" "${4}" "${5}"
                ;;
            "rocketchat" )
                print_debug "Sending Notification via Rocketchat"
                print_debug "[notify] Sending Notification via Rocketchat"
                notification_rocketchat "${1}" "${2}" "${3}" "${4}" "${5}"
                ;;
            * )

@@ -1454,8 +1488,37 @@ move_dbbackup() {
        write_log debug "Moving backup to filesystem"
        run_as_user mkdir -p "${backup_job_filesystem_path}"
        if [ "${backup_job_checksum,,}" != "none" ] ; then run_as_user mv "${TEMP_PATH}"/*."${checksum_extension}" "${backup_job_filesystem_path}"/ ; fi
        if var_true "${DEBUG_MOVE_DBBACKUP}"; then
            cat <<EOF
## BEGIN Before Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
##

$(ls -l "${TEMP_PATH}"/*)

## END
EOF
        fi
        run_as_user mv "${TEMP_PATH}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/"${backup_job_filename}"
        move_exit_code=$?
        if var_true "${DEBUG_MOVE_DBBACKUP}"; then
            cat <<EOF
## BEGIN After Moving file from TEMP_PATH $(TZ=${TIMEZONE} date)
##

$(ls -l "${TEMP_PATH}"/*)

## END

## BEGIN After Moving file to _FILESYSTEM_PATH $(TZ=${TIMEZONE} date)
##

$(ls -l "${backup_job_filesystem_path}"/*)

## END

EOF
        fi

        if var_true "${backup_job_create_latest_symlink}" ; then
            run_as_user ln -sfr "${backup_job_filesystem_path}"/"${backup_job_filename}" "${backup_job_filesystem_path}"/latest-"${backup_job_filename_base}"
        fi

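When *_CREATE_LATEST_SYMLINK is enabled, the block above refreshes a latest-<base name> link next to the freshly moved dump; ln -sfr replaces any existing link and stores a relative target, so the link still resolves if the backup directory is mounted somewhere else. A small illustration with made-up names:

#!/usr/bin/env bash
backup_dir="/tmp/backup-demo"
mkdir -p "${backup_dir}"
dump="pgsql_mydb_db01_$(date +%Y%m%d-%H%M%S).sql.gz"
touch "${backup_dir}/${dump}"

# -s symbolic, -f replace an existing link, -r make the stored target relative to the link's directory.
ln -sfr "${backup_dir}/${dump}" "${backup_dir}/latest-pgsql_mydb_db01.sql.gz"
ls -l "${backup_dir}"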
@@ -1661,7 +1724,7 @@ process_limiter() {
}

run_as_user() {
    s6-setuidgid "${DBBACKUP_USER}" $@
    sudo -u "${DBBACKUP_USER}" $@
}

setup_mode() {

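run_as_user drops privileges for individual commands; the two adjacent lines above are the before and after of swapping the s6-setuidgid wrapper for sudo -u. A minimal version of such a wrapper is sketched below; quoting "$@" (rather than the bare $@ above) is a defensive choice of this sketch so arguments containing spaces survive intact:

#!/usr/bin/env bash
DBBACKUP_USER="${DBBACKUP_USER:-dbbackup}"

run_as_user() {
    # Quoting "$@" keeps each original argument intact even if it contains spaces.
    sudo -u "${DBBACKUP_USER}" "$@"
}

run_as_user id -un    # should print the backup user's name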
@@ -1894,18 +1957,18 @@ timer() {
            ;;
        datetime)
            time_begin=$(date -d "${backup_job_backup_begin}" +%s)
            print_debug "BACKUP_BEGIN time = ${time_begin}"
            print_debug "[timer] [datetime] BACKUP_BEGIN time = ${time_begin}"
            time_wait=$(( time_begin - time_current ))
            print_debug "Difference in seconds: ${time_wait}"
            print_debug "[timer] [datetime] Difference in seconds: ${time_wait}"

            if (( ${time_wait} < 0 )); then
                time_wait=$(( (${time_wait} + (${backup_job_backup_interval} - 1)) / (${backup_job_backup_interval} * 60) ))
                time_wait=$(( ${time_wait} * -1 ))
                print_debug "Difference in seconds (rounded) time_wait is in the past : ${time_wait}"
                print_debug "[timer] [datetime] Difference in seconds (rounded) time_wait is in the past : ${time_wait}"
            fi

            time_future=$(( time_current + time_wait ))
            print_debug "Future execution time = ${time_future}"
            print_debug "[timer] [datetime] Future execution time = ${time_future}"
            ;;
        job)
            case "${2}" in
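In the datetime branch above, a start time that is already in the past produces a negative time_wait, which is then folded by the interval and negated. A worked example with concrete numbers (600 seconds in the past, 1440-minute interval), using the same arithmetic:

#!/usr/bin/env bash
# Worked example of the rounding above with illustrative numbers.
time_wait=-600
backup_job_backup_interval=1440

if (( time_wait < 0 )); then
    time_wait=$(( (time_wait + (backup_job_backup_interval - 1)) / (backup_job_backup_interval * 60) ))
    time_wait=$(( time_wait * -1 ))
fi
echo "time_wait=${time_wait}"    # (-600 + 1439) / 86400 = 0, so no extra wait is added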