Release 2.1.0 - See CHANGELOG.md

Dave Conroy
2020-08-29 07:37:03 -07:00
parent b57683e992
commit 30c56229cf
4 changed files with 143 additions and 119 deletions


@@ -35,7 +35,7 @@ SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SPLIT_DB=${SPLIT_DB:-FALSE}
tmpdir=/tmp/backups
if [ "BACKUP_TYPE" = "S3" ] || [ "BACKUP_TYPE" = "s3" ] || [ "BACKUP_TYPE" = "MINIO" ] || [ "BACKUP_TYPE" = "minio" ] ; then
if [ "$BACKUP_TYPE" = "S3" ] || [ "$BACKUP_TYPE" = "s3" ] || [ "$BACKUP_TYPE" = "MINIO" ] || [ "$BACKUP_TYPE" = "minio" ] ; then
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
sanity_var S3_HOST "S3 Host"
sanity_var S3_BUCKET "S3 Bucket"
@@ -53,7 +53,7 @@ if [ "$1" = "NOW" ]; then
fi
### Set Compression Options
if var_true $PARALLEL_COMPRESSION ; then
if var_true "$PARALLEL_COMPRESSION" ; then
bzip="pbzip2 -${COMPRESSION_LEVEL}"
gzip="pigz -${COMPRESSION_LEVEL}"
xzip="pixz -${COMPRESSION_LEVEL}"
@@ -104,13 +104,14 @@ backup_couch() {
target=couch_${dbname}_${dbhost}_${now}.txt
compression
curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true | $dumpoutput > ${tmpdir}/${target}
exit_code=$?
generate_md5
move_backup
}
backup_mysql() {
if var_true $SPLIT_DB ; then
DATABASES=`mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema`
if var_true "$SPLIT_DB" ; then
DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema)
for db in $DATABASES; do
if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
@@ -118,6 +119,7 @@ backup_mysql() {
target=mysql_${db}_${dbhost}_${now}.sql
compression
mysqldump --max-allowed-packet=512M -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} --databases $db | $dumpoutput > ${tmpdir}/${target}
exit_code=$?
generate_md5
move_backup
fi
@@ -125,6 +127,7 @@ backup_mysql() {
else
compression
mysqldump --max-allowed-packet=512M -A -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
exit_code=$?
generate_md5
move_backup
fi
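A note on the exit_code=$? lines added above: when the dump is piped through a compressor, $? reflects the last command in the pipeline (the $dumpoutput compressor), not the dump tool itself, unless set -o pipefail is enabled. A minimal sketch of capturing the dump's own status instead, assuming bash and the variable names used in this script:

compression
mysqldump --max-allowed-packet=512M -A -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
exit_code=${PIPESTATUS[0]}   # exit status of mysqldump, not of the compressor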
@@ -139,6 +142,7 @@ backup_influx() {
fi
for DB in $DB_NAME; do
influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${tmpdir}/${target}
exit_code=$?
generate_md5
move_backup
done
@@ -152,7 +156,8 @@ backup_mongo() {
target=${dbtype}_${dbname}_${dbhost}_${now}.archivegz
mongo_compression="--gzip"
fi
mongodump --archive=${tmpdir}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
mongodump --archive=${tmpdir}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
exit_code=$?
cd ${tmpdir}
generate_md5
move_backup
@@ -161,12 +166,13 @@ backup_mongo() {
backup_pgsql() {
if var_true $SPLIT_DB ; then
export PGPASSWORD=${dbpass}
DATABASES=`psql -h $dbhost -U $dbuser -p ${dbport} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' `
DATABASES=$(psql -h $dbhost -U $dbuser -p ${dbport} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
for db in $DATABASES; do
print_info "Dumping database: $db"
target=pgsql_${db}_${dbhost}_${now}.sql
compression
pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
exit_code=$?
generate_md5
move_backup
done
@@ -174,6 +180,7 @@ backup_pgsql() {
export PGPASSWORD=${dbpass}
compression
pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
exit_code=$?
generate_md5
move_backup
fi
@@ -207,60 +214,60 @@ check_availability() {
COUNTER=0
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
let COUNTER+=5
print_warn "CouchDB Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
(( COUNTER+=5 ))
print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"influx" )
COUNTER=0
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
let COUNTER+=5
print_warn "InfluxDB Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
(( COUNTER+=5 ))
print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"mongo" )
COUNTER=0
while ! (nc -z ${dbhost} ${dbport}) ; do
sleep 5
let COUNTER+=5
print_warn "Mongo Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
(( COUNTER+=5 ))
print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"mysql" )
COUNTER=0
while true; do
mysqlcmd='mysql -u'${dbuser}' -P '${dbport}' -h '${dbhost}' -p'${dbpass}
out="`$mysqlcmd -e "SELECT COUNT(*) FROM information_schema.FILES;" 2>&1`"
out="$($mysqlcmd -e "SELECT COUNT(*) FROM information_schema.FILES;" 2>&1)"
echo "$out" | grep -E "COUNT|Enter" 2>&1 > /dev/null
if [ $? -eq 0 ]; then
:
break
fi
print_warn "MySQL/MariaDB Server "$dbhost" is not accessible, retrying.. ($COUNTER seconds so far)"
print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
sleep 5
let COUNTER+=5
(( COUNTER+=5 ))
done
;;
"pgsql" )
COUNTER=0
export PGPASSWORD=${dbpass}
export PGPASSWORD=${dbpass}
until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
do
sleep 5
let COUNTER+=5
print_warn "Postgres Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
(( COUNTER+=5 ))
print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
"redis" )
COUNTER=0
while ! (nc -z ${dbhost} ${dbport}) ; do
while ! (nc -z "${dbhost}" "${dbport}") ; do
sleep 5
let COUNTER+=5
print_warn "Redis Host '"$dbhost"' is not accessible, retrying.. ($COUNTER seconds so far)"
(( COUNTER+=5 ))
print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
done
;;
esac
esac
}
compression() {
@@ -292,11 +299,11 @@ compression() {
}
generate_md5() {
if var_true $MD5 ; then
if var_true "$MD5" ; then
print_notice "Generating MD5 for ${target}"
cd $tmpdir
md5sum ${target} > ${target}.md5
MD5VALUE=$(md5sum ${target} | awk '{ print $1}')
md5sum "${target}" > "${target}".md5
MD5VALUE=$(md5sum "${target}" | awk '{ print $1}')
fi
}
@@ -320,27 +327,27 @@ move_backup() {
FILESIZE=$(du -h "${tmpdir}/${target}" | awk '{ print $1}')
print_notice "Backup of ${target} created with the size of ${FILESIZE}"
fi
case "${BACKUP_LOCATION}" in
"FILE" | "file" | "filesystem" | "FILESYSTEM" )
mkdir -p ${DB_DUMP_TARGET}
mv ${tmpdir}/*.md5 ${DB_DUMP_TARGET}/
mv ${tmpdir}/${target} ${DB_DUMP_TARGET}/${target}
mkdir -p "${DB_DUMP_TARGET}"
mv ${tmpdir}/*.md5 "${DB_DUMP_TARGET}"/
mv ${tmpdir}/"${target}" "${DB_DUMP_TARGET}"/"${target}"
;;
"S3" | "s3" | "MINIO" | "minio" )
s3_content_type="application/octet-stream"
if [ "$S3_URI_STYLE" = "VIRTUALHOST" ] || [ "$S3_URI_STYLE" = "VHOST" ] [ "$S3_URI_STYLE" = "virtualhost" ] [ "$S3_URI_STYLE" = "vhost" ] ; then
if [ "$S3_URI_STYLE" = "VIRTUALHOST" ] || [ "$S3_URI_STYLE" = "VHOST" ] || [ "$S3_URI_STYLE" = "virtualhost" ] || [ "$S3_URI_STYLE" = "vhost" ] ; then
s3_url="${S3_BUCKET}.${S3_HOST}"
else
s3_url="${S3_HOST}/${S3_BUCKET}"
fi
if var_true $MD5 ; then
if var_true "$MD5" ; then
s3_date="$(LC_ALL=C date -u +"%a, %d %b %Y %X %z")"
s3_md5="$(libressl md5 -binary < "${tmpdir}/${target}.md5" | base64)"
sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${target}.md5" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
print_debug "Uploading ${target}.md5 to S3"
curl -T "${tmpdir}/${target}.md5" ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${target}.md5 \
curl -T "${tmpdir}/${target}.md5" "${S3_PROTOCOL}"://"${s3_url}"/"${S3_PATH}"/"${target}".md5 \
-H "Date: $date" \
-H "Authorization: AWS ${S3_KEY_ID}:$sig" \
-H "Content-Type: ${s3_content_type}" \
@@ -351,21 +358,21 @@ move_backup() {
s3_md5="$(libressl md5 -binary < "${tmpdir}/${target}" | base64)"
sig="$(printf "PUT\n$s3_md5\n${s3_content_type}\n$s3_date\n/$S3_BUCKET/$S3_PATH/${target}" | libressl sha1 -binary -hmac "${S3_KEY_SECRET}" | base64)"
print_debug "Uploading ${target} to S3"
curl -T ${tmpdir}/${target} ${S3_PROTOCOL}://${s3_url}/${S3_PATH}/${target} \
curl -T ${tmpdir}/"${target}" "${S3_PROTOCOL}"://"${s3_url}"/"${S3_PATH}"/"${target}" \
-H "Date: $s3_date" \
-H "Authorization: AWS ${S3_KEY_ID}:$sig" \
-H "Content-Type: ${s3_content_type}" \
-H "Content-MD5: ${s3_md5}"
rm -rf ${tmpdir}/*.md5
rm -rf ${tmpdir}/${target}
rm -rf ${tmpdir}/"${target}"
;;
esac
}
### Container Startup
print_info "Backup routines Initialized on `date`"
print_debug "Backup routines Initialized on $(date)"
### Wait for Next time to start backup
current_time=$(date +"%s")
@@ -381,6 +388,7 @@ print_info "Backup routines Initialized on `date`"
waittime=$(($target_time - $current_time))
fi
print_notice "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
sleep $waittime
@@ -424,16 +432,16 @@ print_info "Backup routines Initialized on `date`"
esac
### Zabbix
if var_true $ENABLE_ZABBIX ; then
if var_true "$ENABLE_ZABBIX" ; then
print_notice "Sending Backup Statistics to Zabbix"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o `stat -c%s ${DB_DUMP_TARGET}/${target}`
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o `date -r ${DB_DUMP_TARGET}/${target} +'%s'`
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
fi
### Automatic Cleanup
if [[ -n "$DB_CLEANUP_TIME" ]]; then
print_notice "Cleaning up old backups"
find $DB_DUMP_TARGET/ -mmin +$DB_CLEANUP_TIME -iname "*" -exec rm {} \;
find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
fi
### Post Backup Custom Script Support
@@ -441,11 +449,11 @@ print_info "Backup routines Initialized on `date`"
print_notice "Found Custom Scripts to Execute"
for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
print_notice "Running Script ${f}"
## script DB_TYPE DB_HOST DB_NAME DATE BACKUP_FILENAME FILESIZE MD5_VALUE
chmod +x ${f}
${f} "${dbtype}" "${dbhost}" "${dbname}" "${now_date}" "${now_time}" "${target}" "${FILESIZE}" "${MD5VALUE}"
## script EXIT_CODE DB_TYPE DB_HOST DB_NAME DATE BACKUP_FILENAME FILESIZE MD5_VALUE
chmod +x "${f}"
${f} "$"{exit_code}"" "${dbtype}" "${dbhost}" "${dbname}" "${now_date}" "${now_time}" "${target}" "${FILESIZE}" "${MD5VALUE}"
done
fi
fi
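For illustration, a minimal custom script that consumes the new argument order (exit code first), matching the invocation above; the filename and notification text are hypothetical:

#!/bin/bash
# /assets/custom-scripts/99-notify.sh (hypothetical example)
# Positional arguments, as passed by the invocation above:
#   1=exit_code 2=dbtype 3=dbhost 4=dbname 5=date 6=time 7=backup_filename 8=filesize 9=md5_value
exit_code="$1"; dbtype="$2"; dbhost="$3"; dbname="$4"
backup_date="$5"; backup_time="$6"; backup_file="$7"; filesize="$8"; md5_value="$9"

if [ "${exit_code}" -ne 0 ]; then
    echo "[${backup_date} ${backup_time}] ${dbtype} backup of ${dbname}@${dbhost} failed (exit ${exit_code})" >&2
else
    echo "[${backup_date} ${backup_time}] ${backup_file} (${filesize}, md5 ${md5_value}) completed successfully"
fi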
### Go back to Sleep until next Backup time
if var_true $MANUAL ; then