Compare commits


2 Commits
3.5.6 ... 3.5.3

Author              SHA1         Message                            Date
dave@tiredofit.ca   9159783691   Release 3.5.3 - See CHANGELOG.md   2022-10-12 08:42:05 -07:00
dave@tiredofit.ca   7e5e9d308d   Release 3.5.3 - See CHANGELOG.md   2022-10-12 08:39:31 -07:00
6 changed files with 18 additions and 41 deletions

View File

@@ -19,6 +19,7 @@ jobs:
         id: prep
         run: |
           DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
+          set -x
           if [[ $GITHUB_REF == refs/heads/* ]]; then
             if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
               BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"

View File

@@ -19,6 +19,7 @@ jobs:
         id: prep
         run: |
           DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
+          set -x
           if [[ $GITHUB_REF == refs/heads/* ]]; then
             if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
               BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
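Both workflow hunks above make the same one-line change: relative to 3.5.6, the 3.5.3 side carries an extra `set -x` in the image-prep step (in other words, 3.5.6 dropped it). `set -x` turns on shell tracing, so bash prints every expanded command to stderr before running it, which is handy for debugging CI steps but noisy in release logs. A minimal, self-contained illustration, using a stand-in repository name since `GITHUB_REPOSITORY` is only set inside Actions:

    #!/usr/bin/env bash
    GITHUB_REPOSITORY="tiredofit/docker-db-backup"   # stand-in for the value Actions injects

    set -x                                           # tracing on: each command is echoed with a "+" prefix
    DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}       # traced as: + DOCKER_IMAGE=tiredofit/db-backup
    set +x                                           # tracing off

    echo "${DOCKER_IMAGE}"                           # -> tiredofit/db-backup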

View File

@@ -1,21 +1,3 @@
-## 3.5.6 2022-11-15 <dave at tiredofit dot ca>
-   ### Changed
-      - Add failure if DB_TYPE empty or malformed
-
-## 3.5.5 2022-10-18 <dave at tiredofit dot ca>
-   ### Changed
-      - Fix for S3 backups and trailing slashes (@greena13)
-
-## 3.5.4 2022-10-13 <dave at tiredofit dot ca>
-   ### Changed
-      - Fix for Influx DB 1 backups when compression enabled
-
 ## 3.5.3 2022-10-12 <dave at tiredofit dot ca>
    ### Changed

View File

@@ -83,4 +83,4 @@ RUN set -ex && \
     rm -rf /root/.cache /tmp/* /var/cache/apk/*

 ### S6 Setup
-COPY install /
+ADD install /

View File

@@ -189,19 +189,19 @@ Your Organization will be mapped to `DB_USER` and your root token will need to b
 If `BACKUP_LOCATION` = `S3` then the following options are used.

 | Parameter             | Description                                                                                | Default |
-|-----------------------|------------------------------------------------------------------------------------------|---------|
+|-----------------------|-------------------------------------------------------------------------------------------|---------|
 | `S3_BUCKET`           | S3 Bucket name e.g. `mybucket`                                                             |         |
 | `S3_KEY_ID`           | S3 Key ID                                                                                  |         |
 | `S3_KEY_SECRET`       | S3 Key Secret                                                                              |         |
-| `S3_PATH`             | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`')                  |         |
+| `S3_PATH`             | S3 Pathname to save to (must end in a trailing slash e.g. '`backup/`')                     |         |
 | `S3_REGION`           | Define region in which bucket is defined. Example: `ap-northeast-2`                        |         |
 | `S3_HOST`             | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS.          |         |
 | `S3_PROTOCOL`         | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`.           |         |
 | `S3_EXTRA_OPTS`       | Add any extra options to the end of the `aws-cli` process execution                        |         |
 | `S3_CERT_CA_FILE`     | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem`  |         |
 | _*OR*_                |                                                                                            |         |
 | `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting                                    | `TRUE`  |

 #### Upload to a Azure storage account by `blobxfer`
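The `S3_PATH` row is the substantive change here: 3.5.3 required a trailing slash because the script concatenated `${S3_PATH}${s3_filename}` directly, while 3.5.6 documents the opposite because the script inserts the `/` itself (both spots are visible in the `cleanup_old_data` hunks further down). A minimal sketch of why the two conventions must not be mixed, with hypothetical bucket and file names:

    #!/usr/bin/env bash
    bucket="mybucket"; file="db_backup.sql.gz"       # hypothetical values

    # 3.5.3 convention: the variable itself carries the slash.
    S3_PATH="backup/"
    echo "s3://${bucket}/${S3_PATH}${file}"          # s3://mybucket/backup/db_backup.sql.gz

    # 3.5.6 convention: the script adds the slash, so the variable must not,
    # otherwise the key becomes "backup//db_backup.sql.gz".
    S3_PATH="backup"
    echo "s3://${bucket}/${S3_PATH}/${file}"         # s3://mybucket/backup/db_backup.sql.gz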

View File

@@ -1,7 +1,6 @@
 #!/command/with-contenv bash

 bootstrap_variables() {
-    sanity_var DB_TYPE "Set appropriate DB_TYPE"
     case "${DB_TYPE,,}" in
         couch* )
             dbtype=couch
@@ -75,18 +74,12 @@ bootstrap_variables() {
         sqlite* )
             dbtype=sqlite3
         ;;
-        * )
-            print_error "I don't recognize 'DB_TYPE=${DB_TYPE}' - Exitting.."
-            exit 99
-        ;;
     esac

     if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
         file_env 'S3_KEY_ID'
         file_env 'S3_KEY_SECRET'
     fi
 }

 backup_couch() {
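Read together, these first two script hunks show what 3.5.6 had added and this comparison takes away: an up-front check that `DB_TYPE` is set (`sanity_var`), and a `* )` catch-all that aborts with exit code 99 when the value matches no known database. In 3.5.3 an empty or misspelled `DB_TYPE` simply falls through the `case` and fails later with a less obvious error. A standalone sketch of the stricter 3.5.6 pattern, with plain `echo` standing in for the image's `sanity_var`/`print_error` helpers:

    #!/usr/bin/env bash
    DB_TYPE="${1:-}"

    # Fail fast when DB_TYPE is empty (stands in for: sanity_var DB_TYPE "Set appropriate DB_TYPE").
    if [ -z "${DB_TYPE}" ]; then
        echo "ERROR: Set appropriate DB_TYPE" >&2
        exit 1
    fi

    case "${DB_TYPE,,}" in        # ,, lowercases the value, exactly as the script does
        couch* )  dbtype=couch   ;;
        sqlite* ) dbtype=sqlite3 ;;
        * )                       # malformed value: abort instead of falling through
            echo "ERROR: I don't recognize 'DB_TYPE=${DB_TYPE}'" >&2
            exit 99
        ;;
    esac

    echo "dbtype=${dbtype}"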
@@ -123,7 +116,7 @@ backup_influx() {
         influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
         exit_code=$?
         check_exit_code $target_dir
-        print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
+        print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
         tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
         target=influx_${db}_${DB_HOST#*//}_${now}.tar${extension}
         generate_checksum
@@ -478,7 +471,7 @@ cleanup_old_data() {
             ;;
         "s3" | "minio" )
             print_info "Cleaning up old backups on S3 storage"
-            aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | grep " PRE " -v | while read -r s3_file; do
+            aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | grep " PRE " -v | while read -r s3_file; do
                 s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
                 s3_createdate=$(date -d "$s3_createdate" "+%s")
                 s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
@@ -486,7 +479,7 @@ cleanup_old_data() {
                 s3_filename=$(echo $s3_file | awk {'print $4'})
                 if [ "$s3_filename" != "" ] ; then
                     print_debug "Deleting $s3_filename"
-                    silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+                    silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
                 fi
             fi
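For context, the deletion decision surrounding these last two hunks sits in the unchanged lines between them: each `aws s3 ls` output line has its timestamp converted to epoch seconds and compared against a cutoff `DB_CLEANUP_TIME` minutes in the past. A compact sketch of that arithmetic against a hypothetical listing line (the comparison operator is an assumption, since the `if` itself is outside the hunks):

    #!/usr/bin/env bash
    DB_CLEANUP_TIME=10080                                    # minutes; 10080 = one week

    # Hypothetical `aws s3 ls` output line: date, time, size, key.
    s3_file="2022-10-01 08:42:05    1048576 db_backup.sql.gz"

    s3_createdate=$(echo "$s3_file" | awk '{print $1" "$2}') # "2022-10-01 08:42:05"
    s3_createdate=$(date -d "$s3_createdate" "+%s")          # to epoch seconds (GNU/BusyBox date)
    s3_olderthan=$(( $(date +%s) - DB_CLEANUP_TIME * 60 ))   # cutoff, also in epoch seconds

    if [ "$s3_createdate" -le "$s3_olderthan" ]; then
        s3_filename=$(echo "$s3_file" | awk '{print $4}')    # "db_backup.sql.gz"
        echo "would delete: ${s3_filename}"
    fi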