diff --git a/README.md b/README.md
index 2976a4a..92bfe85 100644
--- a/README.md
+++ b/README.md
@@ -199,8 +199,8 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.
 | Parameter | Description | Default |
 | --------------------- | ----------------------------------------------------------------------------------------- | ------- |
 | `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | |
-| `S3_KEY_ID` | S3 Key ID | |
-| `S3_KEY_SECRET` | S3 Key Secret | |
+| `S3_KEY_ID` | S3 Key ID (Optional) | |
+| `S3_KEY_SECRET` | S3 Key Secret (Optional) | |
 | `S3_PATH` | S3 Pathname to save to (must NOT end in a trailing slash e.g. '`backup`') | |
 | `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | |
 | `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | |
@@ -210,6 +210,8 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.
 | _*OR*_ | | |
 | `S3_CERT_SKIP_VERIFY` | Skip verifying self signed certificates when connecting | `TRUE` |
 
+- When `S3_KEY_ID` and/or `S3_KEY_SECRET` is not set, the assigned IAM role (if any) is used to upload the backup files to the S3 bucket.
+
 #### Upload to a Azure storage account by `blobxfer`
 
 Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer) to the Azure fileshare storage.
diff --git a/install/assets/functions/10-db-backup b/install/assets/functions/10-db-backup
index e6cde2a..7ca9b4e 100644
--- a/install/assets/functions/10-db-backup
+++ b/install/assets/functions/10-db-backup
@@ -100,7 +100,7 @@ bootstrap_variables() {
             ;;
     esac
 
-    if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
+    if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] && [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
         file_env 'S3_KEY_ID'
         file_env 'S3_KEY_SECRET'
     fi
@@ -652,8 +652,12 @@ move_dbbackup() {
             ;;
         "s3" | "minio" )
             print_debug "Moving backup to S3 Bucket"
-            export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
-            export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
+            if [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
+                export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
+                export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
+            else
+                print_debug "Variable S3_KEY_ID or S3_KEY_SECRET is not set. Please ensure a sufficient IAM role is assigned."
+            fi
             export AWS_DEFAULT_REGION=${S3_REGION}
             if [ -f "${S3_CERT_CA_FILE}" ] ; then
                 print_debug "Using Custom CA for S3 Backups"
@@ -809,7 +813,7 @@ sanity_test() {
             ;;
     esac
 
-    if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
+    if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] && [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
         sanity_var S3_BUCKET "S3 Bucket"
         sanity_var S3_PATH "S3 Path"
         sanity_var S3_REGION "S3 Region"
@@ -827,6 +831,7 @@ setup_mode() {
     if var_true "${MANUAL_RUN_FOREVER}" ; then
         mkdir -p /etc/services.d/99-run_forever
         cat <<EOF > /etc/services.d/99-run_forever/run
+#!/bin/bash
 while true
 do
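
For reference, a minimal usage sketch of the behaviour this change enables: when `S3_KEY_ID`/`S3_KEY_SECRET` are omitted, the `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` exports are skipped, so the S3 client inside the container falls back to its default credential provider chain (EC2 instance profile, ECS/EKS task role, and so on). The image name and the elided database settings below are placeholders, not defined by this patch.

```bash
# Sketch only: image name and database connection settings are illustrative placeholders.
# No S3_KEY_ID / S3_KEY_SECRET is passed, so uploads rely on the IAM role attached to the
# host instance (or task) instead of static credentials.
docker run -d \
  -e BACKUP_LOCATION=S3 \
  -e S3_BUCKET=mybucket \
  -e S3_PATH=backup \
  -e S3_REGION=ap-northeast-2 \
  tiredofit/db-backup          # plus the usual database connection variables
```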
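A note on the combined test added in `bootstrap_variables()` and `sanity_test()`: `[ a ] || [ b ] && [ c ]` behaves as intended here because the shell evaluates `||` and `&&` left to right with equal precedence, but the grouping is easier to read when made explicit. A sketch of an equivalent form (not part of the patch):

```bash
# Equivalent, more explicit grouping of the location check and the credential checks.
if { [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ]; } && \
   [ -n "${S3_KEY_ID}" ] && [ -n "${S3_KEY_SECRET}" ]; then
    file_env 'S3_KEY_ID'
    file_env 'S3_KEY_SECRET'
fi
```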