diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7f3522f..22a0ceb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,21 @@
+## 3.0.0 2022-03-17
+
+   ### Added
+   - Rewrote entire image
+   - Ability to choose which file hash to generate after backup (MD5 or SHA1)
+   - Restore Script (execute 'restore' in container)
+   - Allow mapping custom CA certs for S3 backups
+   - Allow skipping certificate verification for S3 backups
+   - Revamped logging and parameters - file logs also exist in /var/log/container/container.log
+   - Send more data to Zabbix to track backup start, end, duration and status
+   - Ability to backup stored procedures for MySQL / MariaDB
+   - Ability to backup as a single transaction for MySQL / MariaDB
+   - Ability to execute backups manually while keeping the container running, to accommodate Kubernetes cron usage
+
+   ### Changed
+   - Environment variables have changed! Specifically those relating to compression, parallel compression and checksums
+
+
 ## 2.12.0 2022-03-16
 
    ### Changed
diff --git a/Dockerfile b/Dockerfile
index 416280c..f5ab3d9 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -5,6 +5,7 @@ LABEL maintainer="Dave Conroy (github.com/tiredofit)"
 ENV MSSQL_VERSION=17.8.1.1-1 \
     CONTAINER_ENABLE_MESSAGING=FALSE \
     CONTAINER_ENABLE_MONITORING=TRUE \
+    CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
     IMAGE_NAME="tiredofit/db-backup" \
     IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
 
@@ -32,6 +33,7 @@ RUN set -ex && \
     pigz \
     postgresql \
     postgresql-client \
+    pv \
     redis \
     sqlite \
     xz \
diff --git a/README.md b/README.md
index a087cdf..a93b9fb 100644
--- a/README.md
+++ b/README.md
@@ -17,10 +17,12 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 * dump to local filesystem or backup to S3 Compatible services
 * select database user and password
 * backup all databases
-* choose to have an MD5 sum after backup for verification
+* choose to have an MD5 or SHA1 sum after backup for verification
 * delete old backups after specific amount of time
 * choose compression type (none, gz, bz, xz, zstd)
 * connect to any container running on the same system
+* Script to perform restores
+* Zabbix Monitoring capabilities
 * select how often to run a dump
 * select when to start the first dump, whether time of day or relative to container start time
 * Execute script after backup for monitoring/alerting purposes
@@ -34,13 +36,15 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
 - [About](#about)
 - [Maintainer](#maintainer)
 - [Table of Contents](#table-of-contents)
 - [Prerequisites and Assumptions](#prerequisites-and-assumptions)
 - [Installation](#installation)
   - [Build from Source](#build-from-source)
   - [Prebuilt Images](#prebuilt-images)
+  - [Multi Architecture](#multi-architecture)
 - [Configuration](#configuration)
   - [Quick Start](#quick-start)
   - [Persistent Storage](#persistent-storage)
   - [Environment Variables](#environment-variables)
     - [Base Images used](#base-images-used)
     - [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
@@ -55,28 +59,31 @@
   - [Updates](#updates)
 - [License](#license)
 
-## Prerequisites and Assumptions
+> **NOTE**: If you are using this with a docker-compose file along with a separate SQL container, take care not to set the variables to backup immediately; instead, delay execution for a minute or so, otherwise you will get a failed first backup.
 
-You must have a working DB server or container available for this to work properly, it does not provide server functionality!
+## Prerequisites and Assumptions
+* You must have a working connection to one of the supported DB Servers and appropriate credentials
 
 ## Installation
 
 ### Build from Source
 Clone this repository and build the image with `docker build -t (imagename) .`
 
 ### Prebuilt Images
 Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and are the recommended method of installation.
 
+The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md):
+
+| Alpine Base | Tag       |
+| ----------- | --------- |
+| latest      | `:latest` |
+
 ```bash
 docker pull tiredofit/db-backup:(imagetag)
 ```
-
-The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md):
-
-| Container OS | Tag       |
-| ------------ | --------- |
-| Alpine       | `:latest` |
-
+#### Multi Architecture
+Images are built primarily for the `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architectures, type `docker manifest inspect (image):(tag)`.
 
 ## Configuration
 
@@ -84,23 +91,22 @@ The following image tags are available along with their tagged release based on
 ### Quick Start
 
 * The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a working [docker-compose.yml](examples/docker-compose.yml) that can be modified for development or production use.
 
-* Set various [environment variables](#environment-variables) to understand the capabiltiies of this image.
+* Set various [environment variables](#environment-variables) to understand the capabilities of this image.
 * Map [persistent storage](#data-volumes) for access to configuration and data files for backup.
-
-> **NOTE**: If you are using this with a docker-compose file along with a seperate SQL container, take care not to set the variables to backup immediately, more so have it delay execution for a minute, otherwise you will get a failed first backup.
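+
+As a minimal sketch, the compose example translates to a `docker run` invocation along the following lines. The network name, credentials and schedule here are placeholder assumptions; adjust them to your environment. `DB_DUMP_BEGIN="+1"` delays the first backup by a minute, per the note above:
+
+```bash
+docker run -d --name db-backup \
+  --network mynetwork \
+  -e DB_TYPE=mysql \
+  -e DB_HOST=mariadb \
+  -e DB_NAME=database \
+  -e DB_USER=root \
+  -e DB_PASS=password \
+  -e DB_DUMP_BEGIN="+1" \
+  -v $(pwd)/backups:/backup \
+  tiredofit/db-backup:latest
+```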
 ### Persistent Storage
 
 The following directories are used for configuration and can be mapped for persistent storage.
-
 | Directory                | Description                                                                         |
 | ------------------------ | ----------------------------------------------------------------------------------- |
 | `/backup`                | Backups                                                                             |
 | `/assets/custom-scripts` | *Optional* Put custom scripts in this directory to execute after backup operations  |
+
 ### Environment Variables
 
 #### Base Images used
 
-This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`,`vim`.
+This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) or [Debian Linux](https://hub.docker.com/r/tiredofit/debian) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handled via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`,`vim`.
 
 Be sure to view the following repositories to understand all the customizable options:
 
@@ -108,52 +114,68 @@ Be sure to view the following repositories to understand all the customizable op
 | Image                                                   | Description                            |
 | ------------------------------------------------------ | -------------------------------------- |
 | [OS Base](https://github.com/tiredofit/docker-alpine/)  | Customized Image based on Alpine Linux |
 
+#### Container Options
-| Parameter                  | Description                                                                                                                                     | Default         |
-| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
-| `BACKUP_LOCATION`          | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi                                                                        | `FILESYSTEM`    |
-| `COMPRESSION`              | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE`                                                                          | `GZ`            |
-| `COMPRESSION_LEVEL`        | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` -                     | `3`             |
-| `DB_AUTH`                  | (Mongo Only - Optional) Authentication Database                                                                                                  |                 |
-| `DB_TYPE`                  | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3`                                                           |                 |
-| `DB_HOST`                  | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3`                                                    |                 |
-| `DB_NAME`                  | Schema Name e.g. `database`                                                                                                                      |                 |
-| `DB_USER`                  | username for the database - use `root` to backup all MySQL of them.                                                                              |                 |
-| `DB_PASS`                  | (optional if DB doesn't require it) password for the database                                                                                    |                 |
-| `DB_PORT`                  | (optional) Set port to connect to DB_HOST. Defaults are provided                                                                                 | varies          |
-| `DB_DUMP_FREQ`             | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day.                                                                   | `1440`          |
-| `DB_DUMP_BEGIN`            | What time to do the first dump. Defaults to immediate. Must be in one of two formats                                                             |                 |
-|                            | Absolute HHMM, e.g. `2330` or `0415`                                                                                                             |                 |
-|                            | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half   |                 |
-| `DB_CLEANUP_TIME`          | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything.  | `FALSE`         |
-| `DEBUG_MODE`               | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed.                           |                 |
-| `EXTRA_OPTS`               | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command`                                                  |                 |
-| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB                                                                                                 | `512M`          |
-| `MD5`                      | Generate MD5 Sum in Directory, `TRUE` or `FALSE`                                                                                                 | `TRUE`          |
-| `PARALLEL_COMPRESSION`     | Use multiple cores when compressing backups `TRUE` or `FALSE`                                                                                    | `TRUE`          |
-| `POST_SCRIPT`              | Fill this variable in with a command to execute post the script backing up                                                                       |                 |
-| `SPLIT_DB`                 | If using root as username and multiple DBs on system, set to TRUE to create Seperate DB Backups instead of all in one.                           | `FALSE`         |
-| `TEMP_LOCATION`            | Perform Backups and Compression in this temporary directory                                                                                      | `/tmp/backups/` |
+| Parameter         | Description                                                                                                                | Default         |
+| ----------------- | ---------------------------------------------------------------------------------------------------------------------------- | --------------- |
+| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi                                                     | `FILESYSTEM`    |
+| `MODE`            | `AUTO` to use the internal scheduling routines, or `MANUAL` to run backups only when you execute them by your own means       | `AUTO`          |
+| `TEMP_LOCATION`   | Perform Backups and Compression in this temporary directory                                                                   | `/tmp/backups/` |
+| `DB_AUTH`         | (Mongo Only - Optional) Authentication Database                                                                               |                 |
+| `DEBUG_MODE`      | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed.        | `FALSE`         |
+| `POST_SCRIPT`     | A command to execute after the backup completes                                                                               |                 |
+| `SPLIT_DB`        | If using root as username and multiple DBs on system, set to TRUE to create separate DB backups instead of all in one.        | `FALSE`         |
+
+### Database Specific Options
+| Parameter | Description                                                                                    | Default |
+| --------- | ------------------------------------------------------------------------------------------------ | ------- |
+| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3`            |         |
+| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3`     |         |
+| `DB_NAME` | Schema Name e.g. `database`                                                                       |         |
+| `DB_USER` | Username for the database - use `root` to backup all MySQL databases.                             |         |
+| `DB_PASS` | (optional if DB doesn't require it) password for the database                                     |         |
+| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided                                  | varies  |
+
+### Scheduling Options
+| Parameter         | Description                                                                                                                                    | Default |
+| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ | ------- |
+| `DB_DUMP_FREQ`    | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day.                                                                     | `1440`  |
+| `DB_DUMP_BEGIN`   | What time to do the first dump. Defaults to immediate. Must be in one of two formats                                                               |         |
+|                   | Absolute HHMM, e.g. `2330` or `0415`                                                                                                               |         |
+|                   | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half     |         |
+| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when the dump frequency fires). 1440 would delete anything over 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
 
-- When using compression with MongoDB, only `GZ` compression is possible.
 - You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of values that start with a `0` being converted into a different format, which prevents the timer from starting at the correct time.
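+
+For illustration, the two accepted `DB_DUMP_BEGIN` formats look like this as `docker run` environment flags (the times and frequencies below are arbitrary examples):
+
+```bash
+# Absolute HHMM: first dump at 23:30, then every 1440 minutes (daily).
+# Quoting preserves a leading zero, per the note above.
+-e DB_DUMP_BEGIN="2330" -e DB_DUMP_FREQ=1440
+
+# Relative +MM: first dump 30 minutes after container start, then every 6 hours
+-e DB_DUMP_BEGIN="+30" -e DB_DUMP_FREQ=360
+```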
-
-
+### Backup Options
+| Parameter                     | Description                                                                                           | Default |
+| ----------------------------- | --------------------------------------------------------------------------------------------------------- | ------- |
+| `ENABLE_COMPRESSION`          | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE`                                    | `GZ`    |
+| `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE`                                              | `TRUE`  |
+| `COMPRESSION_LEVEL`           | Numerical compression level to use; most allow `1` to `9`, except for `ZSTD` which allows `1` to `19`      | `3`     |
+| `ENABLE_CHECKSUM`             | Generate an MD5 or SHA1 checksum alongside the backup, `TRUE` or `FALSE`                                   | `TRUE`  |
+| `CHECKSUM`                    | Either `MD5` or `SHA1`                                                                                     | `MD5`   |
+| `EXTRA_OPTS`                  | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command`            |         |
+| `MYSQL_MAX_ALLOWED_PACKET`    | Max allowed packet if backing up MySQL / MariaDB                                                           | `512M`  |
+| `MYSQL_SINGLE_TRANSACTION`    | Backup in a single transaction with MySQL / MariaDB                                                        | `TRUE`  |
+| `MYSQL_STORED_PROCEDURES`     | Backup stored procedures with MySQL / MariaDB                                                              | `TRUE`  |
+- When using compression with MongoDB, only `GZ` compression is possible.
 
 #### Backing Up to S3 Compatible Services
 
 If `BACKUP_LOCATION` = `S3` then the following options are used.
 
-| Parameter       | Description                                                                                |
-| --------------- | -------------------------------------------------------------------------------------------- |
-| `S3_BUCKET`     | S3 Bucket name e.g. `mybucket`                                                                 |
-| `S3_KEY_ID`     | S3 Key ID                                                                                      |
-| `S3_KEY_SECRET` | S3 Key Secret                                                                                  |
-| `S3_PATH`       | S3 Pathname to save to e.g. '`backup`'                                                         |
-| `S3_REGION`     | Define region in which bucket is defined. Example: `ap-northeast-2`                            |
-| `S3_HOST`       | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS.              |
-| `S3_PROTOCOL`   | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`.               |
-
+| Parameter             | Description                                                                                | Default |
+| --------------------- | ---------------------------------------------------------------------------------------------- | ------- |
+| `S3_BUCKET`           | S3 Bucket name e.g. `mybucket`                                                                  |         |
+| `S3_KEY_ID`           | S3 Key ID                                                                                       |         |
+| `S3_KEY_SECRET`       | S3 Key Secret                                                                                   |         |
+| `S3_PATH`             | S3 Pathname to save to e.g. '`backup`'                                                          |         |
+| `S3_REGION`           | Define region in which bucket is defined. Example: `ap-northeast-2`                             |         |
+| `S3_HOST`             | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS.               |         |
+| `S3_PROTOCOL`         | Protocol to connect to `S3_HOST`. Either `http` or `https`.                                     | `https` |
+| `S3_EXTRA_OPTS`       | Add any extra options to the end of the `aws-cli` process execution                             |         |
+| `S3_CERT_CA_FILE`     | Map a volume and point to your custom CA Bundle for verification e.g. `/certs/bundle.pem`       |         |
+| _*OR*_                |                                                                                                 |         |
+| `S3_CERT_SKIP_VERIFY` | Skip verifying self-signed certificates when connecting                                         | `TRUE`  |
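+
+As a sketch, a minimal S3 target could be configured with the following flags (bucket, path, region, keys and endpoint are placeholders):
+
+```bash
+-e BACKUP_LOCATION=S3 -e S3_BUCKET=mybucket -e S3_PATH=backup -e S3_REGION=ap-northeast-2
+-e S3_KEY_ID=myaccesskey -e S3_KEY_SECRET=mysecretkey
+# Optional, for S3-compatible services such as Minio:
+-e S3_HOST=minio:8080 -e S3_PROTOCOL=https
+```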
 
 ## Maintenance
 
 ### Shell Access
 
 For debugging and maintenance purposes you may want to access the container's shell.
 
 ```bash
 docker exec -it (whatever your container name is) bash
 ```
 ### Manual Backups
 Manual Backups can be performed by entering the container and typing `backup-now`
 
+### Restoring Databases
+Entering the container and executing `restore` launches a menu-based script to restore your backups.
+
+You will be presented with a series of menus allowing you to choose:
+ - What file to restore
+ - What type of DB Backup
+ - What Host to restore to
+ - What Database Name to restore to
+ - What Database User to use
+ - What Database Password to use
+ - What Database Port to use
+
+The image will try to autodetect the type, hostname, and database name based on the filename.
+The image will also allow you to use the environment variables or Docker secrets that were used to back up the databases.
+
+The script can also be executed skipping the interactive mode by using the following syntax:
+
+ `restore `
+
+If you only enter some of the arguments you will be prompted to fill them in.
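+
+For example, a scripted restore might look like this. The argument order shown is an assumption based on the menu order above, so run `restore` with no arguments to confirm the exact usage:
+
+```bash
+# hypothetical invocation - restores a bzip2-compressed MySQL dump
+restore /backup/mysql_example_example-db_20220315-000000.sql.bz2 mysql example-db example exampleuser examplepass 3306
+```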
 
 ### Custom Scripts
 
 If you want to execute a custom script at the end of backup, you can drop bash scripts with the extension of `.sh` in this directory. See the following example to utilize:
 
 ````bash
 $ cat post-script.sh
 #!/bin/bash
 
 # #### Example Post Script
 # #### $1=EXIT_CODE (After running backup routine)
 # #### $2=DB_TYPE (Type of Backup)
 # #### $3=DB_HOST (Backup Host)
 # #### $4=DB_NAME (Name of Database backed up)
-# #### $5=DATE (Date of Backup)
-# #### $6=TIME (Time of Backup)
-# #### $7=BACKUP_FILENAME (Filename of Backup)
-# #### $8=FILESIZE (Filesize of backup)
-# #### $9=MD5_RESULT (MD5Sum if enabled)
+# #### $5=BACKUP START TIME (Seconds since Epoch)
+# #### $6=BACKUP FINISH TIME (Seconds since Epoch)
+# #### $7=BACKUP TOTAL TIME (Seconds between Start and Finish)
+# #### $8=BACKUP FILENAME (Filename)
+# #### $9=BACKUP FILESIZE
+# #### $10=HASH (If CHECKSUM enabled)
 
-echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ${6}. Filename: ${7} Size: ${8} bytes MD5: ${9}"
+echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes Hash: ${10}"
 ````
+
+    ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
+    ${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
+
 
 Outputs the following on the console:
 
-`0 mysql Backup Completed on example-db for example on 2020-04-22 05:19:10. Filename: mysql_example_example-db_20200422-051910.sql.bz2 Size: 7795 bytes MD5: 952fbaafa30437494fdf3989a662cd40`
+`0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_20220315-000000.sql.bz2 Size: 7795 bytes Hash: 952fbaafa30437494fdf3989a662cd40`
 
 If you wish to change the size value from bytes to megabytes, set environment variable `SIZE_VALUE=megabytes`
 
diff --git a/README.md.save b/README.md.save
deleted file mode 100644
index e2231f5..0000000
--- a/README.md.save
+++ /dev/null
@@ -1,219 +0,0 @@
-# github.com/tiredofit/docker-db-backup
-
-[![GitHub release](https://img.shields.io/github/v/tag/tiredofit/docker-db-backup?style=flat-square)](https://github.com/tiredofit/docker-db-backup/releases/latest)
-[![Build Status](https://img.shields.io/github/workflow/status/tiredofit/docker-db-backup/build?style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions?query=workflow%3Abuild)
-[![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
-[![Docker Pulls](https://img.shields.io/docker/pulls/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
-[![Become a sponsor](https://img.shields.io/badge/sponsor-tiredofit-181717.svg?logo=github&style=flat-square)](https://github.com/sponsors/tiredofit)
-[![Paypal Donate](https://img.shields.io/badge/donate-paypal-00457c.svg?logo=paypal&style=flat-square)](https://www.paypal.me/tiredofit)
-
-* * *
-## About
-
-This will build a container for backing up multiple types of DB Servers
-
-Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
-
-* dump to local filesystem or backup to S3 Compatible services
-* select database user and password
-* backup all databases
-* choose to have an MD5 sum after backup for verification
-* delete old backups after specific amount of time
-* choose compression type (none, gz, bz, xz, zstd)
-* connect to any container running on the same system
-* select how often to run a dump
-* select when to start the first dump, whether time of day or relative to container start time
-* Execute script after backup for monitoring/alerting purposes
-
-## Maintainer
-
-- [Dave Conroy](https://github.com/tiredofit)
-
-## Table of Contents
-
-- [About](#about)
-- [Maintainer](#maintainer)
-- [Table of Contents](#table-of-contents)
-- [Prerequisites and Assumptions](#prerequisites-and-assumptions)
-- [Installation](#installation)
-  - [Build from Source](#build-from-source)
-  - [Prebuilt Images](#prebuilt-images)
-- [Configuration](#configuration)
-  - [Quick Start](#quick-start)
-  - [Persistent Storage](#persistent-storage)
-  - [Environment Variables](#environment-variables)
-    - [Base Images used](#base-images-used)
-    - [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
-- [Maintenance](#maintenance)
-  - [Shell Access](#shell-access)
-  - [Manual Backups](#manual-backups)
-  - [Custom Scripts](#custom-scripts)
-- [Support](#support)
-  - [Usage](#usage)
-  - [Bugfixes](#bugfixes)
-  - [Feature Requests](#feature-requests)
-  - [Updates](#updates)
-- [License](#license)
-
-## Prerequisites and Assumptions
-
-You must have a working DB server or container available for this to work properly, it does not provide server functionality!
-
-## Installation
-
-### Build from Source
-Clone this repository and build the image with `docker build -t (imagename) .`
-
-### Prebuilt Images
-Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup) and is the recommended method of installation.
- -```bash -docker pull tiredofit/db-backup:(imagetag) -``` - -The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md): - -| Container OS | Tag | -| ------------ | --------- | -| Alpine | `:latest` | - - -## Configuration - -### Quick Start - -* The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a working [docker-compose.yml](examples/docker-compose.yml) that can be modified for development or production use. - -* Set various [environment variables](#environment-variables) to understand the capabiltiies of this image. -* Map [persistent storage](#data-volumes) for access to configuration and data files for backup. - -> **NOTE**: If you are using this with a docker-compose file along with a seperate SQL container, take care not to set the variables to backup immediately, more so have it delay execution for a minute, otherwise you will get a failed first backup. -### Persistent Storage - -The following directories are used for configuration and can be mapped for persistent storage. - -| Directory | Description | -| ------------------------ | ---------------------------------------------------------------------------------- | -| `/backup` | Backups | -| `/assets/custom-scripts` | *Optional* Put custom scripts in this directory to execute after backup operations | -### Environment Variables - -#### Base Images used - -This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`,`vim`. - -Be sure to view the following repositories to understand all the customizable options: - -| Image | Description | -| ------------------------------------------------------ | -------------------------------------- | -| [OS Base](https://github.com/tiredofit/docker-alpine/) | Customized Image based on Alpine Linux | - - -| Parameter | Description | Default | -| -------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | -| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` | -| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `GZ` | -| `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` | -| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | | -| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | | -| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | | -| `DB_NAME` | Schema Name e.g. `database` | | -| `DB_USER` | username for the database - use `root` to backup all MySQL of them. | | -| `DB_PASS` | (optional if DB doesn't require it) password for the database | | -| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies | -| `DB_DUMP_FREQ` | How often to do a dump, in minutes. 
Defaults to 1440 minutes, or once per day. | `1440` | -| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | | -| | Absolute HHMM, e.g. `2330` or `0415` | | -| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | | -| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` | -| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | | -| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | | -| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` | -| `MD5` | Generate MD5 Sum in Directory, `TRUE` or `FALSE` | `TRUE` | -| `PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` | -| `POST_SCRIPT` | Fill this variable in with a command to execute post the script backing up | | -| `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create Seperate DB Backups instead of all in one. | `FALSE` | -| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` | - -- When using compression with MongoDB, only `GZ` compression is possible. - -- You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of backups that start with a `0` get converted into a different format which will not allow the timer to start at the correct time. - - - -#### Backing Up to S3 Compatible Services - -If `BACKUP_LOCATION` = `S3` then the following options are used. - -| Parameter | Description | -| --------------- | --------------------------------------------------------------------------------------- | -| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` | -| `S3_KEY_ID` | S3 Key ID | -| `S3_KEY_SECRET` | S3 Key Secret | -| `S3_PATH` | S3 Pathname to save to e.g. '`backup`' | -| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` | -| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. | -| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. | - - -## Maintenance - - -### Shell Access - -For debugging and maintenance purposes you may want access the containers shell. - -``bash -docker exec -it (whatever your container name is) bash -`` -### Manual Backups -Manual Backups can be performed by entering the container and typing `backup-now` - - -### Custom Scripts - -If you want to execute a custom script at the end of backup, you can drop bash scripts with the extension of `.sh` in this directory. See the following example to utilize: - -````bash -$ cat post-script.sh -##!/bin/bash - -# #### Example Post Script -# #### $1=EXIT_CODE (After running backup routine) -# #### $2=DB_TYPE (Type of Backup) -# #### $3=DB_HOST (Backup Host) -# #### #4=DB_NAME (Name of Database backed up -# #### $5=DATE (Date of Backup) -# #### $6=TIME (Time of Backup) -# #### $7=BACKUP_FILENAME (Filename of Backup) -# #### $8=FILESIZE (Filesize of backup) -# #### $9=MD5_RESULT (MD5Sum if enabled) - -echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ${6}. 
Filename: ${7} Size: ${8} bytes MD5: ${9}"
-````
-
-Outputs the following on the console:
-
-`0 mysql Backup Completed on example-db for example on 2020-04-22 05:19:10. Filename: mysql_example_example-db_20200422-051910.sql.bz2 Size: 7795 bytes MD5: 952fbaafa30437494fdf3989a662cd40`
-
-If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`
-
-## Support
-
-These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
-### Usage
-- The [Discussions board](../../discussions) is a great place for working with the community on tips and tricks of using this image.
-- Consider [sponsoring me](https://github.com/sponsors/tiredofit) personalized support.
-### Bugfixes
-- Please, submit a [Bug Report](issues/new) if something isn't working as expected. I'll do my best to issue a fix in short order.
-
-### Feature Requests
-- Feel free to submit a feature request, however there is no guarantee that it will be added, or at what timeline.
-- Consider [sponsoring me](https://github.com/sponsors/tiredofit) regarding development of features.
-
-### Updates
-- Best effort to track upstream changes, More priority if I am actively using the image in a production environment.
-- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for up to date releases.
-
-## License
-MIT. See [LICENSE](LICENSE) for more details.
diff --git a/examples/docker-compose.yml b/examples/docker-compose.yml
index 6ef4df8..52d0ae3 100755
--- a/examples/docker-compose.yml
+++ b/examples/docker-compose.yml
@@ -30,7 +30,7 @@ services:
       - DB_DUMP_FREQ=1440
       - DB_DUMP_BEGIN=0000
       - DB_CLEANUP_TIME=8640
-      - MD5=TRUE
+      - CHECKSUM=MD5
-      - COMPRESSION=XZ
+      - ENABLE_COMPRESSION=XZ
       - SPLIT_DB=FALSE
    restart: always
diff --git a/examples/post-script.sh b/examples/post-script.sh
index bf33192..4307134 100755
--- a/examples/post-script.sh
+++ b/examples/post-script.sh
@@ -1,14 +1,15 @@
-##!/bin/bash
+#!/bin/bash
 
-## Example Post Script
-## $1=EXIT_CODE (After running backup routine)
-## $2=DB_TYPE (Type of Backup)
-## $3=DB_HOST (Backup Host)
-## #4=DB_NAME (Name of Database backed up
-## $5=DATE (Date of Backup)
-## $6=TIME (Time of Backup)
-## $7=BACKUP_FILENAME (Filename of Backup)
-## $8=FILESIZE (Filesize of backup)
-## $9=MD5_RESULT (MD5Sum if enabled)
+# #### Example Post Script
+# #### $1=EXIT_CODE (After running backup routine)
+# #### $2=DB_TYPE (Type of Backup)
+# #### $3=DB_HOST (Backup Host)
+# #### $4=DB_NAME (Name of Database backed up)
+# #### $5=BACKUP START TIME (Seconds since Epoch)
+# #### $6=BACKUP FINISH TIME (Seconds since Epoch)
+# #### $7=BACKUP TOTAL TIME (Seconds between Start and Finish)
+# #### $8=BACKUP FILENAME (Filename)
+# #### $9=BACKUP FILESIZE
+# #### $10=HASH (If CHECKSUM enabled)
 
-echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ${6}. Filename: ${7} Size: ${8} bytes MD5: ${9}"
+echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes Hash: ${10}"
diff --git a/install/assets/defaults/10-db-backup b/install/assets/defaults/10-db-backup
new file mode 100755
index 0000000..e7fc7c3
--- /dev/null
+++ b/install/assets/defaults/10-db-backup
@@ -0,0 +1,28 @@
+#!/command/with-contenv bash
+
+BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
+CHECKSUM=${CHECKSUM:-"MD5"}
+COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
+DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
+DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
+DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
+ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
+ENABLE_COMPRESSION=${ENABLE_COMPRESSION:-"GZ"}
+ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
+LOG_PATH=${LOG_PATH:-"/logs/"}
+LOG_TYPE=${LOG_TYPE:-"BOTH"}
+MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
+MODE=${MODE:-"AUTO"}
+MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
+MYSQL_SINGLE_TRANSACTION=${MYSQL_SINGLE_TRANSACTION:-"TRUE"}
+MYSQL_STORED_PROCEDURES=${MYSQL_STORED_PROCEDURES:-"TRUE"}
+S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
+S3_PROTOCOL=${S3_PROTOCOL:-"https"}
+SIZE_VALUE=${SIZE_VALUE:-"bytes"}
+SPLIT_DB=${SPLIT_DB:-"FALSE"}
+TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
+dbhost=${DB_HOST}
+dbname=${DB_NAME}
+dbpass=${DB_PASS}
+dbtype=${DB_TYPE}
+dbuser=${DB_USER}
\ No newline at end of file
diff --git a/install/assets/functions/10-db-backup b/install/assets/functions/10-db-backup
new file mode 100755
index 0000000..342d771
--- /dev/null
+++ b/install/assets/functions/10-db-backup
@@ -0,0 +1,475 @@
+#!/command/with-contenv bash
+
+bootstrap_compression() {
+    ### Set Compression Options
+    if var_true "${ENABLE_PARALLEL_COMPRESSION}" ; then
+        bzip="pbzip2 -${COMPRESSION_LEVEL}"
+        gzip="pigz -${COMPRESSION_LEVEL}"
+        xzip="pixz -${COMPRESSION_LEVEL}"
+        zstd="zstd --rm -${COMPRESSION_LEVEL}"
+    else
+        bzip="bzip2 -${COMPRESSION_LEVEL}"
+        gzip="gzip -${COMPRESSION_LEVEL}"
+        xzip="xz -${COMPRESSION_LEVEL} "
+        zstd="zstd --rm -${COMPRESSION_LEVEL}"
+    fi
+}
+
+bootstrap_variables() {
+    case "${dbtype,,}" in
+        couch* )
+            dbtype=couch
+            dbport=${DB_PORT:-5984}
+            file_env 'DB_USER'
+            file_env 'DB_PASS'
+        ;;
+        influx* )
+            dbtype=influx
+            dbport=${DB_PORT:-8088}
+            file_env 'DB_USER'
+            file_env 'DB_PASS'
+        ;;
+        mongo* )
+            dbtype=mongo
+            dbport=${DB_PORT:-27017}
+            [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
+            [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+        ;;
+        "mysql" | "mariadb" )
+            dbtype=mysql
+            dbport=${DB_PORT:-3306}
+            [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+        ;;
+        "mssql" | "microsoftsql" )
+            apkArch="$(apk --print-arch)"
+            case "$apkArch" in
+                x86_64) mssql=true ;;
+                *) print_error "MSSQL cannot operate on $apkArch processor!" ; exit 1 ;;
+            esac
+            dbtype=mssql
+            dbport=${DB_PORT:-1433}
+        ;;
+        postgres* | "pgsql" )
+            dbtype=pgsql
+            dbport=${DB_PORT:-5432}
+            [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+        ;;
+        "redis" )
+            dbtype=redis
+            dbport=${DB_PORT:-6379}
+            [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
+        ;;
+        sqlite* )
+            dbtype=sqlite3
+        ;;
+    esac
+
+    if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
+        file_env 'S3_KEY_ID'
+        file_env 'S3_KEY_SECRET'
+    fi
+
+    ### Set the Database Authentication Details
+    case "$dbtype" in
+        "mongo" )
+            [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}"
+            [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}"
+            [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}"
+            [[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}"
+        ;;
+        "mysql" )
+            [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass}
+        ;;
+        "pgsql" )
+            [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}"
+        ;;
+        "redis" )
+            [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}"
+        ;;
+    esac
+}
+
+backup_couch() {
+    target=couch_${dbname}_${dbhost}_${now}.txt
+    compression
+    print_notice "Dumping CouchDB database: '${dbname}'"
+    curl -X GET "http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true" | $dumpoutput > ${TEMP_LOCATION}/${target}
+    exit_code=$?
+    check_exit_code
+    generate_checksum
+    move_backup
+}
+
+backup_influx() {
+    if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
+        :
+    else
+        print_notice "Compressing InfluxDB backup with gzip"
+        influx_compression="-portable"
+    fi
+    for DB in ${DB_NAME}; do
+        print_notice "Dumping Influx database: '${DB}'"
+        target=influx_${DB}_${dbhost}_${now}
+        influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${TEMP_LOCATION}/${target}
+        exit_code=$?
+        check_exit_code
+        generate_checksum
+        move_backup
+    done
+}
+
+backup_mongo() {
+    if [ "${ENABLE_COMPRESSION,,}" = "none" ] || [ "${ENABLE_COMPRESSION,,}" = "false" ] ; then
+        target=${dbtype}_${dbname}_${dbhost}_${now}.archive
+    else
+        print_notice "Compressing MongoDB backup with gzip"
+        target=${dbtype}_${dbname}_${dbhost}_${now}.archive.gz
+        mongo_compression="--gzip"
+    fi
+    print_notice "Dumping MongoDB database: '${DB_NAME}'"
+    mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
+    exit_code=$?
+    check_exit_code
+    cd "${TEMP_LOCATION}"
+    generate_checksum
+    move_backup
+}
+
+backup_mssql() {
+    target=mssql_${dbname}_${dbhost}_${now}.bak
+    print_notice "Dumping MSSQL database: '${dbname}'"
+    /opt/mssql-tools/bin/sqlcmd -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} -Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
+    exit_code=$?
+    check_exit_code
+    generate_checksum
+    move_backup
+}
+
+backup_mysql() {
+    if var_true "${MYSQL_SINGLE_TRANSACTION}" ; then
+        single_transaction="--single-transaction"
+    fi
+    if var_true "${MYSQL_STORED_PROCEDURES}" ; then
+        stored_procedures="--routines"
+    fi
+    if var_true "${SPLIT_DB}" ; then
+        DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema)
+        for db in ${DATABASES} ; do
+            if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
+                print_debug "Backing up everything except for information_schema and _* prefixes"
+                print_notice "Dumping MySQL/MariaDB database: '${db}'"
+                target=mysql_${db}_${dbhost}_${now}.sql
+                compression
+                mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} --databases $db | $dumpoutput > ${TEMP_LOCATION}/${target}
+                exit_code=$?
+                check_exit_code
+                generate_checksum
+                move_backup
+            fi
+        done
+    else
+        target=mysql_all_${dbhost}_${now}.sql
+        compression
+        print_notice "Dumping MySQL/MariaDB database: '${DB_NAME}'"
+        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -A -h $dbhost -P $dbport -u$dbuser ${single_transaction} ${stored_procedures} ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
+        exit_code=$?
+        check_exit_code
+        generate_checksum
+        move_backup
+    fi
+}
+
+backup_pgsql() {
+    export PGPASSWORD=${dbpass}
+    if var_true "${SPLIT_DB}" ; then
+        authdb=${DB_USER}
+        [ -n "${DB_NAME}" ] && authdb=${DB_NAME}
+        DATABASES=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
+        for db in ${DATABASES}; do
+            print_notice "Dumping Postgresql database: $db"
+            target=pgsql_${db}_${dbhost}_${now}.sql
+            compression
+            pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
+            exit_code=$?
+            check_exit_code
+            generate_checksum
+            move_backup
+        done
+    else
+        target=pgsql_${dbname}_${dbhost}_${now}.sql
+        compression
+        print_notice "Dumping PostgreSQL: '${DB_NAME}'"
+        pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
+        exit_code=$?
+        check_exit_code
+        generate_checksum
+        move_backup
+    fi
+}
+
+backup_redis() {
+    target=redis_all_${dbhost}_${now}.rdb
+    echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
+    print_notice "Dumping Redis - Flushing Redis Cache First"
+    sleep 10
+    try=5
+    while [ $try -gt 0 ] ; do
+        saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
+        ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
+        if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
+            print_notice "Redis Backup Complete"
+            break
+        fi
+        try=$((try - 1))
+        print_warn "Redis Busy - Waiting and retrying in 5 seconds"
+        sleep 5
+    done
+    target_original=${target}
+    compression
+    $dumpoutput "${TEMP_LOCATION}/${target_original}"
+    generate_checksum
+    move_backup
+}
+
+backup_sqlite3() {
+    db=$(basename "$dbhost")
+    db="${db%.*}"
+    target=sqlite3_${db}_${now}.sqlite3
+    compression
+
+    print_notice "Dumping sqlite3 database: '${dbhost}'"
+    sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
+    exit_code=$?
+ check_exit_code + cat "${TEMP_LOCATION}"/backup.sqlite3 | $dumpoutput > "${TEMP_LOCATION}/${target}" + generate_checksum + move_backup +} + +check_availability() { +### Set the Database Type + case "$dbtype" in + "couch" ) + COUNTER=0 + while ! (nc -z ${dbhost} ${dbport}) ; do + sleep 5 + (( COUNTER+=5 )) + print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" + done + ;; + "influx" ) + COUNTER=0 + while ! (nc -z ${dbhost} ${dbport}) ; do + sleep 5 + (( COUNTER+=5 )) + print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" + done + ;; + "mongo" ) + COUNTER=0 + while ! (nc -z ${dbhost} ${dbport}) ; do + sleep 5 + (( COUNTER+=5 )) + print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" + done + ;; + "mysql" ) + COUNTER=0 + export MYSQL_PWD=${dbpass} + while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do + sleep 5 + (( COUNTER+=5 )) + print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)" + done + ;; + "mssql" ) + COUNTER=0 + while ! (nc -z ${dbhost} ${dbport}) ; do + sleep 5 + (( COUNTER+=5 )) + print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" + done + ;; + "pgsql" ) + COUNTER=0 + export PGPASSWORD=${dbpass} + until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q + do + sleep 5 + (( COUNTER+=5 )) + print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" + done + ;; + "redis" ) + COUNTER=0 + while ! (nc -z "${dbhost}" "${dbport}") ; do + sleep 5 + (( COUNTER+=5 )) + print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" + done + ;; + "sqlite3" ) + if [[ ! -e "${dbhost}" ]]; then + print_error "File '${dbhost}' does not exist." + exit_code=2 + exit $exit_code + elif [[ ! -f "${dbhost}" ]]; then + print_error "File '${dbhost}' is not a file." + exit_code=2 + exit $exit_code + elif [[ ! -r "${dbhost}" ]]; then + print_error "File '${dbhost}' is not readable." 
+            exit_code=2
+            exit $exit_code
+        fi
+        ;;
+    esac
+}
+
+check_exit_code() {
+    print_debug "Exit Code is ${exit_code}"
+    case "${exit_code}" in
+        0 )
+            print_info "Backup completed successfully"
+        ;;
+        * )
+            print_error "Backup reported errors - Aborting"
+            exit 1
+        ;;
+    esac
+}
+
+compression() {
+    case "${ENABLE_COMPRESSION,,}" in
+        gz* )
+            print_notice "Compressing backup with gzip"
+            target=${target}.gz
+            dumpoutput="$gzip "
+        ;;
+        bz* )
+            print_notice "Compressing backup with bzip2"
+            target=${target}.bz2
+            dumpoutput="$bzip "
+        ;;
+        xz* )
+            print_notice "Compressing backup with xzip"
+            target=${target}.xz
+            dumpoutput="$xzip "
+        ;;
+        zst* )
+            print_notice "Compressing backup with zstd"
+            target=${target}.zst
+            dumpoutput="$zstd "
+        ;;
+        "none" | "false")
+            print_notice "Not compressing backups"
+            dumpoutput="cat "
+        ;;
+    esac
+}
+
+generate_checksum() {
+    if var_true "${ENABLE_CHECKSUM}" ; then
+        case "${CHECKSUM,,}" in
+            "md5" )
+                checksum_command="md5sum"
+                checksum_extension="md5"
+            ;;
+            "sha1" )
+                checksum_command="sha1sum"
+                checksum_extension="sha1"
+            ;;
+        esac
+
+        print_notice "Generating ${checksum_extension^^} for '${target}'"
+        cd "${TEMP_LOCATION}"
+        ${checksum_command} "${target}" > "${target}"."${checksum_extension}"
+        checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
+        print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
+    fi
+}
+
+move_backup() {
+    case "${SIZE_VALUE,,}" in
+        "b" | "bytes" )
+            SIZE_VALUE=1
+        ;;
+        "k" | "kb" | "kilobytes" | "m" | "mb" | "megabytes" )
+            SIZE_VALUE="-h"
+        ;;
+        *)
+            SIZE_VALUE=1
+        ;;
+    esac
+    if [ "$SIZE_VALUE" = "1" ] ; then
+        FILESIZE=$(stat -c%s "${TEMP_LOCATION}"/"${target}")
+        print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
+    else
+        FILESIZE=$(du -h "${TEMP_LOCATION}"/"${target}" | awk '{ print $1}')
+        print_notice "Backup of ${target} created with the size of ${FILESIZE}"
+    fi
+
+    case "${BACKUP_LOCATION,,}" in
+        "file" | "filesystem" )
+            print_debug "Moving backup to filesystem"
+            mkdir -p "${DB_DUMP_TARGET}"
+            if var_true "${ENABLE_CHECKSUM}" ; then
+                mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
+            fi
+            mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
+        ;;
+        "s3" | "minio" )
+            print_debug "Moving backup to S3 Bucket"
+            export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
+            export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
+            export AWS_DEFAULT_REGION=${S3_REGION}
+            if [ -f "${S3_CERT_CA_FILE}" ] ; then
+                print_debug "Using Custom CA for S3 Backups"
+                s3_ssl=" --ca-bundle ${S3_CERT_CA_FILE}"
+            fi
+            if var_true "${S3_CERT_SKIP_VERIFY}" ; then
+                print_debug "Skipping SSL verification for HTTPS S3 Hosts"
+                s3_ssl="${s3_ssl} --no-verify-ssl"
+            fi
+
+            [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
+
+            aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${S3_EXTRA_OPTS}
+
+            if var_true "${ENABLE_CHECKSUM}" ; then
+                rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
+            fi
+            rm -rf "${TEMP_LOCATION}"/"${target}"
+        ;;
+    esac
+}
+
+sanity_test() {
+    sanity_var DB_TYPE "Database Type"
+    sanity_var DB_HOST "Database Host"
+    file_env 'DB_USER'
+    file_env 'DB_PASS'
+
+    if [ "${BACKUP_LOCATION,,}" = "s3" ] || [ "${BACKUP_LOCATION,,}" = "minio" ] ; then
+        sanity_var S3_BUCKET "S3 Bucket"
+        sanity_var S3_PATH "S3 Path"
+        sanity_var S3_REGION "S3 Region"
+        file_env 'S3_KEY_ID'
+        file_env 'S3_KEY_SECRET'
+    fi
+}
+
+setup_mode() {
+    if [ "${MODE,,}" = "auto" ] || [ "${MODE,,}" = "default" ] ; then
+        print_debug "Running in Auto / Default Mode - Letting Image control scheduling"
+    else
+        print_info "Running in Manual mode - Execute 'backup-now' to run a manual backup"
+        service_stop 10-db-backup
+        if var_true "${MANUAL_RUN_FOREVER}" ; then
+            mkdir -p /etc/services.d/99-run_forever
+            cat <<EOF > /etc/services.d/99-run_forever/run
+#!/bin/bash
+while true
+do
+    sleep 86400
+done
+EOF
+            chmod +x /etc/services.d/99-run_forever/run
+        fi
+    fi
+}
\ No newline at end of file
diff --git a/install/etc/cont-finish.d/10-db-backup b/install/etc/cont-finish.d/10-db-backup
deleted file mode 100755
index b45d825..0000000
--- a/install/etc/cont-finish.d/10-db-backup
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/command/with-contenv bash
-
-pkill bash
diff --git a/install/etc/cont-init.d/10-db-backup b/install/etc/cont-init.d/10-db-backup
index a3f7112..4c30eed 100755
--- a/install/etc/cont-init.d/10-db-backup
+++ b/install/etc/cont-init.d/10-db-backup
@@ -6,6 +6,8 @@ prepare_service 03-monitoring
 PROCESS_NAME="db-backup"
 
 output_off
+sanity_test
+setup_mode
 create_zabbix dbbackup
 liftoff
diff --git a/install/etc/services.available/10-db-backup/run b/install/etc/services.available/10-db-backup/run
index 3fe5719..2f10299 100755
--- a/install/etc/services.available/10-db-backup/run
+++ b/install/etc/services.available/10-db-backup/run
@@ -1,469 +1,62 @@
 #!/command/with-contenv bash
 
 source /assets/functions/00-container
+source /assets/functions/10-db-backup
+source /assets/defaults/10-db-backup
 PROCESS_NAME="db-backup"
 
-date >/dev/null
-
-if [ "$1" != "NOW" ]; then
-    sleep 10
-fi
-
-### Sanity Test
-sanity_var DB_TYPE "Database Type"
-sanity_var DB_HOST "Database Host"
-
-### Set the Database Type
-dbtype=${DB_TYPE}
-
-case "$dbtype" in
-    "couch" | "couchdb" | "COUCH" | "COUCHDB" )
-        dbtype=couch
-        dbport=${DB_PORT:-5984}
-        file_env 'DB_USER'
-        file_env 'DB_PASS'
+case "${1,,}" in
+    "now" | "manual" )
+        DB_DUMP_BEGIN=+0
+        manual=TRUE
         ;;
-    "influx" | "influxdb" | "INFLUX" | "INFLUXDB" )
-        dbtype=influx
-        dbport=${DB_PORT:-8088}
-        file_env 'DB_USER'
-        file_env 'DB_PASS'
-        ;;
-    "mongo" | "mongodb" | "MONGO" | "MONGODB" )
-        dbtype=mongo
-        dbport=${DB_PORT:-27017}
-        [[ ( -n "${DB_USER}" ) || ( -n "${DB_USER_FILE}" ) ]] && file_env 'DB_USER'
-        [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
-        ;;
-    "mysql" | "MYSQL" | "mariadb" | "MARIADB")
-        dbtype=mysql
-        dbport=${DB_PORT:-3306}
-        [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
-        MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
-        ;;
-    "mssql" | "MSSQL" | "microsoftsql" | "MICROSOFTSQL")
-        apkArch="$(apk --print-arch)"; \
-        case "$apkArch" in
-            x86_64) mssql=true ;;
-            *) print_error "MSSQL cannot operate on $apkArch processor!"
; exit 1 ;; - esac - dbtype=mssql - dbport=${DB_PORT:-1433} - ;; - "postgres" | "postgresql" | "pgsql" | "POSTGRES" | "POSTGRESQL" | "PGSQL" ) - dbtype=pgsql - dbport=${DB_PORT:-5432} - [[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS' - ;; - "redis" | "REDIS" ) - dbtype=redis - dbport=${DB_PORT:-6379} - [[ ( -n "${DB_PASS}" || ( -n "${DB_PASS_FILE}" ) ) ]] && file_env 'DB_PASS' - ;; - "sqlite" | "sqlite3" | "SQLITE" | "SQLITE3" ) - dbtype=sqlite3 - ;; -esac - -### Set Defaults -BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"} -COMPRESSION=${COMPRESSION:-"GZ"} -COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"} -DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0} -DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440} -DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"} -dbhost=${DB_HOST} -dbname=${DB_NAME} -dbpass=${DB_PASS} -dbuser=${DB_USER} -MD5=${MD5:-TRUE} -PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-"TRUE"} -SIZE_VALUE=${SIZE_VALUE:-"bytes"} -SPLIT_DB=${SPLIT_DB:-"FALSE"} -TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"} - -if [ "$BACKUP_LOCATION" = "S3" ] || [ "$BACKUP_LOCATION" = "s3" ] || [ "$BACKUP_LOCATION" = "MINIO" ] || [ "$BACKUP_LOCATION" = "minio" ] ; then - S3_PROTOCOL=${S3_PROTOCOL:-"https"} - sanity_var S3_BUCKET "S3 Bucket" - sanity_var S3_KEY_ID "S3 Key ID" - sanity_var S3_KEY_SECRET "S3 Key Secret" - sanity_var S3_PATH "S3 Path" - sanity_var S3_REGION "S3 Region" - file_env 'S3_KEY_ID' - file_env 'S3_KEY_SECRET' -fi - -if [ "$1" = "NOW" ]; then - DB_DUMP_BEGIN=+0 - MANUAL=TRUE -fi - -### Set Compression Options -if var_true "${PARALLEL_COMPRESSION}" ; then - bzip="pbzip2 -${COMPRESSION_LEVEL}" - gzip="pigz -${COMPRESSION_LEVEL}" - xzip="pixz -${COMPRESSION_LEVEL}" - zstd="zstd --rm -${COMPRESSION_LEVEL}" -else - bzip="bzip2 -${COMPRESSION_LEVEL}" - gzip="gzip -${COMPRESSION_LEVEL}" - xzip="xz -${COMPRESSION_LEVEL} " - zstd="zstd --rm -${COMPRESSION_LEVEL}" -fi - -### Set the Database Authentication Details -case "$dbtype" in - "mongo" ) - [[ ( -n "${DB_USER}" ) ]] && MONGO_USER_STR=" --username ${dbuser}" - [[ ( -n "${DB_PASS}" ) ]] && MONGO_PASS_STR=" --password ${dbpass}" - [[ ( -n "${DB_NAME}" ) ]] && MONGO_DB_STR=" --db ${dbname}" - [[ ( -n "${DB_AUTH}" ) ]] && MONGO_AUTH_STR=" --authenticationDatabase ${DB_AUTH}" - ;; - "mysql" ) - [[ ( -n "${DB_PASS}" ) ]] && export MYSQL_PWD=${dbpass} - ;; - "postgres" ) - [[ ( -n "${DB_PASS}" ) ]] && POSTGRES_PASS_STR="PGPASSWORD=${dbpass}" - ;; - "redis" ) - [[ ( -n "${DB_PASS}" ) ]] && REDIS_PASS_STR=" -a ${dbpass}" - ;; -esac - -### Functions -backup_couch() { - target=couch_${dbname}_${dbhost}_${now}.txt - compression - curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${dumpoutput} | $dumpoutput > ${TEMP_LOCATION}/${target} - exit_code=$? - generate_md5 - move_backup -} - -backup_influx() { - if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then - : - else - print_notice "Compressing InfluxDB backup with gzip" - influx_compression="-portable" - fi - for DB in ${DB_NAME}; do - target=influx_${DB}_${dbhost}_${now} - influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${TEMP_LOCATION}/${target} - exit_code=$? 
- generate_md5 - move_backup - done -} - -backup_mongo() { - if [ "${COMPRESSION}" = "NONE" ] || [ "${COMPRESSION}" = "none" ] || [ "${COMPRESSION}" = "FALSE" ] || [ "${COMPRESSION}" = "false" ] ; then - target=${dbtype}_${dbname}_${dbhost}_${now}.archive - else - print_notice "Compressing MongoDB backup with gzip" - target=${dbtype}_${dbname}_${dbhost}_${now}.archivegz - mongo_compression="--gzip" - fi - mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS} - exit_code=$? - cd ${TEMP_LOCATION} - generate_md5 - move_backup -} - -backup_mssql() { - target=mssql_${dbname}_${dbhost}_${now}.bak - /opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} –Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10" -} - -backup_mysql() { - if var_true "${SPLIT_DB}" ; then - DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema) - - for db in $DATABASES; do - if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then - print_notice "Dumping MariaDB database: $db" - target=mysql_${db}_${dbhost}_${now}.sql - compression - mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} --databases $db | $dumpoutput > ${TEMP_LOCATION}/${target} - exit_code=$? - generate_md5 - move_backup - fi - done - else - compression - mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -A -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target} - exit_code=$? - generate_md5 - move_backup - fi -} - -backup_pgsql() { - if var_true "${SPLIT_DB}" ; then - export PGPASSWORD=${dbpass} - authdb=${DB_USER} - [ -n "${DB_NAME}" ] && authdb=${DB_NAME} - DATABASES=$(psql -h $dbhost -U $dbuser -p ${dbport} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' ) - for db in $DATABASES; do - print_info "Dumping database: $db" - target=pgsql_${db}_${dbhost}_${now}.sql - compression - pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target} - exit_code=$? - generate_md5 - move_backup - done - else - export PGPASSWORD=${dbpass} - compression - pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target} - exit_code=$? 
- generate_md5 - move_backup - fi -} - -backup_redis() { - target=redis_${db}_${dbhost}_${now}.rdb - echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS} - print_info "Dumping Redis - Flushing Redis Cache First" - sleep 10 - try=5 - while [ $try -gt 0 ] ; do - saved=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}') - ok=$(echo 'info Persistence' | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}') - if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then - print_info "Redis Backup Complete" - break - fi - try=$((try - 1)) - print_info "Redis Busy - Waiting and retrying in 5 seconds" + * ) sleep 5 - done - target_original=${target} - compression - $dumpoutput "${TEMP_LOCATION}/${target_original}" - generate_md5 - move_backup -} - -backup_sqlite3() { - db=$(basename "$dbhost") - db="${db%.*}" - target=sqlite3_${db}_${now}.sqlite3 - compression - - print_info "Dumping sqlite3 database: ${dbhost}" - sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'" - exit_code=$? - - cat "${TEMP_LOCATION}/backup.sqlite3" | $dumpoutput > "${TEMP_LOCATION}/${target}" - - generate_md5 - move_backup -} - -check_availability() { -### Set the Database Type - case "$dbtype" in - "couch" ) - COUNTER=0 - while ! (nc -z ${dbhost} ${dbport}) ; do - sleep 5 - (( COUNTER+=5 )) - print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" - done - ;; - "influx" ) - COUNTER=0 - while ! (nc -z ${dbhost} ${dbport}) ; do - sleep 5 - (( COUNTER+=5 )) - print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" - done - ;; - "mongo" ) - COUNTER=0 - while ! (nc -z ${dbhost} ${dbport}) ; do - sleep 5 - (( COUNTER+=5 )) - print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" - done - ;; - "mysql" ) - COUNTER=0 - export MYSQL_PWD=${dbpass} - while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do - sleep 5 - (( COUNTER+=5 )) - print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)" - done - ;; - "mssql" ) - COUNTER=0 - while ! (nc -z ${dbhost} ${dbport}) ; do - sleep 5 - (( COUNTER+=5 )) - print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" - done - ;; - "pgsql" ) - COUNTER=0 - export PGPASSWORD=${dbpass} - until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q - do - sleep 5 - (( COUNTER+=5 )) - print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" - done - ;; - "redis" ) - COUNTER=0 - while ! (nc -z "${dbhost}" "${dbport}") ; do - sleep 5 - (( COUNTER+=5 )) - print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)" - done - ;; - "sqlite3" ) - if [[ ! -e "${dbhost}" ]]; then - print_error "File '${dbhost}' does not exist." - exit_code=2 - exit $exit_code - elif [[ ! -f "${dbhost}" ]]; then - print_error "File '${dbhost}' is not a file." - exit_code=2 - exit $exit_code - elif [[ ! -r "${dbhost}" ]]; then - print_error "File '${dbhost}' is not readable." 
-
-check_availability() {
-### Set the Database Type
-  case "$dbtype" in
-    "couch" )
-      COUNTER=0
-      while ! (nc -z ${dbhost} ${dbport}) ; do
-        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "CouchDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
-      done
-    ;;
-    "influx" )
-      COUNTER=0
-      while ! (nc -z ${dbhost} ${dbport}) ; do
-        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "InfluxDB Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
-      done
-    ;;
-    "mongo" )
-      COUNTER=0
-      while ! (nc -z ${dbhost} ${dbport}) ; do
-        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "Mongo Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
-      done
-    ;;
-    "mysql" )
-      COUNTER=0
-      export MYSQL_PWD=${dbpass}
-      while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do
-        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)"
-      done
-    ;;
-    "mssql" )
-      COUNTER=0
-      while ! (nc -z ${dbhost} ${dbport}) ; do
-        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "MSSQL Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
-      done
-    ;;
-    "pgsql" )
-      COUNTER=0
-      export PGPASSWORD=${dbpass}
-      until pg_isready --dbname=${dbname} --host=${dbhost} --port=${dbport} --username=${dbuser} -q
-      do
-        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "Postgres Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
-      done
-    ;;
-    "redis" )
-      COUNTER=0
-      while ! (nc -z "${dbhost}" "${dbport}") ; do
-        sleep 5
-        (( COUNTER+=5 ))
-        print_warn "Redis Host '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
-      done
-    ;;
-    "sqlite3" )
-      if [[ ! -e "${dbhost}" ]]; then
-        print_error "File '${dbhost}' does not exist."
-        exit_code=2
-        exit $exit_code
-      elif [[ ! -f "${dbhost}" ]]; then
-        print_error "File '${dbhost}' is not a file."
-        exit_code=2
-        exit $exit_code
-      elif [[ ! -r "${dbhost}" ]]; then
-        print_error "File '${dbhost}' is not readable."
-        exit_code=2
-        exit $exit_code
-      fi
-    ;;
-  esac
-}
-
-compression() {
-  case "$COMPRESSION" in
-    "GZ" | "gz" | "gzip" | "GZIP")
-      print_notice "Compressing backup with gzip"
-      target=${target}.gz
-      dumpoutput="$gzip "
-    ;;
-    "BZ" | "bz" | "bzip2" | "BZIP2" | "bzip" | "BZIP" | "bz2" | "BZ2")
-      print_notice "Compressing backup with bzip2"
-      target=${target}.bz2
-      dumpoutput="$bzip "
-    ;;
-    "XZ" | "xz" | "XZIP" | "xzip" )
-      print_notice "Compressing backup with xz"
-      target=${target}.xz
-      dumpoutput="$xzip "
-    ;;
-    "ZSTD" | "zstd" | "ZST" | "zst" )
-      print_notice "Compressing backup with zstd"
-      target=${target}.zst
-      dumpoutput="$zstd "
-    ;;
-    "NONE" | "none" | "FALSE" | "false")
-      dumpoutput="cat "
-    ;;
-  esac
-}
-
-generate_md5() {
-  if var_true "$MD5" ; then
-    print_notice "Generating MD5 for ${target}"
-    cd ${TEMP_LOCATION}
-    md5sum "${target}" > "${target}".md5
-    MD5VALUE=$(md5sum "${target}" | awk '{ print $1}')
-  fi
-}
-
-move_backup() {
-  case "$SIZE_VALUE" in
-    "b" | "bytes" )
-      SIZE_VALUE=1
-
-    ;;
-    "[kK]" | "[kK][bB]" | "kilobytes" | "[mM]" | "[mM][bB]" | "megabytes" )
-      SIZE_VALUE="-h"
-    ;;
-    *)
-      SIZE_VALUE=1
-    ;;
-  esac
-  if [ "$SIZE_VALUE" = "1" ] ; then
-    FILESIZE=$(stat -c%s "${TEMP_LOCATION}/${target}")
-    print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
-  else
-    FILESIZE=$(du -h "${TEMP_LOCATION}/${target}" | awk '{ print $1}')
-    print_notice "Backup of ${target} created with the size of ${FILESIZE}"
-  fi
-
-  case "${BACKUP_LOCATION}" in
-    "FILE" | "file" | "filesystem" | "FILESYSTEM" )
-      mkdir -p "${DB_DUMP_TARGET}"
-      mv ${TEMP_LOCATION}/*.md5 "${DB_DUMP_TARGET}"/
-      mv ${TEMP_LOCATION}/"${target}" "${DB_DUMP_TARGET}"/"${target}"
-    ;;
-    "S3" | "s3" | "MINIO" | "minio" )
-      export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
-      export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
-      export AWS_DEFAULT_REGION=${S3_REGION}
-
-      [[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
-
-      aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target}
-
-      rm -rf ${TEMP_LOCATION}/*.md5
-      rm -rf ${TEMP_LOCATION}/"${target}"
-    ;;
-  esac
-}
+  ;;
+esac
+bootstrap_compression
+bootstrap_variables

### Container Startup
print_debug "Backup routines Initialized on $(date)"

### Wait for Next time to start backup
-  if [ "$1" != "NOW" ]; then
-    current_time=$(date +"%s")
-    today=$(date +"%Y%m%d")
+case "${1,,}" in
+  "now" | "manual" )
+    :
+  ;;
+  * )
+    current_time=$(date +"%s")
+    today=$(date +"%Y%m%d")

-    if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
-      waittime=$(( ${BASH_REMATCH[1]} * 60 ))
-      target_time=$(($current_time + $waittime))
-    else
-      target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
-      if [[ "$target_time" < "$current_time" ]]; then
-        target_time=$(($target_time + 24*60*60))
+    if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
+      waittime=$(( ${BASH_REMATCH[1]} * 60 ))
+      target_time=$(($current_time + $waittime))
+    else
+      target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
+      if [[ "$target_time" -lt "$current_time" ]]; then
+        target_time=$(($target_time + 24*60*60))
+      fi
+      waittime=$(($target_time - $current_time))
     fi
-      waittime=$(($target_time - $current_time))
-    fi
-    print_debug "Wait Time: ${waittime} Target time: ${target_time} Current Time: ${current_time}"
-    print_notice "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
-    sleep $waittime
-  fi
-
+    print_debug "Wait Time: ${waittime} Target time: ${target_time} Current Time: ${current_time}"
+    print_info "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
+    sleep $waittime
+  ;;
+esac
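
# DB_DUMP_BEGIN, parsed above, accepts two forms (values shown are examples,
# not defaults): a leading + means minutes relative to container start, and a
# bare HHMM means an absolute time of day, rolled forward a day if already past.
#   DB_DUMP_BEGIN=+10    first dump 10 minutes after the container starts
#   DB_DUMP_BEGIN=0300   first dump at the next 03:00
# The absolute form resolves with the same date call the script uses:
date --date="$(date +%Y%m%d)0300" +"%s"   # epoch of today at 03:00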

### Commence Backup
-  while true; do
-    # make sure the directory exists
-    mkdir -p $TEMP_LOCATION
-
-### Define Target name
+while true; do
+  mkdir -p "${TEMP_LOCATION}"
  backup_start_time=$(date +"%s")
-  print_debug "Backup start time: ${backup_start_time} - $(date -d @${backup_start_time} +"%Y-%m-%d %T %Z")"
  now=$(date +"%Y%m%d-%H%M%S")
  now_time=$(date +"%H:%M:%S")
  now_date=$(date +"%Y-%m-%d")
  target=${dbtype}_${dbname}_${dbhost}_${now}.sql

-### Take a Dump
-  case "$dbtype" in
+  ### Take a Dump
+  case "${dbtype,,}" in
    "couch" )
      check_availability
      backup_couch
@@ -498,45 +91,50 @@
    ;;
  esac

-### Zabbix
+  backup_finish_time=$(date +"%s")
+  backup_total_time=$((backup_finish_time-backup_start_time))
+
+  print_info "Backup finish time: $(date -d @${backup_finish_time} +"%Y-%m-%d %T %Z")"
+  print_notice "Backup time elapsed: $(echo ${backup_total_time} | awk '{printf "Hours: *%d* Minutes: *%02d* Seconds: *%02d*", $1/3600, ($1/60)%60, $1%60}')"
+
+  ### Zabbix / Monitoring stats
  if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
    print_notice "Sending Backup Statistics to Zabbix"
    silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}")"
    silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s')"
+    silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
+    silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "${backup_total_time}"
  fi

-### Automatic Cleanup
-  if [[ -n "$DB_CLEANUP_TIME" ]]; then
-    print_notice "Cleaning up old backups"
+  ### Automatic Cleanup
+  if [ -n "${DB_CLEANUP_TIME}" ]; then
+    print_info "Cleaning up old backups"
+    mkdir -p "${DB_DUMP_TARGET}"
    find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
  fi
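
# DB_CLEANUP_TIME above is measured in minutes and handed straight to find's
# -mmin test. Example values (illustrative only, no default implied):
#   DB_CLEANUP_TIME=1440    prune backups older than one day
#   DB_CLEANUP_TIME=10080   prune backups older than one week
# A dry run of the same test previews what a weekly policy would delete:
find "${DB_DUMP_TARGET}"/ -mmin +10080 -print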

-  if [ -n "$POST_SCRIPT" ] ; then
-    print_notice "Found POST_SCRIPT environment variable. Executing"
-    eval "${POST_SCRIPT}"
+  ### Post Script Support
+  if [ -n "${POST_SCRIPT}" ] ; then
+    print_notice "Found POST_SCRIPT environment variable. Executing '${POST_SCRIPT}'"
+    eval "${POST_SCRIPT}" "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
  fi

-### Post Backup Custom Script Support
-  if [ -d /assets/custom-scripts/ ] ; then
-    print_notice "Found Custom Filesystem Scripts to Execute"
+  ### Post Backup Custom Script Support
+  if [ -d "/assets/custom-scripts/" ] ; then
+    print_notice "Found Post Backup Custom Script to execute"
    for f in $(find /assets/custom-scripts/ -name \*.sh -type f); do
-      print_notice "Running Script ${f}"
-      ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME DATE BACKUP_FILENAME FILESIZE MD5_VALUE
-      chmod +x "${f}"
-      ${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${now_date}" "${now_time}" "${target}" "${FILESIZE}" "${MD5VALUE}"
+      print_notice "Running Script: '${f}'"
+      ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
+      ${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}"
    done
  fi
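
# Sketch of a hypothetical custom script, e.g. /assets/custom-scripts/99-notify.sh,
# consuming the ten positional arguments documented above (file name and message
# format are invented for illustration):
exit_code="${1}" dbtype="${2}" dbhost="${3}" dbname="${4}"
start_epoch="${5}" finish_epoch="${6}" duration_seconds="${7}"
backup_filename="${8}" filesize="${9}" checksum_value="${10}"
echo "Backup '${backup_filename}' of ${dbtype}:${dbname}@${dbhost} rc=${exit_code} took ${duration_seconds}s (size ${filesize}, checksum ${checksum_value})"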

-  ### Go back to Sleep until next Backup time
-  if var_true $MANUAL ; then
-    exit 0;
+  if var_true "${manual}" ; then
+    print_debug "Exiting due to manual mode"
+    exit ${exit_code};
  else
-    backup_finish_time=$(date +"%s")
-    backup_total_time=$(echo $((backup_finish_time-backup_start_time)))
-    print_debug "Backup finish time: ${backup_finish_time} - $(date -d @${backup_start_time} +"%Y-%m-%d %T %Z")"
-    print_debug "Backup time elapsed: $(echo ${backup_total_time} | awk '{printf "Hours: *%d* Minutes: *%02d* Seconds: *%02d*", $1/3600, ($1/60)%60, $1%60}')"
+    ### Go back to sleep until next backup time
+    print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "
    sleep $(($DB_DUMP_FREQ*60-backup_total_time))
  fi
-  done
-
-
+done
diff --git a/install/usr/local/bin/backup-now b/install/usr/local/bin/backup-now
index e62bd3b..cd6e333 100755
--- a/install/usr/local/bin/backup-now
+++ b/install/usr/local/bin/backup-now
@@ -1,4 +1,4 @@
#!/command/with-contenv bash

echo '** Performing Manual Backup'
-/etc/services.available/10-db-backup/run NOW
+/etc/services.available/10-db-backup/run manual
diff --git a/install/usr/local/bin/restore b/install/usr/local/bin/restore
new file mode 100755
index 0000000..0403113
--- /dev/null
+++ b/install/usr/local/bin/restore
@@ -0,0 +1,933 @@
+#!/command/with-contenv /bin/bash
+
+source /assets/functions/00-container
+source /assets/defaults/10-db-backup
+source /assets/functions/10-db-backup
+PROCESS_NAME="db-backup-restore"
+
+oldcolumns=$COLUMNS
+
+########################################################################################
+### System Functions                                                                 ###
+########################################################################################
+### Colours
+# Foreground (Text) Colors
+cdgy="\e[90m"      # Color Dark Gray
+clg="\e[92m"       # Color Light Green
+clm="\e[95m"       # Color Light Magenta
+cwh="\e[97m"       # Color White
+
+# Turns off all formatting
+coff="\e[0m"       # Color Off
+
+# Background Colors
+bdr="\e[41m"       # Background Color Dark Red
+bdg="\e[42m"       # Background Color Dark Green
+bdb="\e[44m"       # Background Color Dark Blue
+bdm="\e[45m"       # Background Color Dark Magenta
+bdgy="\e[100m"     # Background Color Dark Gray
+blr="\e[101m"      # Background Color Light Red
+boff="\e[49m"      # Background Color Off
+
+bootstrap_variables
+
+if [ -z "${1}" ] ; then
+  interactive_mode=true
+else
+  case "$1" in
+    "-h" )
+      cat <<EOF
+
+If you only enter some of the arguments you will be prompted to fill them in.
+
+Other arguments
+   -h     This help screen
+
+EOF
+      exit 0
+    ;;
+    "-i" )
+      echo "interactive mode"
+      interactive_mode=true
+    ;;
+    * )
+      interactive_mode=false
+    ;;
+  esac
+fi
+
+get_filename() {
+  COLUMNS=12
+  prompt="Please select a file to restore:"
+  options=( $(find ${DB_DUMP_TARGET} -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
+  PS3="$prompt "
+  select opt in "${options[@]}" "Custom" "Quit" ; do
+    if (( REPLY == 2 + ${#options[@]} )) ; then
+      echo "Bye!"
+      exit 2
+    elif (( REPLY == 1 + ${#options[@]} )) ; then
+      while [ ! -f "${opt}" ] ; do
+        read -p "What path and filename to restore: " opt
+        if [ ! -f "${opt}" ] ; then
+          print_error "File not found. Please retry.."
+        fi
+      done
+      break
+    elif (( REPLY > 0 && REPLY <= ${#options[@]} )) ; then
+      break
+    else
+      echo "Invalid option. Try another one."
+    fi
+  done
+  COLUMNS=$oldcolumns
+  r_filename=${opt}
+}
+
+get_dbhost() {
+  p_dbhost=$(basename -- "${r_filename}" | cut -d _ -f 3)
+
+  if [ -n "${p_dbhost}" ]; then
+    parsed_host=true
+    print_debug "Parsed DBHost: ${p_dbhost}"
+  fi
+
+  if [ -z "${dbhost}" ] && [ -z "${parsed_host}" ]; then
+    print_debug "Parsed DBHost Variant: 1 - No Env, No Parsed Filename"
+    q_dbhost_variant=1
+    q_dbhost_menu=$(cat <