Compare commits

...

19 Commits

Author SHA1 Message Date
Dave Conroy
c515e2aaa0 Release 2.11.4 - See CHANGELOG.md 2022-03-15 10:29:37 -07:00
Dave Conroy
e5dea03d9e Github Workflow fix 2022-03-01 16:11:30 -08:00
Dave Conroy
31e4487827 Update github configuration 2022-02-10 09:28:38 -08:00
Dave Conroy
fa1dde53c0 Release 2.11.3 - See CHANGELOG.md 2022-02-09 19:34:27 -08:00
Dave Conroy
789a9e5f5a Release 2.11.2 - See CHANGELOG.md 2022-02-09 17:14:25 -08:00
Dave Conroy
b80d61f997 Merge pull request #102 from jacksgt/fix-s3-variable
Fix S3 variables (again)
2022-02-08 13:58:33 -08:00
Jack Henschel
9e23def7b4 Fix S3 variables (again)
S3_ENDPOINT is not used anywhere in the code, but S3_HOST is.
2022-02-08 21:52:53 +01:00
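For context, only `S3_HOST` feeds the AWS CLI endpoint; a minimal sketch of the relevant logic, mirroring the run-script diff further down:

```bash
# Endpoint construction as shown in the run-script diff below:
# only S3_HOST is read; S3_ENDPOINT never appears in the code.
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
[[ -n "${S3_HOST}" ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
aws ${PARAM_AWS_ENDPOINT_URL} s3 cp "${TEMP_LOCATION}/${target}" "s3://${S3_BUCKET}/${S3_PATH}/${target}"
```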
Dave Conroy
ca31eb840d 2.11.1 - See CHANGELOG.md 2022-02-06 17:03:36 -08:00
Dave Conroy
6bec188633 Merge pull request #101 from jacksgt/add-s3-endpoint
Correct several variables for S3 backups
2022-02-06 17:01:18 -08:00
Dave Conroy
43562077b0 Merge pull request #100 from jacksgt/patch-1
Use exit code 0 when running in manual mode
2022-02-06 16:59:00 -08:00
Jack Henschel
c39bdeebae Correct several variables for S3 backups
* S3_URI_STYLE is not used anywhere (anymore)
* BACKUP_TYPE is not documented anywhere, redundant with BACKUP_LOCATION
* S3_HOST is not used anymore, document S3_ENDPOINT instead

ref 4d6419fd18
2022-02-06 00:50:35 +01:00
Jack Henschel
36b5909483 Use exit code 0 when running in manual mode
When running this container as a Kubernetes cronjob, it is really inconvenient that the script exits with return code "1", thereby marking the job as a failure.
Instead, the script should return "0" because everything was successful.
2022-02-06 00:20:30 +01:00
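The fix is the tail of the backup loop, visible in the run-script diff further down:

```bash
# After a manual run, exit 0 so schedulers (e.g. a Kubernetes CronJob)
# record the job as successful; scheduled runs keep sleeping instead.
if var_true $MANUAL ; then
    exit 0
else
    sleep $(($DB_DUMP_FREQ*60))
fi
```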
Dave Conroy
6033b1b0a9 Update README to talk about quoting the values 2022-01-31 13:59:53 -08:00
Dave Conroy
88218915e1 Release 2.11.0 - See CHANGELOG.md 2022-01-20 09:23:06 -08:00
Dave Conroy
065887f789 Release 2.10.3 - See CHANGELOG.md 2022-01-07 06:33:46 -08:00
Dave Conroy
5aba713b73 Release 2.10.2 - See CHANGELOG.md 2021-12-28 14:16:46 -08:00
Dave Conroy
9a6039d71d Release 2.10.1 - See CHANGELOG.md 2021-12-24 17:43:18 -08:00
Dave Conroy
c5f1618231 Merge pull request #93 from milenkara/master
Provide region when using S3
2021-12-24 17:42:16 -08:00
milenkara
7dd9fa890f Provide region when using S3 2021-12-24 16:26:03 +01:00
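The change is visible in the run-script diff further down, where a hard-coded region gives way to the new variable:

```bash
# New: the region is sanity-checked and exported for the AWS CLI
# (replacing the previously hard-coded ap-northeast-2).
sanity_var S3_REGION "S3 Region"
export AWS_DEFAULT_REGION=${S3_REGION}
```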
12 changed files with 368 additions and 95 deletions

1
.github/config.yml vendored Normal file
View File

@@ -0,0 +1 @@
blank_issues_enabled: false

View File

@@ -87,9 +87,11 @@ jobs:
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
if [ -f "CHANGELOG.md" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
fi
@@ -105,6 +107,6 @@ jobs:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64
platforms: linux/amd64,linux/arm/v7,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}
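As an aside, the `sed -i "/FROM .*/a LABEL ..."` pattern in this workflow appends a `LABEL` line immediately after each `FROM` line of the Dockerfile; a standalone sketch with hypothetical Dockerfile contents:

```bash
# Demonstrate the label-injection pattern from the workflow above.
cat > Dockerfile <<'EOF'
FROM alpine:3.15
RUN echo hello
EOF
# sed's 'a' (append) inserts the LABEL after every line matching /FROM .*/
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
cat Dockerfile
```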

View File

@@ -87,9 +87,11 @@ jobs:
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
if [ -f "CHANGELOG.md" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
fi
@@ -105,6 +107,6 @@ jobs:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64
platforms: linux/amd64,linux/arm/v7,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}

View File

@@ -1,3 +1,55 @@
## 2.11.4 2022-03-15 <dave at tiredofit dot ca>
### Added
- Add debug statement around the scheduling component
## 2.11.3 2022-02-09 <dave at tiredofit dot ca>
### Changed
- Rework to support new base image
## 2.11.2 2022-02-09 <dave at tiredofit dot ca>
### Changed
- Refresh base image
## 2.11.1 2022-01-20 <jacksgt@github>
### Changed
- Modernized S3 variables and sanity checks
- Change exit code to 0 when executing a manual backup
## 2.11.0 2022-01-20 <dave at tiredofit dot ca>
### Added
- Add capability to select `TEMP_LOCATION` for initial backup and compression before backup completes to avoid filling system memory
### Changed
- Cleanup for MariaDB/MySQL DB ready routines that half worked in 2.10.3
- Code cleanup
## 2.10.3 2022-01-07 <dave at tiredofit dot ca>
### Changed
- Change the way the MariaDB/MySQL connectivity check is performed to allow for better compatibility without requiring the DB_USER to have PROCESS privileges
## 2.10.2 2021-12-28 <dave at tiredofit dot ca>
### Changed
- Remove logrotate configuration for redis which shouldn't exist in the first place
## 2.10.1 2021-12-22 <milenkara@github>
### Added
- Allow for choosing region when backing up to S3
## 2.10.0 2021-12-22 <dave at tiredofit dot ca>
### Changed

View File

@@ -1,9 +1,12 @@
FROM docker.io/tiredofit/alpine:3.15
LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables
ENV MSSQL_VERSION=17.8.1.1-1 \
CONTAINER_ENABLE_MESSAGING=FALSE \
CONTAINER_ENABLE_MONITORING=TRUE
CONTAINER_ENABLE_MONITORING=TRUE \
IMAGE_NAME="tiredofit/db-backup" \
IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
### Dependencies
RUN set -ex && \
@@ -61,7 +64,8 @@ RUN set -ex && \
### Cleanup
apk del .db-backup-build-deps && \
rm -rf /usr/src/* && \
rm -rf /etc/logrotate.d/redis && \
rm -rf /root/.cache /tmp/* /var/cache/apk/*
### S6 Setup
ADD install /
ADD install /

View File

@@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2021 Dave Conroy
Copyright (c) 2022 Dave Conroy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -109,31 +109,36 @@ Be sure to view the following repositories to understand all the customizable op
| [OS Base](https://github.com/tiredofit/docker-alpine/) | Customized Image based on Alpine Linux |
| Parameter | Description |
| ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi - Default `FILESYSTEM` |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` - Default `GZ` |
| `COMPRESSION_LEVEL` | Numerical compression level; most allow `1` to `9`, except `ZSTD` which allows `1` to `19` - Default `3` |
| `DB_AUTH` | (Mongo Only - Optional) Authentication Database |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` |
| `DB_NAME` | Schema Name e.g. `database` |
| `DB_USER` | Username for the database - use `root` to back up all MySQL databases. |
| `DB_PASS` | (optional if DB doesn't require it) password for the database |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. |
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats |
| | Absolute HHMM, e.g. `2330` or `0415` |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half |
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fires when the dump frequency fires). 1440 would delete anything older than 1 day. You don't need to set this variable if you want to keep everything. |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. "--extra-command" |
| `MD5` | Generate MD5 Sum in Directory, `TRUE` or `FALSE` - Default `TRUE` |
| `PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` - Default `TRUE` |
| `POST_SCRIPT` | Fill this variable with a command to execute after the backup completes |
| `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create separate DB backups instead of all in one. - Default `FALSE` |
| Parameter | Description | Default |
| -------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `GZ` |
| `COMPRESSION_LEVEL` | Numerical compression level; most allow `1` to `9`, except `ZSTD` which allows `1` to `19` | `3` |
| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
| `DB_NAME` | Schema Name e.g. `database` | |
| `DB_USER` | Username for the database - use `root` to back up all MySQL databases. | |
| `DB_PASS` | (optional if DB doesn't require it) password for the database | |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fires when the dump frequency fires). 1440 would delete anything older than 1 day. You don't need to set this variable if you want to keep everything. | `FALSE` |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | |
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` |
| `MD5` | Generate MD5 Sum in Directory, `TRUE` or `FALSE` | `TRUE` |
| `PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
| `POST_SCRIPT` | Fill this variable with a command to execute after the backup completes | |
| `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create separate DB backups instead of all in one. | `FALSE` |
| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
- When using compression with MongoDB, only `GZ` compression is possible.
- You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to parse properly. There have been reports that values starting with `0` get converted into a different format, which prevents the timer from starting at the correct time; see the sketch below.
When using compression with MongoDB, only `GZ` compression is possible.
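A minimal sketch of the quoting advice (hypothetical invocation; the image name comes from the install instructions):

```bash
# Pass the start time quoted; in docker-compose YAML especially, an
# unquoted leading-zero value like 0415 can be reinterpreted.
docker run -d --name db-backup \
  -e DB_DUMP_BEGIN="0415" \
  tiredofit/db-backup
```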
#### Backing Up to S3 Compatible Services
@@ -141,12 +146,13 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.
| Parameter | Description |
| --------------- | --------------------------------------------------------------------------------------- |
| `S3_BUCKET` | S3 Bucket name e.g. 'mybucket' |
| `S3_HOST` | Hostname of S3 Server, e.g. `s3.amazonaws.com` - You can also include a port if necessary |
| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` |
| `S3_KEY_ID` | S3 Key ID |
| `S3_KEY_SECRET` | S3 Key Secret |
| `S3_PATH` | S3 path to save to, e.g. `backup` |
| `S3_PROTOCOL` | Use either `http` or `https` to access service - Default `https` |
| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` |
| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. |
| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. |
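Putting the S3 settings together, a hypothetical run against a MinIO instance (all values are placeholders):

```bash
# Hypothetical example: MySQL backups shipped to MinIO over http.
docker run -d --name db-backup \
  -e DB_TYPE=mysql -e DB_HOST=mariadb -e DB_NAME=database \
  -e DB_USER=root -e DB_PASS=password \
  -e BACKUP_LOCATION=S3 \
  -e S3_HOST=minio:8080 -e S3_PROTOCOL=http \
  -e S3_BUCKET=mybucket -e S3_PATH=backup \
  -e S3_KEY_ID=minio -e S3_KEY_SECRET=minio123 \
  -e S3_REGION=us-east-1 \
  tiredofit/db-backup
```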
## Maintenance

219
README.md.save Normal file
View File

@@ -0,0 +1,219 @@
# github.com/tiredofit/docker-db-backup
[![GitHub release](https://img.shields.io/github/v/tag/tiredofit/docker-db-backup?style=flat-square)](https://github.com/tiredofit/docker-db-backup/releases/latest)
[![Build Status](https://img.shields.io/github/workflow/status/tiredofit/docker-db-backup/build?style=flat-square)](https://github.com/tiredofit/docker-db-backup/actions?query=workflow%3Abuild)
[![Docker Stars](https://img.shields.io/docker/stars/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
[![Docker Pulls](https://img.shields.io/docker/pulls/tiredofit/db-backup.svg?style=flat-square&logo=docker)](https://hub.docker.com/r/tiredofit/db-backup/)
[![Become a sponsor](https://img.shields.io/badge/sponsor-tiredofit-181717.svg?logo=github&style=flat-square)](https://github.com/sponsors/tiredofit)
[![Paypal Donate](https://img.shields.io/badge/donate-paypal-00457c.svg?logo=paypal&style=flat-square)](https://www.paypal.me/tiredofit)
* * *
## About
This will build a container for backing up multiple types of DB Servers.
Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, and Redis servers.
* dump to local filesystem or backup to S3 Compatible services
* select database user and password
* backup all databases
* choose to have an MD5 sum after backup for verification
* delete old backups after specific amount of time
* choose compression type (none, gz, bz, xz, zstd)
* connect to any container running on the same system
* select how often to run a dump
* select when to start the first dump, whether time of day or relative to container start time
* Execute script after backup for monitoring/alerting purposes
## Maintainer
- [Dave Conroy](https://github.com/tiredofit)
## Table of Contents
- [About](#about)
- [Maintainer](#maintainer)
- [Table of Contents](#table-of-contents)
- [Prerequisites and Assumptions](#prerequisites-and-assumptions)
- [Installation](#installation)
- [Build from Source](#build-from-source)
- [Prebuilt Images](#prebuilt-images)
- [Configuration](#configuration)
- [Quick Start](#quick-start)
- [Persistent Storage](#persistent-storage)
- [Environment Variables](#environment-variables)
- [Base Images used](#base-images-used)
- [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
- [Maintenance](#maintenance)
- [Shell Access](#shell-access)
- [Manual Backups](#manual-backups)
- [Custom Scripts](#custom-scripts)
- [Support](#support)
- [Usage](#usage)
- [Bugfixes](#bugfixes)
- [Feature Requests](#feature-requests)
- [Updates](#updates)
- [License](#license)
## Prerequisites and Assumptions
You must have a working DB server or container available for this to work properly; it does not provide server functionality!
## Installation
### Build from Source
Clone this repository and build the image with `docker build -t (imagename) .`
### Prebuilt Images
Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup); this is the recommended method of installation.
```bash
docker pull tiredofit/db-backup:(imagetag)
```
The following image tags are available along with their tagged release based on what's written in the [Changelog](CHANGELOG.md):
| Container OS | Tag |
| ------------ | --------- |
| Alpine | `:latest` |
## Configuration
### Quick Start
* The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a working [docker-compose.yml](examples/docker-compose.yml) that can be modified for development or production use.
* Set various [environment variables](#environment-variables) to understand the capabilities of this image.
* Map [persistent storage](#persistent-storage) for access to configuration and data files for backup.
> **NOTE**: If you are using this with a docker-compose file along with a separate SQL container, take care not to set the variables to back up immediately; instead, delay execution for a minute or so, otherwise the first backup will fail. See the sketch below.
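A sketch of that advice with a relative start time (hypothetical one-minute delay):

```bash
# Delay the first dump by one minute (+MM format) so a companion SQL
# container has time to become ready before the first backup fires.
docker run -d --name db-backup \
  -e DB_DUMP_BEGIN="+1" \
  tiredofit/db-backup
```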
### Persistent Storage
The following directories are used for configuration and can be mapped for persistent storage.
| Directory | Description |
| ------------------------ | ---------------------------------------------------------------------------------- |
| `/backup` | Backups |
| `/assets/custom-scripts` | *Optional* Put custom scripts in this directory to execute after backup operations |
### Environment Variables
#### Base Images used
This image is based on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handled via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`, `curl`, `less`, `logrotate`, `nano`, `vim`.
Be sure to view the following repositories to understand all the customizable options:
| Image | Description |
| ------------------------------------------------------ | -------------------------------------- |
| [OS Base](https://github.com/tiredofit/docker-alpine/) | Customized Image based on Alpine Linux |
| Parameter | Description | Default |
| -------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- |
| `BACKUP_LOCATION` | Backup to `FILESYSTEM` or `S3` compatible services like S3, Minio, Wasabi | `FILESYSTEM` |
| `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `GZ` |
| `COMPRESSION_LEVEL` | Numerical compression level; most allow `1` to `9`, except `ZSTD` which allows `1` to `19` | `3` |
| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | |
| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | |
| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | |
| `DB_NAME` | Schema Name e.g. `database` | |
| `DB_USER` | Username for the database - use `root` to back up all MySQL databases. | |
| `DB_PASS` | (optional if DB doesn't require it) password for the database | |
| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies |
| `DB_DUMP_FREQ` | How often to do a dump, in minutes. Defaults to 1440 minutes, or once per day. | `1440` |
| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
| | Absolute HHMM, e.g. `2330` or `0415` | |
| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fires when the dump frequency fires). 1440 would delete anything older than 1 day. You don't need to set this variable if you want to keep everything. | `FALSE` |
| `DEBUG_MODE` | If set to `true`, print copious shell script messages to the container log. Otherwise only basic messages are printed. | |
| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | |
| `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` |
| `MD5` | Generate MD5 Sum in Directory, `TRUE` or `FALSE` | `TRUE` |
| `PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` |
| `POST_SCRIPT` | Fill this variable with a command to execute after the backup completes | |
| `SPLIT_DB` | If using root as username and multiple DBs on system, set to TRUE to create separate DB backups instead of all in one. | `FALSE` |
| `TEMP_LOCATION` | Perform Backups and Compression in this temporary directory | `/tmp/backups/` |
- When using compression with MongoDB, only `GZ` compression is possible.
- You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to parse properly. There have been reports that values starting with `0` get converted into a different format, which prevents the timer from starting at the correct time.
#### Backing Up to S3 Compatible Services
If `BACKUP_LOCATION` = `S3` then the following options are used.
| Parameter | Description |
| --------------- | --------------------------------------------------------------------------------------- |
| `S3_BUCKET` | S3 Bucket name e.g. `mybucket` |
| `S3_KEY_ID` | S3 Key ID |
| `S3_KEY_SECRET` | S3 Key Secret |
| `S3_PATH` | S3 path to save to, e.g. `backup` |
| `S3_REGION` | Define region in which bucket is defined. Example: `ap-northeast-2` |
| `S3_HOST` | Hostname (and port) of S3-compatible service, e.g. `minio:8080`. Defaults to AWS. |
| `S3_PROTOCOL` | Protocol to connect to `S3_HOST`. Either `http` or `https`. Defaults to `https`. |
## Maintenance
### Shell Access
For debugging and maintenance purposes you may want to access the container's shell.
```bash
docker exec -it (whatever your container name is) bash
```
### Manual Backups
Manual Backups can be performed by entering the container and typing `backup-now`
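From the host, that amounts to the following (container name assumed to be `db-backup`):

```bash
# Trigger an immediate one-off backup inside a running container.
docker exec db-backup backup-now
```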
### Custom Scripts
If you want to execute a custom script at the end of a backup, drop bash scripts with the extension `.sh` into this directory. See the following example:
```bash
$ cat post-script.sh
#!/bin/bash
# #### Example Post Script
# #### $1=EXIT_CODE (After running backup routine)
# #### $2=DB_TYPE (Type of Backup)
# #### $3=DB_HOST (Backup Host)
# #### $4=DB_NAME (Name of Database backed up)
# #### $5=DATE (Date of Backup)
# #### $6=TIME (Time of Backup)
# #### $7=BACKUP_FILENAME (Filename of Backup)
# #### $8=FILESIZE (Filesize of backup)
# #### $9=MD5_RESULT (MD5Sum if enabled)
echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ${6}. Filename: ${7} Size: ${8} bytes MD5: ${9}"
```
Outputs the following on the console:
`0 mysql Backup Completed on example-db for example on 2020-04-22 05:19:10. Filename: mysql_example_example-db_20200422-051910.sql.bz2 Size: 7795 bytes MD5: 952fbaafa30437494fdf3989a662cd40`
If you wish to change the size value from bytes to megabytes, set the environment variable `SIZE_VALUE=megabytes`.
## Support
These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.
### Usage
- The [Discussions board](../../discussions) is a great place for working with the community on tips and tricks of using this image.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for personalized support.
### Bugfixes
- Please submit a [Bug Report](issues/new) if something isn't working as expected. I'll do my best to issue a fix in short order.
### Feature Requests
- Feel free to submit a feature request; however, there is no guarantee that it will be added, or on what timeline.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) regarding development of features.
### Updates
- Best effort to track upstream changes; higher priority if I am actively using the image in a production environment.
- Consider [sponsoring me](https://github.com/sponsors/tiredofit) for up to date releases.
## License
MIT. See [LICENSE](LICENSE) for more details.

View File

@@ -1,3 +1,3 @@
#!/usr/bin/with-contenv bash
#!/command/with-contenv bash
pkill bash

View File

@@ -1,4 +1,4 @@
#!/usr/bin/with-contenv bash
#!/command/with-contenv bash
source /assets/functions/00-container
prepare_service single
@@ -6,12 +6,6 @@ prepare_service 03-monitoring
PROCESS_NAME="db-backup"
output_off
if var_true "${CONTAINER_ENABLE_MONITORING}" && [ "${CONTAINER_MONITORING_BACKEND,,}" = "zabbix" ]; then
cat <<EOF > "${ZABBIX_CONFIG_PATH}"/"${ZABBIX_CONFIG_FILE}.d"/tiredofit_dbbackup.conf
# Zabbix DB Backup Configuration - Automatically Generated
# Find Companion Zabbix Server Templates at https://github.com/tiredofit/docker-dbbackup
# Autoregister=dbbackup
EOF
fi
create_zabbix dbbackup
liftoff

View File

@@ -1,4 +1,4 @@
#!/usr/bin/with-contenv bash
#!/command/with-contenv bash
source /assets/functions/00-container
PROCESS_NAME="db-backup"
@@ -39,7 +39,8 @@ case "$dbtype" in
dbtype=mysql
dbport=${DB_PORT:-3306}
[[ ( -n "${DB_PASS}" ) || ( -n "${DB_PASS_FILE}" ) ]] && file_env 'DB_PASS'
;;
MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
;;
"mssql" | "MSSQL" | "microsoftsql" | "MICROSOFTSQL")
apkArch="$(apk --print-arch)"; \
case "$apkArch" in
@@ -66,29 +67,28 @@ esac
### Set Defaults
BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
COMPRESSION=${COMPRESSION:-GZ}
COMPRESSION=${COMPRESSION:-"GZ"}
COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
DB_DUMP_TARGET=${DB_DUMP_TARGET:-/backup}
DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
dbhost=${DB_HOST}
dbname=${DB_NAME}
dbpass=${DB_PASS}
dbuser=${DB_USER}
MD5=${MD5:-TRUE}
PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-TRUE}
PARALLEL_COMPRESSION=${PARALLEL_COMPRESSION:-"TRUE"}
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SPLIT_DB=${SPLIT_DB:-FALSE}
tmpdir=/tmp/backups
SPLIT_DB=${SPLIT_DB:-"FALSE"}
TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}
if [ "$BACKUP_TYPE" = "S3" ] || [ "$BACKUP_TYPE" = "s3" ] || [ "$BACKUP_TYPE" = "MINIO" ] || [ "$BACKUP_TYPE" = "minio" ] ; then
if [ "$BACKUP_LOCATION" = "S3" ] || [ "$BACKUP_LOCATION" = "s3" ] || [ "$BACKUP_LOCATION" = "MINIO" ] || [ "$BACKUP_LOCATION" = "minio" ] ; then
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
sanity_var S3_HOST "S3 Host"
sanity_var S3_BUCKET "S3 Bucket"
sanity_var S3_KEY_ID "S3 Key ID"
sanity_var S3_KEY_SECRET "S3 Key Secret"
sanity_var S3_URI_STYLE "S3 URI Style (Virtualhost or Path)"
sanity_var S3_PATH "S3 Path"
sanity_var S3_REGION "S3 Region"
file_env 'S3_KEY_ID'
file_env 'S3_KEY_SECRET'
fi
@@ -99,7 +99,7 @@ if [ "$1" = "NOW" ]; then
fi
### Set Compression Options
if var_true "$PARALLEL_COMPRESSION" ; then
if var_true "${PARALLEL_COMPRESSION}" ; then
bzip="pbzip2 -${COMPRESSION_LEVEL}"
gzip="pigz -${COMPRESSION_LEVEL}"
xzip="pixz -${COMPRESSION_LEVEL}"
@@ -134,7 +134,7 @@ esac
backup_couch() {
target=couch_${dbname}_${dbhost}_${now}.txt
compression
curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${dumpoutput} | $dumpoutput > ${tmpdir}/${target}
curl -X GET http://${dbhost}:${dbport}/${dbname}/_all_docs?include_docs=true ${dumpoutput} | $dumpoutput > ${TEMP_LOCATION}/${target}
exit_code=$?
generate_md5
move_backup
@@ -147,9 +147,9 @@ backup_influx() {
print_notice "Compressing InfluxDB backup with gzip"
influx_compression="-portable"
fi
for DB in $DB_NAME; do
for DB in ${DB_NAME}; do
target=influx_${DB}_${dbhost}_${now}
influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${tmpdir}/${target}
influxd backup ${influx_compression} -database $DB -host ${dbhost}:${dbport} ${TEMP_LOCATION}/${target}
exit_code=$?
generate_md5
move_backup
@@ -164,28 +164,28 @@ backup_mongo() {
target=${dbtype}_${dbname}_${dbhost}_${now}.archivegz
mongo_compression="--gzip"
fi
mongodump --archive=${tmpdir}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
mongodump --archive=${TEMP_LOCATION}/${target} ${mongo_compression} --host ${dbhost} --port ${dbport} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}
exit_code=$?
cd ${tmpdir}
cd ${TEMP_LOCATION}
generate_md5
move_backup
}
backup_mssql() {
target=mssql_${dbname}_${dbhost}_${now}.bak
/opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} -Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${tmpdir}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
/opt/mssql-tools/bin/sqlcmd -E -C -S ${dbhost}\,${dbport} -U ${dbuser} -P ${dbpass} -Q "BACKUP DATABASE \[${dbname}\] TO DISK = N'${TEMP_LOCATION}/${target}' WITH NOFORMAT, NOINIT, NAME = '${dbname}-full', SKIP, NOREWIND, NOUNLOAD, STATS = 10"
}
backup_mysql() {
if var_true "$SPLIT_DB" ; then
DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database|grep -v schema)
if var_true "${SPLIT_DB}" ; then
DATABASES=$(mysql -h ${dbhost} -P $dbport -u$dbuser --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema)
for db in $DATABASES; do
if [[ "$db" != "information_schema" ]] && [[ "$db" != _* ]] ; then
print_notice "Dumping MariaDB database: $db"
target=mysql_${db}_${dbhost}_${now}.sql
compression
mysqldump --max-allowed-packet=512M -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} --databases $db | $dumpoutput > ${tmpdir}/${target}
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} --databases $db | $dumpoutput > ${TEMP_LOCATION}/${target}
exit_code=$?
generate_md5
move_backup
@@ -193,7 +193,7 @@ backup_mysql() {
done
else
compression
mysqldump --max-allowed-packet=512M -A -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -A -h $dbhost -P $dbport -u$dbuser ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
exit_code=$?
generate_md5
move_backup
@@ -201,7 +201,7 @@ backup_mysql() {
}
backup_pgsql() {
if var_true $SPLIT_DB ; then
if var_true "${SPLIT_DB}" ; then
export PGPASSWORD=${dbpass}
authdb=${DB_USER}
[ -n "${DB_NAME}" ] && authdb=${DB_NAME}
@@ -210,7 +210,7 @@ backup_pgsql() {
print_info "Dumping database: $db"
target=pgsql_${db}_${dbhost}_${now}.sql
compression
pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
pg_dump -h ${dbhost} -p ${dbport} -U ${dbuser} $db ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
exit_code=$?
generate_md5
move_backup
@@ -218,7 +218,7 @@ backup_pgsql() {
else
export PGPASSWORD=${dbpass}
compression
pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${tmpdir}/${target}
pg_dump -h ${dbhost} -U ${dbuser} -p ${dbport} ${dbname} ${EXTRA_OPTS} | $dumpoutput > ${TEMP_LOCATION}/${target}
exit_code=$?
generate_md5
move_backup
@@ -227,7 +227,7 @@ backup_pgsql() {
backup_redis() {
target=redis_${db}_${dbhost}_${now}.rdb
echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${tmpdir}/${target} ${EXTRA_OPTS}
echo bgsave | redis-cli -h ${dbhost} -p ${dbport} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
print_info "Dumping Redis - Flushing Redis Cache First"
sleep 10
try=5
@@ -244,7 +244,7 @@ backup_redis() {
done
target_original=${target}
compression
$dumpoutput "${tmpdir}/${target_original}"
$dumpoutput "${TEMP_LOCATION}/${target_original}"
generate_md5
move_backup
}
@@ -256,10 +256,10 @@ backup_sqlite3() {
compression
print_info "Dumping sqlite3 database: ${dbhost}"
sqlite3 "${dbhost}" ".backup '${tmpdir}/backup.sqlite3'"
sqlite3 "${dbhost}" ".backup '${TEMP_LOCATION}/backup.sqlite3'"
exit_code=$?
cat "${tmpdir}/backup.sqlite3" | $dumpoutput > "${tmpdir}/${target}"
cat "${TEMP_LOCATION}/backup.sqlite3" | $dumpoutput > "${TEMP_LOCATION}/${target}"
generate_md5
move_backup
@@ -295,17 +295,10 @@ check_availability() {
"mysql" )
COUNTER=0
export MYSQL_PWD=${dbpass}
while true; do
mysqlcmd='mysql -u'${dbuser}' -P '${dbport}' -h '${dbhost}
out="$($mysqlcmd -e "SELECT COUNT(*) FROM information_schema.FILES;" 2>&1)"
echo "$out" | grep -E "COUNT|Enter" 2>&1 > /dev/null
if [ $? -eq 0 ]; then
:
break
fi
print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. ($COUNTER seconds so far)"
sleep 5
(( COUNTER+=5 ))
while ! (mysqladmin -u"${dbuser}" -P"${dbport}" -h"${dbhost}" status > /dev/null 2>&1) ; do
sleep 5
(( COUNTER+=5 ))
print_warn "MySQL/MariaDB Server '${dbhost}' is not accessible, retrying.. (${COUNTER} seconds so far)"
done
;;
"mssql" )
@@ -383,7 +376,7 @@ compression() {
generate_md5() {
if var_true "$MD5" ; then
print_notice "Generating MD5 for ${target}"
cd $tmpdir
cd ${TEMP_LOCATION}
md5sum "${target}" > "${target}".md5
MD5VALUE=$(md5sum "${target}" | awk '{ print $1}')
fi
@@ -403,30 +396,30 @@ move_backup() {
;;
esac
if [ "$SIZE_VALUE" = "1" ] ; then
FILESIZE=$(stat -c%s "${tmpdir}/${target}")
FILESIZE=$(stat -c%s "${TEMP_LOCATION}/${target}")
print_notice "Backup of ${target} created with the size of ${FILESIZE} bytes"
else
FILESIZE=$(du -h "${tmpdir}/${target}" | awk '{ print $1}')
FILESIZE=$(du -h "${TEMP_LOCATION}/${target}" | awk '{ print $1}')
print_notice "Backup of ${target} created with the size of ${FILESIZE}"
fi
case "${BACKUP_LOCATION}" in
"FILE" | "file" | "filesystem" | "FILESYSTEM" )
mkdir -p "${DB_DUMP_TARGET}"
mv ${tmpdir}/*.md5 "${DB_DUMP_TARGET}"/
mv ${tmpdir}/"${target}" "${DB_DUMP_TARGET}"/"${target}"
mv ${TEMP_LOCATION}/*.md5 "${DB_DUMP_TARGET}"/
mv ${TEMP_LOCATION}/"${target}" "${DB_DUMP_TARGET}"/"${target}"
;;
"S3" | "s3" | "MINIO" | "minio" )
export AWS_ACCESS_KEY_ID=${S3_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${S3_KEY_SECRET}
export AWS_DEFAULT_REGION=ap-northeast-2
export AWS_DEFAULT_REGION=${S3_REGION}
[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"
aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${tmpdir}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target}
aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target}
rm -rf ${tmpdir}/*.md5
rm -rf ${tmpdir}/"${target}"
rm -rf ${TEMP_LOCATION}/*.md5
rm -rf ${TEMP_LOCATION}/"${target}"
;;
esac
}
@@ -450,7 +443,7 @@ print_debug "Backup routines Initialized on $(date)"
fi
waittime=$(($target_time - $current_time))
fi
print_debug "Wait Time: ${waittime} Target time: ${target_time} Current Time: ${current_time}"
print_notice "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
sleep $waittime
fi
@@ -459,7 +452,7 @@ print_debug "Backup routines Initialized on $(date)"
### Commence Backup
while true; do
# make sure the directory exists
mkdir -p $tmpdir
mkdir -p $TEMP_LOCATION
### Define Target name
now=$(date +"%Y%m%d-%H%M%S")
@@ -534,7 +527,7 @@ print_debug "Backup routines Initialized on $(date)"
### Go back to Sleep until next Backup time
if var_true $MANUAL ; then
exit 1;
exit 0;
else
sleep $(($DB_DUMP_FREQ*60))
fi

View File

@@ -1,4 +1,4 @@
#!/usr/bin/with-contenv bash
#!/command/with-contenv bash
echo '** Performing Manual Backup'
/etc/services.available/10-db-backup/run NOW