Mirror of https://github.com/tiredofit/docker-db-backup.git, synced 2025-12-21 21:33:28 +01:00
Merge pull request #263 from alwynpan/bugfix/#262-pgsql-db-not-exist

fix: Add an option DB_AUTH for PGSQL
668 CHANGELOG.md (file diff suppressed because it is too large)
229 README.md
@@ -7,26 +7,27 @@
 [](https://github.com/sponsors/tiredofit)
 [](https://www.paypal.me/tiredofit)

-* * *
+---

 ## About

 This will build a container for backing up multiple types of DB Servers

 Currently backs up CouchDB, InfluxDB, MySQL, Microsoft SQL, MongoDB, Postgres, Redis servers.

-* dump to local filesystem or backup to S3 Compatible services, and Azure.
-* select database user and password
-* backup all databases, single, or multiple databases
-* backup all to seperate files or one singular file
-* choose to have an MD5 or SHA1 sum after backup for verification
-* delete old backups after specific amount of time
-* choose compression type (none, gz, bz, xz, zstd)
-* connect to any container running on the same system
-* Script to perform restores
-* Zabbix Monitoring capabilities
-* select how often to run a dump
-* select when to start the first dump, whether time of day or relative to container start time
-* Execute script after backup for monitoring/alerting purposes
+- dump to local filesystem or backup to S3 Compatible services, and Azure.
+- select database user and password
+- backup all databases, single, or multiple databases
+- backup all to separate files or one singular file
+- choose to have an MD5 or SHA1 sum after backup for verification
+- delete old backups after specific amount of time
+- choose compression type (none, gz, bz, xz, zstd)
+- connect to any container running on the same system
+- Script to perform restores
+- Zabbix Monitoring capabilities
+- select how often to run a dump
+- select when to start the first dump, whether time of day or relative to container start time
+- Execute script after backup for monitoring/alerting purposes

 ## Maintainer
@@ -34,57 +35,61 @@ Currently backs up CouchDB, InfluxDB, MySQL, Microsoft SQL, MongoDB, Postgres, R

 ## Table of Contents

+- [github.com/tiredofit/docker-db-backup](#githubcomtiredofitdocker-db-backup)
 - [About](#about)
 - [Maintainer](#maintainer)
 - [Table of Contents](#table-of-contents)
 - [Prerequisites and Assumptions](#prerequisites-and-assumptions)
 - [Installation](#installation)
 - [Build from Source](#build-from-source)
 - [Prebuilt Images](#prebuilt-images)
 - [Multi Architecture](#multi-architecture)
 - [Configuration](#configuration)
 - [Quick Start](#quick-start)
 - [Persistent Storage](#persistent-storage)
 - [Environment Variables](#environment-variables)
 - [Base Images used](#base-images-used)
 - [Container Options](#container-options)
 - [Database Specific Options](#database-specific-options)
-- [For Influx DB2:](#for-influx-db2)
+- [For Influx DB2](#for-influx-db2)
 - [Scheduling Options](#scheduling-options)
 - [Backup Options](#backup-options)
 - [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
 - [Upload to a Azure storage account by `blobxfer`](#upload-to-a-azure-storage-account-by-blobxfer)
 - [Maintenance](#maintenance)
 - [Shell Access](#shell-access)
 - [Manual Backups](#manual-backups)
 - [Restoring Databases](#restoring-databases)
 - [Custom Scripts](#custom-scripts)
 - [Path Options](#path-options)
 - [Pre Backup](#pre-backup)
 - [Post backup](#post-backup)
 - [Support](#support)
 - [Usage](#usage)
 - [Bugfixes](#bugfixes)
 - [Feature Requests](#feature-requests)
 - [Updates](#updates)
 - [License](#license)

 > **NOTE**: If you are using this with a docker-compose file along with a seperate SQL container, take care not to set the variables to backup immediately, more so have it delay execution for a minute, otherwise you will get a failed first backup.

 ## Prerequisites and Assumptions

-* You must have a working connection to one of the supported DB Servers and appropriate credentials
+- You must have a working connection to one of the supported DB Servers and appropriate credentials

 ## Installation

 ### Build from Source

 Clone this repository and build the image with `docker build <arguments> (imagename) .`

 ### Prebuilt Images

 Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup)

 Builds of the image are also available on the [Github Container Registry](https://github.com/tiredofit/docker-db-backup/pkgs/container/docker-db-backup)

-```
+```bash
 docker pull ghcr.io/tiredofit/docker-db-backup:(imagetag)
 ```
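For the Build from Source step mentioned in the hunk above, a minimal sketch of the clone-and-build flow; the local tag name is only an example and is not taken from this commit:

```bash
# Sketch only: clone the repository and build a locally tagged image
git clone https://github.com/tiredofit/docker-db-backup.git
cd docker-db-backup
docker build -t db-backup:local .
```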
@@ -95,20 +100,22 @@ The following image tags are available along with their tagged release based on
 | latest | `:latest` |

 ```bash
-docker pull docker.io/tiredofdit/db-backup:(imagetag)
+docker pull docker.io/tiredofit/db-backup:(imagetag)
 ```

 #### Multi Architecture

-Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architecures, type `docker manifest (image):(tag)`
+Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architectures, type `docker manifest (image):(tag)`

 ## Configuration

 ### Quick Start

-* The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a working [docker-compose.yml](examples/docker-compose.yml) that can be modified for development or production use.
-* Set various [environment variables](#environment-variables) to understand the capabilities of this image.
-* Map [persistent storage](#data-volumes) for access to configuration and data files for backup.
-* Make [networking ports](#networking) available for public access if necessary
+- The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a working [docker-compose.yml](examples/docker-compose.yml) that can be modified for development or production use.
+- Set various [environment variables](#environment-variables) to understand the capabilities of this image.
+- Map [persistent storage](#data-volumes) for access to configuration and data files for backup.
+- Make [networking ports](#networking) available for public access if necessary
+
 ### Persistent Storage
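A quick-start sketch following the Quick Start bullets above. The `examples/docker-compose.yml` path comes from the README itself, but the raw URL form and the `main` branch name are assumptions:

```bash
# Sketch only: fetch the example compose file referenced above and start it
# (branch name "main" is an assumption; adjust to the repository default branch)
curl -LO https://raw.githubusercontent.com/tiredofit/docker-db-backup/main/examples/docker-compose.yml
docker compose up -d
```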
@@ -116,14 +123,14 @@ The following directories are used for configuration and can be mapped for persi
 | Directory | Description |
 | ---------------------- | ----------------------------------------------------------------------------------- |
 | `/backup` | Backups |
-| `/assets/scripts/pre` | *Optional* Put custom scripts in this directory to execute before backup operations |
-| `/assets/scripts/post` | *Optional* Put custom scripts in this directory to execute after backup operations |
+| `/assets/scripts/pre` | _Optional_ Put custom scripts in this directory to execute before backup operations |
+| `/assets/scripts/post` | _Optional_ Put custom scripts in this directory to execute after backup operations |

 ### Environment Variables

 #### Base Images used

-This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`.
+This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handled via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`.

 Be sure to view the following repositories to understand all the customizable options:
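A minimal sketch of mapping the persistent-storage paths from the table above into a container; the host-side paths and container name are examples only, not part of this commit:

```bash
# Sketch only: map the documented /backup and custom-script paths to the host
docker run -d --name db-backup \
  -v "$(pwd)/backups:/backup" \
  -v "$(pwd)/scripts/pre:/assets/scripts/pre" \
  -v "$(pwd)/scripts/post:/assets/scripts/post" \
  tiredofit/db-backup
```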
@@ -146,42 +153,46 @@ Be sure to view the following repositories to understand all the customizable op
 | `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE` |

 ### Database Specific Options

 | Parameter | Description | Default | `_FILE` |
 | ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | ------- |
-| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | | |
+| `DB_AUTH` | (Mongo/PGSQL Only - Optional) Authentication Database | | |
 | `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `mssql` `pgsql` `mongo` `redis` `sqlite3` | | |
 | `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | | x |
-| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by seperating with commas eg `db1,db2` | | x |
-| `DB_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases seperated via commas from being backed up | | x |
+| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas eg `db1,db2` | | x |
+| `DB_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases separated via commas from being backed up | | x |
 | `DB_USER` | username for the database(s) - Can use `root` for MySQL | | x |
 | `DB_PASS` | (optional if DB doesn't require it) password for the database | | x |
 | `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies | x |
 | `INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2` series - AMD64 and ARM64 only for `2` | | |
 | `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | | x |
-| | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can overrde them by making your own entries | | |
+| | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can override them by making your own entries | | |

-#### For Influx DB2:
+#### For Influx DB2

 Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST` use syntax of `http(s)://db-name`

 ### Scheduling Options

 | Parameter | Description | Default |
 | ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- |
 | `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
 | `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
 | | Absolute HHMM, e.g. `2330` or `0415` | |
 | | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
 | `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `${DB_DUMP_TARGET}/archive/` |
 | `DB_DUMP_TARGET_ARCHIVE` | Optional Directory where the database dumps archives are kept. | |
-| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
+| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump frequency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
 | `DB_ARCHIVE_TIME` | Value in minutes to move all files files older than (x) from `DB_DUMP_TARGET` to `DB_DUMP_TARGET_ARCHIVE` - which is useful when pairing against an external backup system. | |

 - You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of backups that start with a `0` get converted into a different format which will not allow the timer to start at the correct time.

 ### Backup Options

 | Parameter | Description | Default | `_FILE` |
 | ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ------- |
 | `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` | |
-| `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` | |
+| `COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` | |
 | `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` | |
 | `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` | |
 | `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` | |
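Taken together, the tables above, and the `DB_AUTH` option this PR extends to PostgreSQL, combine roughly as in the following sketch; every value shown is an example and is not defined by this commit:

```bash
# Sketch only: back up all PostgreSQL databases once per day at 23:30,
# authenticating against the "postgres" maintenance DB via the DB_AUTH option,
# keeping a week of zstd level 5 dumps in a local volume.
docker run -d --name db-backup \
  -e DB_TYPE=pgsql \
  -e DB_HOST=postgres \
  -e DB_PORT=5432 \
  -e DB_NAME=ALL \
  -e DB_USER=backup \
  -e DB_PASS=secret \
  -e DB_AUTH=postgres \
  -e DB_DUMP_FREQ=1440 \
  -e DB_DUMP_BEGIN="2330" \
  -e DB_CLEANUP_TIME=10080 \
  -e COMPRESSION=ZSTD \
  -e COMPRESSION_LEVEL=5 \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```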
@@ -224,7 +235,7 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.

 #### Upload to a Azure storage account by `blobxfer`

-Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer) to the Azure fileshare storage.
+Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer) to the Azure file share storage.

 If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
@@ -243,27 +254,31 @@ If `BACKUP_LOCATION` = `blobxfer` then the following options are used.

 For debugging and maintenance purposes you may want access the containers shell.

 ```bash
 docker exec -it (whatever your container name is) bash
 ```

 ### Manual Backups

 Manual Backups can be performed by entering the container and typing `backup-now`

 - Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup by executing and then exit cleanly. An alternative way to running the script is to execute `/etc/services.available/10-db-backup/run`.

 ### Restoring Databases

 Entering in the container and executing `restore` will execute a menu based script to restore your backups - MariaDB, Postgres, and Mongo supported.

 You will be presented with a series of menus allowing you to choose:

 - What file to restore
 - What type of DB Backup
 - What Host to restore to
 - What Database Name to restore to
 - What Database User to use
 - What Database Password to use
 - What Database Port to use

-The image will try to do autodetection based on the filename for the type, hostname, and database name.
+The image will try to do auto detection based on the filename for the type, hostname, and database name.
 The image will also allow you to use environment variables or Docker secrets used to backup the images

 The script can also be executed skipping the interactive mode by using the following syntax/
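A sketch of the one-shot mode described in the Manual Backups bullet above, for use with an external scheduler such as a Kubernetes CronJob. The variable names come from that bullet; the database values are placeholders:

```bash
# Sketch only: run a single backup and exit cleanly, letting an external
# scheduler drive the repetition instead of the container's own timer
docker run --rm \
  -e MODE=MANUAL \
  -e MANUAL_RUN_FOREVER=FALSE \
  -e CONTAINER_ENABLE_SCHEDULING=FALSE \
  -e CONTAINER_ENABLE_MONITORING=FALSE \
  -e DB_TYPE=pgsql -e DB_HOST=postgres -e DB_NAME=mydb \
  -e DB_USER=backup -e DB_PASS=secret \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```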
@@ -282,9 +297,10 @@ If you only enter some of the arguments you will be prompted to fill them in.
 | `SCRIPT_LOCATION_POST` | Location on filesystem inside container to execute bash scripts post backup | `/assets/scripts/post/` |

 #### Pre Backup

 If you want to execute a custom script before a backup starts, you can drop bash scripts with the extension of `.sh` in the location defined in `SCRIPT_LOCATION_PRE`. See the following example to utilize:

-````bash
+```bash
 $ cat pre-script.sh
 ##!/bin/bash
@@ -296,21 +312,20 @@ $ cat pre-script.sh
 # #### $5=BACKUP FILENAME (Filename)

 echo "${1} Backup Starting on ${2} for ${3} at ${4}. Filename: ${5}"
-````
+```

 ## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
 ${f} "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${target}"

 Outputs the following on the console:

 `mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_202200315-000000.sql.bz2

 #### Post backup

 If you want to execute a custom script at the end of a backup, you can drop bash scripts with the extension of `.sh` in the location defined in `SCRIPT_LOCATION_POST`. Also to support legacy users `/assets/custom-scripts` is also scanned and executed.See the following example to utilize:

-````bash
+```bash
 $ cat post-script.sh
 ##!/bin/bash
@@ -328,37 +343,43 @@ $ cat post-script.sh
 # #### $11=MOVE_EXIT_CODE

 echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes MD5: ${10}"
-````
+```

 ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
 ${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_timme}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}" "${move_exit_code}

 Outputs the following on the console:

 `0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_202200315-000000.sql.bz2 Size: 7795 bytes Hash: 952fbaafa30437494fdf3989a662cd40 0`

 If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`

-You must make your scripts executible otherwise there is an internal check that will skip trying to run it otherwise.
+You must make your scripts executable otherwise there is an internal check that will skip trying to run it otherwise.
 If for some reason your filesystem or host is not detecting it right, use the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass.

 ## Support

 These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.

 ### Usage

 - The [Discussions board](../../discussions) is a great place for working with the community on tips and tricks of using this image.
 - Consider [sponsoring me](https://github.com/sponsors/tiredofit) for personalized support

 ### Bugfixes

 - Please, submit a [Bug Report](issues/new) if something isn't working as expected. I'll do my best to issue a fix in short order.

 ### Feature Requests

 - Feel free to submit a feature request, however there is no guarantee that it will be added, or at what timeline.
 - Consider [sponsoring me](https://github.com/sponsors/tiredofit) regarding development of features.

 ### Updates

 - Best effort to track upstream changes, More priority if I am actively using the image in a production environment.
 - Consider [sponsoring me](https://github.com/sponsors/tiredofit) for up to date releases.

 ## License

 MIT. See [LICENSE](LICENSE) for more details.
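Since the custom-script note above requires pre/post scripts to be executable, a one-line sketch; the script path is an example only:

```bash
# Sketch only: mark a custom post-backup script executable before mounting it
chmod +x ./scripts/post/post-script.sh
```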
@@ -298,8 +298,12 @@ backup_mysql() {
 }

 backup_pgsql() {
     export PGPASSWORD=${DB_PASS}
-    authdb=${DB_USER}
+    if [ -n "${DB_AUTH}" ] ; then
+        authdb=${DB_AUTH}
+    else
+        authdb=${DB_USER}
+    fi
     if [ "${DB_NAME,,}" = "all" ] ; then
         print_debug "Preparing to back up all databases"
         db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
@@ -341,7 +345,7 @@ backup_pgsql() {
     pre_dbbackup all
     print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
     tmp_db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
     for r_db_name in $(echo $db_names | xargs); do
         tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
     done
     sleep 5
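My reading of the change above (not wording from the commit): previously the database list was always fetched by connecting to the database named in `authdb`, which defaulted to `DB_USER`; if no database by that name exists, backing up `ALL` fails, which appears to be the issue (#262) this PR addresses. With the change, `DB_AUTH` can point the enumeration query at an existing maintenance database. A hypothetical setting:

```bash
# Hypothetical environment for the new code path: enumerate databases by
# connecting to the "postgres" maintenance DB rather than one named "backup"
DB_TYPE=pgsql
DB_USER=backup      # no database named "backup" needs to exist
DB_AUTH=postgres    # psql -d ${authdb} now targets an existing database
DB_NAME=ALL
```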