Mirror of https://github.com/tiredofit/docker-db-backup.git (synced 2025-12-21 21:33:28 +01:00)

Compare commits (33 commits)
Commits in this comparison (SHA1):

d48a15d37f, 6b7b16c42b, b64bd3168d, 35c806c369, 8fb2972b32, 82eac9ad2e, 6bad6d8d65, a922922374, edeadade4d, 31b256b02d, d5cacdb32a, 238b4d852c, 8d6e72eead, a9037f97ac, ebcd4fcde4, adf52c1160, 1eee4a49d7, e3faab5c36, 768d5e60fe, e3e0d7ed67, db808d25c7, cb5b49b90b, 48a1ff8bbe, 8b1308ffd1, 3ab3f67be9, cd1899d849, 663667dbff, 36506091be, bf646381cb, fb3b65b33a, 6d1ef87042, c985cc8a4f, 2265a6acf5
.github/workflows/main.yml (vendored): 5 changes
```diff
@@ -8,8 +8,7 @@ on:

 jobs:
   build:
-    #uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
-    uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
-    #uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
     secrets: inherit
```
.github/workflows/manual.yml (vendored): 5 changes
```diff
@@ -9,8 +9,7 @@ on:

 jobs:
   build:
-    #uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
-    uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
-    #uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main
+    uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
+    #uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main
     secrets: inherit
```
CHANGELOG.md: 725 changes (diff suppressed because it is too large)
Dockerfile: 35 changes
```diff
@@ -1,5 +1,5 @@
 ARG DISTRO=alpine
-ARG DISTRO_VARIANT=3.18
+ARG DISTRO_VARIANT=edge

 FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
 LABEL maintainer="Dave Conroy (github.com/tiredofit)"
```
```diff
@@ -7,7 +7,9 @@ LABEL maintainer="Dave Conroy (github.com/tiredofit)"

 ### Set Environment Variables
 ENV INFLUX_VERSION=1.8.0 \
     INFLUX2_VERSION=2.4.0 \
-    MSSQL_VERSION=18.0.1.1-1 \
+    MSODBC_VERSION=18.3.2.1-1 \
+    MSSQL_VERSION=18.3.1.1-1 \
+    AWS_CLI_VERSION=1.31.5 \
     CONTAINER_ENABLE_MESSAGING=FALSE \
     CONTAINER_ENABLE_MONITORING=TRUE \
     CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
```
```diff
@@ -22,6 +24,7 @@ RUN source /assets/functions/00-container && \
     package install .db-backup-build-deps \
         build-base \
         bzip2-dev \
         cargo \
         git \
+        go \
         libarchive-dev \
```
```diff
@@ -33,18 +36,27 @@ RUN source /assets/functions/00-container && \
     && \
     \
     package install .db-backup-run-deps \
-        aws-cli \
         bzip2 \
+        groff \
         libarchive \
         mariadb-client \
         mariadb-connector-c \
         mongodb-tools \
         openssl \
         pigz \
-        postgresql15 \
-        postgresql15-client \
+        postgresql16 \
+        postgresql16-client \
         pv \
+        py3-botocore \
+        py3-colorama \
+        py3-cryptography \
+        py3-docutils \
+        py3-jmespath \
+        py3-rsa \
+        py3-setuptools \
+        py3-s3transfer \
+        py3-yaml \
         python3 \
         redis \
         sqlite \
         xz \
```
```diff
@@ -52,15 +64,16 @@ RUN source /assets/functions/00-container && \
         zstd \
     && \
     \
-    apkArch="$(apk --print-arch)"; \
+    apkArch="$(uname -m)"; \
     case "$apkArch" in \
-        x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
-        aarch64 ) influx2=true ; influx_arch=arm64 ;; \
-        *) sleep 0.1 ;; \
+        x86_64) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \
+        arm64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \
+        *) sleep 0.1 ;; \
     esac; \
     \
-    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSSQL_VERSION}_amd64.apk mssql-tools18_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
+    if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; ls -l ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \
     if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
+    pip3 install --break-system-packages awscli==${AWS_CLI_VERSION} && \
     clone_git_repo https://github.com/influxdata/influxdb "${INFLUX_VERSION}" && \
     go build -o /usr/sbin/influxd ./cmd/influxd && \
     strip /usr/sbin/influxd && \
```
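The new `case` keys off `uname -m` and, notably, keeps the amd64 MSSQL packages even on ARM64 hosts (`mssql_arch=amd64`), while the Influx 2 client gets a native arm64 build. A standalone sketch of that mapping (illustrative only, not the image's actual RUN chain; note that `uname -m` on Linux typically reports `aarch64` rather than `arm64`, so the sketch accepts both spellings):

```bash
#!/bin/sh
# Sketch of the architecture detection the hunk above implements.
arch="$(uname -m)"
case "$arch" in
    x86_64)          mssql=true  ; mssql_arch=amd64 ; influx2=true  ; influx_arch=amd64 ;;
    arm64 | aarch64) mssql=true  ; mssql_arch=amd64 ; influx2=true  ; influx_arch=arm64 ;;
    *)               mssql=false ; influx2=false ;;
esac
echo "mssql=${mssql} mssql_arch=${mssql_arch:-n/a} influx2=${influx2} influx_arch=${influx_arch:-n/a}"
```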
```diff
@@ -80,7 +93,7 @@ RUN source /assets/functions/00-container && \
     make && \
     make install && \
     \
-    pip3 install blobxfer && \
+    pip3 install --break-system-packages blobxfer && \
     \
     package remove .db-backup-build-deps && \
     package cleanup && \
```
README.md: 233 changes
```diff
@@ -7,26 +7,27 @@
 [](https://github.com/sponsors/tiredofit)
 [](https://www.paypal.me/tiredofit)

-* * *
+---

 ## About

 This will build a container for backing up multiple types of DB Servers

-Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.
+Currently backs up CouchDB, InfluxDB, MySQL, Microsoft SQL, MongoDB, Postgres, Redis servers.

-* dump to local filesystem or backup to S3 Compatible services, and Azure.
-* select database user and password
-* backup all databases, single, or multiple databases
-* backup all to seperate files or one singular file
-* choose to have an MD5 or SHA1 sum after backup for verification
-* delete old backups after specific amount of time
-* choose compression type (none, gz, bz, xz, zstd)
-* connect to any container running on the same system
-* Script to perform restores
-* Zabbix Monitoring capabilities
-* select how often to run a dump
-* select when to start the first dump, whether time of day or relative to container start time
-* Execute script after backup for monitoring/alerting purposes
+- dump to local filesystem or backup to S3 Compatible services, and Azure.
+- select database user and password
+- backup all databases, single, or multiple databases
+- backup all to separate files or one singular file
+- choose to have an MD5 or SHA1 sum after backup for verification
+- delete old backups after specific amount of time
+- choose compression type (none, gz, bz, xz, zstd)
+- connect to any container running on the same system
+- Script to perform restores
+- Zabbix Monitoring capabilities
+- select how often to run a dump
+- select when to start the first dump, whether time of day or relative to container start time
+- Execute script after backup for monitoring/alerting purposes

 ## Maintainer
```
````diff
@@ -34,57 +35,61 @@ Currently backs up CouchDB, InfluxDB, MySQL, MongoDB, Postgres, Redis servers.

 ## Table of Contents

-- [About](#about)
-- [Maintainer](#maintainer)
-- [Table of Contents](#table-of-contents)
-- [Prerequisites and Assumptions](#prerequisites-and-assumptions)
-- [Installation](#installation)
-- [Build from Source](#build-from-source)
-- [Prebuilt Images](#prebuilt-images)
-- [Multi Architecture](#multi-architecture)
-- [Configuration](#configuration)
-- [Quick Start](#quick-start)
-- [Persistent Storage](#persistent-storage)
-- [Environment Variables](#environment-variables)
-- [Base Images used](#base-images-used)
-- [Container Options](#container-options)
-- [Database Specific Options](#database-specific-options)
-- [For Influx DB2:](#for-influx-db2)
-- [Scheduling Options](#scheduling-options)
-- [Backup Options](#backup-options)
-- [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
-- [Upload to a Azure storage account by `blobxfer`](#upload-to-a-azure-storage-account-by-blobxfer)
-- [Maintenance](#maintenance)
-- [Shell Access](#shell-access)
-- [Manual Backups](#manual-backups)
-- [Restoring Databases](#restoring-databases)
-- [Custom Scripts](#custom-scripts)
-- [Path Options](#path-options)
-- [Pre Backup](#pre-backup)
-- [Post backup](#post-backup)
-- [Support](#support)
-- [Usage](#usage)
-- [Bugfixes](#bugfixes)
-- [Feature Requests](#feature-requests)
-- [Updates](#updates)
-- [License](#license)
-- [github.com/tiredofit/docker-db-backup](#githubcomtiredofitdocker-db-backup)
+- [About](#about)
+- [Maintainer](#maintainer)
+- [Table of Contents](#table-of-contents)
+- [Prerequisites and Assumptions](#prerequisites-and-assumptions)
+- [Installation](#installation)
+- [Build from Source](#build-from-source)
+- [Prebuilt Images](#prebuilt-images)
+- [Multi Architecture](#multi-architecture)
+- [Configuration](#configuration)
+- [Quick Start](#quick-start)
+- [Persistent Storage](#persistent-storage)
+- [Environment Variables](#environment-variables)
+- [Base Images used](#base-images-used)
+- [Container Options](#container-options)
+- [Database Specific Options](#database-specific-options)
+- [For Influx DB2](#for-influx-db2)
+- [Scheduling Options](#scheduling-options)
+- [Backup Options](#backup-options)
+- [Backing Up to S3 Compatible Services](#backing-up-to-s3-compatible-services)
+- [Upload to a Azure storage account by `blobxfer`](#upload-to-a-azure-storage-account-by-blobxfer)
+- [Maintenance](#maintenance)
+- [Shell Access](#shell-access)
+- [Manual Backups](#manual-backups)
+- [Restoring Databases](#restoring-databases)
+- [Custom Scripts](#custom-scripts)
+- [Path Options](#path-options)
+- [Pre Backup](#pre-backup)
+- [Post backup](#post-backup)
+- [Support](#support)
+- [Usage](#usage)
+- [Bugfixes](#bugfixes)
+- [Feature Requests](#feature-requests)
+- [Updates](#updates)
+- [License](#license)

 > **NOTE**: If you are using this with a docker-compose file along with a seperate SQL container, take care not to set the variables to backup immediately, more so have it delay execution for a minute, otherwise you will get a failed first backup.

 ## Prerequisites and Assumptions
-* You must have a working connection to one of the supported DB Servers and appropriate credentials
+
+- You must have a working connection to one of the supported DB Servers and appropriate credentials

 ## Installation

 ### Build from Source

 Clone this repository and build the image with `docker build <arguments> (imagename) .`

 ### Prebuilt Images

 Builds of the image are available on [Docker Hub](https://hub.docker.com/r/tiredofit/db-backup)

 Builds of the image are also available on the [Github Container Registry](https://github.com/tiredofit/docker-db-backup/pkgs/container/docker-db-backup)

-```
+```bash
 docker pull ghcr.io/tiredofit/docker-db-backup:(imagetag)
 ```
````
````diff
@@ -95,20 +100,22 @@ The following image tags are available along with their tagged release based on
 | latest | `:latest` |

 ```bash
-docker pull docker.io/tiredofdit/db-backup:(imagetag)
+docker pull docker.io/tiredofit/db-backup:(imagetag)
 ```

 #### Multi Architecture
-Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architecures, type `docker manifest (image):(tag)`
+
+Images are built primarily for `amd64` architecture, and may also include builds for `arm/v7`, `arm64` and others. These variants are all unsupported. Consider [sponsoring](https://github.com/sponsors/tiredofit) my work so that I can work with various hardware. To see if this image supports multiple architectures, type `docker manifest (image):(tag)`

 ## Configuration

 ### Quick Start

-* The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a working [docker-compose.yml](examples/docker-compose.yml) that can be modified for development or production use.
+- The quickest way to get started is using [docker-compose](https://docs.docker.com/compose/). See the examples folder for a working [docker-compose.yml](examples/docker-compose.yml) that can be modified for development or production use.

-* Set various [environment variables](#environment-variables) to understand the capabilities of this image.
-* Map [persistent storage](#data-volumes) for access to configuration and data files for backup.
-* Make [networking ports](#networking) available for public access if necessary
+- Set various [environment variables](#environment-variables) to understand the capabilities of this image.
+- Map [persistent storage](#data-volumes) for access to configuration and data files for backup.
+- Make [networking ports](#networking) available for public access if necessary

 ### Persistent Storage
````
```diff
@@ -116,14 +123,14 @@ The following directories are used for configuration and can be mapped for persi
 | Directory | Description |
 | ---------------------- | ----------------------------------------------------------------------------------- |
 | `/backup` | Backups |
-| `/assets/scripts/pre` | *Optional* Put custom scripts in this directory to execute before backup operations |
-| `/assets/scripts/post` | *Optional* Put custom scripts in this directory to execute after backup operations |
+| `/assets/scripts/pre` | _Optional_ Put custom scripts in this directory to execute before backup operations |
+| `/assets/scripts/post` | _Optional_ Put custom scripts in this directory to execute after backup operations |

 ### Environment Variables

 #### Base Images used

-This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handlded via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`.
+This image relies on an [Alpine Linux](https://hub.docker.com/r/tiredofit/alpine) base image that relies on an [init system](https://github.com/just-containers/s6-overlay) for added capabilities. Outgoing SMTP capabilities are handled via `msmtp`. Individual container performance monitoring is performed by [zabbix-agent](https://zabbix.org). Additional tools include: `bash`,`curl`,`less`,`logrotate`, `nano`.

 Be sure to view the following repositories to understand all the customizable options:
```
```diff
@@ -146,48 +153,54 @@ Be sure to view the following repositories to understand all the customizable op
 | `SPLIT_DB` | For each backup, create a new archive. `TRUE` or `FALSE` (MySQL and Postgresql Only) | `TRUE` |

 ### Database Specific Options
-| Parameter | Description | Default | `_FILE` |
-| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- | ------- |
-| `DB_AUTH` | (Mongo Only - Optional) Authentication Database | | |
-| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `pgsql` `mongo` `redis` `sqlite3` | | |
-| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | | x |
-| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by seperating with commas eg `db1,db2` | | x |
-| `DB_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases seperated via commas from being backed up | | x |
-| `DB_USER` | username for the database(s) - Can use `root` for MySQL | | x |
-| `DB_PASS` | (optional if DB doesn't require it) password for the database | | x |
-| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies | x |
-| `INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2` series - AMD64 and ARM64 only for `2` | | |
-| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | | x |
-| | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can overrde them by making your own entries | | |
+| Parameter | Description | Default | `_FILE` |
+| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | ------- |
+| `DB_AUTH` | (Mongo/PGSQL Only - Optional) Authentication Database | | |
+| `DB_TYPE` | Type of DB Server to backup `couch` `influx` `mysql` `mssql` `pgsql` `mongo` `redis` `sqlite3` | | |
+| `DB_HOST` | Server Hostname e.g. `mariadb`. For `sqlite3`, full path to DB file e.g. `/backup/db.sqlite3` | | x |
+| `DB_NAME` | Schema Name e.g. `database` or `ALL` to backup all databases the user has access to. Backup multiple by separating with commas eg `db1,db2` | | x |
+| `DB_NAME_EXCLUDE` | If using `ALL` - use this as to exclude databases separated via commas from being backed up | | x |
+| `DB_USER` | username for the database(s) - Can use `root` for MySQL | | x |
+| `DB_PASS` | (optional if DB doesn't require it) password for the database | | x |
+| `DB_PORT` | (optional) Set port to connect to DB_HOST. Defaults are provided | varies | x |
+| `INFLUX_VERSION` | What Version of Influx are you backing up from `1`.x or `2` series - AMD64 and ARM64 only for `2` | | |
+| `MONGO_CUSTOM_URI` | If you wish to override the MongoDB Connection string enter it here e.g. `mongodb+srv://username:password@cluster.id.mongodb.net` | | x |
+| | This environment variable will be parsed and populate the `DB_NAME` and `DB_HOST` variables to properly build your backup filenames. You can override them by making your own entries | | |

-#### For Influx DB2:
+#### For Influx DB2

 Your Organization will be mapped to `DB_USER` and your root token will need to be mapped to `DB_PASS`. You may use `DB_NAME=ALL` to backup the entire set of databases. For `DB_HOST` use syntax of `http(s)://db-name`

 ### Scheduling Options
-| Parameter | Description | Default |
-| ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- |
-| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
-| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
-| | Absolute HHMM, e.g. `2330` or `0415` | |
-| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
-| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `${DB_DUMP_TARGET}/archive/` |
-| `DB_DUMP_TARGET_ARCHIVE` | Optional Directory where the database dumps archives are kept. | |
-| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump freqency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
-| `DB_ARCHIVE_TIME` | Value in minutes to move all files files older than (x) from `DB_DUMP_TARGET` to `DB_DUMP_TARGET_ARCHIVE` - which is useful when pairing against an external backup system. | |
+| Parameter | Description | Default |
+| ------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------- |
+| `DB_DUMP_FREQ` | How often to do a dump, in minutes after the first backup. Defaults to 1440 minutes, or once per day. | `1440` |
+| `DB_DUMP_BEGIN` | What time to do the first dump. Defaults to immediate. Must be in one of two formats | |
+| | Absolute HHMM, e.g. `2330` or `0415` | |
+| | Relative +MM, i.e. how many minutes after starting the container, e.g. `+0` (immediate), `+10` (in 10 minutes), or `+90` in an hour and a half | |
+| `DB_DUMP_TARGET` | Directory where the database dumps are kept. | `${DB_DUMP_TARGET}/archive/` |
+| `DB_DUMP_TARGET_ARCHIVE` | Optional Directory where the database dumps archives are kept. | |
+| `DB_CLEANUP_TIME` | Value in minutes to delete old backups (only fired when dump frequency fires). 1440 would delete anything above 1 day old. You don't need to set this variable if you want to hold onto everything. | `FALSE` |
+| `DB_ARCHIVE_TIME` | Value in minutes to move all files files older than (x) from `DB_DUMP_TARGET` to `DB_DUMP_TARGET_ARCHIVE` - which is useful when pairing against an external backup system. | |

 - You may need to wrap your `DB_DUMP_BEGIN` value in quotes for it to properly parse. There have been reports of backups that start with a `0` get converted into a different format which will not allow the timer to start at the correct time.

 ### Backup Options

 | Parameter | Description | Default | `_FILE` |
 | ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ------- |
 | `COMPRESSION` | Use either Gzip `GZ`, Bzip2 `BZ`, XZip `XZ`, ZSTD `ZSTD` or none `NONE` | `ZSTD` | |
-| `COMPRESSION_LEVEL` | Numberical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` | |
+| `COMPRESSION_LEVEL` | Numerical value of what level of compression to use, most allow `1` to `9` except for `ZSTD` which allows for `1` to `19` - | `3` | |
 | `ENABLE_PARALLEL_COMPRESSION` | Use multiple cores when compressing backups `TRUE` or `FALSE` | `TRUE` | |
 | `PARALLEL_COMPRESSION_THREADS` | Maximum amount of threads to use when compressing - Integer value e.g. `8` | `autodetected` | |
 | `GZ_RSYNCABLE` | Use `--rsyncable` (gzip only) for faster rsync transfers and incremental backup deduplication. e.g. `TRUE` | `FALSE` | |
 | `ENABLE_CHECKSUM` | Generate either a MD5 or SHA1 in Directory, `TRUE` or `FALSE` | `TRUE` | |
 | `CHECKSUM` | Either `MD5` or `SHA1` | `MD5` | |
-| `EXTRA_OPTS` | If you need to pass extra arguments to the backup command, add them here e.g. `--extra-command` | | |
+| `EXTRA_OPTS` | If you need to pass extra arguments to the backup and database enumeration command, add them here e.g. `--extra-command` | | |
+| `EXTRA_DUMP_OPTS` | If you need to pass extra arguments to the backup command only, add them here e.g. `--extra-command` | | |
+| `EXTRA_ENUMERATION_OPTS` | If you need to pass extra arguments to the database enumeration command only, add them here e.g. `--extra-command` | | |
 | `MYSQL_MAX_ALLOWED_PACKET` | Max allowed packet if backing up MySQL / MariaDB | `512M` | |
 | `MYSQL_SINGLE_TRANSACTION` | Backup in a single transaction with MySQL / MariaDB | `TRUE` | |
 | `MYSQL_STORED_PROCEDURES` | Backup stored procedures with MySQL / MariaDB | `TRUE` | |
```
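To make the new split concrete, a hedged example of wiring the three knobs together (variable names come from the table above; the flag values and container name are illustrative only):

```bash
# Illustrative only: apply TLS to both enumeration and dump via mysql/mysqldump
# flags, plus a dump-only flag on top.
docker run -d --name db-backup \
  -e DB_TYPE=mysql \
  -e DB_HOST=mariadb \
  -e DB_NAME=ALL \
  -e EXTRA_ENUMERATION_OPTS="--ssl" \
  -e EXTRA_DUMP_OPTS="--skip-lock-tables" \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```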
```diff
@@ -222,7 +235,7 @@ If `BACKUP_LOCATION` = `S3` then the following options are used.

 #### Upload to a Azure storage account by `blobxfer`

-Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer) to the Azure fileshare storage.
+Support to upload backup files with [blobxfer](https://github.com/Azure/blobxfer) to the Azure file share storage.

 If `BACKUP_LOCATION` = `blobxfer` then the following options are used.
```
`````diff
@@ -241,27 +254,31 @@ If `BACKUP_LOCATION` = `blobxfer` then the following options are used.

 For debugging and maintenance purposes you may want access the containers shell.

-````bash
+```bash
 docker exec -it (whatever your container name is) bash
-````
+```

 ### Manual Backups

 Manual Backups can be performed by entering the container and typing `backup-now`

 - Recently there was a request to have the container work with Kubernetes cron scheduling. This can theoretically be accomplished by setting the container `MODE=MANUAL` and then setting `MANUAL_RUN_FOREVER=FALSE` - You would also want to disable a few features from the upstream base images specifically `CONTAINER_ENABLE_SCHEDULING` and `CONTAINER_ENABLE_MONITORING`. This should allow the container to start, execute a backup by executing and then exit cleanly. An alternative way to running the script is to execute `/etc/services.available/10-db-backup/run`.

 ### Restoring Databases

 Entering in the container and executing `restore` will execute a menu based script to restore your backups - MariaDB, Postgres, and Mongo supported.

 You will be presented with a series of menus allowing you to choose:
-- What file to restore
-- What type of DB Backup
-- What Host to restore to
-- What Database Name to restore to
-- What Database User to use
-- What Database Password to use
-- What Database Port to use
-
-The image will try to do autodetection based on the filename for the type, hostname, and database name.
+- What file to restore
+- What type of DB Backup
+- What Host to restore to
+- What Database Name to restore to
+- What Database User to use
+- What Database Password to use
+- What Database Port to use
+
+The image will try to do auto detection based on the filename for the type, hostname, and database name.
 The image will also allow you to use environment variables or Docker secrets used to backup the images

 The script can also be executed skipping the interactive mode by using the following syntax/
`````
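Based on the Kubernetes note quoted above, a one-shot invocation might look like the following sketch (it uses only the variables named in that note; the database values are illustrative):

```bash
# Run once and exit cleanly, so an external scheduler (e.g. a Kubernetes CronJob)
# owns the timing: MODE=MANUAL, MANUAL_RUN_FOREVER=FALSE, and the base image's
# scheduling/monitoring turned off, per the note above.
docker run --rm \
  -e MODE=MANUAL \
  -e MANUAL_RUN_FOREVER=FALSE \
  -e CONTAINER_ENABLE_SCHEDULING=FALSE \
  -e CONTAINER_ENABLE_MONITORING=FALSE \
  -e DB_TYPE=pgsql -e DB_HOST=postgres -e DB_NAME=app \
  -v "$(pwd)/backups:/backup" \
  tiredofit/db-backup
```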
`````diff
@@ -280,9 +297,10 @@ If you only enter some of the arguments you will be prompted to fill them in.
 | `SCRIPT_LOCATION_POST` | Location on filesystem inside container to execute bash scripts post backup | `/assets/scripts/post/` |

 #### Pre Backup

 If you want to execute a custom script before a backup starts, you can drop bash scripts with the extension of `.sh` in the location defined in `SCRIPT_LOCATION_PRE`. See the following example to utilize:

-````bash
+```bash
 $ cat pre-script.sh
 ##!/bin/bash
`````
`````diff
@@ -294,21 +312,20 @@ $ cat pre-script.sh
 # #### $5=BACKUP FILENAME (Filename)

 echo "${1} Backup Starting on ${2} for ${3} at ${4}. Filename: ${5}"
-````
+```

 ## script DB_TYPE DB_HOST DB_NAME STARTEPOCH BACKUP_FILENAME
 ${f} "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_time}" "${target}"

 Outputs the following on the console:

 `mysql Backup Starting on example-db for example at 1647370800. Filename: mysql_example_example-db_202200315-000000.sql.bz2

 #### Post backup

 If you want to execute a custom script at the end of a backup, you can drop bash scripts with the extension of `.sh` in the location defined in `SCRIPT_LOCATION_POST`. Also to support legacy users `/assets/custom-scripts` is also scanned and executed.See the following example to utilize:

-````bash
+```bash
 $ cat post-script.sh
 ##!/bin/bash
`````
`````diff
@@ -326,37 +343,43 @@ $ cat post-script.sh
 # #### $11=MOVE_EXIT_CODE

 echo "${1} ${2} Backup Completed on ${3} for ${4} on ${5} ending ${6} for a duration of ${7} seconds. Filename: ${8} Size: ${9} bytes MD5: ${10}"
-````
+```

 ## script EXIT_CODE DB_TYPE DB_HOST DB_NAME STARTEPOCH FINISHEPOCH DURATIONEPOCH BACKUP_FILENAME FILESIZE CHECKSUMVALUE
 ${f} "${exit_code}" "${dbtype}" "${dbhost}" "${dbname}" "${backup_start_timme}" "${backup_finish_time}" "${backup_total_time}" "${target}" "${FILESIZE}" "${checksum_value}" "${move_exit_code}

 Outputs the following on the console:

 `0 mysql Backup Completed on example-db for example on 1647370800 ending 1647370920 for a duration of 120 seconds. Filename: mysql_example_example-db_202200315-000000.sql.bz2 Size: 7795 bytes Hash: 952fbaafa30437494fdf3989a662cd40 0`

 If you wish to change the size value from bytes to megabytes set environment variable `SIZE_VALUE=megabytes`

-You must make your scripts executible otherwise there is an internal check that will skip trying to run it otherwise.
+You must make your scripts executable otherwise there is an internal check that will skip trying to run it otherwise.
 If for some reason your filesystem or host is not detecting it right, use the environment variable `POST_SCRIPT_SKIP_X_VERIFY=TRUE` to bypass.

 ## Support

 These images were built to serve a specific need in a production environment and gradually have had more functionality added based on requests from the community.

 ### Usage

 - The [Discussions board](../../discussions) is a great place for working with the community on tips and tricks of using this image.
 - Consider [sponsoring me](https://github.com/sponsors/tiredofit) for personalized support

 ### Bugfixes

 - Please, submit a [Bug Report](issues/new) if something isn't working as expected. I'll do my best to issue a fix in short order.

 ### Feature Requests

 - Feel free to submit a feature request, however there is no guarantee that it will be added, or at what timeline.
 - Consider [sponsoring me](https://github.com/sponsors/tiredofit) regarding development of features.

 ### Updates

 - Best effort to track upstream changes, More priority if I am actively using the image in a production environment.
 - Consider [sponsoring me](https://github.com/sponsors/tiredofit) for up to date releases.

 ## License

 MIT. See [LICENSE](LICENSE) for more details.
`````
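Since the post-script arguments are positional, a minimal alerting hook is easy to sketch (a hypothetical script for `/assets/scripts/post/`, using only the eleven arguments documented in the example above):

```bash
#!/bin/bash
# Hypothetical post-backup hook; must be executable (see the note above).
# $1 exit code, $2 db type, $3 db host, $4 db name, $5 start epoch,
# $6 finish epoch, $7 duration (s), $8 filename, $9 size (bytes),
# $10 checksum, $11 move exit code.
if [ "${1}" != "0" ] || [ "${11}" != "0" ]; then
    echo "BACKUP FAILED: ${2} ${4}@${3} file=${8}" >&2
    exit 1
fi
echo "backup OK: ${8} (${9} bytes, checksum ${10}, ${7}s)"
```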
```diff
@@ -159,7 +159,7 @@ backup_influx() {
         compression
         pre_dbbackup $db
         print_notice "Dumping Influx database: '${db}'"
-        influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
+        influxd backup ${influx_compression} ${bucket} -portable -host ${DB_HOST}:${DB_PORT} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} "${TEMP_LOCATION}"/"${target_dir}"
         exit_code=$?
         check_exit_code backup $target_dir
         print_notice "Creating archive file of '${target_dir}' with tar ${compression_string}"
```
```diff
@@ -181,7 +181,7 @@ backup_influx() {
         compression
         pre_dbbackup $db
         print_notice "Dumping Influx2 database: '${db}'"
-        influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
+        influx backup --org ${DB_USER} ${bucket} --host ${DB_HOST}:${DB_PORT} --token ${DB_PASS} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} --compression none "${TEMP_LOCATION}"/"${target_dir}"
         exit_code=$?
         check_exit_code backup $target_dir
         create_archive
```
```diff
@@ -208,9 +208,9 @@ backup_mongo() {
         compression_string="and compressing with gzip"
     fi
     if [ -n "${MONGO_CUSTOM_URI}" ] ; then
-        mongo_backup_parameter="--uri=${MONGO_CUSTOM_URI} ${EXTRA_OPTS}"
+        mongo_backup_parameter="--uri=${MONGO_CUSTOM_URI} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS}"
     else
-        mongo_backup_parameter="--host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS}"
+        mongo_backup_parameter="--host ${DB_HOST} --port ${DB_PORT} ${MONGO_USER_STR}${MONGO_PASS_STR}${MONGO_AUTH_STR}${MONGO_DB_STR} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS}"
     fi
     pre_dbbackup "${DB_NAME}"
     print_notice "Dumping MongoDB database: '${DB_NAME}' ${compression_string}"
```
```diff
@@ -249,7 +249,7 @@ backup_mysql() {

     if [ "${DB_NAME,,}" = "all" ] ; then
         print_debug "Preparing to back up everything except for information_schema and _* prefixes"
-        db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER ${mysql_tls_args} ${EXTRA_OPTS} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
+        db_names=$(mysql -h ${DB_HOST} -P $DB_PORT -u$DB_USER ${mysql_tls_args} ${EXTRA_OPTS} ${EXTRA_ENUMERATION_OPTS} --batch -e "SHOW DATABASES;" | grep -v Database | grep -v schema )
         if [ -n "${DB_NAME_EXCLUDE}" ] ; then
             db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
             for db_exclude in ${db_names_exclusions} ; do
```
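The hunk cuts off inside the `DB_NAME_EXCLUDE` loop; a standalone sketch of the comma-separated exclusion it sets up (the `grep -vx` loop body is an assumption, since that part of the function is not shown):

```bash
# Sketch: filter an enumerated database list by DB_NAME_EXCLUDE.
db_names=$(printf '%s\n' app staging tmp)   # stand-in for the SHOW DATABASES result
DB_NAME_EXCLUDE="staging,tmp"
db_names_exclusions=$(echo "${DB_NAME_EXCLUDE}" | tr ',' '\n')
for db_exclude in ${db_names_exclusions} ; do
    db_names=$(echo "${db_names}" | grep -vx "${db_exclude}")   # assumed filter
done
echo "${db_names}"   # -> app
```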
```diff
@@ -271,7 +271,7 @@ backup_mysql() {
             compression
             pre_dbbackup $db
             print_notice "Dumping MySQL/MariaDB database: '${db}' ${compression_string}"
-            mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+            mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} $db | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
             exit_code=$?
             check_exit_code backup $target
             generate_checksum
```
```diff
@@ -287,7 +287,7 @@ backup_mysql() {
         compression
         pre_dbbackup all
         print_notice "Dumping all MySQL / MariaDB databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
-        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
+        mysqldump --max-allowed-packet=${MYSQL_MAX_ALLOWED_PACKET} -h ${DB_HOST} -P ${DB_PORT} -u${DB_USER} ${single_transaction} ${stored_procedures} ${mysql_tls_args} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} --databases $(echo ${db_names} | xargs) | $compress_cmd > "${TEMP_LOCATION}"/"${target}"
         exit_code=$?
         check_exit_code backup $target
         generate_checksum
```
```diff
@@ -298,8 +298,12 @@
 }

 backup_pgsql() {
-    export PGPASSWORD=${DB_PASS}
-    authdb=${DB_USER}
+    export PGPASSWORD=${DB_PASS}
+    if [ -n "${DB_AUTH}" ] ; then
+        authdb=${DB_AUTH}
+    else
+        authdb=${DB_USER}
+    fi
     if [ "${DB_NAME,,}" = "all" ] ; then
         print_debug "Preparing to back up all databases"
         db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
```
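The added `DB_AUTH` fallback is equivalent to a single default-value parameter expansion, a common shorthand for this pattern:

```bash
# One-line equivalent of the if/else above: use DB_AUTH when set, else DB_USER.
authdb="${DB_AUTH:-${DB_USER}}"
```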
```diff
@@ -324,7 +328,7 @@ backup_pgsql() {
             compression
             pre_dbbackup $db
             print_notice "Dumping PostgresSQL database: '${db}' ${compression_string}"
-            pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+            pg_dump -h ${DB_HOST} -p ${DB_PORT} -U ${DB_USER} $db ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
             exit_code=$?
             check_exit_code backup $target
             generate_checksum
```
```diff
@@ -341,14 +345,14 @@ backup_pgsql() {
         pre_dbbackup all
         print_notice "Dumping all PostgreSQL databases: '$(echo ${db_names} | xargs | tr ' ' ',')' ${compression_string}"
         tmp_db_names=$(psql -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} -d ${authdb} -c 'COPY (SELECT datname FROM pg_database WHERE datistemplate = false) TO STDOUT;' )
-        for r_db_name in $(echo $db_names | xargs); do
+        for r_db_name in $(echo $db_names | xargs); do
             tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g" )
         done
         sleep 5
         for x_db_name in ${tmp_db_names} ; do
             pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
         done
-        pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
+        pg_dumpall -h ${DB_HOST} -U ${DB_USER} -p ${DB_PORT} ${pgexclude_arg} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS} | $compress_cmd > ${TEMP_LOCATION}/${target}
         exit_code=$?
         check_exit_code backup $target
         generate_checksum
```
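For the `ALL` path, the script subtracts the selected databases from the enumerated list and turns the remainder into `--exclude-database` flags for `pg_dumpall`; a condensed, runnable sketch of that subtraction (stand-in values replace the live `psql` enumeration):

```bash
# Condensed sketch of the exclusion-building logic shown above.
tmp_db_names="app staging tmp"   # stand-in for the psql enumeration
db_names="app"                   # what is actually being dumped
for r_db_name in $(echo $db_names | xargs); do
    tmp_db_names=$(echo "$tmp_db_names" | xargs | sed "s|${r_db_name}||g")
done
pgexclude_arg=""
for x_db_name in ${tmp_db_names} ; do
    pgexclude_arg=$(echo ${pgexclude_arg} --exclude-database=${x_db_name})
done
echo "$pgexclude_arg"   # -> --exclude-database=staging --exclude-database=tmp
```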
```diff
@@ -363,7 +367,7 @@ backup_redis() {
     print_notice "Dumping Redis - Flushing Redis Cache First"
     target=redis_all_${DB_HOST,,}_${now}.rdb
     ltarget=redis_${DB_HOST,,}
-    echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
+    echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS} ${EXTRA_DUMP_OPTS}
     sleep 10
     try=5
     while [ $try -gt 0 ] ; do
```
```diff
@@ -707,7 +711,7 @@ move_dbbackup() {
         "file" | "filesystem" )
             print_debug "Moving backup to filesystem"
             mkdir -p "${DB_DUMP_TARGET}"
-            mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
+            if var_true "${ENABLE_CHECKSUM}" ; then mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/ ; fi
             mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
             move_exit_code=$?
             if var_true "${CREATE_LATEST_SYMLINK}" ; then
```
```diff
@@ -744,20 +748,21 @@ move_dbbackup() {
                 silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
             fi

-            rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
+            if var_true "${ENABLE_CHECKSUM}" ; then rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"; fi
             rm -rf "${TEMP_LOCATION}"/"${target}"
             ;;
         "blobxfer" )
-            print_info "Moving backup to S3 Bucket with blobxfer"
+            print_info "Moving backup to external storage with blobxfer"

             mkdir -p "${DB_DUMP_TARGET}"
-            mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
+            if var_true "${ENABLE_CHECKSUM}" ; then mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/; fi

             mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"

             silent blobxfer upload --mode file --remote-path ${BLOBXFER_REMOTE_PATH} --local-path ${DB_DUMP_TARGET}
             move_exit_code=$?

-            rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
+            if var_true "${ENABLE_CHECKSUM}" ; then rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}" ; fi
             rm -rf "${TEMP_LOCATION}"/"${target}"
             ;;
     esac
```
```diff
@@ -78,7 +78,7 @@ fi
 get_filename() {
     COLUMNS=12
     prompt="Please select a file to restore:"
-    options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | xargs -0) )
+    options=( $(find "${DB_DUMP_TARGET}" -type f -maxdepth 1 -not -name '*.md5' -not -name '*.sha1' -print0 | sort -z | xargs -0) )
     PS3="$prompt "
     select opt in "${options[@]}" "Custom" "Quit" ; do
         if (( REPLY == 2 + ${#options[@]} )) ; then
```
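The added `sort -z` orders the restore menu entries while keeping the pipeline NUL-safe end to end; the pattern in isolation (using the documented `/backup` dump directory as a stand-in for `${DB_DUMP_TARGET}`):

```bash
# NUL-delimited listing, sorted, then unpacked (same pattern as the fix above).
find /backup -maxdepth 1 -type f -not -name '*.md5' -not -name '*.sha1' -print0 \
  | sort -z | xargs -0
```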
```diff
@@ -131,7 +131,7 @@ EOF
 fi

 if [ -z "${DB_HOST}" ] && [ -n "${parsed_host}" ]; then
-    print_debug "Parsed DBHostpho Variant: 3 - No Env, Parsed Filename"
+    print_debug "Parsed DBHost Variant: 3 - No Env, Parsed Filename"
     q_dbhost_variant=3
     q_dbhost_menu=$(cat <<EOF
```
```diff
@@ -335,7 +335,7 @@ EOF
     case "${q_dbtype_variant}" in
         1 )
             while true; do
-                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}M${cdgy}\) | \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
+                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
                 case "${q_dbtype,,}" in
                     m* )
                         r_dbtype=mysql
```
```diff
@@ -358,7 +358,7 @@ EOF
             ;;
         2 )
             while true; do
-                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
+                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
                 case "${q_dbtype,,}" in
                     e* | "" )
                         r_dbtype=${DB_TYPE}
```
```diff
@@ -385,7 +385,7 @@ EOF
             ;;
         3 )
             while true; do
-                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
+                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
                 case "${q_dbtype,,}" in
                     f* | "" )
                         r_dbtype=${p_dbtype}
```
```diff
@@ -413,7 +413,7 @@ EOF

         4 )
             while true; do
-                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
+                read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}E${cdgy}\) \| \(${cwh}F${cdgy}\) \| \(${cwh}M${cdgy}\) \| \(${cwh}O${cdgy}\) \| \(${cwh}P${cdgy}\) : ${cwh}${coff}) " q_dbtype
                 case "${q_dbtype,,}" in
                     e* | "" )
                         r_dbtype=${DB_TYPE}
```
```diff
@@ -427,6 +427,10 @@ EOF
                         r_dbtype=mysql
                         break
                         ;;
+                    o* )
+                        r_dbtype=mongo
+                        break
+                        ;;
                     p* )
                         r_dbtype=postgresql
                         break
```
```diff
@@ -915,6 +919,30 @@ case "${r_dbtype}" in
         exit_code=$?
         ;;
     mongo )
+        cat << EOF
+
+Do you wish to drop any existing data before restoring?
+
+    Y ) Yes
+    N ) No
+    Q ) Quit
+
+EOF
+
+        echo -e "${coff}"
+        read -p "$(echo -e ${clg}** ${cdgy}Enter Value \(${cwh}Y${cdgy}\) \| \(${cwh}N${cdgy}\) \| \(${cwh}Q${cdgy}\) : ${cwh}${coff})" q_menu_mongo_dropdb
+        case "${q_menu_mongo_dropdb,,}" in
+            "y" | "yes" | * )
+                mongo_dropdb="--drop"
+                ;;
+            "n" | "update" )
+                unset mongo_dropdb
+                ;;
+            "q" | "exit" )
+                print_info "Quitting Script"
+                exit 1
+                ;;
+        esac
+
         print_info "Restoring '${r_filename}' into '${r_dbhost}'/'${r_dbname}'"
         if [ "${ENABLE_COMPRESSION,,}" != "none" ] && [ "${ENABLE_COMPRESSION,,}" != "false" ] ; then
             mongo_compression="--gzip"
```
```diff
@@ -925,7 +953,11 @@ case "${r_dbtype}" in
         if [ -n "${r_dbpass}" ] ; then
             mongo_pass="-p=${r_dbpass}"
         fi
-        mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_user} ${mongo_pass} --archive=${r_filename}
+        if [ -n "${DB_AUTH}" ] ; then
+            mongo_auth_database="--authenticationDatabase=${DB_AUTH}"
+        fi
+
+        mongorestore ${mongo_compression} -d=${r_dbname} -h=${r_dbhost} --port=${r_dbport} ${mongo_dropdb} ${mongo_user} ${mongo_pass} --archive=${r_filename} ${mongo_auth_database}
         exit_code=$?
         ;;
     * )
```
```diff
@@ -939,4 +971,4 @@ if [ "${exit_code}" = 0 ] ; then
     print_info "Restore complete!"
 else
     print_error "Restore reported errors"
 fi
 fi
```