Compare commits

..

2 Commits

Author SHA1 Message Date
dave@tiredofit.ca
9159783691 Release 3.5.3 - See CHANGELOG.md 2022-10-12 08:42:05 -07:00
dave@tiredofit.ca
7e5e9d308d Release 3.5.3 - See CHANGELOG.md 2022-10-12 08:39:31 -07:00
20 changed files with 1807 additions and 4242 deletions

View File

@@ -1,14 +1,112 @@
name: "build_image" ### Application Level Image CI
### Dave Conroy <dave at tiredofit dot ca>
name: 'build'
on: on:
push: push:
paths: paths:
- "**" - '**'
- "!README.md" - '!README.md'
jobs: jobs:
build: docker:
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main runs-on: ubuntu-latest
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main steps:
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main - name: Checkout
secrets: inherit uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
set -x
if [[ $GITHUB_REF == refs/heads/* ]]; then
if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
else
BRANCH=${GITHUB_REF#refs/heads/}
fi
case ${BRANCH} in
"main" | "master" )
BRANCHTAG="${DOCKER_IMAGE}:latest"
;;
"develop" )
BRANCHTAG="${DOCKER_IMAGE}:develop"
;;
* )
if [ -n "${{ secrets.LATEST }}" ] ; then
if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
;;
esac
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
fi
if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
TAGS=${BRANCHTAG},${GITTAG}
else
TAGS="${BRANCHTAG}${GITTAG}"
fi
echo ::set-output name=tags::${TAGS}
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Label
id: Label
run: |
if [ -f "Dockerfile" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
if [ -f "CHANGELOG.md" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
fi
if [[ $GITHUB_REF == refs/heads/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
fi
fi
- name: Build
uses: docker/build-push-action@v3
with:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm/v7,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}

View File

@@ -1,4 +1,6 @@
name: "manual_build_image" # Manual Workflow (Application)
name: manual
on: on:
workflow_dispatch: workflow_dispatch:
@@ -6,10 +8,105 @@ on:
Manual Build: Manual Build:
description: 'Manual Build' description: 'Manual Build'
required: false required: false
jobs: jobs:
build: docker:
uses: tiredofit/github_actions/.github/workflows/default_amd64_armv7_arm64.yml@main runs-on: ubuntu-latest
#uses: tiredofit/github_actions/.github/workflows/default_amd64.yml@main steps:
#uses: tiredofit/github_actions/.github/workflows/default_amd64_arm64.yml@main - name: Checkout
secrets: inherit uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=${GITHUB_REPOSITORY/docker-/}
set -x
if [[ $GITHUB_REF == refs/heads/* ]]; then
if [[ $GITHUB_REF == refs/heads/*/* ]] ; then
BRANCH="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed "s|refs/heads/||g" | sed "s|/|-|g")"
else
BRANCH=${GITHUB_REF#refs/heads/}
fi
case ${BRANCH} in
"main" | "master" )
BRANCHTAG="${DOCKER_IMAGE}:latest"
;;
"develop" )
BRANCHTAG="${DOCKER_IMAGE}:develop"
;;
* )
if [ -n "${{ secrets.LATEST }}" ] ; then
if [ "${BRANCHTAG}" = "${{ secrets.LATEST }}" ]; then
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest,${DOCKER_IMAGE}:latest"
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
else
BRANCHTAG="${DOCKER_IMAGE}:${BRANCH},${DOCKER_IMAGE}:${BRANCH}-latest"
fi
;;
esac
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
GITTAG="${DOCKER_IMAGE}:$(echo $GITHUB_REF | sed 's|refs/tags/||g')"
fi
if [ -n "${BRANCHTAG}" ] && [ -n "${GITTAG}" ]; then
TAGS=${BRANCHTAG},${GITTAG}
else
TAGS="${BRANCHTAG}${GITTAG}"
fi
echo ::set-output name=tags::${TAGS}
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Label
id: Label
run: |
if [ -f "Dockerfile" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_repository=\"https://github.com/${GITHUB_REPOSITORY}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_commit=\"${GITHUB_SHA}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image.git_committed_by=\"${GITHUB_ACTOR}\"" Dockerfile
sed -i "/FROM .*/a LABEL tiredofit.image_build_date=\"$(date +'%Y-%m-%d %H:%M:%S')\"" Dockerfile
if [ -f "CHANGELOG.md" ] ; then
sed -i "/FROM .*/a LABEL tiredofit.db-backup.git_changelog_version=\"$(head -n1 ./CHANGELOG.md | awk '{print $2}')\"" Dockerfile
mkdir -p install/assets/.changelogs ; cp CHANGELOG.md install/assets/.changelogs/${GITHUB_REPOSITORY/\//_}.md
fi
if [[ $GITHUB_REF == refs/tags/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_tag=\"${GITHUB_REF#refs/tags/v}\"" Dockerfile
fi
if [[ $GITHUB_REF == refs/heads/* ]]; then
sed -i "/FROM .*/a LABEL tiredofit.image.git_branch=\"${GITHUB_REF#refs/heads/}\"" Dockerfile
fi
fi
- name: Build
uses: docker/build-push-action@v3
with:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm/v7,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}

File diff suppressed because it is too large Load Diff

View File

@@ -1,88 +1,62 @@
ARG DISTRO=alpine FROM docker.io/tiredofit/alpine:3.16
ARG DISTRO_VARIANT=edge
FROM docker.io/tiredofit/${DISTRO}:${DISTRO_VARIANT}
LABEL maintainer="Dave Conroy (github.com/tiredofit)" LABEL maintainer="Dave Conroy (github.com/tiredofit)"
### Set Environment Variables ### Set Environment Variables
ENV INFLUX1_CLIENT_VERSION=1.8.0 \
INFLUX2_CLIENT_VERSION=2.7.3 \ ENV INFLUX2_VERSION=2.4.0 \
MSODBC_VERSION=18.3.2.1-1 \ MSSQL_VERSION=18.0.1.1-1 \
MSSQL_VERSION=18.3.1.1-1 \ CONTAINER_ENABLE_MESSAGING=FALSE \
AWS_CLI_VERSION=1.31.4 \
CONTAINER_ENABLE_MESSAGING=TRUE \
CONTAINER_ENABLE_MONITORING=TRUE \ CONTAINER_ENABLE_MONITORING=TRUE \
CONTAINER_PROCESS_RUNAWAY_PROTECTOR=FALSE \
IMAGE_NAME="tiredofit/db-backup" \ IMAGE_NAME="tiredofit/db-backup" \
IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/" IMAGE_REPO_URL="https://github.com/tiredofit/docker-db-backup/"
### Dependencies ### Dependencies
RUN source /assets/functions/00-container && \ RUN set -ex && \
set -ex && \ apk update && \
addgroup -S -g 10000 dbbackup && \ apk upgrade && \
adduser -S -D -H -u 10000 -G dbbackup -g "Tired of I.T! DB Backup" dbbackup && \ apk add -t .db-backup-build-deps \
\
package update && \
package upgrade && \
package install .db-backup-build-deps \
build-base \ build-base \
bzip2-dev \ bzip2-dev \
cargo \
git \ git \
go \
libarchive-dev \ libarchive-dev \
openssl-dev \ libressl-dev \
libffi-dev \ libffi-dev \
python3-dev \ python3-dev \
py3-pip \ py3-pip \
xz-dev \ xz-dev \
&& \ && \
\ \
package install .db-backup-run-deps \ apk add --no-cache -t .db-backup-run-deps \
aws-cli \
bzip2 \ bzip2 \
coreutils \ influxdb \
gpg \
gpg-agent \
groff \
libarchive \ libarchive \
mariadb-client \ mariadb-client \
mariadb-connector-c \ mariadb-connector-c \
mongodb-tools \ mongodb-tools \
openssl \ libressl \
pigz \ pigz \
postgresql16 \ postgresql \
postgresql16-client \ postgresql-client \
pv \ pv \
py3-botocore \
py3-colorama \
py3-cryptography \ py3-cryptography \
py3-docutils \
py3-jmespath \
py3-rsa \
py3-setuptools \
py3-s3transfer \
py3-yaml \
python3 \
redis \ redis \
sqlite \ sqlite \
xz \ xz \
zip \
zstd \ zstd \
&& \ && \
\ \
apkArch="$(uname -m)"; \ apkArch="$(apk --print-arch)"; \
case "$apkArch" in \ case "$apkArch" in \
x86_64) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=amd64; ;; \ x86_64) mssql=true ; influx2=true ; influx_arch=amd64; ;; \
arm64 ) mssql=true ; mssql_arch=amd64; influx2=true ; influx_arch=arm64 ;; \ aarch64 ) influx2=true ; influx_arch=arm64 ;; \
*) sleep 0.1 ;; \ *) sleep 0.1 ;; \
esac; \ esac; \
\ \
if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk ; curl -O https://download.microsoft.com/download/3/5/5/355d7943-a338-41a7-858d-53b259ea33f5/mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSODBC_VERSION}_${mssql_arch}.apk mssql-tools18_${MSSQL_VERSION}_${mssql_arch}.apk ; else echo >&2 "Detected non x86_64 or ARM64 build variant, skipping MSSQL installation" ; fi; \ if [ $mssql = "true" ] ; then curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/msodbcsql18_${MSSQL_VERSION}_amd64.apk ; curl -O https://download.microsoft.com/download/b/9/f/b9f3cce4-3925-46d4-9f46-da08869c6486/mssql-tools18_${MSSQL_VERSION}_amd64.apk ; echo y | apk add --allow-untrusted msodbcsql18_${MSSQL_VERSION}_amd64.apk mssql-tools18_${MSSQL_VERSION}_amd64.apk ; else echo >&2 "Detected non x86_64 build variant, skipping MSSQL installation" ; fi; \
if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_CLIENT_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \ if [ $influx2 = "true" ] ; then curl -sSL https://dl.influxdata.com/influxdb/releases/influxdb2-client-${INFLUX2_VERSION}-linux-${influx_arch}.tar.gz | tar xvfz - --strip=1 -C /usr/src/ ; chmod +x /usr/src/influx ; mv /usr/src/influx /usr/sbin/ ; else echo >&2 "Unable to build Influx 2 on this system" ; fi ; \
clone_git_repo https://github.com/aws/aws-cli "${AWS_CLI_VERSION}" && \ \
python3 setup.py install --prefix=/usr && \
clone_git_repo https://github.com/influxdata/influxdb "${INFLUX1_CLIENT_VERSION}" && \
go build -o /usr/sbin/influxd ./cmd/influxd && \
strip /usr/sbin/influxd && \
mkdir -p /usr/src/pbzip2 && \ mkdir -p /usr/src/pbzip2 && \
curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \ curl -sSL https://launchpad.net/pbzip2/1.1/1.1.13/+download/pbzip2-1.1.13.tar.gz | tar xvfz - --strip=1 -C /usr/src/pbzip2 && \
cd /usr/src/pbzip2 && \ cd /usr/src/pbzip2 && \
@@ -99,16 +73,14 @@ RUN source /assets/functions/00-container && \
make && \ make && \
make install && \ make install && \
\ \
pip3 install --break-system-packages blobxfer && \ pip3 install blobxfer && \
\ \
package remove .db-backup-build-deps && \ ### Cleanup
package cleanup && \ apk del .db-backup-build-deps && \
rm -rf \ rm -rf /usr/src/* && \
/*.apk \ rm -rf /*.apk && \
/etc/logrotate.d/* \ rm -rf /etc/logrotate.d/redis && \
/root/.cache \ rm -rf /root/.cache /tmp/* /var/cache/apk/*
/root/go \
/tmp/* \
/usr/src/*
COPY install / ### S6 Setup
ADD install /

View File

@@ -1,6 +1,6 @@
The MIT License (MIT) The MIT License (MIT)
Copyright (c) 2023 Dave Conroy Copyright (c) 2022 Dave Conroy
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

874
README.md

File diff suppressed because it is too large Load Diff

View File

@@ -1,67 +0,0 @@
services:
example-db:
hostname: example-db-host
container_name: example-db
image: tiredofit/mariadb:10.11
ports:
- 3306:3306
volumes:
- ./db:/var/lib/mysql
environment:
- ROOT_PASS=examplerootpassword
- DB_NAME=example
- DB_USER=example
- DB_PASS=examplepassword
restart: always
networks:
- example-db-network
example-db-backup:
container_name: example-db-backup
image: tiredofit/db-backup
volumes:
- ./backups:/backup
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
- TIMEZONE=America/Vancouver
- CONTAINER_NAME=example-db-backup
- CONTAINER_ENABLE_MONITORING=FALSE
# - DEBUG_MODE=TRUE
- BACKUP_JOB_CONCURRENCY=1 # Only run one job at a time
- DEFAULT_CHECKSUM=NONE # Don't create checksums
- DEFAULT_COMPRESSION=ZSTD # Compress all with ZSTD
- DEFAULT_DUMP_INTERVAL=1440 # Backup every 1440 minutes
- DEFAULT_DUMP_BEGIN=0000 # Start backing up at midnight
- DEFAULT_CLEANUP_TIME=8640 # Cleanup backups after a week
- DB01_TYPE=mariadb
- DB01_HOST=example-db-host
- DB01_NAME=example
- DB01_USER=example
- DB01_PASS=examplepassword
- DB01_DUMP_INTERVAL=30 # (override) Backup every 30 minutes
- DB01_DUMP_BEGIN=+1 # (override) Backup starts immediately
- DB01_CLEANUP_TIME=180 # (override) Cleanup backups they are older than 180 minutes
- DB01_CHECKSUM=SHA1 # (override) Create a SHA1 checksum
- DB01_COMPRESSION=GZ # (override) Compress with GZIP
#- DB02_TYPE=postgres
#- DB02_HOST=example-postgres-host
#- DB02_NAME=example
#- DB02_USER=example
#- DB02_PASS=examplepassword
#- DB02_DUMP_INTERVAL=60 # (override) Backup every 60 minutes
#- DB02_DUMP_BEGIN=+10 # (override) Backup starts in ten minutes
#- DB02_CLEANUP_TIME=240 # (override) Cleanup backups they are older than 240 minutes
#- DB02_CHECKSUM=MD5 # (override) Create a SHA1 checksum
#- DB02_COMPRESSION=BZ # (override) Compress with BZIP
restart: always
networks:
- example-db-network
networks:
example-db-network:
name: example-db-network

View File

@@ -3,6 +3,12 @@
# upload with blobxfer to azure storage # upload with blobxfer to azure storage
# #
version: '2'
networks:
example-mssql-blobxfer-net:
name: example-mssql-blobxfer-net
services: services:
example-mssql-s3-db: example-mssql-s3-db:
hostname: example-db-host hostname: example-db-host
@@ -26,7 +32,7 @@ services:
# execute in terminal --> docker build -t tiredofit/db-backup-mssql-blobxfer . # execute in terminal --> docker build -t tiredofit/db-backup-mssql-blobxfer .
# replace --> image: tiredofit/db-backup-mssql # replace --> image: tiredofit/db-backup-mssql
# image: tiredofit/db-backup # image: tiredofit/db-backup
image: tiredofit/db-backup image: tiredofit/db-backup-mssql-blobxfer
links: links:
- example-mssql-s3-db - example-mssql-s3-db
volumes: volumes:
@@ -34,35 +40,30 @@ services:
- ./tmp/backups:/tmp/backups # shared tmp backup directory - ./tmp/backups:/tmp/backups # shared tmp backup directory
#- ./post-script.sh:/assets/custom-scripts/post-script.sh #- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment: environment:
- TIMEZONE=America/Vancouver
- CONTAINER_ENABLE_MONITORING=FALSE
- CONTAINER_NAME=example-mssql-blobxfer-db-backup
# - DEBUG_MODE=TRUE # - DEBUG_MODE=TRUE
- DB01_TYPE=mssql - DB_TYPE=mssql
- DB01_HOST=example-db-host - DB_HOST=example-db-host
# - DB01_PORT=1488 # - DB_PORT=1488
# - DB_NAME=ALL # [ALL] not working on sql server.
# create database with name `test1` manually first # create database with name `test1` manually first
- DB01_NAME=test1 # Create this database - DB_NAME=test1 # Create this database
- DB01_USER=sa - DB_USER=sa
- DB01_PASS=5hQa0utRFBpIY3yhoIyE - DB_PASS=5hQa0utRFBpIY3yhoIyE
- DB01_DUMP_INTERVAL=5 # backup every 5 minute - DB_DUMP_FREQ=1 # backup every 5 minute
# - DB01_DUMP_BEGIN=0000 # backup starts at midnight vs not set immediately # - DB_DUMP_BEGIN=0000 # backup starts immediately
- DB01_CLEANUP_TIME=60 # clean backups they are older than 60 minutes - DB_CLEANUP_TIME=3 # clean backups they are older than 60 minutes
- DB01_CHECKSUM=SHA1 # Set Checksum to be SHA1 - ENABLE_CHECKSUM=TRUE
- DB01_COMPRESSION=GZ # Set compression to use GZIP - CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
- CONTAINER_ENABLE_MONITORING=FALSE
# === S3 Blobxfer === # === S3 Blobxfer ===
- DB01_BACKUP_LOCATION=blobxfer - BACKUP_LOCATION=blobxfer
# Add here azure storage account # Add here azure storage account
- DB01_BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name} - BLOBXFER_STORAGE_ACCOUNT={TODO Add Storage Name}
# Add here azure storage account key # Add here azure storage account key
- SB01_BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key} - BLOBXFER_STORAGE_ACCOUNT_KEY={TODO Add Key}
- DB01_BLOBXFER_REMOTE_PATH=docker-db-backup - BLOBXFER_REMOTE_PATH=docker-db-backup
restart: always restart: always
networks: networks:
example-mssql-blobxfer-net: example-mssql-blobxfer-net:
networks:
example-mssql-blobxfer-net:
name: example-mssql-blobxfer-net

View File

@@ -2,6 +2,12 @@
# Example for Microsoft SQL Server # Example for Microsoft SQL Server
# #
version: '2'
networks:
example-mssql-net:
name: example-mssql-net
services: services:
example-mssql-db: example-mssql-db:
hostname: example-db-host hostname: example-db-host
@@ -25,7 +31,7 @@ services:
# execute in terminal --> docker build -t tiredofit/db-backup-mssql . # execute in terminal --> docker build -t tiredofit/db-backup-mssql .
# replace --> image: tiredofit/db-backup-mssql # replace --> image: tiredofit/db-backup-mssql
# image: tiredofit/db-backup # image: tiredofit/db-backup
image: tiredofit/db-backup image: tiredofit/db-backup-mssql
links: links:
- example-mssql-db - example-mssql-db
volumes: volumes:
@@ -33,28 +39,23 @@ services:
- ./tmp/backups:/tmp/backups # shared tmp backup directory - ./tmp/backups:/tmp/backups # shared tmp backup directory
#- ./post-script.sh:/assets/custom-scripts/post-script.sh #- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment: environment:
- TIMEZONE=America/Vancouver
- CONTAINER_ENABLE_MONITORING=FALSE
- CONTAINER_NAME=example-mssql-blobxfer-db-backup
# - DEBUG_MODE=TRUE # - DEBUG_MODE=TRUE
- DB01_TYPE=mssql - DB_TYPE=mssql
- DB01_HOST=example-db-host - DB_HOST=example-db-host
# - DB_PORT=1488 # - DB_PORT=1488
# - DB_NAME=ALL # [ALL] not working on sql server. # - DB_NAME=ALL # [ALL] not working on sql server.
# create database with name `test1` manually first # create database with name `test1` manually first
- DB01_NAME=test1 - DB_NAME=test1
- DB01_USER=sa - DB_USER=sa
- DB01_PASS=5hQa0utRFBpIY3yhoIyE - DB_PASS=5hQa0utRFBpIY3yhoIyE
- DB01_DUMP_INTERVAL=1 # backup every minute - DB_DUMP_FREQ=1 # backup every minute
# - DB01_DUMP_BEGIN=0000 # backup starts at midnight vs unset immediately # - DB_DUMP_BEGIN=0000 # backup starts immediately
- DB01_CLEANUP_TIME=5 # clean backups they are older than 5 minute - DB_CLEANUP_TIME=5 # clean backups they are older than 5 minute
- DB01_CHECKSUM=NONE - ENABLE_CHECKSUM=FALSE
- DB01_COMPRESSION=GZ - CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
- CONTAINER_ENABLE_MONITORING=FALSE
restart: always restart: always
networks: networks:
example-mssql-net: example-mssql-net:
networks:
example-mssql-net:
name: example-mssql-net

View File

@@ -0,0 +1,51 @@
version: '2'
networks:
example-db-network:
name: example-db-network
services:
example-db:
hostname: example-db-host
container_name: example-db
image: mariadb:latest
ports:
- 13306:3306
volumes:
- ./db:/var/lib/mysql
environment:
- MYSQL_ROOT_PASSWORD=examplerootpassword
- MYSQL_DATABASE=example
- MYSQL_USER=example
- MYSQL_PASSWORD=examplepassword
restart: always
networks:
- example-db-network
example-db-backup:
container_name: example-db-backup
image: tiredofit/db-backup
links:
- example-db
volumes:
- ./backups:/backup
#- ./post-script.sh:/assets/custom-scripts/post-script.sh
environment:
# - DEBUG_MODE=TRUE
- DB_TYPE=mariadb
- DB_HOST=example-db-host
- DB_NAME=example
- DB_USER=example
- DB_PASS=examplepassword
- DB_DUMP_FREQ=1 # backup every minute
# - DB_DUMP_BEGIN=0000 # backup starts immediately
- DB_CLEANUP_TIME=5 # clean backups they are older than 5 minute
- CHECKSUM=SHA1
- COMPRESSION=GZ
- SPLIT_DB=FALSE
- CONTAINER_ENABLE_MONITORING=FALSE
restart: always
networks:
- example-db-network

View File

@@ -4,7 +4,7 @@
# #### $1=EXIT_CODE (After running backup routine) # #### $1=EXIT_CODE (After running backup routine)
# #### $2=DB_TYPE (Type of Backup) # #### $2=DB_TYPE (Type of Backup)
# #### $3=DB_HOST (Backup Host) # #### $3=DB_HOST (Backup Host)
# #### #4=DB_NAME (Name of Database backed up) # #### #4=DB_NAME (Name of Database backed up
# #### $5=BACKUP START TIME (Seconds since Epoch) # #### $5=BACKUP START TIME (Seconds since Epoch)
# #### $6=BACKUP FINISH TIME (Seconds since Epoch) # #### $6=BACKUP FINISH TIME (Seconds since Epoch)
# #### $7=BACKUP TOTAL TIME (Seconds between Start and Finish) # #### $7=BACKUP TOTAL TIME (Seconds between Start and Finish)

View File

@@ -1,113 +0,0 @@
#!/command/with-contenv bash
source /assets/functions/00-container
PROCESS_NAME="db-backup{{BACKUP_NUMBER}}-scheduler"
check_container_initialized
check_service_initialized init 10-db-backup
source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
bootstrap_variables backup_init {{BACKUP_NUMBER}}
bootstrap_variables parse_variables {{BACKUP_NUMBER}}
if [ -z "${backup_job_db_name}" ]; then
PROCESS_NAME="{{BACKUP_NUMBER}}${backup_job_db_host//\//_}"
else
PROCESS_NAME="{{BACKUP_NUMBER}}-${backup_job_db_host//\//_}__${backup_job_db_name}"
fi
trap ctrl_c INT
if [[ "${MODE,,}" =~ "standalone" ]] || [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
print_debug "Detected Manual Mode"
persist=false
backup_job_backup_begin=+0
else
silent sleep {{BACKUP_NUMBER}}
time_last_run=0
time_current=$(date +'%s')
if [[ "${backup_job_backup_begin}" =~ ^\+(.*)$ ]]; then
print_debug "BACKUP_BEGIN is a jump of minute starting with +"
timer plusvalue
elif [[ "${backup_job_backup_begin}" =~ ^[0-9]{4}$ ]]; then
print_debug "BACKUP_BEGIN is a HHMM value"
timer time
elif [[ "${backup_job_backup_begin}" =~ ([0-9]{4})-([0-9]{2})-([0-9]{2})[[:space:]]([0-9]{2}):([0-9]{2}):([0-9]{2}) ]]; then
print_debug "BACKUP_BEGIN is a full date timestamp"
timer datetime
#elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(.*((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then # Allow slashes, yet not supporting advanced cron yet
elif echo "${backup_job_backup_begin//\*/#}" | grep -qP "^(((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|#) ?){5}$" ; then
print_debug "BACKUP_BEGIN is a cron expression"
time_last_run=$(date +"%s")
backup_job_backup_begin=${backup_job_backup_begin//\"/}
backup_job_backup_begin=${backup_job_backup_begin//\'/}
timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
else
print_error "_BACKUP_BEGIN is invalid - Unable to perform scheduling"
cat <<EOF
Valid Methods:
+(number) - Start in however many minutes
HHMM - Start at hour (00-24) and minute (00-59)
YYYY-MM-DD HH:mm:ss - Start at a specific date and time
0 23 * * * - Cron expression
EOF
print_error "Stopping backup_scheduler {{BACKUP_NUMBER}} due to detected errors. Fix and restart container."
stop_scheduler_backup=true
s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
fi
print_debug "Wait Time: ${time_wait} Future execution time: ${time_future} Current Time: ${time_current}"
print_info "Next Backup at $(date -d @"${time_future}" +'%Y-%m-%d %T %Z')"
silent sleep "${time_wait}"
fi
while true; do
if [ -n "${backup_job_blackout_start}" ] && [ -n "${backup_job_blackout_finish}" ] ; then
time_current_hour_minute=$(date +%H%M)
if [[ "${time_current_hour_minute}" > "${backup_job_blackout_start}" ]] && [[ "${time_current_hour_minute}" < "${backup_job_blackout_finish}" ]] ; then
blackout=true
else
blackout=false
fi
fi
if var_true "${blackout}" ; then
print_notice "Detected Blackout Period - Not performing backup operations"
else
timer job start
process_limiter
echo "{{BACKUP_NUMBER}}" >> /tmp/.container/db-backup-backups
print_debug "Backup {{BACKUP_NUMBER}} routines started time: $(date +'%Y-%m-%d %T %Z')"
bootstrap_filesystem
check_availability
backup_"${dbtype,,}"
timer job stop
if [ -z "${exitcode_backup}" ] ; then exitcode_backup="0" ; fi
print_info "Backup {{BACKUP_NUMBER}} routines finish time: $(date -d @"${backup_job_finish_time}" +'%Y-%m-%d %T %Z') with exit code ${exitcode_backup}"
print_notice "Backup {{BACKUP_NUMBER}} routines time taken: $(echo "${backup_job_total_time}" | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
sed -i "/^{{BACKUP_NUMBER}}/d" /tmp/.container/db-backup-backups
fi
symlink_log
if var_false "${persist}" ; then
print_debug "Exiting due to manual mode"
exit "${exitcode_backup}";
else
if var_true "${stop_scheduler_backup}" ; then
print_error "Stopping backup_scheduler {{BACKUP_NUMBER}} due to detected errors. Fix and restart container."
s6-svc -d /var/run/s6/legacy-services/dbbackup-{{BACKUP_NUMBER}}
else
if [ ! "${time_cron}" = "true" ]; then
print_notice "Sleeping for another $(($backup_job_backup_interval*60-backup_job_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($backup_job_backup_interval*60-backup_job_total_time))))" +'%Y-%m-%d %T %Z') "
silent sleep $(($backup_job_backup_interval*60-backup_job_total_time))
else
time_last_run=$(date +"%s")
timer cron "${backup_job_backup_begin}" "${time_current}" "${time_last_run}"
print_notice "Sleeping for another ${time_wait} seconds. Waking up at $(date -d@"${time_future}" +'%Y-%m-%d %T %Z') "
silent sleep "${time_wait}"
fi
fi
fi
done

View File

@@ -1,46 +1,26 @@
#!/command/with-contenv bash #!/command/with-contenv bash
BACKUP_JOB_CONCURRENCY=${BACKUP_JOB_CONCURRENCY:-"1"} BACKUP_LOCATION=${BACKUP_LOCATION:-"FILESYSTEM"}
DBBACKUP_USER=${DBBACKUP_USER:-"dbbackup"} BLOBXFER_REMOTE_PATH=${BLOBXFER_REMOTE_PATH:-"/docker-db-backup"}
DBBACKUP_GROUP=${DBBACKUP_GROUP:-"${DBBACKUP_USER}"} # Must go after DBBACKUP_USER CHECKSUM=${CHECKSUM:-"MD5"}
DEFAULT_BACKUP_BEGIN=${DEFAULT_BACKUP_BEGIN:-+0} COMPRESSION=${COMPRESSION:-"ZSTD"}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440} COMPRESSION_LEVEL=${COMPRESSION_LEVEL:-"3"}
DEFAULT_BACKUP_INTERVAL=${DEFAULT_BACKUP_INTERVAL:-1440} DB_DUMP_BEGIN=${DB_DUMP_BEGIN:-+0}
DEFAULT_BACKUP_LOCATION=${DEFAULT_BACKUP_LOCATION:-"FILESYSTEM"} DB_DUMP_FREQ=${DB_DUMP_FREQ:-1440}
DEFAULT_BLOBXFER_REMOTE_PATH=${DEFAULT_BLOBXFER_REMOTE_PATH:-"/docker-db-backup"} DB_DUMP_TARGET=${DB_DUMP_TARGET:-"/backup"}
DEFAULT_CHECKSUM=${DEFAULT_CHECKSUM:-"MD5"} ENABLE_CHECKSUM=${ENABLE_CHECKSUM:-"TRUE"}
DEFAULT_COMPRESSION=${DEFAULT_COMPRESSION:-"ZSTD"} ENABLE_PARALLEL_COMPRESSION=${ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
DEFAULT_COMPRESSION_LEVEL=${DEFAULT_COMPRESSION_LEVEL:-"3"}
DEFAULT_CREATE_LATEST_SYMLINK=${DEFAULT_CREATE_LATEST_SYMLINK:-"TRUE"}
DEFAULT_ENABLE_PARALLEL_COMPRESSION=${DEFAULT_ENABLE_PARALLEL_COMPRESSION:-"TRUE"}
DEFAULT_ENCRYPT=${DEFAULT_ENCRYPT:-"FALSE"}
DEFAULT_FILESYSTEM_PATH=${DEFAULT_FILESYSTEM_PATH:-"/backup"}
DEFAULT_FILESYSTEM_PATH_PERMISSION=${DEFAULT_FILESYSTEM_PATH_PERMISSION:-"700"}
DEFAULT_FILESYSTEM_PERMISSION=${DEFAULT_FILESYSTEM_PERMISSION:-"600"}
DEFAULT_FILESYSTEM_ARCHIVE_PATH=${DEFAULT_FILESYSTEM_ARCHIVE_PATH:-"${DEFAULT_FILESYSTEM_PATH}/archive/"}
DEFAULT_LOG_LEVEL=${DEFAULT_LOG_LEVEL:-"notice"}
DEFAULT_MYSQL_ENABLE_TLS=${DEFAULT_MYSQL_ENABLE_TLS:-"FALSE"}
DEFAULT_MYSQL_EVENTS=${DEFAULT_MYSQL_EVENTS:-"TRUE"}
DEFAULT_MYSQL_MAX_ALLOWED_PACKET=${DEFAULT_MYSQL_MAX_ALLOWED_PACKET:-"512M"}
DEFAULT_MYSQL_SINGLE_TRANSACTION=${DEFAULT_MYSQL_SINGLE_TRANSACTION:-"TRUE"}
DEFAULT_MYSQL_STORED_PROCEDURES=${DEFAULT_MYSQL_STORED_PROCEDURES:-"TRUE"}
DEFAULT_MYSQL_TLS_CA_FILE=${DEFAULT_MYSQL_TLS_CA_FILE:-"/etc/ssl/cert.pem"}
DEFAULT_MYSQL_TLS_VERIFY=${DEFAULT_MYSQL_TLS_VERIFY:-"FALSE"}
DEFAULT_MYSQL_TLS_VERSION=${DEFAULT_MYSQL_TLS_VERSION:-"TLSv1.1,TLSv1.2,TLSv1.3"}
DEFAULT_MSSQL_MODE=${DEFAULT_MSSQL_MODE:-"database"}
DEFAULT_PARALLEL_COMPRESSION_THREADS=${DEFAULT_PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
DEFAULT_RESOURCE_OPTIMIZED=${DEFAULT_RESOURCE_OPTIMIZED:-"FALSE"}
DEFAULT_S3_CERT_SKIP_VERIFY=${DEFAULT_S3_CERT_SKIP_VERIFY:-"TRUE"}
DEFAULT_S3_PROTOCOL=${DEFAULT_S3_PROTOCOL:-"https"}
DEFAULT_SCRIPT_LOCATION_PRE=${DEFAULT_SCRIPT_LOCATION_PRE:-"/assets/scripts/pre/"}
DEFAULT_SCRIPT_LOCATION_POST=${DEFAULT_SCRIPT_LOCATION_POST:-"/assets/scripts/post/"}
DEFAULT_SIZE_VALUE=${DEFAULT_SIZE_VALUE:-"bytes"}
DEFAULT_SKIP_AVAILABILITY_CHECK=${DEFAULT_SKIP_AVAILABILITY_CHECK:-"FALSE"}
DEFAULT_SPLIT_DB=${DEFAULT_SPLIT_DB:-"TRUE"}
LOG_PATH=${LOG_PATH:-"/logs"}
MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"} MANUAL_RUN_FOREVER=${MANUAL_RUN_FOREVER:-"TRUE"}
MODE=${MODE:-"AUTO"} MODE=${MODE:-"AUTO"}
MYSQL_MAX_ALLOWED_PACKET=${MYSQL_MAX_ALLOWED_PACKET:-"512M"}
TEMP_PATH=${TEMP_PATH:-"/tmp/backups"} MYSQL_SINGLE_TRANSACTION=${MYSQL_SINGLE_TRANSACTION:-"TRUE"}
if [ -n "${TEMP_LOCATION}" ] ; then TEMP_PATH=${TEMP_LOCATION:-"/tmp/backups"} ; fi # To be removed 4.3.0 MYSQL_STORED_PROCEDURES=${MYSQL_STORED_PROCEDURES:-"TRUE"}
PARALLEL_COMPRESSION_THREADS=${PARALLEL_COMPRESSION_THREADS:-"$(nproc)"}
S3_CERT_SKIP_VERIFY=${S3_CERT_SKIP_VERIFY:-"TRUE"}
S3_PROTOCOL=${S3_PROTOCOL:-"https"}
SCRIPT_LOCATION_PRE=${SCRIPT_LOCATION_PRE:-"/assets/scripts/pre/"}
SCRIPT_LOCATION_POST=${SCRIPT_LOCATION_POST:-"/assets/scripts/post/"}
SIZE_VALUE=${SIZE_VALUE:-"bytes"}
SKIP_AVAILABILITY_CHECK=${SKIP_AVAILABILITY_CHECK:-"FALSE"}
SPLIT_DB=${SPLIT_DB:-"TRUE"}
TEMP_LOCATION=${TEMP_LOCATION:-"/tmp/backups"}

File diff suppressed because it is too large Load Diff

View File

@@ -6,9 +6,9 @@ prepare_service 03-monitoring
PROCESS_NAME="db-backup" PROCESS_NAME="db-backup"
output_off output_off
bootstrap_variables
sanity_test
setup_mode setup_mode
db_backup_container_init create_zabbix dbbackup
create_schedulers backup
create_zabbix dbbackup4
liftoff liftoff

View File

@@ -0,0 +1,88 @@
#!/command/with-contenv bash
source /assets/functions/00-container
source /assets/functions/10-db-backup
source /assets/defaults/10-db-backup
PROCESS_NAME="db-backup"
bootstrap_variables
if [ "${MODE,,}" = "manual" ] || [ "${1,,}" = "manual" ] || [ "${1,,}" = "now" ]; then
DB_DUMP_BEGIN=+0
manual=TRUE
print_debug "Detected Manual Mode"
else
sleep 5
current_time=$(date +"%s")
today=$(date +"%Y%m%d")
if [[ $DB_DUMP_BEGIN =~ ^\+(.*)$ ]]; then
waittime=$(( ${BASH_REMATCH[1]} * 60 ))
target_time=$(($current_time + $waittime))
else
target_time=$(date --date="${today}${DB_DUMP_BEGIN}" +"%s")
if [[ "$target_time" < "$current_time" ]]; then
target_time=$(($target_time + 24*60*60))
fi
waittime=$(($target_time - $current_time))
fi
print_debug "Wait Time: ${waittime} Target time: ${target_time} Current Time: ${current_time}"
print_info "Next Backup at $(date -d @${target_time} +"%Y-%m-%d %T %Z")"
sleep $waittime
fi
while true; do
mkdir -p "${TEMP_LOCATION}"
backup_start_time=$(date +"%s")
print_debug "Backup routines started time: $(date +'%Y-%m-%d %T %Z')"
case "${dbtype,,}" in
"couch" )
check_availability
backup_couch
;;
"influx" )
check_availability
backup_influx
;;
"mssql" )
check_availability
backup_mssql
;;
"mysql" )
check_availability
backup_mysql
;;
"mongo" )
check_availability
backup_mongo
;;
"pgsql" )
check_availability
backup_pgsql
;;
"redis" )
check_availability
backup_redis
;;
"sqlite3" )
check_availability
backup_sqlite3
;;
esac
backup_finish_time=$(date +"%s")
backup_total_time=$(echo $((backup_finish_time-backup_start_time)))
if [ -z "$master_exit_code" ] ; then master_exit_code="0" ; fi
print_info "Backup routines finish time: $(date -d @${backup_finish_time} +"%Y-%m-%d %T %Z") with overall exit code ${master_exit_code}"
print_notice "Backup routines time taken: $(echo ${backup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
cleanup_old_data
if var_true "${manual}" ; then
print_debug "Exiting due to manual mode"
exit ${master_exit_code};
else
print_notice "Sleeping for another $(($DB_DUMP_FREQ*60-backup_total_time)) seconds. Waking up at $(date -d@"$(( $(date +%s)+$(($DB_DUMP_FREQ*60-backup_total_time))))" +"%Y-%m-%d %T %Z") "
sleep $(($DB_DUMP_FREQ*60-backup_total_time))
fi
done

View File

@@ -0,0 +1,4 @@
#!/command/with-contenv bash
echo '** Performing Manual Backup'
/etc/services.available/10-db-backup/run manual

View File

@@ -1,24 +0,0 @@
#!/command/with-contenv bash
source /assets/functions/00-container
source /assets/defaults/05-logging
source /assets/defaults/10-db-backup
## Compress each log 2 days old
timestamp_2dayold_unixtime="$(stat -c %Y "${LOG_PATH}"/"$(date --date='2 days ago' +'%Y%m%d')")"
for logfile in "${LOG_PATH}"/"$(date --date='2 days ago' +'%Y%m%d')"/"$(date --date='2 days ago' +'%Y%m%d')"_*.log ; do
sudo -u restic zstd --rm --rsyncable "${logfile}"
done
touch -t $(date -d"@${timestamp_2dayold_unixtime}" +'%Y%m%d%H%m.%S') "${LOG_PATH}"/"$(date --date='2 days ago' +'%Y%m%d')"
# Look for files older than a certain number of days and delete them
if [ -n "${LOG_PATH}" ] && [ -d "${LOG_PATH}" ] ; then
find "${LOG_PATH}" -mtime +"${LOGROTATE_RETAIN_DAYS}" -type d -exec rm -rf {} +
fi
# Look for stale symbolic links and delete accordingly
for symbolic_link in "${LOG_PATH}"/latest*.log ; do
if [ ! -e "${symbolic_link}" ] ; then
rm -rf "${symbolic_link}"
fi
done

File diff suppressed because it is too large Load Diff

View File

@@ -1,9 +1,10 @@
{ {
"zabbix_export": { "zabbix_export": {
"version": "6.4", "version": "6.0",
"template_groups": [ "date": "2022-03-18T13:32:12Z",
"groups": [
{ {
"uuid": "10b88d2b3a3a4c72b43bdce9310e1162", "uuid": "fa56524b5dbb4ec09d9777a6f7ccfbe4",
"name": "DB/Backup" "name": "DB/Backup"
}, },
{ {
@@ -13,10 +14,10 @@
], ],
"templates": [ "templates": [
{ {
"uuid": "5a16c1bd694145389eed5ee803d954cc", "uuid": "5fc64d517afb4cc5bc09a3ef58b43ef7",
"template": "DB Backup4", "template": "DB Backup",
"name": "DB Backup4", "name": "DB Backup",
"description": "Template for Docker DB Backup Image\n\nMeant for use specifically with https://github.com/tiredofit/docker-db-backup Version > 4.0.0\n\nSupports auto discovery of backup jobs and creates graphs and triggers", "description": "Template for Docker DB Backup Image\n\nMeant for use specifically with https://github.com/tiredofit/docker-db-backup\nLast tested with version 3.0.2",
"groups": [ "groups": [
{ {
"name": "DB/Backup" "name": "DB/Backup"
@@ -25,22 +26,16 @@
"name": "Templates/Databases" "name": "Templates/Databases"
} }
], ],
"discovery_rules": [ "items": [
{ {
"uuid": "94bb6f862e1841f8b2834b04c41c1d86", "uuid": "72fd00fa2dd24e479f5affe03e8711d8",
"name": "Backup", "name": "DB Backup: Backup Duration",
"type": "TRAP", "type": "TRAP",
"key": "dbbackup.backup", "key": "dbbackup.backup_duration",
"delay": "0",
"item_prototypes": [
{
"uuid": "5a2c4d1cacf844829bc1fbf912e071c5",
"name": "[{#NAME}] Checksum - Duration",
"type": "TRAP",
"key": "dbbackup.backup.checksum.duration.[{#NAME}]",
"delay": "0", "delay": "0",
"history": "7d", "history": "7d",
"units": "uptime", "units": "uptime",
"description": "How long the backup took",
"tags": [ "tags": [
{ {
"tag": "Application", "tag": "Application",
@@ -49,162 +44,102 @@
] ]
}, },
{ {
"uuid": "6e49769ec07344a4974b13dab00c3539", "uuid": "3549a2c9d56849babc6dc3c855484c1e",
"name": "[{#NAME}] Checksum - Hash", "name": "DB Backup: Backup Time",
"type": "TRAP", "type": "TRAP",
"key": "dbbackup.backup.checksum.hash.[{#NAME}]", "key": "dbbackup.datetime",
"delay": "0",
"history": "30d",
"trends": "0",
"value_type": "TEXT",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "bb6472e30bff4d9c908b1d34b893e622",
"name": "[{#NAME}] Backup - Last Backup",
"type": "TRAP",
"key": "dbbackup.backup.datetime.[{#NAME}]",
"delay": "0", "delay": "0",
"history": "7d", "history": "7d",
"units": "unixtime", "units": "unixtime",
"description": "Datestamp of last database backup", "request_method": "POST",
"tags": [ "tags": [
{ {
"tag": "Application", "tag": "Application",
"value": "DB Backup" "value": "DB Backup"
} }
], ],
"trigger_prototypes": [ "triggers": [
{ {
"uuid": "3681b56bb882466fb304a48b4beb15f0", "uuid": "3ac1e074ffea46eb8002c9c08a85e7b4",
"expression": "fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],172800s)=0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],259200s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],345600s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],432800s)<>0", "expression": "nodata(/DB Backup/dbbackup.datetime,2d)=1",
"name": "[{#NAME}] No backups detected in 2 days", "name": "DB-Backup: No backups detected in 2 days",
"priority": "HIGH",
"manual_close": "YES"
},
{
"uuid": "6c70136c84994197b6396a143b4e956f",
"expression": "fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],172800s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],259200s)=0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],345600s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],432800s)<>0",
"name": "[{#NAME}] No backups detected in 3 days",
"priority": "DISASTER", "priority": "DISASTER",
"manual_close": "YES" "manual_close": "YES"
}, },
{ {
"uuid": "d2038025cab643019cb9610c301f0cb9", "uuid": "b8b5933dfa1a488c9c37dd7f4784c1ff",
"expression": "fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],172800s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],259200s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],345600s)=0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],432800s)<>0", "expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)=0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)<>0",
"name": "[{#NAME}] No backups detected in 4 days", "name": "DB Backup: No Backups occurred in 2 days",
"priority": "DISASTER", "priority": "AVERAGE"
"manual_close": "YES"
}, },
{ {
"uuid": "ea85f02d032c4a1dbc1b6e91a3b2b37b", "uuid": "35c5f420d0e142cc9601bae38decdc40",
"expression": "fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],172800s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],259200s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],345600s)<>0 and fuzzytime(/DB Backup4/dbbackup.backup.datetime.[{#NAME}],432800s)=0", "expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)=0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)<>0",
"name": "[{#NAME}] No backups detected in 5 days", "name": "DB Backup: No Backups occurred in 3 days",
"priority": "DISASTER", "priority": "AVERAGE"
"manual_close": "YES" },
{
"uuid": "03c3719d82c241e886a0383c7d908a77",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)=0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)<>0",
"name": "DB Backup: No Backups occurred in 4 days",
"priority": "AVERAGE"
},
{
"uuid": "1634a03e44964e42b7e0101f5f68499c",
"expression": "fuzzytime(/DB Backup/dbbackup.datetime,172800s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,259200s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,345600s)<>0 and fuzzytime(/DB Backup/dbbackup.datetime,432800s)=0",
"name": "DB Backup: No Backups occurred in 5 days or more",
"priority": "HIGH"
} }
] ]
}, },
{ {
"uuid": "8ec2b2f44ddf4f36b3dbb2aa15e3a32f", "uuid": "467dfec952b34f5aa4cc890b4351b62d",
"name": "[{#NAME}] Backup - Duration", "name": "DB Backup: Backup Size",
"type": "TRAP", "type": "TRAP",
"key": "dbbackup.backup.duration.[{#NAME}]", "key": "dbbackup.size",
"delay": "0", "delay": "0",
"history": "7d", "history": "7d",
"units": "uptime", "units": "B",
"description": "How long the DB Backup job took", "request_method": "POST",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "3f0dc3c75261447c93482815c3d69524",
"name": "[{#NAME}] Encrypt - Duration",
"type": "TRAP",
"key": "dbbackup.backup.encrypt.duration.[{#NAME}]",
"delay": "0",
"history": "7d",
"units": "uptime",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "c3d5ad0789c443859d6a673e03db9cec",
"name": "[{#NAME}] Backup - Filename",
"type": "TRAP",
"key": "dbbackup.backup.filename.[{#NAME}]",
"delay": "0",
"history": "30d",
"trends": "0",
"value_type": "TEXT",
"tags": [
{
"tag": "Application",
"value": "DB Backup"
}
]
},
{
"uuid": "43b700c03897465eb7e49bbfe8fc9fc5",
"name": "[{#NAME}] Backup - Size",
"type": "TRAP",
"key": "dbbackup.backup.size.[{#NAME}]",
"delay": "0",
"history": "7d",
"description": "Backup Size",
"tags": [ "tags": [
{ {
"tag": "Application", "tag": "Application",
"value": "DB Backup" "value": "DB Backup"
} }
], ],
"trigger_prototypes": [ "triggers": [
{ {
"uuid": "849f8660bee04427aff55af47b6f509c", "uuid": "a41eb49b8a3541afb6de247dca750e38",
"expression": "last(/DB Backup4/dbbackup.backup.size.[{#NAME}])/last(/DB Backup4/dbbackup.backup.size.[{#NAME}],#2)>1.2", "expression": "last(/DB Backup/dbbackup.size)/last(/DB Backup/dbbackup.size,#2)>1.2",
"name": "[{#NAME}] Backup 20% Greater in size", "name": "DB Backup: 20% Greater in Size",
"priority": "WARNING", "priority": "WARNING",
"manual_close": "YES" "manual_close": "YES"
}, },
{ {
"uuid": "74d16a7680544c65af22cc568ce3d59d", "uuid": "422f66be5049403293f3d96fc53f20cd",
"expression": "last(/DB Backup4/dbbackup.backup.size.[{#NAME}])/last(/DB Backup4/dbbackup.backup.size.[{#NAME}],#2)<0.2", "expression": "last(/DB Backup/dbbackup.size)/last(/DB Backup/dbbackup.size,#2)<0.2",
"name": "[{#NAME}] Backup 20% Smaller in Size", "name": "DB Backup: 20% Smaller in Size",
"priority": "WARNING", "priority": "WARNING",
"manual_close": "YES" "manual_close": "YES"
}, },
{ {
"uuid": "5595d769c73f4eaeadda95c84c2c0f17", "uuid": "d6d9d875b92f4d799d4bc89aabd4e90e",
"expression": "last(/DB Backup4/dbbackup.backup.size.[{#NAME}])<1K", "expression": "last(/DB Backup/dbbackup.size)<1K",
"name": "[{#NAME}] Backup Empty", "name": "DB Backup: empty",
"priority": "HIGH", "priority": "HIGH"
"manual_close": "YES"
} }
] ]
}, },
{ {
"uuid": "a6fc542a565c4baba8429ed9ab31b5ae", "uuid": "a6b13e8b46a64abab64a4d44d620d272",
"name": "[{#NAME}] Backup - Status", "name": "DB Backup: Last Backup Status",
"type": "TRAP", "type": "TRAP",
"key": "dbbackup.backup.status.[{#NAME}]", "key": "dbbackup.status",
"delay": "0", "delay": "0",
"history": "7d", "history": "7d",
"description": "Maps exit code by DB Backup procedure", "description": "Maps Exit Codes received by backup applications",
"valuemap": { "valuemap": {
"name": "Backup Status" "name": "DB Backup Status"
}, },
"tags": [ "tags": [
{ {
@@ -212,77 +147,17 @@
"value": "DB Backup" "value": "DB Backup"
} }
], ],
"trigger_prototypes": [ "triggers": [
{ {
"uuid": "74b91e28453b4c2a84743f5e371495c1", "uuid": "23d71e356f96493180f02d4b84a79fd6",
"expression": "last(/DB Backup4/dbbackup.backup.status.[{#NAME}])=1", "expression": "last(/DB Backup/dbbackup.status)=1",
"name": "[{#NAME}] Backup - Failed with errors", "name": "DB Backup: Failed Backup Detected",
"priority": "WARNING", "priority": "HIGH",
"manual_close": "YES" "manual_close": "YES"
} }
] ]
} }
], ],
"graph_prototypes": [
{
"uuid": "b5e8e9fe0c474fedba2b06366234afdf",
"name": "[{#NAME}] Backup Duration",
"graph_items": [
{
"color": "199C0D",
"calc_fnc": "ALL",
"item": {
"host": "DB Backup4",
"key": "dbbackup.backup.duration.[{#NAME}]"
}
}
]
},
{
"uuid": "99b5deb4e28f40059c50846c7be2ef26",
"name": "[{#NAME}] Backup Size",
"graph_items": [
{
"color": "199C0D",
"calc_fnc": "ALL",
"item": {
"host": "DB Backup4",
"key": "dbbackup.backup.size.[{#NAME}]"
}
}
]
},
{
"uuid": "8c641e33659e4c8b866da64e252cfc2a",
"name": "[{#NAME}] Checksum Duration",
"graph_items": [
{
"color": "199C0D",
"calc_fnc": "ALL",
"item": {
"host": "DB Backup4",
"key": "dbbackup.backup.checksum.duration.[{#NAME}]"
}
}
]
},
{
"uuid": "65b8770f71ed4cff9111b82c42b17571",
"name": "[{#NAME}] Encrypt Duration",
"graph_items": [
{
"color": "199C0D",
"calc_fnc": "ALL",
"item": {
"host": "DB Backup4",
"key": "dbbackup.backup.encrypt.duration.[{#NAME}]"
}
}
]
}
]
}
],
"tags": [ "tags": [
{ {
"tag": "Service", "tag": "Service",
@@ -293,10 +168,38 @@
"value": "Database" "value": "Database"
} }
], ],
"dashboards": [
{
"uuid": "90c81bb47184401ca9663626784a6f30",
"name": "DB Backup",
"pages": [
{
"widgets": [
{
"type": "GRAPH_CLASSIC",
"name": "Backup Size",
"width": "23",
"height": "5",
"fields": [
{
"type": "GRAPH",
"name": "graphid",
"value": {
"name": "DB Backup: Backup Size",
"host": "DB Backup"
}
}
]
}
]
}
]
}
],
"valuemaps": [ "valuemaps": [
{ {
"uuid": "92a87279388b4fd1ac51c1e417e1776e", "uuid": "82f3a3d01b3c42b8942b59d2363724e0",
"name": "Backup Status", "name": "DB Backup Status",
"mappings": [ "mappings": [
{ {
"value": "0", "value": "0",
@@ -311,6 +214,36 @@
} }
] ]
} }
],
"graphs": [
{
"uuid": "6e02c200b76046bab76062cd1ab086b2",
"name": "DB Backup: Backup Duration",
"graph_items": [
{
"color": "199C0D",
"item": {
"host": "DB Backup",
"key": "dbbackup.backup_duration"
}
}
]
},
{
"uuid": "b881ee18f05c4f4c835982c9dfbb55d6",
"name": "DB Backup: Backup Size",
"type": "STACKED",
"graph_items": [
{
"sortorder": "1",
"color": "1A7C11",
"item": {
"host": "DB Backup",
"key": "dbbackup.size"
}
}
]
}
] ]
} }
} }