4 Commits

Author SHA1 Message Date
e360c3d8f8 remove unused traefik labels 2023-06-13 14:34:53 +02:00
50b317c12d fix shellcheck prevent globbing 2023-06-13 11:57:42 +02:00
9cb3a469f3 mount volume ro 2023-06-13 11:48:25 +02:00
24d2c0e85b Backup volumes from host instead of copying paths
* Backupbot will now copy all volumes from a service with
  backupbot.enabled = 'true' label from the /var/lib/docker/volumes/
  path directly. This reduces the resource overhead of copying
  stuff from one volume to another.
  Recipes need to be adjusted so that db-dumps are saved into a
  volume now!
* Remove the Dockerfile and move stuff into an entrypoint. This
  simplifies the whole versioning thing and makes this "just"
  a recipe

Co-authored-by: Moritz <moritz.m@local-it.org>
2023-06-05 11:15:54 +02:00
10 changed files with 51 additions and 130 deletions

View File

@ -7,38 +7,6 @@ steps:
commands: commands:
- shellcheck backup.sh - shellcheck backup.sh
- name: publish image
image: plugins/docker
settings:
username: 3wordchant
password:
from_secret: git_coopcloud_tech_token_3wc
repo: git.coopcloud.tech/coop-cloud/backup-bot-two
tags: 1.0.0
registry: git.coopcloud.tech
depends_on:
- run shellcheck
when:
event:
exclude:
- pull_request
trigger: trigger:
branch: branch:
- bb2-classic - main
---
kind: pipeline
name: generate recipe catalogue
steps:
- name: release a new version
image: plugins/downstream
settings:
server: https://build.coopcloud.tech
token:
from_secret: drone_abra-bot_token
fork: true
repositories:
- coop-cloud/auto-recipes-catalogue-json
trigger:
event: tag

View File

@ -22,8 +22,3 @@ REMOVE_BACKUP_VOLUME_AFTER_UPLOAD=1
#SECRET_AWS_SECRET_ACCESS_KEY_VERSION=v1 #SECRET_AWS_SECRET_ACCESS_KEY_VERSION=v1
#AWS_ACCESS_KEY_ID=something-secret #AWS_ACCESS_KEY_ID=something-secret
#COMPOSE_FILE="$COMPOSE_FILE:compose.s3.yml" #COMPOSE_FILE="$COMPOSE_FILE:compose.s3.yml"
# HTTPS storage
#SECRET_HTTPS_PASSWORD_VERSION=v1
#COMPOSE_FILE="$COMPOSE_FILE:compose.https.yml"
#RESTIC_USER=<somebody>

View File

@ -10,8 +10,6 @@ export DOCKER_CONTEXT=$SERVER_NAME
# or this: # or this:
#export AWS_SECRET_ACCESS_KEY_FILE=s3 #export AWS_SECRET_ACCESS_KEY_FILE=s3
#export AWS_ACCESS_KEY_ID=easter-october-emphatic-tug-urgent-customer #export AWS_ACCESS_KEY_ID=easter-october-emphatic-tug-urgent-customer
# or this:
#export HTTPS_PASSWORD_FILE=/run/secrets/https_password
# optionally limit subset of services for testing # optionally limit subset of services for testing
#export SERVICES_OVERRIDE="ghost_domain_tld_app ghost_domain_tld_db" #export SERVICES_OVERRIDE="ghost_domain_tld_app ghost_domain_tld_db"

View File

@ -1,13 +0,0 @@
FROM docker:24.0.6-dind
RUN apk add --upgrade --no-cache \
bash \
curl \
jq \
restic
COPY backup.sh /usr/bin/backup.sh
COPY setup-cron.sh /usr/bin/setup-cron.sh
RUN chmod +x /usr/bin/backup.sh /usr/bin/setup-cron.sh
ENTRYPOINT [ "/usr/bin/setup-cron.sh" ]

View File

@ -4,7 +4,7 @@
_This Time, It's Easily Configurable_ _This Time, It's Easily Configurable_
Automatically take backups from running Docker Swarm services into a volume. Automatically take backups from all volumes of running Docker Swarm services and runs pre- and post commands.
## Background ## Background
@ -49,15 +49,13 @@ services:
db: db:
deploy: deploy:
labels: labels:
backupbot.backup: "true" backupbot.backup: ${BACKUP:-"true"}
backupbot.backup.pre-hook: 'mysqldump -u root -p"$(cat /run/secrets/db_root_password)" -f /tmp/dump/dump.db' backupbot.backup.pre-hook: 'mysqldump -u root -p"$(cat /run/secrets/db_root_password)" -f /volume_path/dump.db'
backupbot.backup.post-hook: "rm -rf /tmp/dump/dump.db" backupbot.backup.post-hook: "rm -rf /volume_path/dump.db"
backupbot.backup.path: "/tmp/dump/,/etc/foo/"
``` ```
- `backupbot.backup` -- set to `true` to back up this service (REQUIRED) - `backupbot.backup` -- set to `true` to back up this service (REQUIRED)
- `backupbot.backup.path` -- comma separated list of file paths within the service to copy (REQUIRED) - `backupbot.backup.pre-hook` -- command to run before copying files (optional), save all dumps into the volumes
- `backupbot.backup.pre-hook` -- command to run before copying files (optional)
- `backupbot.backup.post-hook` -- command to run after copying files (optional) - `backupbot.backup.post-hook` -- command to run after copying files (optional)
As in the above example, you can reference Docker Secrets, e.g. for looking up database passwords, by reading the files in `/run/secrets` directly. As in the above example, you can reference Docker Secrets, e.g. for looking up database passwords, by reading the files in `/run/secrets` directly.

2
abra.sh Normal file
View File

@ -0,0 +1,2 @@
export ENTRYPOINT_VERSION=v1
export BACKUP_VERSION=v1

View File

@ -1,18 +1,18 @@
#!/bin/bash #!/bin/bash
set -e
server_name="${SERVER_NAME:?SERVER_NAME not set}" server_name="${SERVER_NAME:?SERVER_NAME not set}"
restic_password_file="${RESTIC_PASSWORD_FILE:?RESTIC_PASSWORD_FILE not set}" restic_password_file="${RESTIC_PASSWORD_FILE:?RESTIC_PASSWORD_FILE not set}"
restic_host="${RESTIC_HOST:?RESTIC_HOST not set}" restic_host="${RESTIC_HOST:?RESTIC_HOST not set}"
backup_path="${BACKUP_DEST:?BACKUP_DEST not set}" backup_paths=()
# shellcheck disable=SC2153 # shellcheck disable=SC2153
ssh_key_file="${SSH_KEY_FILE}" ssh_key_file="${SSH_KEY_FILE}"
s3_key_file="${AWS_SECRET_ACCESS_KEY_FILE}" s3_key_file="${AWS_SECRET_ACCESS_KEY_FILE}"
# shellcheck disable=SC2153
https_password_file="${HTTPS_PASSWORD_FILE}"
restic_repo= restic_repo=
restic_extra_options= restic_extra_options=
@ -41,15 +41,8 @@ if [ -n "$s3_key_file" ] && [ -f "$s3_key_file" ] && [ -n "$AWS_ACCESS_KEY_ID" ]
restic_repo="s3:$restic_host:/$server_name" restic_repo="s3:$restic_host:/$server_name"
fi fi
if [ -n "$https_password_file" ] && [ -f "$https_password_file" ]; then
HTTPS_PASSWORD="$(cat "${https_password_file}")"
export HTTPS_PASSWORD
restic_user="${RESTIC_USER:?RESTIC_USER not set}"
restic_repo="rest:https://$restic_user:$HTTPS_PASSWORD@$restic_host"
fi
if [ -z "$restic_repo" ]; then if [ -z "$restic_repo" ]; then
echo "you must configure either SFTP, S3, or HTTPS storage, see README" echo "you must configure either SFTP or S3 storage, see README"
exit 1 exit 1
fi fi
@ -80,8 +73,8 @@ else
mapfile -t services < <(docker service ls --format '{{ .Name }}') mapfile -t services < <(docker service ls --format '{{ .Name }}')
fi fi
post_commands=()
if [[ \ $*\ != *\ --skip-backup\ * ]]; then if [[ \ $*\ != *\ --skip-backup\ * ]]; then
rm -rf "${backup_path}"
for service in "${services[@]}"; do for service in "${services[@]}"; do
echo "service: $service" echo "service: $service"
@ -89,36 +82,21 @@ if [[ \ $*\ != *\ --skip-backup\ * ]]; then
if echo "$details" | jq -r '.["backupbot.backup"]' | grep -q 'true'; then if echo "$details" | jq -r '.["backupbot.backup"]' | grep -q 'true'; then
pre=$(echo "$details" | jq -r '.["backupbot.backup.pre-hook"]') pre=$(echo "$details" | jq -r '.["backupbot.backup.pre-hook"]')
post=$(echo "$details" | jq -r '.["backupbot.backup.post-hook"]') post=$(echo "$details" | jq -r '.["backupbot.backup.post-hook"]')
path=$(echo "$details" | jq -r '.["backupbot.backup.path"]')
if [ "$path" = "null" ]; then
echo "ERROR: missing 'path' for $service"
continue # or maybe exit?
fi
container=$(docker container ls -f "name=$service" --format '{{ .ID }}') container=$(docker container ls -f "name=$service" --format '{{ .ID }}')
stack_name=$(echo "$details" | jq -r '.["com.docker.stack.namespace"]')
echo "backing up $service"
if [ "$pre" != "null" ]; then if [ "$pre" != "null" ]; then
# run the precommand # run the precommand
# shellcheck disable=SC2086 echo "executing precommand $pre in container $container"
docker exec "$container" sh -c "$pre" docker exec "$container" sh -c "$pre"
fi fi
# run the backup
for p in ${path//,/ }; do
# creates the parent folder, so `docker cp` has reliable behaviour no matter if $p ends with `/` or `/.`
dir=$backup_path/$service/$(dirname "$p")
test -d "$dir" || mkdir -p "$dir"
docker cp -a "$container:$p" "$dir/$(basename "$p")"
done
if [ "$post" != "null" ]; then if [ "$post" != "null" ]; then
# run the postcommand # append post command
# shellcheck disable=SC2086 post_commands+=("docker exec $container sh -c \"$post\"")
docker exec "$container" sh -c "$post"
fi fi
# add volume paths to backup path
backup_paths+=(/var/lib/docker/volumes/"${stack_name}"_*)
fi fi
done done
@ -130,10 +108,12 @@ if [[ \ $*\ != *\ --skip-backup\ * ]]; then
fi fi
if [[ \ $*\ != *\ --skip-upload\ * ]]; then if [[ \ $*\ != *\ --skip-upload\ * ]]; then
_restic backup --host "$server_name" --tag coop-cloud "$backup_path" echo "${backup_paths[@]}"
_restic backup --host "$server_name" --tag coop-cloud "${backup_paths[@]}"
fi
if [ "$REMOVE_BACKUP_VOLUME_AFTER_UPLOAD" -eq 1 ]; then # run post commands
echo "Cleaning up ${backup_path}" for post in "${post_commands[@]}"; do
rm -rf "${backup_path:?}"/* echo "executing postcommand $post"
fi eval "$post"
fi done

View File

@ -1,15 +0,0 @@
---
version: "3.8"
services:
app:
environment:
- HTTPS_PASSWORD_FILE=/run/secrets/https_password
- RESTIC_USER
secrets:
- source: https_password
mode: 0400
secrets:
https_password:
external: true
name: ${STACK_NAME}_https_password_${SECRET_HTTPS_PASSWORD_VERSION}

View File

@ -2,11 +2,10 @@
version: "3.8" version: "3.8"
services: services:
app: app:
image: git.coopcloud.tech:1.0.0 image: docker:24.0.2-dind
# build: .
volumes: volumes:
- "/var/run/docker.sock:/var/run/docker.sock" - "/var/run/docker.sock:/var/run/docker.sock"
- "backups:/backups" - "/var/lib/docker/volumes/:/var/lib/docker/volumes/:ro"
environment: environment:
- CRON_SCHEDULE - CRON_SCHEDULE
- RESTIC_REPO - RESTIC_REPO
@ -19,17 +18,25 @@ services:
- restic_password - restic_password
deploy: deploy:
labels: labels:
- "traefik.enable=true" - coop-cloud.${STACK_NAME}.version=0.1.0+latest
- "traefik.http.services.${STACK_NAME}.loadbalancer.server.port=8008" configs:
- "traefik.http.routers.${STACK_NAME}.rule=" - source: entrypoint
- "traefik.http.routers.${STACK_NAME}.entrypoints=web-secure" target: /entrypoint.sh
- "traefik.http.routers.${STACK_NAME}.tls.certresolver=${LETS_ENCRYPT_ENV}" mode: 0555
- coop-cloud.${STACK_NAME}.version=0.2.0+1.0.0 - source: backup
target: /backup.sh
volumes: mode: 0555
backups: entrypoint: ['/entrypoint.sh']
secrets: secrets:
restic_password: restic_password:
external: true external: true
name: ${STACK_NAME}_restic_password_${SECRET_RESTIC_PASSWORD_VERSION} name: ${STACK_NAME}_restic_password_${SECRET_RESTIC_PASSWORD_VERSION}
configs:
entrypoint:
name: ${STACK_NAME}_entrypoint_${ENTRYPOINT_VERSION}
file: entrypoint.sh
backup:
name: ${STACK_NAME}_backup_${BACKUP_VERSION}
file: backup.sh

View File

@ -1,11 +1,12 @@
#!/bin/bash #!/bin/sh
set -e set -e
set -o pipefail
apk add --upgrade --no-cache bash curl jq restic
cron_schedule="${CRON_SCHEDULE:?CRON_SCHEDULE not set}" cron_schedule="${CRON_SCHEDULE:?CRON_SCHEDULE not set}"
echo "$cron_schedule /usr/bin/backup.sh" | crontab - echo "$cron_schedule /backup.sh" | crontab -
crontab -l crontab -l
crond -f -d8 -L /dev/stdout crond -f -d8 -L /dev/stdout