forked from coop-cloud/backup-bot-two
initial rewrite
.env.sample
@@ -4,11 +4,9 @@ SECRET_RESTIC_PASSWORD_VERSION=v1
COMPOSE_FILE=compose.yml

SERVER_NAME=example.com
RESTIC_HOST=minio.example.com
RESTIC_REPO=/backups/restic

CRON_SCHEDULE='*/5 * * * *'
REMOVE_BACKUP_VOLUME_AFTER_UPLOAD=1

# swarm-cronjob, instead of built-in cron
#COMPOSE_FILE="$COMPOSE_FILE:compose.swarm-cronjob.yml"
abra.sh (2 changed lines)
@@ -1,2 +1,2 @@
export ENTRYPOINT_VERSION=v1
export BACKUP_VERSION=v1
export BACKUPBOT_VERSION=v1
backup.sh (119 lines, deleted)
@@ -1,119 +0,0 @@
#!/bin/bash

set -e

server_name="${SERVER_NAME:?SERVER_NAME not set}"

restic_password_file="${RESTIC_PASSWORD_FILE:?RESTIC_PASSWORD_FILE not set}"

restic_host="${RESTIC_HOST:?RESTIC_HOST not set}"

backup_paths=()

# shellcheck disable=SC2153
ssh_key_file="${SSH_KEY_FILE}"
s3_key_file="${AWS_SECRET_ACCESS_KEY_FILE}"

restic_repo=
restic_extra_options=

if [ -n "$ssh_key_file" ] && [ -f "$ssh_key_file" ]; then
  restic_repo="sftp:$restic_host:/$server_name"

  # Only check server against provided SSH_HOST_KEY, if set
  if [ -n "$SSH_HOST_KEY" ]; then
    tmpfile=$(mktemp)
    echo "$SSH_HOST_KEY" >>"$tmpfile"
    echo "using host key $SSH_HOST_KEY"
    ssh_options="-o 'UserKnownHostsFile $tmpfile'"
  elif [ "$SSH_HOST_KEY_DISABLE" = "1" ]; then
    echo "disabling SSH host key checking"
    ssh_options="-o 'StrictHostKeyChecking=No'"
  else
    echo "neither SSH_HOST_KEY nor SSH_HOST_KEY_DISABLE set"
  fi
  restic_extra_options="sftp.command=ssh $ssh_options -i $ssh_key_file $restic_host -s sftp"
fi

if [ -n "$s3_key_file" ] && [ -f "$s3_key_file" ] && [ -n "$AWS_ACCESS_KEY_ID" ]; then
  AWS_SECRET_ACCESS_KEY="$(cat "${s3_key_file}")"
  export AWS_SECRET_ACCESS_KEY
  restic_repo="s3:$restic_host:/$server_name"
fi

if [ -z "$restic_repo" ]; then
  echo "you must configure either SFTP or S3 storage, see README"
  exit 1
fi

echo "restic_repo: $restic_repo"

# Pre-bake-in some default restic options
_restic() {
  if [ -z "$restic_extra_options" ]; then
    # shellcheck disable=SC2068
    restic -p "$restic_password_file" \
      --quiet -r "$restic_repo" \
      $@
  else
    # shellcheck disable=SC2068
    restic -p "$restic_password_file" \
      --quiet -r "$restic_repo" \
      -o "$restic_extra_options" \
      $@
  fi
}

if [ -n "$SERVICES_OVERRIDE" ]; then
  # this is fine because docker service names should never include spaces or
  # glob characters
  # shellcheck disable=SC2206
  services=($SERVICES_OVERRIDE)
else
  mapfile -t services < <(docker service ls --format '{{ .Name }}')
fi

post_commands=()
if [[ \ $*\ != *\ --skip-backup\ * ]]; then

  for service in "${services[@]}"; do
    echo "service: $service"
    details=$(docker service inspect "$service" --format "{{ json .Spec.Labels }}")
    if echo "$details" | jq -r '.["backupbot.backup"]' | grep -q 'true'; then
      pre=$(echo "$details" | jq -r '.["backupbot.backup.pre-hook"]')
      post=$(echo "$details" | jq -r '.["backupbot.backup.post-hook"]')
      container=$(docker container ls -f "name=$service" --format '{{ .ID }}')
      stack_name=$(echo "$details" | jq -r '.["com.docker.stack.namespace"]')

      if [ "$pre" != "null" ]; then
        # run the precommand
        echo "executing precommand $pre in container $container"
        docker exec "$container" sh -c "$pre"
      fi
      if [ "$post" != "null" ]; then
        # append post command
        post_commands+=("docker exec $container sh -c \"$post\"")
      fi

      # add volume paths to backup path
      backup_paths+=(/var/lib/docker/volumes/"${stack_name}"_*)
    fi
  done

  # check if restic repo exists, initialise if not
  if [ -z "$(_restic cat config)" ] 2>/dev/null; then
    echo "initializing restic repo"
    _restic init
  fi
fi

if [[ \ $*\ != *\ --skip-upload\ * ]]; then
  echo "${backup_paths[@]}"
  _restic backup --host "$server_name" --tag coop-cloud "${backup_paths[@]}"
fi

# run post commands
for post in "${post_commands[@]}"; do
  echo "executing postcommand $post"
  eval "$post"
done
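The script above and the new backupbot.py added below discover their work from the same service labels: backupbot.backup, backupbot.backup.pre-hook and backupbot.backup.post-hook. As a hypothetical sketch only (the service name and dump commands are placeholders; recipes would normally set these as deploy labels in their own compose.yml), an already-deployed app could opt in like this:

    # placeholders throughout: adjust the dump commands and service name to the app
    docker service update \
      --label-add backupbot.backup=true \
      --label-add backupbot.backup.pre-hook='pg_dump -U postgres mydb > /tmp/dump.sql' \
      --label-add backupbot.backup.post-hook='rm -f /tmp/dump.sql' \
      myapp_example_com_db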
backupbot.py (new executable file, 154 lines)
@@ -0,0 +1,154 @@
#!/usr/bin/python3

import os
import click
import json
import subprocess
# todo json logging
import logging
import docker
import restic
from restic.errors import ResticFailedError
from pathlib import Path
logging.basicConfig(level=logging.INFO)

VOLUME_PATH = "/var/lib/docker/volumes/"
SERVICE = None

@click.group()
@click.option('-l', '--log', 'loglevel')
@click.option('service', '--host', '-h', envvar='SERVICE')
def cli(loglevel, service):
    global SERVICE
    if service:
        SERVICE = service.replace('.','_')
    if loglevel:
        numeric_level = getattr(logging, loglevel.upper(), None)
        if not isinstance(numeric_level, int):
            raise ValueError('Invalid log level: %s' % loglevel)
        logging.basicConfig(level=numeric_level)
    init_repo()


def init_repo():
    export_secrets()
    restic.repository = os.environ['RESTIC_REPO']
    restic.password_file = '/var/run/secrets/restic_password'
    try:
        restic.cat.config()
    except ResticFailedError as error:
        if 'unable to open config file' in str(error):
            result = restic.init()
            logging.info(f"Initialized restic repo: {result}")
        else:
            raise error

def export_secrets():
    for env in os.environ:
        if env.endswith('PASSWORD_FILE') or env.endswith('KEY_FILE'):
            logging.debug(f"exported secret: {env}")
            with open(os.environ[env]) as file:
                os.environ[env.removesuffix('_FILE')] = file.read()

@cli.command()
def create():
    pre_commands, post_commands, backup_paths, apps = get_backup_cmds()
    run_commands(pre_commands)
    backup_volumes(backup_paths, apps)
    run_commands(post_commands)

def get_backup_cmds():
    client = docker.from_env()
    containers = dict(map(lambda c: (
        c.labels['com.docker.swarm.service.name'], c), client.containers.list()))
    backup_paths = set()
    backup_apps = set()
    pre_commands = {}
    post_commands = {}
    services = client.services.list()
    for s in services:
        labels = s.attrs['Spec']['Labels']
        if (backup := labels.get('backupbot.backup')) and bool(backup):
            stack_name = labels['com.docker.stack.namespace']
            if SERVICE and SERVICE != stack_name:
                continue
            backup_apps.add(stack_name)
            container = containers[s.name]
            if prehook := labels.get('backupbot.backup.pre-hook'):
                pre_commands[container] = prehook
            if posthook := labels.get('backupbot.backup.post-hook'):
                post_commands[container] = posthook
            backup_paths = backup_paths.union(
                Path(VOLUME_PATH).glob(f"{stack_name}_*"))
    return pre_commands, post_commands, list(backup_paths), list(backup_apps)

def run_commands(commands):
    for container, command in commands.items():
        if not command:
            continue
        # Use bash's pipefail to return exit codes inside a pipe to prevent silent failure
        command = command.removeprefix('bash -c \'').removeprefix('sh -c \'')
        command = command.removesuffix('\'')
        command = f"bash -c 'set -o pipefail;{command}'"
        result = container.exec_run(command)
        logging.info(f"run command in {container.name}")
        logging.info(command)
        if result.exit_code:
            logging.error(result.output.decode())
        else:
            logging.info(result.output.decode())

def backup_volumes(backup_paths, apps, dry_run=False):
    result = restic.backup(backup_paths, dry_run=dry_run, tags=apps)
    logging.info(result)

@cli.command()
@click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', required=True)
def restore(snapshot):
    service_paths = f'/var/lib/docker/volumes/{SERVICE}_*'
    result = restic.restore(snapshot_id=snapshot, include=service_paths, target_dir='/')


@cli.command()
def snapshots():
    snapshots = restic.snapshots()
    for snap in snapshots:
        if not SERVICE or (tags := snap.get('tags')) and SERVICE in tags:
            print(snap['time'], snap['id'])

@cli.command()
@click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', required=True)
@click.option('path', '--path', '-p', envvar='INCLUDE_PATH')
def ls(snapshot, path):
    results = list_files(snapshot, path)
    for r in results:
        if r.get('path'):
            print(f"{r['ctime']}\t{r['path']}")

def list_files(snapshot, path):
    cmd = restic.cat.base_command() + ['ls', snapshot]
    if path:
        cmd.append(path)
    output = restic.internal.command_executor.execute(cmd)
    output = output.replace('}\n{', '}|{')
    results = list(map(json.loads, output.split('|')))
    return results

@cli.command()
@click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', required=True)
@click.option('path', '--path', '-p', envvar='INCLUDE_PATH')
def download(snapshot, path):
    files = list_files(snapshot, path)
    filetype = [f.get('type') for f in files if f.get('path') == path][0]
    cmd = restic.cat.base_command() + ['dump', snapshot, path]
    output = subprocess.run(cmd, capture_output=True).stdout
    filename = "/tmp/" + Path(path).name
    if filetype == 'dir':
        filename = filename + ".tar"
    with open(filename, "wb") as file:
        file.write(output)
    print(filename)


if __name__ == '__main__':
    cli()
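The compose.yml changes below mount this script into the container as /backup with mode 0555, so the click commands defined above can be driven roughly as in the sketch below; the app host and snapshot id are placeholders, and the cron entry in entrypoint.sh still calls the old /backup.sh (the backupbot.py line there is commented out):

    # sketch only: run from inside the backupbot container, where the script is mounted as /backup
    /backup create                                  # run pre-hooks, back up labelled volumes, run post-hooks
    /backup snapshots                               # list all snapshots
    /backup --host myapp.example.com snapshots      # only snapshots tagged for that app
    /backup --host myapp.example.com restore -s <snapshot-id>
    /backup ls -s <snapshot-id> -p /var/lib/docker/volumes/
    /backup download -s <snapshot-id> -p <path-inside-snapshot>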
compose.yml (18 changed lines)
@@ -6,14 +6,11 @@ services:
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
      - "/var/lib/docker/volumes/:/var/lib/docker/volumes/:ro"
      - backups:/backups
    environment:
      - CRON_SCHEDULE
      - RESTIC_REPO
      - RESTIC_PASSWORD_FILE=/run/secrets/restic_password
      - BACKUP_DEST=/backups
      - RESTIC_HOST
      - SERVER_NAME
      - REMOVE_BACKUP_VOLUME_AFTER_UPLOAD=1
    secrets:
      - restic_password
    deploy:
@@ -23,8 +20,8 @@ services:
      - source: entrypoint
        target: /entrypoint.sh
        mode: 0555
      - source: backup
        target: /backup.sh
      - source: backupbot
        target: /backup
        mode: 0555
    entrypoint: ['/entrypoint.sh']

@@ -33,10 +30,13 @@ secrets:
    external: true
    name: ${STACK_NAME}_restic_password_${SECRET_RESTIC_PASSWORD_VERSION}

volumes:
  backups:

configs:
  entrypoint:
    name: ${STACK_NAME}_entrypoint_${ENTRYPOINT_VERSION}
    file: entrypoint.sh
  backup:
    name: ${STACK_NAME}_backup_${BACKUP_VERSION}
    file: backup.sh
  backupbot:
    name: ${STACK_NAME}_backupbot_${BACKUPBOT_VERSION}
    file: backupbot.py
entrypoint.sh
@@ -1,12 +1,15 @@
#!/bin/sh

set -e
set -eu

apk add --upgrade --no-cache bash curl jq restic
apk add --upgrade --no-cache bash curl jq restic python3 py3-pip

# Todo use requirements file
pip install click docker resticpy

cron_schedule="${CRON_SCHEDULE:?CRON_SCHEDULE not set}"

echo "$cron_schedule /backup.sh" | crontab -
#echo "$cron_schedule /backupbot.py" | crontab -
crontab -l

crond -f -d8 -L /dev/stdout