Compare commits

..

11 Commits

Author SHA1 Message Date
3wc
d998b61117 Don't duplicate stack name in vol path 2023-11-10 22:02:54 +00:00
3wc
c93d5c6f44 set.add() returns None 🤡 2023-11-10 22:00:35 +00:00
3wc
52e52a1e1d Set theory 2023-11-10 21:56:51 +00:00
3wc
771cf31824 Unglob 2023-11-10 21:55:21 +00:00
3wc
83834c6570 Whoops fix Mounts path 2023-11-10 21:53:34 +00:00
3wc
98b5f077e2 Allow selective path spec 2023-11-10 21:50:54 +00:00
3wc
ed687e52c3 Remove redundant stuff from entrypoint 2023-11-10 18:11:32 +00:00
3wc
cf06532da9 Whoops, wrong image 2023-11-10 18:09:50 +00:00
3wc
319deaba4b Switch to backup-bot-two image 2023-11-10 16:55:41 +00:00
3wc
3c44300a2e Whoops skip shellcheck 2023-11-10 14:53:48 +00:00
3wc
5ac3a48125 Reinstate Docker image 2023-11-10 14:52:59 +00:00
8 changed files with 91 additions and 170 deletions

View File

@ -4,17 +4,10 @@ SECRET_RESTIC_PASSWORD_VERSION=v1
COMPOSE_FILE=compose.yml COMPOSE_FILE=compose.yml
DOMAIN=backup-bot-two.example.com
RESTIC_REPOSITORY=/backups/restic RESTIC_REPOSITORY=/backups/restic
CRON_SCHEDULE='30 3 * * *' CRON_SCHEDULE='30 3 * * *'
# Push Notifications
#PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start
#PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK
#PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail
# swarm-cronjob, instead of built-in cron # swarm-cronjob, instead of built-in cron
#COMPOSE_FILE="$COMPOSE_FILE:compose.swarm-cronjob.yml" #COMPOSE_FILE="$COMPOSE_FILE:compose.swarm-cronjob.yml"

View File

@ -1,6 +0,0 @@
# Change log
## 2.0.0 (unreleased)
- Rewrite from Bash to Python
- Add support for push notifications (#24)

View File

@ -1,11 +1,10 @@
FROM docker:24.0.7-dind FROM docker:24.0.7-dind
RUN apk add --upgrade --no-cache restic bash python3 py3-pip py3-click py3-docker-py py3-json-logger curl RUN apk add --upgrade --no-cache restic bash python3 py3-pip
# Todo use requirements file with specific versions # Todo use requirements file with specific versions
RUN pip install --break-system-packages resticpy==1.0.2 RUN pip install click==8.1.7 docker==6.1.3 resticpy==1.0.2
COPY backupbot.py /usr/bin/backup COPY backupbot.py /usr/bin/backup
COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT /entrypoint.sh ENTRYPOINT /bin/bash

View File

@ -10,7 +10,7 @@ Automatically take backups from all volumes of running Docker Swarm services and
* **Category**: Utilities * **Category**: Utilities
* **Status**: 0, work-in-progress * **Status**: 0, work-in-progress
* **Image**: [`git.coopcloud.tech/coop-cloud/backup-bot-two`](https://git.coopcloud.tech/coop-cloud/-/packages/container/backup-bot-two), 4, upstream * **Image**: [`thecoopcloud/backup-bot-two`](https://hub.docker.com/r/thecoopcloud/backup-bot-two), 4, upstream
* **Healthcheck**: No * **Healthcheck**: No
* **Backups**: N/A * **Backups**: N/A
* **Email**: N/A * **Email**: N/A
@ -38,12 +38,12 @@ Backupbot II tries to help, by
* `abra app new backup-bot-two` * `abra app new backup-bot-two`
* `abra app config <app-name>` * `abra app config <app-name>`
- set storage options. Either configure `CRON_SCHEDULE`, or set up `swarm-cronjob` - set storage options. Either configure `CRON_SCHEDULE`, or set up `swarm-cronjob`
* `abra app secret generate -a <backupbot_name>` * `abra app secret generate -a <app_name>`
* `abra app deploy <app-name>` * `abra app deploy <app-name>`
## Configuration ## Configuration
Per default Backupbot stores the backups locally in the repository `/backups/restic`, which is accessible as volume at `/var/lib/docker/volumes/<backupbot_name>_backups/_data/restic/` Per default Backupbot stores the backups locally in the repository `/backups/restic`, which is accessible as volume at `/var/lib/docker/volumes/<app_name>_backups/_data/restic/`
The backup location can be changed using the `RESTIC_REPOSITORY` env variable. The backup location can be changed using the `RESTIC_REPOSITORY` env variable.
@ -57,7 +57,7 @@ AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
COMPOSE_FILE="$COMPOSE_FILE:compose.s3.yml" COMPOSE_FILE="$COMPOSE_FILE:compose.s3.yml"
``` ```
and add your `<SECRET_ACCESS_KEY>` as docker secret: and add your `<SECRET_ACCESS_KEY>` as docker secret:
`abra app secret insert <backupbot_name> aws_secret_access_key v1 <SECRET_ACCESS_KEY>` `abra app secret insert <app_name> aws_secret_access_key v1 <SECRET_ACCESS_KEY>`
See [restic s3 docs](https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#amazon-s3) for more information. See [restic s3 docs](https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#amazon-s3) for more information.
@ -79,10 +79,9 @@ Add the key to your `authorized_keys`:
`ssh-copy-id -i backupkey <user>@<hostname>` `ssh-copy-id -i backupkey <user>@<hostname>`
Add your `SSH_KEY` as docker secret: Add your `SSH_KEY` as docker secret:
``` ```
abra app secret insert <backupbot_name> ssh_key v1 """$(cat backupkey) abra app secret insert <app_name> ssh_key v1 """$(cat backupkey)
""" """
``` ```
> Attention: This command needs to be executed exactly as stated above, because it places a trailing newline at the end, if this is missing you will get the following error: `Load key "/run/secrets/ssh_key": error in libcrypto`
### Restic REST server Storage ### Restic REST server Storage
@ -95,81 +94,67 @@ COMPOSE_FILE="$COMPOSE_FILE:compose.secret.yml"
``` ```
Add your REST server url as secret: Add your REST server url as secret:
``` ```
`abra app secret insert <backupbot_name> restic_repo v1 "rest:https://user:pass@host:8000/"` `abra app secret insert <app_name> restic_repo v1 "rest:https://user:pass@host:8000/"`
``` ```
The secret will overwrite the `RESTIC_REPOSITORY` variable. The secret will overwrite the `RESTIC_REPOSITORY` variable.
See [restic REST docs](https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#rest-server) for more information. See [restic REST docs](https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#rest-server) for more information.
## Push notifications
The following env variables can be used to set up push notifications for backups. `PUSH_URL_START` is requested just before the backup starts, `PUSH_URL_SUCCESS` is only requested if the backup was successful and if the backup fails `PUSH_URL_FAIL` will be requested.
Each variable is optional and independent of the other.
```
PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start
PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK
PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail
```
## Usage ## Usage
Run the cronjob that creates a backup, including the push notifications and docker logging:
`abra app cmd <backupbot_name> app run_cron`
Create a backup of all apps: Create a backup of all apps:
`abra app run <backupbot_name> app -- backup create` `abra app run <app_name> app -- backup create`
> The apps to back up need to be deployed > The apps to back up need to be deployed
Create an individual backup: Create an individual backup:
`abra app run <backupbot_name> app -- backup --host <target_app_name> create` `abra app run <app_name> app -- backup --host <target_app_name> create`
Create a backup to a local repository: Create a backup to a local repository:
`abra app run <backupbot_name> app -- backup create -r /backups/restic` `abra app run <app_name> app -- backup create -r /backups/restic`
> It is recommended to shutdown/undeploy an app before restoring the data > It is recommended to shutdown/undeploy an app before restoring the data
Restore the latest snapshot of all included apps: Restore the latest snapshot of all included apps:
`abra app run <backupbot_name> app -- backup restore` `abra app run <app_name> app -- backup restore`
Restore a specific snapshot of an individual app: Restore a specific snapshot of an individual app:
`abra app run <backupbot_name> app -- backup --host <target_app_name> restore --snapshot <snapshot_id>` `abra app run <app_name> app -- backup --host <target_app_name> restore --snapshot <snapshot_id>`
Show all snapshots: Show all snapshots:
`abra app run <backupbot_name> app -- backup snapshots` `abra app run <app_name> app -- backup snapshots`
Show all snapshots containing a specific app: Show all snapshots containing a specific app:
`abra app run <backupbot_name> app -- backup --host <target_app_name> snapshots` `abra app run <app_name> app -- backup --host <target_app_name> snapshots`
Show all files inside the latest snapshot (can be very verbose): Show all files inside the latest snapshot (can be very verbose):
`abra app run <backupbot_name> app -- backup ls` `abra app run <app_name> app -- backup ls`
Show specific files inside a selected snapshot: Show specific files inside a selected snapshot:
`abra app run <backupbot_name> app -- backup ls --snapshot <snapshot_id> --path /var/lib/docker/volumes/` `abra app run <app_name> app -- backup ls --snapshot <snapshot_id> --path /var/lib/docker/volumes/`
Download files from a snapshot: Download files from a snapshot:
``` ```
filename=$(abra app run <backupbot_name> app -- backup download --snapshot <snapshot_id> --path <absolute_path>) filename=$(abra app run <app_name> app -- backup download --snapshot <snapshot_id> --path <absolute_path>)
abra app cp <backupbot_name> app:$filename . abra app cp <app_name> app:$filename .
``` ```
## Run restic ## Run restic
``` ```
abra app run <backupbot_name> app bash abra app run <app_name> app bash
export AWS_SECRET_ACCESS_KEY=$(cat $AWS_SECRET_ACCESS_KEY_FILE) export AWS_SECRET_ACCESS_KEY=$(cat $AWS_SECRET_ACCESS_KEY_FILE)
export RESTIC_PASSWORD=$(cat $RESTIC_PASSWORD_FILE) export RESTIC_PASSWORD=$(cat $RESTIC_PASSWORD_FILE)
restic snapshots restic snapshots

View File

@ -1,10 +1,3 @@
export ENTRYPOINT_VERSION=v2
export BACKUPBOT_VERSION=v1 export BACKUPBOT_VERSION=v1
export SSH_CONFIG_VERSION=v1 export SSH_CONFIG_VERSION=v1
run_cron () {
schedule="$(crontab -l | tr -s " " | cut -d ' ' -f-5)"
rm -f /tmp/backup.log
echo "* * * * * $(crontab -l | tr -s " " | cut -d ' ' -f6-)" | crontab -
while [ ! -f /tmp/backup.log ]; do sleep 1; done
echo "$schedule $(crontab -l | tr -s " " | cut -d ' ' -f6-)" | crontab -
}

View File

@ -1,7 +1,6 @@
#!/usr/bin/python3 #!/usr/bin/python3
import os import os
import sys
import click import click
import json import json
import subprocess import subprocess
@ -10,40 +9,22 @@ import docker
import restic import restic
import tarfile import tarfile
import io import io
from pythonjsonlogger import jsonlogger
from datetime import datetime, timezone from datetime import datetime, timezone
from restic.errors import ResticFailedError from restic.errors import ResticFailedError
from pathlib import Path from pathlib import Path
from shutil import copyfile, rmtree from shutil import copyfile, rmtree
# logging.basicConfig(level=logging.INFO)
VOLUME_PATH = "/var/lib/docker/volumes/" VOLUME_PATH = "/var/lib/docker/volumes/"
SECRET_PATH = '/secrets/' SECRET_PATH = '/secrets/'
SERVICE = None SERVICE = None
logger = logging.getLogger("backupbot")
logging.addLevelName(55, 'SUMMARY')
setattr(logging, 'SUMMARY', 55)
setattr(logger, 'summary', lambda message, *args, **
kwargs: logger.log(55, message, *args, **kwargs))
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.critical("Uncaught exception", exc_info=(
exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
@click.group() @click.group()
@click.option('-l', '--log', 'loglevel') @click.option('-l', '--log', 'loglevel')
@click.option('-m', '--machine-logs', 'machine_logs', is_flag=True)
@click.option('service', '--host', '-h', envvar='SERVICE') @click.option('service', '--host', '-h', envvar='SERVICE')
@click.option('repository', '--repo', '-r', envvar='RESTIC_REPOSITORY') @click.option('repository', '--repo', '-r', envvar='RESTIC_REPOSITORY', required=True)
def cli(loglevel, service, repository, machine_logs): def cli(loglevel, service, repository):
global SERVICE global SERVICE
if service: if service:
SERVICE = service.replace('.', '_') SERVICE = service.replace('.', '_')
@ -53,33 +34,22 @@ def cli(loglevel, service, repository, machine_logs):
numeric_level = getattr(logging, loglevel.upper(), None) numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int): if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel) raise ValueError('Invalid log level: %s' % loglevel)
logger.setLevel(numeric_level) logging.basicConfig(level=numeric_level)
logHandler = logging.StreamHandler()
if machine_logs:
formatter = jsonlogger.JsonFormatter(
"%(levelname)s %(filename)s %(lineno)s %(process)d %(message)s", rename_fields={"levelname": "message_type"})
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)
export_secrets() export_secrets()
init_repo() init_repo()
def init_repo(): def init_repo():
if repo:= os.environ.get('RESTIC_REPOSITORY_FILE'): repo = os.environ['RESTIC_REPOSITORY']
# RESTIC_REPOSITORY_FILE and RESTIC_REPOSITORY are mutually exclusive logging.debug(f"set restic repository location: {repo}")
del os.environ['RESTIC_REPOSITORY'] restic.repository = repo
else:
repo = os.environ['RESTIC_REPOSITORY']
restic.repository = repo
logger.debug(f"set restic repository location: {repo}")
restic.password_file = '/var/run/secrets/restic_password' restic.password_file = '/var/run/secrets/restic_password'
try: try:
restic.cat.config() restic.cat.config()
except ResticFailedError as error: except ResticFailedError as error:
if 'unable to open config file' in str(error): if 'unable to open config file' in str(error):
result = restic.init() result = restic.init()
logger.info(f"Initialized restic repo: {result}") logging.info(f"Initialized restic repo: {result}")
else: else:
raise error raise error
@ -87,28 +57,27 @@ def init_repo():
def export_secrets(): def export_secrets():
for env in os.environ: for env in os.environ:
if env.endswith('FILE') and not "COMPOSE_FILE" in env: if env.endswith('FILE') and not "COMPOSE_FILE" in env:
logger.debug(f"exported secret: {env}") logging.debug(f"exported secret: {env}")
with open(os.environ[env]) as file: with open(os.environ[env]) as file:
secret = file.read() secret = file.read()
os.environ[env.removesuffix('_FILE')] = secret os.environ[env.removesuffix('_FILE')] = secret
# logger.debug(f"Read secret value: {secret}") # logging.debug(f"Read secret value: {secret}")
@cli.command() @cli.command()
@click.option('retries', '--retries', '-r', envvar='RETRIES', default=1) def create():
def create(retries):
pre_commands, post_commands, backup_paths, apps = get_backup_cmds() pre_commands, post_commands, backup_paths, apps = get_backup_cmds()
copy_secrets(apps) copy_secrets(apps)
backup_paths.append(SECRET_PATH) backup_paths.append(SECRET_PATH)
run_commands(pre_commands) run_commands(pre_commands)
backup_volumes(backup_paths, apps, int(retries)) backup_volumes(backup_paths, apps)
run_commands(post_commands) run_commands(post_commands)
def get_backup_cmds(): def get_backup_cmds():
client = docker.from_env() client = docker.from_env()
container_by_service = { container_by_service = {
c.labels.get('com.docker.swarm.service.name'): c for c in client.containers.list()} c.labels['com.docker.swarm.service.name']: c for c in client.containers.list()}
backup_paths = set() backup_paths = set()
backup_apps = set() backup_apps = set()
pre_commands = {} pre_commands = {}
@ -116,18 +85,24 @@ def get_backup_cmds():
services = client.services.list() services = client.services.list()
for s in services: for s in services:
labels = s.attrs['Spec']['Labels'] labels = s.attrs['Spec']['Labels']
mounts = s.attrs['Spec']['TaskTemplate']['ContainerSpec']['Mounts']
if (backup := labels.get('backupbot.backup')) and bool(backup): if (backup := labels.get('backupbot.backup')) and bool(backup):
# volumes: s.attrs['Spec']['TaskTemplate']['ContainerSpec']['Mounts'][0]['Source']
stack_name = labels['com.docker.stack.namespace'] stack_name = labels['com.docker.stack.namespace']
# Remove these lines to back up only a specific service # Remove these lines to back up only a specific service
# This will unfortunately decrease restic performance # This will unfortunately decrease restic performance
# if SERVICE and SERVICE != stack_name: # if SERVICE and SERVICE != stack_name:
# continue # continue
backup_apps.add(stack_name) backup_apps.add(stack_name)
backup_paths = backup_paths.union( for mount in mounts:
Path(VOLUME_PATH).glob(f"{stack_name}_*")) if path := labels.get('backupbot.backup.path'):
path_ = Path(VOLUME_PATH) / f"{mount['Source']}/_data/{path}"
else:
path_ = Path(VOLUME_PATH) / f"{mount['Source']}"
logging.debug(
f"Added backup path {path_}")
backup_paths.add(path_)
if not (container := container_by_service.get(s.name)): if not (container := container_by_service.get(s.name)):
logger.error( logging.error(
f"Container {s.name} is not running, hooks can not be executed") f"Container {s.name} is not running, hooks can not be executed")
continue continue
if prehook := labels.get('backupbot.backup.pre-hook'): if prehook := labels.get('backupbot.backup.pre-hook'):
@ -138,30 +113,28 @@ def get_backup_cmds():
def copy_secrets(apps): def copy_secrets(apps):
# TODO: check if it is deployed #TODO: check if it is deployed
rmtree(SECRET_PATH, ignore_errors=True) rmtree(SECRET_PATH, ignore_errors=True)
os.mkdir(SECRET_PATH) os.mkdir(SECRET_PATH)
client = docker.from_env() client = docker.from_env()
container_by_service = { container_by_service = {
c.labels.get('com.docker.swarm.service.name'): c for c in client.containers.list()} c.labels['com.docker.swarm.service.name']: c for c in client.containers.list()}
services = client.services.list() services = client.services.list()
for s in services: for s in services:
app_name = s.attrs['Spec']['Labels']['com.docker.stack.namespace'] app_name = s.attrs['Spec']['Labels']['com.docker.stack.namespace']
if (app_name in apps and if (app_name in apps and
(app_secs := s.attrs['Spec']['TaskTemplate']['ContainerSpec'].get('Secrets'))): (app_secs := s.attrs['Spec']['TaskTemplate']['ContainerSpec'].get('Secrets'))):
if not container_by_service.get(s.name): if not container_by_service.get(s.name):
logger.warning( logging.error(
f"Container {s.name} is not running, secrets can not be copied.") f"Container {s.name} is not running, secrets can not be copied.")
continue continue
container_id = container_by_service[s.name].id container_id = container_by_service[s.name].id
for sec in app_secs: for sec in app_secs:
src = f'/var/lib/docker/containers/{container_id}/mounts/secrets/{sec["SecretID"]}' src = f'/var/lib/docker/containers/{container_id}/mounts/secrets/{sec["SecretID"]}'
if not Path(src).exists(): if not Path(src).exists():
logger.error( logging.error(f"For the secret {sec['SecretName']} the file {src} does not exist for {s.name}")
f"For the secret {sec['SecretName']} the file {src} does not exist for {s.name}")
continue continue
dst = SECRET_PATH + sec['SecretName'] dst = SECRET_PATH + sec['SecretName']
logger.debug("Copy Secret {sec['SecretName']}")
copyfile(src, dst) copyfile(src, dst)
@ -170,44 +143,37 @@ def run_commands(commands):
if not command: if not command:
continue continue
# Remove bash/sh wrapping # Remove bash/sh wrapping
command = command.removeprefix('bash -c').removeprefix('sh -c').removeprefix(' ') command = command.removeprefix('bash -c').removeprefix('sh -c')
# Remove quotes surrounding the command # Remove quotes surrounding the command
if (len(command) >= 2 and command[0] == command[-1] and (command[0] == "'" or command[0] == '"')): if (len(command) >= 2 and command[0] == command[-1] and (command[0] == "'" or command[0] == '"')):
command = command[1:-1] command[1:-1]
# Use bash's pipefail to return exit codes inside a pipe to prevent silent failure # Use bash's pipefail to return exit codes inside a pipe to prevent silent failure
command = f"bash -c 'set -o pipefail;{command}'" command = f"bash -c 'set -o pipefail;{command}'"
logger.info(f"run command in {container.name}:") logging.info(f"run command in {container.name}:")
logger.info(command) logging.info(command)
result = container.exec_run(command) result = container.exec_run(command)
if result.exit_code: if result.exit_code:
logger.error( logging.error(
f"Failed to run command {command} in {container.name}: {result.output.decode()}") f"Failed to run command {command} in {container.name}: {result.output.decode()}")
else: else:
logger.info(result.output.decode()) logging.info(result.output.decode())
def backup_volumes(backup_paths, apps, retries, dry_run=False): def backup_volumes(backup_paths, apps, dry_run=False):
while True: try:
try: result = restic.backup(backup_paths, dry_run=dry_run, tags=apps)
logger.info("Start volume backup") print(result)
logger.debug(backup_paths) logging.info(result)
result = restic.backup(backup_paths, dry_run=dry_run, tags=apps) except ResticFailedError as error:
logger.summary("backup finished", extra=result) logging.error(f"Backup failed for {apps}. Could not Backup these paths: {backup_paths}")
return logging.error(error)
except ResticFailedError as error: exit(1)
logger.error(
f"Backup failed for {apps}. Could not Backup these paths: {backup_paths}")
logger.error(error, exc_info=True)
if retries > 0:
retries -= 1
else:
exit(1)
@cli.command() @cli.command()
@click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', default='latest') @click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', default='latest')
@click.option('target', '--target', '-t', envvar='TARGET', default='/') @click.option('target', '--target', '-t', envvar='TARGET', default='/')
@click.option('noninteractive', '--noninteractive', envvar='NONINTERACTIVE', is_flag=True) @click.option('noninteractive', '--noninteractive', envvar='NONINTERACTIVE', default=False)
def restore(snapshot, target, noninteractive): def restore(snapshot, target, noninteractive):
# Todo: recommend to shutdown the container # Todo: recommend to shutdown the container
service_paths = VOLUME_PATH service_paths = VOLUME_PATH
@ -215,7 +181,7 @@ def restore(snapshot, target, noninteractive):
service_paths = service_paths + f'{SERVICE}_*' service_paths = service_paths + f'{SERVICE}_*'
snapshots = restic.snapshots(snapshot_id=snapshot) snapshots = restic.snapshots(snapshot_id=snapshot)
if not snapshot: if not snapshot:
logger.error("No Snapshots with ID {snapshots}") logging.error("No Snapshots with ID {snapshots}")
exit(1) exit(1)
if not noninteractive: if not noninteractive:
snapshot_date = datetime.fromisoformat(snapshots[0]['time']) snapshot_date = datetime.fromisoformat(snapshots[0]['time'])
@ -227,13 +193,12 @@ def restore(snapshot, target, noninteractive):
f"THIS COMMAND WILL IRREVERSIBLY OVERWRITES {target}{service_paths.removeprefix('/')}") f"THIS COMMAND WILL IRREVERSIBLY OVERWRITES {target}{service_paths.removeprefix('/')}")
prompt = input("Type YES (uppercase) to continue: ") prompt = input("Type YES (uppercase) to continue: ")
if prompt != 'YES': if prompt != 'YES':
logger.error("Restore aborted") logging.error("Restore aborted")
exit(1) exit(1)
print(f"Restoring Snapshot {snapshot} of {service_paths} at {target}") print(f"Restoring Snapshot {snapshot} of {service_paths} at {target}")
# TODO: use tags if no snapshot is selected, to use a snapshot including SERVICE
result = restic.restore(snapshot_id=snapshot, result = restic.restore(snapshot_id=snapshot,
include=service_paths, target_dir=target) include=service_paths, target_dir=target)
logger.debug(result) logging.debug(result)
@cli.command() @cli.command()
@ -247,9 +212,8 @@ def snapshots():
if no_snapshots: if no_snapshots:
err_msg = "No Snapshots found" err_msg = "No Snapshots found"
if SERVICE: if SERVICE:
service_name = SERVICE.replace('_', '.') err_msg += f' for app {SERVICE}'
err_msg += f' for app {service_name}' logging.warning(err_msg)
logger.warning(err_msg)
@cli.command() @cli.command()
@ -273,10 +237,10 @@ def list_files(snapshot, path):
output = restic.internal.command_executor.execute(cmd) output = restic.internal.command_executor.execute(cmd)
except ResticFailedError as error: except ResticFailedError as error:
if 'no snapshot found' in str(error): if 'no snapshot found' in str(error):
err_msg = f'There is no snapshot "{snapshot}"' err_msg = f'There is no snapshot {snapshot}'
if SERVICE: if SERVICE:
err_msg += f' for the app "{SERVICE}"' err_msg += f'for the app {SERVICE}'
logger.error(err_msg) logging.error(err_msg)
exit(1) exit(1)
else: else:
raise error raise error
@ -288,8 +252,8 @@ def list_files(snapshot, path):
@cli.command() @cli.command()
@click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', default='latest') @click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', default='latest')
@click.option('path', '--path', '-p', envvar='INCLUDE_PATH') @click.option('path', '--path', '-p', envvar='INCLUDE_PATH')
@click.option('volumes', '--volumes', '-v', envvar='VOLUMES') @click.option('volumes', '--volumes', '-v', is_flag=True)
@click.option('secrets', '--secrets', '-c', is_flag=True, envvar='SECRETS') @click.option('secrets', '--secrets', '-c', is_flag=True)
def download(snapshot, path, volumes, secrets): def download(snapshot, path, volumes, secrets):
file_dumps = [] file_dumps = []
if not any([path, volumes, secrets]): if not any([path, volumes, secrets]):
@ -307,7 +271,7 @@ def download(snapshot, path, volumes, secrets):
file_dumps.append((binary_output, tarinfo)) file_dumps.append((binary_output, tarinfo))
if volumes: if volumes:
if not SERVICE: if not SERVICE:
logger.error("Please specify '--host' when using '--volumes'") logging.error("Please specify '--host' when using '--volumes'")
exit(1) exit(1)
files = list_files(snapshot, VOLUME_PATH) files = list_files(snapshot, VOLUME_PATH)
for f in files[1:]: for f in files[1:]:
@ -320,7 +284,7 @@ def download(snapshot, path, volumes, secrets):
file_dumps.append((binary_output, tarinfo)) file_dumps.append((binary_output, tarinfo))
if secrets: if secrets:
if not SERVICE: if not SERVICE:
logger.error("Please specify '--host' when using '--secrets'") logging.error("Please specify '--host' when using '--secrets'")
exit(1) exit(1)
filename = f"{SERVICE}.json" filename = f"{SERVICE}.json"
files = list_files(snapshot, SECRET_PATH) files = list_files(snapshot, SECRET_PATH)
@ -340,8 +304,7 @@ def download(snapshot, path, volumes, secrets):
for binary_output, tarinfo in file_dumps: for binary_output, tarinfo in file_dumps:
tar.addfile(tarinfo, fileobj=io.BytesIO(binary_output)) tar.addfile(tarinfo, fileobj=io.BytesIO(binary_output))
size = get_formatted_size('/tmp/backup.tar.gz') size = get_formatted_size('/tmp/backup.tar.gz')
print( print(f"Backup has been written to /tmp/backup.tar.gz with a size of {size}")
f"Backup has been written to /tmp/backup.tar.gz with a size of {size}")
def get_formatted_size(file_path): def get_formatted_size(file_path):
@ -362,7 +325,7 @@ def dump(snapshot, path):
print(f"Dumping {path} from snapshot '{snapshot}'") print(f"Dumping {path} from snapshot '{snapshot}'")
output = subprocess.run(cmd, capture_output=True) output = subprocess.run(cmd, capture_output=True)
if output.returncode: if output.returncode:
logger.error( logging.error(
f"error while dumping {path} from snapshot '{snapshot}': {output.stderr}") f"error while dumping {path} from snapshot '{snapshot}': {output.stderr}")
exit(1) exit(1)
return output.stdout return output.stdout

View File

@ -19,7 +19,11 @@ services:
- coop-cloud.${STACK_NAME}.version=0.1.0+latest - coop-cloud.${STACK_NAME}.version=0.1.0+latest
- coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-300} - coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-300}
- coop-cloud.backupbot.enabled=true - coop-cloud.backupbot.enabled=true
#entrypoint: ['tail', '-f','/dev/null'] configs:
- source: entrypoint
target: /entrypoint.sh
mode: 0555
entrypoint: ['/entrypoint.sh']
healthcheck: healthcheck:
test: "pgrep crond" test: "pgrep crond"
interval: 30s interval: 30s
@ -34,3 +38,8 @@ secrets:
volumes: volumes:
backups: backups:
configs:
entrypoint:
name: ${STACK_NAME}_entrypoint_${ENTRYPOINT_VERSION}
file: entrypoint.sh

19
entrypoint.sh Executable file → Normal file
View File

@ -1,6 +1,6 @@
#!/bin/sh #!/bin/sh
set -e set -e -o pipefail
if [ -n "$SSH_HOST_KEY" ] if [ -n "$SSH_HOST_KEY" ]
then then
@ -9,22 +9,7 @@ fi
cron_schedule="${CRON_SCHEDULE:?CRON_SCHEDULE not set}" cron_schedule="${CRON_SCHEDULE:?CRON_SCHEDULE not set}"
if [ -n "$PUSH_URL_START" ] echo "$cron_schedule backup create" | crontab -
then
push_start_notification="curl -s '$PUSH_URL_START' &&"
fi
if [ -n "$PUSH_URL_FAIL" ]
then
push_fail_notification="|| curl -s '$PUSH_URL_FAIL'"
fi
if [ -n "$PUSH_URL_SUCCESS" ]
then
push_notification=" && (grep -q 'backup finished' /tmp/backup.log && curl -s '$PUSH_URL_SUCCESS' $push_fail_notification)"
fi
echo "$cron_schedule $push_start_notification backup --machine-logs create 2>&1 | tee /tmp/backup.log $push_notification" | crontab -
crontab -l crontab -l
crond -f -d8 -L /dev/stdout crond -f -d8 -L /dev/stdout