Compare commits

...

21 Commits

Author SHA1 Message Date
54e32ab422
backupbot formatting
All checks were successful
continuous-integration/drone/push Build is passing
2025-05-14 12:26:20 +02:00
4cda3c1018
make some logoutput more useful
All checks were successful
continuous-integration/drone/push Build is passing
2025-04-24 01:13:16 +02:00
f7f46d7b7b Merge pull request 'feat: Adds monitoring setup for prometheus push gateway' (#69) from prom-mon into main
All checks were successful
continuous-integration/drone/push Build is passing
Reviewed-on: #69
2025-01-14 13:34:34 +00:00
c1902b2dbc feat: Adds monitoring setup for prometheus push gateway
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-12-30 14:08:05 +01:00
f40eb00435 Cleaner output for snapshots closes #63
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-29 12:29:35 +01:00
3eea69ddee fix(restore): don't restore everything if no backup_path is given.
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-28 17:24:41 +01:00
f1661c04e7 refactor(ls): INCLUDE_PATH parsing via click
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-28 17:01:52 +01:00
4b4371ed3f add restore-path argument for undeployed apps closes #59 2024-10-28 17:01:14 +01:00
1214f59c79 expose MACHINE_LOGS flag as env
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-28 16:08:56 +01:00
8798e2feb5 abra.sh: remove old BACKUPBOT_VERSION
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-24 18:26:36 +02:00
119787ed39 chore: publish 2.3.0+2.3.0-beta release
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2024-10-24 18:23:32 +02:00
141bedb069 feat(ls): add --timestamps flag #37
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-24 18:09:33 +02:00
14b55bbc79 feat(ls): default to /var/lib/docker/volumes/ if no path is given #37
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-24 17:58:38 +02:00
ebcb0d42c5 feat(ls): default to show selected paths, --all flag to show all #37
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-24 17:54:34 +02:00
dccc93ac6b optimize logging 2024-10-24 17:44:54 +02:00
826bec925f add example pg_backup.sh script
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-24 17:03:02 +02:00
49dd989302 update README
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-22 22:01:18 +02:00
2f965a93dc fix: select correct latest snapshot to restore 2024-10-22 21:30:12 +02:00
4054d3417e fix backup label parsing 2024-10-22 17:24:20 +02:00
f8cfcef029 refactor: move latest snapshot checking 2024-10-22 14:18:46 +02:00
4a49c4a7f0 fix download command / dump function 2024-10-22 14:18:15 +02:00
10 changed files with 465 additions and 201 deletions

View File

@ -9,10 +9,16 @@ RESTIC_REPOSITORY=/backups/restic
CRON_SCHEDULE='30 3 * * *' CRON_SCHEDULE='30 3 * * *'
# Push Notifiactions # Push Notifiactions
#PUSH_PROMETHEUS_URL=https://pushgateway.example.com/metrics/job/backup
# or
#PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start #PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start
#PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK #PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK
#PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail #PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail
# Push Basic Auth
#COMPOSE_FILE="$COMPOSE_FILE:compose.pushbasicauth.yml"
#SECRET_PUSH_BASICAUTH=v1
# swarm-cronjob, instead of built-in cron # swarm-cronjob, instead of built-in cron
#COMPOSE_FILE="$COMPOSE_FILE:compose.swarm-cronjob.yml" #COMPOSE_FILE="$COMPOSE_FILE:compose.swarm-cronjob.yml"

2
.gitignore vendored
View File

@ -1 +1 @@
/testing .venv

View File

@ -104,15 +104,38 @@ See [restic REST docs](https://restic.readthedocs.io/en/latest/030_preparing_a_n
## Push notifications ## Push notifications
It is possible to configure three push events, that may trigger on the backup cronjob. Those can be used to detect failures from mointoring systems.
The events are:
- start
- success
- fail
### Using a Prometheus Push Gateway
[A prometheus push gateway](https://git.coopcloud.tech/coop-cloud/monitoring-ng#setup-push-gateway) can be used by setting the following env variables:
- `PUSH_PROMETHEUS_URL=pushgateway.example.com/metrics/job/backup`
### Using custom URLs
The following env variables can be used to setup push notifications for backups. `PUSH_URL_START` is requested just before the backups starts, `PUSH_URL_SUCCESS` is only requested if the backup was successful and if the backup fails `PUSH_URL_FAIL` will be requested. The following env variables can be used to setup push notifications for backups. `PUSH_URL_START` is requested just before the backups starts, `PUSH_URL_SUCCESS` is only requested if the backup was successful and if the backup fails `PUSH_URL_FAIL` will be requested.
Each variable is optional and independent of the other. Each variable is optional and independent of the other.
```
```
PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start
PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK
PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail
``` ```
### Push endpoint behind basic auth
Insert the basic auth secret
`abra app secret insert <backupbot_name> push_basicauth v1 "user:password"`
Enable basic auth in the env file, by uncommenting the following line:
```
#COMPOSE_FILE="$COMPOSE_FILE:compose.pushbasicauth.yml"
#SECRET_PUSH_BASICAUTH=v1
```
## Usage ## Usage
@ -157,7 +180,7 @@ Show all files inside the latest snapshot (can be very verbose):
Show specific files inside a selected snapshot: Show specific files inside a selected snapshot:
`abra app run <backupbot_name> app -- backup ls --snapshot <snapshot_id> --path /var/lib/docker/volumes/` `abra app run <backupbot_name> app -- backup ls --snapshot <snapshot_id> /var/lib/docker/volumes/`
Download files from a snapshot: Download files from a snapshot:
@ -179,19 +202,55 @@ restic snapshots
Like Traefik, or `swarm-cronjob`, Backupbot II uses access to the Docker socket to read labels from running Docker Swarm services: Like Traefik, or `swarm-cronjob`, Backupbot II uses access to the Docker socket to read labels from running Docker Swarm services:
1. Add `ENABLE_BACKUPS=true` to .env.sample
2. Add backupbot labels to the compose file
``` ```
services: services:
db: db:
deploy: deploy:
labels: labels:
backupbot.backup: ${BACKUP:-"true"} backupbot.backup: "${ENABLE_BACKUPS:-true}"
backupbot.backup.pre-hook: 'mysqldump -u root -p"$(cat /run/secrets/db_root_password)" -f /volume_path/dump.db' backupbot.backup.pre-hook: "/pg_backup.sh backup"
backupbot.backup.post-hook: "rm -rf /volume_path/dump.db" backupbot.backup.volumes.db.path: "backup.sql"
backupbot.restore.post-hook: '/pg_backup.sh restore'
backupbot.backup.volumes.redis: "false"
``` ```
- `backupbot.backup` -- set to `true` to back up this service (REQUIRED) - `backupbot.backup` -- set to `true` to back up this service (REQUIRED)
- `backupbot.backup.pre-hook` -- command to run before copying files (optional), save all dumps into the volumes - this is the only required backup label, per default it will backup all volumes
- `backupbot.backup.post-hook` -- command to run after copying files (optional) - `backupbot.backup.volumes.<volume_name>.path` -- only backup the listed relative paths from `<volume_name>`
- `backupbot.backup.volumes.<volume_name>: false` -- exclude <volume_name> from the backup
- `backupbot.backup.pre-hook` -- command to run before copying files
- i.e. save all database dumps into the volumes
- `backupbot.backup.post-hook` -- command to run after copying files
- `backupbot.restore.pre-hook` -- command to run before restoring files
- `backupbot.restore.post-hook` -- command to run after restoring files
- i.e. read all database dumps from the volumes
3. (Optional) add backup/restore scripts to the compose file
```
services:
db:
configs:
- source: pg_backup
target: /pg_backup.sh
mode: 0555
configs:
pg_backup:
name: ${STACK_NAME}_pg_backup_${PG_BACKUP_VERSION}
file: pg_backup.sh
```
Version the config file in `abra.sh`:
```
export PG_BACKUP_VERSION=v1
```
As in the above example, you can reference Docker Secrets, e.g. for looking up database passwords, by reading the files in `/run/secrets` directly. As in the above example, you can reference Docker Secrets, e.g. for looking up database passwords, by reading the files in `/run/secrets` directly.

View File

@ -1,5 +1,6 @@
export BACKUPBOT_VERSION=v1
export SSH_CONFIG_VERSION=v1 export SSH_CONFIG_VERSION=v1
export ENTRYPOINT_VERSION=v17
export CRONJOB_VERSION=v2
run_cron () { run_cron () {
schedule="$(crontab -l | tr -s " " | cut -d ' ' -f-5)" schedule="$(crontab -l | tr -s " " | cut -d ' ' -f-5)"

View File

@ -17,47 +17,53 @@ from pathlib import Path
from shutil import copyfile, rmtree from shutil import copyfile, rmtree
VOLUME_PATH = "/var/lib/docker/volumes/" VOLUME_PATH = "/var/lib/docker/volumes/"
SECRET_PATH = '/secrets/' SECRET_PATH = "/secrets/"
SERVICE = 'ALL' SERVICE = "ALL"
logger = logging.getLogger("backupbot") logger = logging.getLogger("backupbot")
logging.addLevelName(55, 'SUMMARY') logging.addLevelName(55, "SUMMARY")
setattr(logging, 'SUMMARY', 55) setattr(logging, "SUMMARY", 55)
setattr(logger, 'summary', lambda message, *args, ** setattr(
kwargs: logger.log(55, message, *args, **kwargs)) logger,
"summary",
lambda message, *args, **kwargs: logger.log(55, message, *args, **kwargs),
)
def handle_exception(exc_type, exc_value, exc_traceback): def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt): if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback) sys.__excepthook__(exc_type, exc_value, exc_traceback)
return return
logger.critical("Uncaught exception", exc_info=( logger.critical("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception sys.excepthook = handle_exception
@click.group() @click.group()
@click.option('-l', '--log', 'loglevel') @click.option("-l", "--log", "loglevel")
@click.option('-m', '--machine-logs', 'machine_logs', is_flag=True) @click.option(
@click.option('service', '--host', '-h', envvar='SERVICE') "-m", "--machine-logs", "machine_logs", is_flag=True, envvar="MACHINE_LOGS"
@click.option('repository', '--repo', '-r', envvar='RESTIC_REPOSITORY') )
@click.option("service", "--host", "-h", envvar="SERVICE")
@click.option("repository", "--repo", "-r", envvar="RESTIC_REPOSITORY")
def cli(loglevel, service, repository, machine_logs): def cli(loglevel, service, repository, machine_logs):
global SERVICE global SERVICE
if service: if service:
SERVICE = service.replace('.', '_') SERVICE = service.replace(".", "_")
if repository: if repository:
os.environ['RESTIC_REPOSITORY'] = repository os.environ["RESTIC_REPOSITORY"] = repository
if loglevel: if loglevel:
numeric_level = getattr(logging, loglevel.upper(), None) numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int): if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel) raise ValueError("Invalid log level: %s" % loglevel)
logger.setLevel(numeric_level) logger.setLevel(numeric_level)
logHandler = logging.StreamHandler() logHandler = logging.StreamHandler()
if machine_logs: if machine_logs:
formatter = jsonlogger.JsonFormatter( formatter = jsonlogger.JsonFormatter(
"%(levelname)s %(filename)s %(lineno)s %(process)d %(message)s", rename_fields={"levelname": "message_type"}) "%(levelname)s %(filename)s %(lineno)s %(process)d %(message)s",
rename_fields={"levelname": "message_type"},
)
logHandler.setFormatter(formatter) logHandler.setFormatter(formatter)
logger.addHandler(logHandler) logger.addHandler(logHandler)
@ -66,18 +72,18 @@ def cli(loglevel, service, repository, machine_logs):
def init_repo(): def init_repo():
if repo:= os.environ.get('RESTIC_REPOSITORY_FILE'): if repo := os.environ.get("RESTIC_REPOSITORY_FILE"):
# RESTIC_REPOSITORY_FILE and RESTIC_REPOSITORY are mutually exclusive # RESTIC_REPOSITORY_FILE and RESTIC_REPOSITORY are mutually exclusive
del os.environ['RESTIC_REPOSITORY'] del os.environ["RESTIC_REPOSITORY"]
else: else:
repo = os.environ['RESTIC_REPOSITORY'] repo = os.environ["RESTIC_REPOSITORY"]
restic.repository = repo restic.repository = repo
logger.debug(f"set restic repository location: {repo}") logger.debug(f"set restic repository location: {repo}")
restic.password_file = '/var/run/secrets/restic_password' restic.password_file = "/var/run/secrets/restic_password"
try: try:
restic.cat.config() restic.cat.config()
except ResticFailedError as error: except ResticFailedError as error:
if 'unable to open config file' in str(error): if "unable to open config file" in str(error):
result = restic.init() result = restic.init()
logger.info(f"Initialized restic repo: {result}") logger.info(f"Initialized restic repo: {result}")
else: else:
@ -86,19 +92,21 @@ def init_repo():
def export_secrets(): def export_secrets():
for env in os.environ: for env in os.environ:
if env.endswith('FILE') and not "COMPOSE_FILE" in env: if env.endswith("FILE") and not "COMPOSE_FILE" in env:
logger.debug(f"exported secret: {env}") logger.debug(f"exported secret: {env}")
with open(os.environ[env]) as file: with open(os.environ[env]) as file:
secret = file.read() secret = file.read()
os.environ[env.removesuffix('_FILE')] = secret os.environ[env.removesuffix("_FILE")] = secret
# logger.debug(f"Read secret value: {secret}") # logger.debug(f"Read secret value: {secret}")
@cli.command() @cli.command()
@click.option('retries', '--retries', '-r', envvar='RETRIES', default=1) @click.option("retries", "--retries", "-r", envvar="RETRIES", default=1)
def create(retries): def create(retries):
app_settings = parse_backup_labels() app_settings = parse_backup_labels()
pre_commands, post_commands, backup_paths, apps_versions = get_backup_details(app_settings) pre_commands, post_commands, backup_paths, apps_versions = get_backup_details(
app_settings
)
copy_secrets(apps_versions) copy_secrets(apps_versions)
backup_paths.append(Path(SECRET_PATH)) backup_paths.append(Path(SECRET_PATH))
run_commands(pre_commands) run_commands(pre_commands)
@ -107,24 +115,37 @@ def create(retries):
@cli.command() @cli.command()
@click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', default='latest') @click.option("snapshot_id", "--snapshot", "-s", envvar="SNAPSHOT", default="latest")
@click.option('target', '--target', '-t', envvar='TARGET', default='/') @click.option("target", "--target", "-t", envvar="TARGET", default="/")
@click.option('noninteractive', '--noninteractive', envvar='NONINTERACTIVE', is_flag=True) @click.option(
@click.option('volumes', '--volumes', '-v', envvar='VOLUMES', multiple=True) "noninteractive", "--noninteractive", envvar="NONINTERACTIVE", is_flag=True
@click.option('container', '--container', '-c', envvar='CONTAINER', multiple=True) )
@click.option('no_commands', '--no-commands', envvar='NO_COMMANDS', is_flag=True) @click.option("volumes", "--volumes", "-v", envvar="VOLUMES", multiple=True)
def restore(snapshot, target, noninteractive, volumes, container, no_commands): @click.option("container", "--container", "-c", envvar="CONTAINER", multiple=True)
app_settings = parse_backup_labels('restore', container) @click.option("no_commands", "--no-commands", envvar="NO_COMMANDS", is_flag=True)
if SERVICE != 'ALL': def restore(snapshot_id, target, noninteractive, volumes, container, no_commands):
app_settings = {SERVICE: app_settings[SERVICE]} app_settings = parse_backup_labels("restore", container)
pre_commands, post_commands, backup_paths, apps_versions = get_backup_details(app_settings, volumes) if SERVICE != "ALL":
snapshots = get_snapshots(snapshot_id=snapshot) if not app_settings.get(SERVICE):
if not snapshot: logger.error(
logger.error(f"No Snapshots with ID {snapshots} for {apps_versions.keys()} found.") f"The app {SERVICE} is not running, use the restore-path argument to restore paths of undeployed apps"
)
exit(1)
app_settings = {SERVICE: app_settings.get(SERVICE)}
pre_commands, post_commands, backup_paths, apps_versions = get_backup_details(
app_settings, volumes
)
snapshots = get_snapshots(snapshot_id)
if not snapshots:
logger.error(
f"No Snapshots with ID {snapshot_id} for {apps_versions.keys()} found."
)
exit(1) exit(1)
snapshot = snapshots[0]
snapshot_id = snapshot["short_id"]
if not noninteractive: if not noninteractive:
print(f"Snapshot to restore: \t{snapshot}") print(f"Snapshot to restore: \t{snapshot_id}")
restore_app_versions = app_versions_from_tags(snapshots[0].get('tags')) restore_app_versions = app_versions_from_tags(snapshot.get("tags"))
print("Apps:") print("Apps:")
for app, version in apps_versions.items(): for app, version in apps_versions.items():
restore_version = restore_app_versions.get(app) restore_version = restore_app_versions.get(app)
@ -133,49 +154,98 @@ def restore(snapshot, target, noninteractive, volumes, container, no_commands):
print(f"WARNING!!! The running app is deployed with version {version}") print(f"WARNING!!! The running app is deployed with version {version}")
print("The following volume paths will be restored:") print("The following volume paths will be restored:")
for p in backup_paths: for p in backup_paths:
print(f'\t{p}') print(f"\t{p}")
if not no_commands: if not no_commands:
print("The following commands will be executed:") print("The following commands will be executed:")
for container, cmd in list(pre_commands.items()) + list(post_commands.items()): for container, cmd in list(pre_commands.items()) + list(
post_commands.items()
):
print(f"\t{container.labels['com.docker.swarm.service.name']}:\t{cmd}") print(f"\t{container.labels['com.docker.swarm.service.name']}:\t{cmd}")
snapshot_date = datetime.fromisoformat(snapshots[0]['time']) snapshot_date = datetime.fromisoformat(snapshot["time"])
delta = datetime.now(tz=timezone.utc) - snapshot_date delta = datetime.now(tz=timezone.utc) - snapshot_date
print(f"This snapshot is {delta} old") print(f"This snapshot is {delta} old")
print("\nTHIS COMMAND WILL IRREVERSIBLY OVERWRITES FILES") print("\nTHIS COMMAND WILL IRREVERSIBLY OVERWRITES FILES")
prompt = input("Type YES (uppercase) to continue: ") prompt = input("Type YES (uppercase) to continue: ")
if prompt != 'YES': if prompt != "YES":
logger.error("Restore aborted") logger.error("Restore aborted")
exit(1) exit(1)
print(f"Restoring Snapshot {snapshot} at {target}") print(f"Restoring Snapshot {snapshot_id} at {target}")
if not no_commands and pre_commands: if not no_commands and pre_commands:
print(f"Run pre commands.") print(f"Run pre commands.")
run_commands(pre_commands) run_commands(pre_commands)
result = restic_restore(snapshot_id=snapshot, include=backup_paths, target_dir=target) if backup_paths:
result = restic_restore(
snapshot_id=snapshot_id, include=backup_paths, target_dir=target
)
logger.debug(result)
else:
print("No paths to restore.")
if not no_commands and post_commands: if not no_commands and post_commands:
print(f"Run post commands.") print(f"Run post commands.")
run_commands(post_commands) run_commands(post_commands)
@cli.command()
@click.option("snapshot_id", "--snapshot", "-s", envvar="SNAPSHOT", default="latest")
@click.option("target", "--target", "-t", envvar="TARGET", default="/")
@click.option(
"noninteractive", "--noninteractive", envvar="NONINTERACTIVE", is_flag=True
)
@click.argument("paths", nargs=-1, required=True, envvar="INCLUDE_PATH")
def restore_path(snapshot_id, target, noninteractive, paths):
"""PATHS: list of paths to restore"""
snapshots = get_snapshots(snapshot_id)
if not snapshots:
logger.error(f"No Snapshots with ID {snapshot_id} for app {SERVICE} found.")
exit(1)
snapshot = snapshots[0]
snapshot_id = snapshot["short_id"]
if not noninteractive:
print(f"Snapshot to restore: \t{snapshot_id}")
restore_app_versions = app_versions_from_tags(snapshot.get("tags"))
print("Apps:")
for app, version in restore_app_versions.items():
if SERVICE == "ALL" or SERVICE == app:
print(f"\t{app} \t {version}")
print("The following paths will be restored:")
for p in paths:
print(f"\t{p}")
snapshot_date = datetime.fromisoformat(snapshot["time"])
delta = datetime.now(tz=timezone.utc) - snapshot_date
print(f"This snapshot is {delta} old")
print("\nTHIS COMMAND WILL IRREVERSIBLY OVERWRITES FILES")
prompt = input("Type YES (uppercase) to continue: ")
if prompt != "YES":
logger.error("Restore aborted")
exit(1)
print(f"Restoring Snapshot {snapshot_id} at {target}")
result = restic_restore(snapshot_id=snapshot_id, include=paths, target_dir=target)
logger.debug(result) logger.debug(result)
def restic_restore(snapshot_id='latest', include=[], target_dir=None): def restic_restore(snapshot_id, include=[], target_dir=None):
cmd = restic.cat.base_command() + ['restore', snapshot_id] cmd = restic.cat.base_command() + ["restore", snapshot_id]
for path in include: for path in include:
cmd.extend(['--include', path]) cmd.extend(["--include", path])
if target_dir: if target_dir:
cmd.extend(['--target', target_dir]) cmd.extend(["--target", target_dir])
return restic.internal.command_executor.execute(cmd) return restic.internal.command_executor.execute(cmd)
def get_snapshots(snapshot_id=None): def get_snapshots(snapshot_id=None):
if snapshot_id and snapshot_id != 'latest': if snapshot_id and snapshot_id != "latest":
snapshots = restic.snapshots(snapshot_id=snapshot_id) snapshots = restic.snapshots(snapshot_id=snapshot_id)
if not SERVICE in app_versions_from_tags(snapshots[0].get('tags')): if not SERVICE in app_versions_from_tags(snapshots[0].get("tags")):
logger.error(f'Snapshot with ID {snapshot_id} does not contain {SERVICE}') logger.error(f"Snapshot with ID {snapshot_id} does not contain {SERVICE}")
exit(1) exit(1)
else: else:
snapshots = restic.snapshots() snapshots = restic.snapshots()
snapshots = list(filter(lambda x: SERVICE in app_versions_from_tags(x.get('tags')), snapshots)) snapshots = list(
if snapshot_id == 'latest': filter(
lambda x: SERVICE in app_versions_from_tags(x.get("tags")), snapshots
)
)
if snapshot_id == "latest":
return snapshots[-1:] return snapshots[-1:]
else: else:
return snapshots return snapshots
@ -183,50 +253,63 @@ def get_snapshots(snapshot_id=None):
def app_versions_from_tags(tags): def app_versions_from_tags(tags):
if tags: if tags:
app_versions = map(lambda x: x.split(':'), tags) app_versions = map(lambda x: x.split(":"), tags)
return {i[0]: i[1] if len(i) > 1 else None for i in app_versions} return {i[0]: i[1] if len(i) > 1 else None for i in app_versions}
else: else:
return {} return {}
def parse_backup_labels(hook_type='backup', selected_container=[]):
def str2bool(value: str) -> bool:
return value.lower() in ("yes", "true", "t", "1")
def parse_backup_labels(hook_type="backup", selected_container=[]):
client = docker.from_env() client = docker.from_env()
container_by_service = { container_by_service = {
c.labels.get('com.docker.swarm.service.name'): c for c in client.containers.list()} c.labels.get("com.docker.swarm.service.name"): c
for c in client.containers.list()
}
services = client.services.list() services = client.services.list()
app_settings = {} app_settings = {}
for s in services: for s in services:
specs = s.attrs['Spec'] specs = s.attrs["Spec"]
labels = specs['Labels'] labels = specs["Labels"]
stack_name = labels['com.docker.stack.namespace'] stack_name = labels["com.docker.stack.namespace"]
container_name = s.name.removeprefix(f"{stack_name}_") container_name = s.name.removeprefix(f"{stack_name}_")
version = labels.get(f'coop-cloud.{stack_name}.version') version = labels.get(f"coop-cloud.{stack_name}.version")
settings = app_settings[stack_name] = app_settings.get(stack_name) or {} settings = app_settings[stack_name] = app_settings.get(stack_name) or {}
if (backup := labels.get('backupbot.backup')) and bool(backup): if (backup := labels.get("backupbot.backup")) and str2bool(backup):
settings['enabled'] = True settings["enabled"] = True
if version: if version:
settings['version'] = version settings["version"] = version
if selected_container and container_name not in selected_container: if selected_container and container_name not in selected_container:
logger.debug(f"Skipping {s.name} because it's not a selected container") logger.debug(f"Skipping {s.name} because it's not a selected container")
continue continue
if mounts:= specs['TaskTemplate']['ContainerSpec'].get('Mounts'): if mounts := specs["TaskTemplate"]["ContainerSpec"].get("Mounts"):
volumes = parse_volumes(stack_name, mounts) volumes = parse_volumes(stack_name, mounts)
volumes.update(settings.get('volumes') or {}) volumes.update(settings.get("volumes") or {})
settings['volumes'] = volumes settings["volumes"] = volumes
excluded_volumes, included_volume_paths = parse_excludes_includes(labels) excluded_volumes, included_volume_paths = parse_excludes_includes(labels)
settings['excluded_volumes'] = excluded_volumes.union(settings.get('excluded_volumes') or set()) settings["excluded_volumes"] = excluded_volumes.union(
settings['included_volume_paths'] = included_volume_paths.union(settings.get('included_volume_paths') or set()) settings.get("excluded_volumes") or set()
)
settings["included_volume_paths"] = included_volume_paths.union(
settings.get("included_volume_paths") or set()
)
if container := container_by_service.get(s.name): if container := container_by_service.get(s.name):
if command := labels.get(f'backupbot.{hook_type}.pre-hook'): if command := labels.get(f"backupbot.{hook_type}.pre-hook"):
if not (pre_hooks:= settings.get('pre_hooks')): if not (pre_hooks := settings.get("pre_hooks")):
pre_hooks = settings['pre_hooks'] = {} pre_hooks = settings["pre_hooks"] = {}
pre_hooks[container] = command pre_hooks[container] = command
if command := labels.get(f'backupbot.{hook_type}.post-hook'): if command := labels.get(f"backupbot.{hook_type}.post-hook"):
if not (post_hooks:= settings.get('post_hooks')): if not (post_hooks := settings.get("post_hooks")):
post_hooks = settings['post_hooks'] = {} post_hooks = settings["post_hooks"] = {}
post_hooks[container] = command post_hooks[container] = command
else: else:
logger.debug(f"Container {s.name} is not running.") logger.debug(f"Container {s.name} is not running.")
if labels.get(f'backupbot.{hook_type}.pre-hook') or labels.get(f'backupbot.{hook_type}.post-hook'): if labels.get(f"backupbot.{hook_type}.pre-hook") or labels.get(
f"backupbot.{hook_type}.post-hook"
):
logger.error(f"Container {s.name} contain hooks but it's not running") logger.error(f"Container {s.name} contain hooks but it's not running")
return app_settings return app_settings
@ -234,47 +317,53 @@ def parse_backup_labels(hook_type='backup', selected_container=[]):
def get_backup_details(app_settings, volumes=[]): def get_backup_details(app_settings, volumes=[]):
backup_paths = set() backup_paths = set()
backup_apps_versions = {} backup_apps_versions = {}
pre_hooks= {} pre_hooks = {}
post_hooks = {} post_hooks = {}
for app, settings in app_settings.items(): for app, settings in app_settings.items():
if settings.get('enabled'): if settings.get("enabled"):
if SERVICE != 'ALL' and SERVICE != app: if SERVICE != "ALL" and SERVICE != app:
continue continue
backup_apps_versions[app] = settings.get('version') backup_apps_versions[app] = settings.get("version")
add_backup_paths(backup_paths, settings, app, volumes) add_backup_paths(backup_paths, settings, app, volumes)
if hooks:= settings.get('pre_hooks'): if hooks := settings.get("pre_hooks"):
pre_hooks.update(hooks) pre_hooks.update(hooks)
if hooks:= settings.get('post_hooks'): if hooks := settings.get("post_hooks"):
post_hooks.update(hooks) post_hooks.update(hooks)
return pre_hooks, post_hooks, list(backup_paths), backup_apps_versions return pre_hooks, post_hooks, list(backup_paths), backup_apps_versions
def add_backup_paths(backup_paths, settings, app, selected_volumes): def add_backup_paths(backup_paths, settings, app, selected_volumes):
if (volumes := settings.get('volumes')): if volumes := settings.get("volumes"):
if includes:= settings.get('included_volume_paths'): if includes := settings.get("included_volume_paths"):
included_volumes = list(zip(*includes))[0] included_volumes = list(zip(*includes))[0]
for volume, rel_paths in includes: for volume, rel_paths in includes:
if not (volume_path:= volumes.get(volume)): if not (volume_path := volumes.get(volume)):
logger.error(f'Can not find volume with the name {volume}') logger.error(
f"Can not find volume with the name {volume} for {app}"
)
continue continue
if selected_volumes and volume not in selected_volumes: if selected_volumes and volume not in selected_volumes:
logger.debug(f'Skipping {volume}:{rel_paths} because the volume is not selected') logger.debug(
f"Skipping {volume}:{rel_paths} because the volume is not selected"
)
continue continue
for p in rel_paths: for p in rel_paths:
absolute_path = Path(f"{volume_path}/{p}") absolute_path = Path(f"{volume_path}/{p}")
backup_paths.add(absolute_path) backup_paths.add(absolute_path)
else: else:
included_volumes = [] included_volumes = []
excluded_volumes = settings.get('excluded_volumes') or [] excluded_volumes = settings.get("excluded_volumes") or []
for name, path in volumes.items(): for name, path in volumes.items():
if selected_volumes and name not in selected_volumes: if selected_volumes and name not in selected_volumes:
logger.debug(f'Skipping volume: {name} because the volume is not selected') logger.debug(
f"Skipping volume: {name} because the volume is not selected"
)
continue continue
if name in excluded_volumes: if name in excluded_volumes:
logger.debug(f'Skipping volume: {name} because the volume is excluded') logger.debug(f"Skipping volume: {name} because the volume is excluded")
continue continue
if name in included_volumes: if name in included_volumes:
logger.debug(f'Skipping volume: {name} because a path is selected') logger.debug(f"Skipping volume: {name} because a path is selected")
continue continue
backup_paths.add(path) backup_paths.add(path)
else: else:
@ -284,10 +373,10 @@ def add_backup_paths(backup_paths, settings, app, selected_volumes):
def parse_volumes(stack_name, mounts): def parse_volumes(stack_name, mounts):
volumes = {} volumes = {}
for m in mounts: for m in mounts:
if m['Type'] != 'volume': if m["Type"] != "volume":
continue continue
relative_path = m['Source'] relative_path = m["Source"]
name = relative_path.removeprefix(stack_name + '_') name = relative_path.removeprefix(stack_name + "_")
absolute_path = Path(f"{VOLUME_PATH}{relative_path}/_data/") absolute_path = Path(f"{VOLUME_PATH}{relative_path}/_data/")
volumes[name] = absolute_path volumes[name] = absolute_path
return volumes return volumes
@ -297,12 +386,14 @@ def parse_excludes_includes(labels):
excluded_volumes = set() excluded_volumes = set()
included_volume_paths = set() included_volume_paths = set()
for label, value in labels.items(): for label, value in labels.items():
if label.startswith('backupbot.backup.volumes.'): if label.startswith("backupbot.backup.volumes."):
volume_name = label.removeprefix('backupbot.backup.volumes.').removesuffix('.path') volume_name = label.removeprefix("backupbot.backup.volumes.").removesuffix(
if label.endswith('path'): ".path"
relative_paths = tuple(value.split(',')) )
if label.endswith("path"):
relative_paths = tuple(value.split(","))
included_volume_paths.add((volume_name, relative_paths)) included_volume_paths.add((volume_name, relative_paths))
elif bool(value): elif not str2bool(value):
excluded_volumes.add(volume_name) excluded_volumes.add(volume_name)
return excluded_volumes, included_volume_paths return excluded_volumes, included_volume_paths
@ -313,24 +404,29 @@ def copy_secrets(apps):
os.mkdir(SECRET_PATH) os.mkdir(SECRET_PATH)
client = docker.from_env() client = docker.from_env()
container_by_service = { container_by_service = {
c.labels.get('com.docker.swarm.service.name'): c for c in client.containers.list()} c.labels.get("com.docker.swarm.service.name"): c
for c in client.containers.list()
}
services = client.services.list() services = client.services.list()
for s in services: for s in services:
app_name = s.attrs['Spec']['Labels']['com.docker.stack.namespace'] app_name = s.attrs["Spec"]["Labels"]["com.docker.stack.namespace"]
if (app_name in apps and if app_name in apps and (
(app_secs := s.attrs['Spec']['TaskTemplate']['ContainerSpec'].get('Secrets'))): app_secs := s.attrs["Spec"]["TaskTemplate"]["ContainerSpec"].get("Secrets")
):
if not container_by_service.get(s.name): if not container_by_service.get(s.name):
logger.warning( logger.warning(
f"Container {s.name} is not running, secrets can not be copied.") f"Container {s.name} is not running, secrets can not be copied."
)
continue continue
container_id = container_by_service[s.name].id container_id = container_by_service[s.name].id
for sec in app_secs: for sec in app_secs:
src = f'/var/lib/docker/containers/{container_id}/mounts/secrets/{sec["SecretID"]}' src = f"/var/lib/docker/containers/{container_id}/mounts/secrets/{sec['SecretID']}"
if not Path(src).exists(): if not Path(src).exists():
logger.error( logger.error(
f"For the secret {sec['SecretName']} the file {src} does not exist for {s.name}") f"For the secret {sec['SecretName']} the file {src} does not exist for {s.name}"
)
continue continue
dst = SECRET_PATH + sec['SecretName'] dst = SECRET_PATH + sec["SecretName"]
logger.debug(f"Copy Secret {sec['SecretName']}") logger.debug(f"Copy Secret {sec['SecretName']}")
copyfile(src, dst) copyfile(src, dst)
@ -340,9 +436,15 @@ def run_commands(commands):
if not command: if not command:
continue continue
# Remove bash/sh wrapping # Remove bash/sh wrapping
command = command.removeprefix('bash -c').removeprefix('sh -c').removeprefix(' ') command = (
command.removeprefix("bash -c").removeprefix("sh -c").removeprefix(" ")
)
# Remove quotes surrounding the command # Remove quotes surrounding the command
if (len(command) >= 2 and command[0] == command[-1] and (command[0] == "'" or command[0] == '"')): if (
len(command) >= 2
and command[0] == command[-1]
and (command[0] == "'" or command[0] == '"')
):
command = command[1:-1] command = command[1:-1]
# Use bash's pipefail to return exit codes inside a pipe to prevent silent failure # Use bash's pipefail to return exit codes inside a pipe to prevent silent failure
command = f"bash -c 'set -o pipefail;{command}'" command = f"bash -c 'set -o pipefail;{command}'"
@ -351,27 +453,30 @@ def run_commands(commands):
result = container.exec_run(command) result = container.exec_run(command)
if result.exit_code: if result.exit_code:
logger.error( logger.error(
f"Failed to run command {command} in {container.name}: {result.output.decode()}") f"Failed to run command {command} in {container.name}: {result.output.decode()}"
)
else: else:
logger.info(result.output.decode()) logger.debug(result.output.decode())
def backup_volumes(backup_paths, apps_versions, retries, dry_run=False): def backup_volumes(backup_paths, apps_versions, retries, dry_run=False):
while True: while True:
try: try:
logger.info("Backup these paths:") logger.info("Backup these paths:")
logger.debug("\n".join(map(str, backup_paths))) logger.info("\n".join(map(str, backup_paths)))
backup_paths = list(filter(path_exists, backup_paths)) backup_paths = list(filter(path_exists, backup_paths))
cmd = restic.cat.base_command() cmd = restic.cat.base_command()
parent = get_snapshots('latest') parent = get_snapshots("latest")
if parent: if parent:
# https://restic.readthedocs.io/en/stable/040_backup.html#file-change-detection # https://restic.readthedocs.io/en/stable/040_backup.html#file-change-detection
cmd.extend(['--parent', parent[0]['short_id']]) cmd.extend(["--parent", parent[0]["short_id"]])
tags = [f"{app}:{version}" for app,version in apps_versions.items()] tags = [f"{app}:{version}" for app, version in apps_versions.items()]
if SERVICE == 'ALL': if SERVICE == "ALL":
tags.append(SERVICE) tags.append(SERVICE)
logger.info("Start volume backup") logger.info("Start volume backup")
result = restic.internal.backup.run(cmd, backup_paths, dry_run=dry_run, tags=tags) result = restic.internal.backup.run(
cmd, backup_paths, dry_run=dry_run, tags=tags
)
logger.summary("backup finished", extra=result) logger.summary("backup finished", extra=result)
return return
except ResticFailedError as error: except ResticFailedError as error:
@ -385,7 +490,7 @@ def backup_volumes(backup_paths, apps_versions, retries, dry_run=False):
def path_exists(path):
    """Return True when ``path`` exists on disk; log an error otherwise.

    The existence check is performed exactly once so the logged state and
    the returned value can never disagree (the original stat'ed twice).
    """
    exists = path.exists()
    if not exists:
        logger.error(f"{path} does not exist")
    return exists
@ -393,119 +498,133 @@ def path_exists(path):
def snapshots():
    """Print one line per snapshot: timestamp (seconds precision), short id,
    and — when the snapshot carries a version tag for the selected app —
    that app's version. Warns when nothing matches."""
    snaps = get_snapshots()
    for snap in snaps:
        line = [snap["time"].split(".")[0], snap["short_id"]]
        tags = snap.get("tags")
        if tags:
            versions = app_versions_from_tags(tags)
            version = versions.get(SERVICE)
            if version:
                line.append(version)
        print(*line)
    if not snaps:
        message = "No Snapshots found"
        if SERVICE != "ALL":
            service_name = SERVICE.replace("_", ".")
            message += f" for app {service_name}"
        logger.warning(message)
@cli.command()
@click.option("snapshot", "--snapshot", "-s", envvar="SNAPSHOT", default="latest")
@click.option("show_all", "--all", "-a", envvar="SHOW_ALL", is_flag=True)
@click.option("timestamps", "--timestamps", "-t", envvar="TIMESTAMPS", is_flag=True)
@click.argument(
    "path", required=False, default="/var/lib/docker/volumes/", envvar="INCLUDE_PATH"
)
def ls(snapshot, show_all, timestamps, path):
    """List files contained in a snapshot, optionally with ctime timestamps."""
    if snapshot == "latest":
        latest = get_snapshots("latest")
        if not latest:
            logger.error(f"There is no latest snapshot for {SERVICE}")
            exit(1)
        snapshot = latest[0]["short_id"]
    # --all overrides the path filter and lists the whole snapshot.
    if show_all:
        path = None
    for entry in list_files(snapshot, path):
        if not entry.get("path"):
            continue
        if timestamps:
            print(f"{entry['ctime']}\t{entry['path']}")
        else:
            print(f"{entry['path']}")
def list_files(snapshot, path):
    """Return the parsed JSON entries of ``restic ls`` for ``snapshot``.

    When ``path`` is given, listing is restricted to that path. Exits with
    an error message when the snapshot does not exist for the selected app.
    """
    cmd = restic.cat.base_command() + ["ls", snapshot]
    if path:
        cmd.append(path)
    try:
        output = restic.internal.command_executor.execute(cmd)
    except ResticFailedError as error:
        if "no snapshot found" not in str(error):
            raise error
        err_msg = f'There is no snapshot "{snapshot}"'
        if SERVICE != "ALL":
            err_msg += f' for the app "{SERVICE}"'
        logger.error(err_msg)
        exit(1)
    # restic emits one JSON object per line; re-delimit and parse them all.
    joined = output.replace("}\n{", "}|{")
    return [json.loads(chunk) for chunk in joined.split("|")]
@cli.command()
@click.option("snapshot", "--snapshot", "-s", envvar="SNAPSHOT", default="latest")
@click.option("path", "--path", "-p", envvar="INCLUDE_PATH")
@click.option("volumes", "--volumes", "-v", envvar="VOLUMES")
@click.option("secrets", "--secrets", "-c", is_flag=True, envvar="SECRETS")
def download(snapshot, path, volumes, secrets):
    """Dump paths, volumes and/or secrets from a snapshot into
    /tmp/backup.tar.gz. With no selection flags, volumes and secrets
    are both exported."""
    file_dumps = []
    if snapshot == "latest":
        latest_snapshot = get_snapshots("latest")
        if not latest_snapshot:
            logger.error(f"There is no latest snapshot for {SERVICE}")
            exit(1)
        snapshot = latest_snapshot[0]["short_id"]
    if not any([path, volumes, secrets]):
        volumes = secrets = True
    if path:
        path = path.removesuffix("/")
        binary_output = dump(snapshot, path)
        files = list_files(snapshot, path)
        # NOTE(review): raises IndexError if the path is absent from the
        # snapshot listing — presumably dump() already exited in that case.
        filetype = [f.get("type") for f in files if f.get("path") == path][0]
        filename = Path(path).name
        if filetype == "dir":
            filename = filename + ".tar"
        tarinfo = tarfile.TarInfo(name=filename)
        tarinfo.size = len(binary_output)
        file_dumps.append((binary_output, tarinfo))
    if volumes:
        if SERVICE == "ALL":
            logger.error("Please specify '--host' when using '--volumes'")
            exit(1)
        files = list_files(snapshot, VOLUME_PATH)
        # files[0] is the VOLUME_PATH directory itself; only its children matter.
        for f in files[1:]:
            path = f["path"]
            if Path(path).name.startswith(SERVICE) and f["type"] == "dir":
                binary_output = dump(snapshot, path)
                filename = f"{Path(path).name}.tar"
                tarinfo = tarfile.TarInfo(name=filename)
                tarinfo.size = len(binary_output)
                file_dumps.append((binary_output, tarinfo))
    if secrets:
        if SERVICE == "ALL":
            logger.error("Please specify '--host' when using '--secrets'")
            exit(1)
        filename = f"{SERVICE}.json"
        files = list_files(snapshot, SECRET_PATH)
        # Re-purpose the flag variable as the {secret_name: value} mapping.
        secrets = {}
        for f in files[1:]:
            path = f["path"]
            if Path(path).name.startswith(SERVICE) and f["type"] == "file":
                secret = dump(snapshot, path).decode()
                secret_name = path.removeprefix(f"{SECRET_PATH}{SERVICE}_")
                secrets[secret_name] = secret
        binary_output = json.dumps(secrets).encode()
        tarinfo = tarfile.TarInfo(name=filename)
        tarinfo.size = len(binary_output)
        file_dumps.append((binary_output, tarinfo))
    with tarfile.open("/tmp/backup.tar.gz", "w:gz") as tar:
        # Fixed: was an f-string with no placeholders.
        print("Writing files to /tmp/backup.tar.gz...")
        for binary_output, tarinfo in file_dumps:
            tar.addfile(tarinfo, fileobj=io.BytesIO(binary_output))
    size = get_formatted_size("/tmp/backup.tar.gz")
    print(f"Backup has been written to /tmp/backup.tar.gz with a size of {size}")
def get_formatted_size(file_path): def get_formatted_size(file_path):
file_size = os.path.getsize(file_path) file_size = os.path.getsize(file_path)
units = ['Bytes', 'KB', 'MB', 'GB', 'TB'] units = ["Bytes", "KB", "MB", "GB", "TB"]
for unit in units: for unit in units:
if file_size < 1024: if file_size < 1024:
return f"{round(file_size, 3)} {unit}" return f"{round(file_size, 3)} {unit}"
@ -514,23 +633,17 @@ def get_formatted_size(file_path):
def dump(snapshot, path):
    """Return the raw bytes of ``path`` inside ``snapshot`` via ``restic dump``.

    Exits the process with status 1 when restic reports a failure.
    """
    cmd = restic.cat.base_command() + ["dump", snapshot, path]
    print(f"Dumping {path} from snapshot '{snapshot}'")
    result = subprocess.run(cmd, capture_output=True)
    if result.returncode:
        logger.error(
            f"error while dumping {path} from snapshot '{snapshot}': {result.stderr}"
        )
        exit(1)
    return result.stdout
if __name__ == '__main__': if __name__ == "__main__":
cli() cli()

11
compose.pushbasicauth.yml Normal file
View File

@ -0,0 +1,11 @@
---
version: "3.8"
services:
  app:
    # Mount the basic-auth credentials for the prometheus push gateway
    # into the backupbot container (read by cronjob.sh via /run/secrets).
    secrets:
      - push_basicauth
secrets:
  push_basicauth:
    external: true
    # Versioned secret name so credential rotation forces a redeploy.
    name: ${STACK_NAME}_push_basicauth_${SECRET_PUSH_BASICAUTH}

View File

@ -2,7 +2,7 @@
version: "3.8" version: "3.8"
services: services:
app: app:
image: git.coopcloud.tech/coop-cloud/backup-bot-two:2.2.1-beta image: git.coopcloud.tech/coop-cloud/backup-bot-two:2.3.0-beta
volumes: volumes:
- "/var/run/docker.sock:/var/run/docker.sock" - "/var/run/docker.sock:/var/run/docker.sock"
- "/var/lib/docker/volumes/:/var/lib/docker/volumes/" - "/var/lib/docker/volumes/:/var/lib/docker/volumes/"
@ -14,9 +14,16 @@ services:
- RESTIC_PASSWORD_FILE=/run/secrets/restic_password - RESTIC_PASSWORD_FILE=/run/secrets/restic_password
secrets: secrets:
- restic_password - restic_password
configs:
- source: entrypoint
target: /entrypoint.sh
mode: 666
- source: cronjob
target: /cronjob.sh
mode: 666
deploy: deploy:
labels: labels:
- coop-cloud.${STACK_NAME}.version=2.2.0+2.2.1-beta - coop-cloud.${STACK_NAME}.version=2.3.0+2.3.0-beta
- coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-300} - coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-300}
- coop-cloud.backupbot.enabled=true - coop-cloud.backupbot.enabled=true
#entrypoint: ['tail', '-f','/dev/null'] #entrypoint: ['tail', '-f','/dev/null']
@ -31,6 +38,14 @@ secrets:
restic_password: restic_password:
external: true external: true
name: ${STACK_NAME}_restic_password_${SECRET_RESTIC_PASSWORD_VERSION} name: ${STACK_NAME}_restic_password_${SECRET_RESTIC_PASSWORD_VERSION}
configs:
entrypoint:
name: ${STACK_NAME}_entrypoint_${ENTRYPOINT_VERSION}
file: entrypoint.sh
cronjob:
name: ${STACK_NAME}_cronjob_${CRONJOB_VERSION}
file: cronjob.sh
volumes: volumes:
backups: backups:

40
cronjob.sh Executable file
View File

@ -0,0 +1,40 @@
#!/bin/sh
#
# Run a backup and push start/success/fail notifications either to a
# prometheus push gateway (PUSH_PROMETHEUS_URL) or to plain webhook URLs
# (PUSH_URL_START / PUSH_URL_SUCCESS / PUSH_URL_FAIL).

set -e

CURL_OPTS="-s"

# Use basic auth only when the optional secret is mounted and non-empty.
# (-s avoids a noisy 'cat' error when compose.pushbasicauth.yml is not deployed.)
if [ -s /run/secrets/push_basicauth ]
then
    CURL_OPTS="$CURL_OPTS -u $(cat /run/secrets/push_basicauth)"
fi

# Default every hook to a no-op so the evals below are always safe.
push_start_notification="true"
push_success_notification="true"
push_fail_notification="true"

if [ -n "$PUSH_PROMETHEUS_URL" ]
then
    push_start_notification="(echo 'backup 1' | curl $CURL_OPTS --data-binary @- $PUSH_PROMETHEUS_URL)"
    push_success_notification="(echo 'backup 0' | curl $CURL_OPTS --data-binary @- $PUSH_PROMETHEUS_URL)"
    push_fail_notification="(echo 'backup -1' | curl $CURL_OPTS --data-binary @- $PUSH_PROMETHEUS_URL)"
else
    if [ -n "$PUSH_URL_START" ]
    then
        push_start_notification="curl $CURL_OPTS '$PUSH_URL_START'"
    fi
    if [ -n "$PUSH_URL_FAIL" ]
    then
        push_fail_notification="curl $CURL_OPTS '$PUSH_URL_FAIL'"
    fi
    if [ -n "$PUSH_URL_SUCCESS" ]
    then
        push_success_notification="curl $CURL_OPTS '$PUSH_URL_SUCCESS'"
    fi
fi

eval "$push_start_notification"

# Success means the log contains 'backup finished'. The original tested
# [ -n "$(backup … | tee … && grep -q …)" ], which treats ANY output as
# success (grep -q prints nothing), so the fail hook only fired when the
# backup produced no output at all. Test the pipeline status directly.
if backup --machine-logs create 2>&1 | tee /tmp/backup.log \
    && grep -q 'backup finished' /tmp/backup.log
then
    eval "$push_success_notification"
else
    eval "$push_fail_notification"
fi

View File

@ -9,22 +9,7 @@ fi
cron_schedule="${CRON_SCHEDULE:?CRON_SCHEDULE not set}" cron_schedule="${CRON_SCHEDULE:?CRON_SCHEDULE not set}"
if [ -n "$PUSH_URL_START" ] echo "$cron_schedule /cronjob.sh" | crontab -
then
push_start_notification="curl -s '$PUSH_URL_START' &&"
fi
if [ -n "$PUSH_URL_FAIL" ]
then
push_fail_notification="|| curl -s '$PUSH_URL_FAIL'"
fi
if [ -n "$PUSH_URL_SUCCESS" ]
then
push_notification=" && (grep -q 'backup finished' /tmp/backup.log && curl -s '$PUSH_URL_SUCCESS' $push_fail_notification)"
fi
echo "$cron_schedule $push_start_notification backup --machine-logs create 2>&1 | tee /tmp/backup.log $push_notification" | crontab -
crontab -l crontab -l
crond -f -d8 -L /dev/stdout crond -f -d8 -L /dev/stdout

34
pg_backup.sh Normal file
View File

@ -0,0 +1,34 @@
#!/bin/bash
#
# Pre/post-hook helper for backing up and restoring a postgres database.
# Usage: pg_backup.sh backup | pg_backup.sh restore
# Requires: POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD_FILE.

set -e

# Dump target inside the postgres data volume, so the file backup picks it up.
BACKUP_FILE='/var/lib/postgresql/data/backup.sql'

# Dump the database to $BACKUP_FILE.
function backup {
    # Split assignment from export so a failing 'cat' aborts under 'set -e'
    # (export VAR=$(cmd) would mask the command's exit status).
    PGPASSWORD=$(cat "$POSTGRES_PASSWORD_FILE")
    export PGPASSWORD
    pg_dump -U "${POSTGRES_USER}" "${POSTGRES_DB}" > "$BACKUP_FILE"
}

# Recreate the database from $BACKUP_FILE, locking out remote
# connections for the duration of the restore.
function restore {
    cd /var/lib/postgresql/data/
    restore_config(){
        # Restore allowed connections
        cat pg_hba.conf.bak > pg_hba.conf
        su postgres -c 'pg_ctl reload'
    }
    # Don't allow any other connections than local
    cp pg_hba.conf pg_hba.conf.bak
    echo "local all all trust" > pg_hba.conf
    su postgres -c 'pg_ctl reload'
    # Undo the pg_hba.conf change on any exit path.
    trap restore_config EXIT INT TERM

    # Recreate Database
    psql -U "${POSTGRES_USER}" -d postgres -c "DROP DATABASE ${POSTGRES_DB} WITH (FORCE);"
    createdb -U "${POSTGRES_USER}" "${POSTGRES_DB}"
    psql -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" -1 -f "$BACKUP_FILE"

    trap - EXIT INT TERM
    restore_config
}

# Dispatch: run the subcommand given on the command line.
# Quoted "$@" so arguments with spaces survive (was unquoted $@).
"$@"