2 Commits

SHA1        Message                                        Date
0abb4827e7  Update backupbot.py and compose.yaml           2024-05-28 20:20:40 +00:00
0588a06a97  Remove unnecessary files and configurations    2024-04-09 22:32:35 +00:00
21 changed files with 148 additions and 455 deletions

.drone.yml

@@ -1,17 +0,0 @@
---
kind: pipeline
name: linters
steps:
  - name: publish image
    image: plugins/docker
    settings:
      username: 3wordchant
      password:
        from_secret: git_coopcloud_tech_token_3wc
      repo: git.coopcloud.tech/coop-cloud/backup-bot-two
      tags: 2.0.0
      registry: git.coopcloud.tech
    when:
      event:
        exclude:
          - pull_request

.env.sample

@@ -1,40 +1,10 @@
-TYPE=backup-bot-two
+STACK_NAME=backup-bot-two
-SECRET_RESTIC_PASSWORD_VERSION=v1
-COMPOSE_FILE=compose.yml
 RESTIC_REPOSITORY=/backups/restic
 CRON_SCHEDULE='30 3 * * *'
-# Push Notifiactions
+# Push Notifications
-#PUSH_PROMETHEUS_URL=https://pushgateway.example.com/metrics/job/backup
-# or
 #PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start
 #PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK
 #PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail
-# Push Basic Auth
-#COMPOSE_FILE="$COMPOSE_FILE:compose.pushbasicauth.yml"
-#SECRET_PUSH_BASICAUTH=v1
-# swarm-cronjob, instead of built-in cron
-#COMPOSE_FILE="$COMPOSE_FILE:compose.swarm-cronjob.yml"
-# SSH storage
-#SECRET_SSH_KEY_VERSION=v1
-#SSH_HOST_KEY="hostname ssh-rsa AAAAB3...
-#COMPOSE_FILE="$COMPOSE_FILE:compose.ssh.yml"
-# S3 storage
-#SECRET_AWS_SECRET_ACCESS_KEY_VERSION=v1
-#AWS_ACCESS_KEY_ID=something-secret
-#COMPOSE_FILE="$COMPOSE_FILE:compose.s3.yml"
-# Secret restic repository
-# use a secret to store the RESTIC_REPOSITORY if the repository location contains a secret value
-# i.E rest:https://user:SECRET_PASSWORD@host:8000/
-# it overwrites the RESTIC_REPOSITORY variable
-#SECRET_RESTIC_REPO_VERSION=v1
-#COMPOSE_FILE="$COMPOSE_FILE:compose.secret.yml"

.envrc.sample

@@ -1,17 +0,0 @@
export RESTIC_HOST="user@domain.tld"
export RESTIC_PASSWORD_FILE=/run/secrets/restic-password
export BACKUP_DEST=/backups
export SERVER_NAME=domain.tld
export DOCKER_CONTEXT=$SERVER_NAME
# uncomment either this:
#export SSH_KEY_FILE=~/.ssh/id_rsa
# or this:
#export AWS_SECRET_ACCESS_KEY_FILE=s3
#export AWS_ACCESS_KEY_ID=easter-october-emphatic-tug-urgent-customer
# or this:
#export HTTPS_PASSWORD_FILE=/run/secrets/https_password
# optionally limit subset of services for testing
#export SERVICES_OVERRIDE="ghost_domain_tld_app ghost_domain_tld_db"

.gitignore

@@ -1 +1 @@
-/testing
+.env

CHANGELOG.md

@@ -1,6 +0,0 @@
# Change log
## 2.0.0 (unreleased)
- Rewrite from Bash to Python
- Add support for push notifications (#24)

Dockerfile

@@ -1,11 +0,0 @@
FROM docker:24.0.7-dind
RUN apk add --upgrade --no-cache restic bash python3 py3-pip py3-click py3-docker-py py3-json-logger curl
# Todo use requirements file with specific versions
RUN pip install --break-system-packages resticpy==1.0.2
COPY backupbot.py /usr/bin/backup
COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT /entrypoint.sh
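For context, the deleted `.drone.yml` above published this image as `git.coopcloud.tech/coop-cloud/backup-bot-two:2.0.0`; a local build of the removed Dockerfile would have looked roughly like this (a sketch, not a command from the repo):
```
# build the image locally with the tag the CI pipeline used
docker build -t git.coopcloud.tech/coop-cloud/backup-bot-two:2.0.0 .
# poke around inside it
docker run --rm -it --entrypoint sh git.coopcloud.tech/coop-cloud/backup-bot-two:2.0.0
```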

README.md

@@ -1,206 +1,115 @@
 # Backupbot II
-[![Build Status](https://build.coopcloud.tech/api/badges/coop-cloud/backup-bot-two/status.svg)](https://build.coopcloud.tech/coop-cloud/backup-bot-two)
+Wiki Cafe's configuration for a Backupbot II deployment. Originally slimmed down from an `abra` [recipe](https://git.coopcloud.tech/coop-cloud/backup-bot-two) by [Co-op Cloud](https://coopcloud.tech/).
-_This Time, It's Easily Configurable_
-Automatically take backups from all volumes of running Docker Swarm services and runs pre- and post commands.
-<!-- metadata -->
-* **Category**: Utilities
-* **Status**: 0, work-in-progress
-* **Image**: [`git.coopcloud.tech/coop-cloud/backup-bot-two`](https://git.coopcloud.tech/coop-cloud/-/packages/container/backup-bot-two), 4, upstream
-* **Healthcheck**: No
-* **Backups**: N/A
-* **Email**: N/A
-* **Tests**: No
-* **SSO**: N/A
-<!-- endmetadata -->
-## Background
+## Deploying the app with Docker Swarm
-There are lots of Docker volume backup systems; all of them have one or both of these limitations:
+Set the environment variables from the .env file during the shell session.
-- You need to define all the volumes to back up in the configuration system
-- Backups require services to be stopped to take consistent copies
-Backupbot II tries to help, by
-1. **letting you define backups using Docker labels**, so you can **easily collect your backups for use with another system** like docker-volume-backup.
-2. **running pre- and post-commands** before and after backups, for example to use database tools to take a backup from a running service.
-## Deployment
-### With Co-op Cloud
-* `abra app new backup-bot-two`
-* `abra app config <app-name>`
-  - set storage options. Either configure `CRON_SCHEDULE`, or set up `swarm-cronjob`
-* `abra app secret generate -a <backupbot_name>`
-* `abra app deploy <app-name>`
-## Configuration
-Per default Backupbot stores the backups locally in the repository `/backups/restic`, which is accessible as volume at `/var/lib/docker/volumes/<backupbot_name>_backups/_data/restic/`
-The backup location can be changed using the `RESTIC_REPOSITORY` env variable.
-### S3 Storage
-To use S3 storage as backup location set the following envs:
 ```
-RESTIC_REPOSITORY=s3:<S3-SERVICE-URL>/<BUCKET-NAME>
+set -a && source .env && set +a
-SECRET_AWS_SECRET_ACCESS_KEY_VERSION=v1
-AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
-COMPOSE_FILE="$COMPOSE_FILE:compose.s3.yml"
 ```
-and add your `<SECRET_ACCESS_KEY>` as docker secret:
-`abra app secret insert <backupbot_name> aws_secret_access_key v1 <SECRET_ACCESS_KEY>`
-See [restic s3 docs](https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#amazon-s3) for more information.
+Set the secrets.
-### SFTP Storage
-> With sftp it is not possible to prevent the backupbot from deleting backups in case of a compromised machine. Therefore we recommend to use S3, REST or rclone server without delete permissions.
-To use SFTP storage as backup location set the following envs:
 ```
-RESTIC_REPOSITORY=sftp:user@host:/restic-repo-path
+printf "SECRET_HERE" | docker secret create SECRET_NAME -
-SECRET_SSH_KEY_VERSION=v1
-SSH_HOST_KEY="hostname ssh-rsa AAAAB3...
-COMPOSE_FILE="$COMPOSE_FILE:compose.ssh.yml"
 ```
-To get the `SSH_HOST_KEY` run the following command `ssh-keyscan <hostname>`
-Generate an ssh keypair: `ssh-keygen -t ed25519 -f backupkey -P ''`
+Deploy using the `-c` flag to specify one or multiple compose files.
-Add the key to your `authorized_keys`:
-`ssh-copy-id -i backupkey <user>@<hostname>`
-Add your `SSH_KEY` as docker secret:
-```
-abra app secret insert <backupbot_name> ssh_key v1 """$(cat backupkey)
-"""
-```
-> Attention: This command needs to be executed exactly as stated above, because it places a trailing newline at the end, if this is missing you will get the following error: `Load key "/run/secrets/ssh_key": error in libcrypto`
-### Restic REST server Storage
-You can simply set the `RESTIC_REPOSITORY` variable to your REST server URL `rest:http://host:8000/`.
-If you access the REST server with a password `rest:https://user:pass@host:8000/` you should hide the whole URL containing the password inside a secret.
-Uncomment these lines:
 ```
-SECRET_RESTIC_REPO_VERSION=v1
+docker stack deploy backup-bot-two -c compose.yaml
-COMPOSE_FILE="$COMPOSE_FILE:compose.secret.yml"
 ```
-Add your REST server url as secret:
-```
-`abra app secret insert <backupbot_name> restic_repo v1 "rest:https://user:pass@host:8000/"`
-```
-The secret will overwrite the `RESTIC_REPOSITORY` variable.
-See [restic REST docs](https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#rest-server) for more information.
 ## Push notifications
-It is possible to configure three push events, that may trigger on the backup cronjob. Those can be used to detect failures from mointoring systems.
-The events are:
-- start
-- success
-- fail
-### Using a Prometheus Push Gateway
-[A prometheus push gateway](https://git.coopcloud.tech/coop-cloud/monitoring-ng#setup-push-gateway) can be used by setting the following env variables:
-- `PUSH_PROMETHEUS_URL=pushgateway.example.com/metrics/job/backup`
-### Using custom URLs
 The following env variables can be used to setup push notifications for backups. `PUSH_URL_START` is requested just before the backups starts, `PUSH_URL_SUCCESS` is only requested if the backup was successful and if the backup fails `PUSH_URL_FAIL` will be requested.
 Each variable is optional and independent of the other.
 ```
 PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start
 PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK
 PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail
 ```
-### Push endpoint behind basic auth
+## Commands
-Insert the basic auth secret
-`abra app secret insert <backupbot_name> push_basicauth v1 "user:password"`
-Enable basic auth in the env file, by uncommenting the following line:
+1. Find the ID or name of the backup container:
 ```
-#COMPOSE_FILE="$COMPOSE_FILE:compose.pushbasicauth.yml"
+docker ps --filter "name=backup-bot-two_app"
-#SECRET_PUSH_BASICAUTH=v1
 ```
-## Usage
+2. Run the desired command using `docker exec`:
+```
+docker exec -it <container_id_or_name> backup <command> [options]
+```
+Replace `<container_id_or_name>` with the ID or name of the backup container.
-Run the cronjob that creates a backup, including the push notifications and docker logging:
-`abra app cmd <backupbot_name> app run_cron`
+Available commands:
+- `create`: Initiate the backup process.
+- `restore`: Restore a specific snapshot to a target directory.
+- `snapshots`: List available snapshots.
+- `ls`: List files in a specific snapshot.
+- `download`: Download specific files, volumes, or secrets from a snapshot.
-Create a backup of all apps:
-`abra app run <backupbot_name> app -- backup create`
+Options:
+- `--host`, `-h`: Specify the service name (e.g., `app`).
+- `--repo`, `-r`: Specify the Restic repository location (e.g., `/run/secrets/restic_repo`).
+- `--log`, `-l`: Set the log level (e.g., `debug`, `info`, `warning`, `error`).
+- `--machine-logs`, `-m`: Enable machine-readable JSON logging.
+## Examples
-> The apps to backup up need to be deployed
-Create an individual backup:
-`abra app run <backupbot_name> app -- backup --host <target_app_name> create`
-Create a backup to a local repository:
-`abra app run <backupbot_name> app -- backup create -r /backups/restic`
-> It is recommended to shutdown/undeploy an app before restoring the data
-Restore the latest snapshot of all including apps:
-`abra app run <backupbot_name> app -- backup restore`
-Restore a specific snapshot of an individual app:
-`abra app run <backupbot_name> app -- backup --host <target_app_name> restore --snapshot <snapshot_id>`
-Show all snapshots:
-`abra app run <backupbot_name> app -- backup snapshots`
-Show all snapshots containing a specific app:
-`abra app run <backupbot_name> app -- backup --host <target_app_name> snapshots`
-Show all files inside the latest snapshot (can be very verbose):
-`abra app run <backupbot_name> app -- backup ls`
-Show specific files inside a selected snapshot:
-`abra app run <backupbot_name> app -- backup ls --snapshot <snapshot_id> --path /var/lib/docker/volumes/`
-Download files from a snapshot:
+Create a backup:
 ```
-filename=$(abra app run <backupbot_name> app -- backup download --snapshot <snapshot_id> --path <absolute_path>)
+docker exec -it <container_id_or_name> backup create --host app
-abra app cp <backupbot_name> app:$filename .
 ```
-## Run restic
+Restore a snapshot:
 ```
-abra app run <backupbot_name> app bash
+docker exec -it <container_id_or_name> backup restore --snapshot <snapshot_id> --target /path/to/restore
-export AWS_SECRET_ACCESS_KEY=$(cat $AWS_SECRET_ACCESS_KEY_FILE)
-export RESTIC_PASSWORD=$(cat $RESTIC_PASSWORD_FILE)
-restic snapshots
 ```
+List snapshots:
+```
+docker exec -it <container_id_or_name> backup snapshots
+```
+List files in a snapshot:
+```
+docker exec -it <container_id_or_name> backup ls --snapshot <snapshot_id> --path /path/to/directory
+```
+Download files, volumes, or secrets from a snapshot:
+```
+docker exec -it <container_id_or_name> backup download --snapshot <snapshot_id> [--path /path/to/file] [--volumes] [--secrets]
+```
+Note: Make sure to replace `<container_id_or_name>` and `<snapshot_id>` with the appropriate values for your setup.
+Remember to review and adjust the Docker Compose file and environment variables according to your specific requirements before running the backup commands.
+When using `docker exec`, you don't need to specify the volume mounts or the Restic repository location as command-line arguments because they are already defined in the Docker Compose file and are available within the running container.
+If you need to access the downloaded files, volumes, or secrets from the backup, you can use `docker cp` to copy them from the container to the host machine:
+```
+docker cp <container_id_or_name>:/path/to/backup/file /path/on/host
+```
+This allows you to retrieve the backed-up data from the container.
 ## Recipe Configuration
-Like Traefik, or `swarm-cronjob`, Backupbot II uses access to the Docker socket to read labels from running Docker Swarm services:
+Backupbot II uses access to the Docker socket to read labels from running Docker Swarm services:
 ```
 services:

@@ -217,5 +126,3 @@ services:
 - `backupbot.backup.post-hook` -- command to run after copying files (optional)
 As in the above example, you can reference Docker Secrets, e.g. for looking up database passwords, by reading the files in `/run/secrets` directly.
-[abra]: https://git.autonomic.zone/autonomic-cooperative/abra
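Putting the new README's deployment steps together, an end-to-end sketch (the secret names follow `compose.yaml` with `STACK_NAME=backup-bot-two`; the repository path and password values are placeholders):
```
set -a && source .env && set +a

# compose.yaml expects these two external secrets
printf '/backups/restic' | docker secret create backup-bot-two_restic_repo -
printf 'choose-a-strong-password' | docker secret create backup-bot-two_restic_password -

docker stack deploy backup-bot-two -c compose.yaml
```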

abra.sh

@@ -1,12 +0,0 @@
export BACKUPBOT_VERSION=v1
export SSH_CONFIG_VERSION=v1
export ENTRYPOINT_VERSION=v17
export CRONJOB_VERSION=v2

run_cron () {
    # remember the configured schedule (the first five crontab fields)
    schedule="$(crontab -l | tr -s " " | cut -d ' ' -f-5)"
    rm -f /tmp/backup.log
    # reschedule the job to run every minute
    echo "* * * * * $(crontab -l | tr -s " " | cut -d ' ' -f6-)" | crontab -
    # wait until the run has started writing its log
    while [ ! -f /tmp/backup.log ]; do sleep 1; done
    # restore the original schedule
    echo "$schedule $(crontab -l | tr -s " " | cut -d ' ' -f6-)" | crontab -
}

backupbot.py

@@ -54,8 +54,8 @@ def cli(loglevel, service, repository, machine_logs):
     if not isinstance(numeric_level, int):
         raise ValueError('Invalid log level: %s' % loglevel)
     logger.setLevel(numeric_level)
-    logHandler = logging.StreamHandler()
     if machine_logs:
+        logHandler = logging.StreamHandler()
         formatter = jsonlogger.JsonFormatter(
             "%(levelname)s %(filename)s %(lineno)s %(process)d %(message)s", rename_fields={"levelname": "message_type"})
         logHandler.setFormatter(formatter)

@@ -66,13 +66,9 @@ def cli(loglevel, service, repository, machine_logs):
 def init_repo():
-    if repo := os.environ.get('RESTIC_REPOSITORY_FILE'):
-        # RESTIC_REPOSITORY_FILE and RESTIC_REPOSITORY are mutually exclusive
-        del os.environ['RESTIC_REPOSITORY']
-    else:
-        repo = os.environ['RESTIC_REPOSITORY']
-    restic.repository = repo
+    repo = os.environ['RESTIC_REPOSITORY']
     logger.debug(f"set restic repository location: {repo}")
+    restic.repository = repo
     restic.password_file = '/var/run/secrets/restic_password'
     try:
         restic.cat.config()

@@ -91,7 +87,11 @@ def export_secrets():
             with open(os.environ[env]) as file:
                 secret = file.read()
             os.environ[env.removesuffix('_FILE')] = secret
             # logger.debug(f"Read secret value: {secret}")
+            if env == 'RESTIC_REPOSITORY_FILE':
+                # RESTIC_REPOSITORY_FILE and RESTIC_REPOSITORY are mutually exclusive
+                logger.info("RESTIC_REPOSITORY set to RESTIC_REPOSITORY_FILE. Unsetting RESTIC_REPOSITORY_FILE.")
+                del os.environ['RESTIC_REPOSITORY_FILE']

 @cli.command()

@@ -108,7 +108,7 @@ def create(retries):
 def get_backup_cmds():
     client = docker.from_env()
     container_by_service = {
-        c.labels.get('com.docker.swarm.service.name'): c for c in client.containers.list()}
+        c.labels['com.docker.swarm.service.name']: c for c in client.containers.list()}
     backup_paths = set()
     backup_apps = set()
     pre_commands = {}

@@ -143,14 +143,14 @@ def copy_secrets(apps):
         os.mkdir(SECRET_PATH)
     client = docker.from_env()
     container_by_service = {
-        c.labels.get('com.docker.swarm.service.name'): c for c in client.containers.list()}
+        c.labels['com.docker.swarm.service.name']: c for c in client.containers.list()}
     services = client.services.list()
     for s in services:
         app_name = s.attrs['Spec']['Labels']['com.docker.stack.namespace']
         if (app_name in apps and
                 (app_secs := s.attrs['Spec']['TaskTemplate']['ContainerSpec'].get('Secrets'))):
             if not container_by_service.get(s.name):
-                logger.warning(
+                logger.error(
                     f"Container {s.name} is not running, secrets can not be copied.")
                 continue
             container_id = container_by_service[s.name].id

@@ -161,7 +161,6 @@ def copy_secrets(apps):
                     f"For the secret {sec['SecretName']} the file {src} does not exist for {s.name}")
                 continue
             dst = SECRET_PATH + sec['SecretName']
-            logger.debug("Copy Secret {sec['SecretName']}")
             copyfile(src, dst)

@@ -189,8 +188,6 @@ def run_commands(commands):
 def backup_volumes(backup_paths, apps, retries, dry_run=False):
     while True:
         try:
-            logger.info("Start volume backup")
-            logger.debug(backup_paths)
             result = restic.backup(backup_paths, dry_run=dry_run, tags=apps)
             logger.summary("backup finished", extra=result)
             return
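The added `export_secrets()` branch makes the compose-level `RESTIC_REPOSITORY_FILE` secret win over any plain `RESTIC_REPOSITORY` value; in shell terms the new behavior is roughly this (a sketch of the Python above, not code from the repo):
```
# rough equivalent of the new RESTIC_REPOSITORY_FILE handling
if [ -n "$RESTIC_REPOSITORY_FILE" ]; then
    # read the secret file into RESTIC_REPOSITORY, then drop the _FILE variable,
    # since the two are mutually exclusive for restic
    export RESTIC_REPOSITORY="$(cat "$RESTIC_REPOSITORY_FILE")"
    unset RESTIC_REPOSITORY_FILE
fi
```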

compose.pushbasicauth.yml

@@ -1,11 +0,0 @@
---
version: "3.8"

services:
  app:
    secrets:
      - push_basicauth

secrets:
  push_basicauth:
    external: true
    name: ${STACK_NAME}_push_basicauth_${SECRET_PUSH_BASICAUTH}

compose.s3.yml

@@ -1,14 +0,0 @@
---
version: "3.8"

services:
  app:
    environment:
      - AWS_ACCESS_KEY_ID
      - AWS_SECRET_ACCESS_KEY_FILE=/run/secrets/aws_secret_access_key
    secrets:
      - aws_secret_access_key

secrets:
  aws_secret_access_key:
    external: true
    name: ${STACK_NAME}_aws_secret_access_key_${SECRET_AWS_SECRET_ACCESS_KEY_VERSION}

compose.secret.yml

@@ -1,13 +0,0 @@
---
version: "3.8"

services:
  app:
    environment:
      - RESTIC_REPOSITORY_FILE=/run/secrets/restic_repo
    secrets:
      - restic_repo

secrets:
  restic_repo:
    external: true
    name: ${STACK_NAME}_restic_repo_${SECRET_RESTIC_REPO_VERSION}

compose.ssh.yml

@@ -1,23 +0,0 @@
---
version: "3.8"

services:
  app:
    environment:
      - SSH_KEY_FILE=/run/secrets/ssh_key
      - SSH_HOST_KEY
    secrets:
      - source: ssh_key
        mode: 0400
    configs:
      - source: ssh_config
        target: /root/.ssh/config

secrets:
  ssh_key:
    external: true
    name: ${STACK_NAME}_ssh_key_${SECRET_SSH_KEY_VERSION}

configs:
  ssh_config:
    name: ${STACK_NAME}_ssh_config_${SSH_CONFIG_VERSION}
    file: ssh_config

compose.swarm-cronjob.yml

@@ -1,15 +0,0 @@
---
version: "3.8"

services:
  app:
    deploy:
      mode: replicated
      replicas: 0
      labels:
        - "swarm.cronjob.enable=true"
        # Note(3wc): every 5m, testing
        - "swarm.cronjob.schedule=*/5 * * * *"
        # Note(3wc): blank label to be picked up by `abra recipe sync`
      restart_policy:
        condition: none
    entrypoint: [ "/usr/bin/backup.sh" ]

compose.yaml (new file)

@@ -0,0 +1,44 @@
services:
  app:
    image: docker:24.0.7-dind
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
      - "/var/lib/docker/volumes/:/var/lib/docker/volumes/"
      - "/var/lib/docker/containers/:/var/lib/docker/containers/:ro"
    environment:
      - CRON_SCHEDULE
      - RESTIC_REPOSITORY_FILE=/run/secrets/restic_repo
      - RESTIC_PASSWORD_FILE=/run/secrets/restic_password
    secrets:
      - restic_repo
      - restic_password
    configs:
      - source: entrypoint
        target: /entrypoint.sh
        mode: 0555
      - source: backupbot
        target: /usr/bin/backup
        mode: 0555
    entrypoint: ['/entrypoint.sh']
    healthcheck:
      test: "pgrep crond"
      interval: 30s
      timeout: 10s
      retries: 10
      start_period: 5m

secrets:
  restic_repo:
    external: true
    name: ${STACK_NAME}_restic_repo
  restic_password:
    external: true
    name: ${STACK_NAME}_restic_password

configs:
  entrypoint:
    name: ${STACK_NAME}_entrypoint
    file: entrypoint.sh
  backupbot:
    name: ${STACK_NAME}_backupbot
    file: backupbot.py
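Once deployed, the healthcheck (`pgrep crond`) should report the service healthy after the entrypoint has installed its dependencies and started cron; a quick verification sketch (stack name as in the README's deploy example):
```
# confirm the replica converged and passed the healthcheck
docker service ps backup-bot-two_app
# follow the entrypoint and cron output
docker service logs -f backup-bot-two_app
```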

compose.yml

@@ -1,51 +0,0 @@
---
version: "3.8"

services:
  app:
    image: git.coopcloud.tech/coop-cloud/backup-bot-two:2.0.0
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
      - "/var/lib/docker/volumes/:/var/lib/docker/volumes/"
      - "/var/lib/docker/containers/:/var/lib/docker/containers/:ro"
      - backups:/backups
    environment:
      - CRON_SCHEDULE
      - RESTIC_REPOSITORY
      - RESTIC_PASSWORD_FILE=/run/secrets/restic_password
    secrets:
      - restic_password
    configs:
      - source: entrypoint
        target: /entrypoint.sh
        mode: 666
      - source: cronjob
        target: /cronjob.sh
        mode: 666
    deploy:
      labels:
        - coop-cloud.${STACK_NAME}.version=0.1.0+latest
        - coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-300}
        - coop-cloud.backupbot.enabled=true
    #entrypoint: ['tail', '-f','/dev/null']
    healthcheck:
      test: "pgrep crond"
      interval: 30s
      timeout: 10s
      retries: 10
      start_period: 5m

secrets:
  restic_password:
    external: true
    name: ${STACK_NAME}_restic_password_${SECRET_RESTIC_PASSWORD_VERSION}

configs:
  entrypoint:
    name: ${STACK_NAME}_entrypoint_${ENTRYPOINT_VERSION}
    file: entrypoint.sh
  cronjob:
    name: ${STACK_NAME}_cronjob_${CRONJOB_VERSION}
    file: cronjob.sh

volumes:
  backups:

cronjob.sh

@@ -1,40 +0,0 @@
#!/bin/sh

set -e

CURL_OPTS="-s"

# Check for basic auth
if [ -n "$(cat /run/secrets/push_basicauth)" ]
then
    CURL_OPTS="$CURL_OPTS -u $(cat /run/secrets/push_basicauth)"
fi

if [ -n "$PUSH_PROMETHEUS_URL" ]
then
    push_start_notification="(echo 'backup 1' | curl $CURL_OPTS --data-binary @- $PUSH_PROMETHEUS_URL)"
    push_success_notification="(echo 'backup 0' | curl $CURL_OPTS --data-binary @- $PUSH_PROMETHEUS_URL)"
    push_fail_notification="(echo 'backup -1' | curl $CURL_OPTS --data-binary @- $PUSH_PROMETHEUS_URL)"
else
    if [ -n "$PUSH_URL_START" ]
    then
        push_start_notification="curl $CURL_OPTS '$PUSH_URL_START'"
    fi
    if [ -n "$PUSH_URL_FAIL" ]
    then
        push_fail_notification="curl $CURL_OPTS '$PUSH_URL_FAIL'"
    fi
    if [ -n "$PUSH_URL_SUCCESS" ]
    then
        push_success_notification="curl $CURL_OPTS '$PUSH_URL_SUCCESS'"
    fi
fi

eval "$push_start_notification"

if [ "$(backup --machine-logs create 2>&1 | tee /tmp/backup.log && (grep -q 'backup finished' /tmp/backup.log))" ]
then
    eval "$push_success_notification"
else
    eval "$push_fail_notification"
fi
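For the Prometheus branch of this removed script, the generated notification expanded to a plain pushgateway POST; for example, at the start of a run and with the example URL from the old `.env.sample` (illustration only):
```
# what push_start_notification evaluated to
echo 'backup 1' | curl -s --data-binary @- https://pushgateway.example.com/metrics/job/backup
```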

entrypoint.sh (executable file → normal file)

@@ -1,15 +1,30 @@
 #!/bin/sh
-set -e
+set -e -o pipefail
-if [ -n "$SSH_HOST_KEY" ]
+apk add --upgrade --no-cache restic bash python3 py3-pip py3-click py3-docker-py py3-json-logger curl
-then
-    echo "$SSH_HOST_KEY" > /root/.ssh/known_hosts
+# Todo use requirements file with specific versions
-fi
+pip install --break-system-packages resticpy==1.0.2
 cron_schedule="${CRON_SCHEDULE:?CRON_SCHEDULE not set}"
-echo "$cron_schedule /cronjob.sh" | crontab -
+if [ -n "$PUSH_URL_START" ]
+then
+    push_start_notification="curl -s '$PUSH_URL_START' &&"
+fi
+if [ -n "$PUSH_URL_FAIL" ]
+then
+    push_fail_notification="|| curl -s '$PUSH_URL_FAIL'"
+fi
+if [ -n "$PUSH_URL_SUCCESS" ]
+then
+    push_notification=" && (grep -q 'backup finished' /tmp/backup.log && curl -s '$PUSH_URL_SUCCESS' $push_fail_notification)"
+fi
+echo "$cron_schedule $push_start_notification backup --machine-logs create 2>&1 | tee /tmp/backup.log $push_notification" | crontab -
 crontab -l
 crond -f -d8 -L /dev/stdout
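With `CRON_SCHEDULE='30 3 * * *'` and all three push URLs set, the `echo ... | crontab -` line above assembles a single crontab entry roughly like this (a worked example; broken across lines here for readability only):
```
30 3 * * * curl -s 'https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start' && \
  backup --machine-logs create 2>&1 | tee /tmp/backup.log \
  && (grep -q 'backup finished' /tmp/backup.log \
      && curl -s 'https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK' \
      || curl -s 'https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail')
```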

release/2.0.0

@@ -1,3 +0,0 @@
Breaking Change: the variables `SERVER_NAME` and `RESTIC_HOST` are merged into `RESTIC_REPOSITORY`. The format can be looked up here: https://restic.readthedocs.io/en/stable/030_preparing_a_new_repo.html

- ssh/sftp: `sftp:user@host:/repo-path`
- S3: `s3:https://s3.example.com/bucket_name`

renovate.json

@@ -1,3 +0,0 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json"
}

ssh_config

@@ -1,4 +0,0 @@
Host *
    IdentityFile /run/secrets/ssh_key
    ServerAliveInterval 60
    ServerAliveCountMax 240