Compare commits: enable-lab...prom-mon (55 commits)
| SHA1 |
|---|
| c1902b2dbc |
| cc049b858b |
| b7bc8ed58f |
| 68e37f5c23 |
| 4d39d84733 |
| e5b9bc0446 |
| ec4c4509dc |
| 26162a9e38 |
| bd581fd8d7 |
| e77432e3ab |
| 001a654e37 |
| c5574edc54 |
| 50e4d68717 |
| c7830ceb6f |
| b6f859efbb |
| 7f14698824 |
| 2a9a98172f |
| 282215cf9c |
| ae7a14b6f1 |
| 8acdb20e5b |
| 5582744073 |
| 84d606fa80 |
| 7865907811 |
| dc66c02e23 |
| f730c70bfe |
| faa7ae3dd1 |
| 79eeec428a |
| 4164760dc6 |
| e644679b8b |
| 0c587ac926 |
| 65686cd891 |
| ac055c932e |
| 64328c79b1 |
| 15275b2571 |
| 4befebba38 |
| d2087a441e |
| f4d96b0875 |
| c73bbe8c0d |
| ff2b5a25a2 |
| e186813a49 |
| 37cb51674f |
| 2ea59b4230 |
| 354f964e7d |
| 2bb27aadc4 |
| 66e1c9617d |
| 79d19e7ac5 |
| 359140781e |
| 8750ec1813 |
| 8e76ad591e |
| a3faa5d51f |
| a3f27fa6ba |
| fe5d846c5f |
| 79b7a01dda |
| f8a8547b70 |
| 192b1f1d9c |
							
								
								
									
.drone.yml (21 changed lines)
@@ -2,11 +2,16 @@
 kind: pipeline
 name: linters
 steps:
-  - name: run shellcheck
-    image: koalaman/shellcheck-alpine
-    commands:
-      - shellcheck backup.sh
-
-trigger:
-  branch:
-    - main
+  - name: publish image
+    image: plugins/docker
+    settings:
+      username: 3wordchant
+      password:
+        from_secret: git_coopcloud_tech_token_3wc
+      repo: git.coopcloud.tech/coop-cloud/backup-bot-two
+      tags: 2.0.0
+      registry: git.coopcloud.tech
+    when:
+      event:
+        exclude:
+          - pull_request
							
								
								
									
.env.sample (17 changed lines)
@@ -4,10 +4,21 @@ SECRET_RESTIC_PASSWORD_VERSION=v1
 
 COMPOSE_FILE=compose.yml
 
-RESTIC_REPO=/backups/restic
+RESTIC_REPOSITORY=/backups/restic
 
 CRON_SCHEDULE='30 3 * * *'
 
+# Push Notifiactions
+#PUSH_PROMETHEUS_URL=https://pushgateway.example.com/metrics/job/backup
+# or
+#PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start
+#PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK
+#PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail
+
+# Push Basic Auth
+#COMPOSE_FILE="$COMPOSE_FILE:compose.pushbasicauth.yml"
+#SECRET_PUSH_BASICAUTH=v1
+
 # swarm-cronjob, instead of built-in cron
 #COMPOSE_FILE="$COMPOSE_FILE:compose.swarm-cronjob.yml"
 
@@ -22,8 +33,8 @@ CRON_SCHEDULE='30 3 * * *'
 #COMPOSE_FILE="$COMPOSE_FILE:compose.s3.yml"
 
 # Secret restic repository
-# use a secret to store the RESTIC_REPO if the repository location contains a secret value
+# use a secret to store the RESTIC_REPOSITORY if the repository location contains a secret value
 # i.E rest:https://user:SECRET_PASSWORD@host:8000/
-# it overwrites the RESTIC_REPO variable
+# it overwrites the RESTIC_REPOSITORY variable
 #SECRET_RESTIC_REPO_VERSION=v1
 #COMPOSE_FILE="$COMPOSE_FILE:compose.secret.yml"
							
								
								
									
CHANGELOG.md (new file, 6 lines)
@@ -0,0 +1,6 @@
+# Change log
+
+## 2.0.0 (unreleased)
+
+- Rewrite from Bash to Python
+- Add support for push notifications (#24)
							
								
								
									
Dockerfile (new file, 11 lines)
@@ -0,0 +1,11 @@
+FROM docker:24.0.7-dind
+
+RUN apk add --upgrade --no-cache restic bash python3 py3-pip py3-click py3-docker-py py3-json-logger curl
+
+# Todo use requirements file with specific versions
+RUN pip install --break-system-packages resticpy==1.0.2
+
+COPY backupbot.py /usr/bin/backup
+COPY entrypoint.sh /entrypoint.sh
+
+ENTRYPOINT /entrypoint.sh
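For orientation, `resticpy` is the only pip package the new image installs; `backupbot.py` (further down in this compare) drives restic entirely through it. A minimal sketch of the calls the bot relies on, where the repository path, password file and volume path are placeholders rather than values taken from this diff:

```
import restic
from restic.errors import ResticFailedError

# Placeholder locations; the deployed bot gets these from env vars and Docker secrets.
restic.repository = "/backups/restic"
restic.password_file = "/var/run/secrets/restic_password"

try:
    restic.cat.config()   # fails if the repository has not been initialised yet
except ResticFailedError:
    restic.init()         # first run: create the repository

# Back up a volume directory and tag the snapshot with the (hypothetical) app name.
result = restic.backup(["/var/lib/docker/volumes/example_app_data"], tags=["example_app"])
print(result)             # resticpy returns restic's JSON summary as a dict

for snap in restic.snapshots():
    print(snap["time"], snap["id"])
```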
							
								
								
									
README.md (90 changed lines)
@@ -10,7 +10,7 @@ Automatically take backups from all volumes of running Docker Swarm services and
 
 * **Category**: Utilities
 * **Status**: 0, work-in-progress
-* **Image**: [`thecoopcloud/backup-bot-two`](https://hub.docker.com/r/thecoopcloud/backup-bot-two), 4, upstream
+* **Image**: [`git.coopcloud.tech/coop-cloud/backup-bot-two`](https://git.coopcloud.tech/coop-cloud/-/packages/container/backup-bot-two), 4, upstream
 * **Healthcheck**: No
 * **Backups**: N/A
 * **Email**: N/A
@@ -38,26 +38,26 @@ Backupbot II tries to help, by
 * `abra app new backup-bot-two`
 * `abra app config <app-name>`
     - set storage options. Either configure `CRON_SCHEDULE`, or set up `swarm-cronjob`
-* `abra app secret generate -a <app_name>`
+* `abra app secret generate -a <backupbot_name>`
 * `abra app deploy <app-name>`
 
 ## Configuration
 
-Per default Backupbot stores the backups locally in the repository `/backups/restic`, which is accessible as volume at `/var/lib/docker/volumes/<app_name>_backups/_data/restic/`
+Per default Backupbot stores the backups locally in the repository `/backups/restic`, which is accessible as volume at `/var/lib/docker/volumes/<backupbot_name>_backups/_data/restic/`
 
-The backup location can be changed using the `RESTIC_REPO` env variable.
+The backup location can be changed using the `RESTIC_REPOSITORY` env variable.
 
 ### S3 Storage
 
 To use S3 storage as backup location set the following envs:
 ```
-RESTIC_REPO=s3:<S3-SERVICE-URL>/<BUCKET-NAME>
+RESTIC_REPOSITORY=s3:<S3-SERVICE-URL>/<BUCKET-NAME>
 SECRET_AWS_SECRET_ACCESS_KEY_VERSION=v1
 AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
 COMPOSE_FILE="$COMPOSE_FILE:compose.s3.yml"
 ```
 and add your `<SECRET_ACCESS_KEY>` as docker secret:
-`abra app secret insert <app_name> aws_secret_access_key v1 <SECRET_ACCESS_KEY>`
+`abra app secret insert <backupbot_name> aws_secret_access_key v1 <SECRET_ACCESS_KEY>`
 
 See [restic s3 docs](https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#amazon-s3) for more information.
 
@@ -67,7 +67,7 @@ See [restic s3 docs](https://restic.readthedocs.io/en/latest/030_preparing_a_new
 
 To use SFTP storage as backup location set the following envs:
 ```
-RESTIC_REPO=sftp:user@host:/restic-repo-path
+RESTIC_REPOSITORY=sftp:user@host:/restic-repo-path
 SECRET_SSH_KEY_VERSION=v1
 SSH_HOST_KEY="hostname ssh-rsa AAAAB3...
 COMPOSE_FILE="$COMPOSE_FILE:compose.ssh.yml"
@@ -79,13 +79,14 @@ Add the key to your `authorized_keys`:
 `ssh-copy-id -i backupkey <user>@<hostname>`
 Add your `SSH_KEY` as docker secret:
 ```
-abra app secret insert <app_name> ssh_key v1 """$(cat backupkey)
+abra app secret insert <backupbot_name> ssh_key v1 """$(cat backupkey)
 """
 ```
+> Attention: This command needs to be executed exactly as stated above, because it places a trailing newline at the end, if this is missing you will get the following error: `Load key "/run/secrets/ssh_key": error in libcrypto`
 
 ### Restic REST server Storage
 
-You can simply set the `RESTIC_REPO` variable to your REST server URL `rest:http://host:8000/`.
+You can simply set the `RESTIC_REPOSITORY` variable to your REST server URL `rest:http://host:8000/`.
 If you access the REST server with a password `rest:https://user:pass@host:8000/` you should hide the whole URL containing the password inside a secret.
 Uncomment these lines:
 ```
@@ -94,63 +95,108 @@ COMPOSE_FILE="$COMPOSE_FILE:compose.secret.yml"
 ```
 Add your REST server url as secret:
 ```
-`abra app secret insert <app_name> restic_repo v1 "rest:https://user:pass@host:8000/"`
+`abra app secret insert <backupbot_name> restic_repo v1 "rest:https://user:pass@host:8000/"`
 ```
-The secret will overwrite the `RESTIC_REPO` variable.
+The secret will overwrite the `RESTIC_REPOSITORY` variable.
 
 
 See [restic REST docs](https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#rest-server) for more information.
 
+## Push notifications
+
+It is possible to configure three push events, that may trigger on the backup cronjob. Those can be used to detect failures from mointoring systems.
+The events are:
+- start
+- success
+- fail
+
+### Using a Prometheus Push Gateway
+
+[A prometheus push gateway](https://git.coopcloud.tech/coop-cloud/monitoring-ng#setup-push-gateway) can be used by setting the following env variables:
+- `PUSH_PROMETHEUS_URL=pushgateway.example.com/metrics/job/backup`
+
+### Using custom URLs
+
+The following env variables can be used to setup push notifications for backups. `PUSH_URL_START` is requested just before the backups starts, `PUSH_URL_SUCCESS` is only requested if the backup was successful and if the backup fails `PUSH_URL_FAIL` will be requested.
+Each variable is optional and independent of the other.
+
+```
+PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start
+PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK
+PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail
+```
+
+### Push endpoint behind basic auth
+
+Insert the basic auth secret
+`abra app secret insert <backupbot_name> push_basicauth v1 "user:password"`
+
+Enable basic auth in the env file, by uncommenting the following line:
+```
+#COMPOSE_FILE="$COMPOSE_FILE:compose.pushbasicauth.yml"
+#SECRET_PUSH_BASICAUTH=v1
+```
 
 ## Usage
 
+Run the cronjob that creates a backup, including the push notifications and docker logging:
+`abra app cmd <backupbot_name> app run_cron`
+
 Create a backup of all apps:
 
-`abra app run <app_name> app -- backup create`
+`abra app run <backupbot_name> app -- backup create`
 
 > The apps to backup up need to be deployed
 
 Create an individual backup:
 
-`abra app run <app_name> app -- backup --host <target_app_name> create`
+`abra app run <backupbot_name> app -- backup --host <target_app_name> create`
 
 Create a backup to a local repository:
 
-`abra app run <app_name> app -- backup create -r /backups/restic`
+`abra app run <backupbot_name> app -- backup create -r /backups/restic`
 
 > It is recommended to shutdown/undeploy an app before restoring the data
 
 Restore the latest snapshot of all including apps:
 
-`abra app run <app_name> app -- backup restore`
+`abra app run <backupbot_name> app -- backup restore`
 
 Restore a specific snapshot of an individual app:
 
-`abra app run <app_name> app -- backup --host <target_app_name> restore --snapshot <snapshot_id>`
+`abra app run <backupbot_name> app -- backup --host <target_app_name> restore --snapshot <snapshot_id>`
 
 Show all snapshots:
 
-`abra app run <app_name> app -- backup snapshots`
+`abra app run <backupbot_name> app -- backup snapshots`
 
 Show all snapshots containing a specific app:
 
-`abra app run <app_name> app -- backup --host <target_app_name> snapshots`
+`abra app run <backupbot_name> app -- backup --host <target_app_name> snapshots`
 
 Show all files inside the latest snapshot (can be very verbose):
 
-`abra app run <app_name> app -- backup ls`
+`abra app run <backupbot_name> app -- backup ls`
 
 Show specific files inside a selected snapshot:
 
-`abra app run <app_name> app -- backup ls --snapshot <snapshot_id> --path /var/lib/docker/volumes/`
+`abra app run <backupbot_name> app -- backup ls --snapshot <snapshot_id> --path /var/lib/docker/volumes/`
 
 Download files from a snapshot:
 
 ```
-filename=$(abra app run <app_name> app -- backup download --snapshot <snapshot_id> --path <absolute_path>)
-abra app cp <app_name> app:$filename .
+filename=$(abra app run <backupbot_name> app -- backup download --snapshot <snapshot_id> --path <absolute_path>)
+abra app cp <backupbot_name> app:$filename .
 ```
 
+## Run restic
+
+```
+abra app run <backupbot_name> app bash
+export AWS_SECRET_ACCESS_KEY=$(cat $AWS_SECRET_ACCESS_KEY_FILE)
+export RESTIC_PASSWORD=$(cat $RESTIC_PASSWORD_FILE)
+restic snapshots
+```
 
 ## Recipe Configuration
 
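The push-notification contract added to the README above is plain HTTP: the cron wrapper (see `cronjob.sh` later in this compare) POSTs a `backup 1/0/-1` gauge to the Prometheus push gateway, or simply requests the start/success/fail URLs with curl. A rough Python equivalent, with placeholder URLs and without the basic-auth variant, might look like this:

```
import urllib.request

# Placeholders; real values come from the env file shown earlier in this compare.
PUSH_PROMETHEUS_URL = "https://pushgateway.example.com/metrics/job/backup"
PUSH_URL_START = "https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start"

def push_metric(value):
    # The push gateway expects a text exposition body terminated by a newline.
    body = f"backup {value}\n".encode()
    req = urllib.request.Request(PUSH_PROMETHEUS_URL, data=body, method="POST")
    urllib.request.urlopen(req, timeout=10)

def push_url(url):
    # The custom-URL variant is a plain GET, just like `curl "$PUSH_URL_START"`.
    urllib.request.urlopen(url, timeout=10)

push_metric(1)   # backup started
# ... run the backup ...
push_metric(0)   # backup succeeded; the cronjob pushes -1 on failure
```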
							
								
								
									
abra.sh (11 changed lines)
@@ -1,3 +1,12 @@
-export ENTRYPOINT_VERSION=v1
 export BACKUPBOT_VERSION=v1
 export SSH_CONFIG_VERSION=v1
+export ENTRYPOINT_VERSION=v17
+export CRONJOB_VERSION=v2
+
+run_cron () {
+    schedule="$(crontab -l | tr -s " " | cut -d ' ' -f-5)"
+    rm -f /tmp/backup.log
+    echo "* * * * *  $(crontab -l | tr -s " " | cut -d ' ' -f6-)" | crontab -
+    while [ ! -f /tmp/backup.log ]; do sleep 1; done
+    echo "$schedule $(crontab -l | tr -s " " | cut -d ' ' -f6-)" | crontab -
+}
							
								
								
									
backupbot.py (232 changed lines)
@@ -1,53 +1,85 @@
 #!/usr/bin/python3
 
 import os
+import sys
 import click
 import json
 import subprocess
 import logging
 import docker
 import restic
+import tarfile
+import io
+from pythonjsonlogger import jsonlogger
 from datetime import datetime, timezone
 from restic.errors import ResticFailedError
 from pathlib import Path
 from shutil import copyfile, rmtree
-# logging.basicConfig(level=logging.INFO)
 
 VOLUME_PATH = "/var/lib/docker/volumes/"
 SECRET_PATH = '/secrets/'
 SERVICE = None
 
+logger = logging.getLogger("backupbot")
+logging.addLevelName(55, 'SUMMARY')
+setattr(logging, 'SUMMARY', 55)
+setattr(logger, 'summary', lambda message, *args, **
+        kwargs: logger.log(55, message, *args, **kwargs))
+
+
+def handle_exception(exc_type, exc_value, exc_traceback):
+    if issubclass(exc_type, KeyboardInterrupt):
+        sys.__excepthook__(exc_type, exc_value, exc_traceback)
+        return
+    logger.critical("Uncaught exception", exc_info=(
+        exc_type, exc_value, exc_traceback))
+
+
+sys.excepthook = handle_exception
+
+
 @click.group()
 @click.option('-l', '--log', 'loglevel')
+@click.option('-m', '--machine-logs', 'machine_logs', is_flag=True)
 @click.option('service', '--host', '-h', envvar='SERVICE')
-@click.option('repository', '--repo', '-r', envvar='RESTIC_REPO', required=True)
-def cli(loglevel, service, repository):
+@click.option('repository', '--repo', '-r', envvar='RESTIC_REPOSITORY')
+def cli(loglevel, service, repository, machine_logs):
     global SERVICE
     if service:
         SERVICE = service.replace('.', '_')
     if repository:
-        os.environ['RESTIC_REPO'] = repository
+        os.environ['RESTIC_REPOSITORY'] = repository
     if loglevel:
         numeric_level = getattr(logging, loglevel.upper(), None)
         if not isinstance(numeric_level, int):
            raise ValueError('Invalid log level: %s' % loglevel)
-        logging.basicConfig(level=numeric_level)
+        logger.setLevel(numeric_level)
+    logHandler = logging.StreamHandler()
+    if machine_logs:
+        formatter = jsonlogger.JsonFormatter(
+            "%(levelname)s %(filename)s %(lineno)s %(process)d %(message)s", rename_fields={"levelname": "message_type"})
+        logHandler.setFormatter(formatter)
+    logger.addHandler(logHandler)
+
     export_secrets()
     init_repo()
 
 
 def init_repo():
-    repo = os.environ['RESTIC_REPO']
-    logging.debug(f"set restic repository location: {repo}")
-    restic.repository = repo
+    if repo:= os.environ.get('RESTIC_REPOSITORY_FILE'):
+        # RESTIC_REPOSITORY_FILE and RESTIC_REPOSITORY are mutually exclusive
+        del os.environ['RESTIC_REPOSITORY']
+    else:
+        repo = os.environ['RESTIC_REPOSITORY']
+        restic.repository = repo
+    logger.debug(f"set restic repository location: {repo}")
     restic.password_file = '/var/run/secrets/restic_password'
     try:
         restic.cat.config()
     except ResticFailedError as error:
         if 'unable to open config file' in str(error):
             result = restic.init()
-            logging.info(f"Initialized restic repo: {result}")
+            logger.info(f"Initialized restic repo: {result}")
         else:
             raise error
 
@@ -55,27 +87,28 @@ def init_repo():
 def export_secrets():
     for env in os.environ:
         if env.endswith('FILE') and not "COMPOSE_FILE" in env:
-            logging.debug(f"exported secret: {env}")
+            logger.debug(f"exported secret: {env}")
             with open(os.environ[env]) as file:
                 secret = file.read()
                 os.environ[env.removesuffix('_FILE')] = secret
-                # logging.debug(f"Read secret value: {secret}")
+                # logger.debug(f"Read secret value: {secret}")
 
 
 @cli.command()
-def create():
+@click.option('retries', '--retries', '-r', envvar='RETRIES', default=1)
+def create(retries):
     pre_commands, post_commands, backup_paths, apps = get_backup_cmds()
     copy_secrets(apps)
     backup_paths.append(SECRET_PATH)
     run_commands(pre_commands)
-    backup_volumes(backup_paths, apps)
+    backup_volumes(backup_paths, apps, int(retries))
     run_commands(post_commands)
 
 
 def get_backup_cmds():
     client = docker.from_env()
     container_by_service = {
-        c.labels['com.docker.swarm.service.name']: c for c in client.containers.list()}
+        c.labels.get('com.docker.swarm.service.name'): c for c in client.containers.list()}
     backup_paths = set()
     backup_apps = set()
     pre_commands = {}
@@ -84,42 +117,51 @@ def get_backup_cmds():
     for s in services:
         labels = s.attrs['Spec']['Labels']
         if (backup := labels.get('backupbot.backup')) and bool(backup):
+            # volumes: s.attrs['Spec']['TaskTemplate']['ContainerSpec']['Mounts'][0]['Source']
             stack_name = labels['com.docker.stack.namespace']
-            if SERVICE and SERVICE != stack_name:
-                continue
+            # Remove this lines to backup only a specific service
+            # This will unfortenately decrease restice performance
+            # if SERVICE and SERVICE != stack_name:
+            #     continue
             backup_apps.add(stack_name)
-            container = container_by_service.get(s.name)
-            if not container:
-                logging.error(
+            backup_paths = backup_paths.union(
+                Path(VOLUME_PATH).glob(f"{stack_name}_*"))
+            if not (container := container_by_service.get(s.name)):
+                logger.error(
                     f"Container {s.name} is not running, hooks can not be executed")
+                continue
             if prehook := labels.get('backupbot.backup.pre-hook'):
                 pre_commands[container] = prehook
             if posthook := labels.get('backupbot.backup.post-hook'):
                 post_commands[container] = posthook
-            backup_paths = backup_paths.union(
-                Path(VOLUME_PATH).glob(f"{stack_name}_*"))
     return pre_commands, post_commands, list(backup_paths), list(backup_apps)
 
 
 def copy_secrets(apps):
+    # TODO: check if it is deployed
     rmtree(SECRET_PATH, ignore_errors=True)
     os.mkdir(SECRET_PATH)
     client = docker.from_env()
     container_by_service = {
-        c.labels['com.docker.swarm.service.name']: c for c in client.containers.list()}
+        c.labels.get('com.docker.swarm.service.name'): c for c in client.containers.list()}
     services = client.services.list()
     for s in services:
         app_name = s.attrs['Spec']['Labels']['com.docker.stack.namespace']
         if (app_name in apps and
                 (app_secs := s.attrs['Spec']['TaskTemplate']['ContainerSpec'].get('Secrets'))):
             if not container_by_service.get(s.name):
-                logging.error(
+                logger.warning(
                     f"Container {s.name} is not running, secrets can not be copied.")
                 continue
             container_id = container_by_service[s.name].id
             for sec in app_secs:
                 src = f'/var/lib/docker/containers/{container_id}/mounts/secrets/{sec["SecretID"]}'
+                if not Path(src).exists():
+                    logger.error(
+                        f"For the secret {sec['SecretName']} the file {src} does not exist for {s.name}")
+                    continue
                 dst = SECRET_PATH + sec['SecretName']
+                logger.debug("Copy Secret {sec['SecretName']}")
                 copyfile(src, dst)
 
 
@@ -127,30 +169,45 @@ def run_commands(commands):
     for container, command in commands.items():
         if not command:
             continue
+        # Remove bash/sh wrapping
+        command = command.removeprefix('bash -c').removeprefix('sh -c').removeprefix(' ')
+        # Remove quotes surrounding the command
+        if (len(command) >= 2 and command[0] == command[-1] and (command[0] == "'" or command[0] == '"')):
+            command = command[1:-1]
         # Use bash's pipefail to return exit codes inside a pipe to prevent silent failure
-        command = command.removeprefix('bash -c \'').removeprefix('sh -c \'')
-        command = command.removesuffix('\'')
         command = f"bash -c 'set -o pipefail;{command}'"
+        logger.info(f"run command in {container.name}:")
+        logger.info(command)
         result = container.exec_run(command)
-        logging.info(f"run command in {container.name}")
-        logging.info(command)
         if result.exit_code:
-            logging.error(
+            logger.error(
                 f"Failed to run command {command} in {container.name}: {result.output.decode()}")
         else:
-            logging.info(result.output.decode())
+            logger.info(result.output.decode())
 
 
-def backup_volumes(backup_paths, apps, dry_run=False):
-    result = restic.backup(backup_paths, dry_run=dry_run, tags=apps)
-    print(result)
-    logging.info(result)
+def backup_volumes(backup_paths, apps, retries, dry_run=False):
+    while True:
+        try:
+            logger.info("Start volume backup")
+            logger.debug(backup_paths)
+            result = restic.backup(backup_paths, dry_run=dry_run, tags=apps)
+            logger.summary("backup finished", extra=result)
+            return
+        except ResticFailedError as error:
+            logger.error(
+                f"Backup failed for {apps}. Could not Backup these paths: {backup_paths}")
+            logger.error(error, exc_info=True)
+            if retries > 0:
+                retries -= 1
+            else:
+                exit(1)
 
 
 @cli.command()
 @click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', default='latest')
 @click.option('target', '--target', '-t', envvar='TARGET', default='/')
-@click.option('noninteractive', '--noninteractive', envvar='NONINTERACTIVE', default=False)
+@click.option('noninteractive', '--noninteractive', envvar='NONINTERACTIVE', is_flag=True)
 def restore(snapshot, target, noninteractive):
     # Todo: recommend to shutdown the container
     service_paths = VOLUME_PATH
@@ -158,30 +215,41 @@ def restore(snapshot, target, noninteractive):
         service_paths = service_paths + f'{SERVICE}_*'
     snapshots = restic.snapshots(snapshot_id=snapshot)
     if not snapshot:
-        logging.error("No Snapshots with ID {snapshots}")
+        logger.error("No Snapshots with ID {snapshots}")
         exit(1)
     if not noninteractive:
         snapshot_date = datetime.fromisoformat(snapshots[0]['time'])
         delta = datetime.now(tz=timezone.utc) - snapshot_date
-        print(f"You are going to restore Snapshot {snapshot} of {service_paths} at {target}")
+        print(
+            f"You are going to restore Snapshot {snapshot} of {service_paths} at {target}")
         print(f"This snapshot is {delta} old")
-        print(f"THIS COMMAND WILL IRREVERSIBLY OVERWRITES {target}{service_paths.removeprefix('/')}")
+        print(
+            f"THIS COMMAND WILL IRREVERSIBLY OVERWRITES {target}{service_paths.removeprefix('/')}")
         prompt = input("Type YES (uppercase) to continue: ")
         if prompt != 'YES':
-            logging.error("Restore aborted")
+            logger.error("Restore aborted")
             exit(1)
     print(f"Restoring Snapshot {snapshot} of {service_paths} at {target}")
+    # TODO: use tags if no snapshot is selected, to use a snapshot including SERVICE
     result = restic.restore(snapshot_id=snapshot,
                             include=service_paths, target_dir=target)
-    logging.debug(result)
+    logger.debug(result)
 
 
 @cli.command()
 def snapshots():
     snapshots = restic.snapshots()
+    no_snapshots = True
     for snap in snapshots:
         if not SERVICE or (tags := snap.get('tags')) and SERVICE in tags:
             print(snap['time'], snap['id'])
+            no_snapshots = False
+    if no_snapshots:
+        err_msg = "No Snapshots found"
+        if SERVICE:
+            service_name = SERVICE.replace('_', '.')
+            err_msg += f' for app {service_name}'
+        logger.warning(err_msg)
 
 
 @cli.command()
@@ -201,7 +269,17 @@ def list_files(snapshot, path):
     cmd.append(snapshot)
     if path:
         cmd.append(path)
-    output = restic.internal.command_executor.execute(cmd)
+    try:
+        output = restic.internal.command_executor.execute(cmd)
+    except ResticFailedError as error:
+        if 'no snapshot found' in str(error):
+            err_msg = f'There is no snapshot "{snapshot}"'
+            if SERVICE:
+                err_msg += f' for the app "{SERVICE}"'
+            logger.error(err_msg)
+            exit(1)
+        else:
+            raise error
     output = output.replace('}\n{', '}|{')
     results = list(map(json.loads, output.split('|')))
     return results
@@ -210,62 +288,82 @@ def list_files(snapshot, path):
 @cli.command()
 @click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', default='latest')
 @click.option('path', '--path', '-p', envvar='INCLUDE_PATH')
-@click.option('volumes', '--volumes', '-v', is_flag=True)
-@click.option('secrets', '--secrets', '-c', is_flag=True)
+@click.option('volumes', '--volumes', '-v', envvar='VOLUMES')
+@click.option('secrets', '--secrets', '-c', is_flag=True, envvar='SECRETS')
 def download(snapshot, path, volumes, secrets):
-    if sum(map(bool, [path, volumes, secrets])) != 1:
-        logging.error("Please specify exactly one of '--path', '--volumes', '--secrets'")
-        exit(1)
+    file_dumps = []
+    if not any([path, volumes, secrets]):
+        volumes = secrets = True
     if path:
         path = path.removesuffix('/')
+        binary_output = dump(snapshot, path)
         files = list_files(snapshot, path)
         filetype = [f.get('type') for f in files if f.get('path') == path][0]
-        filename = "/tmp/" + Path(path).name
+        filename = Path(path).name
         if filetype == 'dir':
             filename = filename + ".tar"
-        output = dump(snapshot, path)
-        with open(filename, "wb") as file:
-            file.write(output)
-        print(filename)
-    elif volumes:
+        tarinfo = tarfile.TarInfo(name=filename)
+        tarinfo.size = len(binary_output)
+        file_dumps.append((binary_output, tarinfo))
+    if volumes:
         if not SERVICE:
-            logging.error("Please specify '--host' when using '--volumes'")
+            logger.error("Please specify '--host' when using '--volumes'")
             exit(1)
-        filename = f"/tmp/{SERVICE}.tar"
         files = list_files(snapshot, VOLUME_PATH)
         for f in files[1:]:
-            path = f[ 'path' ]
-            if SERVICE in path and f['type'] == 'dir':
-                content = dump(snapshot, path)
-                # Concatenate tar files (extract with tar -xi)
-                with open(filename, "ab") as file:
-                    file.write(content)
-    elif secrets:
+            path = f['path']
+            if Path(path).name.startswith(SERVICE) and f['type'] == 'dir':
+                binary_output = dump(snapshot, path)
+                filename = f"{Path(path).name}.tar"
+                tarinfo = tarfile.TarInfo(name=filename)
+                tarinfo.size = len(binary_output)
+                file_dumps.append((binary_output, tarinfo))
+    if secrets:
         if not SERVICE:
-            logging.error("Please specify '--host' when using '--secrets'")
+            logger.error("Please specify '--host' when using '--secrets'")
             exit(1)
-        filename = f"/tmp/SECRETS_{SERVICE}.json"
+        filename = f"{SERVICE}.json"
        files = list_files(snapshot, SECRET_PATH)
         secrets = {}
         for f in files[1:]:
-            path = f[ 'path' ]
-            if SERVICE in path and f['type'] == 'file':
+            path = f['path']
+            if Path(path).name.startswith(SERVICE) and f['type'] == 'file':
                 secret = dump(snapshot, path).decode()
                 secret_name = path.removeprefix(f'{SECRET_PATH}{SERVICE}_')
                 secrets[secret_name] = secret
-        with open(filename, "w") as file:
-            json.dump(secrets, file)
-        print(filename)
+        binary_output = json.dumps(secrets).encode()
+        tarinfo = tarfile.TarInfo(name=filename)
+        tarinfo.size = len(binary_output)
+        file_dumps.append((binary_output, tarinfo))
+    with tarfile.open('/tmp/backup.tar.gz', "w:gz") as tar:
+        print(f"Writing files to /tmp/backup.tar.gz...")
+        for binary_output, tarinfo in file_dumps:
+            tar.addfile(tarinfo, fileobj=io.BytesIO(binary_output))
+    size = get_formatted_size('/tmp/backup.tar.gz')
+    print(
+        f"Backup has been written to /tmp/backup.tar.gz with a size of {size}")
+
+
+def get_formatted_size(file_path):
+    file_size = os.path.getsize(file_path)
+    units = ['Bytes', 'KB', 'MB', 'GB', 'TB']
+    for unit in units:
+        if file_size < 1024:
+            return f"{round(file_size, 3)} {unit}"
+        file_size /= 1024
+    return f"{round(file_size, 3)} {units[-1]}"
+
+
 def dump(snapshot, path):
     cmd = restic.cat.base_command() + ['dump']
     if SERVICE:
         cmd = cmd + ['--tag', SERVICE]
-    cmd = cmd +[snapshot, path]
-    logging.debug(f"Dumping {path} from snapshot '{snapshot}'")
+    cmd = cmd + [snapshot, path]
+    print(f"Dumping {path} from snapshot '{snapshot}'")
     output = subprocess.run(cmd, capture_output=True)
     if output.returncode:
-        logging.error(f"error while dumping {path} from snapshot '{snapshot}': {output.stderr}")
+        logger.error(
+            f"error while dumping {path} from snapshot '{snapshot}': {output.stderr}")
         exit(1)
     return output.stdout
 
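One detail worth noting in the rewrite: `backupbot.py` registers a custom log level 55 (above CRITICAL) named SUMMARY plus a `logger.summary()` helper, and `cronjob.sh` later greps the captured log for the resulting "backup finished" line to decide which push notification to send. A self-contained sketch of that logging pattern, independent of the bot itself and using an invented `extra` payload:

```
import logging

logger = logging.getLogger("backupbot")
logging.addLevelName(55, 'SUMMARY')
setattr(logging, 'SUMMARY', 55)
# summary() always gets through, because level 55 sits above CRITICAL (50).
setattr(logger, 'summary',
        lambda message, *args, **kwargs: logger.log(55, message, *args, **kwargs))

logging.basicConfig(level=logging.WARNING)
logger.summary("backup finished", extra={"files_new": 3})  # extra mirrors restic's summary dict
```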
							
								
								
									
compose.pushbasicauth.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+version: "3.8"
+services:
+  app:
+    secrets:
+      - push_basicauth
+
+secrets:
+  push_basicauth:
+    external: true
+    name: ${STACK_NAME}_push_basicauth_${SECRET_PUSH_BASICAUTH}
(file name not captured in this view)
@@ -3,7 +3,7 @@ version: "3.8"
 services:
   app:
     environment:
-      - RESTIC_REPO_FILE=/run/secrets/restic_repo
+      - RESTIC_REPOSITORY_FILE=/run/secrets/restic_repo
     secrets:
       - restic_repo
 
							
								
								
									
compose.yml (35 changed lines)
@@ -2,7 +2,7 @@
 version: "3.8"
 services:
   app:
-    image: docker:24.0.2-dind
+    image: git.coopcloud.tech/coop-cloud/backup-bot-two:2.0.0
     volumes:
       - "/var/run/docker.sock:/var/run/docker.sock"
       - "/var/lib/docker/volumes/:/var/lib/docker/volumes/"
@@ -10,26 +10,23 @@ services:
       - backups:/backups
     environment:
       - CRON_SCHEDULE
-      - RESTIC_REPO
+      - RESTIC_REPOSITORY
       - RESTIC_PASSWORD_FILE=/run/secrets/restic_password
     secrets:
       - restic_password
+    configs:
+      - source: entrypoint
+        target: /entrypoint.sh
+        mode: 666
+      - source: cronjob
+        target: /cronjob.sh
+        mode: 666
     deploy:
       labels:
         - coop-cloud.${STACK_NAME}.version=0.1.0+latest
         - coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-300}
         - coop-cloud.backupbot.enabled=true
-    configs:
-      - source: entrypoint
-        target: /entrypoint.sh
-        mode: 0555
-      - source: backupbot
-        target: /usr/bin/backup
-        mode: 0555
-    entrypoint: ['/entrypoint.sh']
-    deploy:
-      labels:
-        - "coop-cloud.backupbot.enabled=true"
+    #entrypoint: ['tail', '-f','/dev/null']
     healthcheck:
       test: "pgrep crond"
       interval: 30s
@@ -42,13 +39,13 @@ secrets:
     external: true
     name: ${STACK_NAME}_restic_password_${SECRET_RESTIC_PASSWORD_VERSION}
 
-volumes:
-  backups:
-
 configs:
   entrypoint:
     name: ${STACK_NAME}_entrypoint_${ENTRYPOINT_VERSION}
     file: entrypoint.sh
-  backupbot:
-    name: ${STACK_NAME}_backupbot_${BACKUPBOT_VERSION}
-    file: backupbot.py
+  cronjob:
+    name: ${STACK_NAME}_cronjob_${CRONJOB_VERSION}
+    file: cronjob.sh
+
+volumes:
+  backups:
							
								
								
									
cronjob.sh (new executable file, 40 lines)
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+set -e
+
+CURL_OPTS="-s"
+# Check for basic auth
+if [ -n "$(cat /run/secrets/push_basicauth)" ]
+then
+    CURL_OPTS="$CURL_OPTS -u $(cat /run/secrets/push_basicauth)"
+fi
+
+if [ -n "$PUSH_PROMETHEUS_URL" ]
+then
+    push_start_notification="(echo 'backup 1' | curl $CURL_OPTS --data-binary @- $PUSH_PROMETHEUS_URL)"
+    push_success_notification="(echo 'backup 0' | curl $CURL_OPTS --data-binary @- $PUSH_PROMETHEUS_URL)"
+    push_fail_notification="(echo 'backup -1' | curl $CURL_OPTS --data-binary @- $PUSH_PROMETHEUS_URL)"
+else
+    if [ -n "$PUSH_URL_START" ]
+    then
+        push_start_notification="curl $CURL_OPTS '$PUSH_URL_START'"
+    fi
+
+    if [ -n "$PUSH_URL_FAIL" ]
+    then
+        push_fail_notification="curl $CURL_OPTS '$PUSH_URL_FAIL'"
+    fi
+
+    if [ -n "$PUSH_URL_SUCCESS" ]
+    then
+        push_success_notification="curl $CURL_OPTS '$PUSH_URL_SUCCESS'"
+    fi
+fi
+
+eval "$push_start_notification"
+if [ "$(backup --machine-logs create  2>&1 | tee /tmp/backup.log && (grep -q 'backup finished' /tmp/backup.log))" ]
+then
+    eval "$push_success_notification"
+else
+    eval "$push_fail_notification"
+fi
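Since `backup --machine-logs create` emits one JSON object per log line (via python-json-logger), a monitoring hook could also parse `/tmp/backup.log` rather than grepping it. A hedged sketch, assuming the field names produced by the formatter configured in `backupbot.py`:

```
import json
import sys

def backup_succeeded(log_path="/tmp/backup.log"):
    with open(log_path) as fh:
        for line in fh:
            try:
                record = json.loads(line)
            except json.JSONDecodeError:
                continue  # ignore non-JSON output mixed into the captured log
            if record.get("message") == "backup finished":
                return True
    return False

sys.exit(0 if backup_succeeded() else 1)
```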
							
								
								
									
entrypoint.sh (9 changed lines; file mode changed from normal to executable)
@@ -1,11 +1,6 @@
 #!/bin/sh
 
-set -e -o pipefail
+set -e
 
-apk add --upgrade --no-cache restic bash python3 py3-pip
-
-# Todo use requirements file with specific versions
-pip install click==8.1.7 docker==6.1.3 resticpy==1.0.2
-
 if [ -n "$SSH_HOST_KEY" ]
 then
@@ -14,7 +9,7 @@ fi
 
 cron_schedule="${CRON_SCHEDULE:?CRON_SCHEDULE not set}"
 
-echo "$cron_schedule backup create" | crontab -
+echo "$cron_schedule /cronjob.sh" | crontab -
 crontab -l
 
 crond -f -d8 -L /dev/stdout
(file name not captured in this view)
@@ -1,3 +1,3 @@
-Breaking Change: the variables `SERVER_NAME` and `RESTIC_HOST` are merged into `RESTIC_REPO`. The format can be looked up here: https://restic.readthedocs.io/en/stable/030_preparing_a_new_repo.html
+Breaking Change: the variables `SERVER_NAME` and `RESTIC_HOST` are merged into `RESTIC_REPOSITORY`. The format can be looked up here: https://restic.readthedocs.io/en/stable/030_preparing_a_new_repo.html
 ssh/sftp: `sftp:user@host:/repo-path`
 S3:  `s3:https://s3.example.com/bucket_name`