Compare commits

2.0.1+2.1. ... main (2 commits)

| SHA1 |
|---|
| 0abb4827e7 |
| 0588a06a97 |
							
								
								
									
.drone.yml (17 changed lines)

@@ -1,17 +0,0 @@
---
kind: pipeline
name: linters
steps:
  - name: publish image
    image: plugins/docker
    settings:
      username: 3wordchant
      password:
        from_secret: git_coopcloud_tech_token_3wc
      repo: git.coopcloud.tech/coop-cloud/backup-bot-two
      tags: ${DRONE_SEMVER_BUILD}
      registry: git.coopcloud.tech
    when:
      event:
        include:
          - tag
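The pipeline removed above is what published the `backup-bot-two` image to git.coopcloud.tech on tag events. If an image still needs to be published by hand, a rough sketch of an equivalent might look like this; it assumes a local copy of the Dockerfile that is also deleted in this diff, push access to the registry, and uses the `2.1.1-beta` tag from the old compose.yml purely as an example.

```
# Hypothetical manual replacement for the removed Drone publish step.
docker login git.coopcloud.tech
docker build -t git.coopcloud.tech/coop-cloud/backup-bot-two:2.1.1-beta .
docker push git.coopcloud.tech/coop-cloud/backup-bot-two:2.1.1-beta
```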
							
								
								
									
.env.sample (28 changed lines)

@@ -1,34 +1,10 @@
TYPE=backup-bot-two

SECRET_RESTIC_PASSWORD_VERSION=v1

COMPOSE_FILE=compose.yml
STACK_NAME=backup-bot-two

RESTIC_REPOSITORY=/backups/restic

CRON_SCHEDULE='30 3 * * *'

# Push Notifiactions
# Push Notifications
#PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start
#PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK
#PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail

# swarm-cronjob, instead of built-in cron
#COMPOSE_FILE="$COMPOSE_FILE:compose.swarm-cronjob.yml"

# SSH storage
#SECRET_SSH_KEY_VERSION=v1
#SSH_HOST_KEY="hostname ssh-rsa AAAAB3...
#COMPOSE_FILE="$COMPOSE_FILE:compose.ssh.yml"

# S3 storage
#SECRET_AWS_SECRET_ACCESS_KEY_VERSION=v1
#AWS_ACCESS_KEY_ID=something-secret
#COMPOSE_FILE="$COMPOSE_FILE:compose.s3.yml"

# Secret restic repository
# use a secret to store the RESTIC_REPOSITORY if the repository location contains a secret value
# i.E rest:https://user:SECRET_PASSWORD@host:8000/
# it overwrites the RESTIC_REPOSITORY variable
#SECRET_RESTIC_REPO_VERSION=v1
#COMPOSE_FILE="$COMPOSE_FILE:compose.secret.yml"

@@ -1,17 +0,0 @@
export RESTIC_HOST="user@domain.tld"
export RESTIC_PASSWORD_FILE=/run/secrets/restic-password
export BACKUP_DEST=/backups

export SERVER_NAME=domain.tld
export DOCKER_CONTEXT=$SERVER_NAME

# uncomment either this:
#export SSH_KEY_FILE=~/.ssh/id_rsa
# or this:
#export AWS_SECRET_ACCESS_KEY_FILE=s3
#export AWS_ACCESS_KEY_ID=easter-october-emphatic-tug-urgent-customer
# or this:
#export HTTPS_PASSWORD_FILE=/run/secrets/https_password

# optionally limit subset of services for testing
#export SERVICES_OVERRIDE="ghost_domain_tld_app ghost_domain_tld_db"
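The trimmed-down `.env.sample` above is meant to be loaded straight into the shell before deploying, as the new README text later in this diff describes. A minimal sketch of that step (variable names are taken from the sample; adjust if your `.env` differs):

```
# Export every variable defined in .env into the current shell session
set -a && source .env && set +a

# Quick sanity check that the values were picked up
echo "$STACK_NAME $CRON_SCHEDULE"
```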
							
								
								
									
.gitignore (2 changed lines, vendored)

@@ -1 +1 @@
/testing
.env

@@ -1,6 +0,0 @@
# Change log

## 2.0.0 (unreleased)

- Rewrite from Bash to Python
- Add support for push notifications (#24)
							
								
								
									
Dockerfile (11 changed lines)

@@ -1,11 +0,0 @@
FROM docker:24.0.7-dind

RUN apk add --upgrade --no-cache restic bash python3 py3-pip py3-click py3-docker-py py3-json-logger curl

# Todo use requirements file with specific versions
RUN pip install --break-system-packages resticpy==1.0.2

COPY backupbot.py /usr/bin/backup
COPY entrypoint.sh /entrypoint.sh

ENTRYPOINT /entrypoint.sh
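With the Dockerfile gone, the image is no longer pre-baked: the new compose.yaml later in this diff runs the stock `docker:24.0.7-dind` image and the entrypoint installs restic and resticpy at container start. A hedged spot check that the tooling is present once the service is running (the container name is a placeholder):

```
# Placeholder container name; find yours with: docker ps --filter "name=backup-bot-two_app"
docker exec -it <container_id_or_name> restic version
docker exec -it <container_id_or_name> python3 -c "import restic; print('resticpy available')"
```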
							
								
								
									
README.md (204 changed lines)

@@ -1,106 +1,27 @@
# Backupbot II

[](https://build.coopcloud.tech/coop-cloud/backup-bot-two)

_This Time, It's Easily Configurable_

Automatically take backups from all volumes of running Docker Swarm services and runs pre- and post commands.

<!-- metadata -->

* **Category**: Utilities
* **Status**: 0, work-in-progress
* **Image**: [`git.coopcloud.tech/coop-cloud/backup-bot-two`](https://git.coopcloud.tech/coop-cloud/-/packages/container/backup-bot-two), 4, upstream
* **Healthcheck**: No
* **Backups**: N/A
* **Email**: N/A
* **Tests**: No
* **SSO**: N/A

<!-- endmetadata -->
Wiki Cafe's configuration for a Backupbot II deployment. Originally slimmed down from an `abra` [recipe](https://git.coopcloud.tech/coop-cloud/backup-bot-two) by [Co-op Cloud](https://coopcloud.tech/).


## Background
## Deploying the app with Docker Swarm

There are lots of Docker volume backup systems; all of them have one or both of these limitations:
 - You need to define all the volumes to back up in the configuration system
 - Backups require services to be stopped to take consistent copies
Set the environment variables from the .env file during the shell session.

Backupbot II tries to help, by
1. **letting you define backups using Docker labels**, so you can **easily collect your backups for use with another system** like docker-volume-backup.
2. **running pre- and post-commands** before and after backups, for example to use database tools to take a backup from a running service.

## Deployment

### With Co-op Cloud


* `abra app new backup-bot-two`
* `abra app config <app-name>`
    - set storage options. Either configure `CRON_SCHEDULE`, or set up `swarm-cronjob`
* `abra app secret generate -a <backupbot_name>`
* `abra app deploy <app-name>`

## Configuration

Per default Backupbot stores the backups locally in the repository `/backups/restic`, which is accessible as volume at `/var/lib/docker/volumes/<backupbot_name>_backups/_data/restic/`

The backup location can be changed using the `RESTIC_REPOSITORY` env variable.

### S3 Storage

To use S3 storage as backup location set the following envs:
```
RESTIC_REPOSITORY=s3:<S3-SERVICE-URL>/<BUCKET-NAME>
SECRET_AWS_SECRET_ACCESS_KEY_VERSION=v1
AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
COMPOSE_FILE="$COMPOSE_FILE:compose.s3.yml"
set -a && source .env && set +a
```
and add your `<SECRET_ACCESS_KEY>` as docker secret:
`abra app secret insert <backupbot_name> aws_secret_access_key v1 <SECRET_ACCESS_KEY>`

See [restic s3 docs](https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#amazon-s3) for more information.
Set the secrets.

### SFTP Storage

> With sftp it is not possible to prevent the backupbot from deleting backups in case of a compromised machine. Therefore we recommend to use S3, REST or rclone server without delete permissions.

To use SFTP storage as backup location set the following envs:
```
RESTIC_REPOSITORY=sftp:user@host:/restic-repo-path
SECRET_SSH_KEY_VERSION=v1
SSH_HOST_KEY="hostname ssh-rsa AAAAB3...
COMPOSE_FILE="$COMPOSE_FILE:compose.ssh.yml"
printf "SECRET_HERE" | docker secret create SECRET_NAME -
```
To get the `SSH_HOST_KEY` run the following command `ssh-keyscan <hostname>`

Generate an ssh keypair: `ssh-keygen -t ed25519 -f backupkey -P ''`
Add the key to your `authorized_keys`:
`ssh-copy-id -i backupkey <user>@<hostname>`
Add your `SSH_KEY` as docker secret:
```
abra app secret insert <backupbot_name> ssh_key v1 """$(cat backupkey)
"""
```
> Attention: This command needs to be executed exactly as stated above, because it places a trailing newline at the end, if this is missing you will get the following error: `Load key "/run/secrets/ssh_key": error in libcrypto`
Deploy using the `-c` flag to specify one or multiple compose files.

### Restic REST server Storage

You can simply set the `RESTIC_REPOSITORY` variable to your REST server URL `rest:http://host:8000/`.
If you access the REST server with a password `rest:https://user:pass@host:8000/` you should hide the whole URL containing the password inside a secret.
Uncomment these lines:
```
SECRET_RESTIC_REPO_VERSION=v1
COMPOSE_FILE="$COMPOSE_FILE:compose.secret.yml"
docker stack deploy backup-bot-two -c compose.yaml
```
Add your REST server url as secret:
```
abra app secret insert <backupbot_name> restic_repo v1 "rest:https://user:pass@host:8000/"
```
The secret will overwrite the `RESTIC_REPOSITORY` variable.


See [restic REST docs](https://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#rest-server) for more information.

## Push notifications

@@ -113,71 +34,82 @@ PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK
PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail
```

## Commands

## Usage

Run the cronjob that creates a backup, including the push notifications and docker logging:
`abra app cmd <backupbot_name> app run_cron`
- Find the ID or name of the backup container:
   ```
   docker ps --filter "name=backup-bot-two_app"
   ```

Create a backup of all apps:
2. Run the desired command using `docker exec`:
   ```
   docker exec -it <container_id_or_name> backup <command> [options]
   ```
   Replace `<container_id_or_name>` with the ID or name of the backup container.

`abra app run <backupbot_name> app -- backup create`
   Available commands:
   - `create`: Initiate the backup process.
   - `restore`: Restore a specific snapshot to a target directory.
   - `snapshots`: List available snapshots.
   - `ls`: List files in a specific snapshot.
   - `download`: Download specific files, volumes, or secrets from a snapshot.

> The apps to backup up need to be deployed
   Options:
   - `--host`, `-h`: Specify the service name (e.g., `app`).
   - `--repo`, `-r`: Specify the Restic repository location (e.g., `/run/secrets/restic_repo`).
   - `--log`, `-l`: Set the log level (e.g., `debug`, `info`, `warning`, `error`).
   - `--machine-logs`, `-m`: Enable machine-readable JSON logging.

Create an individual backup:
## Examples

`abra app run <backupbot_name> app -- backup --host <target_app_name> create`

Create a backup to a local repository:

`abra app run <backupbot_name> app -- backup create -r /backups/restic`

> It is recommended to shutdown/undeploy an app before restoring the data

Restore the latest snapshot of all including apps:

`abra app run <backupbot_name> app -- backup restore`

Restore a specific snapshot of an individual app:

`abra app run <backupbot_name> app -- backup --host <target_app_name> restore --snapshot <snapshot_id>`

Show all snapshots:

`abra app run <backupbot_name> app -- backup snapshots`

Show all snapshots containing a specific app:

`abra app run <backupbot_name> app -- backup --host <target_app_name> snapshots`

Show all files inside the latest snapshot (can be very verbose):

`abra app run <backupbot_name> app -- backup ls`

Show specific files inside a selected snapshot:

`abra app run <backupbot_name> app -- backup ls --snapshot <snapshot_id> --path /var/lib/docker/volumes/`

Download files from a snapshot:
Create a backup:

```
filename=$(abra app run <backupbot_name> app -- backup download --snapshot <snapshot_id> --path <absolute_path>)
abra app cp <backupbot_name> app:$filename .
docker exec -it <container_id_or_name> backup create --host app
```

## Run restic
Restore a snapshot:

```
abra app run <backupbot_name> app bash
export AWS_SECRET_ACCESS_KEY=$(cat $AWS_SECRET_ACCESS_KEY_FILE)
export RESTIC_PASSWORD=$(cat $RESTIC_PASSWORD_FILE)
restic snapshots
docker exec -it <container_id_or_name> backup restore --snapshot <snapshot_id> --target /path/to/restore
```

List snapshots:

  ```
  docker exec -it <container_id_or_name> backup snapshots
  ```

List files in a snapshot:

```
docker exec -it <container_id_or_name> backup ls --snapshot <snapshot_id> --path /path/to/directory
```

Download files, volumes, or secrets from a snapshot:

```
docker exec -it <container_id_or_name> backup download --snapshot <snapshot_id> [--path /path/to/file] [--volumes] [--secrets]
```

Note: Make sure to replace `<container_id_or_name>` and `<snapshot_id>` with the appropriate values for your setup.

Remember to review and adjust the Docker Compose file and environment variables according to your specific requirements before running the backup commands.

When using `docker exec`, you don't need to specify the volume mounts or the Restic repository location as command-line arguments because they are already defined in the Docker Compose file and are available within the running container.

If you need to access the downloaded files, volumes, or secrets from the backup, you can use `docker cp` to copy them from the container to the host machine:

```
docker cp <container_id_or_name>:/path/to/backup/file /path/on/host
```

This allows you to retrieve the backed-up data from the container.

## Recipe Configuration

Like Traefik, or `swarm-cronjob`, Backupbot II uses access to the Docker socket to read labels from running Docker Swarm services:
Backupbot II uses access to the Docker socket to read labels from running Docker Swarm services:

```
services:
@@ -194,5 +126,3 @@ services:
- `backupbot.backup.post-hook` -- command to run after copying files (optional)

As in the above example, you can reference Docker Secrets, e.g. for looking up database passwords, by reading the files in `/run/secrets` directly.

[abra]: https://git.autonomic.zone/autonomic-cooperative/abra
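The "Recipe Configuration" part of the README above explains that Backupbot II discovers what to back up from Docker Swarm service labels (`backupbot.backup`, plus optional `backupbot.backup.pre-hook` and `backupbot.backup.post-hook`). The README's compose snippet is truncated in this diff, so here is a hedged sketch of attaching those labels to an already running service; the service name and the pg_dump hook are made-up examples, not part of this repository.

```
# Hypothetical target service "myapp_db"; the hooks run inside that service's container.
docker service update \
  --label-add backupbot.backup=true \
  --label-add 'backupbot.backup.pre-hook=sh -c "pg_dump -U postgres app > /var/lib/postgresql/data/dump.sql"' \
  --label-add 'backupbot.backup.post-hook=rm -f /var/lib/postgresql/data/dump.sql' \
  myapp_db
```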
							
								
								
									
abra.sh (10 changed lines)

@@ -1,10 +0,0 @@
export BACKUPBOT_VERSION=v1
export SSH_CONFIG_VERSION=v1

run_cron () {
    schedule="$(crontab -l | tr -s " " | cut -d ' ' -f-5)"
    rm -f /tmp/backup.log
    echo "* * * * *  $(crontab -l | tr -s " " | cut -d ' ' -f6-)" | crontab -
    while [ ! -f /tmp/backup.log ]; do sleep 1; done
    echo "$schedule $(crontab -l | tr -s " " | cut -d ' ' -f6-)" | crontab -
}
							
								
								
									
backupbot.py (306 changed lines)

@@ -18,7 +18,7 @@ from shutil import copyfile, rmtree

VOLUME_PATH = "/var/lib/docker/volumes/"
SECRET_PATH = '/secrets/'
SERVICE = 'ALL'
SERVICE = None

logger = logging.getLogger("backupbot")
logging.addLevelName(55, 'SUMMARY')
@@ -54,25 +54,21 @@ def cli(loglevel, service, repository, machine_logs):
        if not isinstance(numeric_level, int):
            raise ValueError('Invalid log level: %s' % loglevel)
        logger.setLevel(numeric_level)
    logHandler = logging.StreamHandler()
    if machine_logs:
        logHandler = logging.StreamHandler()
        formatter = jsonlogger.JsonFormatter(
            "%(levelname)s %(filename)s %(lineno)s %(process)d %(message)s", rename_fields={"levelname": "message_type"})
        logHandler.setFormatter(formatter)
    logger.addHandler(logHandler)
        logger.addHandler(logHandler)

    export_secrets()
    init_repo()


def init_repo():
    if repo:= os.environ.get('RESTIC_REPOSITORY_FILE'):
        # RESTIC_REPOSITORY_FILE and RESTIC_REPOSITORY are mutually exclusive
        del os.environ['RESTIC_REPOSITORY']
    else:
        repo = os.environ['RESTIC_REPOSITORY']
        restic.repository = repo
    repo = os.environ['RESTIC_REPOSITORY']
    logger.debug(f"set restic repository location: {repo}")
    restic.repository = repo
    restic.password_file = '/var/run/secrets/restic_password'
    try:
        restic.cat.config()
@@ -91,203 +87,54 @@ def export_secrets():
            with open(os.environ[env]) as file:
                secret = file.read()
                os.environ[env.removesuffix('_FILE')] = secret
                # logger.debug(f"Read secret value: {secret}")

            if env == 'RESTIC_REPOSITORY_FILE':
                # RESTIC_REPOSITORY_FILE and RESTIC_REPOSITORY are mutually exclusive
                logger.info("RESTIC_REPOSITORY set to RESTIC_REPOSITORY_FILE. Unsetting RESTIC_REPOSITORY_FILE.")
                del os.environ['RESTIC_REPOSITORY_FILE']


@cli.command()
@click.option('retries', '--retries', '-r', envvar='RETRIES', default=1)
def create(retries):
    app_settings = parse_backup_labels()
    pre_commands, post_commands, backup_paths, apps = get_backup_details(app_settings)
    pre_commands, post_commands, backup_paths, apps = get_backup_cmds()
    copy_secrets(apps)
    backup_paths.append(Path(SECRET_PATH))
    backup_paths.append(SECRET_PATH)
    run_commands(pre_commands)
    backup_volumes(backup_paths, apps, int(retries))
    run_commands(post_commands)


@cli.command()
@click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', default='latest')
@click.option('target', '--target', '-t', envvar='TARGET', default='/')
@click.option('noninteractive', '--noninteractive', envvar='NONINTERACTIVE', is_flag=True)
@click.option('volumes', '--volumes', '-v', envvar='VOLUMES', multiple=True)
@click.option('container', '--container', '-c', envvar='CONTAINER', multiple=True)
@click.option('no_commands', '--no-commands', envvar='NO_COMMANDS', is_flag=True)
def restore(snapshot, target, noninteractive, volumes, container, no_commands):
    app_settings = parse_backup_labels('restore', container)
    if SERVICE != 'ALL':
        app_settings = {SERVICE: app_settings[SERVICE]}
    pre_commands, post_commands, backup_paths, apps = get_backup_details(app_settings, volumes)
    snapshots = get_snapshots(snapshot_id=snapshot)
    if not snapshot:
        logger.error("No Snapshots with ID {snapshots} for {apps} found.")
        exit(1)
    if not noninteractive:
        snapshot_date = datetime.fromisoformat(snapshots[0]['time'])
        delta = datetime.now(tz=timezone.utc) - snapshot_date
        print(f"You are going to restore Snapshot {snapshot} of {apps} at {target}")
        print("The following volume paths will be restored:")
        for p in backup_paths:
            print(f'\t{p}')
        if not no_commands:
            print("The following commands will be executed:")
            for container, cmd in list(pre_commands.items()) + list(post_commands.items()):
                print(f"\t{container.labels['com.docker.swarm.service.name']}:\t{cmd}")
        print(f"This snapshot is {delta} old")
        print("\nTHIS COMMAND WILL IRREVERSIBLY OVERWRITES FILES")
        prompt = input("Type YES (uppercase) to continue: ")
        if prompt != 'YES':
            logger.error("Restore aborted")
            exit(1)
    print(f"Restoring Snapshot {snapshot} at {target}")
    if not no_commands and pre_commands:
        print(f"Run pre commands.")
        run_commands(pre_commands)
    result = restic_restore(snapshot_id=snapshot, include=backup_paths, target_dir=target)
    if not no_commands and post_commands:
        print(f"Run post commands.")
        run_commands(post_commands)
    logger.debug(result)


def restic_restore(snapshot_id='latest', include=[], target_dir=None):
    cmd = restic.cat.base_command() + ['restore', snapshot_id]
    for path in include:
        cmd.extend(['--include', path])
    if target_dir:
        cmd.extend(['--target', target_dir])
    return restic.internal.command_executor.execute(cmd)


def get_snapshots(snapshot_id=None):
    if snapshot_id and snapshot_id != 'latest':
        snapshots = restic.snapshots(snapshot_id=snapshot_id)
        if SERVICE not in snapshots[0]['tags']:
            logger.error(f'Snapshot with ID {snapshot_id} does not contain {SERVICE}')
            exit(1)
    else:
        snapshots = restic.snapshots()
        snapshots = list(filter(lambda x: x.get('tags') and SERVICE in x.get('tags'), snapshots))
    if snapshot_id == 'latest':
        return snapshots[-1:]
    else:
        return snapshots


def parse_backup_labels(hook_type='backup', selected_container=[]):
def get_backup_cmds():
    client = docker.from_env()
    container_by_service = {
        c.labels.get('com.docker.swarm.service.name'): c for c in client.containers.list()}
    services = client.services.list()
    app_settings = {}
    for s in services:
        specs = s.attrs['Spec']
        labels = specs['Labels']
        stack_name = labels['com.docker.stack.namespace']
        container_name = s.name.removeprefix(f"{stack_name}_")
        settings = app_settings[stack_name] = app_settings.get(stack_name) or {}
        if (backup := labels.get('backupbot.backup')) and bool(backup):
            settings['enabled'] = True
        if selected_container and container_name not in selected_container:
            logger.debug(f"Skipping {s.name} because it's not a selected container")
            continue
        if mounts:= specs['TaskTemplate']['ContainerSpec'].get('Mounts'):
            volumes = parse_volumes(stack_name, mounts)
            volumes.update(settings.get('volumes') or {})
            settings['volumes'] = volumes
            excluded_volumes, included_volume_paths = parse_excludes_includes(labels)
            settings['excluded_volumes'] = excluded_volumes.union(settings.get('excluded_volumes') or set())
            settings['included_volume_paths'] = included_volume_paths.union(settings.get('included_volume_paths') or set())
        if container := container_by_service.get(s.name):
            if command := labels.get(f'backupbot.{hook_type}.pre-hook'):
                if not (pre_hooks:= settings.get('pre_hooks')):
                    pre_hooks = settings['pre_hooks'] = {}
                pre_hooks[container] = command
            if command := labels.get(f'backupbot.{hook_type}.post-hook'):
                if not (post_hooks:= settings.get('post_hooks')):
                    post_hooks = settings['post_hooks'] = {}
                post_hooks[container] = command
        else:
            logger.debug(f"Container {s.name} is not running.")
            if labels.get(f'backupbot.{hook_type}.pre-hook') or labels.get(f'backupbot.{hook_type}.post-hook'):
                logger.error(f"Container {s.name} contain hooks but it's not running")
    return app_settings


def get_backup_details(app_settings, volumes=[]):
        c.labels['com.docker.swarm.service.name']: c for c in client.containers.list()}
    backup_paths = set()
    backup_apps = []
    pre_hooks= {}
    post_hooks = {}
    for app, settings in app_settings.items():
        if settings.get('enabled'):
            if SERVICE != 'ALL' and SERVICE != app:
    backup_apps = set()
    pre_commands = {}
    post_commands = {}
    services = client.services.list()
    for s in services:
        labels = s.attrs['Spec']['Labels']
        if (backup := labels.get('backupbot.backup')) and bool(backup):
            # volumes: s.attrs['Spec']['TaskTemplate']['ContainerSpec']['Mounts'][0]['Source']
            stack_name = labels['com.docker.stack.namespace']
            # Remove this lines to backup only a specific service
            # This will unfortenately decrease restice performance
            # if SERVICE and SERVICE != stack_name:
            #     continue
            backup_apps.add(stack_name)
            backup_paths = backup_paths.union(
                Path(VOLUME_PATH).glob(f"{stack_name}_*"))
            if not (container := container_by_service.get(s.name)):
                logger.error(
                    f"Container {s.name} is not running, hooks can not be executed")
                continue
            backup_apps.append(app)
            add_backup_paths(backup_paths, settings, app, volumes)
            if hooks:= settings.get('pre_hooks'):
                pre_hooks.update(hooks)
            if hooks:= settings.get('post_hooks'):
                post_hooks.update(hooks)
    return pre_hooks, post_hooks, list(backup_paths), backup_apps


def add_backup_paths(backup_paths, settings, app, selected_volumes):
    if (volumes := settings.get('volumes')):
        if includes:= settings.get('included_volume_paths'):
            included_volumes = list(zip(*includes))[0]
            for volume, rel_paths in includes:
                if not (volume_path:= volumes.get(volume)):
                    logger.error(f'Can not find volume with the name {volume}')
                    continue
                if selected_volumes and volume not in selected_volumes:
                    logger.debug(f'Skipping {volume}:{rel_paths} because the volume is not selected')
                    continue
                for p in rel_paths:
                    absolute_path = Path(f"{volume_path}/{p}")
                    backup_paths.add(absolute_path)
        else:
            included_volumes = []
        excluded_volumes = settings.get('excluded_volumes') or []
        for name, path in volumes.items():
            if selected_volumes and name not in selected_volumes:
                logger.debug(f'Skipping volume: {name} because the volume is not selected')
                continue
            if name in excluded_volumes:
                logger.debug(f'Skipping volume: {name} because the volume is excluded')
                continue
            if name in included_volumes:
                logger.debug(f'Skipping volume: {name} because a path is selected')
                continue
            backup_paths.add(path)
    else:
        logger.warning(f"{app} does not contain any volumes")


def parse_volumes(stack_name, mounts):
    volumes = {}
    for m in mounts:
        if m['Type'] != 'volume':
            continue
        relative_path = m['Source']
        name = relative_path.removeprefix(stack_name + '_')
        absolute_path = Path(f"{VOLUME_PATH}{relative_path}/_data/")
        volumes[name] = absolute_path
    return volumes


def parse_excludes_includes(labels):
    excluded_volumes = set()
    included_volume_paths = set()
    for label, value in labels.items():
        if label.startswith('backupbot.backup.volumes.'):
            volume_name = label.removeprefix('backupbot.backup.volumes.').removesuffix('.path')
            if label.endswith('path'):
                relative_paths = tuple(value.split(','))
                included_volume_paths.add((volume_name, relative_paths))
            elif bool(value):
                excluded_volumes.add(volume_name)
    return excluded_volumes, included_volume_paths
            if prehook := labels.get('backupbot.backup.pre-hook'):
                pre_commands[container] = prehook
            if posthook := labels.get('backupbot.backup.post-hook'):
                post_commands[container] = posthook
    return pre_commands, post_commands, list(backup_paths), list(backup_apps)


def copy_secrets(apps):
@@ -296,14 +143,14 @@ def copy_secrets(apps):
    os.mkdir(SECRET_PATH)
    client = docker.from_env()
    container_by_service = {
        c.labels.get('com.docker.swarm.service.name'): c for c in client.containers.list()}
        c.labels['com.docker.swarm.service.name']: c for c in client.containers.list()}
    services = client.services.list()
    for s in services:
        app_name = s.attrs['Spec']['Labels']['com.docker.stack.namespace']
        if (app_name in apps and
                (app_secs := s.attrs['Spec']['TaskTemplate']['ContainerSpec'].get('Secrets'))):
            if not container_by_service.get(s.name):
                logger.warning(
                logger.error(
                    f"Container {s.name} is not running, secrets can not be copied.")
                continue
            container_id = container_by_service[s.name].id
@@ -314,7 +161,6 @@ def copy_secrets(apps):
                        f"For the secret {sec['SecretName']} the file {src} does not exist for {s.name}")
                    continue
                dst = SECRET_PATH + sec['SecretName']
                logger.debug(f"Copy Secret {sec['SecretName']}")
                copyfile(src, dst)


@@ -342,21 +188,12 @@ def run_commands(commands):
def backup_volumes(backup_paths, apps, retries, dry_run=False):
    while True:
        try:
            logger.info("Backup these paths:")
            logger.debug("\n".join(map(str, backup_paths)))
            backup_paths = list(filter(path_exists, backup_paths))
            cmd = restic.cat.base_command()
            parent = get_snapshots('latest')
            if parent:
                # https://restic.readthedocs.io/en/stable/040_backup.html#file-change-detection
                cmd.extend(['--parent', parent[0]['short_id']])
            tags = set(apps + [SERVICE])
            logger.info("Start volume backup")
            result = restic.internal.backup.run(cmd, backup_paths, dry_run=dry_run, tags=tags)
            result = restic.backup(backup_paths, dry_run=dry_run, tags=apps)
            logger.summary("backup finished", extra=result)
            return
        except ResticFailedError as error:
            logger.error(f"Backup failed for {SERVICE}.")
            logger.error(
                f"Backup failed for {apps}. Could not Backup these paths: {backup_paths}")
            logger.error(error, exc_info=True)
            if retries > 0:
                retries -= 1
@@ -364,20 +201,49 @@ def backup_volumes(backup_paths, apps, retries, dry_run=False):
                exit(1)


def path_exists(path):
    if not path.exists():
        logger.error(f'{path} does not exist')
    return path.exists()
@cli.command()
@click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', default='latest')
@click.option('target', '--target', '-t', envvar='TARGET', default='/')
@click.option('noninteractive', '--noninteractive', envvar='NONINTERACTIVE', is_flag=True)
def restore(snapshot, target, noninteractive):
    # Todo: recommend to shutdown the container
    service_paths = VOLUME_PATH
    if SERVICE:
        service_paths = service_paths + f'{SERVICE}_*'
    snapshots = restic.snapshots(snapshot_id=snapshot)
    if not snapshot:
        logger.error("No Snapshots with ID {snapshots}")
        exit(1)
    if not noninteractive:
        snapshot_date = datetime.fromisoformat(snapshots[0]['time'])
        delta = datetime.now(tz=timezone.utc) - snapshot_date
        print(
            f"You are going to restore Snapshot {snapshot} of {service_paths} at {target}")
        print(f"This snapshot is {delta} old")
        print(
            f"THIS COMMAND WILL IRREVERSIBLY OVERWRITES {target}{service_paths.removeprefix('/')}")
        prompt = input("Type YES (uppercase) to continue: ")
        if prompt != 'YES':
            logger.error("Restore aborted")
            exit(1)
    print(f"Restoring Snapshot {snapshot} of {service_paths} at {target}")
    # TODO: use tags if no snapshot is selected, to use a snapshot including SERVICE
    result = restic.restore(snapshot_id=snapshot,
                            include=service_paths, target_dir=target)
    logger.debug(result)


@cli.command()
def snapshots():
    snapshots = get_snapshots()
    snapshots = restic.snapshots()
    no_snapshots = True
    for snap in snapshots:
        print(snap['time'], snap['id'])
    if not snapshots:
        if not SERVICE or (tags := snap.get('tags')) and SERVICE in tags:
            print(snap['time'], snap['id'])
            no_snapshots = False
    if no_snapshots:
        err_msg = "No Snapshots found"
        if SERVICE != 'ALL':
        if SERVICE:
            service_name = SERVICE.replace('_', '.')
            err_msg += f' for app {service_name}'
        logger.warning(err_msg)
@@ -395,7 +261,8 @@ def ls(snapshot, path):

def list_files(snapshot, path):
    cmd = restic.cat.base_command() + ['ls']
    cmd = cmd + ['--tag', SERVICE]
    if SERVICE:
        cmd = cmd + ['--tag', SERVICE]
    cmd.append(snapshot)
    if path:
        cmd.append(path)
@@ -404,7 +271,7 @@ def list_files(snapshot, path):
    except ResticFailedError as error:
        if 'no snapshot found' in str(error):
            err_msg = f'There is no snapshot "{snapshot}"'
            if SERVICE != 'ALL':
            if SERVICE:
                err_msg += f' for the app "{SERVICE}"'
            logger.error(err_msg)
            exit(1)
@@ -436,7 +303,7 @@ def download(snapshot, path, volumes, secrets):
        tarinfo.size = len(binary_output)
        file_dumps.append((binary_output, tarinfo))
    if volumes:
        if SERVICE == 'ALL':
        if not SERVICE:
            logger.error("Please specify '--host' when using '--volumes'")
            exit(1)
        files = list_files(snapshot, VOLUME_PATH)
@@ -449,7 +316,7 @@ def download(snapshot, path, volumes, secrets):
                tarinfo.size = len(binary_output)
                file_dumps.append((binary_output, tarinfo))
    if secrets:
        if SERVICE == 'ALL':
        if not SERVICE:
            logger.error("Please specify '--host' when using '--secrets'")
            exit(1)
        filename = f"{SERVICE}.json"
@@ -486,7 +353,8 @@ def get_formatted_size(file_path):

def dump(snapshot, path):
    cmd = restic.cat.base_command() + ['dump']
    cmd = cmd + ['--tag', SERVICE]
    if SERVICE:
        cmd = cmd + ['--tag', SERVICE]
    cmd = cmd + [snapshot, path]
    print(f"Dumping {path} from snapshot '{snapshot}'")
    output = subprocess.run(cmd, capture_output=True)

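The rewritten `backupbot.py` above wires `create`, `restore`, `snapshots`, `ls` and `download` into a single Click group whose global options (`--host`, `--repo`, `--log`, `--machine-logs`) come before the subcommand, matching the usage notes in the README earlier in this diff. A couple of hedged invocation examples from inside the running container (the container name is a placeholder):

```
# Placeholder container name; the global options precede the subcommand.
docker exec -it <container_id_or_name> backup --machine-logs --log debug create
docker exec -it <container_id_or_name> backup --host app snapshots
```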
@@ -1,14 +0,0 @@
---
version: "3.8"
services:
  app:
    environment:
      - AWS_ACCESS_KEY_ID
      - AWS_SECRET_ACCESS_KEY_FILE=/run/secrets/aws_secret_access_key
    secrets:
      - aws_secret_access_key

secrets:
  aws_secret_access_key:
    external: true
    name: ${STACK_NAME}_aws_secret_access_key_${SECRET_AWS_SECRET_ACCESS_KEY_VERSION}

@@ -1,13 +0,0 @@
---
version: "3.8"
services:
  app:
    environment:
      - RESTIC_REPOSITORY_FILE=/run/secrets/restic_repo
    secrets:
      - restic_repo

secrets:
  restic_repo:
    external: true
    name: ${STACK_NAME}_restic_repo_${SECRET_RESTIC_REPO_VERSION}

@@ -1,23 +0,0 @@
---
version: "3.8"
services:
  app:
    environment:
      - SSH_KEY_FILE=/run/secrets/ssh_key
      - SSH_HOST_KEY
    secrets:
      - source: ssh_key
        mode: 0400
    configs:
      - source: ssh_config
        target: /root/.ssh/config

secrets:
  ssh_key:
    external: true
    name: ${STACK_NAME}_ssh_key_${SECRET_SSH_KEY_VERSION}

configs:
  ssh_config:
    name: ${STACK_NAME}_ssh_config_${SSH_CONFIG_VERSION}
    file: ssh_config

@@ -1,15 +0,0 @@
---
version: "3.8"
services:
  app:
    deploy:
      mode: replicated
      replicas: 0
      labels:
        - "swarm.cronjob.enable=true"
        # Note(3wc): every 5m, testing
        - "swarm.cronjob.schedule=*/5 * * * *"
        # Note(3wc): blank label to be picked up by `abra recipe sync`
      restart_policy:
        condition: none
    entrypoint: [ "/usr/bin/backup.sh" ]
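These removed compose overlays (S3, secret repository, SSH and swarm-cronjob) were enabled in the old recipe by appending them to `COMPOSE_FILE` and creating the matching versioned secrets, as the old README and `.env.sample` describe. A hedged sketch of how the S3 overlay used to be wired up outside of `abra`; every value here is a placeholder.

```
# Old-recipe style S3 wiring (placeholders throughout).
export STACK_NAME=backup-bot-two
export COMPOSE_FILE="compose.yml:compose.s3.yml"
export SECRET_AWS_SECRET_ACCESS_KEY_VERSION=v1
export AWS_ACCESS_KEY_ID='<MY_ACCESS_KEY>'
printf '<SECRET_ACCESS_KEY>' | docker secret create ${STACK_NAME}_aws_secret_access_key_v1 -
```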
							
								
								
									
compose.yaml (44 changed lines, new file)

@@ -0,0 +1,44 @@
services:
  app:
    image: docker:24.0.7-dind
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
      - "/var/lib/docker/volumes/:/var/lib/docker/volumes/"
      - "/var/lib/docker/containers/:/var/lib/docker/containers/:ro"
    environment:
      - CRON_SCHEDULE
      - RESTIC_REPOSITORY_FILE=/run/secrets/restic_repo
      - RESTIC_PASSWORD_FILE=/run/secrets/restic_password
    secrets:
      - restic_repo
      - restic_password
    configs:
      - source: entrypoint
        target: /entrypoint.sh
        mode: 0555
      - source: backupbot
        target: /usr/bin/backup
        mode: 0555
    entrypoint: ['/entrypoint.sh']
    healthcheck:
      test: "pgrep crond"
      interval: 30s
      timeout: 10s
      retries: 10
      start_period: 5m

secrets:
  restic_repo:
    external: true
    name: ${STACK_NAME}_restic_repo
  restic_password:
    external: true
    name: ${STACK_NAME}_restic_password

configs:
  entrypoint:
    name: ${STACK_NAME}_entrypoint
    file: entrypoint.sh
  backupbot:
    name: ${STACK_NAME}_backupbot
    file: backupbot.py
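The new compose.yaml expects two pre-existing external secrets and interpolates `STACK_NAME` from the environment. A minimal deployment sketch, grounded in the README instructions earlier in this diff; the secret values shown here are placeholders (the repository secret can hold a local path or, for example, a restic REST server URL).

```
# Load variables from .env, create the external secrets, then deploy the stack.
set -a && source .env && set +a

printf 'a-strong-restic-password' | docker secret create ${STACK_NAME}_restic_password -
printf 'rest:https://user:pass@host:8000/' | docker secret create ${STACK_NAME}_restic_repo -

docker stack deploy ${STACK_NAME} -c compose.yaml
```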
							
								
								
									
compose.yml (36 changed lines)

@@ -1,36 +0,0 @@
---
version: "3.8"
services:
  app:
    image: git.coopcloud.tech/coop-cloud/backup-bot-two:2.1.1-beta
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
      - "/var/lib/docker/volumes/:/var/lib/docker/volumes/"
      - "/var/lib/docker/containers/:/var/lib/docker/containers/:ro"
      - backups:/backups
    environment:
      - CRON_SCHEDULE
      - RESTIC_REPOSITORY
      - RESTIC_PASSWORD_FILE=/run/secrets/restic_password
    secrets:
      - restic_password
    deploy:
      labels:
        - coop-cloud.${STACK_NAME}.version=2.0.1+2.1.1-beta
        - coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-300}
        - coop-cloud.backupbot.enabled=true
    #entrypoint: ['tail', '-f','/dev/null']
    healthcheck:
      test: "pgrep crond"
      interval: 30s
      timeout: 10s
      retries: 10
      start_period: 5m

secrets:
  restic_password:
    external: true
    name: ${STACK_NAME}_restic_password_${SECRET_RESTIC_PASSWORD_VERSION}

volumes:
  backups:
							
								
								
									
entrypoint.sh (10 changed lines, Executable file → Normal file)

@@ -1,11 +1,11 @@
#!/bin/sh

set -e
set -e -o pipefail

if [ -n "$SSH_HOST_KEY" ]
then
    echo "$SSH_HOST_KEY" > /root/.ssh/known_hosts
fi
apk add --upgrade --no-cache restic bash python3 py3-pip py3-click py3-docker-py py3-json-logger curl

# Todo use requirements file with specific versions
pip install --break-system-packages resticpy==1.0.2

cron_schedule="${CRON_SCHEDULE:?CRON_SCHEDULE not set}"

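The entrypoint now installs restic and resticpy at container start and fails fast when `CRON_SCHEDULE` is unset; the compose.yaml healthcheck just looks for a running `crond`. Two hedged spot checks once the stack is up (the container name is a placeholder, and the `crontab -l` check assumes the entrypoint registers the schedule in the root crontab, as the previous Bash version did):

```
# Placeholder container name; find it with: docker ps --filter "name=backup-bot-two_app"
docker exec <container_id_or_name> pgrep crond   # same probe the healthcheck uses
docker exec <container_id_or_name> crontab -l    # confirm the CRON_SCHEDULE entry
```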
@@ -1 +0,0 @@
This is the first beta release of the new backup-bot-two rewrite in python. Be aware when updating, it can break. Please read the readme and update your config according to it.

@@ -1,3 +0,0 @@
Breaking Change: the variables `SERVER_NAME` and `RESTIC_HOST` are merged into `RESTIC_REPOSITORY`. The format can be looked up here: https://restic.readthedocs.io/en/stable/030_preparing_a_new_repo.html
ssh/sftp: `sftp:user@host:/repo-path`
S3:  `s3:https://s3.example.com/bucket_name`

@@ -1,3 +0,0 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json"
}

@@ -1,4 +0,0 @@
Host *
    IdentityFile    /run/secrets/ssh_key
    ServerAliveInterval 60
    ServerAliveCountMax 240