Compare commits: 1.0.0+2.0....main (50 commits)
Commits:
54e32ab422
4cda3c1018
f7f46d7b7b
c1902b2dbc
f40eb00435
3eea69ddee
f1661c04e7
4b4371ed3f
1214f59c79
8798e2feb5
119787ed39
141bedb069
14b55bbc79
ebcb0d42c5
dccc93ac6b
826bec925f
49dd989302
2f965a93dc
4054d3417e
f8cfcef029
4a49c4a7f0
79cdec6705
2bc9400807
9b141a5185
6ff2312090
8b66b80332
c9b04db7a0
333b7ec16d
aeccd605ee
f877186a57
9dc239b199
43548273fe
5a0467dbdd
3aefae61c0
ac7c5fb50d
cc59087b8c
eb7c35d4cd
249772ec03
45af6e8b5e
f7207cdf36
241fe3ce92
b8d61d01cd
6ac781c7e6
197cabf564
fe35f1ede8
f254a365f2
0d15765673
e09e1b476c
72688dc42c
10e460ff2d
.drone.yml
@@ -9,9 +9,9 @@ steps:
       password:
         from_secret: git_coopcloud_tech_token_3wc
       repo: git.coopcloud.tech/coop-cloud/backup-bot-two
-      tags: 2.0.0
+      tags: ${DRONE_SEMVER_BUILD}
       registry: git.coopcloud.tech
     when:
       event:
-        exclude:
-          - pull_request
+        include:
+          - tag
.env.sample
@@ -9,10 +9,16 @@ RESTIC_REPOSITORY=/backups/restic
 CRON_SCHEDULE='30 3 * * *'
 
 # Push Notifiactions
+#PUSH_PROMETHEUS_URL=https://pushgateway.example.com/metrics/job/backup
+# or
 #PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start
 #PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK
 #PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail
 
+# Push Basic Auth
+#COMPOSE_FILE="$COMPOSE_FILE:compose.pushbasicauth.yml"
+#SECRET_PUSH_BASICAUTH=v1
+
 # swarm-cronjob, instead of built-in cron
 #COMPOSE_FILE="$COMPOSE_FILE:compose.swarm-cronjob.yml"
 
.gitignore (vendored, 2 changes)
@@ -1 +1 @@
-/testing
+.venv
README.md (104 changes)
@@ -95,7 +95,7 @@ COMPOSE_FILE="$COMPOSE_FILE:compose.secret.yml"
 ```
 Add your REST server url as secret:
 ```
-`abra app secret insert <backupbot_name> restic_repo v1 "rest:https://user:pass@host:8000/"`
+abra app secret insert <backupbot_name> restic_repo v1 "rest:https://user:pass@host:8000/"
 ```
 The secret will overwrite the `RESTIC_REPOSITORY` variable.
 
@@ -104,15 +104,38 @@ See [restic REST docs](https://restic.readthedocs.io/en/latest/030_preparing_a_n
 
 ## Push notifications
 
+It is possible to configure three push events that are triggered by the backup cronjob. These can be used to detect failures from monitoring systems.
+The events are:
+- start
+- success
+- fail
+
+### Using a Prometheus Push Gateway
+
+[A prometheus push gateway](https://git.coopcloud.tech/coop-cloud/monitoring-ng#setup-push-gateway) can be used by setting the following env variable:
+- `PUSH_PROMETHEUS_URL=pushgateway.example.com/metrics/job/backup`
+
+### Using custom URLs
+
 The following env variables can be used to set up push notifications for backups. `PUSH_URL_START` is requested just before the backup starts, `PUSH_URL_SUCCESS` is only requested if the backup was successful, and if the backup fails `PUSH_URL_FAIL` will be requested.
 Each variable is optional and independent of the others.
-```
 
+```
 PUSH_URL_START=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=start
 PUSH_URL_SUCCESS=https://status.example.com/api/push/xxxxxxxxxx?status=up&msg=OK
 PUSH_URL_FAIL=https://status.example.com/api/push/xxxxxxxxxx?status=down&msg=fail
 ```
 
+### Push endpoint behind basic auth
+
+Insert the basic auth secret:
+`abra app secret insert <backupbot_name> push_basicauth v1 "user:password"`
+
+Enable basic auth in the env file by uncommenting the following lines:
+```
+#COMPOSE_FILE="$COMPOSE_FILE:compose.pushbasicauth.yml"
+#SECRET_PUSH_BASICAUTH=v1
+```
+
 ## Usage
 
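A minimal sketch of the flow these variables drive, mirroring the curl-based cronjob.sh added later in this diff; the `backup` CLI call and the "backup finished" log check are taken from that script, everything else here is illustrative:

```
import os
import subprocess
import urllib.request

def notify(url):
    # The status-page push URLs in the example are plain GET endpoints.
    if url:
        urllib.request.urlopen(url)

notify(os.environ.get("PUSH_URL_START"))  # requested just before the backup starts
result = subprocess.run(
    ["backup", "--machine-logs", "create"], capture_output=True, text=True
)
# cronjob.sh greps the log for this marker to decide success vs. failure:
if "backup finished" in result.stdout + result.stderr:
    notify(os.environ.get("PUSH_URL_SUCCESS"))
else:
    notify(os.environ.get("PUSH_URL_FAIL"))
```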
@@ -157,7 +180,7 @@ Show all files inside the latest snapshot (can be very verbose):
 
 Show specific files inside a selected snapshot:
 
-`abra app run <backupbot_name> app -- backup ls --snapshot <snapshot_id> --path /var/lib/docker/volumes/`
+`abra app run <backupbot_name> app -- backup ls --snapshot <snapshot_id> /var/lib/docker/volumes/`
 
 Download files from a snapshot:
 
@@ -179,20 +202,85 @@ restic snapshots
 
 Like Traefik or `swarm-cronjob`, Backupbot II uses access to the Docker socket to read labels from running Docker Swarm services:
 
+1. Add `ENABLE_BACKUPS=true` to .env.sample
+
+2. Add backupbot labels to the compose file
+
 ```
 services:
   db:
     deploy:
       labels:
-        backupbot.backup: ${BACKUP:-"true"}
-        backupbot.backup.pre-hook: 'mysqldump -u root -p"$(cat /run/secrets/db_root_password)" -f /volume_path/dump.db'
-        backupbot.backup.post-hook: "rm -rf /volume_path/dump.db"
+        backupbot.backup: "${ENABLE_BACKUPS:-true}"
+        backupbot.backup.pre-hook: "/pg_backup.sh backup"
+        backupbot.backup.volumes.db.path: "backup.sql"
+        backupbot.restore.post-hook: '/pg_backup.sh restore'
+        backupbot.backup.volumes.redis: "false"
 ```
 
 - `backupbot.backup` -- set to `true` to back up this service (REQUIRED)
-- `backupbot.backup.pre-hook` -- command to run before copying files (optional), save all dumps into the volumes
-- `backupbot.backup.post-hook` -- command to run after copying files (optional)
+  - this is the only required backup label; by default it will back up all volumes
+- `backupbot.backup.volumes.<volume_name>.path` -- only back up the listed relative paths from `<volume_name>`
+- `backupbot.backup.volumes.<volume_name>: false` -- exclude `<volume_name>` from the backup
+- `backupbot.backup.pre-hook` -- command to run before copying files
+  - e.g. save all database dumps into the volumes
+- `backupbot.backup.post-hook` -- command to run after copying files
+- `backupbot.restore.pre-hook` -- command to run before restoring files
+- `backupbot.restore.post-hook` -- command to run after restoring files
+  - e.g. read all database dumps from the volumes
+
+3. (Optional) add backup/restore scripts to the compose file
+
+```
+services:
+  db:
+    configs:
+      - source: pg_backup
+        target: /pg_backup.sh
+        mode: 0555
+
+configs:
+  pg_backup:
+    name: ${STACK_NAME}_pg_backup_${PG_BACKUP_VERSION}
+    file: pg_backup.sh
+```
+
+Version the config file in `abra.sh`:
+
+```
+export PG_BACKUP_VERSION=v1
+```
+
 As in the above example, you can reference Docker Secrets, e.g. for looking up database passwords, by reading the files in `/run/secrets` directly.
 
 [abra]: https://git.autonomic.zone/autonomic-cooperative/abra
+
+## Backupbot Development
+
+1. Copy modified backupbot.py into the container:
+
+```
+cp backupbot.py /tmp/backupbot.py; git stash; abra app cp <backupbot_name> /tmp/backupbot.py app:/usr/bin/backupbot.py; git checkout main; git stash pop
+```
+
+2. Test changes with the Python interpreter inside the container:
+
+```
+abra app run <backupbot_name> app bash
+cd /usr/bin/
+python
+from backupbot import *
+```
+
+### Versioning
+
+- App version: changes to `backup.py` (build a new image)
+- Co-op Cloud package version: changes to the recipe
+
+For example, starting with 1.0.0+2.0.0:
+- "patch" change to the recipe: 1.0.1+2.0.0
+- "patch" change to backup.py: increment both, so 1.1.0+2.0.1, because bumping the image version results in a minor recipe release
+
+https://git.coopcloud.tech/coop-cloud/backup-bot-two/issues/4
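A short sketch of how the volume labels documented above are interpreted, mirroring `parse_excludes_includes` in backupbot.py further down in this diff; the label values are the ones from the compose example:

```
labels = {
    "backupbot.backup": "true",
    "backupbot.backup.volumes.db.path": "backup.sql",  # only this path from volume "db"
    "backupbot.backup.volumes.redis": "false",         # exclude volume "redis" entirely
}

excluded, included_paths = set(), set()
for label, value in labels.items():
    if label.startswith("backupbot.backup.volumes."):
        name = label.removeprefix("backupbot.backup.volumes.").removesuffix(".path")
        if label.endswith("path"):
            included_paths.add((name, tuple(value.split(","))))  # comma-separated paths
        elif value.lower() not in ("yes", "true", "t", "1"):
            excluded.add(name)

print(excluded)        # {'redis'}
print(included_paths)  # {('db', ('backup.sql',))}
```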
abra.sh (3 changes)
@@ -1,5 +1,6 @@
-export BACKUPBOT_VERSION=v1
 export SSH_CONFIG_VERSION=v1
+export ENTRYPOINT_VERSION=v17
+export CRONJOB_VERSION=v2
 
 run_cron () {
     schedule="$(crontab -l | tr -s " " | cut -d ' ' -f-5)"
backupbot.py (569 changes)
@@ -17,47 +17,53 @@ from pathlib import Path
 from shutil import copyfile, rmtree
 
 VOLUME_PATH = "/var/lib/docker/volumes/"
-SECRET_PATH = '/secrets/'
-SERVICE = None
+SECRET_PATH = "/secrets/"
+SERVICE = "ALL"
 
 logger = logging.getLogger("backupbot")
-logging.addLevelName(55, 'SUMMARY')
-setattr(logging, 'SUMMARY', 55)
-setattr(logger, 'summary', lambda message, *args, **
-        kwargs: logger.log(55, message, *args, **kwargs))
+logging.addLevelName(55, "SUMMARY")
+setattr(logging, "SUMMARY", 55)
+setattr(
+    logger,
+    "summary",
+    lambda message, *args, **kwargs: logger.log(55, message, *args, **kwargs),
+)
 
 
 def handle_exception(exc_type, exc_value, exc_traceback):
     if issubclass(exc_type, KeyboardInterrupt):
         sys.__excepthook__(exc_type, exc_value, exc_traceback)
         return
-    logger.critical("Uncaught exception", exc_info=(
-        exc_type, exc_value, exc_traceback))
+    logger.critical("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
 
 
 sys.excepthook = handle_exception
 
 
 @click.group()
-@click.option('-l', '--log', 'loglevel')
-@click.option('-m', '--machine-logs', 'machine_logs', is_flag=True)
-@click.option('service', '--host', '-h', envvar='SERVICE')
-@click.option('repository', '--repo', '-r', envvar='RESTIC_REPOSITORY')
+@click.option("-l", "--log", "loglevel")
+@click.option(
+    "-m", "--machine-logs", "machine_logs", is_flag=True, envvar="MACHINE_LOGS"
+)
+@click.option("service", "--host", "-h", envvar="SERVICE")
+@click.option("repository", "--repo", "-r", envvar="RESTIC_REPOSITORY")
 def cli(loglevel, service, repository, machine_logs):
     global SERVICE
     if service:
-        SERVICE = service.replace('.', '_')
+        SERVICE = service.replace(".", "_")
     if repository:
-        os.environ['RESTIC_REPOSITORY'] = repository
+        os.environ["RESTIC_REPOSITORY"] = repository
     if loglevel:
         numeric_level = getattr(logging, loglevel.upper(), None)
         if not isinstance(numeric_level, int):
-            raise ValueError('Invalid log level: %s' % loglevel)
+            raise ValueError("Invalid log level: %s" % loglevel)
         logger.setLevel(numeric_level)
     logHandler = logging.StreamHandler()
     if machine_logs:
         formatter = jsonlogger.JsonFormatter(
-            "%(levelname)s %(filename)s %(lineno)s %(process)d %(message)s", rename_fields={"levelname": "message_type"})
+            "%(levelname)s %(filename)s %(lineno)s %(process)d %(message)s",
+            rename_fields={"levelname": "message_type"},
+        )
         logHandler.setFormatter(formatter)
     logger.addHandler(logHandler)
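For reference, a standalone sketch of the custom SUMMARY level wired up above: level 55 sits above CRITICAL (50), so `logger.summary(...)` survives any loglevel chosen via `--log`:

```
import logging

logger = logging.getLogger("example")
logging.addLevelName(55, "SUMMARY")
setattr(logger, "summary", lambda msg, *a, **kw: logger.log(55, msg, *a, **kw))

logging.basicConfig(level=logging.ERROR)
logger.summary("backup finished")  # still emitted: 55 > ERROR (40)
```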
@@ -66,18 +72,18 @@ def cli(loglevel, service, repository, machine_logs):
 
 
 def init_repo():
-    if repo:= os.environ.get('RESTIC_REPOSITORY_FILE'):
+    if repo := os.environ.get("RESTIC_REPOSITORY_FILE"):
         # RESTIC_REPOSITORY_FILE and RESTIC_REPOSITORY are mutually exclusive
-        del os.environ['RESTIC_REPOSITORY']
+        del os.environ["RESTIC_REPOSITORY"]
     else:
-        repo = os.environ['RESTIC_REPOSITORY']
+        repo = os.environ["RESTIC_REPOSITORY"]
     restic.repository = repo
     logger.debug(f"set restic repository location: {repo}")
-    restic.password_file = '/var/run/secrets/restic_password'
+    restic.password_file = "/var/run/secrets/restic_password"
     try:
         restic.cat.config()
     except ResticFailedError as error:
-        if 'unable to open config file' in str(error):
+        if "unable to open config file" in str(error):
             result = restic.init()
             logger.info(f"Initialized restic repo: {result}")
         else:
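A tiny sketch of the precedence rule in `init_repo` (paths are placeholders): when the `_FILE` variant is present, the plain variable is dropped, since, as the comment in the code notes, the two are mutually exclusive for restic:

```
import os

os.environ["RESTIC_REPOSITORY"] = "/backups/restic"          # placeholder value
os.environ["RESTIC_REPOSITORY_FILE"] = "/run/secrets/restic_repo"  # placeholder value

if os.environ.get("RESTIC_REPOSITORY_FILE"):
    # The _FILE secret wins; restic reads the repository location from it.
    os.environ.pop("RESTIC_REPOSITORY", None)
```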
@@ -86,55 +92,310 @@ def init_repo():
 
 def export_secrets():
     for env in os.environ:
-        if env.endswith('FILE') and not "COMPOSE_FILE" in env:
+        if env.endswith("FILE") and not "COMPOSE_FILE" in env:
             logger.debug(f"exported secret: {env}")
             with open(os.environ[env]) as file:
                 secret = file.read()
-            os.environ[env.removesuffix('_FILE')] = secret
+            os.environ[env.removesuffix("_FILE")] = secret
             # logger.debug(f"Read secret value: {secret}")
 
 
 @cli.command()
-@click.option('retries', '--retries', '-r', envvar='RETRIES', default=1)
+@click.option("retries", "--retries", "-r", envvar="RETRIES", default=1)
 def create(retries):
-    pre_commands, post_commands, backup_paths, apps = get_backup_cmds()
-    copy_secrets(apps)
-    backup_paths.append(SECRET_PATH)
+    app_settings = parse_backup_labels()
+    pre_commands, post_commands, backup_paths, apps_versions = get_backup_details(
+        app_settings
+    )
+    copy_secrets(apps_versions)
+    backup_paths.append(Path(SECRET_PATH))
     run_commands(pre_commands)
-    backup_volumes(backup_paths, apps, int(retries))
+    backup_volumes(backup_paths, apps_versions, int(retries))
     run_commands(post_commands)
 
 
-def get_backup_cmds():
-    client = docker.from_env()
-    container_by_service = {
-        c.labels.get('com.docker.swarm.service.name'): c for c in client.containers.list()}
-    backup_paths = set()
-    backup_apps = set()
-    pre_commands = {}
-    post_commands = {}
-    services = client.services.list()
-    for s in services:
-        labels = s.attrs['Spec']['Labels']
-        if (backup := labels.get('backupbot.backup')) and bool(backup):
-            # volumes: s.attrs['Spec']['TaskTemplate']['ContainerSpec']['Mounts'][0]['Source']
-            stack_name = labels['com.docker.stack.namespace']
-            # Remove this lines to backup only a specific service
-            # This will unfortenately decrease restice performance
-            # if SERVICE and SERVICE != stack_name:
-            #     continue
-            backup_apps.add(stack_name)
-            backup_paths = backup_paths.union(
-                Path(VOLUME_PATH).glob(f"{stack_name}_*"))
-            if not (container := container_by_service.get(s.name)):
-                logger.error(
-                    f"Container {s.name} is not running, hooks can not be executed")
-                continue
-            if prehook := labels.get('backupbot.backup.pre-hook'):
-                pre_commands[container] = prehook
-            if posthook := labels.get('backupbot.backup.post-hook'):
-                post_commands[container] = posthook
-    return pre_commands, post_commands, list(backup_paths), list(backup_apps)
+@cli.command()
+@click.option("snapshot_id", "--snapshot", "-s", envvar="SNAPSHOT", default="latest")
+@click.option("target", "--target", "-t", envvar="TARGET", default="/")
+@click.option(
+    "noninteractive", "--noninteractive", envvar="NONINTERACTIVE", is_flag=True
+)
+@click.option("volumes", "--volumes", "-v", envvar="VOLUMES", multiple=True)
+@click.option("container", "--container", "-c", envvar="CONTAINER", multiple=True)
+@click.option("no_commands", "--no-commands", envvar="NO_COMMANDS", is_flag=True)
+def restore(snapshot_id, target, noninteractive, volumes, container, no_commands):
+    app_settings = parse_backup_labels("restore", container)
+    if SERVICE != "ALL":
+        if not app_settings.get(SERVICE):
+            logger.error(
+                f"The app {SERVICE} is not running, use the restore-path argument to restore paths of undeployed apps"
+            )
+            exit(1)
+        app_settings = {SERVICE: app_settings.get(SERVICE)}
+    pre_commands, post_commands, backup_paths, apps_versions = get_backup_details(
+        app_settings, volumes
+    )
+    snapshots = get_snapshots(snapshot_id)
+    if not snapshots:
+        logger.error(
+            f"No Snapshots with ID {snapshot_id} for {apps_versions.keys()} found."
+        )
+        exit(1)
+    snapshot = snapshots[0]
+    snapshot_id = snapshot["short_id"]
+    if not noninteractive:
+        print(f"Snapshot to restore: \t{snapshot_id}")
+        restore_app_versions = app_versions_from_tags(snapshot.get("tags"))
+        print("Apps:")
+        for app, version in apps_versions.items():
+            restore_version = restore_app_versions.get(app)
+            print(f"\t{app} \t {restore_version}")
+            if version != restore_version:
+                print(f"WARNING!!! The running app is deployed with version {version}")
+        print("The following volume paths will be restored:")
+        for p in backup_paths:
+            print(f"\t{p}")
+        if not no_commands:
+            print("The following commands will be executed:")
+            for container, cmd in list(pre_commands.items()) + list(
+                post_commands.items()
+            ):
+                print(f"\t{container.labels['com.docker.swarm.service.name']}:\t{cmd}")
+        snapshot_date = datetime.fromisoformat(snapshot["time"])
+        delta = datetime.now(tz=timezone.utc) - snapshot_date
+        print(f"This snapshot is {delta} old")
+        print("\nTHIS COMMAND WILL IRREVERSIBLY OVERWRITE FILES")
+        prompt = input("Type YES (uppercase) to continue: ")
+        if prompt != "YES":
+            logger.error("Restore aborted")
+            exit(1)
+    print(f"Restoring Snapshot {snapshot_id} at {target}")
+    if not no_commands and pre_commands:
+        print(f"Run pre commands.")
+        run_commands(pre_commands)
+    if backup_paths:
+        result = restic_restore(
+            snapshot_id=snapshot_id, include=backup_paths, target_dir=target
+        )
+        logger.debug(result)
+    else:
+        print("No paths to restore.")
+    if not no_commands and post_commands:
+        print(f"Run post commands.")
+        run_commands(post_commands)
+
+
+@cli.command()
+@click.option("snapshot_id", "--snapshot", "-s", envvar="SNAPSHOT", default="latest")
+@click.option("target", "--target", "-t", envvar="TARGET", default="/")
+@click.option(
+    "noninteractive", "--noninteractive", envvar="NONINTERACTIVE", is_flag=True
+)
+@click.argument("paths", nargs=-1, required=True, envvar="INCLUDE_PATH")
+def restore_path(snapshot_id, target, noninteractive, paths):
+    """PATHS: list of paths to restore"""
+    snapshots = get_snapshots(snapshot_id)
+    if not snapshots:
+        logger.error(f"No Snapshots with ID {snapshot_id} for app {SERVICE} found.")
+        exit(1)
+    snapshot = snapshots[0]
+    snapshot_id = snapshot["short_id"]
+    if not noninteractive:
+        print(f"Snapshot to restore: \t{snapshot_id}")
+        restore_app_versions = app_versions_from_tags(snapshot.get("tags"))
+        print("Apps:")
+        for app, version in restore_app_versions.items():
+            if SERVICE == "ALL" or SERVICE == app:
+                print(f"\t{app} \t {version}")
+        print("The following paths will be restored:")
+        for p in paths:
+            print(f"\t{p}")
+        snapshot_date = datetime.fromisoformat(snapshot["time"])
+        delta = datetime.now(tz=timezone.utc) - snapshot_date
+        print(f"This snapshot is {delta} old")
+        print("\nTHIS COMMAND WILL IRREVERSIBLY OVERWRITE FILES")
+        prompt = input("Type YES (uppercase) to continue: ")
+        if prompt != "YES":
+            logger.error("Restore aborted")
+            exit(1)
+    print(f"Restoring Snapshot {snapshot_id} at {target}")
+    result = restic_restore(snapshot_id=snapshot_id, include=paths, target_dir=target)
+    logger.debug(result)
+
+
+def restic_restore(snapshot_id, include=[], target_dir=None):
+    cmd = restic.cat.base_command() + ["restore", snapshot_id]
+    for path in include:
+        cmd.extend(["--include", path])
+    if target_dir:
+        cmd.extend(["--target", target_dir])
+    return restic.internal.command_executor.execute(cmd)
+
+
+def get_snapshots(snapshot_id=None):
+    if snapshot_id and snapshot_id != "latest":
+        snapshots = restic.snapshots(snapshot_id=snapshot_id)
+        if not SERVICE in app_versions_from_tags(snapshots[0].get("tags")):
+            logger.error(f"Snapshot with ID {snapshot_id} does not contain {SERVICE}")
+            exit(1)
+    else:
+        snapshots = restic.snapshots()
+        snapshots = list(
+            filter(
+                lambda x: SERVICE in app_versions_from_tags(x.get("tags")), snapshots
+            )
+        )
+    if snapshot_id == "latest":
+        return snapshots[-1:]
+    else:
+        return snapshots
+
+
+def app_versions_from_tags(tags):
+    if tags:
+        app_versions = map(lambda x: x.split(":"), tags)
+        return {i[0]: i[1] if len(i) > 1 else None for i in app_versions}
+    else:
+        return {}
+
+
+def str2bool(value: str) -> bool:
+    return value.lower() in ("yes", "true", "t", "1")
+
+
+def parse_backup_labels(hook_type="backup", selected_container=[]):
+    client = docker.from_env()
+    container_by_service = {
+        c.labels.get("com.docker.swarm.service.name"): c
+        for c in client.containers.list()
+    }
+    services = client.services.list()
+    app_settings = {}
+    for s in services:
+        specs = s.attrs["Spec"]
+        labels = specs["Labels"]
+        stack_name = labels["com.docker.stack.namespace"]
+        container_name = s.name.removeprefix(f"{stack_name}_")
+        version = labels.get(f"coop-cloud.{stack_name}.version")
+        settings = app_settings[stack_name] = app_settings.get(stack_name) or {}
+        if (backup := labels.get("backupbot.backup")) and str2bool(backup):
+            settings["enabled"] = True
+            if version:
+                settings["version"] = version
+            if selected_container and container_name not in selected_container:
+                logger.debug(f"Skipping {s.name} because it's not a selected container")
+                continue
+            if mounts := specs["TaskTemplate"]["ContainerSpec"].get("Mounts"):
+                volumes = parse_volumes(stack_name, mounts)
+                volumes.update(settings.get("volumes") or {})
+                settings["volumes"] = volumes
+            excluded_volumes, included_volume_paths = parse_excludes_includes(labels)
+            settings["excluded_volumes"] = excluded_volumes.union(
+                settings.get("excluded_volumes") or set()
+            )
+            settings["included_volume_paths"] = included_volume_paths.union(
+                settings.get("included_volume_paths") or set()
+            )
+            if container := container_by_service.get(s.name):
+                if command := labels.get(f"backupbot.{hook_type}.pre-hook"):
+                    if not (pre_hooks := settings.get("pre_hooks")):
+                        pre_hooks = settings["pre_hooks"] = {}
+                    pre_hooks[container] = command
+                if command := labels.get(f"backupbot.{hook_type}.post-hook"):
+                    if not (post_hooks := settings.get("post_hooks")):
+                        post_hooks = settings["post_hooks"] = {}
+                    post_hooks[container] = command
+            else:
+                logger.debug(f"Container {s.name} is not running.")
+                if labels.get(f"backupbot.{hook_type}.pre-hook") or labels.get(
+                    f"backupbot.{hook_type}.post-hook"
+                ):
+                    logger.error(f"Container {s.name} contains hooks but it's not running")
+    return app_settings
+
+
+def get_backup_details(app_settings, volumes=[]):
+    backup_paths = set()
+    backup_apps_versions = {}
+    pre_hooks = {}
+    post_hooks = {}
+    for app, settings in app_settings.items():
+        if settings.get("enabled"):
+            if SERVICE != "ALL" and SERVICE != app:
+                continue
+            backup_apps_versions[app] = settings.get("version")
+            add_backup_paths(backup_paths, settings, app, volumes)
+            if hooks := settings.get("pre_hooks"):
+                pre_hooks.update(hooks)
+            if hooks := settings.get("post_hooks"):
+                post_hooks.update(hooks)
+    return pre_hooks, post_hooks, list(backup_paths), backup_apps_versions
+
+
+def add_backup_paths(backup_paths, settings, app, selected_volumes):
+    if volumes := settings.get("volumes"):
+        if includes := settings.get("included_volume_paths"):
+            included_volumes = list(zip(*includes))[0]
+            for volume, rel_paths in includes:
+                if not (volume_path := volumes.get(volume)):
+                    logger.error(
+                        f"Can not find volume with the name {volume} for {app}"
+                    )
+                    continue
+                if selected_volumes and volume not in selected_volumes:
+                    logger.debug(
+                        f"Skipping {volume}:{rel_paths} because the volume is not selected"
+                    )
+                    continue
+                for p in rel_paths:
+                    absolute_path = Path(f"{volume_path}/{p}")
+                    backup_paths.add(absolute_path)
+        else:
+            included_volumes = []
+        excluded_volumes = settings.get("excluded_volumes") or []
+        for name, path in volumes.items():
+            if selected_volumes and name not in selected_volumes:
+                logger.debug(
+                    f"Skipping volume: {name} because the volume is not selected"
+                )
+                continue
+            if name in excluded_volumes:
+                logger.debug(f"Skipping volume: {name} because the volume is excluded")
+                continue
+            if name in included_volumes:
+                logger.debug(f"Skipping volume: {name} because a path is selected")
+                continue
+            backup_paths.add(path)
+    else:
+        logger.warning(f"{app} does not contain any volumes")
+
+
+def parse_volumes(stack_name, mounts):
+    volumes = {}
+    for m in mounts:
+        if m["Type"] != "volume":
+            continue
+        relative_path = m["Source"]
+        name = relative_path.removeprefix(stack_name + "_")
+        absolute_path = Path(f"{VOLUME_PATH}{relative_path}/_data/")
+        volumes[name] = absolute_path
+    return volumes
+
+
+def parse_excludes_includes(labels):
+    excluded_volumes = set()
+    included_volume_paths = set()
+    for label, value in labels.items():
+        if label.startswith("backupbot.backup.volumes."):
+            volume_name = label.removeprefix("backupbot.backup.volumes.").removesuffix(
+                ".path"
+            )
+            if label.endswith("path"):
+                relative_paths = tuple(value.split(","))
+                included_volume_paths.add((volume_name, relative_paths))
+            elif not str2bool(value):
+                excluded_volumes.add(volume_name)
+    return excluded_volumes, included_volume_paths
 
 
 def copy_secrets(apps):
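To make the tag round-trip concrete, a sketch with illustrative values: `backup_volumes` (in a later hunk) encodes one `app:version` tag per app, plus a bare `ALL` tag when no `--host` filter is active, and `app_versions_from_tags` above decodes them:

```
# Encoding, as done in backup_volumes (app name and version are illustrative):
apps_versions = {"wordpress": "2.4.0+6.3.1"}
tags = [f"{app}:{version}" for app, version in apps_versions.items()]
tags.append("ALL")
# tags == ['wordpress:2.4.0+6.3.1', 'ALL']

# Decoding, as done in app_versions_from_tags:
parts = map(lambda t: t.split(":"), tags)
decoded = {i[0]: i[1] if len(i) > 1 else None for i in parts}
# decoded == {'wordpress': '2.4.0+6.3.1', 'ALL': None}
```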
@@ -143,25 +404,30 @@ def copy_secrets(apps):
     os.mkdir(SECRET_PATH)
     client = docker.from_env()
     container_by_service = {
-        c.labels.get('com.docker.swarm.service.name'): c for c in client.containers.list()}
+        c.labels.get("com.docker.swarm.service.name"): c
+        for c in client.containers.list()
+    }
     services = client.services.list()
     for s in services:
-        app_name = s.attrs['Spec']['Labels']['com.docker.stack.namespace']
-        if (app_name in apps and
-                (app_secs := s.attrs['Spec']['TaskTemplate']['ContainerSpec'].get('Secrets'))):
+        app_name = s.attrs["Spec"]["Labels"]["com.docker.stack.namespace"]
+        if app_name in apps and (
+            app_secs := s.attrs["Spec"]["TaskTemplate"]["ContainerSpec"].get("Secrets")
+        ):
             if not container_by_service.get(s.name):
                 logger.warning(
-                    f"Container {s.name} is not running, secrets can not be copied.")
+                    f"Container {s.name} is not running, secrets can not be copied."
+                )
                 continue
             container_id = container_by_service[s.name].id
             for sec in app_secs:
-                src = f'/var/lib/docker/containers/{container_id}/mounts/secrets/{sec["SecretID"]}'
+                src = f"/var/lib/docker/containers/{container_id}/mounts/secrets/{sec['SecretID']}"
                 if not Path(src).exists():
                     logger.error(
-                        f"For the secret {sec['SecretName']} the file {src} does not exist for {s.name}")
+                        f"For the secret {sec['SecretName']} the file {src} does not exist for {s.name}"
+                    )
                     continue
-                dst = SECRET_PATH + sec['SecretName']
-                logger.debug("Copy Secret {sec['SecretName']}")
+                dst = SECRET_PATH + sec["SecretName"]
+                logger.debug(f"Copy Secret {sec['SecretName']}")
                 copyfile(src, dst)
@@ -170,9 +436,15 @@ def run_commands(commands):
         if not command:
             continue
         # Remove bash/sh wrapping
-        command = command.removeprefix('bash -c').removeprefix('sh -c').removeprefix(' ')
+        command = (
+            command.removeprefix("bash -c").removeprefix("sh -c").removeprefix(" ")
+        )
         # Remove quotes surrounding the command
-        if (len(command) >= 2 and command[0] == command[-1] and (command[0] == "'" or command[0] == '"')):
+        if (
+            len(command) >= 2
+            and command[0] == command[-1]
+            and (command[0] == "'" or command[0] == '"')
+        ):
             command = command[1:-1]
         # Use bash's pipefail to return exit codes inside a pipe to prevent silent failure
         command = f"bash -c 'set -o pipefail;{command}'"
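A worked example of the unwrapping above (the hook string is hypothetical):

```
command = "sh -c 'mysqldump --all-databases > /tmp/dump.sql'"  # hypothetical hook label
command = (
    command.removeprefix("bash -c").removeprefix("sh -c").removeprefix(" ")
)
# -> "'mysqldump --all-databases > /tmp/dump.sql'"
if (
    len(command) >= 2
    and command[0] == command[-1]
    and (command[0] == "'" or command[0] == '"')
):
    command = command[1:-1]  # strip the surrounding quotes
command = f"bash -c 'set -o pipefail;{command}'"
# -> "bash -c 'set -o pipefail;mysqldump --all-databases > /tmp/dump.sql'"
```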
@@ -181,22 +453,34 @@ def run_commands(commands):
         result = container.exec_run(command)
         if result.exit_code:
             logger.error(
-                f"Failed to run command {command} in {container.name}: {result.output.decode()}")
+                f"Failed to run command {command} in {container.name}: {result.output.decode()}"
+            )
         else:
-            logger.info(result.output.decode())
+            logger.debug(result.output.decode())
 
 
-def backup_volumes(backup_paths, apps, retries, dry_run=False):
+def backup_volumes(backup_paths, apps_versions, retries, dry_run=False):
     while True:
         try:
+            logger.info("Backup these paths:")
+            logger.info("\n".join(map(str, backup_paths)))
+            backup_paths = list(filter(path_exists, backup_paths))
+            cmd = restic.cat.base_command()
+            parent = get_snapshots("latest")
+            if parent:
+                # https://restic.readthedocs.io/en/stable/040_backup.html#file-change-detection
+                cmd.extend(["--parent", parent[0]["short_id"]])
+            tags = [f"{app}:{version}" for app, version in apps_versions.items()]
+            if SERVICE == "ALL":
+                tags.append(SERVICE)
             logger.info("Start volume backup")
-            logger.debug(backup_paths)
-            result = restic.backup(backup_paths, dry_run=dry_run, tags=apps)
+            result = restic.internal.backup.run(
+                cmd, backup_paths, dry_run=dry_run, tags=tags
+            )
             logger.summary("backup finished", extra=result)
             return
         except ResticFailedError as error:
-            logger.error(
-                f"Backup failed for {apps}. Could not Backup these paths: {backup_paths}")
+            logger.error(f"Backup failed for {SERVICE}.")
             logger.error(error, exc_info=True)
             if retries > 0:
                 retries -= 1
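Assembled, the backup call above amounts to roughly the following (IDs, tags and paths are illustrative, and it is an assumption that `restic.internal.backup.run` appends the tag flags and paths to the given base command):

```
# Illustrative only: rough shape of the command assembled in backup_volumes.
cmd = ["restic", "backup"]               # stand-in for restic.cat.base_command()
cmd.extend(["--parent", "a1b2c3d4"])     # short_id of the latest matching snapshot
tags = ["wordpress:2.4.0+6.3.1", "ALL"]  # one app:version tag per app (+ "ALL")
backup_paths = ["/var/lib/docker/volumes/my_wordpress_db/_data", "/secrets"]
# Assumed final invocation:
#   restic backup --parent a1b2c3d4 --tag wordpress:2.4.0+6.3.1 --tag ALL <paths>
```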
@@ -204,149 +488,143 @@ def backup_volumes(backup_paths, apps, retries, dry_run=False):
                 exit(1)
 
 
-@cli.command()
-@click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', default='latest')
-@click.option('target', '--target', '-t', envvar='TARGET', default='/')
-@click.option('noninteractive', '--noninteractive', envvar='NONINTERACTIVE', is_flag=True)
-def restore(snapshot, target, noninteractive):
-    # Todo: recommend to shutdown the container
-    service_paths = VOLUME_PATH
-    if SERVICE:
-        service_paths = service_paths + f'{SERVICE}_*'
-    snapshots = restic.snapshots(snapshot_id=snapshot)
-    if not snapshot:
-        logger.error("No Snapshots with ID {snapshots}")
-        exit(1)
-    if not noninteractive:
-        snapshot_date = datetime.fromisoformat(snapshots[0]['time'])
-        delta = datetime.now(tz=timezone.utc) - snapshot_date
-        print(
-            f"You are going to restore Snapshot {snapshot} of {service_paths} at {target}")
-        print(f"This snapshot is {delta} old")
-        print(
-            f"THIS COMMAND WILL IRREVERSIBLY OVERWRITES {target}{service_paths.removeprefix('/')}")
-        prompt = input("Type YES (uppercase) to continue: ")
-        if prompt != 'YES':
-            logger.error("Restore aborted")
-            exit(1)
-    print(f"Restoring Snapshot {snapshot} of {service_paths} at {target}")
-    # TODO: use tags if no snapshot is selected, to use a snapshot including SERVICE
-    result = restic.restore(snapshot_id=snapshot,
-                            include=service_paths, target_dir=target)
-    logger.debug(result)
+def path_exists(path):
+    if not path.exists():
+        logger.error(f"{path} does not exist")
+    return path.exists()
 
 
 @cli.command()
 def snapshots():
-    snapshots = restic.snapshots()
-    no_snapshots = True
+    snapshots = get_snapshots()
     for snap in snapshots:
-        if not SERVICE or (tags := snap.get('tags')) and SERVICE in tags:
-            print(snap['time'], snap['id'])
-            no_snapshots = False
-    if no_snapshots:
+        output = [snap["time"].split(".")[0], snap["short_id"]]
+        if tags := snap.get("tags"):
+            app_versions = app_versions_from_tags(tags)
+            if version := app_versions.get(SERVICE):
+                output.append(version)
+        print(*output)
+    if not snapshots:
         err_msg = "No Snapshots found"
-        if SERVICE:
-            service_name = SERVICE.replace('_', '.')
-            err_msg += f' for app {service_name}'
+        if SERVICE != "ALL":
+            service_name = SERVICE.replace("_", ".")
+            err_msg += f" for app {service_name}"
         logger.warning(err_msg)
 
 
 @cli.command()
-@click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', default='latest')
-@click.option('path', '--path', '-p', envvar='INCLUDE_PATH')
-def ls(snapshot, path):
+@click.option("snapshot", "--snapshot", "-s", envvar="SNAPSHOT", default="latest")
+@click.option("show_all", "--all", "-a", envvar="SHOW_ALL", is_flag=True)
+@click.option("timestamps", "--timestamps", "-t", envvar="TIMESTAMPS", is_flag=True)
+@click.argument(
+    "path", required=False, default="/var/lib/docker/volumes/", envvar="INCLUDE_PATH"
+)
+def ls(snapshot, show_all, timestamps, path):
+    if snapshot == "latest":
+        latest_snapshot = get_snapshots("latest")
+        if not latest_snapshot:
+            logger.error(f"There is no latest snapshot for {SERVICE}")
+            exit(1)
+        snapshot = latest_snapshot[0]["short_id"]
+    if show_all:
+        path = None
     results = list_files(snapshot, path)
     for r in results:
-        if r.get('path'):
-            print(f"{r['ctime']}\t{r['path']}")
+        if r.get("path"):
+            if timestamps:
+                print(f"{r['ctime']}\t{r['path']}")
+            else:
+                print(f"{r['path']}")
 
 
 def list_files(snapshot, path):
-    cmd = restic.cat.base_command() + ['ls']
-    if SERVICE:
-        cmd = cmd + ['--tag', SERVICE]
+    cmd = restic.cat.base_command() + ["ls"]
     cmd.append(snapshot)
     if path:
         cmd.append(path)
     try:
         output = restic.internal.command_executor.execute(cmd)
     except ResticFailedError as error:
-        if 'no snapshot found' in str(error):
+        if "no snapshot found" in str(error):
             err_msg = f'There is no snapshot "{snapshot}"'
-            if SERVICE:
+            if SERVICE != "ALL":
                 err_msg += f' for the app "{SERVICE}"'
             logger.error(err_msg)
             exit(1)
         else:
             raise error
-    output = output.replace('}\n{', '}|{')
-    results = list(map(json.loads, output.split('|')))
+    output = output.replace("}\n{", "}|{")
+    results = list(map(json.loads, output.split("|")))
     return results
 
 
 @cli.command()
-@click.option('snapshot', '--snapshot', '-s', envvar='SNAPSHOT', default='latest')
-@click.option('path', '--path', '-p', envvar='INCLUDE_PATH')
-@click.option('volumes', '--volumes', '-v', envvar='VOLUMES')
-@click.option('secrets', '--secrets', '-c', is_flag=True, envvar='SECRETS')
+@click.option("snapshot", "--snapshot", "-s", envvar="SNAPSHOT", default="latest")
+@click.option("path", "--path", "-p", envvar="INCLUDE_PATH")
+@click.option("volumes", "--volumes", "-v", envvar="VOLUMES")
+@click.option("secrets", "--secrets", "-c", is_flag=True, envvar="SECRETS")
 def download(snapshot, path, volumes, secrets):
     file_dumps = []
+    if snapshot == "latest":
+        latest_snapshot = get_snapshots("latest")
+        if not latest_snapshot:
+            logger.error(f"There is no latest snapshot for {SERVICE}")
+            exit(1)
+        snapshot = latest_snapshot[0]["short_id"]
     if not any([path, volumes, secrets]):
         volumes = secrets = True
     if path:
-        path = path.removesuffix('/')
+        path = path.removesuffix("/")
         binary_output = dump(snapshot, path)
         files = list_files(snapshot, path)
-        filetype = [f.get('type') for f in files if f.get('path') == path][0]
+        filetype = [f.get("type") for f in files if f.get("path") == path][0]
         filename = Path(path).name
-        if filetype == 'dir':
+        if filetype == "dir":
             filename = filename + ".tar"
         tarinfo = tarfile.TarInfo(name=filename)
         tarinfo.size = len(binary_output)
         file_dumps.append((binary_output, tarinfo))
     if volumes:
-        if not SERVICE:
+        if SERVICE == "ALL":
             logger.error("Please specify '--host' when using '--volumes'")
             exit(1)
         files = list_files(snapshot, VOLUME_PATH)
         for f in files[1:]:
-            path = f['path']
-            if Path(path).name.startswith(SERVICE) and f['type'] == 'dir':
+            path = f["path"]
+            if Path(path).name.startswith(SERVICE) and f["type"] == "dir":
                 binary_output = dump(snapshot, path)
                 filename = f"{Path(path).name}.tar"
                 tarinfo = tarfile.TarInfo(name=filename)
                 tarinfo.size = len(binary_output)
                 file_dumps.append((binary_output, tarinfo))
     if secrets:
-        if not SERVICE:
+        if SERVICE == "ALL":
             logger.error("Please specify '--host' when using '--secrets'")
             exit(1)
         filename = f"{SERVICE}.json"
         files = list_files(snapshot, SECRET_PATH)
         secrets = {}
         for f in files[1:]:
-            path = f['path']
-            if Path(path).name.startswith(SERVICE) and f['type'] == 'file':
+            path = f["path"]
+            if Path(path).name.startswith(SERVICE) and f["type"] == "file":
                 secret = dump(snapshot, path).decode()
-                secret_name = path.removeprefix(f'{SECRET_PATH}{SERVICE}_')
+                secret_name = path.removeprefix(f"{SECRET_PATH}{SERVICE}_")
                 secrets[secret_name] = secret
         binary_output = json.dumps(secrets).encode()
         tarinfo = tarfile.TarInfo(name=filename)
         tarinfo.size = len(binary_output)
         file_dumps.append((binary_output, tarinfo))
-    with tarfile.open('/tmp/backup.tar.gz', "w:gz") as tar:
+    with tarfile.open("/tmp/backup.tar.gz", "w:gz") as tar:
         print(f"Writing files to /tmp/backup.tar.gz...")
         for binary_output, tarinfo in file_dumps:
             tar.addfile(tarinfo, fileobj=io.BytesIO(binary_output))
-    size = get_formatted_size('/tmp/backup.tar.gz')
-    print(
-        f"Backup has been written to /tmp/backup.tar.gz with a size of {size}")
+    size = get_formatted_size("/tmp/backup.tar.gz")
+    print(f"Backup has been written to /tmp/backup.tar.gz with a size of {size}")
 
 
 def get_formatted_size(file_path):
     file_size = os.path.getsize(file_path)
-    units = ['Bytes', 'KB', 'MB', 'GB', 'TB']
+    units = ["Bytes", "KB", "MB", "GB", "TB"]
     for unit in units:
         if file_size < 1024:
             return f"{round(file_size, 3)} {unit}"
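A small illustration of the JSON-lines handling in `list_files` above (the sample output is made up): restic's `ls --json` emits one JSON object per line, which gets glued with `|` and parsed chunk by chunk; the first object describes the snapshot itself, which is why callers iterate over `files[1:]`:

```
import json

output = '{"short_id": "a1b2c3"}\n{"path": "/secrets", "type": "dir"}'
output = output.replace("}\n{", "}|{")
results = list(map(json.loads, output.split("|")))
# -> [{'short_id': 'a1b2c3'}, {'path': '/secrets', 'type': 'dir'}]
```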
@@ -355,18 +633,17 @@ def get_formatted_size(file_path):
 
 
 def dump(snapshot, path):
-    cmd = restic.cat.base_command() + ['dump']
-    if SERVICE:
-        cmd = cmd + ['--tag', SERVICE]
+    cmd = restic.cat.base_command() + ["dump"]
     cmd = cmd + [snapshot, path]
     print(f"Dumping {path} from snapshot '{snapshot}'")
     output = subprocess.run(cmd, capture_output=True)
     if output.returncode:
         logger.error(
-            f"error while dumping {path} from snapshot '{snapshot}': {output.stderr}")
+            f"error while dumping {path} from snapshot '{snapshot}': {output.stderr}"
+        )
         exit(1)
     return output.stdout
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     cli()
compose.pushbasicauth.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+version: "3.8"
+services:
+  app:
+    secrets:
+      - push_basicauth
+
+secrets:
+  push_basicauth:
+    external: true
+    name: ${STACK_NAME}_push_basicauth_${SECRET_PUSH_BASICAUTH}
compose.yml (21 changes)
@@ -2,7 +2,7 @@
 version: "3.8"
 services:
   app:
-    image: git.coopcloud.tech/coop-cloud/backup-bot-two:2.0.0
+    image: git.coopcloud.tech/coop-cloud/backup-bot-two:2.3.0-beta
     volumes:
       - "/var/run/docker.sock:/var/run/docker.sock"
       - "/var/lib/docker/volumes/:/var/lib/docker/volumes/"
@@ -14,9 +14,16 @@ services:
       - RESTIC_PASSWORD_FILE=/run/secrets/restic_password
     secrets:
       - restic_password
+    configs:
+      - source: entrypoint
+        target: /entrypoint.sh
+        mode: 666
+      - source: cronjob
+        target: /cronjob.sh
+        mode: 666
     deploy:
       labels:
-        - coop-cloud.${STACK_NAME}.version=1.0.0+2.0.0-beta
+        - coop-cloud.${STACK_NAME}.version=2.3.0+2.3.0-beta
         - coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-300}
         - coop-cloud.backupbot.enabled=true
     #entrypoint: ['tail', '-f','/dev/null']
@@ -31,6 +38,14 @@ secrets:
   restic_password:
     external: true
     name: ${STACK_NAME}_restic_password_${SECRET_RESTIC_PASSWORD_VERSION}
+
+configs:
+  entrypoint:
+    name: ${STACK_NAME}_entrypoint_${ENTRYPOINT_VERSION}
+    file: entrypoint.sh
+  cronjob:
+    name: ${STACK_NAME}_cronjob_${CRONJOB_VERSION}
+    file: cronjob.sh
 
 volumes:
   backups:
|
40
cronjob.sh
Executable file
40
cronjob.sh
Executable file
@ -0,0 +1,40 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
CURL_OPTS="-s"
|
||||||
|
# Check for basic auth
|
||||||
|
if [ -n "$(cat /run/secrets/push_basicauth)" ]
|
||||||
|
then
|
||||||
|
CURL_OPTS="$CURL_OPTS -u $(cat /run/secrets/push_basicauth)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "$PUSH_PROMETHEUS_URL" ]
|
||||||
|
then
|
||||||
|
push_start_notification="(echo 'backup 1' | curl $CURL_OPTS --data-binary @- $PUSH_PROMETHEUS_URL)"
|
||||||
|
push_success_notification="(echo 'backup 0' | curl $CURL_OPTS --data-binary @- $PUSH_PROMETHEUS_URL)"
|
||||||
|
push_fail_notification="(echo 'backup -1' | curl $CURL_OPTS --data-binary @- $PUSH_PROMETHEUS_URL)"
|
||||||
|
else
|
||||||
|
if [ -n "$PUSH_URL_START" ]
|
||||||
|
then
|
||||||
|
push_start_notification="curl $CURL_OPTS '$PUSH_URL_START'"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "$PUSH_URL_FAIL" ]
|
||||||
|
then
|
||||||
|
push_fail_notification="curl $CURL_OPTS '$PUSH_URL_FAIL'"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "$PUSH_URL_SUCCESS" ]
|
||||||
|
then
|
||||||
|
push_success_notification="curl $CURL_OPTS '$PUSH_URL_SUCCESS'"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
eval "$push_start_notification"
|
||||||
|
if [ "$(backup --machine-logs create 2>&1 | tee /tmp/backup.log && (grep -q 'backup finished' /tmp/backup.log))" ]
|
||||||
|
then
|
||||||
|
eval "$push_success_notification"
|
||||||
|
else
|
||||||
|
eval "$push_fail_notification"
|
||||||
|
fi
|
entrypoint.sh
@@ -9,22 +9,7 @@ fi
 
 cron_schedule="${CRON_SCHEDULE:?CRON_SCHEDULE not set}"
 
-if [ -n "$PUSH_URL_START" ]
-then
-    push_start_notification="curl -s '$PUSH_URL_START' &&"
-fi
-
-if [ -n "$PUSH_URL_FAIL" ]
-then
-    push_fail_notification="|| curl -s '$PUSH_URL_FAIL'"
-fi
-
-if [ -n "$PUSH_URL_SUCCESS" ]
-then
-    push_notification=" && (grep -q 'backup finished' /tmp/backup.log && curl -s '$PUSH_URL_SUCCESS' $push_fail_notification)"
-fi
-
-echo "$cron_schedule $push_start_notification backup --machine-logs create 2>&1 | tee /tmp/backup.log $push_notification" | crontab -
+echo "$cron_schedule /cronjob.sh" | crontab -
 crontab -l
 
 crond -f -d8 -L /dev/stdout
|
34
pg_backup.sh
Normal file
34
pg_backup.sh
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
BACKUP_FILE='/var/lib/postgresql/data/backup.sql'
|
||||||
|
|
||||||
|
function backup {
|
||||||
|
export PGPASSWORD=$(cat $POSTGRES_PASSWORD_FILE)
|
||||||
|
pg_dump -U ${POSTGRES_USER} ${POSTGRES_DB} > $BACKUP_FILE
|
||||||
|
}
|
||||||
|
|
||||||
|
function restore {
|
||||||
|
cd /var/lib/postgresql/data/
|
||||||
|
restore_config(){
|
||||||
|
# Restore allowed connections
|
||||||
|
cat pg_hba.conf.bak > pg_hba.conf
|
||||||
|
su postgres -c 'pg_ctl reload'
|
||||||
|
}
|
||||||
|
# Don't allow any other connections than local
|
||||||
|
cp pg_hba.conf pg_hba.conf.bak
|
||||||
|
echo "local all all trust" > pg_hba.conf
|
||||||
|
su postgres -c 'pg_ctl reload'
|
||||||
|
trap restore_config EXIT INT TERM
|
||||||
|
|
||||||
|
# Recreate Database
|
||||||
|
psql -U ${POSTGRES_USER} -d postgres -c "DROP DATABASE ${POSTGRES_DB} WITH (FORCE);"
|
||||||
|
createdb -U ${POSTGRES_USER} ${POSTGRES_DB}
|
||||||
|
psql -U ${POSTGRES_USER} -d ${POSTGRES_DB} -1 -f $BACKUP_FILE
|
||||||
|
|
||||||
|
trap - EXIT INT TERM
|
||||||
|
restore_config
|
||||||
|
}
|
||||||
|
|
||||||
|
$@
|