Mirror of https://github.com/bonfire-networks/bonfire-deploy.git

WIP for backup/restore

commit 8fcf1d4bcb (parent 4521bd7f81)
.env.sample (11 changed lines)
@@ -2,17 +2,19 @@ TYPE=bonfire

# choose what flavour of Bonfire to run
FLAVOUR=classic
APP_VERSION=latest

# choose what extra services you want to run
COMPOSE_FILE="compose.yml:compose.meilisearch.yml"

APP_VERSION_FLAVOUR=${APP_VERSION}-${FLAVOUR}
# Different flavours/forks or architectures may require different builds of bonfire:
# for ARM (manual build):
# APP_DOCKER_IMAGE=bonfirenetworks/bonfire:latest-${FLAVOUR}-aarch64
# APP_DOCKER_IMAGE=bonfirenetworks/bonfire:${APP_VERSION_FLAVOUR}-aarch64
# for x86 (built by CI):
APP_DOCKER_IMAGE=bonfirenetworks/bonfire:latest-${FLAVOUR}-amd64
APP_DOCKER_IMAGE=bonfirenetworks/bonfire:${APP_VERSION_FLAVOUR}-amd64
# multi-arch image (built by CI, but currently not working):
#APP_DOCKER_IMAGE=bonfirenetworks/bonfire:latest-${FLAVOUR}
#APP_DOCKER_IMAGE=bonfirenetworks/bonfire:${APP_VERSION_FLAVOUR}

DB_DOCKER_VERSION=17-3.5
# note that different flavours or architectures may require different postgres builds:
@@ -32,6 +34,9 @@ DOMAIN=bonfire.example.com
## Domain aliases
#EXTRA_DOMAINS=', `www.bonfire.example.com`'

# enable abra backups
ENABLE_BACKUPS=true

# what service to use for sending out emails (eg. smtp, mailgun, none) NOTE: you should also set the corresponding keys in secrets.env
MAIL_BACKEND=none
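A minimal worked sketch (not part of the file; it just uses the sample defaults above) of how these variables combine into the image tag, and how the compose files treat ENABLE_BACKUPS when it is unset:

    FLAVOUR=classic
    APP_VERSION=latest
    APP_VERSION_FLAVOUR=${APP_VERSION}-${FLAVOUR}     # -> latest-classic
    APP_DOCKER_IMAGE=bonfirenetworks/bonfire:${APP_VERSION_FLAVOUR}-amd64
    echo "$APP_DOCKER_IMAGE"                          # -> bonfirenetworks/bonfire:latest-classic-amd64
    echo "${ENABLE_BACKUPS:-true}"                    # the backupbot labels default to "true" when unset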
abra.sh (3 changed lines)
@@ -1 +1,4 @@
export APP_ENTRYPOINT_VERSION=v1
export PG_BACKUP_VERSION=v1
export MEILI_BACKUP_VERSION=v1
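These exports are interpolated into the config names used by the compose files (e.g. ${STACK_NAME}_pg_backup_${PG_BACKUP_VERSION}). Since swarm configs are immutable, bumping a version string is the conventional way in coop-cloud recipes to make a redeploy pick up an edited script; a hypothetical example:

    # After editing pg_backup.sh, bump its version so the next deploy creates a new config object:
    export PG_BACKUP_VERSION=v2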
compose.meilisearch.yml
@@ -9,19 +9,36 @@ services:
      - SEARCH_MEILI_INSTANCE=http://${STACK_NAME}_search:7700

  search:
    image: getmeili/meilisearch:v1
    image: getmeili/meilisearch:v1.11 # TODO: upgrade to 1.14
    secrets:
      - meili_master_key
    volumes:
      - "search-data:/meili_data"
      - "dump-data:/meili_dumps"
    networks:
      - internal
    entrypoint: ["tini", "--", "/docker-entrypoint.sh", "/bin/meilisearch"]
    environment:
      - MEILI_DUMP_DIR=/meili_dumps
    configs:
      - source: app_entrypoint
        target: /docker-entrypoint.sh
        mode: 0555

      - source: meili_backup
        target: /meili_backup.sh
        mode: 0555
    labels:
      backupbot.backup: ${ENABLE_BACKUPS:-true}
      backupbot.backup.volumes.search-data: "false"
      backupbot.backup.volumes.dump-data.path: "/meili_dumps/meilisearch_latest.dump"
      backupbot.backup.pre-hook: "/meili_backup.sh backup"
      backupbot.restore.post-hook: '/meili_backup.sh restore'

volumes:
  search-data:

  dump-data:

configs:
  meili_backup:
    name: ${STACK_NAME}_meili_backup_${MEILI_BACKUP_VERSION}
    file: meili_backup.sh
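Roughly what the pre-hook label amounts to, shown as a manual sketch run inside the search container (the secret path matches the secrets mount above; meili_backup.sh further below adds polling and error handling):

    SECRET=$(cat /run/secrets/meili_master_key)
    curl -s -X POST 'http://localhost:7700/dumps' -H "Authorization: Bearer $SECRET"
    # This returns a task; once it succeeds, the dump lands in /meili_dumps (MEILI_DUMP_DIR),
    # which is the dump-data volume backupbot is told to back up.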
compose.yml (37 changed lines)
@@ -112,17 +112,20 @@ services:
      restart_policy:
        condition: on-failure
      labels:
        - "traefik.enable=true"
        - "traefik.http.services.${STACK_NAME}.loadbalancer.server.port=4000"
        - "traefik.http.routers.${STACK_NAME}.rule=Host(`${DOMAIN}`${EXTRA_DOMAINS})"
        - "traefik.http.routers.${STACK_NAME}.entrypoints=web-secure"
        - "traefik.http.routers.${STACK_NAME}.tls.certresolver=${LETS_ENCRYPT_ENV}"
        #- "traefik.http.routers.${STACK_NAME}.middlewares=error-pages-middleware"
        #- "traefik.http.services.${STACK_NAME}.loadbalancer.server.port=80"
        backupbot.backup: ${ENABLE_BACKUPS:-true}
        backupbot.backup.volumes.upload-data: "true"
        # backupbot.backup.volumes.upload-data.path: "/opt/app/data/uploads"
        traefik.enable: "true"
        traefik.http.services.${STACK_NAME}.loadbalancer.server.port: "4000"
        traefik.http.routers.${STACK_NAME}.rule: Host(`${DOMAIN}`${EXTRA_DOMAINS})
        traefik.http.routers.${STACK_NAME}.entrypoints: web-secure
        traefik.http.routers.${STACK_NAME}.tls.certresolver: ${LETS_ENCRYPT_ENV}
        #traefik.http.routers.${STACK_NAME}.middlewares: error-pages-middleware
        #traefik.http.services.${STACK_NAME}.loadbalancer.server.port: 80
        ## Redirect from EXTRA_DOMAINS to DOMAIN
        #- "traefik.http.routers.${STACK_NAME}.middlewares=${STACK_NAME}-redirect"
        #- "traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLForceHost=true"
        #- "traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLHost=${DOMAIN}"
        #traefik.http.routers.${STACK_NAME}.middlewares: ${STACK_NAME}-redirect
        #traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLForceHost: true
        #traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLHost: ${DOMAIN}
      # healthcheck:
      #   test: ["CMD", "curl", "-f", "http://localhost"]
      #   interval: 30s
@@ -159,7 +162,16 @@ services:
      # -c statement_timeout=1800000
      # -c pg_stat_statements.track=all
    #entrypoint: ['tail', '-f', '/dev/null'] # uncomment when the Postgres DB is corrupted and won't start

    labels:
      backupbot.backup: ${ENABLE_BACKUPS:-true}
      # backupbot.backup.volumes.db-data: false
      backupbot.backup.volumes.db-data.path: "backup.sql"
      backupbot.backup.pre-hook: "/pg_backup.sh backup"
      backupbot.restore.post-hook: '/pg_backup.sh restore'
    configs:
      - source: pg_backup
        target: /pg_backup.sh
        mode: 0555

volumes:
  db-data:
@@ -175,6 +187,9 @@ configs:
    name: ${STACK_NAME}_app_entrypoint_${APP_ENTRYPOINT_VERSION}
    file: entrypoint.sh.tmpl
    template_driver: golang
  pg_backup:
    name: ${STACK_NAME}_pg_backup_${PG_BACKUP_VERSION}
    file: pg_backup.sh

secrets:
  postgres_password:
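A quick way to sanity-check that the pre-hook produced the dump, from the host (a sketch; the name filter is an assumption based on the usual <stack>_db service naming):

    docker exec -it $(docker ps -q -f name=_db) ls -lh /var/lib/postgresql/data/backup.sql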
meili_backup.sh (new file, 44 lines)
@@ -0,0 +1,44 @@
#!/bin/bash

set -e

function backup {
  # pre-hook command for compose.meilisearch.yml: ask Meilisearch to create a
  # dump via its HTTP API, wait for the task to finish, then copy the dump to a
  # stable filename that backupbot picks up.
  export SECRET=$(cat /run/secrets/meili_master_key)
  TASK_UID=$(curl -s -X POST 'http://localhost:7700/dumps' -H "Authorization: Bearer $SECRET" | grep -o '"uid":[0-9]*' | cut -d':' -f2)
  echo "Waiting for dump creation (task $TASK_UID)..."
  MAX_ATTEMPTS=600
  ATTEMPT=0
  while [ $ATTEMPT -lt $MAX_ATTEMPTS ]; do
    TASK_STATUS=$(curl -s "http://localhost:7700/tasks/$TASK_UID" -H "Authorization: Bearer $SECRET" | grep -o '"status":"[^"]*"' | cut -d':' -f2 | tr -d '"')
    if [ "$TASK_STATUS" = "succeeded" ]; then
      echo "Dump creation succeeded"
      break
    elif [ "$TASK_STATUS" = "enqueued" ] || [ "$TASK_STATUS" = "processing" ]; then
      echo "Dump creation in progress... ($TASK_STATUS)"
      ATTEMPT=$((ATTEMPT+1))
      sleep 2
    else
      echo "Dump creation in unexpected state: $TASK_STATUS"
      exit 1
    fi
  done
  if [ $ATTEMPT -eq $MAX_ATTEMPTS ]; then
    echo "Timed out waiting for dump creation"
    exit 1
  fi
  DUMP_UID=$(curl -s "http://localhost:7700/tasks/$TASK_UID" -H "Authorization: Bearer $SECRET" | grep -o '"dumpUid":"[^"]*"' | cut -d':' -f2 | tr -d '"')
  echo "Using dump $DUMP_UID"
  cp "/meili_dumps/$DUMP_UID.dump" "/meili_dumps/meilisearch_latest.dump"
  echo "Dump created and copied successfully"
}

function restore {
  # post-hook command: restart Meilisearch so it imports the restored dump.
  echo 'Restarting Meilisearch with imported dump, may take a while to become available...'
  pkill meilisearch
  MEILI_NO_ANALYTICS=true /bin/meilisearch --import-dump /meili_dumps/meilisearch_latest.dump &
}

"$@"
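The bare "$@" at the end dispatches to a function named by the first argument, which is how the hook labels above invoke the script:

    /meili_backup.sh backup    # backup pre-hook: runs the backup() function
    /meili_backup.sh restore   # restore post-hook: runs the restore() function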
pg_backup.sh (new file, 34 lines)
@@ -0,0 +1,34 @@
#!/bin/bash

set -e

BACKUP_FILE='/var/lib/postgresql/data/backup.sql'

function backup {
  # pre-hook command for compose.yml: dump the database into the db-data volume
  # so backupbot picks up backup.sql.
  export PGPASSWORD=$(cat "$POSTGRES_PASSWORD_FILE")
  pg_dump -U "${POSTGRES_USER}" "${POSTGRES_DB}" > "$BACKUP_FILE"
}

function restore {
  cd /var/lib/postgresql/data/
  restore_config(){
    # Restore allowed connections
    cat pg_hba.conf.bak > pg_hba.conf
    su postgres -c 'pg_ctl reload'
  }
  # Don't allow any other connections than local
  cp pg_hba.conf pg_hba.conf.bak
  echo "local all all trust" > pg_hba.conf
  su postgres -c 'pg_ctl reload'
  trap restore_config EXIT INT TERM

  # Recreate the database from the dump
  psql -U "${POSTGRES_USER}" -d postgres -c "DROP DATABASE ${POSTGRES_DB} WITH (FORCE);"
  createdb -U "${POSTGRES_USER}" "${POSTGRES_DB}"
  psql -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" -1 -f "$BACKUP_FILE"

  trap - EXIT INT TERM
  restore_config
}

"$@"
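One note on the restore path: psql's -1 flag is --single-transaction, so a failed replay rolls back instead of leaving a half-imported database. The final step is equivalent to (a sketch, same variables as the script):

    psql -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" --single-transaction --file "$BACKUP_FILE"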