22 Commits

SHA1 Message Date
853832355a backup scripts 2025-04-21 15:17:05 +01:00
d52472eb3b Update README.md 2025-04-21 12:33:46 +01:00
52dfa64b61 misc 2025-04-21 12:04:42 +01:00
847306e168 dated backup files 2025-04-21 11:08:12 +01:00
8fcf1d4bcb WIP for backup/restore 2025-04-21 11:00:21 +01:00
4521bd7f81 v 2024-12-15 13:16:17 +00:00
f1e5ad0f76 enable search by default 2024-12-15 13:15:13 +00:00
23ffe6b17d Update compose.meilisearch.yml 2024-12-15 10:54:47 +00:00
12813a3900 env 2024-12-04 15:19:49 +00:00
7f82ad0da8 postgres version 2024-10-26 16:24:49 +01:00
992284418a add more mail config keys + tmpfs 2024-10-20 14:58:17 +01:00
e619a451f0 OPENID_1_DISCOVERY 2024-09-09 19:23:00 +01:00
bbd3d9e989 add openid config 2024-09-09 19:12:22 +01:00
f4a1658d38 update default DB_DOCKER_VERSION to 16 (existing instance should upgrade manually if they wish) 2024-08-27 21:03:59 +01:00
083ddc8516 add AKISMET_API_KEY 2024-08-19 16:52:11 +01:00
1843f0ee27 versions and docs 2024-08-08 13:34:23 +01:00
fb9c6a9faa multiarch 2024-04-01 19:50:32 +01:00
c92c669f39 postgis required 2024-04-01 19:44:28 +01:00
6c0dd9d3cb misc 2024-04-01 18:57:17 +01:00
a5d24231e8 update default env 2024-02-16 20:07:08 +00:00
8beb727d24 make meilisearch optional 2024-02-16 20:07:01 +00:00
b0cc2dd9c2 logging limits 2023-10-10 11:53:46 +01:00
8 changed files with 381 additions and 78 deletions

.env.sample

@@ -1,20 +1,32 @@
TYPE=bonfire
# choose what flavour of Bonfire to run
FLAVOUR=classic
FLAVOUR=social
APP_VERSION=latest
# choose what extra services you want to run
COMPOSE_FILE="compose.yml:compose.meilisearch.yml"
APP_VERSION_FLAVOUR=${APP_VERSION}-${FLAVOUR}
# Different flavours/forks or architectures may require different builds of bonfire:
# for ARM (manual build):
APP_DOCKER_IMAGE=bonfirenetworks/bonfire:latest-${FLAVOUR}-aarch64
# for x86 (built by CI):
APP_DOCKER_IMAGE=bonfirenetworks/bonfire:latest-${FLAVOUR}-amd64
# APP_DOCKER_IMAGE=bonfirenetworks/bonfire:${APP_VERSION_FLAVOUR}-aarch64
# for x86 (built by CI):
APP_DOCKER_IMAGE=bonfirenetworks/bonfire:${APP_VERSION_FLAVOUR}-amd64
# multi-arch image (built by CI, but currently not working):
#APP_DOCKER_IMAGE=bonfirenetworks/bonfire:latest-${FLAVOUR}
#APP_DOCKER_IMAGE=bonfirenetworks/bonfire:${APP_VERSION_FLAVOUR}
DB_DOCKER_VERSION=17-3.5
# note that different flavours or architectures may require different postgres builds:
# For ARM or x86:
DB_DOCKER_IMAGE=ghcr.io/baosystems/postgis:${DB_DOCKER_VERSION}
# for x86:
# DB_DOCKER_IMAGE=postgis/postgis:${DB_DOCKER_VERSION}-alpine
# multiarch (but doesn't have required Postgis extension)
#DB_DOCKER_IMAGE=postgres:${DB_DOCKER_VERSION}-alpine
# TODO: maybe use for upgrading to a new Postgres version? (NOTE: does not work with postgis data)
# DB_DOCKER_IMAGE=pgautoupgrade/pgautoupgrade:16-alpine
# different flavours or architectures may require different postgres builds:
DB_DOCKER_IMAGE=postgres:12-alpine
#DB_DOCKER_IMAGE=postgis/postgis:12-3.2-alpine
#DB_DOCKER_IMAGE=ghcr.io/baosystems/postgis:12-3.2
# enter your instance's domain name
DOMAIN=bonfire.example.com
@@ -22,8 +34,11 @@ DOMAIN=bonfire.example.com
## Domain aliases
#EXTRA_DOMAINS=', `www.bonfire.example.com`'
# enable abra backups
ENABLE_BACKUPS=true
# what service to use for sending out emails (e.g. smtp, mailgun, none) NOTE: you should also set the corresponding keys in secrets.env
MAIL_BACKEND=mailgun
MAIL_BACKEND=none
# require an email address to be invited before being able to sign up? (true or false)
INVITE_ONLY=true
@@ -34,7 +49,8 @@ INVITE_ONLY=true
# max file upload size in bytes - default is 20 MB
UPLOAD_LIMIT=20000000
DB_MEMORY_LIMIT=1000M
# in megabytes
DB_MEMORY_LIMIT=1000
# how much info to include in logs (from less to more: emergency, alert, critical, error, warning, notice, info, debug)
LOG_LEVEL=info
@@ -47,17 +63,28 @@ LOG_LEVEL=info
# change ALL the values:
# if `INVITE_ONLY` is true, what should be the secret code to sign up?
INVITE_KEY=123
# INVITE_KEY=123
# signup to mailgun.com and edit with your domain and API key
MAIL_DOMAIN=mgo.example.com
MAIL_KEY=xyz
MAIL_FROM=admin@example.com
# sign up to an email service and edit with the relevant info, see: https://docs.bonfirenetworks.org/Bonfire.Mailer.html
# MAIL_DOMAIN=mgo.example.com
# MAIL_KEY=xyz
# MAIL_FROM=admin@example.com
# MAIL_PROJECT_ID=
# MAIL_PRIVATE_KEY=
# MAIL_BASE_URI=
# MAIL_REGION=
# MAIL_SESSION_TOKEN=
# MAIL_SERVER=
# MAIL_USER=
# MAIL_PASSWORD=
# MAIL_PORT=
# MAIL_TLS=
# MAIL_SSL=
# MAIL_SMTP_AUTH=
# MAIL_RETRIES=
# MAIL_ARGS=
# error reporting
SENTRY_DSN=
# Store uploads in S3-compatible service
# Store uploads in S3-compatible service:
# UPLOADS_S3_BUCKET=
# UPLOADS_S3_ACCESS_KEY_ID=
# UPLOADS_S3_SECRET_ACCESS_KEY=
@@ -66,12 +93,32 @@ SENTRY_DSN=
# UPLOADS_S3_SCHEME=https://
# UPLOADS_S3_URL=
# OpenID Connect:
# OPENID_1_DISCOVERY=
# OPENID_1_DISPLAY_NAME=
# OPENID_1_CLIENT_ID=
# OPENID_1_CLIENT_SECRET=
# OPENID_1_SCOPE=
# OPENID_1_RESPONSE_TYPE=code
# ^ can be code, token or id_token
# ORCID_CLIENT_ID=
# ORCID_CLIENT_SECRET=
# Bonfire extensions configs:
WEB_PUSH_SUBJECT=mailto:admin@example.com
WEB_PUSH_PUBLIC_KEY=xyz
WEB_PUSH_PRIVATE_KEY=abc
GEOLOCATE_OPENCAGEDATA=
GITHUB_TOKEN=xyz
# WEB_PUSH_SUBJECT=mailto:admin@example.com
# WEB_PUSH_PUBLIC_KEY=xyz
# WEB_PUSH_PRIVATE_KEY=abc
# GEOLOCATE_OPENCAGEDATA=
# GITHUB_TOKEN=xyz
# AKISMET_API_KEY=
WITH_LV_NATIVE=0
WITH_IMAGE_VIX=1
WITH_AI=0
# error reporting:
# SENTRY_DSN=
# ====================================
# these secrets will be autogenerated/managed by abra and docker
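Those managed secrets can also be inspected and regenerated by hand; a minimal sketch, assuming abra's `app secret` subcommands (the `generate --all` form also appears in this recipe's helper script further down; the domain is a placeholder):

```sh
# list the app's secrets and whether they have been generated
abra app secret ls YOUR_APP_DOMAIN_NAME

# generate all missing secrets in one go
abra app secret generate --all YOUR_APP_DOMAIN_NAME
```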

README.md

@@ -15,12 +15,11 @@ A [coop-cloud](https://coopcloud.tech) recipe for deploying [Bonfire](https://bo
## Basic usage
1. Install [`abra`] on your computer
2. Prepare your server with `abra server add --provision your-server.domain.name server_username 22`
3. Deploy the [`coop-cloud/traefik`] proxy if you haven't already
3. `abra app new --secrets bonfire`
4. `abra app config your-server.domain.name` to check and edit the config (there are comments to explain the different options)
5. `abra app deploy your-server.domain.name`
1. Set up Docker Swarm and [`abra`]
2. Deploy [`coop-cloud/traefik`]
3. `abra app new bonfire --secrets` (optionally with `--pass` if you'd like to save secrets in `pass`), then select your server from the list and enter the domain name you want Bonfire to be served from
4. `abra app config YOUR_APP_DOMAIN_NAME` and check/edit the config keys
5. `abra app deploy YOUR_APP_DOMAIN_NAME`
6. Open the configured domain in your browser and sign up!
## Upgrades
@@ -34,5 +33,8 @@ A [coop-cloud](https://coopcloud.tech) recipe for deploying [Bonfire](https://bo
### The app isn't starting
On the server, run `docker service ls` to see which services are starting or not, and `docker service ps $container_name` to debug why a particular one isn't.
### How can I sign up via CLI?
Go into your app's Elixir console and enter something like `Bonfire.Me.make_account_only("my@email.net", "my pw")`
### How can I get to the app's Elixir console?
`abra app run your-server.domain.name app bin/bonfire remote`
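Putting those two answers together, a one-off signup from the CLI might look like this (a sketch; the domain, email address, and password are placeholders):

```sh
# open a remote Elixir console on the running app
abra app run your-server.domain.name app bin/bonfire remote

# then, at the iex> prompt:
# Bonfire.Me.make_account_only("my@email.net", "my pw")
```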

abra.sh

@@ -1 +1,4 @@
export APP_ENTRYPOINT_VERSION=v1
export PG_BACKUP_VERSION=v4
export MEILI_BACKUP_VERSION=v4
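These version strings exist because Docker Swarm configs are immutable: compose.yml interpolates them into the config names (e.g. `${STACK_NAME}_pg_backup_${PG_BACKUP_VERSION}`), so bumping a version makes the next deploy create a freshly named config object. A sketch of the workflow after editing one of the scripts (the `v5` value is hypothetical):

```sh
# after changing pg_backup.sh, bump its config version in abra.sh:
export PG_BACKUP_VERSION=v5

# then redeploy so swarm creates (and mounts) the renamed config
abra app deploy YOUR_APP_DOMAIN_NAME
```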

compose.meilisearch.yml (new file)

@@ -0,0 +1,44 @@
---
version: "3.8"

services:
  app:
    depends_on:
      - search
    environment:
      - SEARCH_MEILI_INSTANCE=http://${STACK_NAME}_search:7700

  search:
    image: getmeili/meilisearch:v1.11 # WIP: upgrade from v1.11 to 1.14
    secrets:
      - meili_master_key
    volumes:
      - "search-data:/meili_data"
      - "dump-data:/meili_dumps"
    networks:
      - internal
    entrypoint: ["tini", "--", "/docker-entrypoint.sh", "/bin/meilisearch"]
    environment:
      - MEILI_DUMP_DIR=/meili_dumps
    configs:
      - source: app_entrypoint
        target: /docker-entrypoint.sh
        mode: 0555
      - source: meili_backup
        target: /meili_backup.sh
        mode: 0555
    labels:
      backupbot.backup: ${ENABLE_BACKUPS:-true}
      backupbot.backup.volumes.search-data: "false"
      backupbot.backup.volumes.dump-data.path: "/meili_dumps/meilisearch_latest.dump"
      backupbot.backup.pre-hook: "/meili_backup.sh backup"
      backupbot.restore.post-hook: '/meili_backup.sh restore'

volumes:
  search-data:
  dump-data:

configs:
  meili_backup:
    name: ${STACK_NAME}_meili_backup_${MEILI_BACKUP_VERSION}
    file: meili_backup.sh
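Because the backup hooks are plain scripts mounted into the container, they can also be exercised by hand; a sketch using `abra app run` against the `search` service defined above (placeholder domain):

```sh
# create a fresh dump and copy it to /meili_dumps/meilisearch_latest.dump
abra app run YOUR_APP_DOMAIN_NAME search /meili_backup.sh backup
```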

compose.yml

@@ -4,14 +4,15 @@ version: "3.8"
services:
  app:
    image: ${APP_DOCKER_IMAGE}
    # logging:
    #   driver: none
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "10"
    depends_on:
      - db
      - search
    environment:
      - POSTGRES_HOST=${STACK_NAME}_db
      - SEARCH_MEILI_INSTANCE=http://${STACK_NAME}_search:7700
      - POSTGRES_USER=postgres
      - POSTGRES_DB=bonfire_db
      - PUBLIC_PORT=443
@@ -30,32 +31,47 @@
      - LIVEVIEW_ENABLED
      - APP_NAME
      - PLUG_SERVER
      - WITH_LV_NATIVE
      - WITH_IMAGE_VIX
      - WITH_AI
      - DB_SLOW_QUERY_MS
      - DB_STATEMENT_TIMEOUT
      - MAIL_BACKEND
      - MAIL_DOMAIN
      - MAIL_FROM
      # for Mailgun
      - MAIL_KEY
      - MAIL_PROJECT_ID
      - MAIL_PRIVATE_KEY
      - MAIL_BASE_URI
      # for SMTP
      - MAIL_REGION
      - MAIL_SESSION_TOKEN
      - MAIL_SERVER
      - MAIL_USER
      - MAIL_PASSWORD
      - MAIL_PORT
      - MAIL_TLS
      - MAIL_SSL
      - MAIL_SMTP_AUTH
      - MAIL_RETRIES
      - MAIL_ARGS
      - SENTRY_DSN
      - OTEL_ENABLED
      - OTEL_SERVICE_NAME
      - OTEL_HONEYCOMB_API_KEY
      - OTEL_LIGHTSTEP_API_KEY
      - WEB_PUSH_SUBJECT
      - WEB_PUSH_PUBLIC_KEY
      - WEB_PUSH_PRIVATE_KEY
      - AKISMET_API_KEY
      - MAPBOX_API_KEY
      - GEOLOCATE_OPENCAGEDATA
      - GITHUB_TOKEN
      # for S3 -- note: if you don't set these up upfront, images will break later.
      - UPLOADS_S3_BUCKET
      - UPLOADS_S3_ACCESS_KEY_ID
      - UPLOADS_S3_SECRET_ACCESS_KEY
@@ -63,15 +79,16 @@
      - UPLOADS_S3_HOST
      - UPLOADS_S3_SCHEME
      - UPLOADS_S3_URL
      # for OpenID -- see also https://github.com/bonfire-networks/bonfire_open_id/blob/main/lib/runtime_config.ex
      - OAUTH_ISSUER
      - OPENID_1_CLIENT_SECRET
      - OPENID_1_DISPLAY_NAME
      - OPENID_1_DISCOVERY
      - OPENID_1_REDIRECT
      - OPENID_1_RESPONSE_TYPE
      - OPENID_1_CLIENT_ID
      - OPENID_1_CLIENT_SECRET
      - OPENID_1_SCOPE
      - OPENID_1_RESPONSE_TYPE
      - ORCID_CLIENT_ID
      - ORCID_CLIENT_SECRET
    secrets:
      - postgres_password
@@ -83,6 +100,7 @@ services:
      - livebook_password
    volumes:
      - upload-data:/opt/app/data/uploads
      # - backup-data:/opt/app/data/backup
    networks:
      - proxy
      - internal
@@ -95,17 +113,20 @@ services:
      restart_policy:
        condition: on-failure
      labels:
        - "traefik.enable=true"
        - "traefik.http.services.${STACK_NAME}.loadbalancer.server.port=4000"
        - "traefik.http.routers.${STACK_NAME}.rule=Host(`${DOMAIN}`${EXTRA_DOMAINS})"
        - "traefik.http.routers.${STACK_NAME}.entrypoints=web-secure"
        - "traefik.http.routers.${STACK_NAME}.tls.certresolver=${LETS_ENCRYPT_ENV}"
        #- "traefik.http.routers.${STACK_NAME}.middlewares=error-pages-middleware"
        #- "traefik.http.services.${STACK_NAME}.loadbalancer.server.port=80"
        backupbot.backup: ${ENABLE_BACKUPS:-true}
        # backupbot.backup.volumes.upload-data: "true"
        # backupbot.backup.volumes.upload-data.path: "/opt/app/data/uploads"
        traefik.enable: "true"
        traefik.http.services.${STACK_NAME}.loadbalancer.server.port: "4000"
        traefik.http.routers.${STACK_NAME}.rule: Host(`${DOMAIN}`${EXTRA_DOMAINS})
        traefik.http.routers.${STACK_NAME}.entrypoints: web-secure
        traefik.http.routers.${STACK_NAME}.tls.certresolver: ${LETS_ENCRYPT_ENV}
        #traefik.http.routers.${STACK_NAME}.middlewares: error-pages-middleware
        #traefik.http.services.${STACK_NAME}.loadbalancer.server.port: 80
        ## Redirect from EXTRA_DOMAINS to DOMAIN
        #- "traefik.http.routers.${STACK_NAME}.middlewares=${STACK_NAME}-redirect"
        #- "traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLForceHost=true"
        #- "traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLHost=${DOMAIN}"
        #traefik.http.routers.${STACK_NAME}.middlewares: ${STACK_NAME}-redirect
        #traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLForceHost: true
        #traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLHost: ${DOMAIN}
    # healthcheck:
    #   test: ["CMD", "curl", "-f", "http://localhost"]
    #   interval: 30s
@@ -124,35 +145,39 @@
      - postgres_password
    volumes:
      - db-data:/var/lib/postgresql/data
      # - type: tmpfs
      #   target: /dev/shm
      #   tmpfs:
      #     size: 1096000000 # (about 1GB)
      - type: tmpfs
        target: /dev/shm
        tmpfs:
          size: 1000000000
          # about 1 GB in bytes ^
    networks:
      - internal
    # shm_size: ${DB_MEMORY_LIMIT}
    # tmpfs:
    #   - /tmp:size=${DB_MEMORY_LIMIT}
    # shm_size: ${DB_MEMORY_LIMIT} # unsupported by docker swarm
    tmpfs:
      - /tmp:size=${DB_MEMORY_LIMIT}M
    command: >
      postgres
      -c max_connections=200
      -c shared_buffers=${DB_MEMORY_LIMIT}MB
    # -c shared_preload_libraries='pg_stat_statements'
    # -c statement_timeout=1800000
    # -c pg_stat_statements.track=all
    #entrypoint: ['tail', '-f', '/dev/null'] # uncomment when the Postgres DB is corrupted and won't start
  search:
    image: getmeili/meilisearch:latest
    secrets:
      - meili_master_key
    volumes:
      - "search-data:/data.ms"
    networks:
      - internal
    entrypoint: ["tini", "--", "/docker-entrypoint.sh", "/bin/meilisearch"]
    labels:
      backupbot.backup: ${ENABLE_BACKUPS:-true}
      # backupbot.backup.volumes.db-data: false
      backupbot.backup.volumes.db-data.path: "backup.sql"
      backupbot.backup.pre-hook: "/pg_backup.sh backup"
      backupbot.restore.post-hook: '/pg_backup.sh restore'
    configs:
      - source: app_entrypoint
        target: /docker-entrypoint.sh
        mode: 0555
      - source: pg_backup
        target: /pg_backup.sh
        mode: 0555
volumes:
  db-data:
  search-data:
  upload-data:
  # backup-data:
networks:
  proxy:
@@ -164,6 +189,9 @@ configs:
    name: ${STACK_NAME}_app_entrypoint_${APP_ENTRYPOINT_VERSION}
    file: entrypoint.sh.tmpl
    template_driver: golang
  pg_backup:
    name: ${STACK_NAME}_pg_backup_${PG_BACKUP_VERSION}
    file: pg_backup.sh
secrets:
  postgres_password:

meili_backup.sh (new executable file)

@@ -0,0 +1,107 @@
#!/bin/sh
set -e

backup() {
  SECRET=$(cat /run/secrets/meili_master_key)

  # Create dump
  echo "Creating new Meilisearch dump..."
  RESPONSE=$(curl -s -X POST 'http://localhost:7700/dumps' -H "Authorization: Bearer $SECRET")
  echo "Response: $RESPONSE"

  # More robust extraction of task UID
  TASK_UID=$(echo "$RESPONSE" | sed -n 's/.*"taskUid":\([0-9]*\).*/\1/p')
  if [ -z "$TASK_UID" ]; then
    echo "Failed to extract task UID from response. Aborting."
    exit 1
  fi

  echo "Waiting for dump creation (task $TASK_UID)..."
  MAX_ATTEMPTS=600
  ATTEMPT=0
  while [ $ATTEMPT -lt $MAX_ATTEMPTS ]; do
    RESPONSE=$(curl -s "http://localhost:7700/tasks/$TASK_UID" -H "Authorization: Bearer $SECRET")
    echo "Task status response: $RESPONSE"
    TASK_STATUS=$(echo "$RESPONSE" | sed -n 's/.*"status":"\([^"]*\)".*/\1/p')
    if [ -z "$TASK_STATUS" ]; then
      echo "Failed to extract task status. Retrying..."
      ATTEMPT=$((ATTEMPT+1))
      sleep 5
      continue
    fi
    echo "Current status: $TASK_STATUS"
    if [ "$TASK_STATUS" = "succeeded" ]; then
      echo "Dump creation succeeded"
      break
    elif [ "$TASK_STATUS" = "enqueued" ] || [ "$TASK_STATUS" = "processing" ]; then
      echo "Dump creation in progress... ($TASK_STATUS)"
      ATTEMPT=$((ATTEMPT+1))
      sleep 5
    else
      echo "Dump creation in unexpected state: $TASK_STATUS. Giving up."
      exit 1
    fi
  done

  if [ $ATTEMPT -eq $MAX_ATTEMPTS ]; then
    echo "Timed out waiting for dump creation"
    exit 1
  fi

  # Extract dump UID more reliably
  DUMP_UID=$(echo "$RESPONSE" | sed -n 's/.*"dumpUid":"\([^"]*\)".*/\1/p')
  if [ -z "$DUMP_UID" ]; then
    echo "Failed to extract dump UID. Aborting."
    exit 1
  fi
  echo "Using dump $DUMP_UID"

  # Check if file exists before copying
  if [ ! -f "/meili_dumps/$DUMP_UID.dump" ]; then
    echo "Dump file /meili_dumps/$DUMP_UID.dump not found!"
    ls -la /meili_dumps/
    exit 1
  fi

  cp -f "/meili_dumps/$DUMP_UID.dump" "/meili_dumps/meilisearch_latest.dump"
  echo "Dump created and copied successfully. You can find it at /meili_dumps/meilisearch_latest.dump"
}

restore() {
  echo 'Restarting Meilisearch with imported dump, may take a while to become available...'

  # Check if dump file exists
  if [ ! -f "/meili_dumps/meilisearch_latest.dump" ]; then
    echo "Error: Dump file not found at /meili_dumps/meilisearch_latest.dump"
    exit 1
  fi

  pkill meilisearch || echo "No Meilisearch process found to kill"

  echo "Starting Meilisearch with import dump option..."
  MEILI_NO_ANALYTICS=true /bin/meilisearch --import-dump /meili_dumps/meilisearch_latest.dump &

  echo "Meilisearch restore process initiated..."
}

# Handle command line argument
case "$1" in
  backup)
    backup
    ;;
  restore)
    restore
    ;;
  *)
    echo "Usage: $0 {backup|restore}"
    exit 1
    ;;
esac

pg_backup.sh (new executable file)

@@ -0,0 +1,70 @@
#!/bin/bash
set -e

BACKUP_PATH="/var/lib/postgresql/data"
LATEST_BACKUP_FILE="${BACKUP_PATH}/backup.sql"

function backup {
  FILE_WITH_DATE="${BACKUP_PATH}/backup_$(date +%F).sql"

  if [ -f "$POSTGRES_PASSWORD_FILE" ]; then
    export PGPASSWORD=$(cat "$POSTGRES_PASSWORD_FILE")
  fi

  echo "Creating backup at ${FILE_WITH_DATE}..."
  pg_dump -U "${POSTGRES_USER:-postgres}" "${POSTGRES_DB:-postgres}" > "${FILE_WITH_DATE}"

  echo "Copying to ${LATEST_BACKUP_FILE}..."
  cp -f "${FILE_WITH_DATE}" "${LATEST_BACKUP_FILE}"

  echo "Backup done. You will find it at ${LATEST_BACKUP_FILE}"
}

function restore {
  echo "Restoring database from ${LATEST_BACKUP_FILE}..."
  cd /var/lib/postgresql/data/

  function restore_config {
    echo "Restoring original pg_hba.conf configuration..."
    cat pg_hba.conf.bak > pg_hba.conf
    su postgres -c 'pg_ctl reload'
  }

  # Don't allow any other connections than local
  echo "Setting up temporary pg_hba.conf to only allow local connections..."
  cp pg_hba.conf pg_hba.conf.bak
  echo "local all all trust" > pg_hba.conf
  su postgres -c 'pg_ctl reload'
  trap restore_config EXIT INT TERM

  # Recreate Database
  echo "Dropping existing database ${POSTGRES_DB:-postgres}..."
  psql -U "${POSTGRES_USER:-postgres}" -d postgres -c "DROP DATABASE \"${POSTGRES_DB:-postgres}\" WITH (FORCE);"

  echo "Creating fresh database ${POSTGRES_DB:-postgres}..."
  createdb -U "${POSTGRES_USER:-postgres}" "${POSTGRES_DB:-postgres}"

  echo "Restoring data from ${LATEST_BACKUP_FILE}..."
  psql -U "${POSTGRES_USER:-postgres}" -d "${POSTGRES_DB:-postgres}" -1 -f "${LATEST_BACKUP_FILE}"

  trap - EXIT INT TERM
  restore_config

  echo "Database restore completed successfully."
}

# Execute the function passed as argument
case "$1" in
  backup)
    backup
    ;;
  restore)
    restore
    ;;
  *)
    echo "Usage: $0 {backup|restore}"
    exit 1
    ;;
esac
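The same pattern works on the Postgres side if you want a dump outside of backupbot's schedule; a sketch (placeholder domain, `db` service as defined in compose.yml):

```sh
# writes backup_YYYY-MM-DD.sql and refreshes backup.sql under /var/lib/postgresql/data
abra app run YOUR_APP_DOMAIN_NAME db /pg_backup.sh backup
```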


@@ -1,5 +1,7 @@
#!/bin/sh
# NOTE: this script should no longer be needed now that we have `# length=128` next to these secrets in the .env
# abra app secret generate --all $1
s1=$(openssl rand -base64 128)