Compare commits

..

No commits in common. "main" and "simon-add-local-storage-support" have entirely different histories.

9 changed files with 38 additions and 115 deletions

View File

@ -18,8 +18,6 @@ steps:
STACK_NAME: outline
LETS_ENCRYPT_ENV: production
APP_ENTRYPOINT_VERSION: v1
DB_ENTRYPOINT_VERSION: v1
PG_BACKUP_VERSION: v1
SECRET_DB_PASSWORD_VERSION: v1
SECRET_SECRET_KEY_VERSION: v1 # length=64
SECRET_UTILS_SECRET_VERSION: v1 # length=64
@ -38,7 +36,7 @@ steps:
from_secret: drone_abra-bot_token
fork: true
repositories:
- toolshed/auto-recipes-catalogue-json
- coop-cloud/auto-recipes-catalogue-json
trigger:
event: tag

View File

@ -8,8 +8,6 @@ DOMAIN=outline.example.com
#EXTRA_DOMAINS=', `www.outline.example.com`'
LETS_ENCRYPT_ENV=production
ENABLE_BACKUPS=true
COMPOSE_FILE="compose.yml"
# REQUIRED
@ -41,7 +39,7 @@ WEB_CONCURRENCY=1
# Override the maximum size of document imports, could be required if you have
# especially large Word documents with embedded imagery
FILE_STORAGE_IMPORT_MAX_SIZE=5120000
MAXIMUM_IMPORT_SIZE=5120000
# You can remove this line if your reverse proxy already logs incoming http
# requests and this ends up being duplicative

View File

@ -22,12 +22,13 @@ Wiki and knowledge base for growing teams
3. `abra app new ${REPO_NAME}`
- **WARNING**: Choose "n" when `abra` asks if you'd like to generate secrets
4. `abra app config YOURAPPNAME` - be sure to change `$DOMAIN` to something that resolves to
your Docker swarm box
5. Insert secrets:
- `abra app secret insert YOURAPPNAME secret_key v1 $(openssl rand -hex 32)` #12
- `abra app secret generate -a YOURAPPNAME`
6. `abra app deploy YOURAPPNAME`
7. Open the configured domain in your browser to finish set-up
your Docker swarm box. For Minio, you'll want:
- `AWS_ACCESS_KEY_ID=<minio username>`
- `AWS_REGION="us-east-1"`
- `AWS_S3_UPLOAD_BUCKET_URL=https://minio.example.com`
- `AWS_S3_UPLOAD_BUCKET_NAME=<your bucket name>`
5. `abra app deploy YOURAPPNAME`
6. Open the configured domain in your browser to finish set-up
[`abra`]: https://git.coopcloud.tech/coop-cloud/abra
[`coop-cloud/traefik`]: https://git.coopcloud.tech/coop-cloud/traefik
@ -40,6 +41,14 @@ Wiki and knowledge base for growing teams
abra app cmd YOURAPPNAME app create_email_user test@example.com
```
### Post-deploy migration
```
abra app cmd YOURAPPNAME app migrate
```
_As of 2022-03-30, this requires `abra` RC version, run `abra upgrade --rc`._
### Setting up your `.env` config
Avoid the use of quotes (`"..."`) as much as possible, the NodeJS scripts flip out for some reason on some vars.
@ -52,30 +61,14 @@ Where `<username-to-delete>` is the username of the user to be removed, and
`<username-to-replace>` is the username of another user, to assign documents and
revisions to (instead of deleting them).
### Migrate from S3 to local storage
_As of 2022-03-30, this requires `abra` RC version, run `abra upgrade --rc`._
- `abra app config <domain>`, add
- `COMPOSE_FILE="$COMPOSE_FILE:compose.local.yml"`
- `FILE_STORAGE_UPLOAD_MAX_SIZE=26214400`
- `abra app deploy <domain> -f`
- compose.aws.yml should still be deployed!
- `abra app undeploy <domain>`
- on the docker host, find mountpoint of newly created volume via `docker volume ls` and `docker volume inspect`
- volume name is something like `<domain>_storage-data`
- take note which linux user owns `<storage_mountpoint>` (likely `1001`)
- use s3cmd/rclone/... to sync your bucket to `<storage_mountpoint>`
- `chown -R <storage_user>:<storage_user> <storage_mountpoint>`
- `abra app config <domain>`, switch storage backend
- remove `AWS_*` vars, `SECRET_AWS_SECRET_KEY_VERSION` and `COMPOSE_FILE="$COMPOSE_FILE:compose.aws.yml"`
- set `FILE_STORAGE=local`
- `abra app deploy <domain> -f`
- enjoy getting rid of S3 🥳
## Single Sign On with Keycloak
## Single Sign On with Keycloak/Authentik
`abra app config YOURAPPNAME`, then uncomment everything in the `OIDC_` section.
- Create an OIDC client in Keycloak (in Authentik this is called a provider and application)
- Run `abra app config YOURAPPNAME`, then uncomment everything in the `OIDC_` section.
- **Valid Redirect URIs**: `https://YOURAPPDOMAIN/auth/oidc.callback`
- Reference the client/provider info to populate the `_AUTH_URI`, `_TOKEN_URI`, and `_USERINFO_URI` values
- Set the OIDC secret using the value from the client/provider `abra app secret insert YOURAPPNAME oidc_client_secret v1 SECRETVALUE`
- `abra app deploy YOURAPPDOMAIN`
Create a new client in Keycloak:
- **Valid Redirect URIs**: `https://YOURAPPDOMAIN/auth/oidc.callback`
`abra app deploy YOURAPPDOMAIN`

View File

@ -1,6 +1,5 @@
export APP_ENTRYPOINT_VERSION=v9
export APP_ENTRYPOINT_VERSION=v8
export DB_ENTRYPOINT_VERSION=v2
export PG_BACKUP_VERSION=v1
create_email_user() {
if [ -z "$1" ]; then
@ -20,10 +19,6 @@ migrate() {
yarn db:migrate --env=production-ssl-disabled
}
generate_secret() {
abra app secret insert $DOMAIN secret_key v1 $(openssl rand -hex 32)
}
delete_user_by_id() {
if [ -z "$1" ] || [ -z "$2" ]; then
echo "Usage: ... delete_user_by_id <userid-to-delete> <userid-to-replace>"

View File

@ -1,15 +0,0 @@
# Recipe integration config for pairing this app with Authentik SSO.
# NOTE(review): appears to be consumed by abra's app-integration tooling
# when wiring Outline up to an Authentik instance — confirm against the
# Co-op Cloud recipe docs.
authentik:
  # Environment values written into the app's .env config.
  env:
    # OIDC client ID registered in Authentik for this app
    OIDC_CLIENT_ID: outline
    # Standard Authentik OAuth2/OIDC provider endpoints
    OIDC_AUTH_URI: https://authentik.example.com/application/o/authorize/
    OIDC_TOKEN_URI: https://authentik.example.com/application/o/token/
    OIDC_USERINFO_URI: https://authentik.example.com/application/o/userinfo/
    # Label shown on the login button
    OIDC_DISPLAY_NAME: "Authentik"
  # Commented-out .env entries that get uncommented for this integration.
  uncomment:
    - compose.oidc.yml
    - OIDC_ENABLED
    - OIDC_USERNAME_CLAIM
    - OIDC_SCOPES
    - SECRET_OIDC_CLIENT_SECRET_VERSION
  # Map of this app's secret names to the shared secret provided by the
  # Authentik side (the OIDC client secret).
  shared_secrets:
    outline_secret: oidc_client_secret

View File

@ -6,7 +6,7 @@ services:
networks:
- backend
- proxy
image: outlinewiki/outline:0.82.0
image: outlinewiki/outline:0.73.1
secrets:
- db_password
- secret_key
@ -34,20 +34,19 @@ services:
- "traefik.http.routers.${STACK_NAME}.rule=Host(`${DOMAIN}`${EXTRA_DOMAINS})"
- "traefik.http.routers.${STACK_NAME}.entrypoints=web-secure"
- "traefik.http.routers.${STACK_NAME}.tls.certresolver=${LETS_ENCRYPT_ENV}"
- "coop-cloud.${STACK_NAME}.version=2.9.0+0.82.0"
# Redirect from EXTRA_DOMAINS to DOMAIN
- "traefik.http.routers.${STACK_NAME}.middlewares=${STACK_NAME}-redirect"
- "traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLForceHost=true"
- "traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLHost=${DOMAIN}"
- "coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-80}"
- "coop-cloud.${STACK_NAME}.version=1.1.0+0.73.1"
## Redirect from EXTRA_DOMAINS to DOMAIN
#- "traefik.http.routers.${STACK_NAME}.middlewares=${STACK_NAME}-redirect"
#- "traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLForceHost=true"
#- "traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLHost=${DOMAIN}"
cache:
image: redis:7.4.2
image: redis:7.2.3
networks:
- backend
db:
image: postgres:17.3
image: postgres:15.5
networks:
- backend
secrets:
@ -56,9 +55,6 @@ services:
- source: db_entrypoint
target: /docker-entrypoint.sh
mode: 0555
- source: pg_backup
target: /pg_backup.sh
mode: 0555
environment:
POSTGRES_DB: outline
POSTGRES_PASSWORD_FILE: /run/secrets/db_password
@ -68,10 +64,10 @@ services:
entrypoint: /docker-entrypoint.sh
deploy:
labels:
backupbot.backup: "${ENABLE_BACKUPS:-true}"
backupbot.backup.pre-hook: "/pg_backup.sh backup"
backupbot.backup.volumes.postgres_data.path: "backup.sql"
backupbot.restore.post-hook: '/pg_backup.sh restore'
backupbot.backup: "true"
backupbot.backup.path: "/tmp/dump.sql.gz"
backupbot.backup.post-hook: "rm -f /tmp/dump.sql.gz"
backupbot.backup.pre-hook: "sh -c 'PGPASSWORD=$$(cat $${POSTGRES_PASSWORD_FILE}) pg_dump -U outline outline | gzip > /tmp/dump.sql.gz'"
secrets:
secret_key:
@ -101,6 +97,3 @@ configs:
name: ${STACK_NAME}_db_entrypoint_${DB_ENTRYPOINT_VERSION}
file: entrypoint.postgres.sh.tmpl
template_driver: golang
pg_backup:
name: ${STACK_NAME}_pg_backup_${PG_BACKUP_VERSION}
file: pg_backup.sh

View File

@ -1,34 +0,0 @@
#!/bin/bash
# Backup/restore helper for the Outline postgres container.
#
# Usage: pg_backup.sh backup|restore
# Expects POSTGRES_USER, POSTGRES_DB and POSTGRES_PASSWORD_FILE in the
# environment (as set by the postgres service definition).
set -e

BACKUP_FILE='/var/lib/postgresql/data/backup.sql'

# Dump the configured database to $BACKUP_FILE, reading the password
# from the docker secret file.
function backup {
  # Quote the expansions: secret paths/values must not be word-split.
  PGPASSWORD="$(cat "$POSTGRES_PASSWORD_FILE")"
  export PGPASSWORD
  pg_dump -U "${POSTGRES_USER}" "${POSTGRES_DB}" > "$BACKUP_FILE"
}

# Recreate the database from $BACKUP_FILE while temporarily locking out
# all non-local connections.
function restore {
  cd /var/lib/postgresql/data/

  # Restore the saved client-authentication rules and reload postgres.
  restore_config(){
    cat pg_hba.conf.bak > pg_hba.conf
    su postgres -c 'pg_ctl reload'
  }

  # Don't allow any other connections than local while restoring.
  cp pg_hba.conf pg_hba.conf.bak
  echo "local all all trust" > pg_hba.conf
  su postgres -c 'pg_ctl reload'
  # Make sure the original pg_hba.conf comes back even if the restore dies.
  trap restore_config EXIT INT TERM

  # Recreate the database, then load the dump in a single transaction
  # (-1) so a partial restore rolls back instead of leaving a mix.
  psql -U "${POSTGRES_USER}" -d postgres -c "DROP DATABASE ${POSTGRES_DB} WITH (FORCE);"
  createdb -U "${POSTGRES_USER}" "${POSTGRES_DB}"
  psql -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" -1 -f "$BACKUP_FILE"

  trap - EXIT INT TERM
  restore_config
}

# Dispatch: run the subcommand given on the command line ("backup" or
# "restore"). Quoted so arguments are passed through unsplit.
"$@"

View File

@ -1,4 +0,0 @@
Due to the introduction of local storage, you need to adapt your config to continue using S3 storage. Just add the following lines to your config:
FILE_STORAGE=s3
COMPOSE_FILE="$COMPOSE_FILE:compose.aws.yml"

View File

@ -1 +0,0 @@
Fixes a problem where deployments were consistently giving a timeout response even though they were successful