Compare commits

..

No commits in common. "main" and "pg15" have entirely different histories.
main ... pg15

14 changed files with 89 additions and 247 deletions

View File

@ -18,11 +18,10 @@ steps:
STACK_NAME: outline STACK_NAME: outline
LETS_ENCRYPT_ENV: production LETS_ENCRYPT_ENV: production
APP_ENTRYPOINT_VERSION: v1 APP_ENTRYPOINT_VERSION: v1
DB_ENTRYPOINT_VERSION: v1
PG_BACKUP_VERSION: v1
SECRET_DB_PASSWORD_VERSION: v1 SECRET_DB_PASSWORD_VERSION: v1
SECRET_SECRET_KEY_VERSION: v1 # length=64 SECRET_SECRET_KEY_VERSION: v1 # length=64
SECRET_UTILS_SECRET_VERSION: v1 # length=64 SECRET_UTILS_SECRET_VERSION: v1 # length=64
SECRET_AWS_SECRET_KEY_VERSION: v1
trigger: trigger:
branch: branch:
- main - main
@ -38,7 +37,7 @@ steps:
from_secret: drone_abra-bot_token from_secret: drone_abra-bot_token
fork: true fork: true
repositories: repositories:
- toolshed/auto-recipes-catalogue-json - coop-cloud/auto-recipes-catalogue-json
trigger: trigger:
event: tag event: tag

View File

@ -8,8 +8,6 @@ DOMAIN=outline.example.com
#EXTRA_DOMAINS=', `www.outline.example.com`' #EXTRA_DOMAINS=', `www.outline.example.com`'
LETS_ENCRYPT_ENV=production LETS_ENCRYPT_ENV=production
ENABLE_BACKUPS=true
COMPOSE_FILE="compose.yml" COMPOSE_FILE="compose.yml"
# REQUIRED # REQUIRED
@ -17,9 +15,15 @@ COMPOSE_FILE="compose.yml"
SECRET_DB_PASSWORD_VERSION=v1 SECRET_DB_PASSWORD_VERSION=v1
SECRET_SECRET_KEY_VERSION=v1 # length=64 SECRET_SECRET_KEY_VERSION=v1 # length=64
SECRET_UTILS_SECRET_VERSION=v1 # length=64 SECRET_UTILS_SECRET_VERSION=v1 # length=64
SECRET_AWS_SECRET_KEY_VERSION=v1
# Set to s3 to use AWS S3 bucket AWS_ACCESS_KEY_ID=
FILE_STORAGE=local AWS_REGION=
AWS_S3_UPLOAD_BUCKET_URL=
AWS_S3_UPLOAD_BUCKET_NAME=
AWS_S3_UPLOAD_MAX_SIZE=26214400
AWS_S3_FORCE_PATH_STYLE=true
AWS_S3_ACL=private
# OPTIONAL # OPTIONAL
@ -41,7 +45,7 @@ WEB_CONCURRENCY=1
# Override the maximum size of document imports, could be required if you have # Override the maximum size of document imports, could be required if you have
# especially large Word documents with embedded imagery # especially large Word documents with embedded imagery
FILE_STORAGE_IMPORT_MAX_SIZE=5120000 MAXIMUM_IMPORT_SIZE=5120000
# You can remove this line if your reverse proxy already logs incoming http # You can remove this line if your reverse proxy already logs incoming http
# requests and this ends up being duplicative # requests and this ends up being duplicative
@ -51,20 +55,18 @@ DEBUG=http
# set, all domains are allowed by default when using Google OAuth to signin # set, all domains are allowed by default when using Google OAuth to signin
ALLOWED_DOMAINS= ALLOWED_DOMAINS=
# TODO: setup compose.smtp.yml
# To support sending outgoing transactional emails such as "document updated" or # To support sending outgoing transactional emails such as "document updated" or
# "you've been invited" you'll need to provide authentication for an SMTP server # "you've been invited" you'll need to provide authentication for an SMTP server
# By default, this enables email login. You can disable this in the settings
# for configuration details see https://docs.getoutline.com/s/hosting/doc/smtp-cqCJyZGMIB
#COMPOSE_FILE="$COMPOSE_FILE:compose.smtp.yml"
#SMTP_ENABLED=1 #SMTP_ENABLED=1
#SMTP_HOST= #SMTP_HOST=
#SMTP_PORT= #SMTP_PORT=
#SMTP_USERNAME= #SMTP_USERNAME=
#SMTP_PASSWORD=
#SMTP_FROM_EMAIL= #SMTP_FROM_EMAIL=
#SMTP_REPLY_EMAIL= #SMTP_REPLY_EMAIL=
#SMTP_TLS_CIPHERS= #SMTP_TLS_CIPHERS=
#SMTP_SECURE=true #SMTP_SECURE=true
#SECRET_SMTP_PASSWORD_VERSION=v1
#COMPOSE_FILE="$COMPOSE_FILE:compose.oidc.yml" #COMPOSE_FILE="$COMPOSE_FILE:compose.oidc.yml"
#OIDC_ENABLED=1 #OIDC_ENABLED=1
@ -81,16 +83,3 @@ ALLOWED_DOMAINS=
#GOOGLE_ENABLED=1 #GOOGLE_ENABLED=1
#GOOGLE_CLIENT_ID= #GOOGLE_CLIENT_ID=
#SECRET_GOOGLE_CLIENT_SECRET_VERSION=v1 #SECRET_GOOGLE_CLIENT_SECRET_VERSION=v1
COMPOSE_FILE="$COMPOSE_FILE:compose.local.yml"
FILE_STORAGE_UPLOAD_MAX_SIZE=26214400
#COMPOSE_FILE="$COMPOSE_FILE:compose.aws.yml"
#AWS_ACCESS_KEY_ID=
#AWS_REGION=
#AWS_S3_UPLOAD_BUCKET_URL=
#AWS_S3_UPLOAD_BUCKET_NAME=
#AWS_S3_UPLOAD_MAX_SIZE=26214400
#AWS_S3_FORCE_PATH_STYLE=true
#AWS_S3_ACL=private
#SECRET_AWS_SECRET_KEY_VERSION=v1

View File

@ -5,12 +5,12 @@ Wiki and knowledge base for growing teams
<!-- metadata --> <!-- metadata -->
* **Category**: Apps * **Category**: Apps
* **Status**: 3, beta * **Status**: 1, alpha
* **Image**: [outlinewiki/outline](https://hub.docker.com/r/outlinewiki/outline), 4, upstream * **Image**: [outlinewiki/outline](https://hub.docker.com/r/outlinewiki/outline)
* **Healthcheck**: No * **Healthcheck**: No
* **Backups**: Yes * **Backups**: No
* **Email**: Yes * **Email**: No
* **Tests**: 2 * **Tests**: No
* **SSO**: 3 (OAuth) * **SSO**: 3 (OAuth)
<!-- endmetadata --> <!-- endmetadata -->
@ -19,27 +19,26 @@ Wiki and knowledge base for growing teams
1. Set up Docker Swarm and [`abra`] 1. Set up Docker Swarm and [`abra`]
2. Deploy [`coop-cloud/traefik`] 2. Deploy [`coop-cloud/traefik`]
3. `abra app new ${REPO_NAME}` 3. `abra app new ${REPO_NAME} --secrets` (optionally with `--pass` if you'd like
- **WARNING**: Choose "n" when `abra` asks if you'd like to generate secrets to save secrets in `pass`)
4. `abra app config YOURAPPNAME` - be sure to change `$DOMAIN` to something that resolves to 4. `abra app config YOURAPPNAME` - be sure to change `$DOMAIN` to something that resolves to
your Docker swarm box your Docker swarm box
5. Insert secrets: 5. `abra app deploy YOURAPPNAME`
- `abra app secret insert YOURAPPNAME secret_key v1 $(openssl rand -hex 32)` #12 7. Open the configured domain in your browser to finish set-up
- `abra app secret generate -a YOURAPPNAME`
6. `abra app deploy YOURAPPNAME`
8. Open the configured domain in your browser to finish set-up
[`abra`]: https://git.coopcloud.tech/coop-cloud/abra [`abra`]: https://git.coopcloud.tech/coop-cloud/abra
[`coop-cloud/traefik`]: https://git.coopcloud.tech/coop-cloud/traefik [`coop-cloud/traefik`]: https://git.coopcloud.tech/coop-cloud/traefik
## Tips & Tricks ## Tips & Tricks
### Create an initial admin user ### Post-deploy migration
``` ```
abra app cmd YOURAPPNAME app create_email_user test@example.com abra app cmd YOURAPPNAME app migrate
``` ```
_As of 2022-03-30, this requires `abra` RC version, run `abra upgrade --rc`._
### Setting up your `.env` config ### Setting up your `.env` config
Avoid the use of quotes (`"..."`) as much as possible, the NodeJS scripts flip out for some reason on some vars. Avoid the use of quotes (`"..."`) as much as possible, the NodeJS scripts flip out for some reason on some vars.
@ -52,30 +51,14 @@ Where `<username-to-delete>` is the username of the user to be removed, and
`<username-to-replace>` is the username of another user, to assign documents and `<username-to-replace>` is the username of another user, to assign documents and
revisions to (instead of deleting them). revisions to (instead of deleting them).
### Migrate from S3 to local storage _As of 2022-03-30, this requires `abra` RC version, run `abra upgrade --rc`._
- `abra app config <domain>`, add ## Single Sign On with Keycloak
- `COMPOSE_FILE="$COMPOSE_FILE:compose.local.yml"`
- `FILE_STORAGE_UPLOAD_MAX_SIZE=26214400`
- `abra app deploy <domain> -f`
- compose.aws.yml should still be deployed!
- `abra app undeploy <domain>`
- on the docker host, find mountpoint of newly created volume via `docker volume ls` and `docker volume inspect`
- volume name is something like `<domain>_storage-data`
- take note which linux user owns `<storage_mountpoint>` (likely `1001`)
- use s3cmd/rclone/... to sync your bucket to `<storage_mountpoint>`
- `chown -R <storage_user>:<storage_user> <storage_mountpoint>`
- `abra app config <domain>`, switch storage backend
- remove `AWS_*` vars, `SECRET_AWS_SECRET_KEY_VERSION` and `COMPOSE_FILE="$COMPOSE_FILE:compose.aws.yml"`
- set `FILE_STORAGE=local`
- `abra app deploy <domain> -f`
- enjoy getting rid of S3 🥳
## Single Sign On with Keycloak/Authentik `abra app config YOURAPPNAME`, then uncomment everything in the `OIDC_` section.
- Create an OIDC client in Keycloak (in Authentik this is called a provider and application) Create a new client in Keycloak:
- Run `abra app config YOURAPPNAME`, then uncomment everything in the `OIDC_` section.
- **Valid Redirect URIs**: `https://YOURAPPDOMAIN/auth/oidc.callback` - **Valid Redirect URIs**: `https://YOURAPPDOMAIN/auth/oidc.callback`
- Reference the client/provider info to populate the `_AUTH_URI` `_TOKEN_URI` and `_USERINFO_URI` values
- Set the OIDC secret using the value from the client/provider `abra app secret insert YOURAPPNAME oidc_client_secret v1 SECRETVALUE` `abra app deploy YOURAPPDOMAIN`
- `abra app deploy YOURAPPDOMAIN`

21
abra.sh
View File

@ -1,18 +1,5 @@
export APP_ENTRYPOINT_VERSION=v9 export APP_ENTRYPOINT_VERSION=v6
export DB_ENTRYPOINT_VERSION=v2 export DB_ENTRYPOINT_VERSION=v1
export PG_BACKUP_VERSION=v1
# Seed an initial Outline user by email address.
# Reads the DB password and app secrets from Docker secrets, builds the
# DATABASE_URL Outline expects, then runs the upstream seed script.
# Arguments: $1 - email address for the new user (required)
create_email_user() {
  if [ -z "$1" ]; then
    # Diagnostics belong on stderr so callers can capture real output cleanly.
    echo "Usage: ... create_email_user <email_address>" >&2
    exit 1
  fi
  # Assign first, then export: 'export VAR=$(cmd)' would mask the exit
  # status of the command substitution (ShellCheck SC2155).
  DATABASE_PASSWORD=$(cat /run/secrets/db_password)
  export DATABASE_PASSWORD
  export DATABASE_URL="postgres://outline:${DATABASE_PASSWORD}@${STACK_NAME}_db:5432/outline"
  UTILS_SECRET=$(cat /run/secrets/utils_secret)
  export UTILS_SECRET
  SECRET_KEY=$(cat /run/secrets/secret_key)
  export SECRET_KEY
  node build/server/scripts/seed.js "$1"
}
migrate() { migrate() {
export DATABASE_PASSWORD=$(cat /run/secrets/db_password) export DATABASE_PASSWORD=$(cat /run/secrets/db_password)
@ -20,10 +7,6 @@ migrate() {
yarn db:migrate --env=production-ssl-disabled yarn db:migrate --env=production-ssl-disabled
} }
# Insert a freshly generated 64-char hex secret_key for this app.
# Relies on $DOMAIN being set in the environment (abra exports it).
generate_secret() {
  # Quote both expansions: an unusual $DOMAIN can never be word-split,
  # and the argument positions stay stable even if openssl printed nothing.
  abra app secret insert "$DOMAIN" secret_key v1 "$(openssl rand -hex 32)"
}
delete_user_by_id() { delete_user_by_id() {
if [ -z "$1" ] || [ -z "$2" ]; then if [ -z "$1" ] || [ -z "$2" ]; then
echo "Usage: ... delete_user_by_id <userid-to-delete> <userid-to-replace>" echo "Usage: ... delete_user_by_id <userid-to-delete> <userid-to-replace>"

View File

@ -1,15 +0,0 @@
authentik:
env:
OIDC_CLIENT_ID: outline
OIDC_AUTH_URI: https://authentik.example.com/application/o/authorize/
OIDC_TOKEN_URI: https://authentik.example.com/application/o/token/
OIDC_USERINFO_URI: https://authentik.example.com/application/o/userinfo/
OIDC_DISPLAY_NAME: "Authentik"
uncomment:
- compose.oidc.yml
- OIDC_ENABLED
- OIDC_USERNAME_CLAIM
- OIDC_SCOPES
- SECRET_OIDC_CLIENT_SECRET_VERSION
shared_secrets:
outline_secret: oidc_client_secret

View File

@ -1,22 +0,0 @@
---
version: "3.8"
services:
app:
secrets:
- aws_secret_key
environment:
- AWS_ACCESS_KEY_ID
- AWS_REGION
- AWS_S3_ACL
- AWS_S3_FORCE_PATH_STYLE
- AWS_S3_UPLOAD_BUCKET_NAME
- AWS_S3_UPLOAD_BUCKET_URL
- AWS_S3_UPLOAD_MAX_SIZE
- AWS_SDK_LOAD_CONFIG=0
- AWS_SECRET_KEY_FILE=/run/secrets/aws_secret_key
secrets:
aws_secret_key:
name: ${STACK_NAME}_aws_secret_key_${SECRET_AWS_SECRET_KEY_VERSION}
external: true

View File

@ -1,13 +0,0 @@
---
version: "3.8"
services:
app:
volumes:
- storage-data:/var/lib/outline/data
environment:
- FILE_STORAGE
- FILE_STORAGE_UPLOAD_MAX_SIZE
volumes:
storage-data:

View File

@ -1,18 +0,0 @@
version: "3.8"
services:
app:
secrets:
- smtp_password
environment:
- SMTP_HOST
- SMTP_PORT
- SMTP_USERNAME
- SMTP_FROM_EMAIL
- SMTP_REPLY_EMAIL
- SMTP_TLS_CIPHERS
- SMTP_SECURE
secrets:
smtp_password:
external: true
name: ${STACK_NAME}_smtp_password_${SECRET_SMTP_PASSWORD_VERSION}

View File

@ -6,8 +6,9 @@ services:
networks: networks:
- backend - backend
- proxy - proxy
image: outlinewiki/outline:0.84.0 image: outlinewiki/outline:0.69.2
secrets: secrets:
- aws_secret_key
- db_password - db_password
- secret_key - secret_key
- utils_secret - utils_secret
@ -16,7 +17,15 @@ services:
target: /docker-entrypoint.sh target: /docker-entrypoint.sh
mode: 0555 mode: 0555
environment: environment:
- FILE_STORAGE - AWS_ACCESS_KEY_ID
- AWS_REGION
- AWS_S3_ACL
- AWS_S3_FORCE_PATH_STYLE
- AWS_S3_UPLOAD_BUCKET_NAME
- AWS_S3_UPLOAD_BUCKET_URL
- AWS_S3_UPLOAD_MAX_SIZE
- AWS_SDK_LOAD_CONFIG=0
- AWS_SECRET_KEY_FILE=/run/secrets/aws_secret_key
- DATABASE_PASSWORD_FILE=/run/secrets/db_password - DATABASE_PASSWORD_FILE=/run/secrets/db_password
- FORCE_HTTPS=true - FORCE_HTTPS=true
- PGSSLMODE=disable - PGSSLMODE=disable
@ -34,20 +43,19 @@ services:
- "traefik.http.routers.${STACK_NAME}.rule=Host(`${DOMAIN}`${EXTRA_DOMAINS})" - "traefik.http.routers.${STACK_NAME}.rule=Host(`${DOMAIN}`${EXTRA_DOMAINS})"
- "traefik.http.routers.${STACK_NAME}.entrypoints=web-secure" - "traefik.http.routers.${STACK_NAME}.entrypoints=web-secure"
- "traefik.http.routers.${STACK_NAME}.tls.certresolver=${LETS_ENCRYPT_ENV}" - "traefik.http.routers.${STACK_NAME}.tls.certresolver=${LETS_ENCRYPT_ENV}"
- "coop-cloud.${STACK_NAME}.version=2.10.0+0.84.0" - "coop-cloud.${STACK_NAME}.version=0.8.0+0.69.2"
# Redirect from EXTRA_DOMAINS to DOMAIN ## Redirect from EXTRA_DOMAINS to DOMAIN
- "traefik.http.routers.${STACK_NAME}.middlewares=${STACK_NAME}-redirect" #- "traefik.http.routers.${STACK_NAME}.middlewares=${STACK_NAME}-redirect"
- "traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLForceHost=true" #- "traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLForceHost=true"
- "traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLHost=${DOMAIN}" #- "traefik.http.middlewares.${STACK_NAME}-redirect.headers.SSLHost=${DOMAIN}"
- "coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-80}"
cache: cache:
image: redis:8.0.2 image: redis:7.0.11
networks: networks:
- backend - backend
db: db:
image: postgres:17.5 image: postgres:15.3
networks: networks:
- backend - backend
secrets: secrets:
@ -56,9 +64,6 @@ services:
- source: db_entrypoint - source: db_entrypoint
target: /docker-entrypoint.sh target: /docker-entrypoint.sh
mode: 0555 mode: 0555
- source: pg_backup
target: /pg_backup.sh
mode: 0555
environment: environment:
POSTGRES_DB: outline POSTGRES_DB: outline
POSTGRES_PASSWORD_FILE: /run/secrets/db_password POSTGRES_PASSWORD_FILE: /run/secrets/db_password
@ -68,10 +73,10 @@ services:
entrypoint: /docker-entrypoint.sh entrypoint: /docker-entrypoint.sh
deploy: deploy:
labels: labels:
backupbot.backup: "${ENABLE_BACKUPS:-true}" backupbot.backup: "true"
backupbot.backup.pre-hook: "/pg_backup.sh backup" backupbot.backup.path: "/tmp/dump.sql.gz"
backupbot.backup.volumes.postgres_data.path: "backup.sql" backupbot.backup.post-hook: "rm -f /tmp/dump.sql.gz"
backupbot.restore.post-hook: '/pg_backup.sh restore' backupbot.backup.pre-hook: "sh -c 'PGPASSWORD=$$(cat $${POSTGRES_PASSWORD_FILE}) pg_dump -U outline outline | gzip > /tmp/dump.sql.gz'"
secrets: secrets:
secret_key: secret_key:
@ -80,6 +85,9 @@ secrets:
utils_secret: utils_secret:
name: ${STACK_NAME}_utils_secret_${SECRET_UTILS_SECRET_VERSION} name: ${STACK_NAME}_utils_secret_${SECRET_UTILS_SECRET_VERSION}
external: true external: true
aws_secret_key:
name: ${STACK_NAME}_aws_secret_key_${SECRET_AWS_SECRET_KEY_VERSION}
external: true
db_password: db_password:
name: ${STACK_NAME}_db_password_${SECRET_DB_PASSWORD_VERSION} name: ${STACK_NAME}_db_password_${SECRET_DB_PASSWORD_VERSION}
external: true external: true
@ -101,6 +109,3 @@ configs:
name: ${STACK_NAME}_db_entrypoint_${DB_ENTRYPOINT_VERSION} name: ${STACK_NAME}_db_entrypoint_${DB_ENTRYPOINT_VERSION}
file: entrypoint.postgres.sh.tmpl file: entrypoint.postgres.sh.tmpl
template_driver: golang template_driver: golang
pg_backup:
name: ${STACK_NAME}_pg_backup_${PG_BACKUP_VERSION}
file: pg_backup.sh

View File

@ -7,38 +7,36 @@ OLDDATA=$PGDATA/old_data
NEWDATA=$PGDATA/new_data NEWDATA=$PGDATA/new_data
if [ -e $MIGRATION_MARKER ]; then if [ -e $MIGRATION_MARKER ]; then
echo "FATAL: migration was started but did not complete in a previous run. manual recovery necessary" echo "FATAL: previous migration not completed. manual restore necessary"
exit 1 exit 1
fi fi
if [ -f $PGDATA/PG_VERSION ]; then DATA_VERSION=$(cat $PGDATA/PG_VERSION)
DATA_VERSION=$(cat $PGDATA/PG_VERSION)
if [ -n "$DATA_VERSION" -a "$PG_MAJOR" != "$DATA_VERSION" ]; then if [ -n "$DATA_VERSION" -a "$PG_MAJOR" != "$DATA_VERSION" ]; then
echo "postgres data version $DATA_VERSION found, but need $PG_MAJOR. Starting migration" echo "postgres data version $DATA_VERSION found, but need $PG_MAJOR. Starting migration"
echo "Installing postgres $DATA_VERSION" echo "Installing postgres $DATA_VERSION"
sed -i "s/$/ $DATA_VERSION/" /etc/apt/sources.list.d/pgdg.list sed -i "s/$/ $DATA_VERSION/" /etc/apt/sources.list.d/pgdg.list
apt-get update && apt-get install -y --no-install-recommends \ apt-get update && apt-get install -y --no-install-recommends \
postgresql-$DATA_VERSION \ postgresql-$DATA_VERSION \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
echo "shuffling around" echo "shuffling around"
gosu postgres mkdir $OLDDATA $NEWDATA gosu postgres mkdir $OLDDATA $NEWDATA
chmod 700 $OLDDATA $NEWDATA chmod 700 $OLDDATA $NEWDATA
mv $PGDATA/* $OLDDATA/ || true mv $PGDATA/* $OLDDATA/ || true
touch $MIGRATION_MARKER touch $MIGRATION_MARKER
echo "running initdb" echo "running initdb"
# abuse entrypoint script for initdb by making server error out # abuse entrypoint script for initdb by making server error out
gosu postgres bash -c "export PGDATA=$NEWDATA ; /usr/local/bin/docker-entrypoint.sh --invalid-arg || true" gosu postgres bash -c "export PGDATA=$NEWDATA ; /usr/local/bin/docker-entrypoint.sh --invalid-arg"
echo "running pg_upgrade" echo "running pg_upgrade"
cd /tmp cd /tmp
gosu postgres pg_upgrade --link -b /usr/lib/postgresql/$DATA_VERSION/bin -d $OLDDATA -D $NEWDATA -U $POSTGRES_USER gosu postgres pg_upgrade --link -b /usr/lib/postgresql/$DATA_VERSION/bin -d $OLDDATA -D $NEWDATA -U $POSTGRES_USER
cp $OLDDATA/pg_hba.conf $NEWDATA/ cp $OLDDATA/pg_hba.conf $NEWDATA/
mv $NEWDATA/* $PGDATA mv $NEWDATA/* $PGDATA
rm -rf $OLDDATA rm -rf $OLDDATA
rmdir $NEWDATA rmdir $NEWDATA
rm $MIGRATION_MARKER rm $MIGRATION_MARKER
echo "migration complete" echo "migration complete"
fi
fi fi
/usr/local/bin/docker-entrypoint.sh postgres /usr/local/bin/docker-entrypoint.sh postgres

View File

@ -1,12 +1,6 @@
#!/bin/sh #!/bin/sh
{{ if eq (env "FILE_STORAGE") "s3" }}
export AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_key) export AWS_SECRET_ACCESS_KEY=$(cat /run/secrets/aws_secret_key)
{{ end }}
{{ if eq (env "SMTP_ENABLED") "1" }}
export SMTP_PASSWORD=$(cat /run/secrets/smtp_password)
{{ end }}
{{ if eq (env "OIDC_ENABLED") "1" }} {{ if eq (env "OIDC_ENABLED") "1" }}
export OIDC_CLIENT_SECRET=$(cat /run/secrets/oidc_client_secret) export OIDC_CLIENT_SECRET=$(cat /run/secrets/oidc_client_secret)
@ -21,7 +15,5 @@ export SECRET_KEY=$(cat /run/secrets/secret_key)
export DATABASE_PASSWORD=$(cat /run/secrets/db_password) export DATABASE_PASSWORD=$(cat /run/secrets/db_password)
export DATABASE_URL="postgres://outline:${DATABASE_PASSWORD}@${STACK_NAME}_db:5432/outline" export DATABASE_URL="postgres://outline:${DATABASE_PASSWORD}@${STACK_NAME}_db:5432/outline"
if [ ! "$1" = "-e" ]; then /usr/local/bin/yarn db:migrate --env=production-ssl-disabled
/usr/local/bin/yarn db:migrate --env=production-ssl-disabled /usr/local/bin/yarn start "$@"
/usr/local/bin/yarn start "$@"
fi

View File

@ -1,34 +0,0 @@
#!/bin/bash
# Backup/restore helper for the Outline postgres service.
# Invoked via backupbot hooks as: /pg_backup.sh backup | /pg_backup.sh restore
set -e

# Plain-text dump location inside the postgres data volume.
BACKUP_FILE='/var/lib/postgresql/data/backup.sql'

# Dump the database to $BACKUP_FILE.
# Globals read: POSTGRES_PASSWORD_FILE, POSTGRES_USER, POSTGRES_DB.
backup() {
  # Assign then export so a failing 'cat' is not masked (ShellCheck SC2155).
  PGPASSWORD=$(cat "$POSTGRES_PASSWORD_FILE")
  export PGPASSWORD
  pg_dump -U "$POSTGRES_USER" "$POSTGRES_DB" > "$BACKUP_FILE"
}

# Recreate the database from $BACKUP_FILE.
# Temporarily restricts pg_hba.conf to local trust auth so nothing else
# can connect mid-restore; the saved config is put back on any exit path
# via the trap.
restore() {
  cd /var/lib/postgresql/data/

  # Restore the saved client-auth config and reload postgres.
  restore_config() {
    # Restore allowed connections
    cat pg_hba.conf.bak > pg_hba.conf
    su postgres -c 'pg_ctl reload'
  }

  # Don't allow any other connections than local
  cp pg_hba.conf pg_hba.conf.bak
  echo "local all all trust" > pg_hba.conf
  su postgres -c 'pg_ctl reload'
  trap restore_config EXIT INT TERM

  # Recreate Database, then replay the dump in a single transaction (-1)
  # so a partial restore rolls back cleanly.
  psql -U "$POSTGRES_USER" -d postgres -c "DROP DATABASE ${POSTGRES_DB} WITH (FORCE);"
  createdb -U "$POSTGRES_USER" "$POSTGRES_DB"
  psql -U "$POSTGRES_USER" -d "$POSTGRES_DB" -1 -f "$BACKUP_FILE"

  trap - EXIT INT TERM
  restore_config
}

# Dispatch the subcommand given on the command line; quoted "$@" so
# arguments survive intact. With no arguments this stays a no-op, as before.
if [ "$#" -gt 0 ]; then
  "$@"
fi

View File

@ -1,4 +0,0 @@
Due to the introduction of local storage, you need to adapt your config to continue using S3 storage. Just add the following lines to your config:
FILE_STORAGE=s3
COMPOSE_FILE="$COMPOSE_FILE:compose.aws.yml"

View File

@ -1 +0,0 @@
Fixes a problem where deployments were consistently giving a timeout response even though they were successful