48 Commits

Author SHA1 Message Date
5637a59368 chore: publish 1.1.0+0.24.6 release
All checks were successful
continuous-integration/drone/tag Build is passing
continuous-integration/drone/push Build is passing
2025-01-19 10:28:23 -08:00
e039b08ff0 Update .drone.yml
All checks were successful
continuous-integration/drone/push Build is passing
2025-01-08 10:09:13 -08:00
db3edafbee add REDIRECT_DOMAIN
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-26 00:06:03 +02:00
ce8c3eec63 add EXTRA_DOMAINS env
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-25 22:43:56 +02:00
c4a74a817a fix drone runner
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-25 22:13:55 +02:00
2011a91921 chore: publish 1.0.3+0.24.2 release
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/tag Build is passing
2024-10-25 21:50:45 +02:00
dddc5f7df2 fix redis, don't publish ports! 2024-10-25 21:50:14 +02:00
3bb2a94d3a chore: publish 1.0.2+0.24.2 release
Some checks failed
continuous-integration/drone/tag Build is passing
continuous-integration/drone/push Build is failing
2024-10-22 21:11:28 +02:00
27f33cd0d5 update backupbot labels 2024-10-22 21:11:23 +02:00
4e5dc4e185 add debug Dockerfile
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-15 14:07:29 +02:00
c18d143b7c fix drone runner
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-08 15:43:13 +02:00
bc1bdc825b chore: publish 1.0.1+0.24.2 release
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/tag Build is passing
2024-10-08 15:39:45 +02:00
2e40802f55 specify redis version 2024-10-08 15:39:06 +02:00
95cf5c9374 add healthcheck binary
Some checks failed
continuous-integration/drone/push Build is failing
2024-10-08 15:32:06 +02:00
38e3cbaa24 chore: publish 1.0.0+0.24.2 release
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-02 17:07:58 +02:00
49dec504db fix release
All checks were successful
continuous-integration/drone/push Build is passing
2024-10-02 17:03:57 +02:00
c8467e8916 wip fix redis 2024-07-23 14:11:49 +02:00
45337b4b2c fix redis 2024-07-23 10:19:39 +02:00
9aae7e1a15 fix redis config 2024-07-22 14:00:09 +02:00
a2f08b8ac7 fix redis config 2024-07-22 13:39:03 +02:00
f8d3cf166c fix volume value 2024-07-22 12:27:22 +02:00
1d80415fc9 fix compose files 2024-07-15 22:30:33 +02:00
bd83587abe update config and compose 2024-07-15 12:37:26 +02:00
a45b5e158b add alakazam integration file alaconnect.yml
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-13 17:36:24 +02:00
1669d64a5a chore: publish 0.4.0+0.22.1 release
All checks were successful
continuous-integration/drone/push Build is passing
2024-02-27 23:12:55 +01:00
8be72aa8df add optional VIKUNJA_RATELIMIT_NOAUTHLIMIT env
All checks were successful
continuous-integration/drone/push Build is passing
2024-02-27 16:30:19 +01:00
e501cc662d Add sample oauth data 2024-02-27 16:26:49 +01:00
8050d24c7c chore: publish 0.3.0+0.21.0 release
All checks were successful
continuous-integration/drone/push Build is passing
2023-07-13 11:52:56 +02:00
d3c98de025 chore: point backup to a volume directory 2023-06-26 17:22:51 +02:00
365448458c Remove on-failure restart policy
All checks were successful
continuous-integration/drone/push Build is passing
Always restart the container
2023-05-23 16:39:06 +02:00
4bbec31d8a add auto update and timeout env
All checks were successful
continuous-integration/drone/push Build is passing
2023-04-18 18:24:41 +02:00
1ce54b1fe3 add timeout label
All checks were successful
continuous-integration/drone/push Build is passing
2023-04-18 13:03:37 +02:00
fe83250372 chore: publish 0.2.4+0.20.5 release
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-03-21 16:34:00 +01:00
4cfe5f66a8 chore: publish 0.2.3+0.20.3 release
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-06 16:30:11 +01:00
6955772632 fix: bump config version 2023-03-06 16:30:01 +01:00
e7addc8405 chore: add .drone file
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/tag Build is passing
2023-03-06 16:22:39 +01:00
d7412e71cf chore: publish 0.2.2+0.20.3 release 2023-03-06 16:18:58 +01:00
2206aeca68 fix: remove healthcheck, it breaks everthing as usual ... 2023-03-06 16:18:18 +01:00
464d2cf3ba feat: use redis for keyvalue cache 2023-03-06 16:17:47 +01:00
35d1e14a81 feat: increase jwtttl 2023-03-06 16:17:25 +01:00
5bbe653775 feat: sso logout url 2023-03-06 16:15:54 +01:00
e6d99ff449 feat: add healthcheck for api 2023-02-16 10:28:55 +01:00
e070a5edbf fix: example domain in .env 2023-01-20 15:51:27 +01:00
5e41b22f61 chore: publish 0.2.1+0.20.1 release 2022-12-02 13:08:31 +01:00
e9eae9a9d5 chore: publish 0.2.0+0.20.0 release 2022-11-08 09:21:19 +01:00
3ec29424e8 make user and email search optin default 2022-10-24 13:32:06 +02:00
b1b1cc1bed .env.sample template domain 2022-09-14 11:30:28 +02:00
cdbf159805 update readme 2022-08-19 10:57:37 +02:00
14 changed files with 300 additions and 363 deletions

45
.drone.yml Normal file

@@ -0,0 +1,45 @@
---
kind: pipeline
name: deploy to swarm-test.autonomic.zone
steps:
- name: deployment
image: git.coopcloud.tech/coop-cloud/stack-ssh-deploy:latest
settings:
host: swarm-test.autonomic.zone
stack: vikunja
generate_secrets: true
purge: true
deploy_key:
from_secret: drone_ssh_swarm_test
networks:
- proxy
environment:
DOMAIN: authentik.swarm-test.autonomic.zone
STACK_NAME: authentik
LETS_ENCRYPT_ENV: production
CONFIG_YML_VERSION: v8
HEALTHCHECK_VERSION: v1
PG_BACKUP_VERSION: v1
SECRET_DB_PASSWORD_VERSION: v1
SECRET_JWT_SECRET_VERSION: v1
trigger:
branch:
- main
---
kind: pipeline
name: generate recipe catalogue
steps:
- name: release a new version
image: plugins/downstream
settings:
server: https://build.coopcloud.tech
token:
from_secret: drone_abra-bot_token
fork: true
repositories:
- toolshed/auto-recipes-catalogue-json
trigger:
event: tag

.env.sample

@@ -1,33 +1,48 @@
TYPE=vikunja
TIMEOUT=300
ENABLE_AUTO_UPDATE=true
ENABLE_BACKUPS=true
DOMAIN=vikunja.example.com
## Domain aliases
#EXTRA_DOMAINS=', `www.vikunja.example.com`'
#REDIRECT_DOMAIN=www.vikunja.example.com
LETS_ENCRYPT_ENV=production
SECRET_DB_PASSWORD_VERSION=v1
SECRET_JWT_SECRET_VERSION=v1
LOG_LEVEL=INFO
VIKUNJA_LOG_LEVEL=INFO
VIKUNJA_LOG_DATABASELEVEL=INFO
VIKUNJA_LOG_DATABASE=stdout
VIKUNJA_LOG_EVENTS=stdout
VIKUNJA_LOG_MAIL=stdout
COMPOSE_FILE=compose.yml
#VIKUNJA_RATELIMIT_NOAUTHLIMIT=10
VIKUNJA_DEFAULTSETTINGS_DISCOVERABLE_BY_NAME=true
VIKUNJA_DEFAULTSETTINGS_DISCOVERABLE_BY_EMAIL=true
VIKUNJA_SERVICE_ENABLEREGISTRATION=false
VIKUNJA_AUTH_LOCAL_ENABLED=false
# SSO OAUTH
# e.g. see https://goauthentik.io/integrations/services/vikunja/
# COMPOSE_FILE="${COMPOSE_FILE}:compose.oauth.yml"
# OAUTH_ENABLED=true
# OAUTH_NAME
# OAUTH_URL
# OAUTH_CLIENT_ID
# OAUTH_NAME=authentik
# OAUTH_URL=https://login.example.com/application/o/vikunja/
# OAUTH_CLIENT_ID=vikunja
# OAUTH_LOGOUT_URL=https://login.example.com/application/o/vikunja/end-session/
# SECRET_OAUTH_SECRET_VERSION=v1
# E-MAIL
# COMPOSE_FILE="${COMPOSE_FILE}:compose.smtp.yml"
# SMTP_ENABLED=true
# SMTP_HOST=mail.example.com
# SMTP_AUTHTYPE=plain # possible: plain, login, cram-md5
# SMTP_USER=user
# SMTP_FROM_EMAIL=user@example.com
# VIKUNJA_MAILER_ENABLED=true
# VIKUNJA_MAILER_HOST=mail.example.com
# VIKUNJA_MAILER_AUTHTYPE=plain # possible: plain, login, cram-md5
# VIKUNJA_MAILER_USERNAME=user
# VIKUNJA_MAILER_FROMEMAIL=user@example.com
# SECRET_SMTP_PASSWORD_VERSION=v1

README.md

@@ -1,17 +1,18 @@
# vikunja
> One line description of the recipe
> The open-source, self-hostable to-do app.
Organize everything, on all platforms
<!-- metadata -->
* **Category**: Apps
* **Status**: 0
* **Image**: [`vikunja`](https://hub.docker.com/r/vikunja), 4, upstream
* **Healthcheck**: No
* **Backups**: No
* **Email**: No
* **Healthcheck**: Almost
* **Backups**: Yes
* **Email**: Yes
* **Tests**: No
* **SSO**: No
* **SSO**: Yes
<!-- endmetadata -->
@@ -22,3 +23,28 @@
* `abra app deploy <app-name>`
For more, see [`docs.coopcloud.tech`](https://docs.coopcloud.tech).
## Healthcheck
Vikunja uses a Docker [scratch](https://hub.docker.com/_/scratch/) image, which is completely empty, so a statically built healthcheck binary has to be copied into the container to perform the healthcheck.
To verify the binary shipped in this recipe, run the following:
```
# Set the source date epoch for reproducibility
export SOURCE_DATE_EPOCH=1640995200
export DOCKER_BUILDKIT=1
# Build the Docker image
docker build --build-arg SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH} -t healthcheck -f healthcheck_Dockerfile .
# Create container, extract binary and remove the container
docker create --name healthcheck_builder healthcheck
docker cp healthcheck_builder:/app/healthcheck .
docker rm healthcheck_builder
# Check if the build is reproducible by calculating hash
sha256sum healthcheck
```
The sha256 checksum should be **c7c12a0eb019edd275c3f5a9302c70b2112941a8c0b9d9128d26c66a81a263c6**
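If you prefer a pass/fail check over comparing the hash by eye, `sha256sum -c` can do the comparison for you. A small convenience sketch (not part of the recipe), assuming the binary was extracted into the current directory as above:
```
# Exits non-zero if the extracted binary does not match the published checksum.
echo "c7c12a0eb019edd275c3f5a9302c70b2112941a8c0b9d9128d26c66a81a263c6  healthcheck" | sha256sum -c -
```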

abra.sh

@@ -1 +1,3 @@
export CONFIG_YML_VERSION=v2
export CONFIG_YML_VERSION=v8
export HEALTHCHECK_VERSION=v1
export PG_BACKUP_VERSION=v2

12
alaconnect.yml Normal file

@@ -0,0 +1,12 @@
authentik:
env:
OAUTH_NAME: authentik
OAUTH_URL: https://authentik.example.com/application/o/vikunja/
OAUTH_LOGOUT_URL: https://authentik.example.com/application/o/vikunja/end-session/
OAUTH_CLIENT_ID: vikunja
uncomment:
- compose.oauth.yml
- OAUTH_ENABLED
- SECRET_OAUTH_SECRET_VERSION
shared_secrets:
vikunja_secret: oauth_secret

compose.oauth.yml

@@ -1,12 +1,13 @@
version: '3.8'
services:
api:
app:
environment:
- OAUTH_ENABLED
- OAUTH_NAME
- OAUTH_URL
- OAUTH_CLIENT_ID
- OAUTH_LOGOUT_URL
- SECRET_OAUTH_SECRET_VERSION=V1
secrets:
- oauth_secret

compose.yml

@@ -2,12 +2,41 @@
version: "3.8"
services:
api:
image: vikunja/api:0.19.2
app:
image: vikunja/vikunja:0.24.6
environment:
- DOMAIN
- LOG_LEVEL
volumes:
- VIKUNJA_RATELIMIT_NOAUTHLIMIT
- VIKUNJA_SERVICE_PUBLICURL=${DOMAIN}
- VIKUNJA_DATABASE_HOST=db
- VIKUNJA_DATABASE_PASSWORD_FILE=/run/secrets/db_password
- VIKUNJA_DATABASE_TYPE=postgres
- VIKUNJA_DATABASE_USER=vikunja
- VIKUNJA_DATABASE_DATABASE=vikunja
- VIKUNJA_SERVICE_JWTSECRET_FILE=/run/secrets/jwt_secret
- VIKUNJA_REDIS_ENABLED=1
- VIKUNJA_REDIS_HOST=redis:6379
- VIKUNJA_CACHE_ENABLED=1
- VIKUNJA_CACHE_TYPE=redis
- VIKUNJA_SERVICE_ENABLEREGISTRATION=false
- VIKUNJA_SERVICE_JWTTTL=604800
- VIKUNJA_MAILER_ENABLED
- VIKUNJA_MAILER_HOST
- VIKUNJA_MAILER_AUTHTYPE
- VIKUNJA_MAILER_USERNAME
- VIKUNJA_MAILER_PASSWORD_FILE=/run/secrets/smtp_password
- VIKUNJA_MAILER_FROMEMAIL
- VIKUNJA_LOG_LEVEL
- VIKUNJA_LOG_DATABASE
- VIKUNJA_LOG_DATABASELEVEL
- VIKUNJA_LOG_EVENTS
- VIKUNJA_LOG_MAIL
- VIKUNJA_KEYVALUE_TYPE=redis
- VIKUNJA_AUTH_LOCAL_ENABLED
- VIKUNJA_DEFAULTSETTINGS_DISCOVERABLE_BY_NAME
- VIKUNJA_DEFAULTSETTINGS_DISCOVERABLE_BY_EMAIL
volumes:
- files:/app/vikunja/files
networks:
- proxy
@@ -17,40 +46,31 @@ services:
- db_password
configs:
- source: config_yml
target: /app/vikunja/config.yml
target: /etc/vikunja/config.yml
- source: healthcheck
target: /healthcheck
mode: 555
deploy:
restart_policy:
condition: on-failure
labels:
- "traefik.enable=true"
- "traefik.http.services.${STACK_NAME}_api.loadbalancer.server.port=3456"
- "traefik.http.routers.${STACK_NAME}_api.rule=Host(`${DOMAIN}`) && PathPrefix(`/api/v1`, `/dav/`, `/.well-known/`)"
- "traefik.http.routers.${STACK_NAME}_api.entrypoints=web-secure"
- "traefik.http.routers.${STACK_NAME}_api.tls.certresolver=${LETS_ENCRYPT_ENV}"
app:
image: vikunja/frontend:0.19.1
networks:
- proxy
deploy:
restart_policy:
condition: on-failure
labels:
- "traefik.enable=true"
- "traefik.http.services.${STACK_NAME}.loadbalancer.server.port=80"
- "traefik.http.routers.${STACK_NAME}.rule=Host(`${DOMAIN}`)"
- "traefik.http.services.${STACK_NAME}.loadbalancer.server.port=3456"
- "traefik.http.routers.${STACK_NAME}.rule=Host(`${DOMAIN}`${EXTRA_DOMAINS})"
- "traefik.http.routers.${STACK_NAME}.middlewares=${STACK_NAME}-redirect-aliases@docker"
- "traefik.http.middlewares.${STACK_NAME}-redirect-aliases.redirectregex.regex=^https://${REDIRECT_DOMAIN:-example.com}(.*)"
- "traefik.http.middlewares.${STACK_NAME}-redirect-aliases.redirectregex.replacement=https://${DOMAIN}$${1}"
- "traefik.http.routers.${STACK_NAME}.entrypoints=web-secure"
- "traefik.http.routers.${STACK_NAME}.tls.certresolver=${LETS_ENCRYPT_ENV}"
- "coop-cloud.${STACK_NAME}.version=0.1.0+0.19.1"
- "coop-cloud.${STACK_NAME}.version=1.1.0+0.24.6"
- "coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-120}"
healthcheck:
test: [ "CMD", "curl", "-f", "http://localhost" ]
test: [ "CMD", "/healthcheck"]
interval: 30s
timeout: 10s
retries: 10
start_period: 1m
redis:
image: redis
image: redis:7.4.1-alpine
networks:
- internal
@@ -63,19 +83,22 @@ services:
volumes:
- db:/var/lib/postgresql/data
healthcheck:
test: [ "CMD", "pg_isready", "-U", "vikunja" ]
test: ["CMD-SHELL", "pg_isready -h localhost -U $$POSTGRES_USER"]
interval: 2s
networks:
- internal
secrets:
- db_password
deploy:
restart_policy:
condition: on-failure
labels:
backupbot.backup: "true"
backupbot.backup.pre-hook: "mkdir -p /tmp/backup/ && PGPASSWORD=$$(cat $${POSTGRES_PASSWORD_FILE}) pg_dump -U $${POSTGRES_USER} $${POSTGRES_DB} > /tmp/backup/backup.sql"
backupbot.backup.post-hook: "rm -rf /tmp/backup"
backupbot.backup.path: "/tmp/backup/"
backupbot.backup: "${ENABLE_BACKUPS:-true}"
backupbot.backup.pre-hook: "/pg_backup.sh backup"
backupbot.backup.volumes.db.path: "backup.sql"
backupbot.restore.post-hook: '/pg_backup.sh restore'
configs:
- source: pg_backup
target: /pg_backup.sh
mode: 0555
volumes:
files:
@@ -93,6 +116,12 @@ configs:
name: ${STACK_NAME}_config_yml_${CONFIG_YML_VERSION}
file: config.yml.tmpl
template_driver: golang
healthcheck:
name: ${STACK_NAME}_healthcheck_${HEALTHCHECK_VERSION}
file: healthcheck
pg_backup:
name: ${STACK_NAME}_pg_backup_${PG_BACKUP_VERSION}
file: pg_backup.sh
secrets:
db_password:

config.yml.tmpl

@@ -1,328 +1,21 @@
# https://kolaente.dev/vikunja/vikunja/src/commit/eee7b060b65fb9b35c0bca0e4f69b66b56a8fe0f/config.yml.sample
# https://vikunja.io/docs/config-options
service:
# This token is used to verify issued JWT tokens.
# Default is a random token which will be generated at each startup of vikunja.
# (This means all already issued tokens will be invalid once you restart vikunja)
JWTSecret: {{ secret "jwt_secret" }}
# # The duration of the issued JWT tokens in seconds.
# # The default is 259200 seconds (3 Days).
# jwtttl: 259200
# # The duration of the "remember me" time in seconds. When the login request is made with
# # the long param set, the token returned will be valid for this period.
# # The default is 2592000 seconds (30 Days).
# jwtttllong: 2592000
# # The interface on which to run the webserver
# interface: ":3456"
# # Path to Unix socket. If set, it will be created and used instead of tcp
# unixsocket:
# # Permission bits for the Unix socket. Note that octal values must be prefixed by "0o", e.g. 0o660
# unixsocketmode:
# # The URL of the frontend, used to send password reset emails.
frontendurl: https://{{ env "DOMAIN" }}
# # The base path on the file system where the binary and assets are.
# # Vikunja will also look in this path for a config file, so you could provide only this variable to point to a folder
# # with a config file which will then be used.
# rootpath: <rootpath>
# # Path on the file system to serve static files from. Set to the path of the frontend files to host frontend alongside the api.
# staticpath: ""
# # The max number of items which can be returned per page
# maxitemsperpage: 50
# # Enable the caldav endpoint, see the docs for more details
# enablecaldav: true
# # Set the motd message, available from the /info endpoint
# motd: ""
# # Enable sharing of lists via a link
# enablelinksharing: true
# # Whether to let new users registering themselves or not
# enableregistration: true
# # Whether to enable task attachments or not
# enabletaskattachments: true
# # The time zone all timestamps are in. Please note that time zones have to use [the official tz database names](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). UTC or GMT offsets won't work.
# timezone: GMT
# # Whether task comments should be enabled or not
# enabletaskcomments: true
# # Whether totp is enabled. In most cases you want to leave that enabled.
# enabletotp: true
# # If not empty, enables logging of crashes and unhandled errors in sentry.
# sentrydsn: ''
# # If not empty, this will enable `/test/{table}` endpoints which allow to put any content in the database.
# # Used to reset the db before frontend tests. Because this is quite a dangerous feature allowing for lots of harm,
# # each request made to this endpoint needs to provide an `Authorization: <token>` header with the token from below. <br/>
# # **You should never use this unless you know exactly what you're doing**
# testingtoken: ''
# # If enabled, vikunja will send an email to everyone who is either assigned to a task or created it when a task reminder
# # is due.
# enableemailreminders: true
# # If true, will allow users to request the complete deletion of their account. When using external authentication methods
# # it may be required to coordinate with them in order to delete the account. This setting will not affect the cli commands
# # for user deletion.
# enableuserdeletion: true
# # The maximum size clients will be able to request for user avatars.
# # If clients request a size bigger than this, it will be changed on the fly.
# maxavatarsize: 1024
#
database:
# Database type to use. Supported types are mysql, postgres and sqlite.
type: "postgres"
# Database user which is used to connect to the database.
user: "vikunja"
# Database password
password: {{ secret "db_password" }}
# Database host
host: "db"
# Database to use
database: "vikunja"
# # When using sqlite, this is the path where to store the data
# path: "./vikunja.db"
# # Sets the max open connections to the database. Only used when using mysql and postgres.
# maxopenconnections: 100
# # Sets the maximum number of idle connections to the db.
# maxidleconnections: 50
# # The maximum lifetime of a single db connection in milliseconds.
# maxconnectionlifetime: 10000
# # Secure connection mode. Only used with postgres.
# # (see https://pkg.go.dev/github.com/lib/pq?tab=doc#hdr-Connection_String_Parameters)
# sslmode: disable
# # The path to the client cert. Only used with postgres.
# sslcert: ""
# # The path to the client key. Only used with postgres.
# sslkey: ""
# # The path to the ca cert. Only used with postgres.
# sslrootcert: ""
# # Enable SSL/TLS for mysql connections. Options: false, true, skip-verify, preferred
# tls: false
#
cache:
# If cache is enabled or not
enabled: true
# Cache type. Possible values are "keyvalue", "memory" or "redis".
# When choosing "keyvalue" this setting follows the one configured in the "keyvalue" section.
# When choosing "redis" you will need to configure the redis connection seperately.
type: redis
# When using memory this defines the maximum size an element can take
# maxelementsize: 1000
redis:
# Whether to enable redis or not
enabled: true
# The host of the redis server including its port.
host: 'redis:6379'
# The password used to authenticate against the redis server
password: ''
# 0 means default database
db: 0
#
# cors:
# # Whether to enable or disable cors headers.
# # Note: If you want to put the frontend and the api on separate domains or ports, you will need to enable this.
# # Otherwise the frontend won't be able to make requests to the api through the browser.
# enable: true
# # A list of origins which may access the api. These need to include the protocol (`http://` or `https://`) and port, if any.
# origins:
# - "*"
# # How long (in seconds) the results of a preflight request can be cached.
# maxage: 0
#
{{ if eq (env "SMTP_ENABLED") "true" }}
password: "{{ secret "db_password" }}"
mailer:
# Whether to enable the mailer or not. If it is disabled, all users are enabled right away and password reset is not possible.
enabled: {{ env "SMTP_ENABLED" }}
# SMTP Host
host: {{ env "SMTP_HOST" }}
# SMTP Host port
port: 587
# SMTP Auth Type. Can be either `plain`, `login` or `cram-md5`.
authtype: {{ env "SMTP_AUTHTYPE" }}
# SMTP username
username: {{ env "SMTP_USER" }}
# SMTP password
password: {{ secret "smtp_password" }}
# Whether to skip verification of the tls certificate on the server
skiptlsverify: false
# The default from address when sending emails
fromemail: {{ env "SMTP_FROM_EMAIL" }}
# The length of the mail queue.
queuelength: 100
# The timeout in seconds after which the current open connection to the mailserver will be closed.
queuetimeout: 30
# By default, vikunja will try to connect with starttls, use this option to force it to use ssl.
forcessl: false
{{ end }}
log:
# # A folder where all the logfiles should go.
# path: <rootpath>logs
# # Whether to show any logging at all or none
enabled: true
# # Where the normal log should go. Possible values are stdout, stderr, file or off to disable standard logging.
standard: "stdout"
# # Change the log level. Possible values (case-insensitive) are CRITICAL, ERROR, WARNING, NOTICE, INFO, DEBUG.
level: {{ env "LOG_LEVEL" }}
# # Whether or not to log database queries. Useful for debugging. Possible values are stdout, stderr, file or off to disable database logging.
# database: "stdout"
# # The log level for database log messages. Possible values (case-insensitive) are CRITICAL, ERROR, WARNING, NOTICE, INFO, DEBUG.
# databaselevel: "DEBUG"
# # Whether to log http requests or not. Possible values are stdout, stderr, file or off to disable http logging.
# http: "stdout"
# # Echo has its own logging which usually is unnecessary, which is why it is disabled by default. Possible values are stdout, stderr, file or off to disable standard logging.
# echo: "off"
# # Whether or not to log events. Useful for debugging. Possible values are stdout, stderr, file or off to disable events logging.
# events: "stdout"
# # The log level for event log messages. Possible values (case-insensitive) are ERROR, INFO, DEBUG.
# eventslevel: "DEBUG"
#
# ratelimit:
# # whether or not to enable the rate limit
# enabled: false
# # The kind on which rates are based. Can be either "user" for a rate limit per user or "ip" for an ip-based rate limit.
# kind: user
# # The time period in seconds for the limit
# period: 60
# # The max number of requests a user is allowed to do in the configured time period
# limit: 100
# # The store where the limit counter for each user is stored.
# # Possible values are "keyvalue", "memory" or "redis".
# # When choosing "keyvalue" this setting follows the one configured in the "keyvalue" section.
# store: keyvalue
#
# files:
# # The path where files are stored
# basepath: ./files # relative to the binary
# # The maximum size of a file, as a human-readable string.
# # Warning: The max size is limited 2^64-1 bytes due to the underlying datatype
# maxsize: 20MB
#
# migration:
# # These are the settings for the wunderlist migrator
# wunderlist:
# # Whether to enable the wunderlist migrator or not
# enable: false
# # The client id, required for making requests to the wunderlist api
# # You need to register your vikunja instance at https://developer.wunderlist.com/apps/new to get this
# clientid:
# # The client secret, also required for making requests to the wunderlist api
# clientsecret:
# # The url where clients are redirected after they authorized Vikunja to access their wunderlist stuff.
# # This needs to match the url you entered when registering your Vikunja instance at wunderlist.
# # This is usually the frontend url where the frontend then makes a request to /migration/wunderlist/migrate
# # with the code obtained from the wunderlist api.
# # Note that the vikunja frontend expects this to be /migrate/wunderlist
# redirecturl:
# todoist:
# # Whether to enable the todoist migrator or not
# enable: false
# # The client id, required for making requests to the todoist api
# # You need to register your vikunja instance at https://developer.todoist.com/appconsole.html to get this
# clientid:
# # The client secret, also required for making requests to the todoist api
# clientsecret:
# # The url where clients are redirected after they authorized Vikunja to access their todoist items.
# # This needs to match the url you entered when registering your Vikunja instance at todoist.
# # This is usually the frontend url where the frontend then makes a request to /migration/todoist/migrate
# # with the code obtained from the todoist api.
# # Note that the vikunja frontend expects this to be /migrate/todoist
# redirecturl: <frontend url>/migrate/todoist
# trello:
# # Whether to enable the trello migrator or not
# enable: false
# # The client id, required for making requests to the trello api
# # You need to register your vikunja instance at https://trello.com/app-key (log in before you visit that link) to get this
# key:
# # The url where clients are redirected after they authorized Vikunja to access their trello cards.
# # This needs to match the url you entered when registering your Vikunja instance at trello.
# # This is usually the frontend url where the frontend then makes a request to /migration/trello/migrate
# # with the code obtained from the trello api.
# # Note that the vikunja frontend expects this to end on /migrate/trello.
# redirecturl: <frontend url>/migrate/trello
# microsofttodo:
# # Whether to enable the microsoft todo migrator or not
# enable: false
# # The client id, required for making requests to the microsoft graph api
# # See https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app#register-an-application
# # for information about how to register your vikunja instance.
# clientid:
# # The client secret, also required for making requests to the microsoft graph api
# clientsecret:
# # The url where clients are redirected after they authorized Vikunja to access their microsoft todo tasks.
# # This needs to match the url you entered when registering your Vikunja instance at microsoft.
# # This is usually the frontend url where the frontend then makes a request to /migration/microsoft-todo/migrate
# # with the code obtained from the microsoft graph api.
# # Note that the vikunja frontend expects this to be /migrate/microsoft-todo
# redirecturl: <frontend url>/migrate/microsoft-todo
#
# avatar:
# # When using gravatar, this is the duration in seconds until a cached gravatar user avatar expires
# gravatarexpiration: 3600
#
# backgrounds:
# # Whether to enable backgrounds for lists at all.
# enabled: true
# providers:
# upload:
# # Whether to enable uploaded list backgrounds
# enabled: true
# unsplash:
# # Whether to enable setting backgrounds from unsplash as list backgrounds
# enabled: false
# # You need to create an application for your installation at https://unsplash.com/oauth/applications/new
# # and set the access token below.
# accesstoken:
# # The unsplash application id is only used for pingback and required as per their api guidelines.
# # You can find the Application ID in the dashboard for your API application. It should be a numeric ID.
# # It will only show in the UI if your application has been approved for Enterprise usage, therefore if
# # you're in Demo mode, you can also find the ID in the URL at the end: https://unsplash.com/oauth/applications/:application_id
# applicationid:
#
# # Legal urls
# # Will be shown in the frontend if configured here
# legal:
# imprinturl:
# privacyurl:
#
# # Key Value Storage settings
# # The Key Value Storage is used for different kinds of things like metrics and a few cache systems.
# keyvalue:
# # The type of the storage backend. Can be either "memory" or "redis". If "redis" is chosen it needs to be configured separately.
# type: "memory"
#
auth:
# Local authentication will let users log in and register (if enabled) through the db.
# This is the default auth mechanism and does not require any additional configuration.
local:
# Enable or disable local authentication
enabled: false
# OpenID configuration will allow users to authenticate through a third-party OpenID Connect compatible provider.<br/>
# The provider needs to support the `openid`, `profile` and `email` scopes.<br/>
# **Note:** Some openid providers (like gitlab) only make the email of the user available through openid claims if they have set it to be publicly visible.
# If the email is not public in those cases, authenticating will fail.
# **Note 2:** The frontend expects to be redirected after authentication by the third party
# to <frontend-url>/auth/openid/<auth key>. Please make sure to configure the redirect url with your third party
# auth service accordingly if you're using the default vikunja frontend.
# Take a look at the [default config file](https://kolaente.dev/vikunja/api/src/branch/main/config.yml.sample) for more information about how to configure openid authentication.
{{ if eq (env "OAUTH_ENABLED") "true" }}
auth:
openid:
# Enable or disable OpenID Connect authentication
enabled: {{ env "OAUTH_ENABLED" }}
# The url to redirect clients to. Defaults to the configured frontend url. If you're using Vikunja with the official
# frontend, you don't need to change this value.
# redirecturl: <frontend url>
# A list of enabled providers
enabled: {{ env "OAUTH_ENABLED" }}
providers:
# The name of the provider as it will appear in the frontend.
- name: {{ env "OAUTH_NAME" }}
# The auth url to send users to if they want to authenticate using OpenID Connect.
authurl: {{ env "OAUTH_URL" }}
# The client ID used to authenticate Vikunja at the OpenID Connect provider.
logouturl: {{ env "OAUTH_LOGOUT_URL" }}
clientid: {{ env "OAUTH_CLIENT_ID" }}
# The client secret used to authenticate Vikunja at the OpenID Connect provider.
clientsecret: {{ secret "oauth_secret" }}
scope: openid email profile
{{ end }}
# # Prometheus metrics endpoint
# metrics:
# # If set to true, enables a /metrics endpoint for prometheus to collect metrics about Vikunja.
# enabled: false
# # If set to a non-empty value the /metrics endpoint will require this as a username via basic auth in combination with the password below.
# username:
# # If set to a non-empty value the /metrics endpoint will require this as a password via basic auth in combination with the username below.
# password:
#

16
debug_Dockerfile Normal file

@@ -0,0 +1,16 @@
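# Debug variant of the image: the Vikunja binary and CA certificates are copied out of the
# upstream scratch-based image onto an Alpine base with bash, vim and curl, so the container
# can be inspected interactively.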
FROM vikunja/vikunja:0.24.2 AS vikunja-scratch
FROM alpine
RUN apk add --upgrade --no-cache vim bash curl
WORKDIR /app/vikunja
CMD [ "/app/vikunja/vikunja" ]
EXPOSE 3456
USER 1000
ENV VIKUNJA_SERVICE_ROOTPATH=/app/vikunja/
ENV VIKUNJA_DATABASE_PATH=/db/vikunja.db
COPY --from=vikunja-scratch /app/vikunja /app/vikunja
COPY --from=vikunja-scratch /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/

BIN
healthcheck Executable file

Binary file not shown.

50
healthcheck.c Normal file

@@ -0,0 +1,50 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
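// Minimal health check for the scratch-based Vikunja image: it opens a TCP connection to
// 127.0.0.1:3456, sends an HTTP HEAD request and prints the response status line. It exits
// non-zero only if the socket, connect or recv calls fail; the HTTP status code itself is
// not evaluated.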
int main() {
int sockfd;
struct sockaddr_in server_addr;
char request[] = "HEAD / HTTP/1.1\r\nHost: localhost\r\n\r\n";
char response[1024];
int received_bytes;
sockfd = socket(AF_INET, SOCK_STREAM, 0);
if (sockfd < 0) {
perror("socket");
return 1;
}
server_addr.sin_family = AF_INET;
server_addr.sin_port = htons(3456);
server_addr.sin_addr.s_addr = inet_addr("127.0.0.1");
if (connect(sockfd, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) {
perror("connect");
close(sockfd);
return 1;
}
send(sockfd, request, strlen(request), 0);
received_bytes = recv(sockfd, response, sizeof(response) - 1, 0);
if (received_bytes < 0) {
perror("recv");
close(sockfd);
return 1;
}
// Null-terminate the received bytes
response[received_bytes] = '\0';
// Extract the status code (the first line of the response contains it)
char *status_line = strtok(response, "\r\n");
printf("Response: %s\n", status_line);
close(sockfd);
return 0;
}

13
healthcheck_Dockerfile Normal file

@@ -0,0 +1,13 @@
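# Builds the statically linked healthcheck binary from healthcheck.c on Alpine/musl.
# SOURCE_DATE_EPOCH is pinned to help keep the build (and the sha256 documented in the README)
# reproducible.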
FROM alpine:latest
ENV SOURCE_DATE_EPOCH=1640995200
RUN apk add --no-cache gcc musl-dev
WORKDIR /app
COPY healthcheck.c /app
RUN gcc -o healthcheck healthcheck.c -static
CMD ["./healthcheck"]

34
pg_backup.sh Normal file

@@ -0,0 +1,34 @@
#!/bin/bash
set -e
BACKUP_FILE='/var/lib/postgresql/data/backup.sql'
function backup {
export PGPASSWORD=$(cat /run/secrets/db_password)
pg_dump -U ${POSTGRES_USER} ${POSTGRES_DB} > $BACKUP_FILE
}
function restore {
cd /var/lib/postgresql/data/
restore_config(){
# Restore allowed connections
cat pg_hba.conf.bak > pg_hba.conf
su postgres -c 'pg_ctl reload'
}
# Don't allow any other connections than local
cp pg_hba.conf pg_hba.conf.bak
echo "local all all trust" > pg_hba.conf
su postgres -c 'pg_ctl reload'
trap restore_config EXIT INT TERM
# Recreate Database
psql -U ${POSTGRES_USER} -d postgres -c "DROP DATABASE ${POSTGRES_DB} WITH (FORCE);"
createdb -U ${POSTGRES_USER} ${POSTGRES_DB}
psql -U ${POSTGRES_USER} -d ${POSTGRES_DB} -1 -f $BACKUP_FILE
trap - EXIT INT TERM
restore_config
}
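# Dispatch: run the function named by the script's first argument ("backup" or "restore").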
$@

1
release/1.0.0+0.24.2 Normal file

@@ -0,0 +1 @@
API and frontend are merged. Undeploy and deploy for an upgrade. Do a backup before upgrading.
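A minimal sketch of that upgrade sequence with abra (`<app-name>` is a placeholder, as in the README); take the backup first with whatever tooling your instance uses, for example backup-bot-two together with the recipe's `pg_backup.sh`:
```
# Undeploy the old split api/frontend stack, then deploy the merged single-image version.
abra app undeploy <app-name>
abra app deploy <app-name>
```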