23 Commits

Author SHA1 Message Date
c9abef3bef chore: publish 0.0.1+2.1.0 release 2025-12-10 20:01:43 -05:00
20bfd2c625 Update README.md 2025-10-28 16:39:29 +00:00
9f8e8ae105 provide backupbot with proper path labels, note: does not work because of the lack of any shell in the container. 2025-10-27 15:25:30 -04:00
4d2cf81e2b Minor typos, compose.fullbackup.yml not compose-fullbackup.yml 2025-10-27 15:24:45 -04:00
b8693dfd07 update docs 2025-10-23 09:37:45 -04:00
e584bf7c06 rename BOOTSTRAP_IP env var to _HOST 2025-10-23 09:34:39 -04:00
00de463c1b documentation updates 2025-10-23 09:19:28 -04:00
3a729d5692 support node bootstrapping 2025-10-23 08:24:48 -04:00
66e621cb0f Update README.md 2025-10-23 06:28:50 +00:00
8471dacb88 Support metadata or full data backup + documentation 2025-10-23 00:11:52 -04:00
a4c1afcedf automatically generate a hex value for rpc secret 2025-10-22 23:12:01 -04:00
9ca66f0f1c define rpc_public_addr 2025-10-22 21:14:43 -04:00
72a07ac017 working RPC port 3901 2025-10-22 20:58:58 -04:00
8fedfe5ef7 Update garage.toml.tmpl 2025-10-22 22:55:22 +00:00
4bc064164e Update compose.yml 2025-10-22 22:53:41 +00:00
9ed696c9bd boop 2025-10-22 18:53:01 -04:00
e34f6d3bc3 add internal network 2025-10-22 18:51:23 -04:00
d0b4ae388b rpc troubleshooting 2025-10-22 18:23:33 -04:00
4d5a91a70d add tcp services label for port 9999 availability 2025-10-22 17:52:04 -04:00
ffacc3a214 test rpc service avail on 9999 2025-10-22 17:32:59 -04:00
b32bf01571 change rpc bind port 2025-10-22 17:24:12 -04:00
3bf58e964d update docs for alias command 2025-10-22 16:57:06 -04:00
564e5f01cc add backupbot support 2025-10-22 15:11:50 -04:00
6 changed files with 80 additions and 32 deletions

View File

@ -5,10 +5,10 @@ DOMAIN=garage.example.com
LETS_ENCRYPT_ENV=production
COMPOSE_FILE="compose.yml"
SECRET_RPC_SECRET_VERSION=v1
SECRET_RPC_SECRET_VERSION=v1 # length=32 charset=hex
# Changing the replication factor after initial deployment is not
# supported and requires deleting the existing cluster layout metadata.
# Changing the replication factor after initial deployment is not
# supported and requires deleting the existing cluster layout metadata.
REPLICATION_FACTOR=2
CONSISTENCY_MODE=consistent
@ -17,6 +17,14 @@ DATA_FSYNC=false
DISABLE_SCRUB=false
BLOCK_SIZE=1MiB # only increase if there is a fast network connection between nodes
## Bootstrap this node in an existing Garage cluster
#BOOTSTRAP_HOST=""
#BOOTSTRAP_ID=""
#BOOTSTRAP_PORT=3901
# Use a directory on the host instead of a docker volume for storage
#LOCAL_FOLDER_META=/path/on/docker/host
#LOCAL_FOLDER_DATA=/path/on/docker/host
#LOCAL_FOLDER_DATA=/path/on/docker/host
## Enable Full Data Backups (not just metadata)
# COMPOSE_FILE="$COMPOSE_FILE:compose.fullbackup.yml"

View File

@ -9,48 +9,63 @@
* **Image**: [`garage`](https://hub.docker.com/r/dxflrs/garage), 4, upstream
* **Healthcheck**: No
* **Backups**: No
* **Email**: No
* **Email**: N/A
* **Tests**: No
* **SSO**: No
* **SSO**: N/A
<!-- endmetadata -->
## Quick start
* `abra app new garage`
* Garage is particular about the rpc secret, generate it locally with `openssl rand -hex 32` then insert the result
* `abra app secret i <app-domain> rpc_secret v1 <rpc-secret>`
> Note: all nodes must share the same rpc secret, do not lose this value if you plan to cluster garage!
* `abra app config <app-domain>`
* `abra app deploy <app-domain>`
- `abra app new garage`
- If you are **creating a new cluster**:
- Generate a new rpc_secret: `abra app secret generate --all`
- Note: all nodes must share the same rpc secret, do not lose this value if you plan to cluster garage!
- Note: In older versions of abra you must generate the secret locally with `openssl rand -hex 32` then insert the result as described below
- `abra app config <app-domain>`
- If this Garage node is **joining an existing cluster**:
- Insert the existing rpc_secret: `abra app secret insert <app-domain> rpc_secret v1 <rpc-secret>`
- `abra app config <app-domain>`
- Uncomment the block that starts with `## Bootstrap this node`
- Set `BOOTSTRAP_HOST` and `BOOTSTRAP_ID`
- `abra app deploy <app-domain>`
## Peering
## Configuration
#### Garage CLI
Start by creating an alias for the abra run command
### Allow RPC Connections
* Your ingress controller must be set up to allow connections on port 3901. We assume you're using Traefik
* `abra app configure <traefik-app-name>`
* Uncomment the block that starts with `## Garage`
* Re-deploy Traefik: `abra app undeploy -n <traefik-app-name> && sleep 5 && abra app deploy -n <traefik-app-name>`
### Prepare the Garage Client
To interact with garage inside docker, it's best to create an alias for the following abra run command.
```
alias garage="abra app run <app-domain> app /garage"
alias garage="abra app run <app domain> -- app /garage"
```
Run `garage status` to verify everything is working
Run `garage status` to verify everything is working.
#### Assign Roles
You can optionally add this alias to your `.bashrc` (or similar) file to avoid having to define it repeatedly.
### Garage Quick Start Guide
Once `garage status` works, you can follow the guide here: https://garagehq.deuxfleurs.fr/documentation/quick-start/#checking-that-garage-runs-correctly
Terms:
* `node id` (required) - Node identifier supplied by the garage CLI, can be found by running `garage node id`.
* `zone` (required) - Identifier for how nodes will be grouped; a zone usually refers to a geographical location (us-east, paris-1, etc.). No specific syntax is required, zones can be called anything.
* `capacity` (required) - Disk space the node will be allocating to the cluster, use T and G for units (Terabytes and Gigabytes respectively).
* `tag` (optional) - Additional notes appended to garage status, usually a title for the node.
> Role assignment command conflicts with `abra app run`'s -t option\
> Connecting not currently implemented
## Backups
> Not currently implemented
> In development, not currently reliable
Backups will only capture a snapshot of the metadata directory, which includes bucket names, hashed secrets, and other related information. However, they do not include the actual data!
By default, backups will only capture a snapshot of the metadata directory, which includes bucket names, hashed secrets, and other related information.
By default, the actual data will not be backed up!
If you're running Garage in a cluster, when you restore the metadata, other nodes will provide any missing data.
### To enable full data backups
* `abra app config <app domain>`
* Uncomment the block that starts with `## Enable Full Data Backups`
* Re-deploy Garage: `abra app undeploy -n <app domain> && sleep 5 && abra app deploy -n <app domain>`
If you're running Garage in a cluster, when you restore the metadata, other nodes will send the new node any missing data.\
Finally, please note that Abra backups are not a substitute for a proper data replication strategy, and it's recommended to run Garage in a cluster if you need data redundancy.
For more, see [`garagehq.deuxfleurs.fr`](https://garagehq.deuxfleurs.fr/documentation/cookbook/real-world/).

9
compose.fullbackup.yml Normal file
View File

@ -0,0 +1,9 @@
---
version: "3.8"
services:
app:
deploy:
labels:
- "backupbot.backup=true"
- "backupbot.backup.path=/var/lib/garage/meta,/var/lib/garage/data"

View File

@ -3,7 +3,7 @@ version: "3.8"
services:
app:
image: dxflrs/garage:v1.0.0
image: dxflrs/garage:v2.1.0
configs:
- source: garage_conf
target: /etc/garage.toml
@ -11,6 +11,7 @@ services:
- rpc_secret
networks:
- proxy
- internal
deploy:
restart_policy:
condition: on-failure
@ -20,7 +21,13 @@ services:
- "traefik.http.routers.${STACK_NAME}.rule=Host(`${DOMAIN}`)"
- "traefik.http.routers.${STACK_NAME}.entrypoints=web-secure"
- "traefik.http.routers.${STACK_NAME}.tls.certresolver=${LETS_ENCRYPT_ENV}"
- "coop-cloud.${STACK_NAME}.version=0.0.1+1.0.0"
- "traefik.tcp.routers.${STACK_NAME}-rpc.rule=HostSNI(`*`)"
- "traefik.tcp.routers.${STACK_NAME}-rpc.entrypoints=garage-rpc"
- "traefik.tcp.services.${STACK_NAME}-rpc.loadbalancer.server.port=3901"
- "coop-cloud.${STACK_NAME}.version=0.0.1+2.1.0"
- "backupbot.backup=true"
- "backupbot.backup.pre-hook=/garage meta snapshot --all"
- "backupbot.backup.path=/var/lib/garage/meta/snapshots/,/var/lib/garage/meta/cluster_layout,/var/lib/garage/meta/data_layout,/var/lib/garage/meta/node_key,/var/lib/garage/meta/node_key.pub"
volumes:
- "${LOCAL_FOLDER_META:-meta}:/var/lib/garage/meta"
- "${LOCAL_FOLDER_DATA:-data}:/var/lib/garage/data"
@ -28,6 +35,7 @@ services:
networks:
proxy:
external: true
internal:
configs:
garage_conf:
@ -43,4 +51,4 @@ secrets:
volumes:
meta:
data:
conf:
conf:

View File

@ -13,9 +13,16 @@ block_size = '{{ env "BLOCK_SIZE" }}'
compression_level = 2
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "{{ env "DOMAIN" }}:3901"
rpc_addr = "[::]:3901"
rpc_secret = "{{ secret "rpc_secret" }}"
{{ if ne (env "BOOTSTRAP_ID") "" }}
bootstrap_peers = [
"{{ env "BOOTSTRAP_ID" }}@{{ env "BOOTSTRAP_HOST" }}:{{ env "BOOTSTRAP_PORT" }}"
]
{{ end }}
[s3_api]
s3_region = "garage"
api_bind_addr = "[::]:3900"

1
release/0.0.1+2.1.0 Normal file
View File

@ -0,0 +1 @@
unstable release