Compare commits
402 commits · 0.8.0-rc2- ... main
(Commit table: the 402 commit SHAs were captured without their author, date or message columns.)
@@ -1,4 +1,8 @@
Dockerfile
.dockerignore
*.swp
*.swo
*.swp
.dockerignore
Dockerfile
abra
dist
kadabra
tags
.drone.yml · 54 changed lines

@@ -3,20 +3,17 @@ kind: pipeline
name: coopcloud.tech/abra

steps:
  - name: make check
    image: golang:1.20
    image: golang:1.22
    commands:
      - make check

  - name: make build
    image: golang:1.20
    commands:
      - make build
    depends_on:
      - make check

  - name: make test
    image: golang:1.20
    image: golang:1.22
    environment:
      CATL_URL: https://git.coopcloud.tech/toolshed/recipes-catalogue-json.git
    commands:
      - mkdir -p $HOME/.abra
      - git clone $CATL_URL $HOME/.abra/catalogue
      - make test
    depends_on:
      - make check

@@ -27,13 +24,12 @@ steps:
      - git fetch --tags
    depends_on:
      - make check
      - make build
      - make test
    when:
      event: tag

  - name: release
    image: goreleaser/goreleaser:v1.18.2
    image: goreleaser/goreleaser:v2.5.1
    environment:
      GITEA_TOKEN:
        from_secret: goreleaser_gitea_token

@@ -51,18 +47,42 @@ steps:
    image: plugins/docker
    settings:
      auto_tag: true
      username: 3wordchant
      username: abra-bot
      password:
        from_secret: git_coopcloud_tech_token_3wc
      repo: git.coopcloud.tech/coop-cloud/abra
        from_secret: git_coopcloud_tech_token_abra_bot
      repo: git.coopcloud.tech/toolshed/abra
      tags: dev
      registry: git.coopcloud.tech
    when:
      event:
        exclude:
          - pull_request
      branch:
        - main
    depends_on:
      - make check
      - make test

  - name: integration test
    image: appleboy/drone-ssh
    settings:
      host:
        - int.coopcloud.tech
      username: abra
      key:
        from_secret: abra_int_private_key
      port: 22
      command_timeout: 60m
      script_stop: true
      request_pty: true
      script:
        - |
          wget https://git.coopcloud.tech/toolshed/abra/raw/branch/main/scripts/tests/run-ci-int -O run-ci-int
          chmod +x run-ci-int
          sh run-ci-int
    when:
      event:
        - cron:
      cron:
        # @daily https://docs.drone.io/cron/
        - integration

volumes:
  - name: deps
@@ -1,4 +0,0 @@
GANDI_TOKEN=...
HCLOUD_TOKEN=...
REGISTRY_PASSWORD=...
REGISTRY_USERNAME=...
@@ -1,6 +1,7 @@
go env -w GOPRIVATE=coopcloud.tech

# integration test suite
# export ABRA_DIR="$HOME/.abra_test"
# export ABRA_TEST_DOMAIN=test.example.com
# export ABRA_CI=1

# export PASSWORD_STORE_DIR=$(pwd)/../../autonomic/passwords/passwords/
# export HCLOUD_TOKEN=$(pass show logins/hetzner/cicd/api_key)
# export CAPSUL_TOKEN=...
# export GITEA_TOKEN=...

# release automation
# export GITEA_TOKEN=
@@ -1,8 +0,0 @@
---
name: "Do not use this issue tracker"
about: "Do not use this issue tracker"
title: "Do not use this issue tracker"
labels: []
---

Please report your issue on [`coop-cloud/organising`](https://git.coopcloud.tech/coop-cloud/organising)
.gitignore (vendored) · 5 changed lines

@@ -2,8 +2,7 @@
.e2e.env
.envrc
.vscode/
/abra
/kadabra
abra
dist/
tests/integration/.abra/catalogue
vendor/
tests/integration/.bats
GoReleaser build configuration

@@ -29,6 +29,8 @@ builds:
ldflags:
- "-X 'main.Commit={{ .Commit }}'"
- "-X 'main.Version={{ .Version }}'"
- "-s"
- "-w"

- id: kadabra
binary: kadabra

@@ -47,15 +49,13 @@ builds:
- 5
- 6
- 7
gcflags:
- "all=-l -B"
ldflags:
- "-X 'main.Commit={{ .Commit }}'"
- "-X 'main.Version={{ .Version }}'"

archives:
- replacements:
386: i386
amd64: x86_64
format: binary
- "-s"
- "-w"

checksum:
name_template: "checksums.txt"
@@ -1,17 +1,22 @@
# authors

> If you're looking at this and you hack on `abra` and you're not listed here,
> please do add yourself! This is a community project, let's show some :heart:
> please do add yourself! This is a community project, let's show some 💞

- 3wordchant
- ammaratef45
- cassowary
- codegod100
- decentral1se
- fauno
- frando
- kawaiipunk
- knoflook
- moritz
- p4u1
- rix
- roxxers
- vera
- yksflip
- basebuilder
- mayel
Dockerfile · 21 changed lines

@@ -1,8 +1,13 @@
FROM golang:1.20-alpine AS build
# Build image
FROM golang:1.22-alpine AS build

ENV GOPRIVATE coopcloud.tech
ENV GOPRIVATE=coopcloud.tech

RUN apk add --no-cache make git gcc musl-dev
RUN apk add --no-cache \
    gcc \
    git \
    make \
    musl-dev

COPY . /app

@@ -10,7 +15,15 @@ WORKDIR /app

RUN CGO_ENABLED=0 make build

FROM scratch
# Release image ("slim")
FROM alpine:3.19.1

RUN apk add --no-cache \
    ca-certificates \
    git \
    openssh

RUN update-ca-certificates

COPY --from=build /app/abra /abra
Makefile · 50 changed lines

@@ -2,35 +2,53 @@ ABRA := ./cmd/abra
KADABRA := ./cmd/kadabra
COMMIT := $(shell git rev-list -1 HEAD)
GOPATH := $(shell go env GOPATH)
GOVERSION := 1.22
LDFLAGS := "-X 'main.Commit=$(COMMIT)'"
DIST_LDFLAGS := $(LDFLAGS)" -s -w"
GCFLAGS := "all=-l -B"

export GOPRIVATE=coopcloud.tech

all: format check build test
# NOTE(d1): default `make` optimised for Abra hacking
all: format check build-abra test

run:
    @go run -ldflags=$(LDFLAGS) $(ABRA)
run-abra:
    @go run -gcflags=$(GCFLAGS) -ldflags=$(LDFLAGS) $(ABRA)

install:
    @go install -ldflags=$(LDFLAGS) $(ABRA)
run-kadabra:
    @go run -gcflags=$(GCFLAGS) -ldflags=$(LDFLAGS) $(KADABRA)

build-dev:
    @go build -v -ldflags=$(LDFLAGS) $(ABRA)
install-abra:
    @go install -gcflags=$(GCFLAGS) -ldflags=$(LDFLAGS) $(ABRA)

build:
    @go build -v -ldflags=$(DIST_LDFLAGS) $(ABRA)
    @go build -v -ldflags=$(DIST_LDFLAGS) $(KADABRA)
install-kadabra:
    @go install -gcflags=$(GCFLAGS) -ldflags=$(LDFLAGS) $(KADABRA)

install: install-abra install-kadabra

build-abra:
    @go build -v -gcflags=$(GCFLAGS) -ldflags=$(DIST_LDFLAGS) $(ABRA)

build-kadabra:
    @go build -v -gcflags=$(GCFLAGS) -ldflags=$(DIST_LDFLAGS) $(KADABRA)

build: build-abra build-kadabra

build-docker-abra:
    @docker run -it -v $(PWD):/abra golang:$(GOVERSION) \
        bash -c 'cd /abra; ./scripts/docker/build.sh'

build-docker: build-docker-abra

clean:
    @rm '$(GOPATH)/bin/abra'
    @rm '$(GOPATH)/bin/kadabra'

format:
    @gofmt -s -w .
    @gofmt -s -w $$(find . -type f -name '*.go' | grep -v "/vendor/")

check:
    @test -z $$(gofmt -l .) || \
    @test -z $$(gofmt -l $$(find . -type f -name '*.go' | grep -v "/vendor/")) || \
        (echo "gofmt: formatting issue - run 'make format' to resolve" && exit 1)

test:

@@ -39,9 +57,5 @@ test:
loc:
    @find . -name "*.go" | xargs wc -l

loc-author:
    @git ls-files -z | \
        xargs -0rn 1 -P "$$(nproc)" -I{} sh -c 'git blame -w -M -C -C --line-porcelain -- {} | grep -I --line-buffered "^author "' | \
        sort -f | \
        uniq -ic | \
        sort -n
deps:
    @go get -t -u ./...
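The LDFLAGS and DIST_LDFLAGS values above inject the current commit (and, in the GoReleaser build, the version) into the binary at link time via Go's -X flag. A minimal sketch of the consuming side, assuming abra's main package declares these variables (that file is not part of this diff):

```go
package main

import "fmt"

// Overwritten at link time by `-ldflags "-X 'main.Commit=...' -X 'main.Version=...'"`.
// The defaults apply to a plain `go build` that bypasses the Makefile.
var (
	Commit  = "unknown"
	Version = "dev"
)

func main() {
	// e.g. what a `--version`-style code path could print
	fmt.Printf("abra %s (commit %s)\n", Version, Commit)
}
```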
@@ -1,13 +1,13 @@
# `abra`

[![Build Status](https://build.coopcloud.tech/api/badges/coop-cloud/abra/status.svg?ref=refs/heads/main)](https://build.coopcloud.tech/coop-cloud/abra)
[![Go Report Card](https://goreportcard.com/badge/git.coopcloud.tech/coop-cloud/abra)](https://goreportcard.com/report/git.coopcloud.tech/coop-cloud/abra)
[![Build Status](https://build.coopcloud.tech/api/badges/toolshed/abra/status.svg?ref=refs/heads/main)](https://build.coopcloud.tech/toolshed/abra)
[![Go Report Card](https://goreportcard.com/badge/git.coopcloud.tech/toolshed/abra)](https://goreportcard.com/report/git.coopcloud.tech/toolshed/abra)
[![Go Reference](https://pkg.go.dev/badge/coopcloud.tech/abra.svg)](https://pkg.go.dev/coopcloud.tech/abra)

The Co-op Cloud utility belt 🎩🐇

<a href="https://github.com/egonelbre/gophers"><img align="right" width="150" src="https://github.com/egonelbre/gophers/raw/master/.thumb/sketch/adventure/poking-fire.png"/></a>

`abra` is the flagship client & command-line tool for Co-op Cloud. It has been developed specifically for the purpose of making the day-to-day operations of [operators](https://docs.coopcloud.tech/operators/) and [maintainers](https://docs.coopcloud.tech/maintainers/) pleasant & convenient. It is libre software, written in [Go](https://go.dev) and maintained and extended by the community :heart:
`abra` is the flagship client & command-line tool for Co-op Cloud. It has been developed specifically for the purpose of making the day-to-day operations of [operators](https://docs.coopcloud.tech/operators/) and [maintainers](https://docs.coopcloud.tech/maintainers/) pleasant & convenient. It is libre software, written in [Go](https://go.dev) and maintained and extended by the community 💖

Please see [docs.coopcloud.tech/abra](https://docs.coopcloud.tech/abra) for help on install, upgrade, hacking, troubleshooting & more!
App command root (package app), ported from urfave/cli to cobra

@@ -1,37 +1,11 @@
package app

import (
	"github.com/urfave/cli"
	"github.com/spf13/cobra"
)

var AppCommand = cli.Command{
	Name:        "app",
	Aliases:     []string{"a"},
	Usage:       "Manage apps",
	ArgsUsage:   "<domain>",
	Description: "Functionality for managing the life cycle of your apps",
	Subcommands: []cli.Command{
		appBackupCommand,
		appCheckCommand,
		appCmdCommand,
		appConfigCommand,
		appCpCommand,
		appDeployCommand,
		appErrorsCommand,
		appListCommand,
		appLogsCommand,
		appNewCommand,
		appPsCommand,
		appRemoveCommand,
		appRestartCommand,
		appRestoreCommand,
		appRollbackCommand,
		appRunCommand,
		appSecretCommand,
		appServicesCommand,
		appUndeployCommand,
		appUpgradeCommand,
		appVersionCommand,
		appVolumeCommand,
	},
var AppCommand = &cobra.Command{
	Use:     "app [cmd] [args] [flags]",
	Aliases: []string{"a"},
	Short:   "Manage apps",
}
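With cobra, subcommands are no longer declared in a Subcommands slice; they are attached to the parent command imperatively. Where abra does this wiring is outside this diff, but the standard pattern looks roughly like the sketch below (the helper function is hypothetical; the subcommand names come from the surrounding files):

```go
package app

import "github.com/spf13/cobra"

// newAppCommandTree is a hypothetical helper showing how the exported cobra
// commands from this package would typically be assembled under AppCommand.
func newAppCommandTree() *cobra.Command {
	app := AppCommand // "app", alias "a"

	// Each former cli.Command is now a *cobra.Command registered explicitly.
	app.AddCommand(
		AppBackupCommand,
		AppCheckCommand,
		AppCmdCommand,
		AppConfigCommand,
		AppCpCommand,
	)

	return app
}
```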
App backup commands (package app), rewritten from urfave/cli to cobra

@@ -1,399 +1,307 @@
The previous in-process backup implementation is removed: the urfave/cli appBackupCommand read the "backupbot.backup", "backupbot.backup.path" and "backupbot.backup.pre-hook"/"post-hook" deploy labels from the recipe, ran the pre/post hook commands inside the deployed service containers, copied each backup path out with CopyFromContainer, and merged the results into a single timestamped .tar.gz under ~/.abra/backups (helpers: backupConfig, runBackup, TimeStamp, copyToFile, cleanupTempArchives, mergeArchives, addTar, openTarFile). Its long help described the label format, e.g. "backupbot.backup.path=/var/lib/foo,/var/lib/bar", and suggested inspecting archives with "tar -tf ~/.abra/backups/example_com_app_609341138.tar.gz".

The new implementation delegates all of this to the deployed backupbot container and defines a cobra command tree, logging via coopcloud.tech/abra/pkg/log instead of logrus:

- AppBackupCommand ("backup [cmd] [args] [flags]", alias "b"): parent command, "Manage app backups".
- AppBackupListCommand ("list <domain> [flags]", alias "ls"): "List the contents of a snapshot"; flags --snapshot/-s, --all/-a, --timestamps/-t.
- AppBackupDownloadCommand ("download <domain> [flags]", alias "d"): "Download a snapshot"; downloads a backup.tar.gz to the current working directory, with "--volumes/-v" including data contained in volumes alongside paths specified in "backupbot.backup.path" labels; flags --snapshot/-s, --path/-p, --secrets/-S, --volumes/-v, --chaos/-C. After running the remote "download" command it copies /tmp/backup.tar.gz out of the backupbot container into ".".
- AppBackupCreateCommand ("create <domain> [flags]", alias "c"): "Create a new snapshot"; flags --retries/-r (default "1"), --chaos/-C.
- AppBackupSnapshotsCommand ("snapshots <domain> [flags]", alias "s"): "List all snapshots".

Each Run handler validates the app from its arguments, ensures the recipe (app.Recipe.Ensure), opens a client for app.Server, resolves the backupbot container via internal.RetrieveBackupBotContainer, builds an exec environment starting from SERVICE=<app domain> and MACHINE_LOGS=true, conditionally appends SNAPSHOT, SHOW_ALL, TIMESTAMPS, INCLUDE_PATH, SECRETS, VOLUMES or RETRIES from the corresponding flags, and invokes internal.RunBackupCmdRemote with the matching subcommand name ("ls", "download", "create", "snapshots"), calling log.Fatal on any error. All flags are registered in init(), backed by the package-level variables snapshot, retries, includePath, showAllPaths, timestamps, includeSecrets and includeVolumes.
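The recurring exec-environment pattern above (start from SERVICE and MACHINE_LOGS, then append one variable per flag) can be summarised in a small sketch; the helper name is illustrative and only the variable names come from the diff:

```go
package app

import "fmt"

// buildBackupEnv mirrors the env assembly done in each backup subcommand
// before the environment is handed to the backupbot container.
func buildBackupEnv(domain, snapshot string, includeVolumes bool) []string {
	execEnv := []string{
		fmt.Sprintf("SERVICE=%s", domain),
		"MACHINE_LOGS=true",
	}
	if snapshot != "" {
		execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
	}
	if includeVolumes {
		execEnv = append(execEnv, fmt.Sprintf("VOLUMES=%v", includeVolumes))
	}
	return execEnv
}
```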
cli/app/check.go · 111 changed lines

@@ -1,60 +1,91 @@
The command is ported from urfave/cli and logrus to cobra and coopcloud.tech/abra/pkg/log. The old appCheckCommand read <recipe>/.env.sample via config.ReadEnv, collected any keys missing from the app's .env and failed with "%s is missing %s", otherwise logging "all necessary environment variables defined". The new AppCheckCommand ("check <domain> [flags]", alias "chk", "Ensure an app is well configured") documents the behaviour in its Long text: it compares env vars in both the app ".env" and recipe ".env.sample" file; only uncommented definitions in ".env.sample" (e.g. "FOO=bar") are checked, and "check" complains if the app ".env" does not include them, while noting that recipe maintainers may or may not provide defaults (e.g. through the use of ${FOO:<default>} syntax) regardless of commenting. The Run handler validates the app, ensures the recipe, calls appPkg.CheckEnv(app), and renders the result as a lipgloss-styled table (formatter.CreateTable/PrintTable) with headers "<recipe> .env.sample" and "<app>.env" and a ✅ or ❌ row per env var. App-name shell completion moves to ValidArgsFunction, and init() registers the --chaos/-C flag ("ignore uncommitted recipes changes").
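The comparison that appPkg.CheckEnv performs is not shown here; conceptually it boils down to checking each uncommented .env.sample key against the app's env, roughly as in this sketch (type and function names are illustrative):

```go
package app

// envVarStatus pairs a sample variable name with whether the app defines it.
type envVarStatus struct {
	Name    string
	Present bool
}

// compareEnv reports, for every key in the recipe's .env.sample, whether the
// app's .env defines it; this is the data the ✅/❌ table is built from.
func compareEnv(sample, appEnv map[string]string) []envVarStatus {
	var out []envVarStatus
	for name := range sample {
		_, present := appEnv[name]
		out = append(out, envVarStatus{Name: name, Present: present})
	}
	return out
}
```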
cli/app/cmd.go · 276 changed lines

@@ -5,136 +5,214 @@ import (
Ported from urfave/cli and logrus to cobra and log. AppCmdCommand ("command <domain> [service | --local] <cmd> [[args] [flags] | [flags] -- [args]]", alias "cmd", "Run app commands") keeps the explanation that these commands are bash functions defined in the recipe's abra.sh and can run in the context of a service or locally with "--local/-l", and adds a note that when using the "--" style, flags must be passed *before* the "--" (arguments without dashes also work without "--", e.g. "foo" works, "-foo" does not). Examples cover passing <cmd> args with and without "--" and dropping the [service] argument with --local. A custom Args validator requires at least 2 arguments with --local/-l (and, when "--" is present, at most 2 before the dash via cmd.ArgsLenAtDash), otherwise at least 3; a NOTE(d1) comment records that the "abra app cmd 1312.net app test_cmd_args foo --local" case cannot be reliably validated because recipe commands take an arbitrary number of arguments. ValidArgsFunction completes app names, then service or command names depending on --local. The Run handler validates the app, ensures the recipe, rejects --local together with --user, parses trailing arguments with parseCmdArgs, and checks that app.Recipe.AbraShPath exists. With --local it ensures the command exists in abra.sh and runs "TARGET=local; APP_NAME=<name>; STACK_NAME=<stack>; <exported app env> . <abra.sh>; <cmd> [args]" through /bin/bash, falling back to /bin/sh; otherwise it verifies the target service against appPkg.GetAppServiceNames and runs the command remotely via internal.RunCmdRemote(cl, app, requestTTY, app.Recipe.AbraShPath, service, cmd, args, remoteUser). A new AppCmdListCommand ("list <domain> [flags]", alias "ls") prints the sorted command names returned by appPkg.ReadAbraShCmdNames.

@@ -156,3 +234,43 @@ func parseCmdArgs(args []string, isLocal bool) (bool, string)
A new init() registers --local/-l, --user/-u, --tty/-t and --chaos/-C on AppCmdCommand, backed by the package-level variables local, remoteUser and requestTTY.
@@ -13,7 +13,7 @@ func TestParseCmdArgs(t *testing.T) {
	}{
		// `--` is not parsed when passed in from the command-line e.g. -- foo bar baz
		// so we need to eumlate that as missing when testing if bash args are passed in
		// see https://git.coopcloud.tech/coop-cloud/organising/issues/336 for more
		// see https://git.coopcloud.tech/toolshed/organising/issues/336 for more
		{[]string{"foo.com", "app", "test"}, false, ""},
		{[]string{"foo.com", "app", "test", "foo"}, true, "foo "},
		{[]string{"foo.com", "app", "test", "foo", "bar", "baz"}, true, "foo bar baz "},
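A sketch of parseCmdArgs that satisfies the table above (the real implementation lives in cli/app/cmd.go and may differ in detail): everything after <domain> [<service>] <command> is joined into a trailing-space-separated string, with the <service> slot skipped when --local is used.

```go
package app

func parseCmdArgs(args []string, isLocal bool) (bool, string) {
	// <domain> <service> <command> [args...] normally;
	// <domain> <command> [args...] with --local.
	offset := 3
	if isLocal {
		offset = 2
	}

	if len(args) <= offset {
		return false, "" // no command arguments passed
	}

	var parsed string
	for _, arg := range args[offset:] {
		parsed += arg + " "
	}

	return true, parsed
}
```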
App config command (package app), rewritten from urfave/cli to cobra

@@ -1,65 +1,57 @@
AppConfigCommand ("config <domain> [flags]", alias "cfg", "Edit app config", example "abra config 1312.net") now takes exactly one argument with app-name completion instead of manually checking c.Args(). It loads app files via appPkg.LoadAppFiles (previously config.LoadAppFiles), fails with "cannot find app with name %s" if the app is unknown, and, when $EDITOR is unset, prompts with survey ("which editor do you wish to use?") for one of vi, vim, nvim, nano, pico or emacs before exec-ing the chosen editor on the app file path with stdin, stdout and stderr attached. logrus.Fatal calls become log.Fatal, and the exec.Command variable is renamed from cmd to c so it no longer shadows the cobra command parameter.
443 cli/app/cp.go
@ -2,155 +2,382 @@ package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/container"
|
||||
containerPkg "coopcloud.tech/abra/pkg/container"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/upstream/container"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var appCpCommand = cli.Command{
|
||||
Name: "cp",
|
||||
Aliases: []string{"c"},
|
||||
ArgsUsage: "<domain> <src> <dst>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.OfflineFlag,
|
||||
var AppCpCommand = &cobra.Command{
|
||||
Use: "cp <domain> <src> <dst> [flags]",
|
||||
Aliases: []string{"c"},
|
||||
Short: "Copy files to/from a deployed app service",
|
||||
Example: ` # copy myfile.txt to the root of the app service
|
||||
abra app cp 1312.net myfile.txt app:/
|
||||
|
||||
# copy that file back to your current working directory locally
|
||||
abra app cp 1312.net app:/myfile.txt`,
|
||||
Args: cobra.ExactArgs(3),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.AppNameComplete()
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Usage: "Copy files to/from a running app service",
|
||||
Description: `
|
||||
Copy files to and from any app service file system.
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
If you want to copy a myfile.txt to the root of the app service:
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
abra app cp <domain> myfile.txt app:/
|
||||
|
||||
And if you want to copy that file back to your current working directory locally:
|
||||
|
||||
abra app cp <domain> app:/myfile.txt .
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
src := args[1]
|
||||
dst := args[2]
|
||||
srcPath, dstPath, service, toContainer, err := parseSrcAndDst(src, dst)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
src := c.Args().Get(1)
|
||||
dst := c.Args().Get(2)
|
||||
if src == "" {
|
||||
logrus.Fatal("missing <src> argument")
|
||||
} else if dst == "" {
|
||||
logrus.Fatal("missing <dest> argument")
|
||||
container, err := containerPkg.GetContainerFromStackAndService(cl, app.StackName(), service)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Debugf("retrieved %s as target container on %s", formatter.ShortenID(container.ID), app.Server)
|
||||
|
||||
parsedSrc := strings.SplitN(src, ":", 2)
|
||||
parsedDst := strings.SplitN(dst, ":", 2)
|
||||
errorMsg := "one of <src>/<dest> arguments must take $SERVICE:$PATH form"
|
||||
if len(parsedSrc) == 2 && len(parsedDst) == 2 {
|
||||
logrus.Fatal(errorMsg)
|
||||
} else if len(parsedSrc) != 2 {
|
||||
if len(parsedDst) != 2 {
|
||||
logrus.Fatal(errorMsg)
|
||||
}
|
||||
} else if len(parsedDst) != 2 {
|
||||
if len(parsedSrc) != 2 {
|
||||
logrus.Fatal(errorMsg)
|
||||
}
|
||||
if toContainer {
|
||||
err = CopyToContainer(cl, container.ID, srcPath, dstPath)
|
||||
} else {
|
||||
err = CopyFromContainer(cl, container.ID, srcPath, dstPath)
|
||||
}
|
||||
|
||||
var service string
|
||||
var srcPath string
|
||||
var dstPath string
|
||||
isToContainer := false // <container:src> <dst>
|
||||
if len(parsedSrc) == 2 {
|
||||
service = parsedSrc[0]
|
||||
srcPath = parsedSrc[1]
|
||||
dstPath = dst
|
||||
logrus.Debugf("assuming transfer is coming FROM the container")
|
||||
} else if len(parsedDst) == 2 {
|
||||
service = parsedDst[0]
|
||||
dstPath = parsedDst[1]
|
||||
srcPath = src
|
||||
isToContainer = true // <src> <container:dst>
|
||||
logrus.Debugf("assuming transfer is going TO the container")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !isToContainer {
|
||||
if _, err := os.Stat(dstPath); os.IsNotExist(err) {
|
||||
logrus.Fatalf("%s does not exist locally?", dstPath)
|
||||
}
|
||||
}
|
||||
|
||||
if err := configureAndCp(c, cl, app, srcPath, dstPath, service, isToContainer); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
}
|
||||
|
||||
func configureAndCp(
|
||||
c *cli.Context,
|
||||
cl *dockerClient.Client,
|
||||
app config.App,
|
||||
srcPath string,
|
||||
dstPath string,
|
||||
service string,
|
||||
isToContainer bool) error {
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service))
|
||||
var errServiceMissing = errors.New("one of <src>/<dest> arguments must take $SERVICE:$PATH form")
|
||||
|
||||
container, err := container.GetContainer(context.Background(), cl, filters, internal.NoInput)
|
||||
// parseSrcAndDst parses src and dest string. One of src or dst must be of the form $SERVICE:$PATH
|
||||
func parseSrcAndDst(src, dst string) (srcPath string, dstPath string, service string, toContainer bool, err error) {
|
||||
parsedSrc := strings.SplitN(src, ":", 2)
|
||||
parsedDst := strings.SplitN(dst, ":", 2)
|
||||
if len(parsedSrc)+len(parsedDst) != 3 {
|
||||
return "", "", "", false, errServiceMissing
|
||||
}
|
||||
if len(parsedSrc) == 2 {
|
||||
return parsedSrc[1], dst, parsedSrc[0], false, nil
|
||||
}
|
||||
if len(parsedDst) == 2 {
|
||||
return src, parsedDst[1], parsedDst[0], true, nil
|
||||
}
|
||||
return "", "", "", false, errServiceMissing
|
||||
}
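A quick usage sketch of the parsing rules above (hypothetical values, assuming parseSrcAndDst as shown in this diff; not part of the upstream change):

	// Copy a local file into the "app" service: only dst takes the $SERVICE:$PATH form.
	srcPath, dstPath, service, toContainer, err := parseSrcAndDst("myfile.txt", "app:/tmp")
	// srcPath == "myfile.txt", dstPath == "/tmp", service == "app", toContainer == true, err == nil

	// Neither argument names a service, so the call returns errServiceMissing.
	_, _, _, _, err = parseSrcAndDst("myfile.txt", "backup.txt")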
|
||||
|
||||
// CopyToContainer copies a file or directory from the local file system to the container.
|
||||
// See the possible copy modes and their documentation.
|
||||
func CopyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
|
||||
srcStat, err := os.Stat(srcPath)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
return fmt.Errorf("local %s ", err)
|
||||
}
|
||||
|
||||
logrus.Debugf("retrieved %s as target container on %s", formatter.ShortenID(container.ID), app.Server)
|
||||
|
||||
if isToContainer {
|
||||
if _, err := os.Stat(srcPath); err != nil {
|
||||
logrus.Fatalf("%s does not exist?", srcPath)
|
||||
dstStat, err := cl.ContainerStatPath(context.Background(), containerID, dstPath)
|
||||
dstExists := true
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
dstExists = false
|
||||
} else {
|
||||
return fmt.Errorf("remote path: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
toTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
|
||||
content, err := archive.TarWithOptions(srcPath, toTarOpts)
|
||||
mode, err := copyMode(srcPath, dstPath, srcStat.Mode(), dstStat.Mode, dstExists)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
movePath := ""
|
||||
switch mode {
|
||||
case CopyModeDirToDir:
|
||||
// Add the src directory to the destination path
|
||||
_, srcDir := path.Split(srcPath)
|
||||
dstPath = path.Join(dstPath, srcDir)
|
||||
|
||||
// Make sure the dst directory exists.
|
||||
dcli, err := command.NewDockerCli()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
return err
|
||||
}
|
||||
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
|
||||
AttachStderr: true,
|
||||
AttachStdin: true,
|
||||
AttachStdout: true,
|
||||
Cmd: []string{"mkdir", "-p", dstPath},
|
||||
Detach: false,
|
||||
Tty: true,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("create remote directory: %s", err)
|
||||
}
|
||||
case CopyModeFileToFile:
|
||||
// Remove the file component from the path, since docker can only copy
|
||||
// to a directory.
|
||||
dstPath, _ = path.Split(dstPath)
|
||||
case CopyModeFileToFileRename:
|
||||
// Copy the file to the temp directory and move it to its dstPath
|
||||
// afterwards.
|
||||
movePath = dstPath
|
||||
dstPath = "/tmp"
|
||||
}
|
||||
|
||||
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
|
||||
if err := cl.CopyToContainer(context.Background(), container.ID, dstPath, content, copyOpts); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
content, _, err := cl.CopyFromContainer(context.Background(), container.ID, srcPath)
|
||||
toTarOpts := &archive.TarOptions{IncludeSourceDir: true, NoOverwriteDirNonDir: true, Compression: archive.Gzip}
|
||||
content, err := archive.TarWithOptions(srcPath, toTarOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("copy %s from local to %s on container", srcPath, dstPath)
|
||||
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
|
||||
if err := cl.CopyToContainer(context.Background(), containerID, dstPath, content, copyOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if movePath != "" {
|
||||
_, srcFile := path.Split(srcPath)
|
||||
dcli, err := command.NewDockerCli()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
return err
|
||||
}
|
||||
defer content.Close()
|
||||
fromTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
|
||||
if err := archive.Untar(content, dstPath, fromTarOpts); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
|
||||
AttachStderr: true,
|
||||
AttachStdin: true,
|
||||
AttachStdout: true,
|
||||
Cmd: []string{"mv", path.Join("/tmp", srcFile), movePath},
|
||||
Detach: false,
|
||||
Tty: true,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("create remote directory: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CopyFromContainer copies a file or directory from the given container to the local file system.
|
||||
// See the possible copy modes and their documentation.
|
||||
func CopyFromContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
|
||||
srcStat, err := cl.ContainerStatPath(context.Background(), containerID, srcPath)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
return fmt.Errorf("remote: %s does not exist", srcPath)
|
||||
} else {
|
||||
return fmt.Errorf("remote path: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
dstStat, err := os.Stat(dstPath)
|
||||
dstExists := true
|
||||
var dstMode os.FileMode
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
dstExists = false
|
||||
} else {
|
||||
return fmt.Errorf("remote path: %s", err)
|
||||
}
|
||||
} else {
|
||||
dstMode = dstStat.Mode()
|
||||
}
|
||||
|
||||
mode, err := copyMode(srcPath, dstPath, srcStat.Mode, dstMode, dstExists)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
moveDstDir := ""
|
||||
moveDstFile := ""
|
||||
switch mode {
|
||||
case CopyModeFileToFile:
|
||||
// Remove the file component from the path, since docker can only copy
|
||||
// to a directory.
|
||||
dstPath, _ = path.Split(dstPath)
|
||||
case CopyModeFileToFileRename:
|
||||
// Copy the file to the temp directory and move it to its dstPath
|
||||
// afterwards.
|
||||
moveDstFile = dstPath
|
||||
dstPath = "/tmp"
|
||||
case CopyModeFilesToDir:
|
||||
// Copy the directory to the temp directory and move it to its
|
||||
// dstPath afterwards.
|
||||
moveDstDir = path.Join(dstPath, "/")
|
||||
dstPath = "/tmp"
|
||||
|
||||
// Make sure the temp directory always gets removed
|
||||
defer os.Remove(path.Join("/tmp"))
|
||||
}
|
||||
|
||||
content, _, err := cl.CopyFromContainer(context.Background(), containerID, srcPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("copy: %s", err)
|
||||
}
|
||||
defer content.Close()
|
||||
if err := archive.Untar(content, dstPath, &archive.TarOptions{
|
||||
NoOverwriteDirNonDir: true,
|
||||
Compression: archive.Gzip,
|
||||
NoLchown: true,
|
||||
}); err != nil {
|
||||
return fmt.Errorf("untar: %s", err)
|
||||
}
|
||||
|
||||
if moveDstFile != "" {
|
||||
_, srcFile := path.Split(strings.TrimSuffix(srcPath, "/"))
|
||||
if err := moveFile(path.Join("/tmp", srcFile), moveDstFile); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if moveDstDir != "" {
|
||||
_, srcDir := path.Split(strings.TrimSuffix(srcPath, "/"))
|
||||
if err := moveDir(path.Join("/tmp", srcDir), moveDstDir); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
ErrCopyDirToFile = fmt.Errorf("can't copy dir to file")
|
||||
ErrDstDirNotExist = fmt.Errorf("destination directory does not exist")
|
||||
)
|
||||
|
||||
type CopyMode int
|
||||
|
||||
const (
|
||||
// Copy a src file to a dest file. The src and dest file names are the same.
|
||||
// <dir_src>/<file> + <dir_dst>/<file> -> <dir_dst>/<file>
|
||||
CopyModeFileToFile = CopyMode(iota)
|
||||
// Copy a src file to a dest file. The src and dest file names are not the same.
|
||||
// <dir_src>/<file_src> + <dir_dst>/<file_dst> -> <dir_dst>/<file_dst>
|
||||
CopyModeFileToFileRename
|
||||
// Copy a src file to dest directory. The dest file gets created in the dest
|
||||
// folder with the src filename.
|
||||
// <dir_src>/<file> + <dir_dst> -> <dir_dst>/<file>
|
||||
CopyModeFileToDir
|
||||
// Copy a src directory to dest directory.
|
||||
// <dir_src> + <dir_dst> -> <dir_dst>/<dir_src>
|
||||
CopyModeDirToDir
|
||||
// Copy all files in the src directory to the dest directory. This works recursively.
|
||||
// <dir_src>/ + <dir_dst> -> <dir_dst>/<files_from_dir_src>
|
||||
CopyModeFilesToDir
|
||||
)
|
||||
|
||||
// copyMode takes a src and dest path and file mode to determine the copy mode.
|
||||
// See the possible copy modes and their documentation.
|
||||
func copyMode(srcPath, dstPath string, srcMode os.FileMode, dstMode os.FileMode, dstExists bool) (CopyMode, error) {
|
||||
_, srcFile := path.Split(srcPath)
|
||||
_, dstFile := path.Split(dstPath)
|
||||
if srcMode.IsDir() {
|
||||
if !dstExists {
|
||||
return -1, ErrDstDirNotExist
|
||||
}
|
||||
if dstMode.IsDir() {
|
||||
if strings.HasSuffix(srcPath, "/") {
|
||||
return CopyModeFilesToDir, nil
|
||||
}
|
||||
return CopyModeDirToDir, nil
|
||||
}
|
||||
return -1, ErrCopyDirToFile
|
||||
}
|
||||
|
||||
if dstMode.IsDir() {
|
||||
return CopyModeFileToDir, nil
|
||||
}
|
||||
|
||||
if srcFile != dstFile {
|
||||
return CopyModeFileToFileRename, nil
|
||||
}
|
||||
|
||||
return CopyModeFileToFile, nil
|
||||
}
|
||||
|
||||
// moveDir moves all files from a source path to the destination path recursively.
|
||||
func moveDir(sourcePath, destPath string) error {
|
||||
return filepath.Walk(sourcePath, func(p string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newPath := path.Join(destPath, strings.TrimPrefix(p, sourcePath))
|
||||
if info.IsDir() {
|
||||
err := os.Mkdir(newPath, info.Mode())
|
||||
if err != nil {
|
||||
if os.IsExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
if info.Mode().IsRegular() {
|
||||
return moveFile(p, newPath)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// moveFile moves a file from a source path to a destination path.
|
||||
func moveFile(sourcePath, destPath string) error {
|
||||
inputFile, err := os.Open(sourcePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
outputFile, err := os.Create(destPath)
|
||||
if err != nil {
|
||||
inputFile.Close()
|
||||
return err
|
||||
}
|
||||
defer outputFile.Close()
|
||||
_, err = io.Copy(outputFile, inputFile)
|
||||
inputFile.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove file after successful copy.
|
||||
err = os.Remove(sourcePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
AppCpCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
}
|
||||
|
113 cli/app/cp_test.go (new file)
@ -0,0 +1,113 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
tests := []struct {
|
||||
src string
|
||||
dst string
|
||||
srcPath string
|
||||
dstPath string
|
||||
service string
|
||||
toContainer bool
|
||||
err error
|
||||
}{
|
||||
{src: "foo", dst: "bar", err: errServiceMissing},
|
||||
{src: "app:foo", dst: "app:bar", err: errServiceMissing},
|
||||
{src: "app:foo", dst: "bar", srcPath: "foo", dstPath: "bar", service: "app", toContainer: false},
|
||||
{src: "foo", dst: "app:bar", srcPath: "foo", dstPath: "bar", service: "app", toContainer: true},
|
||||
}
|
||||
|
||||
for i, tc := range tests {
|
||||
srcPath, dstPath, service, toContainer, err := parseSrcAndDst(tc.src, tc.dst)
|
||||
if srcPath != tc.srcPath {
|
||||
t.Errorf("[%d] srcPath: want (%s), got(%s)", i, tc.srcPath, srcPath)
|
||||
}
|
||||
if dstPath != tc.dstPath {
|
||||
t.Errorf("[%d] dstPath: want (%s), got(%s)", i, tc.dstPath, dstPath)
|
||||
}
|
||||
if service != tc.service {
|
||||
t.Errorf("[%d] service: want (%s), got(%s)", i, tc.service, service)
|
||||
}
|
||||
if toContainer != tc.toContainer {
t.Errorf("[%d] toContainer: want (%t), got(%t)", i, tc.toContainer, toContainer)
}
if tc.err != nil && (err == nil || err.Error() != tc.err.Error()) {
t.Errorf("[%d] err: want (%s), got(%s)", i, tc.err, err)
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyMode(t *testing.T) {
|
||||
tests := []struct {
|
||||
srcPath string
|
||||
dstPath string
|
||||
srcMode os.FileMode
|
||||
dstMode os.FileMode
|
||||
dstExists bool
|
||||
mode CopyMode
|
||||
err error
|
||||
}{
|
||||
{
|
||||
srcPath: "foo.txt",
|
||||
dstPath: "foo.txt",
|
||||
srcMode: os.ModePerm,
|
||||
dstMode: os.ModePerm,
|
||||
dstExists: true,
|
||||
mode: CopyModeFileToFile,
|
||||
},
|
||||
{
|
||||
srcPath: "foo.txt",
|
||||
dstPath: "bar.txt",
|
||||
srcMode: os.ModePerm,
|
||||
dstExists: true,
|
||||
mode: CopyModeFileToFileRename,
|
||||
},
|
||||
{
|
||||
srcPath: "foo",
|
||||
dstPath: "foo",
|
||||
srcMode: os.ModeDir,
|
||||
dstMode: os.ModeDir,
|
||||
dstExists: true,
|
||||
mode: CopyModeDirToDir,
|
||||
},
|
||||
{
|
||||
srcPath: "foo/",
|
||||
dstPath: "foo",
|
||||
srcMode: os.ModeDir,
|
||||
dstMode: os.ModeDir,
|
||||
dstExists: true,
|
||||
mode: CopyModeFilesToDir,
|
||||
},
|
||||
{
|
||||
srcPath: "foo",
|
||||
dstPath: "foo",
|
||||
srcMode: os.ModeDir,
|
||||
dstExists: false,
|
||||
mode: -1,
|
||||
err: ErrDstDirNotExist,
|
||||
},
|
||||
{
|
||||
srcPath: "foo",
|
||||
dstPath: "foo",
|
||||
srcMode: os.ModeDir,
|
||||
dstMode: os.ModePerm,
|
||||
dstExists: true,
|
||||
mode: -1,
|
||||
err: ErrCopyDirToFile,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range tests {
|
||||
mode, err := copyMode(tc.srcPath, tc.dstPath, tc.srcMode, tc.dstMode, tc.dstExists)
|
||||
if mode != tc.mode {
|
||||
t.Errorf("[%d] mode: want (%d), got(%d)", i, tc.mode, mode)
|
||||
}
|
||||
if err != tc.err {
|
||||
t.Errorf("[%d] err: want (%s), got(%s)", i, tc.err, err)
|
||||
}
|
||||
}
|
||||
}
|
@ -3,375 +3,354 @@ package app
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/envfile"
|
||||
"coopcloud.tech/abra/pkg/secret"
|
||||
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/dns"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/git"
|
||||
"coopcloud.tech/abra/pkg/lint"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var appDeployCommand = cli.Command{
|
||||
Name: "deploy",
|
||||
Aliases: []string{"d"},
|
||||
Usage: "Deploy an app",
|
||||
ArgsUsage: "<domain>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.ForceFlag,
|
||||
internal.ChaosFlag,
|
||||
internal.NoDomainChecksFlag,
|
||||
internal.DontWaitConvergeFlag,
|
||||
internal.OfflineFlag,
|
||||
var AppDeployCommand = &cobra.Command{
|
||||
Use: "deploy <domain> [version] [flags]",
|
||||
Aliases: []string{"d"},
|
||||
Short: "Deploy an app",
|
||||
Long: `Deploy an app.
|
||||
|
||||
This command supports chaos operations. Use "--chaos/-C" to deploy your recipe
|
||||
checkout as-is. Recipe commit hashes are also supported as values for
|
||||
"[version]". Please note, "upgrade"/"rollback" do not support chaos operations.`,
|
||||
Example: ` # standard deployment
|
||||
abra app deploy 1312.net
|
||||
|
||||
# chaos deployment
|
||||
abra app deploy 1312.net --chaos
|
||||
|
||||
# deploy specific version
|
||||
abra app deploy 1312.net 2.0.0+1.2.3
|
||||
|
||||
# deploy a specific git hash
|
||||
abra app deploy 1312.net 886db76d`,
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.AppNameComplete()
|
||||
case 1:
|
||||
app, err := appPkg.Get(args[0])
|
||||
if err != nil {
|
||||
errMsg := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{errMsg}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
return autocomplete.RecipeVersionComplete(app.Recipe.Name)
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Description: `
|
||||
Deploy an app. It does not support incrementing the version of a deployed app,
|
||||
for this you need to look at the "abra app upgrade <domain>" command.
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var (
|
||||
deployWarnMessages []string
|
||||
toDeployVersion string
|
||||
isChaosCommit bool
|
||||
toDeployChaosVersion = config.CHAOS_DEFAULT
|
||||
)
|
||||
|
||||
You may pass "--force" to re-deploy the same version again. This can be useful
|
||||
if the container runtime has gotten into a weird state.
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
Chaos mode ("--chaos") will deploy your local checkout of a recipe as-is,
|
||||
including unstaged changes and can be useful for live hacking and testing new
|
||||
recipes.
|
||||
`,
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
stackName := app.StackName()
|
||||
if err := validateArgsAndFlags(args); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := lint.LintForErrors(app.Recipe); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !internal.Chaos {
|
||||
if err := recipe.EnsureUpToDate(app.Recipe, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
log.Debugf("checking whether %s is already deployed", app.StackName())
|
||||
|
||||
r, err := recipe.Get(app.Recipe, conf)
|
||||
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := lint.LintForErrors(r); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if deployMeta.IsDeployed && !(internal.Force || internal.Chaos) {
|
||||
log.Fatalf("%s is already deployed", app.Name)
|
||||
}
|
||||
|
||||
logrus.Debugf("checking whether %s is already deployed", stackName)
|
||||
|
||||
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
if len(args) == 2 && args[1] != "" {
|
||||
toDeployVersion = args[1]
|
||||
}
|
||||
|
||||
if isDeployed {
|
||||
if internal.Force || internal.Chaos {
|
||||
logrus.Warnf("%s is already deployed but continuing (--force/--chaos)", app.Name)
|
||||
} else {
|
||||
logrus.Fatalf("%s is already deployed", app.Name)
|
||||
}
|
||||
if !deployMeta.IsDeployed &&
|
||||
toDeployVersion == "" &&
|
||||
app.Recipe.EnvVersion != "" && !internal.IgnoreEnvVersion {
|
||||
log.Debugf("new deployment, choosing .env version: %s", app.Recipe.EnvVersion)
|
||||
toDeployVersion = app.Recipe.EnvVersion
|
||||
}
|
||||
|
||||
version := deployedVersion
|
||||
if version == "unknown" && !internal.Chaos {
|
||||
catl, err := recipe.ReadRecipeCatalogue(conf)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
versions, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
if len(versions) > 0 {
|
||||
version = versions[len(versions)-1]
|
||||
logrus.Debugf("choosing %s as version to deploy", version)
|
||||
if err := recipe.EnsureVersion(app.Recipe, version); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
head, err := git.GetRecipeHead(app.Recipe)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
version = formatter.SmallSHA(head.String())
|
||||
logrus.Warn("no versions detected, using latest commit")
|
||||
if err := recipe.EnsureLatest(app.Recipe, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if version == "unknown" && !internal.Chaos {
|
||||
logrus.Debugf("choosing %s as version to deploy", version)
|
||||
if err := recipe.EnsureVersion(app.Recipe, version); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if version != "unknown" && !internal.Chaos {
|
||||
if err := recipe.EnsureVersion(app.Recipe, version); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if !internal.Chaos && toDeployVersion == "" {
|
||||
if err := getLatestVersionOrCommit(app, &toDeployVersion); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if internal.Chaos {
|
||||
logrus.Warnf("chaos mode engaged")
|
||||
var err error
|
||||
version, err = recipe.ChaosVersion(app.Recipe)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
if err := getChaosVersion(app, &toDeployVersion, &toDeployChaosVersion); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
|
||||
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
|
||||
if !internal.Chaos {
|
||||
isChaosCommit, err = app.Recipe.EnsureVersion(toDeployVersion)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if isChaosCommit {
|
||||
log.Debugf("assuming chaos commit: %s", toDeployVersion)
|
||||
|
||||
internal.Chaos = true
|
||||
toDeployChaosVersion = toDeployVersion
|
||||
|
||||
toDeployVersion, err = app.Recipe.GetVersionLabelLocal()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := validateSecrets(cl, app); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
abraShEnv, err := envfile.ReadAbraShEnvVars(app.Recipe.AbraShPath)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
for k, v := range abraShEnv {
|
||||
app.Env[k] = v
|
||||
}
|
||||
|
||||
composeFiles, err := config.GetAppComposeFiles(app.Recipe, app.Env)
|
||||
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
stackName := app.StackName()
|
||||
deployOpts := stack.Deploy{
|
||||
Composefiles: composeFiles,
|
||||
Namespace: stackName,
|
||||
Prune: false,
|
||||
ResolveImage: stack.ResolveImageAlways,
|
||||
Detach: false,
|
||||
}
|
||||
compose, err := config.GetAppComposeConfig(app.Name, deployOpts, app.Env)
|
||||
compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
config.ExposeAllEnv(stackName, compose, app.Env)
|
||||
config.SetRecipeLabel(compose, stackName, app.Recipe)
|
||||
config.SetChaosLabel(compose, stackName, internal.Chaos)
|
||||
config.SetChaosVersionLabel(compose, stackName, version)
|
||||
config.SetUpdateLabel(compose, stackName, app.Env)
|
||||
|
||||
if err := DeployOverview(app, version, "continue with deployment?"); err != nil {
|
||||
logrus.Fatal(err)
|
||||
toDeployChaosVersionLabel := toDeployChaosVersion
|
||||
if app.Recipe.Dirty {
|
||||
toDeployChaosVersionLabel = formatter.AddDirtyMarker(toDeployChaosVersionLabel)
|
||||
}
|
||||
|
||||
appPkg.ExposeAllEnv(stackName, compose, app.Env)
|
||||
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
|
||||
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
|
||||
appPkg.SetChaosVersionLabel(compose, stackName, toDeployChaosVersionLabel)
|
||||
appPkg.SetUpdateLabel(compose, stackName, app.Env)
|
||||
|
||||
envVars, err := appPkg.CheckEnv(app)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, envVar := range envVars {
|
||||
if !envVar.Present {
|
||||
deployWarnMessages = append(deployWarnMessages,
|
||||
fmt.Sprintf("%s missing from %s.env", envVar.Name, app.Domain),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if !internal.NoDomainChecks {
|
||||
domainName, ok := app.Env["DOMAIN"]
|
||||
if ok {
|
||||
if domainName, ok := app.Env["DOMAIN"]; ok {
|
||||
if _, err = dns.EnsureDomainsResolveSameIPv4(domainName, app.Server); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
logrus.Warn("skipping domain checks as no DOMAIN=... configured for app")
|
||||
log.Debug("skipping domain checks, no DOMAIN=... configured")
|
||||
}
|
||||
} else {
|
||||
logrus.Warn("skipping domain checks as requested")
|
||||
log.Debug("skipping domain checks")
|
||||
}
|
||||
|
||||
stack.WaitTimeout, err = config.GetTimeoutFromLabel(compose, stackName)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
deployedVersion := config.NO_VERSION_DEFAULT
|
||||
if deployMeta.IsDeployed {
|
||||
deployedVersion = deployMeta.Version
|
||||
}
|
||||
logrus.Debugf("set waiting timeout to %d s", stack.WaitTimeout)
|
||||
|
||||
toWriteVersion := toDeployVersion
|
||||
if internal.Chaos || isChaosCommit {
|
||||
toWriteVersion = toDeployChaosVersion
|
||||
}
|
||||
|
||||
if err := internal.DeployOverview(
|
||||
app,
|
||||
deployWarnMessages,
|
||||
deployedVersion,
|
||||
deployMeta.ChaosVersion,
|
||||
toDeployVersion,
|
||||
toDeployChaosVersion,
|
||||
toWriteVersion,
|
||||
); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
stack.WaitTimeout, err = appPkg.GetTimeoutFromLabel(compose, stackName)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Debugf("set waiting timeout to %d second(s)", stack.WaitTimeout)
|
||||
|
||||
if err := stack.RunDeploy(cl, deployOpts, compose, app.Name, internal.DontWaitConverge); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
postDeployCmds, ok := app.Env["POST_DEPLOY_CMDS"]
|
||||
if ok && !internal.DontWaitConverge {
|
||||
logrus.Debugf("run the following post-deploy commands: %s", postDeployCmds)
|
||||
if err := PostCmds(cl, app, postDeployCmds); err != nil {
|
||||
logrus.Fatalf("attempting to run post deploy commands, saw: %s", err)
|
||||
log.Debugf("run the following post-deploy commands: %s", postDeployCmds)
|
||||
if err := internal.PostCmds(cl, app, postDeployCmds); err != nil {
|
||||
log.Fatalf("attempting to run post deploy commands, saw: %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
if err := app.WriteRecipeVersion(toWriteVersion, false); err != nil {
|
||||
log.Fatalf("writing recipe version failed: %s", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// PostCmds parses a string of commands and executes them inside of the respective services
|
||||
// the commands string must have the following format:
|
||||
// "<service> <command> <arguments>|<service> <command> <arguments>|... "
|
||||
func PostCmds(cl *dockerClient.Client, app config.App, commands string) error {
|
||||
abraSh := path.Join(config.RECIPES_DIR, app.Recipe, "abra.sh")
|
||||
if _, err := os.Stat(abraSh); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return fmt.Errorf(fmt.Sprintf("%s does not exist for %s?", abraSh, app.Name))
|
||||
}
|
||||
func getChaosVersion(app app.App, toDeployVersion, toDeployChaosVersion *string) error {
|
||||
var err error
|
||||
*toDeployChaosVersion, err = app.Recipe.ChaosVersion()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, command := range strings.Split(commands, "|") {
|
||||
commandParts := strings.Split(command, " ")
|
||||
if len(commandParts) < 2 {
|
||||
return fmt.Errorf(fmt.Sprintf("not enough arguments: %s", command))
|
||||
}
|
||||
targetServiceName := commandParts[0]
|
||||
cmdName := commandParts[1]
|
||||
parsedCmdArgs := ""
|
||||
if len(commandParts) > 2 {
|
||||
parsedCmdArgs = fmt.Sprintf("%s ", strings.Join(commandParts[2:], " "))
|
||||
}
|
||||
logrus.Infof("running post-command '%s %s' in container %s", cmdName, parsedCmdArgs, targetServiceName)
|
||||
|
||||
if err := internal.EnsureCommand(abraSh, app.Recipe, cmdName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serviceNames, err := config.GetAppServiceNames(app.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
matchingServiceName := false
|
||||
for _, serviceName := range serviceNames {
|
||||
if serviceName == targetServiceName {
|
||||
matchingServiceName = true
|
||||
}
|
||||
}
|
||||
|
||||
if !matchingServiceName {
|
||||
return fmt.Errorf(fmt.Sprintf("no service %s for %s?", targetServiceName, app.Name))
|
||||
}
|
||||
|
||||
logrus.Debugf("running command %s %s within the context of %s_%s", cmdName, parsedCmdArgs, app.StackName(), targetServiceName)
|
||||
|
||||
internal.Tty = true
|
||||
if err := internal.RunCmdRemote(cl, app, abraSh, targetServiceName, cmdName, parsedCmdArgs); err != nil {
|
||||
return err
|
||||
}
|
||||
*toDeployVersion, err = app.Recipe.GetVersionLabelLocal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeployOverview shows a deployment overview
|
||||
func DeployOverview(app config.App, version, message string) error {
|
||||
tableCol := []string{"server", "recipe", "config", "domain", "version"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
|
||||
deployConfig := "compose.yml"
|
||||
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
|
||||
deployConfig = strings.Join(strings.Split(composeFiles, ":"), "\n")
|
||||
func getLatestVersionOrCommit(app app.App, toDeployVersion *string) error {
|
||||
versions, err := app.Recipe.Tags()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
server := app.Server
|
||||
if app.Server == "default" {
|
||||
server = "local"
|
||||
}
|
||||
if len(versions) > 0 && !internal.Chaos {
|
||||
*toDeployVersion = versions[len(versions)-1]
|
||||
|
||||
table.Append([]string{server, app.Recipe, deployConfig, app.Domain, version})
|
||||
table.Render()
|
||||
log.Debugf("choosing %s as version to deploy", *toDeployVersion)
|
||||
|
||||
if _, err := app.Recipe.EnsureVersion(*toDeployVersion); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if internal.NoInput {
|
||||
return nil
|
||||
}
|
||||
|
||||
response := false
|
||||
prompt := &survey.Confirm{
|
||||
Message: message,
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, &response); err != nil {
|
||||
head, err := app.Recipe.Head()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !response {
|
||||
logrus.Fatal("exiting as requested")
|
||||
*toDeployVersion = formatter.SmallSHA(head.String())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateArgsAndFlags ensures compatible args/flags.
|
||||
func validateArgsAndFlags(args []string) error {
|
||||
if len(args) == 2 && args[1] != "" && internal.Chaos {
|
||||
return fmt.Errorf("cannot use [version] and --chaos together")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewVersionOverview shows an upgrade or downgrade overview
|
||||
func NewVersionOverview(app config.App, currentVersion, newVersion, releaseNotes string) error {
|
||||
tableCol := []string{"server", "recipe", "config", "domain", "current version", "to be deployed"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
|
||||
deployConfig := "compose.yml"
|
||||
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
|
||||
deployConfig = strings.Join(strings.Split(composeFiles, ":"), "\n")
|
||||
}
|
||||
|
||||
server := app.Server
|
||||
if app.Server == "default" {
|
||||
server = "local"
|
||||
}
|
||||
|
||||
table.Append([]string{server, app.Recipe, deployConfig, app.Domain, currentVersion, newVersion})
|
||||
table.Render()
|
||||
|
||||
if releaseNotes == "" {
|
||||
var err error
|
||||
releaseNotes, err = GetReleaseNotes(app.Recipe, newVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if releaseNotes != "" && newVersion != "" {
|
||||
fmt.Println()
|
||||
fmt.Println(fmt.Sprintf("%s release notes:\n\n%s", newVersion, releaseNotes))
|
||||
} else {
|
||||
logrus.Warnf("no release notes available for %s", newVersion)
|
||||
}
|
||||
|
||||
if internal.NoInput {
|
||||
return nil
|
||||
}
|
||||
|
||||
response := false
|
||||
prompt := &survey.Confirm{
|
||||
Message: "continue with deployment?",
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, &response); err != nil {
|
||||
func validateSecrets(cl *dockerClient.Client, app app.App) error {
|
||||
secStats, err := secret.PollSecretsStatus(cl, app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !response {
|
||||
logrus.Fatal("exiting as requested")
|
||||
for _, secStat := range secStats {
|
||||
if !secStat.CreatedOnRemote {
|
||||
return fmt.Errorf("secret not generated: %s", secStat.LocalName)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetReleaseNotes prints release notes for a recipe version
|
||||
func GetReleaseNotes(recipeName, version string) (string, error) {
|
||||
if version == "" {
|
||||
return "", nil
|
||||
}
|
||||
func init() {
|
||||
AppDeployCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
|
||||
fpath := path.Join(config.RECIPES_DIR, recipeName, "release", version)
|
||||
AppDeployCommand.Flags().BoolVarP(
|
||||
&internal.Force,
|
||||
"force",
|
||||
"f",
|
||||
false,
|
||||
"perform action without further prompt",
|
||||
)
|
||||
|
||||
if _, err := os.Stat(fpath); !os.IsNotExist(err) {
|
||||
releaseNotes, err := ioutil.ReadFile(fpath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(releaseNotes), nil
|
||||
}
|
||||
AppDeployCommand.Flags().BoolVarP(
|
||||
&internal.NoDomainChecks,
|
||||
"no-domain-checks",
|
||||
"D",
|
||||
false,
|
||||
"disable public DNS checks",
|
||||
)
|
||||
|
||||
return "", nil
|
||||
AppDeployCommand.Flags().BoolVarP(
|
||||
&internal.DontWaitConverge,
|
||||
"no-converge-checks",
|
||||
"c",
|
||||
false,
|
||||
"disable converge logic checks",
|
||||
)
|
||||
}
|
||||
|
43 cli/app/env.go (new file)
@ -0,0 +1,43 @@
package app

import (
	"fmt"
	"sort"

	"coopcloud.tech/abra/cli/internal"
	"coopcloud.tech/abra/pkg/autocomplete"
	"coopcloud.tech/abra/pkg/formatter"
	"github.com/spf13/cobra"
)

var AppEnvCommand = &cobra.Command{
	Use:     "env <domain> [flags]",
	Aliases: []string{"e"},
	Short:   "Show app .env values",
	Example: " abra app env 1312.net",
	Args:    cobra.ExactArgs(1),
	ValidArgsFunction: func(
		cmd *cobra.Command,
		args []string,
		toComplete string) ([]string, cobra.ShellCompDirective) {
		return autocomplete.AppNameComplete()
	},
	Run: func(cmd *cobra.Command, args []string) {
		app := internal.ValidateApp(args)

		var envKeys []string
		for k := range app.Env {
			envKeys = append(envKeys, k)
		}

		sort.Strings(envKeys)

		var rows [][]string
		for _, k := range envKeys {
			rows = append(rows, []string{k, app.Env[k]})
		}

		overview := formatter.CreateOverview("ENV OVERVIEW", rows)
		fmt.Println(overview)
	},
}
@ -1,146 +0,0 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
stack "coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
var appErrorsCommand = cli.Command{
|
||||
Name: "errors",
|
||||
Usage: "List errors for a deployed app",
|
||||
ArgsUsage: "<domain>",
|
||||
Description: `
|
||||
List errors for a deployed app.
|
||||
|
||||
This is a best-effort implementation and an attempt to gather a number of tips
|
||||
& tricks for finding errors together into one convenient command. When an app
|
||||
is failing to deploy or having issues, it could be a lot of things.
|
||||
|
||||
This command currently takes into account:
|
||||
|
||||
Is the service deployed?
|
||||
Is the service killed by an OOM error?
|
||||
Is the service reporting an error (like in "ps --no-trunc" output)
|
||||
Is the service healthcheck failing? what are the healthcheck logs?
|
||||
|
||||
Got any more ideas? Please let us know:
|
||||
|
||||
https://git.coopcloud.tech/coop-cloud/organising/issues/new/choose
|
||||
|
||||
This command is best accompanied by "abra app logs <domain>" which may reveal
|
||||
further information which can help you debug the cause of an app failure via
|
||||
the logs.
|
||||
`,
|
||||
Aliases: []string{"e"},
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.WatchFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if !isDeployed {
|
||||
logrus.Fatalf("%s is not deployed?", app.Name)
|
||||
}
|
||||
|
||||
if !internal.Watch {
|
||||
if err := checkErrors(c, cl, app, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
for {
|
||||
if err := checkErrors(c, cl, app, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func checkErrors(c *cli.Context, cl *dockerClient.Client, app config.App, conf *runtime.Config) error {
|
||||
recipe, err := recipe.Get(app.Recipe, conf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, service := range recipe.Config.Services {
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name))
|
||||
|
||||
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(containers) == 0 {
|
||||
logrus.Warnf("%s is not up, something seems wrong", service.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
container := containers[0]
|
||||
containerState, err := cl.ContainerInspect(context.Background(), container.ID)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if containerState.State.OOMKilled {
|
||||
logrus.Warnf("%s has been killed due to an out of memory error", service.Name)
|
||||
}
|
||||
|
||||
if containerState.State.Error != "" {
|
||||
logrus.Warnf("%s reports this error: %s", service.Name, containerState.State.Error)
|
||||
}
|
||||
|
||||
if containerState.State.Health != nil {
|
||||
if containerState.State.Health.Status != "healthy" {
|
||||
logrus.Warnf("%s healthcheck status is %s", service.Name, containerState.State.Health.Status)
|
||||
logrus.Warnf("%s healthcheck has failed %s times", service.Name, strconv.Itoa(containerState.State.Health.FailingStreak))
|
||||
for _, log := range containerState.State.Health.Log {
|
||||
logrus.Warnf("%s healthcheck logs: %s", service.Name, strings.TrimSpace(log.Output))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getServiceName(names []string) string {
|
||||
containerName := strings.Join(names, " ")
|
||||
trimmed := strings.TrimPrefix(containerName, "/")
|
||||
return strings.Split(trimmed, ".")[0]
|
||||
}
|
139 cli/app/labels.go (new file)
@ -0,0 +1,139 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/upstream/convert"
|
||||
composetypes "github.com/docker/cli/cli/compose/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var AppLabelsCommand = &cobra.Command{
|
||||
Use: "labels <domain> [flags]",
|
||||
Aliases: []string{"lb"},
|
||||
Short: "Show deployment labels",
|
||||
Long: "Both local recipe and live deployment labels are shown.",
|
||||
Example: " abra app labels 1312.net",
|
||||
Args: cobra.ExactArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.AppNameComplete()
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
remoteLabels, err := getLabels(cl, app.StackName())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
rows := [][]string{
|
||||
{"DEPLOYED LABELS", "---"},
|
||||
}
|
||||
|
||||
remoteLabelKeys := make([]string, 0, len(remoteLabels))
|
||||
for k := range remoteLabels {
|
||||
remoteLabelKeys = append(remoteLabelKeys, k)
|
||||
}
|
||||
|
||||
sort.Strings(remoteLabelKeys)
|
||||
|
||||
for _, k := range remoteLabelKeys {
|
||||
rows = append(rows, []string{
|
||||
k,
|
||||
remoteLabels[k],
|
||||
})
|
||||
}
|
||||
|
||||
if len(remoteLabelKeys) == 0 {
|
||||
rows = append(rows, []string{"unknown"})
|
||||
}
|
||||
|
||||
rows = append(rows, []string{"RECIPE LABELS", "---"})
|
||||
|
||||
config, err := app.Recipe.GetComposeConfig(app.Env)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var localLabelKeys []string
|
||||
var appServiceConfig composetypes.ServiceConfig
|
||||
for _, service := range config.Services {
|
||||
if service.Name == "app" {
|
||||
appServiceConfig = service
|
||||
|
||||
for k := range service.Deploy.Labels {
|
||||
localLabelKeys = append(localLabelKeys, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(localLabelKeys)
|
||||
|
||||
for _, k := range localLabelKeys {
|
||||
rows = append(rows, []string{
|
||||
k,
|
||||
appServiceConfig.Deploy.Labels[k],
|
||||
})
|
||||
}
|
||||
|
||||
overview := formatter.CreateOverview("LABELS OVERVIEW", rows)
|
||||
fmt.Println(overview)
|
||||
},
|
||||
}
|
||||
|
||||
// getLabels reads docker labels from running services in the format of "coop-cloud.${STACK_NAME}.${LABEL}".
|
||||
func getLabels(cl *dockerClient.Client, stackName string) (map[string]string, error) {
|
||||
labels := make(map[string]string)
|
||||
|
||||
filter := filters.NewArgs()
|
||||
filter.Add("label", fmt.Sprintf("%s=%s", convert.LabelNamespace, stackName))
|
||||
|
||||
services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: filter})
|
||||
if err != nil {
|
||||
return labels, err
|
||||
}
|
||||
|
||||
for _, service := range services {
|
||||
if service.Spec.Name != fmt.Sprintf("%s_app", stackName) {
|
||||
continue
|
||||
}
|
||||
|
||||
for k, v := range service.Spec.Labels {
|
||||
labels[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
AppLabelsCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
}
|
224 cli/app/list.go
@ -8,38 +8,14 @@ import (
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/tagcmp"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var status bool
|
||||
var statusFlag = &cli.BoolFlag{
|
||||
Name: "status, S",
|
||||
Usage: "Show app deployment status",
|
||||
Destination: &status,
|
||||
}
|
||||
|
||||
var recipeFilter string
|
||||
var recipeFlag = &cli.StringFlag{
|
||||
Name: "recipe, r",
|
||||
Value: "",
|
||||
Usage: "Show apps of a specific recipe",
|
||||
Destination: &recipeFilter,
|
||||
}
|
||||
|
||||
var listAppServer string
|
||||
var listAppServerFlag = &cli.StringFlag{
|
||||
Name: "server, s",
|
||||
Value: "",
|
||||
Usage: "Show apps of a specific server",
|
||||
Destination: &listAppServer,
|
||||
}
|
||||
|
||||
type appStatus struct {
|
||||
Server string `json:"server"`
|
||||
Recipe string `json:"recipe"`
|
||||
@ -62,44 +38,36 @@ type serverStatus struct {
|
||||
UpgradeCount int `json:"upgradeCount"`
|
||||
}
|
||||
|
||||
var appListCommand = cli.Command{
|
||||
Name: "list",
|
||||
var AppListCommand = &cobra.Command{
|
||||
Use: "list [flags]",
|
||||
Aliases: []string{"ls"},
|
||||
Usage: "List all managed apps",
|
||||
Description: `
|
||||
Read the local file system listing of apps and servers (e.g. ~/.abra/) to
|
||||
generate a report of all your apps.
|
||||
Short: "List all managed apps",
|
||||
Long: `Generate a report of all managed apps.
|
||||
|
||||
By passing the "--status/-S" flag, you can query all your servers for the
|
||||
actual live deployment status. Depending on how many servers you manage, this
|
||||
can take some time.
|
||||
`,
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.MachineReadableFlag,
|
||||
statusFlag,
|
||||
listAppServerFlag,
|
||||
recipeFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
Use "--status/-S" flag to query all servers for the live deployment status.`,
|
||||
Example: ` # list apps of all servers without live status
|
||||
abra app ls
|
||||
|
||||
appFiles, err := config.LoadAppFiles(listAppServer)
|
||||
# list apps of a specific server with live status
|
||||
abra app ls -s 1312.net -S
|
||||
|
||||
# list apps of all servers which match a specific recipe
|
||||
abra app ls -r gitea`,
|
||||
Args: cobra.NoArgs,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
appFiles, err := appPkg.LoadAppFiles(listAppServer)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
apps, err := config.GetApps(appFiles, recipeFilter)
|
||||
apps, err := appPkg.GetApps(appFiles, recipeFilter)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
sort.Sort(config.ByServerAndRecipe(apps))
|
||||
sort.Sort(appPkg.ByServerAndRecipe(apps))
|
||||
|
||||
statuses := make(map[string]map[string]string)
|
||||
var catl recipe.RecipeCatalogue
|
||||
if status {
|
||||
alreadySeen := make(map[string]bool)
|
||||
for _, app := range apps {
|
||||
@ -108,14 +76,9 @@ can take some time.
|
||||
}
|
||||
}
|
||||
|
||||
statuses, err = config.GetAppStatuses(apps, internal.MachineReadable)
|
||||
statuses, err = appPkg.GetAppStatuses(apps, internal.MachineReadable)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
catl, err = recipe.ReadRecipeCatalogue(conf)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -133,7 +96,7 @@ can take some time.
|
||||
}
|
||||
}
|
||||
|
||||
if app.Recipe == recipeFilter || recipeFilter == "" {
|
||||
if app.Recipe.Name == recipeFilter || recipeFilter == "" {
|
||||
if recipeFilter != "" {
|
||||
// only count server if matches filter
|
||||
totalServersCount++
|
||||
@ -180,20 +143,20 @@ can take some time.
|
||||
|
||||
var newUpdates []string
|
||||
if version != "unknown" {
|
||||
updates, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
|
||||
updates, err := app.Recipe.Tags()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
parsedVersion, err := tagcmp.Parse(version)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, update := range updates {
|
||||
parsedUpdate, err := tagcmp.Parse(update)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if update != version && parsedUpdate.IsGreaterThan(parsedVersion) {
|
||||
@ -210,14 +173,14 @@ can take some time.
|
||||
stats.LatestCount++
|
||||
}
|
||||
} else {
|
||||
newUpdates = internal.ReverseStringList(newUpdates)
|
||||
newUpdates = internal.SortVersionsDesc(newUpdates)
|
||||
appStats.Upgrade = strings.Join(newUpdates, "\n")
|
||||
stats.UpgradeCount++
|
||||
}
|
||||
}
|
||||
|
||||
appStats.Server = app.Server
|
||||
appStats.Recipe = app.Recipe
|
||||
appStats.Recipe = app.Recipe.Name
|
||||
appStats.AppName = app.Name
|
||||
appStats.Domain = app.Domain
|
||||
|
||||
@ -229,11 +192,12 @@ can take some time.
|
||||
if internal.MachineReadable {
|
||||
jsonstring, err := json.Marshal(allStats)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
} else {
|
||||
fmt.Println(string(jsonstring))
|
||||
}
|
||||
return nil
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
alreadySeen := make(map[string]bool)
|
||||
@ -244,60 +208,118 @@ can take some time.
|
||||
|
||||
serverStat := allStats[app.Server]
|
||||
|
||||
tableCol := []string{"recipe", "domain"}
|
||||
headers := []string{"RECIPE", "DOMAIN", "SERVER"}
|
||||
if status {
|
||||
tableCol = append(tableCol, []string{"status", "chaos", "version", "upgrade", "autoupdate"}...)
|
||||
headers = append(headers, []string{
|
||||
"STATUS",
|
||||
"CHAOS",
|
||||
"VERSION",
|
||||
"UPGRADE",
|
||||
"AUTOUPDATE"}...,
|
||||
)
|
||||
}
|
||||
|
||||
table := formatter.CreateTable(tableCol)
|
||||
table, err := formatter.CreateTable()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
table.Headers(headers...)
|
||||
|
||||
var rows [][]string
|
||||
for _, appStat := range serverStat.Apps {
|
||||
tableRow := []string{appStat.Recipe, appStat.Domain}
|
||||
row := []string{appStat.Recipe, appStat.Domain, appStat.Server}
|
||||
if status {
|
||||
chaosStatus := appStat.Chaos
|
||||
if chaosStatus != "unknown" {
|
||||
chaosEnabled, err := strconv.ParseBool(chaosStatus)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
if chaosEnabled && appStat.ChaosVersion != "unknown" {
|
||||
chaosStatus = appStat.ChaosVersion
|
||||
}
|
||||
}
|
||||
tableRow = append(tableRow, []string{appStat.Status, chaosStatus, appStat.Version, appStat.Upgrade, appStat.AutoUpdate}...)
|
||||
|
||||
row = append(row, []string{
|
||||
appStat.Status,
|
||||
chaosStatus,
|
||||
appStat.Version,
|
||||
appStat.Upgrade,
|
||||
appStat.AutoUpdate}...,
|
||||
)
|
||||
}
|
||||
table.Append(tableRow)
|
||||
|
||||
rows = append(rows, row)
|
||||
}
|
||||
|
||||
if table.NumLines() > 0 {
|
||||
table.Render()
|
||||
table.Rows(rows...)
|
||||
|
||||
if status {
|
||||
fmt.Println(fmt.Sprintf(
|
||||
"server: %s | total apps: %v | versioned: %v | unversioned: %v | latest: %v | upgrade: %v",
|
||||
app.Server,
|
||||
serverStat.AppCount,
|
||||
serverStat.VersionCount,
|
||||
serverStat.UnversionedCount,
|
||||
serverStat.LatestCount,
|
||||
serverStat.UpgradeCount,
|
||||
))
|
||||
} else {
|
||||
fmt.Println(fmt.Sprintf("server: %s | total apps: %v", app.Server, serverStat.AppCount))
|
||||
if len(rows) > 0 {
|
||||
if err := formatter.PrintTable(table); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(allStats) > 1 && table.NumLines() > 0 {
|
||||
fmt.Println() // newline separator for multiple servers
|
||||
if len(allStats) > 1 && len(rows) > 0 {
|
||||
fmt.Println() // newline separator for multiple servers
|
||||
}
|
||||
}
|
||||
|
||||
alreadySeen[app.Server] = true
|
||||
}
|
||||
|
||||
if len(allStats) > 1 {
|
||||
fmt.Println(fmt.Sprintf("total servers: %v | total apps: %v ", totalServersCount, totalAppsCount))
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
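The upgrade detection above leans on tagcmp's ordering of recipe tags. A minimal, self-contained sketch of the same check, using placeholder tag strings in the coop-cloud `<recipe>+<upstream>` style rather than a real catalogue:

```go
package main

import (
	"fmt"

	"coopcloud.tech/tagcmp"
)

func main() {
	// Placeholder values; in the command above these come from the app env
	// and the recipe's git tags.
	version := "1.0.0+2.3.1"
	updates := []string{"1.0.0+2.3.1", "1.1.0+2.4.0", "0.9.0+2.2.0"}

	parsedVersion, err := tagcmp.Parse(version)
	if err != nil {
		panic(err)
	}

	// Collect only the tags that are strictly newer than the deployed version.
	var newUpdates []string
	for _, update := range updates {
		parsedUpdate, err := tagcmp.Parse(update)
		if err != nil {
			panic(err)
		}
		if update != version && parsedUpdate.IsGreaterThan(parsedVersion) {
			newUpdates = append(newUpdates, update)
		}
	}

	fmt.Println(newUpdates)
}
```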
||||
var (
|
||||
status bool
|
||||
recipeFilter string
|
||||
listAppServer string
|
||||
)
|
||||
|
||||
func init() {
|
||||
AppListCommand.Flags().BoolVarP(
|
||||
&status,
|
||||
"status",
|
||||
"S",
|
||||
false,
|
||||
"show app deployment status",
|
||||
)
|
||||
|
||||
AppListCommand.Flags().StringVarP(
|
||||
&recipeFilter,
|
||||
"recipe",
|
||||
"r",
|
||||
"",
|
||||
"show apps of a specific recipe",
|
||||
)
|
||||
|
||||
AppListCommand.RegisterFlagCompletionFunc(
|
||||
"recipe",
|
||||
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.RecipeNameComplete()
|
||||
},
|
||||
)
|
||||
|
||||
AppListCommand.Flags().BoolVarP(
|
||||
&internal.MachineReadable,
|
||||
"machine",
|
||||
"m",
|
||||
false,
|
||||
"print machine-readable output",
|
||||
)
|
||||
|
||||
AppListCommand.Flags().StringVarP(
|
||||
&listAppServer,
|
||||
"server",
|
||||
"s",
|
||||
"",
|
||||
"show apps of a specific server",
|
||||
)
|
||||
|
||||
AppListCommand.RegisterFlagCompletionFunc(
|
||||
"server",
|
||||
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.ServerNameComplete()
|
||||
},
|
||||
)
|
||||
}
|
||||
|
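Every command in this migration follows the same wiring: a package-level *cobra.Command, package-level flag variables, and an init() that binds them with StringVarP/BoolVarP. A stripped-down sketch of that shape, with hypothetical names (demoCommand, verbose) standing in for the real ones:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// verbose is a hypothetical flag variable, bound in init() the same way
// status, recipeFilter and listAppServer are bound above.
var verbose bool

// demoCommand mirrors the AppListCommand shape: Use/Short/Args/Run plus
// flags registered in init().
var demoCommand = &cobra.Command{
	Use:   "demo [flags]",
	Short: "Demonstrate the cobra wiring used in this migration",
	Args:  cobra.NoArgs,
	Run: func(cmd *cobra.Command, args []string) {
		if verbose {
			fmt.Println("verbose output enabled")
		}
		fmt.Println("hello")
	},
}

func init() {
	demoCommand.Flags().BoolVarP(&verbose, "verbose", "v", false, "print verbose output")
}

func main() {
	if err := demoCommand.Execute(); err != nil {
		fmt.Println(err)
	}
}
```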
222 cli/app/logs.go
@ -5,133 +5,167 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/service"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"github.com/docker/docker/api/types"
|
||||
containerTypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var logOpts = types.ContainerLogsOptions{
|
||||
ShowStderr: true,
|
||||
ShowStdout: true,
|
||||
Since: "",
|
||||
Until: "",
|
||||
Timestamps: true,
|
||||
Follow: true,
|
||||
Tail: "20",
|
||||
Details: false,
|
||||
var AppLogsCommand = &cobra.Command{
|
||||
Use: "logs <domain> [service] [flags]",
|
||||
Aliases: []string{"l"},
|
||||
Short: "Tail app logs",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.AppNameComplete()
|
||||
case 1:
|
||||
app, err := appPkg.Get(args[0])
|
||||
if err != nil {
|
||||
errMsg := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{errMsg}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
return autocomplete.ServiceNameComplete(app.Name)
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
stackName := app.StackName()
|
||||
|
||||
if err := app.Recipe.EnsureExists(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !deployMeta.IsDeployed {
|
||||
log.Fatalf("%s is not deployed?", app.Name)
|
||||
}
|
||||
|
||||
var serviceNames []string
|
||||
if len(args) == 2 {
|
||||
serviceNames = []string{args[1]}
|
||||
}
|
||||
|
||||
if err = tailLogs(cl, app, serviceNames); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// stackLogs lists logs for all stack services
|
||||
func stackLogs(c *cli.Context, app config.App, client *dockerClient.Client) {
|
||||
filters, err := app.Filters(true, false)
|
||||
// tailLogs prints logs for the given app with optional service names to be
// filtered on. It also checks if the latest task is not running and then
// prints the past tasks.
|
||||
func tailLogs(cl *dockerClient.Client, app appPkg.App, serviceNames []string) error {
|
||||
f, err := app.Filters(true, false, serviceNames...)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
return err
|
||||
}
|
||||
|
||||
serviceOpts := types.ServiceListOptions{Filters: filters}
|
||||
services, err := client.ServiceList(context.Background(), serviceOpts)
|
||||
services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: f})
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
return err
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, service := range services {
|
||||
wg.Add(1)
|
||||
go func(s string) {
|
||||
if internal.StdErrOnly {
|
||||
logOpts.ShowStdout = false
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("name", service.Spec.Name)
|
||||
tasks, err := cl.TaskList(context.Background(), types.TaskListOptions{Filters: f})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(tasks) > 0 {
|
||||
// Sort the tasks by the CreatedAt field in descending order so that the
// most recent task comes first.
|
||||
slices.SortFunc[[]swarm.Task](tasks, func(t1, t2 swarm.Task) int {
|
||||
return int(t2.Meta.CreatedAt.Unix() - t1.Meta.CreatedAt.Unix())
|
||||
})
|
||||
lastTask := tasks[0].Status
|
||||
if lastTask.State != swarm.TaskStateRunning {
|
||||
for _, task := range tasks {
|
||||
log.Errorf("[%s] %s State %s: %s", service.Spec.Name, task.Meta.CreatedAt.Format(time.RFC3339), task.Status.State, task.Status.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logs, err := client.ServiceLogs(context.Background(), s, logOpts)
|
||||
// Collect the logs in a go routine, so the logs from all services are
|
||||
// collected in parallel.
|
||||
wg.Add(1)
|
||||
go func(serviceID string) {
|
||||
logs, err := cl.ServiceLogs(context.Background(), serviceID, containerTypes.LogsOptions{
|
||||
ShowStderr: true,
|
||||
ShowStdout: !stdErr,
|
||||
Since: sinceLogs,
|
||||
Until: "",
|
||||
Timestamps: true,
|
||||
Follow: true,
|
||||
Tail: "20",
|
||||
Details: false,
|
||||
})
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer logs.Close()
|
||||
|
||||
_, err = io.Copy(os.Stdout, logs)
|
||||
if err != nil && err != io.EOF {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}(service.ID)
|
||||
}
|
||||
|
||||
// Wait for all log streams to be closed.
|
||||
wg.Wait()
|
||||
|
||||
os.Exit(0)
|
||||
}
|
||||
|
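tailLogs fans out one goroutine per service and blocks on the WaitGroup until every stream closes. The same shape reduced to plain io.Reader values so it runs without a Docker daemon; the readers stand in for the cl.ServiceLogs results:

```go
package main

import (
	"io"
	"os"
	"strings"
	"sync"
)

func main() {
	// Stand-ins for the per-service log streams returned by ServiceLogs.
	streams := []io.Reader{
		strings.NewReader("app | starting\n"),
		strings.NewReader("db  | ready to accept connections\n"),
	}

	var wg sync.WaitGroup
	for _, s := range streams {
		wg.Add(1)
		go func(r io.Reader) {
			defer wg.Done()
			// Copy each stream to stdout; in the real code this is the
			// blocking follow of one service's logs.
			if _, err := io.Copy(os.Stdout, r); err != nil && err != io.EOF {
				panic(err)
			}
		}(s)
	}

	// Wait for all log streams to drain before exiting.
	wg.Wait()
}
```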
||||
var appLogsCommand = cli.Command{
|
||||
Name: "logs",
|
||||
Aliases: []string{"l"},
|
||||
ArgsUsage: "<domain> [<service>]",
|
||||
Usage: "Tail app logs",
|
||||
Flags: []cli.Flag{
|
||||
internal.StdErrOnlyFlag,
|
||||
internal.SinceLogsFlag,
|
||||
internal.DebugFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
logOpts.Since = internal.SinceLogs
|
||||
|
||||
serviceName := c.Args().Get(1)
|
||||
if serviceName == "" {
|
||||
logrus.Debugf("tailing logs for all %s services", app.Recipe)
|
||||
stackLogs(c, app, cl)
|
||||
} else {
|
||||
logrus.Debugf("tailing logs for %s", serviceName)
|
||||
if err := tailServiceLogs(c, cl, app, serviceName); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func tailServiceLogs(c *cli.Context, cl *dockerClient.Client, app config.App, serviceName string) error {
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("name", fmt.Sprintf("%s_%s", app.StackName(), serviceName))
|
||||
|
||||
chosenService, err := service.GetService(context.Background(), cl, filters, internal.NoInput)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if internal.StdErrOnly {
|
||||
logOpts.ShowStdout = false
|
||||
}
|
||||
|
||||
logs, err := cl.ServiceLogs(context.Background(), chosenService.ID, logOpts)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
defer logs.Close()
|
||||
|
||||
_, err = io.Copy(os.Stdout, logs)
|
||||
if err != nil && err != io.EOF {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
stdErr bool
|
||||
sinceLogs string
|
||||
)
|
||||
|
||||
func init() {
|
||||
AppLogsCommand.Flags().BoolVarP(
|
||||
&stdErr,
|
||||
"stderr",
|
||||
"s",
|
||||
false,
|
||||
"only tail stderr",
|
||||
)
|
||||
|
||||
AppLogsCommand.Flags().StringVarP(
|
||||
&sinceLogs,
|
||||
"since",
|
||||
"S",
|
||||
"",
|
||||
"tail logs since YYYY-MM-DDTHH:MM:SSZ",
|
||||
)
|
||||
}
|
||||
|
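The shell completion for positional arguments used by AppLogsCommand (and AppNewCommand below) is a switch on len(args): the first argument completes to app names, the second to service names. A hedged sketch of that dispatch with canned candidate lists in place of the autocomplete helpers:

```go
package main

import (
	"github.com/spf13/cobra"
)

// completePositionals mirrors the len(args) switch used by AppLogsCommand.
// The candidate slices are placeholders for autocomplete.AppNameComplete()
// and autocomplete.ServiceNameComplete().
func completePositionals(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	switch len(args) {
	case 0:
		return []string{"1312.net", "example.com"}, cobra.ShellCompDirectiveNoFileComp
	case 1:
		return []string{"app", "db"}, cobra.ShellCompDirectiveNoFileComp
	default:
		return nil, cobra.ShellCompDirectiveDefault
	}
}

var logsDemoCommand = &cobra.Command{
	Use:               "logs <domain> [service]",
	Args:              cobra.RangeArgs(1, 2),
	ValidArgsFunction: completePositionals,
	Run:               func(cmd *cobra.Command, args []string) {},
}

func main() {
	// Execute error ignored; this sketch only demonstrates completion wiring.
	_ = logsDemoCommand.Execute()
}
```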
359 cli/app/new.go
@ -2,35 +2,36 @@ package app
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/app"
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/jsontable"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
recipePkg "coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/secret"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/charmbracelet/lipgloss/table"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var appNewDescription = `
|
||||
Takes a recipe and uses it to create a new app. This new app configuration is
|
||||
stored in your ~/.abra directory under the appropriate server.
|
||||
var appNewDescription = `Creates a new app from a default recipe.
|
||||
|
||||
This new app configuration is stored in your $ABRA_DIR directory under the
|
||||
appropriate server.
|
||||
|
||||
This command does not deploy your app for you. You will need to run "abra app
|
||||
deploy <domain>" to do so.
|
||||
|
||||
You can see what recipes are available (i.e. values for the <recipe> argument)
|
||||
You can see what recipes are available (i.e. values for the [recipe] argument)
|
||||
by running "abra recipe ls".
|
||||
|
||||
Recipe commit hashes are supported values for "[version]".
|
||||
|
||||
Passing the "--secrets/-S" flag will automatically generate secrets for your
|
||||
app and store them encrypted at rest on the chosen target server. These
|
||||
generated secrets are only visible at generation time, so please take care to
|
||||
@ -38,163 +39,230 @@ store them somewhere safe.
|
||||
|
||||
You can use the "--pass/-P" to store these generated passwords locally in a
|
||||
pass store (see passwordstore.org for more). The pass command must be available
|
||||
on your $PATH.
|
||||
`
|
||||
on your $PATH.`
|
||||
|
||||
var appNewCommand = cli.Command{
|
||||
Name: "new",
|
||||
Aliases: []string{"n"},
|
||||
Usage: "Create a new app",
|
||||
Description: appNewDescription,
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.NewAppServerFlag,
|
||||
internal.DomainFlag,
|
||||
internal.PassFlag,
|
||||
internal.SecretsFlag,
|
||||
internal.OfflineFlag,
|
||||
var AppNewCommand = &cobra.Command{
|
||||
Use: "new [recipe] [version] [flags]",
|
||||
Aliases: []string{"n"},
|
||||
Short: "Create a new app",
|
||||
Long: appNewDescription,
|
||||
Args: cobra.RangeArgs(0, 2),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.RecipeNameComplete()
|
||||
case 1:
|
||||
recipe := internal.ValidateRecipe(args, cmd.Name())
|
||||
return autocomplete.RecipeVersionComplete(recipe.Name)
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
ArgsUsage: "[<recipe>]",
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
recipe := internal.ValidateRecipeWithPrompt(c, conf)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
recipe := internal.ValidateRecipe(args, cmd.Name())
|
||||
|
||||
if err := recipePkg.EnsureUpToDate(recipe.Name, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if len(args) == 2 && internal.Chaos {
|
||||
log.Fatal("cannot use [version] and --chaos together")
|
||||
}
|
||||
|
||||
var recipeVersion string
|
||||
if len(args) == 2 {
|
||||
recipeVersion = args[1]
|
||||
}
|
||||
|
||||
chaosVersion := config.CHAOS_DEFAULT
|
||||
if internal.Chaos {
|
||||
recipeVersion = chaosVersion
|
||||
|
||||
if !internal.Offline {
|
||||
if err := recipe.EnsureUpToDate(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !internal.Chaos {
|
||||
if err := recipe.EnsureIsClean(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
var recipeVersions recipePkg.RecipeVersions
|
||||
if recipeVersion == "" {
|
||||
var err error
|
||||
recipeVersions, _, err = recipe.GetRecipeVersions()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(recipeVersions) > 0 {
|
||||
latest := recipeVersions[len(recipeVersions)-1]
|
||||
for tag := range latest {
|
||||
recipeVersion = tag
|
||||
}
|
||||
|
||||
if _, err := recipe.EnsureVersion(recipeVersion); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
if err := recipe.EnsureLatest(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := ensureServerFlag(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := ensureDomainFlag(recipe, internal.NewAppServer); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if err := ensureDomainFlag(recipe, newAppServer); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
sanitisedAppName := config.SanitiseAppName(internal.Domain)
|
||||
logrus.Debugf("%s sanitised as %s for new app", internal.Domain, sanitisedAppName)
|
||||
sanitisedAppName := appPkg.SanitiseAppName(appDomain)
|
||||
log.Debugf("%s sanitised as %s for new app", appDomain, sanitisedAppName)
|
||||
|
||||
if err := config.TemplateAppEnvSample(
|
||||
recipe.Name,
|
||||
internal.Domain,
|
||||
internal.NewAppServer,
|
||||
internal.Domain,
|
||||
if err := appPkg.TemplateAppEnvSample(
|
||||
recipe,
|
||||
appDomain,
|
||||
newAppServer,
|
||||
appDomain,
|
||||
); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := promptForSecrets(internal.Domain); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(internal.NewAppServer)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
var secrets AppSecrets
|
||||
var secretTable *jsontable.JSONTable
|
||||
if internal.Secrets {
|
||||
secrets, err := createSecrets(cl, sanitisedAppName)
|
||||
var appSecrets AppSecrets
|
||||
var secretsTable *table.Table
|
||||
if generateSecrets {
|
||||
sampleEnv, err := recipe.SampleEnv()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
secretCols := []string{"Name", "Value"}
|
||||
secretTable = formatter.CreateTable(secretCols)
|
||||
for secret := range secrets {
|
||||
secretTable.Append([]string{secret, secrets[secret]})
|
||||
composeFiles, err := recipe.GetComposeFiles(sampleEnv)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
secretsConfig, err := secret.ReadSecretsConfig(
|
||||
recipe.SampleEnvPath,
|
||||
composeFiles,
|
||||
appPkg.StackName(appDomain),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := promptForSecrets(recipe.Name, secretsConfig); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(newAppServer)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
appSecrets, err = createSecrets(cl, secretsConfig, sanitisedAppName)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
secretsTable, err = formatter.CreateTable()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
headers := []string{"NAME", "VALUE"}
|
||||
secretsTable.Headers(headers...)
|
||||
|
||||
for name, val := range appSecrets {
|
||||
secretsTable.Row(name, val)
|
||||
}
|
||||
}
|
||||
|
||||
if internal.NewAppServer == "default" {
|
||||
internal.NewAppServer = "local"
|
||||
if newAppServer == "default" {
|
||||
newAppServer = "local"
|
||||
}
|
||||
|
||||
tableCol := []string{"server", "recipe", "domain"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
table.Append([]string{internal.NewAppServer, recipe.Name, internal.Domain})
|
||||
log.Infof("%s created successfully (version: %s, chaos: %s)", appDomain, recipeVersion, chaosVersion)
|
||||
|
||||
fmt.Println("")
|
||||
fmt.Println(fmt.Sprintf("A new %s app has been created! Here is an overview:", recipe.Name))
|
||||
fmt.Println("")
|
||||
table.Render()
|
||||
fmt.Println("")
|
||||
fmt.Println("You can configure this app by running the following:")
|
||||
fmt.Println(fmt.Sprintf("\n abra app config %s", internal.Domain))
|
||||
fmt.Println("")
|
||||
fmt.Println("You can deploy this app by running the following:")
|
||||
fmt.Println(fmt.Sprintf("\n abra app deploy %s", internal.Domain))
|
||||
fmt.Println("")
|
||||
if len(appSecrets) > 0 {
|
||||
rows := [][]string{}
|
||||
for k, v := range appSecrets {
|
||||
rows = append(rows, []string{k, v})
|
||||
}
|
||||
|
||||
if len(secrets) > 0 {
|
||||
fmt.Println("Here are your generated secrets:")
|
||||
fmt.Println("")
|
||||
secretTable.Render()
|
||||
fmt.Println("")
|
||||
logrus.Warn("generated secrets are not shown again, please take note of them *now*")
|
||||
overview := formatter.CreateOverview("SECRETS OVERVIEW", rows)
|
||||
|
||||
fmt.Println(overview)
|
||||
|
||||
log.Warnf(
|
||||
"secrets are %s shown again, please save them %s",
|
||||
formatter.BoldUnderlineStyle.Render("NOT"),
|
||||
formatter.BoldUnderlineStyle.Render("NOW"),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
app, err := app.Get(appDomain)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := app.WriteRecipeVersion(recipeVersion, false); err != nil {
|
||||
log.Fatalf("writing recipe version failed: %s", err)
|
||||
}
|
||||
},
|
||||
BashComplete: autocomplete.RecipeNameComplete,
|
||||
}
|
||||
|
||||
// AppSecrets represents all app secrets
|
||||
type AppSecrets map[string]string
|
||||
|
||||
// createSecrets creates all secrets for a new app.
|
||||
func createSecrets(cl *dockerClient.Client, sanitisedAppName string) (AppSecrets, error) {
|
||||
appEnvPath := path.Join(
|
||||
config.ABRA_DIR,
|
||||
"servers",
|
||||
internal.NewAppServer,
|
||||
fmt.Sprintf("%s.env", internal.Domain),
|
||||
)
|
||||
func createSecrets(cl *dockerClient.Client, secretsConfig map[string]secret.Secret, sanitisedAppName string) (AppSecrets, error) {
|
||||
// NOTE(d1): trim to match app.StackName() implementation
|
||||
if len(sanitisedAppName) > config.MAX_SANITISED_APP_NAME_LENGTH {
|
||||
log.Debugf("trimming %s to %s to avoid runtime limits", sanitisedAppName, sanitisedAppName[:config.MAX_SANITISED_APP_NAME_LENGTH])
|
||||
sanitisedAppName = sanitisedAppName[:config.MAX_SANITISED_APP_NAME_LENGTH]
|
||||
}
|
||||
|
||||
appEnv, err := config.ReadEnv(appEnvPath)
|
||||
secrets, err := secret.GenerateSecrets(cl, secretsConfig, newAppServer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
secretEnvVars := secret.ReadSecretEnvVars(appEnv)
|
||||
secrets, err := secret.GenerateSecrets(cl, secretEnvVars, sanitisedAppName, internal.NewAppServer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if internal.Pass {
|
||||
if saveInPass {
|
||||
for secretName := range secrets {
|
||||
secretValue := secrets[secretName]
|
||||
if err := secret.PassInsertSecret(
|
||||
secretValue,
|
||||
secretName,
|
||||
internal.Domain,
|
||||
internal.NewAppServer,
|
||||
appDomain,
|
||||
newAppServer,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return secrets, nil
|
||||
}
|
||||
|
||||
// ensureDomainFlag checks if the domain flag was used. If not, it asks the user for it.
|
||||
func ensureDomainFlag(recipe recipe.Recipe, server string) error {
|
||||
if internal.Domain == "" && !internal.NoInput {
|
||||
func ensureDomainFlag(recipe recipePkg.Recipe, server string) error {
|
||||
if appDomain == "" && !internal.NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "Specify app domain",
|
||||
Default: fmt.Sprintf("%s.%s", recipe.Name, server),
|
||||
}
|
||||
if err := survey.AskOne(prompt, &internal.Domain); err != nil {
|
||||
if err := survey.AskOne(prompt, &appDomain); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if internal.Domain == "" {
|
||||
if appDomain == "" {
|
||||
return fmt.Errorf("no domain provided")
|
||||
}
|
||||
|
||||
@ -202,23 +270,17 @@ func ensureDomainFlag(recipe recipe.Recipe, server string) error {
|
||||
}
|
||||
|
||||
// promptForSecrets asks if we should generate secrets for a new app.
|
||||
func promptForSecrets(appName string) error {
|
||||
app, err := app.Get(appName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
secretEnvVars := secret.ReadSecretEnvVars(app.Env)
|
||||
if len(secretEnvVars) == 0 {
|
||||
logrus.Debugf("%s has no secrets to generate, skipping...", app.Recipe)
|
||||
func promptForSecrets(recipeName string, secretsConfig map[string]secret.Secret) error {
|
||||
if len(secretsConfig) == 0 {
|
||||
log.Debugf("%s has no secrets to generate, skipping...", recipeName)
|
||||
return nil
|
||||
}
|
||||
|
||||
if !internal.Secrets && !internal.NoInput {
|
||||
if !generateSecrets && !internal.NoInput {
|
||||
prompt := &survey.Confirm{
|
||||
Message: "Generate app secrets?",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &internal.Secrets); err != nil {
|
||||
if err := survey.AskOne(prompt, &generateSecrets); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -233,19 +295,76 @@ func ensureServerFlag() error {
|
||||
return err
|
||||
}
|
||||
|
||||
if internal.NewAppServer == "" && !internal.NoInput {
|
||||
if newAppServer == "" && !internal.NoInput {
|
||||
prompt := &survey.Select{
|
||||
Message: "Select app server:",
|
||||
Options: servers,
|
||||
}
|
||||
if err := survey.AskOne(prompt, &internal.NewAppServer); err != nil {
|
||||
if err := survey.AskOne(prompt, &newAppServer); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if internal.NewAppServer == "" {
|
||||
if newAppServer == "" {
|
||||
return fmt.Errorf("no server provided")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
newAppServer string
|
||||
appDomain string
|
||||
saveInPass bool
|
||||
generateSecrets bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
AppNewCommand.Flags().StringVarP(
|
||||
&newAppServer,
|
||||
"server",
|
||||
"s",
|
||||
"",
|
||||
"specify server for new app",
|
||||
)
|
||||
|
||||
AppNewCommand.RegisterFlagCompletionFunc(
|
||||
"server",
|
||||
func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.ServerNameComplete()
|
||||
},
|
||||
)
|
||||
|
||||
AppNewCommand.Flags().StringVarP(
|
||||
&appDomain,
|
||||
"domain",
|
||||
"D",
|
||||
"",
|
||||
"domain name for app",
|
||||
)
|
||||
|
||||
AppNewCommand.Flags().BoolVarP(
|
||||
&saveInPass,
|
||||
"pass",
|
||||
"p",
|
||||
false,
|
||||
"store secrets in a local pass store",
|
||||
)
|
||||
|
||||
AppNewCommand.Flags().BoolVarP(
|
||||
&generateSecrets,
|
||||
"secrets",
|
||||
"S",
|
||||
false,
|
||||
"automatically generate secrets",
|
||||
)
|
||||
|
||||
AppNewCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
|
||||
}
|
||||
|
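ensureDomainFlag and promptForSecrets both follow the same survey pattern: only prompt when the value was not supplied via a flag and --no-input is not set. A reduced sketch of those two prompt shapes; the default domain is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/AlecAivazis/survey/v2"
)

func main() {
	// Ask for a domain, offering a default the same way ensureDomainFlag does.
	var domain string
	prompt := &survey.Input{
		Message: "Specify app domain",
		Default: "gitea.example.com",
	}
	if err := survey.AskOne(prompt, &domain); err != nil {
		panic(err)
	}

	// Confirm secret generation, as promptForSecrets does.
	generate := false
	confirm := &survey.Confirm{Message: "Generate app secrets?"}
	if err := survey.AskOne(confirm, &generate); err != nil {
		panic(err)
	}

	fmt.Printf("domain=%s generate=%v\n", domain, generate)
}
```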
216 cli/app/ps.go
@ -2,103 +2,203 @@ package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/service"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
abraService "coopcloud.tech/abra/pkg/service"
|
||||
stack "coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"github.com/buger/goterm"
|
||||
dockerFormatter "github.com/docker/cli/cli/command/formatter"
|
||||
"github.com/docker/docker/api/types"
|
||||
containerTypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var appPsCommand = cli.Command{
|
||||
Name: "ps",
|
||||
Aliases: []string{"p"},
|
||||
Usage: "Check app status",
|
||||
ArgsUsage: "<domain>",
|
||||
Description: "Show a more detailed status output of a specific deployed app",
|
||||
Flags: []cli.Flag{
|
||||
internal.WatchFlag,
|
||||
internal.DebugFlag,
|
||||
internal.OfflineFlag,
|
||||
var AppPsCommand = &cobra.Command{
|
||||
Use: "ps <domain> [flags]",
|
||||
Aliases: []string{"p"},
|
||||
Short: "Check app deployment status",
|
||||
Args: cobra.ExactArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.AppNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
|
||||
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !isDeployed {
|
||||
logrus.Fatalf("%s is not deployed?", app.Name)
|
||||
if !deployMeta.IsDeployed {
|
||||
log.Fatalf("%s is not deployed?", app.Name)
|
||||
}
|
||||
|
||||
if !internal.Watch {
|
||||
showPSOutput(c, app, cl)
|
||||
return nil
|
||||
chaosVersion := config.CHAOS_DEFAULT
|
||||
statuses, err := appPkg.GetAppStatuses([]appPkg.App{app}, true)
|
||||
if statusMeta, ok := statuses[app.StackName()]; ok {
|
||||
if isChaos, exists := statusMeta["chaos"]; exists && isChaos == "true" {
|
||||
if cVersion, exists := statusMeta["chaosVersion"]; exists {
|
||||
chaosVersion = cVersion
|
||||
if strings.HasSuffix(chaosVersion, config.DIRTY_DEFAULT) {
|
||||
chaosVersion = formatter.BoldDirtyDefault(chaosVersion)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
goterm.Clear()
|
||||
for {
|
||||
goterm.MoveCursor(1, 1)
|
||||
showPSOutput(c, app, cl)
|
||||
goterm.Flush()
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
showPSOutput(app, cl, deployMeta.Version, chaosVersion)
|
||||
},
|
||||
}
|
||||
|
||||
// showPSOutput renders ps output.
|
||||
func showPSOutput(c *cli.Context, app config.App, cl *dockerClient.Client) {
|
||||
filters, err := app.Filters(true, true)
|
||||
func showPSOutput(app appPkg.App, cl *dockerClient.Client, deployedVersion, chaosVersion string) {
|
||||
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters})
|
||||
deployOpts := stack.Deploy{
|
||||
Composefiles: composeFiles,
|
||||
Namespace: app.StackName(),
|
||||
Prune: false,
|
||||
ResolveImage: stack.ResolveImageAlways,
|
||||
}
|
||||
compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
tableCol := []string{"service name", "image", "created", "status", "state", "ports"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
var rows [][]string
|
||||
allContainerStats := make(map[string]map[string]string)
|
||||
for _, service := range compose.Services {
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name))
|
||||
|
||||
for _, container := range containers {
|
||||
var containerNames []string
|
||||
for _, containerName := range container.Names {
|
||||
trimmed := strings.TrimPrefix(containerName, "/")
|
||||
containerNames = append(containerNames, trimmed)
|
||||
containers, err := cl.ContainerList(context.Background(), containerTypes.ListOptions{Filters: filters})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
tableRow := []string{
|
||||
service.ContainerToServiceName(container.Names, app.StackName()),
|
||||
formatter.RemoveSha(container.Image),
|
||||
formatter.HumanDuration(container.Created),
|
||||
container.Status,
|
||||
container.State,
|
||||
dockerFormatter.DisplayablePorts(container.Ports),
|
||||
var containerStats map[string]string
|
||||
if len(containers) == 0 {
|
||||
containerStats = map[string]string{
|
||||
"version": deployedVersion,
|
||||
"chaos": chaosVersion,
|
||||
"service": service.Name,
|
||||
"image": "unknown",
|
||||
"created": "unknown",
|
||||
"status": "unknown",
|
||||
"state": "unknown",
|
||||
"ports": "unknown",
|
||||
}
|
||||
} else {
|
||||
container := containers[0]
|
||||
containerStats = map[string]string{
|
||||
"version": deployedVersion,
|
||||
"chaos": chaosVersion,
|
||||
"service": abraService.ContainerToServiceName(container.Names, app.StackName()),
|
||||
"image": formatter.RemoveSha(container.Image),
|
||||
"created": formatter.HumanDuration(container.Created),
|
||||
"status": container.Status,
|
||||
"state": container.State,
|
||||
"ports": dockerFormatter.DisplayablePorts(container.Ports),
|
||||
}
|
||||
}
|
||||
table.Append(tableRow)
|
||||
|
||||
allContainerStats[containerStats["service"]] = containerStats
|
||||
|
||||
// NOTE(d1): don't clobber these variables for --machine output
|
||||
dVersion := deployedVersion
|
||||
cVersion := chaosVersion
|
||||
|
||||
if containerStats["service"] != "app" {
|
||||
// NOTE(d1): don't repeat info which is only relevant for the "app" service
|
||||
dVersion = ""
|
||||
cVersion = ""
|
||||
}
|
||||
|
||||
row := []string{
|
||||
containerStats["service"],
|
||||
containerStats["image"],
|
||||
dVersion,
|
||||
cVersion,
|
||||
containerStats["status"],
|
||||
}
|
||||
|
||||
rows = append(rows, row)
|
||||
}
|
||||
|
||||
table.Render()
|
||||
if internal.MachineReadable {
|
||||
rendered, err := json.Marshal(allContainerStats)
|
||||
if err != nil {
|
||||
log.Fatal("unable to convert to JSON: %s", err)
|
||||
}
|
||||
|
||||
fmt.Println(string(rendered))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
table, err := formatter.CreateTable()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
headers := []string{
|
||||
"SERVICE",
|
||||
"IMAGE",
|
||||
"VERSION",
|
||||
"CHAOS",
|
||||
"STATUS",
|
||||
}
|
||||
|
||||
table.
|
||||
Headers(headers...).
|
||||
Rows(rows...)
|
||||
|
||||
if err := formatter.PrintTable(table); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
AppPsCommand.Flags().BoolVarP(
|
||||
&internal.MachineReadable,
|
||||
"machine",
|
||||
"m",
|
||||
false,
|
||||
"print machine-readable output",
|
||||
)
|
||||
|
||||
AppPsCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
}
|
||||
|
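showPSOutput builds a name filter of the form ^<stack>_<service> per service and lists the matching containers. A standalone sketch of that lookup against a local daemon; the stack and service names are placeholders:

```go
package main

import (
	"context"
	"fmt"

	containerTypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	dockerClient "github.com/docker/docker/client"
)

func main() {
	// Assumes a reachable Docker daemon configured via the environment.
	cl, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv, dockerClient.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// Match containers of one stack service by name prefix, the same filter
	// shape built per service above.
	f := filters.NewArgs()
	f.Add("name", fmt.Sprintf("^%s_%s", "gitea_example_com", "app"))

	containers, err := cl.ContainerList(context.Background(), containerTypes.ListOptions{Filters: f})
	if err != nil {
		panic(err)
	}

	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image, c.State, c.Status)
	}
}
```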
@ -8,22 +8,18 @@ import (
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
stack "coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/volume"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var appRemoveCommand = cli.Command{
|
||||
Name: "remove",
|
||||
Aliases: []string{"rm"},
|
||||
ArgsUsage: "<domain>",
|
||||
Usage: "Remove all app data, locally and remotely",
|
||||
Description: `
|
||||
This command removes everything related to an app which is already undeployed.
|
||||
var AppRemoveCommand = &cobra.Command{
|
||||
Use: "remove <domain> [flags]",
|
||||
Aliases: []string{"rm"},
|
||||
Short: "Remove all app data, locally and remotely",
|
||||
Long: `Remove everything related to an app which is already undeployed.
|
||||
|
||||
By default, it will prompt for confirmation before proceeding. All secrets,
|
||||
volumes and the local app env file will be deleted.
|
||||
@ -38,53 +34,53 @@ Please note, if you delete the local app env file without removing volumes and
|
||||
secrets first, Abra will *not* be able to help you remove them afterwards.
|
||||
|
||||
To delete everything without prompt, use the "--force/-f" or the "--no-input/-n"
|
||||
flag.
|
||||
`,
|
||||
Flags: []cli.Flag{
|
||||
internal.ForceFlag,
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.OfflineFlag,
|
||||
flag.`,
|
||||
Example: " abra app remove 1312.net",
|
||||
Args: cobra.ExactArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.AppNameComplete()
|
||||
},
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Before: internal.SubCommandBefore,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
if !internal.Force && !internal.NoInput {
|
||||
log.Warnf("ALERTA ALERTA: deleting %s data and config (local/remote)", app.Name)
|
||||
|
||||
response := false
|
||||
msg := "ALERTA ALERTA: this will completely remove %s data and configurations locally and remotely, are you sure?"
|
||||
prompt := &survey.Confirm{Message: fmt.Sprintf(msg, app.Name)}
|
||||
prompt := &survey.Confirm{Message: "are you sure?"}
|
||||
if err := survey.AskOne(prompt, &response); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !response {
|
||||
logrus.Fatal("aborting as requested")
|
||||
log.Fatal("aborting as requested")
|
||||
}
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
|
||||
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
if isDeployed {
|
||||
logrus.Fatalf("%s is still deployed. Run \"abra app undeploy %s\"", app.Name, app.Name)
|
||||
if deployMeta.IsDeployed {
|
||||
log.Fatalf("%s is still deployed. Run \"abra app undeploy %s\"", app.Name, app.Name)
|
||||
}
|
||||
|
||||
fs, err := app.Filters(false, false)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: fs})
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
secrets := make(map[string]string)
|
||||
@ -99,49 +95,50 @@ flag.
|
||||
for _, name := range secretNames {
|
||||
err := cl.SecretRemove(context.Background(), secrets[name])
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
logrus.Info(fmt.Sprintf("secret: %s removed", name))
|
||||
log.Info(fmt.Sprintf("secret: %s removed", name))
|
||||
}
|
||||
} else {
|
||||
logrus.Info("no secrets to remove")
|
||||
log.Info("no secrets to remove")
|
||||
}
|
||||
|
||||
fs, err = app.Filters(false, true)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
volumeListOptions := volume.ListOptions{fs}
|
||||
volumeListOKBody, err := cl.VolumeList(context.Background(), volumeListOptions)
|
||||
volumeList := volumeListOKBody.Volumes
|
||||
volumeList, err := client.GetVolumes(cl, context.Background(), app.Server, fs)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
volumeNames := client.GetVolumeNames(volumeList)
|
||||
|
||||
var vols []string
|
||||
for _, vol := range volumeList {
|
||||
vols = append(vols, vol.Name)
|
||||
}
|
||||
|
||||
if len(vols) > 0 {
|
||||
for _, vol := range vols {
|
||||
err := cl.VolumeRemove(context.Background(), vol, internal.Force) // last argument is for force removing
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
logrus.Info(fmt.Sprintf("volume %s removed", vol))
|
||||
if len(volumeNames) > 0 {
|
||||
err := client.RemoveVolumes(cl, context.Background(), volumeNames, internal.Force, 5)
|
||||
if err != nil {
|
||||
log.Fatalf("removing volumes failed: %s", err)
|
||||
}
|
||||
|
||||
log.Infof("%d volumes removed successfully", len(volumeNames))
|
||||
} else {
|
||||
logrus.Info("no volumes to remove")
|
||||
log.Info("no volumes to remove")
|
||||
}
|
||||
|
||||
if err = os.Remove(app.Path); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Info(fmt.Sprintf("file: %s removed", app.Path))
|
||||
|
||||
return nil
|
||||
log.Info(fmt.Sprintf("file: %s removed", app.Path))
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
AppRemoveCommand.Flags().BoolVarP(
|
||||
&internal.Force,
|
||||
"force",
|
||||
"f",
|
||||
false,
|
||||
"perform action without further prompt",
|
||||
)
|
||||
}
|
||||
|
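The removal flow above lists swarm secrets via app filters and removes them one by one. A standalone sketch of that list-then-remove loop; the name filter value is a placeholder and a reachable swarm manager is assumed:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	dockerClient "github.com/docker/docker/client"
)

func main() {
	cl, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv, dockerClient.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// Filter secrets by stack name prefix, as app.Filters does for the app.
	fs := filters.NewArgs()
	fs.Add("name", "gitea_example_com")

	secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: fs})
	if err != nil {
		panic(err)
	}

	// Remove each matched secret and report it, mirroring the loop above.
	for _, s := range secretList {
		if err := cl.SecretRemove(context.Background(), s.ID); err != nil {
			panic(err)
		}
		fmt.Printf("secret: %s removed\n", s.Spec.Name)
	}
}
```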
@ -2,72 +2,132 @@ package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
upstream "coopcloud.tech/abra/pkg/upstream/service"
|
||||
stack "coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var appRestartCommand = cli.Command{
|
||||
Name: "restart",
|
||||
Aliases: []string{"re"},
|
||||
Usage: "Restart an app",
|
||||
ArgsUsage: "<domain>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Description: `This command restarts a service within a deployed app.`,
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
var AppRestartCommand = &cobra.Command{
|
||||
Use: "restart <domain> [[service] | --all-services] [flags]",
|
||||
Aliases: []string{"re"},
|
||||
Short: "Restart an app",
|
||||
Long: `This command restarts services within a deployed app.
|
||||
|
||||
serviceNameShort := c.Args().Get(1)
|
||||
if serviceNameShort == "" {
|
||||
err := errors.New("missing service?")
|
||||
internal.ShowSubcommandHelpAndError(c, err)
|
||||
Run "abra app ps <domain>" to see a list of service names.
|
||||
|
||||
Pass "--all-services/-a" to restart all services.`,
|
||||
Example: ` # restart a single app service
|
||||
abra app restart 1312.net app
|
||||
|
||||
# restart all app services
|
||||
abra app restart 1312.net -a`,
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.AppNameComplete()
|
||||
case 1:
|
||||
if !allServices {
|
||||
return autocomplete.ServiceNameComplete(args[0])
|
||||
}
|
||||
return nil, cobra.ShellCompDirectiveDefault
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var serviceName string
|
||||
if len(args) == 2 {
|
||||
serviceName = args[1]
|
||||
}
|
||||
|
||||
if serviceName == "" && !allServices {
|
||||
log.Fatal("missing [service]")
|
||||
}
|
||||
|
||||
if serviceName != "" && allServices {
|
||||
log.Fatal("cannot use [service] and --all-services/-a together")
|
||||
}
|
||||
|
||||
var serviceNames []string
|
||||
if allServices {
|
||||
var err error
|
||||
serviceNames, err = appPkg.GetAppServiceNames(app.Name)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
serviceNames = append(serviceNames, serviceName)
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
serviceName := fmt.Sprintf("%s_%s", app.StackName(), serviceNameShort)
|
||||
|
||||
logrus.Debugf("attempting to scale %s to 0 (restart logic)", serviceName)
|
||||
if err := upstream.RunServiceScale(context.Background(), cl, serviceName, 0); err != nil {
|
||||
logrus.Fatal(err)
|
||||
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := stack.WaitOnService(context.Background(), cl, serviceName, app.Name); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if !deployMeta.IsDeployed {
|
||||
log.Fatalf("%s is not deployed?", app.Name)
|
||||
}
|
||||
|
||||
logrus.Debugf("%s has been scaled to 0 (restart logic)", serviceName)
|
||||
for _, serviceName := range serviceNames {
|
||||
stackServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)
|
||||
|
||||
logrus.Debugf("attempting to scale %s to 1 (restart logic)", serviceName)
|
||||
if err := upstream.RunServiceScale(context.Background(), cl, serviceName, 1); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Debugf("attempting to scale %s to 0", stackServiceName)
|
||||
|
||||
if err := upstream.RunServiceScale(context.Background(), cl, stackServiceName, 0); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := stack.WaitOnService(context.Background(), cl, stackServiceName, app.Name); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Debugf("%s has been scaled to 0", stackServiceName)
|
||||
log.Debugf("attempting to scale %s to 1", stackServiceName)
|
||||
|
||||
if err := upstream.RunServiceScale(context.Background(), cl, stackServiceName, 1); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := stack.WaitOnService(context.Background(), cl, stackServiceName, app.Name); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Debugf("%s has been scaled to 1", stackServiceName)
|
||||
log.Infof("%s service successfully restarted", serviceName)
|
||||
}
|
||||
|
||||
if err := stack.WaitOnService(context.Background(), cl, serviceName, app.Name); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Debugf("%s has been scaled to 1 (restart logic)", serviceName)
|
||||
|
||||
logrus.Infof("%s service successfully restarted", serviceNameShort)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var allServices bool
|
||||
|
||||
func init() {
|
||||
AppRestartCommand.Flags().BoolVarP(
|
||||
&allServices,
|
||||
"all-services",
|
||||
"a",
|
||||
false,
|
||||
"restart all services",
|
||||
)
|
||||
}
|
||||
|
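The restart logic scales each service to 0, waits for convergence, scales it back to 1 and waits again. A sketch of that sequence factored into a helper, reusing the abra packages exactly as they are imported in this file; it is illustrative, not the actual implementation:

```go
// Package restartsketch is a compile-only illustration of the restart flow.
package restartsketch

import (
	"context"

	"coopcloud.tech/abra/pkg/log"
	upstream "coopcloud.tech/abra/pkg/upstream/service"
	stack "coopcloud.tech/abra/pkg/upstream/stack"
	dockerClient "github.com/docker/docker/client"
)

// restartService captures the scale-to-0-then-1 dance from AppRestartCommand
// for a single "<stack>_<service>" name. appName is only used by
// WaitOnService for progress reporting, matching the calls above.
func restartService(cl *dockerClient.Client, stackServiceName, appName string) error {
	ctx := context.Background()

	log.Debugf("attempting to scale %s to 0", stackServiceName)
	if err := upstream.RunServiceScale(ctx, cl, stackServiceName, 0); err != nil {
		return err
	}
	if err := stack.WaitOnService(ctx, cl, stackServiceName, appName); err != nil {
		return err
	}

	log.Debugf("attempting to scale %s to 1", stackServiceName)
	if err := upstream.RunServiceScale(ctx, cl, stackServiceName, 1); err != nil {
		return err
	}
	return stack.WaitOnService(ctx, cl, stackServiceName, appName)
}
```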
@ -1,206 +1,135 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
containerPkg "coopcloud.tech/abra/pkg/container"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/upstream/container"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type restoreConfig struct {
|
||||
preHookCmd string
|
||||
postHookCmd string
|
||||
}
|
||||
var AppRestoreCommand = &cobra.Command{
|
||||
Use: "restore <domain> [flags]",
|
||||
Aliases: []string{"rs"},
|
||||
Short: "Restore a snapshot",
|
||||
Long: `Snapshots are restored while apps are deployed.
|
||||
|
||||
var appRestoreCommand = cli.Command{
|
||||
Name: "restore",
|
||||
Aliases: []string{"rs"},
|
||||
Usage: "Run app restore",
|
||||
ArgsUsage: "<domain> <service> <file>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.OfflineFlag,
|
||||
Some restore scenarios may require service / app restarts.`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.AppNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Description: `
|
||||
Run an app restore.
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
Pre/post hook commands are defined in the recipe configuration. Abra reads this
configuration and runs the commands in the context of the service before
restoring the backup.
|
||||
|
||||
Unlike "abra app backup", restore must be run on a per-service basis. You can
|
||||
not restore all services in one go. Backup files produced by Abra are
|
||||
compressed archives which use absolute paths. This allows Abra to restore
|
||||
according to standard tar command logic.
|
||||
|
||||
Example:
|
||||
|
||||
abra app restore example.com app ~/.abra/backups/example_com_app_609341138.tar.gz
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
serviceName := c.Args().Get(1)
|
||||
if serviceName == "" {
|
||||
internal.ShowSubcommandHelpAndError(c, errors.New("missing <service>?"))
|
||||
}
|
||||
|
||||
backupPath := c.Args().Get(2)
|
||||
if backupPath == "" {
|
||||
internal.ShowSubcommandHelpAndError(c, errors.New("missing <file>?"))
|
||||
}
|
||||
|
||||
if _, err := os.Stat(backupPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
logrus.Fatalf("%s doesn't exist?", backupPath)
|
||||
}
|
||||
}
|
||||
|
||||
recipe, err := recipe.Get(app.Recipe, conf)
|
||||
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
restoreConfigs := make(map[string]restoreConfig)
|
||||
for _, service := range recipe.Config.Services {
|
||||
if restoreEnabled, ok := service.Deploy.Labels["backupbot.restore"]; ok {
|
||||
if restoreEnabled == "true" {
|
||||
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), service.Name)
|
||||
rsConfig := restoreConfig{}
|
||||
|
||||
logrus.Debugf("restore config detected for %s", fullServiceName)
|
||||
|
||||
if preHookCmd, ok := service.Deploy.Labels["backupbot.restore.pre-hook"]; ok {
|
||||
logrus.Debugf("detected pre-hook command for %s: %s", fullServiceName, preHookCmd)
|
||||
rsConfig.preHookCmd = preHookCmd
|
||||
}
|
||||
|
||||
if postHookCmd, ok := service.Deploy.Labels["backupbot.restore.post-hook"]; ok {
|
||||
logrus.Debugf("detected post-hook command for %s: %s", fullServiceName, postHookCmd)
|
||||
rsConfig.postHookCmd = postHookCmd
|
||||
}
|
||||
|
||||
restoreConfigs[service.Name] = rsConfig
|
||||
}
|
||||
}
|
||||
execEnv := []string{
|
||||
fmt.Sprintf("SERVICE=%s", app.Domain),
|
||||
"MACHINE_LOGS=true",
|
||||
}
|
||||
|
||||
rsConfig, ok := restoreConfigs[serviceName]
|
||||
if !ok {
|
||||
rsConfig = restoreConfig{}
|
||||
if snapshot != "" {
|
||||
log.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
|
||||
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
|
||||
}
|
||||
|
||||
if err := runRestore(cl, app, backupPath, serviceName, rsConfig); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if targetPath != "" {
|
||||
log.Debugf("including TARGET=%s in backupbot exec invocation", targetPath)
|
||||
execEnv = append(execEnv, fmt.Sprintf("TARGET=%s", targetPath))
|
||||
}
|
||||
|
||||
return nil
|
||||
if internal.NoInput {
|
||||
log.Debugf("including NONINTERACTIVE=%v in backupbot exec invocation", internal.NoInput)
|
||||
execEnv = append(execEnv, fmt.Sprintf("NONINTERACTIVE=%v", internal.NoInput))
|
||||
}
|
||||
|
||||
if len(volumes) > 0 {
|
||||
allVolumes := strings.Join(volumes, ",")
|
||||
log.Debugf("including VOLUMES=%s in backupbot exec invocation", allVolumes)
|
||||
execEnv = append(execEnv, fmt.Sprintf("VOLUMES=%s", allVolumes))
|
||||
}
|
||||
|
||||
if len(services) > 0 {
|
||||
allServices := strings.Join(services, ",")
|
||||
log.Debugf("including CONTAINER=%s in backupbot exec invocation", allServices)
|
||||
execEnv = append(execEnv, fmt.Sprintf("CONTAINER=%s", allServices))
|
||||
}
|
||||
|
||||
if hooks {
|
||||
log.Debugf("including NO_COMMANDS=%v in backupbot exec invocation", false)
|
||||
execEnv = append(execEnv, fmt.Sprintf("NO_COMMANDS=%v", false))
|
||||
}
|
||||
|
||||
if _, err := internal.RunBackupCmdRemote(cl, "restore", targetContainer.ID, execEnv); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// runRestore does the actual restore logic.
|
||||
func runRestore(cl *dockerClient.Client, app config.App, backupPath, serviceName string, rsConfig restoreConfig) error {
|
||||
// FIXME: avoid instantiating a new CLI
|
||||
dcli, err := command.NewDockerCli()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var (
|
||||
targetPath string
|
||||
hooks bool
|
||||
services []string
|
||||
volumes []string
|
||||
)
|
||||
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
|
||||
func init() {
|
||||
AppRestoreCommand.Flags().StringVarP(
|
||||
&targetPath,
|
||||
"target",
|
||||
"t",
|
||||
"/",
|
||||
"target path",
|
||||
)
|
||||
|
||||
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
AppRestoreCommand.Flags().StringArrayVarP(
|
||||
&services,
|
||||
"services",
|
||||
"s",
|
||||
[]string{},
|
||||
"restore specific services",
|
||||
)
|
||||
|
||||
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)
|
||||
if rsConfig.preHookCmd != "" {
|
||||
splitCmd := internal.SafeSplit(rsConfig.preHookCmd)
|
||||
AppRestoreCommand.Flags().StringArrayVarP(
|
||||
&volumes,
|
||||
"volumes",
|
||||
"v",
|
||||
[]string{},
|
||||
"restore specific volumes",
|
||||
)
|
||||
|
||||
logrus.Debugf("split pre-hook command for %s into %s", fullServiceName, splitCmd)
|
||||
AppRestoreCommand.Flags().BoolVarP(
|
||||
&hooks,
|
||||
"hooks",
|
||||
"H",
|
||||
false,
|
||||
"enable pre/post-hook command execution",
|
||||
)
|
||||
|
||||
preHookExecOpts := types.ExecConfig{
|
||||
AttachStderr: true,
|
||||
AttachStdin: true,
|
||||
AttachStdout: true,
|
||||
Cmd: splitCmd,
|
||||
Detach: false,
|
||||
Tty: true,
|
||||
}
|
||||
|
||||
if err := container.RunExec(dcli, cl, targetContainer.ID, &preHookExecOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof("succesfully ran %s pre-hook command: %s", fullServiceName, rsConfig.preHookCmd)
|
||||
}
|
||||
|
||||
backupReader, err := os.Open(backupPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
content, err := archive.DecompressStream(backupReader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// we use absolute paths so tar knows what to do. it will restore files
|
||||
// according to the paths set in the compressed archive
|
||||
restorePath := "/"
|
||||
|
||||
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
|
||||
if err := cl.CopyToContainer(context.Background(), targetContainer.ID, restorePath, content, copyOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof("restored %s to %s", backupPath, fullServiceName)
|
||||
|
||||
if rsConfig.postHookCmd != "" {
|
||||
splitCmd := internal.SafeSplit(rsConfig.postHookCmd)
|
||||
|
||||
logrus.Debugf("split post-hook command for %s into %s", fullServiceName, splitCmd)
|
||||
|
||||
postHookExecOpts := types.ExecConfig{
|
||||
AttachStderr: true,
|
||||
AttachStdin: true,
|
||||
AttachStdout: true,
|
||||
Cmd: splitCmd,
|
||||
Detach: false,
|
||||
Tty: true,
|
||||
}
|
||||
|
||||
if err := container.RunExec(dcli, cl, targetContainer.ID, &postHookExecOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof("succesfully ran %s post-hook command: %s", fullServiceName, rsConfig.postHookCmd)
|
||||
}
|
||||
|
||||
return nil
|
||||
AppRestoreCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
}
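
The restore flow above is driven entirely by recipe deploy labels: "backupbot.restore" opts a service in, and "backupbot.restore.pre-hook" / "backupbot.restore.post-hook" supply commands to run around the copy step. The following is a minimal, self-contained sketch of that label-to-config step, not part of this diff; strings.Fields stands in for internal.SafeSplit, and the example label values are made up.

package main

import (
	"fmt"
	"strings"
)

// restoreConfig mirrors the struct used above: optional commands to run
// before and after the backup archive is copied into the container.
type restoreConfig struct {
	preHookCmd  string
	postHookCmd string
}

// configFromLabels reads the backupbot restore labels of a single service.
// It returns ok=false when the service has not opted in to restores.
func configFromLabels(labels map[string]string) (restoreConfig, bool) {
	if labels["backupbot.restore"] != "true" {
		return restoreConfig{}, false
	}

	return restoreConfig{
		preHookCmd:  labels["backupbot.restore.pre-hook"],
		postHookCmd: labels["backupbot.restore.post-hook"],
	}, true
}

func main() {
	labels := map[string]string{
		"backupbot.restore":          "true",
		"backupbot.restore.pre-hook": "sh -c 'mysqladmin shutdown'",
	}

	cfg, ok := configFromLabels(labels)
	if !ok {
		fmt.Println("service not configured for restore")
		return
	}

	// strings.Fields stands in for internal.SafeSplit here.
	fmt.Println(strings.Fields(cfg.preHookCmd))
}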
|
||||
|
@@ -1,198 +1,323 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"coopcloud.tech/abra/pkg/app"
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/envfile"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/lint"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
stack "coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"coopcloud.tech/tagcmp"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var appRollbackCommand = cli.Command{
|
||||
Name: "rollback",
|
||||
Aliases: []string{"rl"},
|
||||
Usage: "Roll an app back to a previous version",
|
||||
ArgsUsage: "<domain>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.ForceFlag,
|
||||
internal.ChaosFlag,
|
||||
internal.NoDomainChecksFlag,
|
||||
internal.DontWaitConvergeFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Description: `
|
||||
This command rolls an app back to a previous version if one exists.
|
||||
var AppRollbackCommand = &cobra.Command{
|
||||
Use: "rollback <domain> [version] [flags]",
|
||||
Aliases: []string{"rl"},
|
||||
Short: "Roll an app back to a previous version",
|
||||
Long: `This command rolls an app back to a previous version.
|
||||
|
||||
You may pass "--force/-f" to downgrade to the same version again. This can be
|
||||
useful if the container runtime has gotten into a weird state.
|
||||
Unlike "abra app deploy", chaos operations are not supported here. Only recipe
|
||||
versions are supported values for "[version]".
|
||||
|
||||
This action could be destructive, please ensure you have a copy of your app
|
||||
data beforehand.
|
||||
It is possible to "--force/-f" an downgrade if you want to re-deploy a specific
|
||||
version.
|
||||
|
||||
Chas mode ("--chaos") will deploy your local checkout of a recipe as-is,
|
||||
including unstaged changes and can be useful for live hacking and testing new
|
||||
recipes.
|
||||
`,
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
stackName := app.StackName()
|
||||
Only the deployed version is consulted when trying to determine what downgrades
|
||||
are available. The live deployment version is the "source of truth" in this
|
||||
case. The stored .env version is not consulted.
|
||||
|
||||
if !internal.Chaos {
|
||||
if err := recipe.EnsureUpToDate(app.Recipe, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
A downgrade can be destructive, please ensure you have a copy of your app data
|
||||
beforehand. See "abra app backup" for more.`,
|
||||
Example: ` # standard rollback
|
||||
abra app rollback 1312.net
|
||||
|
||||
# rollback to specific version
|
||||
abra app rollback 1312.net 2.0.0+1.2.3`,
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.AppNameComplete()
|
||||
case 1:
|
||||
app, err := appPkg.Get(args[0])
|
||||
if err != nil {
|
||||
errMsg := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{errMsg}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
return autocomplete.RecipeVersionComplete(app.Recipe.Name)
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var (
|
||||
downgradeWarnMessages []string
|
||||
chosenDowngrade string
|
||||
availableDowngrades []string
|
||||
)
|
||||
|
||||
r, err := recipe.Get(app.Recipe, conf)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
if err := lint.LintForErrors(r); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Debugf("checking whether %s is already deployed", stackName)
|
||||
|
||||
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
|
||||
deployMeta, err := ensureDeployed(cl, app)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !isDeployed {
|
||||
logrus.Fatalf("%s is not deployed?", app.Name)
|
||||
if err := lint.LintForErrors(app.Recipe); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
catl, err := recipe.ReadRecipeCatalogue(conf)
|
||||
versions, err := app.Recipe.Tags()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
versions, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if len(versions) == 0 && !internal.Chaos {
|
||||
logrus.Fatalf("no published releases for %s in the recipe catalogue?", app.Recipe)
|
||||
}
|
||||
|
||||
var availableDowngrades []string
|
||||
if deployedVersion == "unknown" {
|
||||
// NOTE(d1): we've no idea what the live deployment version is, so every
|
||||
// possible downgrade can be shown. it's up to the user to make the choice
|
||||
if deployMeta.Version == config.UNKNOWN_DEFAULT {
|
||||
availableDowngrades = versions
|
||||
logrus.Warnf("failed to determine version of deployed %s", app.Name)
|
||||
}
|
||||
|
||||
if deployedVersion != "unknown" && !internal.Chaos {
|
||||
for _, version := range versions {
|
||||
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
parsedVersion, err := tagcmp.Parse(version)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
if parsedVersion != parsedDeployedVersion && parsedVersion.IsLessThan(parsedDeployedVersion) {
|
||||
availableDowngrades = append(availableDowngrades, version)
|
||||
}
|
||||
if len(args) == 2 && args[1] != "" {
|
||||
chosenDowngrade = args[1]
|
||||
|
||||
if err := validateDowngradeVersionArg(chosenDowngrade, app, deployMeta); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if len(availableDowngrades) == 0 {
|
||||
logrus.Info("no available downgrades, you're on oldest ✌️")
|
||||
return nil
|
||||
}
|
||||
availableDowngrades = append(availableDowngrades, chosenDowngrade)
|
||||
}
|
||||
|
||||
var chosenDowngrade string
|
||||
if len(availableDowngrades) > 0 && !internal.Chaos {
|
||||
if internal.Force || internal.NoInput {
|
||||
chosenDowngrade = availableDowngrades[len(availableDowngrades)-1]
|
||||
logrus.Debugf("choosing %s as version to downgrade to (--force)", chosenDowngrade)
|
||||
} else {
|
||||
prompt := &survey.Select{
|
||||
Message: fmt.Sprintf("Please select a downgrade (current version: %s):", deployedVersion),
|
||||
Options: internal.ReverseStringList(availableDowngrades),
|
||||
}
|
||||
if err := survey.AskOne(prompt, &chosenDowngrade); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !internal.Chaos {
|
||||
if err := recipe.EnsureVersion(app.Recipe, chosenDowngrade); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if internal.Chaos {
|
||||
logrus.Warn("chaos mode engaged")
|
||||
var err error
|
||||
chosenDowngrade, err = recipe.ChaosVersion(app.Recipe)
|
||||
if deployMeta.Version != config.UNKNOWN_DEFAULT && chosenDowngrade == "" {
|
||||
downgradeAvailable, err := ensureDowngradesAvailable(versions, &availableDowngrades, deployMeta)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !downgradeAvailable {
|
||||
log.Info("no available downgrades")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
|
||||
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
|
||||
if internal.Force || internal.NoInput || chosenDowngrade != "" {
|
||||
if len(availableDowngrades) > 0 {
|
||||
chosenDowngrade = availableDowngrades[len(availableDowngrades)-1]
|
||||
}
|
||||
} else {
|
||||
if err := chooseDowngrade(availableDowngrades, deployMeta, &chosenDowngrade); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if internal.Force &&
|
||||
chosenDowngrade == "" &&
|
||||
deployMeta.Version != config.UNKNOWN_DEFAULT {
|
||||
chosenDowngrade = deployMeta.Version
|
||||
}
|
||||
|
||||
if chosenDowngrade == "" {
|
||||
log.Fatal("unknown deployed version, unable to downgrade")
|
||||
}
|
||||
|
||||
log.Debugf("choosing %s as version to rollback", chosenDowngrade)
|
||||
|
||||
if _, err := app.Recipe.EnsureVersion(chosenDowngrade); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
abraShEnv, err := envfile.ReadAbraShEnvVars(app.Recipe.AbraShPath)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
for k, v := range abraShEnv {
|
||||
app.Env[k] = v
|
||||
}
|
||||
|
||||
composeFiles, err := config.GetAppComposeFiles(app.Recipe, app.Env)
|
||||
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
stackName := app.StackName()
|
||||
deployOpts := stack.Deploy{
|
||||
Composefiles: composeFiles,
|
||||
Namespace: stackName,
|
||||
Prune: false,
|
||||
ResolveImage: stack.ResolveImageAlways,
|
||||
Detach: false,
|
||||
}
|
||||
compose, err := config.GetAppComposeConfig(app.Name, deployOpts, app.Env)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
config.ExposeAllEnv(stackName, compose, app.Env)
|
||||
config.SetRecipeLabel(compose, stackName, app.Recipe)
|
||||
config.SetChaosLabel(compose, stackName, internal.Chaos)
|
||||
config.SetChaosVersionLabel(compose, stackName, chosenDowngrade)
|
||||
config.SetUpdateLabel(compose, stackName, app.Env)
|
||||
|
||||
if err := NewVersionOverview(app, deployedVersion, chosenDowngrade, ""); err != nil {
|
||||
logrus.Fatal(err)
|
||||
compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
appPkg.ExposeAllEnv(stackName, compose, app.Env)
|
||||
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
|
||||
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
|
||||
appPkg.SetChaosVersionLabel(compose, stackName, chosenDowngrade)
|
||||
appPkg.SetUpdateLabel(compose, stackName, app.Env)
|
||||
|
||||
chaosVersion := config.CHAOS_DEFAULT
|
||||
if deployMeta.IsChaos {
|
||||
chaosVersion = deployMeta.ChaosVersion
|
||||
}
|
||||
|
||||
// NOTE(d1): no release notes implemented for rolling back
|
||||
if err := internal.NewVersionOverview(
|
||||
app,
|
||||
downgradeWarnMessages,
|
||||
"rollback",
|
||||
deployMeta.Version,
|
||||
chaosVersion,
|
||||
chosenDowngrade,
|
||||
"",
|
||||
); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := stack.RunDeploy(cl, deployOpts, compose, stackName, internal.DontWaitConverge); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
if err := app.WriteRecipeVersion(chosenDowngrade, false); err != nil {
|
||||
log.Fatalf("writing recipe version failed: %s", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// chooseDowngrade prompts the user to choose a downgrade interactively.
|
||||
func chooseDowngrade(
|
||||
availableDowngrades []string,
|
||||
deployMeta stack.DeployMeta,
|
||||
chosenDowngrade *string,
|
||||
) error {
|
||||
msg := fmt.Sprintf("please select a downgrade (version: %s):", deployMeta.Version)
|
||||
|
||||
if deployMeta.IsChaos {
|
||||
chaosVersion := formatter.BoldDirtyDefault(deployMeta.ChaosVersion)
|
||||
|
||||
msg = fmt.Sprintf(
|
||||
"please select a downgrade (version: %s, chaos: %s):",
|
||||
deployMeta.Version,
|
||||
chaosVersion,
|
||||
)
|
||||
}
|
||||
|
||||
prompt := &survey.Select{
|
||||
Message: msg,
|
||||
Options: internal.SortVersionsDesc(availableDowngrades),
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, chosenDowngrade); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateDowngradeVersionArg validates the specific version.
|
||||
func validateDowngradeVersionArg(
|
||||
specificVersion string,
|
||||
app app.App,
|
||||
deployMeta stack.DeployMeta,
|
||||
) error {
|
||||
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
|
||||
if err != nil {
|
||||
return fmt.Errorf("'%s' is not a known version for %s", deployMeta.Version, app.Recipe.Name)
|
||||
}
|
||||
|
||||
parsedSpecificVersion, err := tagcmp.Parse(specificVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("'%s' is not a known version for %s", specificVersion, app.Recipe.Name)
|
||||
}
|
||||
|
||||
if parsedSpecificVersion.IsGreaterThan(parsedDeployedVersion) &&
|
||||
!parsedSpecificVersion.Equals(parsedDeployedVersion) {
|
||||
return fmt.Errorf("%s is not a downgrade for %s?", deployMeta.Version, specificVersion)
|
||||
}
|
||||
|
||||
if parsedSpecificVersion.Equals(parsedDeployedVersion) && !internal.Force {
|
||||
return fmt.Errorf("%s is not a downgrade for %s?", deployMeta.Version, specificVersion)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureDowngradesAvailable ensures that there are available downgrades.
|
||||
func ensureDowngradesAvailable(
|
||||
versions []string,
|
||||
availableDowngrades *[]string,
|
||||
deployMeta stack.DeployMeta,
|
||||
) (bool, error) {
|
||||
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, version := range versions {
|
||||
parsedVersion, err := tagcmp.Parse(version)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if parsedVersion.IsLessThan(parsedDeployedVersion) &&
|
||||
!(parsedVersion.Equals(parsedDeployedVersion)) {
|
||||
*availableDowngrades = append(*availableDowngrades, version)
|
||||
}
|
||||
}
|
||||
|
||||
if len(*availableDowngrades) == 0 && !internal.Force {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
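
The downgrade filtering above rests on coopcloud.tech/tagcmp: every known recipe tag is parsed and kept only if it is strictly older than the deployed version. Here is a rough, self-contained sketch of that comparison, assuming tagcmp.Parse, IsLessThan and Equals behave as they are used in this diff; the version strings are illustrative.

package main

import (
	"fmt"

	"coopcloud.tech/tagcmp"
)

// downgradesFor keeps only versions strictly older than the deployed one,
// mirroring ensureDowngradesAvailable above.
func downgradesFor(deployed string, candidates []string) ([]string, error) {
	parsedDeployed, err := tagcmp.Parse(deployed)
	if err != nil {
		return nil, err
	}

	var downgrades []string
	for _, candidate := range candidates {
		parsed, err := tagcmp.Parse(candidate)
		if err != nil {
			return nil, err
		}
		if parsed.IsLessThan(parsedDeployed) && !parsed.Equals(parsedDeployed) {
			downgrades = append(downgrades, candidate)
		}
	}

	return downgrades, nil
}

func main() {
	out, err := downgradesFor("2.0.0+1.2.3", []string{"1.9.0+1.2.2", "2.0.0+1.2.3", "2.1.0+1.3.0"})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(out) // expected: [1.9.0+1.2.2]
}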
|
||||
|
||||
func init() {
|
||||
AppRollbackCommand.Flags().BoolVarP(
|
||||
&internal.Force,
|
||||
"force",
|
||||
"f",
|
||||
false,
|
||||
"perform action without further prompt",
|
||||
)
|
||||
|
||||
AppRollbackCommand.Flags().BoolVarP(
|
||||
&internal.NoDomainChecks,
|
||||
"no-domain-checks",
|
||||
"D",
|
||||
false,
|
||||
"disable public DNS checks",
|
||||
)
|
||||
|
||||
AppRollbackCommand.Flags().BoolVarP(
|
||||
&internal.DontWaitConverge, "no-converge-checks",
|
||||
"c",
|
||||
false,
|
||||
"disable converge logic checks",
|
||||
)
|
||||
}
|
||||
|
115
cli/app/run.go
@@ -2,102 +2,113 @@ package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
containerPkg "coopcloud.tech/abra/pkg/container"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/upstream/container"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var user string
|
||||
var userFlag = &cli.StringFlag{
|
||||
Name: "user, u",
|
||||
Value: "",
|
||||
Destination: &user,
|
||||
}
|
||||
|
||||
var noTTY bool
|
||||
var noTTYFlag = &cli.BoolFlag{
|
||||
Name: "no-tty, t",
|
||||
Destination: &noTTY,
|
||||
}
|
||||
|
||||
var appRunCommand = cli.Command{
|
||||
Name: "run",
|
||||
var AppRunCommand = &cobra.Command{
|
||||
Use: "run <domain> <service> <cmd> [[args] [flags] | [flags] -- [args]]",
|
||||
Aliases: []string{"r"},
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
noTTYFlag,
|
||||
userFlag,
|
||||
internal.OfflineFlag,
|
||||
Short: "Run a command inside a service container",
|
||||
Example: ` # run <cmd> with args/flags
|
||||
abra app run 1312.net app -- ls -lha
|
||||
|
||||
# run <cmd> without args/flags
|
||||
abra app run 1312.net app bash --user nobody
|
||||
|
||||
# run <cmd> with both kinds of args/flags
|
||||
abra app run 1312.net app --user nobody -- ls -lha`,
|
||||
Args: cobra.MinimumNArgs(3),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.AppNameComplete()
|
||||
case 1:
|
||||
return autocomplete.ServiceNameComplete(args[0])
|
||||
case 2:
|
||||
return autocomplete.CommandNameComplete(args[0])
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
ArgsUsage: "<domain> <service> <args>...",
|
||||
Usage: "Run a command in a service container",
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
|
||||
if len(c.Args()) < 2 {
|
||||
internal.ShowSubcommandHelpAndError(c, errors.New("no <service> provided?"))
|
||||
}
|
||||
|
||||
if len(c.Args()) < 3 {
|
||||
internal.ShowSubcommandHelpAndError(c, errors.New("no <args> provided?"))
|
||||
}
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
serviceName := c.Args().Get(1)
|
||||
serviceName := args[1]
|
||||
stackAndServiceName := fmt.Sprintf("^%s_%s", app.StackName(), serviceName)
|
||||
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("name", stackAndServiceName)
|
||||
|
||||
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, false)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cmd := c.Args()[2:]
|
||||
userCmd := args[2:]
|
||||
execCreateOpts := types.ExecConfig{
|
||||
AttachStderr: true,
|
||||
AttachStdin: true,
|
||||
AttachStdout: true,
|
||||
Cmd: cmd,
|
||||
Cmd: userCmd,
|
||||
Detach: false,
|
||||
Tty: true,
|
||||
}
|
||||
|
||||
if user != "" {
|
||||
execCreateOpts.User = user
|
||||
if runAsUser != "" {
|
||||
execCreateOpts.User = runAsUser
|
||||
}
|
||||
if noTTY {
|
||||
execCreateOpts.Tty = false
|
||||
}
|
||||
|
||||
// FIXME: avoid instantiating a new CLI
|
||||
dcli, err := command.NewDockerCli()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
noTTY bool
|
||||
runAsUser string
|
||||
)
|
||||
|
||||
func init() {
|
||||
AppRunCommand.Flags().BoolVarP(&noTTY,
|
||||
"no-tty",
|
||||
"t",
|
||||
false,
|
||||
"do not request a TTY",
|
||||
)
|
||||
|
||||
AppRunCommand.Flags().StringVarP(
|
||||
&runAsUser,
|
||||
"user",
|
||||
"u",
|
||||
"",
|
||||
"run command as user",
|
||||
)
|
||||
}
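
The run command boils down to building a types.ExecConfig from the parsed flags and handing it to the exec helper. A small sketch of just that flag-to-config step follows; it is not part of this diff, and the example command and user are made up.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// execConfigFor mirrors the flag handling above: a TTY is requested unless
// --no-tty is passed, and --user sets the exec user when non-empty.
func execConfigFor(userCmd []string, runAsUser string, noTTY bool) types.ExecConfig {
	cfg := types.ExecConfig{
		AttachStderr: true,
		AttachStdin:  true,
		AttachStdout: true,
		Cmd:          userCmd,
		Tty:          !noTTY,
	}

	if runAsUser != "" {
		cfg.User = runAsUser
	}

	return cfg
}

func main() {
	cfg := execConfigFor([]string{"ls", "-lha"}, "nobody", false)
	fmt.Printf("%+v\n", cfg)
}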
|
||||
|
@@ -2,245 +2,297 @@ package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/secret"
|
||||
"github.com/docker/docker/api/types"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var allSecrets bool
|
||||
var allSecretsFlag = &cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Destination: &allSecrets,
|
||||
Usage: "Generate all secrets",
|
||||
}
|
||||
|
||||
var rmAllSecrets bool
|
||||
var rmAllSecretsFlag = &cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Destination: &rmAllSecrets,
|
||||
Usage: "Remove all secrets",
|
||||
}
|
||||
|
||||
var appSecretGenerateCommand = cli.Command{
|
||||
Name: "generate",
|
||||
Aliases: []string{"g"},
|
||||
Usage: "Generate secrets",
|
||||
ArgsUsage: "<domain> <secret> <version>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
allSecretsFlag,
|
||||
internal.PassFlag,
|
||||
internal.OfflineFlag,
|
||||
var AppSecretGenerateCommand = &cobra.Command{
|
||||
Use: "generate <domain> [[secret] [version] | --all] [flags]",
|
||||
Aliases: []string{"g"},
|
||||
Short: "Generate secrets",
|
||||
Args: cobra.RangeArgs(1, 3),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.AppNameComplete()
|
||||
case 1:
|
||||
app, err := appPkg.Get(args[0])
|
||||
if err != nil {
|
||||
errMsg := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{errMsg}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
return autocomplete.SecretComplete(app.Recipe.Name)
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if len(args) == 1 && !generateAllSecrets {
|
||||
log.Fatal("missing arguments [secret]/[version] or '--all'")
|
||||
}
|
||||
|
||||
if len(args) > 1 && generateAllSecrets {
|
||||
log.Fatal("cannot use '[secret] [version]' and '--all' together")
|
||||
}
|
||||
|
||||
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
secrets, err := secret.ReadSecretsConfig(app.Path, composeFiles, app.StackName())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !generateAllSecrets {
|
||||
secretName := args[1]
|
||||
secretVersion := args[2]
|
||||
s, ok := secrets[secretName]
|
||||
if !ok {
|
||||
log.Fatalf("%s doesn't exist in the env config?", secretName)
|
||||
}
|
||||
s.Version = secretVersion
|
||||
secrets = map[string]secret.Secret{
|
||||
secretName: s,
|
||||
}
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if len(c.Args()) == 1 && !allSecrets {
|
||||
err := errors.New("missing arguments <secret>/<version> or '--all'")
|
||||
internal.ShowSubcommandHelpAndError(c, err)
|
||||
}
|
||||
|
||||
if c.Args().Get(1) != "" && allSecrets {
|
||||
err := errors.New("cannot use '<secret> <version>' and '--all' together")
|
||||
internal.ShowSubcommandHelpAndError(c, err)
|
||||
}
|
||||
|
||||
secretsToCreate := make(map[string]string)
|
||||
secretEnvVars := secret.ReadSecretEnvVars(app.Env)
|
||||
if allSecrets {
|
||||
secretsToCreate = secretEnvVars
|
||||
} else {
|
||||
secretName := c.Args().Get(1)
|
||||
secretVersion := c.Args().Get(2)
|
||||
matches := false
|
||||
for sec := range secretEnvVars {
|
||||
parsed := secret.ParseSecretEnvVarName(sec)
|
||||
if secretName == parsed {
|
||||
secretsToCreate[sec] = secretVersion
|
||||
matches = true
|
||||
}
|
||||
}
|
||||
|
||||
if !matches {
|
||||
logrus.Fatalf("%s doesn't exist in the env config?", secretName)
|
||||
}
|
||||
}
|
||||
|
||||
secretVals, err := secret.GenerateSecrets(cl, secretsToCreate, app.StackName(), app.Server)
|
||||
secretVals, err := secret.GenerateSecrets(cl, secrets, app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if internal.Pass {
|
||||
if storeInPass {
|
||||
for name, data := range secretVals {
|
||||
if err := secret.PassInsertSecret(data, name, app.Name, app.Server); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(secretVals) == 0 {
|
||||
logrus.Warn("no secrets generated")
|
||||
log.Warn("no secrets generated")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
tableCol := []string{"name", "value"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
for name, val := range secretVals {
|
||||
table.Append([]string{name, val})
|
||||
headers := []string{"NAME", "VALUE"}
|
||||
table, err := formatter.CreateTable()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
table.Render()
|
||||
logrus.Warn("generated secrets are not shown again, please take note of them *now*")
|
||||
|
||||
return nil
|
||||
table.Headers(headers...)
|
||||
|
||||
var rows [][]string
|
||||
for name, val := range secretVals {
|
||||
row := []string{name, val}
|
||||
rows = append(rows, row)
|
||||
table.Row(row...)
|
||||
}
|
||||
|
||||
if internal.MachineReadable {
|
||||
out, err := formatter.ToJSON(headers, rows)
|
||||
if err != nil {
|
||||
log.Fatal("unable to render to JSON: %s", err)
|
||||
}
|
||||
fmt.Println(out)
|
||||
return
|
||||
}
|
||||
|
||||
if err := formatter.PrintTable(table); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Warnf(
|
||||
"generated secrets %s shown again, please take note of them %s",
|
||||
formatter.BoldStyle.Render("NOT"),
|
||||
formatter.BoldStyle.Render("NOW"),
|
||||
)
|
||||
},
|
||||
}
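
With "--machine/-m" the generate command emits JSON built from the same headers and rows it would otherwise render as a table. The stdlib-only sketch below only illustrates that header/row-to-JSON shape; formatter.ToJSON is abra's own helper and may differ in detail, and the example values are made up.

package main

import (
	"encoding/json"
	"fmt"
)

// rowsToJSON pairs each row value with its column header, roughly the shape
// selected by the --machine flag for generated secrets.
func rowsToJSON(headers []string, rows [][]string) (string, error) {
	var records []map[string]string
	for _, row := range rows {
		record := make(map[string]string, len(headers))
		for i, header := range headers {
			if i < len(row) {
				record[header] = row[i]
			}
		}
		records = append(records, record)
	}

	out, err := json.Marshal(records)
	if err != nil {
		return "", err
	}

	return string(out), nil
}

func main() {
	headers := []string{"NAME", "VALUE"}
	rows := [][]string{{"db_pass_v1", "s3cr3t"}}

	out, err := rowsToJSON(headers, rows)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(out) // [{"NAME":"db_pass_v1","VALUE":"s3cr3t"}]
}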
|
||||
|
||||
var appSecretInsertCommand = cli.Command{
|
||||
Name: "insert",
|
||||
var AppSecretInsertCommand = &cobra.Command{
|
||||
Use: "insert <domain> <secret> <version> <data> [flags]",
|
||||
Aliases: []string{"i"},
|
||||
Usage: "Insert secret",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.PassFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
ArgsUsage: "<domain> <secret-name> <version> <data>",
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Description: `
|
||||
This command inserts a secret into an app environment.
|
||||
Short: "Insert secret",
|
||||
Long: `This command inserts a secret into an app environment.
|
||||
|
||||
This can be useful when you want to manually generate secrets for an app
|
||||
environment. Typically, you can let Abra generate them for you on app creation
|
||||
(see "abra app new --secrets" for more).
|
||||
(see "abra app new --secrets/-S" for more).`,
|
||||
Args: cobra.MinimumNArgs(4),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.AppNameComplete()
|
||||
case 1:
|
||||
app, err := appPkg.Get(args[0])
|
||||
if err != nil {
|
||||
errMsg := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{errMsg}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
return autocomplete.SecretComplete(app.Recipe.Name)
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
Example:
|
||||
|
||||
abra app secret insert myapp db_pass v1 mySecretPassword
|
||||
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if len(c.Args()) != 4 {
|
||||
internal.ShowSubcommandHelpAndError(c, errors.New("missing arguments?"))
|
||||
name := args[1]
|
||||
version := args[2]
|
||||
data := args[3]
|
||||
|
||||
if insertFromFile {
|
||||
raw, err := os.ReadFile(data)
|
||||
if err != nil {
|
||||
log.Fatalf("reading secret from file: %s", err)
|
||||
}
|
||||
data = string(raw)
|
||||
}
|
||||
|
||||
name := c.Args().Get(1)
|
||||
version := c.Args().Get(2)
|
||||
data := c.Args().Get(3)
|
||||
if trimInput {
|
||||
data = strings.TrimSpace(data)
|
||||
}
|
||||
|
||||
secretName := fmt.Sprintf("%s_%s_%s", app.StackName(), name, version)
|
||||
if err := client.StoreSecret(cl, secretName, data, app.Server); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Infof("%s successfully stored on server", secretName)
|
||||
log.Infof("%s successfully stored on server", secretName)
|
||||
|
||||
if internal.Pass {
|
||||
if storeInPass {
|
||||
if err := secret.PassInsertSecret(data, name, app.Name, app.Server); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
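
Both insert and remove lean on one naming convention: the Docker secret stored on the server is the stack name, the secret's short name from the recipe, and its version joined with underscores. A tiny sketch of that mapping follows; the domain-to-stack-name step is only an approximation of what app.StackName() does, and the example values are made up.

package main

import (
	"fmt"
	"strings"
)

// remoteSecretName builds the name a secret is stored under on the server,
// matching the fmt.Sprintf("%s_%s_%s", ...) calls above.
func remoteSecretName(stackName, secretName, version string) string {
	return fmt.Sprintf("%s_%s_%s", stackName, secretName, version)
}

// stackNameFromDomain approximates how an app domain becomes a stack name:
// dots are not valid in swarm stack names, so they are replaced.
func stackNameFromDomain(domain string) string {
	return strings.ReplaceAll(domain, ".", "_")
}

func main() {
	stack := stackNameFromDomain("1312.net")
	fmt.Println(remoteSecretName(stack, "db_pass", "v1")) // 1312_net_db_pass_v1
}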
|
||||
|
||||
// secretRm removes a secret.
|
||||
func secretRm(cl *dockerClient.Client, app config.App, secretName, parsed string) error {
|
||||
func secretRm(cl *dockerClient.Client, app appPkg.App, secretName, parsed string) error {
|
||||
if err := cl.SecretRemove(context.Background(), secretName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof("deleted %s successfully from server", secretName)
|
||||
log.Infof("deleted %s successfully from server", secretName)
|
||||
|
||||
if internal.PassRemove {
|
||||
if removeFromPass {
|
||||
if err := secret.PassRmSecret(parsed, app.StackName(), app.Server); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof("deleted %s successfully from local pass store", secretName)
|
||||
log.Infof("deleted %s successfully from local pass store", secretName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var appSecretRmCommand = cli.Command{
|
||||
Name: "remove",
|
||||
var AppSecretRmCommand = &cobra.Command{
|
||||
Use: "remove <domain> [[secret] | --all] [flags]",
|
||||
Aliases: []string{"rm"},
|
||||
Usage: "Remove a secret",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
rmAllSecretsFlag,
|
||||
internal.PassRemoveFlag,
|
||||
internal.OfflineFlag,
|
||||
Short: "Remove a secret",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.AppNameComplete()
|
||||
case 1:
|
||||
if !rmAllSecrets {
|
||||
app, err := appPkg.Get(args[0])
|
||||
if err != nil {
|
||||
errMsg := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{errMsg}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
return autocomplete.SecretComplete(app.Recipe.Name)
|
||||
}
|
||||
return nil, cobra.ShellCompDirectiveDefault
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
ArgsUsage: "<domain> [<secret-name>]",
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Description: `
|
||||
This command removes app secrets.
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
Example:
|
||||
|
||||
abra app secret remove myapp db_pass
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
secrets := secret.ReadSecretEnvVars(app.Env)
|
||||
|
||||
if c.Args().Get(1) != "" && rmAllSecrets {
|
||||
internal.ShowSubcommandHelpAndError(c, errors.New("cannot use '<secret-name>' and '--all' together"))
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if c.Args().Get(1) == "" && !rmAllSecrets {
|
||||
internal.ShowSubcommandHelpAndError(c, errors.New("no secret(s) specified?"))
|
||||
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
secrets, err := secret.ReadSecretsConfig(app.Path, composeFiles, app.StackName())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if len(args) == 2 && rmAllSecrets {
|
||||
log.Fatal("cannot use [secret] and --all/-a together")
|
||||
}
|
||||
|
||||
if len(args) != 2 && !rmAllSecrets {
|
||||
log.Fatal("no secret(s) specified?")
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
filters, err := app.Filters(false, false)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: filters})
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
remoteSecretNames := make(map[string]bool)
|
||||
@@ -248,120 +300,230 @@ Example:
|
||||
remoteSecretNames[cont.Spec.Annotations.Name] = true
|
||||
}
|
||||
|
||||
var secretToRm string
|
||||
if len(args) == 2 {
|
||||
secretToRm = args[1]
|
||||
}
|
||||
|
||||
match := false
|
||||
secretToRm := c.Args().Get(1)
|
||||
for sec := range secrets {
|
||||
secretName := secret.ParseSecretEnvVarName(sec)
|
||||
|
||||
secVal, err := secret.ParseSecretEnvVarValue(secrets[sec])
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, secVal.Version)
|
||||
for secretName, val := range secrets {
|
||||
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, val.Version)
|
||||
if _, ok := remoteSecretNames[secretRemoteName]; ok {
|
||||
if secretToRm != "" {
|
||||
if secretName == secretToRm {
|
||||
if err := secretRm(cl, app, secretRemoteName, secretName); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
return
|
||||
}
|
||||
} else {
|
||||
match = true
|
||||
|
||||
if err := secretRm(cl, app, secretRemoteName, secretName); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !match && secretToRm != "" {
|
||||
logrus.Fatalf("%s doesn't exist on server?", secretToRm)
|
||||
log.Fatalf("%s doesn't exist on server?", secretToRm)
|
||||
}
|
||||
|
||||
if !match {
|
||||
logrus.Fatal("no secrets to remove?")
|
||||
log.Fatal("no secrets to remove?")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var appSecretLsCommand = cli.Command{
|
||||
Name: "list",
|
||||
var AppSecretLsCommand = &cobra.Command{
|
||||
Use: "list <domain>",
|
||||
Aliases: []string{"ls"},
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.OfflineFlag,
|
||||
Short: "List all secrets",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.AppNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Usage: "List all secrets",
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
secrets := secret.ReadSecretEnvVars(app.Env)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
tableCol := []string{"Name", "Version", "Generated Name", "Created On Server"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
filters, err := app.Filters(false, false)
|
||||
headers := []string{"NAME", "VERSION", "GENERATED NAME", "CREATED ON SERVER"}
|
||||
table, err := formatter.CreateTable()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: filters})
|
||||
table.Headers(headers...)
|
||||
|
||||
secStats, err := secret.PollSecretsStatus(cl, app)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
remoteSecretNames := make(map[string]bool)
|
||||
for _, cont := range secretList {
|
||||
remoteSecretNames[cont.Spec.Annotations.Name] = true
|
||||
}
|
||||
|
||||
for sec := range secrets {
|
||||
createdRemote := false
|
||||
secretName := secret.ParseSecretEnvVarName(sec)
|
||||
secVal, err := secret.ParseSecretEnvVarValue(secrets[sec])
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
var rows [][]string
|
||||
for _, secStat := range secStats {
|
||||
row := []string{
|
||||
secStat.LocalName,
|
||||
secStat.Version,
|
||||
secStat.RemoteName,
|
||||
strconv.FormatBool(secStat.CreatedOnRemote),
|
||||
}
|
||||
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, secVal.Version)
|
||||
if _, ok := remoteSecretNames[secretRemoteName]; ok {
|
||||
createdRemote = true
|
||||
|
||||
rows = append(rows, row)
|
||||
table.Row(row...)
|
||||
}
|
||||
|
||||
if len(rows) > 0 {
|
||||
if internal.MachineReadable {
|
||||
out, err := formatter.ToJSON(headers, rows)
|
||||
if err != nil {
|
||||
log.Fatal("unable to render to JSON: %s", err)
|
||||
}
|
||||
fmt.Println(out)
|
||||
return
|
||||
}
|
||||
tableRow := []string{secretName, secVal.Version, secretRemoteName, strconv.FormatBool(createdRemote)}
|
||||
table.Append(tableRow)
|
||||
|
||||
if err := formatter.PrintTable(table); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if table.NumLines() > 0 {
|
||||
table.Render()
|
||||
} else {
|
||||
logrus.Warnf("no secrets stored for %s", app.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
}
|
||||
|
||||
var appSecretCommand = cli.Command{
|
||||
Name: "secret",
|
||||
Aliases: []string{"s"},
|
||||
Usage: "Manage app secrets",
|
||||
ArgsUsage: "<domain>",
|
||||
Subcommands: []cli.Command{
|
||||
appSecretGenerateCommand,
|
||||
appSecretInsertCommand,
|
||||
appSecretRmCommand,
|
||||
appSecretLsCommand,
|
||||
log.Warnf("no secrets stored for %s", app.Name)
|
||||
},
|
||||
}
|
||||
|
||||
var AppSecretCommand = &cobra.Command{
|
||||
Use: "secret [cmd] [args] [flags]",
|
||||
Aliases: []string{"s"},
|
||||
Short: "Manage app secrets",
|
||||
}
|
||||
|
||||
var (
|
||||
storeInPass bool
|
||||
insertFromFile bool
|
||||
trimInput bool
|
||||
rmAllSecrets bool
|
||||
generateAllSecrets bool
|
||||
removeFromPass bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
AppSecretGenerateCommand.Flags().BoolVarP(
|
||||
&internal.MachineReadable,
|
||||
"machine",
|
||||
"m",
|
||||
false,
|
||||
"print machine-readable output",
|
||||
)
|
||||
|
||||
AppSecretGenerateCommand.Flags().BoolVarP(
|
||||
&storeInPass,
|
||||
"pass",
|
||||
"p",
|
||||
false,
|
||||
"store generated secrets in a local pass store",
|
||||
)
|
||||
|
||||
AppSecretGenerateCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
|
||||
AppSecretGenerateCommand.Flags().BoolVarP(
|
||||
&generateAllSecrets,
|
||||
"all",
|
||||
"a",
|
||||
false,
|
||||
"generate all secrets",
|
||||
)
|
||||
|
||||
AppSecretInsertCommand.Flags().BoolVarP(
|
||||
&storeInPass,
|
||||
"pass",
|
||||
"p",
|
||||
false,
|
||||
"store generated secrets in a local pass store",
|
||||
)
|
||||
|
||||
AppSecretInsertCommand.Flags().BoolVarP(
|
||||
&insertFromFile,
|
||||
"file",
|
||||
"f",
|
||||
false,
|
||||
"treat input as a file",
|
||||
)
|
||||
|
||||
AppSecretInsertCommand.Flags().BoolVarP(
|
||||
&trimInput,
|
||||
"trim",
|
||||
"t",
|
||||
false,
|
||||
"trim input",
|
||||
)
|
||||
|
||||
AppSecretInsertCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
|
||||
AppSecretRmCommand.Flags().BoolVarP(
|
||||
&rmAllSecrets,
|
||||
"all",
|
||||
"a",
|
||||
false,
|
||||
"remove all secrets",
|
||||
)
|
||||
|
||||
AppSecretRmCommand.Flags().BoolVarP(
|
||||
&removeFromPass,
|
||||
"pass",
|
||||
"p",
|
||||
false,
|
||||
"remove generated secrets from a local pass store",
|
||||
)
|
||||
|
||||
AppSecretRmCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
|
||||
AppSecretLsCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
|
||||
AppSecretLsCommand.Flags().BoolVarP(
|
||||
&internal.MachineReadable,
|
||||
"machine",
|
||||
"m",
|
||||
false,
|
||||
"print machine-readable output",
|
||||
)
|
||||
}
|
||||
|
@@ -9,56 +9,64 @@ import (
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/service"
|
||||
stack "coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
containerTypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var appServicesCommand = cli.Command{
|
||||
Name: "services",
|
||||
Aliases: []string{"sr"},
|
||||
Usage: "Display all services of an app",
|
||||
ArgsUsage: "<domain>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.OfflineFlag,
|
||||
var AppServicesCommand = &cobra.Command{
|
||||
Use: "services <domain> [flags]",
|
||||
Aliases: []string{"sr"},
|
||||
Short: "Display all services of an app",
|
||||
Args: cobra.ExactArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.AppNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
|
||||
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !isDeployed {
|
||||
logrus.Fatalf("%s is not deployed?", app.Name)
|
||||
if !deployMeta.IsDeployed {
|
||||
log.Fatalf("%s is not deployed?", app.Name)
|
||||
}
|
||||
|
||||
filters, err := app.Filters(true, true)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters})
|
||||
containers, err := cl.ContainerList(context.Background(), containerTypes.ListOptions{Filters: filters})
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
tableCol := []string{"service name", "image"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
table, err := formatter.CreateTable()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
headers := []string{"SERVICE (SHORT)", "SERVICE (LONG)"}
|
||||
table.Headers(headers...)
|
||||
|
||||
var rows [][]string
|
||||
for _, container := range containers {
|
||||
var containerNames []string
|
||||
for _, containerName := range container.Names {
|
||||
@@ -69,15 +77,20 @@ var appServicesCommand = cli.Command{
|
||||
serviceShortName := service.ContainerToServiceName(container.Names, app.StackName())
|
||||
serviceLongName := fmt.Sprintf("%s_%s", app.StackName(), serviceShortName)
|
||||
|
||||
tableRow := []string{
|
||||
row := []string{
|
||||
serviceShortName,
|
||||
serviceLongName,
|
||||
formatter.RemoveSha(container.Image),
|
||||
}
|
||||
table.Append(tableRow)
|
||||
|
||||
rows = append(rows, row)
|
||||
}
|
||||
|
||||
table.Render()
|
||||
table.Rows(rows...)
|
||||
|
||||
return nil
|
||||
if len(rows) > 0 {
|
||||
if err := formatter.PrintTable(table); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
@@ -3,53 +3,103 @@ package app
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
stack "coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var prune bool
|
||||
var AppUndeployCommand = &cobra.Command{
|
||||
Use: "undeploy <domain> [flags]",
|
||||
Aliases: []string{"un"},
|
||||
Short: "Undeploy an app",
|
||||
Long: `This does not destroy any application data.
|
||||
|
||||
var pruneFlag = &cli.BoolFlag{
|
||||
Name: "prune, p",
|
||||
Destination: &prune,
|
||||
Usage: "Prunes unused containers, networks, and dangling images for an app",
|
||||
However, you should remain vigilant, as your swarm installation will consider
|
||||
any previously attached volumes as eligible for pruning once undeployed.
|
||||
|
||||
Passing "--prune/-p" does not remove those volumes.`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.AppNameComplete()
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
stackName := app.StackName()
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Debugf("checking whether %s is already deployed", stackName)
|
||||
|
||||
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !deployMeta.IsDeployed {
|
||||
log.Fatalf("%s is not deployed?", app.Name)
|
||||
}
|
||||
|
||||
chaosVersion := config.CHAOS_DEFAULT
|
||||
if deployMeta.IsChaos {
|
||||
chaosVersion = deployMeta.ChaosVersion
|
||||
}
|
||||
|
||||
toWriteVersion := deployMeta.Version
|
||||
if deployMeta.IsChaos {
|
||||
toWriteVersion = chaosVersion
|
||||
}
|
||||
|
||||
if err := internal.UndeployOverview(
|
||||
app,
|
||||
deployMeta.Version,
|
||||
chaosVersion,
|
||||
toWriteVersion,
|
||||
); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
rmOpts := stack.Remove{
|
||||
Namespaces: []string{stackName},
|
||||
Detach: false,
|
||||
}
|
||||
if err := stack.RunRemove(context.Background(), cl, rmOpts); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if prune {
|
||||
if err := pruneApp(cl, app); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := app.WriteRecipeVersion(toWriteVersion, false); err != nil {
|
||||
log.Fatalf("writing recipe version failed: %s", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// pruneApp runs the equivalent of a "docker system prune" but only filtering
|
||||
// against resources connected with the app deployment. It is not a system wide
|
||||
// prune. Volumes are not pruned to avoid unwanted data loss.
|
||||
func pruneApp(c *cli.Context, cl *dockerClient.Client, app config.App) error {
|
||||
func pruneApp(cl *dockerClient.Client, app appPkg.App) error {
|
||||
stackName := app.StackName()
|
||||
ctx := context.Background()
|
||||
|
||||
for {
|
||||
logrus.Debugf("polling for %s stack, waiting to be undeployed...", stackName)
|
||||
|
||||
services, err := stack.GetStackServices(ctx, cl, stackName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(services) == 0 {
|
||||
logrus.Debugf("%s undeployed, moving on with pruning logic", stackName)
|
||||
time.Sleep(time.Second) // give runtime more time to tear down related state
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
pruneFilters := filters.NewArgs()
|
||||
stackSearch := fmt.Sprintf("%s*", stackName)
|
||||
pruneFilters.Add("label", stackSearch)
|
||||
@@ -59,14 +109,14 @@ func pruneApp(c *cli.Context, cl *dockerClient.Client, app config.App) error {
|
||||
}
|
||||
|
||||
cntSpaceReclaimed := formatter.ByteCountSI(cr.SpaceReclaimed)
|
||||
logrus.Infof("containers pruned: %d; space reclaimed: %s", len(cr.ContainersDeleted), cntSpaceReclaimed)
|
||||
log.Infof("containers pruned: %d; space reclaimed: %s", len(cr.ContainersDeleted), cntSpaceReclaimed)
|
||||
|
||||
nr, err := cl.NetworksPrune(ctx, pruneFilters)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof("networks pruned: %d", len(nr.NetworksDeleted))
|
||||
log.Infof("networks pruned: %d", len(nr.NetworksDeleted))
|
||||
|
||||
ir, err := cl.ImagesPrune(ctx, pruneFilters)
|
||||
if err != nil {
|
||||
@@ -74,68 +124,21 @@ func pruneApp(c *cli.Context, cl *dockerClient.Client, app config.App) error {
|
||||
}
|
||||
|
||||
imgSpaceReclaimed := formatter.ByteCountSI(ir.SpaceReclaimed)
|
||||
logrus.Infof("images pruned: %d; space reclaimed: %s", len(ir.ImagesDeleted), imgSpaceReclaimed)
|
||||
log.Infof("images pruned: %d; space reclaimed: %s", len(ir.ImagesDeleted), imgSpaceReclaimed)
|
||||
|
||||
return nil
|
||||
}
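
pruneApp above only ever prunes containers, networks and images whose labels match the app's stack; volumes are deliberately left alone so an undeploy never destroys app data. Below is a condensed, self-contained sketch of that label-filtered pruning against the Docker API, not part of this diff; error handling is trimmed and the stack name is illustrative.

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	dockerClient "github.com/docker/docker/client"
)

func main() {
	cl, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv, dockerClient.WithAPIVersionNegotiation())
	if err != nil {
		fmt.Println(err)
		return
	}

	ctx := context.Background()

	// Only resources labelled for this stack are considered; note that
	// VolumesPrune is never called, so app data survives an undeploy.
	pruneFilters := filters.NewArgs()
	pruneFilters.Add("label", "my_app_example_com*")

	cr, err := cl.ContainersPrune(ctx, pruneFilters)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("containers pruned: %d\n", len(cr.ContainersDeleted))

	nr, err := cl.NetworksPrune(ctx, pruneFilters)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("networks pruned: %d\n", len(nr.NetworksDeleted))

	ir, err := cl.ImagesPrune(ctx, pruneFilters)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("images pruned: %d\n", len(ir.ImagesDeleted))
}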
|
||||
|
||||
var appUndeployCommand = cli.Command{
|
||||
Name: "undeploy",
|
||||
Aliases: []string{"un"},
|
||||
ArgsUsage: "<domain>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
pruneFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Usage: "Undeploy an app",
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Description: `
|
||||
This does not destroy any of the application data.
|
||||
var (
|
||||
prune bool
|
||||
)
|
||||
|
||||
However, you should remain vigilant, as your swarm installation will consider
|
||||
any previously attached volumes as eligible for pruning once undeployed.
|
||||
|
||||
Passing "-p/--prune" does not remove those volumes.
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
stackName := app.StackName()
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Debugf("checking whether %s is already deployed", stackName)
|
||||
|
||||
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if !isDeployed {
|
||||
logrus.Fatalf("%s is not deployed?", app.Name)
|
||||
}
|
||||
|
||||
if err := DeployOverview(app, deployedVersion, "continue with undeploy?"); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
rmOpts := stack.Remove{Namespaces: []string{app.StackName()}}
|
||||
if err := stack.RunRemove(context.Background(), cl, rmOpts); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if prune {
|
||||
if err := pruneApp(c, cl, app); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
func init() {
|
||||
AppUndeployCommand.Flags().BoolVarP(
|
||||
&prune,
|
||||
"prune",
|
||||
"p",
|
||||
false,
|
||||
"prune unused containers, networks, and dangling images",
|
||||
)
|
||||
}
|
||||
|
@@ -5,225 +5,429 @@ import (
|
||||
"fmt"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/app"
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/envfile"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/lint"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
stack "coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"coopcloud.tech/tagcmp"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var appUpgradeCommand = cli.Command{
|
||||
Name: "upgrade",
|
||||
Aliases: []string{"up"},
|
||||
Usage: "Upgrade an app",
|
||||
ArgsUsage: "<domain>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.ForceFlag,
|
||||
internal.ChaosFlag,
|
||||
internal.NoDomainChecksFlag,
|
||||
internal.DontWaitConvergeFlag,
|
||||
internal.OfflineFlag,
|
||||
var AppUpgradeCommand = &cobra.Command{
|
||||
Use: "upgrade <domain> [version] [flags]",
|
||||
Aliases: []string{"up"},
|
||||
Short: "Upgrade an app",
|
||||
Long: `Upgrade an app.
|
||||
|
||||
Unlike "abra app deploy", chaos operations are not supported here. Only recipe
|
||||
versions are supported values for "[version]".
|
||||
|
||||
It is possible to "--force/-f" an upgrade if you want to re-deploy a specific
|
||||
version.
|
||||
|
||||
Only the deployed version is consulted when trying to determine what upgrades
|
||||
are available. The live deployment version is the "source of truth" in this
|
||||
case. The stored .env version is not consulted.
|
||||
|
||||
An upgrade can be destructive, please ensure you have a copy of your app data
|
||||
beforehand. See "abra app backup" for more.`,
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.AppNameComplete()
|
||||
case 1:
|
||||
app, err := appPkg.Get(args[0])
|
||||
if err != nil {
|
||||
errMsg := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{errMsg}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
return autocomplete.RecipeVersionComplete(app.Recipe.Name)
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Description: `
|
||||
Upgrade an app. You can use it to choose and roll out a new upgrade to an
|
||||
existing app.
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var (
|
||||
upgradeWarnMessages []string
|
||||
chosenUpgrade string
|
||||
availableUpgrades []string
|
||||
upgradeReleaseNotes string
|
||||
)
|
||||
|
||||
This command specifically supports incrementing the version of running apps, as
|
||||
opposed to "abra app deploy <domain>" which will not change the version of a
|
||||
deployed app.
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
You may pass "--force/-f" to upgrade to the same version again. This can be
|
||||
useful if the container runtime has gotten into a weird state.
|
||||
|
||||
This action could be destructive, please ensure you have a copy of your app
|
||||
data beforehand.
|
||||
|
||||
Chaos mode ("--chaos") will deploy your local checkout of a recipe as-is,
|
||||
including unstaged changes and can be useful for live hacking and testing new
|
||||
recipes.
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
stackName := app.StackName()
|
||||
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !internal.Chaos {
|
||||
if err := recipe.EnsureUpToDate(app.Recipe, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
r, err := recipe.Get(app.Recipe, conf)
|
||||
deployMeta, err := ensureDeployed(cl, app)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := lint.LintForErrors(r); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if err := lint.LintForErrors(app.Recipe); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Debugf("checking whether %s is already deployed", stackName)
|
||||
|
||||
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
|
||||
versions, err := app.Recipe.Tags()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !isDeployed {
|
||||
logrus.Fatalf("%s is not deployed?", app.Name)
|
||||
}
|
||||
|
||||
catl, err := recipe.ReadRecipeCatalogue(conf)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
versions, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if len(versions) == 0 && !internal.Chaos {
|
||||
logrus.Fatalf("no published releases for %s in the recipe catalogue?", app.Recipe)
|
||||
}
|
||||
|
||||
var availableUpgrades []string
|
||||
if deployedVersion == "unknown" {
|
||||
// NOTE(d1): we've no idea what the live deployment version is, so every
|
||||
// possible upgrade can be shown. it's up to the user to make the choice
|
||||
if deployMeta.Version == config.UNKNOWN_DEFAULT {
|
||||
availableUpgrades = versions
|
||||
logrus.Warnf("failed to determine version of deployed %s", app.Name)
|
||||
}
|
||||
|
||||
if deployedVersion != "unknown" && !internal.Chaos {
|
||||
for _, version := range versions {
|
||||
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
parsedVersion, err := tagcmp.Parse(version)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
if parsedVersion.IsGreaterThan(parsedDeployedVersion) {
|
||||
availableUpgrades = append(availableUpgrades, version)
|
||||
}
|
||||
if len(args) == 2 && args[1] != "" {
|
||||
chosenUpgrade = args[1]
|
||||
|
||||
if err := validateUpgradeVersionArg(chosenUpgrade, app, deployMeta); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if len(availableUpgrades) == 0 && !internal.Force {
|
||||
logrus.Infof("no available upgrades, you're on latest (%s) ✌️", deployedVersion)
|
||||
return nil
|
||||
}
|
||||
availableUpgrades = append(availableUpgrades, chosenUpgrade)
|
||||
}
|
||||
|
||||
var chosenUpgrade string
|
||||
if len(availableUpgrades) > 0 && !internal.Chaos {
|
||||
if internal.Force || internal.NoInput {
|
||||
chosenUpgrade = availableUpgrades[len(availableUpgrades)-1]
|
||||
logrus.Debugf("choosing %s as version to upgrade to", chosenUpgrade)
|
||||
} else {
|
||||
prompt := &survey.Select{
|
||||
Message: fmt.Sprintf("Please select an upgrade (current version: %s):", deployedVersion),
|
||||
Options: internal.ReverseStringList(availableUpgrades),
|
||||
}
|
||||
if err := survey.AskOne(prompt, &chosenUpgrade); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if internal.Force && chosenUpgrade == "" {
|
||||
logrus.Warnf("%s is already upgraded to latest but continuing (--force/--chaos)", app.Name)
|
||||
chosenUpgrade = deployedVersion
|
||||
}
|
||||
|
||||
// if release notes written after git tag published, read them before we
|
||||
// check out the tag and then they'll appear to be missing. this covers
|
||||
// when we obviously will forget to write release notes before publishing
|
||||
releaseNotes, err := GetReleaseNotes(app.Recipe, chosenUpgrade)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !internal.Chaos {
|
||||
if err := recipe.EnsureVersion(app.Recipe, chosenUpgrade); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if internal.Chaos {
|
||||
logrus.Warn("chaos mode engaged")
|
||||
var err error
|
||||
chosenUpgrade, err = recipe.ChaosVersion(app.Recipe)
|
||||
if deployMeta.Version != config.UNKNOWN_DEFAULT && chosenUpgrade == "" {
|
||||
upgradeAvailable, err := ensureUpgradesAvailable(versions, &availableUpgrades, deployMeta)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !upgradeAvailable {
|
||||
log.Info("no available upgrades")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
|
||||
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
|
||||
if internal.Force || internal.NoInput || chosenUpgrade != "" {
|
||||
if len(availableUpgrades) > 0 {
|
||||
chosenUpgrade = availableUpgrades[len(availableUpgrades)-1]
|
||||
}
|
||||
} else {
|
||||
if err := chooseUpgrade(availableUpgrades, deployMeta, &chosenUpgrade); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if internal.Force &&
|
||||
chosenUpgrade == "" &&
|
||||
deployMeta.Version != config.UNKNOWN_DEFAULT {
|
||||
chosenUpgrade = deployMeta.Version
|
||||
}
|
||||
|
||||
if chosenUpgrade == "" {
|
||||
log.Fatal("unknown deployed version, unable to upgrade")
|
||||
}
|
||||
|
||||
log.Debugf("choosing %s as version to upgrade", chosenUpgrade)
|
||||
|
||||
// NOTE(d1): if release notes written after git tag published, read them
|
||||
// before we check out the tag and then they'll appear to be missing. this
|
||||
// covers when we obviously will forget to write release notes before
|
||||
// publishing
|
||||
if err := getReleaseNotes(app, versions, chosenUpgrade, deployMeta, &upgradeReleaseNotes); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := app.Recipe.EnsureVersion(chosenUpgrade); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
abraShEnv, err := envfile.ReadAbraShEnvVars(app.Recipe.AbraShPath)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
for k, v := range abraShEnv {
|
||||
app.Env[k] = v
|
||||
}
|
||||
|
||||
composeFiles, err := config.GetAppComposeFiles(app.Recipe, app.Env)
|
||||
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
stackName := app.StackName()
|
||||
deployOpts := stack.Deploy{
|
||||
Composefiles: composeFiles,
|
||||
Namespace: stackName,
|
||||
Prune: false,
|
||||
ResolveImage: stack.ResolveImageAlways,
|
||||
}
|
||||
compose, err := config.GetAppComposeConfig(app.Name, deployOpts, app.Env)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
config.ExposeAllEnv(stackName, compose, app.Env)
|
||||
config.SetRecipeLabel(compose, stackName, app.Recipe)
|
||||
config.SetChaosLabel(compose, stackName, internal.Chaos)
|
||||
config.SetChaosVersionLabel(compose, stackName, chosenUpgrade)
|
||||
config.SetUpdateLabel(compose, stackName, app.Env)
|
||||
|
||||
if err := NewVersionOverview(app, deployedVersion, chosenUpgrade, releaseNotes); err != nil {
|
||||
logrus.Fatal(err)
|
||||
Detach: false,
|
||||
}
|
||||
|
||||
stack.WaitTimeout, err = config.GetTimeoutFromLabel(compose, stackName)
|
||||
compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
logrus.Debugf("set waiting timeout to %d s", stack.WaitTimeout)
|
||||
|
||||
appPkg.ExposeAllEnv(stackName, compose, app.Env)
|
||||
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
|
||||
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
|
||||
appPkg.SetChaosVersionLabel(compose, stackName, chosenUpgrade)
|
||||
appPkg.SetUpdateLabel(compose, stackName, app.Env)
|
||||
|
||||
envVars, err := appPkg.CheckEnv(app)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, envVar := range envVars {
|
||||
if !envVar.Present {
|
||||
upgradeWarnMessages = append(upgradeWarnMessages,
|
||||
fmt.Sprintf("%s missing from %s.env", envVar.Name, app.Domain),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if showReleaseNotes {
|
||||
fmt.Print(upgradeReleaseNotes)
|
||||
return
|
||||
}
|
||||
|
||||
chaosVersion := config.CHAOS_DEFAULT
|
||||
if deployMeta.IsChaos {
|
||||
chaosVersion = deployMeta.ChaosVersion
|
||||
|
||||
if deployMeta.ChaosVersion == "" {
|
||||
chaosVersion = config.UNKNOWN_DEFAULT
|
||||
}
|
||||
}
|
||||
|
||||
if err := internal.NewVersionOverview(
|
||||
app,
|
||||
upgradeWarnMessages,
|
||||
"upgrade",
|
||||
deployMeta.Version,
|
||||
chaosVersion,
|
||||
chosenUpgrade,
|
||||
upgradeReleaseNotes,
|
||||
); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
stack.WaitTimeout, err = appPkg.GetTimeoutFromLabel(compose, stackName)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Debugf("set waiting timeout to %d second(s)", stack.WaitTimeout)
|
||||
|
||||
if err := stack.RunDeploy(cl, deployOpts, compose, stackName, internal.DontWaitConverge); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
postDeployCmds, ok := app.Env["POST_UPGRADE_CMDS"]
|
||||
if ok && !internal.DontWaitConverge {
|
||||
logrus.Debugf("run the following post-deploy commands: %s", postDeployCmds)
|
||||
if err := PostCmds(cl, app, postDeployCmds); err != nil {
|
||||
logrus.Fatalf("attempting to run post deploy commands, saw: %s", err)
|
||||
log.Debugf("run the following post-deploy commands: %s", postDeployCmds)
|
||||
|
||||
if err := internal.PostCmds(cl, app, postDeployCmds); err != nil {
|
||||
log.Fatalf("attempting to run post deploy commands, saw: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
if err := app.WriteRecipeVersion(chosenUpgrade, false); err != nil {
|
||||
log.Fatalf("writing recipe version failed: %s", err)
|
||||
}
|
||||
},
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
}
|
||||
|
||||
// chooseUpgrade prompts the user to choose an upgrade interactively.
|
||||
func chooseUpgrade(
|
||||
availableUpgrades []string,
|
||||
deployMeta stack.DeployMeta,
|
||||
chosenUpgrade *string,
|
||||
) error {
|
||||
msg := fmt.Sprintf("please select an upgrade (version: %s):", deployMeta.Version)
|
||||
|
||||
if deployMeta.IsChaos {
|
||||
chaosVersion := formatter.BoldDirtyDefault(deployMeta.ChaosVersion)
|
||||
|
||||
msg = fmt.Sprintf(
|
||||
"please select an upgrade (version: %s, chaos: %s):",
|
||||
deployMeta.Version,
|
||||
chaosVersion,
|
||||
)
|
||||
}
|
||||
|
||||
prompt := &survey.Select{
|
||||
Message: msg,
|
||||
Options: internal.SortVersionsDesc(availableUpgrades),
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, chosenUpgrade); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
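chooseUpgrade sorts its options with internal.SortVersionsDesc, which is not shown in this diff. A sketch of what a newest-first sort over recipe tags could look like using the tagcmp parser already imported in this file (the real helper may handle unparseable tags differently):

// Assumption: sort version strings newest-first via tagcmp comparisons.
package internal

import (
	"sort"

	"coopcloud.tech/tagcmp"
)

func sortVersionsDesc(versions []string) []string {
	sorted := append([]string(nil), versions...)

	sort.Slice(sorted, func(i, j int) bool {
		vi, errI := tagcmp.Parse(sorted[i])
		vj, errJ := tagcmp.Parse(sorted[j])
		if errI != nil || errJ != nil {
			// fall back to plain string ordering for unparseable tags
			return sorted[i] > sorted[j]
		}
		return vi.IsGreaterThan(vj)
	})

	return sorted
}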
|
||||
|
||||
func getReleaseNotes(
|
||||
app app.App,
|
||||
versions []string,
|
||||
chosenUpgrade string,
|
||||
deployMeta stack.DeployMeta,
|
||||
upgradeReleaseNotes *string,
|
||||
) error {
|
||||
parsedChosenUpgrade, err := tagcmp.Parse(chosenUpgrade)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, version := range internal.SortVersionsDesc(versions) {
|
||||
parsedVersion, err := tagcmp.Parse(version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if parsedVersion.IsGreaterThan(parsedDeployedVersion) &&
|
||||
parsedVersion.IsLessThan(parsedChosenUpgrade) {
|
||||
note, err := app.Recipe.GetReleaseNotes(version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if note != "" {
|
||||
*upgradeReleaseNotes += fmt.Sprintf("%s\n", note)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
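getReleaseNotes above delegates to app.Recipe.GetReleaseNotes, which is defined elsewhere. Co-op Cloud recipes conventionally keep per-version release notes as plain-text files under the recipe's release/ directory; a sketch under that assumption (the path layout and the behaviour on a missing file are assumptions, not taken from this diff):

// Assumption: notes live at <recipe dir>/release/<version>; a missing
// file simply means "no notes for this version".
package recipe

import (
	"errors"
	"os"
	"path/filepath"
)

func getReleaseNotes(recipeDir, version string) (string, error) {
	p := filepath.Join(recipeDir, "release", version)

	data, err := os.ReadFile(p)
	if errors.Is(err, os.ErrNotExist) {
		return "", nil
	}
	if err != nil {
		return "", err
	}

	return string(data), nil
}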
|
||||
|
||||
// ensureUpgradesAvailable ensures that there are available upgrades.
|
||||
func ensureUpgradesAvailable(
|
||||
versions []string,
|
||||
availableUpgrades *[]string,
|
||||
deployMeta stack.DeployMeta,
|
||||
) (bool, error) {
|
||||
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, version := range versions {
|
||||
parsedVersion, err := tagcmp.Parse(version)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if parsedVersion.IsGreaterThan(parsedDeployedVersion) &&
|
||||
!(parsedVersion.Equals(parsedDeployedVersion)) {
|
||||
*availableUpgrades = append(*availableUpgrades, version)
|
||||
}
|
||||
}
|
||||
|
||||
if len(*availableUpgrades) == 0 && !internal.Force {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// validateUpgradeVersionArg validates the specific version.
|
||||
func validateUpgradeVersionArg(
|
||||
specificVersion string,
|
||||
app app.App,
|
||||
deployMeta stack.DeployMeta,
|
||||
) error {
|
||||
parsedSpecificVersion, err := tagcmp.Parse(specificVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("'%s' is not a known version for %s", specificVersion, app.Recipe.Name)
|
||||
}
|
||||
|
||||
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if parsedSpecificVersion.IsLessThan(parsedDeployedVersion) &&
|
||||
!parsedSpecificVersion.Equals(parsedDeployedVersion) {
|
||||
return fmt.Errorf("%s is not an upgrade for %s?", deployMeta.Version, specificVersion)
|
||||
}
|
||||
|
||||
if parsedSpecificVersion.Equals(parsedDeployedVersion) && !internal.Force {
|
||||
return fmt.Errorf("%s is not an upgrade for %s?", deployMeta.Version, specificVersion)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureDeployed ensures the app is deployed and if so, returns deployment
|
||||
// meta info.
|
||||
func ensureDeployed(cl *dockerClient.Client, app app.App) (stack.DeployMeta, error) {
|
||||
log.Debugf("checking whether %s is already deployed", app.StackName())
|
||||
|
||||
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
|
||||
if err != nil {
|
||||
return stack.DeployMeta{}, err
|
||||
}
|
||||
|
||||
if !deployMeta.IsDeployed {
|
||||
return stack.DeployMeta{}, fmt.Errorf("%s is not deployed?", app.Name)
|
||||
}
|
||||
|
||||
return deployMeta, nil
|
||||
}
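stack.IsDeployed comes from pkg/upstream/stack and is not part of this diff. As a rough orientation, a deployment check can be sketched by listing swarm services that carry the stack's namespace label; how abra additionally derives Version/ChaosVersion from service labels is not shown here, so treat this as an assumption:

// Hypothetical: a stack counts as deployed when at least one swarm
// service carries its com.docker.stack.namespace label.
package stackcheck

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	dockerClient "github.com/docker/docker/client"
)

func isStackDeployed(ctx context.Context, cl *dockerClient.Client, stackName string) (bool, error) {
	f := filters.NewArgs()
	f.Add("label", fmt.Sprintf("com.docker.stack.namespace=%s", stackName))

	services, err := cl.ServiceList(ctx, types.ServiceListOptions{Filters: f})
	if err != nil {
		return false, err
	}

	return len(services) > 0, nil
}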
|
||||
|
||||
var (
|
||||
showReleaseNotes bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
AppUpgradeCommand.Flags().BoolVarP(
|
||||
&internal.Force,
|
||||
"force",
|
||||
"f",
|
||||
false,
|
||||
"perform action without further prompt",
|
||||
)
|
||||
|
||||
AppUpgradeCommand.Flags().BoolVarP(
|
||||
&internal.NoDomainChecks,
|
||||
"no-domain-checks",
|
||||
"D",
|
||||
false,
|
||||
"disable public DNS checks",
|
||||
)
|
||||
|
||||
AppUpgradeCommand.Flags().BoolVarP(
|
||||
&internal.DontWaitConverge, "no-converge-checks",
|
||||
"c",
|
||||
false,
|
||||
"disable converge logic checks",
|
||||
)
|
||||
|
||||
AppUpgradeCommand.Flags().BoolVarP(
|
||||
&showReleaseNotes,
|
||||
"releasenotes",
|
||||
"r",
|
||||
false,
|
||||
"only show release notes",
|
||||
)
|
||||
}
|
||||
|
@ -1,104 +0,0 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// getImagePath returns the image name
|
||||
func getImagePath(image string) (string, error) {
|
||||
img, err := reference.ParseNormalizedNamed(image)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
path := reference.Path(img)
|
||||
|
||||
path = formatter.StripTagMeta(path)
|
||||
|
||||
logrus.Debugf("parsed %s from %s", path, image)
|
||||
|
||||
return path, nil
|
||||
}
|
||||
|
||||
var appVersionCommand = cli.Command{
|
||||
Name: "version",
|
||||
Aliases: []string{"v"},
|
||||
ArgsUsage: "<domain>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Usage: "Show app versions",
|
||||
Description: `
|
||||
Show all information about versioning related to a deployed app. This includes
|
||||
the individual image names, tags and digests, as well as the Co-op Cloud recipe
|
||||
version.
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
stackName := app.StackName()
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Debugf("checking whether %s is already deployed", stackName)
|
||||
|
||||
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if deployedVersion == "unknown" {
|
||||
logrus.Fatalf("failed to determine version of deployed %s", app.Name)
|
||||
}
|
||||
|
||||
if !isDeployed {
|
||||
logrus.Fatalf("%s is not deployed?", app.Name)
|
||||
}
|
||||
|
||||
recipeMeta, err := recipe.GetRecipeMeta(app.Recipe, conf)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
versionsMeta := make(map[string]recipe.ServiceMeta)
|
||||
for _, recipeVersion := range recipeMeta.Versions {
|
||||
if currentVersion, exists := recipeVersion[deployedVersion]; exists {
|
||||
versionsMeta = currentVersion
|
||||
}
|
||||
}
|
||||
|
||||
if len(versionsMeta) == 0 {
|
||||
logrus.Fatalf("could not retrieve deployed version (%s) from recipe catalogue?", deployedVersion)
|
||||
}
|
||||
|
||||
tableCol := []string{"version", "service", "image"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
table.SetAutoMergeCellsByColumnIndex([]int{0})
|
||||
|
||||
for serviceName, versionMeta := range versionsMeta {
|
||||
table.Append([]string{deployedVersion, serviceName, versionMeta.Image})
|
||||
}
|
||||
|
||||
table.Render()
|
||||
|
||||
return nil
|
||||
},
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
}
|
@ -7,102 +7,115 @@ import (
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var appVolumeListCommand = cli.Command{
|
||||
Name: "list",
|
||||
Aliases: []string{"ls"},
|
||||
ArgsUsage: "<domain>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.OfflineFlag,
|
||||
var AppVolumeListCommand = &cobra.Command{
|
||||
Use: "list <domain> [flags]",
|
||||
Aliases: []string{"ls"},
|
||||
Short: "List volumes associated with an app",
|
||||
Args: cobra.ExactArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.AppNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Usage: "List volumes associated with an app",
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
filters, err := app.Filters(false, true)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
volumeList, err := client.GetVolumes(cl, context.Background(), app.Server, filters)
|
||||
volumes, err := client.GetVolumes(cl, context.Background(), app.Server, filters)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
table := formatter.CreateTable([]string{"name", "created", "mounted"})
|
||||
var volTable [][]string
|
||||
for _, volume := range volumeList {
|
||||
volRow := []string{volume.Name, volume.CreatedAt, volume.Mountpoint}
|
||||
volTable = append(volTable, volRow)
|
||||
headers := []string{"NAME", "ON SERVER"}
|
||||
|
||||
table, err := formatter.CreateTable()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
table.AppendBulk(volTable)
|
||||
table.Headers(headers...)
|
||||
|
||||
if table.NumLines() > 0 {
|
||||
table.Render()
|
||||
} else {
|
||||
logrus.Warnf("no volumes created for %s", app.Name)
|
||||
var rows [][]string
|
||||
for _, volume := range volumes {
|
||||
row := []string{volume.Name, volume.Mountpoint}
|
||||
rows = append(rows, row)
|
||||
}
|
||||
|
||||
return nil
|
||||
table.Rows(rows...)
|
||||
|
||||
if len(rows) > 0 {
|
||||
if err := formatter.PrintTable(table); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
log.Warnf("no volumes created for %s", app.Name)
|
||||
},
|
||||
}
|
||||
|
||||
var appVolumeRemoveCommand = cli.Command{
|
||||
Name: "remove",
|
||||
Usage: "Remove volume(s) associated with an app",
|
||||
Description: `
|
||||
This command supports removing volumes associated with an app. The app in
|
||||
question must be undeployed before you try to remove volumes. See "abra app
|
||||
undeploy <domain>" for more.
|
||||
var AppVolumeRemoveCommand = &cobra.Command{
|
||||
Use: "remove <domain> [flags]",
|
||||
Short: "Remove volume(s) associated with an app",
|
||||
Long: `Remove volumes associated with an app.
|
||||
|
||||
The app in question must be undeployed before you try to remove volumes. See
|
||||
"abra app undeploy <domain>" for more.
|
||||
|
||||
The command is interactive and will show a multiple select input which allows
|
||||
you to make a selection. Use the "?" key to see more help on navigating this
|
||||
interface.
|
||||
|
||||
Passing "--force/-f" will select all volumes for removal. Be careful.
|
||||
`,
|
||||
ArgsUsage: "<domain>",
|
||||
Aliases: []string{"rm"},
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.ForceFlag,
|
||||
internal.OfflineFlag,
|
||||
Passing "--force/-f" will select all volumes for removal. Be careful.`,
|
||||
Aliases: []string{"rm"},
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.AppNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
app := internal.ValidateApp(c, conf)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
app := internal.ValidateApp(args)
|
||||
|
||||
cl, err := client.New(app.Server)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if deployMeta.IsDeployed {
|
||||
log.Fatalf("%s is still deployed. Run \"abra app undeploy %s\"", app.Name, app.Name)
|
||||
}
|
||||
|
||||
filters, err := app.Filters(false, true)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
volumeList, err := client.GetVolumes(cl, context.Background(), app.Server, filters)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
volumeNames := client.GetVolumeNames(volumeList)
|
||||
|
||||
@ -116,7 +129,7 @@ Passing "--force/-f" will select all volumes for removal. Be careful.
|
||||
Default: volumeNames,
|
||||
}
|
||||
if err := survey.AskOne(volumesPrompt, &volumesToRemove); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -124,25 +137,31 @@ Passing "--force/-f" will select all volumes for removal. Be careful.
|
||||
volumesToRemove = volumeNames
|
||||
}
|
||||
|
||||
err = client.RemoveVolumes(cl, context.Background(), app.Server, volumesToRemove, internal.Force)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
if len(volumesToRemove) > 0 {
|
||||
err := client.RemoveVolumes(cl, context.Background(), volumesToRemove, internal.Force, 5)
|
||||
if err != nil {
|
||||
log.Fatalf("removing volumes failed: %s", err)
|
||||
}
|
||||
|
||||
log.Infof("%d volumes removed successfully", len(volumesToRemove))
|
||||
} else {
|
||||
log.Info("no volumes removed")
|
||||
}
|
||||
|
||||
logrus.Info("volumes removed successfully")
|
||||
|
||||
return nil
|
||||
},
|
||||
BashComplete: autocomplete.AppNameComplete,
|
||||
}
|
||||
|
||||
var appVolumeCommand = cli.Command{
|
||||
Name: "volume",
|
||||
Aliases: []string{"vl"},
|
||||
Usage: "Manage app volumes",
|
||||
ArgsUsage: "<domain>",
|
||||
Subcommands: []cli.Command{
|
||||
appVolumeListCommand,
|
||||
appVolumeRemoveCommand,
|
||||
},
|
||||
}
|
||||
|
||||
var AppVolumeCommand = &cobra.Command{
|
||||
Use: "volume [cmd] [args] [flags]",
|
||||
Aliases: []string{"vl"},
|
||||
Short: "Manage app volumes",
|
||||
}
|
||||
|
||||
func init() {
|
||||
AppVolumeRemoveCommand.Flags().BoolVarP(
|
||||
&internal.Force,
|
||||
"force",
|
||||
"f",
|
||||
false,
|
||||
"perform action without further prompt",
|
||||
)
|
||||
}
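The new cobra AppVolumeCommand declares no subcommands in this hunk, so the list/remove commands are presumably attached wherever the `app` command tree is assembled. In cobra that wiring is a single AddCommand call; a hypothetical sketch with the names from this file:

// Hypothetical wiring; abra's actual registration site is not shown here.
func init() {
	AppVolumeCommand.AddCommand(
		AppVolumeListCommand,
		AppVolumeRemoveCommand,
	)
}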
|
||||
|
@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"slices"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
@ -12,103 +13,102 @@ import (
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
gitPkg "coopcloud.tech/abra/pkg/git"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var catalogueGenerateCommand = cli.Command{
|
||||
Name: "generate",
|
||||
var CatalogueGenerateCommand = &cobra.Command{
|
||||
Use: "generate [recipe] [flags]",
|
||||
Aliases: []string{"g"},
|
||||
Usage: "Generate the recipe catalogue",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.PublishFlag,
|
||||
internal.DryFlag,
|
||||
internal.SkipUpdatesFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Description: `
|
||||
Generate a new copy of the recipe catalogue which can be found on:
|
||||
Short: "Generate the recipe catalogue",
|
||||
Long: `Generate a new copy of the recipe catalogue.
|
||||
|
||||
https://recipes.coopcloud.tech (website that humans read)
|
||||
https://recipes.coopcloud.tech/recipes.json (JSON that Abra reads)
|
||||
|
||||
It polls the entire git.coopcloud.tech/coop-cloud/... recipe repository
|
||||
listing, parses README.md and git tags to produce recipe metadata which is
|
||||
loaded into the catalogue JSON file.
|
||||
N.B. this command **will** wipe local unstaged changes from your local recipes
|
||||
if present. "--chaos/-C" on this command refers to the catalogue repository
|
||||
("$ABRA_DIR/catalogue") and not the recipes. Please take care not to lose your
|
||||
changes.
|
||||
|
||||
It is possible to generate new metadata for a single recipe by passing
|
||||
<recipe>. The existing local catalogue will be updated, not overwritten.
|
||||
[recipe]. The existing local catalogue will be updated, not overwritten.
|
||||
|
||||
It is quite easy to get rate limited by Docker Hub when running this command.
|
||||
If you have a Hub account you can have Abra log you in to avoid this. Pass
|
||||
"--user" and "--pass".
|
||||
If you have a Hub account you can "docker login" and Abra will automatically
|
||||
use those details.
|
||||
|
||||
Push your new release to git.coopcloud.tech with "-p/--publish". This requires
|
||||
Push your new release to git.coopcloud.tech with "--publish/-p". This requires
|
||||
that you have permission to git push to these repositories and have your SSH
|
||||
keys configured on your account.
|
||||
`,
|
||||
ArgsUsage: "[<recipe>]",
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
recipeName := c.Args().First()
|
||||
keys configured on your account.`,
|
||||
Args: cobra.RangeArgs(0, 1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.RecipeNameComplete()
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var recipeName string
|
||||
if len(args) > 0 {
|
||||
recipeName = args[0]
|
||||
}
|
||||
|
||||
if recipeName != "" {
|
||||
internal.ValidateRecipe(c, conf)
|
||||
internal.ValidateRecipe(args, cmd.Name())
|
||||
}
|
||||
|
||||
if err := catalogue.EnsureUpToDate(conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if err := catalogue.EnsureCatalogue(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
repos, err := recipe.ReadReposMetadata(conf)
|
||||
if !internal.Chaos {
|
||||
if err := catalogue.EnsureIsClean(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
repos, err := recipe.ReadReposMetadata(internal.Debug)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var barLength int
|
||||
var logMsg string
|
||||
barLength := len(repos)
|
||||
if recipeName != "" {
|
||||
barLength = 1
|
||||
logMsg = fmt.Sprintf("ensuring %v recipe is cloned & up-to-date", barLength)
|
||||
} else {
|
||||
barLength = len(repos)
|
||||
logMsg = fmt.Sprintf("ensuring %v recipes are cloned & up-to-date, this could take some time...", barLength)
|
||||
}
|
||||
|
||||
if !internal.SkipUpdates {
|
||||
logrus.Warn(logMsg)
|
||||
if err := recipe.UpdateRepositories(repos, recipeName, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if !skipUpdates {
|
||||
if err := recipe.UpdateRepositories(repos, recipeName, internal.Debug); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
var warnings []string
|
||||
catl := make(recipe.RecipeCatalogue)
|
||||
catlBar := formatter.CreateProgressbar(barLength, "generating catalogue metadata...")
|
||||
catlBar := formatter.CreateProgressbar(barLength, "collecting catalogue metadata")
|
||||
for _, recipeMeta := range repos {
|
||||
if recipeName != "" && recipeName != recipeMeta.Name {
|
||||
catlBar.Add(1)
|
||||
if !internal.Debug {
|
||||
catlBar.Add(1)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if _, exists := catalogue.CatalogueSkipList[recipeMeta.Name]; exists {
|
||||
catlBar.Add(1)
|
||||
continue
|
||||
r := recipe.Get(recipeMeta.Name)
|
||||
versions, warnMsgs, err := r.GetRecipeVersions()
|
||||
if err != nil {
|
||||
warnings = append(warnings, err.Error())
|
||||
}
|
||||
if len(warnMsgs) > 0 {
|
||||
warnings = append(warnings, warnMsgs...)
|
||||
}
|
||||
|
||||
versions, err := recipe.GetRecipeVersions(recipeMeta.Name, conf)
|
||||
features, category, warnMsgs, err := recipe.GetRecipeFeaturesAndCategory(r)
|
||||
if err != nil {
|
||||
logrus.Warn(err)
|
||||
warnings = append(warnings, err.Error())
|
||||
}
|
||||
|
||||
features, category, err := recipe.GetRecipeFeaturesAndCategory(recipeMeta.Name)
|
||||
if err != nil {
|
||||
logrus.Warn(err)
|
||||
if len(warnMsgs) > 0 {
|
||||
warnings = append(warnings, warnMsgs...)
|
||||
}
|
||||
|
||||
catl[recipeMeta.Name] = recipe.RecipeMeta{
|
||||
@ -124,104 +124,152 @@ keys configured on your account.
|
||||
Features: features,
|
||||
}
|
||||
|
||||
catlBar.Add(1)
|
||||
if !internal.Debug {
|
||||
catlBar.Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
if err := catlBar.Close(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var uniqueWarnings []string
|
||||
for _, w := range warnings {
|
||||
if !slices.Contains(uniqueWarnings, w) {
|
||||
uniqueWarnings = append(uniqueWarnings, w)
|
||||
}
|
||||
}
|
||||
|
||||
for _, warnMsg := range uniqueWarnings {
|
||||
log.Warn(warnMsg)
|
||||
}
|
||||
|
||||
recipesJSON, err := json.MarshalIndent(catl, "", " ")
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if recipeName == "" {
|
||||
if err := ioutil.WriteFile(config.RECIPES_JSON, recipesJSON, 0764); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
catlFS, err := recipe.ReadRecipeCatalogue(conf)
|
||||
catlFS, err := recipe.ReadRecipeCatalogue(internal.Offline)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
catlFS[recipeName] = catl[recipeName]
|
||||
|
||||
updatedRecipesJSON, err := json.MarshalIndent(catlFS, "", " ")
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(config.RECIPES_JSON, updatedRecipesJSON, 0764); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Infof("generated new recipe catalogue in %s", config.RECIPES_JSON)
|
||||
log.Infof("generated recipe catalogue: %s", config.RECIPES_JSON)
|
||||
|
||||
cataloguePath := path.Join(config.ABRA_DIR, "catalogue")
|
||||
if internal.Publish {
|
||||
if publishChanges {
|
||||
|
||||
isClean, err := gitPkg.IsClean(cataloguePath)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if isClean {
|
||||
if !internal.Dry {
|
||||
logrus.Fatalf("no changes discovered in %s, nothing to publish?", cataloguePath)
|
||||
log.Fatalf("no changes discovered in %s, nothing to publish?", cataloguePath)
|
||||
}
|
||||
}
|
||||
|
||||
msg := "chore: publish new catalogue release changes"
|
||||
if err := gitPkg.Commit(cataloguePath, "**.json", msg, internal.Dry); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if err := gitPkg.Commit(cataloguePath, msg, internal.Dry); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
repo, err := git.PlainOpen(cataloguePath)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
sshURL := fmt.Sprintf(config.SSH_URL_TEMPLATE, config.CATALOGUE_JSON_REPO_NAME)
|
||||
sshURL := fmt.Sprintf(config.TOOLSHED_SSH_URL_TEMPLATE, config.CATALOGUE_JSON_REPO_NAME)
|
||||
if err := gitPkg.CreateRemote(repo, "origin-ssh", sshURL, internal.Dry); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := gitPkg.Push(cataloguePath, "origin-ssh", false, internal.Dry); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
repo, err := git.PlainOpen(cataloguePath)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
head, err := repo.Head()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !internal.Dry && internal.Publish {
|
||||
if !internal.Dry && publishChanges {
|
||||
url := fmt.Sprintf("%s/%s/commit/%s", config.REPOS_BASE_URL, config.CATALOGUE_JSON_REPO_NAME, head.Hash())
|
||||
logrus.Infof("new changes published: %s", url)
|
||||
log.Infof("new changes published: %s", url)
|
||||
}
|
||||
|
||||
if internal.Dry {
|
||||
logrus.Info("dry run: no changes published")
|
||||
log.Info("dry run: no changes published")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
BashComplete: autocomplete.RecipeNameComplete,
|
||||
}
|
||||
|
||||
// CatalogueCommand defines the `abra catalogue` command and sub-commands.
|
||||
var CatalogueCommand = cli.Command{
|
||||
Name: "catalogue",
|
||||
Usage: "Manage the recipe catalogue",
|
||||
Aliases: []string{"c"},
|
||||
ArgsUsage: "<recipe>",
|
||||
Description: "This command helps recipe packagers interact with the recipe catalogue",
|
||||
Subcommands: []cli.Command{
|
||||
catalogueGenerateCommand,
|
||||
},
|
||||
var CatalogueCommand = &cobra.Command{
|
||||
Use: "catalogue [cmd] [args] [flags]",
|
||||
Short: "Manage the recipe catalogue",
|
||||
Aliases: []string{"c"},
|
||||
}
|
||||
|
||||
var (
|
||||
publishChanges bool
|
||||
skipUpdates bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
CatalogueGenerateCommand.Flags().BoolVarP(
|
||||
&publishChanges,
|
||||
"publish",
|
||||
"p",
|
||||
false,
|
||||
"publish changes to git.coopcloud.tech",
|
||||
)
|
||||
|
||||
CatalogueGenerateCommand.Flags().BoolVarP(
|
||||
&internal.Dry,
|
||||
"dry-run",
|
||||
"r",
|
||||
false,
|
||||
"report changes that would be made",
|
||||
)
|
||||
|
||||
CatalogueGenerateCommand.Flags().BoolVarP(
|
||||
&skipUpdates,
|
||||
"skip-updates",
|
||||
"s",
|
||||
false,
|
||||
"skip updating recipe repositories",
|
||||
)
|
||||
|
||||
CatalogueGenerateCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
}
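The generate command serialises the catalogue with json.MarshalIndent and writes it with ioutil.WriteFile (deprecated in modern Go in favour of os.WriteFile). A small self-contained sketch of that final step, using a placeholder catalogue value and output path rather than abra's RecipeCatalogue type and config.RECIPES_JSON:

// Sketch: marshal a catalogue-like map and write it to disk.
package main

import (
	"encoding/json"
	"log"
	"os"
)

func main() {
	catl := map[string]any{
		"gitea": map[string]any{"category": "Development"},
	}

	recipesJSON, err := json.MarshalIndent(catl, "", "    ")
	if err != nil {
		log.Fatal(err)
	}

	if err := os.WriteFile("recipes.json", recipesJSON, 0o664); err != nil {
		log.Fatal(err)
	}
}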
|
||||
|
cli/cli.go (203 lines deleted)
@ -1,203 +0,0 @@
|
||||
// Package cli provides the interface for the command-line.
|
||||
package cli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
|
||||
"coopcloud.tech/abra/cli/app"
|
||||
"coopcloud.tech/abra/cli/catalogue"
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/cli/recipe"
|
||||
"coopcloud.tech/abra/cli/record"
|
||||
"coopcloud.tech/abra/cli/server"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/web"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// AutoCompleteCommand helps people set up auto-complete in their shells
|
||||
var AutoCompleteCommand = cli.Command{
|
||||
Name: "autocomplete",
|
||||
Aliases: []string{"ac"},
|
||||
Usage: "Configure shell autocompletion (recommended)",
|
||||
Description: `
|
||||
Set up auto-completion in your shell by downloading the relevant files and
|
||||
laying out what additional information must be loaded. Supported shells are as
|
||||
follows: bash, fish, fizsh & zsh.
|
||||
|
||||
Example:
|
||||
|
||||
abra autocomplete bash
|
||||
`,
|
||||
ArgsUsage: "<shell>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
shellType := c.Args().First()
|
||||
|
||||
if shellType == "" {
|
||||
internal.ShowSubcommandHelpAndError(c, errors.New("no shell provided"))
|
||||
}
|
||||
|
||||
supportedShells := map[string]bool{
|
||||
"bash": true,
|
||||
"zsh": true,
|
||||
"fizsh": true,
|
||||
"fish": true,
|
||||
}
|
||||
|
||||
if _, ok := supportedShells[shellType]; !ok {
|
||||
logrus.Fatalf("%s is not a supported shell right now, sorry", shellType)
|
||||
}
|
||||
|
||||
if shellType == "fizsh" {
|
||||
shellType = "zsh" // handled the same on the autocompletion side
|
||||
}
|
||||
|
||||
autocompletionDir := path.Join(config.ABRA_DIR, "autocompletion")
|
||||
if err := os.Mkdir(autocompletionDir, 0764); err != nil {
|
||||
if !os.IsExist(err) {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
logrus.Debugf("%s already created", autocompletionDir)
|
||||
}
|
||||
|
||||
autocompletionFile := path.Join(config.ABRA_DIR, "autocompletion", shellType)
|
||||
if _, err := os.Stat(autocompletionFile); err != nil && os.IsNotExist(err) {
|
||||
url := fmt.Sprintf("https://git.coopcloud.tech/coop-cloud/abra/raw/branch/main/scripts/autocomplete/%s", shellType)
|
||||
logrus.Infof("fetching %s", url)
|
||||
if err := web.GetFile(autocompletionFile, url); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
switch shellType {
|
||||
case "bash":
|
||||
fmt.Println(fmt.Sprintf(`
|
||||
# Run the following commands to install auto-completion
|
||||
sudo mkdir /etc/bash_completion.d/
|
||||
sudo cp %s /etc/bash_completion.d/abra
|
||||
echo "source /etc/bash_completion.d/abra" >> ~/.bashrc
|
||||
# To test, run the following: "abra app <hit tab key>" - you should see command completion!
|
||||
`, autocompletionFile))
|
||||
case "zsh":
|
||||
fmt.Println(fmt.Sprintf(`
|
||||
# Run the following commands to install auto-completion
|
||||
sudo mkdir /etc/zsh/completion.d/
|
||||
sudo cp %s /etc/zsh/completion.d/abra
|
||||
echo "PROG=abra\n_CLI_ZSH_AUTOCOMPLETE_HACK=1\nsource /etc/zsh/completion.d/abra" >> ~/.zshrc
|
||||
# To test, run the following: "abra app <hit tab key>" - you should see command completion!
|
||||
`, autocompletionFile))
|
||||
case "fish":
|
||||
fmt.Println(fmt.Sprintf(`
|
||||
# Run the following commands to install auto-completion
|
||||
sudo mkdir -p /etc/fish/completions
|
||||
sudo cp %s /etc/fish/completions/abra
|
||||
echo "source /etc/fish/completions/abra" >> ~/.config/fish/config.fish
|
||||
# To test, run the following: "abra app <hit tab key>" - you should see command completion!
|
||||
`, autocompletionFile))
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// UpgradeCommand upgrades abra in-place.
|
||||
var UpgradeCommand = cli.Command{
|
||||
Name: "upgrade",
|
||||
Aliases: []string{"u"},
|
||||
Usage: "Upgrade Abra itself",
|
||||
Description: `
|
||||
Upgrade Abra in-place with the latest stable or release candidate.
|
||||
|
||||
Pass "-r/--rc" to install the latest release candidate. Please bear in mind
|
||||
that it may contain catastrophic bugs. Thank you very much for the testing
|
||||
efforts!
|
||||
`,
|
||||
Flags: []cli.Flag{internal.RCFlag},
|
||||
Action: func(c *cli.Context) error {
|
||||
mainURL := "https://install.abra.coopcloud.tech"
|
||||
cmd := exec.Command("bash", "-c", fmt.Sprintf("wget -q -O- %s | bash", mainURL))
|
||||
|
||||
if internal.RC {
|
||||
releaseCandidateURL := "https://git.coopcloud.tech/coop-cloud/abra/raw/branch/main/scripts/installer/installer"
|
||||
cmd = exec.Command("bash", "-c", fmt.Sprintf("wget -q -O- %s | bash -s -- --rc", releaseCandidateURL))
|
||||
}
|
||||
|
||||
logrus.Debugf("attempting to run %s", cmd)
|
||||
|
||||
if err := internal.RunCmd(cmd); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func newAbraApp(version, commit string) *cli.App {
|
||||
app := &cli.App{
|
||||
Name: "abra",
|
||||
Usage: `The Co-op Cloud command-line utility belt 🎩🐇
|
||||
____ ____ _ _
|
||||
/ ___|___ ___ _ __ / ___| | ___ _ _ __| |
|
||||
| | / _ \ _____ / _ \| '_ \ | | | |/ _ \| | | |/ _' |
|
||||
| |__| (_) |_____| (_) | |_) | | |___| | (_) | |_| | (_| |
|
||||
\____\___/ \___/| .__/ \____|_|\___/ \__,_|\__,_|
|
||||
|_|
|
||||
`,
|
||||
Version: fmt.Sprintf("%s-%s", version, commit[:7]),
|
||||
Commands: []cli.Command{
|
||||
app.AppCommand,
|
||||
server.ServerCommand,
|
||||
recipe.RecipeCommand,
|
||||
catalogue.CatalogueCommand,
|
||||
record.RecordCommand,
|
||||
UpgradeCommand,
|
||||
AutoCompleteCommand,
|
||||
},
|
||||
BashComplete: autocomplete.SubcommandComplete,
|
||||
}
|
||||
|
||||
app.EnableBashCompletion = true
|
||||
|
||||
app.Before = func(c *cli.Context) error {
|
||||
paths := []string{
|
||||
config.ABRA_DIR,
|
||||
path.Join(config.SERVERS_DIR),
|
||||
path.Join(config.RECIPES_DIR),
|
||||
path.Join(config.VENDOR_DIR),
|
||||
path.Join(config.BACKUP_DIR),
|
||||
}
|
||||
|
||||
for _, path := range paths {
|
||||
if err := os.Mkdir(path, 0764); err != nil {
|
||||
if !os.IsExist(err) {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("abra version %s, commit %s", version, commit)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
// RunApp runs CLI abra app.
|
||||
func RunApp(version, commit string) {
|
||||
app := newAbraApp(version, commit)
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
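This hunk deletes the urfave/cli entry point; its cobra replacement is not included in this section. A minimal sketch of what the new entry point presumably looks like (the function name, the wiring and the omitted directory setup are assumptions):

// Hypothetical cobra-based entry point replacing newAbraApp/RunApp.
package cli

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func Run(version, commit string) {
	rootCmd := &cobra.Command{
		Use:     "abra [cmd] [args] [flags]",
		Short:   "The Co-op Cloud command-line utility belt 🎩🐇",
		Version: fmt.Sprintf("%s-%s", version, commit),
	}

	// Sub-commands (app, server, recipe, catalogue, ...) would be
	// registered here with rootCmd.AddCommand(...).

	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}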
|
cli/complete.go (new file, 65 lines)
@ -0,0 +1,65 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var AutocompleteCommand = &cobra.Command{
|
||||
Use: "autocomplete [bash|zsh|fish|powershell]",
|
||||
Short: "Generate autocompletion script",
|
||||
Long: `To load completions:
|
||||
|
||||
Bash:
|
||||
|
||||
$ source <(abra autocomplete bash)
|
||||
|
||||
# To load autocompletion for each session, execute once:
|
||||
# Linux:
|
||||
$ abra autocomplete bash > /etc/bash_completion.d/abra
|
||||
# macOS:
|
||||
$ abra autocomplete bash > $(brew --prefix)/etc/bash_completion.d/abra
|
||||
|
||||
Zsh:
|
||||
|
||||
# If shell autocompletion is not already enabled in your environment,
|
||||
# you will need to enable it. You can execute the following once:
|
||||
|
||||
$ echo "autoload -U compinit; compinit" >> ~/.zshrc
|
||||
|
||||
# To load autocompletions for each session, execute once:
|
||||
$ abra autocomplete zsh > "${fpath[1]}/_abra"
|
||||
|
||||
# You will need to start a new shell for this setup to take effect.
|
||||
|
||||
fish:
|
||||
|
||||
$ abra autocomplete fish | source
|
||||
|
||||
# To load autocompletions for each session, execute once:
|
||||
$ abra autocomplete fish > ~/.config/fish/completions/abra.fish
|
||||
|
||||
PowerShell:
|
||||
|
||||
PS> abra autocomplete powershell | Out-String | Invoke-Expression
|
||||
|
||||
# To load autocompletions for every new session, run:
|
||||
PS> abra autocomplete powershell > abra.ps1
|
||||
# and source this file from your PowerShell profile.`,
|
||||
DisableFlagsInUseLine: true,
|
||||
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
|
||||
Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
switch args[0] {
|
||||
case "bash":
|
||||
cmd.Root().GenBashCompletion(os.Stdout)
|
||||
case "zsh":
|
||||
cmd.Root().GenZshCompletion(os.Stdout)
|
||||
case "fish":
|
||||
cmd.Root().GenFishCompletion(os.Stdout, true)
|
||||
case "powershell":
|
||||
cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
|
||||
}
|
||||
},
|
||||
}
|
@ -1,35 +1,74 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
containerPkg "coopcloud.tech/abra/pkg/container"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/service"
|
||||
"coopcloud.tech/abra/pkg/upstream/container"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
// SafeSplit splits up a string into a list of commands safely.
|
||||
func SafeSplit(s string) []string {
|
||||
split := strings.Split(s, " ")
|
||||
|
||||
var result []string
|
||||
var inquote string
|
||||
var block string
|
||||
for _, i := range split {
|
||||
if inquote == "" {
|
||||
if strings.HasPrefix(i, "'") || strings.HasPrefix(i, "\"") {
|
||||
inquote = string(i[0])
|
||||
block = strings.TrimPrefix(i, inquote) + " "
|
||||
} else {
|
||||
result = append(result, i)
|
||||
}
|
||||
} else {
|
||||
if !strings.HasSuffix(i, inquote) {
|
||||
block += i + " "
|
||||
} else {
|
||||
block += strings.TrimSuffix(i, inquote)
|
||||
inquote = ""
|
||||
result = append(result, block)
|
||||
block = ""
|
||||
}
|
||||
}
|
||||
// RetrieveBackupBotContainer gets the deployed backupbot container.
|
||||
func RetrieveBackupBotContainer(cl *dockerClient.Client) (types.Container, error) {
|
||||
ctx := context.Background()
|
||||
chosenService, err := service.GetServiceByLabel(ctx, cl, config.BackupbotLabel, NoInput)
|
||||
if err != nil {
|
||||
return types.Container{}, fmt.Errorf("no backupbot discovered, is it deployed?")
|
||||
}
|
||||
|
||||
return result
|
||||
log.Debugf("retrieved %s as backup enabled service", chosenService.Spec.Name)
|
||||
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("name", chosenService.Spec.Name)
|
||||
targetContainer, err := containerPkg.GetContainer(
|
||||
ctx,
|
||||
cl,
|
||||
filters,
|
||||
NoInput,
|
||||
)
|
||||
if err != nil {
|
||||
return types.Container{}, err
|
||||
}
|
||||
|
||||
return targetContainer, nil
|
||||
}
|
||||
|
||||
// RunBackupCmdRemote runs a backup related command on a remote backupbot container.
|
||||
func RunBackupCmdRemote(
|
||||
cl *dockerClient.Client,
|
||||
backupCmd string,
|
||||
containerID string,
|
||||
execEnv []string) (io.Writer, error) {
|
||||
execBackupListOpts := types.ExecConfig{
|
||||
AttachStderr: true,
|
||||
AttachStdin: true,
|
||||
AttachStdout: true,
|
||||
Cmd: []string{"/usr/bin/backup", "--", backupCmd},
|
||||
Detach: false,
|
||||
Env: execEnv,
|
||||
Tty: true,
|
||||
}
|
||||
|
||||
log.Debugf("running backup %s on %s with exec config %v", backupCmd, containerID, execBackupListOpts)
|
||||
|
||||
// FIXME: avoid instantiating a new CLI
|
||||
dcli, err := command.NewDockerCli()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := container.RunExec(dcli, cl, containerID, &execBackupListOpts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
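RunBackupCmdRemote hands the exec config to abra's container.RunExec wrapper. For orientation, the raw Docker SDK calls underneath such an exec look roughly like this sketch (it streams the multiplexed output instead of wiring up a full TTY, so it is not a drop-in for the wrapper):

// Sketch: run a command inside a container via the Docker exec API.
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	dockerClient "github.com/docker/docker/client"
)

func execInContainer(ctx context.Context, cl *dockerClient.Client, containerID string, cmd, env []string) error {
	created, err := cl.ContainerExecCreate(ctx, containerID, types.ExecConfig{
		AttachStdout: true,
		AttachStderr: true,
		Cmd:          cmd,
		Env:          env,
	})
	if err != nil {
		return err
	}

	resp, err := cl.ContainerExecAttach(ctx, created.ID, types.ExecStartCheck{})
	if err != nil {
		return err
	}
	defer resp.Close()

	// Without a TTY, stdout/stderr arrive multiplexed; copying the raw
	// stream is good enough for a sketch.
	_, err = io.Copy(os.Stdout, resp.Reader)
	return err
}

func main() {
	cl, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv, dockerClient.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	if err := execInContainer(context.Background(), cl, "backupbot-container-id", []string{"echo", "hello"}, nil); err != nil {
		log.Fatal(err)
	}
}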
|
||||
|
@ -1,419 +1,20 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"os"
|
||||
var (
|
||||
// NOTE(d1): global
|
||||
Debug bool
|
||||
NoInput bool
|
||||
Offline bool
|
||||
IgnoreEnvVersion bool
|
||||
|
||||
logrusStack "github.com/Gurpartap/logrus-stack"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
// NOTE(d1): sub-command specific
|
||||
Chaos bool
|
||||
DontWaitConverge bool
|
||||
Dry bool
|
||||
Force bool
|
||||
MachineReadable bool
|
||||
Major bool
|
||||
Minor bool
|
||||
NoDomainChecks bool
|
||||
Patch bool
|
||||
)
|
||||
|
||||
// Secrets stores the variable from SecretsFlag
|
||||
var Secrets bool
|
||||
|
||||
// SecretsFlag turns on/off automatically generating secrets
|
||||
var SecretsFlag = &cli.BoolFlag{
|
||||
Name: "secrets, S",
|
||||
Usage: "Automatically generate secrets",
|
||||
Destination: &Secrets,
|
||||
}
|
||||
|
||||
// Pass stores the variable from PassFlag
|
||||
var Pass bool
|
||||
|
||||
// PassFlag turns on/off storing generated secrets in pass
|
||||
var PassFlag = &cli.BoolFlag{
|
||||
Name: "pass, p",
|
||||
Usage: "Store the generated secrets in a local pass store",
|
||||
Destination: &Pass,
|
||||
}
|
||||
|
||||
// PassRemove stores the variable for PassRemoveFlag
|
||||
var PassRemove bool
|
||||
|
||||
// PassRemoveFlag turns on/off removing generated secrets from pass
|
||||
var PassRemoveFlag = &cli.BoolFlag{
|
||||
Name: "pass, p",
|
||||
Usage: "Remove generated secrets from a local pass store",
|
||||
Destination: &PassRemove,
|
||||
}
|
||||
|
||||
// Force force functionality without asking.
|
||||
var Force bool
|
||||
|
||||
// ForceFlag turns on/off force functionality.
|
||||
var ForceFlag = &cli.BoolFlag{
|
||||
Name: "force, f",
|
||||
Usage: "Perform action without further prompt. Use with care!",
|
||||
Destination: &Force,
|
||||
}
|
||||
|
||||
// Chaos engages chaos mode.
|
||||
var Chaos bool
|
||||
|
||||
// ChaosFlag turns on/off chaos functionality.
|
||||
var ChaosFlag = &cli.BoolFlag{
|
||||
Name: "chaos, C",
|
||||
Usage: "Deploy uncommitted recipes changes. Use with care!",
|
||||
Destination: &Chaos,
|
||||
}
|
||||
|
||||
// Disable tty to run commands from script
|
||||
var Tty bool
|
||||
|
||||
// TtyFlag turns on/off tty mode.
|
||||
var TtyFlag = &cli.BoolFlag{
|
||||
Name: "tty, T",
|
||||
Usage: "Disables TTY mode to run this command from a script.",
|
||||
Destination: &Tty,
|
||||
}
|
||||
|
||||
// DNSProvider specifies a DNS provider.
|
||||
var DNSProvider string
|
||||
|
||||
// DNSProviderFlag selects a DNS provider.
|
||||
var DNSProviderFlag = &cli.StringFlag{
|
||||
Name: "provider, p",
|
||||
Value: "",
|
||||
Usage: "DNS provider",
|
||||
Destination: &DNSProvider,
|
||||
}
|
||||
|
||||
var NoInput bool
|
||||
var NoInputFlag = &cli.BoolFlag{
|
||||
Name: "no-input, n",
|
||||
Usage: "Toggle non-interactive mode",
|
||||
Destination: &NoInput,
|
||||
}
|
||||
|
||||
var DNSType string
|
||||
|
||||
var DNSTypeFlag = &cli.StringFlag{
|
||||
Name: "record-type, rt",
|
||||
Value: "",
|
||||
Usage: "Domain name record type (e.g. A)",
|
||||
Destination: &DNSType,
|
||||
}
|
||||
|
||||
var DNSName string
|
||||
|
||||
var DNSNameFlag = &cli.StringFlag{
|
||||
Name: "record-name, rn",
|
||||
Value: "",
|
||||
Usage: "Domain name record name (e.g. mysubdomain)",
|
||||
Destination: &DNSName,
|
||||
}
|
||||
|
||||
var DNSValue string
|
||||
|
||||
var DNSValueFlag = &cli.StringFlag{
|
||||
Name: "record-value, rv",
|
||||
Value: "",
|
||||
Usage: "Domain name record value (e.g. 192.168.1.1)",
|
||||
Destination: &DNSValue,
|
||||
}
|
||||
|
||||
var DNSTTL string
|
||||
var DNSTTLFlag = &cli.StringFlag{
|
||||
Name: "record-ttl, rl",
|
||||
Value: "600s",
|
||||
Usage: "Domain name TTL value (seconds)",
|
||||
Destination: &DNSTTL,
|
||||
}
|
||||
|
||||
var DNSPriority int
|
||||
|
||||
var DNSPriorityFlag = &cli.IntFlag{
|
||||
Name: "record-priority, rp",
|
||||
Value: 10,
|
||||
Usage: "Domain name priority value",
|
||||
Destination: &DNSPriority,
|
||||
}
|
||||
|
||||
var ServerProvider string
|
||||
|
||||
var ServerProviderFlag = &cli.StringFlag{
|
||||
Name: "provider, p",
|
||||
Usage: "3rd party server provider",
|
||||
Destination: &ServerProvider,
|
||||
}
|
||||
|
||||
var CapsulInstanceURL string
|
||||
|
||||
var CapsulInstanceURLFlag = &cli.StringFlag{
|
||||
Name: "capsul-url, cu",
|
||||
Value: "yolo.servers.coop",
|
||||
Usage: "capsul instance URL",
|
||||
Destination: &CapsulInstanceURL,
|
||||
}
|
||||
|
||||
var CapsulName string
|
||||
|
||||
var CapsulNameFlag = &cli.StringFlag{
|
||||
Name: "capsul-name, cn",
|
||||
Value: "",
|
||||
Usage: "capsul name",
|
||||
Destination: &CapsulName,
|
||||
}
|
||||
|
||||
var CapsulType string
|
||||
|
||||
var CapsulTypeFlag = &cli.StringFlag{
|
||||
Name: "capsul-type, ct",
|
||||
Value: "f1-xs",
|
||||
Usage: "capsul type",
|
||||
Destination: &CapsulType,
|
||||
}
|
||||
|
||||
var CapsulImage string
|
||||
|
||||
var CapsulImageFlag = &cli.StringFlag{
|
||||
Name: "capsul-image, ci",
|
||||
Value: "debian10",
|
||||
Usage: "capsul image",
|
||||
Destination: &CapsulImage,
|
||||
}
|
||||
|
||||
var CapsulSSHKeys cli.StringSlice
|
||||
var CapsulSSHKeysFlag = &cli.StringSliceFlag{
|
||||
Name: "capsul-ssh-keys, cs",
|
||||
Usage: "capsul SSH key",
|
||||
Value: &CapsulSSHKeys,
|
||||
}
|
||||
|
||||
var CapsulAPIToken string
|
||||
|
||||
var CapsulAPITokenFlag = &cli.StringFlag{
|
||||
Name: "capsul-token, ca",
|
||||
Usage: "capsul API token",
|
||||
EnvVar: "CAPSUL_TOKEN",
|
||||
Destination: &CapsulAPIToken,
|
||||
}
|
||||
|
||||
var HetznerCloudName string
|
||||
|
||||
var HetznerCloudNameFlag = &cli.StringFlag{
|
||||
Name: "hetzner-name, hn",
|
||||
Value: "",
|
||||
Usage: "hetzner cloud name",
|
||||
Destination: &HetznerCloudName,
|
||||
}
|
||||
|
||||
var HetznerCloudType string
|
||||
|
||||
var HetznerCloudTypeFlag = &cli.StringFlag{
|
||||
Name: "hetzner-type, ht",
|
||||
Usage: "hetzner cloud type",
|
||||
Destination: &HetznerCloudType,
|
||||
Value: "cx11",
|
||||
}
|
||||
|
||||
var HetznerCloudImage string
|
||||
|
||||
var HetznerCloudImageFlag = &cli.StringFlag{
|
||||
Name: "hetzner-image, hi",
|
||||
Usage: "hetzner cloud image",
|
||||
Value: "debian-10",
|
||||
Destination: &HetznerCloudImage,
|
||||
}
|
||||
|
||||
var HetznerCloudSSHKeys cli.StringSlice
|
||||
|
||||
var HetznerCloudSSHKeysFlag = &cli.StringSliceFlag{
|
||||
Name: "hetzner-ssh-keys, hs",
|
||||
Usage: "hetzner cloud SSH keys (e.g. me@foo.com)",
|
||||
Value: &HetznerCloudSSHKeys,
|
||||
}
|
||||
|
||||
var HetznerCloudLocation string
|
||||
|
||||
var HetznerCloudLocationFlag = &cli.StringFlag{
|
||||
Name: "hetzner-location, hl",
|
||||
Usage: "hetzner cloud server location",
|
||||
Value: "hel1",
|
||||
Destination: &HetznerCloudLocation,
|
||||
}
|
||||
|
||||
var HetznerCloudAPIToken string
|
||||
|
||||
var HetznerCloudAPITokenFlag = &cli.StringFlag{
|
||||
Name: "hetzner-token, ha",
|
||||
Usage: "hetzner cloud API token",
|
||||
EnvVar: "HCLOUD_TOKEN",
|
||||
Destination: &HetznerCloudAPIToken,
|
||||
}
|
||||
|
||||
// Debug stores the variable from DebugFlag.
|
||||
var Debug bool
|
||||
|
||||
// DebugFlag turns on/off verbose logging down to the DEBUG level.
|
||||
var DebugFlag = &cli.BoolFlag{
|
||||
Name: "debug, d",
|
||||
Destination: &Debug,
|
||||
Usage: "Show DEBUG messages",
|
||||
}
|
||||
|
||||
// Offline stores the variable from OfflineFlag.
|
||||
var Offline bool
|
||||
|
||||
// OfflineFlag turns on/off offline mode.
|
||||
var OfflineFlag = &cli.BoolFlag{
|
||||
Name: "offline, o",
|
||||
Destination: &Offline,
|
||||
Usage: "Prefer offline & filesystem access when possible",
|
||||
}
|
||||
|
||||
// MachineReadable stores the variable from MachineReadableFlag
|
||||
var MachineReadable bool
|
||||
|
||||
// MachineReadableFlag turns on/off machine readable output where supported
|
||||
var MachineReadableFlag = &cli.BoolFlag{
|
||||
Name: "machine, m",
|
||||
Destination: &MachineReadable,
|
||||
Usage: "Output in a machine-readable format (where supported)",
|
||||
}
|
||||
|
||||
// RC signifies the latest release candidate
|
||||
var RC bool
|
||||
|
||||
// RCFlag chooses the latest release candidate for install
|
||||
var RCFlag = &cli.BoolFlag{
|
||||
Name: "rc, r",
|
||||
Destination: &RC,
|
||||
Usage: "Install the latest release candidate",
|
||||
}
|
||||
|
||||
var Major bool
|
||||
var MajorFlag = &cli.BoolFlag{
|
||||
Name: "major, x",
|
||||
Usage: "Increase the major part of the version",
|
||||
Destination: &Major,
|
||||
}
|
||||
|
||||
var Minor bool
|
||||
var MinorFlag = &cli.BoolFlag{
|
||||
Name: "minor, y",
|
||||
Usage: "Increase the minor part of the version",
|
||||
Destination: &Minor,
|
||||
}
|
||||
|
||||
var Patch bool
|
||||
var PatchFlag = &cli.BoolFlag{
|
||||
Name: "patch, z",
|
||||
Usage: "Increase the patch part of the version",
|
||||
Destination: &Patch,
|
||||
}
|
||||
|
||||
var Dry bool
|
||||
var DryFlag = &cli.BoolFlag{
|
||||
Name: "dry-run, r",
|
||||
Usage: "Only reports changes that would be made",
|
||||
Destination: &Dry,
|
||||
}
|
||||
|
||||
var Publish bool
|
||||
var PublishFlag = &cli.BoolFlag{
|
||||
Name: "publish, p",
|
||||
Usage: "Publish changes to git.coopcloud.tech",
|
||||
Destination: &Publish,
|
||||
}
|
||||
|
||||
var Domain string
|
||||
var DomainFlag = &cli.StringFlag{
|
||||
Name: "domain, D",
|
||||
Value: "",
|
||||
Usage: "Choose a domain name",
|
||||
Destination: &Domain,
|
||||
}
|
||||
|
||||
var NewAppServer string
|
||||
var NewAppServerFlag = &cli.StringFlag{
|
||||
Name: "server, s",
|
||||
Value: "",
|
||||
Usage: "Show apps of a specific server",
|
||||
Destination: &NewAppServer,
|
||||
}
|
||||
|
||||
var NoDomainChecks bool
|
||||
var NoDomainChecksFlag = &cli.BoolFlag{
|
||||
Name: "no-domain-checks, D",
|
||||
Usage: "Disable app domain sanity checks",
|
||||
Destination: &NoDomainChecks,
|
||||
}
|
||||
|
||||
var StdErrOnly bool
|
||||
var StdErrOnlyFlag = &cli.BoolFlag{
|
||||
Name: "stderr, s",
|
||||
Usage: "Only tail stderr",
|
||||
Destination: &StdErrOnly,
|
||||
}
|
||||
|
||||
var SinceLogs string
|
||||
var SinceLogsFlag = &cli.StringFlag{
|
||||
Name: "since, S",
|
||||
Value: "",
|
||||
Usage: "tail logs since YYYY-MM-DDTHH:MM:SSZ",
|
||||
Destination: &SinceLogs,
|
||||
}
|
||||
|
||||
var DontWaitConverge bool
|
||||
var DontWaitConvergeFlag = &cli.BoolFlag{
|
||||
Name: "no-converge-checks, c",
|
||||
Usage: "Don't wait for converge logic checks",
|
||||
Destination: &DontWaitConverge,
|
||||
}
|
||||
|
||||
var Watch bool
|
||||
var WatchFlag = &cli.BoolFlag{
|
||||
Name: "watch, w",
|
||||
Usage: "Watch status by polling repeatedly",
|
||||
Destination: &Watch,
|
||||
}
|
||||
|
||||
var OnlyErrors bool
|
||||
var OnlyErrorFlag = &cli.BoolFlag{
|
||||
Name: "errors, e",
|
||||
Usage: "Only show errors",
|
||||
Destination: &OnlyErrors,
|
||||
}
|
||||
|
||||
var SkipUpdates bool
|
||||
var SkipUpdatesFlag = &cli.BoolFlag{
|
||||
Name: "skip-updates, s",
|
||||
Usage: "Skip updating recipe repositories",
|
||||
Destination: &SkipUpdates,
|
||||
}
|
||||
|
||||
var AllTags bool
|
||||
var AllTagsFlag = &cli.BoolFlag{
|
||||
Name: "all-tags, a",
|
||||
Usage: "List all tags, not just upgrades",
|
||||
Destination: &AllTags,
|
||||
}
|
||||
|
||||
var LocalCmd bool
|
||||
var LocalCmdFlag = &cli.BoolFlag{
|
||||
Name: "local, l",
|
||||
Usage: "Run command locally",
|
||||
Destination: &LocalCmd,
|
||||
}
|
||||
|
||||
var RemoteUser string
|
||||
var RemoteUserFlag = &cli.StringFlag{
|
||||
Name: "user, u",
|
||||
Value: "",
|
||||
Usage: "User to run command within a service context",
|
||||
Destination: &RemoteUser,
|
||||
}
|
||||
|
||||
// SubCommandBefore wires up pre-action machinery (e.g. --debug handling).
|
||||
func SubCommandBefore(c *cli.Context) error {
|
||||
if Debug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetFormatter(&logrus.TextFormatter{})
|
||||
logrus.SetOutput(os.Stderr)
|
||||
logrus.AddHook(logrusStack.StandardHook())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
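The package-level variables above are bound through each flag's Destination field, and SubCommandBefore runs as the Before hook so that --debug is honoured before any action executes. Below is a minimal sketch of that wiring in the same urfave/cli style used elsewhere in this diff; the "example" sub-command and its action body are hypothetical, while DebugFlag, NoInputFlag and SubCommandBefore come from the code above.

package main

import (
	"os"

	"coopcloud.tech/abra/cli/internal"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{
			Name:   "example", // hypothetical sub-command, for illustration only
			Flags:  []cli.Flag{internal.DebugFlag, internal.NoInputFlag},
			Before: internal.SubCommandBefore,
			Action: func(c *cli.Context) error {
				// internal.Debug was filled via DebugFlag's Destination binding
				logrus.Debugf("debug enabled: %v", internal.Debug)
				return nil
			},
		},
	}

	if err := app.Run(os.Args); err != nil {
		logrus.Fatal(err)
	}
}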
|
||||
|
@@ -8,20 +8,24 @@ import (
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
containerPkg "coopcloud.tech/abra/pkg/container"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/upstream/container"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// RunCmdRemote executes an abra.sh command in the target service
|
||||
func RunCmdRemote(cl *dockerClient.Client, app config.App, abraSh, serviceName, cmdName, cmdArgs string) error {
|
||||
func RunCmdRemote(
|
||||
cl *dockerClient.Client,
|
||||
app appPkg.App,
|
||||
requestTTY bool,
|
||||
abraSh, serviceName, cmdName, cmdArgs, remoteUser string) error {
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
|
||||
|
||||
@@ -30,7 +34,7 @@ func RunCmdRemote(cl *dockerClient.Client, app config.App, abraSh, serviceName,
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Debugf("retrieved %s as target container on %s", formatter.ShortenID(targetContainer.ID), app.Server)
|
||||
log.Debugf("retrieved %s as target container on %s", formatter.ShortenID(targetContainer.ID), app.Server)
|
||||
|
||||
toTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
|
||||
content, err := archive.TarWithOptions(abraSh, toTarOpts)
|
||||
@@ -60,8 +64,8 @@ func RunCmdRemote(cl *dockerClient.Client, app config.App, abraSh, serviceName,
|
||||
Tty: false,
|
||||
}
|
||||
|
||||
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
|
||||
logrus.Infof("%s does not exist for %s, use /bin/sh as fallback", shell, app.Name)
|
||||
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
|
||||
log.Infof("%s does not exist for %s, use /bin/sh as fallback", shell, app.Name)
|
||||
shell = "/bin/sh"
|
||||
}
|
||||
|
||||
@@ -72,20 +76,20 @@ func RunCmdRemote(cl *dockerClient.Client, app config.App, abraSh, serviceName,
|
||||
cmd = []string{shell, "-c", fmt.Sprintf("TARGET=%s; APP_NAME=%s; STACK_NAME=%s; . /tmp/abra.sh; %s", serviceName, app.Name, app.StackName(), cmdName)}
|
||||
}
|
||||
|
||||
logrus.Debugf("running command: %s", strings.Join(cmd, " "))
|
||||
log.Debugf("running command: %s", strings.Join(cmd, " "))
|
||||
|
||||
if RemoteUser != "" {
|
||||
logrus.Debugf("running command with user %s", RemoteUser)
|
||||
execCreateOpts.User = RemoteUser
|
||||
if remoteUser != "" {
|
||||
log.Debugf("running command with user %s", remoteUser)
|
||||
execCreateOpts.User = remoteUser
|
||||
}
|
||||
|
||||
execCreateOpts.Cmd = cmd
|
||||
execCreateOpts.Tty = true
|
||||
if Tty {
|
||||
execCreateOpts.Tty = false
|
||||
execCreateOpts.Tty = requestTTY
|
||||
if !requestTTY {
|
||||
log.Debugf("not requesting a remote TTY")
|
||||
}
|
||||
|
||||
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
|
||||
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
357 cli/internal/deploy.go Normal file
@@ -0,0 +1,357 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/tagcmp"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
var borderStyle = lipgloss.NewStyle().
|
||||
BorderStyle(lipgloss.ThickBorder()).
|
||||
Padding(0, 1, 0, 1).
|
||||
MaxWidth(79).
|
||||
BorderForeground(lipgloss.Color("63"))
|
||||
|
||||
var headerStyle = lipgloss.NewStyle().
|
||||
Underline(true).
|
||||
Bold(true).
|
||||
PaddingBottom(1)
|
||||
|
||||
var leftStyle = lipgloss.NewStyle().
|
||||
Bold(true)
|
||||
|
||||
var rightStyle = lipgloss.NewStyle()
|
||||
|
||||
// horizontal is a JoinHorizontal helper function.
|
||||
func horizontal(left, mid, right string) string {
|
||||
return lipgloss.JoinHorizontal(lipgloss.Left, left, mid, right)
|
||||
}
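For context, a self-contained sketch of what these styles and the horizontal helper combine into when rendering a single overview row; the "DOMAIN"/"example.com" strings are placeholders, and only the lipgloss calls shown above are assumed.

package main

import (
	"fmt"

	"github.com/charmbracelet/lipgloss"
)

func main() {
	// Join a bold key and a plain value into one left-aligned row,
	// mirroring the leftStyle/rightStyle/horizontal trio defined above.
	left := lipgloss.NewStyle().Bold(true).Render("DOMAIN")
	right := lipgloss.NewStyle().Render("example.com")
	fmt.Println(lipgloss.JoinHorizontal(lipgloss.Left, left, "  ", right))
}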
|
||||
|
||||
// NewVersionOverview shows an upgrade or downgrade overview
|
||||
func NewVersionOverview(
|
||||
app appPkg.App,
|
||||
warnMessages []string,
|
||||
kind,
|
||||
deployedVersion,
|
||||
deployedChaosVersion,
|
||||
toDeployVersion,
|
||||
releaseNotes string) error {
|
||||
deployConfig := "compose.yml"
|
||||
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
|
||||
deployConfig = composeFiles
|
||||
}
|
||||
|
||||
server := app.Server
|
||||
if app.Server == "default" {
|
||||
server = "local"
|
||||
}
|
||||
|
||||
domain := app.Domain
|
||||
if domain == "" {
|
||||
domain = config.NO_DOMAIN_DEFAULT
|
||||
}
|
||||
|
||||
upperKind := strings.ToUpper(kind)
|
||||
|
||||
rows := [][]string{
|
||||
{"DOMAIN", domain},
|
||||
{"RECIPE", app.Recipe.Name},
|
||||
{"SERVER", server},
|
||||
{"CONFIG", deployConfig},
|
||||
|
||||
{"CURRENT DEPLOYMENT", "---"},
|
||||
{"VERSION", formatter.BoldDirtyDefault(deployedVersion)},
|
||||
{"CHAOS ", formatter.BoldDirtyDefault(deployedChaosVersion)},
|
||||
|
||||
{upperKind, "---"},
|
||||
{"VERSION", formatter.BoldDirtyDefault(toDeployVersion)},
|
||||
|
||||
{fmt.Sprintf("%s.ENV", strings.ToUpper(app.Domain)), "---"},
|
||||
{"CURRENT VERSION", formatter.BoldDirtyDefault(app.Recipe.EnvVersion)},
|
||||
{"NEW VERSION", formatter.BoldDirtyDefault(toDeployVersion)},
|
||||
}
|
||||
|
||||
overview := formatter.CreateOverview(
|
||||
fmt.Sprintf("%s OVERVIEW", upperKind),
|
||||
rows,
|
||||
)
|
||||
|
||||
fmt.Println(overview)
|
||||
|
||||
if releaseNotes != "" && toDeployVersion != "" {
|
||||
fmt.Print(releaseNotes)
|
||||
} else {
|
||||
warnMessages = append(
|
||||
warnMessages,
|
||||
fmt.Sprintf("no release notes available for %s", toDeployVersion),
|
||||
)
|
||||
}
|
||||
|
||||
for _, msg := range warnMessages {
|
||||
log.Warn(msg)
|
||||
}
|
||||
|
||||
if NoInput {
|
||||
return nil
|
||||
}
|
||||
|
||||
response := false
|
||||
prompt := &survey.Confirm{Message: "proceed?"}
|
||||
if err := survey.AskOne(prompt, &response); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !response {
|
||||
log.Fatal("deployment cancelled")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeployOverview shows a deployment overview
|
||||
func DeployOverview(
|
||||
app appPkg.App,
|
||||
warnMessages []string,
|
||||
deployedVersion string,
|
||||
deployedChaosVersion string,
|
||||
toDeployVersion,
|
||||
toDeployChaosVersion string,
|
||||
toWriteVersion string,
|
||||
) error {
|
||||
deployConfig := "compose.yml"
|
||||
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
|
||||
deployConfig = composeFiles
|
||||
}
|
||||
|
||||
server := app.Server
|
||||
if app.Server == "default" {
|
||||
server = "local"
|
||||
}
|
||||
|
||||
domain := app.Domain
|
||||
if domain == "" {
|
||||
domain = config.NO_DOMAIN_DEFAULT
|
||||
}
|
||||
|
||||
if app.Recipe.Dirty {
|
||||
toWriteVersion = formatter.AddDirtyMarker(toWriteVersion)
|
||||
toDeployChaosVersion = formatter.AddDirtyMarker(toDeployChaosVersion)
|
||||
}
|
||||
|
||||
recipeName, exists := app.Env["RECIPE"]
|
||||
if !exists {
|
||||
recipeName = app.Env["TYPE"]
|
||||
}
|
||||
|
||||
envVersion, err := recipe.GetEnvVersionRaw(recipeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if envVersion == "" {
|
||||
envVersion = config.NO_VERSION_DEFAULT
|
||||
}
|
||||
|
||||
rows := [][]string{
|
||||
{"DOMAIN", domain},
|
||||
{"RECIPE", app.Recipe.Name},
|
||||
{"SERVER", server},
|
||||
{"CONFIG", deployConfig},
|
||||
|
||||
{"CURRENT DEPLOYMENT", "---"},
|
||||
{"VERSION", formatter.BoldDirtyDefault(deployedVersion)},
|
||||
{"CHAOS", formatter.BoldDirtyDefault(deployedChaosVersion)},
|
||||
|
||||
{"NEW DEPLOYMENT", "---"},
|
||||
{"VERSION", formatter.BoldDirtyDefault(toDeployVersion)},
|
||||
{"CHAOS", formatter.BoldDirtyDefault(toDeployChaosVersion)},
|
||||
|
||||
{fmt.Sprintf("%s.ENV", strings.ToUpper(app.Name)), "---"},
|
||||
{"CURRENT VERSION", formatter.BoldDirtyDefault(envVersion)},
|
||||
{"NEW VERSION", formatter.BoldDirtyDefault(toWriteVersion)},
|
||||
}
|
||||
|
||||
overview := formatter.CreateOverview("DEPLOY OVERVIEW", rows)
|
||||
|
||||
fmt.Println(overview)
|
||||
|
||||
for _, msg := range warnMessages {
|
||||
log.Warn(msg)
|
||||
}
|
||||
|
||||
if NoInput {
|
||||
return nil
|
||||
}
|
||||
|
||||
response := false
|
||||
prompt := &survey.Confirm{Message: "proceed?"}
|
||||
if err := survey.AskOne(prompt, &response); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !response {
|
||||
log.Fatal("deployment cancelled")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UndeployOverview shows an undeployment overview
|
||||
func UndeployOverview(
|
||||
app appPkg.App,
|
||||
deployedVersion,
|
||||
deployedChaosVersion,
|
||||
toWriteVersion string,
|
||||
) error {
|
||||
deployConfig := "compose.yml"
|
||||
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
|
||||
deployConfig = composeFiles
|
||||
}
|
||||
|
||||
server := app.Server
|
||||
if app.Server == "default" {
|
||||
server = "local"
|
||||
}
|
||||
|
||||
domain := app.Domain
|
||||
if domain == "" {
|
||||
domain = config.NO_DOMAIN_DEFAULT
|
||||
}
|
||||
|
||||
recipeName, exists := app.Env["RECIPE"]
|
||||
if !exists {
|
||||
recipeName = app.Env["TYPE"]
|
||||
}
|
||||
|
||||
envVersion, err := recipe.GetEnvVersionRaw(recipeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if envVersion == "" {
|
||||
envVersion = config.NO_VERSION_DEFAULT
|
||||
}
|
||||
|
||||
rows := [][]string{
|
||||
{"DOMAIN", domain},
|
||||
{"RECIPE", app.Recipe.Name},
|
||||
{"SERVER", server},
|
||||
{"CONFIG", deployConfig},
|
||||
|
||||
{"CURRENT DEPLOYMENT", "---"},
|
||||
{"VERSION", formatter.BoldDirtyDefault(deployedVersion)},
|
||||
{"CHAOS", formatter.BoldDirtyDefault(deployedChaosVersion)},
|
||||
|
||||
{fmt.Sprintf("%s.ENV", strings.ToUpper(app.Name)), "---"},
|
||||
{"CURRENT VERSION", formatter.BoldDirtyDefault(envVersion)},
|
||||
{"NEW VERSION", formatter.BoldDirtyDefault(toWriteVersion)},
|
||||
}
|
||||
|
||||
overview := formatter.CreateOverview("UNDEPLOY OVERVIEW", rows)
|
||||
|
||||
fmt.Println(overview)
|
||||
|
||||
if NoInput {
|
||||
return nil
|
||||
}
|
||||
|
||||
response := false
|
||||
prompt := &survey.Confirm{Message: "proceed?"}
|
||||
if err := survey.AskOne(prompt, &response); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !response {
|
||||
log.Fatal("undeploy cancelled")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PostCmds parses a string of commands and executes them inside the
// respective services. The commands string must have the following format:
// "<service> <command> <arguments>|<service> <command> <arguments>|..."
|
||||
func PostCmds(cl *dockerClient.Client, app appPkg.App, commands string) error {
|
||||
if _, err := os.Stat(app.Recipe.AbraShPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return fmt.Errorf(fmt.Sprintf("%s does not exist for %s?", app.Recipe.AbraShPath, app.Name))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for _, command := range strings.Split(commands, "|") {
|
||||
commandParts := strings.Split(command, " ")
|
||||
if len(commandParts) < 2 {
|
||||
return fmt.Errorf(fmt.Sprintf("not enough arguments: %s", command))
|
||||
}
|
||||
targetServiceName := commandParts[0]
|
||||
cmdName := commandParts[1]
|
||||
parsedCmdArgs := ""
|
||||
if len(commandParts) > 2 {
|
||||
parsedCmdArgs = fmt.Sprintf("%s ", strings.Join(commandParts[2:], " "))
|
||||
}
|
||||
log.Infof("running post-command '%s %s' in container %s", cmdName, parsedCmdArgs, targetServiceName)
|
||||
|
||||
if err := EnsureCommand(app.Recipe.AbraShPath, app.Recipe.Name, cmdName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serviceNames, err := appPkg.GetAppServiceNames(app.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
matchingServiceName := false
|
||||
for _, serviceName := range serviceNames {
|
||||
if serviceName == targetServiceName {
|
||||
matchingServiceName = true
|
||||
}
|
||||
}
|
||||
|
||||
if !matchingServiceName {
|
||||
return fmt.Errorf(fmt.Sprintf("no service %s for %s?", targetServiceName, app.Name))
|
||||
}
|
||||
|
||||
log.Debugf("running command %s %s within the context of %s_%s", cmdName, parsedCmdArgs, app.StackName(), targetServiceName)
|
||||
|
||||
requestTTY := true
|
||||
if err := RunCmdRemote(
|
||||
cl,
|
||||
app,
|
||||
requestTTY,
|
||||
app.Recipe.AbraShPath, targetServiceName, cmdName, parsedCmdArgs, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
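For reference, a hedged sketch of a PostCmds caller using the pipe-separated format described in the comment above; the "app migrate|db backup --verbose" string and the runPostDeployHooks helper are hypothetical, while the PostCmds signature matches the code above.

package example

import (
	"coopcloud.tech/abra/cli/internal"
	appPkg "coopcloud.tech/abra/pkg/app"
	dockerClient "github.com/docker/docker/client"
)

// runPostDeployHooks runs two post-deploy hooks: "migrate" in the "app"
// service and "backup --verbose" in the "db" service, using the
// "<service> <command> <arguments>|..." format that PostCmds expects.
func runPostDeployHooks(cl *dockerClient.Client, app appPkg.App) error {
	commands := "app migrate|db backup --verbose"
	return internal.PostCmds(cl, app, commands)
}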
|
||||
|
||||
// SortVersionsDesc sorts versions in descending order.
|
||||
func SortVersionsDesc(versions []string) []string {
|
||||
var tags []tagcmp.Tag
|
||||
|
||||
for _, v := range versions {
parsed, err := tagcmp.Parse(v)
if err != nil {
continue // skip unsupported tags
}
tags = append(tags, parsed)
}
|
||||
|
||||
sort.Sort(tagcmp.ByTagDesc(tags))
|
||||
|
||||
var desc []string
|
||||
for _, t := range tags {
|
||||
desc = append(desc, t.String())
|
||||
}
|
||||
|
||||
return desc
|
||||
}
|
17 cli/internal/deploy_test.go Normal file
@@ -0,0 +1,17 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSortVersionsDesc(t *testing.T) {
|
||||
versions := SortVersionsDesc([]string{
|
||||
"0.2.3+1.2.2",
|
||||
"1.0.0+2.2.2",
|
||||
})
|
||||
|
||||
assert.Equal(t, "1.0.0+2.2.2", versions[0])
|
||||
assert.Equal(t, "0.2.3+1.2.2", versions[1])
|
||||
}
|
11 cli/internal/ensure.go Normal file
@@ -0,0 +1,11 @@
|
||||
package internal
|
||||
|
||||
import "coopcloud.tech/abra/pkg/recipe"
|
||||
|
||||
func GetEnsureContext() recipe.EnsureContext {
|
||||
return recipe.EnsureContext{
|
||||
Chaos,
|
||||
Offline,
|
||||
IgnoreEnvVersion,
|
||||
}
|
||||
}
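A short sketch of how this ensure context is consumed by the cobra commands further down: resolve a recipe by name and let Ensure honour the Chaos/Offline/IgnoreEnvVersion globals. The recipe name and the ensureOneRecipe helper are placeholders; recipe.Get, Ensure and GetEnsureContext all appear in the surrounding diff.

package example

import (
	"coopcloud.tech/abra/cli/internal"
	"coopcloud.tech/abra/pkg/log"
	"coopcloud.tech/abra/pkg/recipe"
)

// ensureOneRecipe mirrors the pattern used by the fetch and lint commands:
// fetch the recipe checkout while respecting the global flag state.
func ensureOneRecipe(recipeName string) {
	r := recipe.Get(recipeName) // e.g. "gitea" (placeholder name)
	if err := r.Ensure(internal.GetEnsureContext()); err != nil {
		log.Fatal(err)
	}
}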
|
@@ -1,18 +0,0 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// ShowSubcommandHelpAndError exits the program on error, logs the error to the
|
||||
// terminal, and shows the help command.
|
||||
func ShowSubcommandHelpAndError(c *cli.Context, err interface{}) {
|
||||
if err2 := cli.ShowSubcommandHelp(c); err2 != nil {
|
||||
logrus.Error(err2)
|
||||
}
|
||||
logrus.Error(err)
|
||||
os.Exit(1)
|
||||
}
|
@@ -1,10 +0,0 @@
|
||||
package internal
|
||||
|
||||
// ReverseStringList reverses a list of a strings. Roll on Go generics.
|
||||
func ReverseStringList(strings []string) []string {
|
||||
for i, j := 0, len(strings)-1; i < j; i, j = i+1, j-1 {
|
||||
strings[i], strings[j] = strings[j], strings[i]
|
||||
}
|
||||
|
||||
return strings
|
||||
}
|
@@ -4,10 +4,10 @@ import (
|
||||
"fmt"
|
||||
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/distribution/reference"
|
||||
)
|
||||
|
||||
// PromptBumpType prompts for version bump type
|
||||
@@ -65,7 +65,7 @@ func GetBumpType() string {
|
||||
} else if Patch {
|
||||
bumpType = "patch"
|
||||
} else {
|
||||
logrus.Fatal("no version bump type specififed?")
|
||||
log.Fatal("no version bump type specififed?")
|
||||
}
|
||||
|
||||
return bumpType
|
||||
@@ -80,7 +80,7 @@ func SetBumpType(bumpType string) {
|
||||
} else if bumpType == "patch" {
|
||||
Patch = true
|
||||
} else {
|
||||
logrus.Fatal("no version bump type specififed?")
|
||||
log.Fatal("no version bump type specififed?")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -88,7 +88,11 @@ func SetBumpType(bumpType string) {
|
||||
func GetMainAppImage(recipe recipe.Recipe) (string, error) {
|
||||
var path string
|
||||
|
||||
for _, service := range recipe.Config.Services {
|
||||
config, err := recipe.GetComposeConfig(nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, service := range config.Services {
|
||||
if service.Name == "app" {
|
||||
img, err := reference.ParseNormalizedNamed(service.Image)
|
||||
if err != nil {
|
||||
|
@@ -1,63 +1,28 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// ValidateRecipe ensures the recipe arg is valid.
|
||||
func ValidateRecipe(c *cli.Context, conf *runtime.Config) recipe.Recipe {
|
||||
recipeName := c.Args().First()
|
||||
|
||||
if recipeName == "" {
|
||||
ShowSubcommandHelpAndError(c, errors.New("no recipe name provided"))
|
||||
func ValidateRecipe(args []string, cmdName string) recipe.Recipe {
|
||||
var recipeName string
|
||||
if len(args) > 0 {
|
||||
recipeName = args[0]
|
||||
}
|
||||
|
||||
chosenRecipe, err := recipe.Get(recipeName, conf)
|
||||
if err != nil {
|
||||
if c.Command.Name == "generate" {
|
||||
if strings.Contains(err.Error(), "missing a compose") {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
logrus.Warn(err)
|
||||
} else {
|
||||
if strings.Contains(err.Error(), "template_driver is not allowed") {
|
||||
logrus.Warnf("ensure %s recipe compose.* files include \"version: '3.8'\"", recipeName)
|
||||
}
|
||||
logrus.Fatalf("unable to validate recipe: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := recipe.EnsureLatest(recipeName, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Debugf("validated %s as recipe argument", recipeName)
|
||||
|
||||
return chosenRecipe
|
||||
}
|
||||
|
||||
// ValidateRecipeWithPrompt ensures a recipe argument is present before
|
||||
// validating, asking for input if required.
|
||||
func ValidateRecipeWithPrompt(c *cli.Context, conf *runtime.Config) recipe.Recipe {
|
||||
recipeName := c.Args().First()
|
||||
|
||||
if recipeName == "" && !NoInput {
|
||||
var recipes []string
|
||||
|
||||
catl, err := recipe.ReadRecipeCatalogue(conf)
|
||||
catl, err := recipe.ReadRecipeCatalogue(Offline)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
knownRecipes := make(map[string]bool)
|
||||
@@ -67,7 +32,7 @@ func ValidateRecipeWithPrompt(c *cli.Context, conf *runtime.Config) recipe.Recip
|
||||
|
||||
localRecipes, err := recipe.GetRecipesLocal()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, recipeLocal := range localRecipes {
|
||||
@@ -85,53 +50,63 @@ func ValidateRecipeWithPrompt(c *cli.Context, conf *runtime.Config) recipe.Recip
|
||||
Options: recipes,
|
||||
}
|
||||
if err := survey.AskOne(prompt, &recipeName); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if recipeName == "" {
|
||||
ShowSubcommandHelpAndError(c, errors.New("no recipe name provided"))
|
||||
log.Fatal("no recipe name provided")
|
||||
}
|
||||
|
||||
chosenRecipe, err := recipe.Get(recipeName, conf)
|
||||
chosenRecipe := recipe.Get(recipeName)
|
||||
err := chosenRecipe.EnsureExists()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
_, err = chosenRecipe.GetComposeConfig(nil)
|
||||
if err != nil {
|
||||
if cmdName == "generate" {
|
||||
if strings.Contains(err.Error(), "missing a compose") {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Warn(err)
|
||||
} else {
|
||||
if strings.Contains(err.Error(), "template_driver is not allowed") {
|
||||
log.Warnf("ensure %s recipe compose.* files include \"version: '3.8'\"", recipeName)
|
||||
}
|
||||
log.Fatalf("unable to validate recipe: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := recipe.EnsureLatest(recipeName, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Debugf("validated %s as recipe argument", recipeName)
|
||||
log.Debugf("validated %s as recipe argument", recipeName)
|
||||
|
||||
return chosenRecipe
|
||||
}
|
||||
|
||||
// ValidateApp ensures the app name arg is valid.
|
||||
func ValidateApp(c *cli.Context, conf *runtime.Config) config.App {
|
||||
appName := c.Args().First()
|
||||
|
||||
if appName == "" {
|
||||
ShowSubcommandHelpAndError(c, errors.New("no app provided"))
|
||||
func ValidateApp(args []string) app.App {
|
||||
if len(args) == 0 {
|
||||
log.Fatal("no app provided")
|
||||
}
|
||||
|
||||
appName := args[0]
|
||||
|
||||
app, err := app.Get(appName)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := recipe.EnsureExists(app.Recipe, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Debugf("validated %s as app argument", appName)
|
||||
log.Debugf("validated %s as app argument", appName)
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
// ValidateDomain ensures the domain name arg is valid.
|
||||
func ValidateDomain(c *cli.Context) string {
|
||||
domainName := c.Args().First()
|
||||
func ValidateDomain(args []string) string {
|
||||
var domainName string
|
||||
if len(args) > 0 {
|
||||
domainName = args[0]
|
||||
}
|
||||
|
||||
if domainName == "" && !NoInput {
|
||||
prompt := &survey.Input{
|
||||
@@ -139,40 +114,29 @@ func ValidateDomain(c *cli.Context) string {
|
||||
Default: "example.com",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &domainName); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if domainName == "" {
|
||||
ShowSubcommandHelpAndError(c, errors.New("no domain provided"))
|
||||
log.Fatal("no domain provided")
|
||||
}
|
||||
|
||||
logrus.Debugf("validated %s as domain argument", domainName)
|
||||
log.Debugf("validated %s as domain argument", domainName)
|
||||
|
||||
return domainName
|
||||
}
|
||||
|
||||
// ValidateSubCmdFlags ensures flag order conforms to correct order
|
||||
func ValidateSubCmdFlags(c *cli.Context) bool {
|
||||
for argIdx, arg := range c.Args() {
|
||||
if !strings.HasPrefix(arg, "--") {
|
||||
for _, flag := range c.Args()[argIdx:] {
|
||||
if strings.HasPrefix(flag, "--") {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
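Put differently, once a positional argument shows up, no further "--" flags are accepted. A standalone restatement of that rule over a plain argument slice, kept separate from the cli.Context plumbing; the flagsBeforeArgs helper is illustrative only and not part of abra.

package example

import "strings"

// flagsBeforeArgs re-states the ValidateSubCmdFlags rule: it returns false as
// soon as a "--flag" appears after a positional argument, so
// "--debug myapp backup" passes and "myapp backup --debug" does not.
func flagsBeforeArgs(args []string) bool {
	for i, arg := range args {
		if !strings.HasPrefix(arg, "--") {
			for _, later := range args[i:] {
				if strings.HasPrefix(later, "--") {
					return false
				}
			}
		}
	}
	return true
}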
|
||||
|
||||
// ValidateServer ensures the server name arg is valid.
|
||||
func ValidateServer(c *cli.Context) string {
|
||||
serverName := c.Args().First()
|
||||
func ValidateServer(args []string) string {
|
||||
var serverName string
|
||||
if len(args) > 0 {
|
||||
serverName = args[0]
|
||||
}
|
||||
|
||||
serverNames, err := config.ReadServerNames()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if serverName == "" && !NoInput {
|
||||
@@ -181,7 +145,7 @@ func ValidateServer(c *cli.Context) string {
|
||||
Options: serverNames,
|
||||
}
|
||||
if err := survey.AskOne(prompt, &serverName); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -192,309 +156,15 @@
|
||||
}
|
||||
}
|
||||
|
||||
if !matched {
|
||||
ShowSubcommandHelpAndError(c, errors.New("server doesn't exist?"))
|
||||
}
|
||||
|
||||
if serverName == "" {
|
||||
ShowSubcommandHelpAndError(c, errors.New("no server provided"))
|
||||
log.Fatal("no server provided")
|
||||
}
|
||||
|
||||
logrus.Debugf("validated %s as server argument", serverName)
|
||||
if !matched {
|
||||
log.Fatal("server doesn't exist?")
|
||||
}
|
||||
|
||||
log.Debugf("validated %s as server argument", serverName)
|
||||
|
||||
return serverName
|
||||
}
|
||||
|
||||
// EnsureDNSProvider ensures a DNS provider is chosen.
|
||||
func EnsureDNSProvider() error {
|
||||
if DNSProvider == "" && !NoInput {
|
||||
prompt := &survey.Select{
|
||||
Message: "Select DNS provider",
|
||||
Options: []string{"gandi"},
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, &DNSProvider); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if DNSProvider == "" {
|
||||
return fmt.Errorf("missing DNS provider?")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureDNSTypeFlag ensures a DNS type flag is present.
|
||||
func EnsureDNSTypeFlag(c *cli.Context) error {
|
||||
if DNSType == "" && !NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "Specify DNS record type",
|
||||
Default: "A",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &DNSType); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if DNSType == "" {
|
||||
ShowSubcommandHelpAndError(c, errors.New("no record type provided"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureDNSNameFlag ensures a DNS name flag is present.
|
||||
func EnsureDNSNameFlag(c *cli.Context) error {
|
||||
if DNSName == "" && !NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "Specify DNS record name",
|
||||
Default: "mysubdomain",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &DNSName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if DNSName == "" {
|
||||
ShowSubcommandHelpAndError(c, errors.New("no record name provided"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureDNSValueFlag ensures a DNS value flag is present.
|
||||
func EnsureDNSValueFlag(c *cli.Context) error {
|
||||
if DNSValue == "" && !NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "Specify DNS record value",
|
||||
Default: "192.168.1.2",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &DNSValue); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if DNSValue == "" {
|
||||
ShowSubcommandHelpAndError(c, errors.New("no record value provided"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureZoneArgument ensures a zone argument is present.
|
||||
func EnsureZoneArgument(c *cli.Context) (string, error) {
|
||||
zone := c.Args().First()
|
||||
|
||||
if zone == "" && !NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "Specify a domain name zone",
|
||||
Default: "example.com",
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, &zone); err != nil {
|
||||
return zone, err
|
||||
}
|
||||
}
|
||||
|
||||
if zone == "" {
|
||||
ShowSubcommandHelpAndError(c, errors.New("no zone value provided"))
|
||||
}
|
||||
|
||||
return zone, nil
|
||||
}
|
||||
|
||||
// EnsureServerProvider ensures a 3rd party server provider is chosen.
|
||||
func EnsureServerProvider() error {
|
||||
if ServerProvider == "" && !NoInput {
|
||||
prompt := &survey.Select{
|
||||
Message: "Select server provider",
|
||||
Options: []string{"capsul", "hetzner-cloud"},
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, &ServerProvider); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if ServerProvider == "" {
|
||||
return fmt.Errorf("missing server provider?")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureNewCapsulVPSFlags ensure all flags are present.
|
||||
func EnsureNewCapsulVPSFlags(c *cli.Context) error {
|
||||
if CapsulName == "" && !NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "specify capsul name",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &CapsulName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "specify capsul instance URL",
|
||||
Default: CapsulInstanceURL,
|
||||
}
|
||||
if err := survey.AskOne(prompt, &CapsulInstanceURL); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "specify capsul type",
|
||||
Default: CapsulType,
|
||||
}
|
||||
if err := survey.AskOne(prompt, &CapsulType); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "specify capsul image",
|
||||
Default: CapsulImage,
|
||||
}
|
||||
if err := survey.AskOne(prompt, &CapsulImage); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(CapsulSSHKeys.Value()) == 0 && !NoInput {
|
||||
var sshKeys string
|
||||
prompt := &survey.Input{
|
||||
Message: "specify capsul SSH keys (e.g. me@foo.com)",
|
||||
Default: "",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &CapsulSSHKeys); err != nil {
|
||||
return err
|
||||
}
|
||||
CapsulSSHKeys = cli.StringSlice(strings.Split(sshKeys, ","))
|
||||
}
|
||||
|
||||
if CapsulAPIToken == "" && !NoInput {
|
||||
token, ok := os.LookupEnv("CAPSUL_TOKEN")
|
||||
if !ok {
|
||||
prompt := &survey.Input{
|
||||
Message: "specify capsul API token",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &CapsulAPIToken); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
CapsulAPIToken = token
|
||||
}
|
||||
}
|
||||
|
||||
if CapsulName == "" {
|
||||
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul name?"))
|
||||
}
|
||||
if CapsulInstanceURL == "" {
|
||||
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul instance url?"))
|
||||
}
|
||||
if CapsulType == "" {
|
||||
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul type?"))
|
||||
}
|
||||
if CapsulImage == "" {
|
||||
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul image?"))
|
||||
}
|
||||
if len(CapsulSSHKeys.Value()) == 0 {
|
||||
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul ssh keys?"))
|
||||
}
|
||||
if CapsulAPIToken == "" {
|
||||
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul API token?"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureNewHetznerCloudVPSFlags ensure all flags are present.
|
||||
func EnsureNewHetznerCloudVPSFlags(c *cli.Context) error {
|
||||
if HetznerCloudName == "" && !NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "specify hetzner cloud VPS name",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &HetznerCloudName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "specify hetzner cloud VPS type",
|
||||
Default: HetznerCloudType,
|
||||
}
|
||||
if err := survey.AskOne(prompt, &HetznerCloudType); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "specify hetzner cloud VPS image",
|
||||
Default: HetznerCloudImage,
|
||||
}
|
||||
if err := survey.AskOne(prompt, &HetznerCloudImage); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(HetznerCloudSSHKeys.Value()) == 0 && !NoInput {
|
||||
var sshKeys string
|
||||
prompt := &survey.Input{
|
||||
Message: "specify hetzner cloud SSH keys (e.g. me@foo.com)",
|
||||
Default: "",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &sshKeys); err != nil {
|
||||
return err
|
||||
}
|
||||
HetznerCloudSSHKeys = cli.StringSlice(strings.Split(sshKeys, ","))
|
||||
}
|
||||
|
||||
if !NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "specify hetzner cloud VPS location",
|
||||
Default: HetznerCloudLocation,
|
||||
}
|
||||
if err := survey.AskOne(prompt, &HetznerCloudLocation); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if HetznerCloudAPIToken == "" && !NoInput {
|
||||
token, ok := os.LookupEnv("HCLOUD_TOKEN")
|
||||
if !ok {
|
||||
prompt := &survey.Input{
|
||||
Message: "specify hetzner cloud API token",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &HetznerCloudAPIToken); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
HetznerCloudAPIToken = token
|
||||
}
|
||||
}
|
||||
|
||||
if HetznerCloudName == "" {
|
||||
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud VPS name?"))
|
||||
}
|
||||
if HetznerCloudType == "" {
|
||||
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud VPS type?"))
|
||||
}
|
||||
if HetznerCloudImage == "" {
|
||||
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud image?"))
|
||||
}
|
||||
if HetznerCloudLocation == "" {
|
||||
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud VPS location?"))
|
||||
}
|
||||
if HetznerCloudAPIToken == "" {
|
||||
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud API token?"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
29 cli/recipe/diff.go Normal file
@@ -0,0 +1,29 @@
|
||||
package recipe
|
||||
|
||||
import (
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
gitPkg "coopcloud.tech/abra/pkg/git"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var RecipeDiffCommand = &cobra.Command{
|
||||
Use: "diff <recipe> [flags]",
|
||||
Aliases: []string{"d"},
|
||||
Short: "Show unstaged changes in recipe config",
|
||||
Long: "This command requires /usr/bin/git.",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.RecipeNameComplete()
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
r := internal.ValidateRecipe(args, cmd.Name())
|
||||
if err := gitPkg.DiffUnstaged(r.Dir); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
},
|
||||
}
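The cobra sub-commands in this package presumably get attached to a parent command during CLI setup, which is not part of this diff. A hedged sketch of that registration follows; the import path and the newRecipeCommand helper are assumptions, while RecipeDiffCommand, RecipeFetchCommand and RecipeLintCommand are defined in the surrounding files.

package example

import (
	recipeCmd "coopcloud.tech/abra/cli/recipe"
	"github.com/spf13/cobra"
)

// newRecipeCommand groups the recipe sub-commands under one parent, the usual
// cobra pattern for "abra recipe <sub-command>".
func newRecipeCommand() *cobra.Command {
	parent := &cobra.Command{Use: "recipe"}
	parent.AddCommand(
		recipeCmd.RecipeDiffCommand,
		recipeCmd.RecipeFetchCommand,
		recipeCmd.RecipeLintCommand,
	)
	return parent
}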
|
@@ -3,42 +3,72 @@ package recipe
|
||||
import (
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var recipeFetchCommand = cli.Command{
|
||||
Name: "fetch",
|
||||
Usage: "Fetch recipe local copies",
|
||||
Aliases: []string{"f"},
|
||||
ArgsUsage: "[<recipe>]",
|
||||
Description: "Fetchs all recipes without arguments.",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.OfflineFlag,
|
||||
var RecipeFetchCommand = &cobra.Command{
|
||||
Use: "fetch [recipe | --all] [flags]",
|
||||
Aliases: []string{"f"},
|
||||
Short: "Clone recipe(s) locally",
|
||||
Args: cobra.RangeArgs(0, 1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.RecipeNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
BashComplete: autocomplete.RecipeNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
recipeName := c.Args().First()
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var recipeName string
|
||||
if len(args) > 0 {
|
||||
recipeName = args[0]
|
||||
}
|
||||
|
||||
if recipeName == "" && !fetchAllRecipes {
|
||||
log.Fatal("missing [recipe] or --all/-a")
|
||||
}
|
||||
|
||||
if recipeName != "" && fetchAllRecipes {
|
||||
log.Fatal("cannot use [recipe] and --all/-a together")
|
||||
}
|
||||
|
||||
ensureCtx := internal.GetEnsureContext()
|
||||
if recipeName != "" {
|
||||
internal.ValidateRecipe(c, conf)
|
||||
return nil // ValidateRecipe ensures latest checkout
|
||||
r := internal.ValidateRecipe(args, cmd.Name())
|
||||
if err := r.Ensure(ensureCtx); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
repos, err := recipe.ReadReposMetadata(conf)
|
||||
catalogue, err := recipe.ReadRecipeCatalogue(internal.Offline)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := recipe.UpdateRepositories(repos, recipeName, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
catlBar := formatter.CreateProgressbar(len(catalogue), "fetching latest recipes...")
|
||||
for recipeName := range catalogue {
|
||||
r := recipe.Get(recipeName)
|
||||
if err := r.Ensure(ensureCtx); err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
catlBar.Add(1)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
fetchAllRecipes bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
RecipeFetchCommand.Flags().BoolVarP(
|
||||
&fetchAllRecipes,
|
||||
"all",
|
||||
"a",
|
||||
false,
|
||||
"fetch all recipes",
|
||||
)
|
||||
}
|
||||
|
@@ -1,47 +1,55 @@
|
||||
package recipe
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/lint"
|
||||
recipePkg "coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var recipeLintCommand = cli.Command{
|
||||
Name: "lint",
|
||||
Usage: "Lint a recipe",
|
||||
Aliases: []string{"l"},
|
||||
ArgsUsage: "<recipe>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.OnlyErrorFlag,
|
||||
internal.OfflineFlag,
|
||||
var RecipeLintCommand = &cobra.Command{
|
||||
Use: "lint <recipe> [flags]",
|
||||
Short: "Lint a recipe",
|
||||
Aliases: []string{"l"},
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.RecipeNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
BashComplete: autocomplete.RecipeNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
recipe := internal.ValidateRecipe(c, conf)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
recipe := internal.ValidateRecipe(args, cmd.Name())
|
||||
|
||||
if err := recipePkg.EnsureUpToDate(recipe.Name, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if err := recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
tableCol := []string{"ref", "rule", "severity", "satisfied", "skipped", "resolve"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
headers := []string{
|
||||
"ref",
|
||||
"rule",
|
||||
"severity",
|
||||
"satisfied",
|
||||
"skipped",
|
||||
"resolve",
|
||||
}
|
||||
|
||||
table, err := formatter.CreateTable()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
table.Headers(headers...)
|
||||
|
||||
hasError := false
|
||||
bar := formatter.CreateProgressbar(-1, "running recipe lint rules...")
|
||||
var rows [][]string
|
||||
var warnMessages []string
|
||||
for level := range lint.LintRules {
|
||||
for _, rule := range lint.LintRules[level] {
|
||||
if internal.OnlyErrors && rule.Level != "error" {
|
||||
logrus.Debugf("skipping %s, does not have level \"error\"", rule.Ref)
|
||||
if onlyError && rule.Level != "error" {
|
||||
log.Debugf("skipping %s, does not have level \"error\"", rule.Ref)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -52,14 +60,14 @@ var recipeLintCommand = cli.Command{
|
||||
|
||||
skippedOutput := "-"
|
||||
if skipped {
|
||||
skippedOutput = "yes"
|
||||
skippedOutput = "✅"
|
||||
}
|
||||
|
||||
satisfied := false
|
||||
if !skipped {
|
||||
ok, err := rule.Function(recipe)
|
||||
if err != nil {
|
||||
logrus.Warn(err)
|
||||
warnMessages = append(warnMessages, err.Error())
|
||||
}
|
||||
|
||||
if !ok && rule.Level == "error" {
|
||||
@@ -71,36 +79,62 @@ var recipeLintCommand = cli.Command{
|
||||
}
|
||||
}
|
||||
|
||||
satisfiedOutput := "yes"
|
||||
satisfiedOutput := "✅"
|
||||
if !satisfied {
|
||||
satisfiedOutput = "NO"
|
||||
satisfiedOutput = "❌"
|
||||
if skipped {
|
||||
satisfiedOutput = "-"
|
||||
}
|
||||
}
|
||||
|
||||
table.Append([]string{
|
||||
row := []string{
|
||||
rule.Ref,
|
||||
rule.Description,
|
||||
rule.Level,
|
||||
satisfiedOutput,
|
||||
skippedOutput,
|
||||
rule.HowToResolve,
|
||||
})
|
||||
}
|
||||
|
||||
bar.Add(1)
|
||||
rows = append(rows, row)
|
||||
table.Row(row...)
|
||||
}
|
||||
}
|
||||
|
||||
if table.NumLines() > 0 {
|
||||
fmt.Println()
|
||||
table.Render()
|
||||
}
|
||||
if len(rows) > 0 {
|
||||
if err := formatter.PrintTable(table); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if hasError {
|
||||
logrus.Warn("watch out, some critical errors are present in your recipe config")
|
||||
}
|
||||
for _, warnMsg := range warnMessages {
|
||||
log.Warn(warnMsg)
|
||||
}
|
||||
|
||||
return nil
|
||||
if hasError {
|
||||
log.Warnf("critical errors present in %s config", recipe.Name)
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
onlyError bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
RecipeLintCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
|
||||
RecipeLintCommand.Flags().BoolVarP(
|
||||
&onlyError,
|
||||
"error",
|
||||
"e",
|
||||
false,
|
||||
"only show errors",
|
||||
)
|
||||
}
|
||||
|
@@ -8,48 +8,46 @@ import (
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var pattern string
|
||||
var patternFlag = &cli.StringFlag{
|
||||
Name: "pattern, p",
|
||||
Value: "",
|
||||
Usage: "Simple string to filter recipes",
|
||||
Destination: &pattern,
|
||||
}
|
||||
|
||||
var recipeListCommand = cli.Command{
|
||||
Name: "list",
|
||||
Usage: "List available recipes",
|
||||
var RecipeListCommand = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List recipes",
|
||||
Aliases: []string{"ls"},
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.MachineReadableFlag,
|
||||
patternFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
|
||||
catl, err := recipe.ReadRecipeCatalogue(conf)
|
||||
Args: cobra.NoArgs,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
catl, err := recipe.ReadRecipeCatalogue(internal.Offline)
|
||||
if err != nil {
|
||||
logrus.Fatal(err.Error())
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
recipes := catl.Flatten()
|
||||
sort.Sort(recipe.ByRecipeName(recipes))
|
||||
|
||||
tableCol := []string{"name", "category", "status", "healthcheck", "backups", "email", "tests", "SSO"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
table, err := formatter.CreateTable()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
len := 0
|
||||
headers := []string{
|
||||
"name",
|
||||
"category",
|
||||
"status",
|
||||
"healthcheck",
|
||||
"backups",
|
||||
"email",
|
||||
"tests",
|
||||
"SSO",
|
||||
}
|
||||
|
||||
table.Headers(headers...)
|
||||
|
||||
var rows [][]string
|
||||
for _, recipe := range recipes {
|
||||
tableRow := []string{
|
||||
row := []string{
|
||||
recipe.Name,
|
||||
recipe.Category,
|
||||
strconv.Itoa(recipe.Features.Status),
|
||||
@@ -62,25 +60,50 @@ var recipeListCommand = cli.Command{
|
||||
|
||||
if pattern != "" {
|
||||
if strings.Contains(recipe.Name, pattern) {
|
||||
table.Append(tableRow)
|
||||
len++
|
||||
table.Row(row...)
|
||||
rows = append(rows, row)
|
||||
}
|
||||
} else {
|
||||
table.Append(tableRow)
|
||||
len++
|
||||
table.Row(row...)
|
||||
rows = append(rows, row)
|
||||
}
|
||||
}
|
||||
|
||||
if table.NumLines() > 0 {
|
||||
if len(rows) > 0 {
|
||||
if internal.MachineReadable {
|
||||
table.SetCaption(false, "")
|
||||
table.JSONRender()
|
||||
} else {
|
||||
table.SetCaption(true, fmt.Sprintf("total recipes: %v", len))
|
||||
table.Render()
|
||||
out, err := formatter.ToJSON(headers, rows)
|
||||
if err != nil {
|
||||
log.Fatal("unable to render to JSON: %s", err)
|
||||
}
|
||||
fmt.Println(out)
|
||||
return
|
||||
}
|
||||
|
||||
if err := formatter.PrintTable(table); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
pattern string
|
||||
)
|
||||
|
||||
func init() {
|
||||
RecipeListCommand.Flags().BoolVarP(
|
||||
&internal.MachineReadable,
|
||||
"machine",
|
||||
"m",
|
||||
false,
|
||||
"print machine-readable output",
|
||||
)
|
||||
|
||||
RecipeListCommand.Flags().StringVarP(
|
||||
&pattern,
|
||||
"pattern",
|
||||
"p",
|
||||
"",
|
||||
"filter by recipe",
|
||||
)
|
||||
}
|
||||
|
@@ -2,18 +2,17 @@ package recipe
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"text/template"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/git"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// recipeMetadata is the recipe metadata for the README.md
|
||||
@@ -30,97 +29,61 @@ type recipeMetadata struct {
|
||||
SSO string
|
||||
}
|
||||
|
||||
var recipeNewCommand = cli.Command{
|
||||
Name: "new",
|
||||
var RecipeNewCommand = &cobra.Command{
|
||||
Use: "new <recipe> [flags]",
|
||||
Aliases: []string{"n"},
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.OfflineFlag,
|
||||
Short: "Create a new recipe",
|
||||
Long: `A community managed recipe template is used.`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.RecipeNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Usage: "Create a new recipe",
|
||||
ArgsUsage: "<recipe>",
|
||||
Description: `
|
||||
Create a new recipe.
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
recipeName := args[0]
|
||||
|
||||
Abra uses the built-in example repository which is available here:
|
||||
|
||||
https://git.coopcloud.tech/coop-cloud/example
|
||||
|
||||
Files within the example repository make use of the Golang templating system
|
||||
which Abra uses to inject values into the generated recipe folder (e.g. name of
|
||||
recipe and domain in the sample environment config).
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
recipeName := c.Args().First()
|
||||
|
||||
if recipeName == "" {
|
||||
internal.ShowSubcommandHelpAndError(c, errors.New("no recipe name provided"))
|
||||
}
|
||||
|
||||
directory := path.Join(config.RECIPES_DIR, recipeName)
|
||||
if _, err := os.Stat(directory); !os.IsNotExist(err) {
|
||||
logrus.Fatalf("%s recipe directory already exists?", directory)
|
||||
r := recipe.Get(recipeName)
|
||||
if _, err := os.Stat(r.Dir); !os.IsNotExist(err) {
|
||||
log.Fatalf("%s recipe directory already exists?", r.Dir)
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/example.git", config.REPOS_BASE_URL)
|
||||
if err := git.Clone(directory, url); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if err := git.Clone(r.Dir, url); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
gitRepo := path.Join(config.RECIPES_DIR, recipeName, ".git")
|
||||
gitRepo := path.Join(r.Dir, ".git")
|
||||
if err := os.RemoveAll(gitRepo); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
logrus.Debugf("removed example git repo in %s", gitRepo)
|
||||
log.Debugf("removed .git repo in %s", gitRepo)
|
||||
|
||||
meta := newRecipeMeta(recipeName)
|
||||
|
||||
toParse := []string{
|
||||
path.Join(config.RECIPES_DIR, recipeName, "README.md"),
|
||||
path.Join(config.RECIPES_DIR, recipeName, ".env.sample"),
|
||||
}
|
||||
for _, path := range toParse {
|
||||
for _, path := range []string{r.ReadmePath, r.SampleEnvPath} {
|
||||
tpl, err := template.ParseFiles(path)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var templated bytes.Buffer
|
||||
if err := tpl.Execute(&templated, meta); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(path, templated.Bytes(), 0644); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if err := os.WriteFile(path, templated.Bytes(), 0o644); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
newGitRepo := path.Join(config.RECIPES_DIR, recipeName)
|
||||
if err := git.Init(newGitRepo, true); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if err := git.Init(r.Dir, true, gitName, gitEmail); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Print(fmt.Sprintf(`
|
||||
Your new %s recipe has been created in %s.
|
||||
|
||||
In order to share your recipe, you can upload it the git repository to:
|
||||
|
||||
https://git.coopcloud.tech/coop-cloud/%s
|
||||
|
||||
If you're not sure how to do that, come chat with us:
|
||||
|
||||
https://docs.coopcloud.tech/intro/contact
|
||||
|
||||
See "abra recipe -h" for additional recipe maintainer commands.
|
||||
|
||||
Happy Hacking!
|
||||
|
||||
`, recipeName, path.Join(config.RECIPES_DIR, recipeName), recipeName))
|
||||
|
||||
return nil
|
||||
log.Infof("new recipe '%s' created: %s", recipeName, path.Join(r.Dir))
|
||||
log.Info("happy hacking 🎉")
|
||||
},
|
||||
}
|
||||
|
||||
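The help text above says Abra injects values into the cloned example repository with Go's text/template, which is what the ParseFiles/Execute/WriteFile sequence does. A small self-contained sketch of that templating step; the "{{ .Name }}" placeholder and the sample content are assumptions for illustration, the real fields come from recipeMetadata and the example repository:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// Mirrors the idea of recipeMetadata: fields get injected into the cloned
// example files. The field name here is illustrative only.
type recipeMetadata struct {
	Name string
}

func main() {
	// Stand-in for README.md/.env.sample content from the example repo;
	// "{{ .Name }}" is a hypothetical placeholder.
	const sample = "# {{ .Name }}\n\nTYPE={{ .Name }}\n"

	tpl, err := template.New("sample").Parse(sample)
	if err != nil {
		panic(err)
	}

	var out bytes.Buffer
	if err := tpl.Execute(&out, recipeMetadata{Name: "myrecipe"}); err != nil {
		panic(err)
	}

	// The rendered text is what would be written back to disk.
	fmt.Print(out.String())
}
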
@@ -139,3 +102,26 @@ func newRecipeMeta(recipeName string) recipeMetadata {
		SSO: "No",
	}
}

var (
	gitName  string
	gitEmail string
)

func init() {
	RecipeNewCommand.Flags().StringVarP(
		&gitName,
		"git-name",
		"N",
		"",
		"Git (user) name to do commits with",
	)

	RecipeNewCommand.Flags().StringVarP(
		&gitEmail,
		"git-email",
		"e",
		"",
		"Git email to do commits with",
	)
}

@@ -1,34 +1,19 @@
package recipe

import (
	"github.com/urfave/cli"
)
import "github.com/spf13/cobra"

// RecipeCommand defines all recipe related sub-commands.
var RecipeCommand = cli.Command{
	Name:      "recipe",
	Aliases:   []string{"r"},
	Usage:     "Manage recipes",
	ArgsUsage: "<recipe>",
	Description: `
A recipe is a blueprint for an app. It is a bunch of config files which
describe how to deploy and maintain an app. Recipes are maintained by the Co-op
Cloud community and you can use Abra to read them, deploy them and create apps
for you.
var RecipeCommand = &cobra.Command{
	Use:     "recipe [cmd] [args] [flags]",
	Aliases: []string{"r"},
	Short:   "Manage recipes",
	Long: `A recipe is a blueprint for an app.

It is a bunch of config files which describe how to deploy and maintain an app.
Recipes are maintained by the Co-op Cloud community and you can use Abra to
read them, deploy them and create apps for you.

Anyone who uses a recipe can become a maintainer. Maintainers typically make
sure the recipe is in good working order and the config upgraded in a timely
manner. Abra supports convenient automation for recipe maintenance, see the
"abra recipe upgrade", "abra recipe sync" and "abra recipe release" commands.
`,
	Subcommands: []cli.Command{
		recipeFetchCommand,
		recipeLintCommand,
		recipeListCommand,
		recipeNewCommand,
		recipeReleaseCommand,
		recipeSyncCommand,
		recipeUpgradeCommand,
		recipeVersionCommand,
	},
manner.`,
}

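With the move to cobra, the urfave-style Subcommands list disappears from RecipeCommand, so the recipe subcommands are presumably attached elsewhere with AddCommand. A hedged sketch of what that wiring could look like; the inline command literals are stand-ins for the exported *Command variables in this diff, not abra's actual setup code:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func newRecipeCommand() *cobra.Command {
	recipeCmd := &cobra.Command{
		Use:     "recipe [cmd] [args] [flags]",
		Aliases: []string{"r"},
		Short:   "Manage recipes",
	}

	recipeCmd.AddCommand(
		&cobra.Command{Use: "list"},     // stands in for RecipeListCommand
		&cobra.Command{Use: "new"},      // stands in for RecipeNewCommand
		&cobra.Command{Use: "release"},  // stands in for RecipeReleaseCommand
		&cobra.Command{Use: "sync"},     // stands in for RecipeSyncCommand
		&cobra.Command{Use: "upgrade"},  // stands in for RecipeUpgradeCommand
		&cobra.Command{Use: "versions"}, // stands in for RecipeVersionCommand
	)

	return recipeCmd
}

func main() {
	if err := newRecipeCommand().Execute(); err != nil {
		fmt.Println(err)
	}
}
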
@ -1,35 +1,34 @@
|
||||
package recipe
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
gitPkg "coopcloud.tech/abra/pkg/git"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
recipePkg "coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/tagcmp"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var recipeReleaseCommand = cli.Command{
|
||||
Name: "release",
|
||||
Aliases: []string{"rl"},
|
||||
Usage: "Release a new recipe version",
|
||||
ArgsUsage: "<recipe> [<version>]",
|
||||
Description: `
|
||||
Create a new version of a recipe. These versions are then published on the
|
||||
Co-op Cloud recipe catalogue. These versions take the following form:
|
||||
var RecipeReleaseCommand = &cobra.Command{
|
||||
Use: "release <recipe> [version] [flags]",
|
||||
Aliases: []string{"rl"},
|
||||
Short: "Release a new recipe version",
|
||||
Long: `Create a new version of a recipe.
|
||||
|
||||
These versions are then published on the Co-op Cloud recipe catalogue. These
|
||||
versions take the following form:
|
||||
|
||||
a.b.c+x.y.z
|
||||
|
||||
@ -43,97 +42,117 @@ recipe updates are properly communicated. I.e. developers of an app might
|
||||
publish a minor version but that might lead to changes in the recipe which are
|
||||
major and therefore require intervention while doing the upgrade work.
|
||||
|
||||
Publish your new release to git.coopcloud.tech with "-p/--publish". This
|
||||
Publish your new release to git.coopcloud.tech with "--publish/-p". This
|
||||
requires that you have permission to git push to these repositories and have
|
||||
your SSH keys configured on your account.
|
||||
`,
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.DryFlag,
|
||||
internal.MajorFlag,
|
||||
internal.MinorFlag,
|
||||
internal.PatchFlag,
|
||||
internal.PublishFlag,
|
||||
internal.OfflineFlag,
|
||||
your SSH keys configured on your account.`,
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.RecipeNameComplete()
|
||||
case 1:
|
||||
return autocomplete.RecipeVersionComplete(args[0])
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
BashComplete: autocomplete.RecipeNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
recipe := internal.ValidateRecipeWithPrompt(c, conf)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
recipe := internal.ValidateRecipe(args, cmd.Name())
|
||||
|
||||
imagesTmp, err := getImageVersions(recipe)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
mainApp, err := internal.GetMainAppImage(recipe)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
mainAppVersion := imagesTmp[mainApp]
|
||||
if mainAppVersion == "" {
|
||||
logrus.Fatalf("main app service version for %s is empty?", recipe.Name)
|
||||
log.Fatalf("main app service version for %s is empty?", recipe.Name)
|
||||
}
|
||||
|
||||
var tagString string
|
||||
if len(args) == 2 {
|
||||
tagString = args[1]
|
||||
}
|
||||
|
||||
tagString := c.Args().Get(1)
|
||||
if tagString != "" {
|
||||
if _, err := tagcmp.Parse(tagString); err != nil {
|
||||
logrus.Fatalf("cannot parse %s, invalid tag specified?", tagString)
|
||||
log.Fatalf("cannot parse %s, invalid tag specified?", tagString)
|
||||
}
|
||||
}
|
||||
|
||||
if (internal.Major || internal.Minor || internal.Patch) && tagString != "" {
|
||||
logrus.Fatal("cannot specify tag and bump type at the same time")
|
||||
log.Fatal("cannot specify tag and bump type at the same time")
|
||||
}
|
||||
|
||||
if tagString != "" {
|
||||
if err := createReleaseFromTag(recipe, tagString, mainAppVersion); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
tags, err := recipe.Tags()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if tagString == "" && (!internal.Major && !internal.Minor && !internal.Patch) {
|
||||
var err error
|
||||
tagString, err = getLabelVersion(recipe, false)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
isClean, err := gitPkg.IsClean(recipe.Dir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !isClean {
|
||||
log.Infof("%s currently has these unstaged changes 👇", recipe.Name)
|
||||
if err := gitPkg.DiffUnstaged(recipe.Dir); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(tags) > 0 {
|
||||
logrus.Warnf("previous git tags detected, assuming this is a new semver release")
|
||||
log.Warnf("previous git tags detected, assuming this is a new semver release")
|
||||
if err := createReleaseFromPreviousTag(tagString, mainAppVersion, recipe, tags); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
logrus.Warnf("no tag specified and no previous tag available for %s, assuming this is the initial release", recipe.Name)
|
||||
log.Warnf("no tag specified and no previous tag available for %s, assuming this is the initial release", recipe.Name)
|
||||
|
||||
if err := createReleaseFromTag(recipe, tagString, mainAppVersion); err != nil {
|
||||
if cleanUpErr := cleanUpTag(tagString, recipe.Name); err != nil {
|
||||
logrus.Fatal(cleanUpErr)
|
||||
if cleanUpErr := cleanUpTag(recipe, tagString); err != nil {
|
||||
log.Fatal(cleanUpErr)
|
||||
}
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return
|
||||
},
|
||||
}
|
||||
|
||||
// getImageVersions retrieves image versions for a recipe
|
||||
func getImageVersions(recipe recipe.Recipe) (map[string]string, error) {
|
||||
var services = make(map[string]string)
|
||||
services := make(map[string]string)
|
||||
|
||||
config, err := recipe.GetComposeConfig(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
missingTag := false
|
||||
for _, service := range recipe.Config.Services {
|
||||
for _, service := range config.Services {
|
||||
if service.Image == "" {
|
||||
continue
|
||||
}
|
||||
@ -172,8 +191,7 @@ func getImageVersions(recipe recipe.Recipe) (map[string]string, error) {
|
||||
func createReleaseFromTag(recipe recipe.Recipe, tagString, mainAppVersion string) error {
|
||||
var err error
|
||||
|
||||
directory := path.Join(config.RECIPES_DIR, recipe.Name)
|
||||
repo, err := git.PlainOpen(directory)
|
||||
repo, err := git.PlainOpen(recipe.Dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -197,16 +215,20 @@ func createReleaseFromTag(recipe recipe.Recipe, tagString, mainAppVersion string
|
||||
tagString = fmt.Sprintf("%s+%s", tag.String(), mainAppVersion)
|
||||
}
|
||||
|
||||
if err := addReleaseNotes(recipe, tagString); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := commitRelease(recipe, tagString); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := tagRelease(tagString, repo); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := pushRelease(recipe, tagString); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -227,26 +249,111 @@ func getTagCreateOptions(tag string) (git.CreateTagOptions, error) {
|
||||
return git.CreateTagOptions{Message: msg}, nil
|
||||
}
|
||||
|
||||
func commitRelease(recipe recipe.Recipe, tag string) error {
|
||||
if internal.Dry {
|
||||
logrus.Debugf("dry run: no changes committed")
|
||||
// addReleaseNotes checks if the release/next release note exists and moves the
|
||||
// file to release/<tag>.
|
||||
func addReleaseNotes(recipe recipe.Recipe, tag string) error {
|
||||
releaseDir := path.Join(recipe.Dir, "release")
|
||||
if _, err := os.Stat(releaseDir); errors.Is(err, os.ErrNotExist) {
|
||||
if err := os.Mkdir(releaseDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
tagReleaseNotePath := path.Join(releaseDir, tag)
|
||||
if _, err := os.Stat(tagReleaseNotePath); err == nil {
|
||||
// Release note for the current tag already exists.
|
||||
return nil
|
||||
} else if !errors.Is(err, os.ErrNotExist) {
|
||||
return err
|
||||
}
|
||||
|
||||
nextReleaseNotePath := path.Join(releaseDir, "next")
|
||||
if _, err := os.Stat(nextReleaseNotePath); err == nil {
|
||||
// release/next note exists. Move it to release/<tag>
|
||||
if internal.Dry {
|
||||
log.Debugf("dry run: move release note from 'next' to %s", tag)
|
||||
return nil
|
||||
}
|
||||
|
||||
if !internal.NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "Use release note in release/next?",
|
||||
}
|
||||
var addReleaseNote bool
|
||||
if err := survey.AskOne(prompt, &addReleaseNote); err != nil {
|
||||
return err
|
||||
}
|
||||
if !addReleaseNote {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
err := os.Rename(nextReleaseNotePath, tagReleaseNotePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = gitPkg.Add(recipe.Dir, path.Join("release", "next"), internal.Dry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = gitPkg.Add(recipe.Dir, path.Join("release", tag), internal.Dry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if !errors.Is(err, os.ErrNotExist) {
|
||||
return err
|
||||
}
|
||||
|
||||
// No release note exists for the current release.
|
||||
if internal.NoInput {
|
||||
return nil
|
||||
}
|
||||
|
||||
isClean, err := gitPkg.IsClean(recipe.Dir())
|
||||
prompt := &survey.Input{
|
||||
Message: "Release Note (leave empty for no release note)",
|
||||
}
|
||||
|
||||
var releaseNote string
|
||||
if err := survey.AskOne(prompt, &releaseNote); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if releaseNote == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := os.WriteFile(tagReleaseNotePath, []byte(releaseNote), 0o644); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := gitPkg.Add(recipe.Dir, path.Join("release", tag), internal.Dry); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func commitRelease(recipe recipe.Recipe, tag string) error {
|
||||
if internal.Dry {
|
||||
log.Debugf("dry run: no changes committed")
|
||||
return nil
|
||||
}
|
||||
|
||||
isClean, err := gitPkg.IsClean(recipe.Dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if isClean {
|
||||
if !internal.Dry {
|
||||
return fmt.Errorf("no changes discovered in %s, nothing to publish?", recipe.Dir())
|
||||
return fmt.Errorf("no changes discovered in %s, nothing to publish?", recipe.Dir)
|
||||
}
|
||||
}
|
||||
|
||||
msg := fmt.Sprintf("chore: publish %s release", tag)
|
||||
repoPath := path.Join(config.RECIPES_DIR, recipe.Name)
|
||||
if err := gitPkg.Commit(repoPath, ".", msg, internal.Dry); err != nil {
|
||||
if err := gitPkg.Commit(recipe.Dir, msg, internal.Dry); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -255,7 +362,7 @@ func commitRelease(recipe recipe.Recipe, tag string) error {
|
||||
|
||||
func tagRelease(tagString string, repo *git.Repository) error {
|
||||
if internal.Dry {
|
||||
logrus.Debugf("dry run: no git tag created (%s)", tagString)
|
||||
log.Debugf("dry run: no git tag created (%s)", tagString)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -275,43 +382,42 @@ func tagRelease(tagString string, repo *git.Repository) error {
|
||||
}
|
||||
|
||||
hash := formatter.SmallSHA(head.Hash().String())
|
||||
logrus.Debugf(fmt.Sprintf("created tag %s at %s", tagString, hash))
|
||||
log.Debugf(fmt.Sprintf("created tag %s at %s", tagString, hash))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func pushRelease(recipe recipe.Recipe, tagString string) error {
|
||||
if internal.Dry {
|
||||
logrus.Info("dry run: no changes published")
|
||||
log.Info("dry run: no changes published")
|
||||
return nil
|
||||
}
|
||||
|
||||
if !internal.Publish && !internal.NoInput {
|
||||
if !publish && !internal.NoInput {
|
||||
prompt := &survey.Confirm{
|
||||
Message: "publish new release?",
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, &internal.Publish); err != nil {
|
||||
if err := survey.AskOne(prompt, &publish); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if internal.Publish {
|
||||
if publish {
|
||||
if err := recipe.Push(internal.Dry); err != nil {
|
||||
return err
|
||||
}
|
||||
url := fmt.Sprintf("%s/%s/src/tag/%s", config.REPOS_BASE_URL, recipe.Name, tagString)
|
||||
logrus.Infof("new release published: %s", url)
|
||||
url := fmt.Sprintf("%s/src/tag/%s", recipe.GitURL, tagString)
|
||||
log.Infof("new release published: %s", url)
|
||||
} else {
|
||||
logrus.Info("no -p/--publish passed, not publishing")
|
||||
log.Info("no -p/--publish passed, not publishing")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recipe.Recipe, tags []string) error {
|
||||
directory := path.Join(config.RECIPES_DIR, recipe.Name)
|
||||
repo, err := git.PlainOpen(directory)
|
||||
repo, err := git.PlainOpen(recipe.Dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -376,7 +482,7 @@ func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recip
|
||||
}
|
||||
|
||||
if lastGitTag.String() == tagString {
|
||||
logrus.Fatalf("latest git tag (%s) and synced label (%s) are the same?", lastGitTag, tagString)
|
||||
log.Fatalf("latest git tag (%s) and synced label (%s) are the same?", lastGitTag, tagString)
|
||||
}
|
||||
|
||||
if !internal.NoInput {
|
||||
@ -386,33 +492,36 @@ func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recip
|
||||
|
||||
var ok bool
|
||||
if err := survey.AskOne(prompt, &ok); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !ok {
|
||||
logrus.Fatal("exiting as requested")
|
||||
log.Fatal("exiting as requested")
|
||||
}
|
||||
}
|
||||
|
||||
if err := addReleaseNotes(recipe, tagString); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := commitRelease(recipe, tagString); err != nil {
|
||||
logrus.Fatalf("failed to commit changes: %s", err.Error())
|
||||
log.Fatalf("failed to commit changes: %s", err.Error())
|
||||
}
|
||||
|
||||
if err := tagRelease(tagString, repo); err != nil {
|
||||
logrus.Fatalf("failed to tag release: %s", err.Error())
|
||||
log.Fatalf("failed to tag release: %s", err.Error())
|
||||
}
|
||||
|
||||
if err := pushRelease(recipe, tagString); err != nil {
|
||||
logrus.Fatalf("failed to publish new release: %s", err.Error())
|
||||
log.Fatalf("failed to publish new release: %s", err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanUpTag removes a freshly created tag
|
||||
func cleanUpTag(tag, recipeName string) error {
|
||||
directory := path.Join(config.RECIPES_DIR, recipeName)
|
||||
repo, err := git.PlainOpen(directory)
|
||||
func cleanUpTag(recipe recipe.Recipe, tag string) error {
|
||||
repo, err := git.PlainOpen(recipe.Dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -423,22 +532,22 @@ func cleanUpTag(tag, recipeName string) error {
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("removed freshly created tag %s", tag)
|
||||
log.Debugf("removed freshly created tag %s", tag)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getLabelVersion(recipe recipe.Recipe, prompt bool) (string, error) {
|
||||
initTag, err := recipePkg.GetVersionLabelLocal(recipe)
|
||||
initTag, err := recipe.GetVersionLabelLocal()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if initTag == "" {
|
||||
logrus.Fatalf("unable to read version for %s from synced label. Did you try running \"abra recipe sync %s\" already?", recipe.Name, recipe.Name)
|
||||
log.Fatalf("unable to read version for %s from synced label. Did you try running \"abra recipe sync %s\" already?", recipe.Name, recipe.Name)
|
||||
}
|
||||
|
||||
logrus.Warnf("discovered %s as currently synced recipe label", initTag)
|
||||
log.Warnf("discovered %s as currently synced recipe label", initTag)
|
||||
|
||||
if prompt && !internal.NoInput {
|
||||
var response bool
|
||||
@ -454,3 +563,50 @@ func getLabelVersion(recipe recipe.Recipe, prompt bool) (string, error) {
|
||||
|
||||
return initTag, nil
|
||||
}
|
||||
|
||||
var (
|
||||
publish bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
RecipeReleaseCommand.Flags().BoolVarP(
|
||||
&internal.Dry,
|
||||
"dry-run",
|
||||
"r",
|
||||
false,
|
||||
"report changes that would be made",
|
||||
)
|
||||
|
||||
RecipeReleaseCommand.Flags().BoolVarP(
|
||||
&internal.Major,
|
||||
"major",
|
||||
"x",
|
||||
false,
|
||||
"increase the major part of the version",
|
||||
)
|
||||
|
||||
RecipeReleaseCommand.Flags().BoolVarP(
|
||||
&internal.Minor,
|
||||
"minor",
|
||||
"y",
|
||||
false,
|
||||
"increase the minor part of the version",
|
||||
)
|
||||
|
||||
RecipeReleaseCommand.Flags().BoolVarP(
|
||||
&internal.Patch,
|
||||
"patch",
|
||||
"z",
|
||||
false,
|
||||
"increase the patch part of the version",
|
||||
)
|
||||
|
||||
RecipeReleaseCommand.Flags().BoolVarP(
|
||||
&publish,
|
||||
"publish",
|
||||
"p",
|
||||
false,
|
||||
"publish changes to git.coopcloud.tech",
|
||||
)
|
||||
|
||||
}
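The release help text above describes versions of the form a.b.c+x.y.z, which the code validates with coopcloud.tech/tagcmp. A minimal sketch using only the tagcmp calls that appear in this diff (Parse, IsParsable, the Major/Minor/Patch/Metadata fields and String); the exact semantics of the Tag fields are inferred from the surrounding code, not from tagcmp's documentation:

package main

import (
	"fmt"

	"coopcloud.tech/tagcmp"
)

func main() {
	// "2.1.0+28.0.2": recipe version 2.1.0 packaging upstream app version 28.0.2.
	version := "2.1.0+28.0.2"

	if !tagcmp.IsParsable(version) {
		fmt.Println("not a semver-like tag")
		return
	}

	tag, err := tagcmp.Parse(version)
	if err != nil {
		fmt.Println("cannot parse", version)
		return
	}

	// Field usage mirrors what the sync/release code in this diff does.
	fmt.Println("recipe part:", tag.Major, tag.Minor, tag.Patch)
	fmt.Println("app part (metadata):", tag.Metadata)
	fmt.Println("round-tripped:", tag.String())
}

This is the same split the --major/--minor/--patch bumps operate on: only the part before the "+" is incremented, while the metadata tracks the packaged app version.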
|
||||
|
46 cli/recipe/reset.go Normal file
@@ -0,0 +1,46 @@
package recipe

import (
	"coopcloud.tech/abra/cli/internal"
	"coopcloud.tech/abra/pkg/autocomplete"
	"coopcloud.tech/abra/pkg/log"
	"github.com/go-git/go-git/v5"
	"github.com/spf13/cobra"
)

var RecipeResetCommand = &cobra.Command{
	Use:     "reset <recipe> [flags]",
	Aliases: []string{"rs"},
	Short:   "Remove all unstaged changes from recipe config",
	Long:    "WARNING: this will delete your changes. Be Careful.",
	Args:    cobra.ExactArgs(1),
	ValidArgsFunction: func(
		cmd *cobra.Command,
		args []string,
		toComplete string) ([]string, cobra.ShellCompDirective) {
		return autocomplete.RecipeNameComplete()
	},
	Run: func(cmd *cobra.Command, args []string) {
		r := internal.ValidateRecipe(args, cmd.Name())

		repo, err := git.PlainOpen(r.Dir)
		if err != nil {
			log.Fatal(err)
		}

		ref, err := repo.Head()
		if err != nil {
			log.Fatal(err)
		}

		worktree, err := repo.Worktree()
		if err != nil {
			log.Fatal(err)
		}

		opts := &git.ResetOptions{Commit: ref.Hash(), Mode: git.HardReset}
		if err := worktree.Reset(opts); err != nil {
			log.Fatal(err)
		}
	},
}

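The reset command above discards local changes with a go-git hard reset. A companion sketch, using upstream go-git calls directly rather than abra's gitPkg helpers, of checking for unstaged changes before resetting; the repository path is illustrative only:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5"
)

func main() {
	// Illustrative path; abra passes the recipe's r.Dir here.
	repo, err := git.PlainOpen("/path/to/recipe")
	if err != nil {
		panic(err)
	}

	worktree, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	status, err := worktree.Status()
	if err != nil {
		panic(err)
	}

	if !status.IsClean() {
		fmt.Println("unstaged changes present; a hard reset would discard them")
	}

	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}

	// Same options as the command above: hard reset to HEAD.
	opts := &git.ResetOptions{Commit: ref.Hash(), Mode: git.HardReset}
	if err := worktree.Reset(opts); err != nil {
		panic(err)
	}
}
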
@ -2,70 +2,76 @@ package recipe
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strconv"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
gitPkg "coopcloud.tech/abra/pkg/git"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/tagcmp"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var recipeSyncCommand = cli.Command{
|
||||
Name: "sync",
|
||||
Aliases: []string{"s"},
|
||||
Usage: "Sync recipe version label",
|
||||
ArgsUsage: "<recipe> [<version>]",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.DryFlag,
|
||||
internal.MajorFlag,
|
||||
internal.MinorFlag,
|
||||
internal.PatchFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Description: `
|
||||
Generate labels for the main recipe service (i.e. by convention, the service
|
||||
named "app") which corresponds to the following format:
|
||||
var RecipeSyncCommand = &cobra.Command{
|
||||
Use: "sync <recipe> [version] [flags]",
|
||||
Aliases: []string{"s"},
|
||||
Short: "Sync recipe version label",
|
||||
Long: `Generate labels for the main recipe service.
|
||||
|
||||
By convention, the service named "app" using the following format:
|
||||
|
||||
coop-cloud.${STACK_NAME}.version=<version>
|
||||
|
||||
Where <version> can be specifed on the command-line or Abra can attempt to
|
||||
Where [version] can be specified on the command-line or Abra can attempt to
|
||||
auto-generate it for you. The <recipe> configuration will be updated on the
|
||||
local file system.
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
recipe := internal.ValidateRecipeWithPrompt(c, conf)
|
||||
local file system.`,
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
switch l := len(args); l {
|
||||
case 0:
|
||||
return autocomplete.RecipeNameComplete()
|
||||
case 1:
|
||||
return autocomplete.RecipeVersionComplete(args[0])
|
||||
default:
|
||||
return nil, cobra.ShellCompDirectiveError
|
||||
}
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
recipe := internal.ValidateRecipe(args, cmd.Name())
|
||||
|
||||
mainApp, err := internal.GetMainAppImage(recipe)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
imagesTmp, err := getImageVersions(recipe)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
mainAppVersion := imagesTmp[mainApp]
|
||||
|
||||
tags, err := recipe.Tags()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var nextTag string
|
||||
if len(args) == 2 {
|
||||
nextTag = args[1]
|
||||
}
|
||||
|
||||
nextTag := c.Args().Get(1)
|
||||
if len(tags) == 0 && nextTag == "" {
|
||||
logrus.Warnf("no git tags found for %s", recipe.Name)
|
||||
log.Warnf("no git tags found for %s", recipe.Name)
|
||||
if internal.NoInput {
|
||||
log.Fatalf("unable to continue, input required for initial version")
|
||||
}
|
||||
fmt.Println(fmt.Sprintf(`
|
||||
The following options are two types of initial semantic version that you can
|
||||
pick for %s that will be published in the recipe catalogue. This follows the
|
||||
@ -91,7 +97,7 @@ likely to change.
|
||||
}
|
||||
|
||||
if err := survey.AskOne(edPrompt, &chosenVersion); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
nextTag = fmt.Sprintf("%s+%s", chosenVersion, mainAppVersion)
|
||||
@ -100,27 +106,26 @@ likely to change.
|
||||
if nextTag == "" && (!internal.Major && !internal.Minor && !internal.Patch) {
|
||||
latestRelease := tags[len(tags)-1]
|
||||
if err := internal.PromptBumpType("", latestRelease); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if nextTag == "" {
|
||||
recipeDir := path.Join(config.RECIPES_DIR, recipe.Name)
|
||||
repo, err := git.PlainOpen(recipeDir)
|
||||
repo, err := git.PlainOpen(recipe.Dir)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var lastGitTag tagcmp.Tag
|
||||
iter, err := repo.Tags()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := iter.ForEach(func(ref *plumbing.Reference) error {
|
||||
obj, err := repo.TagObject(ref.Hash())
|
||||
if err != nil {
|
||||
logrus.Fatal("Tag at commit ", ref.Hash(), " is unannotated or otherwise broken. Please fix it.")
|
||||
log.Fatal("Tag at commit ", ref.Hash(), " is unannotated or otherwise broken. Please fix it.")
|
||||
return err
|
||||
}
|
||||
|
||||
@ -137,7 +142,7 @@ likely to change.
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// bumpType is used to decide what part of the tag should be incremented
|
||||
@ -145,7 +150,7 @@ likely to change.
|
||||
if bumpType != 0 {
|
||||
// a bitwise check if the number is a power of 2
|
||||
if (bumpType & (bumpType - 1)) != 0 {
|
||||
logrus.Fatal("you can only use one version flag: --major, --minor or --patch")
|
||||
log.Fatal("you can only use one version flag: --major, --minor or --patch")
|
||||
}
|
||||
}
|
||||
|
||||
@ -154,14 +159,14 @@ likely to change.
|
||||
if internal.Patch {
|
||||
now, err := strconv.Atoi(newTag.Patch)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
newTag.Patch = strconv.Itoa(now + 1)
|
||||
} else if internal.Minor {
|
||||
now, err := strconv.Atoi(newTag.Minor)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
newTag.Patch = "0"
|
||||
@ -169,7 +174,7 @@ likely to change.
|
||||
} else if internal.Major {
|
||||
now, err := strconv.Atoi(newTag.Major)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
newTag.Patch = "0"
|
||||
@ -179,25 +184,67 @@ likely to change.
|
||||
}
|
||||
|
||||
newTag.Metadata = mainAppVersion
|
||||
logrus.Debugf("choosing %s as new version for %s", newTag.String(), recipe.Name)
|
||||
log.Debugf("choosing %s as new version for %s", newTag.String(), recipe.Name)
|
||||
nextTag = newTag.String()
|
||||
}
|
||||
|
||||
if _, err := tagcmp.Parse(nextTag); err != nil {
|
||||
logrus.Fatalf("invalid version %s specified", nextTag)
|
||||
log.Fatalf("invalid version %s specified", nextTag)
|
||||
}
|
||||
|
||||
mainService := "app"
|
||||
label := fmt.Sprintf("coop-cloud.${STACK_NAME}.version=%s", nextTag)
|
||||
if !internal.Dry {
|
||||
if err := recipe.UpdateLabel("compose.y*ml", mainService, label); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
logrus.Infof("dry run: not syncing label %s for recipe %s", nextTag, recipe.Name)
|
||||
log.Infof("dry run: not syncing label %s for recipe %s", nextTag, recipe.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
isClean, err := gitPkg.IsClean(recipe.Dir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if !isClean {
|
||||
log.Infof("%s currently has these unstaged changes 👇", recipe.Name)
|
||||
if err := gitPkg.DiffUnstaged(recipe.Dir); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
},
|
||||
BashComplete: autocomplete.RecipeNameComplete,
|
||||
}
|
||||
|
||||
func init() {
|
||||
RecipeSyncCommand.Flags().BoolVarP(
|
||||
&internal.Dry,
|
||||
"dry-run",
|
||||
"r",
|
||||
false,
|
||||
"report changes that would be made",
|
||||
)
|
||||
|
||||
RecipeSyncCommand.Flags().BoolVarP(
|
||||
&internal.Major,
|
||||
"major",
|
||||
"x",
|
||||
false,
|
||||
"increase the major part of the version",
|
||||
)
|
||||
|
||||
RecipeSyncCommand.Flags().BoolVarP(
|
||||
&internal.Minor,
|
||||
"minor",
|
||||
"y",
|
||||
false,
|
||||
"increase the minor part of the version",
|
||||
)
|
||||
|
||||
RecipeSyncCommand.Flags().BoolVarP(
|
||||
&internal.Patch,
|
||||
"patch",
|
||||
"z",
|
||||
false,
|
||||
"increase the patch part of the version",
|
||||
)
|
||||
}
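Both sync and upgrade fold the --major/--minor/--patch flags into a single integer (major*4 + minor*2 + patch) and reject combinations with the power-of-two check seen above. A small self-contained sketch of that logic; btoi is reimplemented here as an assumption, since abra's own helper is not shown in this diff:

package main

import "fmt"

// btoi is assumed to behave like abra's helper of the same name: true -> 1.
func btoi(b bool) int {
	if b {
		return 1
	}
	return 0
}

func main() {
	major, minor, patch := false, true, true // two flags set at once

	bumpType := btoi(major)*4 + btoi(minor)*2 + btoi(patch)

	// A power of two has exactly one bit set, meaning exactly one flag was
	// used: 4, 2 and 1 pass, while 6 & 5 == 4 != 0, so this combination fails.
	if bumpType != 0 && (bumpType&(bumpType-1)) != 0 {
		fmt.Println("you can only use one version flag: --major, --minor or --patch")
		return
	}

	fmt.Println("bump type:", bumpType)
}
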
|
||||
|
@ -12,15 +12,14 @@ import (
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
gitPkg "coopcloud.tech/abra/pkg/git"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
recipePkg "coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/tagcmp"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type imgPin struct {
|
||||
@ -28,8 +27,8 @@ type imgPin struct {
|
||||
version tagcmp.Tag
|
||||
}
|
||||
|
||||
// anUpgrade represents a single service upgrade (as within a recipe), and the list of tags that it can be upgraded to,
|
||||
// for serialization purposes.
|
||||
// anUpgrade represents a single service upgrade (as within a recipe), and the
|
||||
// list of tags that it can be upgraded to, for serialization purposes.
|
||||
type anUpgrade struct {
|
||||
Service string `json:"service"`
|
||||
Image string `json:"image"`
|
||||
@ -37,14 +36,13 @@ type anUpgrade struct {
|
||||
UpgradeTags []string `json:"upgrades"`
|
||||
}
|
||||
|
||||
var recipeUpgradeCommand = cli.Command{
|
||||
Name: "upgrade",
|
||||
var RecipeUpgradeCommand = &cobra.Command{
|
||||
Use: "upgrade <recipe> [flags]",
|
||||
Aliases: []string{"u"},
|
||||
Usage: "Upgrade recipe image tags",
|
||||
Description: `
|
||||
Parse all image tags within the given <recipe> configuration and prompt with
|
||||
more recent tags to upgrade to. It will update the relevant compose file tags
|
||||
on the local file system.
|
||||
Short: "Upgrade recipe image tags",
|
||||
Long: `Upgrade a given <recipe> configuration.
|
||||
|
||||
It will update the relevant compose file tags on the local file system.
|
||||
|
||||
Some image tags cannot be parsed because they do not follow some sort of
|
||||
semver-like convention. In this case, all possible tags will be listed and it
|
||||
@ -54,36 +52,26 @@ The command is interactive and will show a select input which allows you to
|
||||
make a selection. Use the "?" key to see more help on navigating this
|
||||
interface.
|
||||
|
||||
You may invoke this command in "wizard" mode and be prompted for input:
|
||||
|
||||
abra recipe upgrade
|
||||
`,
|
||||
BashComplete: autocomplete.RecipeNameComplete,
|
||||
ArgsUsage: "<recipe>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.PatchFlag,
|
||||
internal.MinorFlag,
|
||||
internal.MajorFlag,
|
||||
internal.MachineReadableFlag,
|
||||
internal.AllTagsFlag,
|
||||
internal.OfflineFlag,
|
||||
You may invoke this command in "wizard" mode and be prompted for input.`,
|
||||
Args: cobra.RangeArgs(0, 1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.RecipeNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
recipe := internal.ValidateRecipeWithPrompt(c, conf)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
recipe := internal.ValidateRecipe(args, cmd.Name())
|
||||
|
||||
if err := recipePkg.EnsureUpToDate(recipe.Name, conf); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if err := recipe.Ensure(internal.GetEnsureContext()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
bumpType := btoi(internal.Major)*4 + btoi(internal.Minor)*2 + btoi(internal.Patch)
|
||||
if bumpType != 0 {
|
||||
// a bitwise check if the number is a power of 2
|
||||
if (bumpType & (bumpType - 1)) != 0 {
|
||||
logrus.Fatal("you can only use one of: --major, --minor, --patch.")
|
||||
log.Fatal("you can only use one of: --major, --minor, --patch.")
|
||||
}
|
||||
}
|
||||
|
||||
@ -96,26 +84,25 @@ You may invoke this command in "wizard" mode and be prompted for input:
|
||||
|
||||
// check for versions file and load pinned versions
|
||||
versionsPresent := false
|
||||
recipeDir := path.Join(config.RECIPES_DIR, recipe.Name)
|
||||
versionsPath := path.Join(recipeDir, "versions")
|
||||
var servicePins = make(map[string]imgPin)
|
||||
versionsPath := path.Join(recipe.Dir, "versions")
|
||||
servicePins := make(map[string]imgPin)
|
||||
if _, err := os.Stat(versionsPath); err == nil {
|
||||
logrus.Debugf("found versions file for %s", recipe.Name)
|
||||
log.Debugf("found versions file for %s", recipe.Name)
|
||||
file, err := os.Open(versionsPath)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
splitLine := strings.Split(line, " ")
|
||||
if splitLine[0] != "pin" || len(splitLine) != 3 {
|
||||
logrus.Fatalf("malformed version pin specification: %s", line)
|
||||
log.Fatalf("malformed version pin specification: %s", line)
|
||||
}
|
||||
pinSlice := strings.Split(splitLine[2], ":")
|
||||
pinTag, err := tagcmp.Parse(pinSlice[1])
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
pin := imgPin{
|
||||
image: pinSlice[0],
|
||||
@ -124,45 +111,50 @@ You may invoke this command in "wizard" mode and be prompted for input:
|
||||
servicePins[splitLine[1]] = pin
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
logrus.Error(err)
|
||||
log.Error(err)
|
||||
}
|
||||
versionsPresent = true
|
||||
} else {
|
||||
logrus.Debugf("did not find versions file for %s", recipe.Name)
|
||||
log.Debugf("did not find versions file for %s", recipe.Name)
|
||||
}
|
||||
|
||||
for _, service := range recipe.Config.Services {
|
||||
config, err := recipe.GetComposeConfig(nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, service := range config.Services {
|
||||
img, err := reference.ParseNormalizedNamed(service.Image)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
regVersions, err := client.GetRegistryTags(img)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
image := reference.Path(img)
|
||||
logrus.Debugf("retrieved %s from remote registry for %s", regVersions, image)
|
||||
log.Debugf("retrieved %s from remote registry for %s", regVersions, image)
|
||||
image = formatter.StripTagMeta(image)
|
||||
|
||||
switch img.(type) {
|
||||
case reference.NamedTagged:
|
||||
if !tagcmp.IsParsable(img.(reference.NamedTagged).Tag()) {
|
||||
logrus.Debugf("%s not considered semver-like", img.(reference.NamedTagged).Tag())
|
||||
log.Debugf("%s not considered semver-like", img.(reference.NamedTagged).Tag())
|
||||
}
|
||||
default:
|
||||
logrus.Warnf("unable to read tag for image %s, is it missing? skipping upgrade for %s", image, service.Name)
|
||||
log.Warnf("unable to read tag for image %s, is it missing? skipping upgrade for %s", image, service.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
tag, err := tagcmp.Parse(img.(reference.NamedTagged).Tag())
|
||||
if err != nil {
|
||||
logrus.Warnf("unable to parse %s, error was: %s, skipping upgrade for %s", image, err.Error(), service.Name)
|
||||
log.Warnf("unable to parse %s, error was: %s, skipping upgrade for %s", image, err.Error(), service.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
logrus.Debugf("parsed %s for %s", tag, service.Name)
|
||||
log.Debugf("parsed %s for %s", tag, service.Name)
|
||||
|
||||
var compatible []tagcmp.Tag
|
||||
for _, regVersion := range regVersions {
|
||||
@ -176,18 +168,18 @@ You may invoke this command in "wizard" mode and be prompted for input:
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("detected potential upgradable tags %s for %s", compatible, service.Name)
|
||||
log.Debugf("detected potential upgradable tags %s for %s", compatible, service.Name)
|
||||
|
||||
sort.Sort(tagcmp.ByTagDesc(compatible))
|
||||
|
||||
if len(compatible) == 0 && !internal.AllTags {
|
||||
logrus.Info(fmt.Sprintf("no new versions available for %s, assuming %s is the latest (use -a/--all-tags to see all anyway)", image, tag))
|
||||
if len(compatible) == 0 && !allTags {
|
||||
log.Info(fmt.Sprintf("no new versions available for %s, assuming %s is the latest (use -a/--all-tags to see all anyway)", image, tag))
|
||||
continue // skip on to the next tag and don't update any compose files
|
||||
}
|
||||
|
||||
catlVersions, err := recipePkg.VersionsOfService(recipe.Name, service.Name, conf)
|
||||
catlVersions, err := recipePkg.VersionsOfService(recipe.Name, service.Name, internal.Offline)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
compatibleStrings := []string{"skip"}
|
||||
@ -203,7 +195,7 @@ You may invoke this command in "wizard" mode and be prompted for input:
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("detected compatible upgradable tags %s for %s", compatibleStrings, service.Name)
|
||||
log.Debugf("detected compatible upgradable tags %s for %s", compatibleStrings, service.Name)
|
||||
|
||||
var upgradeTag string
|
||||
_, ok := servicePins[service.Name]
|
||||
@ -220,13 +212,13 @@ You may invoke this command in "wizard" mode and be prompted for input:
|
||||
}
|
||||
}
|
||||
if contains {
|
||||
logrus.Infof("upgrading service %s from %s to %s (pinned tag: %s)", service.Name, tag.String(), upgradeTag, pinnedTagString)
|
||||
log.Infof("upgrading service %s from %s to %s (pinned tag: %s)", service.Name, tag.String(), upgradeTag, pinnedTagString)
|
||||
} else {
|
||||
logrus.Infof("service %s, image %s pinned to %s, no compatible upgrade found", service.Name, servicePins[service.Name].image, pinnedTagString)
|
||||
log.Infof("service %s, image %s pinned to %s, no compatible upgrade found", service.Name, servicePins[service.Name].image, pinnedTagString)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
logrus.Fatalf("service %s is at version %s, but pinned to %s, please correct your compose.yml file manually!", service.Name, tag.String(), pinnedTag.String())
|
||||
log.Fatalf("service %s is at version %s, but pinned to %s, please correct your compose.yml file manually!", service.Name, tag.String(), pinnedTag.String())
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
@ -234,7 +226,7 @@ You may invoke this command in "wizard" mode and be prompted for input:
|
||||
for _, upTag := range compatible {
|
||||
upElement, err := tag.UpgradeDelta(upTag)
|
||||
if err != nil {
|
||||
return err
|
||||
return
|
||||
}
|
||||
delta := upElement.UpgradeType()
|
||||
if delta <= bumpType {
|
||||
@ -243,15 +235,15 @@ You may invoke this command in "wizard" mode and be prompted for input:
|
||||
}
|
||||
}
|
||||
if upgradeTag == "" {
|
||||
logrus.Warnf("not upgrading from %s to %s for %s, because the upgrade type is more serious than what user wants", tag.String(), compatible[0].String(), image)
|
||||
log.Warnf("not upgrading from %s to %s for %s, because the upgrade type is more serious than what user wants", tag.String(), compatible[0].String(), image)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
msg := fmt.Sprintf("upgrade to which tag? (service: %s, image: %s, tag: %s)", service.Name, image, tag)
|
||||
if !tagcmp.IsParsable(img.(reference.NamedTagged).Tag()) || internal.AllTags {
|
||||
if !tagcmp.IsParsable(img.(reference.NamedTagged).Tag()) || allTags {
|
||||
tag := img.(reference.NamedTagged).Tag()
|
||||
if !internal.AllTags {
|
||||
logrus.Warning(fmt.Sprintf("unable to determine versioning semantics of %s, listing all tags", tag))
|
||||
if !allTags {
|
||||
log.Warn(fmt.Sprintf("unable to determine versioning semantics of %s, listing all tags", tag))
|
||||
}
|
||||
msg = fmt.Sprintf("upgrade to which tag? (service: %s, tag: %s)", service.Name, tag)
|
||||
compatibleStrings = []string{"skip"}
|
||||
@ -289,7 +281,7 @@ You may invoke this command in "wizard" mode and be prompted for input:
|
||||
Options: compatibleStrings,
|
||||
}
|
||||
if err := survey.AskOne(prompt, &upgradeTag); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -297,14 +289,14 @@ You may invoke this command in "wizard" mode and be prompted for input:
|
||||
if upgradeTag != "skip" {
|
||||
ok, err := recipe.UpdateTag(image, upgradeTag)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
if ok {
|
||||
logrus.Infof("tag upgraded from %s to %s for %s", tag.String(), upgradeTag, image)
|
||||
log.Infof("tag upgraded from %s to %s for %s", tag.String(), upgradeTag, image)
|
||||
}
|
||||
} else {
|
||||
if !internal.NoInput {
|
||||
logrus.Warnf("not upgrading %s, skipping as requested", image)
|
||||
log.Warnf("not upgrading %s, skipping as requested", image)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -313,20 +305,77 @@ You may invoke this command in "wizard" mode and be prompted for input:
|
||||
if internal.MachineReadable {
|
||||
jsonstring, err := json.Marshal(upgradeList)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println(string(jsonstring))
|
||||
return nil
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
for _, upgrade := range upgradeList {
|
||||
logrus.Infof("can upgrade service: %s, image: %s, tag: %s ::\n", upgrade.Service, upgrade.Image, upgrade.Tag)
|
||||
log.Infof("can upgrade service: %s, image: %s, tag: %s ::", upgrade.Service, upgrade.Image, upgrade.Tag)
|
||||
for _, utag := range upgrade.UpgradeTags {
|
||||
logrus.Infof(" %s\n", utag)
|
||||
log.Infof(" %s", utag)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
isClean, err := gitPkg.IsClean(recipe.Dir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if !isClean {
|
||||
log.Infof("%s currently has these unstaged changes 👇", recipe.Name)
|
||||
if err := gitPkg.DiffUnstaged(recipe.Dir); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
allTags bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
RecipeUpgradeCommand.Flags().BoolVarP(
|
||||
&internal.Major,
|
||||
"major",
|
||||
"x",
|
||||
false,
|
||||
"increase the major part of the version",
|
||||
)
|
||||
|
||||
RecipeUpgradeCommand.Flags().BoolVarP(
|
||||
&internal.Minor,
|
||||
"minor",
|
||||
"y",
|
||||
false,
|
||||
"increase the minor part of the version",
|
||||
)
|
||||
|
||||
RecipeUpgradeCommand.Flags().BoolVarP(
|
||||
&internal.Patch,
|
||||
"patch",
|
||||
"z",
|
||||
false,
|
||||
"increase the patch part of the version",
|
||||
)
|
||||
|
||||
RecipeUpgradeCommand.Flags().BoolVarP(
|
||||
&internal.MachineReadable,
|
||||
"machine",
|
||||
"m",
|
||||
false,
|
||||
"print machine-readable output",
|
||||
)
|
||||
|
||||
RecipeUpgradeCommand.Flags().BoolVarP(
|
||||
&allTags,
|
||||
"all-tags",
|
||||
"a",
|
||||
false,
|
||||
"list all tags, not just upgrades",
|
||||
)
|
||||
}
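The upgrade command above optionally reads a versions file whose format is inferred from the scanner logic: lines of the form "pin <service> <image>:<tag>". A sketch that parses one such line the same way; the example line and image are made up:

package main

import (
	"fmt"
	"strings"

	"coopcloud.tech/tagcmp"
)

func main() {
	// Format inferred from the scanner logic above: "pin <service> <image>:<tag>".
	line := "pin app nginx:1.25.3"

	splitLine := strings.Split(line, " ")
	if splitLine[0] != "pin" || len(splitLine) != 3 {
		fmt.Println("malformed version pin specification:", line)
		return
	}

	pinSlice := strings.Split(splitLine[2], ":")
	pinTag, err := tagcmp.Parse(pinSlice[1])
	if err != nil {
		fmt.Println("cannot parse pinned tag:", err)
		return
	}

	fmt.Printf("service %s pinned to image %s at tag %s\n", splitLine[1], pinSlice[0], pinTag.String())
}
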
|
||||
|
@ -1,59 +1,135 @@
|
||||
package recipe
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
recipePkg "coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var recipeVersionCommand = cli.Command{
|
||||
Name: "versions",
|
||||
Aliases: []string{"v"},
|
||||
Usage: "List recipe versions",
|
||||
ArgsUsage: "<recipe>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.OfflineFlag,
|
||||
var RecipeVersionCommand = &cobra.Command{
|
||||
Use: "versions <recipe> [flags]",
|
||||
Aliases: []string{"v"},
|
||||
Short: "List recipe versions",
|
||||
Args: cobra.ExactArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.RecipeNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
BashComplete: autocomplete.RecipeNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
recipe := internal.ValidateRecipe(c, conf)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var warnMessages []string
|
||||
|
||||
catalogue, err := recipePkg.ReadRecipeCatalogue(conf)
|
||||
recipe := internal.ValidateRecipe(args, cmd.Name())
|
||||
|
||||
catl, err := recipePkg.ReadRecipeCatalogue(internal.Offline)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
recipeMeta, ok := catalogue[recipe.Name]
|
||||
recipeMeta, ok := catl[recipe.Name]
|
||||
if !ok {
|
||||
logrus.Fatalf("%s recipe doesn't exist?", recipe.Name)
|
||||
warnMessages = append(warnMessages, "retrieved versions from local recipe repository")
|
||||
|
||||
recipeVersions, warnMsg, err := recipe.GetRecipeVersions()
|
||||
if err != nil {
|
||||
warnMessages = append(warnMessages, err.Error())
|
||||
}
|
||||
if len(warnMsg) > 0 {
|
||||
warnMessages = append(warnMessages, warnMsg...)
|
||||
}
|
||||
|
||||
recipeMeta = recipePkg.RecipeMeta{Versions: recipeVersions}
|
||||
}
|
||||
|
||||
tableCol := []string{"Version", "Service", "Image", "Tag"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
if len(recipeMeta.Versions) == 0 {
|
||||
log.Fatalf("%s has no published versions?", recipe.Name)
|
||||
}
|
||||
|
||||
for i := len(recipeMeta.Versions) - 1; i >= 0; i-- {
|
||||
for tag, meta := range recipeMeta.Versions[i] {
|
||||
table, err := formatter.CreateTable()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
table.Headers("SERVICE", "IMAGE", "TAG", "VERSION")
|
||||
|
||||
for version, meta := range recipeMeta.Versions[i] {
|
||||
var allRows [][]string
|
||||
var rows [][]string
|
||||
|
||||
for service, serviceMeta := range meta {
|
||||
table.Append([]string{tag, service, serviceMeta.Image, serviceMeta.Tag})
|
||||
recipeVersion := version
|
||||
if service != "app" {
|
||||
recipeVersion = ""
|
||||
}
|
||||
|
||||
rows = append(rows, []string{
|
||||
service,
|
||||
serviceMeta.Image,
|
||||
serviceMeta.Tag,
|
||||
recipeVersion,
|
||||
})
|
||||
|
||||
allRows = append(allRows, []string{
|
||||
version,
|
||||
service,
|
||||
serviceMeta.Image,
|
||||
serviceMeta.Tag,
|
||||
recipeVersion,
|
||||
})
|
||||
}
|
||||
|
||||
sort.Slice(rows, sortServiceByName(rows))
|
||||
|
||||
table.Rows(rows...)
|
||||
|
||||
if !internal.MachineReadable {
|
||||
if err := formatter.PrintTable(table); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if internal.MachineReadable {
|
||||
sort.Slice(allRows, sortServiceByName(allRows))
|
||||
headers := []string{"VERSION", "SERVICE", "NAME", "TAG"}
|
||||
out, err := formatter.ToJSON(headers, allRows)
|
||||
if err != nil {
|
||||
log.Fatal("unable to render to JSON: %s", err)
|
||||
}
|
||||
fmt.Println(out)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
table.SetAutoMergeCells(true)
|
||||
|
||||
if table.NumLines() > 0 {
|
||||
table.Render()
|
||||
} else {
|
||||
logrus.Fatalf("%s has no published versions?", recipe.Name)
|
||||
if !internal.MachineReadable {
|
||||
for _, warnMsg := range warnMessages {
|
||||
log.Warn(warnMsg)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func sortServiceByName(versions [][]string) func(i, j int) bool {
|
||||
return func(i, j int) bool {
|
||||
return versions[i][0] < versions[j][0]
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
RecipeVersionCommand.Flags().BoolVarP(
|
||||
&internal.MachineReadable,
|
||||
"machine",
|
||||
"m",
|
||||
false,
|
||||
"print machine-readable output",
|
||||
)
|
||||
}
|
||||
|
@ -1,83 +0,0 @@
|
||||
package record
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"github.com/libdns/gandi"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// RecordListCommand lists domains.
|
||||
var RecordListCommand = cli.Command{
|
||||
Name: "list",
|
||||
Usage: "List domain name records",
|
||||
Aliases: []string{"ls"},
|
||||
ArgsUsage: "<zone>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.DNSProviderFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Description: `
|
||||
List all domain name records managed by a 3rd party provider for a specific
|
||||
zone.
|
||||
|
||||
You must specify a zone (e.g. example.com) under which your domain name records
|
||||
are listed. This zone must already be created on your provider account.
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
if err := internal.EnsureDNSProvider(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
zone, err := internal.EnsureZoneArgument(c)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
var provider gandi.Provider
|
||||
switch internal.DNSProvider {
|
||||
case "gandi":
|
||||
provider, err = gandiPkg.New()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
default:
|
||||
logrus.Fatalf("%s is not a supported DNS provider", internal.DNSProvider)
|
||||
}
|
||||
|
||||
records, err := provider.GetRecords(context.Background(), zone)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
tableCol := []string{"type", "name", "value", "TTL", "priority"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
|
||||
for _, record := range records {
|
||||
value := record.Value
|
||||
if len(record.Value) > 30 {
|
||||
value = fmt.Sprintf("%s...", record.Value[:30])
|
||||
}
|
||||
|
||||
table.Append([]string{
|
||||
record.Type,
|
||||
record.Name,
|
||||
value,
|
||||
record.TTL.String(),
|
||||
strconv.Itoa(record.Priority),
|
||||
})
|
||||
}
|
||||
|
||||
table.Render()
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
@ -1,149 +0,0 @@
|
||||
package record
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/dns"
|
||||
gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"github.com/libdns/gandi"
|
||||
"github.com/libdns/libdns"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// RecordNewCommand creates a new domain name record.
|
||||
var RecordNewCommand = cli.Command{
|
||||
Name: "new",
|
||||
Usage: "Create a new domain record",
|
||||
Aliases: []string{"n"},
|
||||
ArgsUsage: "<zone>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.DNSProviderFlag,
|
||||
internal.DNSTypeFlag,
|
||||
internal.DNSNameFlag,
|
||||
internal.DNSValueFlag,
|
||||
internal.DNSTTLFlag,
|
||||
internal.DNSPriorityFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Description: `
|
||||
Create a new domain name record for a specific zone.
|
||||
|
||||
You must specify a zone (e.g. example.com) under which your domain name records
|
||||
are listed. This zone must already be created on your provider account.
|
||||
|
||||
Example:
|
||||
|
||||
abra record new foo.com -p gandi -t A -n myapp -v 192.168.178.44
|
||||
|
||||
You may also invoke this command in "wizard" mode and be prompted for input:
|
||||
|
||||
abra record new
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
zone, err := internal.EnsureZoneArgument(c)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if err := internal.EnsureDNSProvider(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
var provider gandi.Provider
|
||||
switch internal.DNSProvider {
|
||||
case "gandi":
|
||||
provider, err = gandiPkg.New()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
default:
|
||||
logrus.Fatalf("%s is not a supported DNS provider", internal.DNSProvider)
|
||||
}
|
||||
|
||||
if err := internal.EnsureDNSTypeFlag(c); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if err := internal.EnsureDNSNameFlag(c); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if err := internal.EnsureDNSValueFlag(c); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
ttl, err := dns.GetTTL(internal.DNSTTL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
record := libdns.Record{
|
||||
Type: internal.DNSType,
|
||||
Name: internal.DNSName,
|
||||
Value: internal.DNSValue,
|
||||
TTL: ttl,
|
||||
}
|
||||
|
||||
if internal.DNSType == "MX" || internal.DNSType == "SRV" || internal.DNSType == "URI" {
|
||||
record.Priority = internal.DNSPriority
|
||||
}
|
||||
|
||||
records, err := provider.GetRecords(context.Background(), zone)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
for _, existingRecord := range records {
|
||||
if existingRecord.Type == record.Type &&
|
||||
existingRecord.Name == record.Name &&
|
||||
existingRecord.Value == record.Value {
|
||||
logrus.Fatalf("%s record for %s already exists?", record.Type, zone)
|
||||
}
|
||||
}
|
||||
|
||||
createdRecords, err := provider.SetRecords(
|
||||
context.Background(),
|
||||
zone,
|
||||
[]libdns.Record{record},
|
||||
)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if len(createdRecords) == 0 {
|
||||
logrus.Fatal("provider library reports that no record was created?")
|
||||
}
|
||||
|
||||
createdRecord := createdRecords[0]
|
||||
|
||||
tableCol := []string{"type", "name", "value", "TTL", "priority"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
|
||||
value := createdRecord.Value
|
||||
if len(createdRecord.Value) > 30 {
|
||||
value = fmt.Sprintf("%s...", createdRecord.Value[:30])
|
||||
}
|
||||
|
||||
table.Append([]string{
|
||||
createdRecord.Type,
|
||||
createdRecord.Name,
|
||||
value,
|
||||
createdRecord.TTL.String(),
|
||||
strconv.Itoa(createdRecord.Priority),
|
||||
})
|
||||
|
||||
table.Render()
|
||||
|
||||
logrus.Info("record created")
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
@@ -1,37 +0,0 @@
package record

import (
	"github.com/urfave/cli"
)

// RecordCommand supports managing DNS entries.
var RecordCommand = cli.Command{
	Name:      "record",
	Usage:     "Manage domain name records",
	Aliases:   []string{"rc"},
	ArgsUsage: "<record>",
	Description: `
Manage domain name records via 3rd party providers such as Gandi DNS. It
supports listing, creating and removing all types of records that you might
need for managing Co-op Cloud apps.

The following providers are supported:

  Gandi DNS https://www.gandi.net

You need an account with such a provider already. Typically, you need to
provide an API token on the Abra command-line when using these commands so that
you can authenticate with your provider account.

New providers can be integrated; we welcome change sets. See the underlying DNS
library documentation for more. It supports many existing providers and makes
it easy to implement support for new ones:

  https://pkg.go.dev/github.com/libdns/libdns
`,
	Subcommands: []cli.Command{
		RecordListCommand,
		RecordNewCommand,
		RecordRemoveCommand,
	},
}
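For orientation, the record subcommands in this diff all drive the same small libdns surface. Below is a minimal standalone sketch using only the calls that appear above (gandiPkg.New, GetRecords, SetRecords, DeleteRecords and the libdns.Record fields). The zone and record values are illustrative, and it assumes gandiPkg.New finds your API token the same way the abra commands do.

package main

import (
	"context"
	"fmt"
	"time"

	gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
	"github.com/libdns/libdns"
)

func main() {
	ctx := context.Background()

	// Assumes credentials are wired up the same way the commands above do
	// (abra's gandi wrapper); error handling kept minimal for brevity.
	provider, err := gandiPkg.New()
	if err != nil {
		panic(err)
	}

	zone := "example.com" // must already exist on the provider account

	// List existing records, as "abra record list" does.
	records, err := provider.GetRecords(ctx, zone)
	if err != nil {
		panic(err)
	}
	for _, r := range records {
		fmt.Println(r.Type, r.Name, r.Value, r.TTL, r.Priority)
	}

	// Create a record, as "abra record new" does.
	record := libdns.Record{
		Type:  "A",
		Name:  "myapp",
		Value: "192.168.178.44",
		TTL:   600 * time.Second,
	}
	if _, err := provider.SetRecords(ctx, zone, []libdns.Record{record}); err != nil {
		panic(err)
	}

	// And remove it again, as "abra record remove" does.
	if _, err := provider.DeleteRecords(ctx, zone, []libdns.Record{record}); err != nil {
		panic(err)
	}
}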
@ -1,137 +0,0 @@
|
||||
package record
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/libdns/gandi"
|
||||
"github.com/libdns/libdns"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// RecordRemoveCommand lists domains.
|
||||
var RecordRemoveCommand = cli.Command{
|
||||
Name: "remove",
|
||||
Usage: "Remove a domain name record",
|
||||
Aliases: []string{"rm"},
|
||||
ArgsUsage: "<zone>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.DNSProviderFlag,
|
||||
internal.DNSTypeFlag,
|
||||
internal.DNSNameFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Description: `
|
||||
Remove a domain name record for a specific zone.
|
||||
|
||||
It uses the type of record and name to match existing records and choose one
|
||||
for deletion. You must specify a zone (e.g. example.com) under which your
|
||||
domain name records are listed. This zone must already be created on your
|
||||
provider account.
|
||||
|
||||
Example:
|
||||
|
||||
abra record remove foo.com -p gandi -t A -n myapp
|
||||
|
||||
You may also invoke this command in "wizard" mode and be prompted for input:
|
||||
|
||||
abra record rm
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
zone, err := internal.EnsureZoneArgument(c)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if err := internal.EnsureDNSProvider(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
var provider gandi.Provider
|
||||
switch internal.DNSProvider {
|
||||
case "gandi":
|
||||
provider, err = gandiPkg.New()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
default:
|
||||
logrus.Fatalf("%s is not a supported DNS provider", internal.DNSProvider)
|
||||
}
|
||||
|
||||
if err := internal.EnsureDNSTypeFlag(c); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if err := internal.EnsureDNSNameFlag(c); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
records, err := provider.GetRecords(context.Background(), zone)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
var toDelete libdns.Record
|
||||
for _, record := range records {
|
||||
if record.Type == internal.DNSType && record.Name == internal.DNSName {
|
||||
toDelete = record
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if (libdns.Record{}) == toDelete {
|
||||
logrus.Fatal("provider library reports no matching record?")
|
||||
}
|
||||
|
||||
tableCol := []string{"type", "name", "value", "TTL", "priority"}
|
||||
table := formatter.CreateTable(tableCol)
|
||||
|
||||
value := toDelete.Value
|
||||
if len(toDelete.Value) > 30 {
|
||||
value = fmt.Sprintf("%s...", toDelete.Value[:30])
|
||||
}
|
||||
|
||||
table.Append([]string{
|
||||
toDelete.Type,
|
||||
toDelete.Name,
|
||||
value,
|
||||
toDelete.TTL.String(),
|
||||
strconv.Itoa(toDelete.Priority),
|
||||
})
|
||||
|
||||
table.Render()
|
||||
|
||||
if !internal.NoInput {
|
||||
response := false
|
||||
prompt := &survey.Confirm{
|
||||
Message: "continue with record deletion?",
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, &response); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !response {
|
||||
logrus.Fatal("exiting as requested")
|
||||
}
|
||||
}
|
||||
|
||||
_, err = provider.DeleteRecords(context.Background(), zone, []libdns.Record{toDelete})
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Info("record successfully deleted")
|
||||
|
||||
return nil
|
||||
},
|
||||
}
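Several destructive commands in this diff share the same confirm-before-acting pattern via survey: render what will be removed, then ask. A tiny standalone sketch of that pattern, with the record values hard-coded for illustration:

package main

import (
	"fmt"

	"github.com/AlecAivazis/survey/v2"
)

func main() {
	// What would be removed (stand-in for the table rendered above).
	fmt.Println("A myapp 192.168.178.44")

	response := false
	prompt := &survey.Confirm{
		Message: "continue with record deletion?",
	}

	if err := survey.AskOne(prompt, &response); err != nil {
		panic(err)
	}

	if !response {
		fmt.Println("exiting as requested")
		return
	}

	// ... provider.DeleteRecords(...) would run here, as above.
	fmt.Println("record successfully deleted")
}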
|
214 cli/run.go Normal file
@@ -0,0 +1,214 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"coopcloud.tech/abra/cli/app"
|
||||
"coopcloud.tech/abra/cli/catalogue"
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/cli/recipe"
|
||||
"coopcloud.tech/abra/cli/server"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
charmLog "github.com/charmbracelet/log"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/cobra/doc"
|
||||
)
|
||||
|
||||
func Run(version, commit string) {
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "abra [cmd] [args] [flags]",
|
||||
Short: "The Co-op Cloud command-line utility belt 🎩🐇",
|
||||
Version: fmt.Sprintf("%s-%s", version, commit[:7]),
|
||||
ValidArgs: []string{
|
||||
"app",
|
||||
"autocomplete",
|
||||
"catalogue",
|
||||
"man",
|
||||
"recipe",
|
||||
"server",
|
||||
"upgrade",
|
||||
},
|
||||
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||
paths := []string{
|
||||
config.ABRA_DIR,
|
||||
config.SERVERS_DIR,
|
||||
config.RECIPES_DIR,
|
||||
config.VENDOR_DIR, // TODO(d1): remove > 0.9.x
|
||||
config.BACKUP_DIR, // TODO(d1): remove > 0.9.x
|
||||
}
|
||||
|
||||
for _, path := range paths {
|
||||
if err := os.Mkdir(path, 0764); err != nil {
|
||||
if !os.IsExist(err) {
|
||||
log.Fatal(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
log.Logger.SetStyles(charmLog.DefaultStyles())
|
||||
charmLog.SetDefault(log.Logger)
|
||||
|
||||
if internal.Debug {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
log.SetOutput(os.Stderr)
|
||||
log.SetReportCaller(true)
|
||||
}
|
||||
|
||||
log.Debugf("abra version %s, commit %s", version, commit)
|
||||
},
|
||||
}
|
||||
|
||||
rootCmd.CompletionOptions.DisableDefaultCmd = true
|
||||
|
||||
manCommand := &cobra.Command{
|
||||
Use: "man [flags]",
|
||||
Aliases: []string{"m"},
|
||||
Short: "Generate manpage",
|
||||
Example: ` # generate the man pages into /usr/local/share/man/man1
|
||||
sudo abra man
|
||||
sudo mandb
|
||||
|
||||
# read the man pages
|
||||
man abra
|
||||
man abra-app-deploy`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
header := &doc.GenManHeader{
|
||||
Title: "ABRA",
|
||||
Section: "1",
|
||||
}
|
||||
|
||||
manDir := "/usr/local/share/man/man1"
|
||||
if _, err := os.Stat(manDir); os.IsNotExist(err) {
|
||||
log.Fatalf("unable to proceed, '%s' does not exist?")
|
||||
}
|
||||
|
||||
err := doc.GenManTree(rootCmd, header, manDir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Info("don't forget to run 'sudo mandb'")
|
||||
},
|
||||
}
|
||||
|
||||
rootCmd.PersistentFlags().BoolVarP(
|
||||
&internal.Debug,
|
||||
"debug",
|
||||
"d",
|
||||
false,
|
||||
"show debug messages",
|
||||
)
|
||||
|
||||
rootCmd.PersistentFlags().BoolVarP(
|
||||
&internal.NoInput,
|
||||
"no-input",
|
||||
"n",
|
||||
false,
|
||||
"toggle non-interactive mode",
|
||||
)
|
||||
|
||||
rootCmd.PersistentFlags().BoolVarP(
|
||||
&internal.Offline,
|
||||
"offline",
|
||||
"o",
|
||||
false,
|
||||
"prefer offline & filesystem access",
|
||||
)
|
||||
|
||||
rootCmd.PersistentFlags().BoolVarP(
|
||||
&internal.IgnoreEnvVersion,
|
||||
"ignore-env-version",
|
||||
"i",
|
||||
false,
|
||||
"ignore .env version checkout",
|
||||
)
|
||||
|
||||
catalogue.CatalogueCommand.AddCommand(
|
||||
catalogue.CatalogueGenerateCommand,
|
||||
)
|
||||
|
||||
server.ServerCommand.AddCommand(
|
||||
server.ServerAddCommand,
|
||||
server.ServerListCommand,
|
||||
server.ServerPruneCommand,
|
||||
server.ServerRemoveCommand,
|
||||
)
|
||||
|
||||
recipe.RecipeCommand.AddCommand(
|
||||
recipe.RecipeDiffCommand,
|
||||
recipe.RecipeFetchCommand,
|
||||
recipe.RecipeLintCommand,
|
||||
recipe.RecipeListCommand,
|
||||
recipe.RecipeNewCommand,
|
||||
recipe.RecipeReleaseCommand,
|
||||
recipe.RecipeResetCommand,
|
||||
recipe.RecipeSyncCommand,
|
||||
recipe.RecipeUpgradeCommand,
|
||||
recipe.RecipeVersionCommand,
|
||||
)
|
||||
|
||||
rootCmd.AddCommand(
|
||||
UpgradeCommand,
|
||||
AutocompleteCommand,
|
||||
manCommand,
|
||||
app.AppCommand,
|
||||
catalogue.CatalogueCommand,
|
||||
server.ServerCommand,
|
||||
recipe.RecipeCommand,
|
||||
)
|
||||
|
||||
app.AppCmdCommand.AddCommand(
|
||||
app.AppCmdListCommand,
|
||||
)
|
||||
|
||||
app.AppSecretCommand.AddCommand(
|
||||
app.AppSecretGenerateCommand,
|
||||
app.AppSecretInsertCommand,
|
||||
app.AppSecretRmCommand,
|
||||
app.AppSecretLsCommand,
|
||||
)
|
||||
|
||||
app.AppVolumeCommand.AddCommand(
|
||||
app.AppVolumeListCommand,
|
||||
app.AppVolumeRemoveCommand,
|
||||
)
|
||||
|
||||
app.AppBackupCommand.AddCommand(
|
||||
app.AppBackupListCommand,
|
||||
app.AppBackupDownloadCommand,
|
||||
app.AppBackupCreateCommand,
|
||||
app.AppBackupSnapshotsCommand,
|
||||
)
|
||||
|
||||
app.AppCommand.AddCommand(
|
||||
app.AppBackupCommand,
|
||||
app.AppCheckCommand,
|
||||
app.AppCmdCommand,
|
||||
app.AppConfigCommand,
|
||||
app.AppCpCommand,
|
||||
app.AppDeployCommand,
|
||||
app.AppListCommand,
|
||||
app.AppLogsCommand,
|
||||
app.AppNewCommand,
|
||||
app.AppPsCommand,
|
||||
app.AppRemoveCommand,
|
||||
app.AppRestartCommand,
|
||||
app.AppRestoreCommand,
|
||||
app.AppRollbackCommand,
|
||||
app.AppRunCommand,
|
||||
app.AppSecretCommand,
|
||||
app.AppServicesCommand,
|
||||
app.AppUndeployCommand,
|
||||
app.AppUpgradeCommand,
|
||||
app.AppVolumeCommand,
|
||||
app.AppLabelsCommand,
|
||||
app.AppEnvCommand,
|
||||
)
|
||||
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
@ -1,51 +1,149 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
contextPkg "coopcloud.tech/abra/pkg/context"
|
||||
"coopcloud.tech/abra/pkg/dns"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/server"
|
||||
sshPkg "coopcloud.tech/abra/pkg/ssh"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var local bool
|
||||
var localFlag = &cli.BoolFlag{
|
||||
Name: "local, l",
|
||||
Usage: "Use local server",
|
||||
Destination: &local,
|
||||
var ServerAddCommand = &cobra.Command{
|
||||
Use: "add [[server] | --local] [flags]",
|
||||
Aliases: []string{"a"},
|
||||
Short: "Add a new server",
|
||||
Long: `Add a new server to your configuration so that it can be managed by Abra.
|
||||
|
||||
Abra relies on the standard SSH command-line and ~/.ssh/config for client
|
||||
connection details. You must configure an entry per-host in your ~/.ssh/config
|
||||
for each server:
|
||||
|
||||
Host 1312.net 1312
|
||||
Hostname 1312.net
|
||||
User antifa
|
||||
Port 12345
|
||||
IdentityFile ~/.ssh/antifa@somewhere
|
||||
|
||||
If "--local" is passed, then Abra assumes that the current local server is
|
||||
intended as the target server. This is useful when you want to have your entire
|
||||
Co-op Cloud config located on the server itself, and not on your local
|
||||
developer machine. The domain is then set to "default".`,
|
||||
Example: " abra server add 1312.net",
|
||||
Args: cobra.RangeArgs(0, 1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
if !local {
|
||||
return autocomplete.ServerNameComplete()
|
||||
}
|
||||
return nil, cobra.ShellCompDirectiveDefault
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if len(args) > 0 && local {
|
||||
log.Fatal("cannot use [server] and --local together")
|
||||
}
|
||||
|
||||
if len(args) == 0 && !local {
|
||||
log.Fatal("missing argument or --local/-l flag")
|
||||
}
|
||||
|
||||
name := "default"
|
||||
if !local {
|
||||
name = internal.ValidateDomain(args)
|
||||
}
|
||||
|
||||
// NOTE(d1): reasonable 5 second timeout for connections which can't
|
||||
// succeed. The connection is attempted twice, so this results in 10
|
||||
// seconds.
|
||||
timeout := client.WithTimeout(5)
|
||||
|
||||
if local {
|
||||
created, err := createServerDir(name)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Debugf("attempting to create client for %s", name)
|
||||
|
||||
if _, err := client.New(name, timeout); err != nil {
|
||||
cleanUp(name)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if created {
|
||||
log.Info("local server successfully added")
|
||||
} else {
|
||||
log.Warn("local server already exists")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
_, err := createServerDir(name)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
created, err := newContext(name)
|
||||
if err != nil {
|
||||
cleanUp(name)
|
||||
log.Fatalf("unable to create local context: %s", err)
|
||||
}
|
||||
|
||||
log.Debugf("attempting to create client for %s", name)
|
||||
|
||||
if _, err := client.New(name, timeout); err != nil {
|
||||
cleanUp(name)
|
||||
log.Debugf("ssh %s error: %s", name, sshPkg.Fatal(name, err))
|
||||
log.Fatalf("can't ssh to %s, make sure \"ssh %s\" works", name, name)
|
||||
}
|
||||
|
||||
if created {
|
||||
log.Infof("%s successfully added", name)
|
||||
|
||||
if _, err := dns.EnsureIPv4(name); err != nil {
|
||||
log.Warnf("unable to resolve IPv4 for %s", name)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Warnf("%s already exists", name)
|
||||
},
|
||||
}
|
||||
|
||||
func cleanUp(domainName string) {
|
||||
if domainName != "default" {
|
||||
logrus.Infof("cleaning up context for %s", domainName)
|
||||
if err := client.DeleteContext(domainName); err != nil {
|
||||
logrus.Fatal(err)
|
||||
// cleanUp cleans up the partially created context/client details for a failed
|
||||
// "server add" attempt.
|
||||
func cleanUp(name string) {
|
||||
if name != "default" {
|
||||
log.Debugf("serverAdd: cleanUp: cleaning up context for %s", name)
|
||||
if err := client.DeleteContext(name); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Infof("attempting to clean up server directory for %s", domainName)
|
||||
|
||||
serverDir := filepath.Join(config.SERVERS_DIR, domainName)
|
||||
serverDir := filepath.Join(config.SERVERS_DIR, name)
|
||||
files, err := config.GetAllFilesInDirectory(serverDir)
|
||||
if err != nil {
|
||||
logrus.Fatalf("unable to list files in %s: %s", serverDir, err)
|
||||
log.Fatalf("serverAdd: cleanUp: unable to list files in %s: %s", serverDir, err)
|
||||
}
|
||||
|
||||
if len(files) > 0 {
|
||||
logrus.Warnf("aborting clean up of %s because it is not empty", serverDir)
|
||||
log.Debugf("serverAdd: cleanUp: %s is not empty, aborting cleanup", serverDir)
|
||||
return
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(serverDir); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatalf("serverAdd: cleanUp: failed to remove %s: %s", serverDir, err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -53,130 +151,54 @@ func cleanUp(domainName string) {
|
||||
// Docker manages SSH connection details. These are stored to disk in
|
||||
// ~/.docker. Abra can manage this completely for the user, so it's an
|
||||
// implementation detail.
|
||||
func newContext(c *cli.Context, domainName, username, port string) error {
|
||||
func newContext(name string) (bool, error) {
|
||||
store := contextPkg.NewDefaultDockerContextStore()
|
||||
contexts, err := store.Store.List()
|
||||
if err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, context := range contexts {
|
||||
if context.Name == domainName {
|
||||
logrus.Debugf("context for %s already exists", domainName)
|
||||
return nil
|
||||
if context.Name == name {
|
||||
log.Debugf("context for %s already exists", name)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("creating context with domain %s, username %s and port %s", domainName, username, port)
|
||||
log.Debugf("creating context with domain %s", name)
|
||||
|
||||
if err := client.CreateContext(domainName, username, port); err != nil {
|
||||
return err
|
||||
if err := client.CreateContext(name); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return nil
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// createServerDir creates the ~/.abra/servers/... directory for a new server.
|
||||
func createServerDir(domainName string) error {
|
||||
if err := server.CreateServerDir(domainName); err != nil {
|
||||
func createServerDir(name string) (bool, error) {
|
||||
if err := server.CreateServerDir(name); err != nil {
|
||||
if !os.IsExist(err) {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
logrus.Debugf("server dir for %s already created", domainName)
|
||||
|
||||
log.Debugf("server dir for %s already created", name)
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return nil
|
||||
return true, nil
|
||||
}
|
||||
|
||||
var serverAddCommand = cli.Command{
|
||||
Name: "add",
|
||||
Aliases: []string{"a"},
|
||||
Usage: "Add a server to your configuration",
|
||||
Description: `
|
||||
Add a new server to your configuration so that it can be managed by Abra.
|
||||
var (
|
||||
local bool
|
||||
)
|
||||
|
||||
Abra uses the SSH command-line to discover connection details for your server.
|
||||
It is advised to configure an entry per-host in your ~/.ssh/config for each
|
||||
server. For example:
|
||||
|
||||
Host example.com
|
||||
Hostname example.com
|
||||
User exampleUser
|
||||
Port 12345
|
||||
IdentityFile ~/.ssh/example@somewhere
|
||||
|
||||
Abra can then load SSH connection details from this configuration with:
|
||||
|
||||
abra server add example.com
|
||||
|
||||
If "--local" is passed, then Abra assumes that the current local server is
|
||||
intended as the target server. This is useful when you want to have your entire
|
||||
Co-op Cloud config located on the server itself, and not on your local
|
||||
developer machine.
|
||||
`,
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
localFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
ArgsUsage: "<domain>",
|
||||
Action: func(c *cli.Context) error {
|
||||
if len(c.Args()) > 0 && local || !internal.ValidateSubCmdFlags(c) {
|
||||
err := errors.New("cannot use <domain> and --local together")
|
||||
internal.ShowSubcommandHelpAndError(c, err)
|
||||
}
|
||||
|
||||
var domainName string
|
||||
if local {
|
||||
domainName = "default"
|
||||
} else {
|
||||
domainName = internal.ValidateDomain(c)
|
||||
}
|
||||
|
||||
if local {
|
||||
if err := createServerDir(domainName); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Infof("attempting to create client for %s", domainName)
|
||||
if _, err := client.New(domainName); err != nil {
|
||||
cleanUp(domainName)
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Info("local server added")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err := dns.EnsureIPv4(domainName); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if err := createServerDir(domainName); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
hostConfig, err := sshPkg.GetHostConfig(domainName)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if err := newContext(c, domainName, hostConfig.User, hostConfig.Port); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Infof("attempting to create client for %s", domainName)
|
||||
if _, err := client.New(domainName); err != nil {
|
||||
cleanUp(domainName)
|
||||
logrus.Debugf("failed to construct client for %s, saw %s", domainName, err.Error())
|
||||
logrus.Fatal(sshPkg.Fatal(domainName, err))
|
||||
}
|
||||
|
||||
logrus.Infof("%s added", domainName)
|
||||
|
||||
return nil
|
||||
},
|
||||
func init() {
|
||||
ServerAddCommand.Flags().BoolVarP(
|
||||
&local,
|
||||
"local",
|
||||
"l",
|
||||
false,
|
||||
"use local server",
|
||||
)
|
||||
}
|
||||
|
@ -1,96 +1,103 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/context"
|
||||
contextPkg "coopcloud.tech/abra/pkg/context"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"github.com/docker/cli/cli/connhelper/ssh"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var problemsFilter bool
|
||||
|
||||
var problemsFilterFlag = &cli.BoolFlag{
|
||||
Name: "problems, p",
|
||||
Usage: "Show only servers with potential connection problems",
|
||||
Destination: &problemsFilter,
|
||||
}
|
||||
|
||||
var serverListCommand = cli.Command{
|
||||
Name: "list",
|
||||
var ServerListCommand = &cobra.Command{
|
||||
Use: "list [flags]",
|
||||
Aliases: []string{"ls"},
|
||||
Usage: "List managed servers",
|
||||
Flags: []cli.Flag{
|
||||
problemsFilterFlag,
|
||||
internal.DebugFlag,
|
||||
internal.MachineReadableFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Action: func(c *cli.Context) error {
|
||||
dockerContextStore := context.NewDefaultDockerContextStore()
|
||||
Short: "List managed servers",
|
||||
Args: cobra.NoArgs,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dockerContextStore := contextPkg.NewDefaultDockerContextStore()
|
||||
contexts, err := dockerContextStore.Store.List()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
tableColumns := []string{"name", "host", "user", "port"}
|
||||
table := formatter.CreateTable(tableColumns)
|
||||
table, err := formatter.CreateTable()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
headers := []string{"NAME", "HOST"}
|
||||
table.Headers(headers...)
|
||||
|
||||
serverNames, err := config.ReadServerNames()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var rows [][]string
|
||||
for _, serverName := range serverNames {
|
||||
var row []string
|
||||
for _, ctx := range contexts {
|
||||
endpoint, err := context.GetContextEndpoint(ctx)
|
||||
for _, dockerCtx := range contexts {
|
||||
endpoint, err := contextPkg.GetContextEndpoint(dockerCtx)
|
||||
if err != nil && strings.Contains(err.Error(), "does not exist") {
|
||||
// No local context found, we can continue safely
|
||||
continue
|
||||
}
|
||||
|
||||
if ctx.Name == serverName {
|
||||
if dockerCtx.Name == serverName {
|
||||
sp, err := ssh.ParseURL(endpoint)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
row = []string{serverName, sp.Host, sp.User, sp.Port}
|
||||
|
||||
if sp.Host == "" {
|
||||
sp.Host = "unknown"
|
||||
}
|
||||
|
||||
row = []string{serverName, sp.Host}
|
||||
rows = append(rows, row)
|
||||
}
|
||||
}
|
||||
|
||||
if len(row) == 0 {
|
||||
if serverName == "default" {
|
||||
row = []string{serverName, "local", "n/a", "n/a"}
|
||||
row = []string{serverName, "local"}
|
||||
} else {
|
||||
row = []string{serverName, "unknown", "unknown", "unknown"}
|
||||
row = []string{serverName, "unknown"}
|
||||
}
|
||||
rows = append(rows, row)
|
||||
}
|
||||
|
||||
if problemsFilter {
|
||||
if row[1] == "unknown" {
|
||||
table.Append(row)
|
||||
}
|
||||
} else {
|
||||
table.Append(row)
|
||||
}
|
||||
table.Row(row...)
|
||||
}
|
||||
|
||||
if internal.MachineReadable {
|
||||
table.JSONRender()
|
||||
} else {
|
||||
if problemsFilter && table.NumLines() == 0 {
|
||||
logrus.Info("all servers wired up correctly 👏")
|
||||
} else {
|
||||
table.Render()
|
||||
out, err := formatter.ToJSON(headers, rows)
|
||||
if err != nil {
|
||||
log.Fatal("unable to render to JSON: %s", err)
|
||||
}
|
||||
|
||||
fmt.Println(out)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
return nil
|
||||
if err := formatter.PrintTable(table); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
ServerListCommand.Flags().BoolVarP(
|
||||
&internal.MachineReadable,
|
||||
"machine",
|
||||
"m",
|
||||
false,
|
||||
"print machine-readable output",
|
||||
)
|
||||
}
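The host column in the listing above comes from parsing each Docker context's SSH endpoint with the Docker CLI's connhelper. A standalone sketch of just that step; the endpoint string is a hard-coded stand-in for what contextPkg.GetContextEndpoint returns:

package main

import (
	"fmt"

	"github.com/docker/cli/cli/connhelper/ssh"
)

func main() {
	// Stand-in for the endpoint of a context created by "abra server add 1312.net".
	endpoint := "ssh://antifa@1312.net:12345"

	sp, err := ssh.ParseURL(endpoint)
	if err != nil {
		panic(err)
	}

	host := sp.Host
	if host == "" {
		host = "unknown" // same fallback as the listing above
	}

	fmt.Println(host, sp.User, sp.Port) // 1312.net antifa 12345
}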
|
||||
|
@ -1,261 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/libcapsul"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/hetznercloud/hcloud-go/hcloud"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func newHetznerCloudVPS(c *cli.Context) error {
|
||||
if err := internal.EnsureNewHetznerCloudVPSFlags(c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
client := hcloud.NewClient(hcloud.WithToken(internal.HetznerCloudAPIToken))
|
||||
|
||||
var sshKeysRaw []string
|
||||
var sshKeys []*hcloud.SSHKey
|
||||
for _, sshKey := range c.StringSlice("hetzner-ssh-keys") {
|
||||
if sshKey == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
sshKey, _, err := client.SSHKey.GetByName(context.Background(), sshKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sshKeys = append(sshKeys, sshKey)
|
||||
sshKeysRaw = append(sshKeysRaw, sshKey.Name)
|
||||
}
|
||||
|
||||
serverOpts := hcloud.ServerCreateOpts{
|
||||
Name: internal.HetznerCloudName,
|
||||
ServerType: &hcloud.ServerType{Name: internal.HetznerCloudType},
|
||||
Image: &hcloud.Image{Name: internal.HetznerCloudImage},
|
||||
SSHKeys: sshKeys,
|
||||
Location: &hcloud.Location{Name: internal.HetznerCloudLocation},
|
||||
}
|
||||
|
||||
sshKeyIDs := strings.Join(sshKeysRaw, "\n")
|
||||
if sshKeyIDs == "" {
|
||||
sshKeyIDs = "N/A (password auth)"
|
||||
}
|
||||
|
||||
tableColumns := []string{"name", "type", "image", "ssh-keys", "location"}
|
||||
table := formatter.CreateTable(tableColumns)
|
||||
table.Append([]string{
|
||||
internal.HetznerCloudName,
|
||||
internal.HetznerCloudType,
|
||||
internal.HetznerCloudImage,
|
||||
sshKeyIDs,
|
||||
internal.HetznerCloudLocation,
|
||||
})
|
||||
table.Render()
|
||||
|
||||
response := false
|
||||
prompt := &survey.Confirm{
|
||||
Message: "continue with hetzner cloud VPS creation?",
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, &response); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !response {
|
||||
logrus.Fatal("exiting as requested")
|
||||
}
|
||||
|
||||
res, _, err := client.Server.Create(context.Background(), serverOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var rootPassword string
|
||||
if len(sshKeys) > 0 {
|
||||
rootPassword = "N/A (using SSH keys)"
|
||||
} else {
|
||||
rootPassword = res.RootPassword
|
||||
}
|
||||
|
||||
ip := res.Server.PublicNet.IPv4.IP.String()
|
||||
|
||||
fmt.Println(fmt.Sprintf(`
|
||||
Your new Hetzner Cloud VPS has successfully been created! Here are the details:
|
||||
|
||||
name: %s
|
||||
IP address: %s
|
||||
root password: %s
|
||||
|
||||
You can access this new VPS via SSH using the following command:
|
||||
|
||||
ssh root@%s
|
||||
|
||||
Please note, this server is not managed by Abra yet (i.e. "abra server ls" will
|
||||
not list this server)! You will need to assign a domain name record (manually
|
||||
or by using "abra record new") and add the server to your Abra configuration
|
||||
("abra server add") to have a working server that you can deploy Co-op Cloud
|
||||
apps to.
|
||||
|
||||
When setting up domain name records, you probably want to set up the following
|
||||
2 A records. This supports deploying apps to your root domain (e.g.
|
||||
example.com) and other apps on sub-domains (e.g. foo.example.com,
|
||||
bar.example.com).
|
||||
|
||||
@ 1800 IN A %s
|
||||
* 1800 IN A %s
|
||||
`,
|
||||
internal.HetznerCloudName, ip, rootPassword,
|
||||
ip, ip, ip,
|
||||
))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newCapsulVPS(c *cli.Context) error {
|
||||
if err := internal.EnsureNewCapsulVPSFlags(c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
capsulCreateURL := fmt.Sprintf("https://%s/api/capsul/create", internal.CapsulInstanceURL)
|
||||
|
||||
var sshKeys []string
|
||||
for _, sshKey := range c.StringSlice("capsul-ssh-keys") {
|
||||
if sshKey == "" {
|
||||
continue
|
||||
}
|
||||
sshKeys = append(sshKeys, sshKey)
|
||||
}
|
||||
|
||||
tableColumns := []string{"instance", "name", "type", "image", "ssh-keys"}
|
||||
table := formatter.CreateTable(tableColumns)
|
||||
table.Append([]string{
|
||||
internal.CapsulInstanceURL,
|
||||
internal.CapsulName,
|
||||
internal.CapsulType,
|
||||
internal.CapsulImage,
|
||||
strings.Join(sshKeys, "\n"),
|
||||
})
|
||||
table.Render()
|
||||
|
||||
response := false
|
||||
prompt := &survey.Confirm{
|
||||
Message: "continue with capsul creation?",
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, &response); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !response {
|
||||
logrus.Fatal("exiting as requested")
|
||||
}
|
||||
|
||||
capsulClient := libcapsul.New(capsulCreateURL, internal.CapsulAPIToken)
|
||||
resp, err := capsulClient.Create(
|
||||
internal.CapsulName,
|
||||
internal.CapsulType,
|
||||
internal.CapsulImage,
|
||||
sshKeys,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(fmt.Sprintf(`
|
||||
Your new Capsul has successfully been created! Here are the details:
|
||||
|
||||
Capsul name: %s
|
||||
Capsul ID: %v
|
||||
|
||||
You will need to log into your Capsul instance web interface to retrieve the IP
|
||||
address. You can learn all about how to get SSH access to your new Capsul on:
|
||||
|
||||
%s/about-ssh
|
||||
|
||||
Please note, this server is not managed by Abra yet (i.e. "abra server ls" will
|
||||
not list this server)! You will need to assign a domain name record (manually
|
||||
or by using "abra record new") and add the server to your Abra configuration
|
||||
("abra server add") to have a working server that you can deploy Co-op Cloud
|
||||
apps to.
|
||||
|
||||
When setting up domain name records, you probably want to set up the following
|
||||
2 A records. This supports deploying apps to your root domain (e.g.
|
||||
example.com) and other apps on sub-domains (e.g. foo.example.com,
|
||||
bar.example.com).
|
||||
|
||||
@ 1800 IN A <your-capsul-ip>
|
||||
* 1800 IN A <your-capsul-ip>
|
||||
`, internal.CapsulName, resp.ID, internal.CapsulInstanceURL))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var serverNewCommand = cli.Command{
|
||||
Name: "new",
|
||||
Aliases: []string{"n"},
|
||||
Usage: "Create a new server using a 3rd party provider",
|
||||
Description: `
|
||||
Create a new server via a 3rd party provider.
|
||||
|
||||
The following providers are supported:
|
||||
|
||||
Capsul https://git.cyberia.club/Cyberia/capsul-flask
|
||||
Hetzner Cloud https://docs.hetzner.com/cloud
|
||||
|
||||
You may invoke this command in "wizard" mode and be prompted for input:
|
||||
|
||||
abra server new
|
||||
|
||||
API tokens are read from the environment if specified, e.g.
|
||||
|
||||
export HCLOUD_TOKEN=...
|
||||
`,
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
internal.ServerProviderFlag,
|
||||
internal.OfflineFlag,
|
||||
|
||||
// Capsul
|
||||
internal.CapsulInstanceURLFlag,
|
||||
internal.CapsulTypeFlag,
|
||||
internal.CapsulImageFlag,
|
||||
internal.CapsulSSHKeysFlag,
|
||||
internal.CapsulAPITokenFlag,
|
||||
|
||||
// Hetzner
|
||||
internal.HetznerCloudNameFlag,
|
||||
internal.HetznerCloudTypeFlag,
|
||||
internal.HetznerCloudImageFlag,
|
||||
internal.HetznerCloudSSHKeysFlag,
|
||||
internal.HetznerCloudLocationFlag,
|
||||
internal.HetznerCloudAPITokenFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Action: func(c *cli.Context) error {
|
||||
if err := internal.EnsureServerProvider(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
switch internal.ServerProvider {
|
||||
case "capsul":
|
||||
if err := newCapsulVPS(c); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
case "hetzner-cloud":
|
||||
if err := newHetznerCloudVPS(c); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
@ -1,101 +1,102 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var allFilter bool
|
||||
|
||||
var allFilterFlag = &cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "Remove all unused images not just dangling ones",
|
||||
Destination: &allFilter,
|
||||
}
|
||||
|
||||
var volunesFilter bool
|
||||
|
||||
var volumesFilterFlag = &cli.BoolFlag{
|
||||
Name: "volumes, v",
|
||||
Usage: "Prune volumes. This will remove app data, Be Careful!",
|
||||
Destination: &volunesFilter,
|
||||
}
|
||||
|
||||
var serverPruneCommand = cli.Command{
|
||||
Name: "prune",
|
||||
var ServerPruneCommand = &cobra.Command{
|
||||
Use: "prune <server> [flags]",
|
||||
Aliases: []string{"p"},
|
||||
Usage: "Prune a managed server; Runs a docker system prune",
|
||||
Description: `
|
||||
Prunes unused containers, networks, and dangling images.
|
||||
Short: "Prune resources on a server",
|
||||
Long: `Prunes unused containers, networks, and dangling images.
|
||||
|
||||
If passing "-v/--volumes" then volumes not connected with a deployed app will
|
||||
also be removed. This can result in unwanted data loss if not used carefully.
|
||||
`,
|
||||
ArgsUsage: "[<server>]",
|
||||
Flags: []cli.Flag{
|
||||
allFilterFlag,
|
||||
volumesFilterFlag,
|
||||
internal.DebugFlag,
|
||||
internal.OfflineFlag,
|
||||
Use "--volumes/-v" to remove volumes that are not associated with a deployed
|
||||
app. This can result in unwanted data loss if not used carefully.`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.ServerNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
BashComplete: autocomplete.ServerNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
var args filters.Args
|
||||
|
||||
serverName := internal.ValidateServer(c)
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
serverName := internal.ValidateServer(args)
|
||||
|
||||
cl, err := client.New(serverName)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
cr, err := cl.ContainersPrune(ctx, args)
|
||||
var filterArgs filters.Args
|
||||
|
||||
cr, err := cl.ContainersPrune(cmd.Context(), filterArgs)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cntSpaceReclaimed := formatter.ByteCountSI(cr.SpaceReclaimed)
|
||||
logrus.Infof("containers pruned: %d; space reclaimed: %s", len(cr.ContainersDeleted), cntSpaceReclaimed)
|
||||
log.Infof("containers pruned: %d; space reclaimed: %s", len(cr.ContainersDeleted), cntSpaceReclaimed)
|
||||
|
||||
nr, err := cl.NetworksPrune(ctx, args)
|
||||
nr, err := cl.NetworksPrune(cmd.Context(), filterArgs)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Infof("networks pruned: %d", len(nr.NetworksDeleted))
|
||||
log.Infof("networks pruned: %d", len(nr.NetworksDeleted))
|
||||
|
||||
pruneFilters := filters.NewArgs()
|
||||
if allFilter {
|
||||
log.Debugf("removing all images, not only dangling ones")
|
||||
pruneFilters.Add("dangling", "false")
|
||||
}
|
||||
|
||||
ir, err := cl.ImagesPrune(ctx, pruneFilters)
|
||||
ir, err := cl.ImagesPrune(cmd.Context(), pruneFilters)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
imgSpaceReclaimed := formatter.ByteCountSI(ir.SpaceReclaimed)
|
||||
logrus.Infof("images pruned: %d; space reclaimed: %s", len(ir.ImagesDeleted), imgSpaceReclaimed)
|
||||
log.Infof("images pruned: %d; space reclaimed: %s", len(ir.ImagesDeleted), imgSpaceReclaimed)
|
||||
|
||||
if volunesFilter {
|
||||
vr, err := cl.VolumesPrune(ctx, args)
|
||||
if volumesFilter {
|
||||
vr, err := cl.VolumesPrune(cmd.Context(), filterArgs)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
volSpaceReclaimed := formatter.ByteCountSI(vr.SpaceReclaimed)
|
||||
logrus.Infof("volumes pruned: %d; space reclaimed: %s", len(vr.VolumesDeleted), volSpaceReclaimed)
|
||||
log.Infof("volumes pruned: %d; space reclaimed: %s", len(vr.VolumesDeleted), volSpaceReclaimed)
|
||||
}
|
||||
|
||||
return nil
|
||||
return
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
allFilter bool
|
||||
volumesFilter bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
ServerPruneCommand.Flags().BoolVarP(
|
||||
&allFilter,
|
||||
"all",
|
||||
"a",
|
||||
false,
|
||||
"remove all unused images",
|
||||
)
|
||||
|
||||
ServerPruneCommand.Flags().BoolVarP(
|
||||
&volumesFilter,
|
||||
"volumes",
|
||||
"v",
|
||||
false,
|
||||
"remove volumes",
|
||||
)
|
||||
}
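As the debug message above notes, passing "--all/-a" adds a "dangling=false" filter so that unused tagged images are pruned too, not only dangling ones. A minimal sketch of that image-prune step outside of cobra; "default" is assumed to be an existing abra server/context name:

package main

import (
	"context"

	"coopcloud.tech/abra/pkg/client"
	"coopcloud.tech/abra/pkg/formatter"
	"coopcloud.tech/abra/pkg/log"
	"github.com/docker/docker/api/types/filters"
)

func main() {
	cl, err := client.New("default")
	if err != nil {
		log.Fatal(err)
	}

	pruneFilters := filters.NewArgs()
	pruneFilters.Add("dangling", "false") // what "--all/-a" toggles above

	ir, err := cl.ImagesPrune(context.Background(), pruneFilters)
	if err != nil {
		log.Fatal(err)
	}

	log.Infof("images pruned: %d; space reclaimed: %s",
		len(ir.ImagesDeleted), formatter.ByteCountSI(ir.SpaceReclaimed))
}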
|
||||
|
@@ -1,8 +1,6 @@
package server

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

@@ -10,178 +8,39 @@ import (
|
||||
"coopcloud.tech/abra/pkg/autocomplete"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/hetznercloud/hcloud-go/hcloud"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var rmServer bool
|
||||
var rmServerFlag = &cli.BoolFlag{
|
||||
Name: "server, s",
|
||||
Usage: "remove the actual server also",
|
||||
Destination: &rmServer,
|
||||
}
|
||||
var ServerRemoveCommand = &cobra.Command{
|
||||
Use: "remove <server> [flags]",
|
||||
Aliases: []string{"rm"},
|
||||
Short: "Remove a managed server",
|
||||
Long: `Remove a managed server.
|
||||
|
||||
func rmHetznerCloudVPS(c *cli.Context) error {
|
||||
if internal.HetznerCloudName == "" && !internal.NoInput {
|
||||
prompt := &survey.Input{
|
||||
Message: "specify hetzner cloud VPS name",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &internal.HetznerCloudName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if internal.HetznerCloudAPIToken == "" && !internal.NoInput {
|
||||
token, ok := os.LookupEnv("HCLOUD_TOKEN")
|
||||
if !ok {
|
||||
prompt := &survey.Input{
|
||||
Message: "specify hetzner cloud API token",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &internal.HetznerCloudAPIToken); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
internal.HetznerCloudAPIToken = token
|
||||
}
|
||||
}
|
||||
|
||||
client := hcloud.NewClient(hcloud.WithToken(internal.HetznerCloudAPIToken))
|
||||
|
||||
server, _, err := client.Server.Get(context.Background(), internal.HetznerCloudName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if server == nil {
|
||||
logrus.Fatalf("library provider reports that %s doesn't exist?", internal.HetznerCloudName)
|
||||
}
|
||||
|
||||
fmt.Println(fmt.Sprintf(`
|
||||
You have requested that Abra delete the following server (%s). Please be
|
||||
absolutely sure that this is indeed the server that you would like to have
|
||||
removed. There will be no going back once you confirm, the server will be
|
||||
destroyed.
|
||||
`, server.Name))
|
||||
|
||||
tableColumns := []string{"name", "type", "image", "location"}
|
||||
table := formatter.CreateTable(tableColumns)
|
||||
table.Append([]string{
|
||||
server.Name,
|
||||
server.ServerType.Name,
|
||||
server.Image.Name,
|
||||
server.Datacenter.Name,
|
||||
})
|
||||
table.Render()
|
||||
|
||||
response := false
|
||||
prompt := &survey.Confirm{
|
||||
Message: "continue with hetzner cloud VPS removal?",
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, &response); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !response {
|
||||
logrus.Fatal("exiting as requested")
|
||||
}
|
||||
|
||||
_, err = client.Server.Delete(context.Background(), server)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof("%s has been deleted from your hetzner cloud account", internal.HetznerCloudName)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var serverRemoveCommand = cli.Command{
|
||||
Name: "remove",
|
||||
Aliases: []string{"rm"},
|
||||
ArgsUsage: "[<server>]",
|
||||
Usage: "Remove a managed server",
|
||||
Description: `
|
||||
Remove a server from Abra management.
|
||||
|
||||
Depending on whether you used a 3rd party provider to create this server ("abra
|
||||
server new"), you can also destroy the virtual server as well. Pass
|
||||
"--server/-s" to tell Abra to try to delete this VPS.
|
||||
|
||||
Otherwise, Abra will remove the internal bookkeeping (~/.abra/servers/...) and
|
||||
Abra will remove the internal bookkeeping ($ABRA_DIR/servers/...) and
|
||||
underlying client connection context. This server will then be lost in time,
|
||||
like tears in rain.
|
||||
`,
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.NoInputFlag,
|
||||
rmServerFlag,
|
||||
internal.ServerProviderFlag,
|
||||
internal.OfflineFlag,
|
||||
|
||||
// Hetzner
|
||||
internal.HetznerCloudNameFlag,
|
||||
internal.HetznerCloudAPITokenFlag,
|
||||
like tears in rain.`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
ValidArgsFunction: func(
|
||||
cmd *cobra.Command,
|
||||
args []string,
|
||||
toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
return autocomplete.ServerNameComplete()
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
BashComplete: autocomplete.ServerNameComplete,
|
||||
Action: func(c *cli.Context) error {
|
||||
serverName := internal.ValidateServer(c)
|
||||
|
||||
warnMsg := `Did not pass -s/--server for actual server deletion, prompting!
|
||||
|
||||
Abra doesn't currently know if it helped you create this server with one of the
|
||||
3rd party integrations (e.g. Capsul). You have a choice here to actually,
|
||||
really and finally destroy this server using those integrations. If you want to
|
||||
do this, choose Yes.
|
||||
|
||||
If you just want to remove the server config files & context, choose No.
|
||||
`
|
||||
|
||||
if !rmServer {
|
||||
logrus.Warn(warnMsg)
|
||||
|
||||
response := false
|
||||
prompt := &survey.Confirm{
|
||||
Message: "delete actual live server?",
|
||||
}
|
||||
if err := survey.AskOne(prompt, &response); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
if response {
|
||||
logrus.Info("setting -s/--server and attempting to remove actual server")
|
||||
rmServer = true
|
||||
}
|
||||
}
|
||||
|
||||
if rmServer {
|
||||
if err := internal.EnsureServerProvider(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
switch internal.ServerProvider {
|
||||
case "capsul":
|
||||
logrus.Warn("capsul provider does not support automatic removal yet, sorry!")
|
||||
case "hetzner-cloud":
|
||||
if err := rmHetznerCloudVPS(c); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
serverName := internal.ValidateServer(args)
|
||||
|
||||
if err := client.DeleteContext(serverName); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(filepath.Join(config.SERVERS_DIR, serverName)); err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
logrus.Infof("server at %s has been lost in time, like tears in rain", serverName)
|
||||
log.Infof("%s is now lost in time, like tears in rain", serverName)
|
||||
|
||||
return nil
|
||||
return
|
||||
},
|
||||
}
|
||||
|
@@ -1,27 +1,10 @@
package server

import (
	"github.com/urfave/cli"
)
import "github.com/spf13/cobra"

// ServerCommand defines the `abra server` command and its subcommands
var ServerCommand = cli.Command{
	Name:    "server",
var ServerCommand = &cobra.Command{
	Use:     "server [cmd] [args] [flags]",
	Aliases: []string{"s"},
	Usage:   "Manage servers",
	Description: `
Create, manage and remove servers using 3rd party integrations.

Servers can be created from scratch using the "abra server new" command. If you
already have a server, you can add it to your configuration using "abra server
add". Abra can provision servers so that they are ready to deploy Co-op Cloud
recipes, see available flags on "abra server add" for more.
`,
	Subcommands: []cli.Command{
		serverNewCommand,
		serverAddCommand,
		serverListCommand,
		serverRemoveCommand,
		serverPruneCommand,
	},
	Short:   "Manage servers",
}
@ -8,148 +8,128 @@ import (
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/cli/internal"
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/envfile"
|
||||
"coopcloud.tech/abra/pkg/lint"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/upstream/convert"
|
||||
"coopcloud.tech/abra/pkg/upstream/stack"
|
||||
"coopcloud.tech/tagcmp"
|
||||
charmLog "github.com/charmbracelet/log"
|
||||
composetypes "github.com/docker/cli/cli/compose/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
)
|
||||
|
||||
const SERVER = "localhost"
|
||||
|
||||
var majorUpdate bool
|
||||
var majorFlag = &cli.BoolFlag{
|
||||
Name: "major, m",
|
||||
Usage: "Also check for major updates",
|
||||
Destination: &majorUpdate,
|
||||
}
|
||||
|
||||
var updateAll bool
|
||||
var allFlag = &cli.BoolFlag{
|
||||
Name: "all, a",
|
||||
Usage: "Update all deployed apps",
|
||||
Destination: &updateAll,
|
||||
}
|
||||
|
||||
// Notify checks for available upgrades
|
||||
var Notify = cli.Command{
|
||||
Name: "notify",
|
||||
// NotifyCommand checks for available upgrades.
|
||||
var NotifyCommand = &cobra.Command{
|
||||
Use: "notify [flags]",
|
||||
Aliases: []string{"n"},
|
||||
Usage: "Check for available upgrades",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
majorFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Description: `
|
||||
It reads the deployed app versions and looks for new versions in the recipe
|
||||
catalogue. If a new patch/minor version is available, a notification is
|
||||
printed. To include major versions use the --major flag.
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
Short: "Check for available upgrades",
|
||||
Long: `Notify on new versions for deployed apps.
|
||||
|
||||
If a new patch/minor version is available, a notification is printed.
|
||||
|
||||
Use "--major/-m" to include new major versions.`,
|
||||
Args: cobra.NoArgs,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cl, err := client.New("default")
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
stacks, err := stack.GetStacks(cl)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, stackInfo := range stacks {
|
||||
stackName := stackInfo.Name
|
||||
recipeName, err := getLabel(cl, stackName, "recipe")
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if recipeName != "" {
|
||||
_, err = getLatestUpgrade(cl, stackName, recipeName, conf)
|
||||
_, err = getLatestUpgrade(cl, stackName, recipeName)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
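The patch/minor versus major distinction that notify relies on (and that upgrade below reuses) is, at heart, a semantic version comparison. Abra performs it via coopcloud.tech/tagcmp; the sketch below is not that implementation, only a hypothetical illustration of the rule with plain string parsing and made-up version strings:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// isMajorBump reports whether candidate has a higher major number than
// deployed. Hypothetical helper for illustration only; real abra version
// tags go through coopcloud.tech/tagcmp, not this parsing.
func isMajorBump(deployed, candidate string) bool {
	major := func(v string) int {
		v = strings.TrimPrefix(v, "v")
		// keep only the part before the first "." and any "+" suffix
		head := strings.SplitN(v, ".", 2)[0]
		head = strings.SplitN(head, "+", 2)[0]
		n, _ := strconv.Atoi(head)
		return n
	}
	return major(candidate) > major(deployed)
}

func main() {
	deployed := "1.4.0+2.3.1"
	for _, candidate := range []string{"1.5.0+2.4.0", "2.0.0+3.0.0"} {
		if isMajorBump(deployed, candidate) {
			fmt.Printf("%s -> %s: major upgrade, only offered with --major/-m\n", deployed, candidate)
			continue
		}
		fmt.Printf("%s -> %s: patch/minor upgrade, notified by default\n", deployed, candidate)
	}
}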
|
||||
|
||||
// UpgradeApp upgrades apps.
|
||||
var UpgradeApp = cli.Command{
|
||||
Name: "upgrade",
|
||||
Aliases: []string{"u"},
|
||||
Usage: "Upgrade apps",
|
||||
ArgsUsage: "<stack-name> <recipe>",
|
||||
Flags: []cli.Flag{
|
||||
internal.DebugFlag,
|
||||
internal.ChaosFlag,
|
||||
majorFlag,
|
||||
allFlag,
|
||||
internal.OfflineFlag,
|
||||
},
|
||||
Before: internal.SubCommandBefore,
|
||||
Description: `
|
||||
Upgrade an app by specifying its stack name and recipe. By passing "--all"
|
||||
instead, every deployed app is upgraded. For each app with auto updates
enabled, the deployed version is compared with the current recipe catalogue
|
||||
version. If a new patch/minor version is available, the app is upgraded. To
|
||||
include major versions use the "--major" flag. Don't do that, it will probably
|
||||
break things. Only apps that are not deployed with "--chaos" are upgraded, to
|
||||
update chaos deployments use the "--chaos" flag. Use it with care.
|
||||
`,
|
||||
Action: func(c *cli.Context) error {
|
||||
conf := runtime.New(runtime.WithOffline(internal.Offline))
|
||||
// UpgradeCommand upgrades apps.
|
||||
var UpgradeCommand = &cobra.Command{
|
||||
Use: "upgrade [[stack] [recipe] | --all] [flags]",
|
||||
Aliases: []string{"u"},
|
||||
Short: "Upgrade apps",
|
||||
Long: `Upgrade an app by specifying stack name and recipe.
|
||||
|
||||
Use "--all" to upgrade every deployed app.
|
||||
|
||||
For each app with auto updates enabled, the deployed version is compared with
|
||||
the current recipe catalogue version. If a new patch/minor version is
|
||||
available, the app is upgraded.
|
||||
|
||||
To include major versions use the "--major/-m" flag. You probably don't want
|
||||
that as it will break things. Only apps that are not deployed with "--chaos/-C"
|
||||
are upgraded, to update chaos deployments use the "--chaos/-C" flag. Use it
|
||||
with care.`,
|
||||
Args: cobra.RangeArgs(0, 2),
|
||||
// TODO(d1): complete stack/recipe
|
||||
// ValidArgsFunction: func(
|
||||
// cmd *cobra.Command,
|
||||
// args []string,
|
||||
// toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
// },
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cl, err := client.New("default")
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if !updateAll && len(args) != 2 {
|
||||
log.Fatal("missing arguments or --all/-a flag")
|
||||
}
|
||||
|
||||
if !updateAll {
|
||||
stackName := c.Args().Get(0)
|
||||
recipeName := c.Args().Get(1)
|
||||
err = tryUpgrade(cl, stackName, recipeName, conf)
|
||||
stackName := args[0]
|
||||
recipeName := args[1]
|
||||
|
||||
err = tryUpgrade(cl, stackName, recipeName)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
stacks, err := stack.GetStacks(cl)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, stackInfo := range stacks {
|
||||
stackName := stackInfo.Name
|
||||
recipeName, err := getLabel(cl, stackName, "recipe")
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = tryUpgrade(cl, stackName, recipeName, conf)
|
||||
err = tryUpgrade(cl, stackName, recipeName)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@ -170,7 +150,7 @@ func getLabel(cl *dockerclient.Client, stackName string, label string) (string,
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("no %s label found for %s", label, stackName)
|
||||
log.Debugf("no %s label found for %s", label, stackName)
|
||||
|
||||
return "", nil
|
||||
}
|
||||
@ -191,13 +171,13 @@ func getBoolLabel(cl *dockerclient.Client, stackName string, label string) (bool
|
||||
return value, nil
|
||||
}
|
||||
|
||||
logrus.Debugf("Boolean label %s could not be found for %s, set default to false.", label, stackName)
|
||||
log.Debugf("boolean label %s could not be found for %s, set default to false.", label, stackName)
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// getEnv reads env variables from docker services.
|
||||
func getEnv(cl *dockerclient.Client, stackName string) (config.AppEnv, error) {
|
||||
func getEnv(cl *dockerclient.Client, stackName string) (envfile.AppEnv, error) {
|
||||
envMap := make(map[string]string)
|
||||
filter := filters.NewArgs()
|
||||
filter.Add("label", fmt.Sprintf("%s=%s", convert.LabelNamespace, stackName))
|
||||
@ -212,12 +192,12 @@ func getEnv(cl *dockerclient.Client, stackName string) (config.AppEnv, error) {
|
||||
for _, envString := range envList {
|
||||
splitString := strings.SplitN(envString, "=", 2)
|
||||
if len(splitString) != 2 {
|
||||
logrus.Debugf("can't separate key from value: %s (this variable is probably unset)", envString)
|
||||
log.Debugf("can't separate key from value: %s (this variable is probably unset)", envString)
|
||||
continue
|
||||
}
|
||||
k := splitString[0]
|
||||
v := splitString[1]
|
||||
logrus.Debugf("For %s read env %s with value: %s from docker service", stackName, k, v)
|
||||
log.Debugf("for %s read env %s with value: %s from docker service", stackName, k, v)
|
||||
envMap[k] = v
|
||||
}
|
||||
}
|
||||
@@ -227,27 +207,26 @@ func getEnv(cl *dockerclient.Client, stackName string) (config.AppEnv, error) {
|
||||
|
||||
// getLatestUpgrade returns the latest available version for an app respecting
|
||||
// the "--major" flag if it is newer than the currently deployed version.
|
||||
func getLatestUpgrade(cl *dockerclient.Client, stackName string,
|
||||
recipeName string, conf *runtime.Config) (string, error) {
|
||||
func getLatestUpgrade(cl *dockerclient.Client, stackName string, recipeName string) (string, error) {
|
||||
deployedVersion, err := getDeployedVersion(cl, stackName, recipeName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
availableUpgrades, err := getAvailableUpgrades(cl, stackName, recipeName, deployedVersion, conf)
|
||||
availableUpgrades, err := getAvailableUpgrades(cl, stackName, recipeName, deployedVersion)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(availableUpgrades) == 0 {
|
||||
logrus.Debugf("no available upgrades for %s", stackName)
|
||||
log.Debugf("no available upgrades for %s", stackName)
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var chosenUpgrade string
|
||||
if len(availableUpgrades) > 0 {
|
||||
chosenUpgrade = availableUpgrades[len(availableUpgrades)-1]
|
||||
logrus.Infof("%s (%s) can be upgraded from version %s to %s", stackName, recipeName, deployedVersion, chosenUpgrade)
|
||||
log.Infof("%s (%s) can be upgraded from version %s to %s", stackName, recipeName, deployedVersion, chosenUpgrade)
|
||||
}
|
||||
|
||||
return chosenUpgrade, nil
|
||||
@@ -255,30 +234,30 @@ func getLatestUpgrade(cl *dockerclient.Client, stackName string,
|
||||
|
||||
// getDeployedVersion returns the currently deployed version of an app.
|
||||
func getDeployedVersion(cl *dockerclient.Client, stackName string, recipeName string) (string, error) {
|
||||
logrus.Debugf("Retrieve deployed version whether %s is already deployed", stackName)
|
||||
log.Debugf("retrieve deployed version whether %s is already deployed", stackName)
|
||||
|
||||
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
|
||||
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !isDeployed {
|
||||
if !deployMeta.IsDeployed {
|
||||
return "", fmt.Errorf("%s is not deployed?", stackName)
|
||||
}
|
||||
|
||||
if deployedVersion == "unknown" {
|
||||
if deployMeta.Version == "unknown" {
|
||||
return "", fmt.Errorf("failed to determine deployed version of %s", stackName)
|
||||
}
|
||||
|
||||
return deployedVersion, nil
|
||||
return deployMeta.Version, nil
|
||||
}
|
||||
|
||||
// getAvailableUpgrades returns all available versions of an app that are newer
|
||||
// than the deployed version. It only includes major upgrades if the "--major"
|
||||
// flag is set.
|
||||
func getAvailableUpgrades(cl *dockerclient.Client, stackName string, recipeName string,
|
||||
deployedVersion string, conf *runtime.Config) ([]string, error) {
|
||||
catl, err := recipe.ReadRecipeCatalogue(conf)
|
||||
deployedVersion string) ([]string, error) {
|
||||
catl, err := recipe.ReadRecipeCatalogue(internal.Offline)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -289,7 +268,7 @@ func getAvailableUpgrades(cl *dockerclient.Client, stackName string, recipeName
|
||||
}
|
||||
|
||||
if len(versions) == 0 {
|
||||
logrus.Warnf("no published releases for %s in the recipe catalogue?", recipeName)
|
||||
log.Warnf("no published releases for %s in the recipe catalogue?", recipeName)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -310,34 +289,32 @@ func getAvailableUpgrades(cl *dockerclient.Client, stackName string, recipeName
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if 0 < versionDelta.UpgradeType() && (versionDelta.UpgradeType() < 4 || majorUpdate) {
|
||||
if 0 < versionDelta.UpgradeType() && (versionDelta.UpgradeType() < 4 || includeMajorUpdates) {
|
||||
availableUpgrades = append(availableUpgrades, version)
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("Available updates for %s: %s", stackName, availableUpgrades)
|
||||
log.Debugf("available updates for %s: %s", stackName, availableUpgrades)
|
||||
|
||||
return availableUpgrades, nil
|
||||
}
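The check above (0 < versionDelta.UpgradeType() && (versionDelta.UpgradeType() < 4 || includeMajorUpdates)) is where patch/minor upgrades get through by default while major jumps wait for "--major/-m". A rough, standalone sketch of that decision follows; it parses plain dotted versions instead of using tagcmp, so the helper names (filterUpgrades, isMajorBump, newer) and the comparison logic are illustrative only, not the project's actual code.

// Illustrative sketch only: abra compares recipe tags with coopcloud.tech/tagcmp;
// this standalone helper mimics the same filtering idea on plain dotted versions.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// isMajorBump reports whether candidate changes the leading version segment
// relative to deployed, e.g. "2.0.0" against "1.9.3".
func isMajorBump(deployed, candidate string) bool {
	return strings.SplitN(deployed, ".", 2)[0] != strings.SplitN(candidate, ".", 2)[0]
}

// newer does a simple numeric, segment-wise comparison; real recipe tags are
// richer than this, which is exactly why abra defers to tagcmp.
func newer(a, b string) bool {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(as) && i < len(bs); i++ {
		ai, _ := strconv.Atoi(as[i])
		bi, _ := strconv.Atoi(bs[i])
		if ai != bi {
			return ai > bi
		}
	}
	return len(as) > len(bs)
}

// filterUpgrades keeps candidates newer than deployed, skipping major bumps
// unless includeMajor is set (mirroring the "--major/-m" flag).
func filterUpgrades(deployed string, candidates []string, includeMajor bool) []string {
	var out []string
	for _, c := range candidates {
		if !newer(c, deployed) {
			continue
		}
		if isMajorBump(deployed, c) && !includeMajor {
			continue
		}
		out = append(out, c)
	}
	return out
}

func main() {
	candidates := []string{"1.9.4", "1.10.0", "2.0.0"}
	fmt.Println(filterUpgrades("1.9.3", candidates, false)) // [1.9.4 1.10.0]
	fmt.Println(filterUpgrades("1.9.3", candidates, true))  // [1.9.4 1.10.0 2.0.0]
}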
|
||||
|
||||
// processRecipeRepoVersion clones, pulls, checks out the version and lints the
|
||||
// recipe repository.
|
||||
func processRecipeRepoVersion(recipeName, version string, conf *runtime.Config) error {
|
||||
if err := recipe.EnsureExists(recipeName, conf); err != nil {
|
||||
func processRecipeRepoVersion(r recipe.Recipe, version string) error {
|
||||
if err := r.EnsureExists(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := recipe.EnsureUpToDate(recipeName, conf); err != nil {
|
||||
if err := r.EnsureUpToDate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := recipe.EnsureVersion(recipeName, version); err != nil {
|
||||
if _, err := r.EnsureVersion(version); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if r, err := recipe.Get(recipeName, conf); err != nil {
|
||||
return err
|
||||
} else if err := lint.LintForErrors(r); err != nil {
|
||||
if err := lint.LintForErrors(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -345,15 +322,14 @@ func processRecipeRepoVersion(recipeName, version string, conf *runtime.Config)
|
||||
}
|
||||
|
||||
// mergeAbraShEnv merges abra.sh env vars into the app env vars.
|
||||
func mergeAbraShEnv(recipeName string, env config.AppEnv) error {
|
||||
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, recipeName, "abra.sh")
|
||||
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
|
||||
func mergeAbraShEnv(recipe recipe.Recipe, env envfile.AppEnv) error {
|
||||
abraShEnv, err := envfile.ReadAbraShEnvVars(recipe.AbraShPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for k, v := range abraShEnv {
|
||||
logrus.Debugf("read v:%s k: %s", v, k)
|
||||
log.Debugf("read v:%s k: %s", v, k)
|
||||
env[k] = v
|
||||
}
|
||||
|
||||
@@ -361,40 +337,41 @@ func mergeAbraShEnv(recipeName string, env config.AppEnv) error {
|
||||
}
|
||||
|
||||
// createDeployConfig merges and enriches the compose config for the deployment.
|
||||
func createDeployConfig(recipeName string, stackName string, env config.AppEnv) (*composetypes.Config, stack.Deploy, error) {
|
||||
func createDeployConfig(r recipe.Recipe, stackName string, env envfile.AppEnv) (*composetypes.Config, stack.Deploy, error) {
|
||||
env["STACK_NAME"] = stackName
|
||||
|
||||
deployOpts := stack.Deploy{
|
||||
Namespace: stackName,
|
||||
Prune: false,
|
||||
ResolveImage: stack.ResolveImageAlways,
|
||||
Detach: false,
|
||||
}
|
||||
|
||||
composeFiles, err := config.GetAppComposeFiles(recipeName, env)
|
||||
composeFiles, err := r.GetComposeFiles(env)
|
||||
if err != nil {
|
||||
return nil, deployOpts, err
|
||||
}
|
||||
|
||||
deployOpts.Composefiles = composeFiles
|
||||
compose, err := config.GetAppComposeConfig(stackName, deployOpts, env)
|
||||
compose, err := appPkg.GetAppComposeConfig(stackName, deployOpts, env)
|
||||
if err != nil {
|
||||
return nil, deployOpts, err
|
||||
}
|
||||
|
||||
config.ExposeAllEnv(stackName, compose, env)
|
||||
appPkg.ExposeAllEnv(stackName, compose, env)
|
||||
|
||||
// after the upgrade the deployment won't be in chaos state anymore
|
||||
config.SetChaosLabel(compose, stackName, false)
|
||||
config.SetRecipeLabel(compose, stackName, recipeName)
|
||||
config.SetUpdateLabel(compose, stackName, env)
|
||||
appPkg.SetChaosLabel(compose, stackName, false)
|
||||
appPkg.SetRecipeLabel(compose, stackName, r.Name)
|
||||
appPkg.SetUpdateLabel(compose, stackName, env)
|
||||
|
||||
return compose, deployOpts, nil
|
||||
}
|
||||
|
||||
// tryUpgrade performs the upgrade if all the requirements are fulfilled.
|
||||
func tryUpgrade(cl *dockerclient.Client, stackName, recipeName string, conf *runtime.Config) error {
|
||||
func tryUpgrade(cl *dockerclient.Client, stackName, recipeName string) error {
|
||||
if recipeName == "" {
|
||||
logrus.Debugf("don't update %s due to missing recipe name", stackName)
|
||||
log.Debugf("don't update %s due to missing recipe name", stackName)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -404,7 +381,7 @@ func tryUpgrade(cl *dockerclient.Client, stackName, recipeName string, conf *run
|
||||
}
|
||||
|
||||
if chaos && !internal.Chaos {
|
||||
logrus.Debugf("don't update %s due to chaos deployment", stackName)
|
||||
log.Debugf("don't update %s due to chaos deployment", stackName)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -414,91 +391,142 @@ func tryUpgrade(cl *dockerclient.Client, stackName, recipeName string, conf *run
|
||||
}
|
||||
|
||||
if !updatesEnabled {
|
||||
logrus.Debugf("don't update %s due to disabled auto updates or missing ENABLE_AUTO_UPDATE env", stackName)
|
||||
log.Debugf("don't update %s due to disabled auto updates or missing ENABLE_AUTO_UPDATE env", stackName)
|
||||
return nil
|
||||
}
|
||||
|
||||
upgradeVersion, err := getLatestUpgrade(cl, stackName, recipeName, conf)
|
||||
upgradeVersion, err := getLatestUpgrade(cl, stackName, recipeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if upgradeVersion == "" {
|
||||
logrus.Debugf("don't update %s due to no new version", stackName)
|
||||
log.Debugf("don't update %s due to no new version", stackName)
|
||||
return nil
|
||||
}
|
||||
|
||||
err = upgrade(cl, stackName, recipeName, upgradeVersion, conf)
|
||||
err = upgrade(cl, stackName, recipeName, upgradeVersion)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// upgrade performs all necessary steps to upgrade an app.
|
||||
func upgrade(cl *dockerclient.Client, stackName, recipeName,
|
||||
upgradeVersion string, conf *runtime.Config) error {
|
||||
func upgrade(cl *dockerclient.Client, stackName, recipeName, upgradeVersion string) error {
|
||||
env, err := getEnv(cl, stackName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
app := config.App{
|
||||
app := appPkg.App{
|
||||
Name: stackName,
|
||||
Recipe: recipeName,
|
||||
Recipe: recipe.Get(recipeName),
|
||||
Server: SERVER,
|
||||
Env: env,
|
||||
}
|
||||
|
||||
if err = processRecipeRepoVersion(recipeName, upgradeVersion, conf); err != nil {
|
||||
r := recipe.Get(recipeName)
|
||||
|
||||
if err = processRecipeRepoVersion(r, upgradeVersion); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = mergeAbraShEnv(recipeName, app.Env); err != nil {
|
||||
if err = mergeAbraShEnv(app.Recipe, app.Env); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
compose, deployOpts, err := createDeployConfig(recipeName, stackName, app.Env)
|
||||
compose, deployOpts, err := createDeployConfig(r, stackName, app.Env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof("upgrade %s (%s) to version %s", stackName, recipeName, upgradeVersion)
|
||||
log.Infof("upgrade %s (%s) to version %s", stackName, recipeName, upgradeVersion)
|
||||
|
||||
err = stack.RunDeploy(cl, deployOpts, compose, stackName, true)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func newAbraApp(version, commit string) *cli.App {
|
||||
app := &cli.App{
|
||||
Name: "kadabra",
|
||||
Usage: `The Co-op Cloud auto-updater
|
||||
____ ____ _ _
|
||||
/ ___|___ ___ _ __ / ___| | ___ _ _ __| |
|
||||
| | / _ \ _____ / _ \| '_ \ | | | |/ _ \| | | |/ _' |
|
||||
| |__| (_) |_____| (_) | |_) | | |___| | (_) | |_| | (_| |
|
||||
\____\___/ \___/| .__/ \____|_|\___/ \__,_|\__,_|
|
||||
|_|
|
||||
`,
|
||||
func newKadabraApp(version, commit string) *cobra.Command {
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "kadabra [cmd] [flags]",
|
||||
Version: fmt.Sprintf("%s-%s", version, commit[:7]),
|
||||
Commands: []cli.Command{
|
||||
Notify,
|
||||
UpgradeApp,
|
||||
Short: "The Co-op Cloud auto-updater 🤖 🚀",
|
||||
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||
log.Logger.SetStyles(charmLog.DefaultStyles())
|
||||
charmLog.SetDefault(log.Logger)
|
||||
|
||||
if internal.Debug {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
log.SetOutput(os.Stderr)
|
||||
log.SetReportCaller(true)
|
||||
}
|
||||
|
||||
log.Debugf("kadabra version %s, commit %s", version, commit)
|
||||
},
|
||||
}
|
||||
|
||||
app.Before = func(c *cli.Context) error {
|
||||
logrus.Debugf("kadabra version %s, commit %s", version, commit)
|
||||
return nil
|
||||
}
|
||||
rootCmd.PersistentFlags().BoolVarP(
|
||||
&internal.Debug, "debug", "d", false,
|
||||
"show debug messages",
|
||||
)
|
||||
|
||||
return app
|
||||
rootCmd.PersistentFlags().BoolVarP(
|
||||
&internal.NoInput, "no-input", "n", false,
|
||||
"toggle non-interactive mode",
|
||||
)
|
||||
|
||||
rootCmd.AddCommand(
|
||||
NotifyCommand,
|
||||
UpgradeCommand,
|
||||
)
|
||||
|
||||
return rootCmd
|
||||
}
|
||||
|
||||
// RunApp runs CLI abra app.
|
||||
func RunApp(version, commit string) {
|
||||
app := newAbraApp(version, commit)
|
||||
app := newKadabraApp(version, commit)
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
logrus.Fatal(err)
|
||||
if err := app.Execute(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
includeMajorUpdates bool
|
||||
updateAll bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
NotifyCommand.Flags().BoolVarP(
|
||||
&includeMajorUpdates,
|
||||
"major",
|
||||
"m",
|
||||
false,
|
||||
"check for major updates",
|
||||
)
|
||||
|
||||
UpgradeCommand.Flags().BoolVarP(
|
||||
&internal.Chaos,
|
||||
"chaos",
|
||||
"C",
|
||||
false,
|
||||
"ignore uncommitted recipes changes",
|
||||
)
|
||||
|
||||
UpgradeCommand.Flags().BoolVarP(
|
||||
&includeMajorUpdates,
|
||||
"major",
|
||||
"m",
|
||||
false,
|
||||
"check for major updates",
|
||||
)
|
||||
|
||||
UpgradeCommand.Flags().BoolVarP(
|
||||
&updateAll,
|
||||
"all",
|
||||
"a",
|
||||
false,
|
||||
"update all deployed apps",
|
||||
)
|
||||
}
|
||||
|
56 cli/upgrade.go Normal file
@@ -0,0 +1,56 @@
// Package cli provides the interface for the command-line.
package cli

import (
	"fmt"
	"os/exec"

	"coopcloud.tech/abra/cli/internal"
	"coopcloud.tech/abra/pkg/log"
	"github.com/spf13/cobra"
)

// UpgradeCommand upgrades abra in-place.
var UpgradeCommand = &cobra.Command{
	Use:     "upgrade [flags]",
	Aliases: []string{"u"},
	Short:   "Upgrade abra",
	Long: `Upgrade abra in-place with the latest stable or release candidate.

By default, the latest stable release is downloaded.

Use "--rc/-r" to install the latest release candidate. Please bear in mind that
it may contain absolutely catastrophic deal-breaker bugs. Thank you very much
for the testing efforts 💗`,
	Example: "  abra upgrade --rc",
	Args:    cobra.NoArgs,
	Run: func(cmd *cobra.Command, args []string) {
		mainURL := "https://install.abra.coopcloud.tech"
		c := exec.Command("bash", "-c", fmt.Sprintf("wget -q -O- %s | bash", mainURL))

		if releaseCandidate {
			releaseCandidateURL := "https://git.coopcloud.tech/coop-cloud/abra/raw/branch/main/scripts/installer/installer"
			c = exec.Command("bash", "-c", fmt.Sprintf("wget -q -O- %s | bash -s -- --rc", releaseCandidateURL))
		}

		log.Debugf("attempting to run %s", c)

		if err := internal.RunCmd(c); err != nil {
			log.Fatal(err)
		}
	},
}

var (
	releaseCandidate bool
)

func init() {
	UpgradeCommand.Flags().BoolVarP(
		&releaseCandidate,
		"rc",
		"r",
		false,
		"install release candidate (may contain bugs)",
	)
}
@@ -19,5 +19,5 @@ func main() {
Commit = " "
}

cli.RunApp(Version, Commit)
cli.Run(Version, Commit)
}
170 go.mod
@@ -1,52 +1,150 @@
|
||||
module coopcloud.tech/abra
|
||||
|
||||
go 1.16
|
||||
go 1.22.7
|
||||
|
||||
toolchain go1.23.1
|
||||
|
||||
require (
|
||||
coopcloud.tech/tagcmp v0.0.0-20211103052201-885b22f77d52
|
||||
coopcloud.tech/tagcmp v0.0.0-20230809071031-eb3e7758d4eb
|
||||
git.coopcloud.tech/toolshed/godotenv v1.5.2-0.20250103171850-4d0ca41daa5c
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7
|
||||
github.com/Autonomic-Cooperative/godotenv v1.3.1-0.20210731094149-b031ea1211e7
|
||||
github.com/Gurpartap/logrus-stack v0.0.0-20170710170904-89c00d8a28f4
|
||||
github.com/docker/cli v24.0.5+incompatible
|
||||
github.com/docker/distribution v2.8.2+incompatible
|
||||
github.com/docker/docker v24.0.5+incompatible
|
||||
github.com/charmbracelet/lipgloss v1.0.0
|
||||
github.com/charmbracelet/log v0.4.0
|
||||
github.com/distribution/reference v0.6.0
|
||||
github.com/docker/cli v27.4.1+incompatible
|
||||
github.com/docker/docker v27.4.1+incompatible
|
||||
github.com/docker/go-units v0.5.0
|
||||
github.com/go-git/go-git/v5 v5.8.1
|
||||
github.com/moby/sys/signal v0.7.0
|
||||
github.com/moby/term v0.5.0
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/go-git/go-git/v5 v5.13.1
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/moby/sys/signal v0.7.1
|
||||
github.com/moby/term v0.5.2
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/schollz/progressbar/v3 v3.13.1
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
gotest.tools/v3 v3.5.0
|
||||
github.com/schollz/progressbar/v3 v3.17.1
|
||||
golang.org/x/term v0.28.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gotest.tools/v3 v3.5.1
|
||||
)
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/BurntSushi/toml v1.4.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.1.3 // indirect
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/charmbracelet/x/ansi v0.6.0 // indirect
|
||||
github.com/cloudflare/circl v1.5.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
|
||||
github.com/go-git/go-billy/v5 v5.6.1 // indirect
|
||||
github.com/go-logfmt/logfmt v0.6.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/kevinburke/ssh_config v1.2.0 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mmcloughlin/avo v0.6.0 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/sys/mountinfo v0.6.2 // indirect
|
||||
github.com/moby/sys/user v0.3.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/muesli/termenv v0.15.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/runc v1.1.13 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.1.0 // indirect
|
||||
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||
github.com/pjbgf/sha1cd v0.3.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.61.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/skeema/knownhosts v1.3.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.3 // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.33.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
|
||||
golang.org/x/crypto v0.32.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect
|
||||
golang.org/x/mod v0.22.0 // indirect
|
||||
golang.org/x/net v0.34.0 // indirect
|
||||
golang.org/x/sync v0.10.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.29.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422 // indirect
|
||||
google.golang.org/grpc v1.69.2 // indirect
|
||||
google.golang.org/protobuf v1.36.2 // indirect
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
coopcloud.tech/libcapsul v0.0.0-20230605070824-878af473f07b
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
|
||||
github.com/buger/goterm v1.0.4
|
||||
github.com/containerd/containerd v1.5.9 // indirect
|
||||
github.com/containers/image v3.0.2+incompatible
|
||||
github.com/containers/storage v1.38.2 // indirect
|
||||
github.com/decentral1se/passgen v1.0.1
|
||||
github.com/docker/docker-credential-helpers v0.6.4 // indirect
|
||||
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
|
||||
github.com/fvbommel/sortorder v1.0.2 // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.2 // indirect
|
||||
github.com/fvbommel/sortorder v1.1.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.4
|
||||
github.com/hetznercloud/hcloud-go v1.48.0
|
||||
github.com/klauspost/pgzip v1.2.6
|
||||
github.com/libdns/gandi v1.0.2
|
||||
github.com/libdns/libdns v0.2.1
|
||||
github.com/moby/patternmatcher v0.5.0 // indirect
|
||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 // indirect
|
||||
github.com/rogpeppe/go-internal v1.11.0 // indirect
|
||||
github.com/sergi/go-diff v1.2.0 // indirect
|
||||
github.com/spf13/cobra v1.3.0 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7
|
||||
github.com/moby/patternmatcher v0.6.0 // indirect
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/prometheus/client_golang v1.20.5 // indirect
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/theupdateframework/notary v0.7.0 // indirect
|
||||
github.com/urfave/cli v1.22.9
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b // indirect
|
||||
golang.org/x/sys v0.10.0
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
golang.org/x/sys v0.29.0
|
||||
)
|
||||
|
694 pkg/app/app.go
@@ -1,42 +1,698 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"github.com/sirupsen/logrus"
|
||||
"coopcloud.tech/abra/pkg/envfile"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/upstream/convert"
|
||||
"coopcloud.tech/abra/pkg/upstream/stack"
|
||||
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
loader "coopcloud.tech/abra/pkg/upstream/stack"
|
||||
composetypes "github.com/docker/cli/cli/compose/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/schollz/progressbar/v3"
|
||||
)
|
||||
|
||||
// Get retrieves an app
|
||||
func Get(appName string) (config.App, error) {
|
||||
files, err := config.LoadAppFiles("")
|
||||
func Get(appName string) (App, error) {
|
||||
files, err := LoadAppFiles("")
|
||||
if err != nil {
|
||||
return config.App{}, err
|
||||
return App{}, err
|
||||
}
|
||||
|
||||
app, err := config.GetApp(files, appName)
|
||||
app, err := GetApp(files, appName)
|
||||
if err != nil {
|
||||
return config.App{}, err
|
||||
return App{}, err
|
||||
}
|
||||
|
||||
logrus.Debugf("retrieved %s for %s", app, appName)
|
||||
log.Debugf("loaded app %s: %s", appName, app)
|
||||
|
||||
return app, nil
|
||||
}
|
||||
|
||||
// deployedServiceSpec represents a deployed service of an app.
|
||||
type deployedServiceSpec struct {
|
||||
Name string
|
||||
Version string
|
||||
// GetApp loads an app's settings, reading it from file, in preparation to use
|
||||
// it. It should only be used when ready to use the env file to keep IO
|
||||
// operations down.
|
||||
func GetApp(apps AppFiles, name AppName) (App, error) {
|
||||
appFile, exists := apps[name]
|
||||
if !exists {
|
||||
return App{}, fmt.Errorf("cannot find app with name %s", name)
|
||||
}
|
||||
|
||||
app, err := ReadAppEnvFile(appFile, name)
|
||||
if err != nil {
|
||||
return App{}, err
|
||||
}
|
||||
|
||||
return app, nil
|
||||
}
|
||||
|
||||
// VersionSpec represents a deployed app and associated metadata.
|
||||
type VersionSpec map[string]deployedServiceSpec
|
||||
// GetApps returns a slice of Apps with their env files read from a given
|
||||
// slice of AppFiles.
|
||||
func GetApps(appFiles AppFiles, recipeFilter string) ([]App, error) {
|
||||
var apps []App
|
||||
|
||||
// ParseServiceName parses a $STACK_NAME_$SERVICE_NAME service label.
|
||||
func ParseServiceName(label string) string {
|
||||
idx := strings.LastIndex(label, "_")
|
||||
serviceName := label[idx+1:]
|
||||
logrus.Debugf("parsed %s as service name from %s", serviceName, label)
|
||||
return serviceName
|
||||
for name := range appFiles {
|
||||
app, err := GetApp(appFiles, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if recipeFilter != "" {
|
||||
if app.Recipe.Name == recipeFilter {
|
||||
apps = append(apps, app)
|
||||
}
|
||||
} else {
|
||||
apps = append(apps, app)
|
||||
}
|
||||
}
|
||||
|
||||
return apps, nil
|
||||
}
|
||||
|
||||
// App represents an app with its env file read into memory
|
||||
type App struct {
|
||||
Name AppName
|
||||
Recipe recipe.Recipe
|
||||
Domain string
|
||||
Env envfile.AppEnv
|
||||
Server string
|
||||
Path string
|
||||
}
|
||||
|
||||
// String outputs a human-friendly string representation.
|
||||
func (a App) String() string {
|
||||
out := fmt.Sprintf("{name: %s, ", a.Name)
|
||||
out += fmt.Sprintf("recipe: %s, ", a.Recipe)
|
||||
out += fmt.Sprintf("domain: %s, ", a.Domain)
|
||||
out += fmt.Sprintf("env %s, ", a.Env)
|
||||
out += fmt.Sprintf("server %s, ", a.Server)
|
||||
out += fmt.Sprintf("path %s}", a.Path)
|
||||
return out
|
||||
}
|
||||
|
||||
// Type aliases to make code hints easier to understand
|
||||
|
||||
// AppName is AppName
|
||||
type AppName = string
|
||||
|
||||
// AppFile represents app env files on disk without reading the contents
|
||||
type AppFile struct {
|
||||
Path string
|
||||
Server string
|
||||
}
|
||||
|
||||
// AppFiles is a slice of appfiles
|
||||
type AppFiles map[AppName]AppFile
|
||||
|
||||
// See documentation of config.StackName
|
||||
func (a App) StackName() string {
|
||||
if _, exists := a.Env["STACK_NAME"]; exists {
|
||||
return a.Env["STACK_NAME"]
|
||||
}
|
||||
|
||||
stackName := StackName(a.Name)
|
||||
|
||||
a.Env["STACK_NAME"] = stackName
|
||||
|
||||
return stackName
|
||||
}
|
||||
|
||||
// StackName gets whatever the docker safe (uses the right delimiting
|
||||
// character, e.g. "_") stack name is for the app. In general, you don't want
|
||||
// to use this to show anything to end-users, you want use a.Name instead.
|
||||
func StackName(appName string) string {
|
||||
stackName := SanitiseAppName(appName)
|
||||
|
||||
if len(stackName) > config.MAX_SANITISED_APP_NAME_LENGTH {
|
||||
log.Debugf("trimming %s to %s to avoid runtime limits", stackName, stackName[:config.MAX_SANITISED_APP_NAME_LENGTH])
|
||||
stackName = stackName[:config.MAX_SANITISED_APP_NAME_LENGTH]
|
||||
}
|
||||
|
||||
return stackName
|
||||
}
|
||||
|
||||
// Filters retrieves app filters for querying the container runtime. By default
|
||||
// it filters on all services in the app. It is also possible to pass an
|
||||
// optional list of service names, which get filtered instead.
|
||||
//
|
||||
// Due to upstream issues, filtering works differently depending on what you're
|
||||
// querying. So, for example, secrets don't work with regex! The caller needs
|
||||
// to implement their own validation that the right secrets are matched. In
|
||||
// order to handle these cases, we provide the `appendServiceNames` /
|
||||
// `exactMatch` modifiers.
|
||||
func (a App) Filters(appendServiceNames, exactMatch bool, services ...string) (filters.Args, error) {
|
||||
filters := filters.NewArgs()
|
||||
if len(services) > 0 {
|
||||
for _, serviceName := range services {
|
||||
filters.Add("name", ServiceFilter(a.StackName(), serviceName, exactMatch))
|
||||
}
|
||||
return filters, nil
|
||||
}
|
||||
|
||||
// When not appending the service name, just add one filter for the whole
|
||||
// stack.
|
||||
if !appendServiceNames {
|
||||
f := fmt.Sprintf("%s", a.StackName())
|
||||
if exactMatch {
|
||||
f = fmt.Sprintf("^%s", f)
|
||||
}
|
||||
filters.Add("name", f)
|
||||
return filters, nil
|
||||
}
|
||||
|
||||
composeFiles, err := a.Recipe.GetComposeFiles(a.Env)
|
||||
if err != nil {
|
||||
return filters, err
|
||||
}
|
||||
|
||||
opts := stack.Deploy{Composefiles: composeFiles}
|
||||
compose, err := GetAppComposeConfig(a.Recipe.Name, opts, a.Env)
|
||||
if err != nil {
|
||||
return filters, err
|
||||
}
|
||||
|
||||
for _, service := range compose.Services {
|
||||
f := ServiceFilter(a.StackName(), service.Name, exactMatch)
|
||||
filters.Add("name", f)
|
||||
}
|
||||
|
||||
return filters, nil
|
||||
}
|
||||
|
||||
// ServiceFilter creates a filter string for filtering a service in the docker
|
||||
// container runtime. When exact match is true, it uses regex to match the
|
||||
// string exactly.
|
||||
func ServiceFilter(stack, service string, exact bool) string {
|
||||
if exact {
|
||||
return fmt.Sprintf("^%s_%s", stack, service)
|
||||
}
|
||||
return fmt.Sprintf("%s_%s", stack, service)
|
||||
}
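For reference, the strings ServiceFilter hands to the docker "name" filter look like this; the "^" anchor in the exact-match case keeps Docker's substring-style name filtering from picking up other stacks that merely contain the same prefix. The snippet below is a standalone copy for illustration, not a call into the package.

package main

import "fmt"

// serviceFilter mirrors ServiceFilter above, copied here so the example runs on its own.
func serviceFilter(stack, service string, exact bool) string {
	if exact {
		return fmt.Sprintf("^%s_%s", stack, service)
	}
	return fmt.Sprintf("%s_%s", stack, service)
}

func main() {
	fmt.Println(serviceFilter("test_example_com", "app", false)) // test_example_com_app
	fmt.Println(serviceFilter("test_example_com", "app", true))  // ^test_example_com_app
}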
|
||||
|
||||
// ByServer sort a slice of Apps
|
||||
type ByServer []App
|
||||
|
||||
func (a ByServer) Len() int { return len(a) }
|
||||
func (a ByServer) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByServer) Less(i, j int) bool {
|
||||
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
|
||||
}
|
||||
|
||||
// ByServerAndRecipe sort a slice of Apps
|
||||
type ByServerAndRecipe []App
|
||||
|
||||
func (a ByServerAndRecipe) Len() int { return len(a) }
|
||||
func (a ByServerAndRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByServerAndRecipe) Less(i, j int) bool {
|
||||
if a[i].Server == a[j].Server {
|
||||
return strings.ToLower(a[i].Recipe.Name) < strings.ToLower(a[j].Recipe.Name)
|
||||
}
|
||||
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
|
||||
}
|
||||
|
||||
// ByRecipe sort a slice of Apps
|
||||
type ByRecipe []App
|
||||
|
||||
func (a ByRecipe) Len() int { return len(a) }
|
||||
func (a ByRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByRecipe) Less(i, j int) bool {
|
||||
return strings.ToLower(a[i].Recipe.Name) < strings.ToLower(a[j].Recipe.Name)
|
||||
}
|
||||
|
||||
// ByName sort a slice of Apps
|
||||
type ByName []App
|
||||
|
||||
func (a ByName) Len() int { return len(a) }
|
||||
func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByName) Less(i, j int) bool {
|
||||
return strings.ToLower(a[i].Name) < strings.ToLower(a[j].Name)
|
||||
}
|
||||
|
||||
func ReadAppEnvFile(appFile AppFile, name AppName) (App, error) {
|
||||
env, err := envfile.ReadEnv(appFile.Path)
|
||||
if err != nil {
|
||||
return App{}, fmt.Errorf("env file for %s couldn't be read: %s", name, err.Error())
|
||||
}
|
||||
|
||||
app, err := NewApp(env, name, appFile)
|
||||
if err != nil {
|
||||
return App{}, fmt.Errorf("env file for %s has issues: %s", name, err.Error())
|
||||
}
|
||||
|
||||
return app, nil
|
||||
}
|
||||
|
||||
// NewApp creates new App object
|
||||
func NewApp(env envfile.AppEnv, name string, appFile AppFile) (App, error) {
|
||||
domain := env["DOMAIN"]
|
||||
|
||||
recipeName, exists := env["RECIPE"]
|
||||
if !exists {
|
||||
recipeName, exists = env["TYPE"]
|
||||
if !exists {
|
||||
return App{}, fmt.Errorf("%s is missing the TYPE env var?", name)
|
||||
}
|
||||
}
|
||||
|
||||
return App{
|
||||
Name: name,
|
||||
Domain: domain,
|
||||
Recipe: recipe.Get(recipeName),
|
||||
Env: env,
|
||||
Server: appFile.Server,
|
||||
Path: appFile.Path,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// LoadAppFiles gets all app files for a given set of servers or all servers.
|
||||
func LoadAppFiles(servers ...string) (AppFiles, error) {
|
||||
appFiles := make(AppFiles)
|
||||
if len(servers) == 1 {
|
||||
if servers[0] == "" {
|
||||
// Empty servers flag, one string will always be passed
|
||||
var err error
|
||||
servers, err = config.GetAllFoldersInDirectory(config.SERVERS_DIR)
|
||||
if err != nil {
|
||||
return appFiles, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("collecting metadata from %v servers: %s", len(servers), strings.Join(servers, ", "))
|
||||
|
||||
for _, server := range servers {
|
||||
serverDir := path.Join(config.SERVERS_DIR, server)
|
||||
files, err := config.GetAllFilesInDirectory(serverDir)
|
||||
if err != nil {
|
||||
return appFiles, fmt.Errorf("server %s doesn't exist? Run \"abra server ls\" to check", server)
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
appName := strings.TrimSuffix(file.Name(), ".env")
|
||||
appFilePath := path.Join(config.SERVERS_DIR, server, file.Name())
|
||||
appFiles[appName] = AppFile{
|
||||
Path: appFilePath,
|
||||
Server: server,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return appFiles, nil
|
||||
}
|
||||
|
||||
// GetAppServiceNames retrieves a list of app service names.
|
||||
func GetAppServiceNames(appName string) ([]string, error) {
|
||||
var serviceNames []string
|
||||
|
||||
appFiles, err := LoadAppFiles("")
|
||||
if err != nil {
|
||||
return serviceNames, err
|
||||
}
|
||||
|
||||
app, err := GetApp(appFiles, appName)
|
||||
if err != nil {
|
||||
return serviceNames, err
|
||||
}
|
||||
|
||||
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
|
||||
if err != nil {
|
||||
return serviceNames, err
|
||||
}
|
||||
|
||||
opts := stack.Deploy{Composefiles: composeFiles}
|
||||
compose, err := GetAppComposeConfig(app.Recipe.Name, opts, app.Env)
|
||||
if err != nil {
|
||||
return serviceNames, err
|
||||
}
|
||||
|
||||
for _, service := range compose.Services {
|
||||
serviceNames = append(serviceNames, service.Name)
|
||||
}
|
||||
|
||||
return serviceNames, nil
|
||||
}
|
||||
|
||||
// GetAppNames retrieves a list of app names.
|
||||
func GetAppNames() ([]string, error) {
|
||||
var appNames []string
|
||||
|
||||
appFiles, err := LoadAppFiles("")
|
||||
if err != nil {
|
||||
return appNames, err
|
||||
}
|
||||
|
||||
apps, err := GetApps(appFiles, "")
|
||||
if err != nil {
|
||||
return appNames, err
|
||||
}
|
||||
|
||||
for _, app := range apps {
|
||||
appNames = append(appNames, app.Name)
|
||||
}
|
||||
|
||||
return appNames, nil
|
||||
}
|
||||
|
||||
// TemplateAppEnvSample copies the example env file for the app into the user's
|
||||
// env files.
|
||||
func TemplateAppEnvSample(r recipe.Recipe, appName, server, domain string) error {
|
||||
envSample, err := os.ReadFile(r.SampleEnvPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
appEnvPath := path.Join(config.ABRA_DIR, "servers", server, fmt.Sprintf("%s.env", appName))
|
||||
if _, err := os.Stat(appEnvPath); !os.IsNotExist(err) {
|
||||
return fmt.Errorf("%s already exists?", appEnvPath)
|
||||
}
|
||||
|
||||
err = os.WriteFile(appEnvPath, envSample, 0o664)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
read, err := os.ReadFile(appEnvPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newContents := strings.Replace(string(read), r.Name+".example.com", domain, -1)
|
||||
|
||||
err = os.WriteFile(appEnvPath, []byte(newContents), 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("copied & templated %s to %s", r.SampleEnvPath, appEnvPath)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SanitiseAppName makes an app name usable with Docker by replacing illegal
|
||||
// characters.
|
||||
func SanitiseAppName(name string) string {
|
||||
return strings.ReplaceAll(name, ".", "_")
|
||||
}
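A quick illustration of the sanitising and trimming described above; the helper is copied locally so the example runs on its own, and the exact value of config.MAX_SANITISED_APP_NAME_LENGTH is deliberately not repeated here.

package main

import (
	"fmt"
	"strings"
)

// sanitiseAppName mirrors SanitiseAppName above: Docker stack names cannot
// contain dots, so domain-style app names are rewritten with underscores.
func sanitiseAppName(name string) string {
	return strings.ReplaceAll(name, ".", "_")
}

func main() {
	fmt.Println(sanitiseAppName("cloud.example.com")) // cloud_example_com

	// StackName additionally trims the result to
	// config.MAX_SANITISED_APP_NAME_LENGTH to stay under runtime limits.
}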
|
||||
|
||||
// GetAppStatuses queries servers to check the deployment status of given apps.
|
||||
func GetAppStatuses(apps []App, MachineReadable bool) (map[string]map[string]string, error) {
|
||||
statuses := make(map[string]map[string]string)
|
||||
|
||||
servers := make(map[string]struct{})
|
||||
for _, app := range apps {
|
||||
if _, ok := servers[app.Server]; !ok {
|
||||
servers[app.Server] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
var bar *progressbar.ProgressBar
|
||||
if !MachineReadable {
|
||||
bar = formatter.CreateProgressbar(len(servers), "querying remote servers...")
|
||||
}
|
||||
|
||||
ch := make(chan stack.StackStatus, len(servers))
|
||||
for server := range servers {
|
||||
cl, err := client.New(server)
|
||||
if err != nil {
|
||||
return statuses, err
|
||||
}
|
||||
|
||||
go func(s string) {
|
||||
ch <- stack.GetAllDeployedServices(cl, s)
|
||||
if !MachineReadable {
|
||||
bar.Add(1)
|
||||
}
|
||||
}(server)
|
||||
}
|
||||
|
||||
for range servers {
|
||||
status := <-ch
|
||||
if status.Err != nil {
|
||||
return statuses, status.Err
|
||||
}
|
||||
|
||||
for _, service := range status.Services {
|
||||
result := make(map[string]string)
|
||||
name := service.Spec.Labels[convert.LabelNamespace]
|
||||
|
||||
if _, ok := statuses[name]; !ok {
|
||||
result["status"] = "deployed"
|
||||
}
|
||||
|
||||
labelKey := fmt.Sprintf("coop-cloud.%s.chaos", name)
|
||||
chaos, ok := service.Spec.Labels[labelKey]
|
||||
if ok {
|
||||
result["chaos"] = chaos
|
||||
}
|
||||
|
||||
labelKey = fmt.Sprintf("coop-cloud.%s.chaos-version", name)
|
||||
if chaosVersion, ok := service.Spec.Labels[labelKey]; ok {
|
||||
result["chaosVersion"] = chaosVersion
|
||||
}
|
||||
|
||||
labelKey = fmt.Sprintf("coop-cloud.%s.autoupdate", name)
|
||||
if autoUpdate, ok := service.Spec.Labels[labelKey]; ok {
|
||||
result["autoUpdate"] = autoUpdate
|
||||
} else {
|
||||
result["autoUpdate"] = "false"
|
||||
}
|
||||
|
||||
labelKey = fmt.Sprintf("coop-cloud.%s.version", name)
|
||||
if version, ok := service.Spec.Labels[labelKey]; ok {
|
||||
result["version"] = version
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
|
||||
statuses[name] = result
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("retrieved app statuses: %s", statuses)
|
||||
|
||||
return statuses, nil
|
||||
}
|
||||
|
||||
// GetAppComposeConfig retrieves a compose specification for a recipe. This
|
||||
// specification is the result of a merge of all the compose.**.yml files in
|
||||
// the recipe repository.
|
||||
func GetAppComposeConfig(recipe string, opts stack.Deploy, appEnv envfile.AppEnv) (*composetypes.Config, error) {
|
||||
compose, err := loader.LoadComposefile(opts, appEnv)
|
||||
if err != nil {
|
||||
return &composetypes.Config{}, err
|
||||
}
|
||||
|
||||
log.Debugf("retrieved %s for %s", compose.Filename, recipe)
|
||||
|
||||
return compose, nil
|
||||
}
|
||||
|
||||
// ExposeAllEnv exposes all env variables to the app container
|
||||
func ExposeAllEnv(stackName string, compose *composetypes.Config, appEnv envfile.AppEnv) {
|
||||
for _, service := range compose.Services {
|
||||
if service.Name == "app" {
|
||||
log.Debugf("adding env vars to %s service config", stackName)
|
||||
for k, v := range appEnv {
|
||||
_, exists := service.Environment[k]
|
||||
if !exists {
|
||||
value := v
|
||||
service.Environment[k] = &value
|
||||
log.Debugf("%s: %s: %s", stackName, k, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func CheckEnv(app App) ([]envfile.EnvVar, error) {
|
||||
var envVars []envfile.EnvVar
|
||||
|
||||
envSample, err := app.Recipe.SampleEnv()
|
||||
if err != nil {
|
||||
return envVars, err
|
||||
}
|
||||
|
||||
var keys []string
|
||||
for key := range envSample {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, key := range keys {
|
||||
if _, ok := app.Env[key]; ok {
|
||||
envVars = append(envVars, envfile.EnvVar{Name: key, Present: true})
|
||||
} else {
|
||||
envVars = append(envVars, envfile.EnvVar{Name: key, Present: false})
|
||||
}
|
||||
}
|
||||
|
||||
return envVars, nil
|
||||
}
|
||||
|
||||
// ReadAbraShCmdNames reads the names of commands.
|
||||
func ReadAbraShCmdNames(abraSh string) ([]string, error) {
|
||||
var cmdNames []string
|
||||
|
||||
file, err := os.Open(abraSh)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return cmdNames, nil
|
||||
}
|
||||
return cmdNames, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
cmdNameRegex, err := regexp.Compile(`(\w+)(\(\).*\{)`)
|
||||
if err != nil {
|
||||
return cmdNames, err
|
||||
}
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
matches := cmdNameRegex.FindStringSubmatch(line)
|
||||
if len(matches) > 0 {
|
||||
cmdNames = append(cmdNames, matches[1])
|
||||
}
|
||||
}
|
||||
|
||||
if len(cmdNames) > 0 {
|
||||
log.Debugf("read %s from %s", strings.Join(cmdNames, " "), abraSh)
|
||||
} else {
|
||||
log.Debugf("read 0 command names from %s", abraSh)
|
||||
}
|
||||
|
||||
return cmdNames, nil
|
||||
}
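The regex above, (\w+)(\(\).*\{), is what turns an abra.sh into a list of command names: it matches shell function definitions written as name() { and captures the name. A small standalone check of that behaviour:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	cmdNameRegex := regexp.MustCompile(`(\w+)(\(\).*\{)`)

	lines := []string{
		"export FOO=bar", // no match: not a function definition
		"backup() {",     // matches, captures "backup"
		"restore () {",   // no match: a space before "()" falls outside this pattern
	}

	for _, line := range lines {
		if m := cmdNameRegex.FindStringSubmatch(line); len(m) > 0 {
			fmt.Println(m[1])
		}
	}
	// Output: backup
}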
|
||||
|
||||
// WipeRecipeVersion removes the version from the app .env file.
|
||||
func (a App) WipeRecipeVersion() error {
|
||||
file, err := os.Open(a.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var (
|
||||
lines []string
|
||||
scanner = bufio.NewScanner(file)
|
||||
)
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if !strings.HasPrefix(line, "RECIPE=") && !strings.HasPrefix(line, "TYPE=") {
|
||||
lines = append(lines, line)
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(line, "#") {
|
||||
lines = append(lines, line)
|
||||
continue
|
||||
}
|
||||
|
||||
splitted := strings.Split(line, ":")
|
||||
lines = append(lines, splitted[0])
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := os.WriteFile(a.Path, []byte(strings.Join(lines, "\n")), os.ModePerm); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
log.Debugf("version wiped from %s.env", a.Domain)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteRecipeVersion writes the recipe version to the app .env file.
|
||||
func (a App) WriteRecipeVersion(version string, dryRun bool) error {
|
||||
file, err := os.Open(a.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var (
|
||||
dirtyVersion string
|
||||
skipped bool
|
||||
lines []string
|
||||
scanner = bufio.NewScanner(file)
|
||||
)
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if !strings.HasPrefix(line, "RECIPE=") && !strings.HasPrefix(line, "TYPE=") {
|
||||
lines = append(lines, line)
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(line, "#") {
|
||||
lines = append(lines, line)
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.Contains(line, version) && !a.Recipe.Dirty && !strings.HasSuffix(line, config.DIRTY_DEFAULT) {
|
||||
skipped = true
|
||||
lines = append(lines, line)
|
||||
continue
|
||||
}
|
||||
|
||||
splitted := strings.Split(line, ":")
|
||||
|
||||
if a.Recipe.Dirty {
|
||||
dirtyVersion = fmt.Sprintf("%s%s", version, config.DIRTY_DEFAULT)
|
||||
if strings.Contains(line, dirtyVersion) {
|
||||
skipped = true
|
||||
lines = append(lines, line)
|
||||
continue
|
||||
}
|
||||
|
||||
line = fmt.Sprintf("%s:%s", splitted[0], dirtyVersion)
|
||||
lines = append(lines, line)
|
||||
continue
|
||||
}
|
||||
|
||||
line = fmt.Sprintf("%s:%s", splitted[0], version)
|
||||
lines = append(lines, line)
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if a.Recipe.Dirty && dirtyVersion != "" {
|
||||
version = dirtyVersion
|
||||
}
|
||||
|
||||
if !dryRun {
|
||||
if err := os.WriteFile(a.Path, []byte(strings.Join(lines, "\n")), os.ModePerm); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
log.Debugf("skipping writing version %s because dry run", version)
|
||||
}
|
||||
|
||||
if !skipped {
|
||||
log.Debugf("version %s saved to %s.env", version, a.Domain)
|
||||
} else {
|
||||
log.Debugf("skipping version %s write as already exists in %s.env", version, a.Domain)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
238 pkg/app/app_test.go Normal file
@@ -0,0 +1,238 @@
|
||||
package app_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/envfile"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
testPkg "coopcloud.tech/abra/pkg/test"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewApp(t *testing.T) {
|
||||
app, err := appPkg.NewApp(testPkg.ExpectedAppEnv, testPkg.AppName, testPkg.ExpectedAppFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(app, testPkg.ExpectedApp) {
|
||||
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, testPkg.ExpectedApp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadAppEnvFile(t *testing.T) {
|
||||
app, err := appPkg.ReadAppEnvFile(testPkg.ExpectedAppFile, testPkg.AppName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(app, testPkg.ExpectedApp) {
|
||||
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, testPkg.ExpectedApp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetApp(t *testing.T) {
|
||||
app, err := appPkg.GetApp(testPkg.ExpectedAppFiles, testPkg.AppName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(app, testPkg.ExpectedApp) {
|
||||
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, testPkg.ExpectedApp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetComposeFiles(t *testing.T) {
|
||||
r := recipe.Get("abra-test-recipe")
|
||||
err := r.EnsureExists()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
appEnv map[string]string
|
||||
composeFiles []string
|
||||
}{
|
||||
{
|
||||
map[string]string{},
|
||||
[]string{
|
||||
fmt.Sprintf("%s/compose.yml", r.Dir),
|
||||
},
|
||||
},
|
||||
{
|
||||
map[string]string{"COMPOSE_FILE": "compose.yml"},
|
||||
[]string{
|
||||
fmt.Sprintf("%s/compose.yml", r.Dir),
|
||||
},
|
||||
},
|
||||
{
|
||||
map[string]string{"COMPOSE_FILE": "compose.extra_secret.yml"},
|
||||
[]string{
|
||||
fmt.Sprintf("%s/compose.extra_secret.yml", r.Dir),
|
||||
},
|
||||
},
|
||||
{
|
||||
map[string]string{"COMPOSE_FILE": "compose.yml:compose.extra_secret.yml"},
|
||||
[]string{
|
||||
fmt.Sprintf("%s/compose.yml", r.Dir),
|
||||
fmt.Sprintf("%s/compose.extra_secret.yml", r.Dir),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
composeFiles, err := r.GetComposeFiles(test.appEnv)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, composeFiles, test.composeFiles)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetComposeFilesError(t *testing.T) {
|
||||
r := recipe.Get("abra-test-recipe")
|
||||
err := r.EnsureExists()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tests := []struct{ appEnv map[string]string }{
|
||||
{map[string]string{"COMPOSE_FILE": "compose.yml::compose.foo.yml"}},
|
||||
{map[string]string{"COMPOSE_FILE": "doesnt.exist.yml"}},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
_, err := r.GetComposeFiles(test.appEnv)
|
||||
if err == nil {
|
||||
t.Fatalf("should have failed: %v", test.appEnv)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilters(t *testing.T) {
|
||||
oldDir := config.RECIPES_DIR
|
||||
config.RECIPES_DIR = "./testdata"
|
||||
defer func() {
|
||||
config.RECIPES_DIR = oldDir
|
||||
}()
|
||||
|
||||
app, err := appPkg.NewApp(envfile.AppEnv{
|
||||
"DOMAIN": "test.example.com",
|
||||
"RECIPE": "test-recipe",
|
||||
}, "test_example_com", appPkg.AppFile{
|
||||
Path: "./testdata/filtertest.end",
|
||||
Server: "local",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := app.Filters(false, false)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
compareFilter(t, f, map[string]map[string]bool{
|
||||
"name": {
|
||||
"test_example_com": true,
|
||||
},
|
||||
})
|
||||
|
||||
f2, err := app.Filters(false, true)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
compareFilter(t, f2, map[string]map[string]bool{
|
||||
"name": {
|
||||
"^test_example_com": true,
|
||||
},
|
||||
})
|
||||
|
||||
f3, err := app.Filters(true, false)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
compareFilter(t, f3, map[string]map[string]bool{
|
||||
"name": {
|
||||
"test_example_com_bar": true,
|
||||
"test_example_com_foo": true,
|
||||
},
|
||||
})
|
||||
|
||||
f4, err := app.Filters(true, true)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
compareFilter(t, f4, map[string]map[string]bool{
|
||||
"name": {
|
||||
"^test_example_com_bar": true,
|
||||
"^test_example_com_foo": true,
|
||||
},
|
||||
})
|
||||
|
||||
f5, err := app.Filters(false, false, "foo")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
compareFilter(t, f5, map[string]map[string]bool{
|
||||
"name": {
|
||||
"test_example_com_foo": true,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func compareFilter(t *testing.T, f1 filters.Args, f2 map[string]map[string]bool) {
|
||||
t.Helper()
|
||||
j1, err := f1.MarshalJSON()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
j2, err := json.Marshal(f2)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if diff := cmp.Diff(string(j2), string(j1)); diff != "" {
|
||||
t.Errorf("filters mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteRecipeVersionOverwrite(t *testing.T) {
|
||||
app, err := appPkg.GetApp(testPkg.ExpectedAppFiles, testPkg.AppName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer t.Cleanup(func() {
|
||||
if err := app.WipeRecipeVersion(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
|
||||
assert.Equal(t, "", app.Recipe.EnvVersion)
|
||||
|
||||
if err := app.WriteRecipeVersion("foo", false); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
app, err = appPkg.GetApp(testPkg.ExpectedAppFiles, testPkg.AppName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.Equal(t, "foo", app.Recipe.EnvVersion)
|
||||
|
||||
app.Recipe.Dirty = true
|
||||
if err := app.WriteRecipeVersion("foo+U", false); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
app, err = appPkg.GetApp(testPkg.ExpectedAppFiles, testPkg.AppName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.Equal(t, "foo+U", app.Recipe.EnvVersion)
|
||||
}
|
88 pkg/app/compose.go Normal file
@@ -0,0 +1,88 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"coopcloud.tech/abra/pkg/envfile"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
composetypes "github.com/docker/cli/cli/compose/types"
|
||||
)
|
||||
|
||||
// SetRecipeLabel adds the label 'coop-cloud.${STACK_NAME}.recipe=${RECIPE}' to the app container
|
||||
// to signal which recipe is connected to the deployed app
|
||||
func SetRecipeLabel(compose *composetypes.Config, stackName string, recipe string) {
|
||||
for _, service := range compose.Services {
|
||||
if service.Name == "app" {
|
||||
log.Debugf("set recipe label 'coop-cloud.%s.recipe' to %s for %s", stackName, recipe, stackName)
|
||||
labelKey := fmt.Sprintf("coop-cloud.%s.recipe", stackName)
|
||||
service.Deploy.Labels[labelKey] = recipe
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetChaosLabel adds the label 'coop-cloud.${STACK_NAME}.chaos=true/false' to the app container
|
||||
// to signal if the app is deployed in chaos mode
|
||||
func SetChaosLabel(compose *composetypes.Config, stackName string, chaos bool) {
|
||||
for _, service := range compose.Services {
|
||||
if service.Name == "app" {
|
||||
log.Debugf("set label 'coop-cloud.%s.chaos' to %v for %s", stackName, chaos, stackName)
|
||||
labelKey := fmt.Sprintf("coop-cloud.%s.chaos", stackName)
|
||||
service.Deploy.Labels[labelKey] = strconv.FormatBool(chaos)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetChaosVersionLabel adds the label 'coop-cloud.${STACK_NAME}.chaos-version=$(GIT_COMMIT)' to the app container
|
||||
func SetChaosVersionLabel(compose *composetypes.Config, stackName string, chaosVersion string) {
|
||||
for _, service := range compose.Services {
|
||||
if service.Name == "app" {
|
||||
log.Debugf("set label 'coop-cloud.%s.chaos-version' to %v for %s", stackName, chaosVersion, stackName)
|
||||
labelKey := fmt.Sprintf("coop-cloud.%s.chaos-version", stackName)
|
||||
service.Deploy.Labels[labelKey] = chaosVersion
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetUpdateLabel adds env ENABLE_AUTO_UPDATE as label to enable/disable the
|
||||
// auto update process for this app. The default if this variable is not set is to disable
|
||||
// the auto update process.
|
||||
func SetUpdateLabel(compose *composetypes.Config, stackName string, appEnv envfile.AppEnv) {
|
||||
for _, service := range compose.Services {
|
||||
if service.Name == "app" {
|
||||
enable_auto_update, exists := appEnv["ENABLE_AUTO_UPDATE"]
|
||||
if !exists {
|
||||
enable_auto_update = "false"
|
||||
}
|
||||
log.Debugf("set label 'coop-cloud.%s.autoupdate' to %s for %s", stackName, enable_auto_update, stackName)
|
||||
labelKey := fmt.Sprintf("coop-cloud.%s.autoupdate", stackName)
|
||||
service.Deploy.Labels[labelKey] = enable_auto_update
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetLabel reads docker labels in the format of "coop-cloud.${STACK_NAME}.${LABEL}" from the local compose files
|
||||
func GetLabel(compose *composetypes.Config, stackName string, label string) string {
|
||||
for _, service := range compose.Services {
|
||||
if service.Name == "app" {
|
||||
labelKey := fmt.Sprintf("coop-cloud.%s.%s", stackName, label)
|
||||
log.Debugf("get label '%s'", labelKey)
|
||||
if labelValue, ok := service.Deploy.Labels[labelKey]; ok {
|
||||
return labelValue
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Debugf("no %s label found for %s", label, stackName)
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetTimeoutFromLabel reads the timeout value from docker label "coop-cloud.${STACK_NAME}.TIMEOUT" and returns 50 as default value
|
||||
func GetTimeoutFromLabel(compose *composetypes.Config, stackName string) (int, error) {
|
||||
timeout := 50 // Default Timeout
|
||||
var err error = nil
|
||||
if timeoutLabel := GetLabel(compose, stackName, "timeout"); timeoutLabel != "" {
|
||||
log.Debugf("timeout label: %s", timeoutLabel)
|
||||
timeout, err = strconv.Atoi(timeoutLabel)
|
||||
}
|
||||
return timeout, err
|
||||
}
|
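For orientation, a minimal usage sketch (not part of the diff) of how these label helpers fit together. The stack name and the small composetypes.Config literal are made up for illustration; the import paths are assumed to match the ones used elsewhere in this changeset.

package main

import (
	"fmt"

	appPkg "coopcloud.tech/abra/pkg/app"
	composetypes "github.com/docker/cli/cli/compose/types"
)

func main() {
	// A single "app" service with an initialised label map, standing in for
	// what the compose loader would normally produce from a recipe.
	compose := &composetypes.Config{
		Services: []composetypes.ServiceConfig{
			{
				Name:   "app",
				Deploy: composetypes.DeployConfig{Labels: composetypes.Labels{}},
			},
		},
	}

	stackName := "demo_example_com" // hypothetical stack name
	appPkg.SetRecipeLabel(compose, stackName, "demo-recipe")
	appPkg.SetChaosLabel(compose, stackName, false)

	// GetLabel reads back whatever the setters wrote.
	fmt.Println(appPkg.GetLabel(compose, stackName, "recipe")) // "demo-recipe"
	fmt.Println(appPkg.GetLabel(compose, stackName, "chaos"))  // "false"

	// With no "timeout" label set, GetTimeoutFromLabel falls back to 50.
	timeout, _ := appPkg.GetTimeoutFromLabel(compose, stackName)
	fmt.Println(timeout) // 50
}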
2
pkg/app/testdata/filtertest.env
vendored
Normal file
@ -0,0 +1,2 @@
RECIPE=test-recipe
DOMAIN=test.example.com
6
pkg/app/testdata/test-recipe/compose.yml
vendored
Normal file
@ -0,0 +1,6 @@
version: "3.8"
services:
  foo:
    image: debian
  bar:
    image: debian
@ -2,84 +2,123 @@ package autocomplete
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/app"
|
||||
appPkg "coopcloud.tech/abra/pkg/app"
|
||||
"coopcloud.tech/abra/pkg/recipe"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// AppNameComplete completes app names.
|
||||
func AppNameComplete(c *cli.Context) {
|
||||
appNames, err := config.GetAppNames()
|
||||
func AppNameComplete() ([]string, cobra.ShellCompDirective) {
|
||||
appFiles, err := app.LoadAppFiles("")
|
||||
if err != nil {
|
||||
logrus.Warn(err)
|
||||
err := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{err}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
if c.NArg() > 0 {
|
||||
return
|
||||
var appNames []string
|
||||
for appName := range appFiles {
|
||||
appNames = append(appNames, appName)
|
||||
}
|
||||
|
||||
for _, a := range appNames {
|
||||
fmt.Println(a)
|
||||
return appNames, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
|
||||
func ServiceNameComplete(appName string) ([]string, cobra.ShellCompDirective) {
|
||||
serviceNames, err := app.GetAppServiceNames(appName)
|
||||
if err != nil {
|
||||
err := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{err}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
return serviceNames, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
|
||||
// RecipeNameComplete completes recipe names.
|
||||
func RecipeNameComplete(c *cli.Context) {
|
||||
// defaults since we can't take arguments here... this means auto-completion
|
||||
// of recipe names always access the network if e.g. the catalogue needs
|
||||
// cloning / updating
|
||||
conf := runtime.New()
|
||||
|
||||
catl, err := recipe.ReadRecipeCatalogue(conf)
|
||||
func RecipeNameComplete() ([]string, cobra.ShellCompDirective) {
|
||||
catl, err := recipe.ReadRecipeCatalogue(false)
|
||||
if err != nil {
|
||||
logrus.Warn(err)
|
||||
}
|
||||
|
||||
if c.NArg() > 0 {
|
||||
return
|
||||
err := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{err}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
var recipeNames []string
|
||||
for name := range catl {
|
||||
fmt.Println(name)
|
||||
recipeNames = append(recipeNames, name)
|
||||
}
|
||||
|
||||
return recipeNames, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
|
||||
// RecipeVersionComplete completes versions for the recipe.
|
||||
func RecipeVersionComplete(recipeName string) ([]string, cobra.ShellCompDirective) {
|
||||
catl, err := recipe.ReadRecipeCatalogue(true)
|
||||
if err != nil {
|
||||
err := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{err}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
var recipeVersions []string
|
||||
for _, v := range catl[recipeName].Versions {
|
||||
for v2 := range v {
|
||||
recipeVersions = append(recipeVersions, v2)
|
||||
}
|
||||
}
|
||||
|
||||
return recipeVersions, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
|
||||
// ServerNameComplete completes server names.
|
||||
func ServerNameComplete(c *cli.Context) {
|
||||
files, err := config.LoadAppFiles("")
|
||||
func ServerNameComplete() ([]string, cobra.ShellCompDirective) {
|
||||
files, err := app.LoadAppFiles("")
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
if c.NArg() > 0 {
|
||||
return
|
||||
err := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{err}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
var serverNames []string
|
||||
for _, appFile := range files {
|
||||
fmt.Println(appFile.Server)
|
||||
serverNames = append(serverNames, appFile.Server)
|
||||
}
|
||||
|
||||
return serverNames, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
|
||||
// SubcommandComplete completes sub-commands.
|
||||
func SubcommandComplete(c *cli.Context) {
|
||||
if c.NArg() > 0 {
|
||||
return
|
||||
// CommandNameComplete completes recipe commands.
|
||||
func CommandNameComplete(appName string) ([]string, cobra.ShellCompDirective) {
|
||||
app, err := app.Get(appName)
|
||||
if err != nil {
|
||||
err := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{err}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
subcmds := []string{
|
||||
"app",
|
||||
"autocomplete",
|
||||
"catalogue",
|
||||
"recipe",
|
||||
"record",
|
||||
"server",
|
||||
"upgrade",
|
||||
cmdNames, err := appPkg.ReadAbraShCmdNames(app.Recipe.AbraShPath)
|
||||
if err != nil {
|
||||
err := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{err}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
for _, cmd := range subcmds {
|
||||
fmt.Println(cmd)
|
||||
}
|
||||
sort.Strings(cmdNames)
|
||||
|
||||
return cmdNames, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
|
||||
// SecretComplete completes recipe secrets.
|
||||
func SecretComplete(recipeName string) ([]string, cobra.ShellCompDirective) {
|
||||
r := recipe.Get(recipeName)
|
||||
|
||||
config, err := r.GetComposeConfig(nil)
|
||||
if err != nil {
|
||||
err := fmt.Sprintf("autocomplete failed: %s", err)
|
||||
return []string{err}, cobra.ShellCompDirectiveError
|
||||
}
|
||||
|
||||
var secretNames []string
|
||||
for name := range config.Secrets {
|
||||
secretNames = append(secretNames, name)
|
||||
}
|
||||
|
||||
return secretNames, cobra.ShellCompDirectiveDefault
|
||||
}
|
||||
|
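A sketch of how these cobra-style completers might be wired into a command's ValidArgsFunction. The restart command, its argument layout, and the autocomplete import path are assumptions for illustration only, not part of the diff.

package main

import (
	"coopcloud.tech/abra/pkg/autocomplete"
	"github.com/spf13/cobra"
)

// newRestartCommand is a hypothetical command used only to show how the
// completers above plug into cobra shell completion.
func newRestartCommand() *cobra.Command {
	return &cobra.Command{
		Use:  "restart <app> [service]",
		Args: cobra.RangeArgs(1, 2),
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			switch len(args) {
			case 0:
				// First positional argument: complete app names.
				return autocomplete.AppNameComplete()
			case 1:
				// Second positional argument: complete service names for that app.
				return autocomplete.ServiceNameComplete(args[0])
			}
			return nil, cobra.ShellCompDirectiveNoFileComp
		},
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
}

func main() {
	_ = newRestartCommand().Execute()
}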
@ -8,72 +8,27 @@ import (
|
||||
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
gitPkg "coopcloud.tech/abra/pkg/git"
|
||||
"coopcloud.tech/abra/pkg/runtime"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// CatalogueSkipList is all the repos that are not recipes.
|
||||
var CatalogueSkipList = map[string]bool{
|
||||
"abra": true,
|
||||
"abra-apps": true,
|
||||
"abra-aur": true,
|
||||
"abra-bash": true,
|
||||
"abra-capsul": true,
|
||||
"abra-gandi": true,
|
||||
"abra-hetzner": true,
|
||||
"apps": true,
|
||||
"aur-abra-git": true,
|
||||
"auto-recipes-catalogue-json": true,
|
||||
"auto-mirror": true,
|
||||
"backup-bot": true,
|
||||
"backup-bot-two": true,
|
||||
"beta.coopcloud.tech": true,
|
||||
"comrade-renovate-bot": true,
|
||||
"coopcloud.tech": true,
|
||||
"coturn": true,
|
||||
"docker-cp-deploy": true,
|
||||
"docker-dind-bats-kcov": true,
|
||||
"docs.coopcloud.tech": true,
|
||||
"drone-abra": true,
|
||||
"example": true,
|
||||
"gardening": true,
|
||||
"go-abra": true,
|
||||
"organising": true,
|
||||
"pyabra": true,
|
||||
"radicle-seed-node": true,
|
||||
"recipes-catalogue-json": true,
|
||||
"recipes-wishlist": true,
|
||||
"recipes.coopcloud.tech": true,
|
||||
"stack-ssh-deploy": true,
|
||||
"swarm-cronjob": true,
|
||||
"tagcmp": true,
|
||||
"traefik-cert-dumper": true,
|
||||
"tyop": true,
|
||||
}
|
||||
|
||||
// EnsureCatalogue ensures that the catalogue is cloned locally & present.
|
||||
func EnsureCatalogue(conf *runtime.Config) error {
|
||||
func EnsureCatalogue() error {
|
||||
catalogueDir := path.Join(config.ABRA_DIR, "catalogue")
|
||||
if _, err := os.Stat(catalogueDir); err != nil && os.IsNotExist(err) {
|
||||
if conf.Offline {
|
||||
return fmt.Errorf("no local copy of the catalogue available, network access required")
|
||||
}
|
||||
log.Debugf("catalogue is missing, retrieving now")
|
||||
|
||||
url := fmt.Sprintf("%s/%s.git", config.REPOS_BASE_URL, config.CATALOGUE_JSON_REPO_NAME)
|
||||
if err := gitPkg.Clone(catalogueDir, url); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Debugf("cloned catalogue repository to %s", catalogueDir)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureUpToDate ensures that the local catalogue has no unstaged changes as
|
||||
// is up to date. This is useful to run before doing catalogue generation.
|
||||
func EnsureUpToDate(conf *runtime.Config) error {
|
||||
// EnsureIsClean makes sure that the catalogue has no unstaged changes.
|
||||
func EnsureIsClean() error {
|
||||
isClean, err := gitPkg.IsClean(config.CATALOGUE_DIR)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -84,11 +39,11 @@ func EnsureUpToDate(conf *runtime.Config) error {
|
||||
return fmt.Errorf(msg, config.CATALOGUE_DIR)
|
||||
}
|
||||
|
||||
if conf.Offline {
|
||||
logrus.Debug("attempting to use local catalogue without access network (\"--offline\")")
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// EnsureUpToDate ensures that the local catalogue is up to date.
|
||||
func EnsureUpToDate() error {
|
||||
repo, err := git.PlainOpen(config.CATALOGUE_DIR)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -101,7 +56,7 @@ func EnsureUpToDate(conf *runtime.Config) error {
|
||||
|
||||
if len(remotes) == 0 {
|
||||
msg := "cannot ensure %s is up-to-date, no git remotes configured"
|
||||
logrus.Debugf(msg, config.CATALOGUE_DIR)
|
||||
log.Debugf(msg, config.CATALOGUE_DIR)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -126,7 +81,7 @@ func EnsureUpToDate(conf *runtime.Config) error {
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("fetched latest git changes for %s", config.CATALOGUE_DIR)
|
||||
log.Debugf("fetched latest git changes for %s", config.CATALOGUE_DIR)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
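A minimal sketch of the expected call order for these helpers: clone the catalogue if missing, refuse to work on a dirty checkout, then fetch the latest changes. The catalogue package import path is an assumption (it is not shown in this hunk).

package main

import (
	stdlog "log"

	"coopcloud.tech/abra/pkg/catalogue"
)

func main() {
	if err := catalogue.EnsureCatalogue(); err != nil {
		stdlog.Fatal(err)
	}
	if err := catalogue.EnsureIsClean(); err != nil {
		stdlog.Fatal(err)
	}
	if err := catalogue.EnsureUpToDate(); err != nil {
		stdlog.Fatal(err)
	}
}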
@ -10,17 +10,32 @@ import (
|
||||
"time"
|
||||
|
||||
contextPkg "coopcloud.tech/abra/pkg/context"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
sshPkg "coopcloud.tech/abra/pkg/ssh"
|
||||
commandconnPkg "coopcloud.tech/abra/pkg/upstream/commandconn"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Conf is a Docker client configuration.
|
||||
type Conf struct {
|
||||
Timeout int
|
||||
}
|
||||
|
||||
// Opt is a Docker client option.
|
||||
type Opt func(c *Conf)
|
||||
|
||||
// WithTimeout specifies a timeout for a Docker client.
|
||||
func WithTimeout(timeout int) Opt {
|
||||
return func(c *Conf) {
|
||||
c.Timeout = timeout
|
||||
}
|
||||
}
|
||||
|
||||
// New initiates a new Docker client. New client connections are validated so
|
||||
// that we ensure connections via SSH to the daemon can succeed. It takes into
|
||||
// account that you may only want the local client and not communicate via SSH.
|
||||
// For this use-case, please pass "default" as the contextName.
|
||||
func New(serverName string) (*client.Client, error) {
|
||||
func New(serverName string, opts ...Opt) (*client.Client, error) {
|
||||
var clientOpts []client.Opt
|
||||
|
||||
if serverName != "default" {
|
||||
@ -34,7 +49,12 @@ func New(serverName string) (*client.Client, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
helper, err := commandconnPkg.NewConnectionHelper(ctxEndpoint)
|
||||
conf := &Conf{}
|
||||
for _, opt := range opts {
|
||||
opt(conf)
|
||||
}
|
||||
|
||||
helper, err := commandconnPkg.NewConnectionHelper(ctxEndpoint, conf.Timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -65,7 +85,7 @@ func New(serverName string) (*client.Client, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.Debugf("created client for %s", serverName)
|
||||
log.Debugf("created client for %s", serverName)
|
||||
|
||||
info, err := cl.Info(context.Background())
|
||||
if err != nil {
|
||||
@ -75,9 +95,9 @@ func New(serverName string) (*client.Client, error) {
|
||||
if info.Swarm.LocalNodeState == "inactive" {
|
||||
if serverName != "default" {
|
||||
return cl, fmt.Errorf("swarm mode not enabled on %s?", serverName)
|
||||
} else {
|
||||
return cl, errors.New("swarm mode not enabled on local server?")
|
||||
}
|
||||
|
||||
return cl, errors.New("swarm mode not enabled on local server?")
|
||||
}
|
||||
|
||||
return cl, nil
|
||||
|
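A short usage sketch of the new functional-option signature. The server name is a placeholder and the package alias is assumed; WithTimeout simply overrides the zero-value Conf.Timeout before the SSH connection helper is built.

package main

import (
	stdlog "log"

	"coopcloud.tech/abra/pkg/client"
)

func main() {
	// "example.com" stands in for an existing server/context name.
	cl, err := client.New("example.com", client.WithTimeout(120))
	if err != nil {
		stdlog.Fatal(err)
	}
	defer cl.Close()
}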
@ -5,28 +5,25 @@ import (
	"fmt"

	"coopcloud.tech/abra/pkg/context"
	"coopcloud.tech/abra/pkg/log"
	commandconnPkg "coopcloud.tech/abra/pkg/upstream/commandconn"
	dConfig "github.com/docker/cli/cli/config"
	"github.com/docker/cli/cli/context/docker"
	contextStore "github.com/docker/cli/cli/context/store"
	"github.com/sirupsen/logrus"
)

type Context = contextStore.Metadata

func CreateContext(contextName string, user string, port string) error {
	host := contextName
	if user != "" {
		host = fmt.Sprintf("%s@%s", user, host)
	}
	if port != "" {
		host = fmt.Sprintf("%s:%s", host, port)
	}
	host = fmt.Sprintf("ssh://%s", host)
// CreateContext creates a new Docker context.
func CreateContext(contextName string) error {
	host := fmt.Sprintf("ssh://%s", contextName)

	if err := createContext(contextName, host); err != nil {
		return err
	}
	logrus.Debugf("created the %s context", contextName)

	log.Debugf("created the %s context", contextName)

	return nil
}
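A small sketch of the simplified CreateContext call. The package location is assumed to be the same client package as above, the host name is a placeholder, and the SSH user/port are presumably now taken from SSH configuration rather than passed as arguments.

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/client"
)

func main() {
	// The stored endpoint for this context becomes ssh://server.example.org
	if err := client.CreateContext("server.example.org"); err != nil {
		fmt.Println("creating context failed:", err)
	}
}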
@ -6,7 +6,7 @@ import (

	"github.com/containers/image/docker"
	"github.com/containers/image/types"
	"github.com/docker/distribution/reference"
	"github.com/distribution/reference"
)

// GetRegistryTags retrieves all tags of an image from a container registry.
@ -2,15 +2,17 @@ package client

import (
	"context"
	"fmt"
	"time"

	"coopcloud.tech/abra/pkg/log"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func GetVolumes(cl *client.Client, ctx context.Context, server string, fs filters.Args) ([]*volume.Volume, error) {
	volumeListOptions := volume.ListOptions{fs}
	volumeListOKBody, err := cl.VolumeList(ctx, volumeListOptions)
	volumeListOKBody, err := cl.VolumeList(ctx, volume.ListOptions{Filters: fs})
	volumeList := volumeListOKBody.Volumes
	if err != nil {
		return volumeList, err
@ -29,13 +31,32 @@ func GetVolumeNames(volumes []*volume.Volume) []string {
	return volumeNames
}

func RemoveVolumes(cl *client.Client, ctx context.Context, server string, volumeNames []string, force bool) error {
func RemoveVolumes(cl *client.Client, ctx context.Context, volumeNames []string, force bool, retries int) error {
	for _, volName := range volumeNames {
		err := cl.VolumeRemove(ctx, volName, force)
		err := retryFunc(5, func() error {
			return cl.VolumeRemove(context.Background(), volName, force)
		})
		if err != nil {
			return err
			return fmt.Errorf("volume %s: %s", volName, err)
		}
	}

	return nil
}

// retryFunc retries the given function for the given retries. After the nth
// retry it waits (n + 1)^2 seconds before the next retry (starting with n=0).
// It returns an error if the function still failed after the last retry.
func retryFunc(retries int, fn func() error) error {
	for i := 0; i < retries; i++ {
		err := fn()
		if err == nil {
			return nil
		}
		if i+1 < retries {
			sleep := time.Duration(i+1) * time.Duration(i+1)
			log.Infof("%s: waiting %d seconds before next retry", err, sleep)
			time.Sleep(sleep * time.Second)
		}
	}
	return fmt.Errorf("%d retries failed", retries)
}
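Since retryFunc is unexported, here is a standalone sketch of the same retry-with-quadratic-backoff pattern, runnable outside the package; the simulated "volume still in use" error is made up, and the example sleeps a few seconds when run.

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff mirrors the retryFunc logic above: up to `retries`
// attempts, sleeping (n+1)^2 seconds after the n-th failed attempt
// (n starting at 0), and giving up after the last one.
func retryWithBackoff(retries int, fn func() error) error {
	for i := 0; i < retries; i++ {
		if err := fn(); err == nil {
			return nil
		}
		if i+1 < retries {
			sleep := time.Duration(i+1) * time.Duration(i+1) * time.Second
			fmt.Printf("attempt %d failed, waiting %s before next retry\n", i+1, sleep)
			time.Sleep(sleep)
		}
	}
	return fmt.Errorf("%d retries failed", retries)
}

func main() {
	attempts := 0
	err := retryWithBackoff(3, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("volume still in use") // simulated transient error
		}
		return nil
	})
	fmt.Println(err, "after", attempts, "attempts") // <nil> after 3 attempts
}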
26
pkg/client/volumes_test.go
Normal file
@ -0,0 +1,26 @@
package client

import (
	"fmt"
	"testing"
)

func TestRetryFunc(t *testing.T) {
	err := retryFunc(1, func() error { return nil })
	if err != nil {
		t.Errorf("should not return an error: %s", err)
	}

	i := 0
	fn := func() error {
		i++
		return fmt.Errorf("oh no, something went wrong!")
	}
	err = retryFunc(2, fn)
	if err == nil {
		t.Error("should return an error")
	}
	if i != 2 {
		t.Errorf("The function should have been called 2 times, got %d", i)
	}
}
@ -1,158 +0,0 @@
|
||||
package compose
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/upstream/stack"
|
||||
loader "coopcloud.tech/abra/pkg/upstream/stack"
|
||||
composetypes "github.com/docker/cli/cli/compose/types"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// UpdateTag updates an image tag in-place on file system local compose files.
|
||||
func UpdateTag(pattern, image, tag, recipeName string) (bool, error) {
|
||||
composeFiles, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
logrus.Debugf("considering %s config(s) for tag update", strings.Join(composeFiles, ", "))
|
||||
|
||||
for _, composeFile := range composeFiles {
|
||||
opts := stack.Deploy{Composefiles: []string{composeFile}}
|
||||
|
||||
envSamplePath := path.Join(config.RECIPES_DIR, recipeName, ".env.sample")
|
||||
sampleEnv, err := config.ReadEnv(envSamplePath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
compose, err := loader.LoadComposefile(opts, sampleEnv)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for _, service := range compose.Services {
|
||||
if service.Image == "" {
|
||||
continue // may be a compose.$optional.yml file
|
||||
}
|
||||
|
||||
img, _ := reference.ParseNormalizedNamed(service.Image)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
var composeTag string
|
||||
switch img.(type) {
|
||||
case reference.NamedTagged:
|
||||
composeTag = img.(reference.NamedTagged).Tag()
|
||||
default:
|
||||
logrus.Debugf("unable to parse %s, skipping", img)
|
||||
continue
|
||||
}
|
||||
|
||||
composeImage := formatter.StripTagMeta(reference.Path(img))
|
||||
|
||||
logrus.Debugf("parsed %s from %s", composeTag, service.Image)
|
||||
|
||||
if image == composeImage {
|
||||
bytes, err := ioutil.ReadFile(composeFile)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
old := fmt.Sprintf("%s:%s", composeImage, composeTag)
|
||||
new := fmt.Sprintf("%s:%s", composeImage, tag)
|
||||
replacedBytes := strings.Replace(string(bytes), old, new, -1)
|
||||
|
||||
logrus.Debugf("updating %s to %s in %s", old, new, compose.Filename)
|
||||
|
||||
if err := ioutil.WriteFile(compose.Filename, []byte(replacedBytes), 0764); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// UpdateLabel updates a label in-place on file system local compose files.
|
||||
func UpdateLabel(pattern, serviceName, label, recipeName string) error {
|
||||
composeFiles, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Debugf("considering %s config(s) for label update", strings.Join(composeFiles, ", "))
|
||||
|
||||
for _, composeFile := range composeFiles {
|
||||
opts := stack.Deploy{Composefiles: []string{composeFile}}
|
||||
|
||||
envSamplePath := path.Join(config.RECIPES_DIR, recipeName, ".env.sample")
|
||||
sampleEnv, err := config.ReadEnv(envSamplePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
compose, err := loader.LoadComposefile(opts, sampleEnv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
serviceExists := false
|
||||
var service composetypes.ServiceConfig
|
||||
for _, s := range compose.Services {
|
||||
if s.Name == serviceName {
|
||||
service = s
|
||||
serviceExists = true
|
||||
}
|
||||
}
|
||||
|
||||
if !serviceExists {
|
||||
continue
|
||||
}
|
||||
|
||||
discovered := false
|
||||
for oldLabel, value := range service.Deploy.Labels {
|
||||
if strings.HasPrefix(oldLabel, "coop-cloud") {
|
||||
discovered = true
|
||||
|
||||
bytes, err := ioutil.ReadFile(composeFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
old := fmt.Sprintf("coop-cloud.${STACK_NAME}.version=%s", value)
|
||||
replacedBytes := strings.Replace(string(bytes), old, label, -1)
|
||||
|
||||
if old == label {
|
||||
logrus.Warnf("%s is already set, nothing to do?", label)
|
||||
return nil
|
||||
}
|
||||
|
||||
logrus.Debugf("updating %s to %s in %s", old, label, compose.Filename)
|
||||
|
||||
if err := ioutil.WriteFile(compose.Filename, []byte(replacedBytes), 0764); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Infof("synced label %s to service %s", label, serviceName)
|
||||
}
|
||||
}
|
||||
|
||||
if !discovered {
|
||||
logrus.Warn("no existing label found, automagic insertion not supported yet")
|
||||
logrus.Fatalf("add '- \"%s\"' manually to the 'app' service in %s", label, composeFile)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
124
pkg/config/abra.go
Normal file
@ -0,0 +1,124 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// LoadAbraConfig returns the abra configuration. It tries to find an abra
|
||||
// configuration file (see findAbraConfig for lookup logic). When no
|
||||
// configuration was found it returns the default config.
|
||||
func LoadAbraConfig() Abra {
|
||||
wd, _ := os.Getwd()
|
||||
configFile := findAbraConfig(wd)
|
||||
if configFile == "" {
|
||||
log.Debugf("no config file found")
|
||||
return Abra{}
|
||||
}
|
||||
data, err := os.ReadFile(configFile)
|
||||
if err != nil {
|
||||
// Do nothing, when an error occurs
|
||||
log.Debugf("error reading config file: %s", err)
|
||||
return Abra{}
|
||||
}
|
||||
|
||||
config := Abra{}
|
||||
err = yaml.Unmarshal(data, &config)
|
||||
if err != nil {
|
||||
// Do nothing, when an error occurs
|
||||
log.Debugf("error loading config file: %s", err)
|
||||
return Abra{}
|
||||
}
|
||||
log.Debugf("config file loaded from: %s", configFile)
|
||||
config.configPath = filepath.Dir(configFile)
|
||||
return config
|
||||
}
|
||||
|
||||
// findAbraConfig recursively looks for an abra.y(a)ml file in the given directory.
|
||||
// When the file was not found it calls the function again with the parent
|
||||
// directory until the home directory is hit. When no abra config was found it
|
||||
// returns an empty string.
|
||||
func findAbraConfig(dir string) string {
|
||||
dir, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
if dir == os.ExpandEnv("$HOME") || dir == "/" {
|
||||
return ""
|
||||
}
|
||||
p := path.Join(dir, "abra.yaml")
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
return p
|
||||
}
|
||||
p = path.Join(dir, "abra.yml")
|
||||
if _, err := os.Stat(p); err == nil {
|
||||
return p
|
||||
}
|
||||
return findAbraConfig(filepath.Dir(dir))
|
||||
}
|
||||
|
||||
// Abra defines the configuration file for abra.
|
||||
type Abra struct {
|
||||
configPath string
|
||||
AbraDir string `yaml:"abraDir"`
|
||||
}
|
||||
|
||||
// GetAbraDir returns the abra dir. It has the following logic:
|
||||
// 1. check if $ABRA_DIR is set
|
||||
// 2. check if abraDir was set in a config file
|
||||
// 3. use $HOME/.abra when above two options failed
|
||||
func (a Abra) GetAbraDir() string {
|
||||
if dir, exists := os.LookupEnv("ABRA_DIR"); exists && dir != "" {
|
||||
log.Debug("read abra dir from $ABRA_DIR")
|
||||
return dir
|
||||
}
|
||||
if a.AbraDir != "" {
|
||||
log.Debug("read abra dir from config file")
|
||||
if path.IsAbs(a.AbraDir) {
|
||||
return a.AbraDir
|
||||
}
|
||||
// Make the path absolute
|
||||
return path.Join(a.configPath, a.AbraDir)
|
||||
}
|
||||
log.Debug("using default abra dir")
|
||||
return os.ExpandEnv("$HOME/.abra")
|
||||
}
|
||||
|
||||
func (a Abra) GetServersDir() string { return path.Join(a.GetAbraDir(), "servers") }
|
||||
func (a Abra) GetRecipesDir() string { return path.Join(a.GetAbraDir(), "recipes") }
|
||||
func (a Abra) GetVendorDir() string { return path.Join(a.GetAbraDir(), "vendor") }
|
||||
func (a Abra) GetBackupDir() string { return path.Join(a.GetAbraDir(), "backups") }
|
||||
func (a Abra) GetCatalogueDir() string { return path.Join(a.GetAbraDir(), "catalogue") }
|
||||
|
||||
var config = LoadAbraConfig()
|
||||
|
||||
var (
|
||||
ABRA_DIR = config.GetAbraDir()
|
||||
SERVERS_DIR = config.GetServersDir()
|
||||
RECIPES_DIR = config.GetRecipesDir()
|
||||
VENDOR_DIR = config.GetVendorDir()
|
||||
BACKUP_DIR = config.GetBackupDir()
|
||||
CATALOGUE_DIR = config.GetCatalogueDir()
|
||||
RECIPES_JSON = path.Join(config.GetCatalogueDir(), "recipes.json")
|
||||
REPOS_BASE_URL = "https://git.coopcloud.tech/coop-cloud"
|
||||
CATALOGUE_JSON_REPO_NAME = "recipes-catalogue-json"
|
||||
TOOLSHED_SSH_URL_TEMPLATE = "ssh://git@git.coopcloud.tech:2222/toolshed/%s.git"
|
||||
RECIPES_SSH_URL_TEMPLATE = "ssh://git@git.coopcloud.tech:2222/coop-cloud/%s.git"
|
||||
|
||||
// NOTE(d1): please note, this was done purely out of laziness on our part
|
||||
// AFAICR. it's easy to punt the value into the label because that is what is
|
||||
// expects. it's not particularly useful in terms of UI/UX but hey, nobody
|
||||
// complained yet!
|
||||
CHAOS_DEFAULT = "false"
|
||||
|
||||
DIRTY_DEFAULT = "+U"
|
||||
|
||||
NO_DOMAIN_DEFAULT = "N/A"
|
||||
NO_VERSION_DEFAULT = "N/A"
|
||||
|
||||
UNKNOWN_DEFAULT = "unknown"
|
||||
)
|
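A minimal sketch of how the new configuration lookup is consumed. The import path matches the one used elsewhere in this changeset; the printed values depend on the caller's environment and working directory.

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/config"
)

func main() {
	// Resolution order for the abra dir:
	//   1. $ABRA_DIR, if set and non-empty
	//   2. abraDir from the nearest abra.yaml / abra.yml above the working directory
	//   3. $HOME/.abra as the fallback
	cfg := config.LoadAbraConfig()
	fmt.Println(cfg.GetAbraDir())
	fmt.Println(cfg.GetRecipesDir()) // <abra dir>/recipes
}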
133
pkg/config/abra_test.go
Normal file
@ -0,0 +1,133 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFindAbraConfig(t *testing.T) {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
tests := []struct {
|
||||
Dir string
|
||||
Config string
|
||||
}{
|
||||
{
|
||||
Dir: "testdata/abraconfig1",
|
||||
Config: filepath.Join(wd, "testdata/abraconfig1/abra.yaml"),
|
||||
},
|
||||
{
|
||||
Dir: "testdata/abraconfig1/subdir",
|
||||
Config: filepath.Join(wd, "testdata/abraconfig1/abra.yaml"),
|
||||
},
|
||||
{
|
||||
Dir: "testdata/abraconfig2",
|
||||
Config: filepath.Join(wd, "testdata/abraconfig2/abra.yml"),
|
||||
},
|
||||
{
|
||||
Dir: "testdata/abraconfig2/subdir",
|
||||
Config: filepath.Join(wd, "testdata/abraconfig2/abra.yml"),
|
||||
},
|
||||
{
|
||||
Dir: "testdata",
|
||||
Config: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.Dir, func(t *testing.T) {
|
||||
config := findAbraConfig(tc.Dir)
|
||||
if config != tc.Config {
|
||||
t.Errorf("\nwant: %s\ngot: %s", tc.Config, config)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadAbraConfigGetAbraDir(t *testing.T) {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
t.Setenv("ABRA_DIR", "")
|
||||
|
||||
t.Run("default", func(t *testing.T) {
|
||||
cfg := LoadAbraConfig()
|
||||
wantAbraDir := os.ExpandEnv("$HOME/.abra")
|
||||
if cfg.GetAbraDir() != wantAbraDir {
|
||||
t.Errorf("\nwant: %s\ngot: %s", wantAbraDir, cfg.GetAbraDir())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("from config file", func(t *testing.T) {
|
||||
t.Cleanup(func() { os.Chdir(wd) })
|
||||
err = os.Chdir(filepath.Join(wd, "testdata/abraconfig1"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cfg := LoadAbraConfig()
|
||||
wantAbraDir := filepath.Join(wd, "testdata/abraconfig1/foobar")
|
||||
if cfg.GetAbraDir() != wantAbraDir {
|
||||
t.Errorf("\nwant: %s\ngot: %s", wantAbraDir, cfg.GetAbraDir())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("default when config file is empty", func(t *testing.T) {
|
||||
t.Cleanup(func() { os.Chdir(wd) })
|
||||
err := os.Chdir(filepath.Join(wd, "testdata/abraconfig2"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cfg := LoadAbraConfig()
|
||||
wantAbraDir := os.ExpandEnv("$HOME/.abra")
|
||||
if cfg.GetAbraDir() != wantAbraDir {
|
||||
t.Errorf("\nwant: %s\ngot: %s", wantAbraDir, cfg.GetAbraDir())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("from env variable", func(t *testing.T) {
|
||||
t.Setenv("ABRA_DIR", "foo")
|
||||
cfg := LoadAbraConfig()
|
||||
wantAbraDir := "foo"
|
||||
if cfg.GetAbraDir() != wantAbraDir {
|
||||
t.Errorf("\nwant: %s\ngot: %s", wantAbraDir, cfg.GetAbraDir())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestLoadAbraConfigServersDir(t *testing.T) {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
t.Setenv("ABRA_DIR", "")
|
||||
|
||||
t.Run("default", func(t *testing.T) {
|
||||
cfg := LoadAbraConfig()
|
||||
wantServersDir := os.ExpandEnv("$HOME/.abra/servers")
|
||||
if cfg.GetServersDir() != wantServersDir {
|
||||
t.Errorf("\nwant: %s\ngot: %s", wantServersDir, cfg.GetServersDir())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("from config file", func(t *testing.T) {
|
||||
t.Cleanup(func() { os.Chdir(wd) })
|
||||
err = os.Chdir(filepath.Join(wd, "testdata/abraconfig1"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
cfg := LoadAbraConfig()
|
||||
log.Println(cfg)
|
||||
wantServersDir := filepath.Join(wd, "testdata/abraconfig1/foobar/servers")
|
||||
if cfg.GetServersDir() != wantServersDir {
|
||||
t.Errorf("\nwant: %s\ngot: %s", wantServersDir, cfg.GetServersDir())
|
||||
}
|
||||
})
|
||||
}
|
@ -1,572 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/schollz/progressbar/v3"
|
||||
|
||||
"coopcloud.tech/abra/pkg/client"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/upstream/convert"
|
||||
loader "coopcloud.tech/abra/pkg/upstream/stack"
|
||||
stack "coopcloud.tech/abra/pkg/upstream/stack"
|
||||
composetypes "github.com/docker/cli/cli/compose/types"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Type aliases to make code hints easier to understand
|
||||
|
||||
// AppEnv is a map of the values in an apps env config
|
||||
type AppEnv = map[string]string
|
||||
|
||||
// AppName is AppName
|
||||
type AppName = string
|
||||
|
||||
// AppFile represents app env files on disk without reading the contents
|
||||
type AppFile struct {
|
||||
Path string
|
||||
Server string
|
||||
}
|
||||
|
||||
// AppFiles is a slice of appfiles
|
||||
type AppFiles map[AppName]AppFile
|
||||
|
||||
// App reprents an app with its env file read into memory
|
||||
type App struct {
|
||||
Name AppName
|
||||
Recipe string
|
||||
Domain string
|
||||
Env AppEnv
|
||||
Server string
|
||||
Path string
|
||||
}
|
||||
|
||||
// StackName gets whatever the docker safe (uses the right delimiting
|
||||
// character, e.g. "_") stack name is for the app. In general, you don't want
|
||||
// to use this to show anything to end-users, you want use a.Name instead.
|
||||
func (a App) StackName() string {
|
||||
if _, exists := a.Env["STACK_NAME"]; exists {
|
||||
return a.Env["STACK_NAME"]
|
||||
}
|
||||
|
||||
stackName := SanitiseAppName(a.Name)
|
||||
|
||||
if len(stackName) > 45 {
|
||||
logrus.Debugf("trimming %s to %s to avoid runtime limits", stackName, stackName[:45])
|
||||
stackName = stackName[:45]
|
||||
}
|
||||
|
||||
a.Env["STACK_NAME"] = stackName
|
||||
|
||||
return stackName
|
||||
}
|
||||
|
||||
// Filters retrieves exact app filters for querying the container runtime. Due
|
||||
// to upstream issues, filtering works different depending on what you're
|
||||
// querying. So, for example, secrets don't work with regex! The caller needs
|
||||
// to implement their own validation that the right secrets are matched. In
|
||||
// order to handle these cases, we provide the `appendServiceNames` /
|
||||
// `exactMatch` modifiers.
|
||||
func (a App) Filters(appendServiceNames, exactMatch bool) (filters.Args, error) {
|
||||
filters := filters.NewArgs()
|
||||
|
||||
composeFiles, err := GetAppComposeFiles(a.Recipe, a.Env)
|
||||
if err != nil {
|
||||
return filters, err
|
||||
}
|
||||
|
||||
opts := stack.Deploy{Composefiles: composeFiles}
|
||||
compose, err := GetAppComposeConfig(a.Recipe, opts, a.Env)
|
||||
if err != nil {
|
||||
return filters, err
|
||||
}
|
||||
|
||||
for _, service := range compose.Services {
|
||||
var filter string
|
||||
|
||||
if appendServiceNames {
|
||||
if exactMatch {
|
||||
filter = fmt.Sprintf("^%s_%s", a.StackName(), service.Name)
|
||||
} else {
|
||||
filter = fmt.Sprintf("%s_%s", a.StackName(), service.Name)
|
||||
}
|
||||
} else {
|
||||
if exactMatch {
|
||||
filter = fmt.Sprintf("^%s", a.StackName())
|
||||
} else {
|
||||
filter = fmt.Sprintf("%s", a.StackName())
|
||||
}
|
||||
}
|
||||
|
||||
filters.Add("name", filter)
|
||||
}
|
||||
|
||||
return filters, nil
|
||||
}
|
||||
|
||||
// ByServer sort a slice of Apps
|
||||
type ByServer []App
|
||||
|
||||
func (a ByServer) Len() int { return len(a) }
|
||||
func (a ByServer) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByServer) Less(i, j int) bool {
|
||||
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
|
||||
}
|
||||
|
||||
// ByServerAndRecipe sort a slice of Apps
|
||||
type ByServerAndRecipe []App
|
||||
|
||||
func (a ByServerAndRecipe) Len() int { return len(a) }
|
||||
func (a ByServerAndRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByServerAndRecipe) Less(i, j int) bool {
|
||||
if a[i].Server == a[j].Server {
|
||||
return strings.ToLower(a[i].Recipe) < strings.ToLower(a[j].Recipe)
|
||||
}
|
||||
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
|
||||
}
|
||||
|
||||
// ByRecipe sort a slice of Apps
|
||||
type ByRecipe []App
|
||||
|
||||
func (a ByRecipe) Len() int { return len(a) }
|
||||
func (a ByRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByRecipe) Less(i, j int) bool {
|
||||
return strings.ToLower(a[i].Recipe) < strings.ToLower(a[j].Recipe)
|
||||
}
|
||||
|
||||
// ByName sort a slice of Apps
|
||||
type ByName []App
|
||||
|
||||
func (a ByName) Len() int { return len(a) }
|
||||
func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByName) Less(i, j int) bool {
|
||||
return strings.ToLower(a[i].Name) < strings.ToLower(a[j].Name)
|
||||
}
|
||||
|
||||
func readAppEnvFile(appFile AppFile, name AppName) (App, error) {
|
||||
env, err := ReadEnv(appFile.Path)
|
||||
if err != nil {
|
||||
return App{}, fmt.Errorf("env file for %s couldn't be read: %s", name, err.Error())
|
||||
}
|
||||
|
||||
logrus.Debugf("read env %s from %s", env, appFile.Path)
|
||||
|
||||
app, err := newApp(env, name, appFile)
|
||||
if err != nil {
|
||||
return App{}, fmt.Errorf("env file for %s has issues: %s", name, err.Error())
|
||||
}
|
||||
|
||||
return app, nil
|
||||
}
|
||||
|
||||
// newApp creates new App object
|
||||
func newApp(env AppEnv, name string, appFile AppFile) (App, error) {
|
||||
domain := env["DOMAIN"]
|
||||
|
||||
recipe, exists := env["RECIPE"]
|
||||
if !exists {
|
||||
recipe, exists = env["TYPE"]
|
||||
if !exists {
|
||||
return App{}, fmt.Errorf("%s is missing the TYPE env var?", name)
|
||||
}
|
||||
}
|
||||
|
||||
return App{
|
||||
Name: name,
|
||||
Domain: domain,
|
||||
Recipe: recipe,
|
||||
Env: env,
|
||||
Server: appFile.Server,
|
||||
Path: appFile.Path,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// LoadAppFiles gets all app files for a given set of servers or all servers.
|
||||
func LoadAppFiles(servers ...string) (AppFiles, error) {
|
||||
appFiles := make(AppFiles)
|
||||
if len(servers) == 1 {
|
||||
if servers[0] == "" {
|
||||
// Empty servers flag, one string will always be passed
|
||||
var err error
|
||||
servers, err = GetAllFoldersInDirectory(SERVERS_DIR)
|
||||
if err != nil {
|
||||
return appFiles, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("collecting metadata from %v servers: %s", len(servers), strings.Join(servers, ", "))
|
||||
|
||||
for _, server := range servers {
|
||||
serverDir := path.Join(SERVERS_DIR, server)
|
||||
files, err := GetAllFilesInDirectory(serverDir)
|
||||
if err != nil {
|
||||
return appFiles, fmt.Errorf("server %s doesn't exist? Run \"abra server ls\" to check", server)
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
appName := strings.TrimSuffix(file.Name(), ".env")
|
||||
appFilePath := path.Join(SERVERS_DIR, server, file.Name())
|
||||
appFiles[appName] = AppFile{
|
||||
Path: appFilePath,
|
||||
Server: server,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return appFiles, nil
|
||||
}
|
||||
|
||||
// GetApp loads an apps settings, reading it from file, in preparation to use
|
||||
// it. It should only be used when ready to use the env file to keep IO
|
||||
// operations down.
|
||||
func GetApp(apps AppFiles, name AppName) (App, error) {
|
||||
appFile, exists := apps[name]
|
||||
if !exists {
|
||||
return App{}, fmt.Errorf("cannot find app with name %s", name)
|
||||
}
|
||||
|
||||
app, err := readAppEnvFile(appFile, name)
|
||||
if err != nil {
|
||||
return App{}, err
|
||||
}
|
||||
|
||||
return app, nil
|
||||
}
|
||||
|
||||
// GetApps returns a slice of Apps with their env files read from a given
|
||||
// slice of AppFiles.
|
||||
func GetApps(appFiles AppFiles, recipeFilter string) ([]App, error) {
|
||||
var apps []App
|
||||
|
||||
for name := range appFiles {
|
||||
app, err := GetApp(appFiles, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if recipeFilter != "" {
|
||||
if app.Recipe == recipeFilter {
|
||||
apps = append(apps, app)
|
||||
}
|
||||
} else {
|
||||
apps = append(apps, app)
|
||||
}
|
||||
}
|
||||
|
||||
return apps, nil
|
||||
}
|
||||
|
||||
// GetAppServiceNames retrieves a list of app service names.
|
||||
func GetAppServiceNames(appName string) ([]string, error) {
|
||||
var serviceNames []string
|
||||
|
||||
appFiles, err := LoadAppFiles("")
|
||||
if err != nil {
|
||||
return serviceNames, err
|
||||
}
|
||||
|
||||
app, err := GetApp(appFiles, appName)
|
||||
if err != nil {
|
||||
return serviceNames, err
|
||||
}
|
||||
|
||||
composeFiles, err := GetAppComposeFiles(app.Recipe, app.Env)
|
||||
if err != nil {
|
||||
return serviceNames, err
|
||||
}
|
||||
|
||||
opts := stack.Deploy{Composefiles: composeFiles}
|
||||
compose, err := GetAppComposeConfig(app.Recipe, opts, app.Env)
|
||||
if err != nil {
|
||||
return serviceNames, err
|
||||
}
|
||||
|
||||
for _, service := range compose.Services {
|
||||
serviceNames = append(serviceNames, service.Name)
|
||||
}
|
||||
|
||||
return serviceNames, nil
|
||||
}
|
||||
|
||||
// GetAppNames retrieves a list of app names.
|
||||
func GetAppNames() ([]string, error) {
|
||||
var appNames []string
|
||||
|
||||
appFiles, err := LoadAppFiles("")
|
||||
if err != nil {
|
||||
return appNames, err
|
||||
}
|
||||
|
||||
apps, err := GetApps(appFiles, "")
|
||||
if err != nil {
|
||||
return appNames, err
|
||||
}
|
||||
|
||||
for _, app := range apps {
|
||||
appNames = append(appNames, app.Name)
|
||||
}
|
||||
|
||||
return appNames, nil
|
||||
}
|
||||
|
||||
// TemplateAppEnvSample copies the example env file for the app into the users
|
||||
// env files.
|
||||
func TemplateAppEnvSample(recipeName, appName, server, domain string) error {
|
||||
envSamplePath := path.Join(RECIPES_DIR, recipeName, ".env.sample")
|
||||
envSample, err := ioutil.ReadFile(envSamplePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
appEnvPath := path.Join(ABRA_DIR, "servers", server, fmt.Sprintf("%s.env", appName))
|
||||
if _, err := os.Stat(appEnvPath); os.IsExist(err) {
|
||||
return fmt.Errorf("%s already exists?", appEnvPath)
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(appEnvPath, envSample, 0664)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
read, err := ioutil.ReadFile(appEnvPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newContents := strings.Replace(string(read), recipeName+".example.com", domain, -1)
|
||||
|
||||
err = ioutil.WriteFile(appEnvPath, []byte(newContents), 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Debugf("copied & templated %s to %s", envSamplePath, appEnvPath)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SanitiseAppName makes a app name usable with Docker by replacing illegal
|
||||
// characters.
|
||||
func SanitiseAppName(name string) string {
|
||||
return strings.ReplaceAll(name, ".", "_")
|
||||
}
|
||||
|
||||
// GetAppStatuses queries servers to check the deployment status of given apps.
|
||||
func GetAppStatuses(apps []App, MachineReadable bool) (map[string]map[string]string, error) {
|
||||
statuses := make(map[string]map[string]string)
|
||||
|
||||
servers := make(map[string]struct{})
|
||||
for _, app := range apps {
|
||||
if _, ok := servers[app.Server]; !ok {
|
||||
servers[app.Server] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
var bar *progressbar.ProgressBar
|
||||
if !MachineReadable {
|
||||
bar = formatter.CreateProgressbar(len(servers), "querying remote servers...")
|
||||
}
|
||||
|
||||
ch := make(chan stack.StackStatus, len(servers))
|
||||
for server := range servers {
|
||||
cl, err := client.New(server)
|
||||
if err != nil {
|
||||
return statuses, err
|
||||
}
|
||||
|
||||
go func(s string) {
|
||||
ch <- stack.GetAllDeployedServices(cl, s)
|
||||
if !MachineReadable {
|
||||
bar.Add(1)
|
||||
}
|
||||
}(server)
|
||||
}
|
||||
|
||||
for range servers {
|
||||
status := <-ch
|
||||
if status.Err != nil {
|
||||
return statuses, status.Err
|
||||
}
|
||||
|
||||
for _, service := range status.Services {
|
||||
result := make(map[string]string)
|
||||
name := service.Spec.Labels[convert.LabelNamespace]
|
||||
|
||||
if _, ok := statuses[name]; !ok {
|
||||
result["status"] = "deployed"
|
||||
}
|
||||
|
||||
labelKey := fmt.Sprintf("coop-cloud.%s.chaos", name)
|
||||
chaos, ok := service.Spec.Labels[labelKey]
|
||||
if ok {
|
||||
result["chaos"] = chaos
|
||||
}
|
||||
|
||||
labelKey = fmt.Sprintf("coop-cloud.%s.chaos-version", name)
|
||||
if chaosVersion, ok := service.Spec.Labels[labelKey]; ok {
|
||||
result["chaosVersion"] = chaosVersion
|
||||
}
|
||||
|
||||
labelKey = fmt.Sprintf("coop-cloud.%s.autoupdate", name)
|
||||
if autoUpdate, ok := service.Spec.Labels[labelKey]; ok {
|
||||
result["autoUpdate"] = autoUpdate
|
||||
} else {
|
||||
result["autoUpdate"] = "false"
|
||||
}
|
||||
|
||||
labelKey = fmt.Sprintf("coop-cloud.%s.version", name)
|
||||
if version, ok := service.Spec.Labels[labelKey]; ok {
|
||||
result["version"] = version
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
|
||||
statuses[name] = result
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("retrieved app statuses: %s", statuses)
|
||||
|
||||
return statuses, nil
|
||||
}
|
||||
|
||||
// GetAppComposeFiles gets the list of compose files for an app which should be
|
||||
// merged into a composetypes.Config while respecting the COMPOSE_FILE env var.
|
||||
func GetAppComposeFiles(recipe string, appEnv AppEnv) ([]string, error) {
|
||||
var composeFiles []string
|
||||
|
||||
if _, ok := appEnv["COMPOSE_FILE"]; !ok {
|
||||
logrus.Debug("no COMPOSE_FILE detected, loading compose.yml")
|
||||
path := fmt.Sprintf("%s/%s/compose.yml", RECIPES_DIR, recipe)
|
||||
composeFiles = append(composeFiles, path)
|
||||
return composeFiles, nil
|
||||
}
|
||||
|
||||
composeFileEnvVar := appEnv["COMPOSE_FILE"]
|
||||
envVars := strings.Split(composeFileEnvVar, ":")
|
||||
logrus.Debugf("COMPOSE_FILE detected (%s), loading %s", composeFileEnvVar, strings.Join(envVars, ", "))
|
||||
for _, file := range strings.Split(composeFileEnvVar, ":") {
|
||||
path := fmt.Sprintf("%s/%s/%s", RECIPES_DIR, recipe, file)
|
||||
composeFiles = append(composeFiles, path)
|
||||
}
|
||||
|
||||
logrus.Debugf("retrieved %s configs for %s", strings.Join(composeFiles, ", "), recipe)
|
||||
|
||||
return composeFiles, nil
|
||||
}
|
||||
|
||||
// GetAppComposeConfig retrieves a compose specification for a recipe. This
|
||||
// specification is the result of a merge of all the compose.**.yml files in
|
||||
// the recipe repository.
|
||||
func GetAppComposeConfig(recipe string, opts stack.Deploy, appEnv AppEnv) (*composetypes.Config, error) {
|
||||
compose, err := loader.LoadComposefile(opts, appEnv)
|
||||
if err != nil {
|
||||
return &composetypes.Config{}, err
|
||||
}
|
||||
|
||||
logrus.Debugf("retrieved %s for %s", compose.Filename, recipe)
|
||||
|
||||
return compose, nil
|
||||
}
|
||||
|
||||
// ExposeAllEnv exposes all env variables to the app container
|
||||
func ExposeAllEnv(stackName string, compose *composetypes.Config, appEnv AppEnv) {
|
||||
for _, service := range compose.Services {
|
||||
if service.Name == "app" {
|
||||
logrus.Debugf("Add the following environment to the app service config of %s:", stackName)
|
||||
for k, v := range appEnv {
|
||||
_, exists := service.Environment[k]
|
||||
if !exists {
|
||||
value := v
|
||||
service.Environment[k] = &value
|
||||
logrus.Debugf("Add Key: %s Value: %s to %s", k, value, stackName)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetRecipeLabel adds the label 'coop-cloud.${STACK_NAME}.recipe=${RECIPE}' to the app container
|
||||
// to signal which recipe is connected to the deployed app
|
||||
func SetRecipeLabel(compose *composetypes.Config, stackName string, recipe string) {
|
||||
for _, service := range compose.Services {
|
||||
if service.Name == "app" {
|
||||
logrus.Debugf("set recipe label 'coop-cloud.%s.recipe' to %s for %s", stackName, recipe, stackName)
|
||||
labelKey := fmt.Sprintf("coop-cloud.%s.recipe", stackName)
|
||||
service.Deploy.Labels[labelKey] = recipe
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetChaosLabel adds the label 'coop-cloud.${STACK_NAME}.chaos=true/false' to the app container
|
||||
// to signal if the app is deployed in chaos mode
|
||||
func SetChaosLabel(compose *composetypes.Config, stackName string, chaos bool) {
|
||||
for _, service := range compose.Services {
|
||||
if service.Name == "app" {
|
||||
logrus.Debugf("set label 'coop-cloud.%s.chaos' to %v for %s", stackName, chaos, stackName)
|
||||
labelKey := fmt.Sprintf("coop-cloud.%s.chaos", stackName)
|
||||
service.Deploy.Labels[labelKey] = strconv.FormatBool(chaos)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetChaosVersionLabel adds the label 'coop-cloud.${STACK_NAME}.chaos-version=$(GIT_COMMIT)' to the app container
|
||||
func SetChaosVersionLabel(compose *composetypes.Config, stackName string, chaosVersion string) {
|
||||
for _, service := range compose.Services {
|
||||
if service.Name == "app" {
|
||||
logrus.Debugf("set label 'coop-cloud.%s.chaos-version' to %v for %s", stackName, chaosVersion, stackName)
|
||||
labelKey := fmt.Sprintf("coop-cloud.%s.chaos-version", stackName)
|
||||
service.Deploy.Labels[labelKey] = chaosVersion
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetUpdateLabel adds env ENABLE_AUTO_UPDATE as label to enable/disable the
|
||||
// auto update process for this app. The default if this variable is not set is to disable
|
||||
// the auto update process.
|
||||
func SetUpdateLabel(compose *composetypes.Config, stackName string, appEnv AppEnv) {
|
||||
for _, service := range compose.Services {
|
||||
if service.Name == "app" {
|
||||
enable_auto_update, exists := appEnv["ENABLE_AUTO_UPDATE"]
|
||||
if !exists {
|
||||
enable_auto_update = "false"
|
||||
}
|
||||
logrus.Debugf("set label 'coop-cloud.%s.autoupdate' to %s for %s", stackName, enable_auto_update, stackName)
|
||||
labelKey := fmt.Sprintf("coop-cloud.%s.autoupdate", stackName)
|
||||
service.Deploy.Labels[labelKey] = enable_auto_update
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetLabel reads docker labels in the format of "coop-cloud.${STACK_NAME}.${LABEL}" from the local compose files
|
||||
func GetLabel(compose *composetypes.Config, stackName string, label string) string {
|
||||
for _, service := range compose.Services {
|
||||
if service.Name == "app" {
|
||||
labelKey := fmt.Sprintf("coop-cloud.%s.%s", stackName, label)
|
||||
logrus.Debugf("get label '%s'", labelKey)
|
||||
if labelValue, ok := service.Deploy.Labels[labelKey]; ok {
|
||||
return labelValue
|
||||
}
|
||||
}
|
||||
}
|
||||
logrus.Debugf("no %s label found for %s", label, stackName)
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetTimeoutFromLabel reads the timeout value from docker label "coop-cloud.${STACK_NAME}.TIMEOUT" and returns 50 as default value
|
||||
func GetTimeoutFromLabel(compose *composetypes.Config, stackName string) (int, error) {
|
||||
var timeout = 50 // Default Timeout
|
||||
var err error = nil
|
||||
if timeoutLabel := GetLabel(compose, stackName, "timeout"); timeoutLabel != "" {
|
||||
logrus.Debugf("timeout label: %s", timeoutLabel)
|
||||
timeout, err = strconv.Atoi(timeoutLabel)
|
||||
}
|
||||
return timeout, err
|
||||
}
|
@ -1,36 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewApp(t *testing.T) {
|
||||
app, err := newApp(expectedAppEnv, appName, expectedAppFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(app, expectedApp) {
|
||||
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadAppEnvFile(t *testing.T) {
|
||||
app, err := readAppEnvFile(expectedAppFile, appName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(app, expectedApp) {
|
||||
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetApp(t *testing.T) {
|
||||
app, err := GetApp(expectedAppFiles, appName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(app, expectedApp) {
|
||||
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp)
|
||||
}
|
||||
}
|
@ -1,7 +1,6 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"io/ioutil"
|
||||
@ -10,20 +9,13 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Autonomic-Cooperative/godotenv"
|
||||
"github.com/sirupsen/logrus"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
)
|
||||
|
||||
var ABRA_DIR = os.ExpandEnv("$HOME/.abra")
|
||||
var SERVERS_DIR = path.Join(ABRA_DIR, "servers")
|
||||
var RECIPES_DIR = path.Join(ABRA_DIR, "recipes")
|
||||
var VENDOR_DIR = path.Join(ABRA_DIR, "vendor")
|
||||
var BACKUP_DIR = path.Join(ABRA_DIR, "backups")
|
||||
var CATALOGUE_DIR = path.Join(ABRA_DIR, "catalogue")
|
||||
var RECIPES_JSON = path.Join(ABRA_DIR, "catalogue", "recipes.json")
|
||||
var REPOS_BASE_URL = "https://git.coopcloud.tech/coop-cloud"
|
||||
var CATALOGUE_JSON_REPO_NAME = "recipes-catalogue-json"
|
||||
var SSH_URL_TEMPLATE = "ssh://git@git.coopcloud.tech:2222/coop-cloud/%s.git"
|
||||
const MAX_SANITISED_APP_NAME_LENGTH = 45
|
||||
const MAX_DOCKER_SECRET_LENGTH = 64
|
||||
|
||||
var BackupbotLabel = "coop-cloud.backupbot.enabled"
|
||||
|
||||
// GetServers retrieves all servers.
|
||||
func GetServers() ([]string, error) {
|
||||
@ -34,23 +26,16 @@ func GetServers() ([]string, error) {
|
||||
return servers, err
|
||||
}
|
||||
|
||||
logrus.Debugf("retrieved %v servers: %s", len(servers), servers)
|
||||
|
||||
return servers, nil
|
||||
}
|
||||
|
||||
// ReadEnv loads an app environment into a map.
|
||||
func ReadEnv(filePath string) (AppEnv, error) {
|
||||
var envFile AppEnv
|
||||
|
||||
envFile, err := godotenv.Read(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var filtered []string
|
||||
for _, s := range servers {
|
||||
if !strings.HasPrefix(s, ".") {
|
||||
filtered = append(filtered, s)
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("read %s from %s", envFile, filePath)
|
||||
log.Debugf("retrieved %v servers: %s", len(filtered), filtered)
|
||||
|
||||
return envFile, nil
|
||||
return filtered, nil
|
||||
}
|
||||
|
||||
// ReadServerNames retrieves all server names.
|
||||
@ -61,7 +46,7 @@ func ReadServerNames() ([]string, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.Debugf("read %s from %s", strings.Join(serverNames, ","), SERVERS_DIR)
|
||||
log.Debugf("read %s from %s", strings.Join(serverNames, ","), SERVERS_DIR)
|
||||
|
||||
return serverNames, nil
|
||||
}
|
||||
@ -85,7 +70,7 @@ func GetAllFilesInDirectory(directory string) ([]fs.FileInfo, error) {
|
||||
|
||||
realPath, err := filepath.EvalSymlinks(filePath)
|
||||
if err != nil {
|
||||
logrus.Warningf("broken symlink in your abra config folders: %s", filePath)
|
||||
log.Warnf("broken symlink in your abra config folders: %s", filePath)
|
||||
} else {
|
||||
realFile, err := os.Stat(realPath)
|
||||
if err != nil {
|
||||
@ -118,7 +103,7 @@ func GetAllFoldersInDirectory(directory string) ([]string, error) {
|
||||
filePath := path.Join(directory, file.Name())
|
||||
realDir, err := filepath.EvalSymlinks(filePath)
|
||||
if err != nil {
|
||||
logrus.Warningf("broken symlink in your abra config folders: %s", filePath)
|
||||
log.Warnf("broken symlink in your abra config folders: %s", filePath)
|
||||
} else if stat, err := os.Stat(realDir); err == nil && stat.IsDir() {
|
||||
// path is a directory
|
||||
folders = append(folders, file.Name())
|
||||
@ -128,34 +113,3 @@ func GetAllFoldersInDirectory(directory string) ([]string, error) {
|
||||
|
||||
return folders, nil
|
||||
}
|
||||
|
||||
// ReadAbraShEnvVars reads env vars from an abra.sh recipe file.
|
||||
func ReadAbraShEnvVars(abraSh string) (map[string]string, error) {
|
||||
envVars := make(map[string]string)
|
||||
|
||||
file, err := os.Open(abraSh)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return envVars, nil
|
||||
}
|
||||
return envVars, err
|
||||
}
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if strings.Contains(line, "export") {
|
||||
splitVals := strings.Split(line, "export ")
|
||||
envVarDef := splitVals[len(splitVals)-1]
|
||||
keyVal := strings.Split(envVarDef, "=")
|
||||
if len(keyVal) != 2 {
|
||||
return envVars, fmt.Errorf("couldn't parse %s", line)
|
||||
}
|
||||
envVars[keyVal[0]] = keyVal[1]
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("read %s from %s", envVars, abraSh)
|
||||
|
||||
return envVars, nil
|
||||
}
|
||||
|
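The last hunk shown for this file drops ReadAbraShEnvVars entirely. For reference, its export-line parsing boils down to the sketch below; the only liberty taken is reading from a string via strings.NewReader instead of opening an abra.sh file on disk:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// parseExports extracts KEY=VAL pairs from lines containing "export",
// following the same split logic as the removed ReadAbraShEnvVars.
func parseExports(script string) (map[string]string, error) {
	envVars := make(map[string]string)
	scanner := bufio.NewScanner(strings.NewReader(script))
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.Contains(line, "export") {
			continue
		}
		splitVals := strings.Split(line, "export ")
		envVarDef := splitVals[len(splitVals)-1]
		keyVal := strings.Split(envVarDef, "=")
		if len(keyVal) != 2 {
			return envVars, fmt.Errorf("couldn't parse %s", line)
		}
		envVars[keyVal[0]] = keyVal[1]
	}
	return envVars, scanner.Err()
}

func main() {
	script := "export FOO=bar\nexport BAZ=qux\n"
	vars, err := parseExports(script)
	if err != nil {
		panic(err)
	}
	fmt.Println(vars) // map[BAZ:qux FOO:bar]
}
```

The hunk that follows (@@ -1,84 +0,0 @@) deletes an entire 84-line test file for the config package; its replacement presumably lives alongside the new testdata fixtures added further down in this diff.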
@@ -1,84 +0,0 @@
package config

import (
"os"
"path"
"reflect"
"strings"
"testing"
)

var testFolder = os.ExpandEnv("$PWD/../../tests/resources/test_folder")
var validAbraConf = os.ExpandEnv("$PWD/../../tests/resources/valid_abra_config")

// make sure these are in alphabetical order
var tFolders = []string{"folder1", "folder2"}
var tFiles = []string{"bar.env", "foo.env"}

var appName = "ecloud"
var serverName = "evil.corp"

var expectedAppEnv = AppEnv{
"DOMAIN": "ecloud.evil.corp",
"RECIPE": "ecloud",
}

var expectedApp = App{
Name: appName,
Recipe: expectedAppEnv["RECIPE"],
Domain: expectedAppEnv["DOMAIN"],
Env: expectedAppEnv,
Path: expectedAppFile.Path,
Server: expectedAppFile.Server,
}

var expectedAppFile = AppFile{
Path: path.Join(validAbraConf, "servers", serverName, appName+".env"),
Server: serverName,
}

var expectedAppFiles = map[string]AppFile{
appName: expectedAppFile,
}

// var expectedServerNames = []string{"evil.corp"}

func TestGetAllFoldersInDirectory(t *testing.T) {
folders, err := GetAllFoldersInDirectory(testFolder)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(folders, tFolders) {
t.Fatalf("did not get expected folders. Expected: (%s), Got: (%s)", strings.Join(tFolders, ","), strings.Join(folders, ","))
}
}

func TestGetAllFilesInDirectory(t *testing.T) {
files, err := GetAllFilesInDirectory(testFolder)
if err != nil {
t.Fatal(err)
}
var fileNames []string
for _, file := range files {
fileNames = append(fileNames, file.Name())
}
if !reflect.DeepEqual(fileNames, tFiles) {
t.Fatalf("did not get expected files. Expected: (%s), Got: (%s)", strings.Join(tFiles, ","), strings.Join(fileNames, ","))
}
}

func TestReadEnv(t *testing.T) {
env, err := ReadEnv(expectedAppFile.Path)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(env, expectedAppEnv) {
t.Fatalf(
"did not get expected application settings. Expected: DOMAIN=%s RECIPE=%s; Got: DOMAIN=%s RECIPE=%s",
expectedAppEnv["DOMAIN"],
expectedAppEnv["RECIPE"],
env["DOMAIN"],
env["RECIPE"],
)
}
}
1 pkg/config/testdata/abraconfig1/abra.yaml (vendored, normal file)
@@ -0,0 +1 @@
abraDir: foobar

0 pkg/config/testdata/abraconfig1/subdir/.gitkeep (vendored, normal file)
0 pkg/config/testdata/abraconfig2/abra.yml (vendored, normal file)
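The new abra.yaml fixture holds a single abraDir key. The code that consumes it is not visible in this truncated diff, so the following is only a sketch of how such a file could be read with gopkg.in/yaml.v3; the abraConfig struct and the hard-coded fixture path are assumptions for illustration, not the project's actual loader:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

// abraConfig mirrors the single key in the testdata fixture; the real
// config struct used by abra is not shown in this part of the diff.
type abraConfig struct {
	AbraDir string `yaml:"abraDir"`
}

func main() {
	// Path taken from the new testdata file added in this diff.
	data, err := os.ReadFile("pkg/config/testdata/abraconfig1/abra.yaml")
	if err != nil {
		panic(err)
	}

	var cfg abraConfig
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}

	fmt.Println(cfg.AbraDir) // foobar
}
```

The empty abra.yml and .gitkeep fixtures presumably exist to exercise the cases where no configuration, or only an empty one, is present.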
Some files were not shown because too many files have changed in this diff.