Compare commits

...

681 Commits

Author SHA1 Message Date
b81f5651d3 fmt
All checks were successful
continuous-integration/drone/pr Build is passing
2024-03-11 16:14:36 +01:00
7d80f4d56b fix
Some checks failed
continuous-integration/drone/pr Build is failing
2024-03-11 16:07:15 +01:00
114bdc5ce9 Merge remote-tracking branch 'upstream/main' into upgrade-cli 2024-03-11 15:40:57 +01:00
40c0fb4bac fix-integration-tests (!403)
All checks were successful
continuous-integration/drone/push Build is passing
In preparation for the new abra release, let's fix all integration tests

After merging, this needs to be cherry-picked into the release-0-9 branch.

  - [x] app_backup.bats (skip this one)
  - [x] app_check.bats (fixed by bd21014fed)
  - [x] app_cmd.bats (partially fixed in 08232b74f6), has known regression coop-cloud/organising#581
  - [x] app_config.bats (no changes needed)
  - [x] app_cp.bats (no changes needed)
  - [x] app_deploy.bats
  - [x] app_errors.bats (no changes needed)
  - [x] app_list.bats (no changes needed)
  - [x] app_logs.bats (no changes needed)
  - [x] app_new.bats (no changes needed)
  - [x] app_ps.bats (no changes needed)
  - [x] app_remove.bats (fixed by [2f29fbeb2e](coop-cloud/abra#403/commits/2f29fbeb2e018656413fa25f8615b7a98cdcb083))
  - [x] app_restart.bats (no changes needed)
  - [x] app_restore.bats (fixed by [f2dd5afc38](coop-cloud/abra#403/commits/f2dd5afc38a25a8316899fa0c6d59499445868d7))
  - [x] app_rollback.bats (partially fixed by 6e99b74c24)
  - [x] app_run.bats (no changes needed)
  - [x] app_secret.bats (fixed by bd069d32f6)
  - [x] app_services.bats (no changes needed)
  - [x] app_undeploy.bats (no changes needed)
  - [x] app_upgrade.bats (no changes needed)
  - [x] app_version.bats (partially fixed by ad323ad2bd)
  - [x] app_volume.bats (fixed by [03c3823770](coop-cloud/abra#403/commits/03c38237707ae795b723180eb07a7edc84a8de35))
  - [x] autocomplete.bats (no changes needed)
  - [x] catalogue.bats (no changes needed)
  - [x] dirs.bats (no changes needed)
  - [x] install.bats (fails, but this is expected)
  - [x] recipe_diff.bats (no changes needed)
  - [x] recipe_fetch.bats (no changes needed)
  - [x] recipe_lint.bats (fixed by [b6b0808066](coop-cloud/abra#403/commits/b6b0808066a11e4bcd77517ec39600d500bcb944))
  - [x] recipe_list.bats (no changes needed)
  - [x] recipe_new.bats (fixed by [0aac464ded](coop-cloud/abra#403/commits/0aac464ded6b43afb3ec37ade2f64d6191b9838f))
  - [x] recipe_release.bats (no changes needed)
  - [x] recipe_reset.bats (no changes needed)
  - [x] recipe_sync.bats (no changes needed)
  - [x] recipe_upgrade.bats (fixed by [ab86904cf4](coop-cloud/abra#403/commits/ab86904cf45db89c7c189ca1fd9971909bd446dd))
  - [x] recipe_version.bats (fixed by 81897bf4da)
  - [x] server_add.bats
  - [x] server_list.bats
  - [x] server_prune.bats (no changes needed)
  - [x] server_remove.bats
  - [x] upgrade.bats
  - [x] version.bats (no changes needed)

Co-authored-by: decentral1se <cellarspoon@riseup.net>
Reviewed-on: coop-cloud/abra#403
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-03-11 13:27:21 +00:00
8a7d17f37b fix
Some checks failed
continuous-integration/drone/pr Build is failing
2024-02-23 14:57:28 +01:00
deb4293fba wip try to get tests running 2024-02-23 14:51:10 +01:00
ac39d6ab97 partially fix tests/integration/app_cmd.bats 2024-02-23 14:29:30 +01:00
428426b6b7 upgrade urfave/cli to v2 2024-02-23 14:29:22 +01:00
0643df6d73 feat: fetch all recipes when no recipe is specified (!401)
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#530

Reviewed-on: coop-cloud/abra#401
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-01-24 15:01:33 +00:00
e9b99fe921 make installer save abra-download to /tmp/ directory
All checks were successful
continuous-integration/drone/push Build is passing
The current download location is ~/.local/bin/, but this
conflicts with some security tools.
2024-01-24 14:27:09 +00:00
4920dfedb3 fix: retry docker volume remove (!399)
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#509
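
A minimal sketch of such a retry loop using the Docker Go client (the retry count and delay here are illustrative assumptions, not abra's actual values):
```
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/docker/docker/client"
)

// removeVolumeWithRetry retries volume removal a few times, since swarm can
// still hold a reference to the volume shortly after an undeploy.
func removeVolumeWithRetry(ctx context.Context, cl *client.Client, name string, retries int) error {
	var err error
	for i := 0; i < retries; i++ {
		if err = cl.VolumeRemove(ctx, name, false); err == nil {
			return nil
		}
		time.Sleep(2 * time.Second) // illustrative delay between attempts
	}
	return fmt.Errorf("removing volume %s failed after %d attempts: %w", name, retries, err)
}

func main() {
	cl, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	if err := removeVolumeWithRetry(context.Background(), cl, "myapp_data", 3); err != nil {
		fmt.Println(err)
	}
}
```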

Reviewed-on: coop-cloud/abra#399
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-01-19 15:09:00 +00:00
0a3624c15b feat: add version input to abra app new (!400)
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#519

Reviewed-on: coop-cloud/abra#400
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-01-19 15:08:41 +00:00
c5687dfbd7 feat: backup revolution
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#485
2024-01-12 22:01:08 +01:00
ca91abbed9 fix: correct append service name logic in Filters function (!396)
All checks were successful
continuous-integration/drone/push Build is passing
This fixes a regression introduced by #395

Reviewed-on: coop-cloud/abra#396
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2023-12-22 12:08:12 +00:00
d4727db8f9 feat: abra app logs shows task errors (!395)
All checks were successful
continuous-integration/drone/push Build is passing
The log command now checks for the ready state in the task list. If a task is not ready, it shows the task logs. This might look like this:
```
ERRO[0000] Service abra-test-recipe_default_app: State rejected: No such image: ngaaaax:1.21.0
ERRO[0000] Service abra-test-recipe_default_app: State preparing:
ERRO[0000] Service abra-test-recipe_default_app: State rejected: No such image: ngaaaax:1.21.0
ERRO[0000] Service abra-test-recipe_default_app: State rejected: No such image: ngaaaax:1.21.0
ERRO[0000] Service abra-test-recipe_default_app: State rejected: No such image: ngaaaax:1.21.0
```
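
A rough sketch of the underlying idea, using the Docker Go client (service name, filter, and the exact state check are illustrative, not abra's actual code):
```
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

// reportTaskErrors lists the tasks of a service and prints the state and
// error of any task that is not running, e.g. "rejected: No such image".
func reportTaskErrors(ctx context.Context, cl *client.Client, service string) error {
	opts := types.TaskListOptions{Filters: filters.NewArgs(filters.Arg("service", service))}
	tasks, err := cl.TaskList(ctx, opts)
	if err != nil {
		return err
	}
	for _, task := range tasks {
		if task.Status.State != swarm.TaskStateRunning {
			fmt.Printf("Service %s: State %s: %s\n", service, task.Status.State, task.Status.Err)
		}
	}
	return nil
}

func main() {
	cl, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	if err := reportTaskErrors(context.Background(), cl, "abra-test-recipe_default_app"); err != nil {
		panic(err)
	}
}
```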

Closes coop-cloud/organising#518

Reviewed-on: coop-cloud/abra#395
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2023-12-14 13:15:24 +00:00
af8cd1f67a feat: abra release now asks for a release note (!393)
All checks were successful
continuous-integration/drone/push Build is passing
This implements coop-cloud/organising#540 by checking if a `release/next` file exists and, if so, moving it to `release/<tag>`. When no release notes exist, it prompts for them.
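
Under the hood this is essentially a file check, a rename, and a prompt; a minimal Go sketch of the described behaviour (paths and the prompt wording are assumptions):
```
package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
)

// moveReleaseNotes moves release/next to release/<tag> if it exists,
// otherwise it prompts the user to type release notes.
func moveReleaseNotes(recipeDir, tag string) error {
	next := filepath.Join(recipeDir, "release", "next")
	target := filepath.Join(recipeDir, "release", tag)

	if _, err := os.Stat(next); err == nil {
		return os.Rename(next, target)
	}

	fmt.Printf("no release notes found, enter notes for %s: ", tag)
	notes, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return err
	}
	return os.WriteFile(target, []byte(notes), 0o644)
}

func main() {
	if err := moveReleaseNotes(".", "1.0.0+1.0.0"); err != nil {
		fmt.Println(err)
	}
}
```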

Reviewed-on: coop-cloud/abra#393
Reviewed-by: moritz <moritz.m@local-it.org>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2023-12-12 14:46:20 +00:00
cdd7516e54 chore: go mod tidy [ci skip] 2023-12-04 22:56:58 +01:00
99e3ed416f fix: secret name generation when secretId is not part of the secret name
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-04 21:52:09 +00:00
02b726db02 add comments to better explain how the length modifier gets added to the secret
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-04 17:30:26 +00:00
2de6934322 feat: abra app cp enhancements
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-02 15:39:27 +00:00
cb49cf06d1 chore: drop old godotenv pointers [ci skip]
Follows 9affda8a70
2023-12-02 13:02:24 +01:00
9affda8a70 chore: update godotenv fork commit pointer
All checks were successful
continuous-integration/drone/push Build is passing
Follows coop-cloud/abra#391
2023-12-02 12:59:42 +01:00
3957b7c965 proper env modifiers support
All checks were successful
continuous-integration/drone/push Build is passing
This implements proper modifier support in the env file using this new fork of the godotenv library. The modifier implementation is quite basic for now but can be improved later if needed. See this commit for the actual implementation.

Because we now use proper modifier parsing, it no longer affects the parsing of values, so this is possible again:
```
MY_VAR="#foo"
```
Closes coop-cloud/organising#535
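
For context, a length modifier in a recipe env file typically looks something like the following; the exact modifier syntax shown here is an assumption, see the linked fork for the real implementation:
```
# the trailing comment is parsed as a modifier, not as part of the value
SECRET_DB_PASSWORD_VERSION=v1 # length=32

# while a quoted "#" stays part of the value
MY_VAR="#foo"
```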
2023-12-01 11:03:52 +00:00
0d83339d80 fix(ssh): increase connection timeout #482
All checks were successful
continuous-integration/drone/push Build is passing
see coop-cloud/organising#482
2023-11-30 16:35:53 +01:00
6e54ec7213 test: skip failing test for now
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#535.
2023-11-28 11:42:36 +01:00
66b40a9189 fix: just run it in place [ci skip] 2023-11-27 11:25:01 +01:00
049f02f063 docs: add p4u1 [ci skip] 2023-11-27 11:23:03 +01:00
15857e6453 fix: clean up after cp'ing script [ci skip]
Follows 31e0ed75b0.
2023-11-27 11:21:46 +01:00
31e0ed75b0 build: target for docker building
Some checks failed
continuous-integration/drone/push Build is failing
Adapted from coop-cloud/abra#384.

Thanks @cas.
2023-11-27 11:15:59 +01:00
b1d3fcbb0b add integration test
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-27 10:01:33 +00:00
7b6134f35e add bash completion for abra cmd 2023-11-27 10:01:33 +00:00
316b59b465 test: support local-first testing
Some checks failed
continuous-integration/drone/push Build is failing
Cherry-picked from coop-cloud/abra#389

Thanks @p4u1.
2023-11-27 10:41:46 +01:00
92b073d5b6 chore: go mod tidy
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-27 10:28:43 +01:00
9b0dd933b5 chore(deps): update module github.com/schollz/progressbar/v3 to v3.14.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-11-10 08:00:52 +00:00
f255fa1555 chore(deps): update module github.com/hashicorp/go-retryablehttp to v0.7.5
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-11-09 08:00:33 +00:00
74200318ab chore(deps): update module github.com/schollz/progressbar/v3 to v3.14.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-11-07 08:01:11 +00:00
609656b4e1 chore(deps): update module golang.org/x/sys to v0.14.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-11-06 08:00:33 +00:00
856c9f2f7d chore: go mod tidy
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-04 09:37:15 +01:00
bd5cdd3443 chore(deps): update module github.com/docker/docker to v24.0.7
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-10-30 08:00:53 +00:00
79d274e074 chore(deps): update module github.com/docker/cli to v24.0.7
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-10-27 07:01:16 +00:00
51e3df17f1 chore(deps): update module github.com/go-git/go-git/v5 to v5.10.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-10-26 07:00:33 +00:00
ccf0215495 hotfix: parse values starting with # correctly
Some checks failed
continuous-integration/drone/push Build is failing
2023-10-23 19:21:45 +02:00
254df7f2be feat: app cmd ls
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#484
2023-10-17 21:16:31 +02:00
6a673ef101 refactor: filter by topic when building catalogue
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#377
2023-10-16 18:42:38 +02:00
7f7f7224c6 feat: diff on release flow
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Also, don't commit unstaged files.
2023-10-16 18:31:22 +02:00
f96bf9a8ac feat: recipe reset, recipe diff
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#511
2023-10-15 12:56:52 +02:00
dcecf32999 chore: bump version for installer script [ci skip] 2023-10-11 19:31:28 +02:00
bc88dac150 test: reset before changing files
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-11 19:29:19 +02:00
704c0e9c74 test: adapt failing tests to new changes 2023-10-11 18:34:08 +02:00
c9bb7e15c2 fix: bring back docker build
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-10 07:27:49 +02:00
d90c9b88f1 fix: include ca-certs to avoid x509 error [ci skip] 2023-10-10 00:50:43 +02:00
69ce07f81f fix: ignore build files for docker [ci skip] 2023-10-09 23:40:41 +02:00
85b90ef80c fix: bail if --chaos and specific version
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#503.
2023-10-09 20:54:44 +00:00
3e511446aa refactor: use app check emoji here too
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-09 22:53:46 +02:00
7566b4262b fix: set go version to 1.21
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-10-09 22:07:30 +02:00
c249c6ae9c fix: fix: trim comments that are not modifiers
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#505
2023-10-09 14:42:05 +02:00
be693e9df0 fix: trim comments that are not modifiers
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
See coop-cloud/organising#505
2023-10-08 22:42:34 +02:00
a43125701c test: optimise default make target for abra hacking [ci skip] 2023-10-07 10:32:42 +02:00
b57edb440a fix: improve app check
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#446
2023-10-06 10:56:33 +02:00
6fc4573a71 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-06 09:49:03 +02:00
cbe6676881 chore(deps): update module golang.org/x/sys to v0.13.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-10-06 07:00:49 +00:00
b4fd39828f test: abra-integration-test-recipe -> abra-test-recipe
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/abra-test-recipe#3
2023-10-05 14:22:11 +02:00
14f2d72aba refactor!: lowercase, hyphenate keys
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
This will potentially break scripts, so time to discuss!
2023-10-05 08:36:01 +02:00
57692ec3c9 feat: add --machine to secret ls
See coop-cloud/organising#481
2023-10-04 23:08:39 +02:00
47d3b77003 refactor: not generating here, skipping
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-10-04 15:13:15 +02:00
8078e91e52 fix: warn if secrets not generated
See coop-cloud/organising#499
2023-10-04 15:13:14 +02:00
dc5d3a8dd6 test: build, init & test in one stage
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-10-04 14:37:09 +02:00
ab6107610c test: skip build step, test will do it 2023-10-04 14:36:59 +02:00
e837835e00 test: remove duplicate call to EnsureCatalogue
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-10-04 14:05:02 +02:00
c646263e9e fix: validate COMPOSE_FILE
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is passing
See coop-cloud/organising#468.
See coop-cloud/organising#376.
2023-10-04 13:27:04 +02:00
422c642949 fix: ensure ipv4 is checked, not sometimes ipv6
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#490
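
A minimal sketch of how to force an IPv4-only lookup in Go (illustrative, not abra's exact code):
```
package main

import (
	"context"
	"fmt"
	"net"
)

// lookupIPv4 resolves only A records, so the check never accidentally
// compares an IPv6 address for one domain against an IPv4 address for another.
func lookupIPv4(ctx context.Context, host string) ([]net.IP, error) {
	return net.DefaultResolver.LookupIP(ctx, "ip4", host)
}

func main() {
	ips, err := lookupIPv4(context.Background(), "example.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(ips)
}
```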
2023-10-04 09:29:10 +00:00
379915587c fix: don't export from within function
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Also, don't explode on a command function which has "export" in the name!

See coop-cloud/organising#498
2023-10-04 11:20:50 +02:00
970ae0fc4e test: use _test to avoid cyclic imports 2023-10-04 02:36:44 +02:00
d11ad61efb docs: make chaos flag description more generic [ci skip] 2023-10-04 01:34:53 +02:00
54dc696c69 build: fix targets for small local builds
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-03 09:31:57 +02:00
7e3ce9c42a chore: go mod tidy 2023-10-03 09:30:26 +02:00
7751423c7d chore(deps): update module github.com/docker/distribution to v2.8.3
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-10-03 07:00:43 +00:00
f18f0b6f82 build: set ABRA_DIR explicitly
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-09-30 08:26:20 +02:00
892f6c0730 test: ensure catalogue is cloned 2023-09-30 08:19:16 +02:00
b53fd2689c test: add unit test for TestEnsureDomainsResolveSameIPv4 2023-09-30 08:19:02 +02:00
906bf65d47 test: moar domain check tests [ci skip] 2023-09-29 09:31:25 +02:00
1e6a6e6174 fix: app logs retrieves recipe
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-27 09:19:57 +02:00
1e4f1b4ade build: disable publish image for now
Some checks reported errors
continuous-integration/drone/push Build was killed
continuous-integration/drone Build is failing
It's failing for unknown reasons and blocks releases.

See coop-cloud/recipes-catalogue-json#6
2023-09-25 17:51:30 +02:00
306fe02d1c chore: tag 0.8.x series
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-09-25 17:33:09 +02:00
e4610f8ad5 test: make int test script lighter [ci skip] 2023-09-25 16:45:08 +02:00
e1f900de14 test: fix app_secret generate tests [ci skip] 2023-09-25 16:32:16 +02:00
d5b18d74ef fix: use secretId to match secret names in configs
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-25 15:51:15 +02:00
776a83d8d1 fix: use new GetComposeFiles API 2023-09-25 15:51:03 +02:00
810cea8269 test: bats does output for us [ci skip] 2023-09-25 12:14:35 +02:00
c0f3e6f2a4 test: integration test script [ci skip] 2023-09-25 12:00:39 +02:00
7b240059b0 test: fix app_backup recipe cleanups [ci skip] 2023-09-25 11:50:29 +02:00
c456d13881 test: fix recipe_* tests [ci skip] 2023-09-25 11:27:36 +02:00
c7c553164d test: fix refute output check [ci skip] 2023-09-25 11:21:36 +02:00
7616528f4e test: ensure app cleanup 2023-09-25 11:20:56 +02:00
6cd85f7239 test: dont assert_success for check [ci skip] 2023-09-25 11:11:29 +02:00
b1774cc44b test: fix app_check tests 2023-09-25 10:52:47 +02:00
e438fc6e8e test: reset recipe in file teardown 2023-09-25 10:52:27 +02:00
c065ceb1f0 test: secret generation & --offline/chaos handling tests
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-09-25 10:33:15 +02:00
ce4b775428 build: require 1.18 due to slices.Contains usage 2023-09-25 10:32:41 +02:00
d02f659bf8 fix: secrets from config, --offline/chaos handling, typos
See coop-cloud/organising#464
2023-09-25 10:31:59 +02:00
f3ded88ed8 fix: app version includes tags, sorts & tests
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#442
2023-09-24 11:19:27 +02:00
bf648eeb5d fix: recipe versions sorts, aligns & spaces 2023-09-24 11:18:26 +02:00
533edbf172 fix: recipe versions lists correctly (also -m) 2023-09-24 10:56:02 +02:00
78b8cf9725 test: fix git tag command [ci skip] 2023-09-24 00:56:00 +02:00
f0560ca975 test: no args for helpers, fix recipe_* tests [ci skip] 2023-09-23 23:57:52 +02:00
ce7b4733d7 test: tag/git helpers & refactor [ci skip] 2023-09-23 23:19:49 +02:00
575bfbb0fb test: test arguments, notes, local tag lookup
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-23 09:17:24 +02:00
510ce66570 feat: version arguments, local tag lookups & release notes
See:
* coop-cloud/organising#441
* coop-cloud/organising#204
* coop-cloud/organising#493
2023-09-23 09:15:27 +02:00
82631d9ab1 fix: don't output if no tags 2023-09-23 09:15:17 +02:00
358490e939 refactor: deploy output wording 2023-09-23 09:14:45 +02:00
79b9cc9be7 fix: --offline/--chaos handlings for backup/check/cmd/restore
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-22 09:47:36 +02:00
9b6eb613aa test: woops, keep unit test target default
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-21 12:06:41 +02:00
8f1231e409 test: integration test for abra app upgrade [ci skip] 2023-09-21 11:52:58 +02:00
aa37c936eb test: pass arg to _checkout_recipe 2023-09-21 11:52:21 +02:00
3d1158a425 fix: don't read TIMEOUT for version= label
Some checks failed
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#451
2023-09-21 11:33:45 +02:00
8788558cf1 fix: only sync version label once
Some checks failed
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#492
2023-09-21 10:58:17 +02:00
76035e003e fix: recipe workflow with integration tests
Some checks failed
continuous-integration/drone/push Build is failing
2023-09-21 10:36:53 +02:00
b708382d26 feat: recipe lint supports --chaos 2023-09-21 09:07:00 +02:00
557b670fc5 docs: improve recipe fetch usage/desc [ci skip] 2023-09-21 08:46:33 +02:00
e116148c49 test: ensure catalogue --chaos works [ci skip]
Closes coop-cloud/organising#462.
2023-09-20 14:19:49 +02:00
d5593b69e0 test: ensure 3 commits behind, ignore output on fail [ci skip] 2023-09-20 14:10:07 +02:00
0be532692d test: moar integration tests [ci skip]
Some checks failed
continuous-integration/drone/pr Build is failing
2023-09-20 13:51:06 +02:00
7a9224b2b2 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-19 12:38:02 +02:00
e73d1a8359 chore(deps): update module gotest.tools/v3 to v3.5.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-19 07:02:01 +00:00
f8c49c82c8 fix: skip "abra-integration-test-recipe" also
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-18 14:02:38 +02:00
ab7edd2a62 refactor!: drop "record" & "server new" command
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
These were alpha prototypes and we'll reconsider once other layers of
Abra are more stable.
2023-09-14 16:45:01 +02:00
b1888dcf0f chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-14 09:39:28 +02:00
e5e122296f chore(deps): update module github.com/go-git/go-git/v5 to v5.9.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-13 07:01:51 +00:00
83bf148304 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-07 14:34:40 +02:00
d80b882b83 chore(deps): update module github.com/docker/docker to v24.0.6
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-09-07 07:02:43 +00:00
c345c6f5f1 chore(deps): update module github.com/docker/cli to v24.0.6
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-06 07:01:56 +00:00
f8c4fd72a3 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-05 13:56:34 +02:00
10f612f998 test: more integration tests
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-05 13:03:38 +02:00
58e78e4d7c fix: overridable ABRA_DIR
Some checks failed
continuous-integration/drone/push Build is failing
2023-09-05 09:58:13 +00:00
25258d3d64 fix: separate abra/kababra makefile targets 2023-09-05 09:58:13 +00:00
b3bd058962 chore: don't join if nothing to join 2023-09-05 09:58:13 +00:00
b4fd7fd77c fix: clone catalogue on initial run 2023-09-05 09:58:13 +00:00
64cfdae6b7 fix: only load client if creating secrets 2023-09-05 09:58:13 +00:00
0a765794f2 test: write initial automatic integration tests 2023-09-05 09:58:13 +00:00
18dc6e9434 feat: support abra testing mode 2023-09-05 09:58:13 +00:00
4ba4107288 chore(deps): update module golang.org/x/sys to v0.12.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-04 07:02:01 +00:00
d9b4f4ef3b chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-08-26 09:58:46 +02:00
c365dcf96d chore(deps): update module github.com/hetznercloud/hcloud-go to v1.50.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-08-25 07:02:00 +00:00
0c6a7cc0b8 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.49.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-08-18 07:01:42 +00:00
6640cfab64 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-08-13 17:42:24 +02:00
71addcd1b2 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.49.0
Some checks failed
continuous-integration/drone/push Build is failing
2023-08-13 15:41:44 +00:00
60c0e55e3d fix: don't specify refs when pulling tags
Some checks failed
continuous-integration/drone/push Build is failing
See coop-cloud/organising#477
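
With go-git (which abra uses), fetching tags without pinning refspecs might look roughly like this (a sketch only):
```
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5"
)

// fetchAllTags fetches tags without specifying refspecs, so tags that do not
// sit on the configured branch refs are still retrieved.
func fetchAllTags(repoPath string) error {
	repo, err := git.PlainOpen(repoPath)
	if err != nil {
		return err
	}
	err = repo.Fetch(&git.FetchOptions{Tags: git.AllTags})
	if err != nil && err != git.NoErrAlreadyUpToDate {
		return err
	}
	return nil
}

func main() {
	if err := fetchAllTags("."); err != nil {
		fmt.Println(err)
	}
}
```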
2023-08-13 12:07:37 +00:00
e42139fd83 chore(deps): update golang docker tag to v1.21
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-08-09 07:02:07 +00:00
2d826e47d0 chore(deps): update module golang.org/x/sys to v0.11.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-08-07 07:01:55 +00:00
2db172ea5a Further changes to messages.
All checks were successful
continuous-integration/drone/push Build is passing
2023-08-04 19:22:48 +00:00
2077658f6a Attempt to replace the deploy completed message. 2023-08-04 19:22:48 +00:00
502e26b534 Change message when starting to poll for deployment status. 2023-08-04 19:22:48 +00:00
e22b692ada Add os hook for interrupt signal while waiting for service to converge. 2023-08-04 19:22:48 +00:00
5ae73f700e Merge branch 'fix-deploy-no-catalogue'
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-08-02 10:48:54 +02:00
63d419caae Merge branch 'fix-478' 2023-08-02 10:48:46 +02:00
179b66d65c Merge branch 'fix-476' 2023-08-02 10:48:37 +02:00
c9144d90f3 refactor: integration -> manual 2023-08-02 08:45:24 +02:00
ebf5d82c56 fix: failover if no recipe meta available
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2023-08-02 00:48:27 +02:00
8bb98ed0ed fix: deploy fresh recipe without versions
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
See coop-cloud/organising#476
2023-08-01 21:47:34 +02:00
23f5745cb8 fix: skip recipe clone / up to date sync for some commands
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
Continues work of 3dc5662821.
2023-08-01 21:19:20 +02:00
2cd453ae8d build: attempt to ignore goreleaser upgrades
Some checks failed
continuous-integration/drone/push Build is failing
See e42cc0f91d.
2023-08-01 19:33:36 +02:00
e42cc0f91d Revert "chore(deps): update goreleaser/goreleaser docker tag to v1.19.2"
This reverts commit 1de45a6508.

See 8fa9419c99.
2023-08-01 19:31:51 +02:00
1de45a6508 chore(deps): update goreleaser/goreleaser docker tag to v1.19.2
Some checks failed
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is failing
2023-07-31 07:02:04 +00:00
55c7aca3c0 chore: publish 0.8.0-rc2-beta
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-07-29 00:31:49 +02:00
8fa9419c99 build: pin to goreleaser v18 [ci skip]
See coop-cloud/organising#474
2023-07-29 00:22:01 +02:00
73ad0a802e Revert "build: replacements is deprecated"
This reverts commit 473cae0146.

Aiming to freeze on an old version of goreleaser for now.
2023-07-29 00:14:08 +02:00
798fd2336c chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-07-27 21:48:49 +02:00
70e65d6667 chore(deps): update module github.com/go-git/go-git/v5 to v5.8.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-07-27 07:06:22 +00:00
efc9602808 chore: welcome comrade rix [ci skip] 2023-07-26 09:59:22 +02:00
1e110f1375 docs: wording [ci skip] 2023-07-26 09:58:30 +02:00
473cae0146 build: replacements is deprecated
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-07-26 09:18:52 +02:00
2da859896a fix: point to rc1 [ci skip] 2023-07-26 08:53:39 +02:00
ab00578ee1 chore: publish 0.8.0-rc1-beta
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-07-26 08:52:33 +02:00
3dc5662821 fix: improved offline support
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#471.
2023-07-26 08:16:07 +02:00
ab64eb2e8d fix: only use git to update local catalogue
See coop-cloud/organising#321.
2023-07-25 21:13:04 +02:00
4f22228aab feat: lint for lightweight tags
See coop-cloud/organising#433
2023-07-25 20:38:29 +02:00
a7f1af7476 refactor: drop internal deploy package 2023-07-25 18:03:37 +02:00
949510d4c3 revert: always clone latest recipe changes
Some checks failed
continuous-integration/drone/push Build is failing
This change was about trying to optimise for offline scenarios but
caused a lot of issues for the online case. It needs to be thought
through again.

See coop-cloud/organising#471.

Closes coop-cloud/organising#432.
2023-07-25 15:05:01 +00:00
9f478dac1d fix: list downgrades/upgrades in correct order
Some checks failed
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is failing
Now that we have correct sorting of versions:

  coop-cloud/organising#427

We don't need to reverse sort, except when showing prompts where the
latest should come first. Otherwise, logic can follow the sorted
order: the last item in the list is the latest upgrade.

Related:

  coop-cloud/organising#444

Also fix `upgrade` to actually show the latest version
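
A small sketch of that sorted-order assumption, using the coreos/go-semver module already in the dependency list (illustrative versions):
```
package main

import (
	"fmt"
	"sort"

	"github.com/coreos/go-semver/semver"
)

func main() {
	raw := []string{"2.0.1", "1.10.0", "1.2.0"}

	versions := make([]*semver.Version, 0, len(raw))
	for _, r := range raw {
		versions = append(versions, semver.New(r))
	}

	// Sort ascending: the last element is the latest version, so upgrade
	// logic can simply follow the list order instead of reverse sorting.
	sort.Sort(semver.Versions(versions))

	fmt.Println(versions[len(versions)-1]) // 2.0.1
}
```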
2023-07-25 15:08:32 +02:00
69f38ea445 fix: always show overview, even with -f
coop-cloud/organising#444
2023-07-25 15:08:10 +02:00
0582147874 fix: better error message for missing local tag
Aiming to handle the following scenario better:

coop-cloud/organising#444 (comment)
2023-07-25 15:07:29 +02:00
bdeeb75973 fix: upgrade force logic parity with deploy force logic
coop-cloud/organising#444 (comment)
2023-07-25 15:06:50 +02:00
2518e65e3e chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-07-25 10:22:02 +02:00
8354c92654 Merge remote-tracking branch 'origin/renovate/main-github.com-docker-docker-24.x' 2023-07-25 10:21:16 +02:00
173e81b885 chore(deps): update module github.com/docker/docker to v24.0.5
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-07-25 07:05:53 +00:00
d91731518b chore(deps): update module github.com/docker/cli to v24.0.5
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-07-25 07:05:47 +00:00
2bfee5058d chore(deps): update module github.com/go-git/go-git/v5 to v5.8.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-07-24 07:03:29 +00:00
a7ce71d6cf Fix formatting.
All checks were successful
continuous-integration/drone/push Build is passing
2023-07-15 08:15:46 +00:00
10f60fee1d Replace deprecated system.TempFileSequential with os.CreateTemp 2023-07-15 08:15:46 +00:00
6025ab443f Update volume list options. 2023-07-15 08:15:46 +00:00
43ecf35449 Change CommonOptions (deprecated) to ClientOptions and remove unneeded parameters. 2023-07-15 08:15:46 +00:00
4d2a1065d2 Replace types.volume with new volume type 2023-07-15 08:15:46 +00:00
0b67500cab Add docker v24 and associated dependencies. 2023-07-15 08:15:46 +00:00
e0c3a06182 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-07-14 19:47:09 +02:00
a86ba4e97b chore(deps): update module github.com/hetznercloud/hcloud-go to v2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-07-14 07:03:04 +00:00
b5b3395138 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.48.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-07-13 07:03:02 +00:00
502b78ef5c chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-07-05 09:50:38 +02:00
3e2b4dae6a chore(deps): update module golang.org/x/sys to v0.10.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-07-05 07:02:41 +00:00
573fe403b3 chore(deps): update module gotest.tools/v3 to v3.5.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-30 07:02:40 +00:00
76862e9d66 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-06-22 16:44:53 +02:00
e8e337a608 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.47.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-06-22 07:02:07 +00:00
500389c5f5 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.46.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-19 07:02:59 +00:00
dea665652c chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-06-16 18:10:02 +02:00
e8cf84b523 chore(deps): update module golang.org/x/sys to v0.9.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-16 07:03:47 +00:00
fab25a6124 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.46.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-15 07:03:10 +00:00
e71377539c chore(deps): update module github.com/alecaivazis/survey/v2 to v2.3.7
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-14 07:02:51 +00:00
497ecf476a docs: wording [ci skip] 2023-06-12 00:09:52 +02:00
ff1c043ec5 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-06-07 10:45:17 +02:00
c4d2e297f8 Merge remote-tracking branch 'origin/renovate/main-coopcloud.tech-libcapsul-digest' 2023-06-07 10:44:35 +02:00
e98b8e3666 chore(deps): update module github.com/hashicorp/go-retryablehttp to v0.7.4
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-06-07 07:02:31 +00:00
f5835fe404 chore(deps): update coopcloud.tech/libcapsul digest to 878af47
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-07 07:02:13 +00:00
07bbe9394f chore(deps): update module github.com/sirupsen/logrus to v1.9.3
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-05 07:02:38 +00:00
6974681af5 fix: improve error message
All checks were successful
continuous-integration/drone/push Build is passing
2023-05-29 14:57:41 +02:00
73250fb899 chore(deps): update module github.com/go-git/go-git/v5 to v5.7.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-24 07:02:28 +00:00
4ce377cffe chore(deps): update module github.com/sirupsen/logrus to v1.9.2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-19 07:03:02 +00:00
c7dd029689 chore(deps): update module github.com/docker/cli to v20.10.25
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-18 07:02:30 +00:00
51319d2ae2 chore(deps): update module github.com/docker/docker to v20.10.25
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-16 07:03:23 +00:00
d1c2343a54 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.45.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-15 07:03:03 +00:00
135ffde0e5 chore(deps): update module github.com/docker/distribution to v2.8.2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-12 07:03:02 +00:00
6e4dd51b27 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-05-08 11:42:50 +02:00
81b652718b chore(deps): update module github.com/hetznercloud/hcloud-go to v1.44.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-08 07:02:38 +00:00
442f46e17f chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-05-05 10:59:22 +02:00
574794d4e8 Merge remote-tracking branch 'origin/renovate/main-golang.org-x-sys-0.x' 2023-05-05 10:58:25 +02:00
88184125c4 chore(deps): update module golang.org/x/sys to v0.8.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-05 07:03:15 +00:00
8a4baa66ee chore(deps): update module github.com/klauspost/pgzip to v1.2.6
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-05 07:03:01 +00:00
16ecbd0291 chore(deps): update module github.com/moby/term to v0.5.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-03 07:02:46 +00:00
f65b262c11 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.43.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-04-28 07:03:28 +00:00
c5d9d88359 Add some minor tweaks to machine readable pathway in recipe upgrade
Some checks failed
continuous-integration/drone/push Build is failing
2023-04-27 16:45:57 +00:00
87e5909363 Make -m imply -n in recipe/upgrade 2023-04-27 16:45:57 +00:00
152c5d4563 Add machine output for recipe/upgrade
- Normal faff related to calling external libraries with structs, thanks Go
- Outputs JSON now
2023-04-27 16:45:57 +00:00
34b274bc52 recipe/upgrade: Refactor upgradability list to make output easier
In the future, we can print the struct as JSON.
2023-04-27 16:45:57 +00:00
62f8103fc2 recipe/upgrade: Add non-interactive mode.
Add support for -n which just outputs the list of compatible tags for each image.
2023-04-27 16:45:57 +00:00
2dcbfa1d65 chore(deps): update module github.com/coreos/go-semver to v0.3.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-04-26 07:02:52 +00:00
049da94629 fix(version): semver version ordering (!293)
All checks were successful
continuous-integration/drone/push Build is passing
Solves coop-cloud/organising#427

This fix sorts the recipe versions at catalogue generation time as well as the versions received from the catalogue.

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: coop-cloud/abra#293
2023-04-26 06:38:15 +00:00
b2739dcdf2 fix(deploy) post deploy cmds
All checks were successful
continuous-integration/drone/push Build is passing
2023-04-18 19:05:46 +02:00
343b2bfb91 docs: go doc badge [ci skip] 2023-04-14 23:31:21 +02:00
17aeed6dbd chore: go mod tidy [ci skip] 2023-04-14 19:09:53 +02:00
27cac81830 fix(app): fix app list chaos field
All checks were successful
continuous-integration/drone/push Build is passing
show only the chaos version if the app is a chaos deploy
2023-04-14 18:01:08 +02:00
31ec322c55 feat(deploy): set timeout via label (!290)
All checks were successful
continuous-integration/drone/push Build is passing
Solves coop-cloud/organising#437

A timeout can be specified globally for a recipe using this label:
`coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-120}`. This sets the default timeout to 120s. An app-specific timeout can be set using the env var `TIMEOUT`.
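
In a recipe's compose.yml this typically appears as a deploy label, roughly like so (a sketch based on the label quoted above):
```
services:
  app:
    deploy:
      labels:
        - "coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-120}"
```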

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: coop-cloud/abra#290
2023-04-14 14:44:18 +00:00
18615eaaef Post-deploy abra.sh hooks (!292)
All checks were successful
continuous-integration/drone/push Build is passing
This solves coop-cloud/organising#235

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: coop-cloud/abra#292
2023-04-14 14:12:31 +00:00
5e508538f3 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.42.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-04-13 07:02:01 +00:00
9e05000476 fix(kadabra): always pull new recipe version
All checks were successful
continuous-integration/drone/push Build is passing
2023-04-06 17:22:33 +02:00
f088a0d327 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-04-06 10:00:14 +02:00
3832403c97 chore(deps): update module github.com/docker/docker to v20.10.24
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-04-06 07:01:59 +00:00
47058c897c chore(deps): update module github.com/docker/cli to v20.10.24
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-04-05 07:02:26 +00:00
5d4c7f8ef0 chore(deps): update module golang.org/x/sys to v0.7.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-04-04 16:33:38 +00:00
ee4315adb3 fix(rm): remove volumes correctly during app removal
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-30 13:40:44 +02:00
9ade250f01 feat(cmd): add --tty flag to run commands from a script
Some checks failed
continuous-integration/drone/push Build is failing
2023-03-29 14:25:08 +02:00
81b032be85 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-21 09:19:02 +01:00
5409990a68 chore(deps): update module github.com/go-git/go-git/v5 to v5.6.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-03-17 08:03:41 +00:00
b1595c0ec9 chore(deps): update module github.com/schollz/progressbar/v3 to v3.13.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-03-15 08:02:29 +00:00
6c99a2980b chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-07 16:35:20 +01:00
a9405a36c6 Merge remote-tracking branch 'origin/renovate/main-github.com-hetznercloud-hcloud-go-1.x' 2023-03-07 16:34:23 +01:00
15a417d9bd fix(list): fix output of chaos + chaos-version merge
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-07 13:46:19 +01:00
0ce8b3a5c2 Merge pull request 'app ls --status shows more details about the deployment state' (!280) from detailed_app_list into main
All checks were successful
continuous-integration/drone/push Build is passing
Reviewed-on: coop-cloud/abra#280
2023-03-07 12:31:38 +00:00
edff63b446 Revert "review: change label autoupdate -> auto-update"
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
This reverts commit 74baa76f5ee5e5dd7b71b1f14be97cc40dfc611b.
2023-03-07 13:24:46 +01:00
d5979436c1 review: merge chaos + chaos_version column 2023-03-07 13:24:46 +01:00
cb33edaac3 review: change label autoupdate -> auto-update 2023-03-07 13:24:46 +01:00
e9879e2226 review: label convention chaos_version -> chaos-version 2023-03-07 13:24:46 +01:00
5428ebf43b review: avoid stackName recalculation 2023-03-07 13:24:46 +01:00
d120299929 feat(list): show autoupdate state 2023-03-07 13:24:46 +01:00
3753357ef8 feat(list): show chaos status and chaos version 2023-03-07 13:24:46 +01:00
611430aab2 Set chaos version label for each deployed or upgraded app 2023-03-07 13:24:46 +01:00
f56b02b951 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.41.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-03-07 08:02:02 +00:00
f29278f80a chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-06 15:04:15 +01:00
a9a294cbb7 chore(deps): update module golang.org/x/sys to v0.6.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-03-06 08:02:17 +00:00
73004789a4 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-04 08:50:24 +01:00
440aba77d5 chore(deps): update module github.com/go-git/go-git/v5 to v5.6.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-03-01 08:02:11 +00:00
e4a89bcc4f fix(kadabra): only warn if a deployed app has no published release
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-28 15:34:27 +01:00
eb07617e73 chore: publish new release 0.7.0-beta
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-02-22 08:47:43 +01:00
9fca4e56fb docs: add comrade vera [ci skip] 2023-02-19 11:00:43 +01:00
f17523010a chore: publish next tag 0.7.0-rc3-beta
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-19 10:51:10 +01:00
3058178d84 fix: if all servers good, don't show empty table
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-19 10:34:47 +01:00
d62c4e3400 refactor: improved logging on pruning
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-19 10:28:18 +01:00
5739758c3a fix: give more time to tear down state [ci skip] 2023-02-17 11:11:28 +01:00
a6b5566fa6 refactor: clarify prune scope, not system wide [ci skip] 2023-02-17 11:09:44 +01:00
4dbe1362a8 docs: more clarity on prune functionality
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-17 11:00:02 +01:00
98fc36c830 refactor: hopefully more robust prune logic, docs 2023-02-17 10:59:06 +01:00
b8abc8705c docs: volumes pruning docs - more warnings 2023-02-17 10:42:38 +01:00
636261934f refactor: pass args in, docs, rename, lower-case logs 2023-02-17 10:23:00 +01:00
6381b73a6a chore: use lower-case like elsewhere 2023-02-17 10:21:56 +01:00
1a72e27045 refactor: add server auto-complete & cosmetics 2023-02-17 10:12:46 +01:00
9754c1b2d1 feat: server auto-complete on remove sub-command
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-17 10:10:48 +01:00
b14ec0cda4 review cleanups
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-17 08:53:43 +00:00
c7730ba604 Adding server prune and undeploy prune 2023-02-17 08:53:43 +00:00
47c61df444 docs: add comrade yksflip [ci skip] 2023-02-15 11:26:20 +01:00
312b93e794 fix: no gitops on recipe for "app new"
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#408
2023-02-15 00:49:22 +01:00
992e675921 refactor: use passed down conf to decide 2023-02-15 00:35:33 +01:00
d4f3a7be31 docs: add comrade codegod100 [ci skip] 2023-02-14 17:16:25 +01:00
d619f399e7 Update 'cli/app/undeploy.go'
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-14 13:59:35 +00:00
96a8cb7aff chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-14 14:29:25 +01:00
9b51d22c20 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.40.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-02-14 08:02:28 +00:00
d789830ce4 feat: adds --since flag for abra app logs
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-02-14 00:19:38 +01:00
e4b4084dfd fix: stream logs without hitting git.coopcloud.tech
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Medium-sized options refactor in here too!

See coop-cloud/organising#292.
2023-02-13 16:46:43 +01:00
ff58646cfc fix: better error message when network gone 2023-02-13 12:33:00 +01:00
eec6469ba1 fix: Change error message to reflect RECIPE -> TYPE
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#409
2023-02-12 16:40:48 +01:00
e94f947d20 fix: don't create clients twice per server
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#407
2023-02-12 00:02:59 +01:00
cccbe4a2ec fix: typo [ci skip] 2023-02-11 23:53:42 +01:00
f53cfb6c36 fix: better error message when missing context [ci skip] 2023-02-11 23:49:01 +01:00
f55f01a25c build: verbose local builds to show progress 2023-02-11 23:40:47 +01:00
ce5c1a9ebb chore: 0.7.0-rc2-beta
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-10 12:47:20 +01:00
5e3b039f93 fix: kadabra is now called kadabra not abra
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#402
2023-02-10 12:45:41 +01:00
0e9d218bbc docs: fix comment names 2023-02-10 12:45:24 +01:00
e1c635af86 chore: remove newline [ci skip] 2023-02-08 23:49:01 +01:00
f6b139dfea chore: formatting pass on kadabra [ci skip] 2023-02-08 23:20:25 +01:00
3d2b8fa446 chore: spacing 2023-02-08 23:02:54 +01:00
2eebac6fc0 chore: formatting, indentation 2023-02-08 22:59:47 +01:00
f5e2710138 chore: remove comment 2023-02-08 22:59:30 +01:00
986470784d chore: sort gitignore listing 2023-02-08 22:59:03 +01:00
e76ed771df feat: kadabra, the app auto-updater (!268)
All checks were successful
continuous-integration/drone/push Build is passing
coop-cloud/organising#236

Autoupdater `kadabra` is ready for testing.
It should run on the server, check for available minor/patch updates and automatically upgrade the apps.

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: coop-cloud/abra#268
2023-02-08 18:53:04 +00:00
f28af5e42f fix: use correctly formatted comments
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-08 11:28:38 +01:00
fdf4854b0c fix: unbork comments
Some checks failed
continuous-integration/drone/push Build is failing
Was breaking the build but not anymore!
2023-02-08 11:20:30 +01:00
6b9512d09c build: docker dev builds depend on check too 2023-02-08 11:16:54 +01:00
21a86731d0 build: dont test/build if check fails
Some checks failed
continuous-integration/drone/push Build is failing
Save cycles for small mistakes.
2023-02-08 11:13:20 +01:00
91102e6607 build: not so useful anymore, also broken 2023-02-08 11:12:03 +01:00
fadafda0b8 fix: make test suite work again 2023-02-08 11:11:39 +01:00
c03cf76702 chore: gofmt import statements
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-02-08 10:56:39 +01:00
ebb748b7e7 chore: publish next tag 0.7.0-rc1-beta
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-02-08 10:28:54 +01:00
2b3dbee24c chore: go mod tidy
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-07 22:20:11 +01:00
a448cfdd0d fix: revert bogus dependabot changes
Revert "chore(deps): update module github.com/docker/cli to v23"
This reverts commit 5ee6eb53b2.

Revert "chore(deps): update module github.com/docker/docker to v23"
This reverts commit 7b2880d425.
2023-02-07 22:19:28 +01:00
5ee6eb53b2 chore(deps): update module github.com/docker/cli to v23
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-07 21:16:18 +00:00
7b2880d425 chore(deps): update module github.com/docker/docker to v23
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-07 21:16:06 +00:00
928d6f5d7f chore(deps): update module golang.org/x/sys to v0.5.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-02-07 08:02:03 +00:00
29fa607190 fix: restrict pulling to specific branch
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-02 21:12:50 +01:00
7c541ffdfa fix: better error handling in EnsureUpToDate 2023-02-02 21:12:24 +01:00
7ccc4b4c08 fix: woops, remove that print statement
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-02 21:00:31 +01:00
ef4df35995 fix: don't check twice (called in EnsureUpToDate)
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-02 20:59:04 +01:00
71a9155042 fix: specify refs when fetching tags
See coop-cloud/organising#397
2023-02-02 20:58:38 +01:00
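For context on the fix above, a minimal go-git example of fetching with explicit tag refspecs; this illustrates the technique, not the exact abra call site.

```go
package main

import (
	"log"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/config"
)

func main() {
	repo, err := git.PlainOpen(".") // e.g. a locally cloned recipe repository
	if err != nil {
		log.Fatal(err)
	}
	// Explicit refspec so all tags are fetched, independent of branch config.
	err = repo.Fetch(&git.FetchOptions{
		RemoteName: "origin",
		RefSpecs:   []config.RefSpec{"+refs/tags/*:refs/tags/*"},
		Tags:       git.AllTags,
	})
	if err != nil && err != git.NoErrAlreadyUpToDate {
		log.Fatal(err)
	}
}
```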
2a88491d7c fix: catch errors here too
Some checks failed
continuous-integration/drone/push Build is failing
See coop-cloud/abra#266
2023-02-02 20:26:19 +01:00
bf79552204 fix: improve permission denied message
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-02 20:07:45 +01:00
0a7fa54759 fix: can't pass client here
Some checks failed
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#396
2023-02-02 20:06:49 +01:00
7c1a97be72 refactor!: consolidate SSH handling
Some checks failed
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#389.
Closes coop-cloud/organising#341.
Closes coop-cloud/organising#326.
Closes coop-cloud/organising#380.
Closes coop-cloud/organising#360.
2023-02-02 08:37:14 +00:00
f20fbbc913 chore(deps): update golang docker tag to v1.20
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-02-02 08:02:02 +00:00
76717531bd resolve PR: include the service info in the log message
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-31 16:15:11 +01:00
6774893412 add env var ENABLE_AUTO_UPDATE as a label to enable/disable the auto-update process
Some checks reported errors
continuous-integration/drone/pr Build was killed
2023-01-31 16:12:02 +01:00
ebb86391af add a label to signal that a deploy is a chaos deploy (!265)
Some checks failed
continuous-integration/drone/push Build is failing
Resolves coop-cloud/organising#390 by adding the following label `coop-cloud.${STACK_NAME}.chaos=true` (according to the version label).
This is required for the auto updater coop-cloud/organising#236 (comment)

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: coop-cloud/abra#265
2023-01-31 15:06:35 +00:00
50db39424c add a label to signal that a deploy is connected with a recipe (!264)
Some checks failed
continuous-integration/drone/push Build is failing
Resolves coop-cloud/organising#391 by adding the following label `coop-cloud.${STACK_NAME}.recipe=${RECIPE}` (according to the version label).
This is required for the auto updater coop-cloud/organising#236 (comment)

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: coop-cloud/abra#264
2023-01-31 14:35:43 +00:00
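Taken together, the two label commits above mean a deployment carries a recipe marker and a chaos marker next to its version label, so the auto-updater can read them back from the swarm. A small illustrative sketch of building that label set (assumed shape, not the actual abra code; the version label key is also an assumption):

```go
package main

import "fmt"

// deployLabels builds the coop-cloud deployment labels described in the two
// commits above: version, recipe and chaos markers keyed by the stack name.
func deployLabels(stackName, recipe, version string, chaos bool) map[string]string {
	prefix := fmt.Sprintf("coop-cloud.%s", stackName)
	return map[string]string{
		prefix + ".version": version,
		prefix + ".recipe":  recipe,
		prefix + ".chaos":   fmt.Sprintf("%v", chaos),
	}
}

func main() {
	fmt.Println(deployLabels("wordpress_example_com", "wordpress", "1.0.0+5.9.1", false))
}
```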
ca1ea32c46 Expose all env vars to app container. (!263)
Some checks failed
continuous-integration/drone/push Build is failing
Resolves coop-cloud/organising#393 and is required for the auto updater coop-cloud/organising#236 (comment)

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: coop-cloud/abra#263
2023-01-31 14:13:43 +00:00
32851d4d99 fix: always fetch all repository tags
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-01-31 11:52:15 +01:00
c47aa49373 fix: improved missing context message
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-24 10:48:53 +01:00
cdee6b00c4 docs: better auto-completion help
Some checks failed
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#328
2023-01-23 19:01:00 +01:00
a3e9383a4a docs: wording [ci skip] 2023-01-23 18:48:51 +01:00
b4cce7dcf4 fix: better warning if flying < 3.8 compose spec
Some checks failed
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#350
2023-01-23 18:42:23 +01:00
b089109c94 fix: more robust docker context problem handling
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
See coop-cloud/organising#325
See coop-cloud/organising#340
2023-01-23 14:56:34 +01:00
27e0708ac7 fix: don't delete server dir on cleanup if not empty
Part of coop-cloud/organising#325.
2023-01-23 13:56:27 +01:00
a93786c6be fix!: make "app rm" more explicit & simpler
We point users to "app volume/secret remove" for more specific deletion
of other app data resources now. The idea is that if you lose the env
file locally, then you can't clean up anything after. So it is handy to
have a sort of WARNING barrier to deleting that file. This flow is the
only way to get Abra to delete your local env file. It now feels more
documented and sufficiently scary in the UI/UX to merit that. Hopefully
addresses the ticket sufficiently.

Closes coop-cloud/organising#335.
2023-01-23 13:29:46 +01:00
521570224b Merge branch 'filter-servers-by-recipe'
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-23 09:33:31 +01:00
c72462e0b6 fix: no domain checks if no DOMAIN=... configured
Closes coop-cloud/organising#353
2023-01-23 09:33:12 +01:00
54646650c7 fix!: disable traefik linting when DOMAIN isn't present
Some checks failed
continuous-integration/drone/push Build is failing
Also reformats the linting output to be more readable.

Closes coop-cloud/organising#319.
2023-01-23 08:31:00 +00:00
903aac9d7a feat: recipe fetch command
Some checks reported errors
continuous-integration/drone/pr Build was killed
continuous-integration/drone/push Build was killed
Also may have rooted out another go-git cloning bug 🙄

Closes coop-cloud/organising#365
2023-01-23 09:26:53 +01:00
49865c6a97 feat: app services command
Some checks reported errors
continuous-integration/drone/push Build was killed
Closes coop-cloud/organising#372
2023-01-23 08:25:17 +00:00
a694c8c20e feat: filter server by recipe
Closes coop-cloud/organising#363
2023-01-23 00:54:22 +01:00
d4a42d8378 fix: error out if no backup configs found
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-01-22 18:50:45 +01:00
e16ca45fa7 fix!: better backup file names
Closes coop-cloud/organising#366
2023-01-22 18:50:27 +01:00
32de2ee5de fix: ensure catalogue is clean/up-to-date
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#367
2023-01-22 17:52:36 +01:00
834d41ef50 docs: wording [ci skip] 2023-01-22 10:07:58 +01:00
6fe5aed408 fix!: remove digest handling
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#379
2023-01-22 08:54:13 +01:00
03041b88d0 chore: gofmt
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-21 23:26:23 +01:00
9338afb492 chore: go mod tidy
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-20 10:16:14 +01:00
313ae0dbe2 chore(deps): update module github.com/docker/cli to v20.10.23
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-20 09:12:52 +00:00
0dc7ec8570 chore(deps): update module github.com/docker/docker to v20.10.23
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-01-20 08:02:52 +00:00
8a1a3aeb12 ci: automerge & run tidy [ci skip] 2023-01-18 17:28:36 +01:00
3wc
910469cfa0 chore: switch to dev image by default
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-15 19:48:07 -08:00
3wc
4f055096e9 chore: fix Drone build, ignore auto-recipes-catalogue-json
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-15 18:16:53 -08:00
3wc
6c93f980dc chore: tweak docker build
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-01-15 18:08:22 -08:00
3wc
57f52bbf33 chore: disable go cache for now, parallelise build
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-15 17:16:32 -08:00
3wc
9f5620d881 chore: attempt to fix drone build
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-01-15 17:11:50 -08:00
3wc
44c4555aae chore: attempt to enable go caching for docker image build
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-01-15 17:10:57 -08:00
3wc
025d1e0a8c chore: tweak drone image building 2023-01-15 17:10:52 -08:00
3wc
f484021148 feat: add docker image, auto-built using CI 2023-01-15 17:10:45 -08:00
3wc
1403eac72c fix: parse "Status" field during catalogue generate 2023-01-15 17:10:45 -08:00
a6e23938eb Add tests to jsontable.
- Test major functionality of jsontable
- Fix bug discovered in testing.
2023-01-15 17:10:36 -08:00
cae0d9ef79 Introduce a JSON output table mechanic
- Create JSONTable as a proxy/extension to tablewriter which can also output JSON.
- Implement machine readable output for `server list` and `recipe list`
2023-01-12 21:15:14 +00:00
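A minimal stdlib-only sketch of the JSONTable idea described above (the merged code wraps tablewriter instead, so this is only an approximation): collect headers and rows once, then render either a plain table or JSON when machine-readable output is requested.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"
	"text/tabwriter"
)

// JSONTable collects headers and rows and renders them either as an aligned
// text table or as JSON for machine consumption.
type JSONTable struct {
	Headers []string
	Rows    [][]string
	JSON    bool // set from a hypothetical machine-readable output flag
}

func (t *JSONTable) Append(row []string) { t.Rows = append(t.Rows, row) }

func (t *JSONTable) Render() error {
	if t.JSON {
		out := make([]map[string]string, 0, len(t.Rows))
		for _, row := range t.Rows {
			m := map[string]string{}
			for i, h := range t.Headers {
				m[h] = row[i]
			}
			out = append(out, m)
		}
		return json.NewEncoder(os.Stdout).Encode(out)
	}
	w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0)
	fmt.Fprintln(w, strings.Join(t.Headers, "\t"))
	for _, row := range t.Rows {
		fmt.Fprintln(w, strings.Join(row, "\t"))
	}
	return w.Flush()
}

func main() {
	t := &JSONTable{Headers: []string{"server", "apps"}, JSON: true}
	t.Append([]string{"example.com", "3"})
	t.Render()
}
```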
89fcb5b216 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-01-06 10:05:20 +01:00
56b3e9bb19 chore(deps): update module github.com/go-git/go-git/v5 to v5.5.2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-01-06 08:02:14 +00:00
9aa4a98b0b chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-01-05 17:45:33 +01:00
5fbba0c934 Merge remote-tracking branch 'origin/renovate/main-golang.org-x-sys-0.x' 2023-01-05 17:44:53 +01:00
d772f4b2c6 Merge remote-tracking branch 'origin/renovate/main-golang.org-x-crypto-0.x' 2023-01-05 17:44:31 +01:00
7513fbd57d chore(deps): update module golang.org/x/sys to v0.4.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-01-05 08:02:28 +00:00
9082761f86 chore(deps): update module golang.org/x/crypto to v0.5.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-01-05 08:02:09 +00:00
a3bd6e14d0 chore(deps): update module github.com/schollz/progressbar/v3 to v3.13.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-01-05 08:01:57 +00:00
49dd702d98 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-01-04 09:36:35 +01:00
e4cd5e3efe Merge remote-tracking branch 'origin/renovate/main-github.com-hetznercloud-hcloud-go-1.x' 2023-01-04 09:36:13 +01:00
1db4602020 chore(deps): update module github.com/hashicorp/go-retryablehttp to v0.7.2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-01-04 08:01:58 +00:00
b50718050b chore(deps): update module github.com/hetznercloud/hcloud-go to v1.39.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-12-30 08:01:53 +00:00
3wc
9e39e1dc88 docs: fix typo in error message
All checks were successful
continuous-integration/drone/push Build is passing
2022-12-22 19:27:42 -08:00
1a3a53cfc2 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-12-19 09:06:28 +01:00
5f53d591f8 chore(deps): update module github.com/docker/docker to v20.10.22 2022-12-19 09:06:27 +01:00
d7013518cc chore(deps): update module github.com/docker/cli to v20.10.22
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-12-19 08:01:53 +00:00
3wc
b204b289d1 fix: disable progress bar with machine-readable output
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2022-12-16 10:20:51 -08:00
3a0d9f7ed7 chore: 0.6.0-beta release
Some checks failed
continuous-integration/drone/push Build is passing
continuous-integration/drone Build is failing
2022-12-13 16:09:36 +01:00
e794c17fb4 chore: authors add & sort [ci skip] 2022-12-13 16:07:05 +01:00
e788ac21f6 fix: keep abra working if recipe catalogue is offline (!235)
All checks were successful
continuous-integration/drone/push Build is passing
Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: coop-cloud/abra#235
2022-12-13 14:42:45 +00:00
4e78b060e0 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-12-12 10:50:38 +01:00
4fada9c1b7 Merge remote-tracking branch 'origin/renovate/main-github.com-hetznercloud-hcloud-go-1.x' 2022-12-12 10:50:18 +01:00
08d26e1a39 Merge remote-tracking branch 'origin/renovate/main-github.com-schollz-progressbar-v3-3.x' 2022-12-12 10:49:42 +01:00
581b28a2b1 Merge remote-tracking branch 'origin/renovate/main-golang.org-x-crypto-0.x' 2022-12-12 10:49:32 +01:00
e4d58849ce chore(deps): update module github.com/go-git/go-git/v5 to v5.5.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-12-12 08:01:31 +00:00
5e8b9d9bf7 chore(deps): update module golang.org/x/crypto to v0.4.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-12-08 08:01:33 +00:00
11dd665794 chore(deps): update module github.com/schollz/progressbar/v3 to v3.12.2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-12-07 08:01:51 +00:00
ba163e9bf3 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.38.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-12-06 08:01:41 +00:00
09048ee223 Done did make format
All checks were successful
continuous-integration/drone/push Build is passing
2022-12-05 18:03:13 +00:00
19a055b59b Add myself to the AUTHORS.md 2022-12-05 18:03:13 +00:00
1b28a07e17 Minor stylistic improvements to MR output in list. 2022-12-05 18:03:13 +00:00
82866cd213 Partial implementation of machine readable output.
- Implement global flag for machine readable output.
- Add machine readable output (as JSON) to list command.
2022-12-05 18:03:13 +00:00
47f3d2638b chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-12-05 09:30:46 +01:00
a3b894320a Merge remote-tracking branch 'origin/renovate/main-golang.org-x-sys-0.x' 2022-12-05 09:29:33 +01:00
9424a58c52 chore(deps): update module golang.org/x/sys to v0.3.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-12-05 08:01:48 +00:00
1751ba534e chore(deps): update module github.com/go-git/go-git/v5 to v5.5.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-12-05 08:01:25 +00:00
3wc
a21d431541 fix: don't panic() 😅
All checks were successful
continuous-integration/drone/push Build is passing
2022-11-24 17:33:59 +00:00
3wc
8fad34e430 fix: switch back to replacing <recipe>.example.com
Fixes #355
2022-11-24 17:33:59 +00:00
a036de3c26 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-11-17 13:23:19 +01:00
4c2109e8ce chore(deps): update module golang.org/x/crypto to v0.3.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-11-17 08:01:01 +00:00
7f745ff19f feat(cmd)!: run abra.sh commands with /bin/bash if available.
All checks were successful
continuous-integration/drone/push Build is passing
BREAKING CHANGE: abra.sh commands that depend on /bin/sh will break

Closes coop-cloud/organising#357.

See coop-cloud/abra#229.
2022-11-15 23:01:57 +01:00
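A hedged sketch of the shell-selection change above (the real check targets the app container rather than the local filesystem): prefer /bin/bash when present and only then fall back to /bin/sh.

```go
package main

import (
	"fmt"
	"os"
)

// pickShell prefers /bin/bash and falls back to /bin/sh, mirroring the idea
// in the commit above. This checks the local filesystem only as a sketch.
func pickShell() string {
	if _, err := os.Stat("/bin/bash"); err == nil {
		return "/bin/bash"
	}
	return "/bin/sh"
}

func main() {
	fmt.Printf("abra.sh commands would run under %s -c '...'\n", pickShell())
}
```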
521d3d1259 feat(autocomplete): add autocompletion for fish shell
All checks were successful
continuous-integration/drone/push Build is passing
2022-11-15 22:24:34 +01:00
14187449a5 fix: fork passgen
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#358
2022-11-14 15:18:54 +01:00
2037f4cc19 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-11-11 17:40:42 +01:00
05d492d30b chore(deps): update module github.com/hetznercloud/hcloud-go to v1.37.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-11-11 08:01:11 +00:00
9591e91ed6 feat(cmd): make env variables accessible for local abra.sh commands
All checks were successful
continuous-integration/drone/push Build is passing
2022-11-10 11:12:35 +00:00
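An illustrative sketch of running a local abra.sh target with the app's env vars exported into the process environment (names and layout are assumptions, not the merged code):

```go
package main

import (
	"fmt"
	"log"
	"os"
	"os/exec"
)

// runLocalCommand sources abra.sh and runs one of its functions locally, with
// the app's env file appended to the inherited environment.
func runLocalCommand(abraSh, target string, appEnv map[string]string) error {
	cmd := exec.Command("/bin/bash", "-c", fmt.Sprintf(". %s; %s", abraSh, target))
	cmd.Env = os.Environ()
	for k, v := range appEnv {
		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v))
	}
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	return cmd.Run()
}

func main() {
	env := map[string]string{"DOMAIN": "example.com"}
	if err := runLocalCommand("./abra.sh", "some_command", env); err != nil {
		log.Fatal(err)
	}
}
```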
f6f587e506 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-11-10 11:47:16 +01:00
4f28dbee87 chore(deps): update module golang.org/x/crypto to v0.2.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-11-10 08:01:07 +00:00
ad1cc038e3 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.36.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-11-09 08:01:00 +00:00
15dbd85d25 chore(deps): update module golang.org/x/sys to v0.2.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-11-08 08:00:59 +00:00
2a97955586 chore(deps): update module github.com/schollz/progressbar/v3 to v3.12.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-11-07 08:00:59 +00:00
9e44d1dfba chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-11-04 14:52:56 +01:00
87ad8e2761 chore(deps): update module github.com/schollz/progressbar/v3 to v3.12.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-11-03 08:01:03 +00:00
cfe703b15d chore(deps): update module github.com/docker/cli to v20.10.21
Some checks failed
continuous-integration/drone/push Build is failing
2022-10-27 08:44:38 +00:00
96503fa9e9 chore(deps): update module github.com/docker/docker to v20.10.21
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-10-26 07:01:18 +00:00
07d49d8566 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-10-22 14:19:10 +02:00
5a7c25375a Merge remote-tracking branch 'origin/renovate/main-gotest.tools-v3-3.x' 2022-10-22 14:18:35 +02:00
652143e76c Merge remote-tracking branch 'origin/renovate/main-golang.org-x-sys-0.x' 2022-10-22 14:18:25 +02:00
8afce6eebf Merge remote-tracking branch 'origin/renovate/main-golang.org-x-crypto-0.x' 2022-10-22 14:17:35 +02:00
d3e6c9dc94 Merge remote-tracking branch 'origin/renovate/main-github.com-docker-docker-20.x' 2022-10-22 14:17:26 +02:00
4fd0ca3dd1 chore(deps): update module golang.org/x/crypto to v0.1.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-10-20 07:00:57 +00:00
dc0b6c2c8c chore(deps): update module github.com/docker/docker to v20.10.20
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-10-19 07:00:57 +00:00
54f242baf7 chore(deps): update module github.com/docker/cli to v20.10.20
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-10-19 07:00:51 +00:00
07620c7d89 chore(deps): update module golang.org/x/sys to v0.1.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-10-18 07:01:15 +00:00
1cae4cce4e chore(deps): update module gotest.tools/v3 to v3.4.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-10-10 07:01:50 +00:00
9347ade82c chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-09-20 10:42:53 +02:00
3fa18a8050 Merge remote-tracking branch 'origin/renovate/main-github.com-schollz-progressbar-v3-3.x' 2022-09-20 10:42:25 +02:00
4ac67662a2 Merge remote-tracking branch 'origin/renovate/main-github.com-hetznercloud-hcloud-go-1.x' 2022-09-20 10:42:17 +02:00
d1be4077c5 Merge remote-tracking branch 'origin/renovate/main-github.com-gliderlabs-ssh-0.x' 2022-09-20 10:42:09 +02:00
5a88c34a7c Merge remote-tracking branch 'origin/renovate/main-github.com-docker-go-units-0.x' 2022-09-20 10:41:57 +02:00
2e452e3213 Merge remote-tracking branch 'origin/renovate/main-github.com-docker-docker-20.x' 2022-09-20 10:41:30 +02:00
9d16a8e10c Merge remote-tracking branch 'origin/renovate/main-github.com-docker-cli-20.x' 2022-09-20 10:41:22 +02:00
8755a6c3b4 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.35.3
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-09-20 07:01:14 +00:00
8cee8ae33a chore(deps): update module github.com/schollz/progressbar/v3 to v3.11.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-09-12 07:01:55 +00:00
15b138e026 chore(deps): update module github.com/docker/docker to v20.10.18
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-09-12 07:01:39 +00:00
4a8ed36dea chore(deps): update module github.com/docker/cli to v20.10.18
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-09-12 07:01:28 +00:00
7d0c3cc496 chore(deps): update module github.com/alecaivazis/survey/v2 to v2.3.6
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-09-12 07:01:18 +00:00
3cf479ffd5 chore(deps): update module github.com/docker/go-units to v0.5.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-09-01 07:01:24 +00:00
d402050a40 chore(deps): update module github.com/gliderlabs/ssh to v0.3.5
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-09-01 07:01:15 +00:00
664edce09d build: fix matching to ignore deps upgrade [ci skip] 2022-08-15 12:32:19 +02:00
e41caa891d fix: don't check IP on server when it is local
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#334.
2022-08-14 22:20:17 +02:00
42a6818ff4 fix: app cmd parsing, usage & tests
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
Note: the integration tests don't work due to ValidateApp still
attempting to validate the host key for the test app which doesn't
exist. This will be fixed in a future commit.
2022-08-14 16:18:58 +02:00
8f709c05bf build: ignore merges, chores & sort 2022-08-12 01:11:25 +02:00
a4ebf7befc docs: add frando & fix intro [ci skip] 2022-08-11 17:50:19 +02:00
8458e61d17 fix: branch checking logic
See https://github.com/go-git/go-git/issues/518 for why this is needed.
2022-08-11 17:49:22 +02:00
b42d5bf113 fix: ignore until coop-cloud/organising#336 is fixed [ci skip]
See coop-cloud/organising#336
2022-08-04 12:39:04 +03:00
f684c6d6e4 fix: drop back to urfave@v1.22.5 for parsing fix
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#336
2022-08-03 14:40:01 +03:00
6593baf9f4 chore(deps): update golang docker tag to v1.19
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2022-08-03 07:01:11 +00:00
50123f3810 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-08-02 11:25:13 +03:00
d132e87f14 Merge remote-tracking branch 'origin/renovate/main-github.com-schollz-progressbar-v3-3.x' 2022-08-02 11:24:03 +03:00
37a1c3fb85 chore(deps): update module github.com/schollz/progressbar/v3 to v3.9.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-08-01 07:01:38 +00:00
c8183aa6d1 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.35.2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-08-01 07:01:21 +00:00
4711de29ae chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-07-21 15:03:17 +03:00
b719aaba41 Merge remote-tracking branch 'origin/renovate/main-github.com-sirupsen-logrus-1.x' 2022-07-21 15:02:25 +03:00
074c51b672 chore(deps): update module github.com/sirupsen/logrus to v1.9.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-07-20 07:01:14 +00:00
1aa6be704a chore(deps): update module github.com/schollz/progressbar/v3 to v3.8.7
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-07-20 07:01:06 +00:00
e8e3cb8598 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-07-14 11:53:22 +02:00
85fec6b107 Merge remote-tracking branch 'origin/renovate/main-gotest.tools-v3-3.x' 2022-07-14 11:51:41 +02:00
12dbb061a9 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.35.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-07-05 07:01:24 +00:00
351bd7d4ba chore(deps): update module gotest.tools/v3 to v3.3.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-06-20 07:01:21 +00:00
cdc7037c25 chore: go mod tidy [ci skip] 2022-06-15 13:56:43 +02:00
682237c98e chore(deps): update module github.com/alecaivazis/survey/v2 to v2.3.5
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-06-08 07:01:45 +00:00
08d97be43a chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-06-07 09:09:08 +02:00
786dfde27e Merge commit 'c153c5d' into main 2022-06-07 09:08:55 +02:00
6e012b910e chore(deps): update module github.com/docker/docker to v20.10.17
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-06-07 07:01:42 +00:00
c153c5da2e chore(deps): update module github.com/docker/cli to v20.10.17
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-06-07 07:01:28 +00:00
0540e42168 alpha -> beta
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2022-05-31 10:23:49 +02:00
4bc95a5b52 chore: go mod tidy [ci skip] 2022-05-16 16:22:21 +02:00
febc6e2874 Merge remote-tracking branch 'origin/renovate/main-github.com-docker-docker-20.x' into main 2022-05-16 16:22:12 +02:00
b2c990bf12 Merge remote-tracking branch 'origin/renovate/main-github.com-docker-cli-20.x' into main 2022-05-16 16:22:06 +02:00
3b8893502a docs: re-word on docstrings [ci skip] 2022-05-13 16:44:49 +02:00
e0a0378f73 chore(deps): update module github.com/docker/docker to v20.10.16
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-05-13 07:01:43 +00:00
0837045d44 chore(deps): update module github.com/docker/cli to v20.10.16
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-05-13 07:01:33 +00:00
cd8137a7d8 chore: go mod tidy [ci skip] 2022-05-10 16:15:08 +02:00
ece4537a2d Merge remote-tracking branch 'origin/renovate/main-github.com-gliderlabs-ssh-0.x' into main 2022-05-10 16:14:45 +02:00
16fe1b68c6 fix: thread app name & stack name correctly
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2022-05-10 12:10:36 +02:00
e37f235fd4 chore(deps): update module github.com/gliderlabs/ssh to v0.3.4
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-05-10 07:01:27 +00:00
0423ce7e84 fix: working link [ci skip] 2022-05-10 08:32:12 +02:00
d46ac22bd7 chore: go mod tidy [ci skip] 2022-05-09 14:09:14 +02:00
cef5cd8611 Merge remote-tracking branch 'origin/renovate/main-github.com-docker-docker-20.x' into main 2022-05-09 14:04:16 +02:00
8b38dac9ab chore(deps): update module github.com/docker/docker to v20.10.15
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-05-06 07:01:51 +00:00
89fc875088 chore(deps): update module github.com/docker/cli to v20.10.15
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-05-06 07:01:31 +00:00
026a9ba2d7 chore: go mod tidy [ci skip] 2022-05-05 15:13:20 +02:00
99f2b9c6dc chore(deps): update module github.com/urfave/cli to v1.22.9
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-05-05 07:01:30 +00:00
578e91eeec chore: publish next tag 0.5.0-alpha
Some checks reported errors
continuous-integration/drone/push Build was killed
2022-05-03 17:22:54 +02:00
49f79dbd45 fix!: new catalogue URL
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2022-05-03 17:08:52 +02:00
574d556bb9 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-04-30 18:28:42 +02:00
801aad64df Merge remote-tracking branch 'origin/renovate/main-gotest.tools-v3-3.x' into main 2022-04-30 18:28:22 +02:00
b0a0829712 Merge remote-tracking branch 'origin/renovate/main-github.com-urfave-cli-1.x' into main 2022-04-30 18:28:15 +02:00
6aae06c3ec chore(deps): update module github.com/urfave/cli to v1.22.8
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-04-29 07:01:30 +00:00
d0c6fa5b45 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.33.2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-04-27 07:02:33 +00:00
c947354ee3 chore(deps): update module gotest.tools/v3 to v3.2.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-04-25 07:01:44 +00:00
9b7e5752fb chore: go mod tidy [ci skip] 2022-04-22 11:25:08 +02:00
9bc51629d4 chore(deps): update module github.com/urfave/cli to v1.22.7
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-04-22 07:01:22 +00:00
4ba15df9b7 chore: 0.4.1-alpha
Some checks reported errors
continuous-integration/drone/push Build was killed
2022-04-21 15:47:39 +02:00
5721b357a2 fix: per service logs
Some checks reported errors
continuous-integration/drone/pr Build was killed
continuous-integration/drone/push Build was killed
2022-04-21 15:40:23 +02:00
6140abbcac fix: sync to latest before commits come in
All checks were successful
continuous-integration/drone/push Build is passing
Follows from a4989e3834
2022-04-20 11:42:24 +00:00
996255188b Revert "fix: ensure we're on latest for recipe release dance"
This reverts commit 3c4bb6a55e.
2022-04-20 11:42:24 +00:00
11d78234b2 installer: add 32 bit arm support
All checks were successful
continuous-integration/drone/push Build is passing
2022-04-20 13:37:51 +02:00
c214937e4a installer: download on aarch64
All checks were successful
continuous-integration/drone/push Build is passing
2022-04-20 13:13:50 +02:00
3a3f41988b chore: publish 0.4.0-alpha
All checks were successful
continuous-integration/drone/push Build is passing
2022-04-19 14:36:56 +02:00
f6690a80bd build: upx release script [ci skip] 2022-04-19 14:34:06 +02:00
2337c4648b chore: remove unused command 2022-04-19 14:32:34 +02:00
a1190f1352 fix: show which service is getting backed up [ci skip] 2022-04-19 13:50:23 +02:00
e421922f5b fix: restore uses absolute paths & better docs
All checks were successful
continuous-integration/drone/push Build is passing
2022-04-19 13:21:12 +02:00
10d5705d1a docs: better backup docs 2022-04-19 13:20:48 +02:00
a4f1634b24 fix: backups get gzip, absolute paths, single archive file 2022-04-19 12:52:30 +02:00
cbd924060f fix: better local changes message
All checks were successful
continuous-integration/drone/push Build is passing
2022-04-19 10:29:05 +02:00
3c4bb6a55e fix: ensure we're on latest for recipe release dance
Closes coop-cloud/organising#313.
2022-04-19 10:28:49 +02:00
a0d7a76f9d fix: better error messages for release failures
See coop-cloud/organising#313
2022-04-19 10:20:35 +02:00
c71efb46ba feat: arm builds [ci skip]
See coop-cloud/organising#312
2022-04-19 10:06:14 +02:00
ce69967ec5 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-04-18 10:42:39 +02:00
1a04439b1f chore(deps): update module github.com/hashicorp/go-retryablehttp to v0.7.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-04-14 07:01:24 +00:00
979f417a63 chore: gpl this sucka [ci skip] 2022-04-05 12:18:34 +02:00
b27acb2f61 feat: backup/restore [ci skip]
All checks were successful
continuous-integration/drone/pr Build is passing
See coop-cloud/organising#30.
2022-04-03 18:24:09 +02:00
622ecc4885 docs: drop slash [ci skip] 2022-04-01 23:18:22 +02:00
ed5bbda811 docs: wording & emoji [ci skip] 2022-04-01 23:14:57 +02:00
7b627ea518 docs: nice gopher [ci skip] 2022-04-01 23:12:24 +02:00
1ac66da83f chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-04-01 10:21:16 +02:00
061de96b62 chore(deps): update module github.com/kevinburke/ssh_config to v1.2.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-04-01 07:01:23 +00:00
6998298d32 chore: publish next tag 0.4.0-alpha-rc8
Some checks reported errors
continuous-integration/drone/push Build was killed
2022-03-30 16:28:55 +02:00
323f4467c8 fix: filtering requires case-by-case handling
Some checks reported errors
continuous-integration/drone/pr Build was killed
continuous-integration/drone/push Build was killed
See https://github.com/moby/moby/issues/32985.
2022-03-30 16:25:38 +02:00
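The moby issue referenced above is that Docker's name filters match substrings, so one stack name can accidentally match another that shares a prefix. A hedged sketch of the case-by-case handling, assuming the Docker client SDK and post-filtering for an exact stack prefix:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}

	stack := "myapp_example_com"
	f := filters.NewArgs(filters.Arg("name", stack))
	services, err := cli.ServiceList(context.Background(), types.ServiceListOptions{Filters: f})
	if err != nil {
		log.Fatal(err)
	}

	// The "name" filter is a substring match, so post-filter for the exact
	// stack prefix to avoid picking up stacks that merely share a prefix.
	for _, s := range services {
		if strings.HasPrefix(s.Spec.Name, stack+"_") {
			fmt.Println(s.Spec.Name)
		}
	}
}
```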
e8e41850b5 fix: pass args to local function invocations too
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2022-03-30 11:31:16 +02:00
0e23ec53d7 refactor!: simple validation only 2022-03-30 11:30:51 +02:00
b943a8b9b1 feat: allow choosing user on remote commands 2022-03-30 11:30:36 +02:00
acc665f054 chore: publish next tag 0.4.0-alpha-rc7
Some checks reported errors
continuous-integration/drone/push Build was killed
2022-03-27 21:33:30 +02:00
860f1d6376 feat: bring back scripts interface
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#301.
2022-03-27 19:30:48 +00:00
2122f0e67c fix: avoid short command alias conflicts 2022-03-27 19:30:48 +00:00
6aa23a76a1 fix: more precise filtering
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#305.
2022-03-27 19:30:36 +00:00
338360096c feat: pass domain to new app envs
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#304.
2022-03-27 21:06:48 +02:00
7a8c7cd50f ci: drop static check
All checks were successful
continuous-integration/drone/push Build is passing
2022-03-27 13:51:40 +02:00
bafc8a8e34 chore: go mod tidy
Some checks failed
continuous-integration/drone/push Build is failing
2022-03-26 15:23:27 +01:00
3d44d8c9fd Merge remote-tracking branch 'origin/renovate/main-github.com-docker-docker-20.x' into main 2022-03-26 15:22:31 +01:00
b8b4616498 Merge remote-tracking branch 'origin/renovate/main-github.com-docker-cli-20.x' into main 2022-03-26 15:22:18 +01:00
da97117929 chore(deps): update module github.com/docker/docker to v20.10.14
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-03-24 08:01:35 +00:00
978297c464 chore(deps): update module github.com/docker/cli to v20.10.14
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-03-24 08:01:27 +00:00
11da4808fc chore(deps): update module github.com/alecaivazis/survey/v2 to v2.3.4
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-03-24 08:01:21 +00:00
4023e6a066 fix: wait until app created to check for secrets
Some checks failed
continuous-integration/drone/push Build is failing
2022-03-18 11:10:15 +01:00
f432bfdd23 fix: warn when no repo on git
Some checks failed
continuous-integration/drone/push Build is failing
2022-03-18 10:13:24 +01:00
848e17578d chore(deps): update golang docker tag to v1.18
Some checks reported errors
continuous-integration/drone/push Build was killed
continuous-integration/drone/pr Build was killed
2022-03-16 08:01:41 +00:00
1615130929 fix: skip prompt for no passwords
All checks were successful
continuous-integration/drone/push Build is passing
2022-03-15 10:54:05 +01:00
7f315315f0 fix: better prompts & matching for secret removal
All checks were successful
continuous-integration/drone/push Build is passing
2022-03-13 10:59:19 +01:00
6a50981120 fix: match on generation of single secret 2022-03-13 10:50:35 +01:00
c67471e6ca fix: show which secret was generated 2022-03-13 10:45:08 +01:00
f0fc1027e5 feat: more info on volumes. skip driver info
All checks were successful
continuous-integration/drone/push Build is passing
2022-03-12 17:11:05 +01:00
c66695d55e fix: return err not logrus + new lines 2022-03-12 17:02:04 +01:00
262009701e fix: guard against concurrent write errors 2022-03-12 16:59:45 +01:00
b31cb6b866 feat: prompt for secret generation
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#302.
2022-03-12 16:47:19 +01:00
f39e186b66 fix: match Force/NoInput where needed
All checks were successful
continuous-integration/drone/push Build is passing
2022-03-12 16:15:20 +01:00
a8f35bdf2f fix: handle NoInput for volume removal 2022-03-12 16:09:05 +01:00
6e1e02ac28 chore: use same flag docs style 2022-03-12 16:08:44 +01:00
16fc5ee54b fix: can't force remove if it is already deployed 2022-03-12 16:08:26 +01:00
37a1fcc4af fix: delete all secrets if force/noinput 2022-03-12 16:01:42 +01:00
a9b522719f fix: use name not stack name for pass storage 2022-03-12 16:01:31 +01:00
ce70932a1c feat: single char short flag for volumes removal 2022-03-12 16:01:14 +01:00
d61e104536 fix: look at removal flag for pass logic 2022-03-12 15:48:43 +01:00
d5f30a3ae4 fix: use removal flag with correct help 2022-03-12 15:48:26 +01:00
2555096510 feat: short flags for run command 2022-03-12 15:42:29 +01:00
3797292b20 fix: no domain/converge check for deploy/upgrade/rollback 2022-03-12 15:36:43 +01:00
6333815b71 fix: remove unused flag 2022-03-12 15:32:23 +01:00
793a850fd5 refactor!: short flags for server add 2022-03-12 15:30:43 +01:00
42c1450384 refactor!: prefer short flags on release 2022-03-12 15:28:33 +01:00
a2377882f6 refactor!: use single char short flags 2022-03-12 15:27:19 +01:00
e78b395662 feat: new short flag for RC upgrading 2022-03-12 15:24:19 +01:00
cdec834ca9 reformat: remove extra line in CLI help 2022-03-12 10:20:37 +01:00
b4b0b464bd fix: only delete secrets from specific app
Some checks failed
continuous-integration/drone/push Build is failing
See coop-cloud/organising#300.
2022-03-12 09:39:30 +01:00
d8a1b0ccc1 doc: indicate storage location of secret in logs 2022-03-12 09:39:15 +01:00
3fbd381f55 fix: add pass remove flag & show name is optional 2022-03-12 09:17:24 +01:00
d3e127e5c8 fix: retain backwards compat with TYPE/RECIPE change
All checks were successful
continuous-integration/drone/push Build is passing
2022-03-11 19:37:50 +01:00
e9cfb076c6 fix: strip length modifiers
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#297.
2022-03-11 16:40:10 +01:00
8ccf856110 fix: lay out generated secrets with warning/clarification 2022-03-11 16:39:34 +01:00
d0945aa09d fix: handle NoInput for app removal 2022-03-11 16:39:20 +01:00
123619219e chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-03-11 09:17:37 +01:00
a27410952e Merge remote-tracking branch 'origin/renovate/main-github.com-docker-docker-20.x' into main 2022-03-11 09:17:15 +01:00
13e0392af6 chore(deps): update module github.com/docker/docker to v20.10.13
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-03-11 08:01:57 +00:00
99a6135f72 chore(deps): update module github.com/docker/cli to v20.10.13
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-03-11 08:01:45 +00:00
a6b52c1354 chore: go mod tidy [ci skip] 2022-03-09 12:28:26 +01:00
fa51459191 chore(deps): update module github.com/docker/distribution to v2.8.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-03-09 08:01:26 +00:00
c529988427 feat: output success for secret insert [ci skip] 2022-03-08 18:10:37 +01:00
231cc3c718 fix: use StackName to filter volumes
All checks were successful
continuous-integration/drone/push Build is passing
2022-03-08 18:04:47 +01:00
3381b8936d fix: better error handling & proper context deletion for server rm
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-24 15:57:52 +01:00
823f869f1d fix: error out correctly from ValidateDomain 2022-02-24 15:57:40 +01:00
ecbeacf10f fix: prompt for container choice correctly on run [ci skip] 2022-02-22 11:47:36 +01:00
3f838038d5 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-22 10:52:14 +01:00
91b4e021d0 chore(deps): update module github.com/containers/image to v5
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-02-22 08:01:12 +00:00
598e87dca2 chore: skip new repositories
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-21 08:46:30 +00:00
001511876d chore: go mod tidy 2022-02-21 08:46:30 +00:00
b295958c17 fix: handle all container registries
See coop-cloud/organising#258

This also fixes how we read the image digest; I think it was wrong
before. Some registries restrict reading this info, so we now just
default to "unknown" in that case.

This also pulls in a wave of new dependencies due to the generic
handling logic of the containers/... packages. The abra binary is now
1 MB larger.

Catalogue generation is unfortunately slower now, but it is more
robust.

The generic logic looks in ~/.docker/config.json for login details, so
you don't have to pass those in manually on the CLI anymore; we just
read those defaults. You can run "docker login" to set up credentials in
that file. Since most folks won't generate the catalogue, this seems
fine for now.
2022-02-21 08:46:30 +00:00
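A heavily simplified sketch of the "unknown" digest fallback described above; the lookup helper here is hypothetical and merely stands in for the registry query done via the containers/image packages.

```go
package main

import (
	"errors"
	"fmt"
)

// lookupDigest is a stand-in for the registry query; credentials would come
// from ~/.docker/config.json as described in the commit body above.
func lookupDigest(image string) (string, error) {
	return "", errors.New("registry restricts digest reads")
}

// imageDigest falls back to "unknown" when a registry refuses to expose the
// digest, instead of failing catalogue generation outright.
func imageDigest(image string) string {
	d, err := lookupDigest(image)
	if err != nil {
		return "unknown"
	}
	return d
}

func main() {
	fmt.Println(imageDigest("docker.io/library/nginx:1.21"))
}
```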
2fbdcfb958 refactor: try the meta for default branch too
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Sometimes the Branch(...) call gets confused with state in the
repository. It's more robust to use the default value we get from Gitea.

See coop-cloud/organising#299.
2022-02-20 18:07:49 +01:00
09ac74d205 fix: check out default branch from tags
All checks were successful
continuous-integration/drone/push Build is passing
Also fix error handling to match function signatures.
2022-02-18 11:17:43 +01:00
5da4afa0ec fix: only ensure latest after cloning
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-18 09:55:07 +01:00
9d5e805748 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-16 13:53:09 +01:00
770ae5ed9b chore(deps): update module github.com/moby/sys/signal to v0.7.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-02-16 08:01:33 +00:00
e056d8dc44 fix: de-dupe dns resolver logging, more concise [ci skip] 2022-02-14 18:06:06 +01:00
c3442354e7 fix: skip dupe ipv4 check, done in EnsureDomainsResolveSameIPv4
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-14 17:44:15 +01:00
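A small sketch of what a check like EnsureDomainsResolveSameIPv4 amounts to (assumed logic, not the abra implementation): resolve both names and compare their first IPv4 addresses.

```go
package main

import (
	"fmt"
	"net"
)

// resolveIPv4 returns the first IPv4 address a name resolves to.
func resolveIPv4(host string) (string, error) {
	ips, err := net.LookupIP(host)
	if err != nil {
		return "", err
	}
	for _, ip := range ips {
		if v4 := ip.To4(); v4 != nil {
			return v4.String(), nil
		}
	}
	return "", fmt.Errorf("no IPv4 address for %s", host)
}

// ensureDomainsResolveSameIPv4 checks that the app domain and the server
// hostname point at the same IPv4 address.
func ensureDomainsResolveSameIPv4(domain, server string) error {
	a, err := resolveIPv4(domain)
	if err != nil {
		return err
	}
	b, err := resolveIPv4(server)
	if err != nil {
		return err
	}
	if a != b {
		return fmt.Errorf("%s resolves to %s but %s resolves to %s", domain, a, server, b)
	}
	return nil
}

func main() {
	fmt.Println(ensureDomainsResolveSameIPv4("example.com", "example.com"))
}
```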
6b2a0011af fix: remove dupe logging on catalogue reading [ci skip] 2022-02-14 17:37:25 +01:00
46fca7cfa7 docs: less ambig wording [ci skip] 2022-02-14 17:35:42 +01:00
82d560a946 fix: prompt for input on app cp
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-14 17:10:53 +01:00
fc5107865b fix: typo
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-10 10:59:19 +01:00
53ed1fc545 chore: go mod tidy
Some checks failed
continuous-integration/drone/push Build is failing
2022-02-09 09:59:23 +01:00
cc9e3d4e60 chore(deps): update module github.com/docker/distribution to v2.8.0 2022-02-09 09:59:23 +01:00
0557284461 fix: use new repo name
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-09 08:58:51 +00:00
b5f23d3791 feat: show latest published version on sync
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-09 08:58:20 +00:00
2b2dcc01b4 fix: don't check out latest if we don't have a copy
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-09 09:54:02 +01:00
0a208d049e chore: go mod tidy + patch upgrades
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-04 10:50:55 +01:00
141711ecd0 Merge remote-tracking branch 'origin/renovate/main-github.com-schollz-progressbar-v3-3.x' into main 2022-02-04 10:50:36 +01:00
cd46d71ce4 chore(deps): update module github.com/schollz/progressbar/v3 to v3.8.6
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-02-04 08:01:17 +00:00
6fa090352d chore(deps): update module github.com/buger/goterm to v1.0.4
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is running
continuous-integration/drone/push Build is failing
2022-02-04 08:01:11 +00:00
227c02cd09 refactor!: make common flags single char again
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-03 14:19:51 +01:00
bfeda40e34 fix: catch more ssh failure modes with help
All checks were successful
continuous-integration/drone/push Build is passing
2022-02-03 13:43:11 +01:00
5237c7ed50 docs: focus more on straight ssh docs for server add 2022-02-03 13:42:49 +01:00
4e09f3b9a8 refactor: migrate authors to dedicated file [ci skip] 2022-02-02 21:00:00 +01:00
dfb32cbb68 fix: type -> recipe [ci skip] 2022-02-02 20:48:12 +01:00
bdd9b0a1aa fix: ensure recipes on latest for lint/generate
All checks were successful
continuous-integration/drone/push Build is passing
Follows b2d17a1829.
2022-01-29 14:06:25 +01:00
b2d17a1829 fix: ensure latest checked out for recipe upgrade
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-29 13:35:42 +01:00
c905376472 refactor!: use "config" instead of "compose" [ci skip] 2022-01-27 12:24:33 +01:00
d316de218c feat: include recipe in deploy & friends overview 2022-01-27 12:23:02 +01:00
123475bd36 chore: remove old files [ci skip] 2022-01-27 12:14:01 +01:00
58e98f490d refactor!: type -> recipes
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is passing
2022-01-27 12:06:32 +01:00
224b8865bf test: newlines for output when Y'ing & N'ing
Some checks failed
continuous-integration/drone/pr Build is running
continuous-integration/drone/push Build is failing
2022-01-27 12:05:22 +01:00
8fb9f42f13 test: add remaining scripts 2022-01-27 12:05:21 +01:00
dc5e2a5b24 test: fix pwd usage, PWD doesn't exist 2022-01-27 12:05:21 +01:00
40b4ef5ab2 test: disable debug, its too much noise 2022-01-27 12:05:21 +01:00
4a912ae3bc test: show how to run all tests 2022-01-27 12:05:21 +01:00
1150fcc595 test: remove manual test guide, using semi-automated now 2022-01-27 12:05:20 +01:00
45224d1349 test: use new flags + order for record/server 2022-01-27 12:05:20 +01:00
7a40e2d616 fix: remove duplicate flags on "server new" 2022-01-27 12:05:20 +01:00
2277e4ef72 refactor!: remove no-input flag where not needed 2022-01-27 12:05:19 +01:00
c0c3d9fe76 refactor!: make dry-run flag more convenient 2022-01-27 12:05:19 +01:00
2493921ade refactor!: de-duplicate record flags 2022-01-27 12:05:19 +01:00
22f9cf2be4 refactor: remove unused flag 2022-01-27 12:05:18 +01:00
a23124aede feat: auto strip domain names to avoid runtime limits
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-27 10:33:21 +00:00
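A rough sketch of the domain stripping above (the length limit here is an assumption, not the value abra uses): derive a swarm-safe stack name from the domain and truncate it so names derived from it stay within Docker's limits.

```go
package main

import (
	"fmt"
	"strings"
)

// stackNameFromDomain turns an app domain into a stack name: dots and dashes
// become underscores and the result is truncated to maxLen characters.
func stackNameFromDomain(domain string, maxLen int) string {
	name := strings.NewReplacer(".", "_", "-", "_").Replace(domain)
	if len(name) > maxLen {
		name = name[:maxLen]
	}
	return name
}

func main() {
	fmt.Println(stackNameFromDomain("cloud.some-very-long-cooperative-name.example.org", 45))
}
```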
e670844b56 refactor!: app name -> domain 2022-01-27 10:33:21 +00:00
bc1729c5ca trim docs, point to new docs [ci skip] 2022-01-27 10:30:28 +01:00
fa8611b115 fix: respect NoInput on "app cp" & use app to get StackName
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-25 11:39:38 +01:00
415df981ff test: long flags, drop docker, use run_tests for all tests
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-24 16:49:51 +01:00
57728e58e8 test: improve semi-manual testing
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2022-01-21 16:48:42 +01:00
c7062e0494 fix: initial subcmd completion
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Broken by migration to v1 API.
2022-01-20 11:42:04 +01:00
cff7534bf9 chore: publish 0.4.0-alpha-rc6
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-19 13:33:32 +01:00
13e582349c fix: correctly override with ~/.ssh/config if failing to connect
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-19 13:28:57 +01:00
b1b9612e01 fix: don't try to parse empty values on status lookup
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-19 12:38:41 +01:00
afeee1270e test: break up integration, rejig manual 2022-01-19 12:17:09 +01:00
cb210d0c81 docs: pass on flag/help strings
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-19 11:21:06 +01:00
9f2bb3f74f refactor!: remove auto dns, too magic, too broken 2022-01-19 11:20:51 +01:00
a33767f848 refactor!: drop auto traefik deploy, rarely works
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-19 11:08:43 +01:00
a1abe5c6be refactor!: drop backup/restore for now
This will be done with the bot from now on.
2022-01-19 11:06:54 +01:00
672b44f965 test: remove since we're not supporting that in abra now 2022-01-19 11:04:28 +01:00
6d9573ec7e test: more help for how to do this 2022-01-19 11:04:15 +01:00
53cd3b8b71 fix: drop duplicate flags 2022-01-19 10:58:09 +01:00
b9ec41647b fix: when upgrading, skip over bad tags, don't error out
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-19 10:40:55 +01:00
f4b563528f docs: point to new option for better assurance on tag listing 2022-01-19 10:40:37 +01:00
f9a2c1d58f refactor: put StripTagMeta into formatter package
Avoid circular import.
2022-01-19 10:40:14 +01:00
7a66a90ecb fix!: change dry-run alias to not conflict with debug 2022-01-18 17:13:28 +01:00
0e688f1407 refactor!: migrate to urfave/cli v1
All checks were successful
continuous-integration/drone/push Build is passing
Better flexible flags handling.
2022-01-18 14:38:20 +01:00
c6db9ee355 chore: publish 0.4.0-alpha-rc5
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-18 11:39:02 +01:00
7733637767 fix: ensure catalogue cloned for catalogue reliant commands
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-18 11:19:33 +01:00
88f9796aaf fix: let us know if not pushing changes without dry-run (recipe release)
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-18 10:55:07 +01:00
6cdba0f9de fix: commit changes if dry-run not present (recipe release) 2022-01-18 10:54:54 +01:00
199aa5f4e3 fix: read password length from env files
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-17 22:34:32 +01:00
9b26c24a5f docs: drop that, not happening 2022-01-17 22:27:25 +01:00
ca75654769 fix: read correct app file name for secret generation
Stack name is only an internal docker concept now.
2022-01-17 22:17:59 +01:00
fc2d83d203 fix: better error message for missing server 2022-01-17 22:04:11 +01:00
2f4f288a46 feat: -a/--all-tags for listing all tags on recipe upgrade 2022-01-17 21:59:31 +01:00
e98f00d354 chore: go mod tidy 2022-01-17 21:50:25 +01:00
b4c2773b87 chore(deps): update module gotest.tools/v3 to v3.1.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-01-17 08:01:18 +00:00
3aec5d1d7e fix: ignore new test repo
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-12 16:11:18 +01:00
e0fa1b6995 fix: let users know what was deleted
All checks were successful
continuous-integration/drone/push Build is passing
2022-01-06 11:47:10 +01:00
b69ab0df65 fix: chaos mode fixed for upgrade/rollback
All checks were successful
continuous-integration/drone/push Build is passing
Follows 4b7ec6384c.
2022-01-06 10:32:24 +01:00
178 changed files with 12856 additions and 5765 deletions

8
.dockerignore Normal file
View File

@@ -0,0 +1,8 @@
*.swo
*.swp
.dockerignore
Dockerfile
abra
dist
kadabra
tags

View File

@@ -3,45 +3,20 @@ kind: pipeline
name: coopcloud.tech/abra
steps:
- name: make check
image: golang:1.17
image: golang:1.21
commands:
- make check
- name: make static
image: golang:1.17
ignore: true # until we decide we all want this check
environment:
STATIC_CHECK_URL: honnef.co/go/tools/cmd/staticcheck
STATIC_CHECK_VERSION: v0.2.0
commands:
- go install $STATIC_CHECK_URL@$STATIC_CHECK_VERSION
- make static
- name: make build
image: golang:1.17
commands:
- make build
- name: make test
image: golang:1.17
image: golang:1.21
environment:
ABRA_DIR: "/root/.abra"
commands:
- make build-abra
- ./abra help # show version, initialise $ABRA_DIR
- make test
- name: notify on failure
image: plugins/matrix
settings:
homeserver: https://matrix.autonomic.zone
roomid: "IFazIpLtxiScqbHqoa:autonomic.zone"
userid: "@autono-bot:autonomic.zone"
accesstoken:
from_secret: autono_bot_access_token
depends_on:
- make check
- make build
- make test
when:
status:
- failure
- name: fetch
image: docker:git
@@ -49,13 +24,12 @@ steps:
- git fetch --tags
depends_on:
- make check
- make build
- make test
when:
event: tag
- name: release
image: golang:1.17
image: goreleaser/goreleaser:v1.18.2
environment:
GITEA_TOKEN:
from_secret: goreleaser_gitea_token
@@ -63,12 +37,29 @@ steps:
- name: deps
path: /go
commands:
- curl -sL https://git.io/goreleaser | bash
- goreleaser release
depends_on:
- fetch
when:
event: tag
- name: publish image
image: plugins/docker
settings:
auto_tag: true
username: 3wordchant
password:
from_secret: git_coopcloud_tech_token_3wc
repo: git.coopcloud.tech/coop-cloud/abra
tags: dev
registry: git.coopcloud.tech
when:
event:
exclude:
- pull_request
depends_on:
- make check
volumes:
- name: deps
temp: {}

View File

@@ -1,4 +0,0 @@
GANDI_TOKEN=...
HCLOUD_TOKEN=...
REGISTRY_PASSWORD=...
REGISTRY_USERNAME=...

View File

@@ -1,6 +1,7 @@
go env -w GOPRIVATE=coopcloud.tech
# export PASSWORD_STORE_DIR=$(pwd)/../../autonomic/passwords/passwords/
# export HCLOUD_TOKEN=$(pass show logins/hetzner/cicd/api_key)
# export CAPSUL_TOKEN=...
# export GITEA_TOKEN=...
# export ABRA_DIR="$HOME/.abra_test"
# export ABRA_TEST_DOMAIN=test.example.com
# export ABRA_SKIP_TEARDOWN=1 # for faster feedback when developing tests

2
.gitignore vendored
View File

@@ -2,6 +2,8 @@
.e2e.env
.envrc
.vscode/
/kadabra
abra
dist/
tests/integration/.bats
vendor/

View File

@@ -1,38 +1,76 @@
---
project_name: abra
gitea_urls:
api: https://git.coopcloud.tech/api/v1
download: https://git.coopcloud.tech/
skip_tls_verify: false
before:
hooks:
- go mod tidy
- go generate ./...
builds:
- env:
- CGO_ENABLED=0
- id: abra
binary: abra
dir: cmd/abra
env:
- CGO_ENABLED=0
goos:
- linux
- darwin
goarch:
- 386
- amd64
- arm
- arm64
goarm:
- 5
- 6
- 7
ldflags:
- "-X 'main.Commit={{ .Commit }}'"
- "-X 'main.Version={{ .Version }}'"
- id: kadabra
binary: kadabra
dir: cmd/kadabra
env:
- CGO_ENABLED=0
goos:
- linux
- darwin
goarch:
- 386
- amd64
- arm
- arm64
goarm:
- 5
- 6
- 7
ldflags:
- "-X 'main.Commit={{ .Commit }}'"
- "-X 'main.Version={{ .Version }}'"
archives:
- replacements:
386: i386
amd64: x86_64
format: binary
checksum:
name_template: "checksums.txt"
snapshot:
name_template: "{{ incpatch .Version }}-next"
changelog:
sort: desc
filters:
exclude:
- "^Merge"
- "^Revert"
- "^WIP:"
- "^chore(deps):"
- "^style:"
- "^test:"
- "^tests:"
- "^Revert"

18
AUTHORS.md Normal file
View File

@@ -0,0 +1,18 @@
# authors
> If you're looking at this and you hack on `abra` and you're not listed here,
> please do add yourself! This is a community project, let's show some 💞
- 3wordchant
- cassowary
- codegod100
- decentral1se
- frando
- kawaiipunk
- knoflook
- moritz
- p4u1
- rix
- roxxers
- vera
- yksflip

24
Dockerfile Normal file
View File

@@ -0,0 +1,24 @@
FROM golang:1.21-alpine AS build
ENV GOPRIVATE coopcloud.tech
RUN apk add --no-cache \
ca-certificates \
gcc \
git \
make \
musl-dev
RUN update-ca-certificates
COPY . /app
WORKDIR /app
RUN CGO_ENABLED=0 make build
FROM scratch
COPY --from=build /app/abra /abra
ENTRYPOINT ["/abra"]

15
LICENSE Normal file
View File

@@ -0,0 +1,15 @@
Abra: The Co-op Cloud utility belt
Copyright (C) 2022 Co-op Cloud <helo@coopcloud.tech>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.

View File

@@ -1,57 +1,55 @@
ABRA := ./cmd/abra
COMMIT := $(shell git rev-list -1 HEAD)
GOPATH := $(shell go env GOPATH)
LDFLAGS := "-X 'main.Commit=$(COMMIT)'"
ABRA := ./cmd/abra
KADABRA := ./cmd/kadabra
COMMIT := $(shell git rev-list -1 HEAD)
GOPATH := $(shell go env GOPATH)
GOVERSION := 1.21
LDFLAGS := "-X 'main.Commit=$(COMMIT)'"
DIST_LDFLAGS := $(LDFLAGS)" -s -w"
export GOPRIVATE=coopcloud.tech
all: format check static build test
# NOTE(d1): default `make` optimised for Abra hacking
all: format check build-abra test
run:
run-abra:
@go run -ldflags=$(LDFLAGS) $(ABRA)
install:
run-kadabra:
@go run -ldflags=$(LDFLAGS) $(KADABRA)
install-abra:
@go install -ldflags=$(LDFLAGS) $(ABRA)
build-dev:
@go build -ldflags=$(LDFLAGS) $(ABRA)
install-kadabra:
@go install -ldflags=$(LDFLAGS) $(KADABRA)
build:
@go build -ldflags=$(DIST_LDFLAGS) $(ABRA)
build-abra:
@go build -v -ldflags=$(DIST_LDFLAGS) $(ABRA)
build-kadabra:
@go build -v -ldflags=$(DIST_LDFLAGS) $(KADABRA)
build: build-abra build-kadabra
build-docker-abra:
@docker run -it -v $(PWD):/abra golang:$(GOVERSION) \
bash -c 'cd /abra; ./scripts/docker/build.sh'
build-docker: build-docker-abra
clean:
@rm '$(GOPATH)/bin/abra'
@rm '$(GOPATH)/bin/kadabra'
format:
@gofmt -s -w .
check:
@test -z $$(gofmt -l .) || (echo "gofmt: formatting issue - run 'make format' to resolve" && exit 1)
static:
@staticcheck $(ABRA)
@test -z $$(gofmt -l .) || \
(echo "gofmt: formatting issue - run 'make format' to resolve" && exit 1)
test:
@go test ./... -cover -v
loc:
@find . -name "*.go" | xargs wc -l
loc-author:
@git ls-files -z | \
xargs -0rn 1 -P "$$(nproc)" -I{} sh -c 'git blame -w -M -C -C --line-porcelain -- {} | grep -I --line-buffered "^author "' | \
sort -f | \
uniq -ic | \
sort -n
int-core:
@docker run \
-v $$(pwd):/src \
--env-file .e2e.env \
debian:bullseye-slim \
sh -c "\
apt update && apt install -y wget curl git; echo ""; echo ""; \
git config --global user.email 'e2e@coopcloud.tech'; \
git config --global user.name 'e2e'; \
cd /src/tests/integration && bash core.sh -- --dev \
"

View File

@@ -1,75 +1,13 @@
# abra
> https://coopcloud.tech
# `abra`
[![Build Status](https://build.coopcloud.tech/api/badges/coop-cloud/abra/status.svg?ref=refs/heads/main)](https://build.coopcloud.tech/coop-cloud/abra)
[![Go Report Card](https://goreportcard.com/badge/git.coopcloud.tech/coop-cloud/abra)](https://goreportcard.com/report/git.coopcloud.tech/coop-cloud/abra)
[![Go Reference](https://pkg.go.dev/badge/coopcloud.tech/abra.svg)](https://pkg.go.dev/coopcloud.tech/abra)
The Co-op Cloud utility belt 🎩🐇
`abra` is a command-line tool for managing your own [Co-op Cloud](https://coopcloud.tech). It can provision new servers, create apps, deploy them, run backup and restore operations and a whole lot of other things. Please see [docs.coopcloud.tech](https://docs.coopcloud.tech) for more extensive documentation.
<a href="https://github.com/egonelbre/gophers"><img align="right" width="150" src="https://github.com/egonelbre/gophers/raw/master/.thumb/sketch/adventure/poking-fire.png"/></a>
## Quick install
`abra` is the flagship client & command-line tool for Co-op Cloud. It has been developed specifically for the purpose of making the day-to-day operations of [operators](https://docs.coopcloud.tech/operators/) and [maintainers](https://docs.coopcloud.tech/maintainers/) pleasant & convenient. It is libre software, written in [Go](https://go.dev) and maintained and extended by the community 💖
```bash
curl https://install.abra.autonomic.zone | bash
```
Or using the latest release candidate (extra experimental!):
```bash
curl https://install.abra.autonomic.zone | bash -s -- --rc
```
Source for this script is in [scripts/installer/installer](./scripts/installer/installer).
## Hacking
### Getting started
Install [direnv](https://direnv.net), run `cp .envrc.sample .envrc`, then run `direnv allow` in this directory. This will set coopcloud repos as private due to [this bug.](https://git.coopcloud.tech/coop-cloud/coopcloud.tech/issues/20#issuecomment-8201). Or you can run `go env -w GOPRIVATE=coopcloud.tech` but I'm not sure how persistent this is.
Install [Go >= 1.16](https://golang.org/doc/install) and then:
- `make build` to build
- `./abra` to run commands
- `make test` will run tests
- `make install` will install it to `$GOPATH/bin`
- `go get <package>` and `go mod tidy` to add a new dependency
Our [Drone CI configuration](.drone.yml) runs a number of sanity on each pushed commit. See the [Makefile](./Makefile) for more handy targets.
Please use the [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/) for your commits so we can automate our change log.
### Versioning
We use [goreleaser](https://goreleaser.com) to help us automate releases. We use [semver](https://semver.org) for versioning all releases of the tool. While we are still in the public alpha release phase, we will maintain a `0.y.z-alpha` format. Change logs are generated from our commit logs. We are still working this out and aim to refine our release praxis as we go.
For developers, while using this `-alpha` format, the `y` part is the "major" version part. So, if you make breaking changes, you increment that and _not_ the `x` part. So, if you're on `0.1.0-alpha`, then you'd go to `0.1.1-alpha` for a backwards compatible change and `0.2.0-alpha` for a backwards incompatible change.
### Making a new release
- Change `ABRA_VERSION` to match the new tag in [`scripts`](./scripts/installer/installer) (use [semver](https://semver.org))
- Commit that change (e.g. `git commit -m 'chore: publish next tag x.y.z-alpha'`)
- Make a new tag (e.g. `git tag -a x.y.z-alpha`)
- Push the new tag (e.g. `git push && git push --tags`)
- Wait until the build finishes on [build.coopcloud.tech](https://build.coopcloud.tech/coop-cloud/abra)
- Deploy the new installer script (e.g. `cd ./scripts/installer && make`)
- Check the release worked, (e.g. `abra upgrade; abra -v`)
### Fork maintenance
#### `godotenv`
We maintain a fork of [godotenv](https://github.com/Autonomic-Cooperative/godotenv) for two features:
1. multi-line env var support
2. inline comment parsing
You can upgrade the version here by running `go get github.com/Autonomic-Cooperative/godotenv@<commit>` where `<commit>` is the
latest commit you want to pin to. We are aiming to migrate to YAML format for the environment configuration, so this should only
be a temporary thing.
#### `docker/client`
A number of modules in [pkg/upstream](./pkg/upstream) are copy/pasta'd from the upstream [docker/docker/client](https://pkg.go.dev/github.com/docker/docker/client). We had to do this because upstream are not exposing their API as public.
Please see [docs.coopcloud.tech/abra](https://docs.coopcloud.tech/abra) for help on install, upgrade, hacking, troubleshooting & more!

View File

@@ -4,37 +4,34 @@ import (
"github.com/urfave/cli/v2"
)
// AppCommand defines the `abra app` command and its subcommands
var AppCommand = &cli.Command{
Name: "app",
Usage: "Manage apps",
Aliases: []string{"a"},
ArgsUsage: "<app>",
Description: `
This command provides all the functionality you need to manage the life cycle
of your apps. From initial deployment, day-2 operations (e.g. backup/restore)
to scaling apps up and spinning them down.
`,
var AppCommand = cli.Command{
Name: "app",
Aliases: []string{"a"},
Usage: "Manage apps",
ArgsUsage: "<domain>",
Description: "Functionality for managing the life cycle of your apps",
Subcommands: []*cli.Command{
appNewCommand,
appConfigCommand,
appRestartCommand,
appDeployCommand,
appUpgradeCommand,
appUndeployCommand,
appBackupCommand,
appRestoreCommand,
appRemoveCommand,
appCheckCommand,
appListCommand,
appPsCommand,
appLogsCommand,
appCpCommand,
appRunCommand,
appRollbackCommand,
appSecretCommand,
appVolumeCommand,
appVersionCommand,
appErrorsCommand,
&appBackupCommand,
&appCheckCommand,
&appCmdCommand,
&appConfigCommand,
&appCpCommand,
&appDeployCommand,
&appErrorsCommand,
&appListCommand,
&appLogsCommand,
&appNewCommand,
&appPsCommand,
&appRemoveCommand,
&appRestartCommand,
&appRestoreCommand,
&appRollbackCommand,
&appRunCommand,
&appSecretCommand,
&appServicesCommand,
&appUndeployCommand,
&appUpgradeCommand,
&appVersionCommand,
&appVolumeCommand,
},
}

View File

@@ -1,77 +1,296 @@
package app
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var backupAllServices bool
var backupAllServicesFlag = &cli.BoolFlag{
Name: "all",
Value: false,
Destination: &backupAllServices,
Aliases: []string{"a"},
Usage: "Backup all services",
var snapshot string
var snapshotFlag = &cli.StringFlag{
Name: "snapshot, s",
Usage: "Lists specific snapshot",
Destination: &snapshot,
}
var appBackupCommand = &cli.Command{
Name: "backup",
Usage: "Backup an app",
Aliases: []string{"b"},
Flags: []cli.Flag{backupAllServicesFlag},
ArgsUsage: "<service>",
var includePath string
var includePathFlag = &cli.StringFlag{
Name: "path, p",
Usage: "Include path",
Destination: &includePath,
}
var resticRepo string
var resticRepoFlag = &cli.StringFlag{
Name: "repo, r",
Usage: "Restic repository",
Destination: &resticRepo,
}
var appBackupListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
snapshotFlag,
includePathFlag,
},
Before: internal.SubCommandBefore,
Usage: "List all backups",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if c.Args().Get(1) != "" && backupAllServices {
internal.ShowSubcommandHelpAndError(c, errors.New("cannot use '<service>' and '--all' together"))
}
abraSh := path.Join(config.RECIPES_DIR, app.Type, "abra.sh")
if _, err := os.Stat(abraSh); err != nil {
if os.IsNotExist(err) {
logrus.Fatalf("%s does not exist?", abraSh)
}
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
sourceCmd := fmt.Sprintf("source %s", abraSh)
execCmd := "abra_backup"
if !backupAllServices {
serviceName := c.Args().Get(1)
if serviceName == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("no service(s) target provided"))
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
execCmd = fmt.Sprintf("abra_backup_%s", serviceName)
}
bytes, err := ioutil.ReadFile(abraSh)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
if !strings.Contains(string(bytes), execCmd) {
logrus.Fatalf("%s doesn't have a %s function", app.Type, execCmd)
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
logrus.Fatal(err)
}
sourceAndExec := fmt.Sprintf("%s; %s", sourceCmd, execCmd)
cmd := exec.Command("bash", "-c", sourceAndExec)
if err := internal.RunCmd(cmd); err != nil {
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
logrus.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if includePath != "" {
logrus.Debugf("including INCLUDE_PATH=%s in backupbot exec invocation", includePath)
execEnv = append(execEnv, fmt.Sprintf("INCLUDE_PATH=%s", includePath))
}
if err := internal.RunBackupCmdRemote(cl, "ls", targetContainer.ID, execEnv); err != nil {
logrus.Fatal(err)
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
var appBackupDownloadCommand = cli.Command{
Name: "download",
Aliases: []string{"d"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
snapshotFlag,
includePathFlag,
},
Before: internal.SubCommandBefore,
Usage: "Download a backup",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
logrus.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
logrus.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if includePath != "" {
logrus.Debugf("including INCLUDE_PATH=%s in backupbot exec invocation", includePath)
execEnv = append(execEnv, fmt.Sprintf("INCLUDE_PATH=%s", includePath))
}
if err := internal.RunBackupCmdRemote(cl, "download", targetContainer.ID, execEnv); err != nil {
logrus.Fatal(err)
}
remoteBackupDir := "/tmp/backup.tar.gz"
currentWorkingDir := "."
if err = CopyFromContainer(cl, targetContainer.ID, remoteBackupDir, currentWorkingDir); err != nil {
logrus.Fatal(err)
}
fmt.Println("backup successfully downloaded to current working directory")
return nil
},
}
var appBackupCreateCommand = cli.Command{
Name: "create",
Aliases: []string{"c"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
resticRepoFlag,
},
Before: internal.SubCommandBefore,
Usage: "Create a new backup",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
logrus.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if resticRepo != "" {
logrus.Debugf("including RESTIC_REPO=%s in backupbot exec invocation", resticRepo)
execEnv = append(execEnv, fmt.Sprintf("RESTIC_REPO=%s", resticRepo))
}
if err := internal.RunBackupCmdRemote(cl, "create", targetContainer.ID, execEnv); err != nil {
logrus.Fatal(err)
}
return nil
},
}
var appBackupSnapshotsCommand = cli.Command{
Name: "snapshots",
Aliases: []string{"s"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
snapshotFlag,
},
Before: internal.SubCommandBefore,
Usage: "List backup snapshots",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
logrus.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
logrus.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if err := internal.RunBackupCmdRemote(cl, "snapshots", targetContainer.ID, execEnv); err != nil {
logrus.Fatal(err)
}
return nil
},
}
var appBackupCommand = cli.Command{
Name: "backup",
Aliases: []string{"b"},
Usage: "Manage app backups",
ArgsUsage: "<domain>",
Subcommands: []*cli.Command{
&appBackupListCommand,
&appBackupSnapshotsCommand,
&appBackupDownloadCommand,
&appBackupCreateCommand,
},
}
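
All four backup subcommands above build the same exec environment for the backupbot container: `SERVICE` is always set to the app domain, and `SNAPSHOT`, `INCLUDE_PATH` or `RESTIC_REPO` are appended only when the corresponding flag is given. A small self-contained sketch of that assembly step, extracted from the pattern in the diff (illustrative only):

```go
package main

import "fmt"

// backupExecEnv mirrors how the backup subcommands assemble the environment
// passed to the backupbot exec call.
func backupExecEnv(domain, snapshot, includePath string) []string {
	execEnv := []string{fmt.Sprintf("SERVICE=%s", domain)}
	if snapshot != "" {
		execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
	}
	if includePath != "" {
		execEnv = append(execEnv, fmt.Sprintf("INCLUDE_PATH=%s", includePath))
	}
	return execEnv
}

func main() {
	fmt.Println(backupExecEnv("example.com", "abc123", "/var/lib/app"))
	// [SERVICE=example.com SNAPSHOT=abc123 INCLUDE_PATH=/var/lib/app]
}
```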

View File

@@ -1,53 +1,81 @@
package app
import (
"os"
"path"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var appCheckCommand = &cli.Command{
Name: "check",
Usage: "Check if app is configured correctly",
Aliases: []string{"c"},
ArgsUsage: "<service>",
var appCheckCommand = cli.Command{
Name: "check",
Aliases: []string{"chk"},
Usage: "Ensure an app is well configured",
Description: `
This command compares env vars in both the app ".env" and recipe ".env.sample"
file.
The goal is to ensure that recipe ".env.sample" env vars are defined in your
app ".env" file. Only env var definitions in the ".env.sample" which are
uncommented, e.g. "FOO=bar" are checked. If an app ".env" file does not include
these env vars, then "check" will complain.
Recipe maintainers may or may not provide defaults for env vars within their
recipes regardless of commenting or not (e.g. through the use of
${FOO:<default>} syntax). "check" does not confirm or deny this for you.`,
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.ChaosFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
envSamplePath := path.Join(config.RECIPES_DIR, app.Type, ".env.sample")
if _, err := os.Stat(envSamplePath); err != nil {
if os.IsNotExist(err) {
logrus.Fatalf("%s does not exist?", envSamplePath)
}
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
envSample, err := config.ReadEnv(envSamplePath)
if !internal.Chaos {
if err := recipePkg.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipePkg.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipePkg.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
tableCol := []string{"recipe env sample", "app env"}
table := formatter.CreateTable(tableCol)
envVars, err := config.CheckEnv(app)
if err != nil {
logrus.Fatal(err)
}
var missing []string
for k := range envSample {
if _, ok := app.Env[k]; !ok {
missing = append(missing, k)
for _, envVar := range envVars {
if envVar.Present {
table.Append([]string{envVar.Name, "✅"})
} else {
table.Append([]string{envVar.Name, "❌"})
}
}
if len(missing) > 0 {
missingEnvVars := strings.Join(missing, ", ")
logrus.Fatalf("%s is missing %s", app.Path, missingEnvVars)
}
logrus.Infof("all necessary environment variables defined for %s", app.Name)
table.Render()
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
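
The reworked "check" command compares the uncommented variables in the recipe's `.env.sample` against the app's `.env` and renders a ✅/❌ table instead of failing outright. A minimal sketch of that comparison, assuming `config.CheckEnv` reduces to a presence lookup (illustrative, not the real implementation):

```go
package main

import "fmt"

func main() {
	// Variables declared (uncommented) in the recipe's .env.sample.
	sample := []string{"DOMAIN", "SMTP_HOST", "SECRET_DB_PASSWORD_VERSION"}

	// Variables actually present in the app's .env file.
	appEnv := map[string]string{
		"DOMAIN":    "example.com",
		"SMTP_HOST": "mail.example.com",
	}

	for _, name := range sample {
		mark := "❌"
		if _, ok := appEnv[name]; ok {
			mark = "✅"
		}
		fmt.Printf("%-30s %s\n", name, mark)
	}
}
```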

266
cli/app/cmd.go Normal file
View File

@@ -0,0 +1,266 @@
package app
import (
"errors"
"fmt"
"os"
"os/exec"
"path"
"sort"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var appCmdCommand = cli.Command{
Name: "command",
Aliases: []string{"cmd"},
Usage: "Run app commands",
Description: `Run an app specific command.
These commands are bash functions, defined in the abra.sh of the recipe itself.
They can be run within the context of a service (e.g. app) or locally on your
work station by passing "--local". Arguments can be passed into these functions
using the "-- <args>" syntax.
Example:
abra app cmd example.com app create_user -- me@example.com
`,
ArgsUsage: "<domain> [<service>] <command> [-- <args>]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.LocalCmdFlag,
internal.RemoteUserFlag,
internal.TtyFlag,
internal.OfflineFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
Subcommands: []*cli.Command{&appCmdListCommand},
BashComplete: func(ctx *cli.Context) {
args := ctx.Args()
switch args.Len() {
case 0:
autocomplete.AppNameComplete(ctx)
case 1:
autocomplete.ServiceNameComplete(args.Get(0))
case 2:
cmdNameComplete(args.Get(0))
}
},
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Chaos {
if err := recipePkg.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipePkg.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipePkg.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if internal.LocalCmd && internal.RemoteUser != "" {
internal.ShowSubcommandHelpAndError(c, errors.New("cannot use --local & --user together"))
}
hasCmdArgs, parsedCmdArgs := parseCmdArgs(c.Args().Slice())
abraSh := path.Join(config.RECIPES_DIR, app.Recipe, "abra.sh")
if _, err := os.Stat(abraSh); err != nil {
if os.IsNotExist(err) {
logrus.Fatalf("%s does not exist for %s?", abraSh, app.Name)
}
logrus.Fatal(err)
}
if internal.LocalCmd {
if !(c.Args().Len() >= 2) {
internal.ShowSubcommandHelpAndError(c, errors.New("missing arguments"))
}
cmdName := c.Args().Get(1)
if err := internal.EnsureCommand(abraSh, app.Recipe, cmdName); err != nil {
logrus.Fatal(err)
}
logrus.Debugf("--local detected, running %s on local work station", cmdName)
var exportEnv string
for k, v := range app.Env {
exportEnv = exportEnv + fmt.Sprintf("%s='%s'; ", k, v)
}
var sourceAndExec string
if hasCmdArgs {
logrus.Debugf("parsed following command arguments: %s", parsedCmdArgs)
sourceAndExec = fmt.Sprintf("TARGET=local; APP_NAME=%s; STACK_NAME=%s; %s . %s; %s %s", app.Name, app.StackName(), exportEnv, abraSh, cmdName, parsedCmdArgs)
} else {
logrus.Debug("did not detect any command arguments")
sourceAndExec = fmt.Sprintf("TARGET=local; APP_NAME=%s; STACK_NAME=%s; %s . %s; %s", app.Name, app.StackName(), exportEnv, abraSh, cmdName)
}
shell := "/bin/bash"
if _, err := os.Stat(shell); errors.Is(err, os.ErrNotExist) {
logrus.Debugf("%s does not exist locally, use /bin/sh as fallback", shell)
shell = "/bin/sh"
}
cmd := exec.Command(shell, "-c", sourceAndExec)
if err := internal.RunCmd(cmd); err != nil {
logrus.Fatal(err)
}
} else {
if !(c.Args().Len() >= 3) {
internal.ShowSubcommandHelpAndError(c, errors.New("missing arguments"))
}
targetServiceName := c.Args().Get(1)
cmdName := c.Args().Get(2)
if err := internal.EnsureCommand(abraSh, app.Recipe, cmdName); err != nil {
logrus.Fatal(err)
}
serviceNames, err := config.GetAppServiceNames(app.Name)
if err != nil {
logrus.Fatal(err)
}
matchingServiceName := false
for _, serviceName := range serviceNames {
if serviceName == targetServiceName {
matchingServiceName = true
}
}
if !matchingServiceName {
logrus.Fatalf("no service %s for %s?", targetServiceName, app.Name)
}
logrus.Debugf("running command %s within the context of %s_%s", cmdName, app.StackName(), targetServiceName)
if hasCmdArgs {
logrus.Debugf("parsed following command arguments: %s", parsedCmdArgs)
} else {
logrus.Debug("did not detect any command arguments")
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
if err := internal.RunCmdRemote(cl, app, abraSh, targetServiceName, cmdName, parsedCmdArgs); err != nil {
logrus.Fatal(err)
}
}
return nil
},
}
// Parse the command arguments from the cli args.
// Arguments should look like this:
//
// DOMAIN COMMAND -- ARGUMENT1 ARGUMENT2 ...
func parseCmdArgs(args []string) (bool, string) {
if len(args) < 4 {
return false, ""
}
return true, fmt.Sprintf("%s ", strings.Join(args[3:], " "))
}
func cmdNameComplete(appName string) {
app, err := app.Get(appName)
if err != nil {
return
}
cmdNames, _ := getShCmdNames(app)
if err != nil {
return
}
for _, n := range cmdNames {
fmt.Println(n)
}
}
var appCmdListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Usage: "List all available commands",
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
internal.ChaosFlag,
},
BashComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Chaos {
if err := recipePkg.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipePkg.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipePkg.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
cmdNames, err := getShCmdNames(app)
if err != nil {
logrus.Fatal(err)
}
for _, cmdName := range cmdNames {
fmt.Println(cmdName)
}
return nil
},
}
func getShCmdNames(app config.App) ([]string, error) {
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
cmdNames, err := config.ReadAbraShCmdNames(abraShPath)
if err != nil {
return nil, err
}
sort.Strings(cmdNames)
return cmdNames, nil
}

31
cli/app/cmd_test.go Normal file
View File

@@ -0,0 +1,31 @@
package app
import (
"strings"
"testing"
)
func TestParseCmdArgs(t *testing.T) {
tests := []struct {
input []string
shouldParse bool
expectedOutput string
}{
// `--` is not parsed when passed in from the command-line e.g. -- foo bar baz
// so we need to emulate that as missing when testing if bash args are passed in
// see https://git.coopcloud.tech/coop-cloud/organising/issues/336 for more
{[]string{"foo.com", "app", "test"}, false, ""},
{[]string{"foo.com", "app", "test", "foo"}, true, "foo "},
{[]string{"foo.com", "app", "test", "foo", "bar", "baz"}, true, "foo bar baz "},
}
for _, test := range tests {
ok, parsed := parseCmdArgs(test.input)
if ok != test.shouldParse {
t.Fatalf("[%s] should not parse", strings.Join(test.input, " "))
}
if parsed != test.expectedOutput {
t.Fatalf("%s does not match %s", parsed, test.expectedOutput)
}
}
}

View File

@@ -13,10 +13,16 @@ import (
"github.com/urfave/cli/v2"
)
var appConfigCommand = &cli.Command{
Name: "config",
Aliases: []string{"c"},
Usage: "Edit app config",
var appConfigCommand = cli.Command{
Name: "config",
Aliases: []string{"cfg"},
Usage: "Edit app config",
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
appName := c.Args().First()
@@ -55,5 +61,4 @@ var appConfigCommand = &cli.Command{
return nil
},
BashComplete: autocomplete.AppNameComplete,
}

View File

@@ -1,40 +1,52 @@
package app
import (
"context"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/container"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var appCpCommand = &cli.Command{
var appCpCommand = cli.Command{
Name: "cp",
Aliases: []string{"c"},
ArgsUsage: "<src> <dst>",
Usage: "Copy files to/from a running app service",
ArgsUsage: "<domain> <src> <dst>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
Usage: "Copy files to/from a deployed app service",
Description: `
This command supports copying files to and from any app service file system.
Copy files to and from any app service file system.
If you want to copy a myfile.txt to the root of the app service:
abra app cp <app> myfile.txt app:/
abra app cp <domain> myfile.txt app:/
And if you want to copy that file back to your current working directory locally:
abra app cp <app> app:/myfile.txt .
abra app cp <domain> app:/myfile.txt .
`,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
@@ -42,114 +54,326 @@ And if you want to copy that file back to your current working directory locally
dst := c.Args().Get(2)
if src == "" {
logrus.Fatal("missing <src> argument")
} else if dst == "" {
}
if dst == "" {
logrus.Fatal("missing <dest> argument")
}
parsedSrc := strings.SplitN(src, ":", 2)
parsedDst := strings.SplitN(dst, ":", 2)
errorMsg := "one of <src>/<dest> arguments must take $SERVICE:$PATH form"
if len(parsedSrc) == 2 && len(parsedDst) == 2 {
logrus.Fatal(errorMsg)
} else if len(parsedSrc) != 2 {
if len(parsedDst) != 2 {
logrus.Fatal(errorMsg)
}
} else if len(parsedDst) != 2 {
if len(parsedSrc) != 2 {
logrus.Fatal(errorMsg)
}
}
var service string
var srcPath string
var dstPath string
isToContainer := false // <container:src> <dst>
if len(parsedSrc) == 2 {
service = parsedSrc[0]
srcPath = parsedSrc[1]
dstPath = dst
logrus.Debugf("assuming transfer is coming FROM the container")
} else if len(parsedDst) == 2 {
service = parsedDst[0]
dstPath = parsedDst[1]
srcPath = src
isToContainer = true // <src> <container:dst>
logrus.Debugf("assuming transfer is going TO the container")
}
if !isToContainer {
if _, err := os.Stat(dstPath); os.IsNotExist(err) {
logrus.Fatalf("%s does not exist locally?", dstPath)
}
}
err := configureAndCp(c, app, srcPath, dstPath, service, isToContainer)
srcPath, dstPath, service, toContainer, err := parseSrcAndDst(src, dst)
if err != nil {
logrus.Fatal(err)
}
return nil
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
container, err := containerPkg.GetContainerFromStackAndService(cl, app.StackName(), service)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("retrieved %s as target container on %s", formatter.ShortenID(container.ID), app.Server)
if toContainer {
err = CopyToContainer(cl, container.ID, srcPath, dstPath)
} else {
err = CopyFromContainer(cl, container.ID, srcPath, dstPath)
}
if err != nil {
logrus.Fatal(err)
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
func configureAndCp(
c *cli.Context,
app config.App,
srcPath string,
dstPath string,
service string,
isToContainer bool) error {
appFiles, err := config.LoadAppFiles("")
var errServiceMissing = errors.New("one of <src>/<dest> arguments must take $SERVICE:$PATH form")
// parseSrcAndDst parses src and dest string. One of src or dst must be of the form $SERVICE:$PATH
func parseSrcAndDst(src, dst string) (srcPath string, dstPath string, service string, toContainer bool, err error) {
parsedSrc := strings.SplitN(src, ":", 2)
parsedDst := strings.SplitN(dst, ":", 2)
if len(parsedSrc)+len(parsedDst) != 3 {
return "", "", "", false, errServiceMissing
}
if len(parsedSrc) == 2 {
return parsedSrc[1], dst, parsedSrc[0], false, nil
}
if len(parsedDst) == 2 {
return src, parsedDst[1], parsedDst[0], true, nil
}
return "", "", "", false, errServiceMissing
}
// CopyToContainer copies a file or directory from the local file system to the container.
// See the possible copy modes and their documentation.
func CopyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
srcStat, err := os.Stat(srcPath)
if err != nil {
logrus.Fatal(err)
return fmt.Errorf("local %s ", err)
}
appEnv, err := config.GetApp(appFiles, app.Name)
dstStat, err := cl.ContainerStatPath(context.Background(), containerID, dstPath)
dstExists := true
if err != nil {
logrus.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("%s_%s", appEnv.StackName(), service))
container, err := container.GetContainer(c.Context, cl, filters, true)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("retrieved %s as target container on %s", formatter.ShortenID(container.ID), app.Server)
if isToContainer {
if _, err := os.Stat(srcPath); err != nil {
logrus.Fatalf("%s does not exist?", srcPath)
if errdefs.IsNotFound(err) {
dstExists = false
} else {
return fmt.Errorf("remote path: %s", err)
}
}
toTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
content, err := archive.TarWithOptions(srcPath, toTarOpts)
mode, err := copyMode(srcPath, dstPath, srcStat.Mode(), dstStat.Mode, dstExists)
if err != nil {
return err
}
movePath := ""
switch mode {
case CopyModeDirToDir:
// Add the src directory to the destination path
_, srcDir := path.Split(srcPath)
dstPath = path.Join(dstPath, srcDir)
// Make sure the dst directory exists.
dcli, err := command.NewDockerCli()
if err != nil {
logrus.Fatal(err)
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: []string{"mkdir", "-p", dstPath},
Detach: false,
Tty: true,
}); err != nil {
return fmt.Errorf("create remote directory: %s", err)
}
case CopyModeFileToFile:
// Remove the file component from the path, since docker can only copy
// to a directory.
dstPath, _ = path.Split(dstPath)
case CopyModeFileToFileRename:
// Copy the file to the temp directory and move it to its dstPath
// afterwards.
movePath = dstPath
dstPath = "/tmp"
}
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(c.Context, container.ID, dstPath, content, copyOpts); err != nil {
logrus.Fatal(err)
toTarOpts := &archive.TarOptions{IncludeSourceDir: true, NoOverwriteDirNonDir: true, Compression: archive.Gzip}
content, err := archive.TarWithOptions(srcPath, toTarOpts)
if err != nil {
return err
}
logrus.Debugf("copy %s from local to %s on container", srcPath, dstPath)
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), containerID, dstPath, content, copyOpts); err != nil {
return err
}
if movePath != "" {
_, srcFile := path.Split(srcPath)
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: []string{"mv", path.Join("/tmp", srcFile), movePath},
Detach: false,
Tty: true,
}); err != nil {
return fmt.Errorf("create remote directory: %s", err)
}
}
return nil
}
// CopyFromContainer copies a file or directory from the given container to the local file system.
// See the possible copy modes and their documentation.
func CopyFromContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
srcStat, err := cl.ContainerStatPath(context.Background(), containerID, srcPath)
if err != nil {
if errdefs.IsNotFound(err) {
return fmt.Errorf("remote: %s does not exist", srcPath)
} else {
return fmt.Errorf("remote path: %s", err)
}
}
dstStat, err := os.Stat(dstPath)
dstExists := true
var dstMode os.FileMode
if err != nil {
if os.IsNotExist(err) {
dstExists = false
} else {
return fmt.Errorf("remote path: %s", err)
}
} else {
content, _, err := cl.CopyFromContainer(c.Context, container.ID, srcPath)
if err != nil {
logrus.Fatal(err)
dstMode = dstStat.Mode()
}
mode, err := copyMode(srcPath, dstPath, srcStat.Mode, dstMode, dstExists)
if err != nil {
return err
}
moveDstDir := ""
moveDstFile := ""
switch mode {
case CopyModeFileToFile:
// Remove the file component from the path, since docker can only copy
// to a directory.
dstPath, _ = path.Split(dstPath)
case CopyModeFileToFileRename:
// Copy the file to the temp directory and move it to its dstPath
// afterwards.
moveDstFile = dstPath
dstPath = "/tmp"
case CopyModeFilesToDir:
// Copy the directory to the temp directory and move it to its
// dstPath afterwards.
moveDstDir = path.Join(dstPath, "/")
dstPath = "/tmp"
// Make sure the temp directory always gets removed
defer os.Remove(path.Join("/tmp"))
}
content, _, err := cl.CopyFromContainer(context.Background(), containerID, srcPath)
if err != nil {
return fmt.Errorf("copy: %s", err)
}
defer content.Close()
if err := archive.Untar(content, dstPath, &archive.TarOptions{
NoOverwriteDirNonDir: true,
Compression: archive.Gzip,
NoLchown: true,
}); err != nil {
return fmt.Errorf("untar: %s", err)
}
if moveDstFile != "" {
_, srcFile := path.Split(strings.TrimSuffix(srcPath, "/"))
if err := moveFile(path.Join("/tmp", srcFile), moveDstFile); err != nil {
return err
}
defer content.Close()
fromTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
if err := archive.Untar(content, dstPath, fromTarOpts); err != nil {
logrus.Fatal(err)
}
if moveDstDir != "" {
_, srcDir := path.Split(strings.TrimSuffix(srcPath, "/"))
if err := moveDir(path.Join("/tmp", srcDir), moveDstDir); err != nil {
return err
}
}
return nil
}
var (
ErrCopyDirToFile = fmt.Errorf("can't copy dir to file")
ErrDstDirNotExist = fmt.Errorf("destination directory does not exist")
)
type CopyMode int
const (
// Copy a src file to a dest file. The src and dest file names are the same.
// <dir_src>/<file> + <dir_dst>/<file> -> <dir_dst>/<file>
CopyModeFileToFile = CopyMode(iota)
// Copy a src file to a dest file. The src and dest file names are not the same.
// <dir_src>/<file_src> + <dir_dst>/<file_dst> -> <dir_dst>/<file_dst>
CopyModeFileToFileRename
// Copy a src file to dest directory. The dest file gets created in the dest
// folder with the src filename.
// <dir_src>/<file> + <dir_dst> -> <dir_dst>/<file>
CopyModeFileToDir
// Copy a src directory to dest directory.
// <dir_src> + <dir_dst> -> <dir_dst>/<dir_src>
CopyModeDirToDir
// Copy all files in the src directory to the dest directory. This works recursively.
// <dir_src>/ + <dir_dst> -> <dir_dst>/<files_from_dir_src>
CopyModeFilesToDir
)
// copyMode takes a src and dest path and file mode to determine the copy mode.
// See the possible copy modes and their documentation.
func copyMode(srcPath, dstPath string, srcMode os.FileMode, dstMode os.FileMode, dstExists bool) (CopyMode, error) {
_, srcFile := path.Split(srcPath)
_, dstFile := path.Split(dstPath)
if srcMode.IsDir() {
if !dstExists {
return -1, ErrDstDirNotExist
}
if dstMode.IsDir() {
if strings.HasSuffix(srcPath, "/") {
return CopyModeFilesToDir, nil
}
return CopyModeDirToDir, nil
}
return -1, ErrCopyDirToFile
}
if dstMode.IsDir() {
return CopyModeFileToDir, nil
}
if srcFile != dstFile {
return CopyModeFileToFileRename, nil
}
return CopyModeFileToFile, nil
}
// moveDir moves all files from a source path to the destination path recursively.
func moveDir(sourcePath, destPath string) error {
return filepath.Walk(sourcePath, func(p string, info os.FileInfo, err error) error {
if err != nil {
return err
}
newPath := path.Join(destPath, strings.TrimPrefix(p, sourcePath))
if info.IsDir() {
err := os.Mkdir(newPath, info.Mode())
if err != nil {
if os.IsExist(err) {
return nil
}
return err
}
}
if info.Mode().IsRegular() {
return moveFile(p, newPath)
}
return nil
})
}
// moveFile moves a file from a source path to a destination path.
func moveFile(sourcePath, destPath string) error {
inputFile, err := os.Open(sourcePath)
if err != nil {
return err
}
outputFile, err := os.Create(destPath)
if err != nil {
inputFile.Close()
return err
}
defer outputFile.Close()
_, err = io.Copy(outputFile, inputFile)
inputFile.Close()
if err != nil {
return err
}
// Remove file after successful copy.
err = os.Remove(sourcePath)
if err != nil {
return err
}
return nil
}

113
cli/app/cp_test.go Normal file
View File

@@ -0,0 +1,113 @@
package app
import (
"os"
"testing"
)
func TestParse(t *testing.T) {
tests := []struct {
src string
dst string
srcPath string
dstPath string
service string
toContainer bool
err error
}{
{src: "foo", dst: "bar", err: errServiceMissing},
{src: "app:foo", dst: "app:bar", err: errServiceMissing},
{src: "app:foo", dst: "bar", srcPath: "foo", dstPath: "bar", service: "app", toContainer: false},
{src: "foo", dst: "app:bar", srcPath: "foo", dstPath: "bar", service: "app", toContainer: true},
}
for i, tc := range tests {
srcPath, dstPath, service, toContainer, err := parseSrcAndDst(tc.src, tc.dst)
if srcPath != tc.srcPath {
t.Errorf("[%d] srcPath: want (%s), got(%s)", i, tc.srcPath, srcPath)
}
if dstPath != tc.dstPath {
t.Errorf("[%d] dstPath: want (%s), got(%s)", i, tc.dstPath, dstPath)
}
if service != tc.service {
t.Errorf("[%d] service: want (%s), got(%s)", i, tc.service, service)
}
if toContainer != tc.toContainer {
t.Errorf("[%d] toConainer: want (%t), got(%t)", i, tc.toContainer, toContainer)
}
if err == nil && tc.err != nil && err.Error() != tc.err.Error() {
t.Errorf("[%d] err: want (%s), got(%s)", i, tc.err, err)
}
}
}
func TestCopyMode(t *testing.T) {
tests := []struct {
srcPath string
dstPath string
srcMode os.FileMode
dstMode os.FileMode
dstExists bool
mode CopyMode
err error
}{
{
srcPath: "foo.txt",
dstPath: "foo.txt",
srcMode: os.ModePerm,
dstMode: os.ModePerm,
dstExists: true,
mode: CopyModeFileToFile,
},
{
srcPath: "foo.txt",
dstPath: "bar.txt",
srcMode: os.ModePerm,
dstExists: true,
mode: CopyModeFileToFileRename,
},
{
srcPath: "foo",
dstPath: "foo",
srcMode: os.ModeDir,
dstMode: os.ModeDir,
dstExists: true,
mode: CopyModeDirToDir,
},
{
srcPath: "foo/",
dstPath: "foo",
srcMode: os.ModeDir,
dstMode: os.ModeDir,
dstExists: true,
mode: CopyModeFilesToDir,
},
{
srcPath: "foo",
dstPath: "foo",
srcMode: os.ModeDir,
dstExists: false,
mode: -1,
err: ErrDstDirNotExist,
},
{
srcPath: "foo",
dstPath: "foo",
srcMode: os.ModeDir,
dstMode: os.ModePerm,
dstExists: true,
mode: -1,
err: ErrCopyDirToFile,
},
}
for i, tc := range tests {
mode, err := copyMode(tc.srcPath, tc.dstPath, tc.srcMode, tc.dstMode, tc.dstExists)
if mode != tc.mode {
t.Errorf("[%d] mode: want (%d), got(%d)", i, tc.mode, mode)
}
if err != tc.err {
t.Errorf("[%d] err: want (%s), got(%s)", i, tc.err, err)
}
}
}

View File

@@ -1,33 +1,254 @@
package app
import (
"context"
"fmt"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/secret"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/dns"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var appDeployCommand = &cli.Command{
Name: "deploy",
Aliases: []string{"d"},
Usage: "Deploy an app",
var appDeployCommand = cli.Command{
Name: "deploy",
Aliases: []string{"d"},
Usage: "Deploy an app",
ArgsUsage: "<domain> [<version>]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.ForceFlag,
internal.ChaosFlag,
internal.NoDomainChecksFlag,
internal.DontWaitConvergeFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Description: `
This command deploys an app. It does not support incrementing the version of a
deployed app, for this you need to look at the "abra app upgrade <app>"
command.
Deploy an app. It does not support incrementing the version of a deployed app,
for this you need to look at the "abra app upgrade <domain>" command.
You may pass "--force" to re-deploy the same version again. This can be useful
if the container runtime has gotten into a weird state.
Chas mode ("--chaos") will deploy your local checkout of a recipe as-is,
Chaos mode ("--chaos") will deploy your local checkout of a recipe as-is,
including unstaged changes and can be useful for live hacking and testing new
recipes.
`,
Action: internal.DeployAction,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
stackName := app.StackName()
specificVersion := c.Args().Get(1)
if specificVersion != "" && internal.Chaos {
logrus.Fatal("cannot use <version> and --chaos together")
}
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
r, err := recipe.Get(app.Recipe, internal.Offline)
if err != nil {
logrus.Fatal(err)
}
if err := lint.LintForErrors(r); err != nil {
logrus.Fatal(err)
}
logrus.Debugf("checking whether %s is already deployed", stackName)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
logrus.Fatal(err)
}
secStats, err := secret.PollSecretsStatus(cl, app)
if err != nil {
logrus.Fatal(err)
}
for _, secStat := range secStats {
if !secStat.CreatedOnRemote {
logrus.Fatalf("unable to deploy, secrets not generated (%s)?", secStat.LocalName)
}
}
if isDeployed {
if internal.Force || internal.Chaos {
logrus.Warnf("%s is already deployed but continuing (--force/--chaos)", app.Name)
} else {
logrus.Fatalf("%s is already deployed", app.Name)
}
}
version := deployedVersion
if specificVersion != "" {
version = specificVersion
logrus.Debugf("choosing %s as version to deploy", version)
if err := recipe.EnsureVersion(app.Recipe, version); err != nil {
logrus.Fatal(err)
}
}
if !internal.Chaos && specificVersion == "" {
catl, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
logrus.Fatal(err)
}
versions, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
if err != nil {
logrus.Fatal(err)
}
if len(versions) == 0 && !internal.Chaos {
logrus.Warn("no published versions in catalogue, trying local recipe repository")
recipeVersions, err := recipe.GetRecipeVersions(app.Recipe, internal.Offline)
if err != nil {
logrus.Warn(err)
}
for _, recipeVersion := range recipeVersions {
for version := range recipeVersion {
versions = append(versions, version)
}
}
}
if len(versions) > 0 && !internal.Chaos {
version = versions[len(versions)-1]
logrus.Debugf("choosing %s as version to deploy", version)
if err := recipe.EnsureVersion(app.Recipe, version); err != nil {
logrus.Fatal(err)
}
} else {
head, err := git.GetRecipeHead(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
version = formatter.SmallSHA(head.String())
logrus.Warn("no versions detected, using latest commit")
}
}
if internal.Chaos {
logrus.Warnf("chaos mode engaged")
var err error
version, err = recipe.ChaosVersion(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
if err != nil {
logrus.Fatal(err)
}
for k, v := range abraShEnv {
app.Env[k] = v
}
composeFiles, err := config.GetComposeFiles(app.Recipe, app.Env)
if err != nil {
logrus.Fatal(err)
}
deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: stackName,
Prune: false,
ResolveImage: stack.ResolveImageAlways,
}
compose, err := config.GetAppComposeConfig(app.Name, deployOpts, app.Env)
if err != nil {
logrus.Fatal(err)
}
config.ExposeAllEnv(stackName, compose, app.Env)
config.SetRecipeLabel(compose, stackName, app.Recipe)
config.SetChaosLabel(compose, stackName, internal.Chaos)
config.SetChaosVersionLabel(compose, stackName, version)
config.SetUpdateLabel(compose, stackName, app.Env)
envVars, err := config.CheckEnv(app)
if err != nil {
logrus.Fatal(err)
}
for _, envVar := range envVars {
if !envVar.Present {
logrus.Warnf("env var %s missing from %s.env, present in recipe .env.sample", envVar.Name, app.Domain)
}
}
if err := internal.DeployOverview(app, version, "continue with deployment?"); err != nil {
logrus.Fatal(err)
}
if !internal.NoDomainChecks {
domainName, ok := app.Env["DOMAIN"]
if ok {
if _, err = dns.EnsureDomainsResolveSameIPv4(domainName, app.Server); err != nil {
logrus.Fatal(err)
}
} else {
logrus.Warn("skipping domain checks as no DOMAIN=... configured for app")
}
} else {
logrus.Warn("skipping domain checks as requested")
}
stack.WaitTimeout, err = config.GetTimeoutFromLabel(compose, stackName)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("set waiting timeout to %d s", stack.WaitTimeout)
if err := stack.RunDeploy(cl, deployOpts, compose, app.Name, internal.DontWaitConverge); err != nil {
logrus.Fatal(err)
}
postDeployCmds, ok := app.Env["POST_DEPLOY_CMDS"]
if ok && !internal.DontWaitConverge {
logrus.Debugf("run the following post-deploy commands: %s", postDeployCmds)
if err := internal.PostCmds(cl, app, postDeployCmds); err != nil {
logrus.Fatalf("attempting to run post deploy commands, saw: %s", err)
}
}
return nil
},
}
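
The inlined deploy action above resolves which version to deploy in a fixed order: an explicit `<version>` argument wins (and is rejected when combined with `--chaos`), otherwise the newest published catalogue version, otherwise the newest version found in the local recipe repository, otherwise the latest recipe commit; `--chaos` instead deploys the local checkout and derives a chaos version from it. A condensed, illustrative sketch of that decision order, with the helper calls replaced by plain values:

```go
package main

import "fmt"

func chooseVersion(requested string, chaos bool, catalogue, local []string, headSHA string) string {
	if requested != "" {
		return requested // explicit <version> argument
	}
	if chaos {
		return "chaos-" + headSHA // formatting here is illustrative only
	}
	versions := catalogue
	if len(versions) == 0 {
		versions = local // fall back to the local recipe repository
	}
	if len(versions) > 0 {
		return versions[len(versions)-1] // newest known version
	}
	return headSHA // nothing published: use the latest commit
}

func main() {
	fmt.Println(chooseVersion("", false, []string{"1.0.0+24.0.1", "1.1.0+24.0.2"}, nil, "abc1234"))
	// 1.1.0+24.0.2
}
```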

View File

@@ -1,6 +1,8 @@
package app
import (
"context"
"fmt"
"strconv"
"strings"
"time"
@@ -18,11 +20,12 @@ import (
"github.com/urfave/cli/v2"
)
var appErrorsCommand = &cli.Command{
Name: "errors",
Usage: "List errors for a deployed app",
var appErrorsCommand = cli.Command{
Name: "errors",
Usage: "List errors for a deployed app",
ArgsUsage: "<domain>",
Description: `
This command lists errors for a deployed app.
List errors for a deployed app.
This is a best-effort implementation and an attempt to gather a number of tips
& tricks for finding errors together into one convenient command. When an app
@@ -39,13 +42,17 @@ Got any more ideas? Please let us know:
https://git.coopcloud.tech/coop-cloud/organising/issues/new/choose
This command is best accompanied by "abra app logs <app>" which may reveal
This command is best accompanied by "abra app logs <domain>" which may reveal
further information which can help you debug the cause of an app failure via
the logs.
`,
Aliases: []string{"e"},
Flags: []cli.Flag{internal.WatchFlag},
Aliases: []string{"e"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.WatchFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
@@ -55,7 +62,7 @@ the logs.
logrus.Fatal(err)
}
isDeployed, _, err := stack.IsDeployed(c.Context, cl, app.StackName())
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
logrus.Fatal(err)
}
@ -77,21 +84,20 @@ the logs.
}
time.Sleep(2 * time.Second)
}
return nil
},
}
func checkErrors(c *cli.Context, cl *dockerClient.Client, app config.App) error {
recipe, err := recipe.Get(app.Type)
recipe, err := recipe.Get(app.Recipe, internal.Offline)
if err != nil {
return err
}
for _, service := range recipe.Config.Services {
filters := filters.NewArgs()
filters.Add("name", service.Name)
containers, err := cl.ContainerList(c.Context, types.ContainerListOptions{Filters: filters})
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name))
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters})
if err != nil {
return err
}
@ -102,7 +108,7 @@ func checkErrors(c *cli.Context, cl *dockerClient.Client, app config.App) error
}
container := containers[0]
containerState, err := cl.ContainerInspect(c.Context, container.ID)
containerState, err := cl.ContainerInspect(context.Background(), container.ID)
if err != nil {
logrus.Fatal(err)
}
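
The checkErrors change above anchors the container name filter with a "^<stack>_<service>" prefix so that another stack whose name merely contains this one is not matched. A minimal standalone sketch of the same filtering idea (hypothetical helper, not part of this diff; assumes the same docker client imports used above):

// listServiceContainers returns containers whose names start with "<stack>_<service>".
func listServiceContainers(ctx context.Context, cl *dockerClient.Client, stackName, serviceName string) ([]types.Container, error) {
	f := filters.NewArgs()
	// the leading "^" anchors the match at the start of the container name
	f.Add("name", fmt.Sprintf("^%s_%s", stackName, serviceName))
	return cl.ContainerList(ctx, types.ContainerListOptions{Filters: f})
}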

View File

@ -1,15 +1,16 @@
package app
import (
"encoding/json"
"fmt"
"sort"
"strconv"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/ssh"
"coopcloud.tech/tagcmp"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
@ -19,18 +20,17 @@ var status bool
var statusFlag = &cli.BoolFlag{
Name: "status",
Aliases: []string{"S"},
Value: false,
Usage: "Show app deployment status",
Destination: &status,
}
var appType string
var typeFlag = &cli.StringFlag{
Name: "type",
Aliases: []string{"t"},
var recipeFilter string
var recipeFlag = &cli.StringFlag{
Name: "recipe",
Aliases: []string{"r"},
Value: "",
Usage: "Show apps of a specific type",
Destination: &appType,
Usage: "Show apps of a specific recipe",
Destination: &recipeFilter,
}
var listAppServer string
@ -43,53 +43,60 @@ var listAppServerFlag = &cli.StringFlag{
}
type appStatus struct {
server string
recipe string
appName string
domain string
status string
version string
upgrade string
Server string `json:"server"`
Recipe string `json:"recipe"`
AppName string `json:"appName"`
Domain string `json:"domain"`
Status string `json:"status"`
Chaos string `json:"chaos"`
ChaosVersion string `json:"chaosVersion"`
AutoUpdate string `json:"autoUpdate"`
Version string `json:"version"`
Upgrade string `json:"upgrade"`
}
type serverStatus struct {
apps []appStatus
appCount int
versionCount int
unversionedCount int
latestCount int
upgradeCount int
Apps []appStatus `json:"apps"`
AppCount int `json:"appCount"`
VersionCount int `json:"versionCount"`
UnversionedCount int `json:"unversionedCount"`
LatestCount int `json:"latestCount"`
UpgradeCount int `json:"upgradeCount"`
}
var appListCommand = &cli.Command{
Name: "list",
Usage: "List all managed apps",
var appListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Usage: "List all managed apps",
Description: `
This command looks at your local file system listing of apps and servers (e.g.
in ~/.abra/) to generate a report of all your apps.
Read the local file system listing of apps and servers (e.g. ~/.abra/) to
generate a report of all your apps.
By passing the "--status/-S" flag, you can query all your servers for the
actual live deployment status. Depending on how many servers you manage, this
can take some time.
`,
Aliases: []string{"ls"},
`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.MachineReadableFlag,
statusFlag,
listAppServerFlag,
typeFlag,
recipeFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
appFiles, err := config.LoadAppFiles(listAppServer)
if err != nil {
logrus.Fatal(err)
}
apps, err := config.GetApps(appFiles)
apps, err := config.GetApps(appFiles, recipeFilter)
if err != nil {
logrus.Fatal(err)
}
sort.Sort(config.ByServerAndType(apps))
sort.Sort(config.ByServerAndRecipe(apps))
statuses := make(map[string]map[string]string)
var catl recipe.RecipeCatalogue
@ -97,20 +104,16 @@ can take some time.
alreadySeen := make(map[string]bool)
for _, app := range apps {
if _, ok := alreadySeen[app.Server]; !ok {
if err := ssh.EnsureHostKey(app.Server); err != nil {
logrus.Fatal(fmt.Sprintf(internal.SSHFailMsg, app.Server))
}
alreadySeen[app.Server] = true
}
}
statuses, err = config.GetAppStatuses(appFiles)
statuses, err = config.GetAppStatuses(apps, internal.MachineReadable)
if err != nil {
logrus.Fatal(err)
}
var err error
catl, err = recipe.ReadRecipeCatalogue()
catl, err = recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
logrus.Fatal(err)
}
@ -124,43 +127,60 @@ can take some time.
var ok bool
if stats, ok = allStats[app.Server]; !ok {
stats = serverStatus{}
if appType == "" {
if recipeFilter == "" {
// count server, no filtering
totalServersCount++
}
}
if app.Type == appType || appType == "" {
if appType != "" {
if app.Recipe == recipeFilter || recipeFilter == "" {
if recipeFilter != "" {
// only count server if matches filter
totalServersCount++
}
appStats := appStatus{}
stats.appCount++
stats.AppCount++
totalAppsCount++
if status {
status := "unknown"
version := "unknown"
chaos := "unknown"
chaosVersion := "unknown"
autoUpdate := "unknown"
if statusMeta, ok := statuses[app.StackName()]; ok {
if currentVersion, exists := statusMeta["version"]; exists {
version = currentVersion
if currentVersion != "" {
version = currentVersion
}
}
if chaosDeploy, exists := statusMeta["chaos"]; exists {
chaos = chaosDeploy
}
if chaosDeployVersion, exists := statusMeta["chaosVersion"]; exists {
chaosVersion = chaosDeployVersion
}
if autoUpdateState, exists := statusMeta["autoUpdate"]; exists {
autoUpdate = autoUpdateState
}
if statusMeta["status"] != "" {
status = statusMeta["status"]
}
stats.versionCount++
stats.VersionCount++
} else {
stats.unversionedCount++
stats.UnversionedCount++
}
appStats.status = status
appStats.version = version
appStats.Status = status
appStats.Chaos = chaos
appStats.ChaosVersion = chaosVersion
appStats.Version = version
appStats.AutoUpdate = autoUpdate
var newUpdates []string
if version != "unknown" {
updates, err := recipe.GetRecipeCatalogueVersions(app.Type, catl)
updates, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
if err != nil {
logrus.Fatal(err)
}
@ -184,29 +204,38 @@ can take some time.
if len(newUpdates) == 0 {
if version == "unknown" {
appStats.upgrade = "unknown"
appStats.Upgrade = "unknown"
} else {
appStats.upgrade = "latest"
stats.latestCount++
appStats.Upgrade = "latest"
stats.LatestCount++
}
} else {
newUpdates = internal.ReverseStringList(newUpdates)
appStats.upgrade = strings.Join(newUpdates, "\n")
stats.upgradeCount++
appStats.Upgrade = strings.Join(newUpdates, "\n")
stats.UpgradeCount++
}
}
appStats.server = app.Server
appStats.recipe = app.Type
appStats.appName = app.Name
appStats.domain = app.Domain
appStats.Server = app.Server
appStats.Recipe = app.Recipe
appStats.AppName = app.Name
appStats.Domain = app.Domain
stats.apps = append(stats.apps, appStats)
stats.Apps = append(stats.Apps, appStats)
}
allStats[app.Server] = stats
}
if internal.MachineReadable {
jsonstring, err := json.Marshal(allStats)
if err != nil {
logrus.Fatal(err)
} else {
fmt.Println(string(jsonstring))
}
return nil
}
alreadySeen := make(map[string]bool)
for _, app := range apps {
if _, ok := alreadySeen[app.Server]; ok {
@ -215,17 +244,27 @@ can take some time.
serverStat := allStats[app.Server]
tableCol := []string{"recipe", "domain", "app name"}
tableCol := []string{"recipe", "domain"}
if status {
tableCol = append(tableCol, []string{"status", "version", "upgrade"}...)
tableCol = append(tableCol, []string{"status", "chaos", "version", "upgrade", "autoupdate"}...)
}
table := formatter.CreateTable(tableCol)
for _, appStat := range serverStat.apps {
tableRow := []string{appStat.recipe, appStat.domain, appStat.appName}
for _, appStat := range serverStat.Apps {
tableRow := []string{appStat.Recipe, appStat.Domain}
if status {
tableRow = append(tableRow, []string{appStat.status, appStat.version, appStat.upgrade}...)
chaosStatus := appStat.Chaos
if chaosStatus != "unknown" {
chaosEnabled, err := strconv.ParseBool(chaosStatus)
if err != nil {
logrus.Fatal(err)
}
if chaosEnabled && appStat.ChaosVersion != "unknown" {
chaosStatus = appStat.ChaosVersion
}
}
tableRow = append(tableRow, []string{appStat.Status, chaosStatus, appStat.Version, appStat.Upgrade, appStat.AutoUpdate}...)
}
table.Append(tableRow)
}
@ -237,14 +276,14 @@ can take some time.
fmt.Println(fmt.Sprintf(
"server: %s | total apps: %v | versioned: %v | unversioned: %v | latest: %v | upgrade: %v",
app.Server,
serverStat.appCount,
serverStat.versionCount,
serverStat.unversionedCount,
serverStat.latestCount,
serverStat.upgradeCount,
serverStat.AppCount,
serverStat.VersionCount,
serverStat.UnversionedCount,
serverStat.LatestCount,
serverStat.UpgradeCount,
))
} else {
fmt.Println(fmt.Sprintf("server: %s | total apps: %v", app.Server, serverStat.appCount))
fmt.Println(fmt.Sprintf("server: %s | total apps: %v", app.Server, serverStat.AppCount))
}
}
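
Since the status structs are now exported with JSON tags, the output marshalled above when internal.MachineReadable is set can be parsed straight back into the same shapes. A small sketch of consuming that output (hypothetical, outside this diff):

// decodeAppList parses the machine-readable app list: json.Marshal(allStats)
// above serialises a map of server name to serverStatus, so the same structs
// can be used to read it back.
func decodeAppList(raw []byte) (map[string]serverStatus, error) {
	allStats := make(map[string]serverStatus)
	if err := json.Unmarshal(raw, &allStats); err != nil {
		return nil, err
	}
	return allStats, nil
}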

View File

@ -1,51 +1,125 @@
package app
import (
"fmt"
"context"
"io"
"os"
"slices"
"sync"
"time"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/service"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var logOpts = types.ContainerLogsOptions{
Details: false,
Follow: true,
ShowStderr: true,
ShowStdout: true,
Tail: "20",
Timestamps: true,
var appLogsCommand = cli.Command{
Name: "logs",
Aliases: []string{"l"},
ArgsUsage: "<domain> [<service>]",
Usage: "Tail app logs",
Flags: []cli.Flag{
internal.StdErrOnlyFlag,
internal.SinceLogsFlag,
internal.DebugFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
stackName := app.StackName()
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
logrus.Fatal(err)
}
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
}
serviceName := c.Args().Get(1)
serviceNames := []string{}
if serviceName != "" {
serviceNames = []string{serviceName}
}
err = tailLogs(cl, app, serviceNames)
if err != nil {
logrus.Fatal(err)
}
return nil
},
}
// stackLogs lists logs for all stack services
func stackLogs(c *cli.Context, stackName string, client *dockerClient.Client) {
filters := filters.NewArgs()
filters.Add("name", stackName)
serviceOpts := types.ServiceListOptions{Filters: filters}
services, err := client.ServiceList(c.Context, serviceOpts)
// tailLogs prints logs for the given app, optionally filtered by service
// names. It also checks whether the latest task is running and, if it is not,
// prints the past tasks.
func tailLogs(cl *dockerClient.Client, app config.App, serviceNames []string) error {
f, err := app.Filters(true, false, serviceNames...)
if err != nil {
logrus.Fatal(err)
return err
}
services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: f})
if err != nil {
return err
}
var wg sync.WaitGroup
for _, service := range services {
wg.Add(1)
go func(s string) {
if internal.StdErrOnly {
logOpts.ShowStdout = false
filters := filters.NewArgs()
filters.Add("name", service.Spec.Name)
tasks, err := cl.TaskList(context.Background(), types.TaskListOptions{Filters: f})
if err != nil {
return err
}
if len(tasks) > 0 {
// Sort the tasks by CreatedAt in descending order so that the most
// recently created task comes first.
slices.SortFunc[[]swarm.Task](tasks, func(t1, t2 swarm.Task) int {
return int(t2.Meta.CreatedAt.Unix() - t1.Meta.CreatedAt.Unix())
})
lastTask := tasks[0].Status
if lastTask.State != swarm.TaskStateRunning {
for _, task := range tasks {
logrus.Errorf("[%s] %s State %s: %s", service.Spec.Name, task.Meta.CreatedAt.Format(time.RFC3339), task.Status.State, task.Status.Err)
}
}
}
logs, err := client.ServiceLogs(c.Context, s, logOpts)
// Collect the logs in a go routine, so the logs from all services are
// collected in parallel.
wg.Add(1)
go func(serviceID string) {
logs, err := cl.ServiceLogs(context.Background(), serviceID, types.ContainerLogsOptions{
ShowStderr: true,
ShowStdout: !internal.StdErrOnly,
Since: internal.SinceLogs,
Until: "",
Timestamps: true,
Follow: true,
Tail: "20",
Details: false,
})
if err != nil {
logrus.Fatal(err)
}
@ -58,65 +132,8 @@ func stackLogs(c *cli.Context, stackName string, client *dockerClient.Client) {
}(service.ID)
}
// Wait for all log streams to be closed.
wg.Wait()
os.Exit(0)
}
var appLogsCommand = &cli.Command{
Name: "logs",
Aliases: []string{"l"},
ArgsUsage: "[<service>]",
Usage: "Tail app logs",
Flags: []cli.Flag{
internal.StdErrOnlyFlag,
},
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
serviceName := c.Args().Get(1)
if serviceName == "" {
logrus.Debugf("tailing logs for all %s services", app.Type)
stackLogs(c, app.StackName(), cl)
} else {
logrus.Debugf("tailing logs for %s", serviceName)
if err := tailServiceLogs(c, cl, app, serviceName); err != nil {
logrus.Fatal(err)
}
}
return nil
},
}
func tailServiceLogs(c *cli.Context, cl *dockerClient.Client, app config.App, serviceName string) error {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("%s_%s", app.StackName(), serviceName))
chosenService, err := service.GetService(c.Context, cl, filters, internal.NoInput)
if err != nil {
logrus.Fatal(err)
}
if internal.StdErrOnly {
logOpts.ShowStdout = false
}
logs, err := cl.ServiceLogs(c.Context, chosenService.ID, logOpts)
if err != nil {
logrus.Fatal(err)
}
defer logs.Close()
_, err = io.Copy(os.Stdout, logs)
if err != nil && err != io.EOF {
logrus.Fatal(err)
}
return nil
}
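
The task ordering in tailLogs relies on a descending sort by creation time so that tasks[0] is always the most recent task. The same idea in isolation (a sketch, assuming the swarm and slices packages imported above):

// newestFirst sorts swarm tasks so the most recently created task is first.
func newestFirst(tasks []swarm.Task) {
	slices.SortFunc(tasks, func(a, b swarm.Task) int {
		// comparing b against a yields a descending order on CreatedAt
		return b.Meta.CreatedAt.Compare(a.Meta.CreatedAt)
	})
}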

View File

@ -1,17 +1,30 @@
package app
import (
"fmt"
"path"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/jsontable"
"coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/secret"
"github.com/AlecAivazis/survey/v2"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var appNewDescription = `
This command takes a recipe and uses it to create a new app. This new app
configuration is stored in your ~/.abra directory under the appropriate server.
Take a recipe and use it to create a new app. This new app configuration is
stored in your ~/.abra directory under the appropriate server.
This command does not deploy your app for you. You will need to run "abra app
deploy <app>" to do so.
deploy <domain>" to do so.
You can see what recipes are available (i.e. values for the <recipe> argument)
by running "abra recipe ls".
@ -26,19 +39,230 @@ pass store (see passwordstore.org for more). The pass command must be available
on your $PATH.
`
var appNewCommand = &cli.Command{
var appNewCommand = cli.Command{
Name: "new",
Usage: "Create a new app",
Aliases: []string{"n"},
Usage: "Create a new app",
Description: appNewDescription,
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.NewAppServerFlag,
internal.DomainFlag,
internal.NewAppNameFlag,
internal.PassFlag,
internal.SecretsFlag,
internal.OfflineFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
ArgsUsage: "[<recipe>] [<version>]",
BashComplete: func(ctx *cli.Context) {
args := ctx.Args()
switch args.Len() {
case 0:
autocomplete.RecipeNameComplete(ctx)
case 1:
autocomplete.RecipeVersionComplete(ctx.Args().Get(0))
}
},
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipe(c)
if !internal.Chaos {
if err := recipePkg.EnsureIsClean(recipe.Name); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipePkg.EnsureUpToDate(recipe.Name); err != nil {
logrus.Fatal(err)
}
}
if c.Args().Get(1) == "" {
if err := recipePkg.EnsureLatest(recipe.Name); err != nil {
logrus.Fatal(err)
}
} else {
if err := recipePkg.EnsureVersion(recipe.Name, c.Args().Get(1)); err != nil {
logrus.Fatal(err)
}
}
}
if err := ensureServerFlag(); err != nil {
logrus.Fatal(err)
}
if err := ensureDomainFlag(recipe, internal.NewAppServer); err != nil {
logrus.Fatal(err)
}
sanitisedAppName := config.SanitiseAppName(internal.Domain)
logrus.Debugf("%s sanitised as %s for new app", internal.Domain, sanitisedAppName)
if err := config.TemplateAppEnvSample(
recipe.Name,
internal.Domain,
internal.NewAppServer,
internal.Domain,
); err != nil {
logrus.Fatal(err)
}
var secrets AppSecrets
var secretTable *jsontable.JSONTable
if internal.Secrets {
sampleEnv, err := recipe.SampleEnv()
if err != nil {
logrus.Fatal(err)
}
composeFiles, err := config.GetComposeFiles(recipe.Name, sampleEnv)
if err != nil {
logrus.Fatal(err)
}
envSamplePath := path.Join(config.RECIPES_DIR, recipe.Name, ".env.sample")
secretsConfig, err := secret.ReadSecretsConfig(envSamplePath, composeFiles, config.StackName(internal.Domain))
if err != nil {
return err
}
if err := promptForSecrets(recipe.Name, secretsConfig); err != nil {
logrus.Fatal(err)
}
cl, err := client.New(internal.NewAppServer)
if err != nil {
logrus.Fatal(err)
}
secrets, err = createSecrets(cl, secretsConfig, sanitisedAppName)
if err != nil {
logrus.Fatal(err)
}
secretCols := []string{"Name", "Value"}
secretTable = formatter.CreateTable(secretCols)
for name, val := range secrets {
secretTable.Append([]string{name, val})
}
}
if internal.NewAppServer == "default" {
internal.NewAppServer = "local"
}
tableCol := []string{"server", "recipe", "domain"}
table := formatter.CreateTable(tableCol)
table.Append([]string{internal.NewAppServer, recipe.Name, internal.Domain})
fmt.Println(fmt.Sprintf("A new %s app has been created! Here is an overview:", recipe.Name))
fmt.Println("")
table.Render()
fmt.Println("")
fmt.Println("You can configure this app by running the following:")
fmt.Println(fmt.Sprintf("\n abra app config %s", internal.Domain))
fmt.Println("")
fmt.Println("You can deploy this app by running the following:")
fmt.Println(fmt.Sprintf("\n abra app deploy %s", internal.Domain))
if len(secrets) > 0 {
fmt.Println("")
fmt.Println("Here are your generated secrets:")
fmt.Println("")
secretTable.Render()
logrus.Warn("generated secrets are not shown again, please take note of them NOW")
}
return nil
},
ArgsUsage: "<recipe>",
Action: internal.NewAction,
BashComplete: autocomplete.RecipeNameComplete,
}
// AppSecrets represents all app secrets
type AppSecrets map[string]string
// createSecrets creates all secrets for a new app.
func createSecrets(cl *dockerClient.Client, secretsConfig map[string]secret.Secret, sanitisedAppName string) (AppSecrets, error) {
secrets, err := secret.GenerateSecrets(cl, secretsConfig, internal.NewAppServer)
if err != nil {
return nil, err
}
if internal.Pass {
for secretName := range secrets {
secretValue := secrets[secretName]
if err := secret.PassInsertSecret(
secretValue,
secretName,
internal.Domain,
internal.NewAppServer,
); err != nil {
return nil, err
}
}
}
return secrets, nil
}
// ensureDomainFlag checks if the domain flag was used. If not, it asks the user for it.
func ensureDomainFlag(recipe recipe.Recipe, server string) error {
if internal.Domain == "" && !internal.NoInput {
prompt := &survey.Input{
Message: "Specify app domain",
Default: fmt.Sprintf("%s.%s", recipe.Name, server),
}
if err := survey.AskOne(prompt, &internal.Domain); err != nil {
return err
}
}
if internal.Domain == "" {
return fmt.Errorf("no domain provided")
}
return nil
}
// promptForSecrets asks if we should generate secrets for a new app.
func promptForSecrets(recipeName string, secretsConfig map[string]secret.Secret) error {
if len(secretsConfig) == 0 {
logrus.Debugf("%s has no secrets to generate, skipping...", recipeName)
return nil
}
if !internal.Secrets && !internal.NoInput {
prompt := &survey.Confirm{
Message: "Generate app secrets?",
}
if err := survey.AskOne(prompt, &internal.Secrets); err != nil {
return err
}
}
return nil
}
// ensureServerFlag checks if the server flag was used. If not, it asks the user for it.
func ensureServerFlag() error {
servers, err := config.GetServers()
if err != nil {
return err
}
if internal.NewAppServer == "" && !internal.NoInput {
prompt := &survey.Select{
Message: "Select app server:",
Options: servers,
}
if err := survey.AskOne(prompt, &internal.NewAppServer); err != nil {
return err
}
}
if internal.NewAppServer == "" {
return fmt.Errorf("no server provided")
}
return nil
}

View File

@ -1,6 +1,7 @@
package app
import (
"context"
"strings"
"time"
@ -14,20 +15,22 @@ import (
"github.com/buger/goterm"
dockerFormatter "github.com/docker/cli/cli/command/formatter"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var appPsCommand = &cli.Command{
var appPsCommand = cli.Command{
Name: "ps",
Usage: "Check app status",
Description: "This command shows a more detailed status output of a specific deployed app.",
Aliases: []string{"p"},
Usage: "Check app status",
ArgsUsage: "<domain>",
Description: "Show a more detailed status output of a specific deployed app",
Flags: []cli.Flag{
internal.WatchFlag,
internal.DebugFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
@ -37,7 +40,7 @@ var appPsCommand = &cli.Command{
logrus.Fatal(err)
}
isDeployed, _, err := stack.IsDeployed(c.Context, cl, app.StackName())
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
logrus.Fatal(err)
}
@ -63,10 +66,12 @@ var appPsCommand = &cli.Command{
// showPSOutput renders ps output.
func showPSOutput(c *cli.Context, app config.App, cl *dockerClient.Client) {
filters := filters.NewArgs()
filters.Add("name", app.StackName())
filters, err := app.Filters(true, true)
if err != nil {
logrus.Fatal(err)
}
containers, err := cl.ContainerList(c.Context, types.ContainerListOptions{Filters: filters})
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters})
if err != nil {
logrus.Fatal(err)
}

View File

@ -1,8 +1,11 @@
package app
import (
"context"
"fmt"
"log"
"os"
"time"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
@ -10,37 +13,49 @@ import (
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/AlecAivazis/survey/v2"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/volume"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
// Volumes stores the variable from VolumesFlag
var Volumes bool
var appRemoveCommand = cli.Command{
Name: "remove",
Aliases: []string{"rm"},
ArgsUsage: "<domain>",
Usage: "Remove all app data, locally and remotely",
Description: `
This command removes everything related to an app which is already undeployed.
// VolumesFlag is used to specify if volumes should be deleted when deleting an app
var VolumesFlag = &cli.BoolFlag{
Name: "volumes",
Value: false,
Destination: &Volumes,
}
By default, it will prompt for confirmation before proceeding. All secrets,
volumes and the local app env file will be deleted.
var appRemoveCommand = &cli.Command{
Name: "remove",
Usage: "Remove an already undeployed app",
Aliases: []string{"rm"},
Only run this command when you are sure you want to completely remove the app
and all associated app data. This is a destructive action, so be careful!
If you would like to delete specific volumes or secrets, please use removal
sub-commands under "app volume" and "app secret" instead.
Please note, if you delete the local app env file without removing volumes and
secrets first, Abra will *not* be able to help you remove them afterwards.
To delete everything without prompting, use the "--force/-f" or the "--no-input/-n"
flag.
`,
Flags: []cli.Flag{
VolumesFlag,
internal.ForceFlag,
internal.DebugFlag,
internal.NoInputFlag,
internal.OfflineFlag,
},
BashComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if !internal.Force {
if !internal.Force && !internal.NoInput {
response := false
prompt := &survey.Confirm{
Message: fmt.Sprintf("about to remove %s, are you sure?", app.Name),
}
msg := "ALERTA ALERTA: this will completely remove %s data and configurations locally and remotely, are you sure?"
prompt := &survey.Confirm{Message: fmt.Sprintf(msg, app.Name)}
if err := survey.AskOne(prompt, &response); err != nil {
logrus.Fatal(err)
}
@ -54,17 +69,20 @@ var appRemoveCommand = &cli.Command{
logrus.Fatal(err)
}
isDeployed, _, err := stack.IsDeployed(c.Context, cl, app.StackName())
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
logrus.Fatal(err)
}
if isDeployed {
logrus.Fatalf("%s is still deployed. Run \"abra app undeploy %s \" or pass --force", app.Name, app.Name)
logrus.Fatalf("%s is still deployed. Run \"abra app undeploy %s\"", app.Name, app.Name)
}
fs := filters.NewArgs()
fs.Add("name", app.StackName())
secretList, err := cl.SecretList(c.Context, types.SecretListOptions{Filters: fs})
fs, err := app.Filters(false, false)
if err != nil {
logrus.Fatal(err)
}
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: fs})
if err != nil {
logrus.Fatal(err)
}
@ -78,23 +96,8 @@ var appRemoveCommand = &cli.Command{
}
if len(secrets) > 0 {
var secretNamesToRemove []string
if !internal.Force {
secretsPrompt := &survey.MultiSelect{
Message: "which secrets do you want to remove?",
Help: "'x' indicates selected, enter / return to confirm, ctrl-c to exit, vim mode is enabled",
VimMode: true,
Options: secretNames,
Default: secretNames,
}
if err := survey.AskOne(secretsPrompt, &secretNamesToRemove); err != nil {
logrus.Fatal(err)
}
}
for _, name := range secretNamesToRemove {
err := cl.SecretRemove(c.Context, secrets[name])
for _, name := range secretNames {
err := cl.SecretRemove(context.Background(), secrets[name])
if err != nil {
logrus.Fatal(err)
}
@ -104,7 +107,13 @@ var appRemoveCommand = &cli.Command{
logrus.Info("no secrets to remove")
}
volumeListOKBody, err := cl.VolumeList(c.Context, fs)
fs, err = app.Filters(false, true)
if err != nil {
logrus.Fatal(err)
}
volumeListOptions := volume.ListOptions{Filters: fs}
volumeListOKBody, err := cl.VolumeList(context.Background(), volumeListOptions)
volumeList := volumeListOKBody.Volumes
if err != nil {
logrus.Fatal(err)
@ -116,43 +125,43 @@ var appRemoveCommand = &cli.Command{
}
if len(vols) > 0 {
if Volumes {
var removeVols []string
if !internal.Force {
volumesPrompt := &survey.MultiSelect{
Message: "which volumes do you want to remove?",
Help: "'x' indicates selected, enter / return to confirm, ctrl-c to exit, vim mode is enabled",
VimMode: true,
Options: vols,
Default: vols,
}
if err := survey.AskOne(volumesPrompt, &removeVols); err != nil {
logrus.Fatal(err)
}
for _, vol := range vols {
err = retryFunc(5, func() error {
return cl.VolumeRemove(context.Background(), vol, internal.Force) // last argument is for force removing
})
if err != nil {
log.Fatalf("removing volumes failed: %s", err)
}
for _, vol := range removeVols {
err := cl.VolumeRemove(c.Context, vol, internal.Force) // last argument is for force removing
if err != nil {
logrus.Fatal(err)
}
logrus.Info(fmt.Sprintf("volume %s removed", vol))
}
} else {
logrus.Info("no volumes were removed")
logrus.Info(fmt.Sprintf("volume %s removed", vol))
}
} else {
if Volumes {
logrus.Info("no volumes to remove")
}
logrus.Info("no volumes to remove")
}
err = os.Remove(app.Path)
if err != nil {
if err = os.Remove(app.Path); err != nil {
logrus.Fatal(err)
}
logrus.Info(fmt.Sprintf("file: %s removed", app.Path))
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
// retryFunc retries the given function up to the given number of retries. After
// the nth attempt it waits (n + 1)^2 seconds before the next one (starting with
// n=0, i.e. 1s, 4s, 9s, ...). It returns an error if the function still fails
// after the last retry.
func retryFunc(retries int, fn func() error) error {
for i := 0; i < retries; i++ {
err := fn()
if err == nil {
return nil
}
if i+1 < retries {
sleep := time.Duration(i+1) * time.Duration(i+1)
logrus.Infof("%s: waiting %d seconds before next retry", err, sleep)
time.Sleep(sleep * time.Second)
}
}
return fmt.Errorf("%d retries failed", retries)
}
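
retryFunc is used above to tolerate transient volume-removal failures, with quadratically growing waits between attempts. A hedged usage sketch wrapping an arbitrary flaky call (hypothetical wrapper, assuming the docker client import):

// removeVolumeWithRetry retries a volume removal up to three times, waiting
// 1s and then 4s between attempts.
func removeVolumeWithRetry(cl *dockerClient.Client, volName string) error {
	return retryFunc(3, func() error {
		return cl.VolumeRemove(context.Background(), volName, false)
	})
}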

26
cli/app/remove_test.go Normal file
View File

@ -0,0 +1,26 @@
package app
import (
"fmt"
"testing"
)
func TestRetryFunc(t *testing.T) {
err := retryFunc(1, func() error { return nil })
if err != nil {
t.Errorf("should not return an error: %s", err)
}
i := 0
fn := func() error {
i++
return fmt.Errorf("oh no, something went wrong!")
}
err = retryFunc(2, fn)
if err == nil {
t.Error("should return an error")
}
if i != 2 {
t.Errorf("The function should have been called 1 times, got %d", i)
}
}

View File

@ -1,6 +1,7 @@
package app
import (
"context"
"errors"
"fmt"
@ -13,11 +14,16 @@ import (
"github.com/urfave/cli/v2"
)
var appRestartCommand = &cli.Command{
Name: "restart",
Usage: "Restart an app",
Aliases: []string{"re"},
ArgsUsage: "<service>",
var appRestartCommand = cli.Command{
Name: "restart",
Aliases: []string{"re"},
Usage: "Restart an app",
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Description: `This command restarts a service within a deployed app.`,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
@ -34,25 +40,34 @@ var appRestartCommand = &cli.Command{
logrus.Fatal(err)
}
serviceName := fmt.Sprintf("%s_%s", app.StackName(), serviceNameShort)
logrus.Debugf("attempting to scale %s to 0 (restart logic)", serviceName)
if err := upstream.RunServiceScale(c.Context, cl, serviceName, 0); err != nil {
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
logrus.Fatal(err)
}
if err := stack.WaitOnService(c.Context, cl, serviceName, app.Name); err != nil {
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
}
serviceName := fmt.Sprintf("%s_%s", app.StackName(), serviceNameShort)
logrus.Debugf("attempting to scale %s to 0 (restart logic)", serviceName)
if err := upstream.RunServiceScale(context.Background(), cl, serviceName, 0); err != nil {
logrus.Fatal(err)
}
if err := stack.WaitOnService(context.Background(), cl, serviceName, app.Name); err != nil {
logrus.Fatal(err)
}
logrus.Debugf("%s has been scaled to 0 (restart logic)", serviceName)
logrus.Debugf("attempting to scale %s to 1 (restart logic)", serviceName)
if err := upstream.RunServiceScale(c.Context, cl, serviceName, 1); err != nil {
if err := upstream.RunServiceScale(context.Background(), cl, serviceName, 1); err != nil {
logrus.Fatal(err)
}
if err := stack.WaitOnService(c.Context, cl, serviceName, app.Name); err != nil {
if err := stack.WaitOnService(context.Background(), cl, serviceName, app.Name); err != nil {
logrus.Fatal(err)
}

View File

@ -1,76 +1,79 @@
package app
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var restoreAllServices bool
var restoreAllServicesFlag = &cli.BoolFlag{
Name: "all",
Value: false,
Destination: &restoreAllServices,
Aliases: []string{"a"},
Usage: "Restore all services",
var targetPath string
var targetPathFlag = &cli.StringFlag{
Name: "target, t",
Usage: "Target path",
Destination: &targetPath,
}
var appRestoreCommand = &cli.Command{
var appRestoreCommand = cli.Command{
Name: "restore",
Usage: "Restore an app from a backup",
Aliases: []string{"rs"},
Flags: []cli.Flag{restoreAllServicesFlag},
ArgsUsage: "<service> [<backup file>]",
Usage: "Restore an app backup",
ArgsUsage: "<domain> <service>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
targetPathFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if c.Args().Len() > 1 && restoreAllServices {
internal.ShowSubcommandHelpAndError(c, errors.New("cannot use <service>/<backup file> and '--all' together"))
}
abraSh := path.Join(config.RECIPES_DIR, app.Type, "abra.sh")
if _, err := os.Stat(abraSh); err != nil {
if os.IsNotExist(err) {
logrus.Fatalf("%s does not exist?", abraSh)
}
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
sourceCmd := fmt.Sprintf("source %s", abraSh)
execCmd := "abra_restore"
if !restoreAllServices {
serviceName := c.Args().Get(1)
if serviceName == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("no service(s) target provided"))
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
execCmd = fmt.Sprintf("abra_restore_%s", serviceName)
}
bytes, err := ioutil.ReadFile(abraSh)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
if !strings.Contains(string(bytes), execCmd) {
logrus.Fatalf("%s doesn't have a %s function", app.Type, execCmd)
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
logrus.Fatal(err)
}
backupFile := c.Args().Get(2)
if backupFile != "" {
execCmd = fmt.Sprintf("%s %s", execCmd, backupFile)
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
logrus.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if targetPath != "" {
logrus.Debugf("including TARGET=%s in backupbot exec invocation", targetPath)
execEnv = append(execEnv, fmt.Sprintf("TARGET=%s", targetPath))
}
sourceAndExec := fmt.Sprintf("%s; %s", sourceCmd, execCmd)
cmd := exec.Command("bash", "-c", sourceAndExec)
if err := internal.RunCmd(cmd); err != nil {
if err := internal.RunBackupCmdRemote(cl, "restore", targetContainer.ID, execEnv); err != nil {
logrus.Fatal(err)
}

View File

@ -1,6 +1,7 @@
package app
import (
"context"
"fmt"
"coopcloud.tech/abra/pkg/autocomplete"
@ -17,16 +18,21 @@ import (
"github.com/urfave/cli/v2"
)
var appRollbackCommand = &cli.Command{
var appRollbackCommand = cli.Command{
Name: "rollback",
Usage: "Roll an app back to a previous version",
Aliases: []string{"rl"},
ArgsUsage: "<app>",
Usage: "Roll an app back to a previous version",
ArgsUsage: "<domain> [<version>]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.ForceFlag,
internal.ChaosFlag,
internal.NoDomainChecksFlag,
internal.DontWaitConvergeFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Description: `
This command rolls an app back to a previous version if one exists.
@ -34,9 +40,9 @@ You may pass "--force/-f" to downgrade to the same version again. This can be
useful if the container runtime has gotten into a weird state.
This action could be destructive, please ensure you have a copy of your app
data beforehand - see "abra app backup <app>" for more.
data beforehand.
Chas mode ("--chaos") will deploy your local checkout of a recipe as-is,
Chaos mode ("--chaos") will deploy your local checkout of a recipe as-is,
including unstaged changes and can be useful for live hacking and testing new
recipes.
`,
@ -45,11 +51,32 @@ recipes.
app := internal.ValidateApp(c)
stackName := app.StackName()
if err := recipe.EnsureUpToDate(app.Type); err != nil {
specificVersion := c.Args().Get(1)
if specificVersion != "" && internal.Chaos {
logrus.Fatal("cannot use <version> and --chaos together")
}
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
r, err := recipe.Get(app.Type)
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
r, err := recipe.Get(app.Recipe, internal.Offline)
if err != nil {
logrus.Fatal(err)
}
@ -65,7 +92,7 @@ recipes.
logrus.Debugf("checking whether %s is already deployed", stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(c.Context, cl, stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
logrus.Fatal(err)
}
@ -74,27 +101,51 @@ recipes.
logrus.Fatalf("%s is not deployed?", app.Name)
}
catl, err := recipe.ReadRecipeCatalogue()
catl, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
logrus.Fatal(err)
}
versions, err := recipe.GetRecipeCatalogueVersions(app.Type, catl)
versions, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
if err != nil {
logrus.Fatal(err)
}
if len(versions) == 0 && !internal.Chaos {
logrus.Fatalf("no published releases for %s in the recipe catalogue?", app.Type)
logrus.Warn("no published versions in catalogue, trying local recipe repository")
recipeVersions, err := recipe.GetRecipeVersions(app.Recipe, internal.Offline)
if err != nil {
logrus.Warn(err)
}
for _, recipeVersion := range recipeVersions {
for version := range recipeVersion {
versions = append(versions, version)
}
}
}
var availableDowngrades []string
if deployedVersion == "unknown" {
availableDowngrades = versions
logrus.Warnf("failed to determine version of deployed %s", app.Name)
logrus.Warnf("failed to determine deployed version of %s", app.Name)
}
if deployedVersion != "unknown" && !internal.Chaos {
if specificVersion != "" {
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil {
logrus.Fatal(err)
}
parsedSpecificVersion, err := tagcmp.Parse(specificVersion)
if err != nil {
logrus.Fatal(err)
}
if parsedSpecificVersion.IsGreaterThan(parsedDeployedVersion) || parsedSpecificVersion.Equals(parsedDeployedVersion) {
logrus.Fatalf("%s is not a downgrade for %s?", deployedVersion, specificVersion)
}
availableDowngrades = append(availableDowngrades, specificVersion)
}
if deployedVersion != "unknown" && !internal.Chaos && specificVersion == "" {
for _, version := range versions {
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil {
@ -104,28 +155,26 @@ recipes.
if err != nil {
logrus.Fatal(err)
}
if parsedVersion != parsedDeployedVersion && parsedVersion.IsLessThan(parsedDeployedVersion) {
if parsedVersion.IsLessThan(parsedDeployedVersion) && !(parsedVersion.Equals(parsedDeployedVersion)) {
availableDowngrades = append(availableDowngrades, version)
}
}
if len(availableDowngrades) == 0 {
if len(availableDowngrades) == 0 && !internal.Force {
logrus.Info("no available downgrades, you're on oldest ✌️")
return nil
}
}
availableDowngrades = internal.ReverseStringList(availableDowngrades)
var chosenDowngrade string
if !internal.Chaos {
if internal.Force {
chosenDowngrade = availableDowngrades[0]
logrus.Debugf("choosing %s as version to downgrade to (--force)", chosenDowngrade)
if len(availableDowngrades) > 0 && !internal.Chaos {
if internal.Force || internal.NoInput || specificVersion != "" {
chosenDowngrade = availableDowngrades[len(availableDowngrades)-1]
logrus.Debugf("choosing %s as version to downgrade to (--force/--no-input)", chosenDowngrade)
} else {
prompt := &survey.Select{
Message: fmt.Sprintf("Please select a downgrade (current version: %s):", deployedVersion),
Options: availableDowngrades,
Options: internal.ReverseStringList(availableDowngrades),
}
if err := survey.AskOne(prompt, &chosenDowngrade); err != nil {
return err
@ -134,7 +183,7 @@ recipes.
}
if !internal.Chaos {
if err := recipe.EnsureVersion(app.Type, chosenDowngrade); err != nil {
if err := recipe.EnsureVersion(app.Recipe, chosenDowngrade); err != nil {
logrus.Fatal(err)
}
}
@ -142,13 +191,13 @@ recipes.
if internal.Chaos {
logrus.Warn("chaos mode engaged")
var err error
chosenDowngrade, err = recipe.ChaosVersion(app.Type)
chosenDowngrade, err = recipe.ChaosVersion(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Type, "abra.sh")
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
if err != nil {
logrus.Fatal(err)
@ -157,7 +206,7 @@ recipes.
app.Env[k] = v
}
composeFiles, err := config.GetAppComposeFiles(app.Type, app.Env)
composeFiles, err := config.GetComposeFiles(app.Recipe, app.Env)
if err != nil {
logrus.Fatal(err)
}
@ -171,14 +220,18 @@ recipes.
if err != nil {
logrus.Fatal(err)
}
config.ExposeAllEnv(stackName, compose, app.Env)
config.SetRecipeLabel(compose, stackName, app.Recipe)
config.SetChaosLabel(compose, stackName, internal.Chaos)
config.SetChaosVersionLabel(compose, stackName, chosenDowngrade)
config.SetUpdateLabel(compose, stackName, app.Env)
if !internal.Force {
if err := internal.NewVersionOverview(app, deployedVersion, chosenDowngrade, ""); err != nil {
logrus.Fatal(err)
}
// NOTE(d1): no release notes implemented for rolling back
if err := internal.NewVersionOverview(app, deployedVersion, chosenDowngrade, ""); err != nil {
logrus.Fatal(err)
}
if err := stack.RunDeploy(cl, deployOpts, compose, app.StackName(), internal.DontWaitConverge); err != nil {
if err := stack.RunDeploy(cl, deployOpts, compose, stackName, internal.DontWaitConverge); err != nil {
logrus.Fatal(err)
}
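
The downgrade filter above keeps only catalogue versions strictly older than the deployed one. The same comparison in isolation (a sketch using the tagcmp calls already imported in this diff):

// olderVersions returns the versions from candidates that are strictly older
// than deployed, using the same tagcmp comparisons as the rollback command.
func olderVersions(deployed string, candidates []string) ([]string, error) {
	parsedDeployed, err := tagcmp.Parse(deployed)
	if err != nil {
		return nil, err
	}
	var downgrades []string
	for _, candidate := range candidates {
		parsed, err := tagcmp.Parse(candidate)
		if err != nil {
			return nil, err
		}
		if parsed.IsLessThan(parsedDeployed) && !parsed.Equals(parsedDeployed) {
			downgrades = append(downgrades, candidate)
		}
	}
	return downgrades, nil
}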

View File

@ -1,6 +1,7 @@
package app
import (
"context"
"errors"
"fmt"
@ -18,26 +19,27 @@ import (
var user string
var userFlag = &cli.StringFlag{
Name: "user",
Name: "user, u",
Value: "",
Destination: &user,
}
var noTTY bool
var noTTYFlag = &cli.BoolFlag{
Name: "no-tty",
Value: false,
Name: "no-tty, t",
Destination: &noTTY,
}
var appRunCommand = &cli.Command{
Name: "run",
var appRunCommand = cli.Command{
Name: "run",
Aliases: []string{"r"},
Flags: []cli.Flag{
internal.DebugFlag,
noTTYFlag,
userFlag,
},
Aliases: []string{"r"},
ArgsUsage: "<service> <args>...",
Before: internal.SubCommandBefore,
ArgsUsage: "<domain> <service> <args>...",
Usage: "Run a command in a service container",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
@ -57,11 +59,11 @@ var appRunCommand = &cli.Command{
}
serviceName := c.Args().Get(1)
stackAndServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)
stackAndServiceName := fmt.Sprintf("^%s_%s", app.StackName(), serviceName)
filters := filters.NewArgs()
filters.Add("name", stackAndServiceName)
targetContainer, err := containerPkg.GetContainer(c.Context, cl, filters, true)
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, false)
if err != nil {
logrus.Fatal(err)
}
@ -89,7 +91,7 @@ var appRunCommand = &cli.Command{
logrus.Fatal(err)
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
logrus.Fatal(err)
}

View File

@ -1,6 +1,7 @@
package app
import (
"context"
"errors"
"fmt"
"os"
@ -9,33 +10,72 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/secret"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var allSecrets bool
var allSecretsFlag = &cli.BoolFlag{
Name: "all",
Aliases: []string{"a"},
Value: false,
Destination: &allSecrets,
Usage: "Generate all secrets",
}
var (
allSecrets bool
allSecretsFlag = &cli.BoolFlag{
Name: "all, a",
Destination: &allSecrets,
Usage: "Generate all secrets",
}
)
var appSecretGenerateCommand = &cli.Command{
Name: "generate",
Aliases: []string{"g"},
Usage: "Generate secrets",
ArgsUsage: "<secret> <version>",
Flags: []cli.Flag{allSecretsFlag, internal.PassFlag},
var (
rmAllSecrets bool
rmAllSecretsFlag = &cli.BoolFlag{
Name: "all, a",
Destination: &rmAllSecrets,
Usage: "Remove all secrets",
}
)
var appSecretGenerateCommand = cli.Command{
Name: "generate",
Aliases: []string{"g"},
Usage: "Generate secrets",
ArgsUsage: "<domain> <secret> <version>",
Flags: []cli.Flag{
internal.DebugFlag,
allSecretsFlag,
internal.PassFlag,
internal.MachineReadableFlag,
internal.OfflineFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if c.Args().Len() == 1 && !allSecrets {
err := errors.New("missing arguments <secret>/<version> or '--all'")
internal.ShowSubcommandHelpAndError(c, err)
@ -46,33 +86,42 @@ var appSecretGenerateCommand = &cli.Command{
internal.ShowSubcommandHelpAndError(c, err)
}
secretsToCreate := make(map[string]string)
secretEnvVars := secret.ReadSecretEnvVars(app.Env)
if allSecrets {
secretsToCreate = secretEnvVars
} else {
composeFiles, err := config.GetComposeFiles(app.Recipe, app.Env)
if err != nil {
logrus.Fatal(err)
}
secrets, err := secret.ReadSecretsConfig(app.Path, composeFiles, app.StackName())
if err != nil {
logrus.Fatal(err)
}
if !allSecrets {
secretName := c.Args().Get(1)
secretVersion := c.Args().Get(2)
matches := false
for sec := range secretEnvVars {
parsed := secret.ParseSecretEnvVarName(sec)
if secretName == parsed {
secretsToCreate[sec] = secretVersion
}
}
if !matches {
s, ok := secrets[secretName]
if !ok {
logrus.Fatalf("%s doesn't exist in the env config?", secretName)
}
s.Version = secretVersion
secrets = map[string]secret.Secret{
secretName: s,
}
}
secretVals, err := secret.GenerateSecrets(secretsToCreate, app.StackName(), app.Server)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
secretVals, err := secret.GenerateSecrets(cl, secrets, app.Server)
if err != nil {
logrus.Fatal(err)
}
if internal.Pass {
for name, data := range secretVals {
if err := secret.PassInsertSecret(data, name, app.StackName(), app.Server); err != nil {
if err := secret.PassInsertSecret(data, name, app.Name, app.Server); err != nil {
logrus.Fatal(err)
}
}
@ -88,19 +137,28 @@ var appSecretGenerateCommand = &cli.Command{
for name, val := range secretVals {
table.Append([]string{name, val})
}
table.Render()
logrus.Warn("generated secrets are not shown again, please take note of them *now*")
if internal.MachineReadable {
table.JSONRender()
} else {
table.Render()
}
logrus.Warn("generated secrets are not shown again, please take note of them NOW")
return nil
},
}
var appSecretInsertCommand = &cli.Command{
Name: "insert",
Aliases: []string{"i"},
Usage: "Insert secret",
Flags: []cli.Flag{internal.PassFlag},
ArgsUsage: "<app> <secret-name> <version> <data>",
var appSecretInsertCommand = cli.Command{
Name: "insert",
Aliases: []string{"i"},
Usage: "Insert secret",
Flags: []cli.Flag{
internal.DebugFlag,
internal.PassFlag,
},
Before: internal.SubCommandBefore,
ArgsUsage: "<domain> <secret-name> <version> <data>",
BashComplete: autocomplete.AppNameComplete,
Description: `
This command inserts a secret into an app environment.
@ -121,17 +179,24 @@ Example:
internal.ShowSubcommandHelpAndError(c, errors.New("missing arguments?"))
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
name := c.Args().Get(1)
version := c.Args().Get(2)
data := c.Args().Get(3)
secretName := fmt.Sprintf("%s_%s_%s", app.StackName(), name, version)
if err := client.StoreSecret(secretName, data, app.Server); err != nil {
if err := client.StoreSecret(cl, secretName, data, app.Server); err != nil {
logrus.Fatal(err)
}
logrus.Infof("%s successfully stored on server", secretName)
if internal.Pass {
if err := secret.PassInsertSecret(data, name, app.StackName(), app.Server); err != nil {
if err := secret.PassInsertSecret(data, name, app.Name, app.Server); err != nil {
logrus.Fatal(err)
}
}
@ -140,29 +205,85 @@ Example:
},
}
var appSecretRmCommand = &cli.Command{
Name: "remove",
Usage: "Remove a secret",
Aliases: []string{"rm"},
Flags: []cli.Flag{allSecretsFlag, internal.PassFlag},
ArgsUsage: "<app> <secret-name>",
// secretRm removes a secret.
func secretRm(cl *dockerClient.Client, app config.App, secretName, parsed string) error {
if err := cl.SecretRemove(context.Background(), secretName); err != nil {
return err
}
logrus.Infof("deleted %s successfully from server", secretName)
if internal.PassRemove {
if err := secret.PassRmSecret(parsed, app.StackName(), app.Server); err != nil {
return err
}
logrus.Infof("deleted %s successfully from local pass store", secretName)
}
return nil
}
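
The remote secret name removed by secretRm follows the "<stack>_<name>_<version>" convention visible below. A small usage sketch (hypothetical wrapper, not part of this diff):

// rmNamedSecret removes a single app secret by its short name and version.
func rmNamedSecret(cl *dockerClient.Client, app config.App, name, version string) error {
	remoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), name, version)
	return secretRm(cl, app, remoteName, name)
}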
var appSecretRmCommand = cli.Command{
Name: "remove",
Aliases: []string{"rm"},
Usage: "Remove a secret",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
rmAllSecretsFlag,
internal.PassRemoveFlag,
internal.OfflineFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
ArgsUsage: "<domain> [<secret-name>]",
BashComplete: autocomplete.AppNameComplete,
Description: `
This command removes a secret from an app environment.
This command removes app secrets.
Example:
abra app secret remove myapp db_pass
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if c.Args().Get(1) != "" && allSecrets {
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
composeFiles, err := config.GetComposeFiles(app.Recipe, app.Env)
if err != nil {
logrus.Fatal(err)
}
secrets, err := secret.ReadSecretsConfig(app.Path, composeFiles, app.StackName())
if err != nil {
logrus.Fatal(err)
}
if c.Args().Get(1) != "" && rmAllSecrets {
internal.ShowSubcommandHelpAndError(c, errors.New("cannot use '<secret-name>' and '--all' together"))
}
if c.Args().Get(1) == "" && !allSecrets {
if c.Args().Get(1) == "" && !rmAllSecrets {
internal.ShowSubcommandHelpAndError(c, errors.New("no secret(s) specified?"))
}
@ -171,63 +292,12 @@ Example:
logrus.Fatal(err)
}
filters := filters.NewArgs()
filters.Add("name", app.StackName())
secretList, err := cl.SecretList(c.Context, types.SecretListOptions{Filters: filters})
filters, err := app.Filters(false, false)
if err != nil {
logrus.Fatal(err)
}
secretToRm := c.Args().Get(1)
for _, cont := range secretList {
secretName := cont.Spec.Annotations.Name
parsed := secret.ParseGeneratedSecretName(secretName, app)
if allSecrets {
if err := cl.SecretRemove(c.Context, secretName); err != nil {
logrus.Fatal(err)
}
if internal.Pass {
if err := secret.PassRmSecret(parsed, app.StackName(), app.Server); err != nil {
logrus.Fatal(err)
}
}
} else {
if parsed == secretToRm {
if err := cl.SecretRemove(c.Context, secretName); err != nil {
logrus.Fatal(err)
}
if internal.Pass {
if err := secret.PassRmSecret(parsed, app.StackName(), app.Server); err != nil {
logrus.Fatal(err)
}
}
}
}
}
return nil
},
}
var appSecretLsCommand = &cli.Command{
Name: "list",
Usage: "List all secrets",
Aliases: []string{"ls"},
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
secrets := secret.ReadSecretEnvVars(app.Env)
tableCol := []string{"Name", "Version", "Generated Name", "Created On Server"}
table := formatter.CreateTable(tableCol)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
filters := filters.NewArgs()
filters.Add("name", app.StackName())
secretList, err := cl.SecretList(c.Context, types.SecretListOptions{Filters: filters})
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: filters})
if err != nil {
logrus.Fatal(err)
}
@ -237,41 +307,122 @@ var appSecretLsCommand = &cli.Command{
remoteSecretNames[cont.Spec.Annotations.Name] = true
}
for sec := range secrets {
createdRemote := false
secretName := secret.ParseSecretEnvVarName(sec)
secVal, err := secret.ParseSecretEnvVarValue(secrets[sec])
if err != nil {
match := false
secretToRm := c.Args().Get(1)
for secretName, val := range secrets {
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, val.Version)
if _, ok := remoteSecretNames[secretRemoteName]; ok {
if secretToRm != "" {
if secretName == secretToRm {
if err := secretRm(cl, app, secretRemoteName, secretName); err != nil {
logrus.Fatal(err)
}
return nil
}
} else {
match = true
if err := secretRm(cl, app, secretRemoteName, secretName); err != nil {
logrus.Fatal(err)
}
}
}
}
if !match && secretToRm != "" {
logrus.Fatalf("%s doesn't exist on server?", secretToRm)
}
if !match {
logrus.Fatal("no secrets to remove?")
}
return nil
},
}
var appSecretLsCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
internal.ChaosFlag,
internal.MachineReadableFlag,
},
Before: internal.SubCommandBefore,
Usage: "List all secrets",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, secVal.Version)
if _, ok := remoteSecretNames[secretRemoteName]; ok {
createdRemote = true
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
tableCol := []string{"Name", "Version", "Generated Name", "Created On Server"}
table := formatter.CreateTable(tableCol)
secStats, err := secret.PollSecretsStatus(cl, app)
if err != nil {
logrus.Fatal(err)
}
for _, secStat := range secStats {
tableRow := []string{
secStat.LocalName,
secStat.Version,
secStat.RemoteName,
strconv.FormatBool(secStat.CreatedOnRemote),
}
tableRow := []string{secretName, secVal.Version, secretRemoteName, strconv.FormatBool(createdRemote)}
table.Append(tableRow)
}
if table.NumLines() > 0 {
table.Render()
if internal.MachineReadable {
table.JSONRender()
} else {
table.Render()
}
} else {
logrus.Warnf("no secrets stored for %s", app.Name)
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
var appSecretCommand = &cli.Command{
var appSecretCommand = cli.Command{
Name: "secret",
Aliases: []string{"s"},
Usage: "Manage app secrets",
ArgsUsage: "<command>",
ArgsUsage: "<domain>",
Subcommands: []*cli.Command{
appSecretGenerateCommand,
appSecretInsertCommand,
appSecretRmCommand,
appSecretLsCommand,
&appSecretGenerateCommand,
&appSecretInsertCommand,
&appSecretRmCommand,
&appSecretLsCommand,
},
}

80
cli/app/services.go Normal file
View File

@ -0,0 +1,80 @@
package app
import (
"context"
"fmt"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/service"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var appServicesCommand = cli.Command{
Name: "services",
Aliases: []string{"sr"},
Usage: "Display all services of an app",
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
logrus.Fatal(err)
}
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
}
filters, err := app.Filters(true, true)
if err != nil {
logrus.Fatal(err)
}
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters})
if err != nil {
logrus.Fatal(err)
}
tableCol := []string{"service name", "image"}
table := formatter.CreateTable(tableCol)
for _, container := range containers {
var containerNames []string
for _, containerName := range container.Names {
trimmed := strings.TrimPrefix(containerName, "/")
containerNames = append(containerNames, trimmed)
}
serviceShortName := service.ContainerToServiceName(container.Names, app.StackName())
serviceLongName := fmt.Sprintf("%s_%s", app.StackName(), serviceShortName)
tableRow := []string{
serviceLongName,
formatter.RemoveSha(container.Image),
}
table.Append(tableRow)
}
table.Render()
return nil
},
}
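A rough sketch of what the filtered container listing above amounts to, assuming app.Filters resolves to the standard Swarm stack namespace label (the helper name is illustrative):

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	dockerClient "github.com/docker/docker/client"
)

// listStackContainers scopes the container listing to a single deployed app by
// filtering on the label swarm attaches to every stack task.
func listStackContainers(cl *dockerClient.Client, stackName string) ([]types.Container, error) {
	f := filters.NewArgs()
	f.Add("label", "com.docker.stack.namespace="+stackName)
	return cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: f})
}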

View File

@ -1,22 +1,102 @@
package app
import (
"context"
"fmt"
"time"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var appUndeployCommand = &cli.Command{
Name: "undeploy",
Aliases: []string{"un"},
Usage: "Undeploy an app",
var prune bool
var pruneFlag = &cli.BoolFlag{
Name: "prune, p",
Destination: &prune,
Usage: "Prunes unused containers, networks, and dangling images for an app",
}
// pruneApp runs the equivalent of a "docker system prune" but only filtering
// against resources connected with the app deployment. It is not a system wide
// prune. Volumes are not pruned to avoid unwanted data loss.
func pruneApp(c *cli.Context, cl *dockerClient.Client, app config.App) error {
stackName := app.StackName()
ctx := context.Background()
for {
logrus.Debugf("polling for %s stack, waiting to be undeployed...", stackName)
services, err := stack.GetStackServices(ctx, cl, stackName)
if err != nil {
return err
}
if len(services) == 0 {
logrus.Debugf("%s undeployed, moving on with pruning logic", stackName)
time.Sleep(time.Second) // give runtime more time to tear down related state
break
}
time.Sleep(time.Second)
}
pruneFilters := filters.NewArgs()
stackSearch := fmt.Sprintf("%s*", stackName)
pruneFilters.Add("label", stackSearch)
cr, err := cl.ContainersPrune(ctx, pruneFilters)
if err != nil {
return err
}
cntSpaceReclaimed := formatter.ByteCountSI(cr.SpaceReclaimed)
logrus.Infof("containers pruned: %d; space reclaimed: %s", len(cr.ContainersDeleted), cntSpaceReclaimed)
nr, err := cl.NetworksPrune(ctx, pruneFilters)
if err != nil {
return err
}
logrus.Infof("networks pruned: %d", len(nr.NetworksDeleted))
ir, err := cl.ImagesPrune(ctx, pruneFilters)
if err != nil {
return err
}
imgSpaceReclaimed := formatter.ByteCountSI(ir.SpaceReclaimed)
logrus.Infof("images pruned: %d; space reclaimed: %s", len(ir.ImagesDeleted), imgSpaceReclaimed)
return nil
}
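The reclaimed-space log lines above go through formatter.ByteCountSI. Assuming it is the usual SI byte formatter, its behaviour is roughly this sketch (not the actual implementation):

import "fmt"

// byteCountSI renders a byte count using SI units, e.g. 1500000 -> "1.5 MB".
func byteCountSI(b uint64) string {
	const unit = 1000
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := uint64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "kMGTPE"[exp])
}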
var appUndeployCommand = cli.Command{
Name: "undeploy",
Aliases: []string{"un"},
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
pruneFlag,
},
Before: internal.SubCommandBefore,
Usage: "Undeploy an app",
BashComplete: autocomplete.AppNameComplete,
Description: `
This does not destroy any of the application data. However, you should remain
vigilant, as your swarm installation will consider any previously attached
volumes as eligiblef or pruning once undeployed.
This does not destroy any of the application data.
However, you should remain vigilant, as your swarm installation will consider
any previously attached volumes as eligible for pruning once undeployed.
Passing "-p/--prune" does not remove those volumes.
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
@ -29,7 +109,7 @@ volumes as eligiblef or pruning once undeployed.
logrus.Debugf("checking whether %s is already deployed", stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(c.Context, cl, stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
logrus.Fatal(err)
}
@ -43,11 +123,16 @@ volumes as eligiblef or pruning once undeployed.
}
rmOpts := stack.Remove{Namespaces: []string{app.StackName()}}
if err := stack.RunRemove(c.Context, cl, rmOpts); err != nil {
if err := stack.RunRemove(context.Background(), cl, rmOpts); err != nil {
logrus.Fatal(err)
}
if prune {
if err := pruneApp(c, cl, app); err != nil {
logrus.Fatal(err)
}
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}

View File

@ -1,6 +1,7 @@
package app
import (
"context"
"fmt"
"coopcloud.tech/abra/cli/internal"
@ -9,6 +10,7 @@ import (
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
@ -16,59 +18,82 @@ import (
"github.com/urfave/cli/v2"
)
var appUpgradeCommand = &cli.Command{
var appUpgradeCommand = cli.Command{
Name: "upgrade",
Aliases: []string{"up"},
Usage: "Upgrade an app",
ArgsUsage: "<app>",
ArgsUsage: "<domain> [<version>]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.ForceFlag,
internal.ChaosFlag,
internal.NoDomainChecksFlag,
internal.DontWaitConvergeFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Description: `
This command supports upgrading an app. You can use it to choose and roll out a
new upgrade to an existing app.
Upgrade an app. You can use it to choose and roll out a new upgrade to an
existing app.
This command specifically supports incrementing the version of running apps, as
opposed to "abra app deploy <app>" which will not change the version of a
opposed to "abra app deploy <domain>" which will not change the version of a
deployed app.
You may pass "--force/-f" to upgrade to the same version again. This can be
useful if the container runtime has gotten into a weird state.
This action could be destructive, please ensure you have a copy of your app
data beforehand - see "abra app backup <app>" for more.
data beforehand.
Chas mode ("--chaos") will deploy your local checkout of a recipe as-is,
Chaos mode ("--chaos") will deploy your local checkout of a recipe as-is,
including unstaged changes and can be useful for live hacking and testing new
recipes.
`,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
stackName := app.StackName()
if err := recipe.EnsureUpToDate(app.Type); err != nil {
logrus.Fatal(err)
specificVersion := c.Args().Get(1)
if specificVersion != "" && internal.Chaos {
logrus.Fatal("cannot use <version> and --chaos together")
}
r, err := recipe.Get(app.Type)
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
recipe, err := recipePkg.Get(app.Recipe, internal.Offline)
if err != nil {
logrus.Fatal(err)
}
if err := lint.LintForErrors(r); err != nil {
if err := lint.LintForErrors(recipe); err != nil {
logrus.Fatal(err)
}
logrus.Debugf("checking whether %s is already deployed", stackName)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("checking whether %s is already deployed", stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(c.Context, cl, stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
logrus.Fatal(err)
}
@ -77,37 +102,62 @@ recipes.
logrus.Fatalf("%s is not deployed?", app.Name)
}
catl, err := recipe.ReadRecipeCatalogue()
catl, err := recipePkg.ReadRecipeCatalogue(internal.Offline)
if err != nil {
logrus.Fatal(err)
}
versions, err := recipe.GetRecipeCatalogueVersions(app.Type, catl)
versions, err := recipePkg.GetRecipeCatalogueVersions(app.Recipe, catl)
if err != nil {
logrus.Fatal(err)
}
if len(versions) == 0 && !internal.Chaos {
logrus.Fatalf("no published releases for %s in the recipe catalogue?", app.Type)
logrus.Warn("no published versions in catalogue, trying local recipe repository")
recipeVersions, err := recipePkg.GetRecipeVersions(app.Recipe, internal.Offline)
if err != nil {
logrus.Warn(err)
}
for _, recipeVersion := range recipeVersions {
for version := range recipeVersion {
versions = append(versions, version)
}
}
}
var availableUpgrades []string
if deployedVersion == "uknown" {
if deployedVersion == "unknown" {
availableUpgrades = versions
logrus.Warnf("failed to determine version of deployed %s", app.Name)
logrus.Warnf("failed to determine deployed version of %s", app.Name)
}
if deployedVersion != "unknown" && !internal.Chaos {
if specificVersion != "" {
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil {
logrus.Fatal(err)
}
parsedSpecificVersion, err := tagcmp.Parse(specificVersion)
if err != nil {
logrus.Fatal(err)
}
if parsedSpecificVersion.IsLessThan(parsedDeployedVersion) || parsedSpecificVersion.Equals(parsedDeployedVersion) {
logrus.Fatalf("%s is not an upgrade for %s?", deployedVersion, specificVersion)
}
availableUpgrades = append(availableUpgrades, specificVersion)
}
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil {
logrus.Fatal(err)
}
if deployedVersion != "unknown" && !internal.Chaos && specificVersion == "" {
for _, version := range versions {
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil {
logrus.Fatal(err)
}
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
logrus.Fatal(err)
}
if parsedVersion.IsGreaterThan(parsedDeployedVersion) {
if parsedVersion.IsGreaterThan(parsedDeployedVersion) && !(parsedVersion.Equals(parsedDeployedVersion)) {
availableUpgrades = append(availableUpgrades, version)
}
}
@ -118,17 +168,15 @@ recipes.
}
}
availableUpgrades = internal.ReverseStringList(availableUpgrades)
var chosenUpgrade string
if len(availableUpgrades) > 0 && !internal.Chaos {
if internal.Force {
if internal.Force || internal.NoInput || specificVersion != "" {
chosenUpgrade = availableUpgrades[len(availableUpgrades)-1]
logrus.Debugf("choosing %s as version to upgrade to", chosenUpgrade)
} else {
prompt := &survey.Select{
Message: fmt.Sprintf("Please select an upgrade (current version: %s):", deployedVersion),
Options: availableUpgrades,
Options: internal.ReverseStringList(availableUpgrades),
}
if err := survey.AskOne(prompt, &chosenUpgrade); err != nil {
return err
@ -136,16 +184,38 @@ recipes.
}
}
if internal.Force && chosenUpgrade == "" {
logrus.Warnf("%s is already upgraded to latest but continuing (--force/--chaos)", app.Name)
chosenUpgrade = deployedVersion
}
// if release notes written after git tag published, read them before we
// check out the tag and then they'll appear to be missing. this covers
// when we obviously will forget to write release notes before publishing
releaseNotes, err := internal.GetReleaseNotes(app.Type, chosenUpgrade)
if err != nil {
return err
var releaseNotes string
for _, version := range versions {
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
logrus.Fatal(err)
}
parsedChosenUpgrade, err := tagcmp.Parse(chosenUpgrade)
if err != nil {
logrus.Fatal(err)
}
if !(parsedVersion.Equals(parsedDeployedVersion)) && parsedVersion.IsLessThan(parsedChosenUpgrade) {
note, err := internal.GetReleaseNotes(app.Recipe, version)
if err != nil {
return err
}
if note != "" {
releaseNotes += fmt.Sprintf("%s\n", note)
}
}
}
if !internal.Chaos {
if err := recipe.EnsureVersion(app.Type, chosenUpgrade); err != nil {
if err := recipePkg.EnsureVersion(app.Recipe, chosenUpgrade); err != nil {
logrus.Fatal(err)
}
}
@ -153,13 +223,13 @@ recipes.
if internal.Chaos {
logrus.Warn("chaos mode engaged")
var err error
chosenUpgrade, err = recipe.ChaosVersion(app.Type)
chosenUpgrade, err = recipePkg.ChaosVersion(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Type, "abra.sh")
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
if err != nil {
logrus.Fatal(err)
@ -168,7 +238,7 @@ recipes.
app.Env[k] = v
}
composeFiles, err := config.GetAppComposeFiles(app.Type, app.Env)
composeFiles, err := config.GetComposeFiles(app.Recipe, app.Env)
if err != nil {
logrus.Fatal(err)
}
@ -182,16 +252,45 @@ recipes.
if err != nil {
logrus.Fatal(err)
}
config.ExposeAllEnv(stackName, compose, app.Env)
config.SetRecipeLabel(compose, stackName, app.Recipe)
config.SetChaosLabel(compose, stackName, internal.Chaos)
config.SetChaosVersionLabel(compose, stackName, chosenUpgrade)
config.SetUpdateLabel(compose, stackName, app.Env)
envVars, err := config.CheckEnv(app)
if err != nil {
logrus.Fatal(err)
}
for _, envVar := range envVars {
if !envVar.Present {
logrus.Warnf("env var %s missing from %s.env, present in recipe .env.sample", envVar.Name, app.Domain)
}
}
if err := internal.NewVersionOverview(app, deployedVersion, chosenUpgrade, releaseNotes); err != nil {
logrus.Fatal(err)
}
if err := stack.RunDeploy(cl, deployOpts, compose, app.StackName(), internal.DontWaitConverge); err != nil {
stack.WaitTimeout, err = config.GetTimeoutFromLabel(compose, stackName)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("set waiting timeout to %d s", stack.WaitTimeout)
if err := stack.RunDeploy(cl, deployOpts, compose, stackName, internal.DontWaitConverge); err != nil {
logrus.Fatal(err)
}
postDeployCmds, ok := app.Env["POST_UPGRADE_CMDS"]
if ok && !internal.DontWaitConverge {
logrus.Debugf("run the following post-deploy commands: %s", postDeployCmds)
if err := internal.PostCmds(cl, app, postDeployCmds); err != nil {
logrus.Fatalf("attempting to run post deploy commands, saw: %s", err)
}
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
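For reference, the upgrade-candidate filtering in the action above condenses to a sketch like the following, using only the tagcmp calls already present in this changeset (the function name is illustrative):

import "coopcloud.tech/tagcmp"

// availableUpgradesFor keeps only the catalogue versions strictly newer than
// the currently deployed version.
func availableUpgradesFor(deployedVersion string, versions []string) ([]string, error) {
	parsedDeployed, err := tagcmp.Parse(deployedVersion)
	if err != nil {
		return nil, err
	}
	var upgrades []string
	for _, version := range versions {
		parsedVersion, err := tagcmp.Parse(version)
		if err != nil {
			return nil, err
		}
		if parsedVersion.IsGreaterThan(parsedDeployed) && !parsedVersion.Equals(parsedDeployed) {
			upgrades = append(upgrades, version)
		}
	}
	return upgrades, nil
}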

View File

@ -1,6 +1,9 @@
package app
import (
"context"
"sort"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
@ -8,10 +11,21 @@ import (
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/distribution/reference"
"github.com/olekukonko/tablewriter"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
func sortServiceByName(versions [][]string) func(i, j int) bool {
return func(i, j int) bool {
// NOTE(d1): corresponds to the `tableCol` definition below
if versions[i][1] == "app" {
return true
}
return versions[i][1] < versions[j][1]
}
}
// getImagePath returns the image name
func getImagePath(image string) (string, error) {
img, err := reference.ParseNormalizedNamed(image)
@ -21,22 +35,25 @@ func getImagePath(image string) (string, error) {
path := reference.Path(img)
path = recipe.StripTagMeta(path)
path = formatter.StripTagMeta(path)
logrus.Debugf("parsed %s from %s", path, image)
return path, nil
}
var appVersionCommand = &cli.Command{
Name: "version",
Aliases: []string{"v"},
Usage: "Show app versions",
Description: `
This command shows all information about versioning related to a deployed app.
This includes the individual image names, tags and digests. But also the Co-op
Cloud recipe version.
`,
var appVersionCommand = cli.Command{
Name: "version",
Aliases: []string{"v"},
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Usage: "Show version info of a deployed app",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
stackName := app.StackName()
@ -48,20 +65,20 @@ Cloud recipe version.
logrus.Debugf("checking whether %s is already deployed", stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(c.Context, cl, stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
logrus.Fatal(err)
}
if deployedVersion == "unknown" {
logrus.Fatalf("failed to determine version of deployed %s", app.Name)
}
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
}
recipeMeta, err := recipe.GetRecipeMeta(app.Type)
if deployedVersion == "unknown" {
logrus.Fatalf("failed to determine version of deployed %s", app.Name)
}
recipeMeta, err := recipe.GetRecipeMeta(app.Recipe, internal.Offline)
if err != nil {
logrus.Fatal(err)
}
@ -77,17 +94,24 @@ Cloud recipe version.
logrus.Fatalf("could not retrieve deployed version (%s) from recipe catalogue?", deployedVersion)
}
tableCol := []string{"version", "service", "image", "digest"}
tableCol := []string{"version", "service", "image", "tag"}
table := formatter.CreateTable(tableCol)
table.SetAutoMergeCellsByColumnIndex([]int{0})
var versions [][]string
for serviceName, versionMeta := range versionsMeta {
table.Append([]string{deployedVersion, serviceName, versionMeta.Image, versionMeta.Digest})
versions = append(versions, []string{deployedVersion, serviceName, versionMeta.Image, versionMeta.Tag})
}
sort.Slice(versions, sortServiceByName(versions))
for _, version := range versions {
table.Append(version)
}
table.SetAutoMergeCellsByColumnIndex([]int{0})
table.SetAlignment(tablewriter.ALIGN_LEFT)
table.Render()
return nil
},
BashComplete: autocomplete.AppNameComplete,
}

View File

@ -1,35 +1,51 @@
package app
import (
"context"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var appVolumeListCommand = &cli.Command{
Name: "list",
var appVolumeListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
Usage: "List volumes associated with an app",
Aliases: []string{"ls"},
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
volumeList, err := client.GetVolumes(c.Context, app.Server, app.Name)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
table := formatter.CreateTable([]string{"driver", "volume name"})
filters, err := app.Filters(false, true)
if err != nil {
logrus.Fatal(err)
}
volumeList, err := client.GetVolumes(cl, context.Background(), app.Server, filters)
if err != nil {
logrus.Fatal(err)
}
table := formatter.CreateTable([]string{"name", "created", "mounted"})
var volTable [][]string
for _, volume := range volumeList {
volRow := []string{
volume.Driver,
volume.Name,
}
volRow := []string{volume.Name, volume.CreatedAt, volume.Mountpoint}
volTable = append(volTable, volRow)
}
@ -45,36 +61,59 @@ var appVolumeListCommand = &cli.Command{
},
}
var appVolumeRemoveCommand = &cli.Command{
var appVolumeRemoveCommand = cli.Command{
Name: "remove",
Usage: "Remove volume(s) associated with an app",
Description: `
This command supports removing volumes associated with an app. The app in
question must be undeployed before you try to remove volumes. See "abra app
undeploy <app>" for more.
undeploy <domain>" for more.
The command is interactive and will show a multiple select input which allows
you to make a selection. Use the "?" key to see more help on navigating this
interface.
Passing "--force" will select all volumes for removal. Be careful.
Passing "--force/-f" will select all volumes for removal. Be careful.
`,
ArgsUsage: "<app>",
ArgsUsage: "<domain>",
Aliases: []string{"rm"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.ForceFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
volumeList, err := client.GetVolumes(c.Context, app.Server, app.Name)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
logrus.Fatal(err)
}
if isDeployed {
logrus.Fatalf("%s is still deployed. Run \"abra app undeploy %s\"", app.Name, app.Name)
}
filters, err := app.Filters(false, true)
if err != nil {
logrus.Fatal(err)
}
volumeList, err := client.GetVolumes(cl, context.Background(), app.Server, filters)
if err != nil {
logrus.Fatal(err)
}
volumeNames := client.GetVolumeNames(volumeList)
var volumesToRemove []string
if !internal.Force {
if !internal.Force && !internal.NoInput {
volumesPrompt := &survey.MultiSelect{
Message: "which volumes do you want to remove?",
Help: "'x' indicates selected, enter / return to confirm, ctrl-c to exit, vim mode is enabled",
@ -85,29 +124,34 @@ Passing "--force" will select all volumes for removal. Be careful.
if err := survey.AskOne(volumesPrompt, &volumesToRemove); err != nil {
logrus.Fatal(err)
}
} else {
}
if internal.Force || internal.NoInput {
volumesToRemove = volumeNames
}
err = client.RemoveVolumes(c.Context, app.Server, volumesToRemove, internal.Force)
if err != nil {
logrus.Fatal(err)
}
if len(volumesToRemove) > 0 {
err = client.RemoveVolumes(cl, context.Background(), app.Server, volumesToRemove, internal.Force)
if err != nil {
logrus.Fatal(err)
}
logrus.Info("volumes removed successfully")
logrus.Info("volumes removed successfully")
} else {
logrus.Info("no volumes removed")
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
var appVolumeCommand = &cli.Command{
var appVolumeCommand = cli.Command{
Name: "volume",
Aliases: []string{"vl"},
Usage: "Manage app volumes",
ArgsUsage: "<command>",
ArgsUsage: "<domain>",
Subcommands: []*cli.Command{
appVolumeListCommand,
appVolumeRemoveCommand,
&appVolumeListCommand,
&appVolumeRemoveCommand,
},
}
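A condensed sketch of the interactive selection described in the remove command above, using the same survey prompt type (error handling and the Help text trimmed; the helper name is illustrative):

import "github.com/AlecAivazis/survey/v2"

// chooseVolumes shows a multi-select prompt over the discovered volume names
// and returns the subset the operator picked.
func chooseVolumes(volumeNames []string) ([]string, error) {
	var chosen []string
	prompt := &survey.MultiSelect{
		Message: "which volumes do you want to remove?",
		Options: volumeNames,
	}
	if err := survey.AskOne(prompt, &chosen); err != nil {
		return nil, err
	}
	return chosen, nil
}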

View File

@ -8,72 +8,39 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/catalogue"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/limit"
"coopcloud.tech/abra/pkg/recipe"
"github.com/go-git/go-git/v5"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
// CatalogueSkipList is all the repos that are not recipes.
var CatalogueSkipList = map[string]bool{
"abra": true,
"abra-apps": true,
"abra-aur": true,
"abra-bash": true,
"abra-capsul": true,
"abra-gandi": true,
"abra-hetzner": true,
"apps": true,
"aur-abra-git": true,
"auto-apps-json": true,
"auto-mirror": true,
"backup-bot": true,
"backup-bot-two": true,
"comrade-renovate-bot": true,
"coopcloud.tech": true,
"coturn": true,
"docker-cp-deploy": true,
"docker-dind-bats-kcov": true,
"docs.coopcloud.tech": true,
"drone-abra": true,
"example": true,
"gardening": true,
"go-abra": true,
"organising": true,
"outline-with-patch": true,
"pyabra": true,
"radicle-seed-node": true,
"recipes": true,
"stack-ssh-deploy": true,
"swarm-cronjob": true,
"tagcmp": true,
"traefik-cert-dumper": true,
"tyop": true,
}
var catalogueGenerateCommand = &cli.Command{
var catalogueGenerateCommand = cli.Command{
Name: "generate",
Aliases: []string{"g"},
Usage: "Generate the recipe catalogue",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.PublishFlag,
internal.DryFlag,
internal.SkipUpdatesFlag,
internal.RegistryUsernameFlag,
internal.RegistryPasswordFlag,
internal.ChaosFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Description: `
This command generates a new copy of the recipe catalogue which can be found on:
Generate a new copy of the recipe catalogue which can be found on:
https://recipes.coopcloud.tech
https://recipes.coopcloud.tech (website that humans read)
https://recipes.coopcloud.tech/recipes.json (JSON that Abra reads)
It polls the entire git.coopcloud.tech/coop-cloud/... recipe repository
listing, parses README.md and git tags of those repositories to produce recipe
metadata and produces a recipes JSON file.
listing, parses README.md and git tags to produce recipe metadata which is
loaded into the catalogue JSON file.
It is possible to generate new metadata for a single recipe by passing
<recipe>. The existing local catalogue will be updated, not overwritten.
@ -82,21 +49,23 @@ It is quite easy to get rate limited by Docker Hub when running this command.
If you have a Hub account you can have Abra log you in to avoid this. Pass
"--user" and "--pass".
Push your new release git.coopcloud.tech with "-p/--publish". This requires
Push your new release to git.coopcloud.tech with "-p/--publish". This requires
that you have permission to git push to these repositories and have your SSH
keys configured on your account.
`,
ArgsUsage: "[<recipe>]",
ArgsUsage: "[<recipe>]",
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipeName := c.Args().First()
if recipeName != "" {
internal.ValidateRecipe(c)
}
catalogueDir := path.Join(config.ABRA_DIR, "catalogue")
url := fmt.Sprintf("%s/%s.git", config.REPOS_BASE_URL, "recipes")
if err := gitPkg.Clone(catalogueDir, url); err != nil {
return err
if !internal.Chaos {
if err := catalogue.EnsureIsClean(); err != nil {
logrus.Fatal(err)
}
}
repos, err := recipe.ReadReposMetadata()
@ -116,7 +85,7 @@ keys configured on your account.
if !internal.SkipUpdates {
logrus.Warn(logMsg)
if err := updateRepositories(repos, recipeName); err != nil {
if err := recipe.UpdateRepositories(repos, recipeName); err != nil {
logrus.Fatal(err)
}
}
@ -129,18 +98,9 @@ keys configured on your account.
continue
}
if _, exists := CatalogueSkipList[recipeMeta.Name]; exists {
catlBar.Add(1)
continue
}
versions, err := recipe.GetRecipeVersions(
recipeMeta.Name,
internal.RegistryUsername,
internal.RegistryPassword,
)
versions, err := recipe.GetRecipeVersions(recipeMeta.Name, internal.Offline)
if err != nil {
logrus.Fatal(err)
logrus.Warn(err)
}
features, category, err := recipe.GetRecipeFeaturesAndCategory(recipeMeta.Name)
@ -174,7 +134,7 @@ keys configured on your account.
logrus.Fatal(err)
}
} else {
catlFS, err := recipe.ReadRecipeCatalogue()
catlFS, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
logrus.Fatal(err)
}
@ -208,7 +168,7 @@ keys configured on your account.
}
msg := "chore: publish new catalogue release changes"
if err := gitPkg.Commit(cataloguePath, "**.json", msg, internal.Dry); err != nil {
if err := gitPkg.Commit(cataloguePath, msg, internal.Dry); err != nil {
logrus.Fatal(err)
}
@ -217,7 +177,7 @@ keys configured on your account.
logrus.Fatal(err)
}
sshURL := fmt.Sprintf(config.SSH_URL_TEMPLATE, "recipes")
sshURL := fmt.Sprintf(config.SSH_URL_TEMPLATE, config.CATALOGUE_JSON_REPO_NAME)
if err := gitPkg.CreateRemote(repo, "origin-ssh", sshURL, internal.Dry); err != nil {
logrus.Fatal(err)
}
@ -238,7 +198,7 @@ keys configured on your account.
}
if !internal.Dry && internal.Publish {
url := fmt.Sprintf("%s/recipes/commit/%s", config.REPOS_BASE_URL, head.Hash())
url := fmt.Sprintf("%s/%s/commit/%s", config.REPOS_BASE_URL, config.CATALOGUE_JSON_REPO_NAME, head.Hash())
logrus.Infof("new changes published: %s", url)
}
@ -248,76 +208,16 @@ keys configured on your account.
return nil
},
BashComplete: autocomplete.RecipeNameComplete,
}
// CatalogueCommand defines the `abra catalogue` command and sub-commands.
var CatalogueCommand = &cli.Command{
var CatalogueCommand = cli.Command{
Name: "catalogue",
Usage: "Manage the recipe catalogue",
Aliases: []string{"c"},
ArgsUsage: "<recipe>",
Description: "This command helps recipe packagers interact with the recipe catalogue",
Subcommands: []*cli.Command{
catalogueGenerateCommand,
&catalogueGenerateCommand,
},
}
func updateRepositories(repos recipe.RepoCatalogue, recipeName string) error {
var barLength int
if recipeName != "" {
barLength = 1
} else {
barLength = len(repos)
}
cloneLimiter := limit.New(10)
retrieveBar := formatter.CreateProgressbar(barLength, "ensuring recipes are cloned & up-to-date...")
ch := make(chan string, barLength)
for _, repoMeta := range repos {
go func(rm recipe.RepoMeta) {
cloneLimiter.Begin()
defer cloneLimiter.End()
if recipeName != "" && recipeName != rm.Name {
ch <- rm.Name
retrieveBar.Add(1)
return
}
if _, exists := CatalogueSkipList[rm.Name]; exists {
ch <- rm.Name
retrieveBar.Add(1)
return
}
recipeDir := path.Join(config.RECIPES_DIR, rm.Name)
if err := gitPkg.Clone(recipeDir, rm.CloneURL); err != nil {
logrus.Fatal(err)
}
isClean, err := gitPkg.IsClean(recipeDir)
if err != nil {
logrus.Fatal(err)
}
if !isClean {
logrus.Fatalf("%s has locally unstaged changes", rm.Name)
}
if err := recipe.EnsureUpToDate(rm.Name); err != nil {
logrus.Fatal(err)
}
ch <- rm.Name
retrieveBar.Add(1)
}(repoMeta)
}
for range repos {
<-ch // wait for everything
}
return nil
}

View File

@ -12,37 +12,33 @@ import (
"coopcloud.tech/abra/cli/catalogue"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/cli/recipe"
"coopcloud.tech/abra/cli/record"
"coopcloud.tech/abra/cli/server"
"coopcloud.tech/abra/pkg/autocomplete"
cataloguePkg "coopcloud.tech/abra/pkg/catalogue"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/web"
logrusStack "github.com/Gurpartap/logrus-stack"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
// AutoCompleteCommand helps people set up auto-complete in their shells
var AutoCompleteCommand = &cli.Command{
var AutoCompleteCommand = cli.Command{
Name: "autocomplete",
Usage: "Configure shell autocompletion (recommended)",
Aliases: []string{"ac"},
Usage: "Configure shell autocompletion (recommended)",
Description: `
This command helps set up autocompletion in your shell by downloading the
relevant autocompletion files and laying out what additional information must
be loaded.
Set up auto-completion in your shell by downloading the relevant files and
laying out what additional information must be loaded. Supported shells are as
follows: bash, fish, fizsh & zsh.
Example:
abra autocomplete bash
Supported shells are as follows:
fizsh
zsh
bash
`,
ArgsUsage: "<shell>",
Flags: []cli.Flag{
internal.DebugFlag,
},
Action: func(c *cli.Context) error {
shellType := c.Args().First()
@ -54,6 +50,7 @@ Supported shells are as follows:
"bash": true,
"zsh": true,
"fizsh": true,
"fish": true,
}
if _, ok := supportedShells[shellType]; !ok {
@ -84,19 +81,27 @@ Supported shells are as follows:
switch shellType {
case "bash":
fmt.Println(fmt.Sprintf(`
# Run the following commands to install autocompletion
# Run the following commands to install auto-completion
sudo mkdir /etc/bash_completion.d/
sudo cp %s /etc/bash_completion.d/abra
echo "source /etc/bash_completion.d/abra" >> ~/.bashrc
# And finally run "abra app ps <hit tab key>" to test things are working, you should see app names listed!
# To test, run the following: "abra app <hit tab key>" - you should see command completion!
`, autocompletionFile))
case "zsh":
fmt.Println(fmt.Sprintf(`
# Run the following commands to install autocompletion
# Run the following commands to install auto-completion
sudo mkdir /etc/zsh/completion.d/
sudo cp %s /etc/zsh/completion.d/abra
echo "PROG=abra\n_CLI_ZSH_AUTOCOMPLETE_HACK=1\nsource /etc/zsh/completion.d/abra" >> ~/.zshrc
# And finally run "abra app ps <hit tab key>" to test things are working, you should see app names listed!
# To test, run the following: "abra app <hit tab key>" - you should see command completion!
`, autocompletionFile))
case "fish":
fmt.Println(fmt.Sprintf(`
# Run the following commands to install auto-completion
sudo mkdir -p /etc/fish/completions
sudo cp %s /etc/fish/completions/abra
echo "source /etc/fish/completions/abra" >> ~/.config/fish/config.fish
# To test, run the following: "abra app <hit tab key>" - you should see command completion!
`, autocompletionFile))
}
@ -105,18 +110,16 @@ echo "PROG=abra\n_CLI_ZSH_AUTOCOMPLETE_HACK=1\nsource /etc/zsh/completion.d/abra
}
// UpgradeCommand upgrades abra in-place.
var UpgradeCommand = &cli.Command{
var UpgradeCommand = cli.Command{
Name: "upgrade",
Usage: "Upgrade Abra itself",
Aliases: []string{"u"},
Usage: "Upgrade Abra itself",
Description: `
This command allows you to upgrade Abra in-place with the latest stable or
release candidate.
Upgrade Abra in-place with the latest stable or release candidate.
If you would like to install the latest release candidate, please pass the
"--rc" option. Please bear in mind that the latest release candidate may have
some catastrophic bugs contained in it. In any case, thank you very much for
the testing efforts!
Pass "-r/--rc" to install the latest release candidate. Please bear in mind
that it may contain catastrophic bugs. Thank you very much for the testing
efforts!
`,
Flags: []cli.Flag{internal.RCFlag},
Action: func(c *cli.Context) error {
@ -151,45 +154,25 @@ func newAbraApp(version, commit string) *cli.App {
`,
Version: fmt.Sprintf("%s-%s", version, commit[:7]),
Commands: []*cli.Command{
app.AppCommand,
server.ServerCommand,
recipe.RecipeCommand,
catalogue.CatalogueCommand,
record.RecordCommand,
UpgradeCommand,
AutoCompleteCommand,
},
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Authors: []*cli.Author{
// If you're looking at this and you hack on Abra and you're not listed
// here, please do add yourself! This is a community project, let's show
// some love
{Name: "3wordchant"},
{Name: "decentral1se"},
{Name: "kawaiipunk"},
{Name: "knoflook"},
{Name: "roxxers"},
&app.AppCommand,
&server.ServerCommand,
&recipe.RecipeCommand,
&catalogue.CatalogueCommand,
&UpgradeCommand,
&AutoCompleteCommand,
},
BashComplete: autocomplete.SubcommandComplete,
}
app.EnableBashCompletion = true
app.Before = func(c *cli.Context) error {
if internal.Debug {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetFormatter(&logrus.TextFormatter{})
logrus.SetOutput(os.Stderr)
logrus.AddHook(logrusStack.StandardHook())
}
paths := []string{
config.ABRA_DIR,
path.Join(config.SERVERS_DIR),
path.Join(config.RECIPES_DIR),
path.Join(config.VENDOR_DIR),
config.SERVERS_DIR,
config.RECIPES_DIR,
config.VENDOR_DIR,
config.BACKUP_DIR,
}
for _, path := range paths {
@ -201,10 +184,15 @@ func newAbraApp(version, commit string) *cli.App {
}
}
if err := cataloguePkg.EnsureCatalogue(); err != nil {
logrus.Fatal(err)
}
logrus.Debugf("abra version %s, commit %s", version, commit)
return nil
}
return app
}

67
cli/internal/backup.go Normal file
View File

@ -0,0 +1,67 @@
package internal
import (
"context"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/service"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
// RetrieveBackupBotContainer gets the deployed backupbot container.
func RetrieveBackupBotContainer(cl *dockerClient.Client) (types.Container, error) {
ctx := context.Background()
chosenService, err := service.GetServiceByLabel(ctx, cl, config.BackupbotLabel, NoInput)
if err != nil {
return types.Container{}, err
}
logrus.Debugf("retrieved %s as backup enabled service", chosenService.Spec.Name)
filters := filters.NewArgs()
filters.Add("name", chosenService.Spec.Name)
targetContainer, err := containerPkg.GetContainer(
ctx,
cl,
filters,
NoInput,
)
if err != nil {
return types.Container{}, err
}
return targetContainer, nil
}
// RunBackupCmdRemote runs a backup related command on a remote backupbot container.
func RunBackupCmdRemote(cl *dockerClient.Client, backupCmd string, containerID string, execEnv []string) error {
execBackupListOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: []string{"/usr/bin/backup", "--", backupCmd},
Detach: false,
Env: execEnv,
Tty: true,
}
logrus.Debugf("running backup %s on %s with exec config %v", backupCmd, containerID, execBackupListOpts)
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &execBackupListOpts); err != nil {
return err
}
return nil
}
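A rough sketch of how callers are expected to combine the two helpers above: locate the deployed backupbot container, then exec a backup subcommand inside it. The "ls" subcommand and the env value are illustrative, not taken from this changeset:

import (
	"coopcloud.tech/abra/cli/internal"
	dockerClient "github.com/docker/docker/client"
)

func listRemoteBackups(cl *dockerClient.Client) error {
	targetContainer, err := internal.RetrieveBackupBotContainer(cl)
	if err != nil {
		return err
	}
	execEnv := []string{"MACHINE_LOGS=true"} // hypothetical toggle, for illustration only
	return internal.RunBackupCmdRemote(cl, "ls", targetContainer.ID, execEnv)
}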

315
cli/internal/cli.go Normal file
View File

@ -0,0 +1,315 @@
package internal
import (
"os"
logrusStack "github.com/Gurpartap/logrus-stack"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
// Secrets stores the variable from SecretsFlag
var Secrets bool
// SecretsFlag turns on/off automatically generating secrets
var SecretsFlag = &cli.BoolFlag{
Name: "secrets",
Aliases: []string{"S"},
Usage: "Automatically generate secrets",
Destination: &Secrets,
}
// Pass stores the variable from PassFlag
var Pass bool
// PassFlag turns on/off storing generated secrets in pass
var PassFlag = &cli.BoolFlag{
Name: "pass",
Aliases: []string{"p"},
Usage: "Store the generated secrets in a local pass store",
Destination: &Pass,
}
// PassRemove stores the variable for PassRemoveFlag
var PassRemove bool
// PassRemoveFlag turns on/off removing generated secrets from pass
var PassRemoveFlag = &cli.BoolFlag{
Name: "pass",
Aliases: []string{"p"},
Usage: "Remove generated secrets from a local pass store",
Destination: &PassRemove,
}
// Force force functionality without asking.
var Force bool
// ForceFlag turns on/off force functionality.
var ForceFlag = &cli.BoolFlag{
Name: "force",
Aliases: []string{"f"},
Usage: "Perform action without further prompt. Use with care!",
Destination: &Force,
}
// Chaos engages chaos mode.
var Chaos bool
// ChaosFlag turns on/off chaos functionality.
var ChaosFlag = &cli.BoolFlag{
Name: "chaos",
Aliases: []string{"C"},
Usage: "Proceed with uncommitted recipes changes. Use with care!",
Destination: &Chaos,
}
// Disable tty to run commands from script
var Tty bool
// TtyFlag turns on/off tty mode.
var TtyFlag = &cli.BoolFlag{
Name: "tty",
Aliases: []string{"T"},
Usage: "Disables TTY mode to run this command from a script.",
Destination: &Tty,
}
var (
NoInput bool
NoInputFlag = &cli.BoolFlag{
Name: "no-input",
Aliases: []string{"n"},
Usage: "Toggle non-interactive mode",
Destination: &NoInput,
}
)
// Debug stores the variable from DebugFlag.
var Debug bool
// DebugFlag turns on/off verbose logging down to the DEBUG level.
var DebugFlag = &cli.BoolFlag{
Name: "debug",
Aliases: []string{"d"},
Destination: &Debug,
Usage: "Show DEBUG messages",
}
// Offline stores the variable from OfflineFlag.
var Offline bool
// OfflineFlag turns on/off offline mode.
var OfflineFlag = &cli.BoolFlag{
Name: "offline",
Aliases: []string{"o"},
Destination: &Offline,
Usage: "Prefer offline & filesystem access when possible",
}
// MachineReadable stores the variable from MachineReadableFlag
var MachineReadable bool
// MachineReadableFlag turns on/off machine readable output where supported
var MachineReadableFlag = &cli.BoolFlag{
Name: "machine",
Aliases: []string{"m"},
Destination: &MachineReadable,
Usage: "Output in a machine-readable format (where supported)",
}
// RC signifies the latest release candidate
var RC bool
// RCFlag chooses the latest release candidate for install
var RCFlag = &cli.BoolFlag{
Name: "rc",
Aliases: []string{"c"},
Destination: &RC,
Usage: "Install the latest release candidate",
}
var (
Major bool
MajorFlag = &cli.BoolFlag{
Name: "major",
Aliases: []string{"x"},
Usage: "Increase the major part of the version",
Destination: &Major,
}
)
var (
Minor bool
MinorFlag = &cli.BoolFlag{
Name: "minor",
Aliases: []string{"y"},
Usage: "Increase the minor part of the version",
Destination: &Minor,
}
)
var (
Patch bool
PatchFlag = &cli.BoolFlag{
Name: "patch",
Aliases: []string{"z"},
Usage: "Increase the patch part of the version",
Destination: &Patch,
}
)
var (
Dry bool
DryFlag = &cli.BoolFlag{
Name: "dry-run",
Aliases: []string{"r"},
Usage: "Only reports changes that would be made",
Destination: &Dry,
}
)
var (
Publish bool
PublishFlag = &cli.BoolFlag{
Name: "publish",
Aliases: []string{"p"},
Usage: "Publish changes to git.coopcloud.tech",
Destination: &Publish,
}
)
var (
Domain string
DomainFlag = &cli.StringFlag{
Name: "domain",
Aliases: []string{"D"},
Value: "",
Usage: "Choose a domain name",
Destination: &Domain,
}
)
var (
NewAppServer string
NewAppServerFlag = &cli.StringFlag{
Name: "server",
Aliases: []string{"s"},
Value: "",
Usage: "Show apps of a specific server",
Destination: &NewAppServer,
}
)
var (
NoDomainChecks bool
NoDomainChecksFlag = &cli.BoolFlag{
Name: "no-domain-checks",
Aliases: []string{"D"},
Usage: "Disable app domain sanity checks",
Destination: &NoDomainChecks,
}
)
var (
StdErrOnly bool
StdErrOnlyFlag = &cli.BoolFlag{
Name: "stderr",
Aliases: []string{"s"},
Usage: "Only tail stderr",
Destination: &StdErrOnly,
}
)
var (
SinceLogs string
SinceLogsFlag = &cli.StringFlag{
Name: "since",
Aliases: []string{"S"},
Value: "",
Usage: "tail logs since YYYY-MM-DDTHH:MM:SSZ",
Destination: &SinceLogs,
}
)
var (
DontWaitConverge bool
DontWaitConvergeFlag = &cli.BoolFlag{
Name: "no-converge-checks",
Aliases: []string{"c"},
Usage: "Don't wait for converge logic checks",
Destination: &DontWaitConverge,
}
)
var (
Watch bool
WatchFlag = &cli.BoolFlag{
Name: "watch",
Aliases: []string{"w"},
Usage: "Watch status by polling repeatedly",
Destination: &Watch,
}
)
var (
OnlyErrors bool
OnlyErrorFlag = &cli.BoolFlag{
Name: "errors",
Aliases: []string{"e"},
Usage: "Only show errors",
Destination: &OnlyErrors,
}
)
var (
SkipUpdates bool
SkipUpdatesFlag = &cli.BoolFlag{
Name: "skip-updates",
Aliases: []string{"s"},
Usage: "Skip updating recipe repositories",
Destination: &SkipUpdates,
}
)
var (
AllTags bool
AllTagsFlag = &cli.BoolFlag{
Name: "all-tags",
Aliases: []string{"a"},
Usage: "List all tags, not just upgrades",
Destination: &AllTags,
}
)
var (
LocalCmd bool
LocalCmdFlag = &cli.BoolFlag{
Name: "local",
Aliases: []string{"l"},
Usage: "Run command locally",
Destination: &LocalCmd,
}
)
var (
RemoteUser string
RemoteUserFlag = &cli.StringFlag{
Name: "user",
Aliases: []string{"u"},
Value: "",
Usage: "User to run command within a service context",
Destination: &RemoteUser,
}
)
// SubCommandBefore wires up pre-action machinery (e.g. --debug handling).
func SubCommandBefore(c *cli.Context) error {
if Debug {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetFormatter(&logrus.TextFormatter{})
logrus.SetOutput(os.Stderr)
logrus.AddHook(logrusStack.StandardHook())
}
return nil
}

View File

@ -2,10 +2,109 @@ package internal
import (
"bufio"
"context"
"fmt"
"io/ioutil"
"os/exec"
"strings"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
)
// RunCmdRemote executes an abra.sh command in the target service
func RunCmdRemote(cl *dockerClient.Client, app config.App, abraSh, serviceName, cmdName, cmdArgs string) error {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, false)
if err != nil {
return err
}
logrus.Debugf("retrieved %s as target container on %s", formatter.ShortenID(targetContainer.ID), app.Server)
toTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
content, err := archive.TarWithOptions(abraSh, toTarOpts)
if err != nil {
return err
}
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), targetContainer.ID, "/tmp", content, copyOpts); err != nil {
return err
}
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
shell := "/bin/bash"
findShell := []string{"test", "-e", shell}
execCreateOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: findShell,
Detach: false,
Tty: false,
}
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
logrus.Infof("%s does not exist for %s, use /bin/sh as fallback", shell, app.Name)
shell = "/bin/sh"
}
var cmd []string
if cmdArgs != "" {
cmd = []string{shell, "-c", fmt.Sprintf("TARGET=%s; APP_NAME=%s; STACK_NAME=%s; . /tmp/abra.sh; %s %s", serviceName, app.Name, app.StackName(), cmdName, cmdArgs)}
} else {
cmd = []string{shell, "-c", fmt.Sprintf("TARGET=%s; APP_NAME=%s; STACK_NAME=%s; . /tmp/abra.sh; %s", serviceName, app.Name, app.StackName(), cmdName)}
}
logrus.Debugf("running command: %s", strings.Join(cmd, " "))
if RemoteUser != "" {
logrus.Debugf("running command with user %s", RemoteUser)
execCreateOpts.User = RemoteUser
}
execCreateOpts.Cmd = cmd
execCreateOpts.Tty = true
if Tty {
execCreateOpts.Tty = false
}
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
return err
}
return nil
}
func EnsureCommand(abraSh, recipeName, execCmd string) error {
bytes, err := ioutil.ReadFile(abraSh)
if err != nil {
return err
}
if !strings.Contains(string(bytes), execCmd) {
return fmt.Errorf("%s doesn't have a %s function", recipeName, execCmd)
}
return nil
}
// RunCmd runs a shell command and streams stdout/stderr in real-time.
func RunCmd(cmd *exec.Cmd) error {
r, err := cmd.StdoutPipe()

View File

@ -7,199 +7,16 @@ import (
"path"
"strings"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/dns"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/AlecAivazis/survey/v2"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
// DeployAction is the main command-line action for this package
func DeployAction(c *cli.Context) error {
app := ValidateApp(c)
if !Chaos {
if err := recipe.EnsureUpToDate(app.Type); err != nil {
logrus.Fatal(err)
}
}
r, err := recipe.Get(app.Type)
if err != nil {
logrus.Fatal(err)
}
if err := lint.LintForErrors(r); err != nil {
logrus.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("checking whether %s is already deployed", app.StackName())
isDeployed, deployedVersion, err := stack.IsDeployed(c.Context, cl, app.StackName())
if err != nil {
logrus.Fatal(err)
}
if isDeployed {
if Force || Chaos {
logrus.Warnf("%s is already deployed but continuing (--force/--chaos)", app.Name)
} else {
logrus.Fatalf("%s is already deployed", app.Name)
}
}
version := deployedVersion
if version == "unknown" && !Chaos {
catl, err := recipe.ReadRecipeCatalogue()
if err != nil {
logrus.Fatal(err)
}
versions, err := recipe.GetRecipeCatalogueVersions(app.Type, catl)
if err != nil {
logrus.Fatal(err)
}
if len(versions) > 0 {
version = versions[len(versions)-1]
logrus.Debugf("choosing %s as version to deploy", version)
if err := recipe.EnsureVersion(app.Type, version); err != nil {
logrus.Fatal(err)
}
} else {
head, err := git.GetRecipeHead(app.Type)
if err != nil {
logrus.Fatal(err)
}
version = formatter.SmallSHA(head.String())
logrus.Warn("no versions detected, using latest commit")
if err := recipe.EnsureLatest(app.Type); err != nil {
logrus.Fatal(err)
}
}
}
if version == "unknown" && !Chaos {
logrus.Debugf("choosing %s as version to deploy", version)
if err := recipe.EnsureVersion(app.Type, version); err != nil {
logrus.Fatal(err)
}
}
if version != "unknown" && !Chaos {
if err := recipe.EnsureVersion(app.Type, version); err != nil {
logrus.Fatal(err)
}
}
if Chaos {
logrus.Warnf("chaos mode engaged")
var err error
version, err = recipe.ChaosVersion(app.Type)
if err != nil {
logrus.Fatal(err)
}
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Type, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
if err != nil {
logrus.Fatal(err)
}
for k, v := range abraShEnv {
app.Env[k] = v
}
composeFiles, err := config.GetAppComposeFiles(app.Type, app.Env)
if err != nil {
logrus.Fatal(err)
}
deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: app.StackName(),
Prune: false,
ResolveImage: stack.ResolveImageAlways,
}
compose, err := config.GetAppComposeConfig(app.Name, deployOpts, app.Env)
if err != nil {
logrus.Fatal(err)
}
if err := DeployOverview(app, version, "continue with deployment?"); err != nil {
logrus.Fatal(err)
}
if !NoDomainChecks {
domainName := app.Env["DOMAIN"]
ipv4, err := dns.EnsureIPv4(domainName)
if err != nil || ipv4 == "" {
logrus.Fatalf("could not find an IP address assigned to %s?", domainName)
}
if _, err = dns.EnsureDomainsResolveSameIPv4(domainName, app.Server); err != nil {
logrus.Fatal(err)
}
} else {
logrus.Warn("skipping domain checks as requested")
}
if err := stack.RunDeploy(cl, deployOpts, compose, app.Name, DontWaitConverge); err != nil {
logrus.Fatal(err)
}
return nil
}
// DeployOverview shows a deployment overview
func DeployOverview(app config.App, version, message string) error {
tableCol := []string{"server", "compose", "domain", "app name", "version"}
table := formatter.CreateTable(tableCol)
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = strings.Join(strings.Split(composeFiles, ":"), "\n")
}
server := app.Server
if app.Server == "default" {
server = "local"
}
table.Append([]string{server, deployConfig, app.Domain, app.Name, version})
table.Render()
if NoInput {
return nil
}
response := false
prompt := &survey.Confirm{
Message: message,
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
return nil
}
// NewVersionOverview shows an upgrade or downgrade overview
func NewVersionOverview(app config.App, currentVersion, newVersion, releaseNotes string) error {
tableCol := []string{"server", "compose", "domain", "app name", "current version", "to be deployed"}
tableCol := []string{"server", "recipe", "config", "domain", "current version", "to be deployed"}
table := formatter.CreateTable(tableCol)
deployConfig := "compose.yml"
@ -212,20 +29,12 @@ func NewVersionOverview(app config.App, currentVersion, newVersion, releaseNotes
server = "local"
}
table.Append([]string{server, deployConfig, app.Domain, app.Name, currentVersion, newVersion})
table.Append([]string{server, app.Recipe, deployConfig, app.Domain, currentVersion, newVersion})
table.Render()
if releaseNotes == "" {
var err error
releaseNotes, err = GetReleaseNotes(app.Type, newVersion)
if err != nil {
return err
}
}
if releaseNotes != "" && newVersion != "" {
fmt.Println()
fmt.Println(fmt.Sprintf("%s release notes:\n\n%s", newVersion, releaseNotes))
fmt.Print(releaseNotes)
} else {
logrus.Warnf("no release notes available for %s", newVersion)
}
@ -263,8 +72,102 @@ func GetReleaseNotes(recipeName, version string) (string, error) {
if err != nil {
return "", err
}
return string(releaseNotes), nil
withTitle := fmt.Sprintf("%s release notes:\n%s", version, string(releaseNotes))
return withTitle, nil
}
return "", nil
}
// PostCmds parses a string of commands and executes them inside of the respective services
// the commands string must have the following format:
// "<service> <command> <arguments>|<service> <command> <arguments>|... "
func PostCmds(cl *dockerClient.Client, app config.App, commands string) error {
abraSh := path.Join(config.RECIPES_DIR, app.Recipe, "abra.sh")
if _, err := os.Stat(abraSh); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf(fmt.Sprintf("%s does not exist for %s?", abraSh, app.Name))
}
return err
}
for _, command := range strings.Split(commands, "|") {
commandParts := strings.Split(command, " ")
if len(commandParts) < 2 {
return fmt.Errorf(fmt.Sprintf("not enough arguments: %s", command))
}
targetServiceName := commandParts[0]
cmdName := commandParts[1]
parsedCmdArgs := ""
if len(commandParts) > 2 {
parsedCmdArgs = fmt.Sprintf("%s ", strings.Join(commandParts[2:], " "))
}
logrus.Infof("running post-command '%s %s' in container %s", cmdName, parsedCmdArgs, targetServiceName)
if err := EnsureCommand(abraSh, app.Recipe, cmdName); err != nil {
return err
}
serviceNames, err := config.GetAppServiceNames(app.Name)
if err != nil {
return err
}
matchingServiceName := false
for _, serviceName := range serviceNames {
if serviceName == targetServiceName {
matchingServiceName = true
}
}
if !matchingServiceName {
return fmt.Errorf(fmt.Sprintf("no service %s for %s?", targetServiceName, app.Name))
}
logrus.Debugf("running command %s %s within the context of %s_%s", cmdName, parsedCmdArgs, app.StackName(), targetServiceName)
Tty = true
if err := RunCmdRemote(cl, app, abraSh, targetServiceName, cmdName, parsedCmdArgs); err != nil {
return err
}
}
return nil
}
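For illustration, PostCmds accepts values shaped like the one in this sketch; the recipe command names ("migrate_db", "reindex") are hypothetical and the snippet only demonstrates the splitting rules:

import (
	"fmt"
	"strings"
)

func demoPostCmdsFormat() {
	postUpgradeCmds := "app migrate_db|app reindex --all"
	for _, command := range strings.Split(postUpgradeCmds, "|") {
		parts := strings.Split(command, " ")
		service, cmdName := parts[0], parts[1]
		args := strings.Join(parts[2:], " ")
		fmt.Printf("service=%s command=%s args=%q\n", service, cmdName, args)
	}
}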
// DeployOverview shows a deployment overview
func DeployOverview(app config.App, version, message string) error {
tableCol := []string{"server", "recipe", "config", "domain", "version"}
table := formatter.CreateTable(tableCol)
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = strings.Join(strings.Split(composeFiles, ":"), "\n")
}
server := app.Server
if app.Server == "default" {
server = "local"
}
table.Append([]string{server, app.Recipe, deployConfig, app.Domain, version})
table.Render()
if NoInput {
return nil
}
response := false
prompt := &survey.Confirm{
Message: message,
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
return nil
}

View File

@ -1,488 +0,0 @@
package internal
import (
"github.com/urfave/cli/v2"
)
// Secrets stores the variable from SecretsFlag
var Secrets bool
// SecretsFlag turns on/off automatically generating secrets
var SecretsFlag = &cli.BoolFlag{
Name: "secrets",
Aliases: []string{"ss"},
Value: false,
Usage: "Automatically generate secrets",
Destination: &Secrets,
}
// Pass stores the variable from PassFlag
var Pass bool
// PassFlag turns on/off storing generated secrets in pass
var PassFlag = &cli.BoolFlag{
Name: "pass",
Aliases: []string{"p"},
Value: false,
Usage: "Store the generated secrets in a local pass store",
Destination: &Pass,
}
// Context is temp
var Context string
// ContextFlag is temp
var ContextFlag = &cli.StringFlag{
Name: "context",
Value: "",
Aliases: []string{"c"},
Destination: &Context,
}
// Force force functionality without asking.
var Force bool
// ForceFlag turns on/off force functionality.
var ForceFlag = &cli.BoolFlag{
Name: "force",
Value: false,
Aliases: []string{"f"},
Usage: "Perform action without further prompt. Use with care!",
Destination: &Force,
}
// Chaos engages chaos mode.
var Chaos bool
// ChaosFlag turns on/off chaos functionality.
var ChaosFlag = &cli.BoolFlag{
Name: "chaos",
Value: false,
Aliases: []string{"ch"},
Usage: "Deploy uncommitted recipes changes. Use with care!",
Destination: &Chaos,
}
// DNSProvider specifies a DNS provider.
var DNSProvider string
// DNSProviderFlag selects a DNS provider.
var DNSProviderFlag = &cli.StringFlag{
Name: "provider",
Value: "",
Aliases: []string{"p"},
Usage: "DNS provider",
Destination: &DNSProvider,
}
var NoInput bool
var NoInputFlag = &cli.BoolFlag{
Name: "no-input",
Value: false,
Aliases: []string{"n"},
Usage: "Toggle non-interactive mode",
Destination: &NoInput,
}
var DNSType string
var DNSTypeFlag = &cli.StringFlag{
Name: "type",
Value: "",
Aliases: []string{"t"},
Usage: "Domain name record type (e.g. A)",
Destination: &DNSType,
}
var DNSName string
var DNSNameFlag = &cli.StringFlag{
Name: "name",
Value: "",
Aliases: []string{"n"},
Usage: "Domain name record name (e.g. mysubdomain)",
Destination: &DNSName,
}
var DNSValue string
var DNSValueFlag = &cli.StringFlag{
Name: "value",
Value: "",
Aliases: []string{"v"},
Usage: "Domain name record value (e.g. 192.168.1.1)",
Destination: &DNSValue,
}
var DNSTTL string
var DNSTTLFlag = &cli.StringFlag{
Name: "ttl",
Value: "600s",
Aliases: []string{"T"},
Usage: "Domain name TTL value (seconds)",
Destination: &DNSTTL,
}
var DNSPriority int
var DNSPriorityFlag = &cli.IntFlag{
Name: "priority",
Value: 10,
Aliases: []string{"P"},
Usage: "Domain name priority value",
Destination: &DNSPriority,
}
var ServerProvider string
var ServerProviderFlag = &cli.StringFlag{
Name: "provider",
Aliases: []string{"p"},
Usage: "3rd party server provider",
Destination: &ServerProvider,
}
var CapsulInstanceURL string
var CapsulInstanceURLFlag = &cli.StringFlag{
Name: "capsul-url",
Value: "yolo.servers.coop",
Aliases: []string{"cu"},
Usage: "capsul instance URL",
Destination: &CapsulInstanceURL,
}
var CapsulName string
var CapsulNameFlag = &cli.StringFlag{
Name: "capsul-name",
Value: "",
Aliases: []string{"cn"},
Usage: "capsul name",
Destination: &CapsulName,
}
var CapsulType string
var CapsulTypeFlag = &cli.StringFlag{
Name: "capsul-type",
Value: "f1-xs",
Aliases: []string{"ct"},
Usage: "capsul type",
Destination: &CapsulType,
}
var CapsulImage string
var CapsulImageFlag = &cli.StringFlag{
Name: "capsul-image",
Value: "debian10",
Aliases: []string{"ci"},
Usage: "capsul image",
Destination: &CapsulImage,
}
var CapsulSSHKeys cli.StringSlice
var CapsulSSHKeysFlag = &cli.StringSliceFlag{
Name: "capsul-ssh-keys",
Aliases: []string{"cs"},
Usage: "capsul SSH key",
Destination: &CapsulSSHKeys,
}
var CapsulAPIToken string
var CapsulAPITokenFlag = &cli.StringFlag{
Name: "capsul-token",
Aliases: []string{"ca"},
Usage: "capsul API token",
EnvVars: []string{"CAPSUL_TOKEN"},
Destination: &CapsulAPIToken,
}
var HetznerCloudName string
var HetznerCloudNameFlag = &cli.StringFlag{
Name: "hetzner-name",
Value: "",
Aliases: []string{"hn"},
Usage: "hetzner cloud name",
Destination: &HetznerCloudName,
}
var HetznerCloudType string
var HetznerCloudTypeFlag = &cli.StringFlag{
Name: "hetzner-type",
Aliases: []string{"ht"},
Usage: "hetzner cloud type",
Destination: &HetznerCloudType,
Value: "cx11",
}
var HetznerCloudImage string
var HetznerCloudImageFlag = &cli.StringFlag{
Name: "hetzner-image",
Aliases: []string{"hi"},
Usage: "hetzner cloud image",
Value: "debian-10",
Destination: &HetznerCloudImage,
}
var HetznerCloudSSHKeys cli.StringSlice
var HetznerCloudSSHKeysFlag = &cli.StringSliceFlag{
Name: "hetzner-ssh-keys",
Aliases: []string{"hs"},
Usage: "hetzner cloud SSH keys (e.g. me@foo.com)",
Destination: &HetznerCloudSSHKeys,
}
var HetznerCloudLocation string
var HetznerCloudLocationFlag = &cli.StringFlag{
Name: "hetzner-location",
Aliases: []string{"hl"},
Usage: "hetzner cloud server location",
Value: "hel1",
Destination: &HetznerCloudLocation,
}
var HetznerCloudAPIToken string
var HetznerCloudAPITokenFlag = &cli.StringFlag{
Name: "hetzner-token",
Aliases: []string{"ha"},
Usage: "hetzner cloud API token",
EnvVars: []string{"HCLOUD_TOKEN"},
Destination: &HetznerCloudAPIToken,
}
// Debug stores the variable from DebugFlag.
var Debug bool
// DebugFlag turns on/off verbose logging down to the DEBUG level.
var DebugFlag = &cli.BoolFlag{
Name: "debug",
Aliases: []string{"d"},
Value: false,
Destination: &Debug,
Usage: "Show DEBUG messages",
}
// RC signifies the latest release candidate
var RC bool
// RCFlag chooses the latest release candidate for install
var RCFlag = &cli.BoolFlag{
Name: "rc",
Value: false,
Destination: &RC,
Usage: "Insatll the latest release candidate",
}
var Major bool
var MajorFlag = &cli.BoolFlag{
Name: "major",
Usage: "Increase the major part of the version",
Value: false,
Aliases: []string{"ma", "x"},
Destination: &Major,
}
var Minor bool
var MinorFlag = &cli.BoolFlag{
Name: "minor",
Usage: "Increase the minor part of the version",
Value: false,
Aliases: []string{"mi", "y"},
Destination: &Minor,
}
var Patch bool
var PatchFlag = &cli.BoolFlag{
Name: "patch",
Usage: "Increase the patch part of the version",
Value: false,
Aliases: []string{"pa", "z"},
Destination: &Patch,
}
var Dry bool
var DryFlag = &cli.BoolFlag{
Name: "dry-run",
Usage: "Only reports changes that would be made",
Value: false,
Aliases: []string{"d"},
Destination: &Dry,
}
var Publish bool
var PublishFlag = &cli.BoolFlag{
Name: "publish",
Usage: "Publish changes to git.coopcloud.tech",
Value: false,
Aliases: []string{"p"},
Destination: &Publish,
}
var Domain string
var DomainFlag = &cli.StringFlag{
Name: "domain",
Aliases: []string{"d"},
Value: "",
Usage: "Choose a domain name",
Destination: &Domain,
}
var NewAppServer string
var NewAppServerFlag = &cli.StringFlag{
Name: "server",
Aliases: []string{"s"},
Value: "",
Usage: "Show apps of a specific server",
Destination: &NewAppServer,
}
var NewAppName string
var NewAppNameFlag = &cli.StringFlag{
Name: "app-name",
Aliases: []string{"a"},
Value: "",
Usage: "Choose an app name",
Destination: &NewAppName,
}
var NoDomainChecks bool
var NoDomainChecksFlag = &cli.BoolFlag{
Name: "no-domain-checks",
Aliases: []string{"nd"},
Value: false,
Usage: "Disable app domain sanity checks",
Destination: &NoDomainChecks,
}
var StdErrOnly bool
var StdErrOnlyFlag = &cli.BoolFlag{
Name: "stderr",
Aliases: []string{"s"},
Value: false,
Usage: "Only tail stderr",
Destination: &StdErrOnly,
}
var AutoDNSRecord bool
var AutoDNSRecordFlag = &cli.BoolFlag{
Name: "auto",
Aliases: []string{"a"},
Value: false,
Usage: "Automatically configure DNS records",
Destination: &AutoDNSRecord,
}
var DontWaitConverge bool
var DontWaitConvergeFlag = &cli.BoolFlag{
Name: "no-converge-checks",
Aliases: []string{"nc"},
Value: false,
Usage: "Don't wait for converge logic checks",
Destination: &DontWaitConverge,
}
var Watch bool
var WatchFlag = &cli.BoolFlag{
Name: "watch",
Aliases: []string{"w"},
Value: false,
Usage: "Watch status by polling repeatedly",
Destination: &Watch,
}
var OnlyErrors bool
var OnlyErrorFlag = &cli.BoolFlag{
Name: "errors",
Aliases: []string{"e"},
Value: false,
Usage: "Only show errors",
Destination: &OnlyErrors,
}
var SkipUpdates bool
var SkipUpdatesFlag = &cli.BoolFlag{
Name: "skip-updates",
Aliases: []string{"s"},
Value: false,
Usage: "Skip updating recipe repositories",
Destination: &SkipUpdates,
}
var RegistryUsername string
var RegistryUsernameFlag = &cli.StringFlag{
Name: "username",
Aliases: []string{"user"},
Value: "",
Usage: "Registry username",
EnvVars: []string{"REGISTRY_USERNAME"},
Destination: &RegistryUsername,
}
var RegistryPassword string
var RegistryPasswordFlag = &cli.StringFlag{
Name: "password",
Aliases: []string{"pass"},
Value: "",
Usage: "Registry password",
EnvVars: []string{"REGISTRY_PASSWORD"},
Destination: &RegistryPassword,
}
// SSHFailMsg is a hopefully helpful SSH failure message
var SSHFailMsg = `
Whoops, Abra is unable to connect to %s.
Here are a few tips for debugging your local SSH config. Abra uses plain old
SSH to make connections to servers, so if your SSH config is working, Abra is
working.
In the first place, Abra will always try to read your Docker context connection
string for SSH connection details. You can view your server context configs
with the following command. Are they correct?
abra server ls
Is your ssh-agent running? You can start it by running the following command:
eval "$(ssh-agent)"
Is your SSH private key loaded? You can check by running the following command:
ssh-add -L
If not, you can add it with:
ssh-add ~/.ssh/<private-key-part>
If you are using a non-default public/private key, you can configure this in
your ~/.ssh/config file which Abra will read in order to figure out connection
details:
Host foo.coopcloud.tech
Hostname foo.coopcloud.tech
User bar
Port 12345
IdentityFile ~/.ssh/bar@foo.coopcloud.tech
If you're only using password authentication, you can use the following config:
Host foo.coopcloud.tech
Hostname foo.coopcloud.tech
User bar
Port 12345
PreferredAuthentications=password
PubkeyAuthentication=no
Good luck!
`
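These package-level variables and their Flag counterparts are shared across abra's subcommands; the Destination field makes urfave/cli v2 write each parsed value straight into the matching variable, so command actions can read plain globals instead of digging through the cli.Context. A minimal, self-contained sketch of that wiring follows (the command and flag names are illustrative, not taken from this diff):

package main

import (
    "fmt"
    "os"

    "github.com/urfave/cli/v2"
)

// force mirrors the parsed value of --force, in the same style as the
// package-level variables above.
var force bool

var forceFlag = &cli.BoolFlag{
    Name:        "force",
    Aliases:     []string{"f"},
    Usage:       "Perform action without further prompt",
    Destination: &force, // urfave/cli stores the parsed value here
}

func main() {
    app := &cli.App{
        Name:  "demo",
        Flags: []cli.Flag{forceFlag},
        Action: func(c *cli.Context) error {
            // The package-level variable and the context now agree.
            fmt.Println("force:", force, c.Bool("force"))
            return nil
        },
    }
    if err := app.Run(os.Args); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}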

View File

@ -1,183 +0,0 @@
package internal
import (
"fmt"
"path"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/secret"
"coopcloud.tech/abra/pkg/ssh"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
// AppSecrets represents all app secrets
type AppSecrets map[string]string
// RecipeName is used for configuring recipe name programmatically
var RecipeName string
// createSecrets creates all secrets for a new app.
func createSecrets(sanitisedAppName string) (AppSecrets, error) {
appEnvPath := path.Join(config.ABRA_DIR, "servers", NewAppServer, fmt.Sprintf("%s.env", sanitisedAppName))
appEnv, err := config.ReadEnv(appEnvPath)
if err != nil {
return nil, err
}
secretEnvVars := secret.ReadSecretEnvVars(appEnv)
secrets, err := secret.GenerateSecrets(secretEnvVars, sanitisedAppName, NewAppServer)
if err != nil {
return nil, err
}
if Pass {
for secretName := range secrets {
secretValue := secrets[secretName]
if err := secret.PassInsertSecret(secretValue, secretName, sanitisedAppName, NewAppServer); err != nil {
return nil, err
}
}
}
return secrets, nil
}
// ensureDomainFlag checks if the domain flag was used. If not, it asks the user for it.
func ensureDomainFlag(recipe recipe.Recipe, server string) error {
if Domain == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify app domain",
Default: fmt.Sprintf("%s.%s", recipe.Name, server),
}
if err := survey.AskOne(prompt, &Domain); err != nil {
return err
}
}
if Domain == "" {
return fmt.Errorf("no domain provided")
}
return nil
}
// ensureServerFlag checks if the server flag was used. If not, it asks the user for it.
func ensureServerFlag() error {
servers, err := config.GetServers()
if err != nil {
return err
}
if NewAppServer == "" && !NoInput {
prompt := &survey.Select{
Message: "Select app server:",
Options: servers,
}
if err := survey.AskOne(prompt, &NewAppServer); err != nil {
return err
}
}
if NewAppServer == "" {
return fmt.Errorf("no server provided")
}
return nil
}
// ensureAppNameFlag checks if the app name flag was used. If not, it asks the user for it.
func ensureAppNameFlag() error {
if NewAppName == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify app name:",
Default: Domain,
}
if err := survey.AskOne(prompt, &NewAppName); err != nil {
return err
}
}
if NewAppName == "" {
return fmt.Errorf("no app name provided")
}
return nil
}
// NewAction is the new app creation logic
func NewAction(c *cli.Context) error {
recipe := ValidateRecipeWithPrompt(c)
if err := recipePkg.EnsureUpToDate(recipe.Name); err != nil {
logrus.Fatal(err)
}
if err := ensureServerFlag(); err != nil {
logrus.Fatal(err)
}
if err := ensureDomainFlag(recipe, NewAppServer); err != nil {
logrus.Fatal(err)
}
if err := ensureAppNameFlag(); err != nil {
logrus.Fatal(err)
}
sanitisedAppName := config.SanitiseAppName(NewAppName)
if len(sanitisedAppName) > 45 {
logrus.Fatalf("%s cannot be longer than 45 characters", sanitisedAppName)
}
logrus.Debugf("%s sanitised as %s for new app", NewAppName, sanitisedAppName)
if err := config.TemplateAppEnvSample(recipe.Name, NewAppName, NewAppServer, Domain); err != nil {
logrus.Fatal(err)
}
if Secrets {
if err := ssh.EnsureHostKey(NewAppServer); err != nil {
logrus.Fatal(err)
}
secrets, err := createSecrets(sanitisedAppName)
if err != nil {
logrus.Fatal(err)
}
secretCols := []string{"Name", "Value"}
secretTable := formatter.CreateTable(secretCols)
for secret := range secrets {
secretTable.Append([]string{secret, secrets[secret]})
}
if len(secrets) > 0 {
defer secretTable.Render()
}
}
if NewAppServer == "default" {
NewAppServer = "local"
}
tableCol := []string{"server", "type", "domain", "app name"}
table := formatter.CreateTable(tableCol)
table.Append([]string{NewAppServer, recipe.Name, Domain, NewAppName})
fmt.Println("")
fmt.Println(fmt.Sprintf("A new %s app has been created! Here is an overview:", recipe.Name))
fmt.Println("")
table.Render()
fmt.Println("")
fmt.Println("You can configure this app by running the following:")
fmt.Println(fmt.Sprintf("\n abra app config %s", NewAppName))
fmt.Println("")
fmt.Println("You can deploy this app by running the following:")
fmt.Println(fmt.Sprintf("\n abra app deploy %s", NewAppName))
fmt.Println("")
return nil
}
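The ensure*Flag helpers above all follow the same "flag or prompt" pattern: accept the value from the command line if it was given, otherwise ask interactively via survey, unless --no-input is set. A small stand-alone sketch of that pattern (the helper name and defaults are invented for illustration):

package main

import (
    "fmt"

    "github.com/AlecAivazis/survey/v2"
)

// ensureValue fills *current from a prompt when it is empty and interactive
// input is allowed, mirroring ensureDomainFlag/ensureServerFlag above.
func ensureValue(current *string, message, fallback string, noInput bool) error {
    if *current == "" && !noInput {
        prompt := &survey.Input{Message: message, Default: fallback}
        if err := survey.AskOne(prompt, current); err != nil {
            return err
        }
    }
    if *current == "" {
        return fmt.Errorf("no value provided for %q", message)
    }
    return nil
}

func main() {
    var domain string
    if err := ensureValue(&domain, "Specify app domain", "wordpress.example.com", false); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("domain:", domain)
}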

View File

@ -3,15 +3,15 @@ package internal
import (
"fmt"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"github.com/AlecAivazis/survey/v2"
"github.com/docker/distribution/reference"
"github.com/sirupsen/logrus"
)
// PromptBumpType prompts for version bump type
func PromptBumpType(tagString string) error {
func PromptBumpType(tagString, latestRelease string) error {
if (!Major && !Minor && !Patch) && tagString == "" {
fmt.Printf(`
You need to make a decision about what kind of an update this new recipe
@ -20,6 +20,8 @@ migration work or take care of some breaking changes? This can be signaled in
the version you specify on the recipe deploy label and is called a semantic
version.
The latest published version is %s.
Here is a semver cheat sheet (more on https://semver.org):
major: new features/bug fixes, backwards incompatible (e.g 1.0.0 -> 2.0.0).
@ -34,7 +36,7 @@ Here is a semver cheat sheet (more on https://semver.org):
should also Just Work and is mostly to do with minor bug fixes
and/or security patches. "nothing to worry about".
`)
`, latestRelease)
var chosenBumpType string
prompt := &survey.Select{
@ -94,7 +96,7 @@ func GetMainAppImage(recipe recipe.Recipe) (string, error) {
}
path = reference.Path(img)
path = recipePkg.StripTagMeta(path)
path = formatter.StripTagMeta(path)
return path, nil
}
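For reference, the semver cheat sheet quoted above boils down to simple arithmetic on the three version parts. A tiny worked example (plain stdlib, not abra code): starting from 1.2.3, a major bump yields 2.0.0, a minor bump 1.3.0 and a patch bump 1.2.4.

package main

import "fmt"

type semver struct{ major, minor, patch int }

// bump applies exactly one of the three bump types, resetting the lower
// parts as semver requires.
func (v semver) bump(major, minor, patch bool) semver {
    switch {
    case major:
        return semver{v.major + 1, 0, 0}
    case minor:
        return semver{v.major, v.minor + 1, 0}
    case patch:
        return semver{v.major, v.minor, v.patch + 1}
    }
    return v
}

func (v semver) String() string { return fmt.Sprintf("%d.%d.%d", v.major, v.minor, v.patch) }

func main() {
    v := semver{1, 2, 3}
    fmt.Println(v.bump(true, false, false)) // 2.0.0
    fmt.Println(v.bump(false, true, false)) // 1.3.0
    fmt.Println(v.bump(false, false, true)) // 1.2.4
}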

View File

@ -2,56 +2,24 @@ package internal
import (
"errors"
"fmt"
"os"
"strings"
"coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/ssh"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
// AppName is used for configuring app name programmatically
var AppName string
// ValidateRecipe ensures the recipe arg is valid.
func ValidateRecipe(c *cli.Context) recipe.Recipe {
recipeName := c.Args().First()
if recipeName == "" {
ShowSubcommandHelpAndError(c, errors.New("no recipe name provided"))
}
chosenRecipe, err := recipe.Get(recipeName)
if err != nil {
if c.Command.Name == "generate" {
if strings.Contains(err.Error(), "missing a compose") {
logrus.Fatal(err)
}
logrus.Warn(err)
} else {
logrus.Fatal(err)
}
}
logrus.Debugf("validated %s as recipe argument", recipeName)
return chosenRecipe
}
// ValidateRecipeWithPrompt ensures a recipe argument is present before
// validating, asking for input if required.
func ValidateRecipeWithPrompt(c *cli.Context) recipe.Recipe {
recipeName := c.Args().First()
if recipeName == "" && !NoInput {
var recipes []string
catl, err := recipe.ReadRecipeCatalogue()
catl, err := recipe.ReadRecipeCatalogue(Offline)
if err != nil {
logrus.Fatal(err)
}
@ -85,18 +53,23 @@ func ValidateRecipeWithPrompt(c *cli.Context) recipe.Recipe {
}
}
if RecipeName != "" {
recipeName = RecipeName
logrus.Debugf("programmatically setting recipe name to %s", recipeName)
}
if recipeName == "" {
ShowSubcommandHelpAndError(c, errors.New("no recipe name provided"))
}
chosenRecipe, err := recipe.Get(recipeName)
chosenRecipe, err := recipe.Get(recipeName, Offline)
if err != nil {
logrus.Fatal(err)
if c.Command.Name == "generate" {
if strings.Contains(err.Error(), "missing a compose") {
logrus.Fatal(err)
}
logrus.Warn(err)
} else {
if strings.Contains(err.Error(), "template_driver is not allowed") {
logrus.Warnf("ensure %s recipe compose.* files include \"version: '3.8'\"", recipeName)
}
logrus.Fatalf("unable to validate recipe: %s", err)
}
}
logrus.Debugf("validated %s as recipe argument", recipeName)
@ -108,11 +81,6 @@ func ValidateRecipeWithPrompt(c *cli.Context) recipe.Recipe {
func ValidateApp(c *cli.Context) config.App {
appName := c.Args().First()
if AppName != "" {
appName = AppName
logrus.Debugf("programmatically setting app name to %s", appName)
}
if appName == "" {
ShowSubcommandHelpAndError(c, errors.New("no app provided"))
}
@ -122,21 +90,13 @@ func ValidateApp(c *cli.Context) config.App {
logrus.Fatal(err)
}
if err := recipe.EnsureExists(app.Type); err != nil {
logrus.Fatal(err)
}
if err := ssh.EnsureHostKey(app.Server); err != nil {
logrus.Fatal(err)
}
logrus.Debugf("validated %s as app argument", appName)
return app
}
// ValidateDomain ensures the domain name arg is valid.
func ValidateDomain(c *cli.Context) (string, error) {
func ValidateDomain(c *cli.Context) string {
domainName := c.Args().First()
if domainName == "" && !NoInput {
@ -145,7 +105,7 @@ func ValidateDomain(c *cli.Context) (string, error) {
Default: "example.com",
}
if err := survey.AskOne(prompt, &domainName); err != nil {
return domainName, err
logrus.Fatal(err)
}
}
@ -155,7 +115,7 @@ func ValidateDomain(c *cli.Context) (string, error) {
logrus.Debugf("validated %s as domain argument", domainName)
return domainName, nil
return domainName
}
// ValidateSubCmdFlags ensures flag order conforms to correct order
@ -173,12 +133,12 @@ func ValidateSubCmdFlags(c *cli.Context) bool {
}
// ValidateServer ensures the server name arg is valid.
func ValidateServer(c *cli.Context) (string, error) {
func ValidateServer(c *cli.Context) string {
serverName := c.Args().First()
serverNames, err := config.ReadServerNames()
if err != nil {
return serverName, err
logrus.Fatal(err)
}
if serverName == "" && !NoInput {
@ -187,7 +147,14 @@ func ValidateServer(c *cli.Context) (string, error) {
Options: serverNames,
}
if err := survey.AskOne(prompt, &serverName); err != nil {
return serverName, err
logrus.Fatal(err)
}
}
matched := false
for _, name := range serverNames {
if name == serverName {
matched = true
}
}
@ -195,301 +162,11 @@ func ValidateServer(c *cli.Context) (string, error) {
ShowSubcommandHelpAndError(c, errors.New("no server provided"))
}
if !matched {
ShowSubcommandHelpAndError(c, errors.New("server doesn't exist?"))
}
logrus.Debugf("validated %s as server argument", serverName)
return serverName, nil
}
// EnsureDNSProvider ensures a DNS provider is chosen.
func EnsureDNSProvider() error {
if DNSProvider == "" && !NoInput {
prompt := &survey.Select{
Message: "Select DNS provider",
Options: []string{"gandi"},
}
if err := survey.AskOne(prompt, &DNSProvider); err != nil {
return err
}
}
if DNSProvider == "" {
return fmt.Errorf("missing DNS provider?")
}
return nil
}
// EnsureDNSTypeFlag ensures a DNS type flag is present.
func EnsureDNSTypeFlag(c *cli.Context) error {
if DNSType == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify DNS record type",
Default: "A",
}
if err := survey.AskOne(prompt, &DNSType); err != nil {
return err
}
}
if DNSType == "" {
ShowSubcommandHelpAndError(c, errors.New("no record type provided"))
}
return nil
}
// EnsureDNSNameFlag ensures a DNS name flag is present.
func EnsureDNSNameFlag(c *cli.Context) error {
if DNSName == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify DNS record name",
Default: "mysubdomain",
}
if err := survey.AskOne(prompt, &DNSName); err != nil {
return err
}
}
if DNSName == "" {
ShowSubcommandHelpAndError(c, errors.New("no record name provided"))
}
return nil
}
// EnsureDNSValueFlag ensures a DNS value flag is present.
func EnsureDNSValueFlag(c *cli.Context) error {
if DNSValue == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify DNS record value",
Default: "192.168.1.2",
}
if err := survey.AskOne(prompt, &DNSValue); err != nil {
return err
}
}
if DNSValue == "" {
ShowSubcommandHelpAndError(c, errors.New("no record value provided"))
}
return nil
}
// EnsureZoneArgument ensures a zone argument is present.
func EnsureZoneArgument(c *cli.Context) (string, error) {
zone := c.Args().First()
if zone == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify a domain name zone",
Default: "example.com",
}
if err := survey.AskOne(prompt, &zone); err != nil {
return zone, err
}
}
if zone == "" {
ShowSubcommandHelpAndError(c, errors.New("no zone value provided"))
}
return zone, nil
}
// EnsureServerProvider ensures a 3rd party server provider is chosen.
func EnsureServerProvider() error {
if ServerProvider == "" && !NoInput {
prompt := &survey.Select{
Message: "Select server provider",
Options: []string{"capsul", "hetzner-cloud"},
}
if err := survey.AskOne(prompt, &ServerProvider); err != nil {
return err
}
}
if ServerProvider == "" {
return fmt.Errorf("missing server provider?")
}
return nil
}
// EnsureNewCapsulVPSFlags ensure all flags are present.
func EnsureNewCapsulVPSFlags(c *cli.Context) error {
if CapsulName == "" && !NoInput {
prompt := &survey.Input{
Message: "specify capsul name",
}
if err := survey.AskOne(prompt, &CapsulName); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify capsul instance URL",
Default: CapsulInstanceURL,
}
if err := survey.AskOne(prompt, &CapsulInstanceURL); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify capsul type",
Default: CapsulType,
}
if err := survey.AskOne(prompt, &CapsulType); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify capsul image",
Default: CapsulImage,
}
if err := survey.AskOne(prompt, &CapsulImage); err != nil {
return err
}
}
if len(CapsulSSHKeys.Value()) == 0 && !NoInput {
var sshKeys string
prompt := &survey.Input{
Message: "specify capsul SSH keys (e.g. me@foo.com)",
Default: "",
}
if err := survey.AskOne(prompt, &sshKeys); err != nil {
return err
}
CapsulSSHKeys = *cli.NewStringSlice(strings.Split(sshKeys, ",")...)
}
if CapsulAPIToken == "" && !NoInput {
token, ok := os.LookupEnv("CAPSUL_TOKEN")
if !ok {
prompt := &survey.Input{
Message: "specify capsul API token",
}
if err := survey.AskOne(prompt, &CapsulAPIToken); err != nil {
return err
}
} else {
CapsulAPIToken = token
}
}
if CapsulName == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul name?"))
}
if CapsulInstanceURL == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul instance url?"))
}
if CapsulType == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul type?"))
}
if CapsulImage == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul image?"))
}
if len(CapsulSSHKeys.Value()) == 0 {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul ssh keys?"))
}
if CapsulAPIToken == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul API token?"))
}
return nil
}
// EnsureNewHetznerCloudVPSFlags ensure all flags are present.
func EnsureNewHetznerCloudVPSFlags(c *cli.Context) error {
if HetznerCloudName == "" && !NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS name",
}
if err := survey.AskOne(prompt, &HetznerCloudName); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS type",
Default: HetznerCloudType,
}
if err := survey.AskOne(prompt, &HetznerCloudType); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS image",
Default: HetznerCloudImage,
}
if err := survey.AskOne(prompt, &HetznerCloudImage); err != nil {
return err
}
}
if len(HetznerCloudSSHKeys.Value()) == 0 && !NoInput {
var sshKeys string
prompt := &survey.Input{
Message: "specify hetzner cloud SSH keys (e.g. me@foo.com)",
Default: "",
}
if err := survey.AskOne(prompt, &sshKeys); err != nil {
return err
}
HetznerCloudSSHKeys = *cli.NewStringSlice(strings.Split(sshKeys, ",")...)
}
if !NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS location",
Default: HetznerCloudLocation,
}
if err := survey.AskOne(prompt, &HetznerCloudLocation); err != nil {
return err
}
}
if HetznerCloudAPIToken == "" && !NoInput {
token, ok := os.LookupEnv("HCLOUD_TOKEN")
if !ok {
prompt := &survey.Input{
Message: "specify hetzner cloud API token",
}
if err := survey.AskOne(prompt, &HetznerCloudAPIToken); err != nil {
return err
}
} else {
HetznerCloudAPIToken = token
}
}
if HetznerCloudName == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud VPS name?"))
}
if HetznerCloudType == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud VPS type?"))
}
if HetznerCloudImage == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud image?"))
}
if HetznerCloudLocation == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud VPS location?"))
}
if HetznerCloudAPIToken == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud API token?"))
}
return nil
return serverName
}

40
cli/recipe/diff.go Normal file
View File

@ -0,0 +1,40 @@
package recipe
import (
"path"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
gitPkg "coopcloud.tech/abra/pkg/git"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var recipeDiffCommand = cli.Command{
Name: "diff",
Usage: "Show unstaged changes in recipe config",
Description: "Due to limitations in our underlying Git dependency, this command requires /usr/bin/git.",
Aliases: []string{"d"},
ArgsUsage: "<recipe>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipeName := c.Args().First()
if recipeName != "" {
internal.ValidateRecipe(c)
}
recipeDir := path.Join(config.RECIPES_DIR, recipeName)
if err := gitPkg.DiffUnstaged(recipeDir); err != nil {
logrus.Fatal(err)
}
return nil
},
}
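gitPkg.DiffUnstaged is not shown in this diff; given the note that the command requires /usr/bin/git, a plausible reading is that it simply shells out to git diff in the recipe directory. The sketch below is an assumption about that behaviour, not abra's actual implementation, and the recipe path is illustrative:

package main

import (
    "os"
    "os/exec"
)

// diffUnstaged streams "git diff" output for the given repository path.
func diffUnstaged(repoPath string) error {
    cmd := exec.Command("git", "-C", repoPath, "diff")
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    return cmd.Run()
}

func main() {
    // Illustrative path; abra resolves this from its recipes directory.
    if err := diffUnstaged(os.ExpandEnv("$HOME/.abra/recipes/wordpress")); err != nil {
        os.Exit(1)
    }
}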

50
cli/recipe/fetch.go Normal file
View File

@ -0,0 +1,50 @@
package recipe
import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var recipeFetchCommand = cli.Command{
Name: "fetch",
Usage: "Fetch recipe(s)",
Aliases: []string{"f"},
ArgsUsage: "[<recipe>]",
Description: "Retrieves all recipes if no <recipe> argument is passed",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipeName := c.Args().First()
if recipeName != "" {
internal.ValidateRecipe(c)
if err := recipe.Ensure(recipeName); err != nil {
logrus.Fatal(err)
}
return nil
}
catalogue, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
logrus.Fatal(err)
}
catlBar := formatter.CreateProgressbar(len(catalogue), "fetching latest recipes...")
for recipeName := range catalogue {
if err := recipe.Ensure(recipeName); err != nil {
logrus.Error(err)
}
catlBar.Add(1)
}
return nil
},
}

View File

@ -12,52 +12,99 @@ import (
"github.com/urfave/cli/v2"
)
var recipeLintCommand = &cli.Command{
Name: "lint",
Usage: "Lint a recipe",
Aliases: []string{"l"},
ArgsUsage: "<recipe>",
Flags: []cli.Flag{internal.OnlyErrorFlag},
var recipeLintCommand = cli.Command{
Name: "lint",
Usage: "Lint a recipe",
Aliases: []string{"l"},
ArgsUsage: "<recipe>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.OnlyErrorFlag,
internal.OfflineFlag,
internal.NoInputFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipe(c)
if err := recipePkg.EnsureUpToDate(recipe.Name); err != nil {
if err := recipePkg.EnsureExists(recipe.Name); err != nil {
logrus.Fatal(err)
}
tableCol := []string{"ref", "rule", "satisfied", "severity", "resolve"}
if !internal.Chaos {
if err := recipePkg.EnsureIsClean(recipe.Name); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipePkg.EnsureUpToDate(recipe.Name); err != nil {
logrus.Fatal(err)
}
}
if err := recipePkg.EnsureLatest(recipe.Name); err != nil {
logrus.Fatal(err)
}
}
tableCol := []string{"ref", "rule", "severity", "satisfied", "skipped", "resolve"}
table := formatter.CreateTable(tableCol)
hasError := false
bar := formatter.CreateProgressbar(-1, "running recipe lint rules...")
for level := range lint.LintRules {
for _, rule := range lint.LintRules[level] {
ok, err := rule.Function(recipe)
if err != nil {
logrus.Warn(err)
if internal.OnlyErrors && rule.Level != "error" {
logrus.Debugf("skipping %s, does not have level \"error\"", rule.Ref)
continue
}
if !ok && rule.Level == "error" {
hasError = true
skipped := false
if rule.Skip(recipe) {
skipped = true
}
var result string
if ok {
result = "yes"
} else {
result = "NO"
skippedOutput := "-"
if skipped {
skippedOutput = ""
}
if internal.OnlyErrors {
if !ok && rule.Level == "error" {
table.Append([]string{rule.Ref, rule.Description, result, rule.Level, rule.HowToResolve})
bar.Add(1)
satisfied := false
if !skipped {
ok, err := rule.Function(recipe)
if err != nil {
logrus.Warn(err)
}
if !ok && rule.Level == "error" {
hasError = true
}
if ok {
satisfied = true
}
} else {
table.Append([]string{rule.Ref, rule.Description, result, rule.Level, rule.HowToResolve})
bar.Add(1)
}
satisfiedOutput := "✅"
if !satisfied {
satisfiedOutput = "❌"
if skipped {
satisfiedOutput = "-"
}
}
table.Append([]string{
rule.Ref,
rule.Description,
rule.Level,
satisfiedOutput,
skippedOutput,
rule.HowToResolve,
})
bar.Add(1)
}
}
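The lint loop above only relies on each rule exposing Ref, Level, Description and HowToResolve fields plus Skip and Function callbacks. The concrete type lives in abra's lint package and may differ; the following self-contained sketch just illustrates that shape with a made-up rule:

package main

import "fmt"

// Recipe stands in for coopcloud.tech/abra/pkg/recipe.Recipe.
type Recipe struct{ Name string }

// LintRule mirrors the fields read by the lint loop above (an assumption
// about the real struct in coopcloud.tech/abra/pkg/lint).
type LintRule struct {
    Ref          string
    Level        string // e.g. "error", "warn"
    Description  string
    HowToResolve string
    Skip         func(Recipe) bool
    Function     func(Recipe) (bool, error)
}

func main() {
    rule := LintRule{
        Ref:          "R001",
        Level:        "error",
        Description:  "recipe has a name",
        HowToResolve: "set a recipe name",
        Skip:         func(Recipe) bool { return false },
        Function:     func(r Recipe) (bool, error) { return r.Name != "", nil },
    }
    r := Recipe{Name: "wordpress"}
    if !rule.Skip(r) {
        ok, _ := rule.Function(r)
        fmt.Printf("%s (%s): satisfied=%v\n", rule.Ref, rule.Level, ok)
    }
}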

View File

@ -2,14 +2,12 @@ package recipe
import (
"fmt"
"path"
"sort"
"strconv"
"strings"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/formatter"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
@ -17,28 +15,25 @@ import (
var pattern string
var patternFlag = &cli.StringFlag{
Name: "pattern",
Name: "pattern, p",
Value: "",
Aliases: []string{"p"},
Usage: "Simple string to filter recipes",
Destination: &pattern,
}
var recipeListCommand = &cli.Command{
var recipeListCommand = cli.Command{
Name: "list",
Usage: "List available recipes",
Aliases: []string{"ls"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.MachineReadableFlag,
patternFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
catalogueDir := path.Join(config.ABRA_DIR, "catalogue")
url := fmt.Sprintf("%s/%s.git", config.REPOS_BASE_URL, "recipes")
if err := gitPkg.Clone(catalogueDir, url); err != nil {
return err
}
catl, err := recipe.ReadRecipeCatalogue()
catl, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
logrus.Fatal(err.Error())
}
@ -73,10 +68,14 @@ var recipeListCommand = &cli.Command{
}
}
table.SetCaption(true, fmt.Sprintf("total recipes: %v", len))
if table.NumLines() > 0 {
table.Render()
if internal.MachineReadable {
table.SetCaption(false, "")
table.JSONRender()
} else {
table.SetCaption(true, fmt.Sprintf("total recipes: %v", len))
table.Render()
}
}
return nil

View File

@ -30,15 +30,21 @@ type recipeMetadata struct {
SSO string
}
var recipeNewCommand = &cli.Command{
Name: "new",
var recipeNewCommand = cli.Command{
Name: "new",
Aliases: []string{"n"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Usage: "Create a new recipe",
Aliases: []string{"n"},
ArgsUsage: "<recipe>",
Description: `
This command creates a new recipe.
Create a new recipe.
Abra uses our built-in example repository which is available here:
Abra uses the built-in example repository which is available here:
https://git.coopcloud.tech/coop-cloud/example
@ -106,7 +112,7 @@ In order to share your recipe, you can upload it the git repository to:
If you're not sure how to do that, come chat with us:
https://docs.coopcloud.tech/contact
https://docs.coopcloud.tech/intro/contact
See "abra recipe -h" for additional recipe maintainer commands.

View File

@ -5,15 +5,16 @@ import (
)
// RecipeCommand defines all recipe related sub-commands.
var RecipeCommand = &cli.Command{
var RecipeCommand = cli.Command{
Name: "recipe",
Aliases: []string{"r"},
Usage: "Manage recipes",
ArgsUsage: "<recipe>",
Aliases: []string{"r"},
Description: `
A recipe is a blueprint for an app. It is a bunch of config files which
describe how to deploy and maintain an app. Recipes are maintained by the Co-op
Cloud community and you can use Abra to read them and create apps for you.
Cloud community and you can use Abra to read them, deploy them and create apps
for you.
Anyone who uses a recipe can become a maintainer. Maintainers typically make
sure the recipe is in good working order and the config upgraded in a timely
manner. Abra supports convenient automation for recipe maintenance, see the
"abra recipe upgrade", "abra recipe sync" and "abra recipe release" commands.
`,
Subcommands: []*cli.Command{
recipeListCommand,
recipeVersionCommand,
recipeReleaseCommand,
recipeNewCommand,
recipeUpgradeCommand,
recipeSyncCommand,
recipeLintCommand,
&recipeFetchCommand,
&recipeLintCommand,
&recipeListCommand,
&recipeNewCommand,
&recipeReleaseCommand,
&recipeSyncCommand,
&recipeUpgradeCommand,
&recipeVersionCommand,
&recipeResetCommand,
&recipeDiffCommand,
},
}

View File

@ -1,7 +1,9 @@
package recipe
import (
"errors"
"fmt"
"os"
"path"
"strconv"
"strings"
@ -21,23 +23,22 @@ import (
"github.com/urfave/cli/v2"
)
var recipeReleaseCommand = &cli.Command{
var recipeReleaseCommand = cli.Command{
Name: "release",
Usage: "Release a new recipe version",
Aliases: []string{"rl"},
Usage: "Release a new recipe version",
ArgsUsage: "<recipe> [<version>]",
Description: `
This command is used to specify a new version of a recipe. These versions are
then published on the Co-op Cloud recipe catalogue. These versions take the
following form:
Create a new version of a recipe. These versions are then published on the
Co-op Cloud recipe catalogue. These versions take the following form:
a.b.c+x.y.z
Where the "a.b.c" part is a semantic version determined by the maintainer. And
the "x.y.z" part is the image tag of the recipe "app" service (the main
container which contains the software to be used).
Where the "a.b.c" part is a semantic version determined by the maintainer. The
"x.y.z" part is the image tag of the recipe "app" service (the main container
which contains the software to be used, by naming convention).
We maintain a semantic versioning scheme ("a.b.c") alongside the libre app
We maintain a semantic versioning scheme ("a.b.c") alongside the recipe
versioning scheme ("x.y.z") in order to maximise the chances that the nature of
recipe updates are properly communicated. I.e. developers of an app might
publish a minor version but that might lead to changes in the recipe which are
@ -48,15 +49,19 @@ requires that you have permission to git push to these repositories and have
your SSH keys configured on your account.
`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.DryFlag,
internal.MajorFlag,
internal.MinorFlag,
internal.PatchFlag,
internal.PublishFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipeWithPrompt(c)
recipe := internal.ValidateRecipe(c)
imagesTmp, err := getImageVersions(recipe)
if err != nil {
@ -103,6 +108,18 @@ your SSH keys configured on your account.
}
}
isClean, err := gitPkg.IsClean(recipe.Dir())
if err != nil {
logrus.Fatal(err)
}
if !isClean {
logrus.Infof("%s currently has these unstaged changes 👇", recipe.Name)
if err := gitPkg.DiffUnstaged(recipe.Dir()); err != nil {
logrus.Fatal(err)
}
}
if len(tags) > 0 {
logrus.Warnf("previous git tags detected, assuming this is a new semver release")
if err := createReleaseFromPreviousTag(tagString, mainAppVersion, recipe, tags); err != nil {
@ -125,7 +142,7 @@ your SSH keys configured on your account.
// getImageVersions retrieves image versions for a recipe
func getImageVersions(recipe recipe.Recipe) (map[string]string, error) {
var services = make(map[string]string)
services := make(map[string]string)
missingTag := false
for _, service := range recipe.Config.Services {
@ -140,7 +157,7 @@ func getImageVersions(recipe recipe.Recipe) (map[string]string, error) {
path := reference.Path(img)
path = recipePkg.StripTagMeta(path)
path = formatter.StripTagMeta(path)
var tag string
switch img.(type) {
@ -192,6 +209,10 @@ func createReleaseFromTag(recipe recipe.Recipe, tagString, mainAppVersion string
tagString = fmt.Sprintf("%s+%s", tag.String(), mainAppVersion)
}
if err := addReleaseNotes(recipe, tagString); err != nil {
logrus.Fatal(err)
}
if err := commitRelease(recipe, tagString); err != nil {
logrus.Fatal(err)
}
@ -222,6 +243,82 @@ func getTagCreateOptions(tag string) (git.CreateTagOptions, error) {
return git.CreateTagOptions{Message: msg}, nil
}
// addReleaseNotes checks if the release/next release note exists and moves the
// file to release/<tag>.
func addReleaseNotes(recipe recipe.Recipe, tag string) error {
repoPath := path.Join(config.RECIPES_DIR, recipe.Name)
tagReleaseNotePath := path.Join(repoPath, "release", tag)
if _, err := os.Stat(tagReleaseNotePath); err == nil {
// Release note for the current tag already exists.
return nil
} else if !errors.Is(err, os.ErrNotExist) {
return err
}
nextReleaseNotePath := path.Join(repoPath, "release", "next")
if _, err := os.Stat(nextReleaseNotePath); err == nil {
// release/next note exists. Move it to release/<tag>
if internal.Dry {
logrus.Debugf("dry run: move release note from 'next' to %s", tag)
return nil
}
if !internal.NoInput {
prompt := &survey.Input{
Message: "Use release note in release/next?",
}
var addReleaseNote bool
if err := survey.AskOne(prompt, &addReleaseNote); err != nil {
return err
}
if !addReleaseNote {
return nil
}
}
err := os.Rename(nextReleaseNotePath, tagReleaseNotePath)
if err != nil {
return err
}
err = gitPkg.Add(repoPath, path.Join("release", "next"), internal.Dry)
if err != nil {
return err
}
err = gitPkg.Add(repoPath, path.Join("release", tag), internal.Dry)
if err != nil {
return err
}
} else if !errors.Is(err, os.ErrNotExist) {
return err
}
// No release note exists for the current release.
if internal.NoInput {
return nil
}
prompt := &survey.Input{
Message: "Release Note (leave empty for no release note)",
}
var releaseNote string
if err := survey.AskOne(prompt, &releaseNote); err != nil {
return err
}
if releaseNote == "" {
return nil
}
err := os.WriteFile(tagReleaseNotePath, []byte(releaseNote), 0o644)
if err != nil {
return err
}
err = gitPkg.Add(repoPath, path.Join("release", tag), internal.Dry)
if err != nil {
return err
}
return nil
}
func commitRelease(recipe recipe.Recipe, tag string) error {
if internal.Dry {
logrus.Debugf("dry run: no changes committed")
@ -239,12 +336,10 @@ func commitRelease(recipe recipe.Recipe, tag string) error {
}
}
if internal.Publish {
msg := fmt.Sprintf("chore: publish %s release", tag)
repoPath := path.Join(config.RECIPES_DIR, recipe.Name)
if err := gitPkg.Commit(repoPath, "compose.**yml", msg, internal.Dry); err != nil {
return err
}
msg := fmt.Sprintf("chore: publish %s release", tag)
repoPath := path.Join(config.RECIPES_DIR, recipe.Name)
if err := gitPkg.Commit(repoPath, msg, internal.Dry); err != nil {
return err
}
return nil
@ -297,13 +392,10 @@ func pushRelease(recipe recipe.Recipe, tagString string) error {
if err := recipe.Push(internal.Dry); err != nil {
return err
}
if !internal.Dry {
url := fmt.Sprintf("%s/%s/src/tag/%s", config.REPOS_BASE_URL, recipe.Name, tagString)
logrus.Infof("new release published: %s", url)
} else {
logrus.Info("dry run: no changes published")
}
url := fmt.Sprintf("%s/%s/src/tag/%s", config.REPOS_BASE_URL, recipe.Name, tagString)
logrus.Infof("new release published: %s", url)
} else {
logrus.Info("no -p/--publish passed, not publishing")
}
return nil
@ -324,12 +416,6 @@ func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recip
}
var lastGitTag tagcmp.Tag
if tagString == "" {
if err := internal.PromptBumpType(tagString); err != nil {
return err
}
}
for _, tag := range tags {
parsed, err := tagcmp.Parse(tag)
if err != nil {
@ -370,13 +456,19 @@ func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recip
newTag.Major = strconv.Itoa(now + 1)
}
if tagString == "" {
if err := internal.PromptBumpType(tagString, lastGitTag.String()); err != nil {
return err
}
}
if internal.Major || internal.Minor || internal.Patch {
newTag.Metadata = mainAppVersion
tagString = newTag.String()
}
if lastGitTag.String() == tagString {
logrus.Fatalf("latest git tag (%s) and synced lable (%s) are the same?", lastGitTag, tagString)
logrus.Fatalf("latest git tag (%s) and synced label (%s) are the same?", lastGitTag, tagString)
}
if !internal.NoInput {
@ -394,16 +486,20 @@ func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recip
}
}
if err := commitRelease(recipe, tagString); err != nil {
if err := addReleaseNotes(recipe, tagString); err != nil {
logrus.Fatal(err)
}
if err := commitRelease(recipe, tagString); err != nil {
logrus.Fatalf("failed to commit changes: %s", err.Error())
}
if err := tagRelease(tagString, repo); err != nil {
logrus.Fatal(err)
logrus.Fatalf("failed to tag release: %s", err.Error())
}
if err := pushRelease(recipe, tagString); err != nil {
logrus.Fatal(err)
logrus.Fatalf("failed to publish new release: %s", err.Error())
}
return nil
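The "a.b.c+x.y.z" scheme described above is handled by coopcloud.tech/tagcmp, which this file already imports. A short sketch of parsing such a tag; it only uses what the diff itself relies on (Parse, Metadata, String), so treat the exact field semantics as an assumption:

package main

import (
    "fmt"

    "coopcloud.tech/tagcmp"
)

func main() {
    // "2.0.1" is the maintainer-chosen semver, "6.4.3" the image tag of the
    // recipe's "app" service.
    tag, err := tagcmp.Parse("2.0.1+6.4.3")
    if err != nil {
        fmt.Println("unparsable:", err)
        return
    }
    fmt.Println("recipe version:", tag.String())
    fmt.Println("app image tag:", tag.Metadata)
}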

56
cli/recipe/reset.go Normal file
View File

@ -0,0 +1,56 @@
package recipe
import (
"path"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"github.com/go-git/go-git/v5"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var recipeResetCommand = cli.Command{
Name: "reset",
Usage: "Remove all unstaged changes from recipe config",
Description: "WARNING, this will delete your changes. Be Careful.",
Aliases: []string{"rs"},
ArgsUsage: "<recipe>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipeName := c.Args().First()
if recipeName != "" {
internal.ValidateRecipe(c)
}
repoPath := path.Join(config.RECIPES_DIR, recipeName)
repo, err := git.PlainOpen(repoPath)
if err != nil {
logrus.Fatal(err)
}
ref, err := repo.Head()
if err != nil {
logrus.Fatal(err)
}
worktree, err := repo.Worktree()
if err != nil {
logrus.Fatal(err)
}
opts := &git.ResetOptions{Commit: ref.Hash(), Mode: git.HardReset}
if err := worktree.Reset(opts); err != nil {
logrus.Fatal(err)
}
return nil
},
}

View File

@ -8,6 +8,7 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
"github.com/go-git/go-git/v5"
@ -16,20 +17,23 @@ import (
"github.com/urfave/cli/v2"
)
var recipeSyncCommand = &cli.Command{
var recipeSyncCommand = cli.Command{
Name: "sync",
Usage: "Sync recipe version label",
Aliases: []string{"s"},
Usage: "Sync recipe version label",
ArgsUsage: "<recipe> [<version>]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.DryFlag,
internal.MajorFlag,
internal.MinorFlag,
internal.PatchFlag,
},
Before: internal.SubCommandBefore,
Description: `
This command will generate labels for the main recipe service (i.e. by
convention, the service named 'app') which corresponds to the following format:
Generate labels for the main recipe service (i.e. by convention, the service
named "app") which corresponds to the following format:
coop-cloud.${STACK_NAME}.version=<version>
@ -37,8 +41,9 @@ Where <version> can be specifed on the command-line or Abra can attempt to
auto-generate it for you. The <recipe> configuration will be updated on the
local file system.
`,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipeWithPrompt(c)
recipe := internal.ValidateRecipe(c)
mainApp, err := internal.GetMainAppImage(recipe)
if err != nil {
@ -60,6 +65,9 @@ local file system.
nextTag := c.Args().Get(1)
if len(tags) == 0 && nextTag == "" {
logrus.Warnf("no git tags found for %s", recipe.Name)
if internal.NoInput {
logrus.Fatalf("unable to continue, input required for initial version")
}
fmt.Println(fmt.Sprintf(`
The following options are two types of initial semantic version that you can
pick for %s that will be published in the recipe catalogue. This follows the
@ -92,7 +100,8 @@ likely to change.
}
if nextTag == "" && (!internal.Major && !internal.Minor && !internal.Patch) {
if err := internal.PromptBumpType(""); err != nil {
latestRelease := tags[len(tags)-1]
if err := internal.PromptBumpType("", latestRelease); err != nil {
logrus.Fatal(err)
}
}
@ -113,6 +122,7 @@ likely to change.
if err := iter.ForEach(func(ref *plumbing.Reference) error {
obj, err := repo.TagObject(ref.Hash())
if err != nil {
logrus.Fatal("Tag at commit ", ref.Hash(), " is unannotated or otherwise broken. Please fix it.")
return err
}
@ -189,7 +199,17 @@ likely to change.
logrus.Infof("dry run: not syncing label %s for recipe %s", nextTag, recipe.Name)
}
isClean, err := gitPkg.IsClean(recipe.Dir())
if err != nil {
logrus.Fatal(err)
}
if !isClean {
logrus.Infof("%s currently has these unstaged changes 👇", recipe.Name)
if err := gitPkg.DiffUnstaged(recipe.Dir()); err != nil {
logrus.Fatal(err)
}
}
return nil
},
BashComplete: autocomplete.RecipeNameComplete,
}

View File

@ -2,6 +2,7 @@ package recipe
import (
"bufio"
"encoding/json"
"fmt"
"os"
"path"
@ -12,6 +13,8 @@ import (
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
gitPkg "coopcloud.tech/abra/pkg/git"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
@ -25,14 +28,23 @@ type imgPin struct {
version tagcmp.Tag
}
var recipeUpgradeCommand = &cli.Command{
// anUpgrade represents a single service upgrade (as within a recipe), and the list of tags that it can be upgraded to,
// for serialization purposes.
type anUpgrade struct {
Service string `json:"service"`
Image string `json:"image"`
Tag string `json:"tag"`
UpgradeTags []string `json:"upgrades"`
}
var recipeUpgradeCommand = cli.Command{
Name: "upgrade",
Usage: "Upgrade recipe image tags",
Aliases: []string{"u"},
Usage: "Upgrade recipe image tags",
Description: `
This command reads and attempts to parse all image tags within the given
<recipe> configuration and prompt with more recent tags to upgrade to. It will
update the relevant compose file tags on the local file system.
Parse all image tags within the given <recipe> configuration and prompt with
more recent tags to upgrade to. It will update the relevant compose file tags
on the local file system.
Some image tags cannot be parsed because they do not follow some sort of
semver-like convention. In this case, all possible tags will be listed and it
@ -45,17 +57,37 @@ interface.
You may invoke this command in "wizard" mode and be prompted for input:
abra recipe upgrade
`,
BashComplete: autocomplete.RecipeNameComplete,
ArgsUsage: "<recipe>",
ArgsUsage: "<recipe>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.PatchFlag,
internal.MinorFlag,
internal.MajorFlag,
internal.MachineReadableFlag,
internal.AllTagsFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipeWithPrompt(c)
recipe := internal.ValidateRecipe(c)
if err := recipePkg.EnsureIsClean(recipe.Name); err != nil {
logrus.Fatal(err)
}
if err := recipePkg.EnsureExists(recipe.Name); err != nil {
logrus.Fatal(err)
}
if err := recipePkg.EnsureUpToDate(recipe.Name); err != nil {
logrus.Fatal(err)
}
if err := recipePkg.EnsureLatest(recipe.Name); err != nil {
logrus.Fatal(err)
}
bumpType := btoi(internal.Major)*4 + btoi(internal.Minor)*2 + btoi(internal.Patch)
if bumpType != 0 {
@ -65,6 +97,13 @@ You may invoke this command in "wizard" mode and be prompted for input:
}
}
if internal.MachineReadable {
// -m implies -n in this case
internal.NoInput = true
}
upgradeList := make(map[string]anUpgrade)
// check for versions file and load pinned versions
versionsPresent := false
recipeDir := path.Join(config.RECIPES_DIR, recipe.Name)
@ -108,14 +147,14 @@ You may invoke this command in "wizard" mode and be prompted for input:
logrus.Fatal(err)
}
image := reference.Path(img)
regVersions, err := client.GetRegistryTags(image)
regVersions, err := client.GetRegistryTags(img)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("retrieved %s from remote registry for %s", regVersions, image)
image = recipePkg.StripTagMeta(image)
image := reference.Path(img)
logrus.Debugf("retrieved %s from remote registry for %s", regVersions, image)
image = formatter.StripTagMeta(image)
switch img.(type) {
case reference.NamedTagged:
@ -137,7 +176,7 @@ You may invoke this command in "wizard" mode and be prompted for input:
var compatible []tagcmp.Tag
for _, regVersion := range regVersions {
other, err := tagcmp.Parse(regVersion.Name)
other, err := tagcmp.Parse(regVersion)
if err != nil {
continue // skip tags that cannot be parsed
}
@ -151,12 +190,12 @@ You may invoke this command in "wizard" mode and be prompted for input:
sort.Sort(tagcmp.ByTagDesc(compatible))
if len(compatible) == 0 {
logrus.Info(fmt.Sprintf("no new versions available for %s, %s is the latest", image, tag))
if len(compatible) == 0 && !internal.AllTags {
logrus.Info(fmt.Sprintf("no new versions available for %s, assuming %s is the latest (use -a/--all-tags to see all anyway)", image, tag))
continue // skip on to the next tag and don't update any compose files
}
catlVersions, err := recipePkg.VersionsOfService(recipe.Name, service.Name)
catlVersions, err := recipePkg.VersionsOfService(recipe.Name, service.Name, internal.Offline)
if err != nil {
logrus.Fatal(err)
}
@ -219,24 +258,49 @@ You may invoke this command in "wizard" mode and be prompted for input:
}
} else {
msg := fmt.Sprintf("upgrade to which tag? (service: %s, image: %s, tag: %s)", service.Name, image, tag)
if !tagcmp.IsParsable(img.(reference.NamedTagged).Tag()) {
if !tagcmp.IsParsable(img.(reference.NamedTagged).Tag()) || internal.AllTags {
tag := img.(reference.NamedTagged).Tag()
logrus.Warning(fmt.Sprintf("unable to determine versioning semantics of %s, listing all tags", tag))
if !internal.AllTags {
logrus.Warning(fmt.Sprintf("unable to determine versioning semantics of %s, listing all tags", tag))
}
msg = fmt.Sprintf("upgrade to which tag? (service: %s, tag: %s)", service.Name, tag)
compatibleStrings = []string{"skip"}
for _, regVersion := range regVersions {
compatibleStrings = append(compatibleStrings, regVersion.Name)
compatibleStrings = append(compatibleStrings, regVersion)
}
}
prompt := &survey.Select{
Message: msg,
Help: "enter / return to confirm, choose 'skip' to not upgrade this tag, vim mode is enabled",
VimMode: true,
Options: compatibleStrings,
// there is always at least the item "skip" in compatibleStrings (a list of
// possible upgradable tags) and at least one other tag.
upgradableTags := compatibleStrings[1:]
upgrade := anUpgrade{
Service: service.Name,
Image: image,
Tag: tag.String(),
UpgradeTags: make([]string, len(upgradableTags)),
}
if err := survey.AskOne(prompt, &upgradeTag); err != nil {
logrus.Fatal(err)
for n, s := range upgradableTags {
var sb strings.Builder
if _, err := sb.WriteString(s); err != nil {
}
upgrade.UpgradeTags[n] = sb.String()
}
upgradeList[upgrade.Service] = upgrade
if internal.NoInput {
upgradeTag = "skip"
} else {
prompt := &survey.Select{
Message: msg,
Help: "enter / return to confirm, choose 'skip' to not upgrade this tag, vim mode is enabled",
VimMode: true,
Options: compatibleStrings,
}
if err := survey.AskOne(prompt, &upgradeTag); err != nil {
logrus.Fatal(err)
}
}
}
}
@ -249,7 +313,40 @@ You may invoke this command in "wizard" mode and be prompted for input:
logrus.Infof("tag upgraded from %s to %s for %s", tag.String(), upgradeTag, image)
}
} else {
logrus.Warnf("not upgrading %s, skipping as requested", image)
if !internal.NoInput {
logrus.Warnf("not upgrading %s, skipping as requested", image)
}
}
}
if internal.NoInput {
if internal.MachineReadable {
jsonstring, err := json.Marshal(upgradeList)
if err != nil {
logrus.Fatal(err)
}
fmt.Println(string(jsonstring))
return nil
}
for _, upgrade := range upgradeList {
logrus.Infof("can upgrade service: %s, image: %s, tag: %s ::\n", upgrade.Service, upgrade.Image, upgrade.Tag)
for _, utag := range upgrade.UpgradeTags {
logrus.Infof(" %s\n", utag)
}
}
}
isClean, err := gitPkg.IsClean(recipeDir)
if err != nil {
logrus.Fatal(err)
}
if !isClean {
logrus.Infof("%s currently has these unstaged changes 👇", recipe.Name)
if err := gitPkg.DiffUnstaged(recipeDir); err != nil {
logrus.Fatal(err)
}
}
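The -m/--machine-readable path above boils down to marshalling the upgradeList map of anUpgrade values to JSON. This sketch re-declares the struct exactly as in this diff; the field values are invented for illustration:

package main

import (
    "encoding/json"
    "fmt"
)

type anUpgrade struct {
    Service     string   `json:"service"`
    Image       string   `json:"image"`
    Tag         string   `json:"tag"`
    UpgradeTags []string `json:"upgrades"`
}

func main() {
    upgradeList := map[string]anUpgrade{
        "app": {
            Service:     "app",
            Image:       "nextcloud",
            Tag:         "27.1.2",
            UpgradeTags: []string{"27.1.3", "28.0.0"},
        },
    }
    out, err := json.Marshal(upgradeList)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
    // {"app":{"service":"app","image":"nextcloud","tag":"27.1.2","upgrades":["27.1.3","28.0.0"]}}
}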

View File

@ -2,62 +2,84 @@ package recipe
import (
"fmt"
"path"
"sort"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
gitPkg "coopcloud.tech/abra/pkg/git"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"github.com/olekukonko/tablewriter"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var recipeVersionCommand = &cli.Command{
Name: "versions",
Usage: "List recipe versions",
Aliases: []string{"v"},
ArgsUsage: "<recipe>",
func sortServiceByName(versions [][]string) func(i, j int) bool {
return func(i, j int) bool {
// NOTE(d1): corresponds to the `tableCol` definition below
if versions[i][1] == "app" {
return true
}
return versions[i][1] < versions[j][1]
}
}
var recipeVersionCommand = cli.Command{
Name: "versions",
Aliases: []string{"v"},
Usage: "List recipe versions",
ArgsUsage: "<recipe>",
Description: "Versions are read from the recipe catalogue.",
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
internal.NoInputFlag,
internal.MachineReadableFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipe(c)
catalogueDir := path.Join(config.ABRA_DIR, "catalogue")
url := fmt.Sprintf("%s/%s.git", config.REPOS_BASE_URL, "recipes")
if err := gitPkg.Clone(catalogueDir, url); err != nil {
return err
}
catalogue, err := recipePkg.ReadRecipeCatalogue()
catl, err := recipePkg.ReadRecipeCatalogue(internal.Offline)
if err != nil {
logrus.Fatal(err)
}
recipeMeta, ok := catalogue[recipe.Name]
recipeMeta, ok := catl[recipe.Name]
if !ok {
logrus.Fatalf("%s recipe doesn't exist?", recipe.Name)
logrus.Fatalf("%s is not published on the catalogue?", recipe.Name)
}
tableCol := []string{"Version", "Service", "Image", "Tag", "Digest"}
table := formatter.CreateTable(tableCol)
if len(recipeMeta.Versions) == 0 {
logrus.Fatalf("%s has no catalogue published versions?", recipe.Name)
}
for i := len(recipeMeta.Versions) - 1; i >= 0; i-- {
for tag, meta := range recipeMeta.Versions[i] {
tableCols := []string{"version", "service", "image", "tag"}
table := formatter.CreateTable(tableCols)
for version, meta := range recipeMeta.Versions[i] {
var versions [][]string
for service, serviceMeta := range meta {
table.Append([]string{tag, service, serviceMeta.Image, serviceMeta.Tag, serviceMeta.Digest})
versions = append(versions, []string{version, service, serviceMeta.Image, serviceMeta.Tag})
}
sort.Slice(versions, sortServiceByName(versions))
for _, version := range versions {
table.Append(version)
}
if internal.MachineReadable {
table.JSONRender()
} else {
table.SetAutoMergeCellsByColumnIndex([]int{0})
table.SetAlignment(tablewriter.ALIGN_LEFT)
table.Render()
fmt.Println()
}
}
}
table.SetAutoMergeCells(true)
if table.NumLines() > 0 {
table.Render()
} else {
logrus.Fatalf("%s has no published versions?", recipe.Name)
}
return nil
},
}
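A quick usage example of the sortServiceByName comparator defined above (illustrative rows): it pins the "app" service row to the top and orders the rest by service name via sort.Slice.

package main

import (
    "fmt"
    "sort"
)

// sortServiceByName is copied from the diff above.
func sortServiceByName(versions [][]string) func(i, j int) bool {
    return func(i, j int) bool {
        if versions[i][1] == "app" {
            return true
        }
        return versions[i][1] < versions[j][1]
    }
}

func main() {
    rows := [][]string{
        {"1.0.0+2.9", "db", "mariadb", "10.11"},
        {"1.0.0+2.9", "app", "nextcloud", "2.9"},
        {"1.0.0+2.9", "cache", "redis", "7"},
    }
    sort.Slice(rows, sortServiceByName(rows))
    for _, row := range rows {
        fmt.Println(row) // "app" first, then cache, db
    }
}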

View File

@ -1,79 +0,0 @@
package record
import (
"fmt"
"strconv"
"coopcloud.tech/abra/cli/internal"
gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
"coopcloud.tech/abra/pkg/formatter"
"github.com/libdns/gandi"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
// RecordListCommand lists domains.
var RecordListCommand = &cli.Command{
Name: "list",
Usage: "List domain name records",
Aliases: []string{"ls"},
ArgsUsage: "<zone>",
Flags: []cli.Flag{
internal.DNSProviderFlag,
},
Description: `
This command lists all domain name records managed by a 3rd party provider for
a specific zone.
You must specify a zone (e.g. example.com) under which your domain name records
are listed. This zone must already be created on your provider account.
`,
Action: func(c *cli.Context) error {
if err := internal.EnsureDNSProvider(); err != nil {
logrus.Fatal(err)
}
zone, err := internal.EnsureZoneArgument(c)
if err != nil {
logrus.Fatal(err)
}
var provider gandi.Provider
switch internal.DNSProvider {
case "gandi":
provider, err = gandiPkg.New()
if err != nil {
logrus.Fatal(err)
}
default:
logrus.Fatalf("%s is not a supported DNS provider", internal.DNSProvider)
}
records, err := provider.GetRecords(c.Context, zone)
if err != nil {
logrus.Fatal(err)
}
tableCol := []string{"type", "name", "value", "TTL", "priority"}
table := formatter.CreateTable(tableCol)
for _, record := range records {
value := record.Value
if len(record.Value) > 30 {
value = fmt.Sprintf("%s...", record.Value[:30])
}
table.Append([]string{
record.Type,
record.Name,
value,
record.TTL.String(),
strconv.Itoa(record.Priority),
})
}
table.Render()
return nil
},
}

View File

@ -1,252 +0,0 @@
package record
import (
"fmt"
"strconv"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/dns"
gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
"coopcloud.tech/abra/pkg/formatter"
"github.com/libdns/gandi"
"github.com/libdns/libdns"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
// RecordNewCommand creates a new domain name record.
var RecordNewCommand = &cli.Command{
Name: "new",
Usage: "Create a new domain record",
Aliases: []string{"n"},
ArgsUsage: "<zone>",
Flags: []cli.Flag{
internal.DNSProviderFlag,
internal.DNSTypeFlag,
internal.DNSNameFlag,
internal.DNSValueFlag,
internal.DNSTTLFlag,
internal.DNSPriorityFlag,
internal.AutoDNSRecordFlag,
},
Description: `
This command creates a new domain name record for a specific zone.
You must specify a zone (e.g. example.com) under which your domain name records
are listed. This zone must already be created on your provider account.
Example:
abra record new foo.com -p gandi -t A -n myapp -v 192.168.178.44
Typically, you need two records, an A record which points at the zone (@.) and
a wildcard record for your apps (*.). Pass "--auto" to have Abra automatically
set this up.
abra record new --auto foo.com -p gandi -v 192.168.178.44
You may also invoke this command in "wizard" mode and be prompted for input
abra record new
`,
Action: func(c *cli.Context) error {
zone, err := internal.EnsureZoneArgument(c)
if err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSProvider(); err != nil {
logrus.Fatal(err)
}
var provider gandi.Provider
switch internal.DNSProvider {
case "gandi":
provider, err = gandiPkg.New()
if err != nil {
logrus.Fatal(err)
}
default:
logrus.Fatalf("%s is not a supported DNS provider", internal.DNSProvider)
}
if internal.AutoDNSRecord {
ipv4, err := dns.EnsureIPv4(zone)
if err != nil {
logrus.Debugf("no ipv4 associated with %s, prompting for input", zone)
if err := internal.EnsureDNSValueFlag(c); err != nil {
logrus.Fatal(err)
}
ipv4 = internal.DNSValue
}
logrus.Infof("automatically configuring @./*. A records for %s for %s (--auto)", zone, ipv4)
if err := autoConfigure(c, &provider, zone, ipv4); err != nil {
logrus.Fatal(err)
}
return nil
}
if err := internal.EnsureDNSTypeFlag(c); err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSNameFlag(c); err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSValueFlag(c); err != nil {
logrus.Fatal(err)
}
ttl, err := dns.GetTTL(internal.DNSTTL)
if err != nil {
return err
}
record := libdns.Record{
Type: internal.DNSType,
Name: internal.DNSName,
Value: internal.DNSValue,
TTL: ttl,
}
if internal.DNSType == "MX" || internal.DNSType == "SRV" || internal.DNSType == "URI" {
record.Priority = internal.DNSPriority
}
records, err := provider.GetRecords(c.Context, zone)
if err != nil {
logrus.Fatal(err)
}
for _, existingRecord := range records {
if existingRecord.Type == record.Type &&
existingRecord.Name == record.Name &&
existingRecord.Value == record.Value {
logrus.Fatalf("%s record for %s already exists?", record.Type, zone)
}
}
createdRecords, err := provider.SetRecords(
c.Context,
zone,
[]libdns.Record{record},
)
if err != nil {
logrus.Fatal(err)
}
if len(createdRecords) == 0 {
logrus.Fatal("provider library reports that no record was created?")
}
createdRecord := createdRecords[0]
tableCol := []string{"type", "name", "value", "TTL", "priority"}
table := formatter.CreateTable(tableCol)
value := createdRecord.Value
if len(createdRecord.Value) > 30 {
value = fmt.Sprintf("%s...", createdRecord.Value[:30])
}
table.Append([]string{
createdRecord.Type,
createdRecord.Name,
value,
createdRecord.TTL.String(),
strconv.Itoa(createdRecord.Priority),
})
table.Render()
logrus.Info("record created")
return nil
},
}
func autoConfigure(c *cli.Context, provider *gandi.Provider, zone, ipv4 string) error {
ttl, err := dns.GetTTL(internal.DNSTTL)
if err != nil {
return err
}
atRecord := libdns.Record{
Type: "A",
Name: "@",
Value: ipv4,
TTL: ttl,
}
wildcardRecord := libdns.Record{
Type: "A",
Name: "*",
Value: ipv4,
TTL: ttl,
}
records := []libdns.Record{atRecord, wildcardRecord}
tableCol := []string{"type", "name", "value", "TTL", "priority"}
table := formatter.CreateTable(tableCol)
for _, record := range records {
existingRecords, err := provider.GetRecords(c.Context, zone)
if err != nil {
return err
}
discovered := false
for _, existingRecord := range existingRecords {
if existingRecord.Type == record.Type &&
existingRecord.Name == record.Name &&
existingRecord.Value == record.Value {
logrus.Warnf("%s record: %s %s for %s already exists?", record.Type, record.Name, record.Value, zone)
discovered = true
}
}
if discovered {
continue
}
createdRecords, err := provider.SetRecords(
c.Context,
zone,
[]libdns.Record{record},
)
if err != nil {
return err
}
if len(createdRecords) == 0 {
return fmt.Errorf("provider library reports that no record was created?")
}
createdRecord := createdRecords[0]
value := createdRecord.Value
if len(createdRecord.Value) > 30 {
value = fmt.Sprintf("%s...", createdRecord.Value[:30])
}
table.Append([]string{
createdRecord.Type,
createdRecord.Name,
value,
createdRecord.TTL.String(),
strconv.Itoa(createdRecord.Priority),
})
}
if table.NumLines() > 0 {
table.Render()
}
return nil
}

View File

@ -1,38 +0,0 @@
package record
import (
"github.com/urfave/cli/v2"
)
// RecordCommand supports managing DNS entries.
var RecordCommand = &cli.Command{
Name: "record",
Usage: "Manage domain name records",
Aliases: []string{"rc"},
ArgsUsage: "<record>",
Description: `
This command supports managing domain name records via 3rd party providers such
as Gandi DNS. It supports listing, creating and removing all types of records
that you might need for managing Co-op Cloud apps.
The following providers are supported:
Gandi DNS https://www.gandi.net
You need an account with such a provider already. Typically, you need to
provide an API token on the Abra command-line when using these commands so that
you can authenticate with your provider account.
New providers can be integrated; we welcome change sets. See the underlying DNS
library documentation for more. It supports many existing providers and makes
it easy to implement support for new ones.
https://pkg.go.dev/github.com/libdns/libdns
`,
Subcommands: []*cli.Command{
RecordListCommand,
RecordNewCommand,
RecordRemoveCommand,
},
}

View File

@ -1,132 +0,0 @@
package record
import (
"fmt"
"strconv"
"coopcloud.tech/abra/cli/internal"
gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
"coopcloud.tech/abra/pkg/formatter"
"github.com/AlecAivazis/survey/v2"
"github.com/libdns/gandi"
"github.com/libdns/libdns"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
// RecordRemoveCommand lists domains.
var RecordRemoveCommand = &cli.Command{
Name: "remove",
Usage: "Remove a domain name record",
Aliases: []string{"rm"},
ArgsUsage: "<zone>",
Flags: []cli.Flag{
internal.DNSProviderFlag,
internal.DNSTypeFlag,
internal.DNSNameFlag,
},
Description: `
This command removes a domain name record for a specific zone.
It uses the type of record and name to match existing records and choose one
for deletion. You must specify a zone (e.g. example.com) under which your
domain name records are listed. This zone must already be created on your
provider account.
Example:
abra record remove foo.com -p gandi -t A -n myapp
You may also invoke this command in "wizard" mode and be prompted for input
abra record rm
`,
Action: func(c *cli.Context) error {
zone, err := internal.EnsureZoneArgument(c)
if err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSProvider(); err != nil {
logrus.Fatal(err)
}
var provider gandi.Provider
switch internal.DNSProvider {
case "gandi":
provider, err = gandiPkg.New()
if err != nil {
logrus.Fatal(err)
}
default:
logrus.Fatalf("%s is not a supported DNS provider", internal.DNSProvider)
}
if err := internal.EnsureDNSTypeFlag(c); err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSNameFlag(c); err != nil {
logrus.Fatal(err)
}
records, err := provider.GetRecords(c.Context, zone)
if err != nil {
logrus.Fatal(err)
}
var toDelete libdns.Record
for _, record := range records {
if record.Type == internal.DNSType && record.Name == internal.DNSName {
toDelete = record
break
}
}
if (libdns.Record{}) == toDelete {
logrus.Fatal("provider library reports no matching record?")
}
tableCol := []string{"type", "name", "value", "TTL", "priority"}
table := formatter.CreateTable(tableCol)
value := toDelete.Value
if len(toDelete.Value) > 30 {
value = fmt.Sprintf("%s...", toDelete.Value[:30])
}
table.Append([]string{
toDelete.Type,
toDelete.Name,
value,
toDelete.TTL.String(),
strconv.Itoa(toDelete.Priority),
})
table.Render()
if !internal.NoInput {
response := false
prompt := &survey.Confirm{
Message: "continue with record deletion?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
}
_, err = provider.DeleteRecords(c.Context, zone, []libdns.Record{toDelete})
if err != nil {
logrus.Fatal(err)
}
logrus.Info("record successfully deleted")
return nil
},
}
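
The zero-value comparison (libdns.Record{}) == toDelete above relies on libdns.Record being a comparable struct. A hypothetical variant using an explicit found flag reads a little more directly and does not depend on comparability:

// Sketch only: the same matching logic as above, with an explicit flag.
var toDelete libdns.Record
found := false
for _, record := range records {
	if record.Type == internal.DNSType && record.Name == internal.DNSName {
		toDelete = record
		found = true
		break
	}
}
if !found {
	logrus.Fatal("provider library reports no matching record?")
}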

View File

@ -2,13 +2,8 @@ package server
import (
"errors"
"fmt"
"os"
"os/exec"
"os/user"
"path"
"path/filepath"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/client"
@ -16,164 +11,49 @@ import (
contextPkg "coopcloud.tech/abra/pkg/context"
"coopcloud.tech/abra/pkg/dns"
"coopcloud.tech/abra/pkg/server"
"coopcloud.tech/abra/pkg/ssh"
"github.com/AlecAivazis/survey/v2"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
dockerClient "github.com/docker/docker/client"
sshPkg "coopcloud.tech/abra/pkg/ssh"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var (
dockerInstallMsg = `
A docker installation cannot be found on %s. This is a required system
dependency for running Co-op Cloud on your server. If you would like, Abra can
attempt to install Docker for you using the upstream non-interactive
installation script.
See the following documentation for more:
https://docs.docker.com/engine/install/debian/#install-using-the-convenience-script
N.B. Docker doesn't recommend it for production environments, but many use it for
such purposes. Docker stable is now installed by default by this script. The
source for this script can be seen here:
https://github.com/docker/docker-install
`
)
var local bool
var localFlag = &cli.BoolFlag{
Name: "local",
Aliases: []string{"l"},
Value: false,
Usage: "Use local server",
Destination: &local,
}
var provision bool
var provisionFlag = &cli.BoolFlag{
Name: "provision",
Aliases: []string{"p"},
Value: false,
Usage: "Provision server so it can deploy apps",
Destination: &provision,
}
var sshAuth string
var sshAuthFlag = &cli.StringFlag{
Name: "ssh-auth",
Aliases: []string{"sh"},
Value: "identity-file",
Usage: "Select SSH authentication method (identity-file, password)",
Destination: &sshAuth,
}
var askSudoPass bool
var askSudoPassFlag = &cli.BoolFlag{
Name: "ask-sudo-pass",
Aliases: []string{"as"},
Value: false,
Usage: "Ask for sudo password",
Destination: &askSudoPass,
}
var traefik bool
var traefikFlag = &cli.BoolFlag{
Name: "traefik",
Aliases: []string{"t"},
Value: false,
Usage: "Deploy traefik",
Destination: &traefik,
}
func cleanUp(domainName string) {
logrus.Warnf("cleaning up context for %s", domainName)
if err := client.DeleteContext(domainName); err != nil {
logrus.Fatal(err)
}
logrus.Warnf("cleaning up server directory for %s", domainName)
if err := os.RemoveAll(filepath.Join(config.SERVERS_DIR, domainName)); err != nil {
logrus.Fatal(err)
}
}
func installDockerLocal(c *cli.Context) error {
fmt.Println(fmt.Sprintf(dockerInstallMsg, "this local server"))
response := false
prompt := &survey.Confirm{
Message: fmt.Sprintf("attempt install docker on local server?"),
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
for _, exe := range []string{"wget", "bash"} {
exists, err := ensureLocalExecutable(exe)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("%s missing, please install it", exe)
if domainName != "default" {
logrus.Infof("cleaning up context for %s", domainName)
if err := client.DeleteContext(domainName); err != nil {
logrus.Fatal(err)
}
}
cmd := exec.Command("bash", "-c", "wget -O- https://get.docker.com | bash")
if err := internal.RunCmd(cmd); err != nil {
return err
}
logrus.Infof("attempting to clean up server directory for %s", domainName)
return nil
}
func newLocalServer(c *cli.Context, domainName string) error {
if err := createServerDir(domainName); err != nil {
return err
}
cl, err := newClient(c, domainName)
serverDir := filepath.Join(config.SERVERS_DIR, domainName)
files, err := config.GetAllFilesInDirectory(serverDir)
if err != nil {
return err
logrus.Fatalf("unable to list files in %s: %s", serverDir, err)
}
if provision {
exists, err := ensureLocalExecutable("docker")
if err != nil {
return err
}
if !exists {
if err := installDockerLocal(c); err != nil {
return err
}
}
if err := initSwarmLocal(c, cl, domainName); err != nil {
if !strings.Contains(err.Error(), "proxy already exists") {
logrus.Fatal(err)
}
}
if len(files) > 0 {
logrus.Warnf("aborting clean up of %s because it is not empty", serverDir)
return
}
if traefik {
if err := deployTraefik(c, cl, domainName); err != nil {
return err
}
if err := os.RemoveAll(serverDir); err != nil {
logrus.Fatal(err)
}
logrus.Info("local server has been added")
return nil
}
// newContext creates a new internal Docker context for a server. This is how
// Docker manages SSH connection details. These are stored to disk in
// ~/.docker. Abra can manage this completely for the user, so it's an
// implementation detail.
func newContext(c *cli.Context, domainName, username, port string) error {
store := contextPkg.NewDefaultDockerContextStore()
contexts, err := store.Store.List()
@ -197,187 +77,7 @@ func newContext(c *cli.Context, domainName, username, port string) error {
return nil
}
func newClient(c *cli.Context, domainName string) (*dockerClient.Client, error) {
cl, err := client.New(domainName)
if err != nil {
return &dockerClient.Client{}, err
}
return cl, nil
}
func installDocker(c *cli.Context, cl *dockerClient.Client, sshCl *ssh.Client, domainName string) error {
exists, err := ensureRemoteExecutable("docker", sshCl)
if err != nil {
return err
}
if !exists {
fmt.Println(fmt.Sprintf(dockerInstallMsg, domainName))
response := false
prompt := &survey.Confirm{
Message: fmt.Sprintf("attempt install docker on %s?", domainName),
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
exes := []string{"wget", "bash"}
if askSudoPass {
exes = append(exes, "ssh-askpass")
}
for _, exe := range exes {
exists, err := ensureRemoteExecutable(exe, sshCl)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("%s missing on remote, please install it", exe)
}
}
var sudoPass string
if askSudoPass {
cmd := "wget -O- https://get.docker.com | bash"
prompt := &survey.Password{
Message: "sudo password?",
}
if err := survey.AskOne(prompt, &sudoPass); err != nil {
return err
}
logrus.Debugf("running %s on %s now with sudo password", cmd, domainName)
if sudoPass == "" {
return fmt.Errorf("missing sudo password but requested --ask-sudo-pass?")
}
logrus.Warn("installing docker, this could take some time...")
if err := ssh.RunSudoCmd(cmd, sudoPass, sshCl); err != nil {
fmt.Print(fmt.Sprintf(`
Abra was unable to bootstrap Docker, see below for logs:
%s
If nothing works, you can try running the Docker install script manually on your server:
wget -O- https://get.docker.com | bash
`, string(err.Error())))
logrus.Fatal("Process exited with status 1")
}
logrus.Infof("docker is installed on %s", domainName)
remoteUser := sshCl.SSHClient.Conn.User()
logrus.Infof("adding %s to docker group", remoteUser)
permsCmd := fmt.Sprintf("sudo usermod -aG docker %s", remoteUser)
if err := ssh.RunSudoCmd(permsCmd, sudoPass, sshCl); err != nil {
return err
}
} else {
cmd := "wget -O- https://get.docker.com | bash"
logrus.Debugf("running %s on %s now without sudo password", cmd, domainName)
logrus.Warn("installing docker, this could take some time...")
if out, err := sshCl.Exec(cmd); err != nil {
fmt.Print(fmt.Sprintf(`
Abra was unable to bootstrap Docker, see below for logs:
%s
This could be due to a number of things but one of the most common is that your
server user account does not have sudo access, and if it does, you need to pass
"--ask-sudo-pass" in order to supply Abra with your password.
If nothing works, you can try running the Docker install script manually on your server:
wget -O- https://get.docker.com | bash
`, string(out)))
logrus.Fatal(err)
}
logrus.Infof("docker is installed on %s", domainName)
}
}
return nil
}
func initSwarmLocal(c *cli.Context, cl *dockerClient.Client, domainName string) error {
initReq := swarm.InitRequest{ListenAddr: "0.0.0.0:2377"}
if _, err := cl.SwarmInit(c.Context, initReq); err != nil {
if strings.Contains(err.Error(), "is already part of a swarm") ||
strings.Contains(err.Error(), "must specify a listening address") {
logrus.Infof("swarm mode already initialised on %s", domainName)
} else {
return err
}
} else {
logrus.Infof("initialised swarm mode on local server")
}
netOpts := types.NetworkCreate{Driver: "overlay", Scope: "swarm"}
if _, err := cl.NetworkCreate(c.Context, "proxy", netOpts); err != nil {
if !strings.Contains(err.Error(), "proxy already exists") {
return err
}
logrus.Info("swarm overlay network already created on local server")
} else {
logrus.Infof("swarm overlay network created on local server")
}
return nil
}
func initSwarm(c *cli.Context, cl *dockerClient.Client, domainName string) error {
ipv4, err := dns.EnsureIPv4(domainName)
if err != nil {
return err
}
initReq := swarm.InitRequest{
ListenAddr: "0.0.0.0:2377",
AdvertiseAddr: ipv4,
}
if _, err := cl.SwarmInit(c.Context, initReq); err != nil {
if strings.Contains(err.Error(), "is already part of a swarm") ||
strings.Contains(err.Error(), "must specify a listening address") {
logrus.Infof("swarm mode already initialised on %s", domainName)
} else {
return err
}
} else {
logrus.Infof("initialised swarm mode on %s", domainName)
}
netOpts := types.NetworkCreate{Driver: "overlay", Scope: "swarm"}
if _, err := cl.NetworkCreate(c.Context, "proxy", netOpts); err != nil {
if !strings.Contains(err.Error(), "proxy already exists") {
return err
}
logrus.Infof("swarm overlay network already created on %s", domainName)
} else {
logrus.Infof("swarm overlay network created on %s", domainName)
}
return nil
}
// createServerDir creates the ~/.abra/servers/... directory for a new server.
func createServerDir(domainName string) error {
if err := server.CreateServerDir(domainName); err != nil {
if !os.IsExist(err) {
@ -385,195 +85,98 @@ func createServerDir(domainName string) error {
}
logrus.Debugf("server dir for %s already created", domainName)
}
return nil
}
func deployTraefik(c *cli.Context, cl *dockerClient.Client, domainName string) error {
internal.NoInput = true
internal.RecipeName = "traefik"
internal.NewAppServer = domainName
internal.Domain = fmt.Sprintf("%s.%s", "traefik", domainName)
internal.NewAppName = fmt.Sprintf("%s_%s", "traefik", config.SanitiseAppName(domainName))
appEnvPath := path.Join(config.ABRA_DIR, "servers", internal.Domain, fmt.Sprintf("%s.env", internal.NewAppName))
if _, err := os.Stat(appEnvPath); os.IsNotExist(err) {
logrus.Info(fmt.Sprintf("-t/--traefik specified, automatically deploying traefik to %s", internal.NewAppServer))
if err := internal.NewAction(c); err != nil {
logrus.Fatal(err)
}
} else {
logrus.Infof("%s already exists, not creating again", appEnvPath)
}
internal.AppName = internal.NewAppName
if err := internal.DeployAction(c); err != nil {
logrus.Fatal(err)
}
return nil
}
var serverAddCommand = &cli.Command{
Name: "add",
Usage: "Add a server to your configuration",
var serverAddCommand = cli.Command{
Name: "add",
Aliases: []string{"a"},
Usage: "Add a server to your configuration",
Description: `
This command adds a new server to your configuration so that it can be managed
by Abra. This can be useful when you already have a server provisioned and want
to start running Abra commands against it.
Add a new server to your configuration so that it can be managed by Abra.
This command can also provision your server ("--provision/-p") so that it is
capable of hosting Co-op Cloud apps. Abra will default to expecting that you
have a running ssh-agent and are using SSH keys to connect to your new server.
Abra will also read your SSH config (matching "Host" as <domain>). SSH
connection detail precedence is: command-line > SSH config > guessed defaults.
Abra uses the SSH command-line to discover connection details for your server.
It is advised to configure an entry per-host in your ~/.ssh/config for each
server. For example:
If you have no SSH key configured for this host and are instead using password
authentication, you may pass "--ssh-auth password" to have Abra ask you for the
password. "--ask-sudo-pass" may be passed if you run your provisioning commands
via sudo privilege escalation.
Host example.com
Hostname example.com
User exampleUser
Port 12345
IdentityFile ~/.ssh/example@somewhere
Abra can then load SSH connection details from this configuration with:
abra server add example.com
If "--local" is passed, then Abra assumes that the current local server is
intended as the target server. This is useful when you want to have your entire
Co-op Cloud config located on the server itself, and not on your local
developer machine.
Example:
abra server add --local
Otherwise, you may specify a remote server. The <domain> argument must be a
publicly accessible domain name which points to your server. You should have SSH
access to this server; Abra will assume port 22 and will use your current
system username to make an initial connection. You can use the <user> and
<port> arguments to adjust this.
Example:
abra server add --provision --traefik varia.zone globemodem 12345
Abra will construct the following SSH connection and Docker context:
ssh://globemodem@varia.zone:12345
All communication between Abra and the server will use this SSH connection.
In this example, Abra will run the following operations:
1. Install Docker
2. Initialise Swarm mode
3. Deploy Traefik (core web proxy)
You may omit flags to avoid performing this provisioning logic.
`,
Aliases: []string{"a"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
localFlag,
provisionFlag,
sshAuthFlag,
askSudoPassFlag,
traefikFlag,
},
ArgsUsage: "<domain> [<user>] [<port>]",
Before: internal.SubCommandBefore,
ArgsUsage: "<domain>",
Action: func(c *cli.Context) error {
if c.Args().Len() > 0 && local || !internal.ValidateSubCmdFlags(c) {
err := errors.New("cannot use <domain> and --local together")
internal.ShowSubcommandHelpAndError(c, err)
}
if sshAuth != "password" && sshAuth != "identity-file" {
err := errors.New("--ssh-auth only accepts identity-file or password")
internal.ShowSubcommandHelpAndError(c, err)
var domainName string
if local {
domainName = "default"
} else {
domainName = internal.ValidateDomain(c)
}
if local {
if err := newLocalServer(c, "default"); err != nil {
if err := createServerDir(domainName); err != nil {
logrus.Fatal(err)
}
logrus.Infof("attempting to create client for %s", domainName)
if _, err := client.New(domainName); err != nil {
cleanUp(domainName)
logrus.Fatal(err)
}
logrus.Info("local server added")
return nil
}
domainName, err := internal.ValidateDomain(c)
if err != nil {
if _, err := dns.EnsureIPv4(domainName); err != nil {
logrus.Fatal(err)
}
username := c.Args().Get(1)
if username == "" {
systemUser, err := user.Current()
if err != nil {
return err
}
username = systemUser.Username
}
port := c.Args().Get(2)
if port == "" {
port = "22"
}
if err := createServerDir(domainName); err != nil {
logrus.Fatal(err)
}
if err := newContext(c, domainName, username, port); err != nil {
logrus.Fatal(err)
}
cl, err := newClient(c, domainName)
hostConfig, err := sshPkg.GetHostConfig(domainName)
if err != nil {
logrus.Fatal(err)
}
if provision {
logrus.Debugf("attempting to construct SSH client for %s", domainName)
sshCl, err := ssh.New(domainName, sshAuth, username, port)
if err != nil {
logrus.Fatal(err)
}
defer sshCl.Close()
logrus.Debugf("successfully created SSH client for %s", domainName)
if err := installDocker(c, cl, sshCl, domainName); err != nil {
logrus.Fatal(err)
}
if err := initSwarm(c, cl, domainName); err != nil {
logrus.Fatal(err)
}
if err := newContext(c, domainName, hostConfig.User, hostConfig.Port); err != nil {
logrus.Fatal(err)
}
if _, err := cl.Info(c.Context); err != nil {
logrus.Infof("attempting to create client for %s", domainName)
if _, err := client.New(domainName); err != nil {
cleanUp(domainName)
logrus.Fatalf("couldn't make a remote docker connection to %s? use --provision/-p to attempt to install", domainName)
logrus.Debugf("failed to construct client for %s, saw %s", domainName, err.Error())
logrus.Fatal(sshPkg.Fatal(domainName, err))
}
if traefik {
if err := deployTraefik(c, cl, domainName); err != nil {
logrus.Fatal(err)
}
}
logrus.Infof("%s added", domainName)
return nil
},
}
// ensureLocalExecutable ensures that an executable is present on the local machine
func ensureLocalExecutable(exe string) (bool, error) {
out, err := exec.Command("which", exe).Output()
if err != nil {
return false, err
}
return string(out) != "", nil
}
// ensureRemoteExecutable ensures that an executable is present on a remote machine
func ensureRemoteExecutable(exe string, sshCl *ssh.Client) (bool, error) {
out, err := sshCl.Exec(fmt.Sprintf("which %s", exe))
if err != nil && string(out) != "" {
return false, err
}
return string(out) != "", nil
}

View File

@ -3,6 +3,7 @@ package server
import (
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/context"
"coopcloud.tech/abra/pkg/formatter"
@ -11,12 +12,25 @@ import (
"github.com/urfave/cli/v2"
)
var serverListCommand = &cli.Command{
Name: "list",
Aliases: []string{"ls"},
Usage: "List managed servers",
ArgsUsage: " ",
HideHelp: true,
var problemsFilter bool
var problemsFilterFlag = &cli.BoolFlag{
Name: "problems, p",
Usage: "Show only servers with potential connection problems",
Destination: &problemsFilter,
}
var serverListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Usage: "List managed servers",
Flags: []cli.Flag{
problemsFilterFlag,
internal.DebugFlag,
internal.MachineReadableFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
dockerContextStore := context.NewDefaultDockerContextStore()
contexts, err := dockerContextStore.Store.List()
@ -26,7 +40,6 @@ var serverListCommand = &cli.Command{
tableColumns := []string{"name", "host", "user", "port"}
table := formatter.CreateTable(tableColumns)
defer table.Render()
serverNames, err := config.ReadServerNames()
if err != nil {
@ -41,14 +54,27 @@ var serverListCommand = &cli.Command{
// No local context found, we can continue safely
continue
}
if ctx.Name == serverName {
sp, err := ssh.ParseURL(endpoint)
if err != nil {
logrus.Fatal(err)
}
if sp.Host == "" {
sp.Host = "unknown"
}
if sp.User == "" {
sp.User = "unknown"
}
if sp.Port == "" {
sp.Port = "unknown"
}
row = []string{serverName, sp.Host, sp.User, sp.Port}
}
}
if len(row) == 0 {
if serverName == "default" {
row = []string{serverName, "local", "n/a", "n/a"}
@ -56,7 +82,27 @@ var serverListCommand = &cli.Command{
row = []string{serverName, "unknown", "unknown", "unknown"}
}
}
table.Append(row)
if problemsFilter {
for _, val := range row {
if val == "unknown" {
table.Append(row)
break
}
}
} else {
table.Append(row)
}
}
if internal.MachineReadable {
table.JSONRender()
} else {
if problemsFilter && table.NumLines() == 0 {
logrus.Info("all servers wired up correctly 👏")
} else {
table.Render()
}
}
return nil

View File

@ -1,261 +0,0 @@
package server
import (
"fmt"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/libcapsul"
"github.com/AlecAivazis/survey/v2"
"github.com/hetznercloud/hcloud-go/hcloud"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
func newHetznerCloudVPS(c *cli.Context) error {
if err := internal.EnsureNewHetznerCloudVPSFlags(c); err != nil {
return err
}
client := hcloud.NewClient(hcloud.WithToken(internal.HetznerCloudAPIToken))
var sshKeysRaw []string
var sshKeys []*hcloud.SSHKey
for _, sshKey := range c.StringSlice("hetzner-ssh-keys") {
if sshKey == "" {
continue
}
sshKey, _, err := client.SSHKey.GetByName(c.Context, sshKey)
if err != nil {
return err
}
sshKeys = append(sshKeys, sshKey)
sshKeysRaw = append(sshKeysRaw, sshKey.Name)
}
serverOpts := hcloud.ServerCreateOpts{
Name: internal.HetznerCloudName,
ServerType: &hcloud.ServerType{Name: internal.HetznerCloudType},
Image: &hcloud.Image{Name: internal.HetznerCloudImage},
SSHKeys: sshKeys,
Location: &hcloud.Location{Name: internal.HetznerCloudLocation},
}
sshKeyIDs := strings.Join(sshKeysRaw, "\n")
if sshKeyIDs == "" {
sshKeyIDs = "N/A (password auth)"
}
tableColumns := []string{"name", "type", "image", "ssh-keys", "location"}
table := formatter.CreateTable(tableColumns)
table.Append([]string{
internal.HetznerCloudName,
internal.HetznerCloudType,
internal.HetznerCloudImage,
sshKeyIDs,
internal.HetznerCloudLocation,
})
table.Render()
response := false
prompt := &survey.Confirm{
Message: "continue with hetzner cloud VPS creation?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
res, _, err := client.Server.Create(c.Context, serverOpts)
if err != nil {
return err
}
var rootPassword string
if len(sshKeys) > 0 {
rootPassword = "N/A (using SSH keys)"
} else {
rootPassword = res.RootPassword
}
ip := res.Server.PublicNet.IPv4.IP.String()
fmt.Println(fmt.Sprintf(`
Your new Hetzner Cloud VPS has successfully been created! Here are the details:
name: %s
IP address: %s
root password: %s
You can access this new VPS via SSH using the following command:
ssh root@%s
Please note, this server is not managed by Abra yet (i.e. "abra server ls" will
not list this server)! You will need to assign a domain name record ("abra
record new") and add the server to your Abra configuration ("abra server add")
to have a working server that you can deploy Co-op Cloud apps to.
When setting up domain name records, you probably want to set up the following
2 A records. This supports deploying apps to your root domain (e.g.
example.com) and other apps on sub-domains (e.g. foo.example.com,
bar.example.com).
@ 1800 IN A %s
* 1800 IN A %s
"abra record new --auto" can help you do this quickly if you use a supported
DNS provider.
`,
internal.HetznerCloudName, ip, rootPassword,
ip, ip, ip,
))
return nil
}
func newCapsulVPS(c *cli.Context) error {
if err := internal.EnsureNewCapsulVPSFlags(c); err != nil {
return err
}
capsulCreateURL := fmt.Sprintf("https://%s/api/capsul/create", internal.CapsulInstanceURL)
var sshKeys []string
for _, sshKey := range c.StringSlice("capsul-ssh-keys") {
if sshKey == "" {
continue
}
sshKeys = append(sshKeys, sshKey)
}
tableColumns := []string{"instance", "name", "type", "image", "ssh-keys"}
table := formatter.CreateTable(tableColumns)
table.Append([]string{
internal.CapsulInstanceURL,
internal.CapsulName,
internal.CapsulType,
internal.CapsulImage,
strings.Join(sshKeys, "\n"),
})
table.Render()
response := false
prompt := &survey.Confirm{
Message: "continue with capsul creation?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
capsulClient := libcapsul.New(capsulCreateURL, internal.CapsulAPIToken)
resp, err := capsulClient.Create(
internal.CapsulName,
internal.CapsulType,
internal.CapsulImage,
sshKeys,
)
if err != nil {
return err
}
fmt.Println(fmt.Sprintf(`
Your new Capsul has successfully been created! Here are the details:
Capsul name: %s
Capsul ID: %v
You will need to log into your Capsul instance web interface to retrieve the IP
address. You can learn all about how to get SSH access to your new Capsul on:
%s/about-ssh
Please note, this server is not managed by Abra yet (i.e. "abra server ls" will
not list this server)! You will need to assign a domain name record ("abra
record new") and add the server to your Abra configuration ("abra server add")
to have a working server that you can deploy Co-op Cloud apps to.
When setting up domain name records, you probably want to set up the following
2 A records. This supports deploying apps to your root domain (e.g.
example.com) and other apps on sub-domains (e.g. foo.example.com,
bar.example.com).
@ 1800 IN A <your-capsul-ip>
* 1800 IN A <your-capsul-ip>
`, internal.CapsulName, resp.ID, internal.CapsulInstanceURL))
return nil
}
var serverNewCommand = &cli.Command{
Name: "new",
Aliases: []string{"n"},
Usage: "Create a new server using a 3rd party provider",
Description: `
This command creates a new server via a 3rd party provider.
The following providers are supported:
Capsul https://git.cyberia.club/Cyberia/capsul-flask
Hetzner Cloud https://docs.hetzner.com/cloud
You may invoke this command in "wizard" mode and be prompted for input:
abra server new
API tokens are read from the environment if specified, e.g.
export HCLOUD_TOKEN=...
Where "$provider_TOKEN" is the expected env var format.
`,
Flags: []cli.Flag{
internal.ServerProviderFlag,
// Capsul
internal.CapsulInstanceURLFlag,
internal.CapsulTypeFlag,
internal.CapsulImageFlag,
internal.CapsulSSHKeysFlag,
internal.CapsulAPITokenFlag,
// Hetzner
internal.HetznerCloudNameFlag,
internal.HetznerCloudTypeFlag,
internal.HetznerCloudImageFlag,
internal.HetznerCloudSSHKeysFlag,
internal.HetznerCloudLocationFlag,
internal.HetznerCloudAPITokenFlag,
},
Action: func(c *cli.Context) error {
if err := internal.EnsureServerProvider(); err != nil {
logrus.Fatal(err)
}
switch internal.ServerProvider {
case "capsul":
if err := newCapsulVPS(c); err != nil {
logrus.Fatal(err)
}
case "hetzner-cloud":
if err := newHetznerCloudVPS(c); err != nil {
logrus.Fatal(err)
}
}
return nil
},
}

103
cli/server/prune.go Normal file
View File

@ -0,0 +1,103 @@
package server
import (
"context"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/formatter"
"github.com/docker/docker/api/types/filters"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var allFilter bool
var allFilterFlag = &cli.BoolFlag{
Name: "all, a",
Usage: "Remove all unused images not just dangling ones",
Destination: &allFilter,
}
var volumesFilter bool
var volumesFilterFlag = &cli.BoolFlag{
Name: "volumes, v",
Usage: "Prune volumes. This will remove app data, Be Careful!",
Destination: &volumesFilter,
}
var serverPruneCommand = cli.Command{
Name: "prune",
Aliases: []string{"p"},
Usage: "Prune a managed server; Runs a docker system prune",
Description: `
Prunes unused containers, networks, and dangling images.
If passing "-v/--volumes" then volumes not connected to a deployed app will
also be removed. This can result in unwanted data loss if not used carefully.
`,
ArgsUsage: "[<server>]",
Flags: []cli.Flag{
allFilterFlag,
volumesFilterFlag,
internal.DebugFlag,
internal.OfflineFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.ServerNameComplete,
Action: func(c *cli.Context) error {
serverName := internal.ValidateServer(c)
cl, err := client.New(serverName)
if err != nil {
logrus.Fatal(err)
}
var args filters.Args
ctx := context.Background()
cr, err := cl.ContainersPrune(ctx, args)
if err != nil {
logrus.Fatal(err)
}
cntSpaceReclaimed := formatter.ByteCountSI(cr.SpaceReclaimed)
logrus.Infof("containers pruned: %d; space reclaimed: %s", len(cr.ContainersDeleted), cntSpaceReclaimed)
nr, err := cl.NetworksPrune(ctx, args)
if err != nil {
logrus.Fatal(err)
}
logrus.Infof("networks pruned: %d", len(nr.NetworksDeleted))
pruneFilters := filters.NewArgs()
if allFilter {
logrus.Debugf("removing all images, not only dangling ones")
pruneFilters.Add("dangling", "false")
}
ir, err := cl.ImagesPrune(ctx, pruneFilters)
if err != nil {
logrus.Fatal(err)
}
imgSpaceReclaimed := formatter.ByteCountSI(ir.SpaceReclaimed)
logrus.Infof("images pruned: %d; space reclaimed: %s", len(ir.ImagesDeleted), imgSpaceReclaimed)
if volumesFilter {
vr, err := cl.VolumesPrune(ctx, args)
if err != nil {
logrus.Fatal(err)
}
volSpaceReclaimed := formatter.ByteCountSI(vr.SpaceReclaimed)
logrus.Infof("volumes pruned: %d; space reclaimed: %s", len(vr.VolumesDeleted), volSpaceReclaimed)
}
return nil
},
}

View File

@ -1,181 +1,47 @@
package server
import (
"fmt"
"os"
"path/filepath"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"github.com/AlecAivazis/survey/v2"
"github.com/hetznercloud/hcloud-go/hcloud"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
var rmServer bool
var rmServerFlag = &cli.BoolFlag{
Name: "server",
Aliases: []string{"s"},
Value: false,
Usage: "remove the actual server also",
Destination: &rmServer,
}
func rmHetznerCloudVPS(c *cli.Context) error {
if internal.HetznerCloudName == "" && !internal.NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS name",
}
if err := survey.AskOne(prompt, &internal.HetznerCloudName); err != nil {
return err
}
}
if internal.HetznerCloudAPIToken == "" && !internal.NoInput {
token, ok := os.LookupEnv("HCLOUD_TOKEN")
if !ok {
prompt := &survey.Input{
Message: "specify hetzner cloud API token",
}
if err := survey.AskOne(prompt, &internal.HetznerCloudAPIToken); err != nil {
return err
}
} else {
internal.HetznerCloudAPIToken = token
}
}
client := hcloud.NewClient(hcloud.WithToken(internal.HetznerCloudAPIToken))
server, _, err := client.Server.Get(c.Context, internal.HetznerCloudName)
if err != nil {
return err
}
if server == nil {
logrus.Fatalf("library provider reports that %s doesn't exist?", internal.HetznerCloudName)
}
fmt.Println(fmt.Sprintf(`
You have requested that Abra delete the following server (%s). Please be
absolutely sure that this is indeed the server that you would like to have
removed. There will be no going back once you confirm, the server will be
destroyed.
`, server.Name))
tableColumns := []string{"name", "type", "image", "location"}
table := formatter.CreateTable(tableColumns)
table.Append([]string{
server.Name,
server.ServerType.Name,
server.Image.Name,
server.Datacenter.Name,
})
table.Render()
response := false
prompt := &survey.Confirm{
Message: "continue with hetzner cloud VPS removal?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
_, err = client.Server.Delete(c.Context, server)
if err != nil {
return err
}
logrus.Infof("%s has been deleted from your hetzner cloud account", internal.HetznerCloudName)
return nil
}
var serverRemoveCommand = &cli.Command{
var serverRemoveCommand = cli.Command{
Name: "remove",
Aliases: []string{"rm"},
ArgsUsage: "[<server>]",
ArgsUsage: "<server>",
Usage: "Remove a managed server",
Description: `
This command removes a server from Abra management.
Description: `Remove a managed server.
If you used a 3rd party provider to create this server ("abra server new"),
you can also destroy the virtual server. Pass "--server/-s" to tell Abra to try
to delete this VPS.
Otherwise, Abra will remove the internal bookkeeping (~/.abra/servers/...) and
underlying client connection context. This server will then be lost in time,
like tears in rain.
Abra will remove the internal bookkeeping (~/.abra/servers/...) and underlying
client connection context. This server will then be lost in time, like tears in
rain.
`,
Flags: []cli.Flag{
rmServerFlag,
internal.ServerProviderFlag,
// Hetzner
internal.HetznerCloudNameFlag,
internal.HetznerCloudAPITokenFlag,
internal.DebugFlag,
internal.NoInputFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.ServerNameComplete,
Action: func(c *cli.Context) error {
serverName := c.Args().Get(1)
if serverName != "" {
var err error
serverName, err = internal.ValidateServer(c)
if err != nil {
logrus.Fatal(err)
}
serverName := internal.ValidateServer(c)
if err := client.DeleteContext(serverName); err != nil {
logrus.Fatal(err)
}
if !rmServer {
logrus.Warn("did not pass -s/--server for actual server deletion, prompting")
response := false
prompt := &survey.Confirm{
Message: "prompt to actual server deletion?",
}
if err := survey.AskOne(prompt, &response); err != nil {
logrus.Fatal(err)
}
if response {
logrus.Info("setting -s/--server and attempting to remove actual server")
rmServer = true
}
if err := os.RemoveAll(filepath.Join(config.SERVERS_DIR, serverName)); err != nil {
logrus.Fatal(err)
}
if rmServer {
if err := internal.EnsureServerProvider(); err != nil {
logrus.Fatal(err)
}
switch internal.ServerProvider {
case "capsul":
logrus.Warn("capsul provider does not support automatic removal yet, sorry!")
case "hetzner-cloud":
if err := rmHetznerCloudVPS(c); err != nil {
logrus.Fatal(err)
}
}
}
if serverName != "" {
if err := client.DeleteContext(serverName); err != nil {
logrus.Fatal(err)
}
if err := os.RemoveAll(filepath.Join(config.SERVERS_DIR, serverName)); err != nil {
logrus.Fatal(err)
}
logrus.Infof("server at %s has been lost in time, like tears in rain", serverName)
}
logrus.Infof("server at %s has been lost in time, like tears in rain", serverName)
return nil
},

View File

@ -5,23 +5,14 @@ import (
)
// ServerCommand defines the `abra server` command and its subcommands
var ServerCommand = &cli.Command{
var ServerCommand = cli.Command{
Name: "server",
Aliases: []string{"s"},
Usage: "Manage servers",
Description: `
These commands support creating, managing and removing servers using 3rd party
integrations.
Servers can be created from scratch using the "abra server new" command. If you
already have a server, you can add it to your configuration using "abra server
add". Abra can provision servers so that they are ready to deploy Co-op Cloud
apps, see available flags on "server add" for more.
`,
Subcommands: []*cli.Command{
serverNewCommand,
serverAddCommand,
serverListCommand,
serverRemoveCommand,
&serverAddCommand,
&serverListCommand,
&serverRemoveCommand,
&serverPruneCommand,
},
}

504
cli/updater/updater.go Normal file
View File

@ -0,0 +1,504 @@
package updater
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/convert"
"coopcloud.tech/abra/pkg/upstream/stack"
"coopcloud.tech/tagcmp"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerclient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
const SERVER = "localhost"
var (
majorUpdate bool
majorFlag = &cli.BoolFlag{
Name: "major",
Aliases: []string{"m"},
Usage: "Also check for major updates",
Destination: &majorUpdate,
}
updateAll bool
allFlag = &cli.BoolFlag{
Name: "all",
Aliases: []string{"a"},
Usage: "Update all deployed apps",
Destination: &updateAll,
}
)
// Notify checks for available upgrades
var Notify = cli.Command{
Name: "notify",
Aliases: []string{"n"},
Usage: "Check for available upgrades",
Flags: []cli.Flag{
internal.DebugFlag,
majorFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Description: `
This command reads the deployed app versions and looks for new versions in the
recipe catalogue. If a new patch/minor version is available, a notification is
printed. To include major versions, use the "--major" flag.
`,
Action: func(c *cli.Context) error {
cl, err := client.New("default")
if err != nil {
logrus.Fatal(err)
}
stacks, err := stack.GetStacks(cl)
if err != nil {
logrus.Fatal(err)
}
for _, stackInfo := range stacks {
stackName := stackInfo.Name
recipeName, err := getLabel(cl, stackName, "recipe")
if err != nil {
logrus.Fatal(err)
}
if recipeName != "" {
_, err = getLatestUpgrade(cl, stackName, recipeName)
if err != nil {
logrus.Fatal(err)
}
}
}
return nil
},
}
// UpgradeApp upgrades apps.
var UpgradeApp = cli.Command{
Name: "upgrade",
Aliases: []string{"u"},
Usage: "Upgrade apps",
ArgsUsage: "<stack-name> <recipe>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.ChaosFlag,
majorFlag,
allFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Description: `
Upgrade an app by specifying its stack name and recipe. By passing "--all"
instead, every deployed app is upgraded. For each app with auto-updates
enabled, the deployed version is compared with the current recipe catalogue
version. If a new patch/minor version is available, the app is upgraded. To
include major versions, use the "--major" flag. Don't do that, it will probably
break things. Only apps that are not deployed with "--chaos" are upgraded; to
update chaos deployments, use the "--chaos" flag. Use it with care.
`,
Action: func(c *cli.Context) error {
cl, err := client.New("default")
if err != nil {
logrus.Fatal(err)
}
if !updateAll {
stackName := c.Args().Get(0)
recipeName := c.Args().Get(1)
err = tryUpgrade(cl, stackName, recipeName)
if err != nil {
logrus.Fatal(err)
}
return nil
}
stacks, err := stack.GetStacks(cl)
if err != nil {
logrus.Fatal(err)
}
for _, stackInfo := range stacks {
stackName := stackInfo.Name
recipeName, err := getLabel(cl, stackName, "recipe")
if err != nil {
logrus.Fatal(err)
}
err = tryUpgrade(cl, stackName, recipeName)
if err != nil {
logrus.Fatal(err)
}
}
return nil
},
}
// getLabel reads docker labels from running services in the format of "coop-cloud.${STACK_NAME}.${LABEL}".
func getLabel(cl *dockerclient.Client, stackName string, label string) (string, error) {
filter := filters.NewArgs()
filter.Add("label", fmt.Sprintf("%s=%s", convert.LabelNamespace, stackName))
services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: filter})
if err != nil {
return "", err
}
for _, service := range services {
labelKey := fmt.Sprintf("coop-cloud.%s.%s", stackName, label)
if labelValue, ok := service.Spec.Labels[labelKey]; ok {
return labelValue, nil
}
}
logrus.Debugf("no %s label found for %s", label, stackName)
return "", nil
}
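
getLabel resolves labels of the form coop-cloud.<stack>.<label> from a running stack's services and returns an empty string when the label is absent. A short usage sketch under those assumptions (the stack name here is hypothetical):

// Read the recipe recorded on a deployed stack; "" means no label was found.
recipeName, err := getLabel(cl, "myapp_example_com", "recipe")
if err != nil {
	logrus.Fatal(err)
}
if recipeName == "" {
	logrus.Debug("stack has no recipe label, skipping")
}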
// getBoolLabel reads a boolean docker label from running services
func getBoolLabel(cl *dockerclient.Client, stackName string, label string) (bool, error) {
labelValue, err := getLabel(cl, stackName, label)
if err != nil {
return false, err
}
if labelValue != "" {
value, err := strconv.ParseBool(labelValue)
if err != nil {
return false, err
}
return value, nil
}
logrus.Debugf("Boolean label %s could not be found for %s, set default to false.", label, stackName)
return false, nil
}
// getEnv reads env variables from docker services.
func getEnv(cl *dockerclient.Client, stackName string) (config.AppEnv, error) {
envMap := make(map[string]string)
filter := filters.NewArgs()
filter.Add("label", fmt.Sprintf("%s=%s", convert.LabelNamespace, stackName))
services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: filter})
if err != nil {
return nil, err
}
for _, service := range services {
envList := service.Spec.TaskTemplate.ContainerSpec.Env
for _, envString := range envList {
splitString := strings.SplitN(envString, "=", 2)
if len(splitString) != 2 {
logrus.Debugf("can't separate key from value: %s (this variable is probably unset)", envString)
continue
}
k := splitString[0]
v := splitString[1]
logrus.Debugf("For %s read env %s with value: %s from docker service", stackName, k, v)
envMap[k] = v
}
}
return envMap, nil
}
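
getEnv splits each "KEY=VALUE" entry with strings.SplitN and a limit of 2 so that values which themselves contain "=" (tokens, URLs, base64 padding) are preserved intact. For example:

// Only the first "=" separates key from value; the rest stays in the value.
parts := strings.SplitN("SECRET_TOKEN=abc=def==", "=", 2)
// parts[0] == "SECRET_TOKEN"
// parts[1] == "abc=def=="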
// getLatestUpgrade returns the latest available version for an app respecting
// the "--major" flag if it is newer than the currently deployed version.
func getLatestUpgrade(cl *dockerclient.Client, stackName string, recipeName string) (string, error) {
deployedVersion, err := getDeployedVersion(cl, stackName, recipeName)
if err != nil {
return "", err
}
availableUpgrades, err := getAvailableUpgrades(cl, stackName, recipeName, deployedVersion)
if err != nil {
return "", err
}
if len(availableUpgrades) == 0 {
logrus.Debugf("no available upgrades for %s", stackName)
return "", nil
}
var chosenUpgrade string
if len(availableUpgrades) > 0 {
chosenUpgrade = availableUpgrades[len(availableUpgrades)-1]
logrus.Infof("%s (%s) can be upgraded from version %s to %s", stackName, recipeName, deployedVersion, chosenUpgrade)
}
return chosenUpgrade, nil
}
// getDeployedVersion returns the currently deployed version of an app.
func getDeployedVersion(cl *dockerclient.Client, stackName string, recipeName string) (string, error) {
logrus.Debugf("Retrieve deployed version whether %s is already deployed", stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
return "", err
}
if !isDeployed {
return "", fmt.Errorf("%s is not deployed?", stackName)
}
if deployedVersion == "unknown" {
return "", fmt.Errorf("failed to determine deployed version of %s", stackName)
}
return deployedVersion, nil
}
// getAvailableUpgrades returns all available versions of an app that are newer
// than the deployed version. It only includes major upgrades if the "--major"
// flag is set.
func getAvailableUpgrades(cl *dockerclient.Client, stackName string, recipeName string,
deployedVersion string,
) ([]string, error) {
catl, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
return nil, err
}
versions, err := recipe.GetRecipeCatalogueVersions(recipeName, catl)
if err != nil {
return nil, err
}
if len(versions) == 0 {
logrus.Warnf("no published releases for %s in the recipe catalogue?", recipeName)
return nil, nil
}
var availableUpgrades []string
for _, version := range versions {
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil {
return nil, err
}
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
return nil, err
}
versionDelta, err := parsedDeployedVersion.UpgradeDelta(parsedVersion)
if err != nil {
return nil, err
}
if 0 < versionDelta.UpgradeType() && (versionDelta.UpgradeType() < 4 || majorUpdate) {
availableUpgrades = append(availableUpgrades, version)
}
}
logrus.Debugf("Available updates for %s: %s", stackName, availableUpgrades)
return availableUpgrades, nil
}
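
getAvailableUpgrades relies only on the tagcmp calls visible above: parse both version tags, compute the delta with UpgradeDelta, and classify it with UpgradeType, where 0 means no upgrade and values of 4 and above appear to be treated as major (skipped unless "--major" is passed). A minimal sketch under those assumptions, using made-up version strings:

// Sketch only: compare a deployed version against a candidate from the catalogue.
deployed, err := tagcmp.Parse("1.1.0+2.3.0")
if err != nil {
	logrus.Fatal(err)
}
candidate, err := tagcmp.Parse("1.2.0+2.4.0")
if err != nil {
	logrus.Fatal(err)
}
delta, err := deployed.UpgradeDelta(candidate)
if err != nil {
	logrus.Fatal(err)
}
if delta.UpgradeType() > 0 {
	// the candidate is newer; the loop above would collect it as an available upgrade
}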
// processRecipeRepoVersion clones, pulls, checks out the version and lints the
// recipe repository.
func processRecipeRepoVersion(recipeName, version string) error {
if err := recipe.EnsureExists(recipeName); err != nil {
return err
}
if err := recipe.EnsureUpToDate(recipeName); err != nil {
return err
}
if err := recipe.EnsureVersion(recipeName, version); err != nil {
return err
}
if r, err := recipe.Get(recipeName, internal.Offline); err != nil {
return err
} else if err := lint.LintForErrors(r); err != nil {
return err
}
return nil
}
// mergeAbraShEnv merges abra.sh env vars into the app env vars.
func mergeAbraShEnv(recipeName string, env config.AppEnv) error {
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, recipeName, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
if err != nil {
return err
}
for k, v := range abraShEnv {
logrus.Debugf("read v:%s k: %s", v, k)
env[k] = v
}
return nil
}
// createDeployConfig merges and enriches the compose config for the deployment.
func createDeployConfig(recipeName string, stackName string, env config.AppEnv) (*composetypes.Config, stack.Deploy, error) {
env["STACK_NAME"] = stackName
deployOpts := stack.Deploy{
Namespace: stackName,
Prune: false,
ResolveImage: stack.ResolveImageAlways,
}
composeFiles, err := config.GetComposeFiles(recipeName, env)
if err != nil {
return nil, deployOpts, err
}
deployOpts.Composefiles = composeFiles
compose, err := config.GetAppComposeConfig(stackName, deployOpts, env)
if err != nil {
return nil, deployOpts, err
}
config.ExposeAllEnv(stackName, compose, env)
// after the upgrade the deployment won't be in chaos state anymore
config.SetChaosLabel(compose, stackName, false)
config.SetRecipeLabel(compose, stackName, recipeName)
config.SetUpdateLabel(compose, stackName, env)
return compose, deployOpts, nil
}
// tryUpgrade performs the upgrade if all the requirements are fulfilled.
func tryUpgrade(cl *dockerclient.Client, stackName, recipeName string) error {
if recipeName == "" {
logrus.Debugf("don't update %s due to missing recipe name", stackName)
return nil
}
chaos, err := getBoolLabel(cl, stackName, "chaos")
if err != nil {
return err
}
if chaos && !internal.Chaos {
logrus.Debugf("don't update %s due to chaos deployment", stackName)
return nil
}
updatesEnabled, err := getBoolLabel(cl, stackName, "autoupdate")
if err != nil {
return err
}
if !updatesEnabled {
logrus.Debugf("don't update %s due to disabled auto updates or missing ENABLE_AUTO_UPDATE env", stackName)
return nil
}
upgradeVersion, err := getLatestUpgrade(cl, stackName, recipeName)
if err != nil {
return err
}
if upgradeVersion == "" {
logrus.Debugf("don't update %s due to no new version", stackName)
return nil
}
err = upgrade(cl, stackName, recipeName, upgradeVersion)
return err
}
// upgrade performs all necessary steps to upgrade an app.
func upgrade(cl *dockerclient.Client, stackName, recipeName,
upgradeVersion string,
) error {
env, err := getEnv(cl, stackName)
if err != nil {
return err
}
app := config.App{
Name: stackName,
Recipe: recipeName,
Server: SERVER,
Env: env,
}
if err = processRecipeRepoVersion(recipeName, upgradeVersion); err != nil {
return err
}
if err = mergeAbraShEnv(recipeName, app.Env); err != nil {
return err
}
compose, deployOpts, err := createDeployConfig(recipeName, stackName, app.Env)
if err != nil {
return err
}
logrus.Infof("upgrade %s (%s) to version %s", stackName, recipeName, upgradeVersion)
err = stack.RunDeploy(cl, deployOpts, compose, stackName, true)
return err
}
func newAbraApp(version, commit string) *cli.App {
app := &cli.App{
Name: "kadabra",
Usage: `The Co-op Cloud auto-updater
____ ____ _ _
/ ___|___ ___ _ __ / ___| | ___ _ _ __| |
| | / _ \ _____ / _ \| '_ \ | | | |/ _ \| | | |/ _' |
| |__| (_) |_____| (_) | |_) | | |___| | (_) | |_| | (_| |
\____\___/ \___/| .__/ \____|_|\___/ \__,_|\__,_|
|_|
`,
Version: fmt.Sprintf("%s-%s", version, commit[:7]),
Commands: []*cli.Command{
&Notify,
&UpgradeApp,
},
}
app.Before = func(c *cli.Context) error {
logrus.Debugf("kadabra version %s, commit %s", version, commit)
return nil
}
return app
}
// RunApp runs CLI abra app.
func RunApp(version, commit string) {
app := newAbraApp(version, commit)
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}

View File

@ -5,10 +5,10 @@ import (
"coopcloud.tech/abra/cli"
)
// Version is the current version of Abra
// Version is the current version of Abra.
var Version string
// Commit is the current git commit of Abra
// Commit is the current git commit of Abra.
var Commit string
func main() {

23
cmd/kadabra/main.go Normal file
View File

@ -0,0 +1,23 @@
// Package main provides the command-line entrypoint.
package main
import (
"coopcloud.tech/abra/cli/updater"
)
// Version is the current version of Kadabra.
var Version string
// Commit is the current git commit of Kadabra.
var Commit string
func main() {
if Version == "" {
Version = "dev"
}
if Commit == "" {
Commit = " "
}
updater.RunApp(Version, Commit)
}

135
go.mod
View File

@ -1,49 +1,122 @@
module coopcloud.tech/abra
go 1.16
go 1.21
require (
coopcloud.tech/tagcmp v0.0.0-20211103052201-885b22f77d52
github.com/AlecAivazis/survey/v2 v2.3.2
github.com/Autonomic-Cooperative/godotenv v1.3.1-0.20210731170023-c37c0920d1a4
git.coopcloud.tech/coop-cloud/godotenv v1.5.2-0.20231130100509-01bff8284355
github.com/AlecAivazis/survey/v2 v2.3.7
github.com/Gurpartap/logrus-stack v0.0.0-20170710170904-89c00d8a28f4
github.com/docker/cli v20.10.12+incompatible
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.12+incompatible
github.com/docker/go-units v0.4.0
github.com/go-git/go-git/v5 v5.4.2
github.com/hetznercloud/hcloud-go v1.33.1
github.com/moby/sys/signal v0.6.0
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
github.com/docker/cli v24.0.7+incompatible
github.com/docker/distribution v2.8.3+incompatible
github.com/docker/docker v24.0.7+incompatible
github.com/docker/go-units v0.5.0
github.com/go-git/go-git/v5 v5.10.0
github.com/google/go-cmp v0.5.9
github.com/moby/sys/signal v0.7.0
github.com/moby/term v0.5.0
github.com/olekukonko/tablewriter v0.0.5
github.com/pkg/errors v0.9.1
github.com/schollz/progressbar/v3 v3.8.5
github.com/schultz-is/passgen v1.0.1
github.com/sirupsen/logrus v1.8.1
github.com/urfave/cli/v2 v2.3.0
gotest.tools/v3 v3.0.3
github.com/schollz/progressbar/v3 v3.14.1
github.com/sirupsen/logrus v1.9.3
gotest.tools/v3 v3.5.1
)
require (
coopcloud.tech/libcapsul v0.0.0-20211022074848-c35e78fe3f3e
github.com/Microsoft/hcsshim v0.8.21 // indirect
github.com/buger/goterm v1.0.3
github.com/containerd/containerd v1.5.5 // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.9.2 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
github.com/acomagu/bufpipe v1.0.4 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cloudflare/circl v1.3.3 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/reference v0.5.0 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.5.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/compress v1.14.2 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
github.com/miekg/pkcs11 v1.0.3 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.1.0 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/skeema/knownhosts v1.2.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/urfave/cli/v2 v2.27.1 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/term v0.14.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
golang.org/x/tools v0.13.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
require (
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
github.com/buger/goterm v1.0.4
github.com/containerd/containerd v1.5.9 // indirect
github.com/containers/image v3.0.2+incompatible
github.com/containers/storage v1.38.2 // indirect
github.com/decentral1se/passgen v1.0.1
github.com/docker/docker-credential-helpers v0.6.4 // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
github.com/gliderlabs/ssh v0.3.3
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/hashicorp/go-retryablehttp v0.7.0
github.com/kevinburke/ssh_config v1.1.0
github.com/libdns/gandi v1.0.2
github.com/libdns/libdns v0.2.1
github.com/moby/sys/mount v0.2.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/runc v1.0.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.5
github.com/klauspost/pgzip v1.2.6
github.com/moby/patternmatcher v0.5.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/spf13/cobra v1.3.0 // indirect
github.com/stretchr/testify v1.8.4
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
github.com/urfave/cli v1.22.9
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b // indirect
golang.org/x/sys v0.14.0
)

773
go.sum

File diff suppressed because it is too large

View File

@ -1,13 +1,9 @@
package app
import (
"context"
"fmt"
"strings"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/upstream/stack"
apiclient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
@ -37,45 +33,6 @@ type deployedServiceSpec struct {
// VersionSpec represents a deployed app and associated metadata.
type VersionSpec map[string]deployedServiceSpec
// DeployedVersions lists metadata (e.g. versions) for deployed
func DeployedVersions(ctx context.Context, cl *apiclient.Client, app config.App) (VersionSpec, bool, error) {
services, err := stack.GetStackServices(ctx, cl, app.StackName())
if err != nil {
return VersionSpec{}, false, err
}
appSpec := make(VersionSpec)
for _, service := range services {
serviceName := ParseServiceName(service.Spec.Name)
label := fmt.Sprintf("coop-cloud.%s.%s.version", app.StackName(), serviceName)
if deployLabel, ok := service.Spec.Labels[label]; ok {
version, _ := ParseVersionLabel(deployLabel)
appSpec[serviceName] = deployedServiceSpec{Name: serviceName, Version: version}
}
}
deployed := len(services) > 0
if deployed {
logrus.Debugf("detected %s as deployed versions of %s", appSpec, app.Name)
} else {
logrus.Debugf("detected %s as not deployed", app.Name)
}
return appSpec, len(services) > 0, nil
}
// ParseVersionLabel parses a $VERSION-$DIGEST app service label.
func ParseVersionLabel(label string) (string, string) {
// versions may look like v4.2-abcd or v4.2-alpine-abcd
idx := strings.LastIndex(label, "-")
version := label[:idx]
digest := label[idx+1:]
logrus.Debugf("parsed %s as version from %s", version, label)
logrus.Debugf("parsed %s as digest from %s", digest, label)
return version, digest
}
// ParseServiceName parses a $STACK_NAME_$SERVICE_NAME service label.
func ParseServiceName(label string) string {
idx := strings.LastIndex(label, "_")

View File

@ -9,7 +9,7 @@ import (
"github.com/urfave/cli/v2"
)
// AppNameComplete copletes app names
// AppNameComplete completes app names.
func AppNameComplete(c *cli.Context) {
appNames, err := config.GetAppNames()
if err != nil {
@ -25,9 +25,19 @@ func AppNameComplete(c *cli.Context) {
}
}
// RecipeNameComplete completes recipe names
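// ServiceNameComplete completes service names for the given app.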
func ServiceNameComplete(appName string) {
serviceNames, err := config.GetAppServiceNames(appName)
if err != nil {
return
}
for _, s := range serviceNames {
fmt.Println(s)
}
}
// RecipeNameComplete completes recipe names.
func RecipeNameComplete(c *cli.Context) {
catl, err := recipe.ReadRecipeCatalogue()
catl, err := recipe.ReadRecipeCatalogue(false)
if err != nil {
logrus.Warn(err)
}
@ -40,3 +50,53 @@ func RecipeNameComplete(c *cli.Context) {
fmt.Println(name)
}
}
// RecipeVersionComplete completes versions for the recipe.
func RecipeVersionComplete(recipeName string) {
catl, err := recipe.ReadRecipeCatalogue(false)
if err != nil {
logrus.Warn(err)
}
for _, v := range catl[recipeName].Versions {
for v2 := range v {
fmt.Println(v2)
}
}
}
// ServerNameComplete completes server names.
func ServerNameComplete(c *cli.Context) {
files, err := config.LoadAppFiles("")
if err != nil {
logrus.Fatal(err)
}
if c.NArg() > 0 {
return
}
for _, appFile := range files {
fmt.Println(appFile.Server)
}
}
// SubcommandComplete completes sub-commands.
func SubcommandComplete(c *cli.Context) {
if c.NArg() > 0 {
return
}
subcmds := []string{
"app",
"autocomplete",
"catalogue",
"recipe",
"server",
"upgrade",
}
for _, cmd := range subcmds {
fmt.Println(cmd)
}
}

View File

@ -0,0 +1,88 @@
package catalogue
import (
"fmt"
"os"
"path"
"strings"
"coopcloud.tech/abra/pkg/config"
gitPkg "coopcloud.tech/abra/pkg/git"
"github.com/go-git/go-git/v5"
"github.com/sirupsen/logrus"
)
// EnsureCatalogue ensures that the catalogue is cloned locally & present.
func EnsureCatalogue() error {
catalogueDir := path.Join(config.ABRA_DIR, "catalogue")
if _, err := os.Stat(catalogueDir); err != nil && os.IsNotExist(err) {
logrus.Warnf("local recipe catalogue is missing, retrieving now")
url := fmt.Sprintf("%s/%s.git", config.REPOS_BASE_URL, config.CATALOGUE_JSON_REPO_NAME)
if err := gitPkg.Clone(catalogueDir, url); err != nil {
return err
}
logrus.Debugf("cloned catalogue repository to %s", catalogueDir)
}
return nil
}
// EnsureIsClean makes sure that the catalogue has no unstaged changes.
func EnsureIsClean() error {
isClean, err := gitPkg.IsClean(config.CATALOGUE_DIR)
if err != nil {
return err
}
if !isClean {
msg := "%s has locally unstaged changes? please commit/remove your changes before proceeding"
return fmt.Errorf(msg, config.CATALOGUE_DIR)
}
return nil
}
// EnsureUpToDate ensures that the local catalogue is up to date.
func EnsureUpToDate() error {
repo, err := git.PlainOpen(config.CATALOGUE_DIR)
if err != nil {
return err
}
remotes, err := repo.Remotes()
if err != nil {
return err
}
if len(remotes) == 0 {
msg := "cannot ensure %s is up-to-date, no git remotes configured"
logrus.Debugf(msg, config.CATALOGUE_DIR)
return nil
}
worktree, err := repo.Worktree()
if err != nil {
return err
}
branch, err := gitPkg.CheckoutDefaultBranch(repo, config.CATALOGUE_DIR)
if err != nil {
return err
}
opts := &git.PullOptions{
Force: true,
ReferenceName: branch,
}
if err := worktree.Pull(opts); err != nil {
if !strings.Contains(err.Error(), "already up-to-date") {
return err
}
}
logrus.Debugf("fetched latest git changes for %s", config.CATALOGUE_DIR)
return nil
}
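A brief usage sketch (not part of the diff; the import path coopcloud.tech/abra/pkg/catalogue is assumed) showing how callers might chain these helpers before reading the catalogue:

package main

import (
	"github.com/sirupsen/logrus"

	"coopcloud.tech/abra/pkg/catalogue" // assumed import path for the package above
)

func main() {
	// Clone the catalogue if it is missing, then pull the latest changes.
	if err := catalogue.EnsureCatalogue(); err != nil {
		logrus.Fatal(err)
	}
	if err := catalogue.EnsureUpToDate(); err != nil {
		logrus.Fatal(err)
	}
}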

View File

@ -2,24 +2,31 @@
package client
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"time"
contextPkg "coopcloud.tech/abra/pkg/context"
sshPkg "coopcloud.tech/abra/pkg/ssh"
commandconnPkg "coopcloud.tech/abra/pkg/upstream/commandconn"
"github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
// New initiates a new Docker client.
func New(contextName string) (*client.Client, error) {
// New initiates a new Docker client. New client connections are validated to
// ensure that connections to the daemon via SSH can succeed. It takes into
// account that you may only want the local client and not communicate via SSH.
// For that use-case, pass "default" as the serverName.
func New(serverName string) (*client.Client, error) {
var clientOpts []client.Opt
if contextName != "default" {
context, err := GetContext(contextName)
if serverName != "default" {
context, err := GetContext(serverName)
if err != nil {
return nil, err
return nil, fmt.Errorf("unknown server, run \"abra server add %s\"?", serverName)
}
ctxEndpoint, err := contextPkg.GetContextEndpoint(context)
@ -27,9 +34,12 @@ func New(contextName string) (*client.Client, error) {
return nil, err
}
helper := commandconnPkg.NewConnectionHelper(ctxEndpoint)
helper, err := commandconnPkg.NewConnectionHelper(ctxEndpoint)
if err != nil {
return nil, err
}
httpClient := &http.Client{
// No tls, no proxy
Transport: &http.Transport{
DialContext: helper.Dialer,
IdleConnTimeout: 30 * time.Second,
@ -55,7 +65,20 @@ func New(contextName string) (*client.Client, error) {
return nil, err
}
logrus.Debugf("created client for %s", contextName)
logrus.Debugf("created client for %s", serverName)
info, err := cl.Info(context.Background())
if err != nil {
return cl, sshPkg.Fatal(serverName, err)
}
if info.Swarm.LocalNodeState == "inactive" {
if serverName != "default" {
return cl, fmt.Errorf("swarm mode not enabled on %s?", serverName)
} else {
return cl, errors.New("swarm mode not enabled on local server?")
}
}
return cl, nil
}
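A minimal usage sketch (hypothetical server name, not part of the diff) of the reworked constructor:

package main

import (
	"coopcloud.tech/abra/pkg/client"
	"github.com/sirupsen/logrus"
)

func main() {
	// A named server dials the Docker daemon over SSH and validates swarm state.
	cl, err := client.New("example.coop")
	if err != nil {
		logrus.Fatal(err)
	}
	logrus.Infof("remote API version: %s", cl.ClientVersion())

	// "default" skips SSH and talks to the local daemon instead.
	if _, err := client.New("default"); err != nil {
		logrus.Warn(err)
	}
}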

View File

@ -1,193 +1,28 @@
package client
import (
"encoding/base64"
"encoding/json"
"context"
"fmt"
"io/ioutil"
"net/http"
"strings"
"coopcloud.tech/abra/pkg/web"
"github.com/containers/image/docker"
"github.com/containers/image/types"
"github.com/docker/distribution/reference"
"github.com/docker/docker/client"
"github.com/hashicorp/go-retryablehttp"
"github.com/sirupsen/logrus"
)
type RawTag struct {
Layer string
Name string
}
// GetRegistryTags retrieves all tags of an image from a container registry.
func GetRegistryTags(img reference.Named) ([]string, error) {
var tags []string
type RawTags []RawTag
ref, err := docker.ParseReference(fmt.Sprintf("//%s", img))
if err != nil {
return tags, fmt.Errorf("failed to parse image %s, saw: %s", img, err.Error())
}
var registryURL = "https://registry.hub.docker.com/v1/repositories/%s/tags"
func GetRegistryTags(image string) (RawTags, error) {
var tags RawTags
tagsUrl := fmt.Sprintf(registryURL, image)
if err := web.ReadJSON(tagsUrl, &tags); err != nil {
ctx := context.Background()
tags, err = docker.GetRepositoryTags(ctx, &types.SystemContext{}, ref)
if err != nil {
return tags, err
}
return tags, nil
}
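A short sketch (hypothetical image name; network access assumed) of how the rewritten tag lookup might be called:

package main

import (
	"coopcloud.tech/abra/pkg/client"
	"github.com/docker/distribution/reference"
	"github.com/sirupsen/logrus"
)

func main() {
	// Normalise "nextcloud" to "docker.io/library/nextcloud" before looking up tags.
	img, err := reference.ParseNormalizedNamed("nextcloud")
	if err != nil {
		logrus.Fatal(err)
	}
	tags, err := client.GetRegistryTags(img)
	if err != nil {
		logrus.Fatal(err)
	}
	logrus.Infof("found %d tags for %s", len(tags), img)
}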
func basicAuth(username, password string) string {
auth := username + ":" + password
return base64.StdEncoding.EncodeToString([]byte(auth))
}
// getRegv2Token retrieves a registry v2 authentication token.
func getRegv2Token(cl *client.Client, image reference.Named, registryUsername, registryPassword string) (string, error) {
img := reference.Path(image)
tokenURL := "https://auth.docker.io/token"
values := fmt.Sprintf("service=registry.docker.io&scope=repository:%s:pull", img)
fullURL := fmt.Sprintf("%s?%s", tokenURL, values)
req, err := retryablehttp.NewRequest("GET", fullURL, nil)
if err != nil {
return "", err
}
if registryUsername != "" && registryPassword != "" {
logrus.Debugf("using registry log in credentials for token request")
auth := basicAuth(registryUsername, registryPassword)
req.Header.Add("Authorization", fmt.Sprintf("Basic %s", auth))
}
client := web.NewHTTPRetryClient()
res, err := client.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
_, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", nil
}
tokenRes := struct {
AccessToken string `json:"access_token"`
Expiry int `json:"expires_in"`
Issued string `json:"issued_at"`
Token string `json:"token"`
}{}
if err := json.Unmarshal(body, &tokenRes); err != nil {
return "", err
}
return tokenRes.Token, nil
}
// GetTagDigest retrieves an image digest from a v2 registry
func GetTagDigest(cl *client.Client, image reference.Named, registryUsername, registryPassword string) (string, error) {
img := reference.Path(image)
tag := image.(reference.NamedTagged).Tag()
manifestURL := fmt.Sprintf("https://index.docker.io/v2/%s/manifests/%s", img, tag)
req, err := retryablehttp.NewRequest("GET", manifestURL, nil)
if err != nil {
return "", err
}
token, err := getRegv2Token(cl, image, registryUsername, registryPassword)
if err != nil {
return "", err
}
if token == "" {
return "", fmt.Errorf("unable to retrieve registry token?")
}
req.Header = http.Header{
"Accept": []string{
"application/vnd.docker.distribution.manifest.v2+json",
"application/vnd.docker.distribution.manifest.list.v2+json",
},
"Authorization": []string{fmt.Sprintf("Bearer %s", token)},
}
client := web.NewHTTPRetryClient()
res, err := client.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
_, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
registryResT1 := struct {
SchemaVersion int
MediaType string
Manifests []struct {
MediaType string
Size int
Digest string
Platform struct {
Architecture string
Os string
}
}
}{}
registryResT2 := struct {
SchemaVersion int
MediaType string
Config struct {
MediaType string
Size int
Digest string
}
Layers []struct {
MediaType string
Size int
Digest string
}
}{}
if err := json.Unmarshal(body, &registryResT1); err != nil {
return "", err
}
var digest string
for _, manifest := range registryResT1.Manifests {
if string(manifest.Platform.Architecture) == "amd64" {
digest = strings.Split(manifest.Digest, ":")[1][:7]
}
}
if digest == "" {
if err := json.Unmarshal(body, &registryResT2); err != nil {
return "", err
}
digest = strings.Split(registryResT2.Config.Digest, ":")[1][:7]
}
if digest == "" {
return "", fmt.Errorf("Unable to retrieve amd64 digest for %s", image)
}
return digest, nil
}

View File

@ -4,20 +4,14 @@ import (
"context"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
)
func StoreSecret(secretName, secretValue, server string) error {
cl, err := New(server)
if err != nil {
return err
}
ctx := context.Background()
func StoreSecret(cl *client.Client, secretName, secretValue, server string) error {
ann := swarm.Annotations{Name: secretName}
spec := swarm.SecretSpec{Annotations: ann, Data: []byte(secretValue)}
// We don't bother with the secret IDs for now
if _, err := cl.SecretCreate(ctx, spec); err != nil {
if _, err := cl.SecretCreate(context.Background(), spec); err != nil {
return err
}

View File

@ -3,49 +3,39 @@ package client
import (
"context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/sirupsen/logrus"
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
)
func GetVolumes(ctx context.Context, server string, appName string) ([]*types.Volume, error) {
cl, err := New(server)
if err != nil {
return nil, err
}
fs := filters.NewArgs()
fs.Add("name", appName)
volumeListOKBody, err := cl.VolumeList(ctx, fs)
func GetVolumes(cl *client.Client, ctx context.Context, server string, fs filters.Args) ([]*volume.Volume, error) {
volumeListOptions := volume.ListOptions{fs}
volumeListOKBody, err := cl.VolumeList(ctx, volumeListOptions)
volumeList := volumeListOKBody.Volumes
if err != nil {
logrus.Fatal(err)
return volumeList, err
}
return volumeList, nil
}
func GetVolumeNames(volumes []*types.Volume) []string {
func GetVolumeNames(volumes []*volume.Volume) []string {
var volumeNames []string
for _, vol := range volumes {
volumeNames = append(volumeNames, vol.Name)
}
return volumeNames
}
func RemoveVolumes(ctx context.Context, server string, volumeNames []string, force bool) error {
cl, err := New(server)
if err != nil {
return err
}
func RemoveVolumes(cl *client.Client, ctx context.Context, server string, volumeNames []string, force bool) error {
for _, volName := range volumeNames {
err := cl.VolumeRemove(ctx, volName, force)
if err != nil {
return err
}
}
return nil
return nil
}

View File

@ -8,6 +8,7 @@ import (
"strings"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/stack"
loader "coopcloud.tech/abra/pkg/upstream/stack"
composetypes "github.com/docker/cli/cli/compose/types"
@ -53,11 +54,11 @@ func UpdateTag(pattern, image, tag, recipeName string) (bool, error) {
case reference.NamedTagged:
composeTag = img.(reference.NamedTagged).Tag()
default:
// unable to parse, typically image missing tag
return false, nil
logrus.Debugf("unable to parse %s, skipping", img)
continue
}
composeImage := reference.Path(img)
composeImage := formatter.StripTagMeta(reference.Path(img))
logrus.Debugf("parsed %s from %s", composeTag, service.Image)
@ -74,7 +75,7 @@ func UpdateTag(pattern, image, tag, recipeName string) (bool, error) {
logrus.Debugf("updating %s to %s in %s", old, new, compose.Filename)
if err := ioutil.WriteFile(compose.Filename, []byte(replacedBytes), 0764); err != nil {
return true, err
return false, err
}
}
}
@ -121,7 +122,7 @@ func UpdateLabel(pattern, serviceName, label, recipeName string) error {
discovered := false
for oldLabel, value := range service.Deploy.Labels {
if strings.HasPrefix(oldLabel, "coop-cloud") {
if strings.HasPrefix(oldLabel, "coop-cloud") && strings.Contains(oldLabel, "version") {
discovered = true
bytes, err := ioutil.ReadFile(composeFile)

View File

@ -2,17 +2,21 @@ package config
import (
"fmt"
"html/template"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"github.com/schollz/progressbar/v3"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/convert"
loader "coopcloud.tech/abra/pkg/upstream/stack"
stack "coopcloud.tech/abra/pkg/upstream/stack"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types/filters"
"github.com/sirupsen/logrus"
)
@ -21,6 +25,9 @@ import (
// AppEnv is a map of the values in an app's env config
type AppEnv = map[string]string
// AppModifiers is a map of modifiers in an app's env config
type AppModifiers = map[string]map[string]string
// AppName is the name of an app
type AppName = string
@ -36,28 +43,97 @@ type AppFiles map[AppName]AppFile
// App represents an app with its env file read into memory
type App struct {
Name AppName
Type string
Recipe string
Domain string
Env AppEnv
Server string
Path string
}
// StackName gets what the docker safe stack name is for the app. This should
// not not shown to the user, use a.Name for that. Give the output of this
// command to Docker only.
// See documentation of config.StackName
func (a App) StackName() string {
if _, exists := a.Env["STACK_NAME"]; exists {
return a.Env["STACK_NAME"]
}
stackName := SanitiseAppName(a.Name)
stackName := StackName(a.Name)
a.Env["STACK_NAME"] = stackName
return stackName
}
// SORTING TYPES
// StackName gets the Docker-safe stack name (using the right delimiting
// character, e.g. "_") for the app. In general, you don't want to use this to
// show anything to end-users; use a.Name instead.
func StackName(appName string) string {
stackName := SanitiseAppName(appName)
if len(stackName) > 45 {
logrus.Debugf("trimming %s to %s to avoid runtime limits", stackName, stackName[:45])
stackName = stackName[:45]
}
return stackName
}
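A rough illustration (hypothetical domain, not from the diff) of the trimming described above:

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/config"
)

func main() {
	// Dots are replaced with "_" and anything beyond 45 characters is trimmed,
	// so this sanitised name (49 chars) should come out as
	// "nextcloud_my-housing-cooperative_city_example".
	fmt.Println(config.StackName("nextcloud.my-housing-cooperative.city.example.org"))
}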
// Filters retrieves app filters for querying the container runtime. By default
// it filters on all services in the app. It is also possible to pass an
// optional list of service names, which get filtered instead.
//
// Due to upstream issues, filtering works differently depending on what you're
// querying. So, for example, secrets don't work with regex! The caller needs
// to implement their own validation that the right secrets are matched. In
// order to handle these cases, we provide the `appendServiceNames` /
// `exactMatch` modifiers.
func (a App) Filters(appendServiceNames, exactMatch bool, services ...string) (filters.Args, error) {
filters := filters.NewArgs()
if len(services) > 0 {
for _, serviceName := range services {
filters.Add("name", ServiceFilter(a.StackName(), serviceName, exactMatch))
}
return filters, nil
}
// When not appending the service name, just add one filter for the whole
// stack.
if !appendServiceNames {
f := fmt.Sprintf("%s", a.StackName())
if exactMatch {
f = fmt.Sprintf("^%s", f)
}
filters.Add("name", f)
return filters, nil
}
composeFiles, err := GetComposeFiles(a.Recipe, a.Env)
if err != nil {
return filters, err
}
opts := stack.Deploy{Composefiles: composeFiles}
compose, err := GetAppComposeConfig(a.Recipe, opts, a.Env)
if err != nil {
return filters, err
}
for _, service := range compose.Services {
f := ServiceFilter(a.StackName(), service.Name, exactMatch)
filters.Add("name", f)
}
return filters, nil
}
// ServiceFilter creates a filter string for filtering a service in the docker
// container runtime. When exact match is true, it uses regex to match the
// string exactly.
func ServiceFilter(stack, service string, exact bool) string {
if exact {
return fmt.Sprintf("^%s_%s", stack, service)
}
return fmt.Sprintf("%s_%s", stack, service)
}
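A small sketch (hypothetical names) of the filter strings these helpers produce; the Filters method builds the same strings for every service in the app's compose config:

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/config"
)

func main() {
	fmt.Println(config.ServiceFilter("test_example_com", "foo", false)) // test_example_com_foo
	fmt.Println(config.ServiceFilter("test_example_com", "foo", true))  // ^test_example_com_foo
}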
// ByServer sorts a slice of Apps
type ByServer []App
@ -68,25 +144,25 @@ func (a ByServer) Less(i, j int) bool {
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
}
// ByServerAndType sort a slice of Apps
type ByServerAndType []App
// ByServerAndRecipe sorts a slice of Apps
type ByServerAndRecipe []App
func (a ByServerAndType) Len() int { return len(a) }
func (a ByServerAndType) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByServerAndType) Less(i, j int) bool {
func (a ByServerAndRecipe) Len() int { return len(a) }
func (a ByServerAndRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByServerAndRecipe) Less(i, j int) bool {
if a[i].Server == a[j].Server {
return strings.ToLower(a[i].Type) < strings.ToLower(a[j].Type)
return strings.ToLower(a[i].Recipe) < strings.ToLower(a[j].Recipe)
}
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
}
// ByType sort a slice of Apps
type ByType []App
// ByRecipe sorts a slice of Apps
type ByRecipe []App
func (a ByType) Len() int { return len(a) }
func (a ByType) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByType) Less(i, j int) bool {
return strings.ToLower(a[i].Type) < strings.ToLower(a[j].Type)
func (a ByRecipe) Len() int { return len(a) }
func (a ByRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByRecipe) Less(i, j int) bool {
return strings.ToLower(a[i].Recipe) < strings.ToLower(a[j].Recipe)
}
// ByName sorts a slice of Apps
@ -98,7 +174,7 @@ func (a ByName) Less(i, j int) bool {
return strings.ToLower(a[i].Name) < strings.ToLower(a[j].Name)
}
func readAppEnvFile(appFile AppFile, name AppName) (App, error) {
func ReadAppEnvFile(appFile AppFile, name AppName) (App, error) {
env, err := ReadEnv(appFile.Path)
if err != nil {
return App{}, fmt.Errorf("env file for %s couldn't be read: %s", name, err.Error())
@ -106,7 +182,7 @@ func readAppEnvFile(appFile AppFile, name AppName) (App, error) {
logrus.Debugf("read env %s from %s", env, appFile.Path)
app, err := newApp(env, name, appFile)
app, err := NewApp(env, name, appFile)
if err != nil {
return App{}, fmt.Errorf("env file for %s has issues: %s", name, err.Error())
}
@ -114,26 +190,29 @@ func readAppEnvFile(appFile AppFile, name AppName) (App, error) {
return app, nil
}
// newApp creates new App object
func newApp(env AppEnv, name string, appFile AppFile) (App, error) {
// NewApp creates a new App object
func NewApp(env AppEnv, name string, appFile AppFile) (App, error) {
domain := env["DOMAIN"]
appType, exists := env["TYPE"]
recipe, exists := env["RECIPE"]
if !exists {
return App{}, fmt.Errorf("%s is missing the TYPE env var", name)
recipe, exists = env["TYPE"]
if !exists {
return App{}, fmt.Errorf("%s is missing the TYPE env var?", name)
}
}
return App{
Name: name,
Domain: domain,
Type: appType,
Recipe: recipe,
Env: env,
Server: appFile.Server,
Path: appFile.Path,
}, nil
}
// LoadAppFiles gets all app files for a given set of servers or all servers
// LoadAppFiles gets all app files for a given set of servers or all servers.
func LoadAppFiles(servers ...string) (AppFiles, error) {
appFiles := make(AppFiles)
if len(servers) == 1 {
@ -142,7 +221,7 @@ func LoadAppFiles(servers ...string) (AppFiles, error) {
var err error
servers, err = GetAllFoldersInDirectory(SERVERS_DIR)
if err != nil {
return nil, err
return appFiles, err
}
}
}
@ -151,10 +230,11 @@ func LoadAppFiles(servers ...string) (AppFiles, error) {
for _, server := range servers {
serverDir := path.Join(SERVERS_DIR, server)
files, err := getAllFilesInDirectory(serverDir)
files, err := GetAllFilesInDirectory(serverDir)
if err != nil {
return nil, err
return appFiles, fmt.Errorf("server %s doesn't exist? Run \"abra server ls\" to check", server)
}
for _, file := range files {
appName := strings.TrimSuffix(file.Name(), ".env")
appFilePath := path.Join(SERVERS_DIR, server, file.Name())
@ -164,19 +244,20 @@ func LoadAppFiles(servers ...string) (AppFiles, error) {
}
}
}
return appFiles, nil
}
// GetApp loads an apps settings, reading it from file, in preparation to use it
//
// ONLY use when ready to use the env file to keep IO down
// GetApp loads an apps settings, reading it from file, in preparation to use
// it. It should only be used when ready to use the env file to keep IO
// operations down.
func GetApp(apps AppFiles, name AppName) (App, error) {
appFile, exists := apps[name]
if !exists {
return App{}, fmt.Errorf("cannot find app with name %s", name)
}
app, err := readAppEnvFile(appFile, name)
app, err := ReadAppEnvFile(appFile, name)
if err != nil {
return App{}, err
}
@ -184,8 +265,9 @@ func GetApp(apps AppFiles, name AppName) (App, error) {
return app, nil
}
// GetApps returns a slice of Apps with their env files read from a given slice of AppFiles
func GetApps(appFiles AppFiles) ([]App, error) {
// GetApps returns a slice of Apps with their env files read from a given
// slice of AppFiles.
func GetApps(appFiles AppFiles, recipeFilter string) ([]App, error) {
var apps []App
for name := range appFiles {
@ -193,7 +275,14 @@ func GetApps(appFiles AppFiles) ([]App, error) {
if err != nil {
return nil, err
}
apps = append(apps, app)
if recipeFilter != "" {
if app.Recipe == recipeFilter {
apps = append(apps, app)
}
} else {
apps = append(apps, app)
}
}
return apps, nil
@ -213,13 +302,13 @@ func GetAppServiceNames(appName string) ([]string, error) {
return serviceNames, err
}
composeFiles, err := GetAppComposeFiles(app.Type, app.Env)
composeFiles, err := GetComposeFiles(app.Recipe, app.Env)
if err != nil {
return serviceNames, err
}
opts := stack.Deploy{Composefiles: composeFiles}
compose, err := GetAppComposeConfig(app.Type, opts, app.Env)
compose, err := GetAppComposeConfig(app.Recipe, opts, app.Env)
if err != nil {
return serviceNames, err
}
@ -240,7 +329,7 @@ func GetAppNames() ([]string, error) {
return appNames, err
}
apps, err := GetApps(appFiles)
apps, err := GetApps(appFiles, "")
if err != nil {
return appNames, err
}
@ -252,7 +341,8 @@ func GetAppNames() ([]string, error) {
return appNames, nil
}
// TemplateAppEnvSample copies the example env file for the app into the users env files
// TemplateAppEnvSample copies the example env file for the app into the user's
// env files.
func TemplateAppEnvSample(recipeName, appName, server, domain string) error {
envSamplePath := path.Join(RECIPES_DIR, recipeName, ".env.sample")
envSample, err := ioutil.ReadFile(envSamplePath)
@ -261,27 +351,24 @@ func TemplateAppEnvSample(recipeName, appName, server, domain string) error {
}
appEnvPath := path.Join(ABRA_DIR, "servers", server, fmt.Sprintf("%s.env", appName))
if _, err := os.Stat(appEnvPath); os.IsExist(err) {
if _, err := os.Stat(appEnvPath); !os.IsNotExist(err) {
return fmt.Errorf("%s already exists?", appEnvPath)
}
err = ioutil.WriteFile(appEnvPath, envSample, 0664)
err = ioutil.WriteFile(appEnvPath, envSample, 0o664)
if err != nil {
return err
}
file, err := os.OpenFile(appEnvPath, os.O_RDWR, 0664)
if err != nil {
return err
}
defer file.Close()
tpl, err := template.ParseFiles(appEnvPath)
read, err := ioutil.ReadFile(appEnvPath)
if err != nil {
return err
}
if err := tpl.Execute(file, struct{ Name string }{recipeName}); err != nil {
newContents := strings.Replace(string(read), recipeName+".example.com", domain, -1)
err = ioutil.WriteFile(appEnvPath, []byte(newContents), 0)
if err != nil {
return err
}
@ -290,35 +377,49 @@ func TemplateAppEnvSample(recipeName, appName, server, domain string) error {
return nil
}
// SanitiseAppName makes a app name usable with Docker by replacing illegal characters
// SanitiseAppName makes an app name usable with Docker by replacing illegal
// characters.
func SanitiseAppName(name string) string {
return strings.ReplaceAll(name, ".", "_")
}
// GetAppStatuses queries servers to check the deployment status of given apps
func GetAppStatuses(appFiles AppFiles) (map[string]map[string]string, error) {
// GetAppStatuses queries servers to check the deployment status of given apps.
func GetAppStatuses(apps []App, MachineReadable bool) (map[string]map[string]string, error) {
statuses := make(map[string]map[string]string)
var unique []string
servers := make(map[string]struct{})
for _, appFile := range appFiles {
if _, ok := servers[appFile.Server]; !ok {
servers[appFile.Server] = struct{}{}
unique = append(unique, appFile.Server)
for _, app := range apps {
if _, ok := servers[app.Server]; !ok {
servers[app.Server] = struct{}{}
}
}
bar := formatter.CreateProgressbar(len(servers), "querying remote servers...")
var bar *progressbar.ProgressBar
if !MachineReadable {
bar = formatter.CreateProgressbar(len(servers), "querying remote servers...")
}
ch := make(chan stack.StackStatus, len(servers))
for server := range servers {
cl, err := client.New(server)
if err != nil {
return statuses, err
}
go func(s string) {
ch <- stack.GetAllDeployedServices(s)
bar.Add(1)
ch <- stack.GetAllDeployedServices(cl, s)
if !MachineReadable {
bar.Add(1)
}
}(server)
}
for range servers {
status := <-ch
if status.Err != nil {
return statuses, status.Err
}
for _, service := range status.Services {
result := make(map[string]string)
name := service.Spec.Labels[convert.LabelNamespace]
@ -327,7 +428,25 @@ func GetAppStatuses(appFiles AppFiles) (map[string]map[string]string, error) {
result["status"] = "deployed"
}
labelKey := fmt.Sprintf("coop-cloud.%s.version", name)
labelKey := fmt.Sprintf("coop-cloud.%s.chaos", name)
chaos, ok := service.Spec.Labels[labelKey]
if ok {
result["chaos"] = chaos
}
labelKey = fmt.Sprintf("coop-cloud.%s.chaos-version", name)
if chaosVersion, ok := service.Spec.Labels[labelKey]; ok {
result["chaosVersion"] = chaosVersion
}
labelKey = fmt.Sprintf("coop-cloud.%s.autoupdate", name)
if autoUpdate, ok := service.Spec.Labels[labelKey]; ok {
result["autoUpdate"] = autoUpdate
} else {
result["autoUpdate"] = "false"
}
labelKey = fmt.Sprintf("coop-cloud.%s.version", name)
if version, ok := service.Spec.Labels[labelKey]; ok {
result["version"] = version
} else {
@ -343,26 +462,56 @@ func GetAppStatuses(appFiles AppFiles) (map[string]map[string]string, error) {
return statuses, nil
}
// GetAppComposeFiles gets the list of compose files for an app which should be
// merged into a composetypes.Config while respecting the COMPOSE_FILE env var.
func GetAppComposeFiles(recipe string, appEnv AppEnv) ([]string, error) {
// ensurePathExists ensures that a path exists.
func ensurePathExists(path string) error {
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
return err
}
return nil
}
// GetComposeFiles gets the list of compose files for an app (or recipe if you
// don't already have an app) which should be merged into a composetypes.Config
// while respecting the COMPOSE_FILE env var.
func GetComposeFiles(recipe string, appEnv AppEnv) ([]string, error) {
var composeFiles []string
if _, ok := appEnv["COMPOSE_FILE"]; !ok {
logrus.Debug("no COMPOSE_FILE detected, loading compose.yml")
composeFileEnvVar, ok := appEnv["COMPOSE_FILE"]
if !ok {
path := fmt.Sprintf("%s/%s/compose.yml", RECIPES_DIR, recipe)
if err := ensurePathExists(path); err != nil {
return composeFiles, err
}
logrus.Debugf("no COMPOSE_FILE detected, loading default: %s", path)
composeFiles = append(composeFiles, path)
return composeFiles, nil
}
composeFileEnvVar := appEnv["COMPOSE_FILE"]
envVars := strings.Split(composeFileEnvVar, ":")
logrus.Debugf("COMPOSE_FILE detected (%s), loading %s", composeFileEnvVar, strings.Join(envVars, ", "))
for _, file := range strings.Split(composeFileEnvVar, ":") {
if !strings.Contains(composeFileEnvVar, ":") {
path := fmt.Sprintf("%s/%s/%s", RECIPES_DIR, recipe, composeFileEnvVar)
if err := ensurePathExists(path); err != nil {
return composeFiles, err
}
logrus.Debugf("COMPOSE_FILE detected, loading %s", path)
composeFiles = append(composeFiles, path)
return composeFiles, nil
}
numComposeFiles := strings.Count(composeFileEnvVar, ":") + 1
envVars := strings.SplitN(composeFileEnvVar, ":", numComposeFiles)
if len(envVars) != numComposeFiles {
return composeFiles, fmt.Errorf("COMPOSE_FILE (=\"%s\") parsing failed?", composeFileEnvVar)
}
for _, file := range envVars {
path := fmt.Sprintf("%s/%s/%s", RECIPES_DIR, recipe, file)
if err := ensurePathExists(path); err != nil {
return composeFiles, err
}
composeFiles = append(composeFiles, path)
}
logrus.Debugf("COMPOSE_FILE detected (%s), loading %s", composeFileEnvVar, strings.Join(envVars, ", "))
logrus.Debugf("retrieved %s configs for %s", strings.Join(composeFiles, ", "), recipe)
return composeFiles, nil
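A sketch mirroring the test cases further down (it assumes the abra-test-recipe is present under RECIPES_DIR):

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/config"
)

func main() {
	appEnv := map[string]string{"COMPOSE_FILE": "compose.yml:compose.extra_secret.yml"}
	files, err := config.GetComposeFiles("abra-test-recipe", appEnv)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Expected: [<RECIPES_DIR>/abra-test-recipe/compose.yml <RECIPES_DIR>/abra-test-recipe/compose.extra_secret.yml]
	fmt.Println(files)
}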
@ -381,3 +530,98 @@ func GetAppComposeConfig(recipe string, opts stack.Deploy, appEnv AppEnv) (*comp
return compose, nil
}
// ExposeAllEnv exposes all env variables to the app container
func ExposeAllEnv(stackName string, compose *composetypes.Config, appEnv AppEnv) {
for _, service := range compose.Services {
if service.Name == "app" {
logrus.Debugf("Add the following environment to the app service config of %s:", stackName)
for k, v := range appEnv {
_, exists := service.Environment[k]
if !exists {
value := v
service.Environment[k] = &value
logrus.Debugf("Add Key: %s Value: %s to %s", k, value, stackName)
}
}
}
}
}
// SetRecipeLabel adds the label 'coop-cloud.${STACK_NAME}.recipe=${RECIPE}' to the app container
// to signal which recipe is connected to the deployed app
func SetRecipeLabel(compose *composetypes.Config, stackName string, recipe string) {
for _, service := range compose.Services {
if service.Name == "app" {
logrus.Debugf("set recipe label 'coop-cloud.%s.recipe' to %s for %s", stackName, recipe, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.recipe", stackName)
service.Deploy.Labels[labelKey] = recipe
}
}
}
// SetChaosLabel adds the label 'coop-cloud.${STACK_NAME}.chaos=true/false' to the app container
// to signal if the app is deployed in chaos mode
func SetChaosLabel(compose *composetypes.Config, stackName string, chaos bool) {
for _, service := range compose.Services {
if service.Name == "app" {
logrus.Debugf("set label 'coop-cloud.%s.chaos' to %v for %s", stackName, chaos, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.chaos", stackName)
service.Deploy.Labels[labelKey] = strconv.FormatBool(chaos)
}
}
}
// SetChaosVersionLabel adds the label 'coop-cloud.${STACK_NAME}.chaos-version=$(GIT_COMMIT)' to the app container
func SetChaosVersionLabel(compose *composetypes.Config, stackName string, chaosVersion string) {
for _, service := range compose.Services {
if service.Name == "app" {
logrus.Debugf("set label 'coop-cloud.%s.chaos-version' to %v for %s", stackName, chaosVersion, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.chaos-version", stackName)
service.Deploy.Labels[labelKey] = chaosVersion
}
}
}
// SetUpdateLabel adds the ENABLE_AUTO_UPDATE env var as a label to
// enable/disable the auto-update process for this app. If the variable is not
// set, the auto-update process is disabled by default.
func SetUpdateLabel(compose *composetypes.Config, stackName string, appEnv AppEnv) {
for _, service := range compose.Services {
if service.Name == "app" {
enable_auto_update, exists := appEnv["ENABLE_AUTO_UPDATE"]
if !exists {
enable_auto_update = "false"
}
logrus.Debugf("set label 'coop-cloud.%s.autoupdate' to %s for %s", stackName, enable_auto_update, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.autoupdate", stackName)
service.Deploy.Labels[labelKey] = enable_auto_update
}
}
}
// GetLabel reads docker labels in the format of "coop-cloud.${STACK_NAME}.${LABEL}" from the local compose files
func GetLabel(compose *composetypes.Config, stackName string, label string) string {
for _, service := range compose.Services {
if service.Name == "app" {
labelKey := fmt.Sprintf("coop-cloud.%s.%s", stackName, label)
logrus.Debugf("get label '%s'", labelKey)
if labelValue, ok := service.Deploy.Labels[labelKey]; ok {
return labelValue
}
}
}
logrus.Debugf("no %s label found for %s", label, stackName)
return ""
}
// GetTimeoutFromLabel reads the timeout value from the docker label "coop-cloud.${STACK_NAME}.timeout" and returns 50 as the default value
func GetTimeoutFromLabel(compose *composetypes.Config, stackName string) (int, error) {
timeout := 50 // Default Timeout
var err error = nil
if timeoutLabel := GetLabel(compose, stackName, "timeout"); timeoutLabel != "" {
logrus.Debugf("timeout label: %s", timeoutLabel)
timeout, err = strconv.Atoi(timeoutLabel)
}
return timeout, err
}
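A minimal sketch (hypothetical stack name and label value, not from the diff) of how the label helpers above resolve a timeout from a compose config:

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/config"
	composetypes "github.com/docker/cli/cli/compose/types"
)

func main() {
	compose := &composetypes.Config{
		Services: []composetypes.ServiceConfig{
			{
				Name: "app",
				Deploy: composetypes.DeployConfig{
					Labels: composetypes.Labels{"coop-cloud.myapp_example_com.timeout": "120"},
				},
			},
		},
	}
	// Prints "120 <nil>"; without the label, GetTimeoutFromLabel falls back to 50.
	timeout, err := config.GetTimeoutFromLabel(compose, "myapp_example_com")
	fmt.Println(timeout, err)
}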

View File

@ -1,36 +1,197 @@
package config
package config_test
import (
"encoding/json"
"fmt"
"reflect"
"testing"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/recipe"
"github.com/docker/docker/api/types/filters"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
)
func TestNewApp(t *testing.T) {
app, err := newApp(expectedAppEnv, appName, expectedAppFile)
app, err := config.NewApp(ExpectedAppEnv, AppName, ExpectedAppFile)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, expectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp)
if !reflect.DeepEqual(app, ExpectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, ExpectedApp)
}
}
func TestReadAppEnvFile(t *testing.T) {
app, err := readAppEnvFile(expectedAppFile, appName)
app, err := config.ReadAppEnvFile(ExpectedAppFile, AppName)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, expectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp)
if !reflect.DeepEqual(app, ExpectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, ExpectedApp)
}
}
func TestGetApp(t *testing.T) {
app, err := GetApp(expectedAppFiles, appName)
app, err := config.GetApp(ExpectedAppFiles, AppName)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, expectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp)
if !reflect.DeepEqual(app, ExpectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, ExpectedApp)
}
}
func TestGetComposeFiles(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
tests := []struct {
appEnv map[string]string
composeFiles []string
}{
{
map[string]string{},
[]string{
fmt.Sprintf("%s/%s/compose.yml", config.RECIPES_DIR, r.Name),
},
},
{
map[string]string{"COMPOSE_FILE": "compose.yml"},
[]string{
fmt.Sprintf("%s/%s/compose.yml", config.RECIPES_DIR, r.Name),
},
},
{
map[string]string{"COMPOSE_FILE": "compose.extra_secret.yml"},
[]string{
fmt.Sprintf("%s/%s/compose.extra_secret.yml", config.RECIPES_DIR, r.Name),
},
},
{
map[string]string{"COMPOSE_FILE": "compose.yml:compose.extra_secret.yml"},
[]string{
fmt.Sprintf("%s/%s/compose.yml", config.RECIPES_DIR, r.Name),
fmt.Sprintf("%s/%s/compose.extra_secret.yml", config.RECIPES_DIR, r.Name),
},
},
}
for _, test := range tests {
composeFiles, err := config.GetComposeFiles(r.Name, test.appEnv)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, composeFiles, test.composeFiles)
}
}
func TestGetComposeFilesError(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
tests := []struct{ appEnv map[string]string }{
{map[string]string{"COMPOSE_FILE": "compose.yml::compose.foo.yml"}},
{map[string]string{"COMPOSE_FILE": "doesnt.exist.yml"}},
}
for _, test := range tests {
_, err := config.GetComposeFiles(r.Name, test.appEnv)
if err == nil {
t.Fatalf("should have failed: %v", test.appEnv)
}
}
}
func TestFilters(t *testing.T) {
oldDir := config.RECIPES_DIR
config.RECIPES_DIR = "./testdir"
defer func() {
config.RECIPES_DIR = oldDir
}()
app, err := config.NewApp(config.AppEnv{
"DOMAIN": "test.example.com",
"RECIPE": "test-recipe",
}, "test_example_com", config.AppFile{
Path: "./testdir/filtertest.end",
Server: "local",
})
if err != nil {
t.Fatal(err)
}
f, err := app.Filters(false, false)
if err != nil {
t.Error(err)
}
compareFilter(t, f, map[string]map[string]bool{
"name": {
"test_example_com": true,
},
})
f2, err := app.Filters(false, true)
if err != nil {
t.Error(err)
}
compareFilter(t, f2, map[string]map[string]bool{
"name": {
"^test_example_com": true,
},
})
f3, err := app.Filters(true, false)
if err != nil {
t.Error(err)
}
compareFilter(t, f3, map[string]map[string]bool{
"name": {
"test_example_com_bar": true,
"test_example_com_foo": true,
},
})
f4, err := app.Filters(true, true)
if err != nil {
t.Error(err)
}
compareFilter(t, f4, map[string]map[string]bool{
"name": {
"^test_example_com_bar": true,
"^test_example_com_foo": true,
},
})
f5, err := app.Filters(false, false, "foo")
if err != nil {
t.Error(err)
}
compareFilter(t, f5, map[string]map[string]bool{
"name": {
"test_example_com_foo": true,
},
})
}
func compareFilter(t *testing.T, f1 filters.Args, f2 map[string]map[string]bool) {
t.Helper()
j1, err := f1.MarshalJSON()
if err != nil {
t.Error(err)
}
j2, err := json.Marshal(f2)
if err != nil {
t.Error(err)
}
if diff := cmp.Diff(string(j2), string(j1)); diff != "" {
t.Errorf("filters mismatch (-want +got):\n%s", diff)
}
}

View File

@ -8,20 +8,41 @@ import (
"os"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"github.com/Autonomic-Cooperative/godotenv"
"git.coopcloud.tech/coop-cloud/godotenv"
"github.com/sirupsen/logrus"
)
var ABRA_DIR = os.ExpandEnv("$HOME/.abra")
// getBaseDir retrieves the Abra base directory.
func getBaseDir() string {
home := os.ExpandEnv("$HOME/.abra")
if customAbraDir, exists := os.LookupEnv("ABRA_DIR"); exists && customAbraDir != "" {
home = customAbraDir
}
return home
}
var ABRA_DIR = getBaseDir()
var SERVERS_DIR = path.Join(ABRA_DIR, "servers")
var RECIPES_DIR = path.Join(ABRA_DIR, "apps")
var RECIPES_DIR = path.Join(ABRA_DIR, "recipes")
var VENDOR_DIR = path.Join(ABRA_DIR, "vendor")
var BACKUP_DIR = path.Join(ABRA_DIR, "backups")
var CATALOGUE_DIR = path.Join(ABRA_DIR, "catalogue")
var RECIPES_JSON = path.Join(ABRA_DIR, "catalogue", "recipes.json")
var REPOS_BASE_URL = "https://git.coopcloud.tech/coop-cloud"
var CATALOGUE_JSON_REPO_NAME = "recipes-catalogue-json"
var SSH_URL_TEMPLATE = "ssh://git@git.coopcloud.tech:2222/coop-cloud/%s.git"
var BackupbotLabel = "coop-cloud.backupbot.enabled"
// envVarModifiers is a list of env var modifier strings. These are added to
// env vars as comments and modify their processing by Abra, e.g. determining
// how long secrets should be.
var envVarModifiers = []string{"length"}
// GetServers retrieves all servers.
func GetServers() ([]string, error) {
var servers []string
@ -38,16 +59,30 @@ func GetServers() ([]string, error) {
// ReadEnv loads an app environment into a map.
func ReadEnv(filePath string) (AppEnv, error) {
var envFile AppEnv
var envVars AppEnv
envFile, err := godotenv.Read(filePath)
envVars, _, err := godotenv.Read(filePath)
if err != nil {
return nil, err
}
logrus.Debugf("read %s from %s", envFile, filePath)
logrus.Debugf("read %s from %s", envVars, filePath)
return envFile, nil
return envVars, nil
}
// ReadEnvWithModifiers loads an app environment and its modifiers into two different maps.
func ReadEnvWithModifiers(filePath string) (AppEnv, AppModifiers, error) {
var envVars AppEnv
envVars, mods, err := godotenv.Read(filePath)
if err != nil {
return nil, mods, err
}
logrus.Debugf("read %s from %s", envVars, filePath)
return envVars, mods, nil
}
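A hedged sketch of the modifier parsing; the "# length=10" comment syntax is an assumption inferred from the abra-test-recipe tests further down:

package main

import (
	"fmt"
	"os"

	"coopcloud.tech/abra/pkg/config"
)

func main() {
	tmp, err := os.CreateTemp("", "*.env")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer os.Remove(tmp.Name())
	// Assumed modifier syntax: a trailing comment on the env var line.
	if _, err := tmp.WriteString("SECRET_TEST_PASS_TWO_VERSION=v1 # length=10\n"); err != nil {
		fmt.Println(err)
		return
	}
	tmp.Close()

	envVars, mods, err := config.ReadEnvWithModifiers(tmp.Name())
	// Expected (under the assumption above): "v1 10 <nil>"
	fmt.Println(envVars["SECRET_TEST_PASS_TWO_VERSION"], mods["SECRET_TEST_PASS_TWO_VERSION"]["length"], err)
}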
// ReadServerNames retrieves all server names.
@ -63,8 +98,8 @@ func ReadServerNames() ([]string, error) {
return serverNames, nil
}
// getAllFilesInDirectory returns filenames of all files in directory
func getAllFilesInDirectory(directory string) ([]fs.FileInfo, error) {
// GetAllFilesInDirectory returns the filenames of all files in a directory
func GetAllFilesInDirectory(directory string) ([]fs.FileInfo, error) {
var realFiles []fs.FileInfo
files, err := ioutil.ReadDir(directory)
@ -137,22 +172,107 @@ func ReadAbraShEnvVars(abraSh string) (map[string]string, error) {
}
return envVars, err
}
defer file.Close()
exportRegex, err := regexp.Compile(`^export\s+(\w+=\w+)`)
if err != nil {
return envVars, err
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, "export") {
splitVals := strings.Split(line, "export ")
txt := scanner.Text()
if exportRegex.MatchString(txt) {
splitVals := strings.Split(txt, "export ")
envVarDef := splitVals[len(splitVals)-1]
keyVal := strings.Split(envVarDef, "=")
if len(keyVal) != 2 {
return envVars, fmt.Errorf("couldn't parse %s", line)
return envVars, fmt.Errorf("couldn't parse %s", txt)
}
envVars[keyVal[0]] = keyVal[1]
}
}
logrus.Debugf("read %s from %s", envVars, abraSh)
if len(envVars) > 0 {
logrus.Debugf("read %s from %s", envVars, abraSh)
} else {
logrus.Debugf("read 0 env var exports from %s", abraSh)
}
return envVars, nil
}
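// EnvVar is a recipe env var and whether it is present in an app env file.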
type EnvVar struct {
Name string
Present bool
}
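// CheckEnv checks which of the env vars in the recipe's .env.sample are present in the app env.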
func CheckEnv(app App) ([]EnvVar, error) {
var envVars []EnvVar
envSamplePath := path.Join(RECIPES_DIR, app.Recipe, ".env.sample")
if _, err := os.Stat(envSamplePath); err != nil {
if os.IsNotExist(err) {
return envVars, fmt.Errorf("%s does not exist?", envSamplePath)
}
return envVars, err
}
envSample, err := ReadEnv(envSamplePath)
if err != nil {
return envVars, err
}
var keys []string
for key := range envSample {
keys = append(keys, key)
}
sort.Strings(keys)
for _, key := range keys {
if _, ok := app.Env[key]; ok {
envVars = append(envVars, EnvVar{Name: key, Present: true})
} else {
envVars = append(envVars, EnvVar{Name: key, Present: false})
}
}
return envVars, nil
}
// ReadAbraShCmdNames reads the names of commands.
func ReadAbraShCmdNames(abraSh string) ([]string, error) {
var cmdNames []string
file, err := os.Open(abraSh)
if err != nil {
if os.IsNotExist(err) {
return cmdNames, nil
}
return cmdNames, err
}
defer file.Close()
cmdNameRegex, err := regexp.Compile(`(\w+)(\(\).*\{)`)
if err != nil {
return cmdNames, err
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
matches := cmdNameRegex.FindStringSubmatch(line)
if len(matches) > 0 {
cmdNames = append(cmdNames, matches[1])
}
}
if len(cmdNames) > 0 {
logrus.Debugf("read %s from %s", strings.Join(cmdNames, " "), abraSh)
} else {
logrus.Debugf("read 0 command names from %s", abraSh)
}
return cmdNames, nil
}

View File

@ -1,60 +1,69 @@
package config
package config_test
import (
"fmt"
"os"
"path"
"reflect"
"slices"
"strings"
"testing"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/recipe"
)
var testFolder = os.ExpandEnv("$PWD/../../tests/resources/test_folder")
var validAbraConf = os.ExpandEnv("$PWD/../../tests/resources/valid_abra_config")
var (
TestFolder = os.ExpandEnv("$PWD/../../tests/resources/test_folder")
ValidAbraConf = os.ExpandEnv("$PWD/../../tests/resources/valid_abra_config")
)
// make sure these are in alphabetical order
var tFolders = []string{"folder1", "folder2"}
var tFiles = []string{"bar.env", "foo.env"}
var (
TFolders = []string{"folder1", "folder2"}
TFiles = []string{"bar.env", "foo.env"}
)
var appName = "ecloud"
var serverName = "evil.corp"
var (
AppName = "ecloud"
ServerName = "evil.corp"
)
var expectedAppEnv = AppEnv{
var ExpectedAppEnv = config.AppEnv{
"DOMAIN": "ecloud.evil.corp",
"TYPE": "ecloud",
"RECIPE": "ecloud",
}
var expectedApp = App{
Name: appName,
Type: expectedAppEnv["TYPE"],
Domain: expectedAppEnv["DOMAIN"],
Env: expectedAppEnv,
Path: expectedAppFile.Path,
Server: expectedAppFile.Server,
var ExpectedApp = config.App{
Name: AppName,
Recipe: ExpectedAppEnv["RECIPE"],
Domain: ExpectedAppEnv["DOMAIN"],
Env: ExpectedAppEnv,
Path: ExpectedAppFile.Path,
Server: ExpectedAppFile.Server,
}
var expectedAppFile = AppFile{
Path: path.Join(validAbraConf, "servers", serverName, appName+".env"),
Server: serverName,
var ExpectedAppFile = config.AppFile{
Path: path.Join(ValidAbraConf, "servers", ServerName, AppName+".env"),
Server: ServerName,
}
var expectedAppFiles = map[string]AppFile{
appName: expectedAppFile,
var ExpectedAppFiles = map[string]config.AppFile{
AppName: ExpectedAppFile,
}
// var expectedServerNames = []string{"evil.corp"}
func TestGetAllFoldersInDirectory(t *testing.T) {
folders, err := GetAllFoldersInDirectory(testFolder)
folders, err := config.GetAllFoldersInDirectory(TestFolder)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(folders, tFolders) {
t.Fatalf("did not get expected folders. Expected: (%s), Got: (%s)", strings.Join(tFolders, ","), strings.Join(folders, ","))
if !reflect.DeepEqual(folders, TFolders) {
t.Fatalf("did not get expected folders. Expected: (%s), Got: (%s)", strings.Join(TFolders, ","), strings.Join(folders, ","))
}
}
func TestGetAllFilesInDirectory(t *testing.T) {
files, err := getAllFilesInDirectory(testFolder)
files, err := config.GetAllFilesInDirectory(TestFolder)
if err != nil {
t.Fatal(err)
}
@ -62,23 +71,205 @@ func TestGetAllFilesInDirectory(t *testing.T) {
for _, file := range files {
fileNames = append(fileNames, file.Name())
}
if !reflect.DeepEqual(fileNames, tFiles) {
t.Fatalf("did not get expected files. Expected: (%s), Got: (%s)", strings.Join(tFiles, ","), strings.Join(fileNames, ","))
if !reflect.DeepEqual(fileNames, TFiles) {
t.Fatalf("did not get expected files. Expected: (%s), Got: (%s)", strings.Join(TFiles, ","), strings.Join(fileNames, ","))
}
}
func TestReadEnv(t *testing.T) {
env, err := ReadEnv(expectedAppFile.Path)
env, err := config.ReadEnv(ExpectedAppFile.Path)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(env, expectedAppEnv) {
if !reflect.DeepEqual(env, ExpectedAppEnv) {
t.Fatalf(
"did not get expected application settings. Expected: DOMAIN=%s TYPE=%s; Got: DOMAIN=%s TYPE=%s",
expectedAppEnv["DOMAIN"],
expectedAppEnv["TYPE"],
"did not get expected application settings. Expected: DOMAIN=%s RECIPE=%s; Got: DOMAIN=%s RECIPE=%s",
ExpectedAppEnv["DOMAIN"],
ExpectedAppEnv["RECIPE"],
env["DOMAIN"],
env["TYPE"],
env["RECIPE"],
)
}
}
func TestReadAbraShEnvVars(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, r.Name, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
if err != nil {
t.Fatal(err)
}
if len(abraShEnv) == 0 {
t.Error("at least one env var should be exported")
}
if _, ok := abraShEnv["INNER_FOO"]; ok {
t.Error("INNER_FOO should not be exported")
}
if _, ok := abraShEnv["INNER_BAZ"]; ok {
t.Error("INNER_BAZ should not be exported")
}
if _, ok := abraShEnv["OUTER_FOO"]; !ok {
t.Error("OUTER_FOO should be exported")
}
}
func TestReadAbraShCmdNames(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, r.Name, "abra.sh")
cmdNames, err := config.ReadAbraShCmdNames(abraShPath)
if err != nil {
t.Fatal(err)
}
if len(cmdNames) == 0 {
t.Error("at least one command name should be found")
}
expectedCmdNames := []string{"test_cmd", "test_cmd_args"}
for _, cmdName := range expectedCmdNames {
if !slices.Contains(cmdNames, cmdName) {
t.Fatalf("%s should have been found in %s", cmdName, abraShPath)
}
}
}
func TestCheckEnv(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
envSamplePath := path.Join(config.RECIPES_DIR, r.Name, ".env.sample")
envSample, err := config.ReadEnv(envSamplePath)
if err != nil {
t.Fatal(err)
}
app := config.App{
Name: "test-app",
Recipe: r.Name,
Domain: "example.com",
Env: envSample,
Path: "example.com.env",
Server: "example.com",
}
envVars, err := config.CheckEnv(app)
if err != nil {
t.Fatal(err)
}
for _, envVar := range envVars {
if !envVar.Present {
t.Fatalf("%s should be present", envVar.Name)
}
}
}
func TestCheckEnvError(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
envSamplePath := path.Join(config.RECIPES_DIR, r.Name, ".env.sample")
envSample, err := config.ReadEnv(envSamplePath)
if err != nil {
t.Fatal(err)
}
delete(envSample, "DOMAIN")
app := config.App{
Name: "test-app",
Recipe: r.Name,
Domain: "example.com",
Env: envSample,
Path: "example.com.env",
Server: "example.com",
}
envVars, err := config.CheckEnv(app)
if err != nil {
t.Fatal(err)
}
for _, envVar := range envVars {
if envVar.Name == "DOMAIN" && envVar.Present {
t.Fatalf("%s should not be present", envVar.Name)
}
}
}
func TestEnvVarCommentsRemoved(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
envSamplePath := path.Join(config.RECIPES_DIR, r.Name, ".env.sample")
envSample, err := config.ReadEnv(envSamplePath)
if err != nil {
t.Fatal(err)
}
envVar, exists := envSample["WITH_COMMENT"]
if !exists {
t.Fatal("WITH_COMMENT env var should be present in .env.sample")
}
if strings.Contains(envVar, "should be removed") {
t.Fatalf("comment from '%s' should be removed", envVar)
}
envVar, exists = envSample["SECRET_TEST_PASS_TWO_VERSION"]
if !exists {
t.Fatal("WITH_COMMENT env var should be present in .env.sample")
}
if strings.Contains(envVar, "length") {
t.Fatal("comment from env var SECRET_TEST_PASS_TWO_VERSION should have been removed")
}
}
func TestEnvVarModifiersIncluded(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
envSamplePath := path.Join(config.RECIPES_DIR, r.Name, ".env.sample")
envSample, modifiers, err := config.ReadEnvWithModifiers(envSamplePath)
if err != nil {
t.Fatal(err)
}
if !strings.Contains(envSample["SECRET_TEST_PASS_TWO_VERSION"], "v1") {
t.Errorf("value should be 'v1', got: '%s'", envSample["SECRET_TEST_PASS_TWO_VERSION"])
}
if modifiers == nil || modifiers["SECRET_TEST_PASS_TWO_VERSION"] == nil {
t.Errorf("no modifiers included")
} else {
if modifiers["SECRET_TEST_PASS_TWO_VERSION"]["length"] != "10" {
t.Errorf("length modifier should be '10', got: '%s'", modifiers["SECRET_TEST_PASS_TWO_VERSION"]["length"])
}
}
}
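For orientation, a minimal sketch of how the modifier-aware reader might be used outside the test suite. The config import path and the inline `# length=10` comment syntax are assumptions inferred from the assertions above, not taken from the actual test recipe.

package main

import (
    "fmt"
    "log"

    "coopcloud.tech/abra/pkg/config"
)

func main() {
    // Assumed sample line: SECRET_TEST_PASS_TWO_VERSION=v1 # length=10
    env, modifiers, err := config.ReadEnvWithModifiers("/path/to/.env.sample")
    if err != nil {
        log.Fatal(err)
    }

    // Inline comments are stripped from the value itself...
    fmt.Println(env["SECRET_TEST_PASS_TWO_VERSION"]) // "v1"

    // ...while recognised modifiers are returned in a separate map.
    fmt.Println(modifiers["SECRET_TEST_PASS_TWO_VERSION"]["length"]) // "10"
}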


@@ -0,0 +1,2 @@
RECIPE=test-recipe
DOMAIN=test.example.com


@@ -0,0 +1,6 @@
version: "3.8"
services:
foo:
image: debian
bar:
image: debian


@@ -13,10 +13,10 @@ import (
"github.com/sirupsen/logrus"
)
// GetContainer retrieves a container. If prompt is true and the retrieved count
// of containers does not match 1, then a prompt is presented to let the user
// choose. A count of 0 is handled gracefully.
func GetContainer(c context.Context, cl *client.Client, filters filters.Args, prompt bool) (types.Container, error) {
// GetContainer retrieves a container. If noInput is false and the retrieved
// count of containers does not match 1, then a prompt is presented to let the
// user choose. A count of 0 is handled gracefully.
func GetContainer(c context.Context, cl *client.Client, filters filters.Args, noInput bool) (types.Container, error) {
containerOpts := types.ContainerListOptions{Filters: filters}
containers, err := cl.ContainerList(c, containerOpts)
if err != nil {
@@ -28,7 +28,7 @@ func GetContainer(c context.Context, cl *client.Client, filters filters.Args, pr
return types.Container{}, fmt.Errorf("no containers matching the %v filter found?", filter)
}
if len(containers) != 1 {
if len(containers) > 1 {
var containersRaw []string
for _, container := range containers {
containerName := strings.Join(container.Names, " ")
@@ -37,7 +37,7 @@ func GetContainer(c context.Context, cl *client.Client, filters filters.Args, pr
containersRaw = append(containersRaw, fmt.Sprintf("%s (created %v)", trimmed, created))
}
if !prompt {
if noInput {
err := fmt.Errorf("expected 1 container but found %v: %s", len(containers), strings.Join(containersRaw, " "))
return types.Container{}, err
}
@@ -68,3 +68,15 @@ func GetContainer(c context.Context, cl *client.Client, filters filters.Args, pr
return containers[0], nil
}
// GetContainerFromStackAndService retrieves the container for the given stack and service.
func GetContainerFromStackAndService(cl *client.Client, stack, service string) (types.Container, error) {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", stack, service))
container, err := GetContainer(context.Background(), cl, filters, true)
if err != nil {
return types.Container{}, err
}
return container, nil
}
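A rough usage sketch for the new helper. The coopcloud.tech/abra/pkg/container import path and the plain Docker SDK client are assumptions; abra normally constructs clients through its own wrapper, and the stack/service names are placeholders.

package main

import (
    "fmt"
    "log"

    "coopcloud.tech/abra/pkg/container" // assumed import path for this helper
    "github.com/docker/docker/client"
)

func main() {
    // Assumption: a plain Docker SDK client against the local daemon.
    cl, err := client.NewClientWithOpts(client.FromEnv)
    if err != nil {
        log.Fatal(err)
    }

    // Filters on container names matching ^<stack>_<service>; because the helper
    // passes noInput=true, an ambiguous match is returned as an error rather than
    // triggering an interactive prompt.
    c, err := container.GetContainerFromStackAndService(cl, "myapp_example_com", "app")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(c.ID)
}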


@@ -8,22 +8,19 @@ import (
"github.com/docker/cli/cli/context"
contextStore "github.com/docker/cli/cli/context/store"
cliflags "github.com/docker/cli/cli/flags"
"github.com/moby/term"
)
func NewDefaultDockerContextStore() *command.ContextStoreWithDefault {
_, _, stderr := term.StdStreams()
dockerConfig := dConfig.LoadDefaultConfigFile(stderr)
contextDir := dConfig.ContextStoreDir()
storeConfig := command.DefaultContextStoreConfig()
store := newContextStore(contextDir, storeConfig)
opts := &cliflags.CommonOptions{Context: "default"}
opts := &cliflags.ClientOptions{Context: "default"}
dockerContextStore := &command.ContextStoreWithDefault{
Store: store,
Resolver: func() (*command.DefaultContext, error) {
return command.ResolveDefaultContext(opts, dockerConfig, storeConfig, stderr)
return command.ResolveDefaultContext(opts, storeConfig)
},
}


@@ -1,105 +0,0 @@
package dns
import (
"context"
"fmt"
"net"
"os"
"time"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
)
// NewToken constructs a new DNS provider token.
func NewToken(provider, providerTokenEnvVar string) (string, error) {
if token, present := os.LookupEnv(providerTokenEnvVar); present {
return token, nil
}
logrus.Debugf("no %s in environment, asking via stdin", providerTokenEnvVar)
var token string
prompt := &survey.Input{
Message: fmt.Sprintf("%s API token?", provider),
}
if err := survey.AskOne(prompt, &token); err != nil {
return "", err
}
return token, nil
}
// EnsureIPv4 ensures that an ipv4 address is set for a domain name
func EnsureIPv4(domainName string) (string, error) {
var ipv4 string
// comrade librehosters DNS resolver -> https://www.privacy-handbuch.de/handbuch_93d.htm
freifunkDNS := "5.1.66.255:53"
resolver := &net.Resolver{
PreferGo: false,
Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
d := net.Dialer{
Timeout: time.Millisecond * time.Duration(10000),
}
return d.DialContext(ctx, "udp", freifunkDNS)
},
}
logrus.Debugf("created DNS resolver via %s", freifunkDNS)
ctx := context.Background()
ips, err := resolver.LookupIPAddr(ctx, domainName)
if err != nil {
return ipv4, err
}
if len(ips) == 0 {
return ipv4, fmt.Errorf("unable to retrieve ipv4 address for %s", domainName)
}
ipv4 = ips[0].IP.To4().String()
logrus.Debugf("discovered the following ipv4 addr: %s", ipv4)
return ipv4, nil
}
// EnsureDomainsResolveSameIPv4 ensures that domains resolve to the same ipv4 address
func EnsureDomainsResolveSameIPv4(domainName, server string) (string, error) {
var ipv4 string
domainIPv4, err := EnsureIPv4(domainName)
if err != nil {
return ipv4, err
}
if domainIPv4 == "" {
return ipv4, fmt.Errorf("cannot resolve ipv4 for %s?", domainName)
}
serverIPv4, err := EnsureIPv4(server)
if err != nil {
return ipv4, err
}
if serverIPv4 == "" {
return ipv4, fmt.Errorf("cannot resolve ipv4 for %s?", server)
}
if domainIPv4 != serverIPv4 {
err := "app domain %s (%s) does not appear to resolve to app server %s (%s)?"
return ipv4, fmt.Errorf(err, domainName, domainIPv4, server, serverIPv4)
}
return ipv4, nil
}
// GetTTL parses a ttl string into a duration
func GetTTL(ttl string) (time.Duration, error) {
val, err := time.ParseDuration(ttl)
if err != nil {
return val, err
}
return val, nil
}

pkg/dns/dns.go (new file, 55 lines)

@@ -0,0 +1,55 @@
package dns
import (
"fmt"
"net"
)
// EnsureIPv4 ensures that an ipv4 address is set for a domain name
func EnsureIPv4(domainName string) (string, error) {
ipv4, err := net.ResolveIPAddr("ip4", domainName)
if err != nil {
return "", err
}
// NOTE(d1): e.g. when there is only an ipv6 record available
if ipv4 == nil {
return "", fmt.Errorf("unable to resolve ipv4 address for %s", domainName)
}
return ipv4.String(), nil
}
// EnsureDomainsResolveSameIPv4 ensures that domains resolve to the same ipv4 address
func EnsureDomainsResolveSameIPv4(domainName, server string) (string, error) {
if server == "default" || server == "local" {
return "", nil
}
var ipv4 string
domainIPv4, err := EnsureIPv4(domainName)
if err != nil {
return ipv4, err
}
if domainIPv4 == "" {
return ipv4, fmt.Errorf("cannot resolve ipv4 for %s?", domainName)
}
serverIPv4, err := EnsureIPv4(server)
if err != nil {
return ipv4, err
}
if serverIPv4 == "" {
return ipv4, fmt.Errorf("cannot resolve ipv4 for %s?", server)
}
if domainIPv4 != serverIPv4 {
err := "app domain %s (%s) does not appear to resolve to app server %s (%s)?"
return ipv4, fmt.Errorf(err, domainName, domainIPv4, server, serverIPv4)
}
return ipv4, nil
}
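A small sketch of how the rewritten resolver helpers might be called; the domain names are placeholders.

package main

import (
    "fmt"
    "log"

    "coopcloud.tech/abra/pkg/dns"
)

func main() {
    // "default" and "local" servers short-circuit the check entirely (see above).
    if _, err := dns.EnsureDomainsResolveSameIPv4("app.example.com", "local"); err != nil {
        log.Fatal(err)
    }

    // Otherwise both names must resolve to the same ipv4 address.
    ipv4, err := dns.EnsureDomainsResolveSameIPv4("app.example.com", "server.example.com")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(ipv4)
}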

pkg/dns/dns_test.go (new file, 64 lines)

@@ -0,0 +1,64 @@
package dns
import (
"fmt"
"testing"
"gotest.tools/v3/assert"
)
func TestEnsureDomainsResolveSameIPv4(t *testing.T) {
tests := []struct {
domainName string
serverName string
shouldValidate bool
}{
// NOTE(d1): DNS records get checked, so use something that is maintained
// within the federation. If you're here because of a failing test, try
// `dig +short <domain>` to ensure stuff matches first! If flakiness
// becomes an issue we can look into mocking
{"docs.coopcloud.tech", "coopcloud.tech", true},
{"docs.coopcloud.tech", "swarm.autonomic.zone", true},
// NOTE(d1): special case handling for "--local"
{"", "default", true},
{"", "local", true},
{"", "", false},
{"123", "", false},
}
for _, test := range tests {
_, err := EnsureDomainsResolveSameIPv4(test.domainName, test.serverName)
if err != nil && test.shouldValidate {
t.Fatal(err)
}
if err == nil && !test.shouldValidate {
t.Fatal(fmt.Errorf("should have failed but did not: %v", test))
}
}
}
func TestEnsureIpv4(t *testing.T) {
// NOTE(d1): DNS records get checked, so use something that is maintained
// within the federation. If you're here because of a failing test, try `dig
// +short <domain>` to ensure stuff matches first! If flakiness becomes an
// issue we can look into mocking
domainName := "collabora.ostrom.collective.tools"
serverName := "ostrom.collective.tools"
for i := 0; i < 15; i++ {
domainIpv4, err := EnsureIPv4(domainName)
if err != nil {
t.Fatal(err)
}
serverIpv4, err := EnsureIPv4(serverName)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, domainIpv4, serverIpv4)
}
}


@@ -1,15 +0,0 @@
package gandi
import (
"coopcloud.tech/abra/pkg/dns"
"github.com/libdns/gandi"
)
// New constructs a new DNS provider.
func New() (gandi.Provider, error) {
token, err := dns.NewToken("Gandi", "GANDI_TOKEN")
if err != nil {
return gandi.Provider{}, err
}
return gandi.Provider{APIToken: token}, nil
}


@@ -1,13 +1,16 @@
package formatter
import (
"fmt"
"os"
"strings"
"time"
"github.com/docker/go-units"
"github.com/olekukonko/tablewriter"
// "github.com/olekukonko/tablewriter"
"coopcloud.tech/abra/pkg/jsontable"
"github.com/schollz/progressbar/v3"
"github.com/sirupsen/logrus"
)
func ShortenID(str string) string {
@@ -31,8 +34,8 @@ func HumanDuration(timestamp int64) string {
}
// CreateTable prepares a table layout for output.
func CreateTable(columns []string) *tablewriter.Table {
table := tablewriter.NewWriter(os.Stdout)
func CreateTable(columns []string) *jsontable.JSONTable {
table := jsontable.NewJSONTable(os.Stdout)
table.SetAutoWrapText(false)
table.SetHeader(columns)
return table
@@ -49,3 +52,40 @@ func CreateProgressbar(length int, title string) *progressbar.ProgressBar {
progressbar.OptionSetDescription(title),
)
}
// StripTagMeta strips front-matter image tag data that we don't need for parsing.
func StripTagMeta(image string) string {
originalImage := image
if strings.Contains(image, "docker.io") {
image = strings.Split(image, "/")[1]
}
if strings.Contains(image, "library") {
image = strings.Split(image, "/")[1]
}
if originalImage != image {
logrus.Debugf("stripped %s to %s for parsing", originalImage, image)
}
return image
}
// ByteCountSI presents a human friendly representation of a byte count. See
// https://yourbasic.org/golang/formatting-byte-size-to-human-readable-format.
func ByteCountSI(b uint64) string {
const unit = 1000
if b < unit {
return fmt.Sprintf("%d B", b)
}
div, exp := uint64(unit), 0
for n := b / unit; n >= unit; n /= unit {
div *= unit
exp++
}
return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "kMGTPE"[exp])
}
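To illustrate what the switch from tablewriter to the jsontable proxy means for callers, a minimal sketch; the machineReadable flag is an assumption standing in for whatever CLI flag actually drives the output choice.

package main

import (
    "coopcloud.tech/abra/pkg/formatter"
)

func main() {
    table := formatter.CreateTable([]string{"name", "size"})
    table.Append([]string{"backup.tar.gz", formatter.ByteCountSI(1500000)}) // "1.5 MB"

    machineReadable := false // assumption: normally driven by a CLI flag
    if machineReadable {
        table.JSONRender() // [{"name":"backup.tar.gz","size":"1.5 MB"}]
    } else {
        table.Render() // the usual tablewriter layout on stdout
    }
}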

pkg/git/add.go (new file, 27 lines)

@@ -0,0 +1,27 @@
package git
import (
"github.com/go-git/go-git/v5"
"github.com/sirupsen/logrus"
)
// Add adds a file to the git index.
func Add(repoPath, path string, dryRun bool) error {
repo, err := git.PlainOpen(repoPath)
if err != nil {
return err
}
worktree, err := repo.Worktree()
if err != nil {
return err
}
if dryRun {
logrus.Debugf("dry run: adding %s", path)
} else {
if _, err := worktree.Add(path); err != nil {
return err
}
}
return nil
}
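A tiny sketch of the dry-run behaviour of the new helper; the repository path and file name are placeholders.

package main

import (
    "log"

    "coopcloud.tech/abra/pkg/git"
)

func main() {
    // dryRun=true only logs what would be staged.
    if err := git.Add("/tmp/example-recipe", "compose.yml", true); err != nil {
        log.Fatal(err)
    }

    // dryRun=false actually adds the file to the index.
    if err := git.Add("/tmp/example-recipe", "compose.yml", false); err != nil {
        log.Fatal(err)
    }
}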


@@ -1,11 +1,34 @@
package git
import (
"fmt"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/sirupsen/logrus"
)
// GetCurrentBranch retrieves the current branch of a repository
// HasBranch checks whether a branch exists in a repo. Use this and not
// repository.Branch(), because the latter does not actually check for existing
// branches. See https://github.com/gogit/gogit/issues/518 for more.
func HasBranch(repository *git.Repository, name string) bool {
var exist bool
if iter, err := repository.Branches(); err == nil {
iterFunc := func(reference *plumbing.Reference) error {
if name == reference.Name().Short() {
exist = true
return nil
}
return nil
}
_ = iter.ForEach(iterFunc)
}
return exist
}
// GetCurrentBranch retrieves the current branch of a repository.
func GetCurrentBranch(repository *git.Repository) (string, error) {
branchRefs, err := repository.Branches()
if err != nil {
@@ -33,3 +56,45 @@ func GetCurrentBranch(repository *git.Repository) (string, error) {
return currentBranchName, nil
}
// GetDefaultBranch retrieves the default branch of a repository.
func GetDefaultBranch(repo *git.Repository, repoPath string) (plumbing.ReferenceName, error) {
branch := "master"
if !HasBranch(repo, "master") {
if !HasBranch(repo, "main") {
return "", fmt.Errorf("failed to select default branch in %s", repoPath)
}
branch = "main"
}
return plumbing.ReferenceName(fmt.Sprintf("refs/heads/%s", branch)), nil
}
// CheckoutDefaultBranch checks out the default branch of a repository.
func CheckoutDefaultBranch(repo *git.Repository, repoPath string) (plumbing.ReferenceName, error) {
branch, err := GetDefaultBranch(repo, repoPath)
if err != nil {
return plumbing.ReferenceName(""), err
}
worktree, err := repo.Worktree()
if err != nil {
return plumbing.ReferenceName(""), err
}
checkOutOpts := &git.CheckoutOptions{
Create: false,
Force: true,
Branch: branch,
}
if err := worktree.Checkout(checkOutOpts); err != nil {
logrus.Debugf("failed to check out %s in %s", branch, repoPath)
return branch, err
}
logrus.Debugf("successfully checked out %v in %s", branch, repoPath)
return branch, nil
}
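A brief sketch tying the new branch helpers together; the repository path is a placeholder, and the gogit alias is only there to avoid clashing with abra's own git package.

package main

import (
    "fmt"
    "log"

    "coopcloud.tech/abra/pkg/git"
    gogit "github.com/go-git/go-git/v5"
)

func main() {
    repoPath := "/tmp/example-recipe"

    repo, err := gogit.PlainOpen(repoPath)
    if err != nil {
        log.Fatal(err)
    }

    // Prefers master, falls back to main (via HasBranch), errors otherwise.
    branch, err := git.GetDefaultBranch(repo, repoPath)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("default branch:", branch)

    // Force-checks out that default branch.
    if _, err := git.CheckoutDefaultBranch(repo, repoPath); err != nil {
        log.Fatal(err)
    }
}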


@@ -15,22 +15,32 @@ import (
func Clone(dir, url string) error {
if _, err := os.Stat(dir); os.IsNotExist(err) {
logrus.Debugf("%s does not exist, attempting to git clone from %s", dir, url)
_, err := git.PlainClone(dir, false, &git.CloneOptions{URL: url, Tags: git.AllTags})
_, err := git.PlainClone(dir, false, &git.CloneOptions{
URL: url,
Tags: git.AllTags,
ReferenceName: plumbing.ReferenceName("refs/heads/master"),
SingleBranch: true,
})
if err != nil {
logrus.Debugf("cloning %s default branch failed, attempting from main branch", url)
_, err := git.PlainClone(dir, false, &git.CloneOptions{
URL: url,
Tags: git.AllTags,
ReferenceName: plumbing.ReferenceName("refs/heads/main"),
SingleBranch: true,
})
if err != nil {
if strings.Contains(err.Error(), "authentication required") {
name := filepath.Base(dir)
return fmt.Errorf("unable to clone %s, does %s exist?", name, url)
}
return err
}
}
logrus.Debugf("%s has been git cloned successfully", dir)
} else {
logrus.Debugf("%s already exists", dir)


@@ -8,7 +8,7 @@ import (
)
// Commit runs a git commit
func Commit(repoPath, glob, commitMessage string, dryRun bool) error {
func Commit(repoPath, commitMessage string, dryRun bool) error {
if commitMessage == "" {
return fmt.Errorf("no commit message specified?")
}
@@ -33,17 +33,8 @@ func Commit(repoPath, glob, commitMessage string, dryRun bool) error {
}
if !dryRun {
err = commitWorktree.AddGlob(glob)
if err != nil {
return err
}
logrus.Debugf("staged %s for commit", glob)
} else {
logrus.Debugf("dry run: did not stage %s for commit", glob)
}
if !dryRun {
_, err = commitWorktree.Commit(commitMessage, &git.CommitOptions{})
// NOTE(d1): `All: true` does not include untracked files
_, err = commitWorktree.Commit(commitMessage, &git.CommitOptions{All: true})
if err != nil {
return err
}

pkg/git/diff.go (new file, 42 lines)

@@ -0,0 +1,42 @@
package git
import (
"fmt"
"os/exec"
"github.com/sirupsen/logrus"
)
// getGitDiffArgs builds the `git diff` invocation args. It removes the usage
// of a pager and ensures that colours are specified even when Git might detect
// otherwise.
func getGitDiffArgs(repoPath string) []string {
return []string{
"-C",
repoPath,
"--no-pager",
"-c",
"color.diff=always",
"diff",
}
}
// DiffUnstaged shows a `git diff`. Due to limitations in the underlying go-git
// library, this implementation requires the /usr/bin/git binary. It gracefully
// skips if it cannot find the command on the system.
func DiffUnstaged(path string) error {
if _, err := exec.LookPath("git"); err != nil {
logrus.Warnf("unable to locate git command, cannot output diff")
return nil
}
gitDiffArgs := getGitDiffArgs(path)
diff, err := exec.Command("git", gitDiffArgs...).Output()
if err != nil {
return nil
}
fmt.Print(string(diff))
return nil
}
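Usage is a one-liner; a minimal sketch with a placeholder path:

package main

import (
    "log"

    "coopcloud.tech/abra/pkg/git"
)

func main() {
    // Prints a coloured `git diff` of unstaged changes, or silently skips when
    // the git binary is not available.
    if err := git.DiffUnstaged("/tmp/example-recipe"); err != nil {
        log.Fatal(err)
    }
}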

pkg/jsontable/jsontable.go (new file, 211 lines)

@@ -0,0 +1,211 @@
package jsontable
import (
"fmt"
"io"
"strings"
"github.com/olekukonko/tablewriter"
)
// A quick-and-dirty proxy/emulator of tablewriter to enable easier machine-readable output
// - Does not strictly support types, just quoted or unquoted values
// - Does not support nested values.
// If a data label is set with SetDataLabel(true, "..."), that will be used as the key for the data of the table;
// otherwise, if the caption is set with SetCaption(true, "..."), the data label falls back to the default of
// "rows"; otherwise the table is output as a plain JSON list.
//
// Proxies all actions through to the tablewriter except Append and AppendBulk, which it applies at render time
//
type JSONTable struct {
out io.Writer
colsize int
rows [][]string
keys []string
quoted []bool // hack to do output typing, quoted vs. unquoted
hasDataLabel bool
dataLabel string
hasCaption bool
caption string // the actual caption
hasCaptionLabel bool
captionLabel string // the key in the dictionary for the caption
tbl *tablewriter.Table
}
func writeChar(w io.Writer, c byte) {
w.Write([]byte{c})
}
func NewJSONTable(writer io.Writer) *JSONTable {
t := &JSONTable{
out: writer,
colsize: 0,
rows: [][]string{},
keys: []string{},
quoted: []bool{},
hasDataLabel: false,
dataLabel: "rows",
hasCaption: false,
caption: "",
hasCaptionLabel: false,
captionLabel: "caption",
tbl: tablewriter.NewWriter(writer),
}
return t
}
func (t *JSONTable) NumLines() int {
// JSON only but reflects a shared state.
return len(t.rows)
}
func (t *JSONTable) SetHeader(keys []string) {
// Set the keys value which will assign each column to the keys.
// Note that we'll ignore values that are beyond the length of the keys list
t.colsize = len(keys)
t.keys = []string{}
for _, k := range keys {
t.keys = append(t.keys, k)
t.quoted = append(t.quoted, true)
}
t.tbl.SetHeader(keys)
}
func (t *JSONTable) SetColumnQuoting(quoting []bool) {
// Specify which columns are quoted or unquoted in output
// JSON only
for i := 0; i < t.colsize; i++ {
t.quoted[i] = quoting[i]
}
}
func (t *JSONTable) Append(row []string) {
// We'll just append whatever to the rows list. If they fix the keys after appending rows, it'll work as
// expected.
// We should detect if the row is narrower than the key list tho.
// JSON only (but we use the rows later when rendering a regular table)
t.rows = append(t.rows, row)
}
func (t *JSONTable) Render() {
// Load the table with rows and render.
// Proxy only
for _, row := range t.rows {
t.tbl.Append(row)
}
t.tbl.Render()
}
func (t *JSONTable) _JSONRenderInner() {
// JSON only
// Render the list of dictionaries to the writer.
//// inner render loop
writeChar(t.out, '[')
for rowidx, row := range t.rows {
if rowidx != 0 {
writeChar(t.out, ',')
}
writeChar(t.out, '{')
for keyidx, key := range t.keys {
key := strings.ToLower(key)
key = strings.ReplaceAll(key, " ", "-")
value := "nil"
if keyidx < len(row) {
value = row[keyidx]
}
if keyidx != 0 {
writeChar(t.out, ',')
}
if t.quoted[keyidx] {
fmt.Fprintf(t.out, "\"%s\":\"%s\"", key, value)
} else {
fmt.Fprintf(t.out, "\"%s\":%s", key, value)
}
}
writeChar(t.out, '}')
}
writeChar(t.out, ']')
}
func (t *JSONTable) JSONRender() {
// write JSON table to output
// JSON only
if t.hasDataLabel || t.hasCaption {
// dict mode
writeChar(t.out, '{')
if t.hasCaption {
fmt.Fprintf(t.out, "\"%s\":\"%s\",", t.captionLabel, t.caption)
}
fmt.Fprintf(t.out, "\"%s\":", t.dataLabel)
}
// write list
t._JSONRenderInner()
if t.hasDataLabel || t.hasCaption {
// dict mode
writeChar(t.out, '}')
}
}
func (t *JSONTable) SetCaption(caption bool, captionText ...string) {
t.hasCaption = caption
if len(captionText) == 1 {
t.caption = captionText[0]
}
t.tbl.SetCaption(caption, captionText...)
}
func (t *JSONTable) SetCaptionLabel(captionLabel bool, captionLabelText ...string) {
// JSON only
t.hasCaptionLabel = captionLabel
if len(captionLabelText) == 1 {
t.captionLabel = captionLabelText[0]
}
}
func (t *JSONTable) SetDataLabel(dataLabel bool, dataLabelText ...string) {
// JSON only
t.hasDataLabel = dataLabel
if len(dataLabelText) == 1 {
t.dataLabel = dataLabelText[0]
}
}
func (t *JSONTable) AppendBulk(rows [][]string) {
// JSON only but reflects shared state
for _, row := range rows {
t.Append(row)
}
}
// Stuff we should implement but we just proxy for now.
func (t *JSONTable) SetAutoMergeCellsByColumnIndex(cols []int) {
// FIXME
t.tbl.SetAutoMergeCellsByColumnIndex(cols)
}
// Stuff we should implement but we just proxy for now.
func (t *JSONTable) SetAlignment(align int) {
// FIXME
t.tbl.SetAlignment(align)
}
func (t *JSONTable) SetAutoMergeCells(auto bool) {
// FIXME
t.tbl.SetAutoMergeCells(auto)
}
// Stub functions
func (t *JSONTable) SetAutoWrapText(auto bool) {
t.tbl.SetAutoWrapText(auto)
}
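To make the caption and data-label behaviour concrete, a hedged sketch of the output shapes JSONRender produces (the exact byte-for-byte output is inferred from the code above):

package main

import (
    "fmt"
    "os"

    "coopcloud.tech/abra/pkg/jsontable"
)

func main() {
    t := jsontable.NewJSONTable(os.Stdout)
    t.SetHeader([]string{"Name", "Size"})
    t.SetColumnQuoting([]bool{true, false})
    t.Append([]string{"backup.tar.gz", "1500000"})

    // No caption and no data label: a bare JSON list.
    t.JSONRender() // [{"name":"backup.tar.gz","size":1500000}]
    fmt.Println()

    // With a caption, the rows move under the default "rows" key.
    t.SetCaption(true, "server1.example.com")
    t.JSONRender() // {"caption":"server1.example.com","rows":[{"name":"backup.tar.gz","size":1500000}]}
    fmt.Println()
}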

Some files were not shown because too many files have changed in this diff.