Compare commits

...

105 Commits

Author SHA1 Message Date
bd80599114 secret: allow inserting secret from file and add trim flag
All checks were successful
continuous-integration/drone/pr Build is passing
2024-06-22 17:28:13 +02:00
aa3910f8df refactor!: drop all SSH opts / config handling
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#601
See coop-cloud/organising#482
2024-06-21 17:16:41 +02:00
43990b6fae test: use more plumbung for git output
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-06-21 17:10:12 +02:00
91ea2c01a5 fix: fix old app version deploy wrt. compose files
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#617
2024-06-21 16:14:40 +02:00
316fdd3643 fix: abra app new checks out latest version
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#618
2024-06-21 15:51:34 +02:00
e07ae8cccd chore: make format/check
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-06-19 19:17:22 +02:00
300a4ead01 fix: stop using deprecated APIs
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2024-06-19 19:14:52 +02:00
f209b6f564 chore: go get -u -t 2024-06-19 19:14:44 +02:00
791183adfe build: new deps target 2024-06-19 19:14:31 +02:00
e6b35e8524 fix(upgrade): make upgrade --chaos working again
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-22 10:21:31 +02:00
8a0274cac0 fix(recipe): output correct formatted json for recipe version
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-21 16:59:59 +02:00
e609924af0 feat(upgrade): add --releasenotes: show release notes and skip upgrading
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-21 13:49:36 +02:00
70e2943301 fix(upgrade): only show release notes relevant for the upgrade 2024-05-21 13:49:11 +02:00
0590c1824d checkout deployed version
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-05-14 00:07:58 +02:00
459abecfa5 only show container that should be deployed
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-05-13 23:26:02 +02:00
183ad8f576 machine readable ps output
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-05-13 22:08:03 +02:00
03f94da2d8 docs: add fauno [ci skip] 2024-05-01 01:20:25 +02:00
766f69b0fd feat: strip debug symbols
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
to produce smaller binaries
2024-04-30 14:05:03 -03:00
004cd70aed fix: use unique rule number & wording [ci skip] 2024-04-06 23:52:56 +02:00
a4de446f58 test: more verbose failure msg, use contains [ci skip] 2024-04-06 23:48:22 +02:00
d21c35965d fix: add warning for long secret names (!359)
All checks were successful
continuous-integration/drone/push Build is passing
A start of a fix for coop-cloud/organising#463
Putting some code out to start a discussion. I've added a linting rule for recipes to establish a general principle, but I also want to put some validation into cli/app/new.go, since that's the point where we have both the recipe and the domain and can say for sure whether or not the secret name lengths cause a problem. That will have to wait for a bit, though. Let me know if I've missed the mark somewhere.

Reviewed-on: coop-cloud/abra#359
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: Rich M <r.p.makepeace@gmail.com>
Co-committed-by: Rich M <r.p.makepeace@gmail.com>
2024-04-06 21:41:37 +00:00
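As a rough illustration of the kind of linting warning this change describes, here is a hedged Go sketch; the 64-character limit, the <stack>_<secret>_<version> naming convention and all identifiers are assumptions made for this example, not abra's actual implementation.
```
package main

import "fmt"

// maxSecretNameLength is an assumed limit; Docker rejects overly long
// secret names, and 64 characters is used here purely for illustration.
const maxSecretNameLength = 64

// secretName mimics a "<stack>_<secret>_<version>" naming convention
// (an assumption for this sketch, not necessarily abra's exact scheme).
func secretName(stackName, secret, version string) string {
	return fmt.Sprintf("%s_%s_%s", stackName, secret, version)
}

// warnIfTooLong prints a lint-style warning when the generated name is
// longer than the assumed limit.
func warnIfTooLong(stackName, secret, version string) {
	name := secretName(stackName, secret, version)
	if len(name) > maxSecretNameLength {
		fmt.Printf("warning: secret name %q is %d characters long (limit %d)\n",
			name, len(name), maxSecretNameLength)
	}
}

func main() {
	// Long domains plus long secret names are where the limit bites.
	warnIfTooLong("my_very_long_subdomain_example_org", "smtp_password_with_long_name", "v1")
}
```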
63ea58ffaa add relevant command to error message
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-01 18:51:53 +01:00
2ecace3e90 fix: add missing packages on final layer
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#598
2024-04-01 13:57:51 +02:00
d5ac3958a4 feat: add retries to app volume remove
All checks were successful
continuous-integration/drone/push Build is passing
2024-03-27 05:38:24 +00:00
3wc
72c20e0039 fix: make installer work again
All checks were successful
continuous-integration/drone/push Build is passing
2024-03-26 21:07:38 -03:00
575f9905f1 Revert "Revert "feat: backup revolution""
All checks were successful
continuous-integration/drone/push Build is passing
This reverts commit 2c515ce70a.
2024-03-12 10:34:40 +01:00
e3a0af5840 build: upgrade goreleaser
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#474
2024-03-12 10:11:14 +01:00
9a3a39a185 chore: new 0.9.x series
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-03-12 10:05:31 +01:00
cea56dddde fix: drop deprecated stanza (goreleaser) 2024-03-12 10:04:50 +01:00
2c515ce70a Revert "feat: backup revolution"
This reverts commit c5687dfbd7.

This is a temporary measure to facilitate a release which won't
completely explode people's workflows (missing command logic). We will
re-instate this commit after the first 0.9.x release.
2024-03-12 10:03:42 +01:00
40c0fb4bac fix-integration-tests (!403)
All checks were successful
continuous-integration/drone/push Build is passing
In preparation for the new abra release, let's fix all integration tests

After merging, this needs to be cherry-picked into the release-0-9 branch.

  - [x] app_backup.bats (skip this one)
  - [x] app_check.bats (fixed by bd21014fed)
  - [x] app_cmd.bats (partially fixed in 08232b74f6), has known regression coop-cloud/organising#581
  - [x] app_config.bats (no changes needed)
  - [x] app_cp.bats (no changes needed)
  - [x] app_deploy.bats
  - [x] app_errors.bats (no changes needed)
  - [x] app_list.bats (no changes needed)
  - [x] app_logs.bats (no changes needed)
  - [x] app_new.bats (no changes needed)
  - [x] app_ps.bats (no changes needed)
  - [x] app_remove.bats (fixed by [2f29fbeb2e](coop-cloud/abra#403/commits/2f29fbeb2e018656413fa25f8615b7a98cdcb083))
  - [x] app_restart.bats (no changes needed)
  - [x] app_restore.bats (fixed by [f2dd5afc38](coop-cloud/abra#403/commits/f2dd5afc38a25a8316899fa0c6d59499445868d7))
  - [x] app_rollback.bats (partially fixed by 6e99b74c24)
  - [x] app_run.bats (no changes needed)
  - [x] app_secret.bats (fixed by bd069d32f6)
  - [x] app_services.bats (no changes needed)
  - [x] app_undeploy.bats (no changes needed)
  - [x] app_upgrade.bats (no changes needed)
  - [x] app_version.bats (partially fixed by ad323ad2bd)
  - [x] app_volume.bats (fixed by [03c3823770](coop-cloud/abra#403/commits/03c38237707ae795b723180eb07a7edc84a8de35))
  - [x] autocomplete.bats (no changes needed)
  - [x] catalogue.bats (no changes needed)
  - [x] dirs.bats (no changes needed)
  - [x] install.bats (fails, but that is expected)
  - [x] recipe_diff.bats (no changes needed)
  - [x] recipe_fetch.bats (no changes needed)
  - [x] recipe_lint.bats (fixed by [b6b0808066](coop-cloud/abra#403/commits/b6b0808066a11e4bcd77517ec39600d500bcb944))
  - [x] recipe_list.bats (no changes needed)
  - [x] recipe_new.bats (fixed by [0aac464ded](coop-cloud/abra#403/commits/0aac464ded6b43afb3ec37ade2f64d6191b9838f))
  - [x] recipe_release.bats (no changes needed)
  - [x] recipe_reset.bats (no changes needed)
  - [x] recipe_sync.bats (no changes needed)
  - [x] recipe_upgrade.bats (fixed by [ab86904cf4](coop-cloud/abra#403/commits/ab86904cf45db89c7c189ca1fd9971909bd446dd))
  - [x] recipe_version.bats (fixed by 81897bf4da)
  - [x] server_add.bats
  - [x] server_list.bats
  - [x] server_prune.bats (no changes needed)
  - [x] server_remove.bats
  - [x] upgrade.bats
  - [x] version.bats (no changes needed)

Co-authored-by: decentral1se <cellarspoon@riseup.net>
Reviewed-on: coop-cloud/abra#403
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-03-11 13:27:21 +00:00
0643df6d73 feat: fetch all recipes when no recipe is specified (!401)
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#530

Reviewed-on: coop-cloud/abra#401
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-01-24 15:01:33 +00:00
e9b99fe921 make installer save abra-download to /tmp/ directory
All checks were successful
continuous-integration/drone/push Build is passing
The current download location is ~/.local/bin/, but this
conflicts with some security tools.
2024-01-24 14:27:09 +00:00
4920dfedb3 fix: retry docker volume remove (!399)
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#509

Reviewed-on: coop-cloud/abra#399
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-01-19 15:09:00 +00:00
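For context, a minimal sketch of the retry approach described above, using the Docker SDK's VolumeRemove call; the retry count, delay, helper name and volume name are illustrative rather than the actual abra code.
```
package main

import (
	"context"
	"log"
	"time"

	"github.com/docker/docker/client"
)

// removeVolumeWithRetry keeps trying to remove a volume, since the engine
// can briefly hold a reference to it right after an undeploy.
func removeVolumeWithRetry(cl *client.Client, volumeName string, retries int) error {
	var err error
	for i := 0; i < retries; i++ {
		if err = cl.VolumeRemove(context.Background(), volumeName, false); err == nil {
			return nil
		}
		log.Printf("removing %s failed (%v), retrying in 1s...", volumeName, err)
		time.Sleep(time.Second)
	}
	return err
}

func main() {
	cl, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}
	// "example_com_app-data" is a made-up volume name for illustration.
	if err := removeVolumeWithRetry(cl, "example_com_app-data", 10); err != nil {
		log.Fatal(err)
	}
}
```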
0a3624c15b feat: add version input to abra app new (!400)
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#519

Reviewed-on: coop-cloud/abra#400
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-01-19 15:08:41 +00:00
c5687dfbd7 feat: backup revolution
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#485
2024-01-12 22:01:08 +01:00
ca91abbed9 fix: correct append service name logic in Filters function (!396)
All checks were successful
continuous-integration/drone/push Build is passing
This fixes a regression introduced by #395

Reviewed-on: coop-cloud/abra#396
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2023-12-22 12:08:12 +00:00
d4727db8f9 feat: abra app logs shows task errors (!395)
All checks were successful
continuous-integration/drone/push Build is passing
The log command now checks for the ready state in the task list. If a task is not ready, it shows the task logs, which might look like this:
```
ERRO[0000] Service abra-test-recipe_default_app: State rejected: No such image: ngaaaax:1.21.0
ERRO[0000] Service abra-test-recipe_default_app: State preparing:
ERRO[0000] Service abra-test-recipe_default_app: State rejected: No such image: ngaaaax:1.21.0
ERRO[0000] Service abra-test-recipe_default_app: State rejected: No such image: ngaaaax:1.21.0
ERRO[0000] Service abra-test-recipe_default_app: State rejected: No such image: ngaaaax:1.21.0
```

Closes coop-cloud/organising#518

Reviewed-on: coop-cloud/abra#395
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2023-12-14 13:15:24 +00:00
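A rough sketch of how task errors like the ones above can be surfaced through the Docker SDK's TaskList API; the service filter, the state check and the log format are assumptions for illustration, not abra's exact implementation.
```
package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

// logTaskErrors prints a line for every task of the service that is not in
// the running state, mirroring the ERRO output shown above.
func logTaskErrors(cl *client.Client, serviceName string) error {
	f := filters.NewArgs()
	f.Add("service", serviceName)

	tasks, err := cl.TaskList(context.Background(), types.TaskListOptions{Filters: f})
	if err != nil {
		return err
	}

	for _, task := range tasks {
		if task.Status.State != swarm.TaskStateRunning {
			log.Printf("Service %s: State %s: %s",
				serviceName, task.Status.State, task.Status.Err)
		}
	}

	return nil
}

func main() {
	cl, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}
	// "abra-test-recipe_default_app" is just an example service name.
	if err := logTaskErrors(cl, "abra-test-recipe_default_app"); err != nil {
		log.Fatal(err)
	}
}
```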
af8cd1f67a feat: abra release now asks for a release note (!393)
All checks were successful
continuous-integration/drone/push Build is passing
This implements coop-cloud/organising#540 by checking whether a `release/next` file exists and, if so, moving it to `release/<tag>`. When no release notes exist, it prompts for them.

Reviewed-on: coop-cloud/abra#393
Reviewed-by: moritz <moritz.m@local-it.org>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2023-12-12 14:46:20 +00:00
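A minimal sketch of the described flow, assuming release notes live under a release/ directory in the recipe checkout; the paths, tag and prompt handling are illustrative.
```
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// ensureReleaseNotes moves release/next to release/<tag> when it exists,
// and otherwise asks the maintainer to type some notes.
func ensureReleaseNotes(recipeDir, tag string) error {
	next := filepath.Join(recipeDir, "release", "next")
	tagged := filepath.Join(recipeDir, "release", tag)

	if _, err := os.Stat(next); err == nil {
		return os.Rename(next, tagged)
	}

	fmt.Printf("no release notes found, enter a line of notes for %s: ", tag)
	var notes string
	fmt.Scanln(&notes) // simplified: reads a single whitespace-free token
	return os.WriteFile(tagged, []byte(notes+"\n"), 0o644)
}

func main() {
	// Recipe path and tag are illustrative.
	if err := ensureReleaseNotes("/home/user/.abra/recipes/gitea", "2.1.0+1.21.4"); err != nil {
		fmt.Println(err)
	}
}
```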
cdd7516e54 chore: go mod tidy [ci skip] 2023-12-04 22:56:58 +01:00
99e3ed416f fix: secret name generation when secretId is not part of the secret name
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-04 21:52:09 +00:00
02b726db02 add comments to better explain how the length modifier gets added to the secret
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-04 17:30:26 +00:00
2de6934322 feat: abra app cp enhancements
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-02 15:39:27 +00:00
cb49cf06d1 chore: drop old godotenv pointers [ci skip]
Follows 9affda8a70
2023-12-02 13:02:24 +01:00
9affda8a70 chore: update godotenv fork commit pointer
All checks were successful
continuous-integration/drone/push Build is passing
Follows coop-cloud/abra#391
2023-12-02 12:59:42 +01:00
3957b7c965 proper env modifiers support
All checks were successful
continuous-integration/drone/push Build is passing
This implements proper modifier support in the env file using this new fork of the godotenv library. The modifier implementation is quite basic for now but can be improved later if needed. See this commit for the actual implementation.

Because we are now using proper modifier parsing, it no longer affects the parsing of the value, so this is possible again:
```
MY_VAR="#foo"
```
Closes coop-cloud/organising#535
2023-12-01 11:03:52 +00:00
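To illustrate the quoted-# behaviour, a small sketch using the upstream godotenv API (assuming the fork keeps Unmarshal compatible):
```
package main

import (
	"fmt"

	"github.com/joho/godotenv"
)

func main() {
	// A quoted "#" is part of the value, not the start of a comment.
	env, err := godotenv.Unmarshal(`MY_VAR="#foo"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(env["MY_VAR"]) // prints: #foo
}
```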
0d83339d80 fix(ssh): increase connection timeout #482
All checks were successful
continuous-integration/drone/push Build is passing
see coop-cloud/organising#482
2023-11-30 16:35:53 +01:00
6e54ec7213 test: skip failing test for now
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#535.
2023-11-28 11:42:36 +01:00
66b40a9189 fix: just run it in place [ci skip] 2023-11-27 11:25:01 +01:00
049f02f063 docs: add p4u1 [ci skip] 2023-11-27 11:23:03 +01:00
15857e6453 fix: clean up after cp'ing script [ci skip]
Follows 31e0ed75b0.
2023-11-27 11:21:46 +01:00
31e0ed75b0 build: target for docker building
Some checks failed
continuous-integration/drone/push Build is failing
Adapted from coop-cloud/abra#384.

Thanks @cas.
2023-11-27 11:15:59 +01:00
b1d3fcbb0b add integration test
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-27 10:01:33 +00:00
7b6134f35e add bash completion for abra cmd 2023-11-27 10:01:33 +00:00
316b59b465 test: support local-first testing
Some checks failed
continuous-integration/drone/push Build is failing
Cherry-picked from coop-cloud/abra#389

Thanks @p4u1.
2023-11-27 10:41:46 +01:00
92b073d5b6 chore: go mod tidy
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-27 10:28:43 +01:00
9b0dd933b5 chore(deps): update module github.com/schollz/progressbar/v3 to v3.14.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-11-10 08:00:52 +00:00
f255fa1555 chore(deps): update module github.com/hashicorp/go-retryablehttp to v0.7.5
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-11-09 08:00:33 +00:00
74200318ab chore(deps): update module github.com/schollz/progressbar/v3 to v3.14.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-11-07 08:01:11 +00:00
609656b4e1 chore(deps): update module golang.org/x/sys to v0.14.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-11-06 08:00:33 +00:00
856c9f2f7d chore: go mod tidy
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-04 09:37:15 +01:00
bd5cdd3443 chore(deps): update module github.com/docker/docker to v24.0.7
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-10-30 08:00:53 +00:00
79d274e074 chore(deps): update module github.com/docker/cli to v24.0.7
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-10-27 07:01:16 +00:00
51e3df17f1 chore(deps): update module github.com/go-git/go-git/v5 to v5.10.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-10-26 07:00:33 +00:00
ccf0215495 hotfix: parse values starting with # correctly
Some checks failed
continuous-integration/drone/push Build is failing
2023-10-23 19:21:45 +02:00
254df7f2be feat: app cmd ls
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#484
2023-10-17 21:16:31 +02:00
6a673ef101 refactor: filter by topic when building catalogue
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#377
2023-10-16 18:42:38 +02:00
7f7f7224c6 feat: diff on release flow
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Also, don't commit unstaged files.
2023-10-16 18:31:22 +02:00
f96bf9a8ac feat: recipe reset, recipe diff
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#511
2023-10-15 12:56:52 +02:00
dcecf32999 chore: bump version for installer script [ci skip] 2023-10-11 19:31:28 +02:00
bc88dac150 test: reset before changing files
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-11 19:29:19 +02:00
704c0e9c74 test: adapt failing tests to new changes 2023-10-11 18:34:08 +02:00
c9bb7e15c2 fix: bring back docker build
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-10 07:27:49 +02:00
d90c9b88f1 fix: include ca-certs to avoid x509 error [ci skip] 2023-10-10 00:50:43 +02:00
69ce07f81f fix: ignore build files for docker [ci skip] 2023-10-09 23:40:41 +02:00
85b90ef80c fix: bail if --chaos and specific version
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#503.
2023-10-09 20:54:44 +00:00
3e511446aa refactor: use app check emoji here too
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-09 22:53:46 +02:00
7566b4262b fix: set go version to 1.21
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-10-09 22:07:30 +02:00
c249c6ae9c fix: fix: trim comments that are not modifers
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#505
2023-10-09 14:42:05 +02:00
be693e9df0 fix: trim comments that are not modifers
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
See coop-cloud/organising#505
2023-10-08 22:42:34 +02:00
a43125701c test: optimise default make target for abra hacking [ci skip] 2023-10-07 10:32:42 +02:00
b57edb440a fix: improve app check
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#446
2023-10-06 10:56:33 +02:00
6fc4573a71 chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-06 09:49:03 +02:00
cbe6676881 chore(deps): update module golang.org/x/sys to v0.13.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-10-06 07:00:49 +00:00
b4fd39828f test: abra-integration-test-recipe -> abra-test-recipe
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/abra-test-recipe#3
2023-10-05 14:22:11 +02:00
14f2d72aba refactor!: lowercase, hyphenate keys
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
This will potentially break scripts, so time to discuss!
2023-10-05 08:36:01 +02:00
57692ec3c9 feat: add --machine to secret ls
See coop-cloud/organising#481
2023-10-04 23:08:39 +02:00
47d3b77003 refactor: not generating here, skipping
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-10-04 15:13:15 +02:00
8078e91e52 fix: warn if secrets not generated
See coop-cloud/organising#499
2023-10-04 15:13:14 +02:00
dc5d3a8dd6 test: build, init & test in one stage
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-10-04 14:37:09 +02:00
ab6107610c test: skip build step, test will do it 2023-10-04 14:36:59 +02:00
e837835e00 test: remove duplicate call to EnsureCatalogue
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-10-04 14:05:02 +02:00
c646263e9e fix: validate COMPOSE_FILE
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is passing
See coop-cloud/organising#468.
See coop-cloud/organising#376.
2023-10-04 13:27:04 +02:00
422c642949 fix: ensure ipv4 is checked, not sometimes ipv6
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#490
2023-10-04 09:29:10 +00:00
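For context, a small sketch of resolving only IPv4 addresses with Go's standard library; the helper name and usage are illustrative, not abra's actual code.
```
package main

import (
	"fmt"
	"net"
)

// lookupIPv4 resolves a domain and returns only its IPv4 addresses, so a
// comparison between two domains never accidentally compares an A record
// against an AAAA record.
func lookupIPv4(domain string) ([]net.IP, error) {
	ips, err := net.LookupIP(domain)
	if err != nil {
		return nil, err
	}
	var v4 []net.IP
	for _, ip := range ips {
		if ip.To4() != nil {
			v4 = append(v4, ip)
		}
	}
	return v4, nil
}

func main() {
	ips, err := lookupIPv4("example.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(ips)
}
```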
379915587c fix: don't export from within function
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Also, don't explode on command function which has "export" in the name!

See coop-cloud/organising#498
2023-10-04 11:20:50 +02:00
970ae0fc4e test: use _test to avoid cyclic imports 2023-10-04 02:36:44 +02:00
d11ad61efb docs: make chaos flag description more generic [ci skip] 2023-10-04 01:34:53 +02:00
54dc696c69 build: fix targets for small local builds
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-03 09:31:57 +02:00
7e3ce9c42a chore: go mod tidy 2023-10-03 09:30:26 +02:00
7751423c7d chore(deps): update module github.com/docker/distribution to v2.8.3
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-10-03 07:00:43 +00:00
f18f0b6f82 build: set ABRA_DIR explicitly
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-09-30 08:26:20 +02:00
892f6c0730 test: ensure catalogue is cloned 2023-09-30 08:19:16 +02:00
b53fd2689c test: add unit test for TestEnsureDomainsResolveSameIPv4 2023-09-30 08:19:02 +02:00
906bf65d47 test: moar domain check tests [ci skip] 2023-09-29 09:31:25 +02:00
1e6a6e6174 fix: app logs retrieves recipe
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-27 09:19:57 +02:00
104 changed files with 3692 additions and 1987 deletions

View File

@@ -1,4 +1,8 @@
-Dockerfile
-.dockerignore
-*.swp
 *.swo
+*.swp
+.dockerignore
+Dockerfile
+abra
+dist
+kadabra
+tags

View File

@@ -7,16 +7,13 @@ steps:
     commands:
       - make check

-  - name: make build
-    image: golang:1.21
-    commands:
-      - make build
-    depends_on:
-      - make check
-
   - name: make test
     image: golang:1.21
+    environment:
+      ABRA_DIR: "/root/.abra"
     commands:
+      - make build-abra
+      - ./abra help # show version, initialise $ABRA_DIR
       - make test
     depends_on:
       - make check
@@ -27,13 +24,12 @@ steps:
       - git fetch --tags
     depends_on:
       - make check
-      - make build
       - make test
     when:
       event: tag

   - name: release
-    image: goreleaser/goreleaser:v1.18.2
+    image: goreleaser/goreleaser:v1.24.0
     environment:
       GITEA_TOKEN:
         from_secret: goreleaser_gitea_token
@@ -47,22 +43,22 @@ steps:
     when:
       event: tag

-  # - name: publish image
-  #   image: plugins/docker
-  #   settings:
-  #     auto_tag: true
-  #     username: 3wordchant
-  #     password:
-  #       from_secret: git_coopcloud_tech_token_3wc
-  #     repo: git.coopcloud.tech/coop-cloud/abra
-  #     tags: dev
-  #     registry: git.coopcloud.tech
-  #   when:
-  #     event:
-  #       exclude:
-  #         - pull_request
-  #   depends_on:
-  #     - make check
+  - name: publish image
+    image: plugins/docker
+    settings:
+      auto_tag: true
+      username: 3wordchant
+      password:
+        from_secret: git_coopcloud_tech_token_3wc
+      repo: git.coopcloud.tech/coop-cloud/abra
+      tags: dev
+      registry: git.coopcloud.tech
+    when:
+      event:
+        exclude:
+          - pull_request
+    depends_on:
+      - make check

 volumes:
   - name: deps

View File

@@ -29,6 +29,8 @@ builds:
     ldflags:
       - "-X 'main.Commit={{ .Commit }}'"
      - "-X 'main.Version={{ .Version }}'"
+      - "-s"
+      - "-w"

   - id: kadabra
     binary: kadabra
@@ -50,12 +52,8 @@ builds:
     ldflags:
       - "-X 'main.Commit={{ .Commit }}'"
       - "-X 'main.Version={{ .Version }}'"
+      - "-s"
+      - "-w"

-archives:
-  - replacements:
-      386: i386
-      amd64: x86_64
-    format: binary
-
 checksum:
   name_template: "checksums.txt"

View File

@@ -7,10 +7,12 @@
 - cassowary
 - codegod100
 - decentral1se
+- fauno
 - frando
 - kawaiipunk
 - knoflook
 - moritz
+- p4u1
 - rix
 - roxxers
 - vera

View File

@@ -1,8 +1,13 @@
+# Build image
 FROM golang:1.21-alpine AS build

 ENV GOPRIVATE coopcloud.tech

-RUN apk add --no-cache make git gcc musl-dev
+RUN apk add --no-cache \
+    gcc \
+    git \
+    make \
+    musl-dev

 COPY . /app
@@ -10,7 +15,15 @@ WORKDIR /app

 RUN CGO_ENABLED=0 make build

-FROM scratch
+# Release image ("slim")
+FROM alpine:3.19.1
+
+RUN apk add --no-cache \
+    ca-certificates \
+    git \
+    openssh
+
+RUN update-ca-certificates

 COPY --from=build /app/abra /abra

View File

@@ -2,12 +2,14 @@ ABRA := ./cmd/abra
 KADABRA := ./cmd/kadabra
 COMMIT := $(shell git rev-list -1 HEAD)
 GOPATH := $(shell go env GOPATH)
+GOVERSION := 1.21
 LDFLAGS := "-X 'main.Commit=$(COMMIT)'"
 DIST_LDFLAGS := $(LDFLAGS)" -s -w"

 export GOPRIVATE=coopcloud.tech

-all: format check build test
+# NOTE(d1): default `make` optimised for Abra hacking
+all: format check build-abra test

 run-abra:
 	@go run -ldflags=$(LDFLAGS) $(ABRA)
@@ -18,19 +20,23 @@ run-kadabra:
 install-abra:
 	@go install -ldflags=$(LDFLAGS) $(ABRA)

-install-kadaabra:
+install-kadabra:
 	@go install -ldflags=$(LDFLAGS) $(KADABRA)

 build-abra:
-	@go build -v -ldflags=$(LDFLAGS) $(ABRA)
+	@go build -v -ldflags=$(DIST_LDFLAGS) $(ABRA)

 build-kadabra:
-	@go build -v -ldflags=$(LDFLAGS) $(KADABRA)
-
-build:
-	@go build -v -ldflags=$(DIST_LDFLAGS) $(ABRA)
 	@go build -v -ldflags=$(DIST_LDFLAGS) $(KADABRA)

+build: build-abra build-kadabra
+
+build-docker-abra:
+	@docker run -it -v $(PWD):/abra golang:$(GOVERSION) \
+		bash -c 'cd /abra; ./scripts/docker/build.sh'
+
+build-docker: build-docker-abra
+
 clean:
 	@rm '$(GOPATH)/bin/abra'
 	@rm '$(GOPATH)/bin/kadabra'
@@ -47,3 +53,6 @@ test:
 loc:
 	@find . -name "*.go" | xargs wc -l
+
+deps:
+	@go get -t -u ./...

View File

@@ -1,414 +1,296 @@ package app

The app "backup" command (package app) is rewritten from a local, tar-based implementation into a thin client for the backupbot container.

Removed: the backupConfig struct (pre/post-hook commands and backup paths) and the single "backup" ("bk") command, which read the backupbot.backup, backupbot.backup.path and backupbot.backup.{pre,post}-hook deploy labels, ran the hook commands via exec inside the service container, copied each backup path out of the container and merged the results into a single compressed archive under ~/.abra/backups/, together with its helpers (TimeStamp, runBackup, copyToFile, cleanupTempArchives, mergeArchives, addTar, openTarFile) and the archive/tar, pgzip and docker archive machinery they needed.

Added: string flags "snapshot, s" (lists a specific snapshot), "path, p" (include path) and "repo, r" (restic repository), plus four subcommands: "list" ("ls", "List all backups"), "download" ("d", "Download a backup"), "create" ("c", "Create a new backup") and "snapshots" ("s", "List backup snapshots"), grouped under a new parent "backup" ("b") command ("Manage app backups", ArgsUsage "<domain>"). Each subcommand validates the app, ensures the recipe exists, is clean, up to date and at the latest version (unless chaos/offline), creates a client for the app server, locates the backupbot container via internal.RetrieveBackupBotContainer, builds an exec environment (SERVICE=<domain>, plus SNAPSHOT, INCLUDE_PATH or RESTIC_REPO when the corresponding flag is set) and runs the matching backupbot command with internal.RunBackupCmdRemote. "download" additionally copies /tmp/backup.tar.gz out of the container into the current working directory via CopyFromContainer and prints "backup successfully downloaded to current working directory".

View File

@@ -1,13 +1,10 @@
 package app

 import (
-	"os"
-	"path"
-	"strings"
-
 	"coopcloud.tech/abra/cli/internal"
 	"coopcloud.tech/abra/pkg/autocomplete"
 	"coopcloud.tech/abra/pkg/config"
+	"coopcloud.tech/abra/pkg/formatter"
 	"coopcloud.tech/abra/pkg/recipe"
 	recipePkg "coopcloud.tech/abra/pkg/recipe"
 	"github.com/sirupsen/logrus"
@@ -17,7 +14,19 @@ import (
 var appCheckCommand = cli.Command{
 	Name:    "check",
 	Aliases: []string{"chk"},
-	Usage:   "Check if an app is configured correctly",
+	Usage:   "Ensure an app is well configured",
+	Description: `
+This command compares env vars in both the app ".env" and recipe ".env.sample"
+file.
+
+The goal is to ensure that recipe ".env.sample" env vars are defined in your
+app ".env" file. Only env var definitions in the ".env.sample" which are
+uncommented, e.g. "FOO=bar" are checked. If an app ".env" file does not include
+these env vars, then "check" will complain.
+
+Recipe maintainers may or may not provide defaults for env vars within their
+recipes regardless of commenting or not (e.g. through the use of
+${FOO:<default>} syntax). "check" does not confirm or deny this for you.`,
 	ArgsUsage: "<domain>",
 	Flags: []cli.Flag{
 		internal.DebugFlag,
@@ -49,32 +58,23 @@ var appCheckCommand = cli.Command{
 			}
 		}

-		envSamplePath := path.Join(config.RECIPES_DIR, app.Recipe, ".env.sample")
-		if _, err := os.Stat(envSamplePath); err != nil {
-			if os.IsNotExist(err) {
-				logrus.Fatalf("%s does not exist?", envSamplePath)
-			}
-			logrus.Fatal(err)
-		}
-
-		envSample, err := config.ReadEnv(envSamplePath)
-		if err != nil {
-			logrus.Fatal(err)
-		}
-
-		var missing []string
-		for k := range envSample {
-			if _, ok := app.Env[k]; !ok {
-				missing = append(missing, k)
-			}
-		}
-
-		if len(missing) > 0 {
-			missingEnvVars := strings.Join(missing, ", ")
-			logrus.Fatalf("%s is missing %s", app.Path, missingEnvVars)
-		}
-
-		logrus.Infof("all necessary environment variables defined for %s", app.Name)
+		tableCol := []string{"recipe env sample", "app env"}
+		table := formatter.CreateTable(tableCol)
+
+		envVars, err := config.CheckEnv(app)
+		if err != nil {
+			logrus.Fatal(err)
+		}
+
+		for _, envVar := range envVars {
+			if envVar.Present {
+				table.Append([]string{envVar.Name, "✅"})
+			} else {
+				table.Append([]string{envVar.Name, "❌"})
+			}
+		}
+
+		table.Render()

 		return nil
 	},
View File

@@ -6,9 +6,11 @@ import (
 	"os"
 	"os/exec"
 	"path"
+	"sort"
 	"strings"

 	"coopcloud.tech/abra/cli/internal"
+	"coopcloud.tech/abra/pkg/app"
 	"coopcloud.tech/abra/pkg/autocomplete"
 	"coopcloud.tech/abra/pkg/client"
 	"coopcloud.tech/abra/pkg/config"
@@ -22,8 +24,7 @@ var appCmdCommand = cli.Command{
 	Name:    "command",
 	Aliases: []string{"cmd"},
 	Usage:   "Run app commands",
-	Description: `
-Run an app specific command.
+	Description: `Run an app specific command.

 These commands are bash functions, defined in the abra.sh of the recipe itself.
 They can be run within the context of a service (e.g. app) or locally on your
@@ -43,8 +44,19 @@ Example:
 		internal.OfflineFlag,
 		internal.ChaosFlag,
 	},
-	BashComplete: autocomplete.AppNameComplete,
 	Before:       internal.SubCommandBefore,
+	Subcommands:  []cli.Command{appCmdListCommand},
+	BashComplete: func(ctx *cli.Context) {
+		args := ctx.Args()
+		switch len(args) {
+		case 0:
+			autocomplete.AppNameComplete(ctx)
+		case 1:
+			autocomplete.ServiceNameComplete(args.Get(0))
+		case 2:
+			cmdNameComplete(args.Get(0))
+		}
+	},
 	Action: func(c *cli.Context) error {
 		app := internal.ValidateApp(c)
@@ -186,3 +198,76 @@ func parseCmdArgs(args []string, isLocal bool) (bool, string) {
 	return hasCmdArgs, parsedCmdArgs
 }
+
+func cmdNameComplete(appName string) {
+	app, err := app.Get(appName)
+	if err != nil {
+		return
+	}
+	cmdNames, _ := getShCmdNames(app)
+	if err != nil {
+		return
+	}
+	for _, n := range cmdNames {
+		fmt.Println(n)
+	}
+}
+
+var appCmdListCommand = cli.Command{
+	Name:      "list",
+	Aliases:   []string{"ls"},
+	Usage:     "List all available commands",
+	ArgsUsage: "<domain>",
+	Flags: []cli.Flag{
+		internal.DebugFlag,
+		internal.OfflineFlag,
+		internal.ChaosFlag,
+	},
+	BashComplete: autocomplete.AppNameComplete,
+	Before:       internal.SubCommandBefore,
+	Action: func(c *cli.Context) error {
+		app := internal.ValidateApp(c)
+
+		if err := recipe.EnsureExists(app.Recipe); err != nil {
+			logrus.Fatal(err)
+		}
+
+		if !internal.Chaos {
+			if err := recipePkg.EnsureIsClean(app.Recipe); err != nil {
+				logrus.Fatal(err)
+			}
+
+			if !internal.Offline {
+				if err := recipePkg.EnsureUpToDate(app.Recipe); err != nil {
+					logrus.Fatal(err)
+				}
+			}
+
+			if err := recipePkg.EnsureLatest(app.Recipe); err != nil {
+				logrus.Fatal(err)
+			}
+		}
+
+		cmdNames, err := getShCmdNames(app)
+		if err != nil {
+			logrus.Fatal(err)
+		}
+
+		for _, cmdName := range cmdNames {
+			fmt.Println(cmdName)
+		}
+
+		return nil
+	},
+}
+
+func getShCmdNames(app config.App) ([]string, error) {
+	abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
+	cmdNames, err := config.ReadAbraShCmdNames(abraShPath)
+	if err != nil {
+		return nil, err
+	}
+
+	sort.Strings(cmdNames)
+
+	return cmdNames, nil
+}

View File

@ -2,19 +2,24 @@ package app
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"io"
"os" "os"
"path"
"path/filepath"
"strings" "strings"
"coopcloud.tech/abra/cli/internal" "coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete" "coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client" "coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config" containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/formatter" "coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client" dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"
@ -49,46 +54,14 @@ And if you want to copy that file back to your current working directory locally
dst := c.Args().Get(2) dst := c.Args().Get(2)
if src == "" { if src == "" {
logrus.Fatal("missing <src> argument") logrus.Fatal("missing <src> argument")
} else if dst == "" { }
if dst == "" {
logrus.Fatal("missing <dest> argument") logrus.Fatal("missing <dest> argument")
} }
parsedSrc := strings.SplitN(src, ":", 2) srcPath, dstPath, service, toContainer, err := parseSrcAndDst(src, dst)
parsedDst := strings.SplitN(dst, ":", 2) if err != nil {
errorMsg := "one of <src>/<dest> arguments must take $SERVICE:$PATH form" logrus.Fatal(err)
if len(parsedSrc) == 2 && len(parsedDst) == 2 {
logrus.Fatal(errorMsg)
} else if len(parsedSrc) != 2 {
if len(parsedDst) != 2 {
logrus.Fatal(errorMsg)
}
} else if len(parsedDst) != 2 {
if len(parsedSrc) != 2 {
logrus.Fatal(errorMsg)
}
}
var service string
var srcPath string
var dstPath string
isToContainer := false // <container:src> <dst>
if len(parsedSrc) == 2 {
service = parsedSrc[0]
srcPath = parsedSrc[1]
dstPath = dst
logrus.Debugf("assuming transfer is coming FROM the container")
} else if len(parsedDst) == 2 {
service = parsedDst[0]
dstPath = parsedDst[1]
srcPath = src
isToContainer = true // <src> <container:dst>
logrus.Debugf("assuming transfer is going TO the container")
}
if isToContainer {
if _, err := os.Stat(srcPath); os.IsNotExist(err) {
logrus.Fatalf("%s does not exist locally?", srcPath)
}
} }
cl, err := client.New(app.Server) cl, err := client.New(app.Server)
@ -96,7 +69,18 @@ And if you want to copy that file back to your current working directory locally
logrus.Fatal(err) logrus.Fatal(err)
} }
if err := configureAndCp(c, cl, app, srcPath, dstPath, service, isToContainer); err != nil { container, err := containerPkg.GetContainerFromStackAndService(cl, app.StackName(), service)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("retrieved %s as target container on %s", formatter.ShortenID(container.ID), app.Server)
if toContainer {
err = CopyToContainer(cl, container.ID, srcPath, dstPath)
} else {
err = CopyFromContainer(cl, container.ID, srcPath, dstPath)
}
if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
@ -104,46 +88,292 @@ And if you want to copy that file back to your current working directory locally
}, },
} }
func configureAndCp( var errServiceMissing = errors.New("one of <src>/<dest> arguments must take $SERVICE:$PATH form")
c *cli.Context,
cl *dockerClient.Client,
app config.App,
srcPath string,
dstPath string,
service string,
isToContainer bool) error {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service))
container, err := container.GetContainer(context.Background(), cl, filters, internal.NoInput) // parseSrcAndDst parses src and dest string. One of src or dst must be of the form $SERVICE:$PATH
if err != nil { func parseSrcAndDst(src, dst string) (srcPath string, dstPath string, service string, toContainer bool, err error) {
logrus.Fatal(err) parsedSrc := strings.SplitN(src, ":", 2)
parsedDst := strings.SplitN(dst, ":", 2)
if len(parsedSrc)+len(parsedDst) != 3 {
return "", "", "", false, errServiceMissing
}
if len(parsedSrc) == 2 {
return parsedSrc[1], dst, parsedSrc[0], false, nil
}
if len(parsedDst) == 2 {
return src, parsedDst[1], parsedDst[0], true, nil
}
return "", "", "", false, errServiceMissing
} }
logrus.Debugf("retrieved %s as target container on %s", formatter.ShortenID(container.ID), app.Server) // CopyToContainer copies a file or directory from the local file system to the container.
// See the possible copy modes and their documentation.
func CopyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
srcStat, err := os.Stat(srcPath)
if err != nil {
return fmt.Errorf("local %s ", err)
}
if isToContainer { dstStat, err := cl.ContainerStatPath(context.Background(), containerID, dstPath)
toTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip} dstExists := true
if err != nil {
if errdefs.IsNotFound(err) {
dstExists = false
} else {
return fmt.Errorf("remote path: %s", err)
}
}
mode, err := copyMode(srcPath, dstPath, srcStat.Mode(), dstStat.Mode, dstExists)
if err != nil {
return err
}
movePath := ""
switch mode {
case CopyModeDirToDir:
// Add the src directory to the destination path
_, srcDir := path.Split(srcPath)
dstPath = path.Join(dstPath, srcDir)
// Make sure the dst directory exits.
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: []string{"mkdir", "-p", dstPath},
Detach: false,
Tty: true,
}); err != nil {
return fmt.Errorf("create remote directory: %s", err)
}
case CopyModeFileToFile:
// Remove the file component from the path, since docker can only copy
// to a directory.
dstPath, _ = path.Split(dstPath)
case CopyModeFileToFileRename:
// Copy the file to the temp directory and move it to its dstPath
// afterwards.
movePath = dstPath
dstPath = "/tmp"
}
toTarOpts := &archive.TarOptions{IncludeSourceDir: true, NoOverwriteDirNonDir: true, Compression: archive.Gzip}
content, err := archive.TarWithOptions(srcPath, toTarOpts) content, err := archive.TarWithOptions(srcPath, toTarOpts)
if err != nil { if err != nil {
logrus.Fatal(err) return err
} }
logrus.Debugf("copy %s from local to %s on container", srcPath, dstPath)
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false} copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), container.ID, dstPath, content, copyOpts); err != nil { if err := cl.CopyToContainer(context.Background(), containerID, dstPath, content, copyOpts); err != nil {
logrus.Fatal(err) return err
} }
} else {
content, _, err := cl.CopyFromContainer(context.Background(), container.ID, srcPath) if movePath != "" {
_, srcFile := path.Split(srcPath)
dcli, err := command.NewDockerCli()
if err != nil { if err != nil {
logrus.Fatal(err) return err
} }
defer content.Close() if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
fromTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip} AttachStderr: true,
if err := archive.Untar(content, dstPath, fromTarOpts); err != nil { AttachStdin: true,
logrus.Fatal(err) AttachStdout: true,
Cmd: []string{"mv", path.Join("/tmp", srcFile), movePath},
Detach: false,
Tty: true,
}); err != nil {
return fmt.Errorf("move file on container: %s", err)
} }
} }
return nil return nil
} }
// CopyFromContainer copies a file or directory from the given container to the local file system.
// See the possible copy modes and their documentation.
func CopyFromContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
srcStat, err := cl.ContainerStatPath(context.Background(), containerID, srcPath)
if err != nil {
if errdefs.IsNotFound(err) {
return fmt.Errorf("remote: %s does not exist", srcPath)
} else {
return fmt.Errorf("remote path: %s", err)
}
}
dstStat, err := os.Stat(dstPath)
dstExists := true
var dstMode os.FileMode
if err != nil {
if os.IsNotExist(err) {
dstExists = false
} else {
return fmt.Errorf("remote path: %s", err)
}
} else {
dstMode = dstStat.Mode()
}
mode, err := copyMode(srcPath, dstPath, srcStat.Mode, dstMode, dstExists)
if err != nil {
return err
}
moveDstDir := ""
moveDstFile := ""
switch mode {
case CopyModeFileToFile:
// Remove the file component from the path, since docker can only copy
// to a directory.
dstPath, _ = path.Split(dstPath)
case CopyModeFileToFileRename:
// Copy the file to the temp directory and move it to its dstPath
// afterwards.
moveDstFile = dstPath
dstPath = "/tmp"
case CopyModeFilesToDir:
// Copy the directory to the temp directory and move it to its
// dstPath afterwards.
moveDstDir = path.Join(dstPath, "/")
dstPath = "/tmp"
// Make sure the temp directory always gets removed
defer os.Remove(path.Join("/tmp"))
}
content, _, err := cl.CopyFromContainer(context.Background(), containerID, srcPath)
if err != nil {
return fmt.Errorf("copy: %s", err)
}
defer content.Close()
if err := archive.Untar(content, dstPath, &archive.TarOptions{
NoOverwriteDirNonDir: true,
Compression: archive.Gzip,
NoLchown: true,
}); err != nil {
return fmt.Errorf("untar: %s", err)
}
if moveDstFile != "" {
_, srcFile := path.Split(strings.TrimSuffix(srcPath, "/"))
if err := moveFile(path.Join("/tmp", srcFile), moveDstFile); err != nil {
return err
}
}
if moveDstDir != "" {
_, srcDir := path.Split(strings.TrimSuffix(srcPath, "/"))
if err := moveDir(path.Join("/tmp", srcDir), moveDstDir); err != nil {
return err
}
}
return nil
}
var (
ErrCopyDirToFile = fmt.Errorf("can't copy dir to file")
ErrDstDirNotExist = fmt.Errorf("destination directory does not exist")
)
type CopyMode int
const (
// Copy a src file to a dest file. The src and dest file names are the same.
// <dir_src>/<file> + <dir_dst>/<file> -> <dir_dst>/<file>
CopyModeFileToFile = CopyMode(iota)
// Copy a src file to a dest file. The src and dest file names are not the same.
// <dir_src>/<file_src> + <dir_dst>/<file_dst> -> <dir_dst>/<file_dst>
CopyModeFileToFileRename
// Copy a src file to dest directory. The dest file gets created in the dest
// folder with the src filename.
// <dir_src>/<file> + <dir_dst> -> <dir_dst>/<file>
CopyModeFileToDir
// Copy a src directory to dest directory.
// <dir_src> + <dir_dst> -> <dir_dst>/<dir_src>
CopyModeDirToDir
// Copy all files in the src directory to the dest directory. This works recursively.
// <dir_src>/ + <dir_dst> -> <dir_dst>/<files_from_dir_src>
CopyModeFilesToDir
)
// copyMode takes a src and dest path and file mode to determine the copy mode.
// See the possible copy modes and their documentation.
func copyMode(srcPath, dstPath string, srcMode os.FileMode, dstMode os.FileMode, dstExists bool) (CopyMode, error) {
_, srcFile := path.Split(srcPath)
_, dstFile := path.Split(dstPath)
if srcMode.IsDir() {
if !dstExists {
return -1, ErrDstDirNotExist
}
if dstMode.IsDir() {
if strings.HasSuffix(srcPath, "/") {
return CopyModeFilesToDir, nil
}
return CopyModeDirToDir, nil
}
return -1, ErrCopyDirToFile
}
if dstMode.IsDir() {
return CopyModeFileToDir, nil
}
if srcFile != dstFile {
return CopyModeFileToFileRename, nil
}
return CopyModeFileToFile, nil
}
// moveDir moves all files from a source path to the destination path recursively.
func moveDir(sourcePath, destPath string) error {
return filepath.Walk(sourcePath, func(p string, info os.FileInfo, err error) error {
if err != nil {
return err
}
newPath := path.Join(destPath, strings.TrimPrefix(p, sourcePath))
if info.IsDir() {
err := os.Mkdir(newPath, info.Mode())
if err != nil {
if os.IsExist(err) {
return nil
}
return err
}
}
if info.Mode().IsRegular() {
return moveFile(p, newPath)
}
return nil
})
}
// moveFile moves a file from a source path to a destination path.
func moveFile(sourcePath, destPath string) error {
inputFile, err := os.Open(sourcePath)
if err != nil {
return err
}
outputFile, err := os.Create(destPath)
if err != nil {
inputFile.Close()
return err
}
defer outputFile.Close()
_, err = io.Copy(outputFile, inputFile)
inputFile.Close()
if err != nil {
return err
}
// Remove the file after a successful copy.
err = os.Remove(sourcePath)
if err != nil {
return err
}
return nil
}

cli/app/cp_test.go (new file)

@ -0,0 +1,113 @@
package app
import (
"os"
"testing"
)
func TestParse(t *testing.T) {
tests := []struct {
src string
dst string
srcPath string
dstPath string
service string
toContainer bool
err error
}{
{src: "foo", dst: "bar", err: errServiceMissing},
{src: "app:foo", dst: "app:bar", err: errServiceMissing},
{src: "app:foo", dst: "bar", srcPath: "foo", dstPath: "bar", service: "app", toContainer: false},
{src: "foo", dst: "app:bar", srcPath: "foo", dstPath: "bar", service: "app", toContainer: true},
}
for i, tc := range tests {
srcPath, dstPath, service, toContainer, err := parseSrcAndDst(tc.src, tc.dst)
if srcPath != tc.srcPath {
t.Errorf("[%d] srcPath: want (%s), got(%s)", i, tc.srcPath, srcPath)
}
if dstPath != tc.dstPath {
t.Errorf("[%d] dstPath: want (%s), got(%s)", i, tc.dstPath, dstPath)
}
if service != tc.service {
t.Errorf("[%d] service: want (%s), got(%s)", i, tc.service, service)
}
if toContainer != tc.toContainer {
t.Errorf("[%d] toContainer: want (%t), got(%t)", i, tc.toContainer, toContainer)
}
if err == nil && tc.err != nil && err.Error() != tc.err.Error() {
t.Errorf("[%d] err: want (%s), got(%s)", i, tc.err, err)
}
}
}
func TestCopyMode(t *testing.T) {
tests := []struct {
srcPath string
dstPath string
srcMode os.FileMode
dstMode os.FileMode
dstExists bool
mode CopyMode
err error
}{
{
srcPath: "foo.txt",
dstPath: "foo.txt",
srcMode: os.ModePerm,
dstMode: os.ModePerm,
dstExists: true,
mode: CopyModeFileToFile,
},
{
srcPath: "foo.txt",
dstPath: "bar.txt",
srcMode: os.ModePerm,
dstExists: true,
mode: CopyModeFileToFileRename,
},
{
srcPath: "foo",
dstPath: "foo",
srcMode: os.ModeDir,
dstMode: os.ModeDir,
dstExists: true,
mode: CopyModeDirToDir,
},
{
srcPath: "foo/",
dstPath: "foo",
srcMode: os.ModeDir,
dstMode: os.ModeDir,
dstExists: true,
mode: CopyModeFilesToDir,
},
{
srcPath: "foo",
dstPath: "foo",
srcMode: os.ModeDir,
dstExists: false,
mode: -1,
err: ErrDstDirNotExist,
},
{
srcPath: "foo",
dstPath: "foo",
srcMode: os.ModeDir,
dstMode: os.ModePerm,
dstExists: true,
mode: -1,
err: ErrCopyDirToFile,
},
}
for i, tc := range tests {
mode, err := copyMode(tc.srcPath, tc.dstPath, tc.srcMode, tc.dstMode, tc.dstExists)
if mode != tc.mode {
t.Errorf("[%d] mode: want (%d), got(%d)", i, tc.mode, mode)
}
if err != tc.err {
t.Errorf("[%d] err: want (%s), got(%s)", i, tc.err, err)
}
}
}


@ -6,6 +6,7 @@ import (
"coopcloud.tech/abra/cli/internal" "coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete" "coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/secret"
"coopcloud.tech/abra/pkg/client" "coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config" "coopcloud.tech/abra/pkg/config"
@ -50,6 +51,11 @@ recipes.
app := internal.ValidateApp(c) app := internal.ValidateApp(c)
stackName := app.StackName() stackName := app.StackName()
specificVersion := c.Args().Get(1)
if specificVersion != "" && internal.Chaos {
logrus.Fatal("cannot use <version> and --chaos together")
}
if err := recipe.EnsureExists(app.Recipe); err != nil { if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
@ -91,16 +97,11 @@ recipes.
logrus.Fatal(err) logrus.Fatal(err)
} }
if isDeployed { // NOTE(d1): check out specific version before dealing with secrets. This
if internal.Force || internal.Chaos { // is because we need to deal with GetComposeFiles under the hood and these
logrus.Warnf("%s is already deployed but continuing (--force/--chaos)", app.Name) // files change from version to version which therefore affects which
} else { // secrets might be generated
logrus.Fatalf("%s is already deployed", app.Name)
}
}
version := deployedVersion version := deployedVersion
specificVersion := c.Args().Get(1)
if specificVersion != "" { if specificVersion != "" {
version = specificVersion version = specificVersion
logrus.Debugf("choosing %s as version to deploy", version) logrus.Debugf("choosing %s as version to deploy", version)
@ -109,6 +110,25 @@ recipes.
} }
} }
secStats, err := secret.PollSecretsStatus(cl, app)
if err != nil {
logrus.Fatal(err)
}
for _, secStat := range secStats {
if !secStat.CreatedOnRemote {
logrus.Fatalf("unable to deploy, secrets not generated (%s)?", secStat.LocalName)
}
}
if isDeployed {
if internal.Force || internal.Chaos {
logrus.Warnf("%s is already deployed but continuing (--force/--chaos)", app.Name)
} else {
logrus.Fatalf("%s is already deployed", app.Name)
}
}
if !internal.Chaos && specificVersion == "" { if !internal.Chaos && specificVersion == "" {
catl, err := recipe.ReadRecipeCatalogue(internal.Offline) catl, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil { if err != nil {
@ -188,6 +208,17 @@ recipes.
config.SetChaosVersionLabel(compose, stackName, version) config.SetChaosVersionLabel(compose, stackName, version)
config.SetUpdateLabel(compose, stackName, app.Env) config.SetUpdateLabel(compose, stackName, app.Env)
envVars, err := config.CheckEnv(app)
if err != nil {
logrus.Fatal(err)
}
for _, envVar := range envVars {
if !envVar.Present {
logrus.Warnf("env var %s missing from %s.env, present in recipe .env.sample", envVar.Name, app.Domain)
}
}
if err := internal.DeployOverview(app, version, "continue with deployment?"); err != nil { if err := internal.DeployOverview(app, version, "continue with deployment?"); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }


@ -13,7 +13,7 @@ import (
"coopcloud.tech/abra/pkg/config" "coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/recipe" "coopcloud.tech/abra/pkg/recipe"
stack "coopcloud.tech/abra/pkg/upstream/stack" stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types" containerTypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client" dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -97,7 +97,7 @@ func checkErrors(c *cli.Context, cl *dockerClient.Client, app config.App) error
filters := filters.NewArgs() filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name)) filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name))
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters}) containers, err := cl.ContainerList(context.Background(), containerTypes.ListOptions{Filters: filters})
if err != nil { if err != nil {
return err return err
} }


@ -2,74 +2,27 @@ package app
import ( import (
"context" "context"
"fmt"
"io" "io"
"os" "os"
"slices"
"sync" "sync"
"time"
"coopcloud.tech/abra/cli/internal" "coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete" "coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client" "coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config" "coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/service" "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/stack" "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
containerTypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
dockerClient "github.com/docker/docker/client" dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"
) )
var logOpts = types.ContainerLogsOptions{
ShowStderr: true,
ShowStdout: true,
Since: "",
Until: "",
Timestamps: true,
Follow: true,
Tail: "20",
Details: false,
}
// stackLogs lists logs for all stack services
func stackLogs(c *cli.Context, app config.App, client *dockerClient.Client) {
filters, err := app.Filters(true, false)
if err != nil {
logrus.Fatal(err)
}
serviceOpts := types.ServiceListOptions{Filters: filters}
services, err := client.ServiceList(context.Background(), serviceOpts)
if err != nil {
logrus.Fatal(err)
}
var wg sync.WaitGroup
for _, service := range services {
wg.Add(1)
go func(s string) {
if internal.StdErrOnly {
logOpts.ShowStdout = false
}
logs, err := client.ServiceLogs(context.Background(), s, logOpts)
if err != nil {
logrus.Fatal(err)
}
defer logs.Close()
_, err = io.Copy(os.Stdout, logs)
if err != nil && err != io.EOF {
logrus.Fatal(err)
}
}(service.ID)
}
wg.Wait()
os.Exit(0)
}
var appLogsCommand = cli.Command{ var appLogsCommand = cli.Command{
Name: "logs", Name: "logs",
Aliases: []string{"l"}, Aliases: []string{"l"},
@ -86,6 +39,10 @@ var appLogsCommand = cli.Command{
app := internal.ValidateApp(c) app := internal.ValidateApp(c)
stackName := app.StackName() stackName := app.StackName()
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
cl, err := client.New(app.Server) cl, err := client.New(app.Server)
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
@ -100,37 +57,70 @@ var appLogsCommand = cli.Command{
logrus.Fatalf("%s is not deployed?", app.Name) logrus.Fatalf("%s is not deployed?", app.Name)
} }
logOpts.Since = internal.SinceLogs
serviceName := c.Args().Get(1) serviceName := c.Args().Get(1)
if serviceName == "" { serviceNames := []string{}
logrus.Debugf("tailing logs for all %s services", app.Recipe) if serviceName != "" {
stackLogs(c, app, cl) serviceNames = []string{serviceName}
} else {
logrus.Debugf("tailing logs for %s", serviceName)
if err := tailServiceLogs(c, cl, app, serviceName); err != nil {
logrus.Fatal(err)
} }
err = tailLogs(cl, app, serviceNames)
if err != nil {
logrus.Fatal(err)
} }
return nil return nil
}, },
} }
func tailServiceLogs(c *cli.Context, cl *dockerClient.Client, app config.App, serviceName string) error { // tailLogs prints logs for the given app with optional service names to be
filters := filters.NewArgs() // filtered on. It also checks if the latest task is not running and then
filters.Add("name", fmt.Sprintf("%s_%s", app.StackName(), serviceName)) // prints the past tasks.
func tailLogs(cl *dockerClient.Client, app config.App, serviceNames []string) error {
chosenService, err := service.GetService(context.Background(), cl, filters, internal.NoInput) f, err := app.Filters(true, false, serviceNames...)
if err != nil { if err != nil {
logrus.Fatal(err) return err
} }
if internal.StdErrOnly { services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: f})
logOpts.ShowStdout = false if err != nil {
return err
} }
logs, err := cl.ServiceLogs(context.Background(), chosenService.ID, logOpts) var wg sync.WaitGroup
for _, service := range services {
filters := filters.NewArgs()
filters.Add("name", service.Spec.Name)
tasks, err := cl.TaskList(context.Background(), types.TaskListOptions{Filters: f})
if err != nil {
return err
}
if len(tasks) > 0 {
// Sort the tasks by the CreatedAt field in descending order so that
// the most recent task comes first.
slices.SortFunc[[]swarm.Task](tasks, func(t1, t2 swarm.Task) int {
return int(t2.Meta.CreatedAt.Unix() - t1.Meta.CreatedAt.Unix())
})
lastTask := tasks[0].Status
if lastTask.State != swarm.TaskStateRunning {
for _, task := range tasks {
logrus.Errorf("[%s] %s State %s: %s", service.Spec.Name, task.Meta.CreatedAt.Format(time.RFC3339), task.Status.State, task.Status.Err)
}
}
}
// Collect the logs in a goroutine, so the logs from all services are
// collected in parallel.
wg.Add(1)
go func(serviceID string) {
logs, err := cl.ServiceLogs(context.Background(), serviceID, containerTypes.LogsOptions{
ShowStderr: true,
ShowStdout: !internal.StdErrOnly,
Since: internal.SinceLogs,
Until: "",
Timestamps: true,
Follow: true,
Tail: "20",
Details: false,
})
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
@ -140,6 +130,11 @@ func tailServiceLogs(c *cli.Context, cl *dockerClient.Client, app config.App, se
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
logrus.Fatal(err) logrus.Fatal(err)
} }
}(service.ID)
}
// Wait for all log streams to be closed.
wg.Wait()
return nil return nil
} }


@ -2,6 +2,7 @@ package app
import ( import (
"fmt" "fmt"
"path"
"coopcloud.tech/abra/cli/internal" "coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete" "coopcloud.tech/abra/pkg/autocomplete"
@ -9,7 +10,6 @@ import (
"coopcloud.tech/abra/pkg/config" "coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter" "coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/jsontable" "coopcloud.tech/abra/pkg/jsontable"
"coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe" recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/secret" "coopcloud.tech/abra/pkg/secret"
"github.com/AlecAivazis/survey/v2" "github.com/AlecAivazis/survey/v2"
@ -54,8 +54,16 @@ var appNewCommand = cli.Command{
internal.ChaosFlag, internal.ChaosFlag,
}, },
Before: internal.SubCommandBefore, Before: internal.SubCommandBefore,
ArgsUsage: "[<recipe>]", ArgsUsage: "[<recipe>] [<version>]",
BashComplete: autocomplete.RecipeNameComplete, BashComplete: func(ctx *cli.Context) {
args := ctx.Args()
switch len(args) {
case 0:
autocomplete.RecipeNameComplete(ctx)
case 1:
autocomplete.RecipeVersionComplete(ctx.Args().Get(0))
}
},
Action: func(c *cli.Context) error { Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipe(c) recipe := internal.ValidateRecipe(c)
@ -68,10 +76,36 @@ var appNewCommand = cli.Command{
logrus.Fatal(err) logrus.Fatal(err)
} }
} }
if c.Args().Get(1) == "" {
var version string
recipeVersions, err := recipePkg.GetRecipeVersions(recipe.Name, internal.Offline)
if err != nil {
logrus.Fatal(err)
}
// NOTE(d1): determine whether recipe versions exist or not and check
// out the latest version or current HEAD
if len(recipeVersions) > 0 {
latest := recipeVersions[len(recipeVersions)-1]
for tag := range latest {
version = tag
}
if err := recipePkg.EnsureVersion(recipe.Name, version); err != nil {
logrus.Fatal(err)
}
} else {
if err := recipePkg.EnsureLatest(recipe.Name); err != nil { if err := recipePkg.EnsureLatest(recipe.Name); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
} }
} else {
if err := recipePkg.EnsureVersion(recipe.Name, c.Args().Get(1)); err != nil {
logrus.Fatal(err)
}
}
}
if err := ensureServerFlag(); err != nil { if err := ensureServerFlag(); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
@ -106,7 +140,8 @@ var appNewCommand = cli.Command{
logrus.Fatal(err) logrus.Fatal(err)
} }
secretsConfig, err := secret.ReadSecretsConfig(sampleEnv, composeFiles, recipe.Name) envSamplePath := path.Join(config.RECIPES_DIR, recipe.Name, ".env.sample")
secretsConfig, err := secret.ReadSecretsConfig(envSamplePath, composeFiles, config.StackName(internal.Domain))
if err != nil { if err != nil {
return err return err
} }
@ -166,14 +201,14 @@ var appNewCommand = cli.Command{
type AppSecrets map[string]string type AppSecrets map[string]string
// createSecrets creates all secrets for a new app. // createSecrets creates all secrets for a new app.
func createSecrets(cl *dockerClient.Client, secretsConfig map[string]string, sanitisedAppName string) (AppSecrets, error) { func createSecrets(cl *dockerClient.Client, secretsConfig map[string]secret.Secret, sanitisedAppName string) (AppSecrets, error) {
// NOTE(d1): trim to match app.StackName() implementation // NOTE(d1): trim to match app.StackName() implementation
if len(sanitisedAppName) > 45 { if len(sanitisedAppName) > config.MAX_SANITISED_APP_NAME_LENGTH {
logrus.Debugf("trimming %s to %s to avoid runtime limits", sanitisedAppName, sanitisedAppName[:45]) logrus.Debugf("trimming %s to %s to avoid runtime limits", sanitisedAppName, sanitisedAppName[:config.MAX_SANITISED_APP_NAME_LENGTH])
sanitisedAppName = sanitisedAppName[:45] sanitisedAppName = sanitisedAppName[:config.MAX_SANITISED_APP_NAME_LENGTH]
} }
secrets, err := secret.GenerateSecrets(cl, secretsConfig, sanitisedAppName, internal.NewAppServer) secrets, err := secret.GenerateSecrets(cl, secretsConfig, internal.NewAppServer)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -196,7 +231,7 @@ func createSecrets(cl *dockerClient.Client, secretsConfig map[string]string, san
} }
// ensureDomainFlag checks if the domain flag was used. if not, asks the user for it/ // ensureDomainFlag checks if the domain flag was used. if not, asks the user for it/
func ensureDomainFlag(recipe recipe.Recipe, server string) error { func ensureDomainFlag(recipe recipePkg.Recipe, server string) error {
if internal.Domain == "" && !internal.NoInput { if internal.Domain == "" && !internal.NoInput {
prompt := &survey.Input{ prompt := &survey.Input{
Message: "Specify app domain", Message: "Specify app domain",
@ -215,7 +250,7 @@ func ensureDomainFlag(recipe recipe.Recipe, server string) error {
} }
// promptForSecrets asks if we should generate secrets for a new app. // promptForSecrets asks if we should generate secrets for a new app.
func promptForSecrets(recipeName string, secretsConfig map[string]string) error { func promptForSecrets(recipeName string, secretsConfig map[string]secret.Secret) error {
if len(secretsConfig) == 0 { if len(secretsConfig) == 0 {
logrus.Debugf("%s has no secrets to generate, skipping...", recipeName) logrus.Debugf("%s has no secrets to generate, skipping...", recipeName)
return nil return nil


@ -2,7 +2,8 @@ package app
import ( import (
"context" "context"
"strings" "encoding/json"
"fmt"
"time" "time"
"coopcloud.tech/abra/cli/internal" "coopcloud.tech/abra/cli/internal"
@ -10,11 +11,13 @@ import (
"coopcloud.tech/abra/pkg/client" "coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config" "coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter" "coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/service" "coopcloud.tech/abra/pkg/recipe"
abraService "coopcloud.tech/abra/pkg/service"
stack "coopcloud.tech/abra/pkg/upstream/stack" stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/buger/goterm" "github.com/buger/goterm"
dockerFormatter "github.com/docker/cli/cli/command/formatter" dockerFormatter "github.com/docker/cli/cli/command/formatter"
"github.com/docker/docker/api/types" containerTypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client" dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"
@ -27,6 +30,7 @@ var appPsCommand = cli.Command{
ArgsUsage: "<domain>", ArgsUsage: "<domain>",
Description: "Show a more detailed status output of a specific deployed app", Description: "Show a more detailed status output of a specific deployed app",
Flags: []cli.Flag{ Flags: []cli.Flag{
internal.MachineReadableFlag,
internal.WatchFlag, internal.WatchFlag,
internal.DebugFlag, internal.DebugFlag,
}, },
@ -40,7 +44,7 @@ var appPsCommand = cli.Command{
logrus.Fatal(err) logrus.Fatal(err)
} }
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName()) isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
@ -49,6 +53,15 @@ var appPsCommand = cli.Command{
logrus.Fatalf("%s is not deployed?", app.Name) logrus.Fatalf("%s is not deployed?", app.Name)
} }
statuses, err := config.GetAppStatuses([]config.App{app}, true)
if statusMeta, ok := statuses[app.StackName()]; ok {
if _, exists := statusMeta["chaos"]; !exists {
if err := recipe.EnsureVersion(app.Recipe, deployedVersion); err != nil {
logrus.Fatal(err)
}
}
}
if !internal.Watch { if !internal.Watch {
showPSOutput(c, app, cl) showPSOutput(c, app, cl)
return nil return nil
@ -66,36 +79,77 @@ var appPsCommand = cli.Command{
// showPSOutput renders ps output. // showPSOutput renders ps output.
func showPSOutput(c *cli.Context, app config.App, cl *dockerClient.Client) { func showPSOutput(c *cli.Context, app config.App, cl *dockerClient.Client) {
filters, err := app.Filters(true, true) composeFiles, err := config.GetComposeFiles(app.Recipe, app.Env)
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
return
} }
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters}) deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: app.StackName(),
Prune: false,
ResolveImage: stack.ResolveImageAlways,
}
compose, err := config.GetAppComposeConfig(app.Name, deployOpts, app.Env)
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
return
} }
var tablerows [][]string
allContainerStats := make(map[string]map[string]string)
for _, service := range compose.Services {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name))
containers, err := cl.ContainerList(context.Background(), containerTypes.ListOptions{Filters: filters})
if err != nil {
logrus.Fatal(err)
return
}
var containerStats map[string]string
if len(containers) == 0 {
containerStats = map[string]string{
"service name": service.Name,
"image": "unknown",
"created": "unknown",
"status": "unknown",
"state": "unknown",
"ports": "unknown",
}
} else {
container := containers[0]
containerStats = map[string]string{
"service name": abraService.ContainerToServiceName(container.Names, app.StackName()),
"image": formatter.RemoveSha(container.Image),
"created": formatter.HumanDuration(container.Created),
"status": container.Status,
"state": container.State,
"ports": dockerFormatter.DisplayablePorts(container.Ports),
}
}
allContainerStats[containerStats["service name"]] = containerStats
var tablerow []string = []string{containerStats["service name"], containerStats["image"], containerStats["created"], containerStats["status"], containerStats["state"], containerStats["ports"]}
tablerows = append(tablerows, tablerow)
}
if internal.MachineReadable {
jsonstring, err := json.Marshal(allContainerStats)
if err != nil {
logrus.Fatal(err)
} else {
fmt.Println(string(jsonstring))
}
return
} else {
tableCol := []string{"service name", "image", "created", "status", "state", "ports"} tableCol := []string{"service name", "image", "created", "status", "state", "ports"}
table := formatter.CreateTable(tableCol) table := formatter.CreateTable(tableCol)
for _, row := range tablerows {
for _, container := range containers { table.Append(row)
var containerNames []string
for _, containerName := range container.Names {
trimmed := strings.TrimPrefix(containerName, "/")
containerNames = append(containerNames, trimmed)
} }
tableRow := []string{
service.ContainerToServiceName(container.Names, app.StackName()),
formatter.RemoveSha(container.Image),
formatter.HumanDuration(container.Created),
container.Status,
container.State,
dockerFormatter.DisplayablePorts(container.Ports),
}
table.Append(tableRow)
}
table.Render() table.Render()
} }
}


@ -3,6 +3,7 @@ package app
import ( import (
"context" "context"
"fmt" "fmt"
"log"
"os" "os"
"coopcloud.tech/abra/cli/internal" "coopcloud.tech/abra/cli/internal"
@ -11,7 +12,6 @@ import (
stack "coopcloud.tech/abra/pkg/upstream/stack" stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/AlecAivazis/survey/v2" "github.com/AlecAivazis/survey/v2"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/volume"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"
) )
@ -110,26 +110,19 @@ flag.
logrus.Fatal(err) logrus.Fatal(err)
} }
volumeListOptions := volume.ListOptions{fs} volumeList, err := client.GetVolumes(cl, context.Background(), app.Server, fs)
volumeListOKBody, err := cl.VolumeList(context.Background(), volumeListOptions)
volumeList := volumeListOKBody.Volumes
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
volumeNames := client.GetVolumeNames(volumeList)
var vols []string if len(volumeNames) > 0 {
for _, vol := range volumeList { err := client.RemoveVolumes(cl, context.Background(), volumeNames, internal.Force, 5)
vols = append(vols, vol.Name)
}
if len(vols) > 0 {
for _, vol := range vols {
err := cl.VolumeRemove(context.Background(), vol, internal.Force) // last argument is for force removing
if err != nil { if err != nil {
logrus.Fatal(err) log.Fatalf("removing volumes failed: %s", err)
}
logrus.Info(fmt.Sprintf("volume %s removed", vol))
} }
logrus.Infof("%d volumes removed successfully", len(volumeNames))
} else { } else {
logrus.Info("no volumes to remove") logrus.Info("no volumes to remove")
} }


@ -1,223 +1,82 @@
package app package app
import ( import (
"context"
"errors"
"fmt" "fmt"
"os"
"coopcloud.tech/abra/cli/internal" "coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete" "coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client" "coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/recipe" "coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"
) )
type restoreConfig struct { var targetPath string
preHookCmd string var targetPathFlag = &cli.StringFlag{
postHookCmd string Name: "target, t",
Usage: "Target path",
Destination: &targetPath,
} }
var appRestoreCommand = cli.Command{ var appRestoreCommand = cli.Command{
Name: "restore", Name: "restore",
Aliases: []string{"rs"}, Aliases: []string{"rs"},
Usage: "Run app restore", Usage: "Restore an app backup",
ArgsUsage: "<domain> <service> <file>", ArgsUsage: "<domain> <service>",
Flags: []cli.Flag{ Flags: []cli.Flag{
internal.DebugFlag, internal.DebugFlag,
internal.OfflineFlag, internal.OfflineFlag,
internal.ChaosFlag, targetPathFlag,
}, },
Before: internal.SubCommandBefore, Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete, BashComplete: autocomplete.AppNameComplete,
Description: `
Run an app restore.
Pre/post hook commands are defined in the recipe configuration. Abra reads this
configuration and run the comands in the context of the service before
restoring the backup.
Unlike "abra app backup", restore must be run on a per-service basis. You can
not restore all services in one go. Backup files produced by Abra are
compressed archives which use absolute paths. This allows Abra to restore
according to standard tar command logic, i.e. the backup will be restored to
the path it was originally backed up from.
Example:
abra app restore example.com app ~/.abra/backups/example_com_app_609341138.tar.gz
`,
Action: func(c *cli.Context) error { Action: func(c *cli.Context) error {
app := internal.ValidateApp(c) app := internal.ValidateApp(c)
recipe, err := recipe.Get(app.Recipe, internal.Offline) if err := recipe.EnsureExists(app.Recipe); err != nil {
if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
if !internal.Chaos { if !internal.Chaos {
if err := recipePkg.EnsureIsClean(app.Recipe); err != nil { if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
if !internal.Offline { if !internal.Offline {
if err := recipePkg.EnsureUpToDate(app.Recipe); err != nil { if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
} }
if err := recipePkg.EnsureLatest(app.Recipe); err != nil { if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
} }
serviceName := c.Args().Get(1)
if serviceName == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("missing <service>?"))
}
backupPath := c.Args().Get(2)
if backupPath == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("missing <file>?"))
}
if _, err := os.Stat(backupPath); err != nil {
if os.IsNotExist(err) {
logrus.Fatalf("%s doesn't exist?", backupPath)
}
}
restoreConfigs := make(map[string]restoreConfig)
for _, service := range recipe.Config.Services {
if restoreEnabled, ok := service.Deploy.Labels["backupbot.restore"]; ok {
if restoreEnabled == "true" {
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), service.Name)
rsConfig := restoreConfig{}
logrus.Debugf("restore config detected for %s", fullServiceName)
if preHookCmd, ok := service.Deploy.Labels["backupbot.restore.pre-hook"]; ok {
logrus.Debugf("detected pre-hook command for %s: %s", fullServiceName, preHookCmd)
rsConfig.preHookCmd = preHookCmd
}
if postHookCmd, ok := service.Deploy.Labels["backupbot.restore.post-hook"]; ok {
logrus.Debugf("detected post-hook command for %s: %s", fullServiceName, postHookCmd)
rsConfig.postHookCmd = postHookCmd
}
restoreConfigs[service.Name] = rsConfig
}
}
}
rsConfig, ok := restoreConfigs[serviceName]
if !ok {
rsConfig = restoreConfig{}
}
cl, err := client.New(app.Server) cl, err := client.New(app.Server)
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
if err := runRestore(cl, app, backupPath, serviceName, rsConfig); err != nil { targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
logrus.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
logrus.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if targetPath != "" {
logrus.Debugf("including TARGET=%s in backupbot exec invocation", targetPath)
execEnv = append(execEnv, fmt.Sprintf("TARGET=%s", targetPath))
}
if err := internal.RunBackupCmdRemote(cl, "restore", targetContainer.ID, execEnv); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
return nil return nil
}, },
} }
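// Usage sketch (illustrative, not part of this changeset): the rewritten
// command hands restore off to the deployed backupbot container, e.g.:
//
//	abra app restore example.com app
//	abra app restore example.com app --target /var/lib/mysql
//
// The --target value is forwarded to backupbot as the TARGET environment
// variable; the path above is only an example.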
// runRestore does the actual restore logic.
func runRestore(cl *dockerClient.Client, app config.App, backupPath, serviceName string, rsConfig restoreConfig) error {
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, true)
if err != nil {
return err
}
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)
if rsConfig.preHookCmd != "" {
splitCmd := internal.SafeSplit(rsConfig.preHookCmd)
logrus.Debugf("split pre-hook command for %s into %s", fullServiceName, splitCmd)
preHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &preHookExecOpts); err != nil {
return err
}
logrus.Infof("succesfully ran %s pre-hook command: %s", fullServiceName, rsConfig.preHookCmd)
}
backupReader, err := os.Open(backupPath)
if err != nil {
return err
}
content, err := archive.DecompressStream(backupReader)
if err != nil {
return err
}
// NOTE(d1): we use absolute paths so tar knows what to do. it will restore
// files according to the paths set in the compressed archive
restorePath := "/"
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), targetContainer.ID, restorePath, content, copyOpts); err != nil {
return err
}
logrus.Infof("restored %s to %s", backupPath, fullServiceName)
if rsConfig.postHookCmd != "" {
splitCmd := internal.SafeSplit(rsConfig.postHookCmd)
logrus.Debugf("split post-hook command for %s into %s", fullServiceName, splitCmd)
postHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &postHookExecOpts); err != nil {
return err
}
logrus.Infof("succesfully ran %s post-hook command: %s", fullServiceName, rsConfig.postHookCmd)
}
return nil
}


@ -51,6 +51,11 @@ recipes.
app := internal.ValidateApp(c) app := internal.ValidateApp(c)
stackName := app.StackName() stackName := app.StackName()
specificVersion := c.Args().Get(1)
if specificVersion != "" && internal.Chaos {
logrus.Fatal("cannot use <version> and --chaos together")
}
if err := recipe.EnsureExists(app.Recipe); err != nil { if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
@ -125,7 +130,6 @@ recipes.
logrus.Warnf("failed to determine deployed version of %s", app.Name) logrus.Warnf("failed to determine deployed version of %s", app.Name)
} }
specificVersion := c.Args().Get(1)
if specificVersion != "" { if specificVersion != "" {
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion) parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil { if err != nil {


@ -91,7 +91,7 @@ var appRunCommand = cli.Command{
logrus.Fatal(err) logrus.Fatal(err)
} }
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil { if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }


@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"os" "os"
"strconv" "strconv"
"strings"
"coopcloud.tech/abra/cli/internal" "coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete" "coopcloud.tech/abra/pkg/autocomplete"
@ -20,19 +21,23 @@ import (
"github.com/urfave/cli" "github.com/urfave/cli"
) )
var allSecrets bool var (
var allSecretsFlag = &cli.BoolFlag{ allSecrets bool
allSecretsFlag = &cli.BoolFlag{
Name: "all, a", Name: "all, a",
Destination: &allSecrets, Destination: &allSecrets,
Usage: "Generate all secrets", Usage: "Generate all secrets",
} }
)
var rmAllSecrets bool var (
var rmAllSecretsFlag = &cli.BoolFlag{ rmAllSecrets bool
rmAllSecretsFlag = &cli.BoolFlag{
Name: "all, a", Name: "all, a",
Destination: &rmAllSecrets, Destination: &rmAllSecrets,
Usage: "Remove all secrets", Usage: "Remove all secrets",
} }
)
var appSecretGenerateCommand = cli.Command{ var appSecretGenerateCommand = cli.Command{
Name: "generate", Name: "generate",
@ -87,28 +92,22 @@ var appSecretGenerateCommand = cli.Command{
logrus.Fatal(err) logrus.Fatal(err)
} }
secretsConfig, err := secret.ReadSecretsConfig(app.Env, composeFiles, app.Recipe) secrets, err := secret.ReadSecretsConfig(app.Path, composeFiles, app.StackName())
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
secretsToCreate := make(map[string]string) if !allSecrets {
if allSecrets {
secretsToCreate = secretsConfig
} else {
secretName := c.Args().Get(1) secretName := c.Args().Get(1)
secretVersion := c.Args().Get(2) secretVersion := c.Args().Get(2)
matches := false s, ok := secrets[secretName]
for name := range secretsConfig { if !ok {
if secretName == name {
secretsToCreate[name] = secretVersion
matches = true
}
}
if !matches {
logrus.Fatalf("%s doesn't exist in the env config?", secretName) logrus.Fatalf("%s doesn't exist in the env config?", secretName)
} }
s.Version = secretVersion
secrets = map[string]secret.Secret{
secretName: s,
}
} }
cl, err := client.New(app.Server) cl, err := client.New(app.Server)
@ -116,7 +115,7 @@ var appSecretGenerateCommand = cli.Command{
logrus.Fatal(err) logrus.Fatal(err)
} }
secretVals, err := secret.GenerateSecrets(cl, secretsToCreate, app.StackName(), app.Server) secretVals, err := secret.GenerateSecrets(cl, secrets, app.Server)
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
@ -158,6 +157,8 @@ var appSecretInsertCommand = cli.Command{
Flags: []cli.Flag{ Flags: []cli.Flag{
internal.DebugFlag, internal.DebugFlag,
internal.PassFlag, internal.PassFlag,
internal.FileFlag,
internal.TrimFlag,
}, },
Before: internal.SubCommandBefore, Before: internal.SubCommandBefore,
ArgsUsage: "<domain> <secret-name> <version> <data>", ArgsUsage: "<domain> <secret-name> <version> <data>",
@ -190,6 +191,18 @@ Example:
version := c.Args().Get(2) version := c.Args().Get(2)
data := c.Args().Get(3) data := c.Args().Get(3)
if internal.File {
raw, err := os.ReadFile(data)
if err != nil {
logrus.Fatalf("reading secret from file: %s", err)
}
data = string(raw)
}
if internal.Trim {
data = strings.TrimSpace(data)
}
secretName := fmt.Sprintf("%s_%s_%s", app.StackName(), name, version) secretName := fmt.Sprintf("%s_%s_%s", app.StackName(), name, version)
if err := client.StoreSecret(cl, secretName, data, app.Server); err != nil { if err := client.StoreSecret(cl, secretName, data, app.Server); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
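// Usage sketch (illustrative, not part of this changeset): with the new
// flags the secret value can be read from a file and trimmed of surrounding
// whitespace before being stored, e.g.:
//
//	abra app secret insert example.com ssh_key v1 /path/to/id_ed25519.pub --file --trim
//
// The secret name and file path are examples only; --file and --trim map to
// internal.File and internal.Trim defined in cli/internal.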
@ -276,7 +289,7 @@ Example:
logrus.Fatal(err) logrus.Fatal(err)
} }
secretsConfig, err := secret.ReadSecretsConfig(app.Env, composeFiles, app.Recipe) secrets, err := secret.ReadSecretsConfig(app.Path, composeFiles, app.StackName())
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
@ -311,12 +324,7 @@ Example:
match := false match := false
secretToRm := c.Args().Get(1) secretToRm := c.Args().Get(1)
for secretName, secretValue := range secretsConfig { for secretName, val := range secrets {
val, err := secret.ParseSecretValue(secretValue)
if err != nil {
logrus.Fatal(err)
}
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, val.Version) secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, val.Version)
if _, ok := remoteSecretNames[secretRemoteName]; ok { if _, ok := remoteSecretNames[secretRemoteName]; ok {
if secretToRm != "" { if secretToRm != "" {
@ -356,6 +364,7 @@ var appSecretLsCommand = cli.Command{
internal.DebugFlag, internal.DebugFlag,
internal.OfflineFlag, internal.OfflineFlag,
internal.ChaosFlag, internal.ChaosFlag,
internal.MachineReadableFlag,
}, },
Before: internal.SubCommandBefore, Before: internal.SubCommandBefore,
Usage: "List all secrets", Usage: "List all secrets",
@ -383,12 +392,7 @@ var appSecretLsCommand = cli.Command{
} }
} }
composeFiles, err := config.GetComposeFiles(app.Recipe, app.Env) cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
secretsConfig, err := secret.ReadSecretsConfig(app.Env, composeFiles, app.Recipe)
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
@ -396,42 +400,27 @@ var appSecretLsCommand = cli.Command{
tableCol := []string{"Name", "Version", "Generated Name", "Created On Server"} tableCol := []string{"Name", "Version", "Generated Name", "Created On Server"}
table := formatter.CreateTable(tableCol) table := formatter.CreateTable(tableCol)
cl, err := client.New(app.Server) secStats, err := secret.PollSecretsStatus(cl, app)
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
filters, err := app.Filters(false, false) for _, secStat := range secStats {
if err != nil { tableRow := []string{
logrus.Fatal(err) secStat.LocalName,
secStat.Version,
secStat.RemoteName,
strconv.FormatBool(secStat.CreatedOnRemote),
} }
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: filters})
if err != nil {
logrus.Fatal(err)
}
remoteSecretNames := make(map[string]bool)
for _, cont := range secretList {
remoteSecretNames[cont.Spec.Annotations.Name] = true
}
for secretName, secretValue := range secretsConfig {
createdRemote := false
val, err := secret.ParseSecretValue(secretValue)
if err != nil {
logrus.Fatal(err)
}
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, val.Version)
if _, ok := remoteSecretNames[secretRemoteName]; ok {
createdRemote = true
}
tableRow := []string{secretName, val.Version, secretRemoteName, strconv.FormatBool(createdRemote)}
table.Append(tableRow) table.Append(tableRow)
} }
if table.NumLines() > 0 { if table.NumLines() > 0 {
if internal.MachineReadable {
table.JSONRender()
} else {
table.Render() table.Render()
}
} else { } else {
logrus.Warnf("no secrets stored for %s", app.Name) logrus.Warnf("no secrets stored for %s", app.Name)
} }

View File

@ -11,7 +11,7 @@ import (
"coopcloud.tech/abra/pkg/formatter" "coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/service" "coopcloud.tech/abra/pkg/service"
stack "coopcloud.tech/abra/pkg/upstream/stack" stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types" containerTypes "github.com/docker/docker/api/types/container"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"
) )
@ -48,7 +48,7 @@ var appServicesCommand = cli.Command{
logrus.Fatal(err) logrus.Fatal(err)
} }
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters}) containers, err := cl.ContainerList(context.Background(), containerTypes.ListOptions{Filters: filters})
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }

View File

@ -31,6 +31,7 @@ var appUpgradeCommand = cli.Command{
internal.NoDomainChecksFlag, internal.NoDomainChecksFlag,
internal.DontWaitConvergeFlag, internal.DontWaitConvergeFlag,
internal.OfflineFlag, internal.OfflineFlag,
internal.ReleaseNotesFlag,
}, },
Before: internal.SubCommandBefore, Before: internal.SubCommandBefore,
Description: ` Description: `
@ -56,6 +57,11 @@ recipes.
app := internal.ValidateApp(c) app := internal.ValidateApp(c)
stackName := app.StackName() stackName := app.StackName()
specificVersion := c.Args().Get(1)
if specificVersion != "" && internal.Chaos {
logrus.Fatal("cannot use <version> and --chaos together")
}
if !internal.Chaos { if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil { if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
@ -126,7 +132,6 @@ recipes.
logrus.Warnf("failed to determine deployed version of %s", app.Name) logrus.Warnf("failed to determine deployed version of %s", app.Name)
} }
specificVersion := c.Args().Get(1)
if specificVersion != "" { if specificVersion != "" {
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion) parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil { if err != nil {
@ -189,17 +194,17 @@ recipes.
// check out the tag and then they'll appear to be missing. this covers // check out the tag and then they'll appear to be missing. this covers
// when we obviously will forget to write release notes before publishing // when we obviously will forget to write release notes before publishing
var releaseNotes string var releaseNotes string
if chosenUpgrade != "" {
parsedChosenUpgrade, err := tagcmp.Parse(chosenUpgrade)
if err != nil {
logrus.Fatal(err)
}
for _, version := range versions { for _, version := range versions {
parsedVersion, err := tagcmp.Parse(version) parsedVersion, err := tagcmp.Parse(version)
if err != nil { if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
parsedChosenUpgrade, err := tagcmp.Parse(chosenUpgrade) if parsedVersion.IsGreaterThan(parsedDeployedVersion) && parsedVersion.IsLessThan(parsedChosenUpgrade) {
if err != nil {
logrus.Fatal(err)
}
if !(parsedVersion.Equals(parsedDeployedVersion)) && parsedVersion.IsLessThan(parsedChosenUpgrade) {
note, err := internal.GetReleaseNotes(app.Recipe, version) note, err := internal.GetReleaseNotes(app.Recipe, version)
if err != nil { if err != nil {
return err return err
@ -209,6 +214,7 @@ recipes.
} }
} }
} }
}
if !internal.Chaos { if !internal.Chaos {
if err := recipePkg.EnsureVersion(app.Recipe, chosenUpgrade); err != nil { if err := recipePkg.EnsureVersion(app.Recipe, chosenUpgrade); err != nil {
@ -254,6 +260,23 @@ recipes.
config.SetChaosVersionLabel(compose, stackName, chosenUpgrade) config.SetChaosVersionLabel(compose, stackName, chosenUpgrade)
config.SetUpdateLabel(compose, stackName, app.Env) config.SetUpdateLabel(compose, stackName, app.Env)
envVars, err := config.CheckEnv(app)
if err != nil {
logrus.Fatal(err)
}
for _, envVar := range envVars {
if !envVar.Present {
logrus.Warnf("env var %s missing from %s.env, present in recipe .env.sample", envVar.Name, app.Domain)
}
}
if internal.ReleaseNotes {
fmt.Println()
fmt.Print(releaseNotes)
return nil
}
if err := internal.NewVersionOverview(app, deployedVersion, chosenUpgrade, releaseNotes); err != nil { if err := internal.NewVersionOverview(app, deployedVersion, chosenUpgrade, releaseNotes); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }


@ -10,7 +10,7 @@ import (
"coopcloud.tech/abra/pkg/formatter" "coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe" "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/stack" "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/distribution/reference" "github.com/distribution/reference"
"github.com/olekukonko/tablewriter" "github.com/olekukonko/tablewriter"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"


@ -2,6 +2,7 @@ package app
import ( import (
"context" "context"
"log"
"coopcloud.tech/abra/cli/internal" "coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete" "coopcloud.tech/abra/pkg/autocomplete"
@ -131,12 +132,12 @@ Passing "--force/-f" will select all volumes for removal. Be careful.
} }
if len(volumesToRemove) > 0 { if len(volumesToRemove) > 0 {
err = client.RemoveVolumes(cl, context.Background(), app.Server, volumesToRemove, internal.Force) err := client.RemoveVolumes(cl, context.Background(), volumesToRemove, internal.Force, 5)
if err != nil { if err != nil {
logrus.Fatal(err) log.Fatalf("removing volumes failed: %s", err)
} }
logrus.Info("volumes removed successfully") logrus.Infof("%d volumes removed successfully", len(volumesToRemove))
} else { } else {
logrus.Info("no volumes removed") logrus.Info("no volumes removed")
} }


@ -98,11 +98,6 @@ keys configured on your account.
continue continue
} }
if _, exists := catalogue.CatalogueSkipList[recipeMeta.Name]; exists {
catlBar.Add(1)
continue
}
versions, err := recipe.GetRecipeVersions(recipeMeta.Name, internal.Offline) versions, err := recipe.GetRecipeVersions(recipeMeta.Name, internal.Offline)
if err != nil { if err != nil {
logrus.Warn(err) logrus.Warn(err)
@ -173,7 +168,7 @@ keys configured on your account.
} }
msg := "chore: publish new catalogue release changes" msg := "chore: publish new catalogue release changes"
if err := gitPkg.Commit(cataloguePath, "**.json", msg, internal.Dry); err != nil { if err := gitPkg.Commit(cataloguePath, msg, internal.Dry); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }


@ -14,8 +14,8 @@ import (
"coopcloud.tech/abra/cli/recipe" "coopcloud.tech/abra/cli/recipe"
"coopcloud.tech/abra/cli/server" "coopcloud.tech/abra/cli/server"
"coopcloud.tech/abra/pkg/autocomplete" "coopcloud.tech/abra/pkg/autocomplete"
cataloguePkg "coopcloud.tech/abra/pkg/catalogue"
"coopcloud.tech/abra/pkg/config" "coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/web" "coopcloud.tech/abra/pkg/web"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"
@ -184,13 +184,9 @@ func newAbraApp(version, commit string) *cli.App {
} }
} }
if _, err := os.Stat(config.CATALOGUE_DIR); os.IsNotExist(err) { if err := cataloguePkg.EnsureCatalogue(); err != nil {
url := fmt.Sprintf("%s/%s.git", config.REPOS_BASE_URL, config.CATALOGUE_JSON_REPO_NAME)
logrus.Warnf("local recipe catalogue is missing, retrieving now")
if err := git.Clone(config.CATALOGUE_DIR, url); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
}
logrus.Debugf("abra version %s, commit %s", version, commit) logrus.Debugf("abra version %s, commit %s", version, commit)


@ -1,35 +1,67 @@
package internal package internal
import ( import (
"strings" "context"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/service"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
) )
// SafeSplit splits up a string into a list of commands safely. // RetrieveBackupBotContainer gets the deployed backupbot container.
func SafeSplit(s string) []string { func RetrieveBackupBotContainer(cl *dockerClient.Client) (types.Container, error) {
split := strings.Split(s, " ") ctx := context.Background()
chosenService, err := service.GetServiceByLabel(ctx, cl, config.BackupbotLabel, NoInput)
var result []string if err != nil {
var inquote string return types.Container{}, err
var block string
for _, i := range split {
if inquote == "" {
if strings.HasPrefix(i, "'") || strings.HasPrefix(i, "\"") {
inquote = string(i[0])
block = strings.TrimPrefix(i, inquote) + " "
} else {
result = append(result, i)
}
} else {
if !strings.HasSuffix(i, inquote) {
block += i + " "
} else {
block += strings.TrimSuffix(i, inquote)
inquote = ""
result = append(result, block)
block = ""
}
}
} }
return result logrus.Debugf("retrieved %s as backup enabled service", chosenService.Spec.Name)
filters := filters.NewArgs()
filters.Add("name", chosenService.Spec.Name)
targetContainer, err := containerPkg.GetContainer(
ctx,
cl,
filters,
NoInput,
)
if err != nil {
return types.Container{}, err
}
return targetContainer, nil
}
// RunBackupCmdRemote runs a backup related command on a remote backupbot container.
func RunBackupCmdRemote(cl *dockerClient.Client, backupCmd string, containerID string, execEnv []string) error {
execBackupListOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: []string{"/usr/bin/backup", "--", backupCmd},
Detach: false,
Env: execEnv,
Tty: true,
}
logrus.Debugf("running backup %s on %s with exec config %v", backupCmd, containerID, execBackupListOpts)
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &execBackupListOpts); err != nil {
return err
}
return nil
} }
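
A minimal sketch of how these two helpers fit together, assuming a caller inside the cli package tree (cli/internal is an internal package), a client created via FromEnv, and a hypothetical "ls" backup subcommand; all of those are illustrative assumptions:

package cli // sketch: any package under coopcloud.tech/abra/cli can import cli/internal

import (
	"coopcloud.tech/abra/cli/internal"
	dockerClient "github.com/docker/docker/client"
	"github.com/sirupsen/logrus"
)

func backupSketch() {
	cl, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv, dockerClient.WithAPIVersionNegotiation())
	if err != nil {
		logrus.Fatal(err)
	}

	// Locate the deployed backupbot container via the backupbot label.
	target, err := internal.RetrieveBackupBotContainer(cl)
	if err != nil {
		logrus.Fatal(err)
	}

	// Run "/usr/bin/backup -- ls" inside that container ("ls" is a placeholder subcommand).
	if err := internal.RunBackupCmdRemote(cl, "ls", target.ID, []string{}); err != nil {
		logrus.Fatal(err)
	}
}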

View File

@ -38,6 +38,20 @@ var PassRemoveFlag = &cli.BoolFlag{
Destination: &PassRemove, Destination: &PassRemove,
} }
var File bool
var FileFlag = &cli.BoolFlag{
Name: "file, f",
Usage: "Treat input as a file",
Destination: &File,
}
var Trim bool
var TrimFlag = &cli.BoolFlag{
Name: "trim, t",
Usage: "Trim input",
Destination: &Trim,
}
// Force force functionality without asking. // Force force functionality without asking.
var Force bool var Force bool
@ -54,7 +68,7 @@ var Chaos bool
// ChaosFlag turns on/off chaos functionality. // ChaosFlag turns on/off chaos functionality.
var ChaosFlag = &cli.BoolFlag{ var ChaosFlag = &cli.BoolFlag{
Name: "chaos, C", Name: "chaos, C",
Usage: "Deploy uncommitted recipes changes. Use with care!", Usage: "Proceed with uncommitted recipes changes. Use with care!",
Destination: &Chaos, Destination: &Chaos,
} }
@ -95,6 +109,16 @@ var OfflineFlag = &cli.BoolFlag{
Usage: "Prefer offline & filesystem access when possible", Usage: "Prefer offline & filesystem access when possible",
} }
// ReleaseNotes stores the variable from ReleaseNotesFlag.
var ReleaseNotes bool
// ReleaseNotesFlag turns on/off printing only release notes when upgrading.
var ReleaseNotesFlag = &cli.BoolFlag{
Name: "releasenotes, r",
Destination: &ReleaseNotes,
Usage: "Only show release notes",
}
// MachineReadable stores the variable from MachineReadableFlag // MachineReadable stores the variable from MachineReadableFlag
var MachineReadable bool var MachineReadable bool

View File

@ -60,7 +60,7 @@ func RunCmdRemote(cl *dockerClient.Client, app config.App, abraSh, serviceName,
Tty: false, Tty: false,
} }
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil { if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
logrus.Infof("%s does not exist for %s, use /bin/sh as fallback", shell, app.Name) logrus.Infof("%s does not exist for %s, use /bin/sh as fallback", shell, app.Name)
shell = "/bin/sh" shell = "/bin/sh"
} }
@ -85,7 +85,7 @@ func RunCmdRemote(cl *dockerClient.Client, app config.App, abraSh, serviceName,
execCreateOpts.Tty = false execCreateOpts.Tty = false
} }
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil { if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
return err return err
} }

View File

@ -6,7 +6,7 @@ import (
"coopcloud.tech/abra/pkg/formatter" "coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe" "coopcloud.tech/abra/pkg/recipe"
"github.com/AlecAivazis/survey/v2" "github.com/AlecAivazis/survey/v2"
"github.com/docker/distribution/reference" "github.com/distribution/reference"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )

40
cli/recipe/diff.go Normal file
View File

@ -0,0 +1,40 @@
package recipe
import (
"path"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
gitPkg "coopcloud.tech/abra/pkg/git"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var recipeDiffCommand = cli.Command{
Name: "diff",
Usage: "Show unstaged changes in recipe config",
Description: "Due to limitations in our underlying Git dependency, this command requires /usr/bin/git.",
Aliases: []string{"d"},
ArgsUsage: "<recipe>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipeName := c.Args().First()
if recipeName != "" {
internal.ValidateRecipe(c)
}
recipeDir := path.Join(config.RECIPES_DIR, recipeName)
if err := gitPkg.DiffUnstaged(recipeDir); err != nil {
logrus.Fatal(err)
}
return nil
},
}

View File

@ -3,6 +3,7 @@ package recipe
import ( import (
"coopcloud.tech/abra/cli/internal" "coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete" "coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe" "coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"
@ -17,26 +18,31 @@ var recipeFetchCommand = cli.Command{
Flags: []cli.Flag{ Flags: []cli.Flag{
internal.DebugFlag, internal.DebugFlag,
internal.NoInputFlag, internal.NoInputFlag,
internal.OfflineFlag,
}, },
Before: internal.SubCommandBefore, Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete, BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error { Action: func(c *cli.Context) error {
recipeName := c.Args().First() recipeName := c.Args().First()
if recipeName != "" { if recipeName != "" {
internal.ValidateRecipe(c) internal.ValidateRecipe(c)
if err := recipe.Ensure(recipeName); err != nil {
logrus.Fatal(err)
}
return nil
} }
if err := recipe.EnsureExists(recipeName); err != nil { catalogue, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
if err := recipe.EnsureUpToDate(recipeName); err != nil { catlBar := formatter.CreateProgressbar(len(catalogue), "fetching latest recipes...")
logrus.Fatal(err) for recipeName := range catalogue {
if err := recipe.Ensure(recipeName); err != nil {
logrus.Error(err)
} }
catlBar.Add(1)
if err := recipe.EnsureLatest(recipeName); err != nil {
logrus.Fatal(err)
} }
return nil return nil

View File

@ -68,7 +68,7 @@ var recipeLintCommand = cli.Command{
skippedOutput := "-" skippedOutput := "-"
if skipped { if skipped {
skippedOutput = "yes" skippedOutput = ""
} }
satisfied := false satisfied := false
@ -87,9 +87,9 @@ var recipeLintCommand = cli.Command{
} }
} }
satisfiedOutput := "yes" satisfiedOutput := ""
if !satisfied { if !satisfied {
satisfiedOutput = "NO" satisfiedOutput = ""
if skipped { if skipped {
satisfiedOutput = "-" satisfiedOutput = "-"
} }

View File

@ -30,5 +30,7 @@ manner. Abra supports convenient automation for recipe maintenance, see the
recipeSyncCommand, recipeSyncCommand,
recipeUpgradeCommand, recipeUpgradeCommand,
recipeVersionCommand, recipeVersionCommand,
recipeResetCommand,
recipeDiffCommand,
}, },
} }

View File

@ -1,7 +1,9 @@
package recipe package recipe
import ( import (
"errors"
"fmt" "fmt"
"os"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@ -15,7 +17,7 @@ import (
recipePkg "coopcloud.tech/abra/pkg/recipe" recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/tagcmp" "coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2" "github.com/AlecAivazis/survey/v2"
"github.com/docker/distribution/reference" "github.com/distribution/reference"
"github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"
@ -106,6 +108,18 @@ your SSH keys configured on your account.
} }
} }
isClean, err := gitPkg.IsClean(recipe.Dir())
if err != nil {
logrus.Fatal(err)
}
if !isClean {
logrus.Infof("%s currently has these unstaged changes 👇", recipe.Name)
if err := gitPkg.DiffUnstaged(recipe.Dir()); err != nil {
logrus.Fatal(err)
}
}
if len(tags) > 0 { if len(tags) > 0 {
logrus.Warnf("previous git tags detected, assuming this is a new semver release") logrus.Warnf("previous git tags detected, assuming this is a new semver release")
if err := createReleaseFromPreviousTag(tagString, mainAppVersion, recipe, tags); err != nil { if err := createReleaseFromPreviousTag(tagString, mainAppVersion, recipe, tags); err != nil {
@ -128,7 +142,7 @@ your SSH keys configured on your account.
// getImageVersions retrieves image versions for a recipe // getImageVersions retrieves image versions for a recipe
func getImageVersions(recipe recipe.Recipe) (map[string]string, error) { func getImageVersions(recipe recipe.Recipe) (map[string]string, error) {
var services = make(map[string]string) services := make(map[string]string)
missingTag := false missingTag := false
for _, service := range recipe.Config.Services { for _, service := range recipe.Config.Services {
@ -195,6 +209,10 @@ func createReleaseFromTag(recipe recipe.Recipe, tagString, mainAppVersion string
tagString = fmt.Sprintf("%s+%s", tag.String(), mainAppVersion) tagString = fmt.Sprintf("%s+%s", tag.String(), mainAppVersion)
} }
if err := addReleaseNotes(recipe, tagString); err != nil {
logrus.Fatal(err)
}
if err := commitRelease(recipe, tagString); err != nil { if err := commitRelease(recipe, tagString); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }
@ -225,6 +243,82 @@ func getTagCreateOptions(tag string) (git.CreateTagOptions, error) {
return git.CreateTagOptions{Message: msg}, nil return git.CreateTagOptions{Message: msg}, nil
} }
// addReleaseNotes checks if the release/next release note exists and moves the
// file to release/<tag>.
func addReleaseNotes(recipe recipe.Recipe, tag string) error {
repoPath := path.Join(config.RECIPES_DIR, recipe.Name)
tagReleaseNotePath := path.Join(repoPath, "release", tag)
if _, err := os.Stat(tagReleaseNotePath); err == nil {
// Release note for the current tag already exists.
return nil
} else if !errors.Is(err, os.ErrNotExist) {
return err
}
nextReleaseNotePath := path.Join(repoPath, "release", "next")
if _, err := os.Stat(nextReleaseNotePath); err == nil {
// release/next note exists. Move it to release/<tag>
if internal.Dry {
logrus.Debugf("dry run: move release note from 'next' to %s", tag)
return nil
}
if !internal.NoInput {
prompt := &survey.Input{
Message: "Use release note in release/next?",
}
var addReleaseNote bool
if err := survey.AskOne(prompt, &addReleaseNote); err != nil {
return err
}
if !addReleaseNote {
return nil
}
}
err := os.Rename(nextReleaseNotePath, tagReleaseNotePath)
if err != nil {
return err
}
err = gitPkg.Add(repoPath, path.Join("release", "next"), internal.Dry)
if err != nil {
return err
}
err = gitPkg.Add(repoPath, path.Join("release", tag), internal.Dry)
if err != nil {
return err
}
} else if !errors.Is(err, os.ErrNotExist) {
return err
}
// No release note exists for the current release.
if internal.NoInput {
return nil
}
prompt := &survey.Input{
Message: "Release Note (leave empty for no release note)",
}
var releaseNote string
if err := survey.AskOne(prompt, &releaseNote); err != nil {
return err
}
if releaseNote == "" {
return nil
}
err := os.WriteFile(tagReleaseNotePath, []byte(releaseNote), 0o644)
if err != nil {
return err
}
err = gitPkg.Add(repoPath, path.Join("release", tag), internal.Dry)
if err != nil {
return err
}
return nil
}
func commitRelease(recipe recipe.Recipe, tag string) error { func commitRelease(recipe recipe.Recipe, tag string) error {
if internal.Dry { if internal.Dry {
logrus.Debugf("dry run: no changes committed") logrus.Debugf("dry run: no changes committed")
@ -244,7 +338,7 @@ func commitRelease(recipe recipe.Recipe, tag string) error {
msg := fmt.Sprintf("chore: publish %s release", tag) msg := fmt.Sprintf("chore: publish %s release", tag)
repoPath := path.Join(config.RECIPES_DIR, recipe.Name) repoPath := path.Join(config.RECIPES_DIR, recipe.Name)
if err := gitPkg.Commit(repoPath, ".", msg, internal.Dry); err != nil { if err := gitPkg.Commit(repoPath, msg, internal.Dry); err != nil {
return err return err
} }
@ -392,6 +486,10 @@ func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recip
} }
} }
if err := addReleaseNotes(recipe, tagString); err != nil {
logrus.Fatal(err)
}
if err := commitRelease(recipe, tagString); err != nil { if err := commitRelease(recipe, tagString); err != nil {
logrus.Fatalf("failed to commit changes: %s", err.Error()) logrus.Fatalf("failed to commit changes: %s", err.Error())
} }

56
cli/recipe/reset.go Normal file
View File

@ -0,0 +1,56 @@
package recipe
import (
"path"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"github.com/go-git/go-git/v5"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var recipeResetCommand = cli.Command{
Name: "reset",
Usage: "Remove all unstaged changes from recipe config",
Description: "WARNING, this will delete your changes. Be Careful.",
Aliases: []string{"rs"},
ArgsUsage: "<recipe>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipeName := c.Args().First()
if recipeName != "" {
internal.ValidateRecipe(c)
}
repoPath := path.Join(config.RECIPES_DIR, recipeName)
repo, err := git.PlainOpen(repoPath)
if err != nil {
logrus.Fatal(err)
}
ref, err := repo.Head()
if err != nil {
logrus.Fatal(err)
}
worktree, err := repo.Worktree()
if err != nil {
logrus.Fatal(err)
}
opts := &git.ResetOptions{Commit: ref.Hash(), Mode: git.HardReset}
if err := worktree.Reset(opts); err != nil {
logrus.Fatal(err)
}
return nil
},
}

View File

@ -8,6 +8,7 @@ import (
"coopcloud.tech/abra/cli/internal" "coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete" "coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config" "coopcloud.tech/abra/pkg/config"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/tagcmp" "coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2" "github.com/AlecAivazis/survey/v2"
"github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5"
@ -198,6 +199,17 @@ likely to change.
logrus.Infof("dry run: not syncing label %s for recipe %s", nextTag, recipe.Name) logrus.Infof("dry run: not syncing label %s for recipe %s", nextTag, recipe.Name)
} }
isClean, err := gitPkg.IsClean(recipe.Dir())
if err != nil {
logrus.Fatal(err)
}
if !isClean {
logrus.Infof("%s currently has these unstaged changes 👇", recipe.Name)
if err := gitPkg.DiffUnstaged(recipe.Dir()); err != nil {
logrus.Fatal(err)
}
}
return nil return nil
}, },
} }

View File

@ -14,10 +14,11 @@ import (
"coopcloud.tech/abra/pkg/client" "coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config" "coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter" "coopcloud.tech/abra/pkg/formatter"
gitPkg "coopcloud.tech/abra/pkg/git"
recipePkg "coopcloud.tech/abra/pkg/recipe" recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/tagcmp" "coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2" "github.com/AlecAivazis/survey/v2"
"github.com/docker/distribution/reference" "github.com/distribution/reference"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/urfave/cli" "github.com/urfave/cli"
) )
@ -326,6 +327,7 @@ You may invoke this command in "wizard" mode and be prompted for input:
} }
fmt.Println(string(jsonstring)) fmt.Println(string(jsonstring))
return nil return nil
} }
@ -336,6 +338,18 @@ You may invoke this command in "wizard" mode and be prompted for input:
} }
} }
} }
isClean, err := gitPkg.IsClean(recipeDir)
if err != nil {
logrus.Fatal(err)
}
if !isClean {
logrus.Infof("%s currently has these unstaged changes 👇", recipe.Name)
if err := gitPkg.DiffUnstaged(recipeDir); err != nil {
logrus.Fatal(err)
}
}
return nil return nil
}, },
} }

View File

@ -54,8 +54,9 @@ var recipeVersionCommand = cli.Command{
logrus.Fatalf("%s has no catalogue published versions?", recipe.Name) logrus.Fatalf("%s has no catalogue published versions?", recipe.Name)
} }
for i := len(recipeMeta.Versions) - 1; i >= 0; i-- {
tableCols := []string{"version", "service", "image", "tag"} tableCols := []string{"version", "service", "image", "tag"}
aggregated_table := formatter.CreateTable(tableCols)
for i := len(recipeMeta.Versions) - 1; i >= 0; i-- {
table := formatter.CreateTable(tableCols) table := formatter.CreateTable(tableCols)
for version, meta := range recipeMeta.Versions[i] { for version, meta := range recipeMeta.Versions[i] {
var versions [][]string var versions [][]string
@ -67,11 +68,10 @@ var recipeVersionCommand = cli.Command{
for _, version := range versions { for _, version := range versions {
table.Append(version) table.Append(version)
aggregated_table.Append(version)
} }
if internal.MachineReadable { if !internal.MachineReadable {
table.JSONRender()
} else {
table.SetAutoMergeCellsByColumnIndex([]int{0}) table.SetAutoMergeCellsByColumnIndex([]int{0})
table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetAlignment(tablewriter.ALIGN_LEFT)
table.Render() table.Render()
@ -79,6 +79,9 @@ var recipeVersionCommand = cli.Command{
} }
} }
} }
if internal.MachineReadable {
aggregated_table.JSONRender()
}
return nil return nil
}, },

View File

@ -53,7 +53,7 @@ func cleanUp(domainName string) {
// Docker manages SSH connection details. These are stored to disk in // Docker manages SSH connection details. These are stored to disk in
// ~/.docker. Abra can manage this completely for the user, so it's an // ~/.docker. Abra can manage this completely for the user, so it's an
// implementation detail. // implementation detail.
func newContext(c *cli.Context, domainName, username, port string) error { func newContext(c *cli.Context, domainName string) error {
store := contextPkg.NewDefaultDockerContextStore() store := contextPkg.NewDefaultDockerContextStore()
contexts, err := store.Store.List() contexts, err := store.Store.List()
if err != nil { if err != nil {
@ -67,9 +67,9 @@ func newContext(c *cli.Context, domainName, username, port string) error {
} }
} }
logrus.Debugf("creating context with domain %s, username %s and port %s", domainName, username, port) logrus.Debugf("creating context with domain %s", domainName)
if err := client.CreateContext(domainName, username, port); err != nil { if err := client.CreateContext(domainName); err != nil {
return err return err
} }
@ -158,12 +158,7 @@ developer machine.
logrus.Fatal(err) logrus.Fatal(err)
} }
hostConfig, err := sshPkg.GetHostConfig(domainName) if err := newContext(c, domainName); err != nil {
if err != nil {
logrus.Fatal(err)
}
if err := newContext(c, domainName, hostConfig.User, hostConfig.Port); err != nil {
logrus.Fatal(err) logrus.Fatal(err)
} }

138
go.mod
View File

@ -1,119 +1,133 @@
module coopcloud.tech/abra module coopcloud.tech/abra
go 1.18 go 1.21
require ( require (
coopcloud.tech/tagcmp v0.0.0-20211103052201-885b22f77d52 coopcloud.tech/tagcmp v0.0.0-20230809071031-eb3e7758d4eb
git.coopcloud.tech/coop-cloud/godotenv v1.5.2-0.20231130100509-01bff8284355
github.com/AlecAivazis/survey/v2 v2.3.7 github.com/AlecAivazis/survey/v2 v2.3.7
github.com/Autonomic-Cooperative/godotenv v1.3.1-0.20210731094149-b031ea1211e7
github.com/Gurpartap/logrus-stack v0.0.0-20170710170904-89c00d8a28f4 github.com/Gurpartap/logrus-stack v0.0.0-20170710170904-89c00d8a28f4
github.com/docker/cli v24.0.6+incompatible github.com/distribution/distribution v2.8.3+incompatible
github.com/docker/distribution v2.8.2+incompatible github.com/docker/cli v26.1.4+incompatible
github.com/docker/docker v24.0.6+incompatible github.com/docker/docker v26.1.4+incompatible
github.com/docker/go-units v0.5.0 github.com/docker/go-units v0.5.0
github.com/go-git/go-git/v5 v5.9.0 github.com/go-git/go-git/v5 v5.12.0
github.com/google/go-cmp v0.6.0
github.com/moby/sys/signal v0.7.0 github.com/moby/sys/signal v0.7.0
github.com/moby/term v0.5.0 github.com/moby/term v0.5.0
github.com/olekukonko/tablewriter v0.0.5 github.com/olekukonko/tablewriter v0.0.5
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/schollz/progressbar/v3 v3.13.1 github.com/schollz/progressbar/v3 v3.14.4
github.com/sirupsen/logrus v1.9.3 github.com/sirupsen/logrus v1.9.3
gotest.tools/v3 v3.5.1 gotest.tools/v3 v3.5.1
) )
require ( require (
dario.cat/mergo v1.0.0 // indirect dario.cat/mergo v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/BurntSushi/toml v1.0.0 // indirect github.com/BurntSushi/toml v1.4.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.9.2 // indirect github.com/ProtonMail/go-crypto v1.0.0 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
github.com/acomagu/bufpipe v1.0.4 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cloudflare/circl v1.3.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/cloudflare/circl v1.3.9 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/containerd/log v0.1.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/cyphar/filepath-securejoin v0.2.5 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/emirpasic/gods v1.18.1 // indirect github.com/emirpasic/gods v1.18.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/ghodss/yaml v1.0.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/compress v1.14.2 // indirect github.com/klauspost/compress v1.17.9 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/miekg/pkcs11 v1.0.3 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.1.0 // indirect github.com/opencontainers/runc v1.1.13 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/common v0.54.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect
github.com/rivo/uniseg v0.2.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/skeema/knownhosts v1.2.0 // indirect github.com/skeema/knownhosts v1.2.2 // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect
golang.org/x/crypto v0.13.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
golang.org/x/mod v0.12.0 // indirect go.opentelemetry.io/otel v1.27.0 // indirect
golang.org/x/net v0.15.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect
golang.org/x/sync v0.3.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect
golang.org/x/term v0.12.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
golang.org/x/text v0.13.0 // indirect go.opentelemetry.io/otel/metric v1.27.0 // indirect
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect go.opentelemetry.io/otel/sdk v1.27.0 // indirect
golang.org/x/tools v0.13.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect go.opentelemetry.io/otel/trace v1.27.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/term v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect
google.golang.org/grpc v1.64.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
) )
require ( require (
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
github.com/buger/goterm v1.0.4 github.com/buger/goterm v1.0.4
github.com/containerd/containerd v1.5.9 // indirect github.com/containerd/containerd v1.7.18 // indirect
github.com/containers/image v3.0.2+incompatible github.com/containers/image v3.0.2+incompatible
github.com/containers/storage v1.38.2 // indirect github.com/containers/storage v1.38.2 // indirect
github.com/decentral1se/passgen v1.0.1 github.com/decentral1se/passgen v1.0.1
github.com/docker/docker-credential-helpers v0.6.4 // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect github.com/fvbommel/sortorder v1.1.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/mux v1.8.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.4 github.com/hashicorp/go-retryablehttp v0.7.7
github.com/klauspost/pgzip v1.2.6 github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/patternmatcher v0.5.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_golang v1.19.1 // indirect
github.com/sergi/go-diff v1.2.0 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/spf13/cobra v1.3.0 // indirect github.com/spf13/cobra v1.8.1 // indirect
github.com/stretchr/testify v1.8.4 github.com/stretchr/testify v1.9.0
github.com/theupdateframework/notary v0.7.0 // indirect github.com/theupdateframework/notary v0.7.0 // indirect
github.com/urfave/cli v1.22.9 github.com/urfave/cli v1.22.15
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
golang.org/x/sys v0.12.0 golang.org/x/sys v0.21.0
) )

642
go.sum

File diff suppressed because it is too large

View File

@ -25,6 +25,16 @@ func AppNameComplete(c *cli.Context) {
} }
} }
func ServiceNameComplete(appName string) {
serviceNames, err := config.GetAppServiceNames(appName)
if err != nil {
return
}
for _, s := range serviceNames {
fmt.Println(s)
}
}
// RecipeNameComplete completes recipe names. // RecipeNameComplete completes recipe names.
func RecipeNameComplete(c *cli.Context) { func RecipeNameComplete(c *cli.Context) {
catl, err := recipe.ReadRecipeCatalogue(false) catl, err := recipe.ReadRecipeCatalogue(false)
@ -41,6 +51,20 @@ func RecipeNameComplete(c *cli.Context) {
} }
} }
// RecipeVersionComplete completes versions for the recipe.
func RecipeVersionComplete(recipeName string) {
catl, err := recipe.ReadRecipeCatalogue(false)
if err != nil {
logrus.Warn(err)
}
for _, v := range catl[recipeName].Versions {
for v2 := range v {
fmt.Println(v2)
}
}
}
// ServerNameComplete completes server names. // ServerNameComplete completes server names.
func ServerNameComplete(c *cli.Context) { func ServerNameComplete(c *cli.Context) {
files, err := config.LoadAppFiles("") files, err := config.LoadAppFiles("")

View File

@ -12,50 +12,11 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
// CatalogueSkipList is all the repos that are not recipes.
var CatalogueSkipList = map[string]bool{
"abra": true,
"abra-apps": true,
"abra-aur": true,
"abra-bash": true,
"abra-capsul": true,
"abra-gandi": true,
"abra-hetzner": true,
"abra-integration-test-recipe": true,
"apps": true,
"aur-abra-git": true,
"auto-mirror": true,
"auto-recipes-catalogue-json": true,
"backup-bot": true,
"backup-bot-two": true,
"beta.coopcloud.tech": true,
"comrade-renovate-bot": true,
"coopcloud.tech": true,
"coturn": true,
"docker-cp-deploy": true,
"docker-dind-bats-kcov": true,
"docs.coopcloud.tech": true,
"drone-abra": true,
"example": true,
"gardening": true,
"go-abra": true,
"organising": true,
"pyabra": true,
"radicle-seed-node": true,
"recipes-catalogue-json": true,
"recipes-wishlist": true,
"recipes.coopcloud.tech": true,
"stack-ssh-deploy": true,
"swarm-cronjob": true,
"tagcmp": true,
"traefik-cert-dumper": true,
"tyop": true,
}
// EnsureCatalogue ensures that the catalogue is cloned locally & present. // EnsureCatalogue ensures that the catalogue is cloned locally & present.
func EnsureCatalogue() error { func EnsureCatalogue() error {
catalogueDir := path.Join(config.ABRA_DIR, "catalogue") catalogueDir := path.Join(config.ABRA_DIR, "catalogue")
if _, err := os.Stat(catalogueDir); err != nil && os.IsNotExist(err) { if _, err := os.Stat(catalogueDir); err != nil && os.IsNotExist(err) {
logrus.Warnf("local recipe catalogue is missing, retrieving now")
url := fmt.Sprintf("%s/%s.git", config.REPOS_BASE_URL, config.CATALOGUE_JSON_REPO_NAME) url := fmt.Sprintf("%s/%s.git", config.REPOS_BASE_URL, config.CATALOGUE_JSON_REPO_NAME)
if err := gitPkg.Clone(catalogueDir, url); err != nil { if err := gitPkg.Clone(catalogueDir, url); err != nil {
return err return err

View File

@ -14,19 +14,16 @@ import (
type Context = contextStore.Metadata type Context = contextStore.Metadata
func CreateContext(contextName string, user string, port string) error { // CreateContext creates a new Docker context.
host := contextName func CreateContext(contextName string) error {
if user != "" { host := fmt.Sprintf("ssh://%s", contextName)
host = fmt.Sprintf("%s@%s", user, host)
}
if port != "" {
host = fmt.Sprintf("%s:%s", host, port)
}
host = fmt.Sprintf("ssh://%s", host)
if err := createContext(contextName, host); err != nil { if err := createContext(contextName, host); err != nil {
return err return err
} }
logrus.Debugf("created the %s context", contextName) logrus.Debugf("created the %s context", contextName)
return nil return nil
} }
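
A minimal sketch of the simplified call, assuming the domain is reachable over SSH; with user and port handling dropped, any such customisation is expected to live in the SSH configuration rather than in Abra (the domain below is a placeholder):

package main

import (
	"coopcloud.tech/abra/pkg/client"
	"github.com/sirupsen/logrus"
)

func main() {
	// Creates a Docker context whose endpoint is ssh://example.com.
	if err := client.CreateContext("example.com"); err != nil {
		logrus.Fatal(err)
	}
}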

View File

@ -6,7 +6,7 @@ import (
"github.com/containers/image/docker" "github.com/containers/image/docker"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/reference" "github.com/distribution/reference"
) )
// GetRegistryTags retrieves all tags of an image from a container registry. // GetRegistryTags retrieves all tags of an image from a container registry.

View File

@ -2,15 +2,17 @@ package client
import ( import (
"context" "context"
"fmt"
"time"
"github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/volume" "github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
) )
func GetVolumes(cl *client.Client, ctx context.Context, server string, fs filters.Args) ([]*volume.Volume, error) { func GetVolumes(cl *client.Client, ctx context.Context, server string, fs filters.Args) ([]*volume.Volume, error) {
volumeListOptions := volume.ListOptions{fs} volumeListOKBody, err := cl.VolumeList(ctx, volume.ListOptions{Filters: fs})
volumeListOKBody, err := cl.VolumeList(ctx, volumeListOptions)
volumeList := volumeListOKBody.Volumes volumeList := volumeListOKBody.Volumes
if err != nil { if err != nil {
return volumeList, err return volumeList, err
@ -29,13 +31,32 @@ func GetVolumeNames(volumes []*volume.Volume) []string {
return volumeNames return volumeNames
} }
func RemoveVolumes(cl *client.Client, ctx context.Context, server string, volumeNames []string, force bool) error { func RemoveVolumes(cl *client.Client, ctx context.Context, volumeNames []string, force bool, retries int) error {
for _, volName := range volumeNames { for _, volName := range volumeNames {
err := cl.VolumeRemove(ctx, volName, force) err := retryFunc(5, func() error {
return cl.VolumeRemove(context.Background(), volName, force)
})
if err != nil { if err != nil {
return err return fmt.Errorf("volume %s: %s", volName, err)
} }
} }
return nil return nil
} }
// retryFunc runs fn up to retries times. After the nth failed attempt it waits
// (n + 1)^2 seconds before the next attempt (starting with n=0). It returns an
// error if fn still fails after the last attempt.
func retryFunc(retries int, fn func() error) error {
for i := 0; i < retries; i++ {
err := fn()
if err == nil {
return nil
}
if i+1 < retries {
sleep := time.Duration(i+1) * time.Duration(i+1)
logrus.Infof("%s: waiting %d seconds before next retry", err, sleep)
time.Sleep(sleep * time.Second)
}
}
return fmt.Errorf("%d retries failed", retries)
}
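
A small sketch of the backoff behaviour, written as an in-package test since retryFunc is unexported; the failure counts are made up for illustration:

package client

import (
	"fmt"
	"testing"
)

func TestRetryFuncBackoffSketch(t *testing.T) {
	attempts := 0
	err := retryFunc(4, func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("attempt %d failed", attempts)
		}
		return nil
	})
	// Sleeps roughly 1s after the first failure and 4s after the second
	// ((n + 1)^2 with n starting at 0), then the third attempt succeeds.
	if err != nil || attempts != 3 {
		t.Fatalf("expected success on attempt 3, got attempts=%d err=%v", attempts, err)
	}
}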

View File

@ -0,0 +1,26 @@
package client
import (
"fmt"
"testing"
)
func TestRetryFunc(t *testing.T) {
err := retryFunc(1, func() error { return nil })
if err != nil {
t.Errorf("should not return an error: %s", err)
}
i := 0
fn := func() error {
i++
return fmt.Errorf("oh no, something went wrong!")
}
err = retryFunc(2, fn)
if err == nil {
t.Error("should return an error")
}
if i != 2 {
t.Errorf("The function should have been called 1 times, got %d", i)
}
}

View File

@ -11,8 +11,8 @@ import (
"coopcloud.tech/abra/pkg/formatter" "coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/stack" "coopcloud.tech/abra/pkg/upstream/stack"
loader "coopcloud.tech/abra/pkg/upstream/stack" loader "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/distribution/reference"
composetypes "github.com/docker/cli/cli/compose/types" composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/distribution/reference"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )

View File

@ -25,6 +25,9 @@ import (
// AppEnv is a map of the values in an apps env config // AppEnv is a map of the values in an apps env config
type AppEnv = map[string]string type AppEnv = map[string]string
// AppModifiers is a map of modifiers in an apps env config
type AppModifiers = map[string]map[string]string
// AppName is AppName // AppName is AppName
type AppName = string type AppName = string
@ -47,34 +50,61 @@ type App struct {
Path string Path string
} }
// StackName gets whatever the docker safe (uses the right delimiting // See documentation of config.StackName
// character, e.g. "_") stack name is for the app. In general, you don't want
// to use this to show anything to end-users, you want to use a.Name instead.
func (a App) StackName() string { func (a App) StackName() string {
if _, exists := a.Env["STACK_NAME"]; exists { if _, exists := a.Env["STACK_NAME"]; exists {
return a.Env["STACK_NAME"] return a.Env["STACK_NAME"]
} }
stackName := SanitiseAppName(a.Name) stackName := StackName(a.Name)
if len(stackName) > 45 {
logrus.Debugf("trimming %s to %s to avoid runtime limits", stackName, stackName[:45])
stackName = stackName[:45]
}
a.Env["STACK_NAME"] = stackName a.Env["STACK_NAME"] = stackName
return stackName return stackName
} }
// Filters retrieves exact app filters for querying the container runtime. Due // StackName gets whatever the docker safe (uses the right delimiting
// to upstream issues, filtering works differently depending on what you're // character, e.g. "_") stack name is for the app. In general, you don't want
// to use this to show anything to end-users, you want to use a.Name instead.
func StackName(appName string) string {
stackName := SanitiseAppName(appName)
if len(stackName) > MAX_SANITISED_APP_NAME_LENGTH {
logrus.Debugf("trimming %s to %s to avoid runtime limits", stackName, stackName[:MAX_SANITISED_APP_NAME_LENGTH])
stackName = stackName[:MAX_SANITISED_APP_NAME_LENGTH]
}
return stackName
}
// Filters retrieves app filters for querying the container runtime. By default
// it filters on all services in the app. It is also possible to pass an
// optional list of service names, which get filtered instead.
//
// Due to upstream issues, filtering works differently depending on what you're // querying. So, for example, secrets don't work with regex! The caller needs
// querying. So, for example, secrets don't work with regex! The caller needs // querying. So, for example, secrets don't work with regex! The caller needs
// to implement their own validation that the right secrets are matched. In // to implement their own validation that the right secrets are matched. In
// order to handle these cases, we provide the `appendServiceNames` / // order to handle these cases, we provide the `appendServiceNames` /
// `exactMatch` modifiers. // `exactMatch` modifiers.
func (a App) Filters(appendServiceNames, exactMatch bool) (filters.Args, error) { func (a App) Filters(appendServiceNames, exactMatch bool, services ...string) (filters.Args, error) {
filters := filters.NewArgs() filters := filters.NewArgs()
if len(services) > 0 {
for _, serviceName := range services {
filters.Add("name", ServiceFilter(a.StackName(), serviceName, exactMatch))
}
return filters, nil
}
// When not appending the service name, just add one filter for the whole
// stack.
if !appendServiceNames {
f := fmt.Sprintf("%s", a.StackName())
if exactMatch {
f = fmt.Sprintf("^%s", f)
}
filters.Add("name", f)
return filters, nil
}
composeFiles, err := GetComposeFiles(a.Recipe, a.Env) composeFiles, err := GetComposeFiles(a.Recipe, a.Env)
if err != nil { if err != nil {
@ -88,28 +118,23 @@ func (a App) Filters(appendServiceNames, exactMatch bool) (filters.Args, error)
} }
for _, service := range compose.Services { for _, service := range compose.Services {
var filter string f := ServiceFilter(a.StackName(), service.Name, exactMatch)
filters.Add("name", f)
if appendServiceNames {
if exactMatch {
filter = fmt.Sprintf("^%s_%s", a.StackName(), service.Name)
} else {
filter = fmt.Sprintf("%s_%s", a.StackName(), service.Name)
}
} else {
if exactMatch {
filter = fmt.Sprintf("^%s", a.StackName())
} else {
filter = fmt.Sprintf("%s", a.StackName())
}
}
filters.Add("name", filter)
} }
return filters, nil return filters, nil
} }
// ServiceFilter creates a filter string for filtering a service in the docker
// container runtime. When exact match is true, it uses regex to match the
// string exactly.
func ServiceFilter(stack, service string, exact bool) string {
if exact {
return fmt.Sprintf("^%s_%s", stack, service)
}
return fmt.Sprintf("%s_%s", stack, service)
}
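
A short sketch of the newly exported helpers; the app name is a placeholder and the sanitised output shown in the comments is an assumption about what SanitiseAppName produces:

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/config"
)

func main() {
	// Sanitised and trimmed to MAX_SANITISED_APP_NAME_LENGTH (45) characters.
	stack := config.StackName("cloud.example.com")
	fmt.Println(stack) // assumed to print "cloud_example_com"

	fmt.Println(config.ServiceFilter(stack, "app", false)) // "cloud_example_com_app"
	fmt.Println(config.ServiceFilter(stack, "app", true))  // "^cloud_example_com_app"
}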
// ByServer sort a slice of Apps // ByServer sort a slice of Apps
type ByServer []App type ByServer []App
@ -149,7 +174,7 @@ func (a ByName) Less(i, j int) bool {
return strings.ToLower(a[i].Name) < strings.ToLower(a[j].Name) return strings.ToLower(a[i].Name) < strings.ToLower(a[j].Name)
} }
func readAppEnvFile(appFile AppFile, name AppName) (App, error) { func ReadAppEnvFile(appFile AppFile, name AppName) (App, error) {
env, err := ReadEnv(appFile.Path) env, err := ReadEnv(appFile.Path)
if err != nil { if err != nil {
return App{}, fmt.Errorf("env file for %s couldn't be read: %s", name, err.Error()) return App{}, fmt.Errorf("env file for %s couldn't be read: %s", name, err.Error())
@ -157,7 +182,7 @@ func readAppEnvFile(appFile AppFile, name AppName) (App, error) {
logrus.Debugf("read env %s from %s", env, appFile.Path) logrus.Debugf("read env %s from %s", env, appFile.Path)
app, err := newApp(env, name, appFile) app, err := NewApp(env, name, appFile)
if err != nil { if err != nil {
return App{}, fmt.Errorf("env file for %s has issues: %s", name, err.Error()) return App{}, fmt.Errorf("env file for %s has issues: %s", name, err.Error())
} }
@ -165,8 +190,8 @@ func readAppEnvFile(appFile AppFile, name AppName) (App, error) {
return app, nil return app, nil
} }
// newApp creates new App object // NewApp creates new App object
func newApp(env AppEnv, name string, appFile AppFile) (App, error) { func NewApp(env AppEnv, name string, appFile AppFile) (App, error) {
domain := env["DOMAIN"] domain := env["DOMAIN"]
recipe, exists := env["RECIPE"] recipe, exists := env["RECIPE"]
@ -232,7 +257,7 @@ func GetApp(apps AppFiles, name AppName) (App, error) {
return App{}, fmt.Errorf("cannot find app with name %s", name) return App{}, fmt.Errorf("cannot find app with name %s", name)
} }
app, err := readAppEnvFile(appFile, name) app, err := ReadAppEnvFile(appFile, name)
if err != nil { if err != nil {
return App{}, err return App{}, err
} }
@ -330,7 +355,7 @@ func TemplateAppEnvSample(recipeName, appName, server, domain string) error {
return fmt.Errorf("%s already exists?", appEnvPath) return fmt.Errorf("%s already exists?", appEnvPath)
} }
err = ioutil.WriteFile(appEnvPath, envSample, 0664) err = ioutil.WriteFile(appEnvPath, envSample, 0o664)
if err != nil { if err != nil {
return err return err
} }
@ -437,27 +462,56 @@ func GetAppStatuses(apps []App, MachineReadable bool) (map[string]map[string]str
return statuses, nil return statuses, nil
} }
// ensurePathExists ensures that a path exists.
func ensurePathExists(path string) error {
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
return err
}
return nil
}
// GetComposeFiles gets the list of compose files for an app (or recipe if you // GetComposeFiles gets the list of compose files for an app (or recipe if you
// don't already have an app) which should be merged into a composetypes.Config // don't already have an app) which should be merged into a composetypes.Config
// while respecting the COMPOSE_FILE env var. // while respecting the COMPOSE_FILE env var.
func GetComposeFiles(recipe string, appEnv AppEnv) ([]string, error) { func GetComposeFiles(recipe string, appEnv AppEnv) ([]string, error) {
var composeFiles []string var composeFiles []string
if _, ok := appEnv["COMPOSE_FILE"]; !ok { composeFileEnvVar, ok := appEnv["COMPOSE_FILE"]
logrus.Debug("no COMPOSE_FILE detected, loading compose.yml") if !ok {
path := fmt.Sprintf("%s/%s/compose.yml", RECIPES_DIR, recipe) path := fmt.Sprintf("%s/%s/compose.yml", RECIPES_DIR, recipe)
if err := ensurePathExists(path); err != nil {
return composeFiles, err
}
logrus.Debugf("no COMPOSE_FILE detected, loading default: %s", path)
composeFiles = append(composeFiles, path) composeFiles = append(composeFiles, path)
return composeFiles, nil return composeFiles, nil
} }
composeFileEnvVar := appEnv["COMPOSE_FILE"] if !strings.Contains(composeFileEnvVar, ":") {
envVars := strings.Split(composeFileEnvVar, ":") path := fmt.Sprintf("%s/%s/%s", RECIPES_DIR, recipe, composeFileEnvVar)
logrus.Debugf("COMPOSE_FILE detected (%s), loading %s", composeFileEnvVar, strings.Join(envVars, ", ")) if err := ensurePathExists(path); err != nil {
for _, file := range strings.Split(composeFileEnvVar, ":") { return composeFiles, err
}
logrus.Debugf("COMPOSE_FILE detected, loading %s", path)
composeFiles = append(composeFiles, path)
return composeFiles, nil
}
numComposeFiles := strings.Count(composeFileEnvVar, ":") + 1
envVars := strings.SplitN(composeFileEnvVar, ":", numComposeFiles)
if len(envVars) != numComposeFiles {
return composeFiles, fmt.Errorf("COMPOSE_FILE (=\"%s\") parsing failed?", composeFileEnvVar)
}
for _, file := range envVars {
path := fmt.Sprintf("%s/%s/%s", RECIPES_DIR, recipe, file) path := fmt.Sprintf("%s/%s/%s", RECIPES_DIR, recipe, file)
if err := ensurePathExists(path); err != nil {
return composeFiles, err
}
composeFiles = append(composeFiles, path) composeFiles = append(composeFiles, path)
} }
logrus.Debugf("COMPOSE_FILE detected (%s), loading %s", composeFileEnvVar, strings.Join(envVars, ", "))
logrus.Debugf("retrieved %s configs for %s", strings.Join(composeFiles, ", "), recipe) logrus.Debugf("retrieved %s configs for %s", strings.Join(composeFiles, ", "), recipe)
return composeFiles, nil return composeFiles, nil
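
A sketch of the three COMPOSE_FILE cases handled above; the recipe name and extra compose file are placeholders and must exist under the recipes directory for the calls to succeed:

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/config"
)

func main() {
	// No COMPOSE_FILE set: only compose.yml is loaded.
	files, err := config.GetComposeFiles("my-recipe", map[string]string{})
	fmt.Println(files, err)

	// A single file: just that file is loaded.
	files, err = config.GetComposeFiles("my-recipe", map[string]string{
		"COMPOSE_FILE": "compose.smtp.yml",
	})
	fmt.Println(files, err)

	// A colon-separated list: each file is checked and loaded in order.
	files, err = config.GetComposeFiles("my-recipe", map[string]string{
		"COMPOSE_FILE": "compose.yml:compose.smtp.yml",
	})
	fmt.Println(files, err)
}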
@ -563,7 +617,7 @@ func GetLabel(compose *composetypes.Config, stackName string, label string) stri
// GetTimeoutFromLabel reads the timeout value from docker label "coop-cloud.${STACK_NAME}.TIMEOUT" and returns 50 as default value // GetTimeoutFromLabel reads the timeout value from docker label "coop-cloud.${STACK_NAME}.TIMEOUT" and returns 50 as default value
func GetTimeoutFromLabel(compose *composetypes.Config, stackName string) (int, error) { func GetTimeoutFromLabel(compose *composetypes.Config, stackName string) (int, error) {
var timeout = 50 // Default Timeout timeout := 50 // Default Timeout
var err error = nil var err error = nil
if timeoutLabel := GetLabel(compose, stackName, "timeout"); timeoutLabel != "" { if timeoutLabel := GetLabel(compose, stackName, "timeout"); timeoutLabel != "" {
logrus.Debugf("timeout label: %s", timeoutLabel) logrus.Debugf("timeout label: %s", timeoutLabel)

View File

@ -1,36 +1,197 @@
package config package config_test
import ( import (
"encoding/json"
"fmt"
"reflect" "reflect"
"testing" "testing"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/recipe"
"github.com/docker/docker/api/types/filters"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
) )
func TestNewApp(t *testing.T) { func TestNewApp(t *testing.T) {
app, err := newApp(expectedAppEnv, appName, expectedAppFile) app, err := config.NewApp(ExpectedAppEnv, AppName, ExpectedAppFile)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !reflect.DeepEqual(app, expectedApp) { if !reflect.DeepEqual(app, ExpectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp) t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, ExpectedApp)
} }
} }
func TestReadAppEnvFile(t *testing.T) { func TestReadAppEnvFile(t *testing.T) {
app, err := readAppEnvFile(expectedAppFile, appName) app, err := config.ReadAppEnvFile(ExpectedAppFile, AppName)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !reflect.DeepEqual(app, expectedApp) { if !reflect.DeepEqual(app, ExpectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp) t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, ExpectedApp)
} }
} }
func TestGetApp(t *testing.T) { func TestGetApp(t *testing.T) {
app, err := GetApp(expectedAppFiles, appName) app, err := config.GetApp(ExpectedAppFiles, AppName)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !reflect.DeepEqual(app, expectedApp) { if !reflect.DeepEqual(app, ExpectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp) t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, ExpectedApp)
}
}
func TestGetComposeFiles(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
tests := []struct {
appEnv map[string]string
composeFiles []string
}{
{
map[string]string{},
[]string{
fmt.Sprintf("%s/%s/compose.yml", config.RECIPES_DIR, r.Name),
},
},
{
map[string]string{"COMPOSE_FILE": "compose.yml"},
[]string{
fmt.Sprintf("%s/%s/compose.yml", config.RECIPES_DIR, r.Name),
},
},
{
map[string]string{"COMPOSE_FILE": "compose.extra_secret.yml"},
[]string{
fmt.Sprintf("%s/%s/compose.extra_secret.yml", config.RECIPES_DIR, r.Name),
},
},
{
map[string]string{"COMPOSE_FILE": "compose.yml:compose.extra_secret.yml"},
[]string{
fmt.Sprintf("%s/%s/compose.yml", config.RECIPES_DIR, r.Name),
fmt.Sprintf("%s/%s/compose.extra_secret.yml", config.RECIPES_DIR, r.Name),
},
},
}
for _, test := range tests {
composeFiles, err := config.GetComposeFiles(r.Name, test.appEnv)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, composeFiles, test.composeFiles)
}
}
func TestGetComposeFilesError(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
tests := []struct{ appEnv map[string]string }{
{map[string]string{"COMPOSE_FILE": "compose.yml::compose.foo.yml"}},
{map[string]string{"COMPOSE_FILE": "doesnt.exist.yml"}},
}
for _, test := range tests {
_, err := config.GetComposeFiles(r.Name, test.appEnv)
if err == nil {
t.Fatalf("should have failed: %v", test.appEnv)
}
}
}
func TestFilters(t *testing.T) {
oldDir := config.RECIPES_DIR
config.RECIPES_DIR = "./testdir"
defer func() {
config.RECIPES_DIR = oldDir
}()
app, err := config.NewApp(config.AppEnv{
"DOMAIN": "test.example.com",
"RECIPE": "test-recipe",
}, "test_example_com", config.AppFile{
Path: "./testdir/filtertest.end",
Server: "local",
})
if err != nil {
t.Fatal(err)
}
f, err := app.Filters(false, false)
if err != nil {
t.Error(err)
}
compareFilter(t, f, map[string]map[string]bool{
"name": {
"test_example_com": true,
},
})
f2, err := app.Filters(false, true)
if err != nil {
t.Error(err)
}
compareFilter(t, f2, map[string]map[string]bool{
"name": {
"^test_example_com": true,
},
})
f3, err := app.Filters(true, false)
if err != nil {
t.Error(err)
}
compareFilter(t, f3, map[string]map[string]bool{
"name": {
"test_example_com_bar": true,
"test_example_com_foo": true,
},
})
f4, err := app.Filters(true, true)
if err != nil {
t.Error(err)
}
compareFilter(t, f4, map[string]map[string]bool{
"name": {
"^test_example_com_bar": true,
"^test_example_com_foo": true,
},
})
f5, err := app.Filters(false, false, "foo")
if err != nil {
t.Error(err)
}
compareFilter(t, f5, map[string]map[string]bool{
"name": {
"test_example_com_foo": true,
},
})
}
func compareFilter(t *testing.T, f1 filters.Args, f2 map[string]map[string]bool) {
t.Helper()
j1, err := f1.MarshalJSON()
if err != nil {
t.Error(err)
}
j2, err := json.Marshal(f2)
if err != nil {
t.Error(err)
}
if diff := cmp.Diff(string(j2), string(j1)); diff != "" {
t.Errorf("filters mismatch (-want +got):\n%s", diff)
} }
} }

View File

@ -8,9 +8,11 @@ import (
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
"regexp"
"sort"
"strings" "strings"
"github.com/Autonomic-Cooperative/godotenv" "git.coopcloud.tech/coop-cloud/godotenv"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
@ -34,6 +36,16 @@ var REPOS_BASE_URL = "https://git.coopcloud.tech/coop-cloud"
var CATALOGUE_JSON_REPO_NAME = "recipes-catalogue-json" var CATALOGUE_JSON_REPO_NAME = "recipes-catalogue-json"
var SSH_URL_TEMPLATE = "ssh://git@git.coopcloud.tech:2222/coop-cloud/%s.git" var SSH_URL_TEMPLATE = "ssh://git@git.coopcloud.tech:2222/coop-cloud/%s.git"
const MAX_SANITISED_APP_NAME_LENGTH = 45
const MAX_DOCKER_SECRET_LENGTH = 64
var BackupbotLabel = "coop-cloud.backupbot.enabled"
// envVarModifiers is a list of env var modifier strings. These are added to
// env vars as comments and modify their processing by Abra, e.g. determining
// how long secrets should be.
var envVarModifiers = []string{"length"}
// GetServers retrieves all servers. // GetServers retrieves all servers.
func GetServers() ([]string, error) { func GetServers() ([]string, error) {
var servers []string var servers []string
@ -50,16 +62,30 @@ func GetServers() ([]string, error) {
// ReadEnv loads an app environment into a map. // ReadEnv loads an app environment into a map.
func ReadEnv(filePath string) (AppEnv, error) { func ReadEnv(filePath string) (AppEnv, error) {
var envFile AppEnv var envVars AppEnv
envFile, err := godotenv.Read(filePath) envVars, _, err := godotenv.Read(filePath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
logrus.Debugf("read %s from %s", envFile, filePath) logrus.Debugf("read %s from %s", envVars, filePath)
return envFile, nil return envVars, nil
}
// ReadEnvWithModifiers loads an app environment and its modifiers into two different maps.
func ReadEnvWithModifiers(filePath string) (AppEnv, AppModifiers, error) {
var envVars AppEnv
envVars, mods, err := godotenv.Read(filePath)
if err != nil {
return nil, mods, err
}
logrus.Debugf("read %s from %s", envVars, filePath)
return envVars, mods, nil
} }
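
A minimal sketch of reading modifiers alongside the env vars; the path is a placeholder and the nesting of AppModifiers (env var name, then modifier name, then value) is an assumption:

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/config"
)

func main() {
	envVars, mods, err := config.ReadEnvWithModifiers("/path/to/app.env")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("read %d env vars\n", len(envVars))
	for name, modifiers := range mods {
		// e.g. a "length" modifier controlling how long a generated secret should be
		fmt.Printf("%s has modifiers %v\n", name, modifiers)
	}
}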
// ReadServerNames retrieves all server names. // ReadServerNames retrieves all server names.
@ -149,22 +175,107 @@ func ReadAbraShEnvVars(abraSh string) (map[string]string, error) {
} }
return envVars, err return envVars, err
} }
defer file.Close()
exportRegex, err := regexp.Compile(`^export\s+(\w+=\w+)`)
if err != nil {
return envVars, err
}
scanner := bufio.NewScanner(file) scanner := bufio.NewScanner(file)
for scanner.Scan() { for scanner.Scan() {
line := scanner.Text() txt := scanner.Text()
if strings.Contains(line, "export") { if exportRegex.MatchString(txt) {
splitVals := strings.Split(line, "export ") splitVals := strings.Split(txt, "export ")
envVarDef := splitVals[len(splitVals)-1] envVarDef := splitVals[len(splitVals)-1]
keyVal := strings.Split(envVarDef, "=") keyVal := strings.Split(envVarDef, "=")
if len(keyVal) != 2 { if len(keyVal) != 2 {
return envVars, fmt.Errorf("couldn't parse %s", line) return envVars, fmt.Errorf("couldn't parse %s", txt)
} }
envVars[keyVal[0]] = keyVal[1] envVars[keyVal[0]] = keyVal[1]
} }
} }
if len(envVars) > 0 {
logrus.Debugf("read %s from %s", envVars, abraSh) logrus.Debugf("read %s from %s", envVars, abraSh)
} else {
logrus.Debugf("read 0 env var exports from %s", abraSh)
}
return envVars, nil return envVars, nil
} }
type EnvVar struct {
Name string
Present bool
}
func CheckEnv(app App) ([]EnvVar, error) {
var envVars []EnvVar
envSamplePath := path.Join(RECIPES_DIR, app.Recipe, ".env.sample")
if _, err := os.Stat(envSamplePath); err != nil {
if os.IsNotExist(err) {
return envVars, fmt.Errorf("%s does not exist?", envSamplePath)
}
return envVars, err
}
envSample, err := ReadEnv(envSamplePath)
if err != nil {
return envVars, err
}
var keys []string
for key := range envSample {
keys = append(keys, key)
}
sort.Strings(keys)
for _, key := range keys {
if _, ok := app.Env[key]; ok {
envVars = append(envVars, EnvVar{Name: key, Present: true})
} else {
envVars = append(envVars, EnvVar{Name: key, Present: false})
}
}
return envVars, nil
}
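
A sketch of how CheckEnv could be used to report env vars that appear in the recipe's .env.sample but are missing from an app's env file; the app name is a placeholder:

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/config"
)

func main() {
	appFiles, err := config.LoadAppFiles("")
	if err != nil {
		fmt.Println(err)
		return
	}
	app, err := config.GetApp(appFiles, "my-app")
	if err != nil {
		fmt.Println(err)
		return
	}
	envVars, err := config.CheckEnv(app)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, envVar := range envVars {
		if !envVar.Present {
			fmt.Printf("%s is missing from %s\n", envVar.Name, app.Path)
		}
	}
}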
// ReadAbraShCmdNames reads the names of commands.
func ReadAbraShCmdNames(abraSh string) ([]string, error) {
var cmdNames []string
file, err := os.Open(abraSh)
if err != nil {
if os.IsNotExist(err) {
return cmdNames, nil
}
return cmdNames, err
}
defer file.Close()
cmdNameRegex, err := regexp.Compile(`(\w+)(\(\).*\{)`)
if err != nil {
return cmdNames, err
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
matches := cmdNameRegex.FindStringSubmatch(line)
if len(matches) > 0 {
cmdNames = append(cmdNames, matches[1])
}
}
if len(cmdNames) > 0 {
logrus.Debugf("read %s from %s", strings.Join(cmdNames, " "), abraSh)
} else {
logrus.Debugf("read 0 command names from %s", abraSh)
}
return cmdNames, nil
}
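For orientation, a small self-contained sketch of what the two regexes above do and do not match; the sample lines are assumptions modelled on the abra-test-recipe expectations exercised in the tests below (a top-level export is picked up, a variable set inside a function body is not):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	exportRegex := regexp.MustCompile(`^export\s+(\w+=\w+)`)
	cmdNameRegex := regexp.MustCompile(`(\w+)(\(\).*\{)`)

	fmt.Println(exportRegex.MatchString("export OUTER_FOO=bar"))    // true: top-level export
	fmt.Println(exportRegex.MatchString("  INNER_FOO=baz"))         // false: not an export line
	fmt.Println(cmdNameRegex.FindStringSubmatch("test_cmd() {")[1]) // "test_cmd"
}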

@ -1,60 +1,69 @@
-package config
+package config_test
import (
+"fmt"
"os"
"path"
"reflect"
+"slices"
"strings"
"testing"
+"coopcloud.tech/abra/pkg/config"
+"coopcloud.tech/abra/pkg/recipe"
)
var testFolder = os.ExpandEnv("$PWD/../../tests/resources/test_folder") var (
var validAbraConf = os.ExpandEnv("$PWD/../../tests/resources/valid_abra_config") TestFolder = os.ExpandEnv("$PWD/../../tests/resources/test_folder")
ValidAbraConf = os.ExpandEnv("$PWD/../../tests/resources/valid_abra_config")
)
// make sure these are in alphabetical order // make sure these are in alphabetical order
var tFolders = []string{"folder1", "folder2"} var (
var tFiles = []string{"bar.env", "foo.env"} TFolders = []string{"folder1", "folder2"}
TFiles = []string{"bar.env", "foo.env"}
)
var appName = "ecloud" var (
var serverName = "evil.corp" AppName = "ecloud"
ServerName = "evil.corp"
)
var expectedAppEnv = AppEnv{ var ExpectedAppEnv = config.AppEnv{
"DOMAIN": "ecloud.evil.corp", "DOMAIN": "ecloud.evil.corp",
"RECIPE": "ecloud", "RECIPE": "ecloud",
} }
var expectedApp = App{ var ExpectedApp = config.App{
Name: appName, Name: AppName,
Recipe: expectedAppEnv["RECIPE"], Recipe: ExpectedAppEnv["RECIPE"],
Domain: expectedAppEnv["DOMAIN"], Domain: ExpectedAppEnv["DOMAIN"],
Env: expectedAppEnv, Env: ExpectedAppEnv,
Path: expectedAppFile.Path, Path: ExpectedAppFile.Path,
Server: expectedAppFile.Server, Server: ExpectedAppFile.Server,
} }
var expectedAppFile = AppFile{ var ExpectedAppFile = config.AppFile{
Path: path.Join(validAbraConf, "servers", serverName, appName+".env"), Path: path.Join(ValidAbraConf, "servers", ServerName, AppName+".env"),
Server: serverName, Server: ServerName,
} }
var expectedAppFiles = map[string]AppFile{ var ExpectedAppFiles = map[string]config.AppFile{
appName: expectedAppFile, AppName: ExpectedAppFile,
} }
// var expectedServerNames = []string{"evil.corp"}
func TestGetAllFoldersInDirectory(t *testing.T) { func TestGetAllFoldersInDirectory(t *testing.T) {
folders, err := GetAllFoldersInDirectory(testFolder) folders, err := config.GetAllFoldersInDirectory(TestFolder)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !reflect.DeepEqual(folders, tFolders) { if !reflect.DeepEqual(folders, TFolders) {
t.Fatalf("did not get expected folders. Expected: (%s), Got: (%s)", strings.Join(tFolders, ","), strings.Join(folders, ",")) t.Fatalf("did not get expected folders. Expected: (%s), Got: (%s)", strings.Join(TFolders, ","), strings.Join(folders, ","))
} }
} }
func TestGetAllFilesInDirectory(t *testing.T) { func TestGetAllFilesInDirectory(t *testing.T) {
files, err := GetAllFilesInDirectory(testFolder) files, err := config.GetAllFilesInDirectory(TestFolder)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -62,23 +71,205 @@ func TestGetAllFilesInDirectory(t *testing.T) {
for _, file := range files { for _, file := range files {
fileNames = append(fileNames, file.Name()) fileNames = append(fileNames, file.Name())
} }
if !reflect.DeepEqual(fileNames, tFiles) { if !reflect.DeepEqual(fileNames, TFiles) {
t.Fatalf("did not get expected files. Expected: (%s), Got: (%s)", strings.Join(tFiles, ","), strings.Join(fileNames, ",")) t.Fatalf("did not get expected files. Expected: (%s), Got: (%s)", strings.Join(TFiles, ","), strings.Join(fileNames, ","))
} }
} }
func TestReadEnv(t *testing.T) { func TestReadEnv(t *testing.T) {
env, err := ReadEnv(expectedAppFile.Path) env, err := config.ReadEnv(ExpectedAppFile.Path)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if !reflect.DeepEqual(env, expectedAppEnv) { if !reflect.DeepEqual(env, ExpectedAppEnv) {
t.Fatalf( t.Fatalf(
"did not get expected application settings. Expected: DOMAIN=%s RECIPE=%s; Got: DOMAIN=%s RECIPE=%s", "did not get expected application settings. Expected: DOMAIN=%s RECIPE=%s; Got: DOMAIN=%s RECIPE=%s",
expectedAppEnv["DOMAIN"], ExpectedAppEnv["DOMAIN"],
expectedAppEnv["RECIPE"], ExpectedAppEnv["RECIPE"],
env["DOMAIN"], env["DOMAIN"],
env["RECIPE"], env["RECIPE"],
) )
} }
} }
func TestReadAbraShEnvVars(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, r.Name, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
if err != nil {
t.Fatal(err)
}
if len(abraShEnv) == 0 {
t.Error("at least one env var should be exported")
}
if _, ok := abraShEnv["INNER_FOO"]; ok {
t.Error("INNER_FOO should not be exported")
}
if _, ok := abraShEnv["INNER_BAZ"]; ok {
t.Error("INNER_BAZ should not be exported")
}
if _, ok := abraShEnv["OUTER_FOO"]; !ok {
t.Error("OUTER_FOO should be exported")
}
}
func TestReadAbraShCmdNames(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, r.Name, "abra.sh")
cmdNames, err := config.ReadAbraShCmdNames(abraShPath)
if err != nil {
t.Fatal(err)
}
if len(cmdNames) == 0 {
t.Error("at least one command name should be found")
}
expectedCmdNames := []string{"test_cmd", "test_cmd_args"}
for _, cmdName := range expectedCmdNames {
if !slices.Contains(cmdNames, cmdName) {
t.Fatalf("%s should have been found in %s", cmdName, abraShPath)
}
}
}
func TestCheckEnv(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
envSamplePath := path.Join(config.RECIPES_DIR, r.Name, ".env.sample")
envSample, err := config.ReadEnv(envSamplePath)
if err != nil {
t.Fatal(err)
}
app := config.App{
Name: "test-app",
Recipe: r.Name,
Domain: "example.com",
Env: envSample,
Path: "example.com.env",
Server: "example.com",
}
envVars, err := config.CheckEnv(app)
if err != nil {
t.Fatal(err)
}
for _, envVar := range envVars {
if !envVar.Present {
t.Fatalf("%s should be present", envVar.Name)
}
}
}
func TestCheckEnvError(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
envSamplePath := path.Join(config.RECIPES_DIR, r.Name, ".env.sample")
envSample, err := config.ReadEnv(envSamplePath)
if err != nil {
t.Fatal(err)
}
delete(envSample, "DOMAIN")
app := config.App{
Name: "test-app",
Recipe: r.Name,
Domain: "example.com",
Env: envSample,
Path: "example.com.env",
Server: "example.com",
}
envVars, err := config.CheckEnv(app)
if err != nil {
t.Fatal(err)
}
for _, envVar := range envVars {
if envVar.Name == "DOMAIN" && envVar.Present {
t.Fatalf("%s should not be present", envVar.Name)
}
}
}
func TestEnvVarCommentsRemoved(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
envSamplePath := path.Join(config.RECIPES_DIR, r.Name, ".env.sample")
envSample, err := config.ReadEnv(envSamplePath)
if err != nil {
t.Fatal(err)
}
envVar, exists := envSample["WITH_COMMENT"]
if !exists {
t.Fatal("WITH_COMMENT env var should be present in .env.sample")
}
if strings.Contains(envVar, "should be removed") {
t.Fatalf("comment from '%s' should be removed", envVar)
}
envVar, exists = envSample["SECRET_TEST_PASS_TWO_VERSION"]
if !exists {
t.Fatal("WITH_COMMENT env var should be present in .env.sample")
}
if strings.Contains(envVar, "length") {
t.Fatal("comment from env var SECRET_TEST_PASS_TWO_VERSION should have been removed")
}
}
func TestEnvVarModifiersIncluded(t *testing.T) {
offline := true
r, err := recipe.Get("abra-test-recipe", offline)
if err != nil {
t.Fatal(err)
}
envSamplePath := path.Join(config.RECIPES_DIR, r.Name, ".env.sample")
envSample, modifiers, err := config.ReadEnvWithModifiers(envSamplePath)
if err != nil {
t.Fatal(err)
}
if !strings.Contains(envSample["SECRET_TEST_PASS_TWO_VERSION"], "v1") {
t.Errorf("value should be 'v1', got: '%s'", envSample["SECRET_TEST_PASS_TWO_VERSION"])
}
if modifiers == nil || modifiers["SECRET_TEST_PASS_TWO_VERSION"] == nil {
t.Errorf("no modifiers included")
} else {
if modifiers["SECRET_TEST_PASS_TWO_VERSION"]["length"] != "10" {
t.Errorf("length modifier should be '10', got: '%s'", modifiers["SECRET_TEST_PASS_TWO_VERSION"]["length"])
}
}
}

@ -0,0 +1,2 @@
RECIPE=test-recipe
DOMAIN=test.example.com

@ -0,0 +1,6 @@
version: "3.8"
services:
foo:
image: debian
bar:
image: debian

@ -8,6 +8,7 @@ import (
"coopcloud.tech/abra/pkg/formatter" "coopcloud.tech/abra/pkg/formatter"
"github.com/AlecAivazis/survey/v2" "github.com/AlecAivazis/survey/v2"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
containerTypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -17,7 +18,7 @@ import (
// count of containers does not match 1, then a prompt is presented to let the // count of containers does not match 1, then a prompt is presented to let the
// user choose. A count of 0 is handled gracefully. // user choose. A count of 0 is handled gracefully.
func GetContainer(c context.Context, cl *client.Client, filters filters.Args, noInput bool) (types.Container, error) { func GetContainer(c context.Context, cl *client.Client, filters filters.Args, noInput bool) (types.Container, error) {
containerOpts := types.ContainerListOptions{Filters: filters} containerOpts := containerTypes.ListOptions{Filters: filters}
containers, err := cl.ContainerList(c, containerOpts) containers, err := cl.ContainerList(c, containerOpts)
if err != nil { if err != nil {
return types.Container{}, err return types.Container{}, err
@ -28,7 +29,7 @@ func GetContainer(c context.Context, cl *client.Client, filters filters.Args, no
return types.Container{}, fmt.Errorf("no containers matching the %v filter found?", filter) return types.Container{}, fmt.Errorf("no containers matching the %v filter found?", filter)
} }
if len(containers) != 1 { if len(containers) > 1 {
var containersRaw []string var containersRaw []string
for _, container := range containers { for _, container := range containers {
containerName := strings.Join(container.Names, " ") containerName := strings.Join(container.Names, " ")
@ -68,3 +69,15 @@ func GetContainer(c context.Context, cl *client.Client, filters filters.Args, no
return containers[0], nil return containers[0], nil
} }
// GetContainerFromStackAndService retrieves the container for the given stack and service.
func GetContainerFromStackAndService(cl *client.Client, stack, service string) (types.Container, error) {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", stack, service))
container, err := GetContainer(context.Background(), cl, filters, true)
if err != nil {
return types.Container{}, err
}
return container, nil
}
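A minimal usage sketch for this new helper, assuming a reachable Docker host and a deployed stack; the stack/service names and the pkg/container import path are illustrative assumptions:

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/container" // assumed import path for this helper
	dockerClient "github.com/docker/docker/client"
)

func main() {
	cl, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv, dockerClient.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	// Matches containers named like test_example_com_app.* on the connected host.
	c, err := container.GetContainerFromStackAndService(cl, "test_example_com", "app")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.ID)
}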

@ -7,11 +7,16 @@ import (
// EnsureIPv4 ensures that an ipv4 address is set for a domain name
func EnsureIPv4(domainName string) (string, error) {
-ipv4, err := net.ResolveIPAddr("ip", domainName)
+ipv4, err := net.ResolveIPAddr("ip4", domainName)
if err != nil {
return "", err
}
+// NOTE(d1): e.g. when there is only an ipv6 record available
+if ipv4 == nil {
+return "", fmt.Errorf("unable to resolve ipv4 address for %s", domainName)
+}
return ipv4.String(), nil
}

pkg/dns/dns_test.go (new file)
@ -0,0 +1,64 @@
package dns
import (
"fmt"
"testing"
"gotest.tools/v3/assert"
)
func TestEnsureDomainsResolveSameIPv4(t *testing.T) {
tests := []struct {
domainName string
serverName string
shouldValidate bool
}{
// NOTE(d1): DNS records get checked, so use something that is maintained
// within the federation. if you're here because of a failing test, try
// `dig +short <domain>` to ensure stuff matches first! If flakyness
// becomes an issue we can look into mocking
{"docs.coopcloud.tech", "coopcloud.tech", true},
{"docs.coopcloud.tech", "swarm.autonomic.zone", true},
// NOTE(d1): special case handling for "--local"
{"", "default", true},
{"", "local", true},
{"", "", false},
{"123", "", false},
}
for _, test := range tests {
_, err := EnsureDomainsResolveSameIPv4(test.domainName, test.serverName)
if err != nil && test.shouldValidate {
t.Fatal(err)
}
if err == nil && !test.shouldValidate {
t.Fatal(fmt.Errorf("should have failed but did not: %v", test))
}
}
}
func TestEnsureIpv4(t *testing.T) {
// NOTE(d1): DNS records get checked, so use something that is maintained
// within the federation. if you're here because of a failing test, try `dig
// +short <domain>` to ensure stuff matches first! If flakyness becomes an
// issue we can look into mocking
domainName := "collabora.ostrom.collective.tools"
serverName := "ostrom.collective.tools"
for i := 0; i < 15; i++ {
domainIpv4, err := EnsureIPv4(domainName)
if err != nil {
t.Fatal(err)
}
serverIpv4, err := EnsureIPv4(serverName)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, domainIpv4, serverIpv4)
}
}

pkg/git/add.go (new file)
@ -0,0 +1,27 @@
package git
import (
"github.com/go-git/go-git/v5"
"github.com/sirupsen/logrus"
)
// Add adds a file to the git index.
func Add(repoPath, path string, dryRun bool) error {
repo, err := git.PlainOpen(repoPath)
if err != nil {
return err
}
worktree, err := repo.Worktree()
if err != nil {
return err
}
if dryRun {
logrus.Debugf("dry run: adding %s", path)
} else {
worktree.Add(path)
}
return nil
}

@ -8,7 +8,7 @@ import (
)
// Commit runs a git commit
-func Commit(repoPath, glob, commitMessage string, dryRun bool) error {
+func Commit(repoPath, commitMessage string, dryRun bool) error {
if commitMessage == "" {
return fmt.Errorf("no commit message specified?")
}
@ -33,17 +33,8 @@ func Commit(repoPath, glob, commitMessage string, dryRun bool) error {
}
if !dryRun {
-err = commitWorktree.AddGlob(glob)
-if err != nil {
-return err
-}
-logrus.Debugf("staged %s for commit", glob)
-} else {
-logrus.Debugf("dry run: did not stage %s for commit", glob)
-}
-if !dryRun {
-_, err = commitWorktree.Commit(commitMessage, &git.CommitOptions{})
+// NOTE(d1): `All: true` does not include untracked files
+_, err = commitWorktree.Commit(commitMessage, &git.CommitOptions{All: true})
if err != nil {
return err
}

pkg/git/diff.go (new file)
@ -0,0 +1,42 @@
package git
import (
"fmt"
"os/exec"
"github.com/sirupsen/logrus"
)
// getGitDiffArgs builds the `git diff` invocation args. It removes the usage
// of a pager and ensures that colours are specified even when Git might detect
// otherwise.
func getGitDiffArgs(repoPath string) []string {
return []string{
"-C",
repoPath,
"--no-pager",
"-c",
"color.diff=always",
"diff",
}
}
// DiffUnstaged shows a `git diff`. Due to limitations in the underlying go-git
// library, this implementation requires the /usr/bin/git binary. It gracefully
// skips if it cannot find the command on the system.
func DiffUnstaged(path string) error {
if _, err := exec.LookPath("git"); err != nil {
logrus.Warnf("unable to locate git command, cannot output diff")
return nil
}
gitDiffArgs := getGitDiffArgs(path)
diff, err := exec.Command("git", gitDiffArgs...).Output()
if err != nil {
return nil
}
fmt.Print(string(diff))
return nil
}

@ -3,6 +3,7 @@ package jsontable
import (
"fmt"
"io"
+"strings"
"github.com/olekukonko/tablewriter"
)
@ -109,6 +110,9 @@ func (t *JSONTable) _JSONRenderInner() {
}
writeChar(t.out, '{')
for keyidx, key := range t.keys {
+key := strings.ToLower(key)
+key = strings.ReplaceAll(key, " ", "-")
value := "nil"
if keyidx < len(row) {
value = row[keyidx]
@ -138,10 +142,8 @@ func (t *JSONTable) JSONRender() {
if t.hasCaption {
fmt.Fprintf(t.out, "\"%s\":\"%s\",", t.captionLabel, t.caption)
}
fmt.Fprintf(t.out, "\"%s\":", t.dataLabel)
}
// write list

@ -10,7 +10,7 @@ import (
"coopcloud.tech/abra/pkg/recipe" "coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe" recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/tagcmp" "coopcloud.tech/tagcmp"
"github.com/docker/distribution/reference" "github.com/distribution/reference"
"github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -115,6 +115,13 @@ var LintRules = map[string][]LintRule{
HowToResolve: "upload your recipe to git.coopcloud.tech/coop-cloud/...", HowToResolve: "upload your recipe to git.coopcloud.tech/coop-cloud/...",
Function: LintHasRecipeRepo, Function: LintHasRecipeRepo,
}, },
{
Ref: "R015",
Level: "warn",
Description: "long secret names",
HowToResolve: "reduce length of secret names to 12 chars",
Function: LintSecretLengths,
},
},
"error": {
{
@ -401,6 +408,16 @@ func LintHasRecipeRepo(recipe recipe.Recipe) (bool, error) {
return true, nil
}
func LintSecretLengths(recipe recipe.Recipe) (bool, error) {
for name := range recipe.Config.Secrets {
if len(name) > 12 {
return false, fmt.Errorf("secret %s is longer than 12 characters", name)
}
}
return true, nil
}
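For context, the 12-character guideline follows from the constants introduced at the top of this changeset: a sanitised app name may be up to 45 characters (MAX_SANITISED_APP_NAME_LENGTH) and a Docker secret name at most 64 (MAX_DOCKER_SECRET_LENGTH), while remote secret names are assembled as stack_secret_version. A rough worked example of that budget, assuming a two-character version suffix:

package main

import (
	"fmt"
	"strings"
)

func main() {
	stack := strings.Repeat("x", 45) // worst-case sanitised app name

	short := fmt.Sprintf("%s_%s_%s", stack, "db_password", "v1")        // 11-char secret name
	long := fmt.Sprintf("%s_%s_%s", stack, "db_password_for_app", "v1") // 19-char secret name

	fmt.Println(len(short), len(short) <= 64) // 60 true: fits within the Docker limit
	fmt.Println(len(long), len(long) <= 64)   // 68 false: would be rejected
}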
func LintValidTags(recipe recipe.Recipe) (bool, error) {
recipeDir := path.Join(config.RECIPES_DIR, recipe.Name)

@ -7,6 +7,7 @@ import (
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
"slices"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@ -21,8 +22,8 @@ import (
loader "coopcloud.tech/abra/pkg/upstream/stack" loader "coopcloud.tech/abra/pkg/upstream/stack"
"coopcloud.tech/abra/pkg/web" "coopcloud.tech/abra/pkg/web"
"coopcloud.tech/tagcmp" "coopcloud.tech/tagcmp"
"github.com/distribution/reference"
composetypes "github.com/docker/cli/cli/compose/types" composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/distribution/reference"
"github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -31,7 +32,7 @@ import (
// RecipeCatalogueURL is the only current recipe catalogue available.
const RecipeCatalogueURL = "https://recipes.coopcloud.tech/recipes.json"
-// ReposMetadataURL is the recipe repository metadata
+// ReposMetadataURL is the recipe repository metadata.
const ReposMetadataURL = "https://git.coopcloud.tech/api/v1/orgs/coop-cloud/repos"
// tag represents a git tag.
@ -63,6 +64,11 @@ type RecipeMeta struct {
Website string `json:"website"`
}
// TopicMeta represents a list of topics for a repository.
type TopicMeta struct {
Topics []string `json:"topics"`
}
// LatestVersion returns the latest version of a recipe.
func (r RecipeMeta) LatestVersion() string {
var version string
@ -258,6 +264,20 @@ func (r Recipe) SampleEnv() (map[string]string, error) {
return sampleEnv, nil
}
// Ensure makes sure the recipe exists, is up to date and has the latest version checked out.
func Ensure(recipeName string) error {
if err := EnsureExists(recipeName); err != nil {
return err
}
if err := EnsureUpToDate(recipeName); err != nil {
return err
}
if err := EnsureLatest(recipeName); err != nil {
return err
}
return nil
}
// EnsureExists ensures that a recipe is locally cloned
func EnsureExists(recipeName string) error {
recipeDir := path.Join(config.RECIPES_DIR, recipeName)
@ -822,8 +842,17 @@ func ReadReposMetadata() (RepoCatalogue, error) {
}
for idx, repo := range reposList {
var topicMeta TopicMeta
topicsURL := getReposTopicUrl(repo.Name)
if err := web.ReadJSON(topicsURL, &topicMeta); err != nil {
return reposMeta, err
}
if slices.Contains(topicMeta.Topics, "recipe") && repo.Name != "example" {
reposMeta[repo.Name] = reposList[idx]
}
}
pageIdx++
bar.Add(1)
@ -916,6 +945,7 @@ func GetRecipeVersions(recipeName string, offline bool) (RecipeVersions, error)
if err != nil {
return versions, err
}
sortRecipeVersions(versions)
logrus.Debugf("collected %s for %s", versions, recipeName)
@ -1002,14 +1032,8 @@ func UpdateRepositories(repos RepoCatalogue, recipeName string) error {
retrieveBar.Add(1)
return
}
if _, exists := catalogue.CatalogueSkipList[rm.Name]; exists {
ch <- rm.Name
retrieveBar.Add(1)
return
}
recipeDir := path.Join(config.RECIPES_DIR, rm.Name)
if err := gitPkg.Clone(recipeDir, rm.CloneURL); err != nil {
logrus.Fatal(err)
}
@ -1025,3 +1049,8 @@ func UpdateRepositories(repos RepoCatalogue, recipeName string) error {
return nil
}
// getReposTopicUrl retrieves the repository specific topic listing.
func getReposTopicUrl(repoName string) string {
return fmt.Sprintf("https://git.coopcloud.tech/api/v1/repos/coop-cloud/%s/topics", repoName)
}
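A hedged, standalone sketch of what the topics lookup amounts to; the repository name is illustrative and the JSON shape is only what the TopicMeta struct above implies:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"slices"
)

// TopicMeta mirrors the struct added above.
type TopicMeta struct {
	Topics []string `json:"topics"`
}

func main() {
	url := "https://git.coopcloud.tech/api/v1/repos/coop-cloud/gitea/topics"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var meta TopicMeta
	if err := json.NewDecoder(resp.Body).Decode(&meta); err != nil {
		panic(err)
	}
	// Only repositories tagged "recipe" are kept in the catalogue.
	fmt.Println(slices.Contains(meta.Topics, "recipe"))
}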

@ -4,6 +4,7 @@
package secret
import (
+"context"
"fmt"
"slices"
"strconv"
@ -11,18 +12,33 @@ import (
"sync"
"coopcloud.tech/abra/pkg/client"
+"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/upstream/stack"
loader "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/decentral1se/passgen"
+"github.com/docker/docker/api/types"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
-// secretValue represents a parsed `SECRET_FOO=v1 # length=bar` env var config
-// secret definition.
-type secretValue struct {
+// Secret represents a secret.
+type Secret struct {
+// Version comes from the secret version environment variable.
+// For example:
+//   SECRET_FOO=v1
Version string
+// Length comes from the length modifier at the secret version environment
+// variable. For example:
+//   SECRET_FOO=v1 # length=12
Length int
+// RemoteName is the name of the secret on the server. For example:
+//   name: ${STACK_NAME}_test_pass_two_${SECRET_TEST_PASS_TWO_VERSION}
+// with the following:
+//   STACK_NAME=test_example_com
+//   SECRET_TEST_PASS_TWO_VERSION=v2
+// will have this remote name:
+//   test_example_com_test_pass_two_v2
+RemoteName string
}
// GeneratePasswords generates passwords.
@ -32,7 +48,6 @@ func GeneratePasswords(count, length uint) ([]string, error) {
length,
passgen.AlphabetDefault,
)
if err != nil {
return nil, err
}
@ -51,7 +66,6 @@ func GeneratePassphrases(count uint) ([]string, error) {
passgen.PassphraseCasingDefault,
passgen.WordListDefault,
)
if err != nil {
return nil, err
}
@ -66,17 +80,27 @@ func GeneratePassphrases(count uint) ([]string, error) {
// and some times you don't (as the caller). We need to be able to handle the
// "app new" case where we pass in the .env.sample and the "secret generate"
// case where the app is created.
-func ReadSecretsConfig(appEnv map[string]string, composeFiles []string, recipeName string) (map[string]string, error) {
-secretConfigs := make(map[string]string)
+func ReadSecretsConfig(appEnvPath string, composeFiles []string, stackName string) (map[string]Secret, error) {
+appEnv, appModifiers, err := config.ReadEnvWithModifiers(appEnvPath)
+if err != nil {
+return nil, err
+}
+// Set the STACK_NAME to be able to generate the remote name correctly.
+appEnv["STACK_NAME"] = stackName
opts := stack.Deploy{Composefiles: composeFiles}
-config, err := loader.LoadComposefile(opts, appEnv)
+composeConfig, err := loader.LoadComposefile(opts, appEnv)
if err != nil {
-return secretConfigs, err
+return nil, err
+}
+// Read the compose files without injecting environment variables.
+configWithoutEnv, err := loader.LoadComposefile(opts, map[string]string{}, loader.SkipInterpolation)
+if err != nil {
+return nil, err
}
var enabledSecrets []string
-for _, service := range config.Services {
+for _, service := range composeConfig.Services {
for _, secret := range service.Secrets {
enabledSecrets = append(enabledSecrets, secret.Source)
} }
@ -84,83 +108,78 @@ func ReadSecretsConfig(appEnv map[string]string, composeFiles []string, recipeNa
if len(enabledSecrets) == 0 { if len(enabledSecrets) == 0 {
logrus.Debugf("not generating app secrets, none enabled in recipe config") logrus.Debugf("not generating app secrets, none enabled in recipe config")
return secretConfigs, nil return nil, nil
} }
for secretId, secretConfig := range config.Secrets { secretValues := map[string]Secret{}
for secretId, secretConfig := range composeConfig.Secrets {
if string(secretConfig.Name[len(secretConfig.Name)-1]) == "_" { if string(secretConfig.Name[len(secretConfig.Name)-1]) == "_" {
return secretConfigs, fmt.Errorf("missing version for secret? (%s)", secretId) return nil, fmt.Errorf("missing version for secret? (%s)", secretId)
} }
if !(slices.Contains(enabledSecrets, secretId)) { if !(slices.Contains(enabledSecrets, secretId)) {
logrus.Warnf("%s not enabled in recipe config, not generating", secretId) logrus.Warnf("%s not enabled in recipe config, skipping", secretId)
continue continue
} }
lastIdx := strings.LastIndex(secretConfig.Name, "_") lastIdx := strings.LastIndex(secretConfig.Name, "_")
secretVersion := secretConfig.Name[lastIdx+1:] secretVersion := secretConfig.Name[lastIdx+1:]
secretConfigs[secretId] = secretVersion value := Secret{Version: secretVersion, RemoteName: secretConfig.Name}
if len(value.RemoteName) > config.MAX_DOCKER_SECRET_LENGTH {
return nil, fmt.Errorf("secret %s is > %d chars when combined with %s", secretId, config.MAX_DOCKER_SECRET_LENGTH, stackName)
} }
return secretConfigs, nil // Check if the length modifier is set for this secret.
for envName, modifierValues := range appModifiers {
// configWithoutEnv contains the raw name as defined in the compose.yaml
// The name will look something like this:
// name: ${STACK_NAME}_test_pass_two_${SECRET_TEST_PASS_TWO_VERSION}
// To check if the current modifier is for the current secret we check
// if the raw name contains the env name (e.g. SECRET_TEST_PASS_TWO_VERSION).
if !strings.Contains(configWithoutEnv.Secrets[secretId].Name, envName) {
continue
} }
lengthRaw, ok := modifierValues["length"]
func ParseSecretValue(secret string) (secretValue, error) { if ok {
values := strings.Split(secret, "#") length, err := strconv.Atoi(lengthRaw)
if len(values) == 0 {
return secretValue{}, fmt.Errorf("unable to parse %s", secret)
}
if len(values) == 1 {
return secretValue{Version: values[0], Length: 0}, nil
}
split := strings.Split(values[1], "=")
parsed := split[len(split)-1]
stripped := strings.ReplaceAll(parsed, " ", "")
length, err := strconv.Atoi(stripped)
if err != nil { if err != nil {
return secretValue{}, err return nil, err
}
value.Length = length
}
break
}
secretValues[secretId] = value
} }
version := strings.ReplaceAll(values[0], " ", "")
logrus.Debugf("parsed version %s and length '%v' from %s", version, length, secret) return secretValues, nil
return secretValue{Version: version, Length: length}, nil
} }
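A hedged sketch of the new calling convention, mirroring the values asserted in the updated secret tests further down; the pkg/secret import path is assumed and errors are reduced to panics:

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/secret" // assumed import path
)

func main() {
	composeFiles := []string{"./testdir/compose.yaml"}
	secrets, err := secret.ReadSecretsConfig("./testdir/.env.sample", composeFiles, "test_example_com")
	if err != nil {
		panic(err)
	}
	s := secrets["test_pass_two"]
	fmt.Println(s.RemoteName, s.Version, s.Length) // test_example_com_test_pass_two_v1 v1 10
}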
// GenerateSecrets generates secrets locally and sends them to a remote server for storage. // GenerateSecrets generates secrets locally and sends them to a remote server for storage.
func GenerateSecrets(cl *dockerClient.Client, secretsFromConfig map[string]string, appName, server string) (map[string]string, error) { func GenerateSecrets(cl *dockerClient.Client, secrets map[string]Secret, server string) (map[string]string, error) {
secrets := make(map[string]string) secretsGenerated := map[string]string{}
var mutex sync.Mutex var mutex sync.Mutex
var wg sync.WaitGroup var wg sync.WaitGroup
ch := make(chan error, len(secretsFromConfig)) ch := make(chan error, len(secrets))
for n, v := range secretsFromConfig { for n, v := range secrets {
wg.Add(1) wg.Add(1)
go func(secretName, secretValue string) { go func(secretName string, secret Secret) {
defer wg.Done() defer wg.Done()
parsedSecretValue, err := ParseSecretValue(secretValue) logrus.Debugf("attempting to generate and store %s on %s", secret.RemoteName, server)
if secret.Length > 0 {
passwords, err := GeneratePasswords(1, uint(secret.Length))
if err != nil { if err != nil {
ch <- err ch <- err
return return
} }
secretRemoteName := fmt.Sprintf("%s_%s_%s", appName, secretName, parsedSecretValue.Version) if err := client.StoreSecret(cl, secret.RemoteName, passwords[0], server); err != nil {
logrus.Debugf("attempting to generate and store %s on %s", secretRemoteName, server)
if parsedSecretValue.Length > 0 {
passwords, err := GeneratePasswords(1, uint(parsedSecretValue.Length))
if err != nil {
ch <- err
return
}
if err := client.StoreSecret(cl, secretRemoteName, passwords[0], server); err != nil {
if strings.Contains(err.Error(), "AlreadyExists") { if strings.Contains(err.Error(), "AlreadyExists") {
logrus.Warnf("%s already exists, moving on...", secretRemoteName) logrus.Warnf("%s already exists, moving on...", secret.RemoteName)
ch <- nil ch <- nil
} else { } else {
ch <- err ch <- err
@ -170,7 +189,7 @@ func GenerateSecrets(cl *dockerClient.Client, secretsFromConfig map[string]strin
mutex.Lock() mutex.Lock()
defer mutex.Unlock() defer mutex.Unlock()
secrets[secretName] = passwords[0] secretsGenerated[secretName] = passwords[0]
} else { } else {
passphrases, err := GeneratePassphrases(1) passphrases, err := GeneratePassphrases(1)
if err != nil { if err != nil {
@ -178,9 +197,9 @@ func GenerateSecrets(cl *dockerClient.Client, secretsFromConfig map[string]strin
return return
} }
if err := client.StoreSecret(cl, secretRemoteName, passphrases[0], server); err != nil { if err := client.StoreSecret(cl, secret.RemoteName, passphrases[0], server); err != nil {
if strings.Contains(err.Error(), "AlreadyExists") { if strings.Contains(err.Error(), "AlreadyExists") {
logrus.Warnf("%s already exists, moving on...", secretRemoteName) logrus.Warnf("%s already exists, moving on...", secret.RemoteName)
ch <- nil ch <- nil
} else { } else {
ch <- err ch <- err
@ -190,7 +209,7 @@ func GenerateSecrets(cl *dockerClient.Client, secretsFromConfig map[string]strin
mutex.Lock() mutex.Lock()
defer mutex.Unlock() defer mutex.Unlock()
secrets[secretName] = passphrases[0] secretsGenerated[secretName] = passphrases[0]
} }
ch <- nil ch <- nil
}(n, v) }(n, v)
@ -198,14 +217,72 @@ func GenerateSecrets(cl *dockerClient.Client, secretsFromConfig map[string]strin
wg.Wait() wg.Wait()
for range secretsFromConfig { for range secrets {
err := <-ch err := <-ch
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
logrus.Debugf("generated and stored %s on %s", secrets, server) logrus.Debugf("generated and stored %v on %s", secrets, server)
return secrets, nil return secretsGenerated, nil
}
type secretStatus struct {
LocalName string
RemoteName string
Version string
CreatedOnRemote bool
}
type secretStatuses []secretStatus
// PollSecretsStatus checks status of secrets by comparing the local recipe
// config and the deployed server state.
func PollSecretsStatus(cl *dockerClient.Client, app config.App) (secretStatuses, error) {
var secStats secretStatuses
composeFiles, err := config.GetComposeFiles(app.Recipe, app.Env)
if err != nil {
return secStats, err
}
secretsConfig, err := ReadSecretsConfig(app.Path, composeFiles, app.StackName())
if err != nil {
return secStats, err
}
filters, err := app.Filters(false, false)
if err != nil {
return secStats, err
}
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: filters})
if err != nil {
return secStats, err
}
remoteSecretNames := make(map[string]bool)
for _, cont := range secretList {
remoteSecretNames[cont.Spec.Annotations.Name] = true
}
for secretName, val := range secretsConfig {
createdRemote := false
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, val.Version)
if _, ok := remoteSecretNames[secretRemoteName]; ok {
createdRemote = true
}
secStats = append(secStats, secretStatus{
LocalName: secretName,
RemoteName: secretRemoteName,
Version: val.Version,
CreatedOnRemote: createdRemote,
})
}
return secStats, nil
} }

@ -1,41 +1,39 @@
package secret
import (
-"path"
"testing"
-"coopcloud.tech/abra/pkg/config"
-"coopcloud.tech/abra/pkg/recipe"
-"coopcloud.tech/abra/pkg/upstream/stack"
-loader "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/stretchr/testify/assert"
)
func TestReadSecretsConfig(t *testing.T) { func TestReadSecretsConfig(t *testing.T) {
offline := true composeFiles := []string{"./testdir/compose.yaml"}
recipe, err := recipe.Get("matrix-synapse", offline) secretsFromConfig, err := ReadSecretsConfig("./testdir/.env.sample", composeFiles, "test_example_com")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
sampleEnv, err := recipe.SampleEnv() // Simple secret
if err != nil { assert.Equal(t, "test_example_com_test_pass_one_v2", secretsFromConfig["test_pass_one"].RemoteName)
t.Fatal(err) assert.Equal(t, "v2", secretsFromConfig["test_pass_one"].Version)
assert.Equal(t, 0, secretsFromConfig["test_pass_one"].Length)
// Has a length modifier
assert.Equal(t, "test_example_com_test_pass_two_v1", secretsFromConfig["test_pass_two"].RemoteName)
assert.Equal(t, "v1", secretsFromConfig["test_pass_two"].Version)
assert.Equal(t, 10, secretsFromConfig["test_pass_two"].Length)
// Secret name does not include the secret id
assert.Equal(t, "test_example_com_pass_three_v2", secretsFromConfig["test_pass_three"].RemoteName)
assert.Equal(t, "v2", secretsFromConfig["test_pass_three"].Version)
assert.Equal(t, 0, secretsFromConfig["test_pass_three"].Length)
} }
composeFiles := []string{path.Join(config.RECIPES_DIR, recipe.Name, "compose.yml")} func TestReadSecretsConfigWithLongDomain(t *testing.T) {
secretsFromConfig, err := ReadSecretsConfig(sampleEnv, composeFiles, recipe.Name) composeFiles := []string{"./testdir/compose.yaml"}
if err != nil { _, err := ReadSecretsConfig("./testdir/.env.sample", composeFiles, "should_break_on_forty_eight_char_stack_nameeeeee")
t.Fatal(err) if err == nil {
} t.Fatal("expected failure, stack name is too long")
opts := stack.Deploy{Composefiles: composeFiles}
config, err := loader.LoadComposefile(opts, sampleEnv)
if err != nil {
t.Fatal(err)
}
for secretId := range config.Secrets {
assert.Contains(t, secretsFromConfig, secretId)
} }
assert.Contains(t, err.Error(), "is > 64 chars")
} }

@ -0,0 +1,3 @@
SECRET_TEST_PASS_ONE_VERSION=v2
SECRET_TEST_PASS_TWO_VERSION=v1 # length=10
SECRET_TEST_PASS_THREE_VERSION=v2

@ -0,0 +1,21 @@
---
version: "3.8"
services:
app:
image: nginx:1.21.0
secrets:
- test_pass_one
- test_pass_two
- test_pass_three
secrets:
test_pass_one:
external: true
name: ${STACK_NAME}_test_pass_one_${SECRET_TEST_PASS_ONE_VERSION} # should be removed
test_pass_two:
external: true
name: ${STACK_NAME}_test_pass_two_${SECRET_TEST_PASS_TWO_VERSION}
test_pass_three:
external: true
name: ${STACK_NAME}_pass_three_${SECRET_TEST_PASS_THREE_VERSION} # secretId and name don't match

@ -14,6 +14,70 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
// GetServiceByLabel retrieves a service based on a label. If prompt is true
// and the retrieved count of services does not match 1, then a prompt is
// presented to let the user choose. An error is returned when no service is
// found.
func GetServiceByLabel(c context.Context, cl *client.Client, label string, prompt bool) (swarm.Service, error) {
services, err := cl.ServiceList(c, types.ServiceListOptions{})
if err != nil {
return swarm.Service{}, err
}
if len(services) == 0 {
return swarm.Service{}, fmt.Errorf("no services deployed?")
}
var matchingServices []swarm.Service
for _, service := range services {
if enabled, exists := service.Spec.Labels[label]; exists && enabled == "true" {
matchingServices = append(matchingServices, service)
}
}
if len(matchingServices) == 0 {
return swarm.Service{}, fmt.Errorf("no services deployed matching label '%s'?", label)
}
if len(matchingServices) > 1 {
var servicesRaw []string
for _, service := range matchingServices {
serviceName := service.Spec.Name
created := formatter.HumanDuration(service.CreatedAt.Unix())
servicesRaw = append(servicesRaw, fmt.Sprintf("%s (created %v)", serviceName, created))
}
if !prompt {
err := fmt.Errorf("expected 1 service but found %v: %s", len(matchingServices), strings.Join(servicesRaw, " "))
return swarm.Service{}, err
}
logrus.Warnf("ambiguous service list received, prompting for input")
var response string
prompt := &survey.Select{
Message: "which service are you looking for?",
Options: servicesRaw,
}
if err := survey.AskOne(prompt, &response); err != nil {
return swarm.Service{}, err
}
chosenService := strings.TrimSpace(strings.Split(response, " ")[0])
for _, service := range matchingServices {
serviceName := strings.ToLower(service.Spec.Name)
if serviceName == chosenService {
return service, nil
}
}
logrus.Panic("failed to match chosen service")
}
return matchingServices[0], nil
}
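A minimal sketch of how this helper might be called, pairing it with the BackupbotLabel constant added in pkg/config above; the pkg/service import path and the pairing itself are assumptions:

package main

import (
	"context"
	"fmt"

	"coopcloud.tech/abra/pkg/config"
	"coopcloud.tech/abra/pkg/service" // assumed import path for this helper
	dockerClient "github.com/docker/docker/client"
)

func main() {
	cl, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv, dockerClient.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	// With prompt=false, more than one match is an error instead of an interactive choice.
	svc, err := service.GetServiceByLabel(context.Background(), cl, config.BackupbotLabel, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(svc.Spec.Name)
}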
// GetService retrieves a service container. If prompt is true and the retrieved
// count of service containers does not match 1, then a prompt is presented to
// let the user choose. A count of 0 is handled gracefully.

@ -2,73 +2,14 @@ package ssh
import (
"fmt"
-"os/exec"
"strings"
-"github.com/sirupsen/logrus"
)
// HostConfig is a SSH host config.
type HostConfig struct {
Host string
IdentityFile string
Port string
User string
}
// String presents a human friendly output for the HostConfig.
func (h HostConfig) String() string {
return fmt.Sprintf(
"{host: %s, username: %s, port: %s, identityfile: %s}",
h.Host,
h.User,
h.Port,
h.IdentityFile,
)
}
// GetHostConfig retrieves a ~/.ssh/config config for a host using /usr/bin/ssh
// directly. We therefore maintain consistent interop with this standard
// tooling. This is useful because SSH confuses a lot of people and having to
// learn how two tools (`ssh` and `abra`) handle SSH connection details instead
// of one (just `ssh`) is Not Cool. Here's to less bug reports on this topic!
func GetHostConfig(hostname string) (HostConfig, error) {
var hostConfig HostConfig
out, err := exec.Command("ssh", "-G", hostname).Output()
if err != nil {
return hostConfig, err
}
for _, line := range strings.Split(string(out), "\n") {
entries := strings.Split(line, " ")
for idx, entry := range entries {
if entry == "hostname" {
hostConfig.Host = entries[idx+1]
}
if entry == "user" {
hostConfig.User = entries[idx+1]
}
if entry == "port" {
hostConfig.Port = entries[idx+1]
}
if entry == "identityfile" {
if hostConfig.IdentityFile == "" {
hostConfig.IdentityFile = entries[idx+1]
}
}
}
}
logrus.Debugf("retrieved ssh config for %s: %s", hostname, hostConfig.String())
return hostConfig, nil
}
// Fatal is an error output wrapper which aims to make SSH failures easier to
// parse through re-wording.
func Fatal(hostname string, err error) error {
out := err.Error()
if strings.Contains(out, "Host key verification failed.") {
return fmt.Errorf("SSH host key verification failed for %s", hostname)
} else if strings.Contains(out, "Could not resolve hostname") {
@ -79,7 +20,7 @@ func Fatal(hostname string, err error) error {
return fmt.Errorf("ssh auth: permission denied for %s", hostname) return fmt.Errorf("ssh auth: permission denied for %s", hostname)
} else if strings.Contains(out, "Network is unreachable") { } else if strings.Contains(out, "Network is unreachable") {
return fmt.Errorf("unable to connect to %s, network is unreachable?", hostname) return fmt.Errorf("unable to connect to %s, network is unreachable?", hostname)
} else { }
return err return err
} }
}

@ -16,12 +16,12 @@ import (
// GetConnectionHelper returns Docker-specific connection helper for the given URL.
// GetConnectionHelper returns nil without error when no helper is registered for the scheme.
//
-// ssh://<user>@<host> URL requires Docker 18.09 or later on the remote host.
+// ssh://<host> URL requires Docker 18.09 or later on the remote host.
func GetConnectionHelper(daemonURL string) (*connhelper.ConnectionHelper, error) {
-return getConnectionHelper(daemonURL, []string{"-o ConnectTimeout=5"})
+return getConnectionHelper(daemonURL)
}
-func getConnectionHelper(daemonURL string, sshFlags []string) (*connhelper.ConnectionHelper, error) {
+func getConnectionHelper(daemonURL string) (*connhelper.ConnectionHelper, error) {
url, err := url.Parse(daemonURL)
if err != nil {
return nil, err
@ -35,7 +35,7 @@ func getConnectionHelper(daemonURL string, sshFlags []string) (*connhelper.Conne
return &connhelper.ConnectionHelper{
Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
-return New(ctx, "ssh", append(sshFlags, ctxConnDetails.Args("docker", "system", "dial-stdio")...)...)
+return New(ctx, "ssh", ctxConnDetails.Args("docker", "system", "dial-stdio")...)
},
Host: "http://docker.example.com",
}, nil
@ -45,6 +45,7 @@ func getConnectionHelper(daemonURL string, sshFlags []string) (*connhelper.Conne
return nil, err
}
+// NewConnectionHelper creates new connection helper for a remote docker daemon.
func NewConnectionHelper(daemonURL string) (*connhelper.ConnectionHelper, error) {
helper, err := GetConnectionHelper(daemonURL)
if err != nil {
@ -73,6 +74,7 @@ func getDockerEndpoint(host string) (docker.Endpoint, error) {
return ep, nil
}
+// GetDockerEndpointMetadataAndTLS retrieves the docker endpoint and TLS info for a remote host.
func GetDockerEndpointMetadataAndTLS(host string) (docker.EndpointMeta, *dCliContextStore.EndpointTLSData, error) {
ep, err := getDockerEndpoint(host)
if err != nil {

@ -13,7 +13,10 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string, execConfig *types.ExecConfig) error { // RunExec runs a command on a remote container. io.Writer corresponds to the
// command output.
func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string,
execConfig *types.ExecConfig) (io.Writer, error) {
ctx := context.Background() ctx := context.Background()
// We need to check the tty _before_ we do the ContainerExecCreate, because // We need to check the tty _before_ we do the ContainerExecCreate, because
@ -21,22 +24,22 @@ func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string
// there's no easy way to clean those up). But also in order to make "not // there's no easy way to clean those up). But also in order to make "not
// exist" errors take precedence we do a dummy inspect first. // exist" errors take precedence we do a dummy inspect first.
if _, err := client.ContainerInspect(ctx, containerID); err != nil { if _, err := client.ContainerInspect(ctx, containerID); err != nil {
return err return nil, err
} }
if !execConfig.Detach { if !execConfig.Detach {
if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil { if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil {
return err return nil, err
} }
} }
response, err := client.ContainerExecCreate(ctx, containerID, *execConfig) response, err := client.ContainerExecCreate(ctx, containerID, *execConfig)
if err != nil { if err != nil {
return err return nil, err
} }
execID := response.ID execID := response.ID
if execID == "" { if execID == "" {
return errors.New("exec ID empty") return nil, errors.New("exec ID empty")
} }
if execConfig.Detach { if execConfig.Detach {
@ -44,13 +47,13 @@ func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string
Detach: execConfig.Detach, Detach: execConfig.Detach,
Tty: execConfig.Tty, Tty: execConfig.Tty,
} }
return client.ContainerExecStart(ctx, execID, execStartCheck) return nil, client.ContainerExecStart(ctx, execID, execStartCheck)
} }
return interactiveExec(ctx, dockerCli, client, execConfig, execID) return interactiveExec(ctx, dockerCli, client, execConfig, execID)
} }
func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclient.Client,
-execConfig *types.ExecConfig, execID string) error {
+execConfig *types.ExecConfig, execID string) (io.Writer, error) {
// Interactive exec requested. // Interactive exec requested.
var ( var (
out, stderr io.Writer out, stderr io.Writer
@ -76,7 +79,7 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclie
} }
resp, err := client.ContainerExecAttach(ctx, execID, execStartCheck) resp, err := client.ContainerExecAttach(ctx, execID, execStartCheck)
if err != nil { if err != nil {
return err return out, err
} }
defer resp.Close() defer resp.Close()
@ -107,10 +110,10 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclie
if err := <-errCh; err != nil { if err := <-errCh; err != nil {
logrus.Debugf("Error hijack: %s", err) logrus.Debugf("Error hijack: %s", err)
return err return out, err
} }
return getExecExitStatus(ctx, client, execID) return out, getExecExitStatus(ctx, client, execID)
} }
func getExecExitStatus(ctx context.Context, client apiclient.ContainerAPIClient, execID string) error { func getExecExitStatus(ctx context.Context, client apiclient.ContainerAPIClient, execID string) error {

@ -9,7 +9,7 @@ import (
"time" "time"
"github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container"
"github.com/docker/docker/client" "github.com/docker/docker/client"
apiclient "github.com/docker/docker/client" apiclient "github.com/docker/docker/client"
"github.com/moby/sys/signal" "github.com/moby/sys/signal"
@ -22,7 +22,7 @@ func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id strin
return nil return nil
} }
options := types.ResizeOptions{ options := container.ResizeOptions{
Height: height, Height: height,
Width: width, Width: width,
} }

@ -420,7 +420,7 @@ func convertServiceSecrets(
return nil, err
}
-// NOTE(d1): strip # length=... modifiers
+// NOTE(d1): strip all comments
if strings.Contains(obj.Name, "#") {
vals := strings.Split(obj.Name, "#")
obj.Name = strings.TrimSpace(vals[0])

@ -18,15 +18,24 @@ func DontSkipValidation(opts *loader.Options) {
opts.SkipValidation = false
}
+// SkipInterpolation skips interpolating environment variables.
+func SkipInterpolation(opts *loader.Options) {
+opts.SkipInterpolation = true
+}
// LoadComposefile parses the composefile specified in the cli and returns its Config and version.
-func LoadComposefile(opts Deploy, appEnv map[string]string) (*composetypes.Config, error) {
+func LoadComposefile(opts Deploy, appEnv map[string]string, options ...func(*loader.Options)) (*composetypes.Config, error) {
configDetails, err := getConfigDetails(opts.Composefiles, appEnv)
if err != nil {
return nil, err
}
+if options == nil {
+options = []func(*loader.Options){DontSkipValidation}
+}
dicts := getDictsFrom(configDetails.ConfigFiles)
-config, err := loader.Load(configDetails, DontSkipValidation)
+config, err := loader.Load(configDetails, options...)
if err != nil {
if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok {
return nil, fmt.Errorf("compose file contains unsupported options: %s", return nil, fmt.Errorf("compose file contains unsupported options: %s",

@ -233,7 +233,7 @@ func validateExternalNetworks(ctx context.Context, client dockerClient.NetworkAP
network, err := client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{})
switch {
case dockerClient.IsErrNotFound(err):
-return errors.Errorf("network %q is declared as external, but could not be found. You need to create a swarm-scoped network before the stack is deployed", networkName)
+return errors.Errorf("network %q is declared as external, but could not be found. You need to create a swarm-scoped network before the stack is deployed, which you can do by running this on the server: docker network create -d overlay proxy", networkName)
case err != nil:
return err
case network.Scope != "swarm":

scripts/docker/build.sh (new executable file)
@ -0,0 +1,11 @@
#!/bin/bash
if [ ! -f .envrc ]; then
. .envrc.sample
else
. .envrc
fi
git config --global --add safe.directory /abra # work around funky file permissions
make build

@ -1,8 +1,8 @@
#!/usr/bin/env bash
-ABRA_VERSION="0.8.0-beta"
+ABRA_VERSION="0.9.0-beta"
ABRA_RELEASE_URL="https://git.coopcloud.tech/api/v1/repos/coop-cloud/abra/releases/tags/$ABRA_VERSION"
-RC_VERSION="0.8.0-beta"
+RC_VERSION="0.8.0-rc1-beta"
RC_VERSION_URL="https://git.coopcloud.tech/api/v1/repos/coop-cloud/abra/releases/tags/$RC_VERSION"
for arg in "$@"; do
@ -45,7 +45,9 @@ function install_abra_release {
fi
ARCH=$(uname -m)
-if [[ $ARCH =~ "aarch64" ]]; then
+if [[ $ARCH =~ "x86_64" ]]; then
+ARCH="amd64"
+elif [[ $ARCH =~ "aarch64" ]]; then
ARCH="arm64"
elif [[ $ARCH =~ "armv5l" ]]; then
ARCH="armv5"
@ -55,7 +57,7 @@ function install_abra_release {
ARCH="armv7" ARCH="armv7"
fi fi
PLATFORM=$(uname -s | tr '[:upper:]' '[:lower:]')_$ARCH PLATFORM=$(uname -s | tr '[:upper:]' '[:lower:]')_$ARCH
FILENAME="abra_"$ABRA_VERSION"_"$PLATFORM"" FILENAME="abra_"$ABRA_VERSION"_"$PLATFORM".tar.gz"
sed_command_rel='s/.*"assets":\[\{[^]]*"name":"'$FILENAME'"[^}]*"browser_download_url":"([^"]*)".*\].*/\1/p' sed_command_rel='s/.*"assets":\[\{[^]]*"name":"'$FILENAME'"[^}]*"browser_download_url":"([^"]*)".*\].*/\1/p'
sed_command_checksums='s/.*"assets":\[\{[^\]*"name":"checksums.txt"[^}]*"browser_download_url":"([^"]*)".*\].*/\1/p' sed_command_checksums='s/.*"assets":\[\{[^\]*"name":"checksums.txt"[^}]*"browser_download_url":"([^"]*)".*\].*/\1/p'
@ -65,17 +67,22 @@ function install_abra_release {
checksums=$(wget -q -O- $checksums_url) checksums=$(wget -q -O- $checksums_url)
checksum=$(echo "$checksums" | grep "$FILENAME" - | sed -En 's/([0-9a-f]{64})\s+'"$FILENAME"'.*/\1/p') checksum=$(echo "$checksums" | grep "$FILENAME" - | sed -En 's/([0-9a-f]{64})\s+'"$FILENAME"'.*/\1/p')
abra_download="/tmp/abra-download.tar.gz"
echo "downloading $ABRA_VERSION $PLATFORM binary release for abra..." echo "downloading $ABRA_VERSION $PLATFORM binary release for abra..."
wget -q "$release_url" -O "$HOME/.local/bin/.abra-download"
localsum=$(sha256sum $HOME/.local/bin/.abra-download | sed -En 's/([0-9a-f]{64})\s+.*/\1/p') wget -q "$release_url" -O $abra_download
localsum=$(sha256sum $abra_download | sed -En 's/([0-9a-f]{64})\s+.*/\1/p')
echo "checking if checksums match..." echo "checking if checksums match..."
if [[ "$localsum" != "$checksum" ]]; then if [[ "$localsum" != "$checksum" ]]; then
print_checksum_error print_checksum_error
exit 1 exit 1
fi fi
echo "$(tput setaf 2)check successful!$(tput sgr0)" echo "$(tput setaf 2)check successful!$(tput sgr0)"
mv "$HOME/.local/bin/.abra-download" "$HOME/.local/bin/abra" cd /tmp/
tar xf abra-download.tar.gz
mv abra "$HOME/.local/bin/abra"
tar tf abra-download.tar.gz | xargs rm -f
chmod +x "$HOME/.local/bin/abra" chmod +x "$HOME/.local/bin/abra"
x=$(echo $PATH | grep $HOME/.local/bin) x=$(echo $PATH | grep $HOME/.local/bin)

@ -35,7 +35,7 @@ setup(){
run $ABRA app check "$TEST_APP_DOMAIN" run $ABRA app check "$TEST_APP_DOMAIN"
assert_success assert_success
assert_output --partial 'all necessary environment variables defined' refute_output --partial ''
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE" assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE"
} }
@ -70,13 +70,13 @@ setup(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
run $ABRA app check "$TEST_APP_DOMAIN"
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-refute_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
_reset_recipe
}
@ -86,7 +86,7 @@ setup(){
assert_success assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --partial 'behind 1' assert_output --partial "Your branch is behind 'origin/main' by 1 commit"
# NOTE(d1): we can't quite tell if this will fail or not in the future, so, # NOTE(d1): we can't quite tell if this will fail or not in the future, so,
# since it isn't an important part of what we're testing here, we don't check # since it isn't an important part of what we're testing here, we don't check
@ -94,7 +94,7 @@ setup(){
run $ABRA app check "$TEST_APP_DOMAIN" --offline run $ABRA app check "$TEST_APP_DOMAIN" --offline
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --partial 'behind 1' assert_output --partial "Your branch is behind 'origin/main' by 1 commit"
_reset_recipe _reset_recipe
} }
@ -111,12 +111,16 @@ setup(){
} }
@test "error if missing env var" { @test "error if missing env var" {
run $ABRA app check "$TEST_APP_DOMAIN"
assert_success
refute_output --partial '❌'
run bash -c 'echo "NEW_VAR=foo" >> "$ABRA_DIR/recipes/$TEST_RECIPE/.env.sample"' run bash -c 'echo "NEW_VAR=foo" >> "$ABRA_DIR/recipes/$TEST_RECIPE/.env.sample"'
assert_success assert_success
run $ABRA app check "$TEST_APP_DOMAIN" --chaos run $ABRA app check "$TEST_APP_DOMAIN" --chaos
assert_failure assert_success
assert_output --partial "NEW_VAR" assert_output --partial '❌'
_checkout_recipe _checkout_recipe
} }


@@ -25,6 +25,24 @@ teardown(){
 fi
 }
+# bats test_tags=slow
+@test "autocomplete" {
+run $ABRA app cmd --generate-bash-completion
+assert_success
+assert_output "$TEST_APP_DOMAIN"
+run $ABRA app cmd "$TEST_APP_DOMAIN" --generate-bash-completion
+assert_success
+assert_output "app"
+run $ABRA app cmd "$TEST_APP_DOMAIN" app --generate-bash-completion
+assert_success
+assert_output "test_cmd
+test_cmd_arg
+test_cmd_args
+test_cmd_export"
+}
 @test "validate app argument" {
 run $ABRA app cmd
 assert_failure
@@ -40,7 +58,7 @@ teardown(){
 assert_success
 assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE"
-run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local
+run $ABRA app cmd --local "$TEST_APP_DOMAIN" test_cmd
 assert_success
 assert_output --partial 'baz'
@@ -52,7 +70,7 @@ teardown(){
 assert_success
 assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
-run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local
+run $ABRA app cmd --local "$TEST_APP_DOMAIN" test_cmd
 assert_failure
 assert_output --partial 'locally unstaged changes'
@@ -65,7 +83,7 @@ teardown(){
 assert_success
 assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
-run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local --chaos
+run $ABRA app cmd --local --chaos "$TEST_APP_DOMAIN" test_cmd
 assert_success
 assert_output --partial 'baz'
@@ -78,14 +96,14 @@ teardown(){
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
-run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local
+run $ABRA app cmd --local "$TEST_APP_DOMAIN" test_cmd
 assert_success
 assert_output --partial 'baz'
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-refute_output --partial 'behind 3'
+assert_output --partial "up to date"
 _reset_recipe "$TEST_RECIPE"
 }
@@ -95,14 +113,14 @@ teardown(){
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
-run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local --offline
+run $ABRA app cmd --local --offline "$TEST_APP_DOMAIN" test_cmd
 assert_success
 assert_output --partial 'baz'
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 _reset_recipe "$TEST_RECIPE"
 }
@@ -114,13 +132,13 @@ teardown(){
 }
 @test "error if missing arguments when passing --local" {
-run $ABRA app cmd "$TEST_APP_DOMAIN" --local
+run $ABRA app cmd --local "$TEST_APP_DOMAIN"
 assert_failure
 assert_output --partial 'missing arguments'
 }
 @test "cannot use --local and --user at same time" {
-run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local --user root
+run $ABRA app cmd --local --user root "$TEST_APP_DOMAIN" test_cmd
 assert_failure
 assert_output --partial 'cannot use --local & --user together'
 }
@@ -129,7 +147,7 @@ teardown(){
 run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/abra.sh"
 assert_success
-run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local --chaos
+run $ABRA app cmd --local --chaos "$TEST_APP_DOMAIN" test_cmd
 assert_failure
 assert_output --partial "$ABRA_DIR/recipes/$TEST_RECIPE/abra.sh does not exist"
@@ -137,25 +155,25 @@ teardown(){
 }
 @test "error if missing command" {
-run $ABRA app cmd "$TEST_APP_DOMAIN" doesnt_exist --local
+run $ABRA app cmd --local "$TEST_APP_DOMAIN" doesnt_exist
 assert_failure
 assert_output --partial "doesn't have a doesnt_exist function"
 }
 @test "run --local command" {
-run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local
+run $ABRA app cmd --local "$TEST_APP_DOMAIN" test_cmd
 assert_success
 assert_output --partial 'baz'
 }
 @test "run command with single arg" {
-run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd_arg --local -- bing
+run $ABRA app cmd --local "$TEST_APP_DOMAIN" test_cmd_arg -- bing
 assert_success
 assert_output --partial 'bing'
 }
 @test "run command with several args" {
-run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd_args --local -- bong bang
+run $ABRA app cmd --local "$TEST_APP_DOMAIN" test_cmd_args -- bong bang
 assert_success
 assert_output --partial 'bong bang'
 }


@@ -5,9 +5,11 @@ setup_file(){
 _common_setup
 _add_server
 _new_app
+_deploy_app
 }
 teardown_file(){
+_undeploy_app
 _rm_app
 _rm_server
 }
@@ -17,13 +19,6 @@ setup(){
 _common_setup
 }
-teardown(){
-# https://github.com/bats-core/bats-core/issues/383#issuecomment-738628888
-if [[ -z "${BATS_TEST_COMPLETED}" ]]; then
-_undeploy_app
-fi
-}
 @test "validate app argument" {
 run $ABRA app cp
 assert_failure
@@ -54,68 +49,120 @@ teardown(){
 assert_output --partial 'arguments must take $SERVICE:$PATH form'
 }
+@test "detect 'coming FROM' syntax" {
+run $ABRA app cp "$TEST_APP_DOMAIN" app:/myfile.txt . --debug
+assert_failure
+assert_output --partial 'coming FROM the container'
+}
+@test "detect 'going TO' syntax" {
+run $ABRA app cp "$TEST_APP_DOMAIN" myfile.txt app:/somewhere --debug
+assert_failure
+assert_output --partial 'going TO the container'
+}
 @test "error if local file missing" {
-run $ABRA app cp "$TEST_APP_DOMAIN" myfile.txt app:/somewhere
+run $ABRA app cp "$TEST_APP_DOMAIN" thisfileshouldnotexist.txt app:/somewhere
 assert_failure
-assert_output --partial 'myfile.txt does not exist locally?'
+assert_output --partial 'local stat thisfileshouldnotexist.txt: no such file or directory'
 }
 # bats test_tags=slow
 @test "error if service doesn't exist" {
-_deploy_app
-run bash -c "echo foo >> $BATS_TMPDIR/myfile.txt"
-assert_success
-run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/myfile.txt" doesnt_exist:/
+_mkfile "$BATS_TMPDIR/myfile.txt" "foo"
+run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/myfile.txt" doesnt_exist:/ --debug
 assert_failure
 assert_output --partial 'no containers matching'
-run rm -rf "$BATS_TMPDIR/myfile.txt"
-assert_success
-_undeploy_app
+_rm "$BATS_TMPDIR/myfile.txt"
 }
 # bats test_tags=slow
-@test "copy to container" {
-_deploy_app
-run bash -c "echo foo >> $BATS_TMPDIR/myfile.txt"
-assert_success
+@test "copy local file to container directory" {
+_mkfile "$BATS_TMPDIR/myfile.txt" "foo"
 run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/myfile.txt" app:/etc
 assert_success
-run rm -rf "$BATS_TMPDIR/myfile.txt"
+run $ABRA app run "$TEST_APP_DOMAIN" app cat /etc/myfile.txt
 assert_success
-_undeploy_app
+assert_output --partial "foo"
+_rm "$BATS_TMPDIR/myfile.txt"
+_rm_remote "/etc/myfile.txt"
 }
 # bats test_tags=slow
-@test "copy from container" {
-_deploy_app
-run bash -c "echo foo >> $BATS_TMPDIR/myfile.txt"
+@test "copy local file to container file (and override on remote)" {
+_mkfile "$BATS_TMPDIR/myfile.txt" "foo"
+# create
+run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/myfile.txt" app:/etc/myfile.txt
 assert_success
-run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/myfile.txt" app:/etc
+run $ABRA app run "$TEST_APP_DOMAIN" app cat /etc/myfile.txt
+assert_success
+assert_output --partial "foo"
+_mkfile "$BATS_TMPDIR/myfile.txt" "bar"
+# override
+run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/myfile.txt" app:/etc/myfile.txt
 assert_success
-run rm -rf "$BATS_TMPDIR/myfile.txt"
+run $ABRA app run "$TEST_APP_DOMAIN" app cat /etc/myfile.txt
+assert_success
+assert_output --partial "bar"
+_rm "$BATS_TMPDIR/myfile.txt"
+_rm_remote "/etc/myfile.txt"
+}
+# bats test_tags=slow
+@test "copy local file to container file (and rename)" {
+_mkfile "$BATS_TMPDIR/myfile.txt" "foo"
+# rename
+run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/myfile.txt" app:/etc/myfile2.txt
+assert_success
+run $ABRA app run "$TEST_APP_DOMAIN" app cat /etc/myfile2.txt
+assert_success
+assert_output --partial "foo"
+_rm "$BATS_TMPDIR/myfile.txt"
+_rm_remote "/etc/myfile2.txt"
+}
+# bats test_tags=slow
+@test "copy local directory to container directory (and creates missing directory)" {
+_mkdir "$BATS_TMPDIR/mydir"
+_mkfile "$BATS_TMPDIR/mydir/myfile.txt" "foo"
+run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/mydir" app:/etc
+assert_success
+run $ABRA app run "$TEST_APP_DOMAIN" app ls /etc/mydir
+assert_success
+assert_output --partial "myfile.txt"
+_rm "$BATS_TMPDIR/mydir"
+_rm_remote "/etc/mydir"
+}
+# bats test_tags=slow
+@test "copy local files to container directory" {
+_mkdir "$BATS_TMPDIR/mydir"
+_mkfile "$BATS_TMPDIR/mydir/myfile.txt" "foo"
+_mkfile "$BATS_TMPDIR/mydir/myfile2.txt" "foo"
+run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/mydir/" app:/etc
+assert_success
+run $ABRA app run "$TEST_APP_DOMAIN" app ls /etc/myfile.txt
+assert_success
+assert_output --partial "myfile.txt"
+run $ABRA app run "$TEST_APP_DOMAIN" app ls /etc/myfile2.txt
+assert_success
+assert_output --partial "myfile2.txt"
+_rm "$BATS_TMPDIR/mydir"
+_rm_remote "/etc/myfile*"
+}
+# bats test_tags=slow
+@test "copy container file to local directory" {
+run $ABRA app run "$TEST_APP_DOMAIN" app bash -c "echo foo > /etc/myfile.txt"
 assert_success
-_undeploy_app
 run $ABRA app cp "$TEST_APP_DOMAIN" app:/etc/myfile.txt "$BATS_TMPDIR"
@@ -123,8 +170,76 @@ teardown(){
 assert_exists "$BATS_TMPDIR/myfile.txt"
 assert bash -c "cat $BATS_TMPDIR/myfile.txt | grep -q foo"
-run rm -rf "$BATS_TMPDIR/myfile.txt"
+_rm "$BATS_TMPDIR/myfile.txt"
+_rm_remote "/etc/myfile.txt"
+}
+# bats test_tags=slow
+@test "copy container file to local file" {
+run $ABRA app run "$TEST_APP_DOMAIN" app bash -c "echo foo > /etc/myfile.txt"
 assert_success
-_undeploy_app
+run $ABRA app cp "$TEST_APP_DOMAIN" app:/etc/myfile.txt "$BATS_TMPDIR/myfile.txt"
+assert_success
+assert_exists "$BATS_TMPDIR/myfile.txt"
+assert bash -c "cat $BATS_TMPDIR/myfile.txt | grep -q foo"
+_rm "$BATS_TMPDIR/myfile.txt"
+_rm_remote "/etc/myfile.txt"
+}
+# bats test_tags=slow
+@test "copy container file to local file and rename" {
+run $ABRA app run "$TEST_APP_DOMAIN" app bash -c "echo foo > /etc/myfile.txt"
+assert_success
+run $ABRA app cp "$TEST_APP_DOMAIN" app:/etc/myfile.txt "$BATS_TMPDIR/myfile2.txt"
+assert_success
+assert_exists "$BATS_TMPDIR/myfile2.txt"
+assert bash -c "cat $BATS_TMPDIR/myfile2.txt | grep -q foo"
+_rm "$BATS_TMPDIR/myfile2.txt"
+_rm_remote "/etc/myfile.txt"
+}
+# bats test_tags=slow
+@test "copy container directory to local directory" {
+run $ABRA app run "$TEST_APP_DOMAIN" app bash -c "echo foo > /etc/myfile.txt"
+assert_success
+run $ABRA app run "$TEST_APP_DOMAIN" app bash -c "echo bar > /etc/myfile2.txt"
+assert_success
+mkdir "$BATS_TMPDIR/mydir"
+run $ABRA app cp "$TEST_APP_DOMAIN" app:/etc "$BATS_TMPDIR/mydir"
+assert_success
+assert_exists "$BATS_TMPDIR/mydir/etc/myfile.txt"
+assert_success
+assert_exists "$BATS_TMPDIR/mydir/etc/myfile2.txt"
+_rm "$BATS_TMPDIR/mydir"
+_rm_remote "/etc/myfile.txt"
+_rm_remote "/etc/myfile2.txt"
+}
+# bats test_tags=slow
+@test "copy container files to local directory" {
+run $ABRA app run "$TEST_APP_DOMAIN" app bash -c "echo foo > /etc/myfile.txt"
+assert_success
+run $ABRA app run "$TEST_APP_DOMAIN" app bash -c "echo bar > /etc/myfile2.txt"
+assert_success
+mkdir "$BATS_TMPDIR/mydir"
+run $ABRA app cp "$TEST_APP_DOMAIN" app:/etc/ "$BATS_TMPDIR/mydir"
+assert_success
+assert_exists "$BATS_TMPDIR/mydir/myfile.txt"
+assert_success
+assert_exists "$BATS_TMPDIR/mydir/myfile2.txt"
+_rm "$BATS_TMPDIR/mydir"
+_rm_remote "/etc/myfile.txt"
+_rm_remote "/etc/myfile2.txt"
 }


@@ -1,6 +1,7 @@
 #!/usr/bin/env bash
 setup_file(){
+load "$PWD/tests/integration/helpers/git"
 load "$PWD/tests/integration/helpers/common"
 _common_setup
 _add_server
@@ -16,6 +17,7 @@ teardown_file(){
 setup(){
 load "$PWD/tests/integration/helpers/common"
 _common_setup
+_reset_recipe
 }
 teardown(){
@@ -82,13 +84,13 @@ teardown(){
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 run $ABRA app deploy "$TEST_APP_DOMAIN" --no-input --no-converge-checks
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-refute_output --partial 'behind 3'
+refute_output --regexp 'behind .* 3 commits'
 _reset_recipe
 _undeploy_app
@@ -100,7 +102,7 @@ teardown(){
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 # NOTE(d1): need to use --chaos to force same commit
 run $ABRA app deploy "$TEST_APP_DOMAIN" \
@@ -108,7 +110,7 @@ teardown(){
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 _undeploy_app
 _reset_recipe
@@ -116,6 +118,9 @@ teardown(){
 # bats test_tags=slow
 @test "deploy latest commit if no published versions and no --chaos" {
+# TODO(d1): fix with a new test recipe which has no published versions?
+skip "known issue, abra-test-recipe has published versions now"
 latestCommit="$(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" rev-parse --short HEAD)"
 _remove_tags
@@ -140,7 +145,7 @@ teardown(){
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 threeCommitsBack="$(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" rev-parse --short HEAD)"
@@ -272,6 +277,25 @@ teardown(){
 assert_success
 }
+@test "ensure domain is checked" {
+if [[ "$TEST_SERVER" == "default" ]]; then
+skip "domain checks are disabled for local server"
+fi
+appDomain="custom-html.DOESNTEXIST"
+run $ABRA app new custom-html \
+--no-input \
+--server "$TEST_SERVER" \
+--domain "$appDomain"
+assert_success
+assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$appDomain.env"
+run $ABRA app deploy "$appDomain" --no-input
+assert_failure
+assert_output --partial 'no such host'
+}
 # bats test_tags=slow
 @test "skip domain check when requested" {
 run $ABRA app deploy "$TEST_APP_DOMAIN" \
@@ -296,3 +320,88 @@ teardown(){
 _undeploy_app
 }
+@test "bail out if specific version and chaos" {
+run $ABRA app deploy "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
+--chaos --no-input --no-converge-checks
+assert_failure
+assert_output --partial 'cannot use'
+}
+# bats test_tags=slow
+@test "COMPOSE_FILE with \$COMPOSE_FILE override works" {
+_reset_recipe
+run sed -i 's/#COMPOSE_FILE="$COMPOSE_FILE:compose.extra_env.yml"/COMPOSE_FILE="$COMPOSE_FILE:compose.extra_env.yml"/g' \
+"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
+assert_success
+# NOTE(d1): --chaos used to bypass versions and access compose.extra_env.yml
+run $ABRA app deploy "$TEST_APP_DOMAIN" \
+--no-input --no-converge-checks --chaos
+assert_success
+assert_output --partial "compose.yml"
+assert_output --partial "compose.extra_env.yml"
+_undeploy_app
+_reset_app
+}
+@test "error if no secrets generated" {
+run sed -i 's/COMPOSE_FILE="compose.yml"/COMPOSE_FILE="compose.yml:compose.extra_secret.yml"/g' \
+"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
+assert_success
+run sed -i 's/#SECRET_EXTRA_PASS_VERSION=v1/SECRET_EXTRA_PASS_VERSION=v1/g' \
+"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
+assert_success
+run $ABRA app deploy "$TEST_APP_DOMAIN" --no-input --no-converge-checks
+assert_failure
+assert_output --partial 'unable to deploy, secrets not generated'
+_reset_app
+}
+# bats test_tags=slow
+@test "recipe config comments not present in values" {
+run $ABRA app deploy "$TEST_APP_DOMAIN" --no-input
+assert_success
+run $ABRA app run "$TEST_APP_DOMAIN" app env
+assert_success
+refute_output --partial 'should be removed'
+}
+# bats test_tags=slow
+@test "deploy specific version with incompatible HEAD" {
+run sed -i 's/COMPOSE_FILE="compose.yml"/COMPOSE_FILE="compose.yml:compose.extra_secret.yml"/g' \
+"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
+assert_success
+run sed -i 's/#SECRET_EXTRA_PASS_VERSION=v1/SECRET_EXTRA_PASS_VERSION=v1/g' \
+"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
+assert_success
+run $ABRA app secret generate "$TEST_APP_DOMAIN" --all
+assert_success
+assert_output --partial 'extra_pass'
+run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/compose.extra_secret.yml"
+assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/compose.extra_secret.yml"
+_git_commit
+# NOTE(d1): 0.1.1+1.20.2 is a previous version which includes compose.extra_secret.yml
+run $ABRA app deploy "$TEST_APP_DOMAIN" "0.1.1+1.20.2" --no-input --no-converge-checks
+assert_success
+refute_output --partial 'no such file or directory'
+_undeploy_app
+_reset_app
+run $ABRA app secret rm "$TEST_APP_DOMAIN" --all
+assert_success
+_reset_recipe
+}


@@ -32,3 +32,15 @@ setup(){
 assert_failure
 assert_output --partial 'is not deployed'
 }
+@test "retrieve recipe if missing" {
+run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE"
+assert_success
+assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE"
+run $ABRA app logs "$TEST_APP_DOMAIN"
+assert_failure
+assert_output --partial 'is not deployed'
+assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE"
+}


@@ -13,14 +13,22 @@ teardown_file(){
 setup(){
 load "$PWD/tests/integration/helpers/common"
+load "$PWD/tests/integration/helpers/git"
 _common_setup
 _fetch_recipe
 }
 teardown(){
+load "$PWD/tests/integration/helpers/common"
 _rm_app
 }
+@test "autocomplete" {
+run $ABRA app new --generate-bash-completion
+assert_success
+assert_output --partial "traefik"
+}
 @test "create new app" {
 run $ABRA app new "$TEST_RECIPE" \
 --no-input \
@@ -28,10 +36,31 @@ teardown(){
 --domain "$TEST_APP_DOMAIN"
 assert_success
 assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
+_get_head_hash
+_get_current_hash
+assert_equal "$headHash" "$currentHash"
+}
+@test "create new app with version" {
+run $ABRA app new "$TEST_RECIPE" 0.1.1+1.20.2 \
+--no-input \
+--server "$TEST_SERVER" \
+--domain "$TEST_APP_DOMAIN"
+assert_success
+assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
+_get_tag_hash 0.1.1+1.20.2
+_get_current_hash
+assert_equal "$tagHash" "$currentHash"
 }
 @test "does not overwrite existing env files" {
-_new_app
+run $ABRA app new "$TEST_RECIPE" \
+--no-input \
+--server "$TEST_SERVER" \
+--domain "$TEST_APP_DOMAIN"
+assert_success
 run $ABRA app new "$TEST_RECIPE" \
 --no-input \
@@ -74,8 +103,7 @@ teardown(){
 --no-input \
 --chaos \
 --server "$TEST_SERVER" \
---domain "$TEST_APP_DOMAIN" \
---secrets
+--domain "$TEST_APP_DOMAIN"
 assert_success
 assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -84,32 +112,38 @@ teardown(){
 }
 @test "ensure recipe up to date if no --offline" {
+_reset_recipe
+_get_n_hash 3
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" reset --hard HEAD~3
 assert_success
-run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+_get_current_hash
+assert_equal "$currentHash" "$nHash"
 run $ABRA app new "$TEST_RECIPE" \
 --no-input \
 --server "$TEST_SERVER" \
---domain "$TEST_APP_DOMAIN" \
---secrets
+--domain "$TEST_APP_DOMAIN"
 assert_success
 assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
-run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-refute_output --partial 'behind 3'
+_get_head_hash
+_get_current_hash
+assert_equal "$HEAD_HASH" "$CURRENT_HASH"
 _reset_recipe
 }
 @test "ensure recipe not up to date if --offline" {
+_reset_recipe
+_get_n_hash 3
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" reset --hard HEAD~3
 assert_success
-run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+_get_current_hash
+assert_equal "$currentHash" "$nHash"
 # NOTE(d1): need to use --chaos to force same commit
 run $ABRA app new "$TEST_RECIPE" \
@@ -117,17 +151,17 @@ teardown(){
 --offline \
 --chaos \
 --server "$TEST_SERVER" \
---domain "$TEST_APP_DOMAIN" \
---secrets
+--domain "$TEST_APP_DOMAIN"
 assert_success
 assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
-run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+_get_current_hash
+assert_equal "$currentHash" "$nHash"
 _reset_recipe
 }
+# bats test_tags=slow
 @test "generate secrets" {
 run $ABRA app new "$TEST_RECIPE" \
 --no-input \


@@ -104,10 +104,7 @@ teardown(){
 _undeploy_app
-# NOTE(d1): to let the stack come down before nuking volumes
-sleep 5
-run $ABRA app volume rm "$TEST_APP_DOMAIN" --force
+run $ABRA app volume rm "$TEST_APP_DOMAIN" --no-input
 assert_success
 run $ABRA app volume ls "$TEST_APP_DOMAIN"
@@ -132,9 +129,6 @@ teardown(){
 _undeploy_app
-# NOTE(d1): to let the stack come down before nuking volumes
-sleep 5
 run $ABRA app rm "$TEST_APP_DOMAIN" --no-input
 assert_success
 assert_output --partial 'test-volume'


@@ -109,13 +109,13 @@ teardown(){
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
-run $ABRA app restore "$TEST_APP_DOMAIN" app DOESNTEXIST
+run $ABRA app restore "$TEST_APP_DOMAIN" app
 assert_failure
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-refute_output --partial 'behind 3'
+assert_output --partial "up to date"
 }
 @test "ensure recipe not up to date if --offline" {
@@ -126,19 +126,19 @@ teardown(){
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
-run $ABRA app restore "$TEST_APP_DOMAIN" app DOESNTEXIST --offline
+run $ABRA app restore "$TEST_APP_DOMAIN" app --offline
 assert_failure
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" checkout "$latestCommit"
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-refute_output --partial 'behind 3'
+assert_output --partial "HEAD detached at $latestCommit"
 }
 @test "error if missing service" {


@@ -50,13 +50,13 @@ teardown(){
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 run $ABRA app rollback "$TEST_APP_DOMAIN" --no-input --no-converge-checks
 assert_failure
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-refute_output --partial 'behind 3'
+assert_output --partial "up to date"
 }
 @test "ensure recipe not up to date if --offline" {
@@ -67,14 +67,14 @@ teardown(){
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 run $ABRA app rollback "$TEST_APP_DOMAIN" \
 --no-input --no-converge-checks --offline
 assert_failure
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" checkout "$latestCommit"
 assert_success
@@ -131,7 +131,7 @@ teardown(){
 latestCommit="$(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" rev-parse --short HEAD)"
 run $ABRA app deploy "$TEST_APP_DOMAIN" \
---no-input --no-converge-checks --chaos
+--no-input --chaos
 assert_success
 assert_output --partial "$latestCommit"
 assert_output --partial 'chaos'
@@ -194,6 +194,13 @@ teardown(){
 _undeploy_app
 }
+@test "bail out if specific version and chaos" {
+run $ABRA app rollback "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
+--chaos --no-input --no-converge-checks
+assert_failure
+assert_output --partial 'cannot use'
+}
 # bats test_tags=slow
 @test "rollback to previous version" {
 run $ABRA app deploy "$TEST_APP_DOMAIN" "0.1.1+1.20.2" --no-input --no-converge-checks


@@ -8,7 +8,7 @@ setup_file(){
 run $ABRA app new "$TEST_RECIPE" \
 --no-input \
 --server "$TEST_SERVER" \
---domain "$TEST_APP_DOMAIN" \
+--domain "$TEST_APP_DOMAIN"
 assert_success
 assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
 }
@@ -20,23 +20,10 @@ teardown_file(){
 }
 teardown() {
-# https://github.com/bats-core/bats-core/issues/383#issuecomment-738628888
-if [[ -z "${BATS_TEST_COMPLETED}" ]]; then
-_undeploy_app
-fi
-}
-_reset_app(){
-run rm -rf "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
-assert_success
-assert_not_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
-run $ABRA app new "$TEST_RECIPE" \
---no-input \
---server "$TEST_SERVER" \
---domain "$TEST_APP_DOMAIN" \
-assert_success
-assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
+run $ABRA app secret rm "$TEST_APP_DOMAIN" --all
+_reset_app
+_reset_recipe
+_checkout_recipe
 }
 setup(){
@@ -97,9 +84,6 @@ setup(){
 assert_output --partial 'test_pass_one'
 assert_output --partial 'test_pass_two'
 refute_output --partial 'extra_pass'
-run $ABRA app secret rm "$TEST_APP_DOMAIN" --all
-assert_success
 }
 @test "generate: broken if missing version" {
@@ -111,7 +95,6 @@ setup(){
 assert_failure
 assert_output --partial 'missing version'
-_reset_app
 }
 @test "generate: use version from app env" {
@@ -123,15 +106,11 @@ setup(){
 assert_success
 assert_output --partial 'test_pass_one'
-run docker -c "$TEST_SERVER" secret ls
+run bash -c '$ABRA app secret ls $TEST_APP_DOMAIN --machine | \
+jq -r ".[] | select(.name==\"test_pass_one\") | .version"'
 assert_success
-assert_output --regexp ".*_test_pass_one_v2"
-refute_output --regexp ".*_test_pass_one_v1"
+assert_output --partial 'v2'
+refute_output --partial 'v1'
-run $ABRA app secret rm "$TEST_APP_DOMAIN" --all
-assert_success
-_reset_app
 }
 @test "generate: generate extra secret based on COMPOSE_FILE" {
@@ -150,11 +129,6 @@ setup(){
 run docker -c "$TEST_SERVER" secret ls
 assert_success
 assert_output --partial "$TEST_APP_DOMAIN_extra_pass_v1"
-run $ABRA app secret rm "$TEST_APP_DOMAIN" --all
-assert_success
-_reset_app
 }
 @test "generate: bail if unstaged changes and no --chaos" {
@@ -181,8 +155,6 @@ setup(){
 run $ABRA app secret rm "$TEST_APP_DOMAIN" --all --chaos
 assert_success
-_checkout_recipe
 }
 @test "generate: ensure secret name uses trimmed stack name" {
@@ -247,9 +219,22 @@ setup(){
 run $ABRA app secret ls "$TEST_APP_DOMAIN"
 assert_success
 assert_output --partial 'true'
-run $ABRA app secret rm "$TEST_APP_DOMAIN" test_pass_one
-run $ABRA app secret ls "$TEST_APP_DOMAIN"
-assert_success
-assert_output --partial 'false'
+}
+@test "insert: create secret from file" {
+run bash -c "echo bar >> $ABRA_DIR/recipes/$TEST_RECIPE/foo"
+assert_success
+run $ABRA app secret insert --file "$TEST_APP_DOMAIN" test_pass_one v1 "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
+assert_success
+assert_output --partial 'successfully stored on server'
+run $ABRA app secret ls "$TEST_APP_DOMAIN"
+assert_success
+assert_output --partial 'true'
 }
 @test "rm: validate arguments" {
@@ -333,9 +318,19 @@ setup(){
 run $ABRA app secret ls "$TEST_APP_DOMAIN"
 assert_success
 assert_output --partial 'true'
-run $ABRA app secret rm "$TEST_APP_DOMAIN" --all
-run $ABRA app secret ls "$TEST_APP_DOMAIN"
-assert_success
-assert_output --partial 'false'
+}
+@test "ls: show secrets as machine readable" {
+run $ABRA app secret ls "$TEST_APP_DOMAIN"
+assert_success
+run $ABRA app secret generate "$TEST_APP_DOMAIN" --all
+assert_success
+run $ABRA app secret ls "$TEST_APP_DOMAIN" --machine
+assert_success
+assert_output --partial '"created-on-server":"true"'
 }
 @test "ls: bail if unstaged changes and no --chaos" {


@@ -47,6 +47,13 @@ teardown(){
 _undeploy_app
 }
+@test "bail out if specific version and chaos" {
+run $ABRA app upgrade "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
+--chaos --no-input --no-converge-checks
+assert_failure
+assert_output --partial 'cannot use'
+}
 # bats test_tags=slow
 @test "no upgrade if lint error" {
 _deploy_app


@@ -59,6 +59,8 @@ teardown(){
 # bats test_tags=slow
 @test "error if not in catalogue" {
+skip "known issue, see https://git.coopcloud.tech/coop-cloud/recipes-catalogue-json/issues/6"
 _deploy_app
 run $ABRA app version "$TEST_APP_DOMAIN"
@@ -92,7 +94,7 @@ teardown(){
 assert_success
 # NOTE(d1): to let the stack come down before nuking volumes
-sleep 3
+sleep 5
 run $ABRA app volume remove "$appDomain" --no-input
 assert_success


@@ -78,9 +78,6 @@ teardown(){
 _undeploy_app
-# NOTE(d1): to let the stack come down before nuking volumes
-sleep 5
 run $ABRA app volume rm "$TEST_APP_DOMAIN" --force
 assert_success
 assert_output --partial 'volumes removed successfully'
@@ -92,9 +89,6 @@ teardown(){
 _undeploy_app
-# NOTE(d1): to let the stack come down before nuking volumes
-sleep 5
 run $ABRA app volume rm "$TEST_APP_DOMAIN" --force
 assert_success
 assert_output --partial 'volumes removed successfully'


@@ -40,3 +40,16 @@ _rm_app() {
 run $ABRA app remove "$TEST_APP_DOMAIN" --no-input
 fi
 }
+_reset_app(){
+run rm -rf "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
+assert_success
+assert_not_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
+run $ABRA app new "$TEST_RECIPE" \
+--no-input \
+--server "$TEST_SERVER" \
+--domain "$TEST_APP_DOMAIN"
+assert_success
+assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
+}


@@ -1,10 +1,11 @@
 #!/usr/bin/env bash
 _common_setup() {
-load '/usr/lib/bats/bats-support/load'
-load '/usr/lib/bats/bats-assert/load'
-load '/usr/lib/bats/bats-file/load'
+bats_load_library bats-support
+bats_load_library bats-assert
+bats_load_library bats-file
+load "$PWD/tests/integration/helpers/file"
 load "$PWD/tests/integration/helpers/app"
 load "$PWD/tests/integration/helpers/git"
 load "$PWD/tests/integration/helpers/recipe"
@@ -15,5 +16,5 @@ _common_setup() {
 export TEST_APP_NAME="$(basename "${BATS_TEST_FILENAME//./_}")"
 export TEST_APP_DOMAIN="$TEST_APP_NAME.$TEST_SERVER"
-export TEST_RECIPE="abra-integration-test-recipe"
+export TEST_RECIPE="abra-test-recipe"
 }


@@ -0,0 +1,24 @@
+_mkfile() {
+run bash -c "echo $2 > $1"
+assert_success
+}
+_mkfile_remote() {
+run $ABRA app run "$TEST_APP_DOMAIN" app "bash -c \"echo $2 > $1\""
+assert_success
+}
+_mkdir() {
+run bash -c "mkdir -p $1"
+assert_success
+}
+_rm() {
+run rm -rf "$1"
+assert_success
+}
+_rm_remote() {
+run "$ABRA" app run "$TEST_APP_DOMAIN" app rm -rf "$1"
+assert_success
+}
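These helpers wrap the repetitive create/remove steps used by the cp tests above. A minimal sketch of how a test combines them (the test name and file paths here are illustrative, not from the suite):

    @test "round-trip a file through the app container" {
      _mkfile "$BATS_TMPDIR/hello.txt" "hello"                   # create a local fixture
      run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/hello.txt" app:/etc
      assert_success
      _rm "$BATS_TMPDIR/hello.txt"                               # clean up locally...
      _rm_remote "/etc/hello.txt"                                # ...and inside the container
    }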


@@ -28,3 +28,39 @@ _reset_tags() {
 assert_success
 refute_output '0'
 }
+_set_git_author() {
+run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" config --local user.email test@example.com
+assert_success
+run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" config --local user.name test
+assert_success
+}
+_git_commit() {
+run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" add .
+assert_success
+run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" commit -m "test: helpers/git.bash: _git_commit"
+assert_success
+}
+_get_tag_hash() {
+tagHash=$(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" rev-list -n 1 "$1")
+assert_success
+}
+_get_head_hash() {
+headHash=$(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" show -s --format="%H" HEAD)
+assert_success
+}
+_get_current_hash() {
+currentHash=$(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" show -s --format="%H")
+assert_success
+}
+_get_n_hash() {
+nHash=$(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" show -s --format="%H" "HEAD~$1")
+assert_success
+}
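Note that these helpers assign shell variables (tagHash, headHash, currentHash, nHash) rather than printing, so callers compare them directly with assert_equal. A minimal sketch of the pattern, as used by the app new tests earlier in this diff:

    _get_tag_hash 0.1.1+1.20.2   # sets $tagHash to the commit the tag points at
    _get_current_hash            # sets $currentHash to the recipe's checked-out commit
    assert_equal "$tagHash" "$currentHash"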


@@ -1,13 +1,21 @@
 #!/usr/bin/env bash
 _add_server() {
+if [[ "$TEST_SERVER" == "default" ]]; then
+run $ABRA server add -l
+else
 run $ABRA server add "$TEST_SERVER"
+fi
 assert_success
 assert_exists "$ABRA_DIR/servers/$TEST_SERVER"
 }
 _rm_server() {
+if [[ "$TEST_SERVER" == "default" ]]; then
+run rm -rf "$ABRA_DIR/servers/default"
+else
 run $ABRA server remove --no-input "$TEST_SERVER"
+fi
 assert_success
 assert_not_exists "$ABRA_DIR/servers/$TEST_SERVER"
 }
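With this change the helpers branch on TEST_SERVER: when it is "default" they target the local Docker host via `abra server add -l` instead of a remote server. A rough sketch of running the suite that way; the exact entry point is an assumption and may differ in this repository:

    # run the integration suite against the local Docker daemon
    TEST_SERVER=default bats tests/integration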


@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+setup() {
+load "$PWD/tests/integration/helpers/common"
+_common_setup
+}
+@test "show unstaged changes" {
+run $ABRA recipe diff "$TEST_RECIPE"
+assert_success
+refute_output --partial 'traefik.enable'
+run sed -i '/traefik.enable=.*/d' "$ABRA_DIR/recipes/$TEST_RECIPE/compose.yml"
+assert_success
+run $ABRA recipe diff "$TEST_RECIPE"
+assert_success
+assert_output --partial 'traefik.enable'
+_reset_recipe
+}


@@ -5,7 +5,17 @@ setup() {
 _common_setup
 }
-@test "recipe fetch" {
+@test "recipe fetch all" {
+run rm -rf "$ABRA_DIR/recipes/matrix-synapse"
+assert_success
+assert_not_exists "$ABRA_DIR/recipes/matrix-synapse"
+run $ABRA recipe fetch
+assert_success
+assert_exists "$ABRA_DIR/recipes/matrix-synapse"
+}
+@test "recipe fetch single recipe" {
 run rm -rf "$ABRA_DIR/recipes/matrix-synapse"
 assert_success
 assert_not_exists "$ABRA_DIR/recipes/matrix-synapse"


@@ -66,13 +66,13 @@ setup() {
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 run $ABRA recipe lint "$TEST_RECIPE"
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-refute_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 _reset_recipe
 }
@@ -82,13 +82,13 @@ setup() {
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 run $ABRA recipe lint "$TEST_RECIPE" --offline
 assert_success
 run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
-assert_output --partial 'behind 3'
+assert_output --regexp 'behind .* 3 commits'
 _reset_recipe
 }

Some files were not shown because too many files have changed in this diff.