Compare commits

...

547 Commits

Author SHA1 Message Date
1f9b863be0
fix: appease formatter, ignore vendor
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-10-21 16:46:39 +02:00
3b3ce85ef9
fix: rebase coop-cloud/organising#533
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2024-10-21 16:39:36 +02:00
1f8662cd95
refactor: urfave v3 2024-10-21 16:39:27 +02:00
375e17a4a0
refactor: urfave v2 2024-10-21 11:00:35 +02:00
04aec8232f
chore: vendor
All checks were successful
continuous-integration/drone/push Build is passing
2024-08-04 11:06:58 +02:00
2a5985e44e
build: drop 2MB with GCFLAGS [ci skip] 2024-07-27 12:56:43 +02:00
c65be64e7d
fix: dont checkout version for abra app undeploy
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#628
2024-07-24 16:09:27 +02:00
fd8652e26d
fix: --chaos/--offline for abra app ps
See coop-cloud/organising#628
See coop-cloud/organising#629
2024-07-24 16:09:03 +02:00
518c5795f4
fix: avoid overwriting non version env vars
See coop-cloud/organising#630
2024-07-24 16:07:08 +02:00
827edcb0da
test: full width for CI testing [ci skip]
Also clean up the .env.sample.
2024-07-18 11:03:02 +02:00
05489a129c
test: re-create server for setup [ci skip] 2024-07-17 14:32:53 +02:00
c02e11eb0a
test: fix order of teardown [ci skip] 2024-07-17 14:15:03 +02:00
8b8e158664
test: int suite fixes
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-17 14:05:46 +02:00
e5a6dea10c
fix: catch ctrl-c again; less cryptic logging
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-07-17 10:09:09 +02:00
1132b09b5b
fix: error out for invalid env versions 2024-07-17 10:08:51 +02:00
b2436174b0
chore: more logging for env versions 2024-07-17 10:08:32 +02:00
ea10019068
fix: "secret insert" respects env version 2024-07-17 10:08:13 +02:00
9b0b3c2e4c
fix: override version from CLI
See coop-cloud/organising#541
2024-07-17 10:07:47 +02:00
8084bff104
test: env version tests
See coop-cloud/organising#541
2024-07-17 10:06:46 +02:00
d22e2c38ce
test: just build main
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-07-17 08:29:58 +02:00
e945087f79
test: env version writing tests 2024-07-17 08:27:12 +02:00
7734dd555d
fix: spacer between multiple versions 2024-07-17 02:12:26 +02:00
aedf5e5ff7
fix: dont write commented out versions
See coop-cloud/organising#626
2024-07-17 01:56:28 +02:00
95c598d030
feat: "app new" supports writing env files
And, automagically, chaos commit hashes.
2024-07-17 01:45:19 +02:00
56068362e8
fix: write versions on deploy/upgrade/rollback
See coop-cloud/organising#625
2024-07-17 01:29:49 +02:00
cf14731b46
refactor: "false" -> CHAOS_DEFAULT 2024-07-17 01:23:12 +02:00
486cfa68d8
test: explode on failures
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#623
2024-07-17 00:16:47 +02:00
1718903834
test: reset recipe before undeploying [ci skip] 2024-07-17 00:00:06 +02:00
eb9894e5bb
test: dont clone if exists [ci skip] 2024-07-16 23:51:28 +02:00
a2116774e8
test: ensure catalogue in place [ci skip] 2024-07-16 23:46:02 +02:00
d2efdf8bf5
test: adjust output checking [ci skip] 2024-07-16 23:39:10 +02:00
b15c05929c
test: adjust output checking [ci skip] 2024-07-16 23:32:12 +02:00
f167a91868
test: skip for now, it's flaking again [ci skip]
We need to solve coop-cloud/organising#541
2024-07-16 23:28:18 +02:00
8cded8752a
test: ensure correct server for diffing [ci skip] 2024-07-16 23:25:17 +02:00
d1876e2fae
test: do exact diff of JSON for integration
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#627
2024-07-16 23:19:36 +02:00
e42a1bca29
fix: add chaos/deploy versions back to ps output
All checks were successful
continuous-integration/drone/push Build is passing
Fix to support alakazam parsing
2024-07-16 22:47:47 +02:00
b5493ba059
refactor: CreateTable2 -> CreateTable [ci skip] 2024-07-16 22:45:03 +02:00
a41a36b8fd
fix: dont lock existing version on rollback
All checks were successful
continuous-integration/drone/push Build is passing
Otherwise, we can't select previous versions.
2024-07-16 17:35:15 +02:00
de006782b6
refactor: tablewriter -> lipgloss
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
The jsontable impl. is also dropped. Output is unchanged.
2024-07-16 16:22:47 +02:00
f28cffe6d8
refactor: vertical deploy overview 2024-07-16 09:37:10 +02:00
d3ede0f0f6
refactor: logging with background/padding 2024-07-15 22:55:02 +02:00
ae4653f5e3
build: add full install target [ci skip] 2024-07-13 15:30:38 +02:00
7f0a74d3c3
fix: source autocompletion on the current terminal
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-07-11 12:02:38 -03:00
e99114e695
fix: setup should be run once 2024-07-11 12:02:22 -03:00
b1208f9db5
fix: sometimes the completion directories already exist 2024-07-11 12:01:21 -03:00
b8e1a3b75f
test: remote recipe tests
All checks were successful
continuous-integration/drone/push Build is passing
See #432
2024-07-10 16:03:28 +02:00
ff90b43929
fix: use struct data for HEAD retrieval
All checks were successful
continuous-integration/drone/push Build is passing
See ce7dda1eae
2024-07-10 15:51:11 +02:00
c5724d56f8 fix(config): Removes config file name from abra dir
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-10 13:42:24 +00:00
ce7dda1eae
fix: use recipe struct data
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Follow up for #432
2024-07-10 15:40:45 +02:00
d38f3ab7f5
test: speed up test
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-10 13:27:58 +02:00
4be8c8daed
test: fix outputs [ci skip]
See https://build.coopcloud.tech/coop-cloud/abra/2035/1/5
2024-07-10 13:20:39 +02:00
3c9405a4ed
refactor!: --problems/p goes away
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Follow up for #413
2024-07-10 13:06:46 +02:00
f6b7510da6 feat: introduce remote recipes
All checks were successful
continuous-integration/drone/push Build is passing
Reviewed-on: #432
2024-07-10 10:25:06 +00:00
7596982282 feat: update new version in env file
All checks were successful
continuous-integration/drone/pr Build is passing
2024-07-10 12:12:43 +02:00
4085eb6654 feat: define recipe version inside app env file 2024-07-10 12:11:46 +02:00
790dbca362 feat!: remove all catalogue reads from app commands 2024-07-10 12:06:57 +02:00
d7a870b887 feat: remote recipes 2024-07-10 12:06:44 +02:00
1a3ec7a107
fix: pass recipe name for listing cmds
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-09 17:23:06 +02:00
7f910b4e5b
test: recipe test fixups
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-07-09 11:34:20 +02:00
b82ac3bd63
refactor: make IsChaos an actual bool 2024-07-09 11:34:01 +02:00
00d60f7114
fix: ensure force upgrade/rollback works 2024-07-09 11:33:33 +02:00
71d93cbbea
refactor: debug logging and errors for version issues 2024-07-09 11:33:07 +02:00
2fb5493ab5
feat: support chaos commits on deploy
See coop-cloud/organising#517
2024-07-09 11:31:52 +02:00
0ff8e49cfd
docs: pass on sub-command help 2024-07-09 09:43:18 +02:00
addbda9145
test: fixups for the changepocalypse
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-07-09 09:41:49 +02:00
c33ca1c6bc
fix!: chaos consistency (deploy/undeploy/rollback/upgrade)
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#559

--chaos for rollback/upgrade goes away.
2024-07-08 17:23:49 +02:00
4580df72cb
fix: use recipe name
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-08 14:58:57 +02:00
f003430a8d
fix: use recipe name, not app name
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-08 14:54:15 +02:00
5426464092
refactor!: drop version, show versions in ps
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#526
See coop-cloud/organising#502
2024-07-08 14:41:46 +02:00
72c021c727
fix: remove old commands from deploy fail help
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-08 14:29:51 +02:00
f2e076b35f
fix: set default logger on kadabra 2024-07-08 14:26:27 +02:00
4ccb4198d6
fix: "recipe version" handles non-catalogue recipes 2024-07-08 14:26:26 +02:00
a9f7579ca9
fix: remove old logrus calls 2024-07-08 14:21:17 +02:00
9cd1fe658b refactor(recipe): create a recipe struct that gets used everywhere #430
All checks were successful
continuous-integration/drone/push Build is passing
Reviewed-on: #430
2024-07-08 12:18:58 +00:00
41c16db670 test: fix test failure
All checks were successful
continuous-integration/drone/pr Build is passing
2024-07-08 14:10:17 +02:00
87ecc05962 refactor(recipe): remove direct usage of config.RECIPE_DIR
Some checks failed
continuous-integration/drone/pr Build is failing
2024-07-08 13:48:02 +02:00
f14d49cc64 refactor(recipe): rename Recipe2 -> Recipe 2024-07-08 13:19:40 +02:00
f638b6a16b refactor(recipe): remove old struct 2024-07-08 13:16:47 +02:00
5617a9ba07 refactor(recipe): remove remaining usage of old recipe struct 2024-07-08 13:15:20 +02:00
c1b03bcbd7 refactor(recipe): load compose config where it's used 2024-07-08 12:31:39 +02:00
99da8d4e57 refactor(recipe): move GetComposeFiles to new struct 2024-07-08 12:06:58 +02:00
ca1db33e97 refactor(recipe): remove Dir method on old struct 2024-07-08 11:48:53 +02:00
eb62e0ecc3 refactor(recipe): move Tags method to new struct 2024-07-08 11:45:47 +02:00
6f90fc3025 refactor(recipe): don't use README.md path directly 2024-07-08 11:43:18 +02:00
c861c09cce refactor(recipe): use method or variable for .env.sample 2024-07-08 11:41:26 +02:00
2f41b6d8b4 refactor(recipe): store sample env path in new struct 2024-07-08 11:31:55 +02:00
73e9b818b4 refactor(recipe): move SampleEnv method to new struct 2024-07-08 11:02:43 +02:00
f268e5893b refactor(recipe): move functions that operate on the git repo to new file 2024-07-08 11:00:50 +02:00
47013c63d6 refactor(recipe): use template for ssh url 2024-07-08 10:56:08 +02:00
4cf6155fb8 refactor(recipe): introduce Dir var 2024-07-08 10:56:08 +02:00
01f3f4be17 refactor(recipe): use new recipe.Ensure method 2024-07-08 10:55:55 +02:00
eee2ecda06 refactor(recipe): add offline and chaos options to Ensure method 2024-07-08 10:55:55 +02:00
950f85e2b4 refactor(recipe): introduce new recipe struct and move some methods 2024-07-08 10:55:43 +02:00
9ef64778f5
chore: go deps update
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-08 01:52:17 +02:00
735f521bc0
refactor(errors)!: remove WIP/broken command
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-08 01:33:06 +02:00
96a25425a4
refactor(ps)!: remove -w, "watch ..." does it better
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-08 01:10:58 +02:00
1a8dca9804
fix(deploy): only output when actually waiting
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-08 01:01:14 +02:00
465827d5ee
log: no additional newlines 2024-07-08 01:01:14 +02:00
cde06f4f00
log: output caller on debug, use stdout as default 2024-07-08 01:01:13 +02:00
050a479df7
refactor: "service name" -> "service" 2024-07-08 00:38:54 +02:00
ef108d63e1
refactor: use central logger
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-07-08 00:01:28 +02:00
cf8ff410cc
feat: central log config
See coop-cloud/organising#422
2024-07-08 00:01:27 +02:00
6ec678208f
chore: formatting 2024-07-07 22:40:06 +02:00
a001be3021
docs: better "app ps" description 2024-07-07 22:39:57 +02:00
6712bd446f
chore: add upstream link
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-07-07 21:52:45 +02:00
1097daa69f
fix: "abra app restart" docs + --all-services
See coop-cloud/organising#605
2024-07-07 21:52:24 +02:00
beaa233421
test: only publish image on main merge
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-07-07 12:21:51 +02:00
f871f9beee
test: reduce duplication
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-07 12:13:07 +02:00
0f8f0f908f
test: ensure catalogue
All checks were successful
continuous-integration/drone/push Build is passing
2024-07-07 12:03:43 +02:00
c5211fbd7e
test: fix imports 2024-07-07 12:03:37 +02:00
0076b31253 new package envfile and move GetComposeFiles to recipe package
Some checks failed
continuous-integration/drone/pr Build is failing
2024-07-06 16:37:16 +02:00
37aff723c0 move GetComposeFiles 2024-07-06 16:37:16 +02:00
f18c642226 refactor: move app files from config to app package 2024-07-06 16:37:16 +02:00
ac695ae28e feat: introduce abra config file and load abra dir from it (!419)
All checks were successful
continuous-integration/drone/push Build is passing
This is the first step in introducing a configuration file for abra. The config file must be named `abra.yaml` or `abra.yml`. abra looks for the config file in the current directory and, when not found, traverses the directory tree up until it is found or the home/root directory is reached.

For now there is only one setting that is made configurable: `abraDir`. The new logic for setting the abra dir is the following:
1. lookup `$ABRA_DIR` env
2. look for config file and take value from there
3. `$HOME/.abra` as fallback

See coop-cloud/organising#303.
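A minimal sketch of that lookup order, assuming illustrative names (`findAbraDir`, the `config` struct) rather than abra's actual identifiers:

```
// Sketch only: resolve the abra dir using the order described above.
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"gopkg.in/yaml.v3"
)

type config struct {
	AbraDir string `yaml:"abraDir"` // the one configurable setting
}

func findAbraDir() string {
	// 1. lookup $ABRA_DIR env
	if dir := os.Getenv("ABRA_DIR"); dir != "" {
		return dir
	}

	// 2. look for abra.yaml/abra.yml, traversing up until home/root
	home, _ := os.UserHomeDir()
	cwd, _ := os.Getwd()
	for dir := cwd; ; dir = filepath.Dir(dir) {
		for _, name := range []string{"abra.yaml", "abra.yml"} {
			raw, err := os.ReadFile(filepath.Join(dir, name))
			if err != nil {
				continue
			}
			var cfg config
			if yaml.Unmarshal(raw, &cfg) == nil && cfg.AbraDir != "" {
				return cfg.AbraDir
			}
		}
		if dir == home || dir == filepath.Dir(dir) {
			break // reached home or the filesystem root
		}
	}

	// 3. $HOME/.abra as fallback
	return filepath.Join(home, ".abra")
}

func main() {
	fmt.Println(findAbraDir())
}
```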

Reviewed-on: #419
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-07-06 14:36:31 +00:00
ac87898005
test: run versioned script [ci skip] 2024-07-03 10:02:04 +02:00
32ae2499b6
test: add CI integration script [ci skip] 2024-07-03 09:57:22 +02:00
1136ec5dcd
build: remove old release scripts 2024-07-03 09:57:06 +02:00
6a2db1abaa
test: run int suite on remote server via cron
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-07-02 17:18:05 +02:00
9554ad40c8
refactor: use adapted upstream detach=false logic [ci skip]
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#607.
2024-07-02 14:52:12 +02:00
2014cd6622
test: less fragile integration suite [ci skip]
See coop-cloud/organising#584
See coop-cloud/organising#595
2024-07-02 12:16:58 +02:00
a9ce2106c6
test: skip test for now
All checks were successful
continuous-integration/drone/push Build is passing
Also, don't build image if tests fail.
2024-06-28 06:12:32 +02:00
34de38928a
test: include catalogue
Some checks failed
continuous-integration/drone/push Build is failing
2024-06-26 23:46:35 +02:00
f58522d822
fix: dont always download the catalogue
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
See coop-cloud/organising#592
2024-06-25 16:48:41 +02:00
712ebfb701
test: update and fix cleanup for "server add"
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-06-25 16:24:44 +02:00
1fe601cd16
fix: custom timeout only for "server add" 2024-06-25 16:13:57 +02:00
7b7e1bfa97
refactor!: server add/rm has better UI/UX
Less confusing logging messages, clear "is created" / "already exists"
output. Move the majority of logging to debug output to not confuse the
situation. Some code cleanups also in there.
2024-06-25 09:48:53 +02:00
1a12bef53e
docs: better "server add" help output 2024-06-25 09:24:01 +02:00
d787f71215
fix: more accurate dns errors
All checks were successful
continuous-integration/drone/push Build is passing
2024-06-25 00:27:48 +02:00
9bf44c15ed
fix: clean up if failed to create context 2024-06-25 00:27:34 +02:00
349cacc1f2
docs: explain -D for "server add" 2024-06-25 00:27:16 +02:00
938534f5ac feat: support non-TLD resolving server domains
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#566
2024-06-24 22:07:16 +00:00
6cd331ebd6 secret: allow inserting secret from file and add trim flag
All checks were successful
continuous-integration/drone/push Build is passing
2024-06-22 16:49:59 +00:00
40517171f7
test: separate test for git name/email
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See #405
2024-06-22 18:46:28 +02:00
b2485cc122 feat: add git-user and git-email flags to recipe new
All checks were successful
continuous-integration/drone/push Build is passing
2024-06-22 16:38:32 +00:00
9ec99c7712 test: return/echo from git helper functions
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-06-22 17:04:33 +02:00
aa3910f8df
refactor!: drop all SSH opts / config handling
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#601
See coop-cloud/organising#482
2024-06-21 17:16:41 +02:00
43990b6fae
test: use more plumbing for git output
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-06-21 17:10:12 +02:00
91ea2c01a5
fix: fix old app version deploy wrt. compose files
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#617
2024-06-21 16:14:40 +02:00
316fdd3643
fix: abra app new checks out latest version
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#618
2024-06-21 15:51:34 +02:00
e07ae8cccd
chore: make format/check
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-06-19 19:17:22 +02:00
300a4ead01
fix: stop using deprecated APIs
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2024-06-19 19:14:52 +02:00
f209b6f564
chore: go get -u -t 2024-06-19 19:14:44 +02:00
791183adfe
build: new deps target 2024-06-19 19:14:31 +02:00
e6b35e8524 fix(upgrade): make upgrade --chaos working again
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-22 10:21:31 +02:00
8a0274cac0 fix(recipe): output correct formatted json for recipe version
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-21 16:59:59 +02:00
e609924af0 feat(upgrade): add --releasenotes: show release notes and skip upgrading
All checks were successful
continuous-integration/drone/push Build is passing
2024-05-21 13:49:36 +02:00
70e2943301 fix(upgrade): only show release notes relevant for the upgrade 2024-05-21 13:49:11 +02:00
0590c1824d checkout deployed version
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2024-05-14 00:07:58 +02:00
459abecfa5 only show container that should be deployed
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-05-13 23:26:02 +02:00
183ad8f576 machine readable ps output
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2024-05-13 22:08:03 +02:00
03f94da2d8
docs: add fauno [ci skip] 2024-05-01 01:20:25 +02:00
766f69b0fd
feat: strip debug symbols
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
to produce smaller binaries
2024-04-30 14:05:03 -03:00
004cd70aed
fix: use unique rule number & wording [ci skip] 2024-04-06 23:52:56 +02:00
a4de446f58
test: more verbose failure msg, use contains [ci skip] 2024-04-06 23:48:22 +02:00
d21c35965d fix: add warning for long secret names (!359)
All checks were successful
continuous-integration/drone/push Build is passing
A start of a fix for coop-cloud/organising#463
Putting some code out to start a discussion. I've added a linting rule for recipes to establish a general principle, but I want to put some validation into cli/app/new.go, as that's the point where we have both the recipe and the domain and can say for sure whether or not the secret name lengths cause a problem. That will have to wait for a bit. Let me know if I've missed the mark somewhere.
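For context, Docker caps secret names at 64 characters, and the generated name combines the app domain with the recipe's secret name, which is what can overflow. A hedged sketch of such a check; the `<domain>_<secret>_<version>` shape is an assumption for illustration, not abra's exact format:

```
// Sketch only: warn when a generated secret name would exceed Docker's
// 64-character limit for secret names.
package main

import "fmt"

const maxSecretNameLength = 64 // Docker's limit for secret names

// checkSecretName assumes a "<domain>_<secret>_<version>" name shape,
// which is illustrative rather than abra's actual format.
func checkSecretName(domain, secret, version string) error {
	name := fmt.Sprintf("%s_%s_%s", domain, secret, version)
	if len(name) > maxSecretNameLength {
		return fmt.Errorf("secret name %q is %d chars, max is %d",
			name, len(name), maxSecretNameLength)
	}
	return nil
}

func main() {
	if err := checkSecretName("my-very-long-app-domain.example.com", "smtp_password", "v1"); err != nil {
		fmt.Println("warning:", err)
	}
}
```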

Reviewed-on: #359
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: Rich M <r.p.makepeace@gmail.com>
Co-committed-by: Rich M <r.p.makepeace@gmail.com>
2024-04-06 21:41:37 +00:00
63ea58ffaa add relevant command to error message
All checks were successful
continuous-integration/drone/push Build is passing
2024-04-01 18:51:53 +01:00
2ecace3e90
fix: add missing packages on final layer
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#598
2024-04-01 13:57:51 +02:00
d5ac3958a4 feat: add retries to app volume remove
All checks were successful
continuous-integration/drone/push Build is passing
2024-03-27 05:38:24 +00:00
3wc
72c20e0039 fix: make installer work again
All checks were successful
continuous-integration/drone/push Build is passing
2024-03-26 21:07:38 -03:00
575f9905f1
Revert "Revert "feat: backup revolution""
All checks were successful
continuous-integration/drone/push Build is passing
This reverts commit 2c515ce70a.
2024-03-12 10:34:40 +01:00
e3a0af5840
build: upgrade goreleaser
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#474
2024-03-12 10:11:14 +01:00
9a3a39a185
chore: new 0.9.x series
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-03-12 10:05:31 +01:00
cea56dddde
fix: drop deprecated stanza (goreleaser) 2024-03-12 10:04:50 +01:00
2c515ce70a
Revert "feat: backup revolution"
This reverts commit c5687dfbd7.

This is a temporary measure to facilitate a release which won't
completely explode people's workflows (missing command logic). We
re-instate this commit after the first 0.9.x release.
2024-03-12 10:03:42 +01:00
40c0fb4bac fix-integration-tests (!403)
All checks were successful
continuous-integration/drone/push Build is passing
In preparation for the new abra release, let's fix all integration tests

After merging, this needs to be cherry-picked into the release-0-9 branch.

  - [x] app_backup.bats (skip this one)
  - [x] app_check.bats (fixed by bd21014fed)
  - [x] app_cmd.bats (partially fixed in 08232b74f6), has known regression coop-cloud/organising#581
  - [x] app_config.bats (no changes needed)
  - [x] app_cp.bats (no changes needed)
  - [x] app_deploy.bats
  - [x] app_errors.bats (no changes needed)
  - [x] app_list.bats (no changes needed)
  - [x] app_logs.bats (no changes needed)
  - [x] app_new.bats (no changes needed)
  - [x] app_ps.bats (no changes needed)
  - [x] app_remove.bats (fixed by [2f29fbeb2e](#403/commits/2f29fbeb2e018656413fa25f8615b7a98cdcb083))
  - [x] app_restart.bats (no changes needed)
  - [x] app_restore.bats (fixed by [f2dd5afc38](#403/commits/f2dd5afc38a25a8316899fa0c6d59499445868d7))
  - [x] app_rollback.bats (partially fixed by 6e99b74c24)
  - [x] app_run.bats (no changes needed)
  - [x] app_secret.bats (fixed by bd069d32f6)
  - [x] app_services.bats (no changes needed)
  - [x] app_undeploy.bats (no changes needed)
  - [x] app_upgrade.bats (no changes needed)
  - [x] app_version.bats (partially fixed by ad323ad2bd)
  - [x] app_volume.bats (fixed by [03c3823770](#403/commits/03c38237707ae795b723180eb07a7edc84a8de35))
  - [x] autocomplete.bats (no changes needed)
  - [x] catalogue.bats (no changes needed)
  - [x] dirs.bats (no changes needed)
  - [x] install.bats (fails, but this is expected)
  - [x] recipe_diff.bats (no changes needed)
  - [x] recipe_fetch.bats (no changes needed)
  - [x] recipe_lint.bats (fixed by [b6b0808066](#403/commits/b6b0808066a11e4bcd77517ec39600d500bcb944))
  - [x] recipe_list.bats (no changes needed)
  - [x] recipe_new.bats (fixed by [0aac464ded](#403/commits/0aac464ded6b43afb3ec37ade2f64d6191b9838f))
  - [x] recipe_release.bats (no changes needed)
  - [x] recipe_reset.bats (no changes needed)
  - [x] recipe_sync.bats (no changes needed)
  - [x] recipe_upgrade.bats (fixed by [ab86904cf4](#403/commits/ab86904cf45db89c7c189ca1fd9971909bd446dd))
  - [x] recipe_version.bats (fixed by 81897bf4da)
  - [x] server_add.bats
  - [x] server_list.bats
  - [x] server_prune.bats (no changes needed)
  - [x] server_remove.bats
  - [x] upgrade.bats
  - [x] version.bats (no changes needed)

Co-authored-by: decentral1se <cellarspoon@riseup.net>
Reviewed-on: #403
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-03-11 13:27:21 +00:00
0643df6d73 feat: fetch all recipes when no recipe is specified (!401)
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#530

Reviewed-on: #401
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-01-24 15:01:33 +00:00
e9b99fe921 make installer save abra-download to /tmp/ directory
All checks were successful
continuous-integration/drone/push Build is passing
The current download location is ~/.local/bin/, but this
conflicts with some security tools.
2024-01-24 14:27:09 +00:00
4920dfedb3 fix: retry docker volume remove (!399)
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#509

Reviewed-on: #399
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-01-19 15:09:00 +00:00
0a3624c15b feat: add version input to abra app new (!400)
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#519

Reviewed-on: #400
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2024-01-19 15:08:41 +00:00
c5687dfbd7
feat: backup revolution
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#485
2024-01-12 22:01:08 +01:00
ca91abbed9 fix: correct append service name logic in Filters function (!396)
All checks were successful
continuous-integration/drone/push Build is passing
This fixes a regression introduced by #395

Reviewed-on: #396
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2023-12-22 12:08:12 +00:00
d4727db8f9 feat: abra app logs shows task errors (!395)
All checks were successful
continuous-integration/drone/push Build is passing
The log command now checks for the ready state in the task list. If it is not ready, it shows the task logs. This might look like this:
```
ERRO[0000] Service abra-test-recipe_default_app: State rejected: No such image: ngaaaax:1.21.0
ERRO[0000] Service abra-test-recipe_default_app: State preparing:
ERRO[0000] Service abra-test-recipe_default_app: State rejected: No such image: ngaaaax:1.21.0
ERRO[0000] Service abra-test-recipe_default_app: State rejected: No such image: ngaaaax:1.21.0
ERRO[0000] Service abra-test-recipe_default_app: State rejected: No such image: ngaaaax:1.21.0
```
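A minimal sketch of that check using the Docker client API; the helper name is illustrative, and the service name is taken from the example output above:

```
// Sketch only: list a service's tasks and surface the task error for
// every task that is not running, similar to the ERRO lines above.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func logTaskErrors(ctx context.Context, cl *client.Client, serviceName string) error {
	opts := types.TaskListOptions{
		Filters: filters.NewArgs(filters.Arg("service", serviceName)),
	}
	tasks, err := cl.TaskList(ctx, opts)
	if err != nil {
		return err
	}
	for _, task := range tasks {
		if task.Status.State == swarm.TaskStateRunning {
			continue
		}
		fmt.Printf("Service %s: State %s: %s\n",
			serviceName, task.Status.State, task.Status.Err)
	}
	return nil
}

func main() {
	cl, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}
	if err := logTaskErrors(context.Background(), cl, "abra-test-recipe_default_app"); err != nil {
		log.Fatal(err)
	}
}
```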

Closes coop-cloud/organising#518

Reviewed-on: #395
Reviewed-by: decentral1se <decentral1se@noreply.git.coopcloud.tech>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2023-12-14 13:15:24 +00:00
af8cd1f67a feat: abra release now asks for a release note (!393)
All checks were successful
continuous-integration/drone/push Build is passing
This implements coop-cloud/organising#540 by checking if a `release/next` file exists and, if so, moving it to `release/<tag>`. When no release notes exist, it prompts for them.
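A rough sketch of that flow, with a plain stdin read standing in for abra's interactive prompt (`ensureReleaseNotes` and the example tag are illustrative):

```
// Sketch only: move release/next to release/<tag> if present, otherwise
// ask for release notes and write them there.
package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
)

func ensureReleaseNotes(recipeDir, tag string) error {
	next := filepath.Join(recipeDir, "release", "next")
	target := filepath.Join(recipeDir, "release", tag)

	// Existing notes for the next release are simply renamed to the tag.
	if _, err := os.Stat(next); err == nil {
		return os.Rename(next, target)
	}

	// No notes yet: prompt for them (abra's real prompt is interactive).
	fmt.Printf("release notes for %s: ", tag)
	notes, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return err
	}
	return os.WriteFile(target, []byte(notes), 0o644)
}

func main() {
	if err := ensureReleaseNotes(".", "1.0.0+2.1.0"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```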

Reviewed-on: #393
Reviewed-by: moritz <moritz.m@local-it.org>
Co-authored-by: p4u1 <p4u1_f4u1@riseup.net>
Co-committed-by: p4u1 <p4u1_f4u1@riseup.net>
2023-12-12 14:46:20 +00:00
cdd7516e54
chore: go mod tidy [ci skip] 2023-12-04 22:56:58 +01:00
99e3ed416f fix: secret name generation when secretId is not part of the secret name
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-04 21:52:09 +00:00
02b726db02 add comments to better explain how the length modifier gets added to the secret
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-04 17:30:26 +00:00
2de6934322 feat: abra app cp enhancements
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-02 15:39:27 +00:00
cb49cf06d1
chore: drop old godotenv pointers [ci skip]
Follows 9affda8a70
2023-12-02 13:02:24 +01:00
9affda8a70
chore: update godotenv fork commit pointer
All checks were successful
continuous-integration/drone/push Build is passing
Follows #391
2023-12-02 12:59:42 +01:00
3957b7c965 proper env modifiers support
All checks were successful
continuous-integration/drone/push Build is passing
This implements proper modifier support in the env file using this new fork of the godotenv library. The modifier implementation is quite basic for now but can be improved later if needed. See this commit for the actual implementation.

Because we are now using proper modifier parsing, it does not affect the parsing of values, so this is possible again:
```
MY_VAR="#foo"
```
Closes coop-cloud/organising#535
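A rough illustration of that behaviour, not the fork's actual implementation: a `#` inside a quoted value stays part of the value, while a trailing `# key=value` comment after the closing quote is read as a modifier (such as `length=10`):

```
// Sketch only: split a raw env value into the value proper and any
// trailing modifiers of the form `# key=value`.
package main

import (
	"fmt"
	"strings"
)

func splitModifiers(raw string) (string, map[string]string) {
	mods := map[string]string{}
	value := strings.TrimSpace(raw)

	// A "#" inside quotes is part of the value; only a comment after
	// the closing quote is considered for modifiers.
	if strings.HasPrefix(value, `"`) {
		if end := strings.Index(value[1:], `"`); end >= 0 {
			rest := strings.TrimSpace(value[end+2:])
			value = value[1 : end+1]
			if strings.HasPrefix(rest, "#") {
				for _, pair := range strings.Fields(strings.TrimPrefix(rest, "#")) {
					if k, v, ok := strings.Cut(pair, "="); ok {
						mods[k] = v
					}
				}
			}
		}
	}
	return value, mods
}

func main() {
	v, m := splitModifiers(`"#foo"`)
	fmt.Printf("value=%q mods=%v\n", v, m) // value="#foo" mods=map[]

	v, m = splitModifiers(`"v1" # length=10`)
	fmt.Printf("value=%q mods=%v\n", v, m) // value="v1" mods=map[length:10]
}
```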
2023-12-01 11:03:52 +00:00
0d83339d80 fix(ssh): increase connection timeout #482
All checks were successful
continuous-integration/drone/push Build is passing
see coop-cloud/organising#482
2023-11-30 16:35:53 +01:00
6e54ec7213
test: skip failing test for now
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#535.
2023-11-28 11:42:36 +01:00
66b40a9189
fix: just run it in place [ci skip] 2023-11-27 11:25:01 +01:00
049f02f063
docs: add p4u1 [ci skip] 2023-11-27 11:23:03 +01:00
15857e6453
fix: clean up after cp'ing script [ci skip]
Follows 31e0ed75b0.
2023-11-27 11:21:46 +01:00
31e0ed75b0
build: target for docker building
Some checks failed
continuous-integration/drone/push Build is failing
Adapted from #384.

Thanks @cas.
2023-11-27 11:15:59 +01:00
b1d3fcbb0b add integration test
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-27 10:01:33 +00:00
7b6134f35e add bash completion for abra cmd 2023-11-27 10:01:33 +00:00
316b59b465
test: support local-first testing
Some checks failed
continuous-integration/drone/push Build is failing
Cherry-picked from #389

Thanks @p4u1.
2023-11-27 10:41:46 +01:00
92b073d5b6
chore: go mod tidy
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-27 10:28:43 +01:00
9b0dd933b5 chore(deps): update module github.com/schollz/progressbar/v3 to v3.14.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-11-10 08:00:52 +00:00
f255fa1555 chore(deps): update module github.com/hashicorp/go-retryablehttp to v0.7.5
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-11-09 08:00:33 +00:00
74200318ab chore(deps): update module github.com/schollz/progressbar/v3 to v3.14.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-11-07 08:01:11 +00:00
609656b4e1 chore(deps): update module golang.org/x/sys to v0.14.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-11-06 08:00:33 +00:00
856c9f2f7d
chore: go mod tidy
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-04 09:37:15 +01:00
bd5cdd3443 chore(deps): update module github.com/docker/docker to v24.0.7
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-10-30 08:00:53 +00:00
79d274e074 chore(deps): update module github.com/docker/cli to v24.0.7
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-10-27 07:01:16 +00:00
51e3df17f1 chore(deps): update module github.com/go-git/go-git/v5 to v5.10.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-10-26 07:00:33 +00:00
ccf0215495 hotfix: parse values starting with # correctly
Some checks failed
continuous-integration/drone/push Build is failing
2023-10-23 19:21:45 +02:00
254df7f2be
feat: app cmd ls
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#484
2023-10-17 21:16:31 +02:00
6a673ef101
refactor: filter by topic when building catalogue
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#377
2023-10-16 18:42:38 +02:00
7f7f7224c6
feat: diff on release flow
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Also, don't commit unstaged files.
2023-10-16 18:31:22 +02:00
f96bf9a8ac
feat: recipe reset, recipe diff
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#511
2023-10-15 12:56:52 +02:00
dcecf32999
chore: bump version for installer script [ci skip] 2023-10-11 19:31:28 +02:00
bc88dac150
test: reset before changing files
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-11 19:29:19 +02:00
704c0e9c74
test: adapt failing tests to new changes 2023-10-11 18:34:08 +02:00
c9bb7e15c2
fix: bring back docker build
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-10 07:27:49 +02:00
d90c9b88f1
fix: include ca-certs to avoid x509 error [ci skip] 2023-10-10 00:50:43 +02:00
69ce07f81f
fix: ignore build files for docker [ci skip] 2023-10-09 23:40:41 +02:00
85b90ef80c fix: bail if --chaos and specific version
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#503.
2023-10-09 20:54:44 +00:00
3e511446aa
refactor: use app check emoji here too
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-09 22:53:46 +02:00
7566b4262b
fix: set go version to 1.21
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-10-09 22:07:30 +02:00
c249c6ae9c
fix: fix: trim comments that are not modifiers
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#505
2023-10-09 14:42:05 +02:00
be693e9df0
fix: trim comments that are not modifiers
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
See coop-cloud/organising#505
2023-10-08 22:42:34 +02:00
a43125701c
test: optimise default make target for abra hacking [ci skip] 2023-10-07 10:32:42 +02:00
b57edb440a
fix: improve app check
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
See coop-cloud/organising#446
2023-10-06 10:56:33 +02:00
6fc4573a71
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-06 09:49:03 +02:00
cbe6676881 chore(deps): update module golang.org/x/sys to v0.13.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-10-06 07:00:49 +00:00
b4fd39828f
test: abra-integration-test-recipe -> abra-test-recipe
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/abra-test-recipe#3
2023-10-05 14:22:11 +02:00
14f2d72aba
refactor!: lowercase, hyphenate keys
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
This will potentially break scripts, so time to discuss!
2023-10-05 08:36:01 +02:00
57692ec3c9
feat: add --machine to secret ls
See coop-cloud/organising#481
2023-10-04 23:08:39 +02:00
47d3b77003
refactor: not generating here, skipping
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-10-04 15:13:15 +02:00
8078e91e52
fix: warn if secrets not generated
See coop-cloud/organising#499
2023-10-04 15:13:14 +02:00
dc5d3a8dd6
test: build, init & test in one stage
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-10-04 14:37:09 +02:00
ab6107610c
test: skip build step, test will do it 2023-10-04 14:36:59 +02:00
e837835e00
test: remove duplicate call to EnsureCatalogue
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-10-04 14:05:02 +02:00
c646263e9e
fix: validate COMPOSE_FILE
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is passing
See coop-cloud/organising#468.
See coop-cloud/organising#376.
2023-10-04 13:27:04 +02:00
422c642949 fix: ensure ipv4 is checked, not sometimes ipv6
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#490
2023-10-04 09:29:10 +00:00
379915587c
fix: don't export from within function
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Also, don't explode on command functions which have "export" in the name!

See coop-cloud/organising#498
2023-10-04 11:20:50 +02:00
970ae0fc4e
test: use _test to avoid cyclic imports 2023-10-04 02:36:44 +02:00
d11ad61efb
docs: make chaos flag description more generic [ci skip] 2023-10-04 01:34:53 +02:00
54dc696c69
build: fix targets for small local builds
All checks were successful
continuous-integration/drone/push Build is passing
2023-10-03 09:31:57 +02:00
7e3ce9c42a
chore: go mod tidy 2023-10-03 09:30:26 +02:00
7751423c7d chore(deps): update module github.com/docker/distribution to v2.8.3
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-10-03 07:00:43 +00:00
f18f0b6f82
build: set ABRA_DIR explicitly
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-09-30 08:26:20 +02:00
892f6c0730
test: ensure catalogue is cloned 2023-09-30 08:19:16 +02:00
b53fd2689c
test: add unit test for TestEnsureDomainsResolveSameIPv4 2023-09-30 08:19:02 +02:00
906bf65d47
test: moar domain check tests [ci skip] 2023-09-29 09:31:25 +02:00
1e6a6e6174
fix: app logs retrieves recipe
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-27 09:19:57 +02:00
1e4f1b4ade
build: disable publish image for now
Some checks reported errors
continuous-integration/drone/push Build was killed
continuous-integration/drone Build is failing
It's failing for unknown reasons and blocking releases.

See coop-cloud/recipes-catalogue-json#6
2023-09-25 17:51:30 +02:00
306fe02d1c
chore: tag 0.8.x series
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-09-25 17:33:09 +02:00
e4610f8ad5
test: make int test script lighter [ci skip] 2023-09-25 16:45:08 +02:00
e1f900de14
test: fix app_secret generate tests [ci skip] 2023-09-25 16:32:16 +02:00
d5b18d74ef
fix: use secretId to match secret names in configs
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-25 15:51:15 +02:00
776a83d8d1
fix: use new GetComposeFiles API 2023-09-25 15:51:03 +02:00
810cea8269
test: bats does output for us [ci skip] 2023-09-25 12:14:35 +02:00
c0f3e6f2a4
test: integration test script [ci skip] 2023-09-25 12:00:39 +02:00
7b240059b0
test: fix app_backup recipe cleanups [ci skip] 2023-09-25 11:50:29 +02:00
c456d13881
test: fix recipe_* tests [ci skip] 2023-09-25 11:27:36 +02:00
c7c553164d
test: fix refute output check [ci skip] 2023-09-25 11:21:36 +02:00
7616528f4e
test: ensure app cleanup 2023-09-25 11:20:56 +02:00
6cd85f7239
test: dont assert_success for check [ci skip] 2023-09-25 11:11:29 +02:00
b1774cc44b
test: fix app_check tests 2023-09-25 10:52:47 +02:00
e438fc6e8e
test: reset recipe in file teardown 2023-09-25 10:52:27 +02:00
c065ceb1f0
test: secret generation & --offline/chaos handling tests
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-09-25 10:33:15 +02:00
ce4b775428
build: require 1.18 due to slices.Contains usage 2023-09-25 10:32:41 +02:00
d02f659bf8
fix: secrets from config, --offline/chaos handling, typos
See coop-cloud/organising#464
2023-09-25 10:31:59 +02:00
f3ded88ed8
fix: app version includes tags, sorts & tests
All checks were successful
continuous-integration/drone/push Build is passing
See coop-cloud/organising#442
2023-09-24 11:19:27 +02:00
bf648eeb5d
fix: recipe versions sorts, aligns & spaces 2023-09-24 11:18:26 +02:00
533edbf172
fix: recipe versions lists correctly (also -m) 2023-09-24 10:56:02 +02:00
78b8cf9725
test: fix git tag command [ci skip] 2023-09-24 00:56:00 +02:00
f0560ca975
test: no args for helpers, fix recipe_* tests [ci skip] 2023-09-23 23:57:52 +02:00
ce7b4733d7
test: tag/git helpers & refactor [ci skip] 2023-09-23 23:19:49 +02:00
575bfbb0fb
test: test arguments, notes, local tag lookup
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-23 09:17:24 +02:00
510ce66570
feat: version arguments, local tag lookups & release notes
See:
* coop-cloud/organising#441
* coop-cloud/organising#204
* coop-cloud/organising#493
2023-09-23 09:15:27 +02:00
82631d9ab1
fix: don't output if not tags 2023-09-23 09:15:17 +02:00
358490e939
refactor: deploy output wording 2023-09-23 09:14:45 +02:00
79b9cc9be7
fix: --offline/--chaos handling for backup/check/cmd/restore
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-22 09:47:36 +02:00
9b6eb613aa
test: woops, keep unit test target default
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-21 12:06:41 +02:00
8f1231e409
test: integration test for abra app upgrade [ci skip] 2023-09-21 11:52:58 +02:00
aa37c936eb
test: pass arg to _checkout_recipe 2023-09-21 11:52:21 +02:00
3d1158a425
fix: don't read TIMEOUT for version= label
Some checks failed
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#451
2023-09-21 11:33:45 +02:00
8788558cf1
fix: only sync version label once
Some checks failed
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#492
2023-09-21 10:58:17 +02:00
76035e003e
fix: recipe workflow with integration tests
Some checks failed
continuous-integration/drone/push Build is failing
2023-09-21 10:36:53 +02:00
b708382d26
feat: recipe lint supports --chaos 2023-09-21 09:07:00 +02:00
557b670fc5
docs: improve recipe fetch usage/desc [ci skip] 2023-09-21 08:46:33 +02:00
e116148c49
test: ensure catalogue --chaos works [ci skip]
Closes coop-cloud/organising#462.
2023-09-20 14:19:49 +02:00
d5593b69e0
test: ensure 3 commits behind, ignore output on fail [ci skip] 2023-09-20 14:10:07 +02:00
0be532692d
test: moar integration tests [ci skip]
Some checks failed
continuous-integration/drone/pr Build is failing
2023-09-20 13:51:06 +02:00
7a9224b2b2
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-19 12:38:02 +02:00
e73d1a8359 chore(deps): update module gotest.tools/v3 to v3.5.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-19 07:02:01 +00:00
f8c49c82c8
fix: skip "abra-integration-test-recipe" also
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-18 14:02:38 +02:00
ab7edd2a62
refactor!: drop "record" & "server new" command
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
These were alpha prototypes and we'll reconsider once other layers of
Abra are more stable.
2023-09-14 16:45:01 +02:00
b1888dcf0f
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-14 09:39:28 +02:00
e5e122296f chore(deps): update module github.com/go-git/go-git/v5 to v5.9.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-13 07:01:51 +00:00
83bf148304
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-07 14:34:40 +02:00
d80b882b83 chore(deps): update module github.com/docker/docker to v24.0.6
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-09-07 07:02:43 +00:00
c345c6f5f1 chore(deps): update module github.com/docker/cli to v24.0.6
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-06 07:01:56 +00:00
f8c4fd72a3
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-09-05 13:56:34 +02:00
10f612f998
test: more integration tests
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-05 13:03:38 +02:00
58e78e4d7c fix: overridable ABRA_DIR
Some checks failed
continuous-integration/drone/push Build is failing
2023-09-05 09:58:13 +00:00
25258d3d64 fix: separate abra/kababra makefile targets 2023-09-05 09:58:13 +00:00
b3bd058962 chore: don't join if nothing to join 2023-09-05 09:58:13 +00:00
b4fd7fd77c fix: clone catalogue on initial run 2023-09-05 09:58:13 +00:00
64cfdae6b7 fix: only load client if creating secrets 2023-09-05 09:58:13 +00:00
0a765794f2 test: write initial automatic integration tests 2023-09-05 09:58:13 +00:00
18dc6e9434 feat: support abra testing mode 2023-09-05 09:58:13 +00:00
4ba4107288 chore(deps): update module golang.org/x/sys to v0.12.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-09-04 07:02:01 +00:00
d9b4f4ef3b
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-08-26 09:58:46 +02:00
c365dcf96d chore(deps): update module github.com/hetznercloud/hcloud-go to v1.50.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-08-25 07:02:00 +00:00
0c6a7cc0b8 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.49.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-08-18 07:01:42 +00:00
6640cfab64
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-08-13 17:42:24 +02:00
71addcd1b2 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.49.0
Some checks failed
continuous-integration/drone/push Build is failing
2023-08-13 15:41:44 +00:00
60c0e55e3d fix: don't specify refs when pulling tags
Some checks failed
continuous-integration/drone/push Build is failing
See coop-cloud/organising#477
2023-08-13 12:07:37 +00:00
e42139fd83 chore(deps): update golang docker tag to v1.21
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-08-09 07:02:07 +00:00
2d826e47d0 chore(deps): update module golang.org/x/sys to v0.11.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-08-07 07:01:55 +00:00
2db172ea5a Further changes to messages.
All checks were successful
continuous-integration/drone/push Build is passing
2023-08-04 19:22:48 +00:00
2077658f6a Attempt to replace the deploy completed message. 2023-08-04 19:22:48 +00:00
502e26b534 Change message when starting to poll for deployment status. 2023-08-04 19:22:48 +00:00
e22b692ada Add os hook for interrupt signal while waiting for service to converge. 2023-08-04 19:22:48 +00:00
5ae73f700e
Merge branch 'fix-deploy-no-catalogue'
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-08-02 10:48:54 +02:00
63d419caae
Merge branch 'fix-478' 2023-08-02 10:48:46 +02:00
179b66d65c
Merge branch 'fix-476' 2023-08-02 10:48:37 +02:00
c9144d90f3
refactor: integration -> manual 2023-08-02 08:45:24 +02:00
ebf5d82c56
fix: failover if no recipe meta available
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
2023-08-02 00:48:27 +02:00
8bb98ed0ed
fix: deploy fresh recipe without versions
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
See coop-cloud/organising#476
2023-08-01 21:47:34 +02:00
23f5745cb8
fix: skip recipe clone / up to date sync for some commands
All checks were successful
continuous-integration/drone/push Build is passing
continuous-integration/drone/pr Build is passing
Continues work of 3dc5662821.
2023-08-01 21:19:20 +02:00
2cd453ae8d
build: attempt to ignore goreleaser upgrades
Some checks failed
continuous-integration/drone/push Build is failing
See e42cc0f91d.
2023-08-01 19:33:36 +02:00
e42cc0f91d
Revert "chore(deps): update goreleaser/goreleaser docker tag to v1.19.2"
This reverts commit 1de45a6508.

See 8fa9419c99.
2023-08-01 19:31:51 +02:00
1de45a6508 chore(deps): update goreleaser/goreleaser docker tag to v1.19.2
Some checks failed
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is failing
2023-07-31 07:02:04 +00:00
55c7aca3c0
chore: publish 0.8.0-rc2-beta
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-07-29 00:31:49 +02:00
8fa9419c99
build: pin to goreleaser v18 [ci skip]
See coop-cloud/organising#474
2023-07-29 00:22:01 +02:00
73ad0a802e
Revert "build: replacements is deprecated"
This reverts commit 473cae0146.

Aiming to freeze on an old version of goreleaser for now.
2023-07-29 00:14:08 +02:00
798fd2336c
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-07-27 21:48:49 +02:00
70e65d6667 chore(deps): update module github.com/go-git/go-git/v5 to v5.8.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-07-27 07:06:22 +00:00
efc9602808
chore: welcome comrade rix [ci skip] 2023-07-26 09:59:22 +02:00
1e110f1375
docs: wording [ci skip] 2023-07-26 09:58:30 +02:00
473cae0146
build: replacements is deprecated
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-07-26 09:18:52 +02:00
2da859896a
fix: point to rc1 [ci skip] 2023-07-26 08:53:39 +02:00
ab00578ee1
chore: publish 0.8.0-rc1-beta
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-07-26 08:52:33 +02:00
3dc5662821
fix: improved offline support
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#471.
2023-07-26 08:16:07 +02:00
ab64eb2e8d
fix: only use git to update local catalogue
See coop-cloud/organising#321.
2023-07-25 21:13:04 +02:00
4f22228aab
feat: lint for lightweight tags
See coop-cloud/organising#433
2023-07-25 20:38:29 +02:00
a7f1af7476
refactor: drop internal deploy package 2023-07-25 18:03:37 +02:00
949510d4c3 revert: always clone latest recipe changes
Some checks failed
continuous-integration/drone/push Build is failing
This change was about trying to optimise for offline scenarios but
caused a lot of issues for the online case. It needs to be thought
through again.

See coop-cloud/organising#471.

Closes coop-cloud/organising#432.
2023-07-25 15:05:01 +00:00
9f478dac1d
fix: list downgrades/upgrades in correct order
Some checks failed
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is failing
Now that we have correct sorting of versions:

  coop-cloud/organising#427

We don't need to reverse sort, except when showing prompts, where the
latest should come first. Otherwise, logic can follow the sorted
order: the last item in the list is the latest upgrade.

Related:

  coop-cloud/organising#444

Also fix `upgrade` to actually show the latest version
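A small illustration of that ordering rule, using `golang.org/x/mod/semver` as a stand-in for abra's own tag comparison:

```
// Sketch only: keep versions sorted ascending, take the last element as
// the latest upgrade, and reverse only for prompt display.
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	versions := []string{"v1.2.0", "v0.9.1", "v1.0.0"}

	// Ascending sort: the last item is the latest upgrade.
	semver.Sort(versions)
	latest := versions[len(versions)-1]
	fmt.Println("sorted:", versions, "latest:", latest)

	// Reverse only for prompt display, where the latest comes first.
	prompt := make([]string, len(versions))
	for i, v := range versions {
		prompt[len(versions)-1-i] = v
	}
	fmt.Println("prompt order:", prompt)
}
```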
2023-07-25 15:08:32 +02:00
69f38ea445
fix: always show overview, even with -f
coop-cloud/organising#444
2023-07-25 15:08:10 +02:00
0582147874
fix: better error message for missing local tag
Aiming to help the following scenario better:

coop-cloud/organising#444 (comment)
2023-07-25 15:07:29 +02:00
bdeeb75973
fix: upgrade force logic parity with deploy force logic
coop-cloud/organising#444 (comment)
2023-07-25 15:06:50 +02:00
2518e65e3e
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-07-25 10:22:02 +02:00
8354c92654
Merge remote-tracking branch 'origin/renovate/main-github.com-docker-docker-24.x' 2023-07-25 10:21:16 +02:00
173e81b885 chore(deps): update module github.com/docker/docker to v24.0.5
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-07-25 07:05:53 +00:00
d91731518b chore(deps): update module github.com/docker/cli to v24.0.5
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-07-25 07:05:47 +00:00
2bfee5058d chore(deps): update module github.com/go-git/go-git/v5 to v5.8.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-07-24 07:03:29 +00:00
a7ce71d6cf Fix formatting.
All checks were successful
continuous-integration/drone/push Build is passing
2023-07-15 08:15:46 +00:00
10f60fee1d Replace deprecated system.TempFileSequential with os.CreateTemp 2023-07-15 08:15:46 +00:00
6025ab443f Update volume list options. 2023-07-15 08:15:46 +00:00
43ecf35449 Change CommonOptions (deprecated) to ClientOptions and remove unneeded parameters. 2023-07-15 08:15:46 +00:00
4d2a1065d2 Replace types.volume with new volume type 2023-07-15 08:15:46 +00:00
0b67500cab Add docker v24 and associated dependencies. 2023-07-15 08:15:46 +00:00
e0c3a06182
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-07-14 19:47:09 +02:00
a86ba4e97b chore(deps): update module github.com/hetznercloud/hcloud-go to v2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-07-14 07:03:04 +00:00
b5b3395138 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.48.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-07-13 07:03:02 +00:00
502b78ef5c
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-07-05 09:50:38 +02:00
3e2b4dae6a chore(deps): update module golang.org/x/sys to v0.10.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-07-05 07:02:41 +00:00
573fe403b3 chore(deps): update module gotest.tools/v3 to v3.5.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-30 07:02:40 +00:00
76862e9d66
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-06-22 16:44:53 +02:00
e8e337a608 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.47.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-06-22 07:02:07 +00:00
500389c5f5 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.46.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-19 07:02:59 +00:00
dea665652c
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-06-16 18:10:02 +02:00
e8cf84b523 chore(deps): update module golang.org/x/sys to v0.9.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-16 07:03:47 +00:00
fab25a6124 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.46.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-15 07:03:10 +00:00
e71377539c chore(deps): update module github.com/alecaivazis/survey/v2 to v2.3.7
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-14 07:02:51 +00:00
497ecf476a
docs: wording [ci skip] 2023-06-12 00:09:52 +02:00
ff1c043ec5
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-06-07 10:45:17 +02:00
c4d2e297f8
Merge remote-tracking branch 'origin/renovate/main-coopcloud.tech-libcapsul-digest' 2023-06-07 10:44:35 +02:00
e98b8e3666 chore(deps): update module github.com/hashicorp/go-retryablehttp to v0.7.4
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-06-07 07:02:31 +00:00
f5835fe404 chore(deps): update coopcloud.tech/libcapsul digest to 878af47
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-07 07:02:13 +00:00
07bbe9394f chore(deps): update module github.com/sirupsen/logrus to v1.9.3
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-06-05 07:02:38 +00:00
6974681af5 fix: improve error message
All checks were successful
continuous-integration/drone/push Build is passing
2023-05-29 14:57:41 +02:00
73250fb899 chore(deps): update module github.com/go-git/go-git/v5 to v5.7.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-24 07:02:28 +00:00
4ce377cffe chore(deps): update module github.com/sirupsen/logrus to v1.9.2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-19 07:03:02 +00:00
c7dd029689 chore(deps): update module github.com/docker/cli to v20.10.25
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-18 07:02:30 +00:00
51319d2ae2 chore(deps): update module github.com/docker/docker to v20.10.25
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-16 07:03:23 +00:00
d1c2343a54 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.45.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-15 07:03:03 +00:00
135ffde0e5 chore(deps): update module github.com/docker/distribution to v2.8.2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-12 07:03:02 +00:00
6e4dd51b27
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-05-08 11:42:50 +02:00
81b652718b chore(deps): update module github.com/hetznercloud/hcloud-go to v1.44.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-08 07:02:38 +00:00
442f46e17f
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-05-05 10:59:22 +02:00
574794d4e8
Merge remote-tracking branch 'origin/renovate/main-golang.org-x-sys-0.x' 2023-05-05 10:58:25 +02:00
88184125c4 chore(deps): update module golang.org/x/sys to v0.8.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-05 07:03:15 +00:00
8a4baa66ee chore(deps): update module github.com/klauspost/pgzip to v1.2.6
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-05 07:03:01 +00:00
16ecbd0291 chore(deps): update module github.com/moby/term to v0.5.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-05-03 07:02:46 +00:00
f65b262c11 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.43.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-04-28 07:03:28 +00:00
c5d9d88359 Add some minor tweaks to machine readable pathway in recipe upgrade
Some checks failed
continuous-integration/drone/push Build is failing
2023-04-27 16:45:57 +00:00
87e5909363 Make -m imply -n in recipe/upgrade 2023-04-27 16:45:57 +00:00
152c5d4563 Add machine output for recipe/upgrade
- Normal faff related to calling external libraries with structs, thanks Go
- Outputs JSON now
2023-04-27 16:45:57 +00:00
34b274bc52 recipe/upgrade: Refactor upgradability list to make output easier
In future, we can print the struct as JSON.
2023-04-27 16:45:57 +00:00
62f8103fc2 recipe/upgrade: Add non-interactive mode.
Add support for -n which just outputs the list of compatible tags for each image.
2023-04-27 16:45:57 +00:00
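For illustration, hypothetical invocations based on the flags above (the recipe name and the exact output shape are assumptions, not taken from abra's docs):

    # interactive prompt per image
    abra recipe upgrade wordpress

    # -n: non-interactive, print the list of compatible tags for each image
    abra recipe upgrade -n wordpress

    # -m: machine-readable JSON output (implies -n)
    abra recipe upgrade -m wordpress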
2dcbfa1d65 chore(deps): update module github.com/coreos/go-semver to v0.3.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-04-26 07:02:52 +00:00
049da94629 fix(version): semver version ordering (!293)
All checks were successful
continuous-integration/drone/push Build is passing
Solves coop-cloud/organising#427

This fix sorts the recipe versions at the catalogue generation and the versions that are received from the catalogue.

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: #293
2023-04-26 06:38:15 +00:00
b2739dcdf2 fix(deploy) post deploy cmds
All checks were successful
continuous-integration/drone/push Build is passing
2023-04-18 19:05:46 +02:00
343b2bfb91
docs: go doc badge [ci skip] 2023-04-14 23:31:21 +02:00
17aeed6dbd
chore: go mod tidy [ci skip] 2023-04-14 19:09:53 +02:00
27cac81830 fix(app): fix app list chaos field
All checks were successful
continuous-integration/drone/push Build is passing
show only the chaos version if the app is a chaos deploy
2023-04-14 18:01:08 +02:00
31ec322c55 feat(deploy): set timeout via label (!290)
All checks were successful
continuous-integration/drone/push Build is passing
Solves coop-cloud/organising#437

A timeout can be specified globally for a recipe using this label:
`coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-120}`. This sets the default timeout to 120s. An app-specific timeout can be set using the env var `TIMEOUT`.

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: #290
2023-04-14 14:44:18 +00:00
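For illustration, a sketch of how that label might sit in a recipe's compose.yml (the service name and surrounding structure are assumed; only the label itself comes from the change above):

    services:
      app:
        deploy:
          labels:
            # 120s default; operators override via TIMEOUT in the app's env file
            - "coop-cloud.${STACK_NAME}.timeout=${TIMEOUT:-120}"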
18615eaaef Post-deploy abra.sh hooks (!292)
All checks were successful
continuous-integration/drone/push Build is passing
This solves coop-cloud/organising#235

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: #292
2023-04-14 14:12:31 +00:00
5e508538f3 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.42.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-04-13 07:02:01 +00:00
9e05000476 fix(kadabra): always pull new recipe version
All checks were successful
continuous-integration/drone/push Build is passing
2023-04-06 17:22:33 +02:00
f088a0d327
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-04-06 10:00:14 +02:00
3832403c97 chore(deps): update module github.com/docker/docker to v20.10.24
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-04-06 07:01:59 +00:00
47058c897c chore(deps): update module github.com/docker/cli to v20.10.24
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-04-05 07:02:26 +00:00
5d4c7f8ef0 chore(deps): update module golang.org/x/sys to v0.7.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-04-04 16:33:38 +00:00
ee4315adb3 fix(rm): remove volumes correctly during app removal
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-30 13:40:44 +02:00
9ade250f01 feat(cmd): add --tty flag to run commands from a script
Some checks failed
continuous-integration/drone/push Build is failing
2023-03-29 14:25:08 +02:00
81b032be85
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-21 09:19:02 +01:00
5409990a68 chore(deps): update module github.com/go-git/go-git/v5 to v5.6.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-03-17 08:03:41 +00:00
b1595c0ec9 chore(deps): update module github.com/schollz/progressbar/v3 to v3.13.1
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-03-15 08:02:29 +00:00
6c99a2980b
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-07 16:35:20 +01:00
a9405a36c6
Merge remote-tracking branch 'origin/renovate/main-github.com-hetznercloud-hcloud-go-1.x' 2023-03-07 16:34:23 +01:00
15a417d9bd fix(list): fix output of chaos + chaos-version merge
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-07 13:46:19 +01:00
0ce8b3a5c2 Merge pull request 'app ls --status shows more details about the deployment state' (!280) from detailed_app_list into main
All checks were successful
continuous-integration/drone/push Build is passing
Reviewed-on: #280
2023-03-07 12:31:38 +00:00
edff63b446 Revert "review: change label autoupdate -> auto-update"
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
This reverts commit 74baa76f5ee5e5dd7b71b1f14be97cc40dfc611b.
2023-03-07 13:24:46 +01:00
d5979436c1 review: merge chaos + chaos_version column 2023-03-07 13:24:46 +01:00
cb33edaac3 review: change label autoupdate -> auto-update 2023-03-07 13:24:46 +01:00
e9879e2226 review: label convention chaos_version -> chaos-version 2023-03-07 13:24:46 +01:00
5428ebf43b review: avoid stackName recalculation 2023-03-07 13:24:46 +01:00
d120299929 feat(list): show autoupdate state 2023-03-07 13:24:46 +01:00
3753357ef8 feat(list): show chaos status and chaos version 2023-03-07 13:24:46 +01:00
611430aab2 Set chaos version label for each deployed or upgraded app 2023-03-07 13:24:46 +01:00
f56b02b951 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.41.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-03-07 08:02:02 +00:00
f29278f80a
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-06 15:04:15 +01:00
a9a294cbb7 chore(deps): update module golang.org/x/sys to v0.6.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-03-06 08:02:17 +00:00
73004789a4
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-03-04 08:50:24 +01:00
440aba77d5 chore(deps): update module github.com/go-git/go-git/v5 to v5.6.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-03-01 08:02:11 +00:00
e4a89bcc4f fix(kadabra): only warn if a deployed app has no published release
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-28 15:34:27 +01:00
eb07617e73
chore: publish new release 0.7.0-beta
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-02-22 08:47:43 +01:00
9fca4e56fb
docs: add comrade vera [ci skip] 2023-02-19 11:00:43 +01:00
f17523010a
chore: publish next tag 0.7.0-rc3-beta
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-19 10:51:10 +01:00
3058178d84
fix: if all servers good, don't show empty table
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-19 10:34:47 +01:00
d62c4e3400
refactor: improved logging on pruning
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-19 10:28:18 +01:00
5739758c3a
fix: give more time to tear down state [ci skip] 2023-02-17 11:11:28 +01:00
a6b5566fa6
refactor: clarify prune scope, not system wide [ci skip] 2023-02-17 11:09:44 +01:00
4dbe1362a8
docs: more clarity on prune functionality
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-17 11:00:02 +01:00
98fc36c830
refactor: hopefully more robust prune logic, docs 2023-02-17 10:59:06 +01:00
b8abc8705c
docs: volumes pruning docs - more warnings 2023-02-17 10:42:38 +01:00
636261934f
refactor: pass args in, docs, rename, lower-case logs 2023-02-17 10:23:00 +01:00
6381b73a6a
chore: use lower-case like elsewhere 2023-02-17 10:21:56 +01:00
1a72e27045
refactor: add server auto-complete & cosmetics 2023-02-17 10:12:46 +01:00
9754c1b2d1
feat: server auto-complete on remove sub-command
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-17 10:10:48 +01:00
b14ec0cda4 review cleanups
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-17 08:53:43 +00:00
c7730ba604 Adding server prune and undeploy prune 2023-02-17 08:53:43 +00:00
47c61df444
docs: add comrade yksflip [ci skip] 2023-02-15 11:26:20 +01:00
312b93e794
fix: no gitops on recipe for "app new"
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#408
2023-02-15 00:49:22 +01:00
992e675921
refactor: use passed down conf to decide 2023-02-15 00:35:33 +01:00
d4f3a7be31
docs: add comrade codegod100 [ci skip] 2023-02-14 17:16:25 +01:00
d619f399e7 Update 'cli/app/undeploy.go'
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-14 13:59:35 +00:00
96a8cb7aff
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-14 14:29:25 +01:00
9b51d22c20 chore(deps): update module github.com/hetznercloud/hcloud-go to v1.40.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-02-14 08:02:28 +00:00
d789830ce4 feat: adds --since flag for abra app logs
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2023-02-14 00:19:38 +01:00
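For illustration, a hypothetical invocation (the duration format shown is an assumption):

    # only show log lines from the last 10 minutes
    abra app logs example.com --since 10m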
e4b4084dfd
fix: stream logs without hitting git.coopcloud.tech
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
Medium-sized options refactor in here too!

See coop-cloud/organising#292.
2023-02-13 16:46:43 +01:00
ff58646cfc
fix: better error message when network gone 2023-02-13 12:33:00 +01:00
eec6469ba1
fix: Change error message to reflect RECIPE -> TYPE
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#409
2023-02-12 16:40:48 +01:00
e94f947d20
fix: don't create clients twice per server
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#407
2023-02-12 00:02:59 +01:00
cccbe4a2ec
fix: typo [ci skip] 2023-02-11 23:53:42 +01:00
f53cfb6c36
fix: better error message when missing context [ci skip] 2023-02-11 23:49:01 +01:00
f55f01a25c
build: verbose local builds to show progress 2023-02-11 23:40:47 +01:00
ce5c1a9ebb
chore: 0.7.0-rc2-beta
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-10 12:47:20 +01:00
5e3b039f93
fix: kadabra is now called kadabra not abra
All checks were successful
continuous-integration/drone/push Build is passing
Closes coop-cloud/organising#402
2023-02-10 12:45:41 +01:00
0e9d218bbc
docs: fix comment names 2023-02-10 12:45:24 +01:00
e1c635af86
chore: remove newline [ci skip] 2023-02-08 23:49:01 +01:00
f6b139dfea
chore: formatting pass on kadabra [ci skip] 2023-02-08 23:20:25 +01:00
3d2b8fa446
chore: spacing 2023-02-08 23:02:54 +01:00
2eebac6fc0
chore: formatting, indentation 2023-02-08 22:59:47 +01:00
f5e2710138
chore: remove comment 2023-02-08 22:59:30 +01:00
986470784d
chore: sort gitignore listing 2023-02-08 22:59:03 +01:00
e76ed771df feat: kadabra, the app auto-updater (!268)
All checks were successful
continuous-integration/drone/push Build is passing
coop-cloud/organising#236

The autoupdater `kadabra` is ready for testing.
It is meant to run on the server, check for available minor/patch updates and automatically upgrade the apps.

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: #268
2023-02-08 18:53:04 +00:00
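For illustration, a minimal Go sketch of the minor/patch-only constraint such an auto-updater implies (assumed logic, not kadabra's actual code):

    package main

    import (
        "fmt"

        "github.com/coreos/go-semver/semver"
    )

    // upgradable reports whether candidate is a minor/patch upgrade of
    // current, i.e. the major version must stay the same.
    func upgradable(current, candidate *semver.Version) bool {
        return candidate.Major == current.Major && current.LessThan(*candidate)
    }

    func main() {
        cur := semver.New("1.2.0")                        // illustrative versions
        fmt.Println(upgradable(cur, semver.New("1.3.0"))) // true: minor bump
        fmt.Println(upgradable(cur, semver.New("2.0.0"))) // false: major bump
    }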
f28af5e42f
fix: use correctly formatted comments
All checks were successful
continuous-integration/drone/push Build is passing
2023-02-08 11:28:38 +01:00
fdf4854b0c
fix: unbork comments
Some checks failed
continuous-integration/drone/push Build is failing
Was breaking the build but not anymore!
2023-02-08 11:20:30 +01:00
6b9512d09c
build: docker dev builds depend on check too 2023-02-08 11:16:54 +01:00
21a86731d0
build: dont test/build if check fails
Some checks failed
continuous-integration/drone/push Build is failing
Save cycles for small mistakes.
2023-02-08 11:13:20 +01:00
91102e6607
build: not so useful anymore, also broken 2023-02-08 11:12:03 +01:00
fadafda0b8
fix: make test suite work again 2023-02-08 11:11:39 +01:00
c03cf76702
chore: gofmt import statements
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-02-08 10:56:39 +01:00
ebb748b7e7
chore: publish next tag 0.7.0-rc1-beta
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-02-08 10:28:54 +01:00
2b3dbee24c
chore: go mod tidy
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-07 22:20:11 +01:00
a448cfdd0d
fix: revert bogus dependabot changes
Revert "chore(deps): update module github.com/docker/cli to v23"
This reverts commit 5ee6eb53b2.

Revert "chore(deps): update module github.com/docker/docker to v23"
This reverts commit 7b2880d425.
2023-02-07 22:19:28 +01:00
5ee6eb53b2 chore(deps): update module github.com/docker/cli to v23
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-07 21:16:18 +00:00
7b2880d425 chore(deps): update module github.com/docker/docker to v23
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-07 21:16:06 +00:00
928d6f5d7f chore(deps): update module golang.org/x/sys to v0.5.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-02-07 08:02:03 +00:00
29fa607190
fix: restrict pulling to specific branch
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-02 21:12:50 +01:00
7c541ffdfa
fix: better error handling in EnsureUpToDate 2023-02-02 21:12:24 +01:00
7ccc4b4c08
fix: woops, remove that print statement
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-02 21:00:31 +01:00
ef4df35995
fix: don't check twice (called in EnsureUpToDate)
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-02 20:59:04 +01:00
71a9155042
fix: specify refs when fetching tags
See coop-cloud/organising#397
2023-02-02 20:58:38 +01:00
2a88491d7c
fix: catch errors here too
Some checks failed
continuous-integration/drone/push Build is failing
See #266
2023-02-02 20:26:19 +01:00
bf79552204
fix: improve permission denied message
Some checks failed
continuous-integration/drone/push Build is failing
2023-02-02 20:07:45 +01:00
0a7fa54759
fix: can't pass client here
Some checks failed
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#396
2023-02-02 20:06:49 +01:00
7c1a97be72 refactor!: consolidate SSH handling
Some checks failed
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#389.
Closes coop-cloud/organising#341.
Closes coop-cloud/organising#326.
Closes coop-cloud/organising#380.
Closes coop-cloud/organising#360.
2023-02-02 08:37:14 +00:00
f20fbbc913 chore(deps): update golang docker tag to v1.20
Some checks failed
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-02-02 08:02:02 +00:00
76717531bd resolve PR: include the service info in the log message
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-31 16:15:11 +01:00
6774893412 add env ENABLE_AUTO_UPDATE as label to enable/disable the auto update process
Some checks reported errors
continuous-integration/drone/pr Build was killed
2023-01-31 16:12:02 +01:00
ebb86391af add a label to signal that a deploy is a chaos deploy (!265)
Some checks failed
continuous-integration/drone/push Build is failing
Resolves coop-cloud/organising#390 by adding the following label `coop-cloud.${STACK_NAME}.chaos=true` (analogous to the version label).
This is required for the auto updater coop-cloud/organising#236 (comment)

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: #265
2023-01-31 15:06:35 +00:00
50db39424c add a label to signal that a deploy is connected with a recipe (!264)
Some checks failed
continuous-integration/drone/push Build is failing
Resolves coop-cloud/organising#391 by adding the following label `coop-cloud.${STACK_NAME}.recipe=${RECIPE}` (analogous to the version label).
This is required for the auto updater coop-cloud/organising#236 (comment)

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: #264
2023-01-31 14:35:43 +00:00
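For illustration, taken together these changes mean a deployed stack carries labels like the following, which the auto-updater can read back (values are placeholders):

    coop-cloud.${STACK_NAME}.recipe=${RECIPE}   # which recipe this deploy came from
    coop-cloud.${STACK_NAME}.chaos=true         # set when the deploy is a chaos deploy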
ca1ea32c46 Expose all env vars to app container. (!263)
Some checks failed
continuous-integration/drone/push Build is failing
Resolves coop-cloud/organising#393 and is required for the auto updater coop-cloud/organising#236 (comment)

Co-authored-by: Moritz <moritz.m@local-it.org>
Reviewed-on: #263
2023-01-31 14:13:43 +00:00
32851d4d99 fix: always fetch all repository tags
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-01-31 11:52:15 +01:00
c47aa49373
fix: improved missing context message
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-24 10:48:53 +01:00
cdee6b00c4
docs: better auto-completion help
Some checks failed
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#328
2023-01-23 19:01:00 +01:00
a3e9383a4a
docs: wording [ci skip] 2023-01-23 18:48:51 +01:00
b4cce7dcf4
fix: better warning if flying < 3.8 compose spec
Some checks failed
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#350
2023-01-23 18:42:23 +01:00
b089109c94
fix: more robust docker context problem handling
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
See coop-cloud/organising#325
See coop-cloud/organising#340
2023-01-23 14:56:34 +01:00
27e0708ac7
fix: don't delete server dir on cleanup if not empty
Part of coop-cloud/organising#325.
2023-01-23 13:56:27 +01:00
a93786c6be
fix!: make "app rm" more explicit & simpler
We now point users to "app volume/secret remove" for more specific
deletion of other app data resources. The idea is that if you lose the env
file locally, then you can't clean up anything after. So it is handy to
have a sort of WARNING barrier to deleting that file. This flow is the
only way to get Abra to delete your local env file. It now feels more
documented and sufficiently scary in the UI/UX to merit that. Hopefully
addresses the ticket sufficiently.

Closes coop-cloud/organising#335.
2023-01-23 13:29:46 +01:00
521570224b
Merge branch 'filter-servers-by-recipe'
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-23 09:33:31 +01:00
c72462e0b6
fix: no domain checks if no DOMAIN=... configured
Closes coop-cloud/organising#353
2023-01-23 09:33:12 +01:00
54646650c7 fix!: disable traefik linting when DOMAIN isn't present
Some checks failed
continuous-integration/drone/push Build is failing
Also reformats the linting output to be more readable.

Closes coop-cloud/organising#319.
2023-01-23 08:31:00 +00:00
903aac9d7a
feat: recipe fetch command
Some checks reported errors
continuous-integration/drone/pr Build was killed
continuous-integration/drone/push Build was killed
Also may have rooted out another go-git cloning bug 🙄

Closes coop-cloud/organising#365
2023-01-23 09:26:53 +01:00
49865c6a97 feat: app services command
Some checks reported errors
continuous-integration/drone/push Build was killed
Closes coop-cloud/organising#372
2023-01-23 08:25:17 +00:00
a694c8c20e
feat: filter server by recipe
Closes coop-cloud/organising#363
2023-01-23 00:54:22 +01:00
d4a42d8378
fix: error out if no backup configs found
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-01-22 18:50:45 +01:00
e16ca45fa7
fix!: better backup file names
Closes coop-cloud/organising#366
2023-01-22 18:50:27 +01:00
32de2ee5de
fix: ensure catalogue is clean/up-to-date
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#367
2023-01-22 17:52:36 +01:00
834d41ef50
docs: wording [ci skip] 2023-01-22 10:07:58 +01:00
6fe5aed408
fix!: remove digest handling
Some checks failed
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
Closes coop-cloud/organising#379
2023-01-22 08:54:13 +01:00
03041b88d0
chore: gofmt
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-21 23:26:23 +01:00
9338afb492
chore: go mod tidy
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-20 10:16:14 +01:00
313ae0dbe2 chore(deps): update module github.com/docker/cli to v20.10.23
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-20 09:12:52 +00:00
0dc7ec8570 chore(deps): update module github.com/docker/docker to v20.10.23
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-01-20 08:02:52 +00:00
8a1a3aeb12
ci: automerge & run tidy [ci skip] 2023-01-18 17:28:36 +01:00
3wc
910469cfa0 chore: switch to dev image by default
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-15 19:48:07 -08:00
3wc
4f055096e9 chore: fix Drone build, ignore auto-recipes-catalogue-json
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-15 18:16:53 -08:00
3wc
6c93f980dc chore: tweak docker build
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-01-15 18:08:22 -08:00
3wc
57f52bbf33 chore: disable go cache for now, parallelise build
Some checks failed
continuous-integration/drone/push Build is failing
2023-01-15 17:16:32 -08:00
3wc
9f5620d881 chore: attempt to fix drone build
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-01-15 17:11:50 -08:00
3wc
44c4555aae chore: attempt to enable go caching for docker image build
Some checks reported errors
continuous-integration/drone/push Build was killed
2023-01-15 17:10:57 -08:00
3wc
025d1e0a8c chore: tweak drone image building 2023-01-15 17:10:52 -08:00
3wc
f484021148 feat: add docker image, auto-built using CI 2023-01-15 17:10:45 -08:00
3wc
1403eac72c fix: parse "Status" field during catalogue generate 2023-01-15 17:10:45 -08:00
a6e23938eb Add tests to jsontable.
- Test major functionality of jsontable
- Fix bug discovered in testing.
2023-01-15 17:10:36 -08:00
cae0d9ef79 Introduce a JSON output table mechanic
- Create JSONTable as a proxy/extension to tablewriter which can also output JSON.
- Implement machine readable output for `server list` and `recipe list`
2023-01-12 21:15:14 +00:00
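For illustration, a minimal sketch of the proxy/extension idea (not the actual jsontable implementation; it assumes the classic github.com/olekukonko/tablewriter API): record rows as they are appended, then render either the table or JSON.

    package main

    import (
        "encoding/json"
        "os"

        "github.com/olekukonko/tablewriter"
    )

    // JSONTable proxies tablewriter but keeps headers and rows around so
    // the same data can also be marshalled as JSON.
    type JSONTable struct {
        table   *tablewriter.Table
        headers []string
        rows    [][]string
    }

    func NewJSONTable(headers []string) *JSONTable {
        t := tablewriter.NewWriter(os.Stdout)
        t.SetHeader(headers)
        return &JSONTable{table: t, headers: headers}
    }

    func (j *JSONTable) Append(row []string) {
        j.rows = append(j.rows, row)
        j.table.Append(row)
    }

    func (j *JSONTable) Render(machineReadable bool) error {
        if !machineReadable {
            j.table.Render()
            return nil
        }
        out := make([]map[string]string, 0, len(j.rows))
        for _, row := range j.rows {
            m := make(map[string]string, len(j.headers))
            for i, h := range j.headers {
                if i < len(row) { // guard against short rows
                    m[h] = row[i]
                }
            }
            out = append(out, m)
        }
        return json.NewEncoder(os.Stdout).Encode(out)
    }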
89fcb5b216
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-01-06 10:05:20 +01:00
56b3e9bb19 chore(deps): update module github.com/go-git/go-git/v5 to v5.5.2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-01-06 08:02:14 +00:00
9aa4a98b0b
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-01-05 17:45:33 +01:00
5fbba0c934
Merge remote-tracking branch 'origin/renovate/main-golang.org-x-sys-0.x' 2023-01-05 17:44:53 +01:00
d772f4b2c6
Merge remote-tracking branch 'origin/renovate/main-golang.org-x-crypto-0.x' 2023-01-05 17:44:31 +01:00
7513fbd57d chore(deps): update module golang.org/x/sys to v0.4.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-01-05 08:02:28 +00:00
9082761f86 chore(deps): update module golang.org/x/crypto to v0.5.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2023-01-05 08:02:09 +00:00
a3bd6e14d0 chore(deps): update module github.com/schollz/progressbar/v3 to v3.13.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-01-05 08:01:57 +00:00
49dd702d98
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2023-01-04 09:36:35 +01:00
e4cd5e3efe
Merge remote-tracking branch 'origin/renovate/main-github.com-hetznercloud-hcloud-go-1.x' 2023-01-04 09:36:13 +01:00
1db4602020 chore(deps): update module github.com/hashicorp/go-retryablehttp to v0.7.2
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2023-01-04 08:01:58 +00:00
b50718050b chore(deps): update module github.com/hetznercloud/hcloud-go to v1.39.0
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/push Build is failing
continuous-integration/drone/pr Build is failing
2022-12-30 08:01:53 +00:00
3wc
9e39e1dc88 docs: fix typo in error message
All checks were successful
continuous-integration/drone/push Build is passing
2022-12-22 19:27:42 -08:00
1a3a53cfc2
chore: go mod tidy
All checks were successful
continuous-integration/drone/push Build is passing
2022-12-19 09:06:28 +01:00
5f53d591f8
chore(deps): update module github.com/docker/docker to v20.10.22 2022-12-19 09:06:27 +01:00
d7013518cc chore(deps): update module github.com/docker/cli to v20.10.22
Some checks failed
renovate/artifacts Artifact file update failure
continuous-integration/drone/pr Build is failing
continuous-integration/drone/push Build is failing
2022-12-19 08:01:53 +00:00
3wc
b204b289d1 fix: disable progress bar with machine-readable output
All checks were successful
continuous-integration/drone/pr Build is passing
continuous-integration/drone/push Build is passing
2022-12-16 10:20:51 -08:00
3778 changed files with 989016 additions and 8327 deletions

8
.dockerignore Normal file

@@ -0,0 +1,8 @@
*.swo
*.swp
.dockerignore
Dockerfile
abra
dist
kadabra
tags


@@ -3,35 +3,20 @@ kind: pipeline
name: coopcloud.tech/abra
steps:
- name: make check
image: golang:1.19
image: golang:1.21
commands:
- make check
- name: make build
image: golang:1.19
commands:
- make build
- name: make test
image: golang:1.19
image: golang:1.21
environment:
CATL_URL: https://git.coopcloud.tech/coop-cloud/recipes-catalogue-json.git
commands:
- mkdir -p $HOME/.abra
- git clone $CATL_URL $HOME/.abra/catalogue
- make test
- name: notify on failure
image: plugins/matrix
settings:
homeserver: https://matrix.autonomic.zone
roomid: "IFazIpLtxiScqbHqoa:autonomic.zone"
userid: "@autono-bot:autonomic.zone"
accesstoken:
from_secret: autono_bot_access_token
depends_on:
- make check
- make build
- make test
when:
status:
- failure
- name: fetch
image: docker:git
@@ -39,13 +24,12 @@ steps:
- git fetch --tags
depends_on:
- make check
- make build
- make test
when:
event: tag
- name: release
image: golang:1.19
image: goreleaser/goreleaser:v1.24.0
environment:
GITEA_TOKEN:
from_secret: goreleaser_gitea_token
@@ -53,12 +37,53 @@ steps:
- name: deps
path: /go
commands:
- curl -sL https://git.io/goreleaser | bash
- goreleaser release
depends_on:
- fetch
when:
event: tag
- name: publish image
image: plugins/docker
settings:
auto_tag: true
username: 3wordchant
password:
from_secret: git_coopcloud_tech_token_3wc
repo: git.coopcloud.tech/coop-cloud/abra
tags: dev
registry: git.coopcloud.tech
when:
branch:
- main
depends_on:
- make check
- make test
- name: integration test
image: appleboy/drone-ssh
settings:
host:
- int.coopcloud.tech
username: abra
key:
from_secret: abra_int_private_key
port: 22
command_timeout: 60m
script_stop: true
request_pty: true
script:
- |
wget https://git.coopcloud.tech/coop-cloud/abra/raw/branch/main/scripts/tests/run-ci-int -O run-ci-int
chmod +x run-ci-int
sh run-ci-int
when:
event:
- cron:
cron:
# @daily https://docs.drone.io/cron/
- integration
volumes:
- name: deps
temp: {}


@@ -1,4 +0,0 @@
GANDI_TOKEN=...
HCLOUD_TOKEN=...
REGISTRY_PASSWORD=...
REGISTRY_USERNAME=...


@@ -1,6 +1,7 @@
go env -w GOPRIVATE=coopcloud.tech
# integration test suite
# export ABRA_DIR="$HOME/.abra_test"
# export ABRA_TEST_DOMAIN=test.example.com
# export ABRA_CI=1
# export PASSWORD_STORE_DIR=$(pwd)/../../autonomic/passwords/passwords/
# export HCLOUD_TOKEN=$(pass show logins/hetzner/cicd/api_key)
# export CAPSUL_TOKEN=...
# export GITEA_TOKEN=...
# release automation
# export GITEA_TOKEN=

4
.gitignore vendored

@@ -2,7 +2,7 @@
.e2e.env
.envrc
.vscode/
/kadabra
abra
dist/
tests/integration/.abra/catalogue
vendor/
tests/integration/.bats


@@ -1,16 +1,19 @@
---
project_name: abra
gitea_urls:
api: https://git.coopcloud.tech/api/v1
download: https://git.coopcloud.tech/
skip_tls_verify: false
before:
hooks:
- go mod tidy
builds:
- env:
- CGO_ENABLED=0
- id: abra
binary: abra
dir: cmd/abra
env:
- CGO_ENABLED=0
goos:
- linux
- darwin
@@ -26,15 +29,40 @@ builds:
ldflags:
- "-X 'main.Commit={{ .Commit }}'"
- "-X 'main.Version={{ .Version }}'"
archives:
- replacements:
386: i386
amd64: x86_64
format: binary
- "-s"
- "-w"
- id: kadabra
binary: kadabra
dir: cmd/kadabra
env:
- CGO_ENABLED=0
goos:
- linux
- darwin
goarch:
- 386
- amd64
- arm
- arm64
goarm:
- 5
- 6
- 7
gcflags:
- "all=-l -B"
ldflags:
- "-X 'main.Commit={{ .Commit }}'"
- "-X 'main.Version={{ .Version }}'"
- "-s"
- "-w"
checksum:
name_template: "checksums.txt"
snapshot:
name_template: "{{ incpatch .Version }}-next"
changelog:
sort: desc
filters:


@@ -1,13 +1,19 @@
# authors
> If you're looking at this and you hack on `abra` and you're not listed here,
> please do add yourself! This is a community project, let's show some :heart:
> please do add yourself! This is a community project, let's show some 💞
- 3wordchant
- cassowary
- codegod100
- decentral1se
- fauno
- frando
- kawaiipunk
- knoflook
- moritz
- p4u1
- rix
- roxxers
- vera
- yksflip

30
Dockerfile Normal file

@@ -0,0 +1,30 @@
# Build image
FROM golang:1.21-alpine AS build
ENV GOPRIVATE coopcloud.tech
RUN apk add --no-cache \
gcc \
git \
make \
musl-dev
COPY . /app
WORKDIR /app
RUN CGO_ENABLED=0 make build
# Release image ("slim")
FROM alpine:3.19.1
RUN apk add --no-cache \
ca-certificates \
git \
openssh
RUN update-ca-certificates
COPY --from=build /app/abra /abra
ENTRYPOINT ["/abra"]


@@ -1,5 +1,5 @@
Abra: The Co-op Cloud utility belt
Copyright (C) 2022 Co-op Cloud <helo@coopcloud.tech>
Copyright (C) 2022 Co-op Cloud <helo@coopcloud.tech>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by


@@ -1,32 +1,55 @@
ABRA := ./cmd/abra
COMMIT := $(shell git rev-list -1 HEAD)
GOPATH := $(shell go env GOPATH)
LDFLAGS := "-X 'main.Commit=$(COMMIT)'"
ABRA := ./cmd/abra
KADABRA := ./cmd/kadabra
COMMIT := $(shell git rev-list -1 HEAD)
GOPATH := $(shell go env GOPATH)
GOVERSION := 1.21
LDFLAGS := "-X 'main.Commit=$(COMMIT)'"
DIST_LDFLAGS := $(LDFLAGS)" -s -w"
GCFLAGS := "all=-l -B"
export GOPRIVATE=coopcloud.tech
all: format check build test
# NOTE(d1): default `make` optimised for Abra hacking
all: format check build-abra test
run:
@go run -ldflags=$(LDFLAGS) $(ABRA)
run-abra:
@go run -gcflags=$(GCFLAGS) -ldflags=$(LDFLAGS) $(ABRA)
install:
@go install -ldflags=$(LDFLAGS) $(ABRA)
run-kadabra:
@go run -gcflags=$(GCFLAGS) -ldflags=$(LDFLAGS) $(KADABRA)
build-dev:
@go build -ldflags=$(LDFLAGS) $(ABRA)
install-abra:
@go install -gcflags=$(GCFLAGS) -ldflags=$(LDFLAGS) $(ABRA)
build:
@go build -ldflags=$(DIST_LDFLAGS) $(ABRA)
install-kadabra:
@go install -gcflags=$(GCFLAGS) -ldflags=$(LDFLAGS) $(KADABRA)
install: install-abra install-kadabra
build-abra:
@go build -v -gcflags=$(GCFLAGS) -ldflags=$(DIST_LDFLAGS) $(ABRA)
build-kadabra:
@go build -v -gcflags=$(GCFLAGS) -ldflags=$(DIST_LDFLAGS) $(KADABRA)
build: build-abra build-kadabra
build-docker-abra:
@docker run -it -v $(PWD):/abra golang:$(GOVERSION) \
bash -c 'cd /abra; ./scripts/docker/build.sh'
build-docker: build-docker-abra
clean:
@rm '$(GOPATH)/bin/abra'
@rm '$(GOPATH)/bin/kadabra'
format:
@gofmt -s -w .
@gofmt -s -w $$(find . -type f -name '*.go' | grep -v "/vendor/")
check:
@test -z $$(gofmt -l .) || (echo "gofmt: formatting issue - run 'make format' to resolve" && exit 1)
@test -z $$(gofmt -l $$(find . -type f -name '*.go' | grep -v "/vendor/")) || \
(echo "gofmt: formatting issue - run 'make format' to resolve" && exit 1)
test:
@go test ./... -cover -v
@@ -34,9 +57,5 @@ test:
loc:
@find . -name "*.go" | xargs wc -l
loc-author:
@git ls-files -z | \
xargs -0rn 1 -P "$$(nproc)" -I{} sh -c 'git blame -w -M -C -C --line-porcelain -- {} | grep -I --line-buffered "^author "' | \
sort -f | \
uniq -ic | \
sort -n
deps:
@go get -t -u ./...


@@ -2,11 +2,12 @@
[![Build Status](https://build.coopcloud.tech/api/badges/coop-cloud/abra/status.svg?ref=refs/heads/main)](https://build.coopcloud.tech/coop-cloud/abra)
[![Go Report Card](https://goreportcard.com/badge/git.coopcloud.tech/coop-cloud/abra)](https://goreportcard.com/report/git.coopcloud.tech/coop-cloud/abra)
[![Go Reference](https://pkg.go.dev/badge/coopcloud.tech/abra.svg)](https://pkg.go.dev/coopcloud.tech/abra)
The Co-op Cloud utility belt 🎩🐇
<a href="https://github.com/egonelbre/gophers"><img align="right" width="150" src="https://github.com/egonelbre/gophers/raw/master/.thumb/sketch/adventure/poking-fire.png"/></a>
`abra` is our flagship client & command-line tool which has been developed specifically in the context of the Co-op Cloud project for the purpose of making the day-to-day operations of [operators](https://docs.coopcloud.tech/operators/) and [maintainers](https://docs.coopcloud.tech/maintainers/) pleasant & convenient. It is libre software, written in [Go](https://go.dev) and maintained and extended by the community :heart:
`abra` is the flagship client & command-line tool for Co-op Cloud. It has been developed specifically for the purpose of making the day-to-day operations of [operators](https://docs.coopcloud.tech/operators/) and [maintainers](https://docs.coopcloud.tech/maintainers/) pleasant & convenient. It is libre software, written in [Go](https://go.dev) and maintained and extended by the community 💖
Please see [docs.coopcloud.tech/abra](https://docs.coopcloud.tech/abra) for help on install, upgrade, hacking, troubleshooting & more!


@@ -1,36 +1,34 @@
package app
import (
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var AppCommand = cli.Command{
Name: "app",
Aliases: []string{"a"},
Usage: "Manage apps",
ArgsUsage: "<domain>",
Description: "Functionality for managing the life cycle of your apps",
Subcommands: []cli.Command{
appNewCommand,
appConfigCommand,
appRestartCommand,
appDeployCommand,
appUpgradeCommand,
appUndeployCommand,
appRemoveCommand,
appCheckCommand,
appListCommand,
appPsCommand,
appLogsCommand,
appCpCommand,
appRunCommand,
appRollbackCommand,
appSecretCommand,
appVolumeCommand,
appVersionCommand,
appErrorsCommand,
appCmdCommand,
appBackupCommand,
appRestoreCommand,
Name: "app",
Aliases: []string{"a"},
Usage: "Manage apps",
UsageText: "abra app [command] [arguments] [options]",
Commands: []*cli.Command{
&appBackupCommand,
&appCheckCommand,
&appCmdCommand,
&appConfigCommand,
&appCpCommand,
&appDeployCommand,
&appListCommand,
&appLogsCommand,
&appNewCommand,
&appPsCommand,
&appRemoveCommand,
&appRestartCommand,
&appRestoreCommand,
&appRollbackCommand,
&appRunCommand,
&appSecretCommand,
&appServicesCommand,
&appUndeployCommand,
&appUpgradeCommand,
&appVolumeCommand,
},
}


@@ -1,389 +1,283 @@
package app
import (
"archive/tar"
"context"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/system"
"github.com/klauspost/pgzip"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"coopcloud.tech/abra/pkg/log"
"github.com/urfave/cli/v3"
)
type backupConfig struct {
preHookCmd string
postHookCmd string
backupPaths []string
var snapshot string
var snapshotFlag = &cli.StringFlag{
Name: "snapshot",
Aliases: []string{"s"},
Usage: "Lists specific snapshot",
Destination: &snapshot,
}
var appBackupCommand = cli.Command{
Name: "backup",
Aliases: []string{"bk"},
Usage: "Run app backup",
ArgsUsage: "<domain> [<service>]",
var includePath string
var includePathFlag = &cli.StringFlag{
Name: "path",
Aliases: []string{"p"},
Usage: "Include path",
Destination: &includePath,
}
var resticRepo string
var resticRepoFlag = &cli.StringFlag{
Name: "repo",
Aliases: []string{"r"},
Usage: "Restic repository",
Destination: &resticRepo,
}
var appBackupListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Flags: []cli.Flag{
internal.DebugFlag,
snapshotFlag,
includePathFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Description: `
Run an app backup.
Before: internal.SubCommandBefore,
Usage: "List all backups",
UsageText: "abra app backup list <domain> [options]",
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
A backup command and pre/post hook commands are defined in the recipe
configuration. Abra reads this configuration and runs the commands in the context
of the deployed services. Pass <service> if you only want to back up a single
service. All backups are placed in the ~/.abra/backups directory.
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
A single backup file is produced for all backup paths specified for a service.
If we have the following backup configuration:
- "backupbot.backup.path=/var/lib/foo,/var/lib/bar"
And we run "abra app backup example.com app", Abra will produce a file that
looks like:
~/.abra/backups/example_com_app_609341138.tar.gz
This file is a compressed archive which contains all backup paths. To see paths, run:
tar -tf ~/.abra/backups/example_com_app_609341138.tar.gz
(Make sure to change the name of the backup file)
This single file can be used to restore your app. See "abra app restore" for more.
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
recipe, err := recipe.Get(app.Recipe)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
backupConfigs := make(map[string]backupConfig)
for _, service := range recipe.Config.Services {
if backupsEnabled, ok := service.Deploy.Labels["backupbot.backup"]; ok {
if backupsEnabled == "true" {
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), service.Name)
bkConfig := backupConfig{}
logrus.Debugf("backup config detected for %s", fullServiceName)
if paths, ok := service.Deploy.Labels["backupbot.backup.path"]; ok {
logrus.Debugf("detected backup paths for %s: %s", fullServiceName, paths)
bkConfig.backupPaths = strings.Split(paths, ",")
}
if preHookCmd, ok := service.Deploy.Labels["backupbot.backup.pre-hook"]; ok {
logrus.Debugf("detected pre-hook command for %s: %s", fullServiceName, preHookCmd)
bkConfig.preHookCmd = preHookCmd
}
if postHookCmd, ok := service.Deploy.Labels["backupbot.backup.post-hook"]; ok {
logrus.Debugf("detected post-hook command for %s: %s", fullServiceName, postHookCmd)
bkConfig.postHookCmd = postHookCmd
}
backupConfigs[service.Name] = bkConfig
}
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
log.Fatal(err)
}
serviceName := c.Args().Get(1)
if serviceName != "" {
backupConfig, ok := backupConfigs[serviceName]
if !ok {
logrus.Fatalf("no backup config for %s? does %s exist?", serviceName, serviceName)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
log.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if includePath != "" {
log.Debugf("including INCLUDE_PATH=%s in backupbot exec invocation", includePath)
execEnv = append(execEnv, fmt.Sprintf("INCLUDE_PATH=%s", includePath))
}
logrus.Infof("running backup for the %s service", serviceName)
if err := runBackup(app, serviceName, backupConfig); err != nil {
logrus.Fatal(err)
}
} else {
for serviceName, backupConfig := range backupConfigs {
logrus.Infof("running backup for the %s service", serviceName)
if err := runBackup(app, serviceName, backupConfig); err != nil {
logrus.Fatal(err)
}
}
if err := internal.RunBackupCmdRemote(cl, "ls", targetContainer.ID, execEnv); err != nil {
log.Fatal(err)
}
return nil
},
}
// runBackup does the actual backup logic.
func runBackup(app config.App, serviceName string, bkConfig backupConfig) error {
if len(bkConfig.backupPaths) == 0 {
return fmt.Errorf("backup paths are empty for %s?", serviceName)
}
var appBackupDownloadCommand = cli.Command{
Name: "download",
Aliases: []string{"d"},
Flags: []cli.Flag{
snapshotFlag,
includePathFlag,
},
Before: internal.SubCommandBefore,
Usage: "Download a backup",
UsageText: "abra app backup download <domain> [options]",
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
cl, err := client.New(app.Server)
if err != nil {
return err
}
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, true)
if err != nil {
return err
}
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)
if bkConfig.preHookCmd != "" {
splitCmd := internal.SafeSplit(bkConfig.preHookCmd)
logrus.Debugf("split pre-hook command for %s into %s", fullServiceName, splitCmd)
preHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
if err := app.Recipe.EnsureExists(); err != nil {
log.Fatal(err)
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &preHookExecOpts); err != nil {
return fmt.Errorf("failed to run %s on %s: %s", bkConfig.preHookCmd, targetContainer.ID, err.Error())
if !internal.Chaos {
if err := app.Recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}
if !internal.Offline {
if err := app.Recipe.EnsureUpToDate(); err != nil {
log.Fatal(err)
}
}
if err := app.Recipe.EnsureLatest(); err != nil {
log.Fatal(err)
}
}
logrus.Infof("succesfully ran %s pre-hook command: %s", fullServiceName, bkConfig.preHookCmd)
}
var tempBackupPaths []string
for _, remoteBackupPath := range bkConfig.backupPaths {
timestamp := strconv.Itoa(time.Now().Nanosecond())
sanitisedPath := strings.ReplaceAll(remoteBackupPath, "/", "_")
localBackupPath := filepath.Join(config.BACKUP_DIR, fmt.Sprintf("%s%s_%s.tar.gz", fullServiceName, sanitisedPath, timestamp))
logrus.Debugf("temporarily backing up %s:%s to %s", fullServiceName, remoteBackupPath, localBackupPath)
logrus.Infof("backing up %s:%s", fullServiceName, remoteBackupPath)
content, _, err := cl.CopyFromContainer(context.Background(), targetContainer.ID, remoteBackupPath)
cl, err := client.New(app.Server)
if err != nil {
logrus.Debugf("failed to copy %s from container: %s", remoteBackupPath, err.Error())
if err := cleanupTempArchives(tempBackupPaths); err != nil {
return fmt.Errorf("failed to clean up temporary archives: %s", err.Error())
log.Fatal(err)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
log.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
log.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if includePath != "" {
log.Debugf("including INCLUDE_PATH=%s in backupbot exec invocation", includePath)
execEnv = append(execEnv, fmt.Sprintf("INCLUDE_PATH=%s", includePath))
}
if err := internal.RunBackupCmdRemote(cl, "download", targetContainer.ID, execEnv); err != nil {
log.Fatal(err)
}
remoteBackupDir := "/tmp/backup.tar.gz"
currentWorkingDir := "."
if err = CopyFromContainer(cl, targetContainer.ID, remoteBackupDir, currentWorkingDir); err != nil {
log.Fatal(err)
}
fmt.Println("backup successfully downloaded to current working directory")
return nil
},
}
var appBackupCreateCommand = cli.Command{
Name: "create",
Aliases: []string{"c"},
Flags: []cli.Flag{
resticRepoFlag,
},
Before: internal.SubCommandBefore,
Usage: "Create a new backup",
UsageText: "abra app backup create <domain> [options]",
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if err := app.Recipe.EnsureExists(); err != nil {
log.Fatal(err)
}
if !internal.Chaos {
if err := app.Recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}
return fmt.Errorf("failed to copy %s from container: %s", remoteBackupPath, err.Error())
}
defer content.Close()
_, srcBase := archive.SplitPathDirEntry(remoteBackupPath)
preArchive := archive.RebaseArchiveEntries(content, srcBase, remoteBackupPath)
if err := copyToFile(localBackupPath, preArchive); err != nil {
logrus.Debugf("failed to create tar archive (%s): %s", localBackupPath, err.Error())
if err := cleanupTempArchives(tempBackupPaths); err != nil {
return fmt.Errorf("failed to clean up temporary archives: %s", err.Error())
if !internal.Offline {
if err := app.Recipe.EnsureUpToDate(); err != nil {
log.Fatal(err)
}
}
return fmt.Errorf("failed to create tar archive (%s): %s", localBackupPath, err.Error())
}
tempBackupPaths = append(tempBackupPaths, localBackupPath)
}
logrus.Infof("compressing and merging archives...")
if err := mergeArchives(tempBackupPaths, fullServiceName); err != nil {
logrus.Debugf("failed to merge archive files: %s", err.Error())
if err := cleanupTempArchives(tempBackupPaths); err != nil {
return fmt.Errorf("failed to clean up temporary archives: %s", err.Error())
}
return fmt.Errorf("failed to merge archive files: %s", err.Error())
}
if err := cleanupTempArchives(tempBackupPaths); err != nil {
return fmt.Errorf("failed to clean up temporary archives: %s", err.Error())
}
if bkConfig.postHookCmd != "" {
splitCmd := internal.SafeSplit(bkConfig.postHookCmd)
logrus.Debugf("split post-hook command for %s into %s", fullServiceName, splitCmd)
postHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &postHookExecOpts); err != nil {
return err
}
logrus.Infof("succesfully ran %s post-hook command: %s", fullServiceName, bkConfig.postHookCmd)
}
return nil
}
func copyToFile(outfile string, r io.Reader) error {
tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".tar_temp")
if err != nil {
return err
}
tmpPath := tmpFile.Name()
_, err = io.Copy(tmpFile, r)
tmpFile.Close()
if err != nil {
os.Remove(tmpPath)
return err
}
if err = os.Rename(tmpPath, outfile); err != nil {
os.Remove(tmpPath)
return err
}
return nil
}
func cleanupTempArchives(tarPaths []string) error {
for _, tarPath := range tarPaths {
if err := os.RemoveAll(tarPath); err != nil {
return err
}
logrus.Debugf("remove temporary archive file %s", tarPath)
}
return nil
}
func mergeArchives(tarPaths []string, serviceName string) error {
var out io.Writer
var cout *pgzip.Writer
timestamp := strconv.Itoa(time.Now().Nanosecond())
localBackupPath := filepath.Join(config.BACKUP_DIR, fmt.Sprintf("%s_%s.tar.gz", serviceName, timestamp))
fout, err := os.Create(localBackupPath)
if err != nil {
return fmt.Errorf("Failed to open %s: %s", localBackupPath, err)
}
defer fout.Close()
out = fout
cout = pgzip.NewWriter(out)
out = cout
tw := tar.NewWriter(out)
for _, tarPath := range tarPaths {
if err := addTar(tw, tarPath); err != nil {
return fmt.Errorf("failed to merge %s: %v", tarPath, err)
}
}
if err := tw.Close(); err != nil {
return fmt.Errorf("failed to close tar writer %v", err)
}
if cout != nil {
if err := cout.Flush(); err != nil {
return fmt.Errorf("failed to flush: %s", err)
} else if err = cout.Close(); err != nil {
return fmt.Errorf("failed to close compressed writer: %s", err)
}
}
logrus.Infof("backed up %s to %s", serviceName, localBackupPath)
return nil
}
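// addTar streams every entry of the tar archive at pth into tw, copying
// headers as-is so entries keep their original paths.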
func addTar(tw *tar.Writer, pth string) (err error) {
var tr *tar.Reader
var rc io.ReadCloser
var hdr *tar.Header
if tr, rc, err = openTarFile(pth); err != nil {
return
}
for {
if hdr, err = tr.Next(); err != nil {
if err == io.EOF {
err = nil
}
break
}
if err = tw.WriteHeader(hdr); err != nil {
break
} else if _, err = io.Copy(tw, tr); err != nil {
break
}
}
if err == nil {
err = rc.Close()
} else {
rc.Close()
}
return
}
func openTarFile(pth string) (tr *tar.Reader, rc io.ReadCloser, err error) {
var fin *os.File
var n int
buff := make([]byte, 1024)
if fin, err = os.Open(pth); err != nil {
return
}
if n, err = fin.Read(buff); err != nil {
fin.Close()
return
} else if n == 0 {
fin.Close()
err = fmt.Errorf("%s is empty", pth)
return
}
if _, err = fin.Seek(0, 0); err != nil {
fin.Close()
return
}
rc = fin
tr = tar.NewReader(rc)
return tr, rc, nil
}
var appBackupSnapshotsCommand = cli.Command{
Name: "snapshots",
Aliases: []string{"s"},
Flags: []cli.Flag{
snapshotFlag,
},
Before: internal.SubCommandBefore,
Usage: "List backup snapshots",
UsageText: "abra app backup snapshots <domain> [options]",
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if err := app.Recipe.EnsureExists(); err != nil {
log.Fatal(err)
}
if !internal.Chaos {
if err := app.Recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}
if !internal.Offline {
if err := app.Recipe.EnsureUpToDate(); err != nil {
log.Fatal(err)
}
}
if err := app.Recipe.EnsureLatest(); err != nil {
log.Fatal(err)
}
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
log.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
log.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if err := internal.RunBackupCmdRemote(cl, "snapshots", targetContainer.ID, execEnv); err != nil {
log.Fatal(err)
}
return nil
},
}
var appBackupCommand = cli.Command{
Name: "backup",
Aliases: []string{"b"},
Usage: "Manage app backups",
UsageText: "abra app backup [command] [arguments] [options]",
Commands: []*cli.Command{
&appBackupListCommand,
&appBackupSnapshotsCommand,
&appBackupDownloadCommand,
&appBackupCreateCommand,
},
}
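Taken together, the "backup" subcommand tree above maps onto invocations
like the following (domain and snapshot ID are illustrative; the snapshot
flag name is assumed from the snapshotFlag definition):

abra app backup create example.com
abra app backup snapshots example.com
abra app backup download example.com --snapshot abcdef12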

View File

@ -1,57 +1,80 @@
package app
import (
"os"
"path"
"strings"
"context"
"fmt"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"github.com/charmbracelet/lipgloss"
"github.com/urfave/cli/v3"
)
var appCheckCommand = cli.Command{
Name: "check",
Aliases: []string{"chk"},
Usage: "Check if app is configured correctly",
ArgsUsage: "<domain>",
UsageText: "abra app check <domain> [options]",
Usage: "Ensure an app is well configured",
Description: `Compare env vars in both the app ".env" and recipe ".env.sample" file.
The goal is to ensure that recipe ".env.sample" env vars are defined in your
app ".env" file. Only env var definitions in the ".env.sample" which are
uncommented, e.g. "FOO=bar" are checked. If an app ".env" file does not include
these env vars, then "check" will complain.
Recipe maintainers may or may not provide defaults for env vars within their
recipes regardless of commenting or not (e.g. through the use of
${FOO:<default>} syntax). "check" does not confirm or deny this for you.`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.ChaosFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
envSamplePath := path.Join(config.RECIPES_DIR, app.Recipe, ".env.sample")
if _, err := os.Stat(envSamplePath); err != nil {
if os.IsNotExist(err) {
logrus.Fatalf("%s does not exist?", envSamplePath)
}
logrus.Fatal(err)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
envSample, err := config.ReadEnv(envSamplePath)
table, err := formatter.CreateTable()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
var missing []string
for k := range envSample {
if _, ok := app.Env[k]; !ok {
missing = append(missing, k)
table.
Headers("RECIPE ENV SAMPLE", "APP ENV").
StyleFunc(func(row, col int) lipgloss.Style {
switch {
case col == 1:
return lipgloss.NewStyle().Padding(0, 1, 0, 1).Align(lipgloss.Center)
default:
return lipgloss.NewStyle().Padding(0, 1, 0, 1)
}
})
envVars, err := appPkg.CheckEnv(app)
if err != nil {
log.Fatal(err)
}
for _, envVar := range envVars {
if envVar.Present {
val := []string{envVar.Name, "✅"}
table.Row(val...)
} else {
val := []string{envVar.Name, "❌"}
table.Row(val...)
}
}
if len(missing) > 0 {
missingEnvVars := strings.Join(missing, ", ")
logrus.Fatalf("%s is missing %s", app.Path, missingEnvVars)
}
logrus.Infof("all necessary environment variables defined for %s", app.Name)
fmt.Println(table)
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
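Both the old and new implementations of "check" reduce to the same core
comparison; a minimal sketch in plain Go (names are illustrative, assuming
the env files are already parsed into maps):

// missingEnvVars returns the sample keys that are absent from the app env.
func missingEnvVars(sample, appEnv map[string]string) []string {
var missing []string
for k := range sample {
if _, ok := appEnv[k]; !ok {
missing = append(missing, k)
}
}
return missing
}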

View File

@ -4,110 +4,122 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"sort"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/app"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"coopcloud.tech/abra/pkg/log"
"github.com/urfave/cli/v3"
)
var appCmdCommand = cli.Command{
Name: "command",
Aliases: []string{"cmd"},
Usage: "Run app commands",
Description: `
Run an app specific command.
Name: "command",
Aliases: []string{"cmd"},
Usage: "Run app commands",
UsageText: "abra app cmd <domain> [<service>] <cmd> [<cmd-args>] [options]",
Description: `Run an app specific command.
These commands are bash functions, defined in the abra.sh of the recipe itself.
They can be run within the context of a service (e.g. app) or locally on your
work station by passing "--local". Arguments can be passed into these functions
using the "-- <args>" syntax.
Example:
abra app cmd example.com app create_user -- me@example.com
`,
ArgsUsage: "<domain> [<service>] <command> [-- <args>]",
work station by passing "--local".`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.LocalCmdFlag,
internal.RemoteUserFlag,
internal.TtyFlag,
internal.ChaosFlag,
},
BashComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
Before: internal.SubCommandBefore,
Commands: []*cli.Command{
&appCmdListCommand,
},
ShellComplete: func(ctx context.Context, cmd *cli.Command) {
args := cmd.Args()
switch args.Len() {
case 0:
autocomplete.AppNameComplete(ctx, cmd)
case 1:
autocomplete.ServiceNameComplete(args.Get(0))
case 2:
cmdNameComplete(args.Get(0))
}
},
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if internal.LocalCmd && internal.RemoteUser != "" {
internal.ShowSubcommandHelpAndError(c, errors.New("cannot use --local & --user together"))
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
hasCmdArgs, parsedCmdArgs := parseCmdArgs(c.Args(), internal.LocalCmd)
if internal.LocalCmd && internal.RemoteUser != "" {
internal.ShowSubcommandHelpAndError(cmd, errors.New("cannot use --local & --user together"))
}
abraSh := path.Join(config.RECIPES_DIR, app.Recipe, "abra.sh")
if _, err := os.Stat(abraSh); err != nil {
hasCmdArgs, parsedCmdArgs := parseCmdArgs(cmd.Args().Slice(), internal.LocalCmd)
if _, err := os.Stat(app.Recipe.AbraShPath); err != nil {
if os.IsNotExist(err) {
logrus.Fatalf("%s does not exist for %s?", abraSh, app.Name)
log.Fatalf("%s does not exist for %s?", app.Recipe.AbraShPath, app.Name)
}
logrus.Fatal(err)
log.Fatal(err)
}
if internal.LocalCmd {
cmdName := c.Args().Get(1)
if err := ensureCommand(abraSh, app.Recipe, cmdName); err != nil {
logrus.Fatal(err)
if !(cmd.Args().Len() >= 2) {
internal.ShowSubcommandHelpAndError(cmd, errors.New("missing arguments"))
}
logrus.Debugf("--local detected, running %s on local work station", cmdName)
cmdName := cmd.Args().Get(1)
if err := internal.EnsureCommand(app.Recipe.AbraShPath, app.Recipe.Name, cmdName); err != nil {
log.Fatal(err)
}
log.Debugf("--local detected, running %s on local work station", cmdName)
var exportEnv string
for k, v := range app.Env {
exportEnv = exportEnv + fmt.Sprintf("%s='%s'; ", k, v)
}
var sourceAndExec string
if hasCmdArgs {
logrus.Debugf("parsed following command arguments: %s", parsedCmdArgs)
sourceAndExec = fmt.Sprintf("TARGET=local; APP_NAME=%s; STACK_NAME=%s; %s . %s; %s %s", app.Name, app.StackName(), exportEnv, abraSh, cmdName, parsedCmdArgs)
log.Debugf("parsed following command arguments: %s", parsedCmdArgs)
sourceAndExec = fmt.Sprintf("TARGET=local; APP_NAME=%s; STACK_NAME=%s; %s . %s; %s %s", app.Name, app.StackName(), exportEnv, app.Recipe.AbraShPath, cmdName, parsedCmdArgs)
} else {
logrus.Debug("did not detect any command arguments")
sourceAndExec = fmt.Sprintf("TARGET=local; APP_NAME=%s; STACK_NAME=%s; %s . %s; %s", app.Name, app.StackName(), exportEnv, abraSh, cmdName)
log.Debug("did not detect any command arguments")
sourceAndExec = fmt.Sprintf("TARGET=local; APP_NAME=%s; STACK_NAME=%s; %s . %s; %s", app.Name, app.StackName(), exportEnv, app.Recipe.AbraShPath, cmdName)
}
shell := "/bin/bash"
if _, err := os.Stat(shell); errors.Is(err, os.ErrNotExist) {
logrus.Debugf("%s does not exist locally, use /bin/sh as fallback", shell)
log.Debugf("%s does not exist locally, use /bin/sh as fallback", shell)
shell = "/bin/sh"
}
cmd := exec.Command(shell, "-c", sourceAndExec)
if err := internal.RunCmd(cmd); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
} else {
targetServiceName := c.Args().Get(1)
cmdName := c.Args().Get(2)
if err := ensureCommand(abraSh, app.Recipe, cmdName); err != nil {
logrus.Fatal(err)
if !(cmd.Args().Len() >= 3) {
internal.ShowSubcommandHelpAndError(cmd, errors.New("missing arguments"))
}
serviceNames, err := config.GetAppServiceNames(app.Name)
targetServiceName := cmd.Args().Get(1)
cmdName := cmd.Args().Get(2)
if err := internal.EnsureCommand(app.Recipe.AbraShPath, app.Recipe.Name, cmdName); err != nil {
log.Fatal(err)
}
serviceNames, err := appPkg.GetAppServiceNames(app.Name)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
matchingServiceName := false
@ -118,19 +130,24 @@ Example:
}
if !matchingServiceName {
logrus.Fatalf("no service %s for %s?", targetServiceName, app.Name)
log.Fatalf("no service %s for %s?", targetServiceName, app.Name)
}
logrus.Debugf("running command %s within the context of %s_%s", cmdName, app.StackName(), targetServiceName)
log.Debugf("running command %s within the context of %s_%s", cmdName, app.StackName(), targetServiceName)
if hasCmdArgs {
logrus.Debugf("parsed following command arguments: %s", parsedCmdArgs)
log.Debugf("parsed following command arguments: %s", parsedCmdArgs)
} else {
logrus.Debug("did not detect any command arguments")
log.Debug("did not detect any command arguments")
}
if err := runCmdRemote(app, abraSh, targetServiceName, cmdName, parsedCmdArgs); err != nil {
logrus.Fatal(err)
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
if err := internal.RunCmdRemote(cl, app, app.Recipe.AbraShPath, targetServiceName, cmdName, parsedCmdArgs); err != nil {
log.Fatal(err)
}
}
@ -157,88 +174,73 @@ func parseCmdArgs(args []string, isLocal bool) (bool, string) {
return hasCmdArgs, parsedCmdArgs
}
func ensureCommand(abraSh, recipeName, execCmd string) error {
bytes, err := ioutil.ReadFile(abraSh)
if err != nil {
return err
}
if !strings.Contains(string(bytes), execCmd) {
return fmt.Errorf("%s doesn't have a %s function", recipeName, execCmd)
}
return nil
}
func cmdNameComplete(appName string) {
app, err := app.Get(appName)
if err != nil {
return
}
cmdNames, _ := getShCmdNames(app)
if err != nil {
return
}
for _, n := range cmdNames {
fmt.Println(n)
}
}
func runCmdRemote(app config.App, abraSh, serviceName, cmdName, cmdArgs string) error {
cl, err := client.New(app.Server)
if err != nil {
return err
}
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, true)
if err != nil {
return err
}
logrus.Debugf("retrieved %s as target container on %s", formatter.ShortenID(targetContainer.ID), app.Server)
toTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
content, err := archive.TarWithOptions(abraSh, toTarOpts)
if err != nil {
return err
}
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), targetContainer.ID, "/tmp", content, copyOpts); err != nil {
return err
}
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
shell := "/bin/bash"
findShell := []string{"test", "-e", shell}
execCreateOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: findShell,
Detach: false,
Tty: false,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
logrus.Infof("%s does not exist for %s, use /bin/sh as fallback", shell, app.Name)
shell = "/bin/sh"
}
var cmd []string
if cmdArgs != "" {
cmd = []string{shell, "-c", fmt.Sprintf("TARGET=%s; APP_NAME=%s; STACK_NAME=%s; . /tmp/abra.sh; %s %s", serviceName, app.Name, app.StackName(), cmdName, cmdArgs)}
} else {
cmd = []string{shell, "-c", fmt.Sprintf("TARGET=%s; APP_NAME=%s; STACK_NAME=%s; . /tmp/abra.sh; %s", serviceName, app.Name, app.StackName(), cmdName)}
}
logrus.Debugf("running command: %s", strings.Join(cmd, " "))
if internal.RemoteUser != "" {
logrus.Debugf("running command with user %s", internal.RemoteUser)
execCreateOpts.User = internal.RemoteUser
}
execCreateOpts.Cmd = cmd
execCreateOpts.Tty = true
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
return err
}
return nil
}
var appCmdListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Usage: "List all available commands",
UsageText: "abra app cmd ls <domain> [options]",
Flags: []cli.Flag{
internal.ChaosFlag,
},
ShellComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if err := app.Recipe.EnsureExists(); err != nil {
log.Fatal(err)
}
if !internal.Chaos {
if err := app.Recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}
if !internal.Offline {
if err := app.Recipe.EnsureUpToDate(); err != nil {
log.Fatal(err)
}
}
if err := app.Recipe.EnsureLatest(); err != nil {
log.Fatal(err)
}
}
cmdNames, err := getShCmdNames(app)
if err != nil {
log.Fatal(err)
}
for _, cmdName := range cmdNames {
fmt.Println(cmdName)
}
return nil
},
}
func getShCmdNames(app appPkg.App) ([]string, error) {
cmdNames, err := appPkg.ReadAbraShCmdNames(app.Recipe.AbraShPath)
if err != nil {
return nil, err
}
sort.Strings(cmdNames)
return cmdNames, nil
}
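For a recipe function "create_user" run locally against example.com, the
line handed to "bash -c" above looks roughly like this (all values
illustrative):

TARGET=local; APP_NAME=example_com; STACK_NAME=example_com; FOO='bar'; . ~/.abra/recipes/example/abra.sh; create_user me@example.com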

View File

@ -1,64 +1,63 @@
package app
import (
"context"
"errors"
"os"
"os/exec"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/log"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var appConfigCommand = cli.Command{
Name: "config",
Aliases: []string{"cfg"},
Usage: "Edit app config",
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
appName := c.Args().First()
Name: "config",
Aliases: []string{"cfg"},
Usage: "Edit app config",
UsageText: "abra app config <domain> [options]",
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
appName := cmd.Args().First()
if appName == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("no app provided"))
internal.ShowSubcommandHelpAndError(cmd, errors.New("no app provided"))
}
files, err := config.LoadAppFiles("")
files, err := appPkg.LoadAppFiles("")
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
appFile, exists := files[appName]
if !exists {
logrus.Fatalf("cannot find app with name %s", appName)
log.Fatalf("cannot find app with name %s", appName)
}
ed, ok := os.LookupEnv("EDITOR")
if !ok {
edPrompt := &survey.Select{
Message: "Which editor do you wish to use?",
Message: "which editor do you wish to use?",
Options: []string{"vi", "vim", "nvim", "nano", "pico", "emacs"},
}
if err := survey.AskOne(edPrompt, &ed); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
cmd := exec.Command(ed, appFile.Path)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
logrus.Fatal(err)
c := exec.Command(ed, appFile.Path)
c.Stdin = os.Stdin
c.Stdout = os.Stdout
c.Stderr = os.Stderr
if err := c.Run(); err != nil {
log.Fatal(err)
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}

View File

@ -2,35 +2,36 @@ package app
import (
"context"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/container"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var appCpCommand = cli.Command{
Name: "cp",
Aliases: []string{"c"},
ArgsUsage: "<domain> <src> <dst>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
Usage: "Copy files to/from a running app service",
Description: `
Copy files to and from any app service file system.
Before: internal.SubCommandBefore,
Usage: "Copy files to/from a deployed app service",
UsageText: "abra app cp <domain> <src> <dst> [options]",
Description: `Copy files to and from any app service file system.
If you want to copy a myfile.txt to the root of the app service:
@ -38,114 +39,339 @@ If you want to copy a myfile.txt to the root of the app service:
And if you want to copy that file back to your current working directory locally:
abra app cp <domain> app:/myfile.txt .
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
abra app cp <domain> app:/myfile.txt`,
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
src := c.Args().Get(1)
dst := c.Args().Get(2)
src := cmd.Args().Get(1)
dst := cmd.Args().Get(2)
if src == "" {
logrus.Fatal("missing <src> argument")
} else if dst == "" {
logrus.Fatal("missing <dest> argument")
log.Fatal("missing <src> argument")
}
if dst == "" {
log.Fatal("missing <dest> argument")
}
parsedSrc := strings.SplitN(src, ":", 2)
parsedDst := strings.SplitN(dst, ":", 2)
errorMsg := "one of <src>/<dest> arguments must take $SERVICE:$PATH form"
if len(parsedSrc) == 2 && len(parsedDst) == 2 {
logrus.Fatal(errorMsg)
} else if len(parsedSrc) != 2 {
if len(parsedDst) != 2 {
logrus.Fatal(errorMsg)
}
} else if len(parsedDst) != 2 {
if len(parsedSrc) != 2 {
logrus.Fatal(errorMsg)
}
}
var service string
var srcPath string
var dstPath string
isToContainer := false // <container:src> <dst>
if len(parsedSrc) == 2 {
service = parsedSrc[0]
srcPath = parsedSrc[1]
dstPath = dst
logrus.Debugf("assuming transfer is coming FROM the container")
} else if len(parsedDst) == 2 {
service = parsedDst[0]
dstPath = parsedDst[1]
srcPath = src
isToContainer = true // <src> <container:dst>
logrus.Debugf("assuming transfer is going TO the container")
}
if !isToContainer {
if _, err := os.Stat(dstPath); os.IsNotExist(err) {
logrus.Fatalf("%s does not exist locally?", dstPath)
}
}
err := configureAndCp(c, app, srcPath, dstPath, service, isToContainer)
srcPath, dstPath, service, toContainer, err := parseSrcAndDst(src, dst)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
return nil
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
container, err := containerPkg.GetContainerFromStackAndService(cl, app.StackName(), service)
if err != nil {
log.Fatal(err)
}
log.Debugf("retrieved %s as target container on %s", formatter.ShortenID(container.ID), app.Server)
if toContainer {
err = CopyToContainer(cl, container.ID, srcPath, dstPath)
} else {
err = CopyFromContainer(cl, container.ID, srcPath, dstPath)
}
if err != nil {
log.Fatal(err)
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
func configureAndCp(
c *cli.Context,
app config.App,
srcPath string,
dstPath string,
service string,
isToContainer bool) error {
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service))
container, err := container.GetContainer(context.Background(), cl, filters, internal.NoInput)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("retrieved %s as target container on %s", formatter.ShortenID(container.ID), app.Server)
if isToContainer {
if _, err := os.Stat(srcPath); err != nil {
logrus.Fatalf("%s does not exist?", srcPath)
}
toTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
content, err := archive.TarWithOptions(srcPath, toTarOpts)
if err != nil {
logrus.Fatal(err)
}
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), container.ID, dstPath, content, copyOpts); err != nil {
logrus.Fatal(err)
}
} else {
content, _, err := cl.CopyFromContainer(context.Background(), container.ID, srcPath)
if err != nil {
logrus.Fatal(err)
}
defer content.Close()
fromTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
if err := archive.Untar(content, dstPath, fromTarOpts); err != nil {
logrus.Fatal(err)
}
}
return nil
}
var errServiceMissing = errors.New("one of <src>/<dest> arguments must take $SERVICE:$PATH form")
// parseSrcAndDst parses src and dest string. One of src or dst must be of the form $SERVICE:$PATH
func parseSrcAndDst(src, dst string) (srcPath string, dstPath string, service string, toContainer bool, err error) {
parsedSrc := strings.SplitN(src, ":", 2)
parsedDst := strings.SplitN(dst, ":", 2)
if len(parsedSrc)+len(parsedDst) != 3 {
return "", "", "", false, errServiceMissing
}
if len(parsedSrc) == 2 {
return parsedSrc[1], dst, parsedSrc[0], false, nil
}
if len(parsedDst) == 2 {
return src, parsedDst[1], parsedDst[0], true, nil
}
return "", "", "", false, errServiceMissing
}
// CopyToContainer copies a file or directory from the local file system to the container.
// See the possible copy modes and their documentation.
func CopyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
srcStat, err := os.Stat(srcPath)
if err != nil {
return fmt.Errorf("local %s ", err)
}
dstStat, err := cl.ContainerStatPath(context.Background(), containerID, dstPath)
dstExists := true
if err != nil {
if errdefs.IsNotFound(err) {
dstExists = false
} else {
return fmt.Errorf("remote path: %s", err)
}
}
mode, err := copyMode(srcPath, dstPath, srcStat.Mode(), dstStat.Mode, dstExists)
if err != nil {
return err
}
movePath := ""
switch mode {
case CopyModeDirToDir:
// Add the src directory to the destination path
_, srcDir := path.Split(srcPath)
dstPath = path.Join(dstPath, srcDir)
// Make sure the dst directory exists.
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: []string{"mkdir", "-p", dstPath},
Detach: false,
Tty: true,
}); err != nil {
return fmt.Errorf("create remote directory: %s", err)
}
case CopyModeFileToFile:
// Remove the file component from the path, since docker can only copy
// to a directory.
dstPath, _ = path.Split(dstPath)
case CopyModeFileToFileRename:
// Copy the file to the temp directory and move it to its dstPath
// afterwards.
movePath = dstPath
dstPath = "/tmp"
}
toTarOpts := &archive.TarOptions{IncludeSourceDir: true, NoOverwriteDirNonDir: true, Compression: archive.Gzip}
content, err := archive.TarWithOptions(srcPath, toTarOpts)
if err != nil {
return err
}
log.Debugf("copy %s from local to %s on container", srcPath, dstPath)
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), containerID, dstPath, content, copyOpts); err != nil {
return err
}
if movePath != "" {
_, srcFile := path.Split(srcPath)
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: []string{"mv", path.Join("/tmp", srcFile), movePath},
Detach: false,
Tty: true,
}); err != nil {
return fmt.Errorf("create remote directory: %s", err)
}
}
return nil
}
// CopyFromContainer copies a file or directory from the given container to the local file system.
// See the possible copy modes and their documentation.
func CopyFromContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
srcStat, err := cl.ContainerStatPath(context.Background(), containerID, srcPath)
if err != nil {
if errdefs.IsNotFound(err) {
return fmt.Errorf("remote: %s does not exist", srcPath)
} else {
return fmt.Errorf("remote path: %s", err)
}
}
dstStat, err := os.Stat(dstPath)
dstExists := true
var dstMode os.FileMode
if err != nil {
if os.IsNotExist(err) {
dstExists = false
} else {
return fmt.Errorf("remote path: %s", err)
}
} else {
dstMode = dstStat.Mode()
}
mode, err := copyMode(srcPath, dstPath, srcStat.Mode, dstMode, dstExists)
if err != nil {
return err
}
moveDstDir := ""
moveDstFile := ""
switch mode {
case CopyModeFileToFile:
// Remove the file component from the path, since docker can only copy
// to a directory.
dstPath, _ = path.Split(dstPath)
case CopyModeFileToFileRename:
// Copy the file to the temp directory and move it to its dstPath
// afterwards.
moveDstFile = dstPath
dstPath = "/tmp"
case CopyModeFilesToDir:
// Copy the directory to the temp directory and move it to its
// dstPath afterwards.
moveDstDir = path.Join(dstPath, "/")
dstPath = "/tmp"
// Make sure the temp directory always gets removed
defer os.Remove(path.Join("/tmp"))
}
content, _, err := cl.CopyFromContainer(context.Background(), containerID, srcPath)
if err != nil {
return fmt.Errorf("copy: %s", err)
}
defer content.Close()
if err := archive.Untar(content, dstPath, &archive.TarOptions{
NoOverwriteDirNonDir: true,
Compression: archive.Gzip,
NoLchown: true,
}); err != nil {
return fmt.Errorf("untar: %s", err)
}
if moveDstFile != "" {
_, srcFile := path.Split(strings.TrimSuffix(srcPath, "/"))
if err := moveFile(path.Join("/tmp", srcFile), moveDstFile); err != nil {
return err
}
}
if moveDstDir != "" {
_, srcDir := path.Split(strings.TrimSuffix(srcPath, "/"))
if err := moveDir(path.Join("/tmp", srcDir), moveDstDir); err != nil {
return err
}
}
return nil
}
var (
ErrCopyDirToFile = fmt.Errorf("can't copy dir to file")
ErrDstDirNotExist = fmt.Errorf("destination directory does not exist")
)
type CopyMode int
const (
// Copy a src file to a dest file. The src and dest file names are the same.
// <dir_src>/<file> + <dir_dst>/<file> -> <dir_dst>/<file>
CopyModeFileToFile = CopyMode(iota)
// Copy a src file to a dest file. The src and dest file names are not the same.
// <dir_src>/<file_src> + <dir_dst>/<file_dst> -> <dir_dst>/<file_dst>
CopyModeFileToFileRename
// Copy a src file to dest directory. The dest file gets created in the dest
// folder with the src filename.
// <dir_src>/<file> + <dir_dst> -> <dir_dst>/<file>
CopyModeFileToDir
// Copy a src directory to dest directory.
// <dir_src> + <dir_dst> -> <dir_dst>/<dir_src>
CopyModeDirToDir
// Copy all files in the src directory to the dest directory. This works recursively.
// <dir_src>/ + <dir_dst> -> <dir_dst>/<files_from_dir_src>
CopyModeFilesToDir
)
// copyMode takes a src and dest path and file mode to determine the copy mode.
// See the possible copy modes and their documentation.
func copyMode(srcPath, dstPath string, srcMode os.FileMode, dstMode os.FileMode, dstExists bool) (CopyMode, error) {
_, srcFile := path.Split(srcPath)
_, dstFile := path.Split(dstPath)
if srcMode.IsDir() {
if !dstExists {
return -1, ErrDstDirNotExist
}
if dstMode.IsDir() {
if strings.HasSuffix(srcPath, "/") {
return CopyModeFilesToDir, nil
}
return CopyModeDirToDir, nil
}
return -1, ErrCopyDirToFile
}
if dstMode.IsDir() {
return CopyModeFileToDir, nil
}
if srcFile != dstFile {
return CopyModeFileToFileRename, nil
}
return CopyModeFileToFile, nil
}
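// A few illustrative calls against the mode table above (these mirror the
// unit tests in cp_test.go below):
//
// copyMode("a.txt", "a.txt", os.ModePerm, os.ModePerm, true) -> CopyModeFileToFile
// copyMode("a.txt", "b.txt", os.ModePerm, 0, true) -> CopyModeFileToFileRename
// copyMode("dir/", "dir", os.ModeDir, os.ModeDir, true) -> CopyModeFilesToDir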
// moveDir moves all files from a source path to the destination path recursively.
func moveDir(sourcePath, destPath string) error {
return filepath.Walk(sourcePath, func(p string, info os.FileInfo, err error) error {
if err != nil {
return err
}
newPath := path.Join(destPath, strings.TrimPrefix(p, sourcePath))
if info.IsDir() {
err := os.Mkdir(newPath, info.Mode())
if err != nil {
if os.IsExist(err) {
return nil
}
return err
}
}
if info.Mode().IsRegular() {
return moveFile(p, newPath)
}
return nil
})
}
// moveFile moves a file from a source path to a destination path.
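// Copying and then removing the source (rather than calling os.Rename)
// keeps the move working across filesystems, e.g. out of /tmp.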
func moveFile(sourcePath, destPath string) error {
inputFile, err := os.Open(sourcePath)
if err != nil {
return err
}
outputFile, err := os.Create(destPath)
if err != nil {
inputFile.Close()
return err
}
defer outputFile.Close()
_, err = io.Copy(outputFile, inputFile)
inputFile.Close()
if err != nil {
return err
}
// Remove the file after a successful copy.
err = os.Remove(sourcePath)
if err != nil {
return err
}
return nil
}

cli/app/cp_test.go Normal file
View File

@ -0,0 +1,113 @@
package app
import (
"os"
"testing"
)
func TestParse(t *testing.T) {
tests := []struct {
src string
dst string
srcPath string
dstPath string
service string
toContainer bool
err error
}{
{src: "foo", dst: "bar", err: errServiceMissing},
{src: "app:foo", dst: "app:bar", err: errServiceMissing},
{src: "app:foo", dst: "bar", srcPath: "foo", dstPath: "bar", service: "app", toContainer: false},
{src: "foo", dst: "app:bar", srcPath: "foo", dstPath: "bar", service: "app", toContainer: true},
}
for i, tc := range tests {
srcPath, dstPath, service, toContainer, err := parseSrcAndDst(tc.src, tc.dst)
if srcPath != tc.srcPath {
t.Errorf("[%d] srcPath: want (%s), got(%s)", i, tc.srcPath, srcPath)
}
if dstPath != tc.dstPath {
t.Errorf("[%d] dstPath: want (%s), got(%s)", i, tc.dstPath, dstPath)
}
if service != tc.service {
t.Errorf("[%d] service: want (%s), got(%s)", i, tc.service, service)
}
if toContainer != tc.toContainer {
t.Errorf("[%d] toConainer: want (%t), got(%t)", i, tc.toContainer, toContainer)
}
if err == nil && tc.err != nil && err.Error() != tc.err.Error() {
t.Errorf("[%d] err: want (%s), got(%s)", i, tc.err, err)
}
}
}
func TestCopyMode(t *testing.T) {
tests := []struct {
srcPath string
dstPath string
srcMode os.FileMode
dstMode os.FileMode
dstExists bool
mode CopyMode
err error
}{
{
srcPath: "foo.txt",
dstPath: "foo.txt",
srcMode: os.ModePerm,
dstMode: os.ModePerm,
dstExists: true,
mode: CopyModeFileToFile,
},
{
srcPath: "foo.txt",
dstPath: "bar.txt",
srcMode: os.ModePerm,
dstExists: true,
mode: CopyModeFileToFileRename,
},
{
srcPath: "foo",
dstPath: "foo",
srcMode: os.ModeDir,
dstMode: os.ModeDir,
dstExists: true,
mode: CopyModeDirToDir,
},
{
srcPath: "foo/",
dstPath: "foo",
srcMode: os.ModeDir,
dstMode: os.ModeDir,
dstExists: true,
mode: CopyModeFilesToDir,
},
{
srcPath: "foo",
dstPath: "foo",
srcMode: os.ModeDir,
dstExists: false,
mode: -1,
err: ErrDstDirNotExist,
},
{
srcPath: "foo",
dstPath: "foo",
srcMode: os.ModeDir,
dstMode: os.ModePerm,
dstExists: true,
mode: -1,
err: ErrCopyDirToFile,
},
}
for i, tc := range tests {
mode, err := copyMode(tc.srcPath, tc.dstPath, tc.srcMode, tc.dstMode, tc.dstExists)
if mode != tc.mode {
t.Errorf("[%d] mode: want (%d), got(%d)", i, tc.mode, mode)
}
if err != tc.err {
t.Errorf("[%d] err: want (%s), got(%s)", i, tc.err, err)
}
}
}

View File

@ -1,36 +1,263 @@
package app
import (
"context"
"fmt"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"github.com/urfave/cli"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/secret"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/dns"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/urfave/cli/v3"
)
var appDeployCommand = cli.Command{
Name: "deploy",
Aliases: []string{"d"},
Usage: "Deploy an app",
ArgsUsage: "<domain>",
UsageText: "abra app deploy <domain> [<version>] [options]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.ForceFlag,
internal.ChaosFlag,
internal.NoDomainChecksFlag,
internal.DontWaitConvergeFlag,
},
Before: internal.SubCommandBefore,
Description: `
Deploy an app. It does not support incrementing the version of a deployed app,
for this you need to look at the "abra app upgrade <domain>" command.
Description: `Deploy an app.
You may pass "--force" to re-deploy the same version again. This can be useful
if the container runtime has gotten into a weird state.
This command supports chaos operations. Use "--chaos" to deploy your recipe
checkout as-is. Recipe commit hashes are also supported values for
"[<version>]". Please note, "upgrade"/"rollback" do not support chaos
operations.`,
Chaos mode ("--chaos") will deploy your local checkout of a recipe as-is,
including unstaged changes and can be useful for live hacking and testing new
recipes.
`,
Action: internal.DeployAction,
BashComplete: autocomplete.AppNameComplete,
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
var warnMessages []string
app := internal.ValidateApp(cmd)
stackName := app.StackName()
specificVersion := cmd.Args().Get(1)
if specificVersion == "" {
specificVersion = app.Recipe.Version
}
if specificVersion != "" && internal.Chaos {
log.Fatal("cannot use <version> and --chaos together")
}
if specificVersion != "" {
log.Debugf("overriding env file version (%s) with %s", app.Recipe.Version, specificVersion)
app.Recipe.Version = specificVersion
}
if specificVersion == "" && app.Recipe.Version != "" && !internal.Chaos {
log.Debugf("retrieved %s as version from env file", app.Recipe.Version)
specificVersion = app.Recipe.Version
}
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
if err := lint.LintForErrors(app.Recipe); err != nil {
log.Fatal(err)
}
log.Debugf("checking whether %s is already deployed", stackName)
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
log.Fatal(err)
}
// NOTE(d1): handles "<version> as git hash" use case
var isChaosCommit bool
// NOTE(d1): check out specific version before dealing with secrets. This
// is because we need to deal with GetComposeFiles under the hood and these
// files change from version to version which therefore affects which
// secrets might be generated
version := deployMeta.Version
if specificVersion != "" {
version = specificVersion
log.Debugf("choosing %s as version to deploy", version)
var err error
isChaosCommit, err = app.Recipe.EnsureVersion(version)
if err != nil {
log.Fatal(err)
}
if isChaosCommit {
log.Debugf("assuming '%s' is a chaos commit", version)
internal.Chaos = true
}
}
secStats, err := secret.PollSecretsStatus(cl, app)
if err != nil {
log.Fatal(err)
}
for _, secStat := range secStats {
if !secStat.CreatedOnRemote {
log.Fatalf("unable to deploy, secrets not generated (%s)?", secStat.LocalName)
}
}
if deployMeta.IsDeployed {
if internal.Force || internal.Chaos {
warnMessages = append(warnMessages, fmt.Sprintf("%s is already deployed", app.Name))
} else {
log.Fatalf("%s is already deployed", app.Name)
}
}
if !internal.Chaos && specificVersion == "" {
versions, err := app.Recipe.Tags()
if err != nil {
log.Fatal(err)
}
if len(versions) > 0 && !internal.Chaos {
version = versions[len(versions)-1]
log.Debugf("choosing %s as version to deploy", version)
if _, err := app.Recipe.EnsureVersion(version); err != nil {
log.Fatal(err)
}
} else {
head, err := app.Recipe.Head()
if err != nil {
log.Fatal(err)
}
version = formatter.SmallSHA(head.String())
warnMessages = append(warnMessages, fmt.Sprintf("no versions detected, using latest commit"))
}
}
chaosVersion := config.CHAOS_DEFAULT
if internal.Chaos {
warnMessages = append(warnMessages, "chaos mode engaged")
if isChaosCommit {
chaosVersion = specificVersion
versionLabelLocal, err := app.Recipe.GetVersionLabelLocal()
if err != nil {
log.Fatal(err)
}
version = versionLabelLocal
} else {
var err error
chaosVersion, err = app.Recipe.ChaosVersion()
if err != nil {
log.Fatal(err)
}
}
}
abraShEnv, err := envfile.ReadAbraShEnvVars(app.Recipe.AbraShPath)
if err != nil {
log.Fatal(err)
}
for k, v := range abraShEnv {
app.Env[k] = v
}
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
log.Fatal(err)
}
deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: stackName,
Prune: false,
ResolveImage: stack.ResolveImageAlways,
Detach: false,
}
compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env)
if err != nil {
log.Fatal(err)
}
appPkg.ExposeAllEnv(stackName, compose, app.Env)
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
appPkg.SetChaosVersionLabel(compose, stackName, chaosVersion)
appPkg.SetUpdateLabel(compose, stackName, app.Env)
envVars, err := appPkg.CheckEnv(app)
if err != nil {
log.Fatal(err)
}
for _, envVar := range envVars {
if !envVar.Present {
warnMessages = append(warnMessages,
fmt.Sprintf("env var %s missing from %s.env, present in recipe .env.sample", envVar.Name, app.Domain),
)
}
}
if !internal.NoDomainChecks {
domainName, ok := app.Env["DOMAIN"]
if ok {
if _, err = dns.EnsureDomainsResolveSameIPv4(domainName, app.Server); err != nil {
log.Fatal(err)
}
} else {
warnMessages = append(warnMessages, "skipping domain checks as no DOMAIN=... configured for app")
}
} else {
warnMessages = append(warnMessages, "skipping domain checks as requested")
}
if err := internal.DeployOverview(app, warnMessages, version, chaosVersion); err != nil {
log.Fatal(err)
}
stack.WaitTimeout, err = appPkg.GetTimeoutFromLabel(compose, stackName)
if err != nil {
log.Fatal(err)
}
log.Debugf("set waiting timeout to %d s", stack.WaitTimeout)
if err := stack.RunDeploy(cl, deployOpts, compose, app.Name, internal.DontWaitConverge); err != nil {
log.Fatal(err)
}
postDeployCmds, ok := app.Env["POST_DEPLOY_CMDS"]
if ok && !internal.DontWaitConverge {
log.Debugf("run the following post-deploy commands: %s", postDeployCmds)
if err := internal.PostCmds(cl, app, postDeployCmds); err != nil {
log.Fatalf("attempting to run post deploy commands, saw: %s", err)
}
}
app.Recipe.Version = version
if chaosVersion != config.CHAOS_DEFAULT {
app.Recipe.Version = chaosVersion
}
log.Debugf("choosing %s as version to save to env file", app.Recipe.Version)
if err := app.WriteRecipeVersion(app.Recipe.Version, false); err != nil {
log.Fatalf("writing new recipe version in env file: %s", err)
}
return nil
},
}
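The version-resolution order described above can be summarised in a short
sketch (illustrative names, not the abra API):

// resolveVersion mirrors the precedence in the deploy action above.
func resolveVersion(cliArg, envVersion string, tags []string, headSHA string) string {
if cliArg != "" {
return cliArg // an explicit <version> argument wins
}
if envVersion != "" {
return envVersion // then the version recorded in the app .env
}
if len(tags) > 0 {
return tags[len(tags)-1] // then the latest recipe tag
}
return headSHA // no tags: fall back to the latest commit
}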

View File

@ -1,143 +0,0 @@
package app
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/recipe"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var appErrorsCommand = cli.Command{
Name: "errors",
Usage: "List errors for a deployed app",
ArgsUsage: "<domain>",
Description: `
List errors for a deployed app.
This is a best-effort implementation and an attempt to gather a number of tips
& tricks for finding errors together into one convenient command. When an app
is failing to deploy or having issues, it could be a lot of things.
This command currently takes into account:
Is the service deployed?
Is the service killed by an OOM error?
Is the service reporting an error (like in "ps --no-trunc" output)?
Is the service healthcheck failing? what are the healthcheck logs?
Got any more ideas? Please let us know:
https://git.coopcloud.tech/coop-cloud/organising/issues/new/choose
This command is best accompanied by "abra app logs <domain>" which may reveal
further information which can help you debug the cause of an app failure via
the logs.
`,
Aliases: []string{"e"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.WatchFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
logrus.Fatal(err)
}
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
}
if !internal.Watch {
if err := checkErrors(c, cl, app); err != nil {
logrus.Fatal(err)
}
return nil
}
for {
if err := checkErrors(c, cl, app); err != nil {
logrus.Fatal(err)
}
time.Sleep(2 * time.Second)
}
return nil
},
}
func checkErrors(c *cli.Context, cl *dockerClient.Client, app config.App) error {
recipe, err := recipe.Get(app.Recipe)
if err != nil {
return err
}
for _, service := range recipe.Config.Services {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name))
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters})
if err != nil {
return err
}
if len(containers) == 0 {
logrus.Warnf("%s is not up, something seems wrong", service.Name)
continue
}
container := containers[0]
containerState, err := cl.ContainerInspect(context.Background(), container.ID)
if err != nil {
logrus.Fatal(err)
}
if containerState.State.OOMKilled {
logrus.Warnf("%s has been killed due to an out of memory error", service.Name)
}
if containerState.State.Error != "" {
logrus.Warnf("%s reports this error: %s", service.Name, containerState.State.Error)
}
if containerState.State.Health != nil {
if containerState.State.Health.Status != "healthy" {
logrus.Warnf("%s healthcheck status is %s", service.Name, containerState.State.Health.Status)
logrus.Warnf("%s healthcheck has failed %s times", service.Name, strconv.Itoa(containerState.State.Health.FailingStreak))
for _, log := range containerState.State.Health.Log {
logrus.Warnf("%s healthcheck logs: %s", service.Name, strings.TrimSpace(log.Output))
}
}
}
}
return nil
}
func getServiceName(names []string) string {
containerName := strings.Join(names, " ")
trimmed := strings.TrimPrefix(containerName, "/")
return strings.Split(trimmed, ".")[0]
}
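// Illustrative: getServiceName([]string{"/example_com_app.1.abcdef"})
// returns "example_com_app".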

View File

@ -1,52 +1,64 @@
package app
import (
"context"
"encoding/json"
"fmt"
"sort"
"strconv"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/config"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/ssh"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/tagcmp"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var status bool
var statusFlag = &cli.BoolFlag{
Name: "status, S",
Usage: "Show app deployment status",
Destination: &status,
}
var (
status bool
statusFlag = &cli.BoolFlag{
Name: "status",
Aliases: []string{"S"},
Usage: "Show app deployment status",
Destination: &status,
}
)
var appRecipe string
var recipeFlag = &cli.StringFlag{
Name: "recipe, r",
Value: "",
Usage: "Show apps of a specific recipe",
Destination: &appRecipe,
}
var (
recipeFilter string
recipeFlag = &cli.StringFlag{
Name: "recipe",
Aliases: []string{"r"},
Value: "",
Usage: "Show apps of a specific recipe",
Destination: &recipeFilter,
}
)
var listAppServer string
var listAppServerFlag = &cli.StringFlag{
Name: "server, s",
Value: "",
Usage: "Show apps of a specific server",
Destination: &listAppServer,
}
var (
listAppServer string
listAppServerFlag = &cli.StringFlag{
Name: "server",
Aliases: []string{"s"},
Value: "",
Usage: "Show apps of a specific server",
Destination: &listAppServer,
}
)
type appStatus struct {
Server string `json:"server"`
Recipe string `json:"recipe"`
AppName string `json:"appName"`
Domain string `json:"domain"`
Status string `json:"status"`
Version string `json:"version"`
Upgrade string `json:"upgrade"`
Server string `json:"server"`
Recipe string `json:"recipe"`
AppName string `json:"appName"`
Domain string `json:"domain"`
Status string `json:"status"`
Chaos string `json:"chaos"`
ChaosVersion string `json:"chaosVersion"`
AutoUpdate string `json:"autoUpdate"`
Version string `json:"version"`
Upgrade string `json:"upgrade"`
}
type serverStatus struct {
@ -59,60 +71,48 @@ type serverStatus struct {
}
var appListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Usage: "List all managed apps",
Description: `
Read the local file system listing of apps and servers (e.g. ~/.abra/) to
generate a report of all your apps.
Name: "list",
Aliases: []string{"ls"},
Usage: "List all managed apps",
UsageText: "abra app list [options]",
Description: `Generate a report of all managed apps.
By passing the "--status/-S" flag, you can query all your servers for the
actual live deployment status. Depending on how many servers you manage, this
can take some time.
`,
can take some time.`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.MachineReadableFlag,
statusFlag,
listAppServerFlag,
recipeFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
appFiles, err := config.LoadAppFiles(listAppServer)
Before: internal.SubCommandBefore,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
appFiles, err := appPkg.LoadAppFiles(listAppServer)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
apps, err := config.GetApps(appFiles)
apps, err := appPkg.GetApps(appFiles, recipeFilter)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
sort.Sort(config.ByServerAndRecipe(apps))
sort.Sort(appPkg.ByServerAndRecipe(apps))
statuses := make(map[string]map[string]string)
var catl recipe.RecipeCatalogue
if status {
alreadySeen := make(map[string]bool)
for _, app := range apps {
if _, ok := alreadySeen[app.Server]; !ok {
if err := ssh.EnsureHostKey(app.Server); err != nil {
logrus.Fatal(fmt.Sprintf(internal.SSHFailMsg, app.Server))
}
alreadySeen[app.Server] = true
}
}
statuses, err = config.GetAppStatuses(appFiles)
statuses, err = appPkg.GetAppStatuses(apps, internal.MachineReadable)
if err != nil {
logrus.Fatal(err)
}
var err error
catl, err = recipe.ReadRecipeCatalogue()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
@ -124,14 +124,14 @@ can take some time.
var ok bool
if stats, ok = allStats[app.Server]; !ok {
stats = serverStatus{}
if appRecipe == "" {
if recipeFilter == "" {
// count server, no filtering
totalServersCount++
}
}
if app.Recipe == appRecipe || appRecipe == "" {
if appRecipe != "" {
if app.Recipe.Name == recipeFilter || recipeFilter == "" {
if recipeFilter != "" {
// only count server if matches filter
totalServersCount++
}
@ -143,12 +143,24 @@ can take some time.
if status {
status := "unknown"
version := "unknown"
chaos := "unknown"
chaosVersion := "unknown"
autoUpdate := "unknown"
if statusMeta, ok := statuses[app.StackName()]; ok {
if currentVersion, exists := statusMeta["version"]; exists {
if currentVersion != "" {
version = currentVersion
}
}
if chaosDeploy, exists := statusMeta["chaos"]; exists {
chaos = chaosDeploy
}
if chaosDeployVersion, exists := statusMeta["chaosVersion"]; exists {
chaosVersion = chaosDeployVersion
}
if autoUpdateState, exists := statusMeta["autoUpdate"]; exists {
autoUpdate = autoUpdateState
}
if statusMeta["status"] != "" {
status = statusMeta["status"]
}
@ -158,24 +170,27 @@ can take some time.
}
appStats.Status = status
appStats.Chaos = chaos
appStats.ChaosVersion = chaosVersion
appStats.Version = version
appStats.AutoUpdate = autoUpdate
var newUpdates []string
if version != "unknown" {
updates, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
updates, err := app.Recipe.Tags()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
for _, update := range updates {
parsedUpdate, err := tagcmp.Parse(update)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if update != version && parsedUpdate.IsGreaterThan(parsedVersion) {
@ -199,7 +214,7 @@ can take some time.
}
appStats.Server = app.Server
appStats.Recipe = app.Recipe
appStats.Recipe = app.Recipe.Name
appStats.AppName = app.Name
appStats.Domain = app.Domain
@ -207,15 +222,17 @@ can take some time.
}
allStats[app.Server] = stats
}
if internal.MachineReadable {
jsonstring, err := json.Marshal(allStats)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
} else {
fmt.Println(string(jsonstring))
}
return nil
}
alreadySeen := make(map[string]bool)
for _, app := range apps {
if _, ok := alreadySeen[app.Server]; ok {
@ -224,27 +241,59 @@ can take some time.
serverStat := allStats[app.Server]
tableCol := []string{"recipe", "domain"}
headers := []string{"RECIPE", "DOMAIN"}
if status {
tableCol = append(tableCol, []string{"status", "version", "upgrade"}...)
headers = append(headers, []string{
"STATUS",
"CHAOS",
"VERSION",
"UPGRADE",
"AUTOUPDATE"}...,
)
}
table := formatter.CreateTable(tableCol)
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
table.Headers(headers...)
var rows [][]string
for _, appStat := range serverStat.Apps {
tableRow := []string{appStat.Recipe, appStat.Domain}
row := []string{appStat.Recipe, appStat.Domain}
if status {
tableRow = append(tableRow, []string{appStat.Status, appStat.Version, appStat.Upgrade}...)
chaosStatus := appStat.Chaos
if chaosStatus != "unknown" {
chaosEnabled, err := strconv.ParseBool(chaosStatus)
if err != nil {
log.Fatal(err)
}
if chaosEnabled && appStat.ChaosVersion != "unknown" {
chaosStatus = appStat.ChaosVersion
}
}
row = append(row, []string{
appStat.Status,
chaosStatus,
appStat.Version,
appStat.Upgrade,
appStat.AutoUpdate}...,
)
}
table.Append(tableRow)
rows = append(rows, row)
}
if table.NumLines() > 0 {
table.Render()
table.Rows(rows...)
if len(rows) > 0 {
fmt.Println(table)
if status {
fmt.Println(fmt.Sprintf(
"server: %s | total apps: %v | versioned: %v | unversioned: %v | latest: %v | upgrade: %v",
"SERVER: %s | TOTAL APPS: %v | VERSIONED: %v | UNVERSIONED: %v | LATEST : %v | UPGRADE: %v",
app.Server,
serverStat.AppCount,
serverStat.VersionCount,
@ -253,19 +302,21 @@ can take some time.
serverStat.UpgradeCount,
))
} else {
fmt.Println(fmt.Sprintf("server: %s | total apps: %v", app.Server, serverStat.AppCount))
log.Infof("SERVER: %s TOTAL APPS: %v", app.Server, serverStat.AppCount)
}
}
if len(allStats) > 1 && table.NumLines() > 0 {
fmt.Println() // newline separator for multiple servers
if len(allStats) > 1 && len(rows) > 0 {
fmt.Println() // newline separator for multiple servers
}
}
alreadySeen[app.Server] = true
}
if len(allStats) > 1 {
fmt.Println(fmt.Sprintf("total servers: %v | total apps: %v ", totalServersCount, totalAppsCount))
totalServers := formatter.BoldStyle.Render("TOTAL SERVERS")
totalApps := formatter.BoldStyle.Render("TOTAL APPS")
log.Infof("%s: %v | %s: %v ", totalServers, totalServersCount, totalApps, totalAppsCount)
}
return nil
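The UPGRADE column above is derived by comparing parsed version tags; a
minimal sketch of that check using the same tagcmp calls (error handling
omitted, versions illustrative):

parsedVersion, _ := tagcmp.Parse("1.2.0")
parsedUpdate, _ := tagcmp.Parse("1.3.0")
if parsedUpdate.IsGreaterThan(parsedVersion) {
// "1.3.0" would be offered as an available upgrade
}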

View File

@ -2,128 +2,138 @@ package app
import (
"context"
"fmt"
"io"
"os"
"slices"
"sync"
"time"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/service"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types"
containerTypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var logOpts = types.ContainerLogsOptions{
Details: false,
Follow: true,
ShowStderr: true,
ShowStdout: true,
Tail: "20",
Timestamps: true,
}
// stackLogs lists logs for all stack services
func stackLogs(c *cli.Context, app config.App, client *dockerClient.Client) {
filters, err := app.Filters(true, false)
if err != nil {
logrus.Fatal(err)
}
serviceOpts := types.ServiceListOptions{Filters: filters}
services, err := client.ServiceList(context.Background(), serviceOpts)
if err != nil {
logrus.Fatal(err)
}
var wg sync.WaitGroup
for _, service := range services {
wg.Add(1)
go func(s string) {
if internal.StdErrOnly {
logOpts.ShowStdout = false
}
logs, err := client.ServiceLogs(context.Background(), s, logOpts)
if err != nil {
logrus.Fatal(err)
}
defer logs.Close()
_, err = io.Copy(os.Stdout, logs)
if err != nil && err != io.EOF {
logrus.Fatal(err)
}
}(service.ID)
}
wg.Wait()
os.Exit(0)
}
var appLogsCommand = cli.Command{
Name: "logs",
Aliases: []string{"l"},
ArgsUsage: "<domain> [<service>]",
Usage: "Tail app logs",
UsageText: "abra app logs <domain> [<service>] [options]",
Flags: []cli.Flag{
internal.StdErrOnlyFlag,
internal.DebugFlag,
internal.SinceLogsFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
stackName := app.StackName()
if err := app.Recipe.EnsureExists(); err != nil {
log.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
serviceName := c.Args().Get(1)
if serviceName == "" {
logrus.Debugf("tailing logs for all %s services", app.Recipe)
stackLogs(c, app, cl)
} else {
logrus.Debugf("tailing logs for %s", serviceName)
if err := tailServiceLogs(c, cl, app, serviceName); err != nil {
logrus.Fatal(err)
}
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
log.Fatal(err)
}
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
}
serviceName := cmd.Args().Get(1)
serviceNames := []string{}
if serviceName != "" {
serviceNames = []string{serviceName}
}
err = tailLogs(cl, app, serviceNames)
if err != nil {
log.Fatal(err)
}
return nil
},
}
func tailServiceLogs(c *cli.Context, cl *dockerClient.Client, app config.App, serviceName string) error {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("%s_%s", app.StackName(), serviceName))
chosenService, err := service.GetService(context.Background(), cl, filters, internal.NoInput)
// tailLogs prints logs for the given app, optionally filtered to the given
// service names. It also checks whether the latest task is not running and,
// if so, prints the past tasks.
func tailLogs(cl *dockerClient.Client, app appPkg.App, serviceNames []string) error {
f, err := app.Filters(true, false, serviceNames...)
if err != nil {
logrus.Fatal(err)
return err
}
if internal.StdErrOnly {
logOpts.ShowStdout = false
}
logs, err := cl.ServiceLogs(context.Background(), chosenService.ID, logOpts)
services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: f})
if err != nil {
logrus.Fatal(err)
return err
}
defer logs.Close()
_, err = io.Copy(os.Stdout, logs)
if err != nil && err != io.EOF {
logrus.Fatal(err)
var wg sync.WaitGroup
for _, service := range services {
filters := filters.NewArgs()
filters.Add("name", service.Spec.Name)
tasks, err := cl.TaskList(context.Background(), types.TaskListOptions{Filters: filters})
if err != nil {
return err
}
if len(tasks) > 0 {
// Sort the tasks by the CreatedAt field in descending order so that
// the most recent task comes first.
slices.SortFunc[[]swarm.Task](tasks, func(t1, t2 swarm.Task) int {
return int(t2.Meta.CreatedAt.Unix() - t1.Meta.CreatedAt.Unix())
})
lastTask := tasks[0].Status
if lastTask.State != swarm.TaskStateRunning {
for _, task := range tasks {
log.Errorf("[%s] %s State %s: %s", service.Spec.Name, task.Meta.CreatedAt.Format(time.RFC3339), task.Status.State, task.Status.Err)
}
}
}
// Collect the logs in a goroutine, so the logs from all services are
// collected in parallel.
wg.Add(1)
go func(serviceID string) {
logs, err := cl.ServiceLogs(context.Background(), serviceID, containerTypes.LogsOptions{
ShowStderr: true,
ShowStdout: !internal.StdErrOnly,
Since: internal.SinceLogs,
Until: "",
Timestamps: true,
Follow: true,
Tail: "20",
Details: false,
})
if err != nil {
log.Fatal(err)
}
defer logs.Close()
_, err = io.Copy(os.Stdout, logs)
if err != nil && err != io.EOF {
log.Fatal(err)
}
}(service.ID)
}
// Wait for all log streams to be closed.
wg.Wait()
return nil
}
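
The comparator above is easy to misread, so here is a minimal, self-contained sketch of the same newest-first ordering. Only the slices.SortFunc comparator is taken from the diff; the sample tasks and timestamps are invented for illustration.

```go
package main

import (
	"fmt"
	"slices"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	now := time.Now()
	tasks := []swarm.Task{
		{Meta: swarm.Meta{CreatedAt: now.Add(-2 * time.Hour)}},
		{Meta: swarm.Meta{CreatedAt: now}},
	}
	// t2 - t1 (rather than t1 - t2) gives descending order: newest first.
	slices.SortFunc(tasks, func(t1, t2 swarm.Task) int {
		return int(t2.Meta.CreatedAt.Unix() - t1.Meta.CreatedAt.Unix())
	})
	fmt.Println(tasks[0].Meta.CreatedAt.Equal(now)) // true: newest task is first
}
```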

View File

@ -1,14 +1,29 @@
package app
import (
"context"
"fmt"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/app"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"github.com/urfave/cli"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/secret"
"github.com/AlecAivazis/survey/v2"
"github.com/charmbracelet/lipgloss/table"
dockerClient "github.com/docker/docker/client"
"github.com/urfave/cli/v3"
)
var appNewDescription = `
Takes a recipe and uses it to create a new app. This new app configuration is
stored in your ~/.abra directory under the appropriate server.
var appNewDescription = `Creates a new app from a default recipe.
This new app configuration is stored in your $ABRA_DIR directory under the
appropriate server.
This command does not deploy your app for you. You will need to run "abra app
deploy <domain>" to do so.
@ -16,6 +31,8 @@ deploy <domain>" to do so.
You can see what recipes are available (i.e. values for the <recipe> argument)
by running "abra recipe ls".
Recipe commit hashes are supported values for "[<version>]".
Passing the "--secrets/-S" flag will automatically generate secrets for your
app and store them encrypted at rest on the chosen target server. These
generated secrets are only visible at generation time, so please take care to
@ -23,24 +40,284 @@ store them somewhere safe.
You can use the "--pass/-P" to store these generated passwords locally in a
pass store (see passwordstore.org for more). The pass command must be available
on your $PATH.
`
on your $PATH.`
var appNewCommand = cli.Command{
Name: "new",
Aliases: []string{"n"},
Usage: "Create a new app",
UsageText: "abra app new [<recipe>] [<version>] [options]",
Description: appNewDescription,
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.NewAppServerFlag,
internal.DomainFlag,
internal.PassFlag,
internal.SecretsFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
HideHelp: true,
ShellComplete: func(ctx context.Context, cmd *cli.Command) {
args := cmd.Args()
switch args.Len() {
case 0:
autocomplete.RecipeNameComplete(ctx, cmd)
case 1:
autocomplete.RecipeVersionComplete(cmd.Args().Get(0))
}
},
Action: func(ctx context.Context, cmd *cli.Command) error {
recipe := internal.ValidateRecipe(cmd)
var version string
if !internal.Chaos {
if err := recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(); err != nil {
log.Fatal(err)
}
}
if cmd.Args().Get(1) == "" {
recipeVersions, err := recipe.GetRecipeVersions()
if err != nil {
log.Fatal(err)
}
if len(recipeVersions) > 0 {
latest := recipeVersions[len(recipeVersions)-1]
for tag := range latest {
version = tag
}
if _, err := recipe.EnsureVersion(version); err != nil {
log.Fatal(err)
}
} else {
if err := recipe.EnsureLatest(); err != nil {
log.Fatal(err)
}
}
} else {
version = cmd.Args().Get(1)
if _, err := recipe.EnsureVersion(version); err != nil {
log.Fatal(err)
}
}
}
if err := ensureServerFlag(); err != nil {
log.Fatal(err)
}
if err := ensureDomainFlag(recipe, internal.NewAppServer); err != nil {
log.Fatal(err)
}
sanitisedAppName := appPkg.SanitiseAppName(internal.Domain)
log.Debugf("%s sanitised as %s for new app", internal.Domain, sanitisedAppName)
if err := appPkg.TemplateAppEnvSample(
recipe,
internal.Domain,
internal.NewAppServer,
internal.Domain,
); err != nil {
log.Fatal(err)
}
var secrets AppSecrets
var secretsTable *table.Table
if internal.Secrets {
sampleEnv, err := recipe.SampleEnv()
if err != nil {
log.Fatal(err)
}
composeFiles, err := recipe.GetComposeFiles(sampleEnv)
if err != nil {
log.Fatal(err)
}
secretsConfig, err := secret.ReadSecretsConfig(recipe.SampleEnvPath, composeFiles, appPkg.StackName(internal.Domain))
if err != nil {
return err
}
if err := promptForSecrets(recipe.Name, secretsConfig); err != nil {
log.Fatal(err)
}
cl, err := client.New(internal.NewAppServer)
if err != nil {
log.Fatal(err)
}
secrets, err = createSecrets(cl, secretsConfig, sanitisedAppName)
if err != nil {
log.Fatal(err)
}
secretsTable, err = formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
headers := []string{"NAME", "VALUE"}
secretsTable.Headers(headers...)
for name, val := range secrets {
secretsTable.Row(name, val)
}
}
if internal.NewAppServer == "default" {
internal.NewAppServer = "local"
}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
headers := []string{"SERVER", "DOMAIN", "RECIPE", "VERSION"}
table.Headers(headers...)
table.Row(internal.NewAppServer, internal.Domain, recipe.Name, version)
log.Infof("new app '%s' created 🌞", recipe.Name)
fmt.Println("")
fmt.Println(table)
fmt.Println("")
fmt.Println("Configure this app:")
fmt.Println(fmt.Sprintf("\n abra app config %s", internal.Domain))
fmt.Println("")
fmt.Println("Deploy this app:")
fmt.Println(fmt.Sprintf("\n abra app deploy %s", internal.Domain))
if len(secrets) > 0 {
fmt.Println("")
fmt.Println("Generated secrets:")
fmt.Println("")
fmt.Println(secretsTable)
log.Warnf(
"generated secrets %s shown again, please take note of them %s",
formatter.BoldStyle.Render("NOT"),
formatter.BoldStyle.Render("NOW"),
)
}
app, err := app.Get(internal.Domain)
if err != nil {
log.Fatal(err)
}
log.Debugf("choosing %s as version to save to env file", version)
if err := app.WriteRecipeVersion(version, false); err != nil {
log.Fatalf("writing new recipe version in env file: %s", err)
}
return nil
},
Before: internal.SubCommandBefore,
ArgsUsage: "[<recipe>]",
Action: internal.NewAction,
BashComplete: autocomplete.RecipeNameComplete,
}
// AppSecrets represents all app secrets.
type AppSecrets map[string]string
// createSecrets creates all secrets for a new app.
func createSecrets(cl *dockerClient.Client, secretsConfig map[string]secret.Secret, sanitisedAppName string) (AppSecrets, error) {
// NOTE(d1): trim to match app.StackName() implementation
if len(sanitisedAppName) > config.MAX_SANITISED_APP_NAME_LENGTH {
log.Debugf("trimming %s to %s to avoid runtime limits", sanitisedAppName, sanitisedAppName[:config.MAX_SANITISED_APP_NAME_LENGTH])
sanitisedAppName = sanitisedAppName[:config.MAX_SANITISED_APP_NAME_LENGTH]
}
secrets, err := secret.GenerateSecrets(cl, secretsConfig, internal.NewAppServer)
if err != nil {
return nil, err
}
if internal.Pass {
for secretName := range secrets {
secretValue := secrets[secretName]
if err := secret.PassInsertSecret(
secretValue,
secretName,
internal.Domain,
internal.NewAppServer,
); err != nil {
return nil, err
}
}
}
return secrets, nil
}
// ensureDomainFlag checks if the domain flag was used. If not, it asks the user for it.
func ensureDomainFlag(recipe recipePkg.Recipe, server string) error {
if internal.Domain == "" && !internal.NoInput {
prompt := &survey.Input{
Message: "Specify app domain",
Default: fmt.Sprintf("%s.%s", recipe.Name, server),
}
if err := survey.AskOne(prompt, &internal.Domain); err != nil {
return err
}
}
if internal.Domain == "" {
return fmt.Errorf("no domain provided")
}
return nil
}
// promptForSecrets asks if we should generate secrets for a new app.
func promptForSecrets(recipeName string, secretsConfig map[string]secret.Secret) error {
if len(secretsConfig) == 0 {
log.Debugf("%s has no secrets to generate, skipping...", recipeName)
return nil
}
if !internal.Secrets && !internal.NoInput {
prompt := &survey.Confirm{
Message: "Generate app secrets?",
}
if err := survey.AskOne(prompt, &internal.Secrets); err != nil {
return err
}
}
return nil
}
// ensureServerFlag checks if the server flag was used. If not, it asks the user for it.
func ensureServerFlag() error {
servers, err := config.GetServers()
if err != nil {
return err
}
if internal.NewAppServer == "" && !internal.NoInput {
prompt := &survey.Select{
Message: "Select app server:",
Options: servers,
}
if err := survey.AskOne(prompt, &internal.NewAppServer); err != nil {
return err
}
}
if internal.NewAppServer == "" {
return fmt.Errorf("no server provided")
}
return nil
}
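
ensureServerFlag, ensureDomainFlag and promptForSecrets all share one survey pattern: build a prompt, then survey.AskOne into the destination variable. A hedged, stand-alone sketch of that pattern, with hypothetical option values:

```go
package main

import (
	"fmt"

	"github.com/AlecAivazis/survey/v2"
)

func main() {
	// Select prompt, as in ensureServerFlag (server names are hypothetical).
	var server string
	sel := &survey.Select{
		Message: "Select app server:",
		Options: []string{"example.com", "example.org"},
	}
	if err := survey.AskOne(sel, &server); err != nil {
		fmt.Println(err)
		return
	}

	// Confirm prompt, as in promptForSecrets.
	var generate bool
	confirm := &survey.Confirm{Message: "Generate app secrets?"}
	if err := survey.AskOne(confirm, &generate); err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(server, generate)
}
```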

View File

@ -2,100 +2,176 @@ package app
import (
"context"
"strings"
"time"
"encoding/json"
"fmt"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/service"
"coopcloud.tech/abra/pkg/log"
abraService "coopcloud.tech/abra/pkg/service"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/buger/goterm"
dockerFormatter "github.com/docker/cli/cli/command/formatter"
"github.com/docker/docker/api/types"
containerTypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var appPsCommand = cli.Command{
Name: "ps",
Aliases: []string{"p"},
Usage: "Check app status",
ArgsUsage: "<domain>",
Description: "Show a more detailed status output of a specific deployed app",
UsageText: "abra app ps <domain> [options]",
Description: "Show status of a deployed app.",
Flags: []cli.Flag{
internal.WatchFlag,
internal.DebugFlag,
internal.MachineReadableFlag,
internal.ChaosFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
}
if !internal.Watch {
showPSOutput(c, app, cl)
return nil
chaosVersion := config.CHAOS_DEFAULT
statuses, err := appPkg.GetAppStatuses([]appPkg.App{app}, true)
if err != nil {
log.Fatal(err)
}
if statusMeta, ok := statuses[app.StackName()]; ok {
if isChaos, exists := statusMeta["chaos"]; exists && isChaos == "true" {
chaosVersion, err = app.Recipe.ChaosVersion()
if err != nil {
log.Fatal(err)
}
}
}
goterm.Clear()
for {
goterm.MoveCursor(1, 1)
showPSOutput(c, app, cl)
goterm.Flush()
time.Sleep(2 * time.Second)
}
showPSOutput(app, cl, deployMeta.Version, chaosVersion)
return nil
},
}
// showPSOutput renders ps output.
func showPSOutput(c *cli.Context, app config.App, cl *dockerClient.Client) {
filters, err := app.Filters(true, true)
func showPSOutput(app appPkg.App, cl *dockerClient.Client, deployedVersion, chaosVersion string) {
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
return
}
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters})
deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: app.StackName(),
Prune: false,
ResolveImage: stack.ResolveImageAlways,
}
compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
return
}
tableCol := []string{"service name", "image", "created", "status", "state", "ports"}
table := formatter.CreateTable(tableCol)
var rows [][]string
allContainerStats := make(map[string]map[string]string)
for _, service := range compose.Services {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name))
for _, container := range containers {
var containerNames []string
for _, containerName := range container.Names {
trimmed := strings.TrimPrefix(containerName, "/")
containerNames = append(containerNames, trimmed)
containers, err := cl.ContainerList(context.Background(), containerTypes.ListOptions{Filters: filters})
if err != nil {
log.Fatal(err)
return
}
tableRow := []string{
service.ContainerToServiceName(container.Names, app.StackName()),
formatter.RemoveSha(container.Image),
formatter.HumanDuration(container.Created),
container.Status,
container.State,
dockerFormatter.DisplayablePorts(container.Ports),
var containerStats map[string]string
if len(containers) == 0 {
containerStats = map[string]string{
"version": deployedVersion,
"chaos": chaosVersion,
"service": service.Name,
"image": "unknown",
"created": "unknown",
"status": "unknown",
"state": "unknown",
"ports": "unknown",
}
} else {
container := containers[0]
containerStats = map[string]string{
"version": deployedVersion,
"chaos": chaosVersion,
"service": abraService.ContainerToServiceName(container.Names, app.StackName()),
"image": formatter.RemoveSha(container.Image),
"created": formatter.HumanDuration(container.Created),
"status": container.Status,
"state": container.State,
"ports": dockerFormatter.DisplayablePorts(container.Ports),
}
}
table.Append(tableRow)
allContainerStats[containerStats["service"]] = containerStats
row := []string{
containerStats["service"],
containerStats["image"],
containerStats["created"],
containerStats["status"],
containerStats["state"],
containerStats["ports"],
}
rows = append(rows, row)
}
table.Render()
if internal.MachineReadable {
jsonstring, err := json.Marshal(allContainerStats)
if err != nil {
log.Fatal("unable to convert to JSON: %s", err)
}
fmt.Println(string(jsonstring))
return
}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
headers := []string{
"SERVICE",
"IMAGE",
"CREATED",
"STATUS",
"STATE",
"PORTS",
}
table.
Headers(headers...).
Rows(rows...)
fmt.Println(table)
log.Infof("VERSION: %s CHAOS: %s", deployedVersion, chaosVersion)
}
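
The table-or-JSON branch in showPSOutput recurs across several commands in this diff. Here is a sketch of that shared shape, using only the formatter calls shown above (CreateTable, Headers, Row, ToJSON); the helper name and sample rows are hypothetical:

```go
package app

import (
	"fmt"

	"coopcloud.tech/abra/cli/internal"
	"coopcloud.tech/abra/pkg/formatter"
	"coopcloud.tech/abra/pkg/log"
)

// renderRows is a hypothetical helper showing the shared output pattern.
func renderRows(headers []string, rows [][]string) {
	if internal.MachineReadable {
		out, err := formatter.ToJSON(headers, rows)
		if err != nil {
			log.Fatalf("unable to render to JSON: %s", err)
		}
		fmt.Println(out)
		return
	}
	table, err := formatter.CreateTable()
	if err != nil {
		log.Fatal(err)
	}
	table.Headers(headers...)
	for _, row := range rows {
		table.Row(row...)
	}
	fmt.Println(table)
}
```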

View File

@ -8,71 +8,78 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/log"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/AlecAivazis/survey/v2"
"github.com/docker/docker/api/types"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
// Volumes stores the variable from VolumesFlag
var Volumes bool
// VolumesFlag is used to specify if volumes should be deleted when deleting an app
var VolumesFlag = &cli.BoolFlag{
Name: "volumes, V",
Destination: &Volumes,
}
var appRemoveCommand = cli.Command{
Name: "remove",
Aliases: []string{"rm"},
ArgsUsage: "<domain>",
Usage: "Remove an already undeployed app",
UsageText: "abra app remove <domain> [options]",
Usage: "Remove all app data, locally and remotely",
Description: `Remove everything related to an app which is already undeployed.
By default, it will prompt for confirmation before proceeding. All secrets,
volumes and the local app env file will be deleted.
Only run this command when you are sure you want to completely remove the app
and all associated app data. This is a destructive action, so be careful!
If you would like to delete specific volumes or secrets, please use removal
sub-commands under "app volume" and "app secret" instead.
Please note, if you delete the local app env file without removing volumes and
secrets first, Abra will *not* be able to help you remove them afterwards.
To delete everything without prompt, use the "--force/-f" or the "--no-input/-n"
flag.`,
Flags: []cli.Flag{
VolumesFlag,
internal.ForceFlag,
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
ShellComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if !internal.Force && !internal.NoInput {
log.Warnf("ALERTA ALERTA: this will completely remove %s data and config locally and remotely", app.Name)
response := false
prompt := &survey.Confirm{
Message: fmt.Sprintf("about to remove %s, are you sure?", app.Name),
}
prompt := &survey.Confirm{Message: "are you sure?"}
if err := survey.AskOne(prompt, &response); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if !response {
logrus.Fatal("aborting as requested")
log.Fatal("aborting as requested")
}
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if isDeployed {
logrus.Fatalf("%s is still deployed. Run \"abra app undeploy %s\"", app.Name, app.Name)
if deployMeta.IsDeployed {
log.Fatalf("%s is still deployed. Run \"abra app undeploy %s\"", app.Name, app.Name)
}
fs, err := app.Filters(false, false)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: fs})
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
secrets := make(map[string]string)
@ -84,91 +91,45 @@ var appRemoveCommand = cli.Command{
}
if len(secrets) > 0 {
var secretNamesToRemove []string
if !internal.Force && !internal.NoInput {
secretsPrompt := &survey.MultiSelect{
Message: "which secrets do you want to remove?",
Help: "'x' indicates selected, enter / return to confirm, ctrl-c to exit, vim mode is enabled",
VimMode: true,
Options: secretNames,
Default: secretNames,
}
if err := survey.AskOne(secretsPrompt, &secretNamesToRemove); err != nil {
logrus.Fatal(err)
}
}
if internal.Force || internal.NoInput {
secretNamesToRemove = secretNames
}
for _, name := range secretNamesToRemove {
for _, name := range secretNames {
err := cl.SecretRemove(context.Background(), secrets[name])
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
logrus.Info(fmt.Sprintf("secret: %s removed", name))
log.Info(fmt.Sprintf("secret: %s removed", name))
}
} else {
logrus.Info("no secrets to remove")
log.Info("no secrets to remove")
}
fs, err = app.Filters(false, true)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
volumeListOKBody, err := cl.VolumeList(context.Background(), fs)
volumeList := volumeListOKBody.Volumes
volumeList, err := client.GetVolumes(cl, context.Background(), app.Server, fs)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
volumeNames := client.GetVolumeNames(volumeList)
var vols []string
for _, vol := range volumeList {
vols = append(vols, vol.Name)
}
if len(vols) > 0 {
if Volumes {
var removeVols []string
if !internal.Force && !internal.NoInput {
volumesPrompt := &survey.MultiSelect{
Message: "which volumes do you want to remove?",
Help: "'x' indicates selected, enter / return to confirm, ctrl-c to exit, vim mode is enabled",
VimMode: true,
Options: vols,
Default: vols,
}
if err := survey.AskOne(volumesPrompt, &removeVols); err != nil {
logrus.Fatal(err)
}
}
for _, vol := range removeVols {
err := cl.VolumeRemove(context.Background(), vol, internal.Force) // last argument is for force removing
if err != nil {
logrus.Fatal(err)
}
logrus.Info(fmt.Sprintf("volume %s removed", vol))
}
} else {
logrus.Info("no volumes were removed")
if len(volumeNames) > 0 {
err := client.RemoveVolumes(cl, context.Background(), volumeNames, internal.Force, 5)
if err != nil {
log.Fatalf("removing volumes failed: %s", err)
}
log.Infof("%d volumes removed successfully", len(volumeNames))
} else {
if Volumes {
logrus.Info("no volumes to remove")
}
log.Info("no volumes to remove")
}
err = os.Remove(app.Path)
if err != nil {
logrus.Fatal(err)
if err = os.Remove(app.Path); err != nil {
log.Fatal(err)
}
logrus.Info(fmt.Sprintf("file: %s removed", app.Path))
log.Info(fmt.Sprintf("file: %s removed", app.Path))
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
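
Because the old interactive volume prompt and the new client helpers are interleaved in the diff above, here is the new removal flow in one piece. This is a sketch built only from the calls shown (GetVolumes, GetVolumeNames, RemoveVolumes); reading the trailing 5 as a retry count is an assumption.

```go
package app

import (
	"context"

	"coopcloud.tech/abra/cli/internal"
	appPkg "coopcloud.tech/abra/pkg/app"
	"coopcloud.tech/abra/pkg/client"
	"coopcloud.tech/abra/pkg/log"
	dockerClient "github.com/docker/docker/client"
)

// removeAppVolumes is a hypothetical helper condensing the new flow above.
func removeAppVolumes(cl *dockerClient.Client, app appPkg.App) {
	fs, err := app.Filters(false, true)
	if err != nil {
		log.Fatal(err)
	}
	volumeList, err := client.GetVolumes(cl, context.Background(), app.Server, fs)
	if err != nil {
		log.Fatal(err)
	}
	volumeNames := client.GetVolumeNames(volumeList)
	if len(volumeNames) == 0 {
		log.Info("no volumes to remove")
		return
	}
	// The final argument mirrors the diff; it is presumably a retry count.
	if err := client.RemoveVolumes(cl, context.Background(), volumeNames, internal.Force, 5); err != nil {
		log.Fatalf("removing volumes failed: %s", err)
	}
	log.Infof("%d volumes removed successfully", len(volumeNames))
}
```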

View File

@ -6,65 +6,100 @@ import (
"fmt"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/log"
upstream "coopcloud.tech/abra/pkg/upstream/service"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var appRestartCommand = cli.Command{
Name: "restart",
Aliases: []string{"re"},
Usage: "Restart an app",
ArgsUsage: "<domain>",
UsageText: "abra app restart <domain> [<service>] [options]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.AllServicesFlag,
},
Before: internal.SubCommandBefore,
Description: `This command restarts a service within a deployed app.`,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
Before: internal.SubCommandBefore,
Description: `This command restarts services within a deployed app.
serviceNameShort := c.Args().Get(1)
if serviceNameShort == "" {
err := errors.New("missing service?")
internal.ShowSubcommandHelpAndError(c, err)
Run "abra app ps <domain>" to see a list of service names.
Pass "--all-services/-a" to restart all services.`,
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if err := app.Recipe.Ensure(false, false); err != nil {
log.Fatal(err)
}
serviceName := cmd.Args().Get(1)
if serviceName == "" && !internal.AllServices {
err := errors.New("missing <service>")
internal.ShowSubcommandHelpAndError(cmd, err)
}
if serviceName != "" && internal.AllServices {
log.Fatal("cannot use <service> and --all-services together")
}
var serviceNames []string
if internal.AllServices {
var err error
serviceNames, err = appPkg.GetAppServiceNames(app.Name)
if err != nil {
log.Fatal(err)
}
} else {
serviceNames = append(serviceNames, serviceName)
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
serviceName := fmt.Sprintf("%s_%s", app.StackName(), serviceNameShort)
logrus.Debugf("attempting to scale %s to 0 (restart logic)", serviceName)
if err := upstream.RunServiceScale(context.Background(), cl, serviceName, 0); err != nil {
logrus.Fatal(err)
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
log.Fatal(err)
}
if err := stack.WaitOnService(context.Background(), cl, serviceName, app.Name); err != nil {
logrus.Fatal(err)
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
}
logrus.Debugf("%s has been scaled to 0 (restart logic)", serviceName)
for _, serviceName := range serviceNames {
stackServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)
logrus.Debugf("attempting to scale %s to 1 (restart logic)", serviceName)
if err := upstream.RunServiceScale(context.Background(), cl, serviceName, 1); err != nil {
logrus.Fatal(err)
log.Debugf("attempting to scale %s to 0", stackServiceName)
if err := upstream.RunServiceScale(context.Background(), cl, stackServiceName, 0); err != nil {
log.Fatal(err)
}
if err := stack.WaitOnService(context.Background(), cl, stackServiceName, app.Name); err != nil {
log.Fatal(err)
}
log.Debugf("%s has been scaled to 0", stackServiceName)
log.Debugf("attempting to scale %s to 1", stackServiceName)
if err := upstream.RunServiceScale(context.Background(), cl, stackServiceName, 1); err != nil {
log.Fatal(err)
}
if err := stack.WaitOnService(context.Background(), cl, stackServiceName, app.Name); err != nil {
log.Fatal(err)
}
log.Debugf("%s has been scaled to 1", stackServiceName)
log.Infof("%s service successfully restarted", serviceName)
}
if err := stack.WaitOnService(context.Background(), cl, serviceName, app.Name); err != nil {
logrus.Fatal(err)
}
logrus.Debugf("%s has been scaled to 1 (restart logic)", serviceName)
logrus.Infof("%s service successfully restarted", serviceNameShort)
return nil
},
}
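
The restart loop above repeats the same scale/wait pair for 0 and then 1. A condensed sketch of that pattern follows; the uint64 replica type is an assumption, since the diff only shows the literals 0 and 1:

```go
package app

import (
	"context"

	"coopcloud.tech/abra/pkg/log"
	upstream "coopcloud.tech/abra/pkg/upstream/service"
	stack "coopcloud.tech/abra/pkg/upstream/stack"
	dockerClient "github.com/docker/docker/client"
)

// restartService is a hypothetical helper mirroring the loop body above.
func restartService(cl *dockerClient.Client, stackServiceName, appName string) error {
	for _, replicas := range []uint64{0, 1} { // replica type assumed uint64
		if err := upstream.RunServiceScale(context.Background(), cl, stackServiceName, replicas); err != nil {
			return err
		}
		if err := stack.WaitOnService(context.Background(), cl, stackServiceName, appName); err != nil {
			return err
		}
		log.Debugf("%s has been scaled to %d", stackServiceName, replicas)
	}
	return nil
}
```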

View File

@ -2,200 +2,64 @@ package app
import (
"context"
"errors"
"fmt"
"os"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"coopcloud.tech/abra/pkg/log"
"github.com/urfave/cli/v3"
)
type restoreConfig struct {
preHookCmd string
postHookCmd string
var targetPath string
var targetPathFlag = &cli.StringFlag{
Name: "target",
Aliases: []string{"t"},
Usage: "Target path",
Destination: &targetPath,
}
var appRestoreCommand = cli.Command{
Name: "restore",
Aliases: []string{"rs"},
Usage: "Run app restore",
ArgsUsage: "<domain> <service> <file>",
Usage: "Restore an app backup",
UsageText: "abra app restore <domain> <service> [options]",
Flags: []cli.Flag{
internal.DebugFlag,
targetPathFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Description: `
Run an app restore.
Pre/post hook commands are defined in the recipe configuration. Abra reads this
configuration and runs the commands in the context of the service before
restoring the backup.
Unlike "abra app backup", restore must be run on a per-service basis. You
cannot restore all services in one go. Backup files produced by Abra are
compressed archives which use absolute paths. This allows Abra to restore
according to standard tar command logic.
Example:
abra app restore example.com app ~/.abra/backups/example_com_app_609341138.tar.gz
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
serviceName := c.Args().Get(1)
if serviceName == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("missing <service>?"))
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
backupPath := c.Args().Get(2)
if backupPath == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("missing <file>?"))
}
if _, err := os.Stat(backupPath); err != nil {
if os.IsNotExist(err) {
logrus.Fatalf("%s doesn't exist?", backupPath)
}
}
recipe, err := recipe.Get(app.Recipe)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
restoreConfigs := make(map[string]restoreConfig)
for _, service := range recipe.Config.Services {
if restoreEnabled, ok := service.Deploy.Labels["backupbot.restore"]; ok {
if restoreEnabled == "true" {
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), service.Name)
rsConfig := restoreConfig{}
logrus.Debugf("restore config detected for %s", fullServiceName)
if preHookCmd, ok := service.Deploy.Labels["backupbot.restore.pre-hook"]; ok {
logrus.Debugf("detected pre-hook command for %s: %s", fullServiceName, preHookCmd)
rsConfig.preHookCmd = preHookCmd
}
if postHookCmd, ok := service.Deploy.Labels["backupbot.restore.post-hook"]; ok {
logrus.Debugf("detected post-hook command for %s: %s", fullServiceName, postHookCmd)
rsConfig.postHookCmd = postHookCmd
}
restoreConfigs[service.Name] = rsConfig
}
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
log.Fatal(err)
}
rsConfig, ok := restoreConfigs[serviceName]
if !ok {
rsConfig = restoreConfig{}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
log.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if err := runRestore(app, backupPath, serviceName, rsConfig); err != nil {
logrus.Fatal(err)
if targetPath != "" {
log.Debugf("including TARGET=%s in backupbot exec invocation", targetPath)
execEnv = append(execEnv, fmt.Sprintf("TARGET=%s", targetPath))
}
if err := internal.RunBackupCmdRemote(cl, "restore", targetContainer.ID, execEnv); err != nil {
log.Fatal(err)
}
return nil
},
}
// runRestore does the actual restore logic.
func runRestore(app config.App, backupPath, serviceName string, rsConfig restoreConfig) error {
cl, err := client.New(app.Server)
if err != nil {
return err
}
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, true)
if err != nil {
return err
}
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)
if rsConfig.preHookCmd != "" {
splitCmd := internal.SafeSplit(rsConfig.preHookCmd)
logrus.Debugf("split pre-hook command for %s into %s", fullServiceName, splitCmd)
preHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &preHookExecOpts); err != nil {
return err
}
logrus.Infof("succesfully ran %s pre-hook command: %s", fullServiceName, rsConfig.preHookCmd)
}
backupReader, err := os.Open(backupPath)
if err != nil {
return err
}
content, err := archive.DecompressStream(backupReader)
if err != nil {
return err
}
// We use absolute paths so tar knows what to do. It will restore files
// according to the paths set in the compressed archive.
restorePath := "/"
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), targetContainer.ID, restorePath, content, copyOpts); err != nil {
return err
}
logrus.Infof("restored %s to %s", backupPath, fullServiceName)
if rsConfig.postHookCmd != "" {
splitCmd := internal.SafeSplit(rsConfig.postHookCmd)
logrus.Debugf("split post-hook command for %s into %s", fullServiceName, splitCmd)
postHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &postHookExecOpts); err != nil {
return err
}
logrus.Infof("succesfully ran %s post-hook command: %s", fullServiceName, rsConfig.postHookCmd)
}
return nil
}
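
The new restore action replaces the old copy-into-container logic with a remote backupbot invocation, parameterised entirely through KEY=VALUE exec environment variables. A tiny stand-alone sketch of how that environment is assembled (the values are made up; the handoff to internal.RunBackupCmdRemote is as shown above):

```go
package main

import "fmt"

func main() {
	execEnv := []string{fmt.Sprintf("SERVICE=%s", "example.com")} // hypothetical domain
	snapshot := "abcd1234"   // hypothetical snapshot ID
	targetPath := "/var/lib" // hypothetical --target value
	if snapshot != "" {
		execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
	}
	if targetPath != "" {
		execEnv = append(execEnv, fmt.Sprintf("TARGET=%s", targetPath))
	}
	// execEnv is then passed to internal.RunBackupCmdRemote(cl, "restore",
	// targetContainer.ID, execEnv), as in the diff above.
	fmt.Println(execEnv)
}
```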

View File

@ -4,189 +4,221 @@ import (
"context"
"fmt"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/recipe"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"coopcloud.tech/tagcmp"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/log"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var appRollbackCommand = cli.Command{
Name: "rollback",
Aliases: []string{"rl"},
Usage: "Roll an app back to a previous version",
ArgsUsage: "<domain>",
UsageText: "abra app rollback <domain> [<version>] [options]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.ForceFlag,
internal.ChaosFlag,
internal.NoDomainChecksFlag,
internal.DontWaitConvergeFlag,
},
Before: internal.SubCommandBefore,
Description: `
This command rolls an app back to a previous version if one exists.
Description: `This command rolls an app back to a previous version.
You may pass "--force/-f" to downgrade to the same version again. This can be
useful if the container runtime has gotten into a weird state.
Unlike "deploy", chaos operations are not supported here. Only recipe versions
are supported values for "[<version>]".
This action could be destructive, please ensure you have a copy of your app
data beforehand.
A rollback can be destructive, please ensure you have a copy of your app data
beforehand.`,
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
var warnMessages []string
Chaos mode ("--chaos") will deploy your local checkout of a recipe as-is,
including unstaged changes and can be useful for live hacking and testing new
recipes.
`,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
app := internal.ValidateApp(cmd)
stackName := app.StackName()
if !internal.Chaos {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
specificVersion := cmd.Args().Get(1)
if specificVersion != "" {
log.Debugf("overriding env file version (%s) with %s", app.Recipe.Version, specificVersion)
app.Recipe.Version = specificVersion
}
r, err := recipe.Get(app.Recipe)
if err != nil {
logrus.Fatal(err)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
if err := lint.LintForErrors(r); err != nil {
logrus.Fatal(err)
if err := lint.LintForErrors(app.Recipe); err != nil {
log.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
logrus.Debugf("checking whether %s is already deployed", stackName)
log.Debugf("checking whether %s is already deployed", stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
}
catl, err := recipe.ReadRecipeCatalogue()
versions, err := app.Recipe.Tags()
if err != nil {
logrus.Fatal(err)
}
versions, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
if err != nil {
logrus.Fatal(err)
}
if len(versions) == 0 && !internal.Chaos {
logrus.Fatalf("no published releases for %s in the recipe catalogue?", app.Recipe)
log.Fatal(err)
}
var availableDowngrades []string
if deployedVersion == "unknown" {
if deployMeta.Version == "unknown" {
availableDowngrades = versions
logrus.Warnf("failed to determine version of deployed %s", app.Name)
warnMessages = append(warnMessages, fmt.Sprintf("failed to determine deployed version of %s", app.Name))
}
if deployedVersion != "unknown" && !internal.Chaos {
if specificVersion != "" {
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
if err != nil {
log.Fatalf("'%s' is not a known version for %s", deployMeta.Version, app.Recipe.Name)
}
parsedSpecificVersion, err := tagcmp.Parse(specificVersion)
if err != nil {
log.Fatalf("'%s' is not a known version for %s", specificVersion, app.Recipe.Name)
}
if parsedSpecificVersion.IsGreaterThan(parsedDeployedVersion) && !parsedSpecificVersion.Equals(parsedDeployedVersion) {
log.Fatalf("%s is not a downgrade for %s?", deployMeta.Version, specificVersion)
}
if parsedSpecificVersion.Equals(parsedDeployedVersion) && !internal.Force {
log.Fatalf("%s is not a downgrade for %s?", deployMeta.Version, specificVersion)
}
availableDowngrades = append(availableDowngrades, specificVersion)
}
if deployMeta.Version != "unknown" && specificVersion == "" {
if deployMeta.IsChaos {
warnMessages = append(warnMessages, "attempting to rollback a chaos deployment")
}
for _, version := range versions {
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if parsedVersion != parsedDeployedVersion && parsedVersion.IsLessThan(parsedDeployedVersion) {
if parsedVersion.IsLessThan(parsedDeployedVersion) && !(parsedVersion.Equals(parsedDeployedVersion)) {
availableDowngrades = append(availableDowngrades, version)
}
}
if len(availableDowngrades) == 0 {
logrus.Info("no available downgrades, you're on oldest ✌️")
if len(availableDowngrades) == 0 && !internal.Force {
log.Info("no available downgrades")
return nil
}
}
availableDowngrades = internal.ReverseStringList(availableDowngrades)
var chosenDowngrade string
if !internal.Chaos {
if internal.Force || internal.NoInput {
chosenDowngrade = availableDowngrades[0]
logrus.Debugf("choosing %s as version to downgrade to (--force)", chosenDowngrade)
if len(availableDowngrades) > 0 {
if internal.Force || internal.NoInput || specificVersion != "" {
chosenDowngrade = availableDowngrades[len(availableDowngrades)-1]
log.Debugf("choosing %s as version to downgrade to (--force/--no-input)", chosenDowngrade)
} else {
prompt := &survey.Select{
Message: fmt.Sprintf("Please select a downgrade (current version: %s):", deployedVersion),
Options: availableDowngrades,
msg := fmt.Sprintf("please select a downgrade (version: %s):", deployMeta.Version)
if deployMeta.IsChaos {
msg = fmt.Sprintf("please select a downgrade (version: %s, chaosVersion: %s):", deployMeta.Version, deployMeta.ChaosVersion)
}
prompt := &survey.Select{
Message: msg,
Options: internal.ReverseStringList(availableDowngrades),
}
if err := survey.AskOne(prompt, &chosenDowngrade); err != nil {
return err
}
}
}
if !internal.Chaos {
if err := recipe.EnsureVersion(app.Recipe, chosenDowngrade); err != nil {
logrus.Fatal(err)
}
log.Debugf("choosing %s as version to rollback", chosenDowngrade)
if _, err := app.Recipe.EnsureVersion(chosenDowngrade); err != nil {
log.Fatal(err)
}
if internal.Chaos {
logrus.Warn("chaos mode engaged")
var err error
chosenDowngrade, err = recipe.ChaosVersion(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
abraShEnv, err := envfile.ReadAbraShEnvVars(app.Recipe.AbraShPath)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
for k, v := range abraShEnv {
app.Env[k] = v
}
composeFiles, err := config.GetAppComposeFiles(app.Recipe, app.Env)
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: stackName,
Prune: false,
ResolveImage: stack.ResolveImageAlways,
Detach: false,
}
compose, err := config.GetAppComposeConfig(app.Name, deployOpts, app.Env)
compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if !internal.Force {
if err := internal.NewVersionOverview(app, deployedVersion, chosenDowngrade, ""); err != nil {
logrus.Fatal(err)
}
appPkg.ExposeAllEnv(stackName, compose, app.Env)
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
appPkg.SetChaosVersionLabel(compose, stackName, chosenDowngrade)
appPkg.SetUpdateLabel(compose, stackName, app.Env)
chaosVersion := config.CHAOS_DEFAULT
if deployMeta.IsChaos {
chaosVersion = deployMeta.ChaosVersion
}
if err := stack.RunDeploy(cl, deployOpts, compose, app.StackName(), internal.DontWaitConverge); err != nil {
logrus.Fatal(err)
// NOTE(d1): no release notes implemented for rolling back
if err := internal.NewVersionOverview(
app,
warnMessages,
"rollback",
deployMeta.Version,
chaosVersion,
chosenDowngrade,
""); err != nil {
log.Fatal(err)
}
if err := stack.RunDeploy(cl, deployOpts, compose, stackName, internal.DontWaitConverge); err != nil {
log.Fatal(err)
}
app.Recipe.Version = chosenDowngrade
log.Debugf("choosing %s as version to save to env file", app.Recipe.Version)
if err := app.WriteRecipeVersion(app.Recipe.Version, false); err != nil {
log.Fatalf("writing new recipe version in env file: %s", err)
}
return nil
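
The downgrade filter above leans on coopcloud.tech/tagcmp. Here is a small sketch of the same comparison, using only the Parse, IsLessThan and Equals calls that appear in the diff; the version strings are invented:

```go
package main

import (
	"fmt"

	"coopcloud.tech/tagcmp"
)

func main() {
	deployed, err := tagcmp.Parse("2.1.0+1.21.4") // hypothetical deployed version
	if err != nil {
		panic(err)
	}
	candidate, err := tagcmp.Parse("2.0.0+1.20.3") // hypothetical catalogue version
	if err != nil {
		panic(err)
	}
	// Same test as the rollback loop: strictly older versions qualify.
	if candidate.IsLessThan(deployed) && !candidate.Equals(deployed) {
		fmt.Println("valid downgrade target")
	}
}
```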

View File

@ -9,24 +9,26 @@ import (
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var user string
var userFlag = &cli.StringFlag{
Name: "user, u",
Name: "user",
Aliases: []string{"u"},
Value: "",
Destination: &user,
}
var noTTY bool
var noTTYFlag = &cli.BoolFlag{
Name: "no-tty, t",
Name: "no-tty",
Aliases: []string{"t"},
Destination: &noTTY,
}
@ -34,46 +36,46 @@ var appRunCommand = cli.Command{
Name: "run",
Aliases: []string{"r"},
Flags: []cli.Flag{
internal.DebugFlag,
noTTYFlag,
userFlag,
},
Before: internal.SubCommandBefore,
ArgsUsage: "<domain> <service> <args>...",
Usage: "Run a command in a service container",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
Before: internal.SubCommandBefore,
Usage: "Run a command in an app service",
UsageText: "abra app run <domain> <service> <args> [options]",
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if len(c.Args()) < 2 {
internal.ShowSubcommandHelpAndError(c, errors.New("no <service> provided?"))
if cmd.Args().Len() < 2 {
internal.ShowSubcommandHelpAndError(cmd, errors.New("no <service> provided?"))
}
if len(c.Args()) < 3 {
internal.ShowSubcommandHelpAndError(c, errors.New("no <args> provided?"))
if cmd.Args().Len() < 3 {
internal.ShowSubcommandHelpAndError(cmd, errors.New("no <args> provided?"))
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
serviceName := c.Args().Get(1)
serviceName := cmd.Args().Get(1)
stackAndServiceName := fmt.Sprintf("^%s_%s", app.StackName(), serviceName)
filters := filters.NewArgs()
filters.Add("name", stackAndServiceName)
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, false)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
cmd := c.Args()[2:]
c := cmd.Args().Slice()[2:]
execCreateOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: cmd,
Cmd: c,
Detach: false,
Tty: true,
}
@ -88,11 +90,11 @@ var appRunCommand = cli.Command{
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
logrus.Fatal(err)
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
log.Fatal(err)
}
return nil

View File

@ -6,153 +6,212 @@ import (
"fmt"
"os"
"strconv"
"strings"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/secret"
"github.com/docker/docker/api/types"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var allSecrets bool
var allSecretsFlag = &cli.BoolFlag{
Name: "all, a",
Destination: &allSecrets,
Usage: "Generate all secrets",
}
var (
allSecrets bool
allSecretsFlag = &cli.BoolFlag{
Name: "all",
Aliases: []string{"a"},
Destination: &allSecrets,
Usage: "Generate all secrets",
}
)
var rmAllSecrets bool
var rmAllSecretsFlag = &cli.BoolFlag{
Name: "all, a",
Destination: &rmAllSecrets,
Usage: "Remove all secrets",
}
var (
rmAllSecrets bool
rmAllSecretsFlag = &cli.BoolFlag{
Name: "all",
Aliases: []string{"a"},
Destination: &rmAllSecrets,
Usage: "Remove all secrets",
}
)
var appSecretGenerateCommand = cli.Command{
Name: "generate",
Aliases: []string{"g"},
Usage: "Generate secrets",
ArgsUsage: "<domain> <secret> <version>",
UsageText: "abra app secret generate <domain> <secret> <version> [options]",
Flags: []cli.Flag{
internal.DebugFlag,
allSecretsFlag,
internal.PassFlag,
internal.MachineReadableFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
if len(c.Args()) == 1 && !allSecrets {
if cmd.Args().Len() == 1 && !allSecrets {
err := errors.New("missing arguments <secret>/<version> or '--all'")
internal.ShowSubcommandHelpAndError(c, err)
internal.ShowSubcommandHelpAndError(cmd, err)
}
if c.Args().Get(1) != "" && allSecrets {
if cmd.Args().Get(1) != "" && allSecrets {
err := errors.New("cannot use '<secret> <version>' and '--all' together")
internal.ShowSubcommandHelpAndError(c, err)
internal.ShowSubcommandHelpAndError(cmd, err)
}
secretsToCreate := make(map[string]string)
secretEnvVars := secret.ReadSecretEnvVars(app.Env)
if allSecrets {
secretsToCreate = secretEnvVars
} else {
secretName := c.Args().Get(1)
secretVersion := c.Args().Get(2)
matches := false
for sec := range secretEnvVars {
parsed := secret.ParseSecretEnvVarName(sec)
if secretName == parsed {
secretsToCreate[sec] = secretVersion
matches = true
}
}
if !matches {
logrus.Fatalf("%s doesn't exist in the env config?", secretName)
}
}
secretVals, err := secret.GenerateSecrets(secretsToCreate, app.StackName(), app.Server)
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
secrets, err := secret.ReadSecretsConfig(app.Path, composeFiles, app.StackName())
if err != nil {
log.Fatal(err)
}
if !allSecrets {
secretName := cmd.Args().Get(1)
secretVersion := cmd.Args().Get(2)
s, ok := secrets[secretName]
if !ok {
log.Fatalf("%s doesn't exist in the env config?", secretName)
}
s.Version = secretVersion
secrets = map[string]secret.Secret{
secretName: s,
}
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
secretVals, err := secret.GenerateSecrets(cl, secrets, app.Server)
if err != nil {
log.Fatal(err)
}
if internal.Pass {
for name, data := range secretVals {
if err := secret.PassInsertSecret(data, name, app.Name, app.Server); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
}
if len(secretVals) == 0 {
logrus.Warn("no secrets generated")
log.Warn("no secrets generated")
os.Exit(1)
}
tableCol := []string{"name", "value"}
table := formatter.CreateTable(tableCol)
for name, val := range secretVals {
table.Append([]string{name, val})
headers := []string{"NAME", "VALUE"}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
table.Render()
logrus.Warn("generated secrets are not shown again, please take note of them *now*")
table.Headers(headers...)
var rows [][]string
for name, val := range secretVals {
row := []string{name, val}
rows = append(rows, row)
table.Row(row...)
}
if internal.MachineReadable {
out, err := formatter.ToJSON(headers, rows)
if err != nil {
log.Fatal("unable to render to JSON: %s", err)
}
fmt.Println(out)
return nil
}
fmt.Println(table)
log.Warnf(
"generated secrets %s shown again, please take note of them %s",
formatter.BoldStyle.Render("NOT"),
formatter.BoldStyle.Render("NOW"),
)
return nil
},
}
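
Generating a single named secret now goes through the recipe's compose files rather than bare env vars. A hedged sketch condensing that flow from the calls shown above; the helper, secret name and version parameters are hypothetical:

```go
package app

import (
	"fmt"

	appPkg "coopcloud.tech/abra/pkg/app"
	"coopcloud.tech/abra/pkg/secret"
	dockerClient "github.com/docker/docker/client"
)

// generateOneSecret is a hypothetical helper condensing the flow above.
func generateOneSecret(cl *dockerClient.Client, app appPkg.App, name, version string) (map[string]string, error) {
	composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
	if err != nil {
		return nil, err
	}
	secrets, err := secret.ReadSecretsConfig(app.Path, composeFiles, app.StackName())
	if err != nil {
		return nil, err
	}
	s, ok := secrets[name]
	if !ok {
		return nil, fmt.Errorf("%s doesn't exist in the env config?", name)
	}
	s.Version = version
	return secret.GenerateSecrets(cl, map[string]secret.Secret{name: s}, app.Server)
}
```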
var appSecretInsertCommand = cli.Command{
Name: "insert",
Aliases: []string{"i"},
Usage: "Insert secret",
Name: "insert",
Aliases: []string{"i"},
Usage: "Insert secret",
UsageText: "abra app secret insert <domain> <secret> <version> <data> [options]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.PassFlag,
internal.FileFlag,
internal.TrimFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
ArgsUsage: "<domain> <secret-name> <version> <data>",
BashComplete: autocomplete.AppNameComplete,
Description: `
This command inserts a secret into an app environment.
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.AppNameComplete,
HideHelpCommand: true,
Description: `This command inserts a secret into an app environment.
This can be useful when you want to manually generate secrets for an app
environment. Typically, you can let Abra generate them for you on app creation
(see "abra app new --secrets" for more).
Example:
abra app secret insert myapp db_pass v1 mySecretPassword
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if len(c.Args()) != 4 {
internal.ShowSubcommandHelpAndError(c, errors.New("missing arguments?"))
(see "abra app new --secrets" for more).`,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
name := c.Args().Get(1)
version := c.Args().Get(2)
data := c.Args().Get(3)
if cmd.Args().Len() != 4 {
internal.ShowSubcommandHelpAndError(cmd, errors.New("missing arguments?"))
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
name := cmd.Args().Get(1)
version := cmd.Args().Get(2)
data := cmd.Args().Get(3)
if internal.File {
raw, err := os.ReadFile(data)
if err != nil {
log.Fatalf("reading secret from file: %s", err)
}
data = string(raw)
}
if internal.Trim {
data = strings.TrimSpace(data)
}
secretName := fmt.Sprintf("%s_%s_%s", app.StackName(), name, version)
if err := client.StoreSecret(secretName, data, app.Server); err != nil {
logrus.Fatal(err)
if err := client.StoreSecret(cl, secretName, data, app.Server); err != nil {
log.Fatal(err)
}
logrus.Infof("%s successfully stored on server", secretName)
log.Infof("%s successfully stored on server", secretName)
if internal.Pass {
if err := secret.PassInsertSecret(data, name, app.Name, app.Server); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
@ -161,69 +220,82 @@ Example:
}
// secretRm removes a secret.
func secretRm(cl *dockerClient.Client, app config.App, secretName, parsed string) error {
func secretRm(cl *dockerClient.Client, app appPkg.App, secretName, parsed string) error {
if err := cl.SecretRemove(context.Background(), secretName); err != nil {
return err
}
logrus.Infof("deleted %s successfully from server", secretName)
log.Infof("deleted %s successfully from server", secretName)
if internal.PassRemove {
if err := secret.PassRmSecret(parsed, app.StackName(), app.Server); err != nil {
return err
}
logrus.Infof("deleted %s successfully from local pass store", secretName)
log.Infof("deleted %s successfully from local pass store", secretName)
}
return nil
}
var appSecretRmCommand = cli.Command{
Name: "remove",
Aliases: []string{"rm"},
Usage: "Remove a secret",
Name: "remove",
Aliases: []string{"rm"},
Usage: "Remove a secret",
UsageText: "abra app remove <domainabra app remove <domain> [options]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
rmAllSecretsFlag,
internal.PassRemoveFlag,
internal.OfflineFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
ArgsUsage: "<domain> [<secret-name>]",
BashComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.AppNameComplete,
Description: `
This command removes app secrets.
Example:
abra app secret remove myapp db_pass
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
secrets := secret.ReadSecretEnvVars(app.Env)
if c.Args().Get(1) != "" && rmAllSecrets {
internal.ShowSubcommandHelpAndError(c, errors.New("cannot use '<secret-name>' and '--all' together"))
abra app secret remove myapp db_pass`,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
if c.Args().Get(1) == "" && !rmAllSecrets {
internal.ShowSubcommandHelpAndError(c, errors.New("no secret(s) specified?"))
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
log.Fatal(err)
}
secrets, err := secret.ReadSecretsConfig(app.Path, composeFiles, app.StackName())
if err != nil {
log.Fatal(err)
}
if cmd.Args().Get(1) != "" && rmAllSecrets {
internal.ShowSubcommandHelpAndError(cmd, errors.New("cannot use '<secret-name>' and '--all' together"))
}
if cmd.Args().Get(1) == "" && !rmAllSecrets {
internal.ShowSubcommandHelpAndError(cmd, errors.New("no secret(s) specified?"))
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
filters, err := app.Filters(false, false)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: filters})
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
remoteSecretNames := make(map[string]bool)
@ -232,21 +304,14 @@ Example:
}
match := false
secretToRm := c.Args().Get(1)
for sec := range secrets {
secretName := secret.ParseSecretEnvVarName(sec)
secVal, err := secret.ParseSecretEnvVarValue(secrets[sec])
if err != nil {
logrus.Fatal(err)
}
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, secVal.Version)
secretToRm := cmd.Args().Get(1)
for secretName, val := range secrets {
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, val.Version)
if _, ok := remoteSecretNames[secretRemoteName]; ok {
if secretToRm != "" {
if secretName == secretToRm {
if err := secretRm(cl, app, secretRemoteName, secretName); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
return nil
@ -255,18 +320,18 @@ Example:
match = true
if err := secretRm(cl, app, secretRemoteName, secretName); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
}
}
if !match && secretToRm != "" {
logrus.Fatalf("%s doesn't exist on server?", secretToRm)
log.Fatalf("%s doesn't exist on server?", secretToRm)
}
if !match {
logrus.Fatal("no secrets to remove?")
log.Fatal("no secrets to remove?")
}
return nil
@ -277,72 +342,81 @@ var appSecretLsCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
internal.ChaosFlag,
internal.MachineReadableFlag,
},
Before: internal.SubCommandBefore,
Usage: "List all secrets",
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
secrets := secret.ReadSecretEnvVars(app.Env)
tableCol := []string{"Name", "Version", "Generated Name", "Created On Server"}
table := formatter.CreateTable(tableCol)
Before: internal.SubCommandBefore,
Usage: "List all secrets",
UsageText: "abra app secret list [options]",
HideHelp: true,
ShellComplete: autocomplete.AppNameComplete,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
filters, err := app.Filters(false, false)
headers := []string{"NAME", "VERSION", "GENERATED NAME", "CREATED ON SERVER"}
table, err := formatter.CreateTable()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: filters})
table.Headers(headers...)
secStats, err := secret.PollSecretsStatus(cl, app)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
remoteSecretNames := make(map[string]bool)
for _, cont := range secretList {
remoteSecretNames[cont.Spec.Annotations.Name] = true
}
for sec := range secrets {
createdRemote := false
secretName := secret.ParseSecretEnvVarName(sec)
secVal, err := secret.ParseSecretEnvVarValue(secrets[sec])
if err != nil {
logrus.Fatal(err)
var rows [][]string
for _, secStat := range secStats {
row := []string{
secStat.LocalName,
secStat.Version,
secStat.RemoteName,
strconv.FormatBool(secStat.CreatedOnRemote),
}
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, secVal.Version)
if _, ok := remoteSecretNames[secretRemoteName]; ok {
createdRemote = true
}
tableRow := []string{secretName, secVal.Version, secretRemoteName, strconv.FormatBool(createdRemote)}
table.Append(tableRow)
rows = append(rows, row)
table.Row(row...)
}
if table.NumLines() > 0 {
table.Render()
} else {
logrus.Warnf("no secrets stored for %s", app.Name)
if len(rows) > 0 {
if internal.MachineReadable {
out, err := formatter.ToJSON(headers, rows)
if err != nil {
log.Fatal("unable to render to JSON: %s", err)
}
fmt.Println(out)
return nil
}
fmt.Println(table)
return nil
}
log.Warnf("no secrets stored for %s", app.Name)
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
var appSecretCommand = cli.Command{
Name: "secret",
Aliases: []string{"s"},
Usage: "Manage app secrets",
ArgsUsage: "<domain>",
Subcommands: []cli.Command{
appSecretGenerateCommand,
appSecretInsertCommand,
appSecretRmCommand,
appSecretLsCommand,
UsageText: "abra app secret [command] [arguments] [options]",
Commands: []*cli.Command{
&appSecretGenerateCommand,
&appSecretInsertCommand,
&appSecretRmCommand,
&appSecretLsCommand,
},
}
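Note: the matching logic above assumes Swarm secrets are named "<stack>_<secret>_<version>". A minimal, self-contained sketch of that convention (the stack, secret and version values here are made up for illustration):
package main
import "fmt"
// remoteSecretName mirrors the fmt.Sprintf("%s_%s_%s", ...) convention used
// above when matching local secret definitions against server-side secrets.
func remoteSecretName(stackName, secretName, version string) string {
	return fmt.Sprintf("%s_%s_%s", stackName, secretName, version)
}
func main() {
	fmt.Println(remoteSecretName("myapp_example_com", "db_pass", "v1"))
	// Output: myapp_example_com_db_pass_v1
}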

cli/app/services.go Normal file

@ -0,0 +1,93 @@
package app
import (
"context"
"fmt"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/service"
stack "coopcloud.tech/abra/pkg/upstream/stack"
containerTypes "github.com/docker/docker/api/types/container"
"github.com/urfave/cli/v3"
)
var appServicesCommand = cli.Command{
Name: "services",
Aliases: []string{"sr"},
Usage: "Display all services of an app",
UsageText: "abra app services <domain> [options]",
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
log.Fatal(err)
}
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
}
filters, err := app.Filters(true, true)
if err != nil {
log.Fatal(err)
}
containers, err := cl.ContainerList(context.Background(), containerTypes.ListOptions{Filters: filters})
if err != nil {
log.Fatal(err)
}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
headers := []string{"SERVICE (SHORT)", "SERVICE (LONG)", "IMAGE"}
table.Headers(headers...)
var rows [][]string
for _, container := range containers {
var containerNames []string
for _, containerName := range container.Names {
trimmed := strings.TrimPrefix(containerName, "/")
containerNames = append(containerNames, trimmed)
}
serviceShortName := service.ContainerToServiceName(container.Names, app.StackName())
serviceLongName := fmt.Sprintf("%s_%s", app.StackName(), serviceShortName)
row := []string{
serviceShortName,
serviceLongName,
formatter.RemoveSha(container.Image),
}
rows = append(rows, row)
}
table.Rows(rows...)
if len(rows) > 0 {
fmt.Println(table)
}
return nil
},
}
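service.ContainerToServiceName does the name mangling above; a rough sketch of the assumed behaviour (an illustration, not the actual implementation), turning a Swarm container name like "/myapp_com_app.1.xyz" into the short service name "app":
package main
import (
	"fmt"
	"strings"
)
// containerToServiceName strips the leading slash, the "<stack>_" prefix and
// the ".<slot>.<taskid>" suffix from a Swarm container name. Assumed shape.
func containerToServiceName(containerName, stackName string) string {
	name := strings.TrimPrefix(containerName, "/")
	name = strings.TrimPrefix(name, stackName+"_")
	if idx := strings.Index(name, "."); idx != -1 {
		name = name[:idx]
	}
	return name
}
func main() {
	fmt.Println(containerToServiceName("/myapp_com_app.1.xyz", "myapp_com")) // app
}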


@ -2,60 +2,128 @@ package app
import (
"context"
"fmt"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/urfave/cli/v3"
)
var prune bool
var pruneFlag = &cli.BoolFlag{
Name: "prune",
Aliases: []string{"p"},
Destination: &prune,
Usage: "Prunes unused containers, networks, and dangling images for an app",
}
// pruneApp runs the equivalent of a "docker system prune" but only filtering
// against resources connected with the app deployment. It is not a system wide
// prune. Volumes are not pruned to avoid unwanted data loss.
func pruneApp(cl *dockerClient.Client, app appPkg.App) error {
stackName := app.StackName()
ctx := context.Background()
pruneFilters := filters.NewArgs()
stackSearch := fmt.Sprintf("%s*", stackName)
pruneFilters.Add("label", stackSearch)
cr, err := cl.ContainersPrune(ctx, pruneFilters)
if err != nil {
return err
}
cntSpaceReclaimed := formatter.ByteCountSI(cr.SpaceReclaimed)
log.Infof("containers pruned: %d; space reclaimed: %s", len(cr.ContainersDeleted), cntSpaceReclaimed)
nr, err := cl.NetworksPrune(ctx, pruneFilters)
if err != nil {
return err
}
log.Infof("networks pruned: %d", len(nr.NetworksDeleted))
ir, err := cl.ImagesPrune(ctx, pruneFilters)
if err != nil {
return err
}
imgSpaceReclaimed := formatter.ByteCountSI(ir.SpaceReclaimed)
log.Infof("images pruned: %d; space reclaimed: %s", len(ir.ImagesDeleted), imgSpaceReclaimed)
return nil
}
var appUndeployCommand = cli.Command{
Name: "undeploy",
Aliases: []string{"un"},
ArgsUsage: "<domain>",
UsageText: "abra app undeploy <domain> [options]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.OfflineFlag,
pruneFlag,
},
Before: internal.SubCommandBefore,
Usage: "Undeploy an app",
Description: `
This does not destroy any of the application data. However, you should remain
vigilant, as your swarm installation will consider any previously attached
volumes as eligible for pruning once undeployed.
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
Before: internal.SubCommandBefore,
Usage: "Undeploy an app",
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Description: `This does not destroy any of the application data.
However, you should remain vigilant, as your swarm installation will consider
any previously attached volumes as eligible for pruning once undeployed.
Passing "-p/--prune" does not remove those volumes.`,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
stackName := app.StackName()
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
logrus.Debugf("checking whether %s is already deployed", stackName)
log.Debugf("checking whether %s is already deployed", stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
}
if err := internal.DeployOverview(app, deployedVersion, "continue with undeploy?"); err != nil {
logrus.Fatal(err)
chaosVersion := config.CHAOS_DEFAULT
if deployMeta.IsChaos {
chaosVersion = deployMeta.ChaosVersion
}
rmOpts := stack.Remove{Namespaces: []string{app.StackName()}}
if err := internal.DeployOverview(app, []string{}, deployMeta.Version, chaosVersion); err != nil {
log.Fatal(err)
}
rmOpts := stack.Remove{
Namespaces: []string{app.StackName()},
Detach: false,
}
if err := stack.RunRemove(context.Background(), cl, rmOpts); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if prune {
if err := pruneApp(cl, app); err != nil {
log.Fatal(err)
}
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
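The isDeployed/deployedVersion pair from the old code is folded into a single deployMeta value throughout this changeset. Judging from the call sites (IsDeployed, Version, IsChaos, ChaosVersion), its shape is roughly the following sketch; the actual definition lives in the stack package:
package stack
// DeployMeta is an assumed reconstruction from usage in this diff, not the
// canonical definition.
type DeployMeta struct {
	IsDeployed   bool   // whether the stack is running at all
	Version      string // deployed recipe version, or "unknown"
	IsChaos      bool   // deployed from an unclean/chaos checkout?
	ChaosVersion string // commit hash when IsChaos is true
}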


@ -5,200 +5,283 @@ import (
"fmt"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/log"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var appUpgradeCommand = cli.Command{
Name: "upgrade",
Aliases: []string{"up"},
Usage: "Upgrade an app",
ArgsUsage: "<domain>",
UsageText: "abra app upgrade <domain> [<version>] [options]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.ForceFlag,
internal.ChaosFlag,
internal.NoDomainChecksFlag,
internal.DontWaitConvergeFlag,
internal.ReleaseNotesFlag,
},
Before: internal.SubCommandBefore,
Description: `
Upgrade an app. You can use it to choose and roll out a new upgrade to an
existing app.
Description: `Upgrade an app.
This command specifically supports incrementing the version of running apps, as
opposed to "abra app deploy <domain>" which will not change the version of a
deployed app.
Unlike "deploy", chaos operations are not supported here. Only recipe versions
are supported values for "[<version>]".
You may pass "--force/-f" to upgrade to the same version again. This can be
useful if the container runtime has gotten into a weird state.
An upgrade can be destructive, please ensure you have a copy of your app data
beforehand.`,
HideHelp: true,
ShellComplete: autocomplete.AppNameComplete,
Action: func(ctx context.Context, cmd *cli.Command) error {
var warnMessages []string
This action could be destructive, please ensure you have a copy of your app
data beforehand.
Chaos mode ("--chaos") will deploy your local checkout of a recipe as-is,
including unstaged changes and can be useful for live hacking and testing new
recipes.
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
app := internal.ValidateApp(cmd)
stackName := app.StackName()
if !internal.Chaos {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
specificVersion := cmd.Args().Get(1)
if specificVersion != "" {
log.Debugf("overriding env file version (%s) with %s", app.Recipe.Version, specificVersion)
app.Recipe.Version = specificVersion
}
r, err := recipe.Get(app.Recipe)
if err != nil {
logrus.Fatal(err)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
if err := lint.LintForErrors(r); err != nil {
logrus.Fatal(err)
if err := lint.LintForErrors(app.Recipe); err != nil {
log.Fatal(err)
}
log.Debugf("checking whether %s is already deployed", stackName)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
logrus.Debugf("checking whether %s is already deployed", stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
}
catl, err := recipe.ReadRecipeCatalogue()
versions, err := app.Recipe.Tags()
if err != nil {
logrus.Fatal(err)
}
versions, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
if err != nil {
logrus.Fatal(err)
}
if len(versions) == 0 && !internal.Chaos {
logrus.Fatalf("no published releases for %s in the recipe catalogue?", app.Recipe)
log.Fatal(err)
}
var availableUpgrades []string
if deployedVersion == "unknown" {
if deployMeta.Version == "unknown" {
availableUpgrades = versions
logrus.Warnf("failed to determine version of deployed %s", app.Name)
warnMessages = append(warnMessages, fmt.Sprintf("failed to determine deployed version of %s", app.Name))
}
if deployedVersion != "unknown" && !internal.Chaos {
if specificVersion != "" {
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
if err != nil {
log.Fatalf("'%s' is not a known version for %s", deployMeta.Version, app.Recipe.Name)
}
parsedSpecificVersion, err := tagcmp.Parse(specificVersion)
if err != nil {
log.Fatalf("'%s' is not a known version for %s", specificVersion, app.Recipe.Name)
}
if parsedSpecificVersion.IsLessThan(parsedDeployedVersion) && !parsedSpecificVersion.Equals(parsedDeployedVersion) {
log.Fatalf("%s is not an upgrade for %s?", deployMeta.Version, specificVersion)
}
if parsedSpecificVersion.Equals(parsedDeployedVersion) && !internal.Force {
log.Fatalf("%s is not an upgrade for %s?", deployMeta.Version, specificVersion)
}
availableUpgrades = append(availableUpgrades, specificVersion)
}
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
if err != nil {
log.Fatal(err)
}
if deployMeta.Version != "unknown" && specificVersion == "" {
if deployMeta.IsChaos {
warnMessages = append(warnMessages, "attempting to upgrade a chaos deployment")
}
for _, version := range versions {
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil {
logrus.Fatal(err)
}
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if parsedVersion.IsGreaterThan(parsedDeployedVersion) {
if parsedVersion.IsGreaterThan(parsedDeployedVersion) && !(parsedVersion.Equals(parsedDeployedVersion)) {
availableUpgrades = append(availableUpgrades, version)
}
}
if len(availableUpgrades) == 0 && !internal.Force {
logrus.Infof("no available upgrades, you're on latest (%s) ✌️", deployedVersion)
log.Info("no available upgrades")
return nil
}
}
availableUpgrades = internal.ReverseStringList(availableUpgrades)
var chosenUpgrade string
if len(availableUpgrades) > 0 && !internal.Chaos {
if internal.Force || internal.NoInput {
if len(availableUpgrades) > 0 {
if internal.Force || internal.NoInput || specificVersion != "" {
chosenUpgrade = availableUpgrades[len(availableUpgrades)-1]
logrus.Debugf("choosing %s as version to upgrade to", chosenUpgrade)
log.Debugf("choosing %s as version to upgrade to", chosenUpgrade)
} else {
prompt := &survey.Select{
Message: fmt.Sprintf("Please select an upgrade (current version: %s):", deployedVersion),
Options: availableUpgrades,
msg := fmt.Sprintf("please select an upgrade (version: %s):", deployMeta.Version)
if deployMeta.IsChaos {
msg = fmt.Sprintf("please select an upgrade (version: %s, chaosVersion: %s):", deployMeta.Version, deployMeta.ChaosVersion)
}
prompt := &survey.Select{
Message: msg,
Options: internal.ReverseStringList(availableUpgrades),
}
if err := survey.AskOne(prompt, &chosenUpgrade); err != nil {
return err
}
}
}
if internal.Force && chosenUpgrade == "" {
warnMessages = append(warnMessages, fmt.Sprintf("%s is already upgraded to latest", app.Name))
chosenUpgrade = deployMeta.Version
}
// if release notes written after git tag published, read them before we
// check out the tag and then they'll appear to be missing. this covers
// when we obviously will forget to write release notes before publishing
releaseNotes, err := internal.GetReleaseNotes(app.Recipe, chosenUpgrade)
if err != nil {
return err
}
if !internal.Chaos {
if err := recipe.EnsureVersion(app.Recipe, chosenUpgrade); err != nil {
logrus.Fatal(err)
}
}
if internal.Chaos {
logrus.Warn("chaos mode engaged")
var err error
chosenUpgrade, err = recipe.ChaosVersion(app.Recipe)
var releaseNotes string
if chosenUpgrade != "" {
parsedChosenUpgrade, err := tagcmp.Parse(chosenUpgrade)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
for _, version := range versions {
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
log.Fatal(err)
}
if parsedVersion.IsGreaterThan(parsedDeployedVersion) && parsedVersion.IsLessThan(parsedChosenUpgrade) {
note, err := app.Recipe.GetReleaseNotes(version)
if err != nil {
return err
}
if note != "" {
releaseNotes += fmt.Sprintf("%s\n", note)
}
}
}
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
log.Debugf("choosing %s as version to upgrade", chosenUpgrade)
if _, err := app.Recipe.EnsureVersion(chosenUpgrade); err != nil {
log.Fatal(err)
}
abraShEnv, err := envfile.ReadAbraShEnvVars(app.Recipe.AbraShPath)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
for k, v := range abraShEnv {
app.Env[k] = v
}
composeFiles, err := config.GetAppComposeFiles(app.Recipe, app.Env)
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: stackName,
Prune: false,
ResolveImage: stack.ResolveImageAlways,
Detach: false,
}
compose, err := config.GetAppComposeConfig(app.Name, deployOpts, app.Env)
compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if err := internal.NewVersionOverview(app, deployedVersion, chosenUpgrade, releaseNotes); err != nil {
logrus.Fatal(err)
appPkg.ExposeAllEnv(stackName, compose, app.Env)
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
appPkg.SetChaosVersionLabel(compose, stackName, chosenUpgrade)
appPkg.SetUpdateLabel(compose, stackName, app.Env)
envVars, err := appPkg.CheckEnv(app)
if err != nil {
log.Fatal(err)
}
if err := stack.RunDeploy(cl, deployOpts, compose, app.StackName(), internal.DontWaitConverge); err != nil {
logrus.Fatal(err)
for _, envVar := range envVars {
if !envVar.Present {
warnMessages = append(warnMessages,
fmt.Sprintf("env var %s missing from %s.env, present in recipe .env.sample", envVar.Name, app.Domain),
)
}
}
if internal.ReleaseNotes {
fmt.Println()
fmt.Print(releaseNotes)
return nil
}
chaosVersion := config.CHAOS_DEFAULT
if deployMeta.IsChaos {
chaosVersion = deployMeta.ChaosVersion
}
if err := internal.NewVersionOverview(
app,
warnMessages,
"upgrade",
deployMeta.Version,
chaosVersion,
chosenUpgrade,
releaseNotes); err != nil {
log.Fatal(err)
}
stack.WaitTimeout, err = appPkg.GetTimeoutFromLabel(compose, stackName)
if err != nil {
log.Fatal(err)
}
log.Debugf("set waiting timeout to %d s", stack.WaitTimeout)
if err := stack.RunDeploy(cl, deployOpts, compose, stackName, internal.DontWaitConverge); err != nil {
log.Fatal(err)
}
postDeployCmds, ok := app.Env["POST_UPGRADE_CMDS"]
if ok && !internal.DontWaitConverge {
log.Debugf("run the following post-deploy commands: %s", postDeployCmds)
if err := internal.PostCmds(cl, app, postDeployCmds); err != nil {
log.Fatalf("attempting to run post deploy commands, saw: %s", err)
}
}
app.Recipe.Version = chosenUpgrade
log.Debugf("choosing %s as version to save to env file", app.Recipe.Version)
if err := app.WriteRecipeVersion(app.Recipe.Version, false); err != nil {
log.Fatalf("writing new recipe version in env file: %s", err)
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
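The upgrade ordering above leans on coopcloud.tech/tagcmp; Parse, IsGreaterThan, IsLessThan and Equals all appear in this diff. A minimal standalone sketch of the comparison (the version strings are made up):
package main
import (
	"fmt"

	"coopcloud.tech/tagcmp"
)
func main() {
	deployed, err := tagcmp.Parse("1.2.0+2.3.1")
	if err != nil {
		panic(err)
	}
	candidate, err := tagcmp.Parse("1.3.0+2.4.0")
	if err != nil {
		panic(err)
	}
	// same check as the availableUpgrades loop: strictly greater only
	if candidate.IsGreaterThan(deployed) && !candidate.Equals(deployed) {
		fmt.Println("upgrade available: 1.3.0+2.4.0")
	}
}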


@ -1,101 +0,0 @@
package app
import (
"context"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/distribution/reference"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// getImagePath returns the image name
func getImagePath(image string) (string, error) {
img, err := reference.ParseNormalizedNamed(image)
if err != nil {
return "", err
}
path := reference.Path(img)
path = formatter.StripTagMeta(path)
logrus.Debugf("parsed %s from %s", path, image)
return path, nil
}
var appVersionCommand = cli.Command{
Name: "version",
Aliases: []string{"v"},
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
Usage: "Show app versions",
Description: `
Show all information about versioning related to a deployed app. This includes
the individual image names, tags and digests. But also the Co-op Cloud recipe
version.
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
stackName := app.StackName()
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("checking whether %s is already deployed", stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
logrus.Fatal(err)
}
if deployedVersion == "unknown" {
logrus.Fatalf("failed to determine version of deployed %s", app.Name)
}
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
}
recipeMeta, err := recipe.GetRecipeMeta(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
versionsMeta := make(map[string]recipe.ServiceMeta)
for _, recipeVersion := range recipeMeta.Versions {
if currentVersion, exists := recipeVersion[deployedVersion]; exists {
versionsMeta = currentVersion
}
}
if len(versionsMeta) == 0 {
logrus.Fatalf("could not retrieve deployed version (%s) from recipe catalogue?", deployedVersion)
}
tableCol := []string{"version", "service", "image", "digest"}
table := formatter.CreateTable(tableCol)
table.SetAutoMergeCellsByColumnIndex([]int{0})
for serviceName, versionMeta := range versionsMeta {
table.Append([]string{deployedVersion, serviceName, versionMeta.Image, versionMeta.Digest})
}
table.Render()
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
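getImagePath (removed above along with the old "app version" implementation) relied on docker's reference package for image name normalisation; that part behaves like this:
package main
import (
	"fmt"

	"github.com/docker/distribution/reference"
)
func main() {
	// short names are normalised to the canonical registry path
	img, err := reference.ParseNormalizedNamed("nginx:1.25")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.Path(img)) // library/nginx
}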


@ -2,55 +2,68 @@ package app
import (
"context"
"fmt"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var appVolumeListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
Usage: "List volumes associated with an app",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
Name: "list",
Aliases: []string{"ls"},
UsageText: "abra app volume list <domain> [options]",
Before: internal.SubCommandBefore,
Usage: "List volumes associated with an app",
ShellComplete: autocomplete.AppNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
filters, err := app.Filters(false, true)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
volumeList, err := client.GetVolumes(context.Background(), app.Server, filters)
volumes, err := client.GetVolumes(cl, context.Background(), app.Server, filters)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
table := formatter.CreateTable([]string{"name", "created", "mounted"})
var volTable [][]string
for _, volume := range volumeList {
volRow := []string{volume.Name, volume.CreatedAt, volume.Mountpoint}
volTable = append(volTable, volRow)
headers := []string{"name", "created", "mounted"}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
table.AppendBulk(volTable)
table.Headers(headers...)
if table.NumLines() > 0 {
table.Render()
} else {
logrus.Warnf("no volumes created for %s", app.Name)
var rows [][]string
for _, volume := range volumes {
row := []string{volume.Name, volume.CreatedAt, volume.Mountpoint}
rows = append(rows, row)
}
table.Rows(rows...)
if len(rows) > 0 {
fmt.Println(table)
return nil
}
log.Warnf("no volumes created for %s", app.Name)
return nil
},
}
@ -58,36 +71,49 @@ var appVolumeListCommand = cli.Command{
var appVolumeRemoveCommand = cli.Command{
Name: "remove",
Usage: "Remove volume(s) associated with an app",
Description: `
This command supports removing volumes associated with an app. The app in
question must be undeployed before you try to remove volumes. See "abra app
undeploy <domain>" for more.
Description: `Remove volumes associated with an app.
The app in question must be undeployed before you try to remove volumes. See
"abra app undeploy <domain>" for more.
The command is interactive and will show a multiple select input which allows
you to make a selection. Use the "?" key to see more help on navigating this
interface.
Passing "--force/-f" will select all volumes for removal. Be careful.
`,
ArgsUsage: "<domain>",
Passing "--force/-f" will select all volumes for removal. Be careful.`,
UsageText: "abra app volume remove [options] <domain>",
Aliases: []string{"rm"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.ForceFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.AppNameComplete,
Action: func(ctx context.Context, cmd *cli.Command) error {
app := internal.ValidateApp(cmd)
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
log.Fatal(err)
}
if deployMeta.IsDeployed {
log.Fatalf("%s is still deployed. Run \"abra app undeploy %s\"", app.Name, app.Name)
}
filters, err := app.Filters(false, true)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
volumeList, err := client.GetVolumes(context.Background(), app.Server, filters)
volumeList, err := client.GetVolumes(cl, context.Background(), app.Server, filters)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
volumeNames := client.GetVolumeNames(volumeList)
@ -101,7 +127,7 @@ Passing "--force/-f" will select all volumes for removal. Be careful.
Default: volumeNames,
}
if err := survey.AskOne(volumesPrompt, &volumesToRemove); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
@ -109,25 +135,28 @@ Passing "--force/-f" will select all volumes for removal. Be careful.
volumesToRemove = volumeNames
}
err = client.RemoveVolumes(context.Background(), app.Server, volumesToRemove, internal.Force)
if err != nil {
logrus.Fatal(err)
}
if len(volumesToRemove) > 0 {
err := client.RemoveVolumes(cl, context.Background(), volumesToRemove, internal.Force, 5)
if err != nil {
log.Fatalf("removing volumes failed: %s", err)
}
logrus.Info("volumes removed successfully")
log.Infof("%d volumes removed successfully", len(volumesToRemove))
} else {
log.Info("no volumes removed")
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
var appVolumeCommand = cli.Command{
Name: "volume",
Aliases: []string{"vl"},
Usage: "Manage app volumes",
ArgsUsage: "<domain>",
Subcommands: []cli.Command{
appVolumeListCommand,
appVolumeRemoveCommand,
UsageText: "abra app volume [command] [options] [arguments]",
Commands: []*cli.Command{
&appVolumeListCommand,
&appVolumeRemoveCommand,
},
}
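The removal flow above is survey's multi-select with everything preselected via Default. A minimal sketch of that pattern (the volume names are invented):
package main
import (
	"fmt"

	"github.com/AlecAivazis/survey/v2"
)
func main() {
	volumeNames := []string{"myapp_com_data", "myapp_com_cache"} // invented
	var volumesToRemove []string
	prompt := &survey.MultiSelect{
		Message: "which volumes should be removed?",
		Options: volumeNames,
		Default: volumeNames, // all preselected, as in the diff
	}
	if err := survey.AskOne(prompt, &volumesToRemove); err != nil {
		panic(err)
	}
	fmt.Println("removing:", volumesToRemove)
}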


@ -1,6 +1,7 @@
package catalogue
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@ -8,77 +9,29 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/catalogue"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/limit"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/go-git/go-git/v5"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
// CatalogueSkipList is all the repos that are not recipes.
var CatalogueSkipList = map[string]bool{
"abra": true,
"abra-apps": true,
"abra-aur": true,
"abra-bash": true,
"abra-capsul": true,
"abra-gandi": true,
"abra-hetzner": true,
"apps": true,
"aur-abra-git": true,
"auto-apps-json": true,
"auto-mirror": true,
"backup-bot": true,
"backup-bot-two": true,
"beta.coopcloud.tech": true,
"comrade-renovate-bot": true,
"coopcloud.tech": true,
"coturn": true,
"docker-cp-deploy": true,
"docker-dind-bats-kcov": true,
"docs.coopcloud.tech": true,
"drone-abra": true,
"example": true,
"gardening": true,
"go-abra": true,
"organising": true,
"outline-with-patch": true,
"pyabra": true,
"radicle-seed-node": true,
"recipes-catalogue-json": true,
"recipes-wishlist": true,
"recipes.coopcloud.tech": true,
"stack-ssh-deploy": true,
"swarm-cronjob": true,
"tagcmp": true,
"traefik-cert-dumper": true,
"tyop": true,
}
var catalogueGenerateCommand = cli.Command{
Name: "generate",
Aliases: []string{"g"},
Usage: "Generate the recipe catalogue",
Name: "generate",
Aliases: []string{"g"},
Usage: "Generate the recipe catalogue",
UsageText: "abra catalogue generate [<recipe>] [options]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.PublishFlag,
internal.DryFlag,
internal.SkipUpdatesFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
Description: `
Generate a new copy of the recipe catalogue which can be found on:
https://recipes.coopcloud.tech (website that humans read)
https://recipes.coopcloud.tech/recipes.json (JSON that Abra reads)
It polls the entire git.coopcloud.tech/coop-cloud/... recipe repository
listing, parses README.md and git tags to produce recipe metadata which is
loaded into the catalogue JSON file.
Description: `Generate a new copy of the recipe catalogue.
It is possible to generate new metadata for a single recipe by passing
<recipe>. The existing local catalogue will be updated, not overwritten.
@ -87,20 +40,28 @@ It is quite easy to get rate limited by Docker Hub when running this command.
If you have a Hub account you can have Abra log you in to avoid this. Pass
"--user" and "--pass".
Push your new release git.coopcloud.tech with "-p/--publish". This requires
Push your new release to git.coopcloud.tech with "-p/--publish". This requires
that you have permission to git push to these repositories and have your SSH
keys configured on your account.
`,
ArgsUsage: "[<recipe>]",
Action: func(c *cli.Context) error {
recipeName := c.Args().First()
keys configured on your account.`,
ShellComplete: autocomplete.RecipeNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
recipeName := cmd.Args().First()
r := recipe.Get(recipeName)
if recipeName != "" {
internal.ValidateRecipe(c, true)
internal.ValidateRecipe(cmd)
}
if !internal.Chaos {
if err := catalogue.EnsureIsClean(); err != nil {
log.Fatal(err)
}
}
repos, err := recipe.ReadReposMetadata()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
var barLength int
@ -114,9 +75,9 @@ keys configured on your account.
}
if !internal.SkipUpdates {
logrus.Warn(logMsg)
if err := updateRepositories(repos, recipeName); err != nil {
logrus.Fatal(err)
log.Warn(logMsg)
if err := recipe.UpdateRepositories(repos, recipeName); err != nil {
log.Fatal(err)
}
}
@ -128,19 +89,14 @@ keys configured on your account.
continue
}
if _, exists := CatalogueSkipList[recipeMeta.Name]; exists {
catlBar.Add(1)
continue
versions, err := r.GetRecipeVersions()
if err != nil {
log.Warn(err)
}
versions, err := recipe.GetRecipeVersions(recipeMeta.Name)
features, category, err := recipe.GetRecipeFeaturesAndCategory(r)
if err != nil {
logrus.Warn(err)
}
features, category, err := recipe.GetRecipeFeaturesAndCategory(recipeMeta.Name)
if err != nil {
logrus.Warn(err)
log.Warn(err)
}
catl[recipeMeta.Name] = recipe.RecipeMeta{
@ -161,158 +117,97 @@ keys configured on your account.
recipesJSON, err := json.MarshalIndent(catl, "", " ")
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if recipeName == "" {
if err := ioutil.WriteFile(config.RECIPES_JSON, recipesJSON, 0764); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
} else {
catlFS, err := recipe.ReadRecipeCatalogue()
catlFS, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
catlFS[recipeName] = catl[recipeName]
updatedRecipesJSON, err := json.MarshalIndent(catlFS, "", " ")
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if err := ioutil.WriteFile(config.RECIPES_JSON, updatedRecipesJSON, 0764); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
logrus.Infof("generated new recipe catalogue in %s", config.RECIPES_JSON)
log.Infof("generated new recipe catalogue in %s", config.RECIPES_JSON)
cataloguePath := path.Join(config.ABRA_DIR, "catalogue")
if internal.Publish {
isClean, err := gitPkg.IsClean(cataloguePath)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if isClean {
if !internal.Dry {
logrus.Fatalf("no changes discovered in %s, nothing to publish?", cataloguePath)
log.Fatalf("no changes discovered in %s, nothing to publish?", cataloguePath)
}
}
msg := "chore: publish new catalogue release changes"
if err := gitPkg.Commit(cataloguePath, "**.json", msg, internal.Dry); err != nil {
logrus.Fatal(err)
if err := gitPkg.Commit(cataloguePath, msg, internal.Dry); err != nil {
log.Fatal(err)
}
repo, err := git.PlainOpen(cataloguePath)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
sshURL := fmt.Sprintf(config.SSH_URL_TEMPLATE, config.CATALOGUE_JSON_REPO_NAME)
if err := gitPkg.CreateRemote(repo, "origin-ssh", sshURL, internal.Dry); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if err := gitPkg.Push(cataloguePath, "origin-ssh", false, internal.Dry); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
repo, err := git.PlainOpen(cataloguePath)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
head, err := repo.Head()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if !internal.Dry && internal.Publish {
url := fmt.Sprintf("%s/%s/commit/%s", config.REPOS_BASE_URL, config.CATALOGUE_JSON_REPO_NAME, head.Hash())
logrus.Infof("new changes published: %s", url)
log.Infof("new changes published: %s", url)
}
if internal.Dry {
logrus.Info("dry run: no changes published")
log.Info("dry run: no changes published")
}
return nil
},
BashComplete: autocomplete.RecipeNameComplete,
}
// CatalogueCommand defines the `abra catalogue` command and sub-commands.
var CatalogueCommand = cli.Command{
Name: "catalogue",
Usage: "Manage the recipe catalogue",
Aliases: []string{"c"},
ArgsUsage: "<recipe>",
Description: "This command helps recipe packagers interact with the recipe catalogue",
Subcommands: []cli.Command{
catalogueGenerateCommand,
Name: "catalogue",
Usage: "Manage the recipe catalogue",
Aliases: []string{"c"},
UsageText: "abra catalogue [command] [options] [arguments]",
Commands: []*cli.Command{
&catalogueGenerateCommand,
},
}
func updateRepositories(repos recipe.RepoCatalogue, recipeName string) error {
var barLength int
if recipeName != "" {
barLength = 1
} else {
barLength = len(repos)
}
cloneLimiter := limit.New(10)
retrieveBar := formatter.CreateProgressbar(barLength, "ensuring recipes are cloned & up-to-date...")
ch := make(chan string, barLength)
for _, repoMeta := range repos {
go func(rm recipe.RepoMeta) {
cloneLimiter.Begin()
defer cloneLimiter.End()
if recipeName != "" && recipeName != rm.Name {
ch <- rm.Name
retrieveBar.Add(1)
return
}
if _, exists := CatalogueSkipList[rm.Name]; exists {
ch <- rm.Name
retrieveBar.Add(1)
return
}
recipeDir := path.Join(config.RECIPES_DIR, rm.Name)
if err := gitPkg.Clone(recipeDir, rm.CloneURL); err != nil {
logrus.Fatal(err)
}
isClean, err := gitPkg.IsClean(recipeDir)
if err != nil {
logrus.Fatal(err)
}
if !isClean {
logrus.Fatalf("%s has locally unstaged changes", rm.Name)
}
if err := recipe.EnsureUpToDate(rm.Name); err != nil {
logrus.Fatal(err)
}
ch <- rm.Name
retrieveBar.Add(1)
}(repoMeta)
}
for range repos {
<-ch // wait for everything
}
return nil
}
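updateRepositories (deleted here, superseded by recipe.UpdateRepositories) capped concurrent clones at 10 via limit.New. The same fan-out shape with a plain channel semaphore, for reference:
package main
import (
	"fmt"
	"sync"
)
func main() {
	repos := []string{"gitea", "nextcloud", "wordpress"} // invented names
	sem := make(chan struct{}, 10)                       // at most 10 in flight
	var wg sync.WaitGroup
	for _, repo := range repos {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			sem <- struct{}{}        // acquire
			defer func() { <-sem }() // release
			fmt.Println("updating", name) // stand-in for clone + fast-forward
		}(repo)
	}
	wg.Wait()
}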


@ -2,6 +2,7 @@
package cli
import (
"context"
"errors"
"fmt"
"os"
@ -12,38 +13,30 @@ import (
"coopcloud.tech/abra/cli/catalogue"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/cli/recipe"
"coopcloud.tech/abra/cli/record"
"coopcloud.tech/abra/cli/server"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/web"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
charmLog "github.com/charmbracelet/log"
"github.com/urfave/cli/v3"
)
// AutoCompleteCommand helps people set up auto-complete in their shells
var AutoCompleteCommand = cli.Command{
Name: "autocomplete",
Aliases: []string{"ac"},
Usage: "Configure shell autocompletion (recommended)",
Description: `
Set up auto-completion in your shell by downloading the relevant files and
laying out what additional information must be loaded. Supported shells are as
follows: bash, fish, fizsh & zsh.
Name: "autocomplete",
Aliases: []string{"ac"},
Usage: "Configure shell autocompletion",
UsageText: "abra autocomplete <shell> [options]",
Description: `Set up shell auto-completion.
Example:
abra autocomplete bash
`,
ArgsUsage: "<shell>",
Flags: []cli.Flag{
internal.DebugFlag,
},
Action: func(c *cli.Context) error {
shellType := c.Args().First()
Supported shells are: bash, fish, fizsh & zsh.`,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
shellType := cmd.Args().First()
if shellType == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("no shell provided"))
internal.ShowSubcommandHelpAndError(cmd, errors.New("no shell provided"))
}
supportedShells := map[string]bool{
@ -54,7 +47,7 @@ Example:
}
if _, ok := supportedShells[shellType]; !ok {
logrus.Fatalf("%s is not a supported shell right now, sorry", shellType)
log.Fatalf("%s is not a supported shell right now, sorry", shellType)
}
if shellType == "fizsh" {
@ -64,44 +57,47 @@ Example:
autocompletionDir := path.Join(config.ABRA_DIR, "autocompletion")
if err := os.Mkdir(autocompletionDir, 0764); err != nil {
if !os.IsExist(err) {
logrus.Fatal(err)
log.Fatal(err)
}
logrus.Debugf("%s already created", autocompletionDir)
log.Debugf("%s already created", autocompletionDir)
}
autocompletionFile := path.Join(config.ABRA_DIR, "autocompletion", shellType)
if _, err := os.Stat(autocompletionFile); err != nil && os.IsNotExist(err) {
url := fmt.Sprintf("https://git.coopcloud.tech/coop-cloud/abra/raw/branch/main/scripts/autocomplete/%s", shellType)
logrus.Infof("fetching %s", url)
log.Infof("fetching %s", url)
if err := web.GetFile(autocompletionFile, url); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
switch shellType {
case "bash":
fmt.Println(fmt.Sprintf(`
# Run the following commands to install auto-completion
sudo mkdir /etc/bash_completion.d/
# run the following commands once to install auto-completion
sudo mkdir -p /etc/bash_completion.d/
sudo cp %s /etc/bash_completion.d/abra
echo "source /etc/bash_completion.d/abra" >> ~/.bashrc
# And finally run "abra app ps <hit tab key>" to test things are working, you should see app domains listed!
source /etc/bash_completion.d/abra
# To test, run the following: "abra app <hit tab key>" - you should see command completion!
`, autocompletionFile))
case "zsh":
fmt.Println(fmt.Sprintf(`
# Run the following commands to install auto-completion
sudo mkdir /etc/zsh/completion.d/
# run the following commands once to install auto-completion
sudo mkdir -p /etc/zsh/completion.d/
sudo cp %s /etc/zsh/completion.d/abra
echo "PROG=abra\n_CLI_ZSH_AUTOCOMPLETE_HACK=1\nsource /etc/zsh/completion.d/abra" >> ~/.zshrc
# And finally run "abra app ps <hit tab key>" to test things are working, you should see app domains listed!
source /etc/zsh/completion.d/abra
# to test, run the following: "abra app <hit tab key>" - you should see command completion!
`, autocompletionFile))
case "fish":
fmt.Println(fmt.Sprintf(`
# Run the following commands to install auto-completion
# run the following commands once to install auto-completion
sudo mkdir -p /etc/fish/completions
sudo cp %s /etc/fish/completions/abra
echo "source /etc/fish/completions/abra" >> ~/.config/fish/config.fish
# And finally run "abra app ps <hit tab key>" to test things are working, you should see app domains listed!
source /etc/fish/completions/abra
# to test, run the following: "abra app <hit tab key>" - you should see command completion!
`, autocompletionFile))
}
@ -111,84 +107,92 @@ echo "source /etc/fish/completions/abra" >> ~/.config/fish/config.fish
// UpgradeCommand upgrades abra in-place.
var UpgradeCommand = cli.Command{
Name: "upgrade",
Aliases: []string{"u"},
Usage: "Upgrade Abra itself",
Description: `
Upgrade Abra in-place with the latest stable or release candidate.
Name: "upgrade",
Aliases: []string{"u"},
Usage: "Upgrade abra",
UsageText: "abra upgrade [options]",
Description: `Upgrade abra in-place with the latest stable or release candidate.
Pass "-r/--rc" to install the latest release candidate. Please bear in mind
that it may contain catastrophic bugs. Thank you very much for the testing
efforts!
`,
Flags: []cli.Flag{internal.RCFlag},
Action: func(c *cli.Context) error {
Use "--rc/-r" to install the latest release candidate. Please bear in mind that
it may contain absolutely catastrophic deal-breaker bugs. Thank you very much
for the testing efforts 💗`,
Flags: []cli.Flag{internal.RCFlag},
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
mainURL := "https://install.abra.coopcloud.tech"
cmd := exec.Command("bash", "-c", fmt.Sprintf("wget -q -O- %s | bash", mainURL))
c := exec.Command("bash", "-c", fmt.Sprintf("wget -q -O- %s | bash", mainURL))
if internal.RC {
releaseCandidateURL := "https://git.coopcloud.tech/coop-cloud/abra/raw/branch/main/scripts/installer/installer"
cmd = exec.Command("bash", "-c", fmt.Sprintf("wget -q -O- %s | bash -s -- --rc", releaseCandidateURL))
c = exec.Command("bash", "-c", fmt.Sprintf("wget -q -O- %s | bash -s -- --rc", releaseCandidateURL))
}
logrus.Debugf("attempting to run %s", cmd)
log.Debugf("attempting to run %s", c)
if err := internal.RunCmd(cmd); err != nil {
logrus.Fatal(err)
if err := internal.RunCmd(c); err != nil {
log.Fatal(err)
}
return nil
},
}
func newAbraApp(version, commit string) *cli.App {
app := &cli.App{
Name: "abra",
Usage: `The Co-op Cloud command-line utility belt 🎩🐇
____ ____ _ _
/ ___|___ ___ _ __ / ___| | ___ _ _ __| |
| | / _ \ _____ / _ \| '_ \ | | | |/ _ \| | | |/ _' |
| |__| (_) |_____| (_) | |_) | | |___| | (_) | |_| | (_| |
\____\___/ \___/| .__/ \____|_|\___/ \__,_|\__,_|
|_|
`,
Version: fmt.Sprintf("%s-%s", version, commit[:7]),
Commands: []cli.Command{
app.AppCommand,
server.ServerCommand,
recipe.RecipeCommand,
catalogue.CatalogueCommand,
record.RecordCommand,
UpgradeCommand,
AutoCompleteCommand,
func newAbraApp(version, commit string) *cli.Command {
app := &cli.Command{
Name: "abra",
Usage: "The Co-op Cloud command-line utility belt 🎩🐇",
UsageText: "abra [command] [arguments] [options]",
Version: fmt.Sprintf("%s-%s", version, commit[:7]),
Flags: []cli.Flag{
// NOTE(d1): "GLOBAL OPTIONS" flags
internal.DebugFlag,
},
BashComplete: autocomplete.SubcommandComplete,
Commands: []*cli.Command{
&app.AppCommand,
&server.ServerCommand,
&recipe.RecipeCommand,
&catalogue.CatalogueCommand,
&UpgradeCommand,
&AutoCompleteCommand,
},
EnableShellCompletion: true,
UseShortOptionHandling: true,
HideHelpCommand: true,
ShellComplete: autocomplete.SubcommandComplete,
}
app.EnableBashCompletion = true
app.Before = func(c *cli.Context) error {
app.Before = func(ctx context.Context, cmd *cli.Command) error {
paths := []string{
config.ABRA_DIR,
path.Join(config.SERVERS_DIR),
path.Join(config.RECIPES_DIR),
path.Join(config.VENDOR_DIR),
path.Join(config.BACKUP_DIR),
config.SERVERS_DIR,
config.RECIPES_DIR,
config.VENDOR_DIR,
config.BACKUP_DIR,
}
for _, path := range paths {
if err := os.Mkdir(path, 0764); err != nil {
if !os.IsExist(err) {
logrus.Fatal(err)
log.Fatal(err)
}
continue
}
}
logrus.Debugf("abra version %s, commit %s", version, commit)
log.Logger.SetStyles(log.Styles())
charmLog.SetDefault(log.Logger)
log.Debugf("abra version %s, commit %s", version, commit)
return nil
}
cli.HelpFlag = &cli.BoolFlag{
Name: "help",
Aliases: []string{"h, H"},
Usage: "Show help",
}
return app
}
@ -196,7 +200,7 @@ func newAbraApp(version, commit string) *cli.App {
func RunApp(version, commit string) {
app := newAbraApp(version, commit)
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
if err := app.Run(context.Background(), os.Args); err != nil {
log.Fatal(err)
}
}
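The essence of the urfave/cli v2-to-v3 migration running through this whole diff: the root is now a *cli.Command rather than *cli.App, actions receive a context.Context, and Run takes one too. Distilled into a toy program (not abra itself):
package main
import (
	"context"
	"fmt"
	"os"

	"github.com/urfave/cli/v3"
)
func main() {
	root := &cli.Command{
		Name: "toy",
		Commands: []*cli.Command{
			{
				Name: "hello",
				Action: func(ctx context.Context, cmd *cli.Command) error {
					fmt.Println("hello", cmd.Args().First())
					return nil
				},
			},
		},
	}
	if err := root.Run(context.Background(), os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}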


@ -1,35 +1,67 @@
package internal
import (
"strings"
"context"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/service"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
)
// SafeSplit splits up a string into a list of commands safely.
func SafeSplit(s string) []string {
split := strings.Split(s, " ")
var result []string
var inquote string
var block string
for _, i := range split {
if inquote == "" {
if strings.HasPrefix(i, "'") || strings.HasPrefix(i, "\"") {
inquote = string(i[0])
block = strings.TrimPrefix(i, inquote) + " "
} else {
result = append(result, i)
}
} else {
if !strings.HasSuffix(i, inquote) {
block += i + " "
} else {
block += strings.TrimSuffix(i, inquote)
inquote = ""
result = append(result, block)
block = ""
}
}
// RetrieveBackupBotContainer gets the deployed backupbot container.
func RetrieveBackupBotContainer(cl *dockerClient.Client) (types.Container, error) {
ctx := context.Background()
chosenService, err := service.GetServiceByLabel(ctx, cl, config.BackupbotLabel, NoInput)
if err != nil {
return types.Container{}, err
}
return result
log.Debugf("retrieved %s as backup enabled service", chosenService.Spec.Name)
filters := filters.NewArgs()
filters.Add("name", chosenService.Spec.Name)
targetContainer, err := containerPkg.GetContainer(
ctx,
cl,
filters,
NoInput,
)
if err != nil {
return types.Container{}, err
}
return targetContainer, nil
}
// RunBackupCmdRemote runs a backup related command on a remote backupbot container.
func RunBackupCmdRemote(cl *dockerClient.Client, backupCmd string, containerID string, execEnv []string) error {
execBackupListOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: []string{"/usr/bin/backup", "--", backupCmd},
Detach: false,
Env: execEnv,
Tty: true,
}
log.Debugf("running backup %s on %s with exec config %v", backupCmd, containerID, execBackupListOpts)
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &execBackupListOpts); err != nil {
return err
}
return nil
}


@ -1,11 +1,11 @@
package internal
import (
"context"
"os"
logrusStack "github.com/Gurpartap/logrus-stack"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"coopcloud.tech/abra/pkg/log"
"github.com/urfave/cli/v3"
)
// Secrets stores the variable from SecretsFlag
@ -13,7 +13,8 @@ var Secrets bool
// SecretsFlag turns on/off automatically generating secrets
var SecretsFlag = &cli.BoolFlag{
Name: "secrets, S",
Name: "secrets",
Aliases: []string{"S"},
Usage: "Automatically generate secrets",
Destination: &Secrets,
}
@ -23,7 +24,8 @@ var Pass bool
// PassFlag turns on/off storing generated secrets in pass
var PassFlag = &cli.BoolFlag{
Name: "pass, p",
Name: "pass",
Aliases: []string{"p"},
Usage: "Store the generated secrets in a local pass store",
Destination: &Pass,
}
@ -33,17 +35,35 @@ var PassRemove bool
// PassRemoveFlag turns on/off removing generated secrets from pass
var PassRemoveFlag = &cli.BoolFlag{
Name: "pass, p",
Name: "pass",
Aliases: []string{"p"},
Usage: "Remove generated secrets from a local pass store",
Destination: &PassRemove,
}
var File bool
var FileFlag = &cli.BoolFlag{
Name: "file",
Aliases: []string{"f"},
Usage: "Treat input as a file",
Destination: &File,
}
var Trim bool
var TrimFlag = &cli.BoolFlag{
Name: "trim",
Aliases: []string{"t"},
Usage: "Trim input",
Destination: &Trim,
}
// Force force functionality without asking.
var Force bool
// ForceFlag turns on/off force functionality.
var ForceFlag = &cli.BoolFlag{
Name: "force, f",
Name: "force",
Aliases: []string{"f"},
Usage: "Perform action without further prompt. Use with care!",
Destination: &Force,
}
@ -53,204 +73,73 @@ var Chaos bool
// ChaosFlag turns on/off chaos functionality.
var ChaosFlag = &cli.BoolFlag{
Name: "chaos, C",
Usage: "Deploy uncommitted recipes changes. Use with care!",
Name: "chaos",
Aliases: []string{"C"},
Usage: "Ignore uncommitted recipes changes. Use with care!",
Destination: &Chaos,
}
// DNSProvider specifies a DNS provider.
var DNSProvider string
// Disable tty to run commands from script
var Tty bool
// DNSProviderFlag selects a DNS provider.
var DNSProviderFlag = &cli.StringFlag{
Name: "provider, p",
Value: "",
Usage: "DNS provider",
Destination: &DNSProvider,
// TtyFlag turns on/off tty mode.
var TtyFlag = &cli.BoolFlag{
Name: "tty",
Aliases: []string{"T"},
Usage: "Disables TTY mode to run this command from a script.",
Destination: &Tty,
}
var NoInput bool
var NoInputFlag = &cli.BoolFlag{
Name: "no-input, n",
Name: "no-input",
Aliases: []string{"n"},
Usage: "Toggle non-interactive mode",
Destination: &NoInput,
}
var DNSType string
var DNSTypeFlag = &cli.StringFlag{
Name: "record-type, rt",
Value: "",
Usage: "Domain name record type (e.g. A)",
Destination: &DNSType,
}
var DNSName string
var DNSNameFlag = &cli.StringFlag{
Name: "record-name, rn",
Value: "",
Usage: "Domain name record name (e.g. mysubdomain)",
Destination: &DNSName,
}
var DNSValue string
var DNSValueFlag = &cli.StringFlag{
Name: "record-value, rv",
Value: "",
Usage: "Domain name record value (e.g. 192.168.1.1)",
Destination: &DNSValue,
}
var DNSTTL string
var DNSTTLFlag = &cli.StringFlag{
Name: "record-ttl, rl",
Value: "600s",
Usage: "Domain name TTL value (seconds)",
Destination: &DNSTTL,
}
var DNSPriority int
var DNSPriorityFlag = &cli.IntFlag{
Name: "record-priority, rp",
Value: 10,
Usage: "Domain name priority value",
Destination: &DNSPriority,
}
var ServerProvider string
var ServerProviderFlag = &cli.StringFlag{
Name: "provider, p",
Usage: "3rd party server provider",
Destination: &ServerProvider,
}
var CapsulInstanceURL string
var CapsulInstanceURLFlag = &cli.StringFlag{
Name: "capsul-url, cu",
Value: "yolo.servers.coop",
Usage: "capsul instance URL",
Destination: &CapsulInstanceURL,
}
var CapsulName string
var CapsulNameFlag = &cli.StringFlag{
Name: "capsul-name, cn",
Value: "",
Usage: "capsul name",
Destination: &CapsulName,
}
var CapsulType string
var CapsulTypeFlag = &cli.StringFlag{
Name: "capsul-type, ct",
Value: "f1-xs",
Usage: "capsul type",
Destination: &CapsulType,
}
var CapsulImage string
var CapsulImageFlag = &cli.StringFlag{
Name: "capsul-image, ci",
Value: "debian10",
Usage: "capsul image",
Destination: &CapsulImage,
}
var CapsulSSHKeys cli.StringSlice
var CapsulSSHKeysFlag = &cli.StringSliceFlag{
Name: "capsul-ssh-keys, cs",
Usage: "capsul SSH key",
Value: &CapsulSSHKeys,
}
var CapsulAPIToken string
var CapsulAPITokenFlag = &cli.StringFlag{
Name: "capsul-token, ca",
Usage: "capsul API token",
EnvVar: "CAPSUL_TOKEN",
Destination: &CapsulAPIToken,
}
var HetznerCloudName string
var HetznerCloudNameFlag = &cli.StringFlag{
Name: "hetzner-name, hn",
Value: "",
Usage: "hetzner cloud name",
Destination: &HetznerCloudName,
}
var HetznerCloudType string
var HetznerCloudTypeFlag = &cli.StringFlag{
Name: "hetzner-type, ht",
Usage: "hetzner cloud type",
Destination: &HetznerCloudType,
Value: "cx11",
}
var HetznerCloudImage string
var HetznerCloudImageFlag = &cli.StringFlag{
Name: "hetzner-image, hi",
Usage: "hetzner cloud image",
Value: "debian-10",
Destination: &HetznerCloudImage,
}
var HetznerCloudSSHKeys cli.StringSlice
var HetznerCloudSSHKeysFlag = &cli.StringSliceFlag{
Name: "hetzner-ssh-keys, hs",
Usage: "hetzner cloud SSH keys (e.g. me@foo.com)",
Value: &HetznerCloudSSHKeys,
}
var HetznerCloudLocation string
var HetznerCloudLocationFlag = &cli.StringFlag{
Name: "hetzner-location, hl",
Usage: "hetzner cloud server location",
Value: "hel1",
Destination: &HetznerCloudLocation,
}
var HetznerCloudAPIToken string
var HetznerCloudAPITokenFlag = &cli.StringFlag{
Name: "hetzner-token, ha",
Usage: "hetzner cloud API token",
EnvVar: "HCLOUD_TOKEN",
Destination: &HetznerCloudAPIToken,
}
// Debug stores the variable from DebugFlag.
var Debug bool
// DebugFlag turns on/off verbose logging down to the DEBUG level.
var DebugFlag = &cli.BoolFlag{
Name: "debug, d",
Name: "debug",
Aliases: []string{"d"},
Destination: &Debug,
Usage: "Show DEBUG messages",
}
// Offline stores the variable from OfflineFlag.
var Offline bool
// DebugFlag turns on/off offline mode.
var OfflineFlag = &cli.BoolFlag{
Name: "offline",
Aliases: []string{"o"},
Destination: &Offline,
Usage: "Prefer offline & filesystem access",
}
// ReleaseNotes stores the variable from ReleaseNotesFlag.
var ReleaseNotes bool
// ReleaseNotesFlag turns on/off printing only release notes when upgrading.
var ReleaseNotesFlag = &cli.BoolFlag{
Name: "releasenotes",
Aliases: []string{"r"},
Destination: &ReleaseNotes,
Usage: "Only show release notes",
}
// MachineReadable stores the variable from MachineReadableFlag
var MachineReadable bool
// MachineReadableFlag turns on/off machine readable output where supported
var MachineReadableFlag = &cli.BoolFlag{
Name: "machine, m",
Name: "machine",
Aliases: []string{"m"},
Destination: &MachineReadable,
Usage: "Output in a machine-readable format (where supported)",
Usage: "Machine-readable output",
}
// RC signifies the latest release candidate
@ -258,49 +147,56 @@ var RC bool
// RCFlag chooses the latest release candidate for install
var RCFlag = &cli.BoolFlag{
Name: "rc, r",
Name: "rc",
Aliases: []string{"r"},
Destination: &RC,
Usage: "Insatll the latest release candidate",
Usage: "Install the latest release candidate",
}
var Major bool
var MajorFlag = &cli.BoolFlag{
Name: "major, x",
Name: "major",
Aliases: []string{"x"},
Usage: "Increase the major part of the version",
Destination: &Major,
}
var Minor bool
var MinorFlag = &cli.BoolFlag{
Name: "minor, y",
Name: "minor",
Aliases: []string{"y"},
Usage: "Increase the minor part of the version",
Destination: &Minor,
}
var Patch bool
var PatchFlag = &cli.BoolFlag{
Name: "patch, z",
Name: "patch",
Aliases: []string{"z"},
Usage: "Increase the patch part of the version",
Destination: &Patch,
}
var Dry bool
var DryFlag = &cli.BoolFlag{
Name: "dry-run, r",
Name: "dry-run",
Aliases: []string{"r"},
Usage: "Only reports changes that would be made",
Destination: &Dry,
}
var Publish bool
var PublishFlag = &cli.BoolFlag{
Name: "publish, p",
Name: "publish",
Aliases: []string{"p"},
Usage: "Publish changes to git.coopcloud.tech",
Destination: &Publish,
}
var Domain string
var DomainFlag = &cli.StringFlag{
Name: "domain, D",
Name: "domain",
Aliases: []string{"D"},
Value: "",
Usage: "Choose a domain name",
Destination: &Domain,
@ -308,7 +204,8 @@ var DomainFlag = &cli.StringFlag{
var NewAppServer string
var NewAppServerFlag = &cli.StringFlag{
Name: "server, s",
Name: "server",
Aliases: []string{"s"},
Value: "",
Usage: "Show apps of a specific server",
Destination: &NewAppServer,
@ -316,142 +213,118 @@ var NewAppServerFlag = &cli.StringFlag{
var NoDomainChecks bool
var NoDomainChecksFlag = &cli.BoolFlag{
Name: "no-domain-checks, D",
Usage: "Disable app domain sanity checks",
Name: "no-domain-checks",
Aliases: []string{"D"},
Usage: "Disable public DNS checks",
Destination: &NoDomainChecks,
}
var StdErrOnly bool
var StdErrOnlyFlag = &cli.BoolFlag{
Name: "stderr, s",
Name: "stderr",
Aliases: []string{"s"},
Usage: "Only tail stderr",
Destination: &StdErrOnly,
}
var SinceLogs string
var SinceLogsFlag = &cli.StringFlag{
Name: "since",
Aliases: []string{"S"},
Value: "",
Usage: "tail logs since YYYY-MM-DDTHH:MM:SSZ",
Destination: &SinceLogs,
}
var DontWaitConverge bool
var DontWaitConvergeFlag = &cli.BoolFlag{
Name: "no-converge-checks, c",
Name: "no-converge-checks",
Aliases: []string{"c"},
Usage: "Don't wait for converge logic checks",
Destination: &DontWaitConverge,
}
var Watch bool
var WatchFlag = &cli.BoolFlag{
Name: "watch, w",
Name: "watch",
Aliases: []string{"w"},
Usage: "Watch status by polling repeatedly",
Destination: &Watch,
}
var OnlyErrors bool
var OnlyErrorFlag = &cli.BoolFlag{
Name: "errors, e",
Name: "errors",
Aliases: []string{"e"},
Usage: "Only show errors",
Destination: &OnlyErrors,
}
var SkipUpdates bool
var SkipUpdatesFlag = &cli.BoolFlag{
Name: "skip-updates, s",
Name: "skip-updates",
Aliases: []string{"s"},
Usage: "Skip updating recipe repositories",
Destination: &SkipUpdates,
}
var AllTags bool
var AllTagsFlag = &cli.BoolFlag{
Name: "all-tags, a",
Name: "all-tags",
Aliases: []string{"a"},
Usage: "List all tags, not just upgrades",
Destination: &AllTags,
}
var LocalCmd bool
var LocalCmdFlag = &cli.BoolFlag{
Name: "local, l",
Name: "local",
Aliases: []string{"l"},
Usage: "Run command locally",
Destination: &LocalCmd,
}
var RemoteUser string
var RemoteUserFlag = &cli.StringFlag{
Name: "user, u",
Name: "user",
Aliases: []string{"u"},
Value: "",
Usage: "User to run command within a service context",
Destination: &RemoteUser,
}
// SSHFailMsg is a hopefully helpful SSH failure message
var SSHFailMsg = `
Woops, Abra is unable to connect to %s.
var GitName string
var GitNameFlag = &cli.StringFlag{
Name: "git-name",
Aliases: []string{"gn"},
Value: "",
Usage: "Git (user) name to do commits with",
Destination: &GitName,
}
Here are a few tips for debugging your local SSH config. Abra uses plain ol'
SSH to make connections to servers, so if your SSH config is working, Abra is
working.
var GitEmail string
var GitEmailFlag = &cli.StringFlag{
Name: "git-email",
Aliases: []string{"ge"},
Value: "",
Usage: "Git email name to do commits with",
Destination: &GitEmail,
}
In the first place, Abra will always try to read your Docker context connection
string for SSH connection details. You can view your server context configs
with the following command. Are they correct?
abra server ls
Is your ssh-agent running? You can start it by running the following command:
eval "$(ssh-agent)"
Is your SSH private key loaded? You can check by running the following command:
ssh-add -L
If not, you can add it with:
ssh-add ~/.ssh/<private-key-part>
If you are using a non-default public/private key, you can configure this in
your ~/.ssh/config file which Abra will read in order to figure out connection
details:
Host foo.coopcloud.tech
Hostname foo.coopcloud.tech
User bar
Port 12345
IdentityFile ~/.ssh/bar@foo.coopcloud.tech
If you're only using password authentication, you can use the following config:
Host foo.coopcloud.tech
Hostname foo.coopcloud.tech
User bar
Port 12345
PreferredAuthentications=password
PubkeyAuthentication=no
Good luck!
`
var ServerAddFailMsg = `
Failed to add server %s.
This could be caused by one of two things.
Abra isn't picking up your SSH configuration or you need to specify it on the
command-line (e.g. you use a non-standard port or username to connect). Run
"server add" with "-d/--debug" to learn more about what Abra is doing under the
hood.
Docker is not installed on your server. You can pass "-p/--provision" to
install Docker and initialise Docker Swarm mode. See help output for "server
add".
See "abra server add -h" for more.
`
var AllServices bool
var AllServicesFlag = &cli.BoolFlag{
Name: "all-services",
Aliases: []string{"a"},
Usage: "Restart all services",
Destination: &AllServices,
}
// SubCommandBefore wires up pre-action machinery (e.g. --debug handling).
func SubCommandBefore(c *cli.Context) error {
func SubCommandBefore(ctx context.Context, cmd *cli.Command) error {
if Debug {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetFormatter(&logrus.TextFormatter{})
logrus.SetOutput(os.Stderr)
logrus.AddHook(logrusStack.StandardHook())
log.SetLevel(log.DebugLevel)
log.SetOutput(os.Stderr)
log.SetReportCaller(true)
}
return nil


@ -2,10 +2,109 @@ package internal
import (
"bufio"
"context"
"fmt"
"io/ioutil"
"os/exec"
"strings"
appPkg "coopcloud.tech/abra/pkg/app"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
)
// RunCmdRemote executes an abra.sh command in the target service
func RunCmdRemote(cl *dockerClient.Client, app appPkg.App, abraSh, serviceName, cmdName, cmdArgs string) error {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, false)
if err != nil {
return err
}
log.Debugf("retrieved %s as target container on %s", formatter.ShortenID(targetContainer.ID), app.Server)
toTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
content, err := archive.TarWithOptions(abraSh, toTarOpts)
if err != nil {
return err
}
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), targetContainer.ID, "/tmp", content, copyOpts); err != nil {
return err
}
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
shell := "/bin/bash"
findShell := []string{"test", "-e", shell}
execCreateOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: findShell,
Detach: false,
Tty: false,
}
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
log.Infof("%s does not exist for %s, use /bin/sh as fallback", shell, app.Name)
shell = "/bin/sh"
}
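// Build the in-container command: set context variables (TARGET, APP_NAME,
// STACK_NAME), source the copied abra.sh, then invoke the requested function
// (with arguments, if any).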
var cmd []string
if cmdArgs != "" {
cmd = []string{shell, "-c", fmt.Sprintf("TARGET=%s; APP_NAME=%s; STACK_NAME=%s; . /tmp/abra.sh; %s %s", serviceName, app.Name, app.StackName(), cmdName, cmdArgs)}
} else {
cmd = []string{shell, "-c", fmt.Sprintf("TARGET=%s; APP_NAME=%s; STACK_NAME=%s; . /tmp/abra.sh; %s", serviceName, app.Name, app.StackName(), cmdName)}
}
log.Debugf("running command: %s", strings.Join(cmd, " "))
if RemoteUser != "" {
log.Debugf("running command with user %s", RemoteUser)
execCreateOpts.User = RemoteUser
}
execCreateOpts.Cmd = cmd
execCreateOpts.Tty = true
if Tty {
execCreateOpts.Tty = false
}
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
return err
}
return nil
}
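// EnsureCommand checks that the recipe's abra.sh defines execCmd. Note this
// is a plain substring check, not a full shell parse.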
func EnsureCommand(abraSh, recipeName, execCmd string) error {
bytes, err := ioutil.ReadFile(abraSh)
if err != nil {
return err
}
if !strings.Contains(string(bytes), execCmd) {
return fmt.Errorf("%s doesn't have a %s function", recipeName, execCmd)
}
return nil
}
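// A minimal usage sketch of the two helpers above (the "migrate" command and
// "app" service name are hypothetical):
//
//	if err := EnsureCommand(app.Recipe.AbraShPath, app.Recipe.Name, "migrate"); err != nil {
//		return err
//	}
//	if err := RunCmdRemote(cl, app, app.Recipe.AbraShPath, "app", "migrate", ""); err != nil {
//		return err
//	}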
// RunCmd runs a shell command and streams stdout/stderr in real-time.
func RunCmd(cmd *exec.Cmd) error {
r, err := cmd.StdoutPipe()


@ -1,165 +1,107 @@
package internal
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/dns"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/stack"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/log"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/charmbracelet/lipgloss"
dockerClient "github.com/docker/docker/client"
)
// DeployAction is the main command-line action for this package
func DeployAction(c *cli.Context) error {
app := ValidateApp(c)
var borderStyle = lipgloss.NewStyle().
BorderStyle(lipgloss.ThickBorder()).
Padding(0, 1, 0, 1).
MaxWidth(79).
BorderForeground(lipgloss.Color("63"))
if !Chaos {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
var headerStyle = lipgloss.NewStyle().
Underline(true).
Bold(true)
var leftStyle = lipgloss.NewStyle().
Bold(true)
var rightStyle = lipgloss.NewStyle()
// horizontal is a JoinHorizontal helper function.
func horizontal(left, mid, right string) string {
return lipgloss.JoinHorizontal(lipgloss.Left, left, mid, right)
}
// NewVersionOverview shows an upgrade or downgrade overview
func NewVersionOverview(
app appPkg.App,
warnMessages []string,
kind,
currentVersion,
chaosVersion,
newVersion,
releaseNotes string) error {
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = strings.Join(strings.Split(composeFiles, ":"), "\n")
}
r, err := recipe.Get(app.Recipe)
if err != nil {
logrus.Fatal(err)
server := app.Server
if app.Server == "default" {
server = "local"
}
if err := lint.LintForErrors(r); err != nil {
logrus.Fatal(err)
}
body := strings.Builder{}
body.WriteString(
borderStyle.Render(
lipgloss.JoinVertical(
lipgloss.Center,
headerStyle.Render(fmt.Sprintf("%s OVERVIEW", strings.ToUpper(kind))),
lipgloss.JoinVertical(
lipgloss.Left,
horizontal(leftStyle.Render("SERVER"), " ", rightStyle.Render(server)),
horizontal(leftStyle.Render("DOMAIN"), " ", rightStyle.Render(app.Domain)),
horizontal(leftStyle.Render("RECIPE"), " ", rightStyle.Render(app.Recipe.Name)),
horizontal(leftStyle.Render("CONFIG"), " ", rightStyle.Render(deployConfig)),
horizontal(leftStyle.Render("VERSION"), " ", rightStyle.Render(currentVersion)),
horizontal(leftStyle.Render("CHAOS"), " ", rightStyle.Render(chaosVersion)),
horizontal(leftStyle.Render("DEPLOY"), " ", rightStyle.Padding(0).Render(newVersion)),
),
),
),
)
fmt.Println(body.String())
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("checking whether %s is already deployed", app.StackName())
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
logrus.Fatal(err)
}
if isDeployed {
if Force || Chaos {
logrus.Warnf("%s is already deployed but continuing (--force/--chaos)", app.Name)
} else {
logrus.Fatalf("%s is already deployed", app.Name)
}
}
version := deployedVersion
if version == "unknown" && !Chaos {
catl, err := recipe.ReadRecipeCatalogue()
if err != nil {
logrus.Fatal(err)
}
versions, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
if err != nil {
logrus.Fatal(err)
}
if len(versions) > 0 {
version = versions[len(versions)-1]
logrus.Debugf("choosing %s as version to deploy", version)
if err := recipe.EnsureVersion(app.Recipe, version); err != nil {
logrus.Fatal(err)
}
} else {
head, err := git.GetRecipeHead(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
version = formatter.SmallSHA(head.String())
logrus.Warn("no versions detected, using latest commit")
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
}
if version == "unknown" && !Chaos {
logrus.Debugf("choosing %s as version to deploy", version)
if err := recipe.EnsureVersion(app.Recipe, version); err != nil {
logrus.Fatal(err)
}
}
if version != "unknown" && !Chaos {
if err := recipe.EnsureVersion(app.Recipe, version); err != nil {
logrus.Fatal(err)
}
}
if Chaos {
logrus.Warnf("chaos mode engaged")
var err error
version, err = recipe.ChaosVersion(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
if err != nil {
logrus.Fatal(err)
}
for k, v := range abraShEnv {
app.Env[k] = v
}
composeFiles, err := config.GetAppComposeFiles(app.Recipe, app.Env)
if err != nil {
logrus.Fatal(err)
}
deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: app.StackName(),
Prune: false,
ResolveImage: stack.ResolveImageAlways,
}
compose, err := config.GetAppComposeConfig(app.Name, deployOpts, app.Env)
if err != nil {
logrus.Fatal(err)
}
if err := DeployOverview(app, version, "continue with deployment?"); err != nil {
logrus.Fatal(err)
}
if !NoDomainChecks {
domainName := app.Env["DOMAIN"]
if _, err = dns.EnsureDomainsResolveSameIPv4(domainName, app.Server); err != nil {
logrus.Fatal(err)
}
if releaseNotes != "" && newVersion != "" {
fmt.Println()
fmt.Print(releaseNotes)
} else {
logrus.Warn("skipping domain checks as requested")
warnMessages = append(warnMessages, fmt.Sprintf("no release notes available for %s", newVersion))
}
if err := stack.RunDeploy(cl, deployOpts, compose, app.Name, DontWaitConverge); err != nil {
logrus.Fatal(err)
for _, msg := range warnMessages {
log.Warn(msg)
}
if NoInput {
return nil
}
response := false
prompt := &survey.Confirm{Message: "proceed?"}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
log.Fatal("deployment cancelled")
}
return nil
}
// DeployOverview shows a deployment overview
func DeployOverview(app config.App, version, message string) error {
tableCol := []string{"server", "recipe", "config", "domain", "version"}
table := formatter.CreateTable(tableCol)
func DeployOverview(app appPkg.App, warnMessages []string, version, chaosVersion string) error {
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = strings.Join(strings.Split(composeFiles, ":"), "\n")
@ -170,97 +112,97 @@ func DeployOverview(app config.App, version, message string) error {
server = "local"
}
table.Append([]string{server, app.Recipe, deployConfig, app.Domain, version})
table.Render()
body := strings.Builder{}
body.WriteString(
borderStyle.Render(
lipgloss.JoinVertical(
lipgloss.Center,
headerStyle.Render("DEPLOY OVERVIEW"),
lipgloss.JoinVertical(
lipgloss.Left,
horizontal(leftStyle.Render("SERVER"), " ", rightStyle.Render(server)),
horizontal(leftStyle.Render("DOMAIN"), " ", rightStyle.Render(app.Domain)),
horizontal(leftStyle.Render("RECIPE"), " ", rightStyle.Render(app.Recipe.Name)),
horizontal(leftStyle.Render("CONFIG"), " ", rightStyle.Render(deployConfig)),
horizontal(leftStyle.Render("VERSION"), " ", rightStyle.Render(version)),
horizontal(leftStyle.Render("CHAOS"), " ", rightStyle.Padding(0).Render(chaosVersion)),
),
),
),
)
fmt.Println(body.String())
for _, msg := range warnMessages {
log.Warn(msg)
}
if NoInput {
return nil
}
response := false
prompt := &survey.Confirm{
Message: message,
}
prompt := &survey.Confirm{Message: "proceed?"}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
log.Fatal("deployment cancelled")
}
return nil
}
// NewVersionOverview shows an upgrade or downgrade overview
func NewVersionOverview(app config.App, currentVersion, newVersion, releaseNotes string) error {
tableCol := []string{"server", "recipe", "config", "domain", "current version", "to be deployed"}
table := formatter.CreateTable(tableCol)
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = strings.Join(strings.Split(composeFiles, ":"), "\n")
// PostCmds parses a string of commands and executes them inside the respective services.
// The commands string must have the following format:
// "<service> <command> <arguments>|<service> <command> <arguments>|..."
func PostCmds(cl *dockerClient.Client, app appPkg.App, commands string) error {
if _, err := os.Stat(app.Recipe.AbraShPath); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf(fmt.Sprintf("%s does not exist for %s?", app.Recipe.AbraShPath, app.Name))
}
return err
}
server := app.Server
if app.Server == "default" {
server = "local"
}
for _, command := range strings.Split(commands, "|") {
commandParts := strings.Split(command, " ")
if len(commandParts) < 2 {
return fmt.Errorf(fmt.Sprintf("not enough arguments: %s", command))
}
targetServiceName := commandParts[0]
cmdName := commandParts[1]
parsedCmdArgs := ""
if len(commandParts) > 2 {
parsedCmdArgs = fmt.Sprintf("%s ", strings.Join(commandParts[2:], " "))
}
log.Infof("running post-command '%s %s' in container %s", cmdName, parsedCmdArgs, targetServiceName)
table.Append([]string{server, app.Recipe, deployConfig, app.Domain, currentVersion, newVersion})
table.Render()
if err := EnsureCommand(app.Recipe.AbraShPath, app.Recipe.Name, cmdName); err != nil {
return err
}
if releaseNotes == "" {
var err error
releaseNotes, err = GetReleaseNotes(app.Recipe, newVersion)
serviceNames, err := appPkg.GetAppServiceNames(app.Name)
if err != nil {
return err
}
}
if releaseNotes != "" && newVersion != "" {
fmt.Println()
fmt.Println(fmt.Sprintf("%s release notes:\n\n%s", newVersion, releaseNotes))
} else {
logrus.Warnf("no release notes available for %s", newVersion)
}
matchingServiceName := false
for _, serviceName := range serviceNames {
if serviceName == targetServiceName {
matchingServiceName = true
}
}
if NoInput {
return nil
}
if !matchingServiceName {
return fmt.Errorf(fmt.Sprintf("no service %s for %s?", targetServiceName, app.Name))
}
response := false
prompt := &survey.Confirm{
Message: "continue with deployment?",
}
log.Debugf("running command %s %s within the context of %s_%s", cmdName, parsedCmdArgs, app.StackName(), targetServiceName)
if err := survey.AskOne(prompt, &response); err != nil {
return err
Tty = true
if err := RunCmdRemote(cl, app, app.Recipe.AbraShPath, targetServiceName, cmdName, parsedCmdArgs); err != nil {
return err
}
}
if !response {
logrus.Fatal("exiting as requested")
}
return nil
}
// GetReleaseNotes prints release notes for a recipe version
func GetReleaseNotes(recipeName, version string) (string, error) {
if version == "" {
return "", nil
}
fpath := path.Join(config.RECIPES_DIR, recipeName, "release", version)
if _, err := os.Stat(fpath); !os.IsNotExist(err) {
releaseNotes, err := ioutil.ReadFile(fpath)
if err != nil {
return "", err
}
return string(releaseNotes), nil
}
return "", nil
}


@ -3,16 +3,16 @@ package internal
import (
"os"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"coopcloud.tech/abra/pkg/log"
"github.com/urfave/cli/v3"
)
// ShowSubcommandHelpAndError exits the program on error, logs the error to the
// terminal, and shows the help command.
func ShowSubcommandHelpAndError(c *cli.Context, err interface{}) {
if err2 := cli.ShowSubcommandHelp(c); err2 != nil {
logrus.Error(err2)
func ShowSubcommandHelpAndError(cmd *cli.Command, err interface{}) {
if err2 := cli.ShowSubcommandHelp(cmd); err2 != nil {
log.Error(err2)
}
logrus.Error(err)
log.Error(err)
os.Exit(1)
}


@ -1,196 +0,0 @@
package internal
import (
"fmt"
"path"
"coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/secret"
"coopcloud.tech/abra/pkg/ssh"
"github.com/AlecAivazis/survey/v2"
"github.com/olekukonko/tablewriter"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// AppSecrets represents all app secrets
type AppSecrets map[string]string
// RecipeName is used for configuring recipe name programmatically
var RecipeName string
// createSecrets creates all secrets for a new app.
func createSecrets(sanitisedAppName string) (AppSecrets, error) {
appEnvPath := path.Join(config.ABRA_DIR, "servers", NewAppServer, fmt.Sprintf("%s.env", Domain))
appEnv, err := config.ReadEnv(appEnvPath)
if err != nil {
return nil, err
}
secretEnvVars := secret.ReadSecretEnvVars(appEnv)
secrets, err := secret.GenerateSecrets(secretEnvVars, sanitisedAppName, NewAppServer)
if err != nil {
return nil, err
}
if Pass {
for secretName := range secrets {
secretValue := secrets[secretName]
if err := secret.PassInsertSecret(secretValue, secretName, Domain, NewAppServer); err != nil {
return nil, err
}
}
}
return secrets, nil
}
// ensureDomainFlag checks if the domain flag was used. If not, asks the user for it.
func ensureDomainFlag(recipe recipe.Recipe, server string) error {
if Domain == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify app domain",
Default: fmt.Sprintf("%s.%s", recipe.Name, server),
}
if err := survey.AskOne(prompt, &Domain); err != nil {
return err
}
}
if Domain == "" {
return fmt.Errorf("no domain provided")
}
return nil
}
// promptForSecrets asks if we should generate secrets for a new app.
func promptForSecrets(appName string) error {
app, err := app.Get(appName)
if err != nil {
return err
}
secretEnvVars := secret.ReadSecretEnvVars(app.Env)
if len(secretEnvVars) == 0 {
logrus.Debugf("%s has no secrets to generate, skipping...", app.Recipe)
return nil
}
if !Secrets && !NoInput {
prompt := &survey.Confirm{
Message: "Generate app secrets?",
}
if err := survey.AskOne(prompt, &Secrets); err != nil {
return err
}
}
return nil
}
// ensureServerFlag checks if the server flag was used. If not, asks the user for it.
func ensureServerFlag() error {
servers, err := config.GetServers()
if err != nil {
return err
}
if NewAppServer == "" && !NoInput {
prompt := &survey.Select{
Message: "Select app server:",
Options: servers,
}
if err := survey.AskOne(prompt, &NewAppServer); err != nil {
return err
}
}
if NewAppServer == "" {
return fmt.Errorf("no server provided")
}
return nil
}
// NewAction is the new app creation logic
func NewAction(c *cli.Context) error {
recipe := ValidateRecipeWithPrompt(c, false)
if err := recipePkg.EnsureUpToDate(recipe.Name); err != nil {
logrus.Fatal(err)
}
if err := ensureServerFlag(); err != nil {
logrus.Fatal(err)
}
if err := ensureDomainFlag(recipe, NewAppServer); err != nil {
logrus.Fatal(err)
}
sanitisedAppName := config.SanitiseAppName(Domain)
logrus.Debugf("%s sanitised as %s for new app", Domain, sanitisedAppName)
if err := config.TemplateAppEnvSample(recipe.Name, Domain, NewAppServer, Domain); err != nil {
logrus.Fatal(err)
}
if err := promptForSecrets(Domain); err != nil {
logrus.Fatal(err)
}
var secrets AppSecrets
var secretTable *tablewriter.Table
if Secrets {
if err := ssh.EnsureHostKey(NewAppServer); err != nil {
logrus.Fatal(err)
}
var err error
secrets, err = createSecrets(sanitisedAppName)
if err != nil {
logrus.Fatal(err)
}
secretCols := []string{"Name", "Value"}
secretTable = formatter.CreateTable(secretCols)
for secret := range secrets {
secretTable.Append([]string{secret, secrets[secret]})
}
}
if NewAppServer == "default" {
NewAppServer = "local"
}
tableCol := []string{"server", "recipe", "domain"}
table := formatter.CreateTable(tableCol)
table.Append([]string{NewAppServer, recipe.Name, Domain})
fmt.Println("")
fmt.Println(fmt.Sprintf("A new %s app has been created! Here is an overview:", recipe.Name))
fmt.Println("")
table.Render()
fmt.Println("")
fmt.Println("You can configure this app by running the following:")
fmt.Println(fmt.Sprintf("\n abra app config %s", Domain))
fmt.Println("")
fmt.Println("You can deploy this app by running the following:")
fmt.Println(fmt.Sprintf("\n abra app deploy %s", Domain))
fmt.Println("")
if len(secrets) > 0 {
fmt.Println("Here are your generated secrets:")
fmt.Println("")
secretTable.Render()
fmt.Println("")
logrus.Warn("generated secrets are not shown again, please take note of them *now*")
}
return nil
}


@ -4,10 +4,10 @@ import (
"fmt"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/AlecAivazis/survey/v2"
"github.com/docker/distribution/reference"
"github.com/sirupsen/logrus"
"github.com/distribution/reference"
)
// PromptBumpType prompts for version bump type
@ -65,7 +65,7 @@ func GetBumpType() string {
} else if Patch {
bumpType = "patch"
} else {
logrus.Fatal("no version bump type specififed?")
log.Fatal("no version bump type specififed?")
}
return bumpType
@ -80,7 +80,7 @@ func SetBumpType(bumpType string) {
} else if bumpType == "patch" {
Patch = true
} else {
logrus.Fatal("no version bump type specififed?")
log.Fatal("no version bump type specififed?")
}
}
@ -88,7 +88,11 @@ func SetBumpType(bumpType string) {
func GetMainAppImage(recipe recipe.Recipe) (string, error) {
var path string
for _, service := range recipe.Config.Services {
config, err := recipe.GetComposeConfig(nil)
if err != nil {
return "", err
}
for _, service := range config.Services {
if service.Name == "app" {
img, err := reference.ParseNormalizedNamed(service.Image)
if err != nil {


@ -2,64 +2,26 @@ package internal
import (
"errors"
"fmt"
"os"
"strings"
"coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/ssh"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
// AppName is used for configuring app name programmatically
var AppName string
// ValidateRecipe ensures the recipe arg is valid.
func ValidateRecipe(c *cli.Context, ensureLatest bool) recipe.Recipe {
recipeName := c.Args().First()
if recipeName == "" {
ShowSubcommandHelpAndError(c, errors.New("no recipe name provided"))
}
chosenRecipe, err := recipe.Get(recipeName)
if err != nil {
if c.Command.Name == "generate" {
if strings.Contains(err.Error(), "missing a compose") {
logrus.Fatal(err)
}
logrus.Warn(err)
} else {
logrus.Fatal(err)
}
}
if ensureLatest {
if err := recipe.EnsureLatest(recipeName); err != nil {
logrus.Fatal(err)
}
}
logrus.Debugf("validated %s as recipe argument", recipeName)
return chosenRecipe
}
// ValidateRecipeWithPrompt ensures a recipe argument is present before
// validating, asking for input if required.
func ValidateRecipeWithPrompt(c *cli.Context, ensureLatest bool) recipe.Recipe {
recipeName := c.Args().First()
func ValidateRecipe(cmd *cli.Command) recipe.Recipe {
recipeName := cmd.Args().First()
if recipeName == "" && !NoInput {
var recipes []string
catl, err := recipe.ReadRecipeCatalogue()
catl, err := recipe.ReadRecipeCatalogue(Offline)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
knownRecipes := make(map[string]bool)
@ -69,7 +31,7 @@ func ValidateRecipeWithPrompt(c *cli.Context, ensureLatest bool) recipe.Recipe {
localRecipes, err := recipe.GetRecipesLocal()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
for _, recipeLocal := range localRecipes {
@ -87,69 +49,60 @@ func ValidateRecipeWithPrompt(c *cli.Context, ensureLatest bool) recipe.Recipe {
Options: recipes,
}
if err := survey.AskOne(prompt, &recipeName); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
if RecipeName != "" {
recipeName = RecipeName
logrus.Debugf("programmatically setting recipe name to %s", recipeName)
}
if recipeName == "" {
ShowSubcommandHelpAndError(c, errors.New("no recipe name provided"))
ShowSubcommandHelpAndError(cmd, errors.New("no recipe name provided"))
}
chosenRecipe, err := recipe.Get(recipeName)
chosenRecipe := recipe.Get(recipeName)
err := chosenRecipe.EnsureExists()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if ensureLatest {
if err := recipe.EnsureLatest(recipeName); err != nil {
logrus.Fatal(err)
_, err = chosenRecipe.GetComposeConfig(nil)
if err != nil {
if cmd.Name == "generate" {
if strings.Contains(err.Error(), "missing a compose") {
log.Fatal(err)
}
log.Warn(err)
} else {
if strings.Contains(err.Error(), "template_driver is not allowed") {
log.Warnf("ensure %s recipe compose.* files include \"version: '3.8'\"", recipeName)
}
log.Fatalf("unable to validate recipe: %s", err)
}
}
logrus.Debugf("validated %s as recipe argument", recipeName)
log.Debugf("validated %s as recipe argument", recipeName)
return chosenRecipe
}
// ValidateApp ensures the app name arg is valid.
func ValidateApp(c *cli.Context) config.App {
appName := c.Args().First()
if AppName != "" {
appName = AppName
logrus.Debugf("programmatically setting app name to %s", appName)
}
func ValidateApp(cmd *cli.Command) app.App {
appName := cmd.Args().First()
if appName == "" {
ShowSubcommandHelpAndError(c, errors.New("no app provided"))
ShowSubcommandHelpAndError(cmd, errors.New("no app provided"))
}
app, err := app.Get(appName)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if err := ssh.EnsureHostKey(app.Server); err != nil {
logrus.Fatal(err)
}
logrus.Debugf("validated %s as app argument", appName)
log.Debugf("validated %s as app argument", appName)
return app
}
// ValidateDomain ensures the domain name arg is valid.
func ValidateDomain(c *cli.Context) string {
domainName := c.Args().First()
func ValidateDomain(cmd *cli.Command) string {
domainName := cmd.Args().First()
if domainName == "" && !NoInput {
prompt := &survey.Input{
@ -157,24 +110,24 @@ func ValidateDomain(c *cli.Context) string {
Default: "example.com",
}
if err := survey.AskOne(prompt, &domainName); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
if domainName == "" {
ShowSubcommandHelpAndError(c, errors.New("no domain provided"))
ShowSubcommandHelpAndError(cmd, errors.New("no domain provided"))
}
logrus.Debugf("validated %s as domain argument", domainName)
log.Debugf("validated %s as domain argument", domainName)
return domainName
}
// ValidateSubCmdFlags ensures that flags are passed before positional arguments.
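// For example (hypothetical invocation): "app cmd --local myapp backup" is
// accepted, while "app cmd myapp --local backup" is rejected because a flag
// follows a positional argument.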
func ValidateSubCmdFlags(c *cli.Context) bool {
for argIdx, arg := range c.Args() {
func ValidateSubCmdFlags(cmd *cli.Command) bool {
for argIdx, arg := range cmd.Args().Slice() {
if !strings.HasPrefix(arg, "--") {
for _, flag := range c.Args()[argIdx:] {
for _, flag := range cmd.Args().Slice()[argIdx:] {
if strings.HasPrefix(flag, "--") {
return false
}
@ -185,12 +138,12 @@ func ValidateSubCmdFlags(c *cli.Context) bool {
}
// ValidateServer ensures the server name arg is valid.
func ValidateServer(c *cli.Context) string {
serverName := c.Args().First()
func ValidateServer(cmd *cli.Command) string {
serverName := cmd.Args().First()
serverNames, err := config.ReadServerNames()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if serverName == "" && !NoInput {
@ -199,7 +152,7 @@ func ValidateServer(c *cli.Context) string {
Options: serverNames,
}
if err := survey.AskOne(prompt, &serverName); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
@ -210,309 +163,15 @@ func ValidateServer(c *cli.Context) string {
}
}
if !matched {
ShowSubcommandHelpAndError(c, errors.New("server doesn't exist?"))
}
if serverName == "" {
ShowSubcommandHelpAndError(c, errors.New("no server provided"))
ShowSubcommandHelpAndError(cmd, errors.New("no server provided"))
}
logrus.Debugf("validated %s as server argument", serverName)
if !matched {
ShowSubcommandHelpAndError(cmd, errors.New("server doesn't exist?"))
}
log.Debugf("validated %s as server argument", serverName)
return serverName
}
// EnsureDNSProvider ensures a DNS provider is chosen.
func EnsureDNSProvider() error {
if DNSProvider == "" && !NoInput {
prompt := &survey.Select{
Message: "Select DNS provider",
Options: []string{"gandi"},
}
if err := survey.AskOne(prompt, &DNSProvider); err != nil {
return err
}
}
if DNSProvider == "" {
return fmt.Errorf("missing DNS provider?")
}
return nil
}
// EnsureDNSTypeFlag ensures a DNS type flag is present.
func EnsureDNSTypeFlag(c *cli.Context) error {
if DNSType == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify DNS record type",
Default: "A",
}
if err := survey.AskOne(prompt, &DNSType); err != nil {
return err
}
}
if DNSType == "" {
ShowSubcommandHelpAndError(c, errors.New("no record type provided"))
}
return nil
}
// EnsureDNSNameFlag ensures a DNS name flag is present.
func EnsureDNSNameFlag(c *cli.Context) error {
if DNSName == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify DNS record name",
Default: "mysubdomain",
}
if err := survey.AskOne(prompt, &DNSName); err != nil {
return err
}
}
if DNSName == "" {
ShowSubcommandHelpAndError(c, errors.New("no record name provided"))
}
return nil
}
// EnsureDNSValueFlag ensures a DNS value flag is present.
func EnsureDNSValueFlag(c *cli.Context) error {
if DNSValue == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify DNS record value",
Default: "192.168.1.2",
}
if err := survey.AskOne(prompt, &DNSValue); err != nil {
return err
}
}
if DNSValue == "" {
ShowSubcommandHelpAndError(c, errors.New("no record value provided"))
}
return nil
}
// EnsureZoneArgument ensures a zone argument is present.
func EnsureZoneArgument(c *cli.Context) (string, error) {
zone := c.Args().First()
if zone == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify a domain name zone",
Default: "example.com",
}
if err := survey.AskOne(prompt, &zone); err != nil {
return zone, err
}
}
if zone == "" {
ShowSubcommandHelpAndError(c, errors.New("no zone value provided"))
}
return zone, nil
}
// EnsureServerProvider ensures a 3rd party server provider is chosen.
func EnsureServerProvider() error {
if ServerProvider == "" && !NoInput {
prompt := &survey.Select{
Message: "Select server provider",
Options: []string{"capsul", "hetzner-cloud"},
}
if err := survey.AskOne(prompt, &ServerProvider); err != nil {
return err
}
}
if ServerProvider == "" {
return fmt.Errorf("missing server provider?")
}
return nil
}
// EnsureNewCapsulVPSFlags ensure all flags are present.
func EnsureNewCapsulVPSFlags(c *cli.Context) error {
if CapsulName == "" && !NoInput {
prompt := &survey.Input{
Message: "specify capsul name",
}
if err := survey.AskOne(prompt, &CapsulName); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify capsul instance URL",
Default: CapsulInstanceURL,
}
if err := survey.AskOne(prompt, &CapsulInstanceURL); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify capsul type",
Default: CapsulType,
}
if err := survey.AskOne(prompt, &CapsulType); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify capsul image",
Default: CapsulImage,
}
if err := survey.AskOne(prompt, &CapsulImage); err != nil {
return err
}
}
if len(CapsulSSHKeys.Value()) == 0 && !NoInput {
var sshKeys string
prompt := &survey.Input{
Message: "specify capsul SSH keys (e.g. me@foo.com)",
Default: "",
}
if err := survey.AskOne(prompt, &CapsulSSHKeys); err != nil {
return err
}
CapsulSSHKeys = cli.StringSlice(strings.Split(sshKeys, ","))
}
if CapsulAPIToken == "" && !NoInput {
token, ok := os.LookupEnv("CAPSUL_TOKEN")
if !ok {
prompt := &survey.Input{
Message: "specify capsul API token",
}
if err := survey.AskOne(prompt, &CapsulAPIToken); err != nil {
return err
}
} else {
CapsulAPIToken = token
}
}
if CapsulName == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul name?"))
}
if CapsulInstanceURL == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul instance url?"))
}
if CapsulType == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul type?"))
}
if CapsulImage == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul image?"))
}
if len(CapsulSSHKeys.Value()) == 0 {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul ssh keys?"))
}
if CapsulAPIToken == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul API token?"))
}
return nil
}
// EnsureNewHetznerCloudVPSFlags ensure all flags are present.
func EnsureNewHetznerCloudVPSFlags(c *cli.Context) error {
if HetznerCloudName == "" && !NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS name",
}
if err := survey.AskOne(prompt, &HetznerCloudName); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS type",
Default: HetznerCloudType,
}
if err := survey.AskOne(prompt, &HetznerCloudType); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS image",
Default: HetznerCloudImage,
}
if err := survey.AskOne(prompt, &HetznerCloudImage); err != nil {
return err
}
}
if len(HetznerCloudSSHKeys.Value()) == 0 && !NoInput {
var sshKeys string
prompt := &survey.Input{
Message: "specify hetzner cloud SSH keys (e.g. me@foo.com)",
Default: "",
}
if err := survey.AskOne(prompt, &sshKeys); err != nil {
return err
}
HetznerCloudSSHKeys = cli.StringSlice(strings.Split(sshKeys, ","))
}
if !NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS location",
Default: HetznerCloudLocation,
}
if err := survey.AskOne(prompt, &HetznerCloudLocation); err != nil {
return err
}
}
if HetznerCloudAPIToken == "" && !NoInput {
token, ok := os.LookupEnv("HCLOUD_TOKEN")
if !ok {
prompt := &survey.Input{
Message: "specify hetzner cloud API token",
}
if err := survey.AskOne(prompt, &HetznerCloudAPIToken); err != nil {
return err
}
} else {
HetznerCloudAPIToken = token
}
}
if HetznerCloudName == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud VPS name?"))
}
if HetznerCloudType == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud VPS type?"))
}
if HetznerCloudImage == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud image?"))
}
if HetznerCloudLocation == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud VPS location?"))
}
if HetznerCloudAPIToken == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud API token?"))
}
return nil
}

cli/recipe/diff.go Normal file

@ -0,0 +1,31 @@
package recipe
import (
"context"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
"github.com/urfave/cli/v3"
)
var recipeDiffCommand = cli.Command{
Name: "diff",
Usage: "Show unstaged changes in recipe config",
Description: "This command requires /usr/bin/git.",
Aliases: []string{"d"},
UsageText: "abra recipe diff <recipe> [options]",
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.RecipeNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
r := internal.ValidateRecipe(cmd)
if err := gitPkg.DiffUnstaged(r.Dir); err != nil {
log.Fatal(err)
}
return nil
},
}

cli/recipe/fetch.go Normal file

@ -0,0 +1,50 @@
package recipe
import (
"context"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/urfave/cli/v3"
)
var recipeFetchCommand = cli.Command{
Name: "fetch",
Usage: "Fetch recipe(s)",
Aliases: []string{"f"},
UsageText: "abra recipe fetch [<recipe>] [options]",
Description: "Retrieves all recipes if no <recipe> argument is passed",
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.RecipeNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
recipeName := cmd.Args().First()
r := recipe.Get(recipeName)
if recipeName != "" {
internal.ValidateRecipe(cmd)
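// The two booleans are assumed to mirror Ensure(chaos, offline), as in the
// lint command's recipe.Ensure(internal.Chaos, internal.Offline) call; both
// are disabled here.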
if err := r.Ensure(false, false); err != nil {
log.Fatal(err)
}
return nil
}
catalogue, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
log.Fatal(err)
}
catlBar := formatter.CreateProgressbar(len(catalogue), "fetching latest recipes...")
for recipeName := range catalogue {
r := recipe.Get(recipeName)
if err := r.Ensure(false, false); err != nil {
log.Error(err)
}
catlBar.Add(1)
}
return nil
},
}


@ -1,77 +1,120 @@
package recipe
import (
"context"
"fmt"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/lint"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"coopcloud.tech/abra/pkg/log"
"github.com/urfave/cli/v3"
)
var recipeLintCommand = cli.Command{
Name: "lint",
Usage: "Lint a recipe",
Aliases: []string{"l"},
ArgsUsage: "<recipe>",
UsageText: "abra recipe lint <recipe> [options]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.OnlyErrorFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipe(c, true)
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.RecipeNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
recipe := internal.ValidateRecipe(cmd)
if err := recipePkg.EnsureUpToDate(recipe.Name); err != nil {
logrus.Fatal(err)
if err := recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
tableCol := []string{"ref", "rule", "satisfied", "severity", "resolve"}
table := formatter.CreateTable(tableCol)
headers := []string{
"ref",
"rule",
"severity",
"satisfied",
"skipped",
"resolve",
}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
table.Headers(headers...)
hasError := false
bar := formatter.CreateProgressbar(-1, "running recipe lint rules...")
var rows [][]string
var warnMessages []string
for level := range lint.LintRules {
for _, rule := range lint.LintRules[level] {
ok, err := rule.Function(recipe)
if err != nil {
logrus.Warn(err)
if internal.OnlyErrors && rule.Level != "error" {
log.Debugf("skipping %s, does not have level \"error\"", rule.Ref)
continue
}
if !ok && rule.Level == "error" {
hasError = true
skipped := false
if rule.Skip(recipe) {
skipped = true
}
var result string
if ok {
result = "yes"
} else {
result = "NO"
skippedOutput := "-"
if skipped {
skippedOutput = "✅"
}
if internal.OnlyErrors {
if !ok && rule.Level == "error" {
table.Append([]string{rule.Ref, rule.Description, result, rule.Level, rule.HowToResolve})
bar.Add(1)
satisfied := false
if !skipped {
ok, err := rule.Function(recipe)
if err != nil {
warnMessages = append(warnMessages, err.Error())
}
if !ok && rule.Level == "error" {
hasError = true
}
if ok {
satisfied = true
}
} else {
table.Append([]string{rule.Ref, rule.Description, result, rule.Level, rule.HowToResolve})
bar.Add(1)
}
satisfiedOutput := "✅"
if !satisfied {
satisfiedOutput = "❌"
if skipped {
satisfiedOutput = "-"
}
}
row := []string{
rule.Ref,
rule.Description,
rule.Level,
satisfiedOutput,
skippedOutput,
rule.HowToResolve,
}
rows = append(rows, row)
table.Row(row...)
}
}
if table.NumLines() > 0 {
fmt.Println()
table.Render()
}
if len(rows) > 0 {
fmt.Println(table)
if hasError {
logrus.Warn("watch out, some critical errors are present in your recipe config")
for _, warnMsg := range warnMessages {
log.Warn(warnMsg)
}
if hasError {
log.Warnf("critical errors present in %s config", recipe.Name)
}
}
return nil


@ -1,6 +1,7 @@
package recipe
import (
"context"
"fmt"
"sort"
"strconv"
@ -8,43 +9,61 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var pattern string
var patternFlag = &cli.StringFlag{
Name: "pattern, p",
Name: "pattern",
Aliases: []string{"p"},
Value: "",
Usage: "Simple string to filter recipes",
Destination: &pattern,
}
var recipeListCommand = cli.Command{
Name: "list",
Usage: "List available recipes",
Aliases: []string{"ls"},
Name: "list",
Usage: "List recipes",
UsageText: "abra recipe list [options]",
Aliases: []string{"ls"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.MachineReadableFlag,
patternFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
catl, err := recipe.ReadRecipeCatalogue()
Before: internal.SubCommandBefore,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
catl, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
logrus.Fatal(err.Error())
log.Fatal(err.Error())
}
recipes := catl.Flatten()
sort.Sort(recipe.ByRecipeName(recipes))
tableCol := []string{"name", "category", "status", "healthcheck", "backups", "email", "tests", "SSO"}
table := formatter.CreateTable(tableCol)
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
len := 0
headers := []string{
"name",
"category",
"status",
"healthcheck",
"backups",
"email",
"tests",
"SSO",
}
table.Headers(headers...)
var rows [][]string
for _, recipe := range recipes {
tableRow := []string{
row := []string{
recipe.Name,
recipe.Category,
strconv.Itoa(recipe.Features.Status),
@ -57,19 +76,27 @@ var recipeListCommand = cli.Command{
if pattern != "" {
if strings.Contains(recipe.Name, pattern) {
table.Append(tableRow)
len++
table.Row(row...)
rows = append(rows, row)
}
} else {
table.Append(tableRow)
len++
table.Row(row...)
rows = append(rows, row)
}
}
table.SetCaption(true, fmt.Sprintf("total recipes: %v", len))
if len(rows) > 0 {
if internal.MachineReadable {
out, err := formatter.ToJSON(headers, rows)
if err != nil {
log.Fatal("unable to render to JSON: %s", err)
}
fmt.Println(out)
return nil
}
if table.NumLines() > 0 {
table.Render()
fmt.Println(table)
log.Infof("total recipes: %v", len(rows))
}
return nil


@ -2,9 +2,9 @@ package recipe
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"text/template"
@ -12,8 +12,9 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/git"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/urfave/cli/v3"
)
// recipeMetadata is the recipe metadata for the README.md
@ -34,90 +35,66 @@ var recipeNewCommand = cli.Command{
Name: "new",
Aliases: []string{"n"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.GitNameFlag,
internal.GitEmailFlag,
},
Before: internal.SubCommandBefore,
Usage: "Create a new recipe",
ArgsUsage: "<recipe>",
Description: `
Create a new recipe.
UsageText: "abra recipe new <recipe> [options]",
Description: `Create a new recipe.
Abra uses the built-in example repository which is available here:
https://git.coopcloud.tech/coop-cloud/example
Files within the example repository make use of the Golang templating system
which Abra uses to inject values into the generated recipe folder (e.g. name of
recipe and domain in the sample environment config).
`,
Action: func(c *cli.Context) error {
recipeName := c.Args().First()
https://git.coopcloud.tech/coop-cloud/example`,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
recipeName := cmd.Args().First()
r := recipe.Get(recipeName)
if recipeName == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("no recipe name provided"))
internal.ShowSubcommandHelpAndError(cmd, errors.New("no recipe name provided"))
}
directory := path.Join(config.RECIPES_DIR, recipeName)
if _, err := os.Stat(directory); !os.IsNotExist(err) {
logrus.Fatalf("%s recipe directory already exists?", directory)
if _, err := os.Stat(r.Dir); !os.IsNotExist(err) {
log.Fatalf("%s recipe directory already exists?", r.Dir)
}
url := fmt.Sprintf("%s/example.git", config.REPOS_BASE_URL)
if err := git.Clone(directory, url); err != nil {
logrus.Fatal(err)
if err := git.Clone(r.Dir, url); err != nil {
log.Fatal(err)
}
gitRepo := path.Join(config.RECIPES_DIR, recipeName, ".git")
gitRepo := path.Join(r.Dir, ".git")
if err := os.RemoveAll(gitRepo); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
logrus.Debugf("removed example git repo in %s", gitRepo)
log.Debugf("removed example git repo in %s", gitRepo)
meta := newRecipeMeta(recipeName)
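// newRecipeMeta (defined elsewhere in this package) supplies the values
// templated into README.md and .env.sample, e.g. the recipe name.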
toParse := []string{
path.Join(config.RECIPES_DIR, recipeName, "README.md"),
path.Join(config.RECIPES_DIR, recipeName, ".env.sample"),
}
for _, path := range toParse {
for _, path := range []string{r.ReadmePath, r.SampleEnvPath} {
tpl, err := template.ParseFiles(path)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
var templated bytes.Buffer
if err := tpl.Execute(&templated, meta); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if err := ioutil.WriteFile(path, templated.Bytes(), 0644); err != nil {
logrus.Fatal(err)
if err := os.WriteFile(path, templated.Bytes(), 0o644); err != nil {
log.Fatal(err)
}
}
newGitRepo := path.Join(config.RECIPES_DIR, recipeName)
if err := git.Init(newGitRepo, true); err != nil {
logrus.Fatal(err)
if err := git.Init(r.Dir, true, internal.GitName, internal.GitEmail); err != nil {
log.Fatal(err)
}
fmt.Print(fmt.Sprintf(`
Your new %s recipe has been created in %s.
In order to share your recipe, you can upload it the git repository to:
https://git.coopcloud.tech/coop-cloud/%s
If you're not sure how to do that, come chat with us:
https://docs.coopcloud.tech/intro/contact
See "abra recipe -h" for additional recipe maintainer commands.
Happy Hacking!
`, recipeName, path.Join(config.RECIPES_DIR, recipeName), recipeName))
log.Infof("new recipe '%s' created: %s", recipeName, path.Join(r.Dir))
log.Info("happy hacking 🎉")
return nil
},


@ -1,7 +1,7 @@
package recipe
import (
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
// RecipeCommand defines all recipe related sub-commands.
@ -9,25 +9,26 @@ var RecipeCommand = cli.Command{
Name: "recipe",
Aliases: []string{"r"},
Usage: "Manage recipes",
ArgsUsage: "<recipe>",
Description: `
A recipe is a blueprint for an app. It is a bunch of config files which
describe how to deploy and maintain an app. Recipes are maintained by the Co-op
Cloud community and you can use Abra to read them, deploy them and create apps
for you.
UsageText: "abra recipe [command] [arguments] [options]",
Description: `A recipe is a blueprint for an app.
It is a bunch of config files which describe how to deploy and maintain an app.
Recipes are maintained by the Co-op Cloud community and you can use Abra to
read them, deploy them and create apps for you.
Anyone who uses a recipe can become a maintainer. Maintainers typically make
sure the recipe is in good working order and the config upgraded in a timely
manner. Abra supports convenient automation for recipe maintenance, see the
"abra recipe upgrade", "abra recipe sync" and "abra recipe release" commands.
`,
Subcommands: []cli.Command{
recipeListCommand,
recipeVersionCommand,
recipeReleaseCommand,
recipeNewCommand,
recipeUpgradeCommand,
recipeSyncCommand,
recipeLintCommand,
manner.`,
Commands: []*cli.Command{
&recipeFetchCommand,
&recipeLintCommand,
&recipeListCommand,
&recipeNewCommand,
&recipeReleaseCommand,
&recipeSyncCommand,
&recipeUpgradeCommand,
&recipeVersionCommand,
&recipeResetCommand,
&recipeDiffCommand,
},
}


@ -1,34 +1,36 @@
package recipe
import (
"context"
"errors"
"fmt"
"os"
"path"
"strconv"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
"github.com/docker/distribution/reference"
"github.com/distribution/reference"
"github.com/go-git/go-git/v5"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var recipeReleaseCommand = cli.Command{
Name: "release",
Aliases: []string{"rl"},
Usage: "Release a new recipe version",
ArgsUsage: "<recipe> [<version>]",
Description: `
Create a new version of a recipe. These versions are then published on the
Co-op Cloud recipe catalogue. These versions take the following form:
UsageText: "abra recipe release <recipe> [<version>] [options]",
Description: `Create a new version of a recipe.
These versions are then published on the Co-op Cloud recipe catalogue. These
versions take the following form:
a.b.c+x.y.z
@ -44,80 +46,90 @@ major and therefore require intervention while doing the upgrade work.
Publish your new release to git.coopcloud.tech with "-p/--publish". This
requires that you have permission to git push to these repositories and have
your SSH keys configured on your account.
`,
your SSH keys configured on your account.`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.DryFlag,
internal.MajorFlag,
internal.MinorFlag,
internal.PatchFlag,
internal.PublishFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipeWithPrompt(c, false)
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.RecipeNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
recipe := internal.ValidateRecipe(cmd)
imagesTmp, err := getImageVersions(recipe)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
mainApp, err := internal.GetMainAppImage(recipe)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
mainAppVersion := imagesTmp[mainApp]
if mainAppVersion == "" {
logrus.Fatalf("main app service version for %s is empty?", recipe.Name)
log.Fatalf("main app service version for %s is empty?", recipe.Name)
}
tagString := c.Args().Get(1)
tagString := cmd.Args().Get(1)
if tagString != "" {
if _, err := tagcmp.Parse(tagString); err != nil {
logrus.Fatalf("cannot parse %s, invalid tag specified?", tagString)
log.Fatalf("cannot parse %s, invalid tag specified?", tagString)
}
}
if (internal.Major || internal.Minor || internal.Patch) && tagString != "" {
logrus.Fatal("cannot specify tag and bump type at the same time")
log.Fatal("cannot specify tag and bump type at the same time")
}
if tagString != "" {
if err := createReleaseFromTag(recipe, tagString, mainAppVersion); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
tags, err := recipe.Tags()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if tagString == "" && (!internal.Major && !internal.Minor && !internal.Patch) {
var err error
tagString, err = getLabelVersion(recipe, false)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
isClean, err := gitPkg.IsClean(recipe.Dir)
if err != nil {
log.Fatal(err)
}
if !isClean {
log.Infof("%s currently has these unstaged changes 👇", recipe.Name)
if err := gitPkg.DiffUnstaged(recipe.Dir); err != nil {
log.Fatal(err)
}
}
if len(tags) > 0 {
logrus.Warnf("previous git tags detected, assuming this is a new semver release")
log.Warnf("previous git tags detected, assuming this is a new semver release")
if err := createReleaseFromPreviousTag(tagString, mainAppVersion, recipe, tags); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
} else {
logrus.Warnf("no tag specified and no previous tag available for %s, assuming this is the initial release", recipe.Name)
log.Warnf("no tag specified and no previous tag available for %s, assuming this is the initial release", recipe.Name)
if err := createReleaseFromTag(recipe, tagString, mainAppVersion); err != nil {
if cleanUpErr := cleanUpTag(tagString, recipe.Name); err != nil {
logrus.Fatal(cleanUpErr)
if cleanUpErr := cleanUpTag(recipe, tagString); err != nil {
log.Fatal(cleanUpErr)
}
logrus.Fatal(err)
log.Fatal(err)
}
}
@ -127,10 +139,14 @@ your SSH keys configured on your account.
// getImageVersions retrieves image versions for a recipe
func getImageVersions(recipe recipe.Recipe) (map[string]string, error) {
var services = make(map[string]string)
services := make(map[string]string)
config, err := recipe.GetComposeConfig(nil)
if err != nil {
return nil, err
}
missingTag := false
for _, service := range recipe.Config.Services {
for _, service := range config.Services {
if service.Image == "" {
continue
}
@ -169,8 +185,7 @@ func getImageVersions(recipe recipe.Recipe) (map[string]string, error) {
func createReleaseFromTag(recipe recipe.Recipe, tagString, mainAppVersion string) error {
var err error
directory := path.Join(config.RECIPES_DIR, recipe.Name)
repo, err := git.PlainOpen(directory)
repo, err := git.PlainOpen(recipe.Dir)
if err != nil {
return err
}
@ -194,16 +209,20 @@ func createReleaseFromTag(recipe recipe.Recipe, tagString, mainAppVersion string
tagString = fmt.Sprintf("%s+%s", tag.String(), mainAppVersion)
}
if err := addReleaseNotes(recipe, tagString); err != nil {
log.Fatal(err)
}
if err := commitRelease(recipe, tagString); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if err := tagRelease(tagString, repo); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if err := pushRelease(recipe, tagString); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
return nil
@ -224,26 +243,100 @@ func getTagCreateOptions(tag string) (git.CreateTagOptions, error) {
return git.CreateTagOptions{Message: msg}, nil
}
func commitRelease(recipe recipe.Recipe, tag string) error {
if internal.Dry {
logrus.Debugf("dry run: no changes committed")
// addReleaseNotes checks if the release/next release note exists and moves the
// file to release/<tag>.
func addReleaseNotes(recipe recipe.Recipe, tag string) error {
tagReleaseNotePath := path.Join(recipe.Dir, "release", tag)
if _, err := os.Stat(tagReleaseNotePath); err == nil {
// Release note for the current tag already exists.
return nil
} else if !errors.Is(err, os.ErrNotExist) {
return err
}
nextReleaseNotePath := path.Join(recipe.Dir, "release", "next")
if _, err := os.Stat(nextReleaseNotePath); err == nil {
// release/next note exists. Move it to release/<tag>
if internal.Dry {
log.Debugf("dry run: move release note from 'next' to %s", tag)
return nil
}
if !internal.NoInput {
prompt := &survey.Input{
Message: "Use release note in release/next?",
}
var addReleaseNote bool
if err := survey.AskOne(prompt, &addReleaseNote); err != nil {
return err
}
if !addReleaseNote {
return nil
}
}
err := os.Rename(nextReleaseNotePath, tagReleaseNotePath)
if err != nil {
return err
}
err = gitPkg.Add(recipe.Dir, path.Join("release", "next"), internal.Dry)
if err != nil {
return err
}
err = gitPkg.Add(recipe.Dir, path.Join("release", tag), internal.Dry)
if err != nil {
return err
}
} else if !errors.Is(err, os.ErrNotExist) {
return err
}
// No release note exists for the current release.
if internal.NoInput {
return nil
}
isClean, err := gitPkg.IsClean(recipe.Dir())
prompt := &survey.Input{
Message: "Release Note (leave empty for no release note)",
}
var releaseNote string
if err := survey.AskOne(prompt, &releaseNote); err != nil {
return err
}
if releaseNote == "" {
return nil
}
err := os.WriteFile(tagReleaseNotePath, []byte(releaseNote), 0o644)
if err != nil {
return err
}
err = gitPkg.Add(recipe.Dir, path.Join("release", tag), internal.Dry)
if err != nil {
return err
}
return nil
}
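// Illustrative layout (assumed from the logic above): release notes live
// under <recipe>/release/, with "release/next" holding notes staged for the
// upcoming release and "release/<tag>" holding notes frozen for a published
// tag, e.g. release/2.0.1+1.2.0.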
func commitRelease(recipe recipe.Recipe, tag string) error {
if internal.Dry {
log.Debugf("dry run: no changes committed")
return nil
}
isClean, err := gitPkg.IsClean(recipe.Dir)
if err != nil {
return err
}
if isClean {
if !internal.Dry {
return fmt.Errorf("no changes discovered in %s, nothing to publish?", recipe.Dir())
return fmt.Errorf("no changes discovered in %s, nothing to publish?", recipe.Dir)
}
}
msg := fmt.Sprintf("chore: publish %s release", tag)
repoPath := path.Join(config.RECIPES_DIR, recipe.Name)
if err := gitPkg.Commit(repoPath, ".", msg, internal.Dry); err != nil {
if err := gitPkg.Commit(recipe.Dir, msg, internal.Dry); err != nil {
return err
}
@ -252,7 +345,7 @@ func commitRelease(recipe recipe.Recipe, tag string) error {
func tagRelease(tagString string, repo *git.Repository) error {
if internal.Dry {
logrus.Debugf("dry run: no git tag created (%s)", tagString)
log.Debugf("dry run: no git tag created (%s)", tagString)
return nil
}
@ -272,14 +365,14 @@ func tagRelease(tagString string, repo *git.Repository) error {
}
hash := formatter.SmallSHA(head.Hash().String())
logrus.Debugf(fmt.Sprintf("created tag %s at %s", tagString, hash))
log.Debugf(fmt.Sprintf("created tag %s at %s", tagString, hash))
return nil
}
func pushRelease(recipe recipe.Recipe, tagString string) error {
if internal.Dry {
logrus.Info("dry run: no changes published")
log.Info("dry run: no changes published")
return nil
}
@ -297,18 +390,17 @@ func pushRelease(recipe recipe.Recipe, tagString string) error {
if err := recipe.Push(internal.Dry); err != nil {
return err
}
url := fmt.Sprintf("%s/%s/src/tag/%s", config.REPOS_BASE_URL, recipe.Name, tagString)
logrus.Infof("new release published: %s", url)
url := fmt.Sprintf("%s/src/tag/%s", recipe.GitURL, tagString)
log.Infof("new release published: %s", url)
} else {
logrus.Info("no -p/--publish passed, not publishing")
log.Info("no -p/--publish passed, not publishing")
}
return nil
}
func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recipe.Recipe, tags []string) error {
directory := path.Join(config.RECIPES_DIR, recipe.Name)
repo, err := git.PlainOpen(directory)
repo, err := git.PlainOpen(recipe.Dir)
if err != nil {
return err
}
@ -373,7 +465,7 @@ func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recip
}
if lastGitTag.String() == tagString {
logrus.Fatalf("latest git tag (%s) and synced lable (%s) are the same?", lastGitTag, tagString)
log.Fatalf("latest git tag (%s) and synced label (%s) are the same?", lastGitTag, tagString)
}
if !internal.NoInput {
@ -383,33 +475,36 @@ func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recip
var ok bool
if err := survey.AskOne(prompt, &ok); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if !ok {
logrus.Fatal("exiting as requested")
log.Fatal("exiting as requested")
}
}
if err := addReleaseNotes(recipe, tagString); err != nil {
log.Fatal(err)
}
if err := commitRelease(recipe, tagString); err != nil {
logrus.Fatalf("failed to commit changes: %s", err.Error())
log.Fatalf("failed to commit changes: %s", err.Error())
}
if err := tagRelease(tagString, repo); err != nil {
logrus.Fatalf("failed to tag release: %s", err.Error())
log.Fatalf("failed to tag release: %s", err.Error())
}
if err := pushRelease(recipe, tagString); err != nil {
logrus.Fatalf("failed to publish new release: %s", err.Error())
log.Fatalf("failed to publish new release: %s", err.Error())
}
return nil
}
// cleanUpTag removes a freshly created tag
func cleanUpTag(tag, recipeName string) error {
directory := path.Join(config.RECIPES_DIR, recipeName)
repo, err := git.PlainOpen(directory)
func cleanUpTag(recipe recipe.Recipe, tag string) error {
repo, err := git.PlainOpen(recipe.Dir)
if err != nil {
return err
}
@ -420,22 +515,22 @@ func cleanUpTag(tag, recipeName string) error {
}
}
logrus.Debugf("removed freshly created tag %s", tag)
log.Debugf("removed freshly created tag %s", tag)
return nil
}
func getLabelVersion(recipe recipe.Recipe, prompt bool) (string, error) {
initTag, err := recipePkg.GetVersionLabelLocal(recipe)
initTag, err := recipe.GetVersionLabelLocal()
if err != nil {
return "", err
}
if initTag == "" {
logrus.Fatalf("unable to read version for %s from synced label. Did you try running \"abra recipe sync %s\" already?", recipe.Name, recipe.Name)
log.Fatalf("unable to read version for %s from synced label. Did you try running \"abra recipe sync %s\" already?", recipe.Name, recipe.Name)
}
logrus.Warnf("discovered %s as currently synced recipe label", initTag)
log.Warnf("discovered %s as currently synced recipe label", initTag)
if prompt && !internal.NoInput {
var response bool

cli/recipe/reset.go (new file, 53 lines)
@ -0,0 +1,53 @@
package recipe
import (
"context"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/go-git/go-git/v5"
"github.com/urfave/cli/v3"
)
var recipeResetCommand = cli.Command{
Name: "reset",
Usage: "Remove all unstaged changes from recipe config",
Description: "WARNING: this will delete your changes. Be Careful.",
Aliases: []string{"rs"},
UsageText: "abra recipe reset <recipe> [options]",
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.RecipeNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
recipeName := cmd.Args().First()
r := recipe.Get(recipeName)
if recipeName != "" {
internal.ValidateRecipe(cmd)
}
repo, err := git.PlainOpen(r.Dir)
if err != nil {
log.Fatal(err)
}
ref, err := repo.Head()
if err != nil {
log.Fatal(err)
}
worktree, err := repo.Worktree()
if err != nil {
log.Fatal(err)
}
opts := &git.ResetOptions{Commit: ref.Hash(), Mode: git.HardReset}
if err := worktree.Reset(opts); err != nil {
log.Fatal(err)
}
return nil
},
}
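// Usage sketch (assumed recipe location): "abra recipe reset gitea" hard
// resets the checkout under ~/.abra/recipes/gitea to HEAD, discarding any
// unstaged changes.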


@ -1,68 +1,70 @@
package recipe
import (
"context"
"fmt"
"path"
"strconv"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var recipeSyncCommand = cli.Command{
Name: "sync",
Aliases: []string{"s"},
Usage: "Sync recipe version label",
ArgsUsage: "<recipe> [<version>]",
UsageText: "abra recipe lint <recipe> [<version>] [options]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.DryFlag,
internal.MajorFlag,
internal.MinorFlag,
internal.PatchFlag,
},
Before: internal.SubCommandBefore,
Description: `
Generate labels for the main recipe service (i.e. by convention, the service
named "app") which corresponds to the following format:
Description: `Generate labels for the main recipe service.
By convention, this is the service named "app". The label uses the following format:
coop-cloud.${STACK_NAME}.version=<version>
Where <version> can be specified on the command-line or Abra can attempt to
auto-generate it for you. The <recipe> configuration will be updated on the
local file system.
`,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipeWithPrompt(c, false)
local file system.`,
ShellComplete: autocomplete.RecipeNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
recipe := internal.ValidateRecipe(cmd)
mainApp, err := internal.GetMainAppImage(recipe)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
imagesTmp, err := getImageVersions(recipe)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
mainAppVersion := imagesTmp[mainApp]
tags, err := recipe.Tags()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
nextTag := c.Args().Get(1)
nextTag := cmd.Args().Get(1)
if len(tags) == 0 && nextTag == "" {
logrus.Warnf("no git tags found for %s", recipe.Name)
log.Warnf("no git tags found for %s", recipe.Name)
if internal.NoInput {
log.Fatalf("unable to continue, input required for initial version")
}
fmt.Println(fmt.Sprintf(`
The following are two kinds of initial semantic version that you can
pick for %s that will be published in the recipe catalogue. This follows the
@ -88,7 +90,7 @@ likely to change.
}
if err := survey.AskOne(edPrompt, &chosenVersion); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
nextTag = fmt.Sprintf("%s+%s", chosenVersion, mainAppVersion)
@ -97,26 +99,26 @@ likely to change.
if nextTag == "" && (!internal.Major && !internal.Minor && !internal.Patch) {
latestRelease := tags[len(tags)-1]
if err := internal.PromptBumpType("", latestRelease); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
}
if nextTag == "" {
recipeDir := path.Join(config.RECIPES_DIR, recipe.Name)
repo, err := git.PlainOpen(recipeDir)
repo, err := git.PlainOpen(recipe.Dir)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
var lastGitTag tagcmp.Tag
iter, err := repo.Tags()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if err := iter.ForEach(func(ref *plumbing.Reference) error {
obj, err := repo.TagObject(ref.Hash())
if err != nil {
log.Fatal("Tag at commit ", ref.Hash(), " is unannotated or otherwise broken. Please fix it.")
return err
}
@ -133,7 +135,7 @@ likely to change.
return nil
}); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
// bumpType is used to decide what part of the tag should be incremented
@ -141,7 +143,7 @@ likely to change.
if bumpType != 0 {
// a bitwise check if the number is a power of 2
if (bumpType & (bumpType - 1)) != 0 {
logrus.Fatal("you can only use one version flag: --major, --minor or --patch")
log.Fatal("you can only use one version flag: --major, --minor or --patch")
}
}
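// Worked illustration: bumpType is assembled elsewhere as
// btoi(major)*4 + btoi(minor)*2 + btoi(patch) (see the upgrade command
// below), so a single flag yields a power of two (4, 2 or 1), for which
// (n & (n-1)) == 0 holds. Passing e.g. --major --patch gives 5, and
// 5 & 4 == 4 != 0 trips the error above.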
@ -150,14 +152,14 @@ likely to change.
if internal.Patch {
now, err := strconv.Atoi(newTag.Patch)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
newTag.Patch = strconv.Itoa(now + 1)
} else if internal.Minor {
now, err := strconv.Atoi(newTag.Minor)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
newTag.Patch = "0"
@ -165,7 +167,7 @@ likely to change.
} else if internal.Major {
now, err := strconv.Atoi(newTag.Major)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
newTag.Patch = "0"
@ -175,25 +177,35 @@ likely to change.
}
newTag.Metadata = mainAppVersion
logrus.Debugf("choosing %s as new version for %s", newTag.String(), recipe.Name)
log.Debugf("choosing %s as new version for %s", newTag.String(), recipe.Name)
nextTag = newTag.String()
}
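// Worked example (hedged): starting from tag 1.2.3+1.0.0, --minor zeroes
// the patch and bumps the minor to give 1.3.0, then the main app image
// version is re-attached as metadata, e.g. 1.3.0+1.1.0.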
if _, err := tagcmp.Parse(nextTag); err != nil {
logrus.Fatalf("invalid version %s specified", nextTag)
log.Fatalf("invalid version %s specified", nextTag)
}
mainService := "app"
label := fmt.Sprintf("coop-cloud.${STACK_NAME}.version=%s", nextTag)
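// e.g. coop-cloud.${STACK_NAME}.version=1.3.0+1.1.0 (nextTag as computed above)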
if !internal.Dry {
if err := recipe.UpdateLabel("compose.y*ml", mainService, label); err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
} else {
logrus.Infof("dry run: not syncing label %s for recipe %s", nextTag, recipe.Name)
log.Infof("dry run: not syncing label %s for recipe %s", nextTag, recipe.Name)
}
isClean, err := gitPkg.IsClean(recipe.Dir)
if err != nil {
log.Fatal(err)
}
if !isClean {
log.Infof("%s currently has these unstaged changes 👇", recipe.Name)
if err := gitPkg.DiffUnstaged(recipe.Dir); err != nil {
log.Fatal(err)
}
}
return nil
},
BashComplete: autocomplete.RecipeNameComplete,
}


@ -2,6 +2,8 @@ package recipe
import (
"bufio"
"context"
"encoding/json"
"fmt"
"os"
"path"
@ -11,14 +13,14 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
"github.com/docker/distribution/reference"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/distribution/reference"
"github.com/urfave/cli/v3"
)
type imgPin struct {
@ -26,14 +28,23 @@ type imgPin struct {
version tagcmp.Tag
}
// anUpgrade represents a single service upgrade (as within a recipe), and the
// list of tags that it can be upgraded to, for serialization purposes.
type anUpgrade struct {
Service string `json:"service"`
Image string `json:"image"`
Tag string `json:"tag"`
UpgradeTags []string `json:"upgrades"`
}
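// Illustrative JSON for a single entry under -m/--machine (field names from
// the struct tags above; values hypothetical):
// {"service":"app","image":"nginx","tag":"1.24.0","upgrades":["1.25.2","1.25.3"]}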
var recipeUpgradeCommand = cli.Command{
Name: "upgrade",
Aliases: []string{"u"},
Usage: "Upgrade recipe image tags",
Description: `
Parse all image tags within the given <recipe> configuration and prompt with
more recent tags to upgrade to. It will update the relevant compose file tags
on the local file system.
Name: "upgrade",
Aliases: []string{"u"},
Usage: "Upgrade recipe image tags",
UsageText: "abra recipe upgrade [<recipe>] [options]",
Description: `Upgrade a given <recipe> configuration.
It will update the relevant compose file tags on the local file system.
Some image tags cannot be parsed because they do not follow a
semver-like convention. In this case, all possible tags will be listed and it
@ -43,58 +54,60 @@ The command is interactive and will show a select input which allows you to
make a selection. Use the "?" key to see more help on navigating this
interface.
You may invoke this command in "wizard" mode and be prompted for input:
abra recipe upgrade
`,
BashComplete: autocomplete.RecipeNameComplete,
ArgsUsage: "<recipe>",
You may invoke this command in "wizard" mode and be prompted for input.`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.PatchFlag,
internal.MinorFlag,
internal.MajorFlag,
internal.MachineReadableFlag,
internal.AllTagsFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipeWithPrompt(c, true)
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.RecipeNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
recipe := internal.ValidateRecipe(cmd)
if err := recipePkg.EnsureUpToDate(recipe.Name); err != nil {
logrus.Fatal(err)
if err := recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
bumpType := btoi(internal.Major)*4 + btoi(internal.Minor)*2 + btoi(internal.Patch)
if bumpType != 0 {
// a bitwise check if the number is a power of 2
if (bumpType & (bumpType - 1)) != 0 {
logrus.Fatal("you can only use one of: --major, --minor, --patch.")
log.Fatal("you can only use one of: --major, --minor, --patch.")
}
}
if internal.MachineReadable {
// -m implies -n in this case
internal.NoInput = true
}
upgradeList := make(map[string]anUpgrade)
// check for versions file and load pinned versions
versionsPresent := false
recipeDir := path.Join(config.RECIPES_DIR, recipe.Name)
versionsPath := path.Join(recipeDir, "versions")
var servicePins = make(map[string]imgPin)
versionsPath := path.Join(recipe.Dir, "versions")
servicePins := make(map[string]imgPin)
if _, err := os.Stat(versionsPath); err == nil {
logrus.Debugf("found versions file for %s", recipe.Name)
log.Debugf("found versions file for %s", recipe.Name)
file, err := os.Open(versionsPath)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
splitLine := strings.Split(line, " ")
if splitLine[0] != "pin" || len(splitLine) != 3 {
logrus.Fatalf("malformed version pin specification: %s", line)
log.Fatalf("malformed version pin specification: %s", line)
}
pinSlice := strings.Split(splitLine[2], ":")
pinTag, err := tagcmp.Parse(pinSlice[1])
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
pin := imgPin{
image: pinSlice[0],
@ -103,45 +116,50 @@ You may invoke this command in "wizard" mode and be prompted for input:
servicePins[splitLine[1]] = pin
}
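// The loop above assumes pin lines of the form "pin <service> <image>:<tag>",
// e.g. "pin app nginx:1.25.3" pins the app service to tag 1.25.3.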
if err := scanner.Err(); err != nil {
logrus.Error(err)
log.Error(err)
}
versionsPresent = true
} else {
logrus.Debugf("did not find versions file for %s", recipe.Name)
log.Debugf("did not find versions file for %s", recipe.Name)
}
for _, service := range recipe.Config.Services {
config, err := recipe.GetComposeConfig(nil)
if err != nil {
log.Fatal(err)
}
for _, service := range config.Services {
img, err := reference.ParseNormalizedNamed(service.Image)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
regVersions, err := client.GetRegistryTags(img)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
image := reference.Path(img)
logrus.Debugf("retrieved %s from remote registry for %s", regVersions, image)
log.Debugf("retrieved %s from remote registry for %s", regVersions, image)
image = formatter.StripTagMeta(image)
switch img.(type) {
case reference.NamedTagged:
if !tagcmp.IsParsable(img.(reference.NamedTagged).Tag()) {
logrus.Debugf("%s not considered semver-like", img.(reference.NamedTagged).Tag())
log.Debugf("%s not considered semver-like", img.(reference.NamedTagged).Tag())
}
default:
logrus.Warnf("unable to read tag for image %s, is it missing? skipping upgrade for %s", image, service.Name)
log.Warnf("unable to read tag for image %s, is it missing? skipping upgrade for %s", image, service.Name)
continue
}
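// Past this switch, img is known to carry a tag (reference.NamedTagged),
// e.g. docker.io/library/nginx:1.25.3 rather than a bare image name.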
tag, err := tagcmp.Parse(img.(reference.NamedTagged).Tag())
if err != nil {
logrus.Warnf("unable to parse %s, error was: %s, skipping upgrade for %s", image, err.Error(), service.Name)
log.Warnf("unable to parse %s, error was: %s, skipping upgrade for %s", image, err.Error(), service.Name)
continue
}
logrus.Debugf("parsed %s for %s", tag, service.Name)
log.Debugf("parsed %s for %s", tag, service.Name)
var compatible []tagcmp.Tag
for _, regVersion := range regVersions {
@ -155,18 +173,18 @@ You may invoke this command in "wizard" mode and be prompted for input:
}
}
logrus.Debugf("detected potential upgradable tags %s for %s", compatible, service.Name)
log.Debugf("detected potential upgradable tags %s for %s", compatible, service.Name)
sort.Sort(tagcmp.ByTagDesc(compatible))
if len(compatible) == 0 && !internal.AllTags {
logrus.Info(fmt.Sprintf("no new versions available for %s, assuming %s is the latest (use -a/--all-tags to see all anyway)", image, tag))
log.Info(fmt.Sprintf("no new versions available for %s, assuming %s is the latest (use -a/--all-tags to see all anyway)", image, tag))
continue // skip on to the next tag and don't update any compose files
}
catlVersions, err := recipePkg.VersionsOfService(recipe.Name, service.Name)
catlVersions, err := recipePkg.VersionsOfService(recipe.Name, service.Name, internal.Offline)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
compatibleStrings := []string{"skip"}
@ -182,7 +200,7 @@ You may invoke this command in "wizard" mode and be prompted for input:
}
}
logrus.Debugf("detected compatible upgradable tags %s for %s", compatibleStrings, service.Name)
log.Debugf("detected compatible upgradable tags %s for %s", compatibleStrings, service.Name)
var upgradeTag string
_, ok := servicePins[service.Name]
@ -199,13 +217,13 @@ You may invoke this command in "wizard" mode and be prompted for input:
}
}
if contains {
logrus.Infof("upgrading service %s from %s to %s (pinned tag: %s)", service.Name, tag.String(), upgradeTag, pinnedTagString)
log.Infof("upgrading service %s from %s to %s (pinned tag: %s)", service.Name, tag.String(), upgradeTag, pinnedTagString)
} else {
logrus.Infof("service %s, image %s pinned to %s, no compatible upgrade found", service.Name, servicePins[service.Name].image, pinnedTagString)
log.Infof("service %s, image %s pinned to %s, no compatible upgrade found", service.Name, servicePins[service.Name].image, pinnedTagString)
continue
}
} else {
logrus.Fatalf("service %s is at version %s, but pinned to %s, please correct your compose.yml file manually!", service.Name, tag.String(), pinnedTag.String())
log.Fatalf("service %s is at version %s, but pinned to %s, please correct your compose.yml file manually!", service.Name, tag.String(), pinnedTag.String())
continue
}
} else {
@ -222,7 +240,7 @@ You may invoke this command in "wizard" mode and be prompted for input:
}
}
if upgradeTag == "" {
logrus.Warnf("not upgrading from %s to %s for %s, because the upgrade type is more serious than what user wants", tag.String(), compatible[0].String(), image)
log.Warnf("not upgrading from %s to %s for %s, because the upgrade type is more serious than what user wants", tag.String(), compatible[0].String(), image)
continue
}
} else {
@ -230,7 +248,7 @@ You may invoke this command in "wizard" mode and be prompted for input:
if !tagcmp.IsParsable(img.(reference.NamedTagged).Tag()) || internal.AllTags {
tag := img.(reference.NamedTagged).Tag()
if !internal.AllTags {
logrus.Warning(fmt.Sprintf("unable to determine versioning semantics of %s, listing all tags", tag))
log.Warn(fmt.Sprintf("unable to determine versioning semantics of %s, listing all tags", tag))
}
msg = fmt.Sprintf("upgrade to which tag? (service: %s, tag: %s)", service.Name, tag)
compatibleStrings = []string{"skip"}
@ -239,27 +257,83 @@ You may invoke this command in "wizard" mode and be prompted for input:
}
}
prompt := &survey.Select{
Message: msg,
Help: "enter / return to confirm, choose 'skip' to not upgrade this tag, vim mode is enabled",
VimMode: true,
Options: compatibleStrings,
// there is always at least the item "skip" in compatibleStrings (a list of
// possible upgradable tags) and at least one other tag.
upgradableTags := compatibleStrings[1:]
upgrade := anUpgrade{
Service: service.Name,
Image: image,
Tag: tag.String(),
UpgradeTags: make([]string, len(upgradableTags)),
}
if err := survey.AskOne(prompt, &upgradeTag); err != nil {
logrus.Fatal(err)
for n, s := range upgradableTags {
var sb strings.Builder
if _, err := sb.WriteString(s); err != nil {
log.Fatal(err)
}
upgrade.UpgradeTags[n] = sb.String()
}
upgradeList[upgrade.Service] = upgrade
if internal.NoInput {
upgradeTag = "skip"
} else {
prompt := &survey.Select{
Message: msg,
Help: "enter / return to confirm, choose 'skip' to not upgrade this tag, vim mode is enabled",
VimMode: true,
Options: compatibleStrings,
}
if err := survey.AskOne(prompt, &upgradeTag); err != nil {
log.Fatal(err)
}
}
}
}
if upgradeTag != "skip" {
ok, err := recipe.UpdateTag(image, upgradeTag)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
if ok {
logrus.Infof("tag upgraded from %s to %s for %s", tag.String(), upgradeTag, image)
log.Infof("tag upgraded from %s to %s for %s", tag.String(), upgradeTag, image)
}
} else {
logrus.Warnf("not upgrading %s, skipping as requested", image)
if !internal.NoInput {
log.Warnf("not upgrading %s, skipping as requested", image)
}
}
}
if internal.NoInput {
if internal.MachineReadable {
jsonstring, err := json.Marshal(upgradeList)
if err != nil {
log.Fatal(err)
}
fmt.Println(string(jsonstring))
return nil
}
for _, upgrade := range upgradeList {
log.Infof("can upgrade service: %s, image: %s, tag: %s ::", upgrade.Service, upgrade.Image, upgrade.Tag)
for _, utag := range upgrade.UpgradeTags {
log.Infof(" %s", utag)
}
}
}
isClean, err := gitPkg.IsClean(recipe.Dir)
if err != nil {
log.Fatal(err)
}
if !isClean {
log.Infof("%s currently has these unstaged changes 👇", recipe.Name)
if err := gitPkg.DiffUnstaged(recipe.Dir); err != nil {
log.Fatal(err)
}
}


@ -1,54 +1,110 @@
package recipe
import (
"context"
"fmt"
"sort"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
func sortServiceByName(versions [][]string) func(i, j int) bool {
return func(i, j int) bool {
// NOTE(d1): index 1 is the service name in the row layouts used below
if versions[i][1] == "app" {
return true
}
return versions[i][1] < versions[j][1]
}
}
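// e.g. with rows shaped [version, service, ...], [["1.0.0", "db", ...],
// ["1.0.0", "app", ...]] sorts "app" first, then the remaining services
// lexically; the comparator assumes the service name sits at index 1.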
var recipeVersionCommand = cli.Command{
Name: "versions",
Aliases: []string{"v"},
Usage: "List recipe versions",
ArgsUsage: "<recipe>",
UsageText: "abra recipe version <recipe> [options]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.MachineReadableFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipe(c, false)
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.RecipeNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
var warnMessages []string
catalogue, err := recipePkg.ReadRecipeCatalogue()
recipe := internal.ValidateRecipe(cmd)
catl, err := recipePkg.ReadRecipeCatalogue(internal.Offline)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
recipeMeta, ok := catalogue[recipe.Name]
recipeMeta, ok := catl[recipe.Name]
if !ok {
logrus.Fatalf("%s recipe doesn't exist?", recipe.Name)
warnMessages = append(warnMessages, "retrieved versions from local recipe repository")
recipeVersions, err := recipe.GetRecipeVersions()
if err != nil {
warnMessages = append(warnMessages, err.Error())
}
recipeMeta = recipePkg.RecipeMeta{Versions: recipeVersions}
}
tableCol := []string{"Version", "Service", "Image", "Tag", "Digest"}
table := formatter.CreateTable(tableCol)
if len(recipeMeta.Versions) == 0 {
log.Fatalf("%s has no published versions?", recipe.Name)
}
for i := len(recipeMeta.Versions) - 1; i >= 0; i-- {
for tag, meta := range recipeMeta.Versions[i] {
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
table.Headers("SERVICE", "NAME", "TAG")
for version, meta := range recipeMeta.Versions[i] {
var allRows [][]string
var rows [][]string
for service, serviceMeta := range meta {
table.Append([]string{tag, service, serviceMeta.Image, serviceMeta.Tag, serviceMeta.Digest})
rows = append(rows, []string{service, serviceMeta.Image, serviceMeta.Tag})
allRows = append(allRows, []string{version, service, serviceMeta.Image, serviceMeta.Tag})
}
sort.Slice(rows, sortServiceByName(rows))
table.Rows(rows...)
if !internal.MachineReadable {
fmt.Println(table)
log.Infof("VERSION: %s", version)
fmt.Println()
continue
}
if internal.MachineReadable {
sort.Slice(allRows, sortServiceByName(allRows))
headers := []string{"VERSION", "SERVICE", "NAME", "TAG"}
out, err := formatter.ToJSON(headers, allRows)
if err != nil {
log.Fatal("unable to render to JSON: %s", err)
}
fmt.Println(out)
continue
}
}
}
table.SetAutoMergeCells(true)
if table.NumLines() > 0 {
table.Render()
} else {
logrus.Fatalf("%s has no published versions?", recipe.Name)
if !internal.MachineReadable {
for _, warnMsg := range warnMessages {
log.Warn(warnMsg)
}
}
return nil


@ -1,82 +0,0 @@
package record
import (
"context"
"fmt"
"strconv"
"coopcloud.tech/abra/cli/internal"
gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
"coopcloud.tech/abra/pkg/formatter"
"github.com/libdns/gandi"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// RecordListCommand lists domains.
var RecordListCommand = cli.Command{
Name: "list",
Usage: "List domain name records",
Aliases: []string{"ls"},
ArgsUsage: "<zone>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.DNSProviderFlag,
},
Before: internal.SubCommandBefore,
Description: `
List all domain name records managed by a 3rd party provider for a specific
zone.
You must specify a zone (e.g. example.com) under which your domain name records
are listed. This zone must already be created on your provider account.
`,
Action: func(c *cli.Context) error {
if err := internal.EnsureDNSProvider(); err != nil {
logrus.Fatal(err)
}
zone, err := internal.EnsureZoneArgument(c)
if err != nil {
logrus.Fatal(err)
}
var provider gandi.Provider
switch internal.DNSProvider {
case "gandi":
provider, err = gandiPkg.New()
if err != nil {
logrus.Fatal(err)
}
default:
logrus.Fatalf("%s is not a supported DNS provider", internal.DNSProvider)
}
records, err := provider.GetRecords(context.Background(), zone)
if err != nil {
logrus.Fatal(err)
}
tableCol := []string{"type", "name", "value", "TTL", "priority"}
table := formatter.CreateTable(tableCol)
for _, record := range records {
value := record.Value
if len(record.Value) > 30 {
value = fmt.Sprintf("%s...", record.Value[:30])
}
table.Append([]string{
record.Type,
record.Name,
value,
record.TTL.String(),
strconv.Itoa(record.Priority),
})
}
table.Render()
return nil
},
}


@ -1,148 +0,0 @@
package record
import (
"context"
"fmt"
"strconv"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/dns"
gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
"coopcloud.tech/abra/pkg/formatter"
"github.com/libdns/gandi"
"github.com/libdns/libdns"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// RecordNewCommand creates a new domain name record.
var RecordNewCommand = cli.Command{
Name: "new",
Usage: "Create a new domain record",
Aliases: []string{"n"},
ArgsUsage: "<zone>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.DNSProviderFlag,
internal.DNSTypeFlag,
internal.DNSNameFlag,
internal.DNSValueFlag,
internal.DNSTTLFlag,
internal.DNSPriorityFlag,
},
Before: internal.SubCommandBefore,
Description: `
Create a new domain name record for a specific zone.
You must specify a zone (e.g. example.com) under which your domain name records
are listed. This zone must already be created on your provider account.
Example:
abra record new foo.com -p gandi -t A -n myapp -v 192.168.178.44
You may also invoke this command in "wizard" mode and be prompted for input:
abra record new
`,
Action: func(c *cli.Context) error {
zone, err := internal.EnsureZoneArgument(c)
if err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSProvider(); err != nil {
logrus.Fatal(err)
}
var provider gandi.Provider
switch internal.DNSProvider {
case "gandi":
provider, err = gandiPkg.New()
if err != nil {
logrus.Fatal(err)
}
default:
logrus.Fatalf("%s is not a supported DNS provider", internal.DNSProvider)
}
if err := internal.EnsureDNSTypeFlag(c); err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSNameFlag(c); err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSValueFlag(c); err != nil {
logrus.Fatal(err)
}
ttl, err := dns.GetTTL(internal.DNSTTL)
if err != nil {
return err
}
record := libdns.Record{
Type: internal.DNSType,
Name: internal.DNSName,
Value: internal.DNSValue,
TTL: ttl,
}
if internal.DNSType == "MX" || internal.DNSType == "SRV" || internal.DNSType == "URI" {
record.Priority = internal.DNSPriority
}
records, err := provider.GetRecords(context.Background(), zone)
if err != nil {
logrus.Fatal(err)
}
for _, existingRecord := range records {
if existingRecord.Type == record.Type &&
existingRecord.Name == record.Name &&
existingRecord.Value == record.Value {
logrus.Fatalf("%s record for %s already exists?", record.Type, zone)
}
}
createdRecords, err := provider.SetRecords(
context.Background(),
zone,
[]libdns.Record{record},
)
if err != nil {
logrus.Fatal(err)
}
if len(createdRecords) == 0 {
logrus.Fatal("provider library reports that no record was created?")
}
createdRecord := createdRecords[0]
tableCol := []string{"type", "name", "value", "TTL", "priority"}
table := formatter.CreateTable(tableCol)
value := createdRecord.Value
if len(createdRecord.Value) > 30 {
value = fmt.Sprintf("%s...", createdRecord.Value[:30])
}
table.Append([]string{
createdRecord.Type,
createdRecord.Name,
value,
createdRecord.TTL.String(),
strconv.Itoa(createdRecord.Priority),
})
table.Render()
logrus.Info("record created")
return nil
},
}


@ -1,37 +0,0 @@
package record
import (
"github.com/urfave/cli"
)
// RecordCommand supports managing DNS entries.
var RecordCommand = cli.Command{
Name: "record",
Usage: "Manage domain name records",
Aliases: []string{"rc"},
ArgsUsage: "<record>",
Description: `
Manage domain name records via 3rd party providers such as Gandi DNS. It
supports listing, creating and removing all types of records that you might
need for managing Co-op Cloud apps.
The following providers are supported:
Gandi DNS https://www.gandi.net
You need an account with such a provider already. Typically, you need to
provide an API token on the Abra command-line when using these commands so that
you can authenticate with your provider account.
New providers can be integrated, we welcome change sets. See the underlying DNS
library documentation for more. It supports many existing providers and allows
to implement new provider support easily.
https://pkg.go.dev/github.com/libdns/libdns
`,
Subcommands: []cli.Command{
RecordListCommand,
RecordNewCommand,
RecordRemoveCommand,
},
}


@ -1,136 +0,0 @@
package record
import (
"context"
"fmt"
"strconv"
"coopcloud.tech/abra/cli/internal"
gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
"coopcloud.tech/abra/pkg/formatter"
"github.com/AlecAivazis/survey/v2"
"github.com/libdns/gandi"
"github.com/libdns/libdns"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// RecordRemoveCommand lists domains.
var RecordRemoveCommand = cli.Command{
Name: "remove",
Usage: "Remove a domain name record",
Aliases: []string{"rm"},
ArgsUsage: "<zone>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.DNSProviderFlag,
internal.DNSTypeFlag,
internal.DNSNameFlag,
},
Before: internal.SubCommandBefore,
Description: `
Remove a domain name record for a specific zone.
It uses the type of record and name to match existing records and choose one
for deletion. You must specify a zone (e.g. example.com) under which your
domain name records are listed. This zone must already be created on your
provider account.
Example:
abra record remove foo.com -p gandi -t A -n myapp
You may also invoke this command in "wizard" mode and be prompted for input:
abra record rm
`,
Action: func(c *cli.Context) error {
zone, err := internal.EnsureZoneArgument(c)
if err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSProvider(); err != nil {
logrus.Fatal(err)
}
var provider gandi.Provider
switch internal.DNSProvider {
case "gandi":
provider, err = gandiPkg.New()
if err != nil {
logrus.Fatal(err)
}
default:
logrus.Fatalf("%s is not a supported DNS provider", internal.DNSProvider)
}
if err := internal.EnsureDNSTypeFlag(c); err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSNameFlag(c); err != nil {
logrus.Fatal(err)
}
records, err := provider.GetRecords(context.Background(), zone)
if err != nil {
logrus.Fatal(err)
}
var toDelete libdns.Record
for _, record := range records {
if record.Type == internal.DNSType && record.Name == internal.DNSName {
toDelete = record
break
}
}
if (libdns.Record{}) == toDelete {
logrus.Fatal("provider library reports no matching record?")
}
tableCol := []string{"type", "name", "value", "TTL", "priority"}
table := formatter.CreateTable(tableCol)
value := toDelete.Value
if len(toDelete.Value) > 30 {
value = fmt.Sprintf("%s...", toDelete.Value[:30])
}
table.Append([]string{
toDelete.Type,
toDelete.Name,
value,
toDelete.TTL.String(),
strconv.Itoa(toDelete.Priority),
})
table.Render()
if !internal.NoInput {
response := false
prompt := &survey.Confirm{
Message: "continue with record deletion?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
}
_, err = provider.DeleteRecords(context.Background(), zone, []libdns.Record{toDelete})
if err != nil {
logrus.Fatal(err)
}
logrus.Info("record successfully deleted")
return nil
},
}


@ -3,509 +3,196 @@ package server
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"os/user"
"path/filepath"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
contextPkg "coopcloud.tech/abra/pkg/context"
"coopcloud.tech/abra/pkg/dns"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/server"
"coopcloud.tech/abra/pkg/ssh"
"github.com/AlecAivazis/survey/v2"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var (
dockerInstallMsg = `
A docker installation cannot be found on %s. This is a required system
dependency for running Co-op Cloud apps on your server. If you would like, Abra
can attempt to install Docker for you using the upstream non-interactive
installation script.
See the following documentation for more:
https://docs.docker.com/engine/install/debian/#install-using-the-convenience-script
N.B Docker doesn't recommend it for production environments but many use it for
such purposes. Docker stable is now installed by default by this script. The
source for this script can be seen here:
https://github.com/docker/docker-install
`
sshPkg "coopcloud.tech/abra/pkg/ssh"
"github.com/urfave/cli/v3"
)
var local bool
var localFlag = &cli.BoolFlag{
Name: "local, l",
Name: "local",
Aliases: []string{"l"},
Usage: "Use local server",
Destination: &local,
}
var provision bool
var provisionFlag = &cli.BoolFlag{
Name: "provision, p",
Usage: "Provision server so it can deploy apps",
Destination: &provision,
}
var sshAuth string
var sshAuthFlag = &cli.StringFlag{
Name: "ssh-auth, s",
Value: "identity-file",
Usage: "Select SSH authentication method (identity-file, password)",
Destination: &sshAuth,
}
var askSudoPass bool
var askSudoPassFlag = &cli.BoolFlag{
Name: "ask-sudo-pass, a",
Usage: "Ask for sudo password",
Destination: &askSudoPass,
}
func cleanUp(domainName string) {
logrus.Warnf("cleaning up context for %s", domainName)
if err := client.DeleteContext(domainName); err != nil {
logrus.Fatal(err)
}
logrus.Warnf("cleaning up server directory for %s", domainName)
if err := os.RemoveAll(filepath.Join(config.SERVERS_DIR, domainName)); err != nil {
logrus.Fatal(err)
}
}
func installDockerLocal(c *cli.Context) error {
fmt.Println(fmt.Sprintf(dockerInstallMsg, "this local server"))
response := false
prompt := &survey.Confirm{
Message: fmt.Sprintf("attempt install docker on local server?"),
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
for _, exe := range []string{"wget", "bash"} {
exists, err := ensureLocalExecutable(exe)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("%s missing, please install it", exe)
// cleanUp cleans up the partially created context/client details for a failed
// "server add" attempt.
func cleanUp(name string) {
if name != "default" {
log.Debugf("serverAdd: cleanUp: cleaning up context for %s", name)
if err := client.DeleteContext(name); err != nil {
log.Fatal(err)
}
}
cmd := exec.Command("bash", "-c", "wget -O- https://get.docker.com | bash")
if err := internal.RunCmd(cmd); err != nil {
return err
}
return nil
}
func newLocalServer(c *cli.Context, domainName string) error {
if err := createServerDir(domainName); err != nil {
return err
}
cl, err := newClient(c, domainName)
serverDir := filepath.Join(config.SERVERS_DIR, name)
files, err := config.GetAllFilesInDirectory(serverDir)
if err != nil {
return err
log.Fatalf("serverAdd: cleanUp: unable to list files in %s: %s", serverDir, err)
}
if provision {
exists, err := ensureLocalExecutable("docker")
if err != nil {
return err
}
if !exists {
if err := installDockerLocal(c); err != nil {
return err
}
}
if err := initSwarmLocal(c, cl, domainName); err != nil {
if !strings.Contains(err.Error(), "proxy already exists") {
logrus.Fatal(err)
}
}
if len(files) > 0 {
log.Debugf("serverAdd: cleanUp: %s is not empty, aborting cleanup", serverDir)
return
}
logrus.Info("local server has been added")
return nil
if err := os.RemoveAll(serverDir); err != nil {
log.Fatalf("serverAdd: cleanUp: failed to remove %s: %s", serverDir, err)
}
}
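// e.g. a failed "abra server add example.com" removes the example.com Docker
// context and, if still empty, the server directory under SERVERS_DIR
// (assumed to resolve to ~/.abra/servers/example.com).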
func newContext(c *cli.Context, domainName, username, port string) error {
// newContext creates a new internal Docker context for a server. This is how
// Docker manages SSH connection details. These are stored to disk in
// ~/.docker. Abra can manage this completely for the user, so it's an
// implementation detail.
func newContext(name string) (bool, error) {
store := contextPkg.NewDefaultDockerContextStore()
contexts, err := store.Store.List()
if err != nil {
return err
return false, err
}
for _, context := range contexts {
if context.Name == domainName {
logrus.Debugf("context for %s already exists", domainName)
return nil
if context.Name == name {
log.Debugf("context for %s already exists", name)
return false, nil
}
}
logrus.Debugf("creating context with domain %s, username %s and port %s", domainName, username, port)
log.Debugf("creating context with domain %s", name)
if err := client.CreateContext(domainName, username, port); err != nil {
return err
if err := client.CreateContext(name); err != nil {
return false, err
}
return nil
return true, nil
}
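// For reference (assumed equivalence): the stored context mirrors what
// "docker context create <name> --docker host=ssh://<name>" would produce,
// with the concrete user/host/port resolved from ~/.ssh/config as described
// in the "server add" help text.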
func newClient(c *cli.Context, domainName string) (*dockerClient.Client, error) {
cl, err := client.New(domainName)
if err != nil {
return &dockerClient.Client{}, err
}
return cl, nil
}
func installDocker(c *cli.Context, cl *dockerClient.Client, sshCl *ssh.Client, domainName string) error {
exists, err := ensureRemoteExecutable("docker", sshCl)
if err != nil {
return err
}
if !exists {
fmt.Println(fmt.Sprintf(dockerInstallMsg, domainName))
response := false
prompt := &survey.Confirm{
Message: fmt.Sprintf("attempt install docker on %s?", domainName),
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
exes := []string{"wget", "bash"}
if askSudoPass {
exes = append(exes, "ssh-askpass")
}
for _, exe := range exes {
exists, err := ensureRemoteExecutable(exe, sshCl)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("%s missing on remote, please install it", exe)
}
}
var sudoPass string
if askSudoPass {
cmd := "wget -O- https://get.docker.com | bash"
prompt := &survey.Password{
Message: "sudo password?",
}
if err := survey.AskOne(prompt, &sudoPass); err != nil {
return err
}
logrus.Debugf("running %s on %s now with sudo password", cmd, domainName)
if sudoPass == "" {
return fmt.Errorf("missing sudo password but requested --ask-sudo-pass?")
}
logrus.Warn("installing docker, this could take some time...")
if err := ssh.RunSudoCmd(cmd, sudoPass, sshCl); err != nil {
fmt.Print(fmt.Sprintf(`
Abra was unable to bootstrap Docker, see below for logs:
%s
If nothing works, you can try running the Docker install script manually on your server:
wget -O- https://get.docker.com | bash
`, string(err.Error())))
logrus.Fatal("Process exited with status 1")
}
logrus.Infof("docker is installed on %s", domainName)
remoteUser := sshCl.SSHClient.Conn.User()
logrus.Infof("adding %s to docker group", remoteUser)
permsCmd := fmt.Sprintf("sudo usermod -aG docker %s", remoteUser)
if err := ssh.RunSudoCmd(permsCmd, sudoPass, sshCl); err != nil {
return err
}
} else {
cmd := "wget -O- https://get.docker.com | bash"
logrus.Debugf("running %s on %s now without sudo password", cmd, domainName)
logrus.Warn("installing docker, this could take some time...")
if out, err := sshCl.Exec(cmd); err != nil {
fmt.Print(fmt.Sprintf(`
Abra was unable to bootstrap Docker, see below for logs:
%s
This could be due to several reasons. One of the most common is that your
server user account does not have sudo access, and if it does, you need to pass
"--ask-sudo-pass" in order to supply Abra with your password.
If nothing works, you can try running the Docker install script manually on your server:
wget -O- https://get.docker.com | bash
`, string(out)))
logrus.Fatal(err)
}
logrus.Infof("docker is installed on %s", domainName)
}
}
return nil
}
func initSwarmLocal(c *cli.Context, cl *dockerClient.Client, domainName string) error {
initReq := swarm.InitRequest{ListenAddr: "0.0.0.0:2377"}
if _, err := cl.SwarmInit(context.Background(), initReq); err != nil {
if strings.Contains(err.Error(), "is already part of a swarm") ||
strings.Contains(err.Error(), "must specify a listening address") {
logrus.Infof("swarm mode already initialised on %s", domainName)
} else {
return err
}
} else {
logrus.Infof("initialised swarm mode on local server")
}
netOpts := types.NetworkCreate{Driver: "overlay", Scope: "swarm"}
if _, err := cl.NetworkCreate(context.Background(), "proxy", netOpts); err != nil {
if !strings.Contains(err.Error(), "proxy already exists") {
return err
}
logrus.Info("swarm overlay network already created on local server")
} else {
logrus.Infof("swarm overlay network created on local server")
}
return nil
}
func initSwarm(c *cli.Context, cl *dockerClient.Client, domainName string) error {
ipv4, err := dns.EnsureIPv4(domainName)
if err != nil {
return err
}
initReq := swarm.InitRequest{
ListenAddr: "0.0.0.0:2377",
AdvertiseAddr: ipv4,
}
if _, err := cl.SwarmInit(context.Background(), initReq); err != nil {
if strings.Contains(err.Error(), "is already part of a swarm") ||
strings.Contains(err.Error(), "must specify a listening address") {
logrus.Infof("swarm mode already initialised on %s", domainName)
} else {
return err
}
} else {
logrus.Infof("initialised swarm mode on %s", domainName)
}
netOpts := types.NetworkCreate{Driver: "overlay", Scope: "swarm"}
if _, err := cl.NetworkCreate(context.Background(), "proxy", netOpts); err != nil {
if !strings.Contains(err.Error(), "proxy already exists") {
return err
}
logrus.Infof("swarm overlay network already created on %s", domainName)
} else {
logrus.Infof("swarm overlay network created on %s", domainName)
}
return nil
}
func createServerDir(domainName string) error {
if err := server.CreateServerDir(domainName); err != nil {
// createServerDir creates the ~/.abra/servers/... directory for a new server.
func createServerDir(name string) (bool, error) {
if err := server.CreateServerDir(name); err != nil {
if !os.IsExist(err) {
return err
return false, err
}
logrus.Debugf("server dir for %s already created", domainName)
log.Debugf("server dir for %s already created", name)
return false, nil
}
return nil
return true, nil
}
var serverAddCommand = cli.Command{
Name: "add",
Aliases: []string{"a"},
Usage: "Add a server to your configuration",
Description: `
Add a new server to your configuration so that it can be managed by Abra. This
command can also provision your server ("--provision/-p") with a Docker
installation so that it is capable of hosting Co-op Cloud apps.
Name: "add",
Aliases: []string{"a"},
Usage: "Add a new server",
UsageText: "abra server add <domain> [options]",
Description: `Add a new server to your configuration so that it can be managed by Abra.
Abra will default to expecting that you have a running ssh-agent and are using
SSH keys to connect to your new server. Abra will also read your SSH config
(matching "Host" as <domain>). SSH connection details precedence follows as
such: command-line > SSH config > guessed defaults.
Abra relies on the standard SSH command-line and ~/.ssh/config for client
connection details. You must configure an entry per-host in your ~/.ssh/config
for each server:
If you have no SSH key configured for this host and are instead using password
authentication, you may pass "--ssh-auth password" to have Abra ask you for the
password. "--ask-sudo-pass" may be passed if you run your provisioning commands
via sudo privilege escalation.
The <domain> argument must be a publicly accessible domain name which points to
your server. You should have working SSH access to this server already, Abra
will assume port 22 and will use your current system username to make an
initial connection. You can use the <user> and <port> arguments to adjust this.
Example:
abra server add varia.zone globemodem 12345 -p
Abra will construct the following SSH connection and Docker context:
ssh://globemodem@varia.zone:12345
All communication between Abra and the server will use this SSH connection.
Host example.com example
Hostname example.com
User exampleUser
Port 12345
IdentityFile ~/.ssh/example@somewhere
If "--local" is passed, then Abra assumes that the current local server is
intended as the target server. This is useful when you want to have your entire
Co-op Cloud config located on the server itself, and not on your local
developer machine.
`,
developer machine. The domain is then set to "default".
You can also pass the "--no-domain-checks/-D" flag to use an arbitrary name
instead of a real domain. The host will be resolved with the "Hostname" entry
of your ~/.ssh/config. Checks for a valid online domain will be skipped.`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.NoDomainChecksFlag,
localFlag,
provisionFlag,
sshAuthFlag,
askSudoPassFlag,
},
Before: internal.SubCommandBefore,
ArgsUsage: "<domain> [<user>] [<port>]",
Action: func(c *cli.Context) error {
if len(c.Args()) > 0 && local || !internal.ValidateSubCmdFlags(c) {
err := errors.New("cannot use <domain> and --local together")
internal.ShowSubcommandHelpAndError(c, err)
Before: internal.SubCommandBefore,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
if cmd.Args().Len() > 0 && local || !internal.ValidateSubCmdFlags(cmd) {
err := errors.New("cannot use <name> and --local together")
internal.ShowSubcommandHelpAndError(cmd, err)
}
if sshAuth != "password" && sshAuth != "identity-file" {
err := errors.New("--ssh-auth only accepts identity-file or password")
internal.ShowSubcommandHelpAndError(c, err)
var name string
if local {
name = "default"
} else {
name = internal.ValidateDomain(cmd)
}
domainName := internal.ValidateDomain(c)
// NOTE(d1): reasonable 5 second timeout for connections which can't
// succeed. The connection is attempted twice, so this results in 10
// seconds.
timeout := client.WithTimeout(5)
if local {
if err := newLocalServer(c, "default"); err != nil {
logrus.Fatal(err)
created, err := createServerDir(name)
if err != nil {
log.Fatal(err)
}
log.Debugf("attempting to create client for %s", name)
if _, err := client.New(name, timeout); err != nil {
cleanUp(name)
log.Fatal(err)
}
if created {
log.Info("local server successfully added")
} else {
log.Warn("local server already exists")
}
return nil
}
username := c.Args().Get(1)
if username == "" {
systemUser, err := user.Current()
if err != nil {
return err
if !internal.NoDomainChecks {
if _, err := dns.EnsureIPv4(name); err != nil {
log.Fatal(err)
}
username = systemUser.Username
}
port := c.Args().Get(2)
if port == "" {
port = "22"
}
if err := createServerDir(domainName); err != nil {
logrus.Fatal(err)
}
if err := newContext(c, domainName, username, port); err != nil {
logrus.Fatal(err)
}
cl, err := newClient(c, domainName)
_, err := createServerDir(name)
if err != nil {
cleanUp(domainName)
logrus.Debugf("failed to construct client for %s, saw %s", domainName, err.Error())
logrus.Fatalf(fmt.Sprintf(internal.ServerAddFailMsg, domainName))
log.Fatal(err)
}
if provision {
logrus.Debugf("attempting to construct SSH client for %s", domainName)
sshCl, err := ssh.New(domainName, sshAuth, username, port)
if err != nil {
cleanUp(domainName)
logrus.Fatalf(fmt.Sprintf(internal.ServerAddFailMsg, domainName))
}
defer sshCl.Close()
logrus.Debugf("successfully created SSH client for %s", domainName)
if err := installDocker(c, cl, sshCl, domainName); err != nil {
logrus.Fatal(err)
}
if err := initSwarm(c, cl, domainName); err != nil {
logrus.Fatal(err)
}
created, err := newContext(name)
if err != nil {
cleanUp(name)
log.Fatal(err)
}
if _, err := cl.Info(context.Background()); err != nil {
cleanUp(domainName)
logrus.Fatalf(fmt.Sprintf(internal.ServerAddFailMsg, domainName))
log.Debugf("attempting to create client for %s", name)
if _, err := client.New(name, timeout); err != nil {
cleanUp(name)
log.Fatal(sshPkg.Fatal(name, err))
}
if created {
log.Infof("%s successfully added", name)
} else {
log.Warnf("%s already exists", name)
}
return nil
},
}
// ensureLocalExecutable ensures that an executable is present on the local machine
func ensureLocalExecutable(exe string) (bool, error) {
out, err := exec.Command("which", exe).Output()
if err != nil {
return false, err
}
return string(out) != "", nil
}
// ensureRemoteExecutable ensures that an executable is present on a remote machine
func ensureRemoteExecutable(exe string, sshCl *ssh.Client) (bool, error) {
out, err := sshCl.Exec(fmt.Sprintf("which %s", exe))
if err != nil && string(out) != "" {
return false, err
}
return string(out) != "", nil
}


@ -1,67 +1,97 @@
package server
import (
"context"
"fmt"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/context"
contextPkg "coopcloud.tech/abra/pkg/context"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"github.com/docker/cli/cli/connhelper/ssh"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
var serverListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Usage: "List managed servers",
Name: "list",
Aliases: []string{"ls"},
Usage: "List managed servers",
UsageText: "abra server list [options]",
Flags: []cli.Flag{
internal.DebugFlag,
internal.MachineReadableFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
dockerContextStore := context.NewDefaultDockerContextStore()
Before: internal.SubCommandBefore,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
dockerContextStore := contextPkg.NewDefaultDockerContextStore()
contexts, err := dockerContextStore.Store.List()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
tableColumns := []string{"name", "host", "user", "port"}
table := formatter.CreateTable(tableColumns)
defer table.Render()
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
headers := []string{"NAME", "HOST"}
table.Headers(headers...)
serverNames, err := config.ReadServerNames()
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
var rows [][]string
for _, serverName := range serverNames {
var row []string
for _, ctx := range contexts {
endpoint, err := context.GetContextEndpoint(ctx)
for _, dockerCtx := range contexts {
endpoint, err := contextPkg.GetContextEndpoint(dockerCtx)
if err != nil && strings.Contains(err.Error(), "does not exist") {
// No local context found, we can continue safely
continue
}
if ctx.Name == serverName {
if dockerCtx.Name == serverName {
sp, err := ssh.ParseURL(endpoint)
if err != nil {
logrus.Fatal(err)
log.Fatal(err)
}
row = []string{serverName, sp.Host, sp.User, sp.Port}
if sp.Host == "" {
sp.Host = "unknown"
}
row = []string{serverName, sp.Host}
rows = append(rows, row)
}
}
if len(row) == 0 {
if serverName == "default" {
row = []string{serverName, "local", "n/a", "n/a"}
row = []string{serverName, "local"}
} else {
row = []string{serverName, "unknown", "unknown", "unknown"}
row = []string{serverName, "unknown"}
}
rows = append(rows, row)
}
table.Append(row)
table.Row(row...)
}
if internal.MachineReadable {
out, err := formatter.ToJSON(headers, rows)
if err != nil {
log.Fatal("unable to render to JSON: %s", err)
}
fmt.Println(out)
return nil
}
fmt.Println(table)
return nil
},
}


@ -1,260 +0,0 @@
package server
import (
"context"
"fmt"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/libcapsul"
"github.com/AlecAivazis/survey/v2"
"github.com/hetznercloud/hcloud-go/hcloud"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
func newHetznerCloudVPS(c *cli.Context) error {
if err := internal.EnsureNewHetznerCloudVPSFlags(c); err != nil {
return err
}
client := hcloud.NewClient(hcloud.WithToken(internal.HetznerCloudAPIToken))
var sshKeysRaw []string
var sshKeys []*hcloud.SSHKey
for _, sshKey := range c.StringSlice("hetzner-ssh-keys") {
if sshKey == "" {
continue
}
sshKey, _, err := client.SSHKey.GetByName(context.Background(), sshKey)
if err != nil {
return err
}
sshKeys = append(sshKeys, sshKey)
sshKeysRaw = append(sshKeysRaw, sshKey.Name)
}
serverOpts := hcloud.ServerCreateOpts{
Name: internal.HetznerCloudName,
ServerType: &hcloud.ServerType{Name: internal.HetznerCloudType},
Image: &hcloud.Image{Name: internal.HetznerCloudImage},
SSHKeys: sshKeys,
Location: &hcloud.Location{Name: internal.HetznerCloudLocation},
}
sshKeyIDs := strings.Join(sshKeysRaw, "\n")
if sshKeyIDs == "" {
sshKeyIDs = "N/A (password auth)"
}
tableColumns := []string{"name", "type", "image", "ssh-keys", "location"}
table := formatter.CreateTable(tableColumns)
table.Append([]string{
internal.HetznerCloudName,
internal.HetznerCloudType,
internal.HetznerCloudImage,
sshKeyIDs,
internal.HetznerCloudLocation,
})
table.Render()
response := false
prompt := &survey.Confirm{
Message: "continue with hetzner cloud VPS creation?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
res, _, err := client.Server.Create(context.Background(), serverOpts)
if err != nil {
return err
}
var rootPassword string
if len(sshKeys) > 0 {
rootPassword = "N/A (using SSH keys)"
} else {
rootPassword = res.RootPassword
}
ip := res.Server.PublicNet.IPv4.IP.String()
fmt.Println(fmt.Sprintf(`
Your new Hetzner Cloud VPS has successfully been created! Here are the details:
name: %s
IP address: %s
root password: %s
You can access this new VPS via SSH using the following command:
ssh root@%s
Please note, this server is not managed by Abra yet (i.e. "abra server ls" will
not list this server)! You will need to assign a domain name record (manually
or by using "abra record new") and add the server to your Abra configuration
("abra server add") to have a working server that you can deploy Co-op Cloud
apps to.
When setting up domain name records, you probably want to set up the following
2 A records. This supports deploying apps to your root domain (e.g.
example.com) and other apps on sub-domains (e.g. foo.example.com,
bar.example.com).
@ 1800 IN A %s
* 1800 IN A %s
`,
internal.HetznerCloudName, ip, rootPassword,
ip, ip, ip,
))
return nil
}
func newCapsulVPS(c *cli.Context) error {
if err := internal.EnsureNewCapsulVPSFlags(c); err != nil {
return err
}
capsulCreateURL := fmt.Sprintf("https://%s/api/capsul/create", internal.CapsulInstanceURL)
var sshKeys []string
for _, sshKey := range c.StringSlice("capsul-ssh-keys") {
if sshKey == "" {
continue
}
sshKeys = append(sshKeys, sshKey)
}
tableColumns := []string{"instance", "name", "type", "image", "ssh-keys"}
table := formatter.CreateTable(tableColumns)
table.Append([]string{
internal.CapsulInstanceURL,
internal.CapsulName,
internal.CapsulType,
internal.CapsulImage,
strings.Join(sshKeys, "\n"),
})
table.Render()
response := false
prompt := &survey.Confirm{
Message: "continue with capsul creation?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
capsulClient := libcapsul.New(capsulCreateURL, internal.CapsulAPIToken)
resp, err := capsulClient.Create(
internal.CapsulName,
internal.CapsulType,
internal.CapsulImage,
sshKeys,
)
if err != nil {
return err
}
fmt.Println(fmt.Sprintf(`
Your new Capsul has successfully been created! Here are the details:
Capsul name: %s
Capsul ID: %v
You will need to log into your Capsul instance web interface to retrieve the IP
address. You can learn all about how to get SSH access to your new Capsul on:
%s/about-ssh
Please note, this server is not managed by Abra yet (i.e. "abra server ls" will
not list this server)! You will need to assign a domain name record (manually
or by using "abra record new") and add the server to your Abra configuration
("abra server add") to have a working server that you can deploy Co-op Cloud
apps to.
When setting up domain name records, you probably want to set up the following
2 A records. This supports deploying apps to your root domain (e.g.
example.com) and other apps on sub-domains (e.g. foo.example.com,
bar.example.com).
@ 1800 IN A <your-capsul-ip>
* 1800 IN A <your-capsul-ip>
`, internal.CapsulName, resp.ID, internal.CapsulInstanceURL))
return nil
}
var serverNewCommand = cli.Command{
Name: "new",
Aliases: []string{"n"},
Usage: "Create a new server using a 3rd party provider",
Description: `
Create a new server via a 3rd party provider.
The following providers are supported:
Capsul https://git.cyberia.club/Cyberia/capsul-flask
Hetzner Cloud https://docs.hetzner.com/cloud
You may invoke this command in "wizard" mode and be prompted for input:
abra record new
API tokens are read from the environment if specified, e.g.
export HCLOUD_TOKEN=...
`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.ServerProviderFlag,
// Capsul
internal.CapsulInstanceURLFlag,
internal.CapsulTypeFlag,
internal.CapsulImageFlag,
internal.CapsulSSHKeysFlag,
internal.CapsulAPITokenFlag,
// Hetzner
internal.HetznerCloudNameFlag,
internal.HetznerCloudTypeFlag,
internal.HetznerCloudImageFlag,
internal.HetznerCloudSSHKeysFlag,
internal.HetznerCloudLocationFlag,
internal.HetznerCloudAPITokenFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
if err := internal.EnsureServerProvider(); err != nil {
logrus.Fatal(err)
}
switch internal.ServerProvider {
case "capsul":
if err := newCapsulVPS(c); err != nil {
logrus.Fatal(err)
}
case "hetzner-cloud":
if err := newHetznerCloudVPS(c); err != nil {
logrus.Fatal(err)
}
}
return nil
},
}

cli/server/prune.go Normal file

@ -0,0 +1,100 @@
package server
import (
"context"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"github.com/docker/docker/api/types/filters"
"github.com/urfave/cli/v3"
)
var allFilter bool
var allFilterFlag = &cli.BoolFlag{
Name: "all",
Aliases: []string{"a"},
Usage: "Remove all unused images not just dangling ones",
Destination: &allFilter,
}
var volumesFilter bool
var volumesFilterFlag = &cli.BoolFlag{
Name: "volumes",
Aliases: []string{"v"},
Usage: "Prune volumes. This will remove app data, Be Careful!",
Destination: &volumesFilter,
}
var serverPruneCommand = cli.Command{
Name: "prune",
Aliases: []string{"p"},
Usage: "Prune resources on a server",
UsageText: "abra server prune <server> [options]",
Description: `Prunes unused containers, networks, and dangling images.
Use "-v/--volumes" to remove volumes that are not associated with a deployed
app. This can result in unwanted data loss if not used carefully.`,
Flags: []cli.Flag{
allFilterFlag,
volumesFilterFlag,
},
Before: internal.SubCommandBefore,
ShellComplete: autocomplete.ServerNameComplete,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
serverName := internal.ValidateServer(cmd)
cl, err := client.New(serverName)
if err != nil {
log.Fatal(err)
}
var args filters.Args
cr, err := cl.ContainersPrune(ctx, args)
if err != nil {
log.Fatal(err)
}
cntSpaceReclaimed := formatter.ByteCountSI(cr.SpaceReclaimed)
log.Infof("containers pruned: %d; space reclaimed: %s", len(cr.ContainersDeleted), cntSpaceReclaimed)
nr, err := cl.NetworksPrune(ctx, args)
if err != nil {
log.Fatal(err)
}
log.Infof("networks pruned: %d", len(nr.NetworksDeleted))
pruneFilters := filters.NewArgs()
if allFilter {
log.Debugf("removing all images, not only dangling ones")
pruneFilters.Add("dangling", "false")
}
ir, err := cl.ImagesPrune(ctx, pruneFilters)
if err != nil {
log.Fatal(err)
}
imgSpaceReclaimed := formatter.ByteCountSI(ir.SpaceReclaimed)
log.Infof("images pruned: %d; space reclaimed: %s", len(ir.ImagesDeleted), imgSpaceReclaimed)
if volumesFilter {
vr, err := cl.VolumesPrune(ctx, args)
if err != nil {
log.Fatal(err)
}
volSpaceReclaimed := formatter.ByteCountSI(vr.SpaceReclaimed)
log.Infof("volumes pruned: %d; space reclaimed: %s", len(vr.VolumesDeleted), volSpaceReclaimed)
}
return nil
},
}
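One detail of the image pruning above is easy to misread: the Docker API prunes only dangling (untagged) images by default, and adding the "dangling=false" filter widens the prune to all unused images. A minimal sketch of that call in isolation, assuming the same client and logging setup as above:

pruneFilters := filters.NewArgs()
pruneFilters.Add("dangling", "false") // prune all unused images, not just untagged ones
report, err := cl.ImagesPrune(ctx, pruneFilters)
if err != nil {
log.Fatal(err)
}
log.Infof("space reclaimed: %s", formatter.ByteCountSI(report.SpaceReclaimed))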


@ -2,182 +2,42 @@ package server
import (
"context"
"fmt"
"os"
"path/filepath"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"github.com/AlecAivazis/survey/v2"
"github.com/hetznercloud/hcloud-go/hcloud"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"coopcloud.tech/abra/pkg/log"
"github.com/urfave/cli/v3"
)
var rmServer bool
var rmServerFlag = &cli.BoolFlag{
Name: "server, s",
Usage: "remove the actual server also",
Destination: &rmServer,
}
func rmHetznerCloudVPS(c *cli.Context) error {
if internal.HetznerCloudName == "" && !internal.NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS name",
}
if err := survey.AskOne(prompt, &internal.HetznerCloudName); err != nil {
return err
}
}
if internal.HetznerCloudAPIToken == "" && !internal.NoInput {
token, ok := os.LookupEnv("HCLOUD_TOKEN")
if !ok {
prompt := &survey.Input{
Message: "specify hetzner cloud API token",
}
if err := survey.AskOne(prompt, &internal.HetznerCloudAPIToken); err != nil {
return err
}
} else {
internal.HetznerCloudAPIToken = token
}
}
client := hcloud.NewClient(hcloud.WithToken(internal.HetznerCloudAPIToken))
server, _, err := client.Server.Get(context.Background(), internal.HetznerCloudName)
if err != nil {
return err
}
if server == nil {
logrus.Fatalf("library provider reports that %s doesn't exist?", internal.HetznerCloudName)
}
fmt.Println(fmt.Sprintf(`
You have requested that Abra delete the following server (%s). Please be
absolutely sure that this is indeed the server that you would like to have
removed. There will be no going back once you confirm, the server will be
destroyed.
`, server.Name))
tableColumns := []string{"name", "type", "image", "location"}
table := formatter.CreateTable(tableColumns)
table.Append([]string{
server.Name,
server.ServerType.Name,
server.Image.Name,
server.Datacenter.Name,
})
table.Render()
response := false
prompt := &survey.Confirm{
Message: "continue with hetzner cloud VPS removal?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
_, err = client.Server.Delete(context.Background(), server)
if err != nil {
return err
}
logrus.Infof("%s has been deleted from your hetzner cloud account", internal.HetznerCloudName)
return nil
}
var serverRemoveCommand = cli.Command{
Name: "remove",
Aliases: []string{"rm"},
ArgsUsage: "[<server>]",
UsageText: "abra server remove <domain> [options]",
Usage: "Remove a managed server",
Description: `Remove a managed server.
Depending on whether you used a 3rd party provider to create this server ("abra
server new"), you can also destroy the virtual server as well. Pass
"--server/-s" to tell Abra to try to delete this VPS.
Otherwise, Abra will remove the internal bookkeeping ($ABRA_DIR/servers/...) and
underlying client connection context. This server will then be lost in time,
like tears in rain.
`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
rmServerFlag,
internal.ServerProviderFlag,
// Hetzner
internal.HetznerCloudNameFlag,
internal.HetznerCloudAPITokenFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
serverName := internal.ValidateServer(c)
warnMsg := `Did not pass -s/--server for actual server deletion, prompting!
Abra doesn't currently know if it helped you create this server with one of the
3rd party integrations (e.g. Capsul). You have a choice here to actually,
really and finally destroy this server using those integrations. If you want to
do this, choose Yes.
If you just want to remove the server config files & context, choose No.
`
if !rmServer {
logrus.Warn(warnMsg)
response := false
prompt := &survey.Confirm{
Message: "delete actual live server?",
}
if err := survey.AskOne(prompt, &response); err != nil {
logrus.Fatal(err)
}
if response {
logrus.Info("setting -s/--server and attempting to remove actual server")
rmServer = true
}
}
if rmServer {
if err := internal.EnsureServerProvider(); err != nil {
logrus.Fatal(err)
}
switch internal.ServerProvider {
case "capsul":
logrus.Warn("capsul provider does not support automatic removal yet, sorry!")
case "hetzner-cloud":
if err := rmHetznerCloudVPS(c); err != nil {
logrus.Fatal(err)
}
}
}
if err := client.DeleteContext(serverName); err != nil {
log.Fatal(err)
}
if err := os.RemoveAll(filepath.Join(config.SERVERS_DIR, serverName)); err != nil {
log.Fatal(err)
}
logrus.Infof("server at %s has been lost in time, like tears in rain", serverName)
log.Infof("%s is now lost in time, like tears in rain", serverName)
return nil
},


@ -1,26 +1,19 @@
package server
import (
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
// ServerCommand defines the `abra server` command and its subcommands
var ServerCommand = cli.Command{
Name: "server",
Aliases: []string{"s"},
Usage: "Manage servers",
Description: `
Create, manage and remove servers using 3rd party integrations.
Servers can be created from scratch using the "abra server new" command. If you
already have a server, you can add it to your configuration using "abra server
add". Abra can provision servers so that they are ready to deploy Co-op Cloud
recipes, see available flags on "abra server add" for more.
`,
Subcommands: []cli.Command{
serverNewCommand,
serverAddCommand,
serverListCommand,
serverRemoveCommand,
Name: "server",
Aliases: []string{"s"},
Usage: "Manage servers",
UsageText: "abra server [command] [arguments] [options]",
Commands: []*cli.Command{
&serverAddCommand,
&serverListCommand,
&serverRemoveCommand,
&serverPruneCommand,
},
}

cli/updater/updater.go Normal file

@ -0,0 +1,512 @@
package updater
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/convert"
"coopcloud.tech/abra/pkg/upstream/stack"
"coopcloud.tech/tagcmp"
charmLog "github.com/charmbracelet/log"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerclient "github.com/docker/docker/client"
"coopcloud.tech/abra/pkg/log"
"github.com/urfave/cli/v3"
)
const SERVER = "localhost"
var majorUpdate bool
var majorFlag = &cli.BoolFlag{
Name: "major",
Aliases: []string{"m"},
Usage: "Also check for major updates",
Destination: &majorUpdate,
}
var updateAll bool
var allFlag = &cli.BoolFlag{
Name: "all",
Aliases: []string{"a"},
Usage: "Update all deployed apps",
Destination: &updateAll,
}
// Notify checks for available upgrades
var Notify = cli.Command{
Name: "notify",
Aliases: []string{"n"},
Usage: "Check for available upgrades",
UsageText: "kadabra notify [options]",
Flags: []cli.Flag{
majorFlag,
},
Before: internal.SubCommandBefore,
Description: `Notify on new versions for deployed apps.
If a new patch/minor version is available, a notification is printed.
Use "--major" to include new major versions.`,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
cl, err := client.New("default")
if err != nil {
log.Fatal(err)
}
stacks, err := stack.GetStacks(cl)
if err != nil {
log.Fatal(err)
}
for _, stackInfo := range stacks {
stackName := stackInfo.Name
recipeName, err := getLabel(cl, stackName, "recipe")
if err != nil {
log.Fatal(err)
}
if recipeName != "" {
_, err = getLatestUpgrade(cl, stackName, recipeName)
if err != nil {
log.Fatal(err)
}
}
}
return nil
},
}
// UpgradeApp upgrades apps.
var UpgradeApp = cli.Command{
Name: "upgrade",
Aliases: []string{"u"},
Usage: "Upgrade apps",
UsageText: "kadabra notify <stack> <recipe> [options]",
Flags: []cli.Flag{
internal.ChaosFlag,
majorFlag,
allFlag,
},
Before: internal.SubCommandBefore,
Description: `Upgrade an app by specifying stack name and recipe.
Use "--all" to upgrade every deployed app.
For each app with auto updates enabled, the deployed version is compared with
the current recipe catalogue version. If a new patch/minor version is
available, the app is upgraded.
To include major versions use the "--major" flag. You probably don't want that
as it will break things. Only apps that are not deployed with "--chaos" are
upgraded, to update chaos deployments use the "--chaos" flag. Use it with care.`,
HideHelp: true,
Action: func(ctx context.Context, cmd *cli.Command) error {
cl, err := client.New("default")
if err != nil {
log.Fatal(err)
}
if !updateAll {
stackName := cmd.Args().Get(0)
recipeName := cmd.Args().Get(1)
err = tryUpgrade(cl, stackName, recipeName)
if err != nil {
log.Fatal(err)
}
return nil
}
stacks, err := stack.GetStacks(cl)
if err != nil {
log.Fatal(err)
}
for _, stackInfo := range stacks {
stackName := stackInfo.Name
recipeName, err := getLabel(cl, stackName, "recipe")
if err != nil {
log.Fatal(err)
}
err = tryUpgrade(cl, stackName, recipeName)
if err != nil {
log.Fatal(err)
}
}
return nil
},
}
// getLabel reads docker labels from running services in the format of "coop-cloud.${STACK_NAME}.${LABEL}".
func getLabel(cl *dockerclient.Client, stackName string, label string) (string, error) {
filter := filters.NewArgs()
filter.Add("label", fmt.Sprintf("%s=%s", convert.LabelNamespace, stackName))
services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: filter})
if err != nil {
return "", err
}
for _, service := range services {
labelKey := fmt.Sprintf("coop-cloud.%s.%s", stackName, label)
if labelValue, ok := service.Spec.Labels[labelKey]; ok {
return labelValue, nil
}
}
log.Debugf("no %s label found for %s", label, stackName)
return "", nil
}
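For example, with a hypothetical stack named "wordpress_example_com", the recipe name is read from the running service labels like so:

recipeName, err := getLabel(cl, "wordpress_example_com", "recipe")
// matches the label "coop-cloud.wordpress_example_com.recipe" on any of the stack's services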
// getBoolLabel reads a boolean docker label from running services
func getBoolLabel(cl *dockerclient.Client, stackName string, label string) (bool, error) {
labelValue, err := getLabel(cl, stackName, label)
if err != nil {
return false, err
}
if labelValue != "" {
value, err := strconv.ParseBool(labelValue)
if err != nil {
return false, err
}
return value, nil
}
log.Debugf("boolean label %s could not be found for %s, defaulting to false", label, stackName)
return false, nil
}
// getEnv reads env variables from docker services.
func getEnv(cl *dockerclient.Client, stackName string) (envfile.AppEnv, error) {
envMap := make(map[string]string)
filter := filters.NewArgs()
filter.Add("label", fmt.Sprintf("%s=%s", convert.LabelNamespace, stackName))
services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: filter})
if err != nil {
return nil, err
}
for _, service := range services {
envList := service.Spec.TaskTemplate.ContainerSpec.Env
for _, envString := range envList {
splitString := strings.SplitN(envString, "=", 2)
if len(splitString) != 2 {
log.Debugf("can't separate key from value: %s (this variable is probably unset)", envString)
continue
}
k := splitString[0]
v := splitString[1]
log.Debugf("for %s read env %s with value: %s from docker service", stackName, k, v)
envMap[k] = v
}
}
return envMap, nil
}
// getLatestUpgrade returns the latest available version for an app respecting
// the "--major" flag if it is newer than the currently deployed version.
func getLatestUpgrade(cl *dockerclient.Client, stackName string, recipeName string) (string, error) {
deployedVersion, err := getDeployedVersion(cl, stackName, recipeName)
if err != nil {
return "", err
}
availableUpgrades, err := getAvailableUpgrades(cl, stackName, recipeName, deployedVersion)
if err != nil {
return "", err
}
if len(availableUpgrades) == 0 {
log.Debugf("no available upgrades for %s", stackName)
return "", nil
}
var chosenUpgrade string
if len(availableUpgrades) > 0 {
chosenUpgrade = availableUpgrades[len(availableUpgrades)-1]
log.Infof("%s (%s) can be upgraded from version %s to %s", stackName, recipeName, deployedVersion, chosenUpgrade)
}
return chosenUpgrade, nil
}
// getDeployedVersion returns the currently deployed version of an app.
func getDeployedVersion(cl *dockerclient.Client, stackName string, recipeName string) (string, error) {
log.Debugf("retrieve deployed version whether %s is already deployed", stackName)
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
return "", err
}
if !deployMeta.IsDeployed {
return "", fmt.Errorf("%s is not deployed?", stackName)
}
if deployMeta.Version == "unknown" {
return "", fmt.Errorf("failed to determine deployed version of %s", stackName)
}
return deployMeta.Version, nil
}
// getAvailableUpgrades returns all available versions of an app that are newer
// than the deployed version. It only includes major upgrades if the "--major"
// flag is set.
func getAvailableUpgrades(cl *dockerclient.Client, stackName string, recipeName string,
deployedVersion string) ([]string, error) {
catl, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
return nil, err
}
versions, err := recipe.GetRecipeCatalogueVersions(recipeName, catl)
if err != nil {
return nil, err
}
if len(versions) == 0 {
log.Warnf("no published releases for %s in the recipe catalogue?", recipeName)
return nil, nil
}
var availableUpgrades []string
for _, version := range versions {
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil {
return nil, err
}
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
return nil, err
}
versionDelta, err := parsedDeployedVersion.UpgradeDelta(parsedVersion)
if err != nil {
return nil, err
}
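// Assumption from the condition below: tagcmp encodes the size of the bump
// as an integer where values under 4 mean a patch or minor upgrade and 4 or
// above mean a major one, so majors are excluded unless "--major" is passed.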
if 0 < versionDelta.UpgradeType() && (versionDelta.UpgradeType() < 4 || majorUpdate) {
availableUpgrades = append(availableUpgrades, version)
}
}
log.Debugf("available updates for %s: %s", stackName, availableUpgrades)
return availableUpgrades, nil
}
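A worked example, assuming tagcmp reports patch/minor deltas for same-major bumps and a major delta otherwise: with 1.2.0 deployed and 1.2.1, 1.3.0 and 2.0.0 in the catalogue,

// getAvailableUpgrades(...)           -> [1.2.1 1.3.0]        (major excluded)
// getAvailableUpgrades with "--major" -> [1.2.1 1.3.0 2.0.0]
// getLatestUpgrade then picks the last entry: 1.3.0, or 2.0.0 with "--major"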
// processRecipeRepoVersion clones, pulls, checks out the version and lints the
// recipe repository.
func processRecipeRepoVersion(r recipe.Recipe, version string) error {
if err := r.EnsureExists(); err != nil {
return err
}
if err := r.EnsureUpToDate(); err != nil {
return err
}
if _, err := r.EnsureVersion(version); err != nil {
return err
}
if err := lint.LintForErrors(r); err != nil {
return err
}
return nil
}
// mergeAbraShEnv merges abra.sh env vars into the app env vars.
func mergeAbraShEnv(recipe recipe.Recipe, env envfile.AppEnv) error {
abraShEnv, err := envfile.ReadAbraShEnvVars(recipe.AbraShPath)
if err != nil {
return err
}
for k, v := range abraShEnv {
log.Debugf("read v:%s k: %s", v, k)
env[k] = v
}
return nil
}
// createDeployConfig merges and enriches the compose config for the deployment.
func createDeployConfig(r recipe.Recipe, stackName string, env envfile.AppEnv) (*composetypes.Config, stack.Deploy, error) {
env["STACK_NAME"] = stackName
deployOpts := stack.Deploy{
Namespace: stackName,
Prune: false,
ResolveImage: stack.ResolveImageAlways,
Detach: false,
}
composeFiles, err := r.GetComposeFiles(env)
if err != nil {
return nil, deployOpts, err
}
deployOpts.Composefiles = composeFiles
compose, err := appPkg.GetAppComposeConfig(stackName, deployOpts, env)
if err != nil {
return nil, deployOpts, err
}
appPkg.ExposeAllEnv(stackName, compose, env)
// after the upgrade the deployment won't be in chaos state anymore
appPkg.SetChaosLabel(compose, stackName, false)
appPkg.SetRecipeLabel(compose, stackName, r.Name)
appPkg.SetUpdateLabel(compose, stackName, env)
return compose, deployOpts, nil
}
// tryUpgrade performs the upgrade if all the requirements are fulfilled.
func tryUpgrade(cl *dockerclient.Client, stackName, recipeName string) error {
if recipeName == "" {
log.Debugf("don't update %s due to missing recipe name", stackName)
return nil
}
chaos, err := getBoolLabel(cl, stackName, "chaos")
if err != nil {
return err
}
if chaos && !internal.Chaos {
log.Debugf("don't update %s due to chaos deployment", stackName)
return nil
}
updatesEnabled, err := getBoolLabel(cl, stackName, "autoupdate")
if err != nil {
return err
}
if !updatesEnabled {
log.Debugf("don't update %s due to disabled auto updates or missing ENABLE_AUTO_UPDATE env", stackName)
return nil
}
upgradeVersion, err := getLatestUpgrade(cl, stackName, recipeName)
if err != nil {
return err
}
if upgradeVersion == "" {
log.Debugf("don't update %s due to no new version", stackName)
return nil
}
err = upgrade(cl, stackName, recipeName, upgradeVersion)
return err
}
// upgrade performs all necessary steps to upgrade an app.
func upgrade(cl *dockerclient.Client, stackName, recipeName, upgradeVersion string) error {
env, err := getEnv(cl, stackName)
if err != nil {
return err
}
app := appPkg.App{
Name: stackName,
Recipe: recipe.Get(recipeName),
Server: SERVER,
Env: env,
}
r := recipe.Get(recipeName)
if err = processRecipeRepoVersion(r, upgradeVersion); err != nil {
return err
}
if err = mergeAbraShEnv(app.Recipe, app.Env); err != nil {
return err
}
compose, deployOpts, err := createDeployConfig(r, stackName, app.Env)
if err != nil {
return err
}
log.Infof("upgrade %s (%s) to version %s", stackName, recipeName, upgradeVersion)
err = stack.RunDeploy(cl, deployOpts, compose, stackName, true)
return err
}
func newKadabraApp(version, commit string) *cli.Command {
app := &cli.Command{
Name: "kadabra",
Version: fmt.Sprintf("%s-%s", version, commit[:7]),
Usage: "The Co-op Cloud auto-updater 🤖 🚀",
UsageText: "kadabra [command] [options]",
UseShortOptionHandling: true,
HideHelpCommand: true,
Flags: []cli.Flag{
// NOTE(d1): "GLOBAL OPTIONS" flags
internal.DebugFlag,
},
Commands: []*cli.Command{
&Notify,
&UpgradeApp,
},
}
app.Before = func(ctx context.Context, cmd *cli.Command) error {
log.Logger.SetStyles(log.Styles())
charmLog.SetDefault(log.Logger)
log.Debugf("kadabra version %s, commit %s", version, commit)
return nil
}
cli.HelpFlag = &cli.BoolFlag{
Name: "help",
Aliases: []string{"h, H"},
Usage: "Show help",
}
return app
}
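Note how the version string is assembled above: the commit is truncated to its first seven characters. With hypothetical values:

app := newKadabraApp("0.1.0", "abc1234def5678")
// app.Version == "0.1.0-abc1234"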
// RunApp runs the kadabra CLI app.
func RunApp(version, commit string) {
app := newKadabraApp(version, commit)
if err := app.Run(context.Background(), os.Args); err != nil {
log.Fatal(err)
}
}


@ -5,10 +5,10 @@ import (
"coopcloud.tech/abra/cli"
)
// Version is the current version of Abra.
var Version string
// Commit is the current git commit of Abra.
var Commit string
func main() {

cmd/kadabra/main.go Normal file

@ -0,0 +1,23 @@
// Package main provides the command-line entrypoint.
package main
import (
"coopcloud.tech/abra/cli/updater"
)
// Version is the current version of Kadabra.
var Version string
// Commit is the current git commit of Kadabra.
var Commit string
func main() {
if Version == "" {
Version = "dev"
}
if Commit == "" {
Commit = " "
}
updater.RunApp(Version, Commit)
}

go.mod

@ -1,52 +1,139 @@
module coopcloud.tech/abra
go 1.21
replace github.com/urfave/cli/v3 => github.com/urfave/cli/v3 v3.0.0-alpha9.1.0.20241019193437-5053ec708a44
require (
coopcloud.tech/tagcmp v0.0.0-20211103052201-885b22f77d52
github.com/AlecAivazis/survey/v2 v2.3.6
github.com/Autonomic-Cooperative/godotenv v1.3.1-0.20210731094149-b031ea1211e7
github.com/Gurpartap/logrus-stack v0.0.0-20170710170904-89c00d8a28f4
github.com/docker/cli v20.10.21+incompatible
github.com/docker/distribution v2.8.1+incompatible
github.com/docker/docker v20.10.21+incompatible
coopcloud.tech/tagcmp v0.0.0-20230809071031-eb3e7758d4eb
git.coopcloud.tech/coop-cloud/godotenv v1.5.2-0.20231130100509-01bff8284355
github.com/AlecAivazis/survey/v2 v2.3.7
github.com/charmbracelet/lipgloss v0.11.1
github.com/charmbracelet/log v0.4.0
github.com/distribution/reference v0.6.0
github.com/docker/cli v27.0.3+incompatible
github.com/docker/docker v27.0.3+incompatible
github.com/docker/go-units v0.5.0
github.com/go-git/go-git/v5 v5.5.1
github.com/hetznercloud/hcloud-go v1.38.0
github.com/go-git/go-git/v5 v5.12.0
github.com/google/go-cmp v0.6.0
github.com/moby/sys/signal v0.7.0
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
github.com/olekukonko/tablewriter v0.0.5
github.com/moby/term v0.5.0
github.com/pkg/errors v0.9.1
github.com/schollz/progressbar/v3 v3.12.2
github.com/sirupsen/logrus v1.9.0
gotest.tools/v3 v3.4.0
github.com/schollz/progressbar/v3 v3.14.4
github.com/urfave/cli/v3 v3.0.0-alpha9
golang.org/x/term v0.22.0
gopkg.in/yaml.v3 v3.0.1
gotest.tools/v3 v3.5.1
)
require (
coopcloud.tech/libcapsul v0.0.0-20211022074848-c35e78fe3f3e
github.com/buger/goterm v1.0.4
github.com/containerd/containerd v1.5.9 // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/BurntSushi/toml v1.4.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/ProtonMail/go-crypto v1.0.0 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/charmbracelet/x/ansi v0.1.3 // indirect
github.com/cloudflare/circl v1.3.9 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/cyphar/filepath-securejoin v0.2.5 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.5.0 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-viper/mapstructure/v2 v2.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/muesli/termenv v0.15.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.1.13 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/skeema/knownhosts v1.2.2 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/crypto v0.25.0 // indirect
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
golang.org/x/net v0.27.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
require (
github.com/containerd/containerd v1.7.19 // indirect
github.com/containers/image v3.0.2+incompatible
github.com/containers/storage v1.38.2 // indirect
github.com/decentral1se/passgen v1.0.1
github.com/docker/docker-credential-helpers v0.6.4 // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
github.com/gliderlabs/ssh v0.3.5
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/fvbommel/sortorder v1.1.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/hashicorp/go-retryablehttp v0.7.1
github.com/kevinburke/ssh_config v1.2.0
github.com/klauspost/pgzip v1.2.5
github.com/libdns/gandi v1.0.2
github.com/libdns/libdns v0.2.1
github.com/moby/sys/mount v0.2.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/spf13/cobra v1.3.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/stretchr/testify v1.9.0
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/urfave/cli v1.22.9
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b // indirect
golang.org/x/crypto v0.4.0
golang.org/x/sys v0.3.0
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
golang.org/x/sys v0.22.0
)

go.sum

File diff suppressed because it is too large


@ -1,85 +1,624 @@
package app
import (
"context"
"bufio"
"fmt"
"os"
"path"
"regexp"
"sort"
"strings"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/convert"
"coopcloud.tech/abra/pkg/upstream/stack"
apiclient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"coopcloud.tech/abra/pkg/log"
loader "coopcloud.tech/abra/pkg/upstream/stack"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types/filters"
"github.com/schollz/progressbar/v3"
)
// Get retrieves an app
func Get(appName string) (App, error) {
files, err := LoadAppFiles("")
if err != nil {
return App{}, err
}
app, err := GetApp(files, appName)
if err != nil {
return App{}, err
}
log.Debugf("retrieved %s for %s", app, appName)
return app, nil
}
// GetApp loads an apps settings, reading it from file, in preparation to use
// it. It should only be used when ready to use the env file to keep IO
// operations down.
func GetApp(apps AppFiles, name AppName) (App, error) {
appFile, exists := apps[name]
if !exists {
return App{}, fmt.Errorf("cannot find app with name %s", name)
}
app, err := ReadAppEnvFile(appFile, name)
if err != nil {
return App{}, err
}
return app, nil
}
// GetApps returns a slice of Apps with their env files read from a given
// slice of AppFiles.
func GetApps(appFiles AppFiles, recipeFilter string) ([]App, error) {
var apps []App
for name := range appFiles {
app, err := GetApp(appFiles, name)
if err != nil {
return nil, err
}
if recipeFilter != "" {
if app.Recipe.Name == recipeFilter {
apps = append(apps, app)
}
} else {
apps = append(apps, app)
}
}
return apps, nil
}
// App represents an app with its env file read into memory
type App struct {
Name AppName
Recipe recipe.Recipe
Domain string
Env envfile.AppEnv
Server string
Path string
}
// Type aliases to make code hints easier to understand
// AppName is the name of an app
type AppName = string
// AppFile represents app env files on disk without reading the contents
type AppFile struct {
Path string
Server string
}
// AppFiles is a map of app names to their env files
type AppFiles map[AppName]AppFile
// StackName returns the docker stack name for the app; see the
// documentation of the package-level StackName function.
func (a App) StackName() string {
if _, exists := a.Env["STACK_NAME"]; exists {
return a.Env["STACK_NAME"]
}
stackName := StackName(a.Name)
a.Env["STACK_NAME"] = stackName
return stackName
}
// StackName gets whatever the docker safe (uses the right delimiting
// character, e.g. "_") stack name is for the app. In general, you don't want
// to use this to show anything to end-users, you want to use a.Name instead.
func StackName(appName string) string {
stackName := SanitiseAppName(appName)
if len(stackName) > config.MAX_SANITISED_APP_NAME_LENGTH {
log.Debugf("trimming %s to %s to avoid runtime limits", stackName, stackName[:config.MAX_SANITISED_APP_NAME_LENGTH])
stackName = stackName[:config.MAX_SANITISED_APP_NAME_LENGTH]
}
return stackName
}
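A short sketch of what this produces, using a hypothetical app name (dots are the only characters SanitiseAppName replaces):

stackName := StackName("myapp.example.com")
// stackName == "myapp_example_com", truncated to
// config.MAX_SANITISED_APP_NAME_LENGTH characters if it is longer than that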
// Filters retrieves app filters for querying the container runtime. By default
// it filters on all services in the app. It is also possible to pass an
// optional list of service names, which get filtered instead.
//
// Due to upstream issues, filtering works differently depending on what you're
// querying. So, for example, secrets don't work with regex! The caller needs
// to implement their own validation that the right secrets are matched. In
// order to handle these cases, we provide the `appendServiceNames` /
// `exactMatch` modifiers.
func (a App) Filters(appendServiceNames, exactMatch bool, services ...string) (filters.Args, error) {
filters := filters.NewArgs()
if len(services) > 0 {
for _, serviceName := range services {
filters.Add("name", ServiceFilter(a.StackName(), serviceName, exactMatch))
}
return filters, nil
}
// When not appending the service name, just add one filter for the whole
// stack.
if !appendServiceNames {
f := fmt.Sprintf("%s", a.StackName())
if exactMatch {
f = fmt.Sprintf("^%s", f)
}
filters.Add("name", f)
return filters, nil
}
composeFiles, err := a.Recipe.GetComposeFiles(a.Env)
if err != nil {
return filters, err
}
opts := stack.Deploy{Composefiles: composeFiles}
compose, err := GetAppComposeConfig(a.Recipe.Name, opts, a.Env)
if err != nil {
return filters, err
}
for _, service := range compose.Services {
f := ServiceFilter(a.StackName(), service.Name, exactMatch)
filters.Add("name", f)
}
return filters, nil
}
// ServiceFilter creates a filter string for filtering a service in the docker
// container runtime. When exact match is true, it uses regex to match the
// string exactly.
func ServiceFilter(stack, service string, exact bool) string {
if exact {
return fmt.Sprintf("^%s_%s", stack, service)
}
return fmt.Sprintf("%s_%s", stack, service)
}
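Putting the modifiers together, assuming a stack "test_example_com" with a service "foo" (mirroring the test data later in this diff):

ServiceFilter("test_example_com", "foo", false) // "test_example_com_foo"
ServiceFilter("test_example_com", "foo", true)  // "^test_example_com_foo", regex-anchored exact match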
// ByServer sorts a slice of Apps
type ByServer []App
func (a ByServer) Len() int { return len(a) }
func (a ByServer) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByServer) Less(i, j int) bool {
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
}
// ByServerAndRecipe sorts a slice of Apps
type ByServerAndRecipe []App
func (a ByServerAndRecipe) Len() int { return len(a) }
func (a ByServerAndRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByServerAndRecipe) Less(i, j int) bool {
if a[i].Server == a[j].Server {
return strings.ToLower(a[i].Recipe.Name) < strings.ToLower(a[j].Recipe.Name)
}
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
}
// ByRecipe sorts a slice of Apps
type ByRecipe []App
func (a ByRecipe) Len() int { return len(a) }
func (a ByRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByRecipe) Less(i, j int) bool {
return strings.ToLower(a[i].Recipe.Name) < strings.ToLower(a[j].Recipe.Name)
}
// ByName sorts a slice of Apps
type ByName []App
func (a ByName) Len() int { return len(a) }
func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByName) Less(i, j int) bool {
return strings.ToLower(a[i].Name) < strings.ToLower(a[j].Name)
}
func ReadAppEnvFile(appFile AppFile, name AppName) (App, error) {
env, err := envfile.ReadEnv(appFile.Path)
if err != nil {
return App{}, fmt.Errorf("env file for %s couldn't be read: %s", name, err.Error())
}
log.Debugf("read env %s from %s", env, appFile.Path)
app, err := NewApp(env, name, appFile)
if err != nil {
return App{}, fmt.Errorf("env file for %s has issues: %s", name, err.Error())
}
return app, nil
}
// NewApp creates new App object
func NewApp(env envfile.AppEnv, name string, appFile AppFile) (App, error) {
domain := env["DOMAIN"]
recipeName, exists := env["RECIPE"]
if !exists {
recipeName, exists = env["TYPE"]
if !exists {
return App{}, fmt.Errorf("%s is missing the TYPE env var?", name)
}
}
return App{
Name: name,
Domain: domain,
Recipe: recipe.Get(recipeName),
Env: env,
Server: appFile.Server,
Path: appFile.Path,
}, nil
}
// LoadAppFiles gets all app files for a given set of servers or all servers.
func LoadAppFiles(servers ...string) (AppFiles, error) {
appFiles := make(AppFiles)
if len(servers) == 1 {
if servers[0] == "" {
// Empty servers flag, one string will always be passed
var err error
servers, err = config.GetAllFoldersInDirectory(config.SERVERS_DIR)
if err != nil {
return appFiles, err
}
}
}
log.Debugf("collecting metadata from %v servers: %s", len(servers), strings.Join(servers, ", "))
for _, server := range servers {
serverDir := path.Join(config.SERVERS_DIR, server)
files, err := config.GetAllFilesInDirectory(serverDir)
if err != nil {
return appFiles, fmt.Errorf("server %s doesn't exist? Run \"abra server ls\" to check", server)
}
for _, file := range files {
appName := strings.TrimSuffix(file.Name(), ".env")
appFilePath := path.Join(config.SERVERS_DIR, server, file.Name())
appFiles[appName] = AppFile{
Path: appFilePath,
Server: server,
}
}
}
return appFiles, nil
}
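For illustration, a hypothetical env file at $ABRA_DIR/servers/example.com/myapp.env ends up keyed by its app name:

appFiles, err := LoadAppFiles("example.com")
// appFiles["myapp"] == AppFile{Path: ".../servers/example.com/myapp.env", Server: "example.com"}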
// GetAppServiceNames retrieves a list of app service names.
func GetAppServiceNames(appName string) ([]string, error) {
var serviceNames []string
appFiles, err := LoadAppFiles("")
if err != nil {
return serviceNames, err
}
app, err := GetApp(appFiles, appName)
if err != nil {
return serviceNames, err
}
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
return serviceNames, err
}
opts := stack.Deploy{Composefiles: composeFiles}
compose, err := GetAppComposeConfig(app.Recipe.Name, opts, app.Env)
if err != nil {
return serviceNames, err
}
for _, service := range compose.Services {
serviceNames = append(serviceNames, service.Name)
}
return serviceNames, nil
}
// GetAppNames retrieves a list of app names.
func GetAppNames() ([]string, error) {
var appNames []string
appFiles, err := LoadAppFiles("")
if err != nil {
return appNames, err
}
apps, err := GetApps(appFiles, "")
if err != nil {
return appNames, err
}
for _, app := range apps {
appNames = append(appNames, app.Name)
}
return appNames, nil
}
// TemplateAppEnvSample copies the example env file for the app into the users
// env files.
func TemplateAppEnvSample(r recipe.Recipe, appName, server, domain string) error {
envSample, err := os.ReadFile(r.SampleEnvPath)
if err != nil {
return err
}
appEnvPath := path.Join(config.ABRA_DIR, "servers", server, fmt.Sprintf("%s.env", appName))
if _, err := os.Stat(appEnvPath); !os.IsNotExist(err) {
return fmt.Errorf("%s already exists?", appEnvPath)
}
err = os.WriteFile(appEnvPath, envSample, 0o664)
if err != nil {
return err
}
read, err := os.ReadFile(appEnvPath)
if err != nil {
return err
}
newContents := strings.Replace(string(read), r.Name+".example.com", domain, -1)
err = os.WriteFile(appEnvPath, []byte(newContents), 0)
if err != nil {
return err
}
log.Debugf("copied & templated %s to %s", r.SampleEnvPath, appEnvPath)
return nil
}
// SanitiseAppName makes an app name usable with Docker by replacing illegal
// characters.
func SanitiseAppName(name string) string {
return strings.ReplaceAll(name, ".", "_")
}
// GetAppStatuses queries servers to check the deployment status of given apps.
func GetAppStatuses(apps []App, MachineReadable bool) (map[string]map[string]string, error) {
statuses := make(map[string]map[string]string)
servers := make(map[string]struct{})
for _, app := range apps {
if _, ok := servers[app.Server]; !ok {
servers[app.Server] = struct{}{}
}
}
var bar *progressbar.ProgressBar
if !MachineReadable {
bar = formatter.CreateProgressbar(len(servers), "querying remote servers...")
}
ch := make(chan stack.StackStatus, len(servers))
for server := range servers {
cl, err := client.New(server)
if err != nil {
return statuses, err
}
go func(s string) {
ch <- stack.GetAllDeployedServices(cl, s)
if !MachineReadable {
bar.Add(1)
}
}(server)
}
for range servers {
status := <-ch
if status.Err != nil {
return statuses, status.Err
}
for _, service := range status.Services {
result := make(map[string]string)
name := service.Spec.Labels[convert.LabelNamespace]
if _, ok := statuses[name]; !ok {
result["status"] = "deployed"
}
labelKey := fmt.Sprintf("coop-cloud.%s.chaos", name)
chaos, ok := service.Spec.Labels[labelKey]
if ok {
result["chaos"] = chaos
}
labelKey = fmt.Sprintf("coop-cloud.%s.chaos-version", name)
if chaosVersion, ok := service.Spec.Labels[labelKey]; ok {
result["chaosVersion"] = chaosVersion
}
labelKey = fmt.Sprintf("coop-cloud.%s.autoupdate", name)
if autoUpdate, ok := service.Spec.Labels[labelKey]; ok {
result["autoUpdate"] = autoUpdate
} else {
result["autoUpdate"] = "false"
}
labelKey = fmt.Sprintf("coop-cloud.%s.version", name)
if version, ok := service.Spec.Labels[labelKey]; ok {
result["version"] = version
} else {
continue
}
statuses[name] = result
}
}
log.Debugf("retrieved app statuses: %s", statuses)
return statuses, nil
}
// GetAppComposeConfig retrieves a compose specification for a recipe. This
// specification is the result of a merge of all the compose.**.yml files in
// the recipe repository.
func GetAppComposeConfig(recipe string, opts stack.Deploy, appEnv envfile.AppEnv) (*composetypes.Config, error) {
compose, err := loader.LoadComposefile(opts, appEnv)
if err != nil {
return &composetypes.Config{}, err
}
log.Debugf("retrieved %s for %s", compose.Filename, recipe)
return compose, nil
}
// ExposeAllEnv exposes all env variables to the app container
func ExposeAllEnv(stackName string, compose *composetypes.Config, appEnv envfile.AppEnv) {
for _, service := range compose.Services {
if service.Name == "app" {
log.Debugf("add the following environment to the app service config of %s:", stackName)
for k, v := range appEnv {
_, exists := service.Environment[k]
if !exists {
value := v
service.Environment[k] = &value
log.Debugf("add env var: %s value: %s to %s", k, value, stackName)
}
}
}
}
}
func CheckEnv(app App) ([]envfile.EnvVar, error) {
var envVars []envfile.EnvVar
envSample, err := app.Recipe.SampleEnv()
if err != nil {
return envVars, err
}
var keys []string
for key := range envSample {
keys = append(keys, key)
}
sort.Strings(keys)
for _, key := range keys {
if _, ok := app.Env[key]; ok {
envVars = append(envVars, envfile.EnvVar{Name: key, Present: true})
} else {
envVars = append(envVars, envfile.EnvVar{Name: key, Present: false})
}
}
return envVars, nil
}
// ReadAbraShCmdNames reads the names of commands.
func ReadAbraShCmdNames(abraSh string) ([]string, error) {
var cmdNames []string
file, err := os.Open(abraSh)
if err != nil {
if os.IsNotExist(err) {
return cmdNames, nil
}
return cmdNames, err
}
defer file.Close()
cmdNameRegex, err := regexp.Compile(`(\w+)(\(\).*\{)`)
if err != nil {
return cmdNames, err
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
matches := cmdNameRegex.FindStringSubmatch(line)
if len(matches) > 0 {
cmdNames = append(cmdNames, matches[1])
}
}
if len(cmdNames) > 0 {
log.Debugf("read %s from %s", strings.Join(cmdNames, " "), abraSh)
} else {
log.Debugf("read 0 command names from %s", abraSh)
}
return cmdNames, nil
}
func (a App) WriteRecipeVersion(version string, dryRun bool) error {
file, err := os.Open(a.Path)
if err != nil {
return err
}
defer file.Close()
skipped := false
scanner := bufio.NewScanner(file)
lines := []string{}
for scanner.Scan() {
line := scanner.Text()
if !strings.HasPrefix(line, "RECIPE=") && !strings.HasPrefix(line, "TYPE=") {
lines = append(lines, line)
continue
}
if strings.HasPrefix(line, "#") {
lines = append(lines, line)
continue
}
if strings.Contains(line, version) {
skipped = true
lines = append(lines, line)
continue
}
splitted := strings.Split(line, ":")
line = fmt.Sprintf("%s:%s", splitted[0], version)
lines = append(lines, line)
}
if err := scanner.Err(); err != nil {
log.Fatal(err)
}
if !dryRun {
if err := os.WriteFile(a.Path, []byte(strings.Join(lines, "\n")), os.ModePerm); err != nil {
log.Fatal(err)
}
} else {
log.Debugf("skipping writing version %s because dry run", version)
}
if !skipped {
log.Infof("version %s saved to %s.env", version, a.Domain)
} else {
log.Debugf("skipping version %s write as already exists in %s.env", version, a.Domain)
}
return nil
}
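For example, with a hypothetical pinned recipe line, a version write splits on ":" and keeps everything before it:

// before: RECIPE=abra-test-recipe:1.1.0
// after:  RECIPE=abra-test-recipe:1.2.0
err := app.WriteRecipeVersion("1.2.0", false) // pass true for a dry run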

pkg/app/app_test.go Normal file

@ -0,0 +1,200 @@
package app_test
import (
"encoding/json"
"fmt"
"reflect"
"testing"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/recipe"
testPkg "coopcloud.tech/abra/pkg/test"
"github.com/docker/docker/api/types/filters"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
)
func TestNewApp(t *testing.T) {
app, err := appPkg.NewApp(testPkg.ExpectedAppEnv, testPkg.AppName, testPkg.ExpectedAppFile)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, testPkg.ExpectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, testPkg.ExpectedApp)
}
}
func TestReadAppEnvFile(t *testing.T) {
app, err := appPkg.ReadAppEnvFile(testPkg.ExpectedAppFile, testPkg.AppName)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, testPkg.ExpectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, testPkg.ExpectedApp)
}
}
func TestGetApp(t *testing.T) {
app, err := appPkg.GetApp(testPkg.ExpectedAppFiles, testPkg.AppName)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, testPkg.ExpectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, testPkg.ExpectedApp)
}
}
func TestGetComposeFiles(t *testing.T) {
r := recipe.Get("abra-test-recipe")
err := r.EnsureExists()
if err != nil {
t.Fatal(err)
}
tests := []struct {
appEnv map[string]string
composeFiles []string
}{
{
map[string]string{},
[]string{
fmt.Sprintf("%s/compose.yml", r.Dir),
},
},
{
map[string]string{"COMPOSE_FILE": "compose.yml"},
[]string{
fmt.Sprintf("%s/compose.yml", r.Dir),
},
},
{
map[string]string{"COMPOSE_FILE": "compose.extra_secret.yml"},
[]string{
fmt.Sprintf("%s/compose.extra_secret.yml", r.Dir),
},
},
{
map[string]string{"COMPOSE_FILE": "compose.yml:compose.extra_secret.yml"},
[]string{
fmt.Sprintf("%s/compose.yml", r.Dir),
fmt.Sprintf("%s/compose.extra_secret.yml", r.Dir),
},
},
}
for _, test := range tests {
composeFiles, err := r.GetComposeFiles(test.appEnv)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, composeFiles, test.composeFiles)
}
}
func TestGetComposeFilesError(t *testing.T) {
r := recipe.Get("abra-test-recipe")
err := r.EnsureExists()
if err != nil {
t.Fatal(err)
}
tests := []struct{ appEnv map[string]string }{
{map[string]string{"COMPOSE_FILE": "compose.yml::compose.foo.yml"}},
{map[string]string{"COMPOSE_FILE": "doesnt.exist.yml"}},
}
for _, test := range tests {
_, err := r.GetComposeFiles(test.appEnv)
if err == nil {
t.Fatalf("should have failed: %v", test.appEnv)
}
}
}
func TestFilters(t *testing.T) {
oldDir := config.RECIPES_DIR
config.RECIPES_DIR = "./testdata"
defer func() {
config.RECIPES_DIR = oldDir
}()
app, err := appPkg.NewApp(envfile.AppEnv{
"DOMAIN": "test.example.com",
"RECIPE": "test-recipe",
}, "test_example_com", appPkg.AppFile{
Path: "./testdata/filtertest.end",
Server: "local",
})
if err != nil {
t.Fatal(err)
}
f, err := app.Filters(false, false)
if err != nil {
t.Error(err)
}
compareFilter(t, f, map[string]map[string]bool{
"name": {
"test_example_com": true,
},
})
f2, err := app.Filters(false, true)
if err != nil {
t.Error(err)
}
compareFilter(t, f2, map[string]map[string]bool{
"name": {
"^test_example_com": true,
},
})
f3, err := app.Filters(true, false)
if err != nil {
t.Error(err)
}
compareFilter(t, f3, map[string]map[string]bool{
"name": {
"test_example_com_bar": true,
"test_example_com_foo": true,
},
})
f4, err := app.Filters(true, true)
if err != nil {
t.Error(err)
}
compareFilter(t, f4, map[string]map[string]bool{
"name": {
"^test_example_com_bar": true,
"^test_example_com_foo": true,
},
})
f5, err := app.Filters(false, false, "foo")
if err != nil {
t.Error(err)
}
compareFilter(t, f5, map[string]map[string]bool{
"name": {
"test_example_com_foo": true,
},
})
}
func compareFilter(t *testing.T, f1 filters.Args, f2 map[string]map[string]bool) {
t.Helper()
j1, err := f1.MarshalJSON()
if err != nil {
t.Error(err)
}
j2, err := json.Marshal(f2)
if err != nil {
t.Error(err)
}
if diff := cmp.Diff(string(j2), string(j1)); diff != "" {
t.Errorf("filters mismatch (-want +got):\n%s", diff)
}
}

pkg/app/compose.go Normal file

@ -0,0 +1,88 @@
package app
import (
"fmt"
"strconv"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/log"
composetypes "github.com/docker/cli/cli/compose/types"
)
// SetRecipeLabel adds the label 'coop-cloud.${STACK_NAME}.recipe=${RECIPE}' to the app container
// to signal which recipe is connected to the deployed app
func SetRecipeLabel(compose *composetypes.Config, stackName string, recipe string) {
for _, service := range compose.Services {
if service.Name == "app" {
log.Debugf("set recipe label 'coop-cloud.%s.recipe' to %s for %s", stackName, recipe, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.recipe", stackName)
service.Deploy.Labels[labelKey] = recipe
}
}
}
// SetChaosLabel adds the label 'coop-cloud.${STACK_NAME}.chaos=true/false' to the app container
// to signal if the app is deployed in chaos mode
func SetChaosLabel(compose *composetypes.Config, stackName string, chaos bool) {
for _, service := range compose.Services {
if service.Name == "app" {
log.Debugf("set label 'coop-cloud.%s.chaos' to %v for %s", stackName, chaos, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.chaos", stackName)
service.Deploy.Labels[labelKey] = strconv.FormatBool(chaos)
}
}
}
// SetChaosVersionLabel adds the label 'coop-cloud.${STACK_NAME}.chaos-version=$(GIT_COMMIT)' to the app container
func SetChaosVersionLabel(compose *composetypes.Config, stackName string, chaosVersion string) {
for _, service := range compose.Services {
if service.Name == "app" {
log.Debugf("set label 'coop-cloud.%s.chaos-version' to %v for %s", stackName, chaosVersion, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.chaos-version", stackName)
service.Deploy.Labels[labelKey] = chaosVersion
}
}
}
// SetUpdateLabel adds the env var ENABLE_AUTO_UPDATE as a label to enable/disable
// the auto update process for this app. If the variable is not set, the auto
// update process is disabled by default.
func SetUpdateLabel(compose *composetypes.Config, stackName string, appEnv envfile.AppEnv) {
for _, service := range compose.Services {
if service.Name == "app" {
enable_auto_update, exists := appEnv["ENABLE_AUTO_UPDATE"]
if !exists {
enable_auto_update = "false"
}
log.Debugf("set label 'coop-cloud.%s.autoupdate' to %s for %s", stackName, enable_auto_update, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.autoupdate", stackName)
service.Deploy.Labels[labelKey] = enable_auto_update
}
}
}
// GetLabel reads docker labels in the format of "coop-cloud.${STACK_NAME}.${LABEL}" from the local compose files
func GetLabel(compose *composetypes.Config, stackName string, label string) string {
for _, service := range compose.Services {
if service.Name == "app" {
labelKey := fmt.Sprintf("coop-cloud.%s.%s", stackName, label)
log.Debugf("get label '%s'", labelKey)
if labelValue, ok := service.Deploy.Labels[labelKey]; ok {
return labelValue
}
}
}
log.Debugf("no %s label found for %s", label, stackName)
return ""
}
// GetTimeoutFromLabel reads the timeout value from the docker label
// "coop-cloud.${STACK_NAME}.timeout" and returns 50 as the default value
func GetTimeoutFromLabel(compose *composetypes.Config, stackName string) (int, error) {
timeout := 50 // default timeout
var err error
if timeoutLabel := GetLabel(compose, stackName, "timeout"); timeoutLabel != "" {
log.Debugf("timeout label: %s", timeoutLabel)
timeout, err = strconv.Atoi(timeoutLabel)
}
return timeout, err
}
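A minimal sketch of how these helpers might fit together at deploy time; the compose config, stack name, recipe name and appEnv below are illustrative placeholders, not abra's actual deploy code:

// Sketch (in-package): decorate a loaded compose config with
// coop-cloud labels before handing it to the deployer.
SetRecipeLabel(compose, "myapp_example_com", "myrecipe")
SetChaosLabel(compose, "myapp_example_com", false)
SetChaosVersionLabel(compose, "myapp_example_com", "abc123f")
SetUpdateLabel(compose, "myapp_example_com", appEnv)
timeout, err := GetTimeoutFromLabel(compose, "myapp_example_com")
if err != nil {
log.Fatal(err)
}
log.Debugf("deploying with timeout: %d", timeout)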

pkg/app/testdata/filtertest.env (new vendored file, 2 lines)

@ -0,0 +1,2 @@
RECIPE=test-recipe
DOMAIN=test.example.com


@ -0,0 +1,6 @@
version: "3.8"
services:
foo:
image: debian
bar:
image: debian


@ -1,22 +1,23 @@
package autocomplete
import (
"context"
"fmt"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/urfave/cli/v3"
)
// AppNameComplete completes app names
func AppNameComplete(c *cli.Context) {
appNames, err := config.GetAppNames()
// AppNameComplete completes app names.
func AppNameComplete(ctx context.Context, cmd *cli.Command) {
appNames, err := app.GetAppNames()
if err != nil {
logrus.Warn(err)
log.Warn(err)
}
if c.NArg() > 0 {
if cmd.NArg() > 0 {
return
}
@ -25,14 +26,24 @@ func AppNameComplete(c *cli.Context) {
}
}
// RecipeNameComplete completes recipe names
func RecipeNameComplete(c *cli.Context) {
catl, err := recipe.ReadRecipeCatalogue()
func ServiceNameComplete(appName string) {
serviceNames, err := app.GetAppServiceNames(appName)
if err != nil {
logrus.Warn(err)
return
}
for _, s := range serviceNames {
fmt.Println(s)
}
}
// RecipeNameComplete completes recipe names.
func RecipeNameComplete(ctx context.Context, cmd *cli.Command) {
catl, err := recipe.ReadRecipeCatalogue(false)
if err != nil {
log.Warn(err)
}
if c.NArg() > 0 {
if cmd.NArg() > 0 {
return
}
@ -41,9 +52,39 @@ func RecipeNameComplete(c *cli.Context) {
}
}
// SubcommandComplete completes subcommands.
func SubcommandComplete(c *cli.Context) {
if c.NArg() > 0 {
// RecipeVersionComplete completes versions for the recipe.
func RecipeVersionComplete(recipeName string) {
catl, err := recipe.ReadRecipeCatalogue(false)
if err != nil {
log.Warn(err)
}
for _, v := range catl[recipeName].Versions {
for v2 := range v {
fmt.Println(v2)
}
}
}
// ServerNameComplete completes server names.
func ServerNameComplete(ctx context.Context, cmd *cli.Command) {
files, err := app.LoadAppFiles("")
if err != nil {
log.Fatal(err)
}
if cmd.NArg() > 0 {
return
}
for _, appFile := range files {
fmt.Println(appFile.Server)
}
}
// SubcommandComplete completes sub-commands.
func SubcommandComplete(ctx context.Context, cmd *cli.Command) {
if cmd.NArg() > 0 {
return
}
@ -52,7 +93,6 @@ func SubcommandComplete(c *cli.Context) {
"autocomplete",
"catalogue",
"recipe",
"record",
"server",
"upgrade",
}
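These completers plug into the CLI as shell-completion hooks. A hypothetical wiring, assuming urfave/cli v3's ShellComplete field (the handler signatures above appear to target it; the command name is a placeholder):

// Sketch: attach a completer to a command.
var appCmd = &cli.Command{
Name: "app",
ShellComplete: autocomplete.AppNameComplete,
}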


@ -0,0 +1,88 @@
package catalogue
import (
"fmt"
"os"
"path"
"strings"
"coopcloud.tech/abra/pkg/config"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
"github.com/go-git/go-git/v5"
)
// EnsureCatalogue ensures that the catalogue is cloned locally & present.
func EnsureCatalogue() error {
catalogueDir := path.Join(config.ABRA_DIR, "catalogue")
if _, err := os.Stat(catalogueDir); err != nil && os.IsNotExist(err) {
log.Warnf("local recipe catalogue is missing, retrieving now")
url := fmt.Sprintf("%s/%s.git", config.REPOS_BASE_URL, config.CATALOGUE_JSON_REPO_NAME)
if err := gitPkg.Clone(catalogueDir, url); err != nil {
return err
}
log.Debugf("cloned catalogue repository to %s", catalogueDir)
}
return nil
}
// EnsureIsClean makes sure that the catalogue has no unstaged changes.
func EnsureIsClean() error {
isClean, err := gitPkg.IsClean(config.CATALOGUE_DIR)
if err != nil {
return err
}
if !isClean {
msg := "%s has locally unstaged changes? please commit/remove your changes before proceeding"
return fmt.Errorf(msg, config.CATALOGUE_DIR)
}
return nil
}
// EnsureUpToDate ensures that the local catalogue is up to date.
func EnsureUpToDate() error {
repo, err := git.PlainOpen(config.CATALOGUE_DIR)
if err != nil {
return err
}
remotes, err := repo.Remotes()
if err != nil {
return err
}
if len(remotes) == 0 {
msg := "cannot ensure %s is up-to-date, no git remotes configured"
log.Debugf(msg, config.CATALOGUE_DIR)
return nil
}
worktree, err := repo.Worktree()
if err != nil {
return err
}
branch, err := gitPkg.CheckoutDefaultBranch(repo, config.CATALOGUE_DIR)
if err != nil {
return err
}
opts := &git.PullOptions{
Force: true,
ReferenceName: branch,
}
if err := worktree.Pull(opts); err != nil {
if !strings.Contains(err.Error(), "already up-to-date") {
return err
}
}
log.Debugf("fetched latest git changes for %s", config.CATALOGUE_DIR)
return nil
}
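Callers would typically run these guarantees in order before reading the catalogue; a minimal in-package sketch (error handling collapsed for brevity):

// Sketch: make sure the catalogue is present, clean and current.
for _, ensure := range []func() error{EnsureCatalogue, EnsureIsClean, EnsureUpToDate} {
if err := ensure(); err != nil {
log.Fatal(err)
}
}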


@ -2,24 +2,46 @@
package client
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"time"
contextPkg "coopcloud.tech/abra/pkg/context"
"coopcloud.tech/abra/pkg/log"
sshPkg "coopcloud.tech/abra/pkg/ssh"
commandconnPkg "coopcloud.tech/abra/pkg/upstream/commandconn"
"github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
// New initiates a new Docker client.
func New(contextName string) (*client.Client, error) {
// Conf is a Docker client configuration.
type Conf struct {
Timeout int
}
// Opt is a Docker client option.
type Opt func(c *Conf)
// WithTimeout specifies a timeout for a Docker client.
func WithTimeout(timeout int) Opt {
return func(c *Conf) {
c.Timeout = timeout
}
}
// New initiates a new Docker client. New client connections are validated so
// that we ensure connections via SSH to the daemon can succeed. It takes into
// account that you may only want the local client and not communicate via SSH.
// For this use-case, please pass "default" as the serverName.
func New(serverName string, opts ...Opt) (*client.Client, error) {
var clientOpts []client.Opt
if contextName != "default" {
context, err := GetContext(contextName)
if serverName != "default" {
context, err := GetContext(serverName)
if err != nil {
return nil, err
return nil, fmt.Errorf("unknown server, run \"abra server add %s\"?", serverName)
}
ctxEndpoint, err := contextPkg.GetContextEndpoint(context)
@ -27,13 +49,17 @@ func New(contextName string) (*client.Client, error) {
return nil, err
}
helper, err := commandconnPkg.NewConnectionHelper(ctxEndpoint)
conf := &Conf{}
for _, opt := range opts {
opt(conf)
}
helper, err := commandconnPkg.NewConnectionHelper(ctxEndpoint, conf.Timeout)
if err != nil {
return nil, err
}
httpClient := &http.Client{
// No tls, no proxy
Transport: &http.Transport{
DialContext: helper.Dialer,
IdleConnTimeout: 30 * time.Second,
@ -59,7 +85,20 @@ func New(contextName string) (*client.Client, error) {
return nil, err
}
logrus.Debugf("created client for %s", contextName)
log.Debugf("created client for %s", serverName)
info, err := cl.Info(context.Background())
if err != nil {
return cl, sshPkg.Fatal(serverName, err)
}
if info.Swarm.LocalNodeState == "inactive" {
if serverName != "default" {
return cl, fmt.Errorf("swarm mode not enabled on %s?", serverName)
}
return cl, errors.New("swarm mode not enabled on local server?")
}
return cl, nil
}
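With the functional options above, callers can tune the connection without changing New's signature. A minimal sketch; the server name is a placeholder:

// Sketch: a client whose SSH connection helper uses a 10 second
// timeout; passing no options keeps the zero-value Conf.
cl, err := client.New("example.coopcloud.tech", client.WithTimeout(10))
if err != nil {
log.Fatal(err)
}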


@ -5,28 +5,25 @@ import (
"fmt"
"coopcloud.tech/abra/pkg/context"
"coopcloud.tech/abra/pkg/log"
commandconnPkg "coopcloud.tech/abra/pkg/upstream/commandconn"
dConfig "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/context/docker"
contextStore "github.com/docker/cli/cli/context/store"
"github.com/sirupsen/logrus"
)
type Context = contextStore.Metadata
func CreateContext(contextName string, user string, port string) error {
host := contextName
if user != "" {
host = fmt.Sprintf("%s@%s", user, host)
}
if port != "" {
host = fmt.Sprintf("%s:%s", host, port)
}
host = fmt.Sprintf("ssh://%s", host)
// CreateContext creates a new Docker context.
func CreateContext(contextName string) error {
host := fmt.Sprintf("ssh://%s", contextName)
if err := createContext(contextName, host); err != nil {
return err
}
logrus.Debugf("created the %s context", contextName)
log.Debugf("created the %s context", contextName)
return nil
}


@ -3,13 +3,10 @@ package client
import (
"context"
"fmt"
"strings"
"github.com/containers/image/docker"
"github.com/containers/image/types"
"github.com/docker/distribution/reference"
"github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/distribution/reference"
)
// GetRegistryTags retrieves all tags of an image from a container registry.
@ -29,29 +26,3 @@ func GetRegistryTags(img reference.Named) ([]string, error) {
return tags, nil
}
// GetTagDigest retrieves an image digest from a container registry.
func GetTagDigest(cl *client.Client, image reference.Named) (string, error) {
target := fmt.Sprintf("//%s", reference.Path(image))
ref, err := docker.ParseReference(target)
if err != nil {
return "", fmt.Errorf("failed to parse image %s, saw: %s", image, err.Error())
}
ctx := context.Background()
img, err := ref.NewImage(ctx, nil)
if err != nil {
logrus.Debugf("failed to query remote registry for %s, saw: %s", image, err.Error())
return "", fmt.Errorf("unable to read digest for %s", image)
}
defer img.Close()
digest := img.ConfigInfo().Digest.String()
if digest == "" {
return digest, fmt.Errorf("unable to read digest for %s", image)
}
return strings.Split(digest, ":")[1][:7], nil
}


@ -4,20 +4,14 @@ import (
"context"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
)
func StoreSecret(secretName, secretValue, server string) error {
cl, err := New(server)
if err != nil {
return err
}
ctx := context.Background()
func StoreSecret(cl *client.Client, secretName, secretValue, server string) error {
ann := swarm.Annotations{Name: secretName}
spec := swarm.SecretSpec{Annotations: ann, Data: []byte(secretValue)}
// We don't bother with the secret IDs for now
if _, err := cl.SecretCreate(ctx, spec); err != nil {
if _, err := cl.SecretCreate(context.Background(), spec); err != nil {
return err
}


@ -2,18 +2,17 @@ package client
import (
"context"
"fmt"
"time"
"github.com/docker/docker/api/types"
"coopcloud.tech/abra/pkg/log"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
)
func GetVolumes(ctx context.Context, server string, fs filters.Args) ([]*types.Volume, error) {
cl, err := New(server)
if err != nil {
return nil, err
}
volumeListOKBody, err := cl.VolumeList(ctx, fs)
func GetVolumes(cl *client.Client, ctx context.Context, server string, fs filters.Args) ([]*volume.Volume, error) {
volumeListOKBody, err := cl.VolumeList(ctx, volume.ListOptions{Filters: fs})
volumeList := volumeListOKBody.Volumes
if err != nil {
return volumeList, err
@ -22,7 +21,7 @@ func GetVolumes(ctx context.Context, server string, fs filters.Args) ([]*types.V
return volumeList, nil
}
func GetVolumeNames(volumes []*types.Volume) []string {
func GetVolumeNames(volumes []*volume.Volume) []string {
var volumeNames []string
for _, vol := range volumes {
@ -32,18 +31,32 @@ func GetVolumeNames(volumes []*types.Volume) []string {
return volumeNames
}
func RemoveVolumes(ctx context.Context, server string, volumeNames []string, force bool) error {
cl, err := New(server)
if err != nil {
return err
}
func RemoveVolumes(cl *client.Client, ctx context.Context, volumeNames []string, force bool, retries int) error {
for _, volName := range volumeNames {
err := cl.VolumeRemove(ctx, volName, force)
err := retryFunc(retries, func() error {
return cl.VolumeRemove(context.Background(), volName, force)
})
if err != nil {
return err
return fmt.Errorf("volume %s: %s", volName, err)
}
}
return nil
}
// retryFunc retries the given function up to the given number of retries. After
// the nth attempt it waits (n + 1)^2 seconds before the next one (starting with
// n=0). It returns an error if the function still fails after the last retry.
func retryFunc(retries int, fn func() error) error {
for i := 0; i < retries; i++ {
err := fn()
if err == nil {
return nil
}
if i+1 < retries {
sleep := time.Duration(i+1) * time.Duration(i+1)
log.Infof("%s: waiting %d seconds before next retry", err, sleep)
time.Sleep(sleep * time.Second)
}
}
return fmt.Errorf("%d retries failed", retries)
}
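Concretely, retryFunc(4, fn) makes up to four attempts, waiting 1, 4 and then 9 seconds between them. A minimal in-package sketch; the daemon call being retried is illustrative:

// Sketch: retry a flaky daemon call with quadratic backoff.
err := retryFunc(4, func() error {
return cl.NetworkRemove(context.Background(), "my-network")
})
if err != nil {
log.Fatal(err)
}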


@ -0,0 +1,26 @@
package client
import (
"fmt"
"testing"
)
func TestRetryFunc(t *testing.T) {
err := retryFunc(1, func() error { return nil })
if err != nil {
t.Errorf("should not return an error: %s", err)
}
i := 0
fn := func() error {
i++
return fmt.Errorf("oh no, something went wrong!")
}
err = retryFunc(2, fn)
if err == nil {
t.Error("should return an error")
}
if i != 2 {
t.Errorf("The function should have been called 1 times, got %d", i)
}
}


@ -1,158 +0,0 @@
package compose
import (
"fmt"
"io/ioutil"
"path"
"path/filepath"
"strings"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/stack"
loader "coopcloud.tech/abra/pkg/upstream/stack"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/distribution/reference"
"github.com/sirupsen/logrus"
)
// UpdateTag updates an image tag in-place in a recipe's local compose files on disk.
func UpdateTag(pattern, image, tag, recipeName string) (bool, error) {
composeFiles, err := filepath.Glob(pattern)
if err != nil {
return false, err
}
logrus.Debugf("considering %s config(s) for tag update", strings.Join(composeFiles, ", "))
for _, composeFile := range composeFiles {
opts := stack.Deploy{Composefiles: []string{composeFile}}
envSamplePath := path.Join(config.RECIPES_DIR, recipeName, ".env.sample")
sampleEnv, err := config.ReadEnv(envSamplePath)
if err != nil {
return false, err
}
compose, err := loader.LoadComposefile(opts, sampleEnv)
if err != nil {
return false, err
}
for _, service := range compose.Services {
if service.Image == "" {
continue // may be a compose.$optional.yml file
}
img, err := reference.ParseNormalizedNamed(service.Image)
if err != nil {
return false, err
}
var composeTag string
switch img.(type) {
case reference.NamedTagged:
composeTag = img.(reference.NamedTagged).Tag()
default:
logrus.Debugf("unable to parse %s, skipping", img)
continue
}
composeImage := formatter.StripTagMeta(reference.Path(img))
logrus.Debugf("parsed %s from %s", composeTag, service.Image)
if image == composeImage {
bytes, err := ioutil.ReadFile(composeFile)
if err != nil {
return false, err
}
old := fmt.Sprintf("%s:%s", composeImage, composeTag)
new := fmt.Sprintf("%s:%s", composeImage, tag)
replacedBytes := strings.Replace(string(bytes), old, new, -1)
logrus.Debugf("updating %s to %s in %s", old, new, compose.Filename)
if err := ioutil.WriteFile(compose.Filename, []byte(replacedBytes), 0764); err != nil {
return false, err
}
}
}
}
return false, nil
}
// UpdateLabel updates a label in-place in a recipe's local compose files on disk.
func UpdateLabel(pattern, serviceName, label, recipeName string) error {
composeFiles, err := filepath.Glob(pattern)
if err != nil {
return err
}
logrus.Debugf("considering %s config(s) for label update", strings.Join(composeFiles, ", "))
for _, composeFile := range composeFiles {
opts := stack.Deploy{Composefiles: []string{composeFile}}
envSamplePath := path.Join(config.RECIPES_DIR, recipeName, ".env.sample")
sampleEnv, err := config.ReadEnv(envSamplePath)
if err != nil {
return err
}
compose, err := loader.LoadComposefile(opts, sampleEnv)
if err != nil {
return err
}
serviceExists := false
var service composetypes.ServiceConfig
for _, s := range compose.Services {
if s.Name == serviceName {
service = s
serviceExists = true
}
}
if !serviceExists {
continue
}
discovered := false
for oldLabel, value := range service.Deploy.Labels {
if strings.HasPrefix(oldLabel, "coop-cloud") {
discovered = true
bytes, err := ioutil.ReadFile(composeFile)
if err != nil {
return err
}
old := fmt.Sprintf("coop-cloud.${STACK_NAME}.version=%s", value)
replacedBytes := strings.Replace(string(bytes), old, label, -1)
if old == label {
logrus.Warnf("%s is already set, nothing to do?", label)
return nil
}
logrus.Debugf("updating %s to %s in %s", old, label, compose.Filename)
if err := ioutil.WriteFile(compose.Filename, []byte(replacedBytes), 0764); err != nil {
return err
}
logrus.Infof("synced label %s to service %s", label, serviceName)
}
}
if !discovered {
logrus.Warn("no existing label found, automagic insertion not supported yet")
logrus.Fatalf("add '- \"%s\"' manually to the 'app' service in %s", label, composeFile)
}
}
return nil
}

pkg/config/abra.go (new file, 111 lines)

@ -0,0 +1,111 @@
package config
import (
"os"
"path"
"path/filepath"
"coopcloud.tech/abra/pkg/log"
"gopkg.in/yaml.v3"
)
// LoadAbraConfig returns the abra configuration. It tries to find an abra
// configuration file (see findAbraConfig for the lookup logic). When no
// configuration file is found it returns the default config.
func LoadAbraConfig() Abra {
wd, _ := os.Getwd()
configFile := findAbraConfig(wd)
if configFile == "" {
log.Debugf("no config file found")
return Abra{}
}
data, err := os.ReadFile(configFile)
if err != nil {
// Do nothing, when an error occurs
log.Debugf("error reading config file: %s", err)
return Abra{}
}
config := Abra{}
err = yaml.Unmarshal(data, &config)
if err != nil {
// Do nothing, when an error occurs
log.Debugf("error loading config file: %s", err)
return Abra{}
}
log.Debugf("config file loaded from: %s", configFile)
config.configPath = filepath.Dir(configFile)
return config
}
// findAbraConfig recursively looks for an abra.y(a)ml file in the given
// directory. If the file is not found it calls itself again with the parent
// directory, until the home directory or the filesystem root is hit. When no
// abra config is found it returns an empty string.
func findAbraConfig(dir string) string {
dir, err := filepath.Abs(dir)
if err != nil {
return ""
}
if dir == os.ExpandEnv("$HOME") || dir == "/" {
return ""
}
p := path.Join(dir, "abra.yaml")
if _, err := os.Stat(p); err == nil {
return p
}
p = path.Join(dir, "abra.yml")
if _, err := os.Stat(p); err == nil {
return p
}
return findAbraConfig(filepath.Dir(dir))
}
// Abra defines the configuration file for abra.
type Abra struct {
configPath string
AbraDir string `yaml:"abraDir"`
}
// GetAbraDir returns the abra dir. It has the following logic:
// 1. check if $ABRA_DIR is set
// 2. check if abraDir was set in a config file
// 3. fall back to $HOME/.abra when the above two options fail
func (a Abra) GetAbraDir() string {
if dir, exists := os.LookupEnv("ABRA_DIR"); exists && dir != "" {
log.Debug("read abra dir from $ABRA_DIR")
return dir
}
if a.AbraDir != "" {
log.Debug("read abra dir from config file")
if path.IsAbs(a.AbraDir) {
return a.AbraDir
}
// Make the path absolute
return path.Join(a.configPath, a.AbraDir)
}
log.Debug("using default abra dir")
return os.ExpandEnv("$HOME/.abra")
}
func (a Abra) GetServersDir() string { return path.Join(a.GetAbraDir(), "servers") }
func (a Abra) GetRecipesDir() string { return path.Join(a.GetAbraDir(), "recipes") }
func (a Abra) GetVendorDir() string { return path.Join(a.GetAbraDir(), "vendor") }
func (a Abra) GetBackupDir() string { return path.Join(a.GetAbraDir(), "backups") }
func (a Abra) GetCatalogueDir() string { return path.Join(a.GetAbraDir(), "catalogue") }
var config = LoadAbraConfig()
var (
ABRA_DIR = config.GetAbraDir()
SERVERS_DIR = config.GetServersDir()
RECIPES_DIR = config.GetRecipesDir()
VENDOR_DIR = config.GetVendorDir()
BACKUP_DIR = config.GetBackupDir()
CATALOGUE_DIR = config.GetCatalogueDir()
RECIPES_JSON = path.Join(config.GetCatalogueDir(), "recipes.json")
REPOS_BASE_URL = "https://git.coopcloud.tech/coop-cloud"
CATALOGUE_JSON_REPO_NAME = "recipes-catalogue-json"
SSH_URL_TEMPLATE = "ssh://git@git.coopcloud.tech:2222/coop-cloud/%s.git"
CHAOS_DEFAULT = "false"
)
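For example, a project-local abra.yaml like the following (a hypothetical config, using the abraDir key defined above) would have its relative path resolved against the config file's own directory, so every abra invocation from that directory tree uses ./state/abra:

# abra.yaml (hypothetical example)
abraDir: state/abra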

pkg/config/abra_test.go (new file, 133 lines)

@ -0,0 +1,133 @@
package config
import (
"log"
"os"
"path/filepath"
"testing"
)
func TestFindAbraConfig(t *testing.T) {
wd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
tests := []struct {
Dir string
Config string
}{
{
Dir: "testdata/abraconfig1",
Config: filepath.Join(wd, "testdata/abraconfig1/abra.yaml"),
},
{
Dir: "testdata/abraconfig1/subdir",
Config: filepath.Join(wd, "testdata/abraconfig1/abra.yaml"),
},
{
Dir: "testdata/abraconfig2",
Config: filepath.Join(wd, "testdata/abraconfig2/abra.yml"),
},
{
Dir: "testdata/abraconfig2/subdir",
Config: filepath.Join(wd, "testdata/abraconfig2/abra.yml"),
},
{
Dir: "testdata",
Config: "",
},
}
for _, tc := range tests {
t.Run(tc.Dir, func(t *testing.T) {
config := findAbraConfig(tc.Dir)
if config != tc.Config {
t.Errorf("\nwant: %s\ngot: %s", tc.Config, config)
}
})
}
}
func TestLoadAbraConfigGetAbraDir(t *testing.T) {
wd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
t.Setenv("ABRA_DIR", "")
t.Run("default", func(t *testing.T) {
cfg := LoadAbraConfig()
wantAbraDir := os.ExpandEnv("$HOME/.abra")
if cfg.GetAbraDir() != wantAbraDir {
t.Errorf("\nwant: %s\ngot: %s", wantAbraDir, cfg.GetAbraDir())
}
})
t.Run("from config file", func(t *testing.T) {
t.Cleanup(func() { os.Chdir(wd) })
err = os.Chdir(filepath.Join(wd, "testdata/abraconfig1"))
if err != nil {
log.Fatal(err)
}
cfg := LoadAbraConfig()
wantAbraDir := filepath.Join(wd, "testdata/abraconfig1/foobar")
if cfg.GetAbraDir() != wantAbraDir {
t.Errorf("\nwant: %s\ngot: %s", wantAbraDir, cfg.GetAbraDir())
}
})
t.Run("default when config file is empty", func(t *testing.T) {
t.Cleanup(func() { os.Chdir(wd) })
err := os.Chdir(filepath.Join(wd, "testdata/abraconfig2"))
if err != nil {
log.Fatal(err)
}
cfg := LoadAbraConfig()
wantAbraDir := os.ExpandEnv("$HOME/.abra")
if cfg.GetAbraDir() != wantAbraDir {
t.Errorf("\nwant: %s\ngot: %s", wantAbraDir, cfg.GetAbraDir())
}
})
t.Run("from env variable", func(t *testing.T) {
t.Setenv("ABRA_DIR", "foo")
cfg := LoadAbraConfig()
wantAbraDir := "foo"
if cfg.GetAbraDir() != wantAbraDir {
t.Errorf("\nwant: %s\ngot: %s", wantAbraDir, cfg.GetAbraDir())
}
})
}
func TestLoadAbraConfigServersDir(t *testing.T) {
wd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
t.Setenv("ABRA_DIR", "")
t.Run("default", func(t *testing.T) {
cfg := LoadAbraConfig()
wantServersDir := os.ExpandEnv("$HOME/.abra/servers")
if cfg.GetServersDir() != wantServersDir {
t.Errorf("\nwant: %s\ngot: %s", wantServersDir, cfg.GetServersDir())
}
})
t.Run("from config file", func(t *testing.T) {
t.Cleanup(func() { os.Chdir(wd) })
err = os.Chdir(filepath.Join(wd, "testdata/abraconfig1"))
if err != nil {
log.Fatal(err)
}
cfg := LoadAbraConfig()
log.Println(cfg)
wantServersDir := filepath.Join(wd, "testdata/abraconfig1/foobar/servers")
if cfg.GetServersDir() != wantServersDir {
t.Errorf("\nwant: %s\ngot: %s", wantServersDir, cfg.GetServersDir())
}
})
}


@ -1,430 +0,0 @@
package config
import (
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/convert"
loader "coopcloud.tech/abra/pkg/upstream/stack"
stack "coopcloud.tech/abra/pkg/upstream/stack"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types/filters"
"github.com/sirupsen/logrus"
)
// Type aliases to make code hints easier to understand
// AppEnv is a map of the values in an app's env config
type AppEnv = map[string]string
// AppName is the name of an app
type AppName = string
// AppFile represents app env files on disk without reading the contents
type AppFile struct {
Path string
Server string
}
// AppFiles is a slice of appfiles
type AppFiles map[AppName]AppFile
// App represents an app with its env file read into memory
type App struct {
Name AppName
Recipe string
Domain string
Env AppEnv
Server string
Path string
}
// StackName gets what the docker safe stack name is for the app. This should
// not be shown to the user, use a.Name for that. Give the output of this
// command to Docker only.
func (a App) StackName() string {
if _, exists := a.Env["STACK_NAME"]; exists {
return a.Env["STACK_NAME"]
}
stackName := SanitiseAppName(a.Name)
if len(stackName) > 45 {
logrus.Debugf("trimming %s to %s to avoid runtime limits", stackName, stackName[:45])
stackName = stackName[:45]
}
a.Env["STACK_NAME"] = stackName
return stackName
}
// Filters retrieves exact app filters for querying the container runtime. Due
// to upstream issues, filtering works differently depending on what you're
// querying. So, for example, secrets don't work with regex! The caller needs
// to implement their own validation that the right secrets are matched. In
// order to handle these cases, we provide the `appendServiceNames` /
// `exactMatch` modifiers.
func (a App) Filters(appendServiceNames, exactMatch bool) (filters.Args, error) {
filters := filters.NewArgs()
composeFiles, err := GetAppComposeFiles(a.Recipe, a.Env)
if err != nil {
return filters, err
}
opts := stack.Deploy{Composefiles: composeFiles}
compose, err := GetAppComposeConfig(a.Recipe, opts, a.Env)
if err != nil {
return filters, err
}
for _, service := range compose.Services {
var filter string
if appendServiceNames {
if exactMatch {
filter = fmt.Sprintf("^%s_%s", a.StackName(), service.Name)
} else {
filter = fmt.Sprintf("%s_%s", a.StackName(), service.Name)
}
} else {
if exactMatch {
filter = fmt.Sprintf("^%s", a.StackName())
} else {
filter = fmt.Sprintf("%s", a.StackName())
}
}
filters.Add("name", filter)
}
return filters, nil
}
// ByServer sorts a slice of Apps
type ByServer []App
func (a ByServer) Len() int { return len(a) }
func (a ByServer) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByServer) Less(i, j int) bool {
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
}
// ByServerAndRecipe sorts a slice of Apps
type ByServerAndRecipe []App
func (a ByServerAndRecipe) Len() int { return len(a) }
func (a ByServerAndRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByServerAndRecipe) Less(i, j int) bool {
if a[i].Server == a[j].Server {
return strings.ToLower(a[i].Recipe) < strings.ToLower(a[j].Recipe)
}
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
}
// ByRecipe sorts a slice of Apps
type ByRecipe []App
func (a ByRecipe) Len() int { return len(a) }
func (a ByRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByRecipe) Less(i, j int) bool {
return strings.ToLower(a[i].Recipe) < strings.ToLower(a[j].Recipe)
}
// ByName sorts a slice of Apps
type ByName []App
func (a ByName) Len() int { return len(a) }
func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByName) Less(i, j int) bool {
return strings.ToLower(a[i].Name) < strings.ToLower(a[j].Name)
}
func readAppEnvFile(appFile AppFile, name AppName) (App, error) {
env, err := ReadEnv(appFile.Path)
if err != nil {
return App{}, fmt.Errorf("env file for %s couldn't be read: %s", name, err.Error())
}
logrus.Debugf("read env %s from %s", env, appFile.Path)
app, err := newApp(env, name, appFile)
if err != nil {
return App{}, fmt.Errorf("env file for %s has issues: %s", name, err.Error())
}
return app, nil
}
// newApp creates a new App object
func newApp(env AppEnv, name string, appFile AppFile) (App, error) {
domain := env["DOMAIN"]
recipe, exists := env["RECIPE"]
if !exists {
recipe, exists = env["TYPE"]
if !exists {
return App{}, fmt.Errorf("%s is missing the RECIPE env var", name)
}
}
return App{
Name: name,
Domain: domain,
Recipe: recipe,
Env: env,
Server: appFile.Server,
Path: appFile.Path,
}, nil
}
// LoadAppFiles gets all app files for a given set of servers or all servers
func LoadAppFiles(servers ...string) (AppFiles, error) {
appFiles := make(AppFiles)
if len(servers) == 1 {
if servers[0] == "" {
// Empty servers flag, one string will always be passed
var err error
servers, err = GetAllFoldersInDirectory(SERVERS_DIR)
if err != nil {
return nil, err
}
}
}
logrus.Debugf("collecting metadata from %v servers: %s", len(servers), strings.Join(servers, ", "))
for _, server := range servers {
serverDir := path.Join(SERVERS_DIR, server)
files, err := getAllFilesInDirectory(serverDir)
if err != nil {
return nil, fmt.Errorf("server %s doesn't exist? Run \"abra server ls\" to check", server)
}
for _, file := range files {
appName := strings.TrimSuffix(file.Name(), ".env")
appFilePath := path.Join(SERVERS_DIR, server, file.Name())
appFiles[appName] = AppFile{
Path: appFilePath,
Server: server,
}
}
}
return appFiles, nil
}
// GetApp loads an app's settings, reading it from file, in preparation to use it
//
// ONLY use when ready to use the env file to keep IO down
func GetApp(apps AppFiles, name AppName) (App, error) {
appFile, exists := apps[name]
if !exists {
return App{}, fmt.Errorf("cannot find app with name %s", name)
}
app, err := readAppEnvFile(appFile, name)
if err != nil {
return App{}, err
}
return app, nil
}
// GetApps returns a slice of Apps with their env files read from a given slice of AppFiles
func GetApps(appFiles AppFiles) ([]App, error) {
var apps []App
for name := range appFiles {
app, err := GetApp(appFiles, name)
if err != nil {
return nil, err
}
apps = append(apps, app)
}
return apps, nil
}
// GetAppServiceNames retrieves a list of app service names.
func GetAppServiceNames(appName string) ([]string, error) {
var serviceNames []string
appFiles, err := LoadAppFiles("")
if err != nil {
return serviceNames, err
}
app, err := GetApp(appFiles, appName)
if err != nil {
return serviceNames, err
}
composeFiles, err := GetAppComposeFiles(app.Recipe, app.Env)
if err != nil {
return serviceNames, err
}
opts := stack.Deploy{Composefiles: composeFiles}
compose, err := GetAppComposeConfig(app.Recipe, opts, app.Env)
if err != nil {
return serviceNames, err
}
for _, service := range compose.Services {
serviceNames = append(serviceNames, service.Name)
}
return serviceNames, nil
}
// GetAppNames retrieves a list of app names.
func GetAppNames() ([]string, error) {
var appNames []string
appFiles, err := LoadAppFiles("")
if err != nil {
return appNames, err
}
apps, err := GetApps(appFiles)
if err != nil {
return appNames, err
}
for _, app := range apps {
appNames = append(appNames, app.Name)
}
return appNames, nil
}
// TemplateAppEnvSample copies the example env file for the app into the user's env files
func TemplateAppEnvSample(recipeName, appName, server, domain string) error {
envSamplePath := path.Join(RECIPES_DIR, recipeName, ".env.sample")
envSample, err := ioutil.ReadFile(envSamplePath)
if err != nil {
return err
}
appEnvPath := path.Join(ABRA_DIR, "servers", server, fmt.Sprintf("%s.env", appName))
if _, err := os.Stat(appEnvPath); os.IsExist(err) {
return fmt.Errorf("%s already exists?", appEnvPath)
}
err = ioutil.WriteFile(appEnvPath, envSample, 0664)
if err != nil {
return err
}
read, err := ioutil.ReadFile(appEnvPath)
if err != nil {
return err
}
newContents := strings.Replace(string(read), recipeName+".example.com", domain, -1)
err = ioutil.WriteFile(appEnvPath, []byte(newContents), 0)
if err != nil {
return err
}
logrus.Debugf("copied & templated %s to %s", envSamplePath, appEnvPath)
return nil
}
// SanitiseAppName makes an app name usable with Docker by replacing illegal characters
func SanitiseAppName(name string) string {
return strings.ReplaceAll(name, ".", "_")
}
// GetAppStatuses queries servers to check the deployment status of given apps
func GetAppStatuses(appFiles AppFiles) (map[string]map[string]string, error) {
statuses := make(map[string]map[string]string)
var unique []string
servers := make(map[string]struct{})
for _, appFile := range appFiles {
if _, ok := servers[appFile.Server]; !ok {
servers[appFile.Server] = struct{}{}
unique = append(unique, appFile.Server)
}
}
bar := formatter.CreateProgressbar(len(servers), "querying remote servers...")
ch := make(chan stack.StackStatus, len(servers))
for server := range servers {
go func(s string) {
ch <- stack.GetAllDeployedServices(s)
bar.Add(1)
}(server)
}
for range servers {
status := <-ch
for _, service := range status.Services {
result := make(map[string]string)
name := service.Spec.Labels[convert.LabelNamespace]
if _, ok := statuses[name]; !ok {
result["status"] = "deployed"
}
labelKey := fmt.Sprintf("coop-cloud.%s.version", name)
if version, ok := service.Spec.Labels[labelKey]; ok {
result["version"] = version
} else {
continue
}
statuses[name] = result
}
}
logrus.Debugf("retrieved app statuses: %s", statuses)
return statuses, nil
}
// GetAppComposeFiles gets the list of compose files for an app which should be
// merged into a composetypes.Config while respecting the COMPOSE_FILE env var.
func GetAppComposeFiles(recipe string, appEnv AppEnv) ([]string, error) {
var composeFiles []string
if _, ok := appEnv["COMPOSE_FILE"]; !ok {
logrus.Debug("no COMPOSE_FILE detected, loading compose.yml")
path := fmt.Sprintf("%s/%s/compose.yml", RECIPES_DIR, recipe)
composeFiles = append(composeFiles, path)
return composeFiles, nil
}
composeFileEnvVar := appEnv["COMPOSE_FILE"]
envVars := strings.Split(composeFileEnvVar, ":")
logrus.Debugf("COMPOSE_FILE detected (%s), loading %s", composeFileEnvVar, strings.Join(envVars, ", "))
for _, file := range strings.Split(composeFileEnvVar, ":") {
path := fmt.Sprintf("%s/%s/%s", RECIPES_DIR, recipe, file)
composeFiles = append(composeFiles, path)
}
logrus.Debugf("retrieved %s configs for %s", strings.Join(composeFiles, ", "), recipe)
return composeFiles, nil
}
// GetAppComposeConfig retrieves a compose specification for a recipe. This
// specification is the result of a merge of all the compose.**.yml files in
// the recipe repository.
func GetAppComposeConfig(recipe string, opts stack.Deploy, appEnv AppEnv) (*composetypes.Config, error) {
compose, err := loader.LoadComposefile(opts, appEnv)
if err != nil {
return &composetypes.Config{}, err
}
logrus.Debugf("retrieved %s for %s", compose.Filename, recipe)
return compose, nil
}


@ -1,36 +0,0 @@
package config
import (
"reflect"
"testing"
)
func TestNewApp(t *testing.T) {
app, err := newApp(expectedAppEnv, appName, expectedAppFile)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, expectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp)
}
}
func TestReadAppEnvFile(t *testing.T) {
app, err := readAppEnvFile(expectedAppFile, appName)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, expectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp)
}
}
func TestGetApp(t *testing.T) {
app, err := GetApp(expectedAppFiles, appName)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, expectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp)
}
}


@ -1,7 +1,6 @@
package config
import (
"bufio"
"fmt"
"io/fs"
"io/ioutil"
@ -10,19 +9,13 @@ import (
"path/filepath"
"strings"
"github.com/Autonomic-Cooperative/godotenv"
"github.com/sirupsen/logrus"
"coopcloud.tech/abra/pkg/log"
)
var ABRA_DIR = os.ExpandEnv("$HOME/.abra")
var SERVERS_DIR = path.Join(ABRA_DIR, "servers")
var RECIPES_DIR = path.Join(ABRA_DIR, "recipes")
var VENDOR_DIR = path.Join(ABRA_DIR, "vendor")
var BACKUP_DIR = path.Join(ABRA_DIR, "backups")
var RECIPES_JSON = path.Join(ABRA_DIR, "catalogue", "recipes.json")
var REPOS_BASE_URL = "https://git.coopcloud.tech/coop-cloud"
var CATALOGUE_JSON_REPO_NAME = "recipes-catalogue-json"
var SSH_URL_TEMPLATE = "ssh://git@git.coopcloud.tech:2222/coop-cloud/%s.git"
const MAX_SANITISED_APP_NAME_LENGTH = 45
const MAX_DOCKER_SECRET_LENGTH = 64
var BackupbotLabel = "coop-cloud.backupbot.enabled"
// GetServers retrieves all servers.
func GetServers() ([]string, error) {
@ -33,25 +26,11 @@ func GetServers() ([]string, error) {
return servers, err
}
logrus.Debugf("retrieved %v servers: %s", len(servers), servers)
log.Debugf("retrieved %v servers: %s", len(servers), servers)
return servers, nil
}
// ReadEnv loads an app environment into a map.
func ReadEnv(filePath string) (AppEnv, error) {
var envFile AppEnv
envFile, err := godotenv.Read(filePath)
if err != nil {
return nil, err
}
logrus.Debugf("read %s from %s", envFile, filePath)
return envFile, nil
}
// ReadServerNames retrieves all server names.
func ReadServerNames() ([]string, error) {
serverNames, err := GetAllFoldersInDirectory(SERVERS_DIR)
@ -60,13 +39,13 @@ func ReadServerNames() ([]string, error) {
return nil, err
}
logrus.Debugf("read %s from %s", strings.Join(serverNames, ","), SERVERS_DIR)
log.Debugf("read %s from %s", strings.Join(serverNames, ","), SERVERS_DIR)
return serverNames, nil
}
// getAllFilesInDirectory returns filenames of all files in directory
func getAllFilesInDirectory(directory string) ([]fs.FileInfo, error) {
// GetAllFilesInDirectory returns filenames of all files in directory
func GetAllFilesInDirectory(directory string) ([]fs.FileInfo, error) {
var realFiles []fs.FileInfo
files, err := ioutil.ReadDir(directory)
@ -84,7 +63,7 @@ func getAllFilesInDirectory(directory string) ([]fs.FileInfo, error) {
realPath, err := filepath.EvalSymlinks(filePath)
if err != nil {
logrus.Warningf("broken symlink in your abra config folders: %s", filePath)
log.Warnf("broken symlink in your abra config folders: %s", filePath)
} else {
realFile, err := os.Stat(realPath)
if err != nil {
@ -117,7 +96,7 @@ func GetAllFoldersInDirectory(directory string) ([]string, error) {
filePath := path.Join(directory, file.Name())
realDir, err := filepath.EvalSymlinks(filePath)
if err != nil {
logrus.Warningf("broken symlink in your abra config folders: %s", filePath)
log.Warnf("broken symlink in your abra config folders: %s", filePath)
} else if stat, err := os.Stat(realDir); err == nil && stat.IsDir() {
// path is a directory
folders = append(folders, file.Name())
@ -127,34 +106,3 @@ func GetAllFoldersInDirectory(directory string) ([]string, error) {
return folders, nil
}
// ReadAbraShEnvVars reads env vars from an abra.sh recipe file.
func ReadAbraShEnvVars(abraSh string) (map[string]string, error) {
envVars := make(map[string]string)
file, err := os.Open(abraSh)
if err != nil {
if os.IsNotExist(err) {
return envVars, nil
}
return envVars, err
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, "export") {
splitVals := strings.Split(line, "export ")
envVarDef := splitVals[len(splitVals)-1]
keyVal := strings.Split(envVarDef, "=")
if len(keyVal) != 2 {
return envVars, fmt.Errorf("couldn't parse %s", line)
}
envVars[keyVal[0]] = keyVal[1]
}
}
logrus.Debugf("read %s from %s", envVars, abraSh)
return envVars, nil
}


@ -1,84 +0,0 @@
package config
import (
"os"
"path"
"reflect"
"strings"
"testing"
)
var testFolder = os.ExpandEnv("$PWD/../../tests/resources/test_folder")
var validAbraConf = os.ExpandEnv("$PWD/../../tests/resources/valid_abra_config")
// make sure these are in alphabetical order
var tFolders = []string{"folder1", "folder2"}
var tFiles = []string{"bar.env", "foo.env"}
var appName = "ecloud"
var serverName = "evil.corp"
var expectedAppEnv = AppEnv{
"DOMAIN": "ecloud.evil.corp",
"RECIPE": "ecloud",
}
var expectedApp = App{
Name: appName,
Recipe: expectedAppEnv["RECIPE"],
Domain: expectedAppEnv["DOMAIN"],
Env: expectedAppEnv,
Path: expectedAppFile.Path,
Server: expectedAppFile.Server,
}
var expectedAppFile = AppFile{
Path: path.Join(validAbraConf, "servers", serverName, appName+".env"),
Server: serverName,
}
var expectedAppFiles = map[string]AppFile{
appName: expectedAppFile,
}
// var expectedServerNames = []string{"evil.corp"}
func TestGetAllFoldersInDirectory(t *testing.T) {
folders, err := GetAllFoldersInDirectory(testFolder)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(folders, tFolders) {
t.Fatalf("did not get expected folders. Expected: (%s), Got: (%s)", strings.Join(tFolders, ","), strings.Join(folders, ","))
}
}
func TestGetAllFilesInDirectory(t *testing.T) {
files, err := getAllFilesInDirectory(testFolder)
if err != nil {
t.Fatal(err)
}
var fileNames []string
for _, file := range files {
fileNames = append(fileNames, file.Name())
}
if !reflect.DeepEqual(fileNames, tFiles) {
t.Fatalf("did not get expected files. Expected: (%s), Got: (%s)", strings.Join(tFiles, ","), strings.Join(fileNames, ","))
}
}
func TestReadEnv(t *testing.T) {
env, err := ReadEnv(expectedAppFile.Path)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(env, expectedAppEnv) {
t.Fatalf(
"did not get expected application settings. Expected: DOMAIN=%s RECIPE=%s; Got: DOMAIN=%s RECIPE=%s",
expectedAppEnv["DOMAIN"],
expectedAppEnv["RECIPE"],
env["DOMAIN"],
env["RECIPE"],
)
}
}


@ -0,0 +1 @@
abraDir: foobar


@ -6,18 +6,19 @@ import (
"strings"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"github.com/AlecAivazis/survey/v2"
"github.com/docker/docker/api/types"
containerTypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
// GetContainer retrieves a container. If noInput is false and more than one
// container is retrieved, then a prompt is presented to let the user choose. A
// count of 0 is handled gracefully.
func GetContainer(c context.Context, cl *client.Client, filters filters.Args, noInput bool) (types.Container, error) {
containerOpts := types.ContainerListOptions{Filters: filters}
containerOpts := containerTypes.ListOptions{Filters: filters}
containers, err := cl.ContainerList(c, containerOpts)
if err != nil {
return types.Container{}, err
@ -28,7 +29,7 @@ func GetContainer(c context.Context, cl *client.Client, filters filters.Args, no
return types.Container{}, fmt.Errorf("no containers matching the %v filter found?", filter)
}
if len(containers) != 1 {
if len(containers) > 1 {
var containersRaw []string
for _, container := range containers {
containerName := strings.Join(container.Names, " ")
@ -42,7 +43,7 @@ func GetContainer(c context.Context, cl *client.Client, filters filters.Args, no
return types.Container{}, err
}
logrus.Warnf("ambiguous container list received, prompting for input")
log.Warnf("ambiguous container list received, prompting for input")
var response string
prompt := &survey.Select{
@ -63,8 +64,20 @@ func GetContainer(c context.Context, cl *client.Client, filters filters.Args, no
}
}
logrus.Panic("failed to match chosen container")
log.Fatal("failed to match chosen container")
}
return containers[0], nil
}
// GetContainerFromStackAndService retrieves the container for the given stack and service.
func GetContainerFromStackAndService(cl *client.Client, stack, service string) (types.Container, error) {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", stack, service))
container, err := GetContainer(context.Background(), cl, filters, true)
if err != nil {
return types.Container{}, err
}
return container, nil
}
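A minimal in-package sketch of the new helper; the stack and service names are placeholders:

// Sketch: find the "app" service container of a deployed stack.
c, err := GetContainerFromStackAndService(cl, "myapp_example_com", "app")
if err != nil {
log.Fatal(err)
}
log.Debugf("found container %s", c.ID)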


@ -8,22 +8,19 @@ import (
"github.com/docker/cli/cli/context"
contextStore "github.com/docker/cli/cli/context/store"
cliflags "github.com/docker/cli/cli/flags"
"github.com/moby/term"
)
func NewDefaultDockerContextStore() *command.ContextStoreWithDefault {
_, _, stderr := term.StdStreams()
dockerConfig := dConfig.LoadDefaultConfigFile(stderr)
contextDir := dConfig.ContextStoreDir()
storeConfig := command.DefaultContextStoreConfig()
store := newContextStore(contextDir, storeConfig)
opts := &cliflags.CommonOptions{Context: "default"}
opts := &cliflags.ClientOptions{Context: "default"}
dockerContextStore := &command.ContextStoreWithDefault{
Store: store,
Resolver: func() (*command.DefaultContext, error) {
return command.ResolveDefaultContext(opts, dockerConfig, storeConfig, stderr)
return command.ResolveDefaultContext(opts, storeConfig)
},
}


@ -1,107 +0,0 @@
package dns
import (
"context"
"fmt"
"net"
"os"
"time"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
)
// NewToken constructs a new DNS provider token.
func NewToken(provider, providerTokenEnvVar string) (string, error) {
if token, present := os.LookupEnv(providerTokenEnvVar); present {
return token, nil
}
logrus.Debugf("no %s in environment, asking via stdin", providerTokenEnvVar)
var token string
prompt := &survey.Input{
Message: fmt.Sprintf("%s API token?", provider),
}
if err := survey.AskOne(prompt, &token); err != nil {
return "", err
}
return token, nil
}
// EnsureIPv4 ensures that an ipv4 address is set for a domain name
func EnsureIPv4(domainName string) (string, error) {
var ipv4 string
// comrade librehosters DNS resolver -> https://www.privacy-handbuch.de/handbuch_93d.htm
freifunkDNS := "5.1.66.255:53"
resolver := &net.Resolver{
PreferGo: false,
Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
d := net.Dialer{
Timeout: time.Millisecond * time.Duration(10000),
}
return d.DialContext(ctx, "udp", freifunkDNS)
},
}
ctx := context.Background()
ips, err := resolver.LookupIPAddr(ctx, domainName)
if err != nil {
return ipv4, err
}
if len(ips) == 0 {
return ipv4, fmt.Errorf("unable to retrieve ipv4 address for %s", domainName)
}
ipv4 = ips[0].IP.To4().String()
logrus.Debugf("%s points to %s (resolver: %s)", domainName, ipv4, freifunkDNS)
return ipv4, nil
}
// EnsureDomainsResolveSameIPv4 ensures that domains resolve to the same ipv4 address
func EnsureDomainsResolveSameIPv4(domainName, server string) (string, error) {
if server == "default" || server == "local" {
return "", nil
}
var ipv4 string
domainIPv4, err := EnsureIPv4(domainName)
if err != nil {
return ipv4, err
}
if domainIPv4 == "" {
return ipv4, fmt.Errorf("cannot resolve ipv4 for %s?", domainName)
}
serverIPv4, err := EnsureIPv4(server)
if err != nil {
return ipv4, err
}
if serverIPv4 == "" {
return ipv4, fmt.Errorf("cannot resolve ipv4 for %s?", server)
}
if domainIPv4 != serverIPv4 {
err := "app domain %s (%s) does not appear to resolve to app server %s (%s)?"
return ipv4, fmt.Errorf(err, domainName, domainIPv4, server, serverIPv4)
}
return ipv4, nil
}
// GetTTL parses a ttl string into a duration
func GetTTL(ttl string) (time.Duration, error) {
val, err := time.ParseDuration(ttl)
if err != nil {
return val, err
}
return val, nil
}

pkg/dns/dns.go (new file, 55 lines)

@ -0,0 +1,55 @@
package dns
import (
"fmt"
"net"
)
// EnsureIPv4 ensures that an ipv4 address is set for a domain name
func EnsureIPv4(domainName string) (string, error) {
ipv4, err := net.ResolveIPAddr("ip4", domainName)
if err != nil {
return "", fmt.Errorf("unable to resolve ipv4 address for %s, %s", domainName, err)
}
// NOTE(d1): e.g. when there is only an ipv6 record available
if ipv4 == nil {
return "", fmt.Errorf("unable to resolve ipv4 address for %s", domainName)
}
return ipv4.String(), nil
}
// EnsureDomainsResolveSameIPv4 ensures that domains resolve to the same ipv4 address
func EnsureDomainsResolveSameIPv4(domainName, server string) (string, error) {
if server == "default" || server == "local" {
return "", nil
}
var ipv4 string
domainIPv4, err := EnsureIPv4(domainName)
if err != nil {
return ipv4, err
}
if domainIPv4 == "" {
return ipv4, fmt.Errorf("cannot resolve ipv4 for %s?", domainName)
}
serverIPv4, err := EnsureIPv4(server)
if err != nil {
return ipv4, err
}
if serverIPv4 == "" {
return ipv4, fmt.Errorf("cannot resolve ipv4 for %s?", server)
}
if domainIPv4 != serverIPv4 {
err := "app domain %s (%s) does not appear to resolve to app server %s (%s)?"
return ipv4, fmt.Errorf(err, domainName, domainIPv4, server, serverIPv4)
}
return ipv4, nil
}
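A short sketch of how these checks might be used before a deploy; the domain and server names are placeholders:

// Sketch: verify the app domain resolves to the target server.
ipv4, err := dns.EnsureDomainsResolveSameIPv4("myapp.example.com", "server.example.com")
if err != nil {
log.Fatal(err)
}
log.Debugf("app and server share ipv4 %s", ipv4)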

pkg/dns/dns_test.go (new file, 64 lines)

@ -0,0 +1,64 @@
package dns
import (
"fmt"
"testing"
"gotest.tools/v3/assert"
)
func TestEnsureDomainsResolveSameIPv4(t *testing.T) {
tests := []struct {
domainName string
serverName string
shouldValidate bool
}{
// NOTE(d1): DNS records get checked, so use something that is maintained
// within the federation. If you're here because of a failing test, try
// `dig +short <domain>` to ensure stuff matches first! If flakiness
// becomes an issue we can look into mocking
{"docs.coopcloud.tech", "coopcloud.tech", true},
{"docs.coopcloud.tech", "swarm.autonomic.zone", true},
// NOTE(d1): special case handling for "--local"
{"", "default", true},
{"", "local", true},
{"", "", false},
{"123", "", false},
}
for _, test := range tests {
_, err := EnsureDomainsResolveSameIPv4(test.domainName, test.serverName)
if err != nil && test.shouldValidate {
t.Fatal(err)
}
if err == nil && !test.shouldValidate {
t.Fatal(fmt.Errorf("should have failed but did not: %v", test))
}
}
}
func TestEnsureIpv4(t *testing.T) {
// NOTE(d1): DNS records get checked, so use something that is maintained
// within the federation. If you're here because of a failing test, try `dig
// +short <domain>` to ensure stuff matches first! If flakiness becomes an
// issue we can look into mocking
domainName := "collabora.ostrom.collective.tools"
serverName := "ostrom.collective.tools"
for i := 0; i < 15; i++ {
domainIpv4, err := EnsureIPv4(domainName)
if err != nil {
t.Fatal(err)
}
serverIpv4, err := EnsureIPv4(serverName)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, domainIpv4, serverIpv4)
}
}

Some files were not shown because too many files have changed in this diff.