Compare commits

..

2 Commits

SHA1       Message                                     Checks                                             Date
d6fd95c045 Merge branch 'main' into docker-dependency  continuous-integration/drone/pr: Build is passing  2022-06-03 11:30:54 +00:00
f918798c3d install: check for docker                   continuous-integration/drone/pr: Build is passing  2022-06-03 13:26:59 +02:00
3772 changed files with 7936 additions and 995679 deletions


@@ -1,8 +0,0 @@
*.swo
*.swp
.dockerignore
Dockerfile
abra
dist
kadabra
tags


@@ -3,20 +3,35 @@ kind: pipeline
name: coopcloud.tech/abra
steps:
- name: make check
image: golang:1.21
image: golang:1.18
commands:
- make check
- name: make test
image: golang:1.21
environment:
CATL_URL: https://git.coopcloud.tech/coop-cloud/recipes-catalogue-json.git
- name: make build
image: golang:1.18
commands:
- make build
- name: make test
image: golang:1.18
commands:
- mkdir -p $HOME/.abra
- git clone $CATL_URL $HOME/.abra/catalogue
- make test
- name: notify on failure
image: plugins/matrix
settings:
homeserver: https://matrix.autonomic.zone
roomid: "IFazIpLtxiScqbHqoa:autonomic.zone"
userid: "@autono-bot:autonomic.zone"
accesstoken:
from_secret: autono_bot_access_token
depends_on:
- make check
- make build
- make test
when:
status:
- failure
- name: fetch
image: docker:git
@@ -24,12 +39,13 @@ steps:
- git fetch --tags
depends_on:
- make check
- make build
- make test
when:
event: tag
- name: release
image: goreleaser/goreleaser:v1.24.0
image: golang:1.18
environment:
GITEA_TOKEN:
from_secret: goreleaser_gitea_token
@@ -37,53 +53,12 @@ steps:
- name: deps
path: /go
commands:
- goreleaser release
- curl -sL https://git.io/goreleaser | bash
depends_on:
- fetch
when:
event: tag
- name: publish image
image: plugins/docker
settings:
auto_tag: true
username: 3wordchant
password:
from_secret: git_coopcloud_tech_token_3wc
repo: git.coopcloud.tech/coop-cloud/abra
tags: dev
registry: git.coopcloud.tech
when:
branch:
- main
depends_on:
- make check
- make test
- name: integration test
image: appleboy/drone-ssh
settings:
host:
- int.coopcloud.tech
username: abra
key:
from_secret: abra_int_private_key
port: 22
command_timeout: 60m
script_stop: true
request_pty: true
script:
- |
wget https://git.coopcloud.tech/coop-cloud/abra/raw/branch/main/scripts/tests/run-ci-int -O run-ci-int
chmod +x run-ci-int
sh run-ci-int
when:
event:
- cron:
cron:
# @daily https://docs.drone.io/cron/
- integration
volumes:
- name: deps
temp: {}

.e2e.env.sample (new file, 4 additions)

@@ -0,0 +1,4 @@
GANDI_TOKEN=...
HCLOUD_TOKEN=...
REGISTRY_PASSWORD=...
REGISTRY_USERNAME=...
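How these variables are consumed is not shown in this diff; as a rough sketch, assuming the end-to-end tests read them from the environment with os.Getenv (illustrative names only), a guard like the following skips tests when credentials are missing:

package e2e

import (
	"os"
	"testing"
)

// requireEnv skips the calling test when any of the given variables from
// .e2e.env is unset. Illustrative helper; not part of this diff.
func requireEnv(t *testing.T, keys ...string) {
	t.Helper()
	for _, k := range keys {
		if os.Getenv(k) == "" {
			t.Skipf("%s not set, skipping end-to-end test", k)
		}
	}
}

func TestRegistryCredentialsPresent(t *testing.T) {
	requireEnv(t, "REGISTRY_USERNAME", "REGISTRY_PASSWORD", "HCLOUD_TOKEN", "GANDI_TOKEN")
}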


@@ -1,7 +1,6 @@
# integration test suite
# export ABRA_DIR="$HOME/.abra_test"
# export ABRA_TEST_DOMAIN=test.example.com
# export ABRA_CI=1
go env -w GOPRIVATE=coopcloud.tech
# release automation
# export GITEA_TOKEN=
# export PASSWORD_STORE_DIR=$(pwd)/../../autonomic/passwords/passwords/
# export HCLOUD_TOKEN=$(pass show logins/hetzner/cicd/api_key)
# export CAPSUL_TOKEN=...
# export GITEA_TOKEN=...

.gitignore (vendored, 4 changes)

@@ -2,7 +2,7 @@
.e2e.env
.envrc
.vscode/
/kadabra
abra
dist/
tests/integration/.bats
tests/integration/.abra/catalogue
vendor/


@@ -1,19 +1,16 @@
---
project_name: abra
gitea_urls:
api: https://git.coopcloud.tech/api/v1
download: https://git.coopcloud.tech/
skip_tls_verify: false
before:
hooks:
- go mod tidy
builds:
- id: abra
binary: abra
- env:
- CGO_ENABLED=0
dir: cmd/abra
env:
- CGO_ENABLED=0
goos:
- linux
- darwin
@@ -29,48 +26,21 @@ builds:
ldflags:
- "-X 'main.Commit={{ .Commit }}'"
- "-X 'main.Version={{ .Version }}'"
- "-s"
- "-w"
- id: kadabra
binary: kadabra
dir: cmd/kadabra
env:
- CGO_ENABLED=0
goos:
- linux
- darwin
goarch:
- 386
- amd64
- arm
- arm64
goarm:
- 5
- 6
- 7
gcflags:
- "all=-l -B"
ldflags:
- "-X 'main.Commit={{ .Commit }}'"
- "-X 'main.Version={{ .Version }}'"
- "-s"
- "-w"
archives:
- replacements:
386: i386
amd64: x86_64
format: binary
checksum:
name_template: "checksums.txt"
snapshot:
name_template: "{{ incpatch .Version }}-next"
changelog:
sort: desc
filters:
exclude:
- "^Merge"
- "^Revert"
- "^WIP:"
- "^chore(deps):"
- "^style:"
- "^test:"
- "^tests:"
- "^Revert"


@@ -1,19 +1,10 @@
# authors
> If you're looking at this and you hack on `abra` and you're not listed here,
> please do add yourself! This is a community project, let's show some 💞
> If you're looking at this and you hack on Abra and you're not listed here,
> please do add yourself! This is a community project, let's show
- 3wordchant
- cassowary
- codegod100
- decentral1se
- fauno
- frando
- kawaiipunk
- knoflook
- moritz
- p4u1
- rix
- roxxers
- vera
- yksflip


@@ -1,30 +0,0 @@
# Build image
FROM golang:1.21-alpine AS build
ENV GOPRIVATE coopcloud.tech
RUN apk add --no-cache \
gcc \
git \
make \
musl-dev
COPY . /app
WORKDIR /app
RUN CGO_ENABLED=0 make build
# Release image ("slim")
FROM alpine:3.19.1
RUN apk add --no-cache \
ca-certificates \
git \
openssh
RUN update-ca-certificates
COPY --from=build /app/abra /abra
ENTRYPOINT ["/abra"]


@@ -1,5 +1,5 @@
Abra: The Co-op Cloud utility belt
Copyright (C) 2022 Co-op Cloud <helo@coopcloud.tech>
Copyright (C) 2022 Co-op Cloud <helo@coopcloud.tech>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by


@@ -1,55 +1,32 @@
ABRA := ./cmd/abra
KADABRA := ./cmd/kadabra
COMMIT := $(shell git rev-list -1 HEAD)
GOPATH := $(shell go env GOPATH)
GOVERSION := 1.21
LDFLAGS := "-X 'main.Commit=$(COMMIT)'"
ABRA := ./cmd/abra
COMMIT := $(shell git rev-list -1 HEAD)
GOPATH := $(shell go env GOPATH)
LDFLAGS := "-X 'main.Commit=$(COMMIT)'"
DIST_LDFLAGS := $(LDFLAGS)" -s -w"
GCFLAGS := "all=-l -B"
export GOPRIVATE=coopcloud.tech
# NOTE(d1): default `make` optimised for Abra hacking
all: format check build-abra test
all: format check build test
run-abra:
@go run -gcflags=$(GCFLAGS) -ldflags=$(LDFLAGS) $(ABRA)
run:
@go run -ldflags=$(LDFLAGS) $(ABRA)
run-kadabra:
@go run -gcflags=$(GCFLAGS) -ldflags=$(LDFLAGS) $(KADABRA)
install:
@go install -ldflags=$(LDFLAGS) $(ABRA)
install-abra:
@go install -gcflags=$(GCFLAGS) -ldflags=$(LDFLAGS) $(ABRA)
build-dev:
@go build -ldflags=$(LDFLAGS) $(ABRA)
install-kadabra:
@go install -gcflags=$(GCFLAGS) -ldflags=$(LDFLAGS) $(KADABRA)
install: install-abra install-kadabra
build-abra:
@go build -v -gcflags=$(GCFLAGS) -ldflags=$(DIST_LDFLAGS) $(ABRA)
build-kadabra:
@go build -v -gcflags=$(GCFLAGS) -ldflags=$(DIST_LDFLAGS) $(KADABRA)
build: build-abra build-kadabra
build-docker-abra:
@docker run -it -v $(PWD):/abra golang:$(GOVERSION) \
bash -c 'cd /abra; ./scripts/docker/build.sh'
build-docker: build-docker-abra
build:
@go build -ldflags=$(DIST_LDFLAGS) $(ABRA)
clean:
@rm '$(GOPATH)/bin/abra'
@rm '$(GOPATH)/bin/kadabra'
format:
@gofmt -s -w .
check:
@test -z $$(gofmt -l .) || \
(echo "gofmt: formatting issue - run 'make format' to resolve" && exit 1)
@test -z $$(gofmt -l .) || (echo "gofmt: formatting issue - run 'make format' to resolve" && exit 1)
test:
@go test ./... -cover -v
@@ -57,5 +34,9 @@ test:
loc:
@find . -name "*.go" | xargs wc -l
deps:
@go get -t -u ./...
loc-author:
@git ls-files -z | \
xargs -0rn 1 -P "$$(nproc)" -I{} sh -c 'git blame -w -M -C -C --line-porcelain -- {} | grep -I --line-buffered "^author "' | \
sort -f | \
uniq -ic | \
sort -n
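Both the Makefile above and .goreleaser.yml inject build metadata through -X linker flags. The cmd/abra sources are not part of this diff, so the following is only a sketch, assuming package-level variables named as in the "-X 'main.Commit=...'" flags:

// Sketch of how the -X ldflags are typically consumed; variable names are
// assumptions taken from the flags shown above, not from this diff.
package main

import "fmt"

// Overwritten at link time, e.g.:
//   go build -ldflags "-X 'main.Commit=<sha>' -X 'main.Version=<tag>'" ./cmd/abra
var (
	Commit  = "unknown"
	Version = "dev"
)

func main() {
	fmt.Printf("abra %s (%s)\n", Version, Commit)
}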


@@ -2,12 +2,11 @@
[![Build Status](https://build.coopcloud.tech/api/badges/coop-cloud/abra/status.svg?ref=refs/heads/main)](https://build.coopcloud.tech/coop-cloud/abra)
[![Go Report Card](https://goreportcard.com/badge/git.coopcloud.tech/coop-cloud/abra)](https://goreportcard.com/report/git.coopcloud.tech/coop-cloud/abra)
[![Go Reference](https://pkg.go.dev/badge/coopcloud.tech/abra.svg)](https://pkg.go.dev/coopcloud.tech/abra)
The Co-op Cloud utility belt 🎩🐇
<a href="https://github.com/egonelbre/gophers"><img align="right" width="150" src="https://github.com/egonelbre/gophers/raw/master/.thumb/sketch/adventure/poking-fire.png"/></a>
`abra` is the flagship client & command-line tool for Co-op Cloud. It has been developed specifically for the purpose of making the day-to-day operations of [operators](https://docs.coopcloud.tech/operators/) and [maintainers](https://docs.coopcloud.tech/maintainers/) pleasant & convenient. It is libre software, written in [Go](https://go.dev) and maintained and extended by the community 💖
`abra` is our flagship client & command-line tool which has been developed specifically in the context of the Co-op Cloud project for the purpose of making the day-to-day operations of [operators](https://docs.coopcloud.tech/operators/) and [maintainers](https://docs.coopcloud.tech/maintainers/) pleasant & convenient. It is libre software, written in [Go](https://go.dev) and maintained and extended by the community :heart:
Please see [docs.coopcloud.tech/abra](https://docs.coopcloud.tech/abra) for help on install, upgrade, hacking, troubleshooting & more!


@@ -5,30 +5,32 @@ import (
)
var AppCommand = cli.Command{
Name: "app",
Aliases: []string{"a"},
Usage: "Manage apps",
ArgsUsage: "<domain>",
Name: "app",
Aliases: []string{"a"},
Usage: "Manage apps",
ArgsUsage: "<domain>",
Description: "Functionality for managing the life cycle of your apps",
Subcommands: []cli.Command{
appBackupCommand,
appCheckCommand,
appCmdCommand,
appConfigCommand,
appCpCommand,
appDeployCommand,
appListCommand,
appLogsCommand,
appNewCommand,
appPsCommand,
appRemoveCommand,
appConfigCommand,
appRestartCommand,
appRestoreCommand,
appRollbackCommand,
appRunCommand,
appSecretCommand,
appServicesCommand,
appUndeployCommand,
appDeployCommand,
appUpgradeCommand,
appUndeployCommand,
appRemoveCommand,
appCheckCommand,
appListCommand,
appPsCommand,
appLogsCommand,
appCpCommand,
appRunCommand,
appRollbackCommand,
appSecretCommand,
appVolumeCommand,
appVersionCommand,
appErrorsCommand,
appCmdCommand,
appBackupCommand,
appRestoreCommand,
},
}


@@ -1,279 +1,389 @@
package app
import (
"archive/tar"
"context"
"fmt"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/system"
"github.com/klauspost/pgzip"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var snapshot string
var snapshotFlag = &cli.StringFlag{
Name: "snapshot, s",
Usage: "Lists specific snapshot",
Destination: &snapshot,
}
var includePath string
var includePathFlag = &cli.StringFlag{
Name: "path, p",
Usage: "Include path",
Destination: &includePath,
}
var resticRepo string
var resticRepoFlag = &cli.StringFlag{
Name: "repo, r",
Usage: "Restic repository",
Destination: &resticRepo,
}
var appBackupListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
snapshotFlag,
includePathFlag,
},
Before: internal.SubCommandBefore,
Usage: "List all backups",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
log.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
log.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if includePath != "" {
log.Debugf("including INCLUDE_PATH=%s in backupbot exec invocation", includePath)
execEnv = append(execEnv, fmt.Sprintf("INCLUDE_PATH=%s", includePath))
}
if err := internal.RunBackupCmdRemote(cl, "ls", targetContainer.ID, execEnv); err != nil {
log.Fatal(err)
}
return nil
},
}
var appBackupDownloadCommand = cli.Command{
Name: "download",
Aliases: []string{"d"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
snapshotFlag,
includePathFlag,
},
Before: internal.SubCommandBefore,
Usage: "Download a backup",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.EnsureExists(); err != nil {
log.Fatal(err)
}
if !internal.Chaos {
if err := app.Recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}
if !internal.Offline {
if err := app.Recipe.EnsureUpToDate(); err != nil {
log.Fatal(err)
}
}
if err := app.Recipe.EnsureLatest(); err != nil {
log.Fatal(err)
}
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
log.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
log.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if includePath != "" {
log.Debugf("including INCLUDE_PATH=%s in backupbot exec invocation", includePath)
execEnv = append(execEnv, fmt.Sprintf("INCLUDE_PATH=%s", includePath))
}
if err := internal.RunBackupCmdRemote(cl, "download", targetContainer.ID, execEnv); err != nil {
log.Fatal(err)
}
remoteBackupDir := "/tmp/backup.tar.gz"
currentWorkingDir := "."
if err = CopyFromContainer(cl, targetContainer.ID, remoteBackupDir, currentWorkingDir); err != nil {
log.Fatal(err)
}
fmt.Println("backup successfully downloaded to current working directory")
return nil
},
}
var appBackupCreateCommand = cli.Command{
Name: "create",
Aliases: []string{"c"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
resticRepoFlag,
},
Before: internal.SubCommandBefore,
Usage: "Create a new backup",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.EnsureExists(); err != nil {
log.Fatal(err)
}
if !internal.Chaos {
if err := app.Recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}
if !internal.Offline {
if err := app.Recipe.EnsureUpToDate(); err != nil {
log.Fatal(err)
}
}
if err := app.Recipe.EnsureLatest(); err != nil {
log.Fatal(err)
}
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
log.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if resticRepo != "" {
log.Debugf("including RESTIC_REPO=%s in backupbot exec invocation", resticRepo)
execEnv = append(execEnv, fmt.Sprintf("RESTIC_REPO=%s", resticRepo))
}
if err := internal.RunBackupCmdRemote(cl, "create", targetContainer.ID, execEnv); err != nil {
log.Fatal(err)
}
return nil
},
}
var appBackupSnapshotsCommand = cli.Command{
Name: "snapshots",
Aliases: []string{"s"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
snapshotFlag,
},
Before: internal.SubCommandBefore,
Usage: "List backup snapshots",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.EnsureExists(); err != nil {
log.Fatal(err)
}
if !internal.Chaos {
if err := app.Recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}
if !internal.Offline {
if err := app.Recipe.EnsureUpToDate(); err != nil {
log.Fatal(err)
}
}
if err := app.Recipe.EnsureLatest(); err != nil {
log.Fatal(err)
}
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
log.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
log.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if err := internal.RunBackupCmdRemote(cl, "snapshots", targetContainer.ID, execEnv); err != nil {
log.Fatal(err)
}
return nil
},
type backupConfig struct {
preHookCmd string
postHookCmd string
backupPaths []string
}
var appBackupCommand = cli.Command{
Name: "backup",
Aliases: []string{"b"},
Usage: "Manage app backups",
ArgsUsage: "<domain>",
Subcommands: []cli.Command{
appBackupListCommand,
appBackupSnapshotsCommand,
appBackupDownloadCommand,
appBackupCreateCommand,
Aliases: []string{"bk"},
Usage: "Run app backup",
ArgsUsage: "<domain> [<service>]",
Flags: []cli.Flag{
internal.DebugFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Description: `
Run an app backup.
A backup command and pre/post hook commands are defined in the recipe
configuration. Abra reads this configuration and runs the commands in the context
of the deployed services. Pass <service> if you only want to back up a single
service. All backups are placed in the ~/.abra/backups directory.
A single backup file is produced for all backup paths specified for a service.
If we have the following backup configuration:
- "backupbot.backup.path=/var/lib/foo,/var/lib/bar"
And we run "abra app backup example.com app", Abra will produce a file that
looks like:
~/.abra/backups/example_com_app_609341138.tar.gz
This file is a compressed archive which contains all backup paths. To see paths, run:
tar -tf ~/.abra/backups/example_com_app_609341138.tar.gz
(Make sure to change the name of the backup file)
This single file can be used to restore your app. See "abra app restore" for more.
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
recipe, err := recipe.Get(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
backupConfigs := make(map[string]backupConfig)
for _, service := range recipe.Config.Services {
if backupsEnabled, ok := service.Deploy.Labels["backupbot.backup"]; ok {
if backupsEnabled == "true" {
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), service.Name)
bkConfig := backupConfig{}
logrus.Debugf("backup config detected for %s", fullServiceName)
if paths, ok := service.Deploy.Labels["backupbot.backup.path"]; ok {
logrus.Debugf("detected backup paths for %s: %s", fullServiceName, paths)
bkConfig.backupPaths = strings.Split(paths, ",")
}
if preHookCmd, ok := service.Deploy.Labels["backupbot.backup.pre-hook"]; ok {
logrus.Debugf("detected pre-hook command for %s: %s", fullServiceName, preHookCmd)
bkConfig.preHookCmd = preHookCmd
}
if postHookCmd, ok := service.Deploy.Labels["backupbot.backup.post-hook"]; ok {
logrus.Debugf("detected post-hook command for %s: %s", fullServiceName, postHookCmd)
bkConfig.postHookCmd = postHookCmd
}
backupConfigs[service.Name] = bkConfig
}
}
}
serviceName := c.Args().Get(1)
if serviceName != "" {
backupConfig, ok := backupConfigs[serviceName]
if !ok {
logrus.Fatalf("no backup config for %s? does %s exist?", serviceName, serviceName)
}
logrus.Infof("running backup for the %s service", serviceName)
if err := runBackup(app, serviceName, backupConfig); err != nil {
logrus.Fatal(err)
}
} else {
for serviceName, backupConfig := range backupConfigs {
logrus.Infof("running backup for the %s service", serviceName)
if err := runBackup(app, serviceName, backupConfig); err != nil {
logrus.Fatal(err)
}
}
}
return nil
},
}
// runBackup does the actual backup logic.
func runBackup(app config.App, serviceName string, bkConfig backupConfig) error {
if len(bkConfig.backupPaths) == 0 {
return fmt.Errorf("backup paths are empty for %s?", serviceName)
}
cl, err := client.New(app.Server)
if err != nil {
return err
}
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, true)
if err != nil {
return err
}
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)
if bkConfig.preHookCmd != "" {
splitCmd := internal.SafeSplit(bkConfig.preHookCmd)
logrus.Debugf("split pre-hook command for %s into %s", fullServiceName, splitCmd)
preHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &preHookExecOpts); err != nil {
return fmt.Errorf("failed to run %s on %s: %s", bkConfig.preHookCmd, targetContainer.ID, err.Error())
}
logrus.Infof("succesfully ran %s pre-hook command: %s", fullServiceName, bkConfig.preHookCmd)
}
var tempBackupPaths []string
for _, remoteBackupPath := range bkConfig.backupPaths {
timestamp := strconv.Itoa(time.Now().Nanosecond())
sanitisedPath := strings.ReplaceAll(remoteBackupPath, "/", "_")
localBackupPath := filepath.Join(config.BACKUP_DIR, fmt.Sprintf("%s%s_%s.tar.gz", fullServiceName, sanitisedPath, timestamp))
logrus.Debugf("temporarily backing up %s:%s to %s", fullServiceName, remoteBackupPath, localBackupPath)
logrus.Infof("backing up %s:%s", fullServiceName, remoteBackupPath)
content, _, err := cl.CopyFromContainer(context.Background(), targetContainer.ID, remoteBackupPath)
if err != nil {
logrus.Debugf("failed to copy %s from container: %s", remoteBackupPath, err.Error())
if err := cleanupTempArchives(tempBackupPaths); err != nil {
return fmt.Errorf("failed to clean up temporary archives: %s", err.Error())
}
return fmt.Errorf("failed to copy %s from container: %s", remoteBackupPath, err.Error())
}
defer content.Close()
_, srcBase := archive.SplitPathDirEntry(remoteBackupPath)
preArchive := archive.RebaseArchiveEntries(content, srcBase, remoteBackupPath)
if err := copyToFile(localBackupPath, preArchive); err != nil {
logrus.Debugf("failed to create tar archive (%s): %s", localBackupPath, err.Error())
if err := cleanupTempArchives(tempBackupPaths); err != nil {
return fmt.Errorf("failed to clean up temporary archives: %s", err.Error())
}
return fmt.Errorf("failed to create tar archive (%s): %s", localBackupPath, err.Error())
}
tempBackupPaths = append(tempBackupPaths, localBackupPath)
}
logrus.Infof("compressing and merging archives...")
if err := mergeArchives(tempBackupPaths, fullServiceName); err != nil {
logrus.Debugf("failed to merge archive files: %s", err.Error())
if err := cleanupTempArchives(tempBackupPaths); err != nil {
return fmt.Errorf("failed to clean up temporary archives: %s", err.Error())
}
return fmt.Errorf("failed to merge archive files: %s", err.Error())
}
if err := cleanupTempArchives(tempBackupPaths); err != nil {
return fmt.Errorf("failed to clean up temporary archives: %s", err.Error())
}
if bkConfig.postHookCmd != "" {
splitCmd := internal.SafeSplit(bkConfig.postHookCmd)
logrus.Debugf("split post-hook command for %s into %s", fullServiceName, splitCmd)
postHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &postHookExecOpts); err != nil {
return err
}
logrus.Infof("succesfully ran %s post-hook command: %s", fullServiceName, bkConfig.postHookCmd)
}
return nil
}
func copyToFile(outfile string, r io.Reader) error {
tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".tar_temp")
if err != nil {
return err
}
tmpPath := tmpFile.Name()
_, err = io.Copy(tmpFile, r)
tmpFile.Close()
if err != nil {
os.Remove(tmpPath)
return err
}
if err = os.Rename(tmpPath, outfile); err != nil {
os.Remove(tmpPath)
return err
}
return nil
}
func cleanupTempArchives(tarPaths []string) error {
for _, tarPath := range tarPaths {
if err := os.RemoveAll(tarPath); err != nil {
return err
}
logrus.Debugf("remove temporary archive file %s", tarPath)
}
return nil
}
func mergeArchives(tarPaths []string, serviceName string) error {
var out io.Writer
var cout *pgzip.Writer
timestamp := strconv.Itoa(time.Now().Nanosecond())
localBackupPath := filepath.Join(config.BACKUP_DIR, fmt.Sprintf("%s_%s.tar.gz", serviceName, timestamp))
fout, err := os.Create(localBackupPath)
if err != nil {
return fmt.Errorf("Failed to open %s: %s", localBackupPath, err)
}
defer fout.Close()
out = fout
cout = pgzip.NewWriter(out)
out = cout
tw := tar.NewWriter(out)
for _, tarPath := range tarPaths {
if err := addTar(tw, tarPath); err != nil {
return fmt.Errorf("failed to merge %s: %v", tarPath, err)
}
}
if err := tw.Close(); err != nil {
return fmt.Errorf("failed to close tar writer %v", err)
}
if cout != nil {
if err := cout.Flush(); err != nil {
return fmt.Errorf("failed to flush: %s", err)
} else if err = cout.Close(); err != nil {
return fmt.Errorf("failed to close compressed writer: %s", err)
}
}
logrus.Infof("backed up %s to %s", serviceName, localBackupPath)
return nil
}
func addTar(tw *tar.Writer, pth string) (err error) {
var tr *tar.Reader
var rc io.ReadCloser
var hdr *tar.Header
if tr, rc, err = openTarFile(pth); err != nil {
return
}
for {
if hdr, err = tr.Next(); err != nil {
if err == io.EOF {
err = nil
}
break
}
if err = tw.WriteHeader(hdr); err != nil {
break
} else if _, err = io.Copy(tw, tr); err != nil {
break
}
}
if err == nil {
err = rc.Close()
} else {
rc.Close()
}
return
}
func openTarFile(pth string) (tr *tar.Reader, rc io.ReadCloser, err error) {
var fin *os.File
var n int
buff := make([]byte, 1024)
if fin, err = os.Open(pth); err != nil {
return
}
if n, err = fin.Read(buff); err != nil {
fin.Close()
return
} else if n == 0 {
fin.Close()
err = fmt.Errorf("%s is empty", pth)
return
}
if _, err = fin.Seek(0, 0); err != nil {
fin.Close()
return
}
rc = fin
tr = tar.NewReader(rc)
return tr, rc, nil
}


@@ -1,81 +1,57 @@
package app
import (
"fmt"
"os"
"path"
"strings"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"github.com/charmbracelet/lipgloss"
"coopcloud.tech/abra/pkg/config"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var appCheckCommand = cli.Command{
Name: "check",
Aliases: []string{"chk"},
Usage: "Ensure an app is well configured",
Description: `
This command compares env vars in both the app ".env" and recipe ".env.sample"
file.
The goal is to ensure that recipe ".env.sample" env vars are defined in your
app ".env" file. Only env var definitions in the ".env.sample" which are
uncommented, e.g. "FOO=bar" are checked. If an app ".env" file does not include
these env vars, then "check" will complain.
Recipe maintainers may or may not provide defaults for env vars within their
recipes regardless of commenting or not (e.g. through the use of
${FOO:<default>} syntax). "check" does not confirm or deny this for you.`,
Name: "check",
Aliases: []string{"chk"},
Usage: "Check if app is configured correctly",
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.ChaosFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
envSamplePath := path.Join(config.RECIPES_DIR, app.Recipe, ".env.sample")
if _, err := os.Stat(envSamplePath); err != nil {
if os.IsNotExist(err) {
logrus.Fatalf("%s does not exist?", envSamplePath)
}
logrus.Fatal(err)
}
table, err := formatter.CreateTable()
envSample, err := config.ReadEnv(envSamplePath)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
table.
Headers("RECIPE ENV SAMPLE", "APP ENV").
StyleFunc(func(row, col int) lipgloss.Style {
switch {
case col == 1:
return lipgloss.NewStyle().Padding(0, 1, 0, 1).Align(lipgloss.Center)
default:
return lipgloss.NewStyle().Padding(0, 1, 0, 1)
}
})
envVars, err := appPkg.CheckEnv(app)
if err != nil {
log.Fatal(err)
}
for _, envVar := range envVars {
if envVar.Present {
val := []string{envVar.Name, "✅"}
table.Row(val...)
} else {
val := []string{envVar.Name, "❌"}
table.Row(val...)
var missing []string
for k := range envSample {
if _, ok := app.Env[k]; !ok {
missing = append(missing, k)
}
}
fmt.Println(table)
if len(missing) > 0 {
missingEnvVars := strings.Join(missing, ", ")
logrus.Fatalf("%s is missing %s", app.Path, missingEnvVars)
}
logrus.Infof("all necessary environment variables defined for %s", app.Name)
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
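Both variants of "app check" shown above reduce to the same idea: every uncommented variable in the recipe's .env.sample must also appear in the app's .env. A minimal standalone sketch of that comparison, using plain maps instead of the config/appPkg types in the diff:

// missingEnvVars returns the keys defined in the recipe's .env.sample but
// absent from the app's .env. Illustrative sketch only.
func missingEnvVars(sample, appEnv map[string]string) []string {
	var missing []string
	for k := range sample {
		if _, ok := appEnv[k]; !ok {
			missing = append(missing, k)
		}
	}
	return missing
}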


@@ -1,131 +1,131 @@
package app
import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"sort"
"path"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/app"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var localCmd bool
var localCmdFlag = &cli.BoolFlag{
Name: "local, l",
Usage: "Run command locally",
Destination: &localCmd,
}
var remoteUser string
var remoteUserFlag = &cli.StringFlag{
Name: "user, u",
Value: "",
Usage: "User to run command within a service context",
Destination: &remoteUser,
}
var appCmdCommand = cli.Command{
Name: "command",
Aliases: []string{"cmd"},
Usage: "Run app commands",
Description: `Run an app specific command.
Description: `
Run an app specific command.
These commands are bash functions, defined in the abra.sh of the recipe itself.
They can be run within the context of a service (e.g. app) or locally on your
work station by passing "--local". Arguments can be passed into these functions
using the "-- <args>" syntax.
**WARNING**: options must be passed directly after the sub-command "cmd".
Example:
EXAMPLE:
abra app cmd --local example.com app create_user -- me@example.com`,
ArgsUsage: "<domain> [<service>] <command> [-- <args>]",
abra app cmd example.com app create_user -- me@example.com
`,
ArgsUsage: "<domain> [<service>] <command>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.LocalCmdFlag,
internal.RemoteUserFlag,
internal.TtyFlag,
internal.OfflineFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
Subcommands: []cli.Command{appCmdListCommand},
BashComplete: func(ctx *cli.Context) {
args := ctx.Args()
switch len(args) {
case 0:
autocomplete.AppNameComplete(ctx)
case 1:
autocomplete.ServiceNameComplete(args.Get(0))
case 2:
cmdNameComplete(args.Get(0))
}
localCmdFlag,
remoteUserFlag,
},
BashComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
if localCmd && remoteUser != "" {
internal.ShowSubcommandHelpAndError(c, errors.New("cannot use --local & <user> together"))
}
if internal.LocalCmd && internal.RemoteUser != "" {
internal.ShowSubcommandHelpAndError(c, errors.New("cannot use --local & --user together"))
}
hasCmdArgs, parsedCmdArgs := parseCmdArgs(c.Args(), internal.LocalCmd)
if _, err := os.Stat(app.Recipe.AbraShPath); err != nil {
abraSh := path.Join(config.RECIPES_DIR, app.Recipe, "abra.sh")
if _, err := os.Stat(abraSh); err != nil {
if os.IsNotExist(err) {
log.Fatalf("%s does not exist for %s?", app.Recipe.AbraShPath, app.Name)
logrus.Fatalf("%s does not exist for %s?", abraSh, app.Name)
}
log.Fatal(err)
logrus.Fatal(err)
}
if internal.LocalCmd {
if !(len(c.Args()) >= 2) {
internal.ShowSubcommandHelpAndError(c, errors.New("missing arguments"))
var parsedCmdArgs string
var cmdArgsIdx int
var hasCmdArgs bool
for idx, arg := range c.Args() {
if arg == "--" {
cmdArgsIdx = idx
hasCmdArgs = true
}
if hasCmdArgs && idx > cmdArgsIdx {
parsedCmdArgs += fmt.Sprintf("%s ", c.Args().Get(idx))
}
}
if localCmd {
cmdName := c.Args().Get(1)
if err := internal.EnsureCommand(app.Recipe.AbraShPath, app.Recipe.Name, cmdName); err != nil {
log.Fatal(err)
if err := ensureCommand(abraSh, app.Recipe, cmdName); err != nil {
logrus.Fatal(err)
}
log.Debugf("--local detected, running %s on local work station", cmdName)
var exportEnv string
for k, v := range app.Env {
exportEnv = exportEnv + fmt.Sprintf("%s='%s'; ", k, v)
}
logrus.Debugf("--local detected, running %s on local work station", cmdName)
var sourceAndExec string
if hasCmdArgs {
log.Debugf("parsed following command arguments: %s", parsedCmdArgs)
sourceAndExec = fmt.Sprintf("TARGET=local; APP_NAME=%s; STACK_NAME=%s; %s . %s; %s %s", app.Name, app.StackName(), exportEnv, app.Recipe.AbraShPath, cmdName, parsedCmdArgs)
logrus.Debugf("parsed following command arguments: %s", parsedCmdArgs)
sourceAndExec = fmt.Sprintf("TARGET=local; APP_NAME=%s; STACK_NAME=%s; . %s; %s %s", app.Name, app.StackName(), abraSh, cmdName, parsedCmdArgs)
} else {
log.Debug("did not detect any command arguments")
sourceAndExec = fmt.Sprintf("TARGET=local; APP_NAME=%s; STACK_NAME=%s; %s . %s; %s", app.Name, app.StackName(), exportEnv, app.Recipe.AbraShPath, cmdName)
logrus.Debug("did not detect any command arguments")
sourceAndExec = fmt.Sprintf("TARGET=local; APP_NAME=%s; STACK_NAME=%s; . %s; %s", app.Name, app.StackName(), abraSh, cmdName)
}
shell := "/bin/bash"
if _, err := os.Stat(shell); errors.Is(err, os.ErrNotExist) {
log.Debugf("%s does not exist locally, use /bin/sh as fallback", shell)
shell = "/bin/sh"
}
cmd := exec.Command(shell, "-c", sourceAndExec)
cmd := exec.Command("/bin/sh", "-c", sourceAndExec)
if err := internal.RunCmd(cmd); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
} else {
if !(len(c.Args()) >= 3) {
internal.ShowSubcommandHelpAndError(c, errors.New("missing arguments"))
}
targetServiceName := c.Args().Get(1)
cmdName := c.Args().Get(2)
if err := internal.EnsureCommand(app.Recipe.AbraShPath, app.Recipe.Name, cmdName); err != nil {
log.Fatal(err)
if err := ensureCommand(abraSh, app.Recipe, cmdName); err != nil {
logrus.Fatal(err)
}
serviceNames, err := appPkg.GetAppServiceNames(app.Name)
serviceNames, err := config.GetAppServiceNames(app.Name)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
matchingServiceName := false
@@ -136,24 +136,19 @@ EXAMPLE:
}
if !matchingServiceName {
log.Fatalf("no service %s for %s?", targetServiceName, app.Name)
logrus.Fatalf("no service %s for %s?", targetServiceName, app.Name)
}
log.Debugf("running command %s within the context of %s_%s", cmdName, app.StackName(), targetServiceName)
logrus.Debugf("running command %s within the context of %s_%s", cmdName, app.StackName(), targetServiceName)
if hasCmdArgs {
log.Debugf("parsed following command arguments: %s", parsedCmdArgs)
logrus.Debugf("parsed following command arguments: %s", parsedCmdArgs)
} else {
log.Debug("did not detect any command arguments")
logrus.Debug("did not detect any command arguments")
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
if err := internal.RunCmdRemote(cl, app, app.Recipe.AbraShPath, targetServiceName, cmdName, parsedCmdArgs); err != nil {
log.Fatal(err)
if err := runCmdRemote(app, abraSh, targetServiceName, cmdName, parsedCmdArgs); err != nil {
logrus.Fatal(err)
}
}
@@ -161,93 +156,78 @@ EXAMPLE:
},
}
func parseCmdArgs(args []string, isLocal bool) (bool, string) {
var (
parsedCmdArgs string
hasCmdArgs bool
)
func ensureCommand(abraSh, recipeName, execCmd string) error {
bytes, err := ioutil.ReadFile(abraSh)
if err != nil {
return err
}
if isLocal {
if len(args) > 2 {
return true, fmt.Sprintf("%s ", strings.Join(args[2:], " "))
}
if !strings.Contains(string(bytes), execCmd) {
return fmt.Errorf("%s doesn't have a %s function", recipeName, execCmd)
}
return nil
}
func runCmdRemote(app config.App, abraSh, serviceName, cmdName, cmdArgs string) error {
cl, err := client.New(app.Server)
if err != nil {
return err
}
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, true)
if err != nil {
return err
}
logrus.Debugf("retrieved %s as target container on %s", formatter.ShortenID(targetContainer.ID), app.Server)
toTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
content, err := archive.TarWithOptions(abraSh, toTarOpts)
if err != nil {
return err
}
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), targetContainer.ID, "/tmp", content, copyOpts); err != nil {
return err
}
var cmd []string
if cmdArgs != "" {
cmd = []string{"/bin/sh", "-c", fmt.Sprintf("TARGET=%s; APP_NAME=%s; STACK_NAME=%s; . /tmp/abra.sh; %s %s", serviceName, app.Name, app.StackName(), cmdName, cmdArgs)}
} else {
if len(args) > 3 {
return true, fmt.Sprintf("%s ", strings.Join(args[3:], " "))
}
cmd = []string{"/bin/sh", "-c", fmt.Sprintf("TARGET=%s; APP_NAME=%s; STACK_NAME=%s; . /tmp/abra.sh; %s", serviceName, app.Name, app.StackName(), cmdName)}
}
return hasCmdArgs, parsedCmdArgs
}
logrus.Debugf("running command: %s", strings.Join(cmd, " "))
func cmdNameComplete(appName string) {
app, err := app.Get(appName)
execCreateOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: cmd,
Detach: false,
Tty: true,
}
if remoteUser != "" {
logrus.Debugf("running command with user %s", remoteUser)
execCreateOpts.User = remoteUser
}
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return
return err
}
cmdNames, _ := getShCmdNames(app)
if err != nil {
return
}
for _, n := range cmdNames {
fmt.Println(n)
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
return err
}
}
var appCmdListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Usage: "List all available commands",
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
internal.ChaosFlag,
},
BashComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.EnsureExists(); err != nil {
log.Fatal(err)
}
if !internal.Chaos {
if err := app.Recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}
if !internal.Offline {
if err := app.Recipe.EnsureUpToDate(); err != nil {
log.Fatal(err)
}
}
if err := app.Recipe.EnsureLatest(); err != nil {
log.Fatal(err)
}
}
cmdNames, err := getShCmdNames(app)
if err != nil {
log.Fatal(err)
}
for _, cmdName := range cmdNames {
fmt.Println(cmdName)
}
return nil
},
}
func getShCmdNames(app appPkg.App) ([]string, error) {
cmdNames, err := appPkg.ReadAbraShCmdNames(app.Recipe.AbraShPath)
if err != nil {
return nil, err
}
sort.Strings(cmdNames)
return cmdNames, nil
return nil
}


@@ -1,31 +0,0 @@
package app
import (
"strings"
"testing"
)
func TestParseCmdArgs(t *testing.T) {
tests := []struct {
input []string
shouldParse bool
expectedOutput string
}{
// `--` is not parsed when passed in from the command-line e.g. -- foo bar baz
// so we need to emulate that as missing when testing if bash args are passed in
// see https://git.coopcloud.tech/coop-cloud/organising/issues/336 for more
{[]string{"foo.com", "app", "test"}, false, ""},
{[]string{"foo.com", "app", "test", "foo"}, true, "foo "},
{[]string{"foo.com", "app", "test", "foo", "bar", "baz"}, true, "foo bar baz "},
}
for _, test := range tests {
ok, parsed := parseCmdArgs(test.input, false)
if ok != test.shouldParse {
t.Fatalf("[%s] should not parse", strings.Join(test.input, " "))
}
if parsed != test.expectedOutput {
t.Fatalf("%s does not match %s", parsed, test.expectedOutput)
}
}
}


@@ -6,10 +6,10 @@ import (
"os/exec"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/config"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@@ -21,8 +21,7 @@ var appConfigCommand = cli.Command{
Flags: []cli.Flag{
internal.DebugFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
appName := c.Args().First()
@@ -30,24 +29,24 @@ var appConfigCommand = cli.Command{
internal.ShowSubcommandHelpAndError(c, errors.New("no app provided"))
}
files, err := appPkg.LoadAppFiles("")
files, err := config.LoadAppFiles("")
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
appFile, exists := files[appName]
if !exists {
log.Fatalf("cannot find app with name %s", appName)
logrus.Fatalf("cannot find app with name %s", appName)
}
ed, ok := os.LookupEnv("EDITOR")
if !ok {
edPrompt := &survey.Select{
Message: "which editor do you wish to use?",
Message: "Which editor do you wish to use?",
Options: []string{"vi", "vim", "nvim", "nano", "pico", "emacs"},
}
if err := survey.AskOne(edPrompt, &ed); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
@@ -56,9 +55,10 @@ var appConfigCommand = cli.Command{
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}


@@ -2,26 +2,20 @@ package app
import (
"context"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@@ -34,7 +28,7 @@ var appCpCommand = cli.Command{
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
Usage: "Copy files to/from a deployed app service",
Usage: "Copy files to/from a running app service",
Description: `
Copy files to and from any app service file system.
@@ -44,340 +38,114 @@ If you want to copy a myfile.txt to the root of the app service:
And if you want to copy that file back to your current working directory locally:
abra app cp <domain> app:/myfile.txt .
abra app cp <domain> app:/myfile.txt .
`,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
src := c.Args().Get(1)
dst := c.Args().Get(2)
if src == "" {
log.Fatal("missing <src> argument")
}
if dst == "" {
log.Fatal("missing <dest> argument")
logrus.Fatal("missing <src> argument")
} else if dst == "" {
logrus.Fatal("missing <dest> argument")
}
srcPath, dstPath, service, toContainer, err := parseSrcAndDst(src, dst)
parsedSrc := strings.SplitN(src, ":", 2)
parsedDst := strings.SplitN(dst, ":", 2)
errorMsg := "one of <src>/<dest> arguments must take $SERVICE:$PATH form"
if len(parsedSrc) == 2 && len(parsedDst) == 2 {
logrus.Fatal(errorMsg)
} else if len(parsedSrc) != 2 {
if len(parsedDst) != 2 {
logrus.Fatal(errorMsg)
}
} else if len(parsedDst) != 2 {
if len(parsedSrc) != 2 {
logrus.Fatal(errorMsg)
}
}
var service string
var srcPath string
var dstPath string
isToContainer := false // <container:src> <dst>
if len(parsedSrc) == 2 {
service = parsedSrc[0]
srcPath = parsedSrc[1]
dstPath = dst
logrus.Debugf("assuming transfer is coming FROM the container")
} else if len(parsedDst) == 2 {
service = parsedDst[0]
dstPath = parsedDst[1]
srcPath = src
isToContainer = true // <src> <container:dst>
logrus.Debugf("assuming transfer is going TO the container")
}
if !isToContainer {
if _, err := os.Stat(dstPath); os.IsNotExist(err) {
logrus.Fatalf("%s does not exist locally?", dstPath)
}
}
err := configureAndCp(c, app, srcPath, dstPath, service, isToContainer)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
container, err := containerPkg.GetContainerFromStackAndService(cl, app.StackName(), service)
if err != nil {
log.Fatal(err)
}
log.Debugf("retrieved %s as target container on %s", formatter.ShortenID(container.ID), app.Server)
if toContainer {
err = CopyToContainer(cl, container.ID, srcPath, dstPath)
} else {
err = CopyFromContainer(cl, container.ID, srcPath, dstPath)
}
if err != nil {
log.Fatal(err)
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
var errServiceMissing = errors.New("one of <src>/<dest> arguments must take $SERVICE:$PATH form")
// parseSrcAndDst parses src and dest string. One of src or dst must be of the form $SERVICE:$PATH
func parseSrcAndDst(src, dst string) (srcPath string, dstPath string, service string, toContainer bool, err error) {
parsedSrc := strings.SplitN(src, ":", 2)
parsedDst := strings.SplitN(dst, ":", 2)
if len(parsedSrc)+len(parsedDst) != 3 {
return "", "", "", false, errServiceMissing
}
if len(parsedSrc) == 2 {
return parsedSrc[1], dst, parsedSrc[0], false, nil
}
if len(parsedDst) == 2 {
return src, parsedDst[1], parsedDst[0], true, nil
}
return "", "", "", false, errServiceMissing
}
// CopyToContainer copies a file or directory from the local file system to the container.
// See the possible copy modes and their documentation.
func CopyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
srcStat, err := os.Stat(srcPath)
func configureAndCp(
c *cli.Context,
app config.App,
srcPath string,
dstPath string,
service string,
isToContainer bool) error {
cl, err := client.New(app.Server)
if err != nil {
return fmt.Errorf("local %s ", err)
logrus.Fatal(err)
}
dstStat, err := cl.ContainerStatPath(context.Background(), containerID, dstPath)
dstExists := true
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service))
container, err := container.GetContainer(context.Background(), cl, filters, internal.NoInput)
if err != nil {
if errdefs.IsNotFound(err) {
dstExists = false
} else {
return fmt.Errorf("remote path: %s", err)
logrus.Fatal(err)
}
logrus.Debugf("retrieved %s as target container on %s", formatter.ShortenID(container.ID), app.Server)
if isToContainer {
if _, err := os.Stat(srcPath); err != nil {
logrus.Fatalf("%s does not exist?", srcPath)
}
}
mode, err := copyMode(srcPath, dstPath, srcStat.Mode(), dstStat.Mode, dstExists)
if err != nil {
return err
}
movePath := ""
switch mode {
case CopyModeDirToDir:
// Add the src directory to the destination path
_, srcDir := path.Split(srcPath)
dstPath = path.Join(dstPath, srcDir)
// Make sure the dst directory exits.
dcli, err := command.NewDockerCli()
toTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
content, err := archive.TarWithOptions(srcPath, toTarOpts)
if err != nil {
return err
logrus.Fatal(err)
}
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: []string{"mkdir", "-p", dstPath},
Detach: false,
Tty: true,
}); err != nil {
return fmt.Errorf("create remote directory: %s", err)
}
case CopyModeFileToFile:
// Remove the file component from the path, since docker can only copy
// to a directory.
dstPath, _ = path.Split(dstPath)
case CopyModeFileToFileRename:
// Copy the file to the temp directory and move it to its dstPath
// afterwards.
movePath = dstPath
dstPath = "/tmp"
}
toTarOpts := &archive.TarOptions{IncludeSourceDir: true, NoOverwriteDirNonDir: true, Compression: archive.Gzip}
content, err := archive.TarWithOptions(srcPath, toTarOpts)
if err != nil {
return err
}
log.Debugf("copy %s from local to %s on container", srcPath, dstPath)
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), containerID, dstPath, content, copyOpts); err != nil {
return err
}
if movePath != "" {
_, srcFile := path.Split(srcPath)
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: []string{"mv", path.Join("/tmp", srcFile), movePath},
Detach: false,
Tty: true,
}); err != nil {
return fmt.Errorf("create remote directory: %s", err)
}
}
return nil
}
// CopyFromContainer copies a file or directory from the given container to the local file system.
// See the possible copy modes and their documentation.
func CopyFromContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
srcStat, err := cl.ContainerStatPath(context.Background(), containerID, srcPath)
if err != nil {
if errdefs.IsNotFound(err) {
return fmt.Errorf("remote: %s does not exist", srcPath)
} else {
return fmt.Errorf("remote path: %s", err)
}
}
dstStat, err := os.Stat(dstPath)
dstExists := true
var dstMode os.FileMode
if err != nil {
if os.IsNotExist(err) {
dstExists = false
} else {
return fmt.Errorf("remote path: %s", err)
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), container.ID, dstPath, content, copyOpts); err != nil {
logrus.Fatal(err)
}
} else {
dstMode = dstStat.Mode()
}
mode, err := copyMode(srcPath, dstPath, srcStat.Mode, dstMode, dstExists)
if err != nil {
return err
}
moveDstDir := ""
moveDstFile := ""
switch mode {
case CopyModeFileToFile:
// Remove the file component from the path, since docker can only copy
// to a directory.
dstPath, _ = path.Split(dstPath)
case CopyModeFileToFileRename:
// Copy the file to the temp directory and move it to its dstPath
// afterwards.
moveDstFile = dstPath
dstPath = "/tmp"
case CopyModeFilesToDir:
// Copy the directory to the temp directory and move it to its
// dstPath afterwards.
moveDstDir = path.Join(dstPath, "/")
dstPath = "/tmp"
// Make sure the temp directory always gets removed
defer os.Remove(path.Join("/tmp"))
}
content, _, err := cl.CopyFromContainer(context.Background(), containerID, srcPath)
if err != nil {
return fmt.Errorf("copy: %s", err)
}
defer content.Close()
if err := archive.Untar(content, dstPath, &archive.TarOptions{
NoOverwriteDirNonDir: true,
Compression: archive.Gzip,
NoLchown: true,
}); err != nil {
return fmt.Errorf("untar: %s", err)
}
if moveDstFile != "" {
_, srcFile := path.Split(strings.TrimSuffix(srcPath, "/"))
if err := moveFile(path.Join("/tmp", srcFile), moveDstFile); err != nil {
return err
}
}
if moveDstDir != "" {
_, srcDir := path.Split(strings.TrimSuffix(srcPath, "/"))
if err := moveDir(path.Join("/tmp", srcDir), moveDstDir); err != nil {
return err
}
}
return nil
}
var (
ErrCopyDirToFile = fmt.Errorf("can't copy dir to file")
ErrDstDirNotExist = fmt.Errorf("destination directory does not exist")
)
type CopyMode int
const (
// Copy a src file to a dest file. The src and dest file names are the same.
// <dir_src>/<file> + <dir_dst>/<file> -> <dir_dst>/<file>
CopyModeFileToFile = CopyMode(iota)
// Copy a src file to a dest file. The src and dest file names are not the same.
// <dir_src>/<file_src> + <dir_dst>/<file_dst> -> <dir_dst>/<file_dst>
CopyModeFileToFileRename
// Copy a src file to dest directory. The dest file gets created in the dest
// folder with the src filename.
// <dir_src>/<file> + <dir_dst> -> <dir_dst>/<file>
CopyModeFileToDir
// Copy a src directory to dest directory.
// <dir_src> + <dir_dst> -> <dir_dst>/<dir_src>
CopyModeDirToDir
// Copy all files in the src directory to the dest directory. This works recursively.
// <dir_src>/ + <dir_dst> -> <dir_dst>/<files_from_dir_src>
CopyModeFilesToDir
)
// copyMode takes a src and dest path and file mode to determine the copy mode.
// See the possible copy modes and their documentation.
func copyMode(srcPath, dstPath string, srcMode os.FileMode, dstMode os.FileMode, dstExists bool) (CopyMode, error) {
_, srcFile := path.Split(srcPath)
_, dstFile := path.Split(dstPath)
if srcMode.IsDir() {
if !dstExists {
return -1, ErrDstDirNotExist
}
if dstMode.IsDir() {
if strings.HasSuffix(srcPath, "/") {
return CopyModeFilesToDir, nil
}
return CopyModeDirToDir, nil
}
return -1, ErrCopyDirToFile
}
if dstMode.IsDir() {
return CopyModeFileToDir, nil
}
if srcFile != dstFile {
return CopyModeFileToFileRename, nil
}
return CopyModeFileToFile, nil
}
// moveDir moves all files from a source path to the destination path recursively.
func moveDir(sourcePath, destPath string) error {
return filepath.Walk(sourcePath, func(p string, info os.FileInfo, err error) error {
content, _, err := cl.CopyFromContainer(context.Background(), container.ID, srcPath)
if err != nil {
return err
logrus.Fatal(err)
}
newPath := path.Join(destPath, strings.TrimPrefix(p, sourcePath))
if info.IsDir() {
err := os.Mkdir(newPath, info.Mode())
if err != nil {
if os.IsExist(err) {
return nil
}
return err
}
defer content.Close()
fromTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
if err := archive.Untar(content, dstPath, fromTarOpts); err != nil {
logrus.Fatal(err)
}
if info.Mode().IsRegular() {
return moveFile(p, newPath)
}
return nil
})
}
// moveFile moves a file from a source path to a destination path.
func moveFile(sourcePath, destPath string) error {
inputFile, err := os.Open(sourcePath)
if err != nil {
return err
}
outputFile, err := os.Create(destPath)
if err != nil {
inputFile.Close()
return err
}
defer outputFile.Close()
_, err = io.Copy(outputFile, inputFile)
inputFile.Close()
if err != nil {
return err
}
// Remove file after successful copy.
err = os.Remove(sourcePath)
if err != nil {
return err
}
return nil
}


@@ -1,113 +0,0 @@
package app
import (
"os"
"testing"
)
func TestParse(t *testing.T) {
tests := []struct {
src string
dst string
srcPath string
dstPath string
service string
toContainer bool
err error
}{
{src: "foo", dst: "bar", err: errServiceMissing},
{src: "app:foo", dst: "app:bar", err: errServiceMissing},
{src: "app:foo", dst: "bar", srcPath: "foo", dstPath: "bar", service: "app", toContainer: false},
{src: "foo", dst: "app:bar", srcPath: "foo", dstPath: "bar", service: "app", toContainer: true},
}
for i, tc := range tests {
srcPath, dstPath, service, toContainer, err := parseSrcAndDst(tc.src, tc.dst)
if srcPath != tc.srcPath {
t.Errorf("[%d] srcPath: want (%s), got(%s)", i, tc.srcPath, srcPath)
}
if dstPath != tc.dstPath {
t.Errorf("[%d] dstPath: want (%s), got(%s)", i, tc.dstPath, dstPath)
}
if service != tc.service {
t.Errorf("[%d] service: want (%s), got(%s)", i, tc.service, service)
}
if toContainer != tc.toContainer {
t.Errorf("[%d] toConainer: want (%t), got(%t)", i, tc.toContainer, toContainer)
}
if err == nil && tc.err != nil && err.Error() != tc.err.Error() {
t.Errorf("[%d] err: want (%s), got(%s)", i, tc.err, err)
}
}
}
func TestCopyMode(t *testing.T) {
tests := []struct {
srcPath string
dstPath string
srcMode os.FileMode
dstMode os.FileMode
dstExists bool
mode CopyMode
err error
}{
{
srcPath: "foo.txt",
dstPath: "foo.txt",
srcMode: os.ModePerm,
dstMode: os.ModePerm,
dstExists: true,
mode: CopyModeFileToFile,
},
{
srcPath: "foo.txt",
dstPath: "bar.txt",
srcMode: os.ModePerm,
dstExists: true,
mode: CopyModeFileToFileRename,
},
{
srcPath: "foo",
dstPath: "foo",
srcMode: os.ModeDir,
dstMode: os.ModeDir,
dstExists: true,
mode: CopyModeDirToDir,
},
{
srcPath: "foo/",
dstPath: "foo",
srcMode: os.ModeDir,
dstMode: os.ModeDir,
dstExists: true,
mode: CopyModeFilesToDir,
},
{
srcPath: "foo",
dstPath: "foo",
srcMode: os.ModeDir,
dstExists: false,
mode: -1,
err: ErrDstDirNotExist,
},
{
srcPath: "foo",
dstPath: "foo",
srcMode: os.ModeDir,
dstMode: os.ModePerm,
dstExists: true,
mode: -1,
err: ErrCopyDirToFile,
},
}
for i, tc := range tests {
mode, err := copyMode(tc.srcPath, tc.dstPath, tc.srcMode, tc.dstMode, tc.dstExists)
if mode != tc.mode {
t.Errorf("[%d] mode: want (%d), got(%d)", i, tc.mode, mode)
}
if err != tc.err {
t.Errorf("[%d] err: want (%s), got(%s)", i, tc.err, err)
}
}
}


@@ -1,22 +1,8 @@
package app
import (
"context"
"fmt"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/secret"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/dns"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/urfave/cli"
)
@@ -24,7 +10,7 @@ var appDeployCommand = cli.Command{
Name: "deploy",
Aliases: []string{"d"},
Usage: "Deploy an app",
ArgsUsage: "<domain> [<version>]",
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
@ -32,236 +18,19 @@ var appDeployCommand = cli.Command{
internal.ChaosFlag,
internal.NoDomainChecksFlag,
internal.DontWaitConvergeFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Description: `Deploy an app.
Description: `
Deploy an app. It does not support incrementing the version of a deployed app;
for this you need to look at the "abra app upgrade <domain>" command.
This command supports chaos operations. Use "--chaos" to deploy your recipe
checkout as-is. Recipe commit hashes are also supported values for
"[<version>]". Please note, "upgrade"/"rollback" do not support chaos
operations.
You may pass "--force" to re-deploy the same version again. This can be useful
if the container runtime has gotten into a weird state.
EXAMPLE:
abra app deploy foo.example.com
abra app deploy foo.example.com 1.2.3+3.2.1
abra app deploy foo.example.com 1e83340e`,
Chaos mode ("--chaos") will deploy your local checkout of a recipe as-is,
including unstaged changes and can be useful for live hacking and testing new
recipes.
`,
Action: internal.DeployAction,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
var warnMessages []string
app := internal.ValidateApp(c)
stackName := app.StackName()
specificVersion := c.Args().Get(1)
if specificVersion != "" && internal.Chaos {
log.Fatal("cannot use <version> and --chaos together")
}
if specificVersion != "" {
log.Debugf("overriding env file version (%s) with %s", app.Recipe.Version, specificVersion)
app.Recipe.Version = specificVersion
}
if specificVersion == "" && app.Recipe.Version != "" && !internal.Chaos {
log.Debugf("retrieved %s as version from env file", app.Recipe.Version)
specificVersion = app.Recipe.Version
}
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
if err := lint.LintForErrors(app.Recipe); err != nil {
log.Fatal(err)
}
log.Debugf("checking whether %s is already deployed", stackName)
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
log.Fatal(err)
}
// NOTE(d1): handles "<version> as git hash" use case
var isChaosCommit bool
// NOTE(d1): check out specific version before dealing with secrets. This
// is because we need to deal with GetComposeFiles under the hood and these
// files change from version to version which therefore affects which
// secrets might be generated
version := deployMeta.Version
if specificVersion != "" {
version = specificVersion
log.Debugf("choosing %s as version to deploy", version)
var err error
isChaosCommit, err = app.Recipe.EnsureVersion(version)
if err != nil {
log.Fatal(err)
}
if isChaosCommit {
log.Debugf("assuming '%s' is a chaos commit", version)
internal.Chaos = true
}
}
secStats, err := secret.PollSecretsStatus(cl, app)
if err != nil {
log.Fatal(err)
}
for _, secStat := range secStats {
if !secStat.CreatedOnRemote {
log.Fatalf("unable to deploy, secrets not generated (%s)?", secStat.LocalName)
}
}
if deployMeta.IsDeployed {
if internal.Force || internal.Chaos {
warnMessages = append(warnMessages, fmt.Sprintf("%s is already deployed", app.Name))
} else {
log.Fatalf("%s is already deployed", app.Name)
}
}
if !internal.Chaos && specificVersion == "" {
versions, err := app.Recipe.Tags()
if err != nil {
log.Fatal(err)
}
if len(versions) > 0 && !internal.Chaos {
version = versions[len(versions)-1]
log.Debugf("choosing %s as version to deploy", version)
if _, err := app.Recipe.EnsureVersion(version); err != nil {
log.Fatal(err)
}
} else {
head, err := app.Recipe.Head()
if err != nil {
log.Fatal(err)
}
version = formatter.SmallSHA(head.String())
warnMessages = append(warnMessages, "no versions detected, using latest commit")
}
}
chaosVersion := config.CHAOS_DEFAULT
if internal.Chaos {
warnMessages = append(warnMessages, "chaos mode engaged")
if isChaosCommit {
chaosVersion = specificVersion
versionLabelLocal, err := app.Recipe.GetVersionLabelLocal()
if err != nil {
log.Fatal(err)
}
version = versionLabelLocal
} else {
var err error
chaosVersion, err = app.Recipe.ChaosVersion()
if err != nil {
log.Fatal(err)
}
}
}
abraShEnv, err := envfile.ReadAbraShEnvVars(app.Recipe.AbraShPath)
if err != nil {
log.Fatal(err)
}
for k, v := range abraShEnv {
app.Env[k] = v
}
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
log.Fatal(err)
}
deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: stackName,
Prune: false,
ResolveImage: stack.ResolveImageAlways,
Detach: false,
}
compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env)
if err != nil {
log.Fatal(err)
}
appPkg.ExposeAllEnv(stackName, compose, app.Env)
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
appPkg.SetChaosVersionLabel(compose, stackName, chaosVersion)
appPkg.SetUpdateLabel(compose, stackName, app.Env)
envVars, err := appPkg.CheckEnv(app)
if err != nil {
log.Fatal(err)
}
for _, envVar := range envVars {
if !envVar.Present {
warnMessages = append(warnMessages,
fmt.Sprintf("env var %s missing from %s.env, present in recipe .env.sample", envVar.Name, app.Domain),
)
}
}
if !internal.NoDomainChecks {
domainName, ok := app.Env["DOMAIN"]
if ok {
if _, err = dns.EnsureDomainsResolveSameIPv4(domainName, app.Server); err != nil {
log.Fatal(err)
}
} else {
warnMessages = append(warnMessages, "skipping domain checks as no DOMAIN=... configured for app")
}
} else {
warnMessages = append(warnMessages, "skipping domain checks as requested")
}
if err := internal.DeployOverview(app, warnMessages, version, chaosVersion); err != nil {
log.Fatal(err)
}
stack.WaitTimeout, err = appPkg.GetTimeoutFromLabel(compose, stackName)
if err != nil {
log.Fatal(err)
}
log.Debugf("set waiting timeout to %d s", stack.WaitTimeout)
if err := stack.RunDeploy(cl, deployOpts, compose, app.Name, internal.DontWaitConverge); err != nil {
log.Fatal(err)
}
postDeployCmds, ok := app.Env["POST_DEPLOY_CMDS"]
if ok && !internal.DontWaitConverge {
log.Debugf("run the following post-deploy commands: %s", postDeployCmds)
if err := internal.PostCmds(cl, app, postDeployCmds); err != nil {
log.Fatalf("attempting to run post deploy commands, saw: %s", err)
}
}
app.Recipe.Version = version
if chaosVersion != config.CHAOS_DEFAULT {
app.Recipe.Version = chaosVersion
}
log.Debugf("choosing %s as version to save to env file", app.Recipe.Version)
if err := app.WriteRecipeVersion(app.Recipe.Version, false); err != nil {
log.Fatalf("writing new recipe version in env file: %s", err)
}
return nil
},
}

143
cli/app/errors.go Normal file
View File

@ -0,0 +1,143 @@
package app
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/recipe"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var appErrorsCommand = cli.Command{
Name: "errors",
Usage: "List errors for a deployed app",
ArgsUsage: "<domain>",
Description: `
List errors for a deployed app.
This is a best-effort implementation and an attempt to gather a number of tips
& tricks for finding errors together into one convenient command. When an app
is failing to deploy or having issues, the cause could be any number of things.
This command currently takes into account:
Is the service deployed?
Is the service killed by an OOM error?
Is the service reporting an error (like in "ps --no-trunc" output)?
Is the service healthcheck failing? What are the healthcheck logs?
Got any more ideas? Please let us know:
https://git.coopcloud.tech/coop-cloud/organising/issues/new/choose
This command is best accompanied by "abra app logs <domain>" which may reveal
further information which can help you debug the cause of an app failure via
the logs.
`,
Aliases: []string{"e"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.WatchFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
logrus.Fatal(err)
}
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
}
if !internal.Watch {
if err := checkErrors(c, cl, app); err != nil {
logrus.Fatal(err)
}
return nil
}
for {
if err := checkErrors(c, cl, app); err != nil {
logrus.Fatal(err)
}
time.Sleep(2 * time.Second)
}
return nil
},
}
func checkErrors(c *cli.Context, cl *dockerClient.Client, app config.App) error {
recipe, err := recipe.Get(app.Recipe)
if err != nil {
return err
}
for _, service := range recipe.Config.Services {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name))
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters})
if err != nil {
return err
}
if len(containers) == 0 {
logrus.Warnf("%s is not up, something seems wrong", service.Name)
continue
}
container := containers[0]
containerState, err := cl.ContainerInspect(context.Background(), container.ID)
if err != nil {
logrus.Fatal(err)
}
if containerState.State.OOMKilled {
logrus.Warnf("%s has been killed due to an out of memory error", service.Name)
}
if containerState.State.Error != "" {
logrus.Warnf("%s reports this error: %s", service.Name, containerState.State.Error)
}
if containerState.State.Health != nil {
if containerState.State.Health.Status != "healthy" {
logrus.Warnf("%s healthcheck status is %s", service.Name, containerState.State.Health.Status)
logrus.Warnf("%s healthcheck has failed %s times", service.Name, strconv.Itoa(containerState.State.Health.FailingStreak))
for _, log := range containerState.State.Health.Log {
logrus.Warnf("%s healthcheck logs: %s", service.Name, strings.TrimSpace(log.Output))
}
}
}
}
return nil
}
func getServiceName(names []string) string {
containerName := strings.Join(names, " ")
trimmed := strings.TrimPrefix(containerName, "/")
return strings.Split(trimmed, ".")[0]
}
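
Swarm task containers are named "<stack>_<service>.<slot>.<task-id>" with a leading slash, so getServiceName strips the slash and keeps everything before the first dot. A small illustrative call (the task ID is made up):

// hypothetical Swarm container name; the trailing task ID is made up
name := getServiceName([]string{"/myapp_example_com_app.1.q7zj0aexample"})
// name == "myapp_example_com_app"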

View File

@ -1,69 +1,60 @@
package app
import (
"encoding/json"
"fmt"
"sort"
"strconv"
"strings"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/ssh"
"coopcloud.tech/tagcmp"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var (
status bool
statusFlag = &cli.BoolFlag{
Name: "status, S",
Usage: "Show app deployment status",
Destination: &status,
}
)
var status bool
var statusFlag = &cli.BoolFlag{
Name: "status, S",
Usage: "Show app deployment status",
Destination: &status,
}
var (
recipeFilter string
recipeFlag = &cli.StringFlag{
Name: "recipe, r",
Value: "",
Usage: "Show apps of a specific recipe",
Destination: &recipeFilter,
}
)
var appRecipe string
var recipeFlag = &cli.StringFlag{
Name: "recipe, r",
Value: "",
Usage: "Show apps of a specific recipe",
Destination: &appRecipe,
}
var (
listAppServer string
listAppServerFlag = &cli.StringFlag{
Name: "server, s",
Value: "",
Usage: "Show apps of a specific server",
Destination: &listAppServer,
}
)
var listAppServer string
var listAppServerFlag = &cli.StringFlag{
Name: "server, s",
Value: "",
Usage: "Show apps of a specific server",
Destination: &listAppServer,
}
type appStatus struct {
Server string `json:"server"`
Recipe string `json:"recipe"`
AppName string `json:"appName"`
Domain string `json:"domain"`
Status string `json:"status"`
Chaos string `json:"chaos"`
ChaosVersion string `json:"chaosVersion"`
AutoUpdate string `json:"autoUpdate"`
Version string `json:"version"`
Upgrade string `json:"upgrade"`
server string
recipe string
appName string
domain string
status string
version string
upgrade string
}
type serverStatus struct {
Apps []appStatus `json:"apps"`
AppCount int `json:"appCount"`
VersionCount int `json:"versionCount"`
UnversionedCount int `json:"unversionedCount"`
LatestCount int `json:"latestCount"`
UpgradeCount int `json:"upgradeCount"`
apps []appStatus
appCount int
versionCount int
unversionedCount int
latestCount int
upgradeCount int
}
var appListCommand = cli.Command{
@ -76,41 +67,50 @@ generate a report of all your apps.
By passing the "--status/-S" flag, you can query all your servers for the
actual live deployment status. Depending on how many servers you manage, this
can take some time.`,
can take some time.
`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.MachineReadableFlag,
statusFlag,
listAppServerFlag,
recipeFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
appFiles, err := appPkg.LoadAppFiles(listAppServer)
appFiles, err := config.LoadAppFiles(listAppServer)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
apps, err := appPkg.GetApps(appFiles, recipeFilter)
apps, err := config.GetApps(appFiles)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
sort.Sort(appPkg.ByServerAndRecipe(apps))
sort.Sort(config.ByServerAndRecipe(apps))
statuses := make(map[string]map[string]string)
var catl recipe.RecipeCatalogue
if status {
alreadySeen := make(map[string]bool)
for _, app := range apps {
if _, ok := alreadySeen[app.Server]; !ok {
if err := ssh.EnsureHostKey(app.Server); err != nil {
logrus.Fatal(fmt.Sprintf(internal.SSHFailMsg, app.Server))
}
alreadySeen[app.Server] = true
}
}
statuses, err = appPkg.GetAppStatuses(apps, internal.MachineReadable)
statuses, err = config.GetAppStatuses(appFiles)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
var err error
catl, err = recipe.ReadRecipeCatalogue()
if err != nil {
logrus.Fatal(err)
}
}
@ -122,73 +122,58 @@ can take some time.`,
var ok bool
if stats, ok = allStats[app.Server]; !ok {
stats = serverStatus{}
if recipeFilter == "" {
if appRecipe == "" {
// count server, no filtering
totalServersCount++
}
}
if app.Recipe.Name == recipeFilter || recipeFilter == "" {
if recipeFilter != "" {
if app.Recipe == appRecipe || appRecipe == "" {
if appRecipe != "" {
// only count server if matches filter
totalServersCount++
}
appStats := appStatus{}
stats.AppCount++
stats.appCount++
totalAppsCount++
if status {
status := "unknown"
version := "unknown"
chaos := "unknown"
chaosVersion := "unknown"
autoUpdate := "unknown"
if statusMeta, ok := statuses[app.StackName()]; ok {
if currentVersion, exists := statusMeta["version"]; exists {
if currentVersion != "" {
version = currentVersion
}
}
if chaosDeploy, exists := statusMeta["chaos"]; exists {
chaos = chaosDeploy
}
if chaosDeployVersion, exists := statusMeta["chaosVersion"]; exists {
chaosVersion = chaosDeployVersion
}
if autoUpdateState, exists := statusMeta["autoUpdate"]; exists {
autoUpdate = autoUpdateState
}
if statusMeta["status"] != "" {
status = statusMeta["status"]
}
stats.VersionCount++
stats.versionCount++
} else {
stats.UnversionedCount++
stats.unversionedCount++
}
appStats.Status = status
appStats.Chaos = chaos
appStats.ChaosVersion = chaosVersion
appStats.Version = version
appStats.AutoUpdate = autoUpdate
appStats.status = status
appStats.version = version
var newUpdates []string
if version != "unknown" {
updates, err := app.Recipe.Tags()
updates, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
for _, update := range updates {
parsedUpdate, err := tagcmp.Parse(update)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if update != version && parsedUpdate.IsGreaterThan(parsedVersion) {
@ -199,38 +184,29 @@ can take some time.`,
if len(newUpdates) == 0 {
if version == "unknown" {
appStats.Upgrade = "unknown"
appStats.upgrade = "unknown"
} else {
appStats.Upgrade = "latest"
stats.LatestCount++
appStats.upgrade = "latest"
stats.latestCount++
}
} else {
newUpdates = internal.ReverseStringList(newUpdates)
appStats.Upgrade = strings.Join(newUpdates, "\n")
stats.UpgradeCount++
appStats.upgrade = strings.Join(newUpdates, "\n")
stats.upgradeCount++
}
}
appStats.Server = app.Server
appStats.Recipe = app.Recipe.Name
appStats.AppName = app.Name
appStats.Domain = app.Domain
appStats.server = app.Server
appStats.recipe = app.Recipe
appStats.appName = app.Name
appStats.domain = app.Domain
stats.Apps = append(stats.Apps, appStats)
stats.apps = append(stats.apps, appStats)
}
allStats[app.Server] = stats
}
if internal.MachineReadable {
jsonstring, err := json.Marshal(allStats)
if err != nil {
log.Fatal(err)
} else {
fmt.Println(string(jsonstring))
}
return nil
}
alreadySeen := make(map[string]bool)
for _, app := range apps {
if _, ok := alreadySeen[app.Server]; ok {
@ -239,82 +215,48 @@ can take some time.`,
serverStat := allStats[app.Server]
headers := []string{"RECIPE", "DOMAIN"}
tableCol := []string{"recipe", "domain"}
if status {
headers = append(headers, []string{
"STATUS",
"CHAOS",
"VERSION",
"UPGRADE",
"AUTOUPDATE"}...,
)
tableCol = append(tableCol, []string{"status", "version", "upgrade"}...)
}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
table := formatter.CreateTable(tableCol)
table.Headers(headers...)
var rows [][]string
for _, appStat := range serverStat.Apps {
row := []string{appStat.Recipe, appStat.Domain}
for _, appStat := range serverStat.apps {
tableRow := []string{appStat.recipe, appStat.domain}
if status {
chaosStatus := appStat.Chaos
if chaosStatus != "unknown" {
chaosEnabled, err := strconv.ParseBool(chaosStatus)
if err != nil {
log.Fatal(err)
}
if chaosEnabled && appStat.ChaosVersion != "unknown" {
chaosStatus = appStat.ChaosVersion
}
}
row = append(row, []string{
appStat.Status,
chaosStatus,
appStat.Version,
appStat.Upgrade,
appStat.AutoUpdate}...,
)
tableRow = append(tableRow, []string{appStat.status, appStat.version, appStat.upgrade}...)
}
rows = append(rows, row)
table.Append(tableRow)
}
table.Rows(rows...)
if len(rows) > 0 {
fmt.Println(table)
if table.NumLines() > 0 {
table.Render()
if status {
fmt.Println(fmt.Sprintf(
"SERVER: %s | TOTAL APPS: %v | VERSIONED: %v | UNVERSIONED: %v | LATEST : %v | UPGRADE: %v",
"server: %s | total apps: %v | versioned: %v | unversioned: %v | latest: %v | upgrade: %v",
app.Server,
serverStat.AppCount,
serverStat.VersionCount,
serverStat.UnversionedCount,
serverStat.LatestCount,
serverStat.UpgradeCount,
serverStat.appCount,
serverStat.versionCount,
serverStat.unversionedCount,
serverStat.latestCount,
serverStat.upgradeCount,
))
} else {
log.Infof("SERVER: %s TOTAL APPS: %v", app.Server, serverStat.AppCount)
fmt.Println(fmt.Sprintf("server: %s | total apps: %v", app.Server, serverStat.appCount))
}
}
if len(allStats) > 1 && len(rows) > 0 {
fmt.Println() // newline separator for multiple servers
}
if len(allStats) > 1 && table.NumLines() > 0 {
fmt.Println() // newline separator for multiple servers
}
alreadySeen[app.Server] = true
}
if len(allStats) > 1 {
totalServers := formatter.BoldStyle.Render("TOTAL SERVERS")
totalApps := formatter.BoldStyle.Render("TOTAL APPS")
log.Infof("%s: %v | %s: %v ", totalServers, totalServersCount, totalApps, totalAppsCount)
fmt.Println(fmt.Sprintf("total servers: %v | total apps: %v ", totalServersCount, totalAppsCount))
}
return nil

View File

@ -2,26 +2,71 @@ package app
import (
"context"
"fmt"
"io"
"os"
"slices"
"sync"
"time"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/stack"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/service"
"github.com/docker/docker/api/types"
containerTypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var logOpts = types.ContainerLogsOptions{
Details: false,
Follow: true,
ShowStderr: true,
ShowStdout: true,
Tail: "20",
Timestamps: true,
}
// stackLogs lists logs for all stack services
func stackLogs(c *cli.Context, app config.App, client *dockerClient.Client) {
filters, err := app.Filters(true, false)
if err != nil {
logrus.Fatal(err)
}
serviceOpts := types.ServiceListOptions{Filters: filters}
services, err := client.ServiceList(context.Background(), serviceOpts)
if err != nil {
logrus.Fatal(err)
}
var wg sync.WaitGroup
for _, service := range services {
wg.Add(1)
go func(s string) {
if internal.StdErrOnly {
logOpts.ShowStdout = false
}
logs, err := client.ServiceLogs(context.Background(), s, logOpts)
if err != nil {
logrus.Fatal(err)
}
defer logs.Close()
_, err = io.Copy(os.Stdout, logs)
if err != nil && err != io.EOF {
logrus.Fatal(err)
}
}(service.ID)
}
wg.Wait()
os.Exit(0)
}
var appLogsCommand = cli.Command{
Name: "logs",
Aliases: []string{"l"},
@ -29,111 +74,56 @@ var appLogsCommand = cli.Command{
Usage: "Tail app logs",
Flags: []cli.Flag{
internal.StdErrOnlyFlag,
internal.SinceLogsFlag,
internal.DebugFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
stackName := app.StackName()
if err := app.Recipe.EnsureExists(); err != nil {
log.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
log.Fatal(err)
}
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
logrus.Fatal(err)
}
serviceName := c.Args().Get(1)
serviceNames := []string{}
if serviceName != "" {
serviceNames = []string{serviceName}
}
err = tailLogs(cl, app, serviceNames)
if err != nil {
log.Fatal(err)
if serviceName == "" {
logrus.Debugf("tailing logs for all %s services", app.Recipe)
stackLogs(c, app, cl)
} else {
logrus.Debugf("tailing logs for %s", serviceName)
if err := tailServiceLogs(c, cl, app, serviceName); err != nil {
logrus.Fatal(err)
}
}
return nil
},
}
// tailLogs prints logs for the given app with optional service names to be
// filtered on. It also checks if the latest task is not running and then
// prints the past tasks.
func tailLogs(cl *dockerClient.Client, app appPkg.App, serviceNames []string) error {
f, err := app.Filters(true, false, serviceNames...)
func tailServiceLogs(c *cli.Context, cl *dockerClient.Client, app config.App, serviceName string) error {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("%s_%s", app.StackName(), serviceName))
chosenService, err := service.GetService(context.Background(), cl, filters, internal.NoInput)
if err != nil {
return err
logrus.Fatal(err)
}
services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: f})
if internal.StdErrOnly {
logOpts.ShowStdout = false
}
logs, err := cl.ServiceLogs(context.Background(), chosenService.ID, logOpts)
if err != nil {
return err
logrus.Fatal(err)
}
defer logs.Close()
var wg sync.WaitGroup
for _, service := range services {
filters := filters.NewArgs()
filters.Add("name", service.Spec.Name)
tasks, err := cl.TaskList(context.Background(), types.TaskListOptions{Filters: f})
if err != nil {
return err
}
if len(tasks) > 0 {
// Sort the tasks by the CreatedAt field in descending order so that the
// most recent task comes first.
slices.SortFunc[[]swarm.Task](tasks, func(t1, t2 swarm.Task) int {
return int(t2.Meta.CreatedAt.Unix() - t1.Meta.CreatedAt.Unix())
})
lastTask := tasks[0].Status
if lastTask.State != swarm.TaskStateRunning {
for _, task := range tasks {
log.Errorf("[%s] %s State %s: %s", service.Spec.Name, task.Meta.CreatedAt.Format(time.RFC3339), task.Status.State, task.Status.Err)
}
}
}
// Collect the logs in a go routine, so the logs from all services are
// collected in parallel.
wg.Add(1)
go func(serviceID string) {
logs, err := cl.ServiceLogs(context.Background(), serviceID, containerTypes.LogsOptions{
ShowStderr: true,
ShowStdout: !internal.StdErrOnly,
Since: internal.SinceLogs,
Until: "",
Timestamps: true,
Follow: true,
Tail: "20",
Details: false,
})
if err != nil {
log.Fatal(err)
}
defer logs.Close()
_, err = io.Copy(os.Stdout, logs)
if err != nil && err != io.EOF {
log.Fatal(err)
}
}(service.ID)
_, err = io.Copy(os.Stdout, logs)
if err != nil && err != io.EOF {
logrus.Fatal(err)
}
// Wait for all log streams to be closed.
wg.Wait()
return nil
}

View File

@ -1,27 +1,14 @@
package app
import (
"fmt"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/app"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/secret"
"github.com/AlecAivazis/survey/v2"
"github.com/charmbracelet/lipgloss/table"
dockerClient "github.com/docker/docker/client"
"github.com/urfave/cli"
)
var appNewDescription = `
Creates a new app from a default recipe. This new app configuration is stored
in your $ABRA_DIR directory under the appropriate server.
Takes a recipe and uses it to create a new app. This new app configuration is
stored in your ~/.abra directory under the appropriate server.
This command does not deploy your app for you. You will need to run "abra app
deploy <domain>" to do so.
@ -29,8 +16,6 @@ deploy <domain>" to do so.
You can see what recipes are available (i.e. values for the <recipe> argument)
by running "abra recipe ls".
Recipe commit hashes are supported values for "[<version>]".
Passing the "--secrets/-S" flag will automatically generate secrets for your
app and store them encrypted at rest on the chosen target server. These
generated secrets are only visible at generation time, so please take care to
@ -38,7 +23,8 @@ store them somewhere safe.
You can use the "--pass/-P" to store these generated passwords locally in a
pass store (see passwordstore.org for more). The pass command must be available
on your $PATH.`
on your $PATH.
`
var appNewCommand = cli.Command{
Name: "new",
@ -52,272 +38,9 @@ var appNewCommand = cli.Command{
internal.DomainFlag,
internal.PassFlag,
internal.SecretsFlag,
internal.OfflineFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
ArgsUsage: "[<recipe>] [<version>]",
BashComplete: func(ctx *cli.Context) {
args := ctx.Args()
switch len(args) {
case 0:
autocomplete.RecipeNameComplete(ctx)
case 1:
autocomplete.RecipeVersionComplete(ctx.Args().Get(0))
}
},
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipe(c)
var version string
if !internal.Chaos {
if err := recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(); err != nil {
log.Fatal(err)
}
}
if c.Args().Get(1) == "" {
recipeVersions, err := recipe.GetRecipeVersions()
if err != nil {
log.Fatal(err)
}
if len(recipeVersions) > 0 {
latest := recipeVersions[len(recipeVersions)-1]
for tag := range latest {
version = tag
}
if _, err := recipe.EnsureVersion(version); err != nil {
log.Fatal(err)
}
} else {
if err := recipe.EnsureLatest(); err != nil {
log.Fatal(err)
}
}
} else {
version = c.Args().Get(1)
if _, err := recipe.EnsureVersion(version); err != nil {
log.Fatal(err)
}
}
}
if err := ensureServerFlag(); err != nil {
log.Fatal(err)
}
if err := ensureDomainFlag(recipe, internal.NewAppServer); err != nil {
log.Fatal(err)
}
sanitisedAppName := appPkg.SanitiseAppName(internal.Domain)
log.Debugf("%s sanitised as %s for new app", internal.Domain, sanitisedAppName)
if err := appPkg.TemplateAppEnvSample(
recipe,
internal.Domain,
internal.NewAppServer,
internal.Domain,
); err != nil {
log.Fatal(err)
}
var secrets AppSecrets
var secretsTable *table.Table
if internal.Secrets {
sampleEnv, err := recipe.SampleEnv()
if err != nil {
log.Fatal(err)
}
composeFiles, err := recipe.GetComposeFiles(sampleEnv)
if err != nil {
log.Fatal(err)
}
secretsConfig, err := secret.ReadSecretsConfig(recipe.SampleEnvPath, composeFiles, appPkg.StackName(internal.Domain))
if err != nil {
return err
}
if err := promptForSecrets(recipe.Name, secretsConfig); err != nil {
log.Fatal(err)
}
cl, err := client.New(internal.NewAppServer)
if err != nil {
log.Fatal(err)
}
secrets, err = createSecrets(cl, secretsConfig, sanitisedAppName)
if err != nil {
log.Fatal(err)
}
secretsTable, err = formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
headers := []string{"NAME", "VALUE"}
secretsTable.Headers(headers...)
for name, val := range secrets {
secretsTable.Row(name, val)
}
}
if internal.NewAppServer == "default" {
internal.NewAppServer = "local"
}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
headers := []string{"SERVER", "DOMAIN", "RECIPE", "VERSION"}
table.Headers(headers...)
table.Row(internal.NewAppServer, internal.Domain, recipe.Name, version)
log.Infof("new app '%s' created 🌞", recipe.Name)
fmt.Println("")
fmt.Println(table)
fmt.Println("")
fmt.Println("Configure this app:")
fmt.Println(fmt.Sprintf("\n abra app config %s", internal.Domain))
fmt.Println("")
fmt.Println("Deploy this app:")
fmt.Println(fmt.Sprintf("\n abra app deploy %s", internal.Domain))
if len(secrets) > 0 {
fmt.Println("")
fmt.Println("Generated secrets:")
fmt.Println("")
fmt.Println(secretsTable)
log.Warnf(
"generated secrets %s shown again, please take note of them %s",
formatter.BoldStyle.Render("NOT"),
formatter.BoldStyle.Render("NOW"),
)
}
app, err := app.Get(internal.Domain)
if err != nil {
log.Fatal(err)
}
log.Debugf("choosing %s as version to save to env file", version)
if err := app.WriteRecipeVersion(version, false); err != nil {
log.Fatalf("writing new recipe version in env file: %s", err)
}
return nil
},
}
// AppSecrets represents all app secrets
type AppSecrets map[string]string
// createSecrets creates all secrets for a new app.
func createSecrets(cl *dockerClient.Client, secretsConfig map[string]secret.Secret, sanitisedAppName string) (AppSecrets, error) {
// NOTE(d1): trim to match app.StackName() implementation
if len(sanitisedAppName) > config.MAX_SANITISED_APP_NAME_LENGTH {
log.Debugf("trimming %s to %s to avoid runtime limits", sanitisedAppName, sanitisedAppName[:config.MAX_SANITISED_APP_NAME_LENGTH])
sanitisedAppName = sanitisedAppName[:config.MAX_SANITISED_APP_NAME_LENGTH]
}
secrets, err := secret.GenerateSecrets(cl, secretsConfig, internal.NewAppServer)
if err != nil {
return nil, err
}
if internal.Pass {
for secretName := range secrets {
secretValue := secrets[secretName]
if err := secret.PassInsertSecret(
secretValue,
secretName,
internal.Domain,
internal.NewAppServer,
); err != nil {
return nil, err
}
}
}
return secrets, nil
}
// ensureDomainFlag checks if the domain flag was used. If not, it asks the user for it.
func ensureDomainFlag(recipe recipePkg.Recipe, server string) error {
if internal.Domain == "" && !internal.NoInput {
prompt := &survey.Input{
Message: "Specify app domain",
Default: fmt.Sprintf("%s.%s", recipe.Name, server),
}
if err := survey.AskOne(prompt, &internal.Domain); err != nil {
return err
}
}
if internal.Domain == "" {
return fmt.Errorf("no domain provided")
}
return nil
}
// promptForSecrets asks if we should generate secrets for a new app.
func promptForSecrets(recipeName string, secretsConfig map[string]secret.Secret) error {
if len(secretsConfig) == 0 {
log.Debugf("%s has no secrets to generate, skipping...", recipeName)
return nil
}
if !internal.Secrets && !internal.NoInput {
prompt := &survey.Confirm{
Message: "Generate app secrets?",
}
if err := survey.AskOne(prompt, &internal.Secrets); err != nil {
return err
}
}
return nil
}
// ensureServerFlag checks if the server flag was used. If not, it asks the user for it.
func ensureServerFlag() error {
servers, err := config.GetServers()
if err != nil {
return err
}
if internal.NewAppServer == "" && !internal.NoInput {
prompt := &survey.Select{
Message: "Select app server:",
Options: servers,
}
if err := survey.AskOne(prompt, &internal.NewAppServer); err != nil {
return err
}
}
if internal.NewAppServer == "" {
return fmt.Errorf("no server provided")
}
return nil
Before: internal.SubCommandBefore,
ArgsUsage: "[<recipe>]",
Action: internal.NewAction,
BashComplete: autocomplete.RecipeNameComplete,
}

View File

@ -2,22 +2,21 @@ package app
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
abraService "coopcloud.tech/abra/pkg/service"
"coopcloud.tech/abra/pkg/service"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/buger/goterm"
dockerFormatter "github.com/docker/cli/cli/command/formatter"
containerTypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -26,152 +25,77 @@ var appPsCommand = cli.Command{
Aliases: []string{"p"},
Usage: "Check app status",
ArgsUsage: "<domain>",
Description: "Show status of a deployed app.",
Description: "Show a more detailed status output of a specific deployed app",
Flags: []cli.Flag{
internal.MachineReadableFlag,
internal.WatchFlag,
internal.DebugFlag,
internal.ChaosFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
}
chaosVersion := config.CHAOS_DEFAULT
statuses, err := appPkg.GetAppStatuses([]appPkg.App{app}, true)
if statusMeta, ok := statuses[app.StackName()]; ok {
if isChaos, exists := statusMeta["chaos"]; exists && isChaos == "true" {
chaosVersion, err = app.Recipe.ChaosVersion()
if err != nil {
log.Fatal(err)
}
}
if !internal.Watch {
showPSOutput(c, app, cl)
return nil
}
showPSOutput(app, cl, deployMeta.Version, chaosVersion)
return nil
goterm.Clear()
for {
goterm.MoveCursor(1, 1)
showPSOutput(c, app, cl)
goterm.Flush()
time.Sleep(2 * time.Second)
}
},
}
// showPSOutput renders ps output.
func showPSOutput(app appPkg.App, cl *dockerClient.Client, deployedVersion, chaosVersion string) {
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
func showPSOutput(c *cli.Context, app config.App, cl *dockerClient.Client) {
filters, err := app.Filters(true, true)
if err != nil {
log.Fatal(err)
return
logrus.Fatal(err)
}
deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: app.StackName(),
Prune: false,
ResolveImage: stack.ResolveImageAlways,
}
compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env)
containers, err := cl.ContainerList(context.Background(), types.ContainerListOptions{Filters: filters})
if err != nil {
log.Fatal(err)
return
logrus.Fatal(err)
}
var rows [][]string
allContainerStats := make(map[string]map[string]string)
for _, service := range compose.Services {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name))
tableCol := []string{"service name", "image", "created", "status", "state", "ports"}
table := formatter.CreateTable(tableCol)
containers, err := cl.ContainerList(context.Background(), containerTypes.ListOptions{Filters: filters})
if err != nil {
log.Fatal(err)
return
for _, container := range containers {
var containerNames []string
for _, containerName := range container.Names {
trimmed := strings.TrimPrefix(containerName, "/")
containerNames = append(containerNames, trimmed)
}
var containerStats map[string]string
if len(containers) == 0 {
containerStats = map[string]string{
"version": deployedVersion,
"chaos": chaosVersion,
"service": service.Name,
"image": "unknown",
"created": "unknown",
"status": "unknown",
"state": "unknown",
"ports": "unknown",
}
} else {
container := containers[0]
containerStats = map[string]string{
"version": deployedVersion,
"chaos": chaosVersion,
"service": abraService.ContainerToServiceName(container.Names, app.StackName()),
"image": formatter.RemoveSha(container.Image),
"created": formatter.HumanDuration(container.Created),
"status": container.Status,
"state": container.State,
"ports": dockerFormatter.DisplayablePorts(container.Ports),
}
tableRow := []string{
service.ContainerToServiceName(container.Names, app.StackName()),
formatter.RemoveSha(container.Image),
formatter.HumanDuration(container.Created),
container.Status,
container.State,
dockerFormatter.DisplayablePorts(container.Ports),
}
allContainerStats[containerStats["service"]] = containerStats
row := []string{
containerStats["service"],
containerStats["image"],
containerStats["created"],
containerStats["status"],
containerStats["state"],
containerStats["ports"],
}
rows = append(rows, row)
table.Append(tableRow)
}
if internal.MachineReadable {
jsonstring, err := json.Marshal(allContainerStats)
if err != nil {
log.Fatal("unable to convert to JSON: %s", err)
}
fmt.Println(string(jsonstring))
return
}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
headers := []string{
"SERVICE",
"IMAGE",
"CREATED",
"STATUS",
"STATE",
"PORTS",
}
table.
Headers(headers...).
Rows(rows...)
fmt.Println(table)
log.Infof("VERSION: %s CHAOS: %s", deployedVersion, chaosVersion)
table.Render()
}
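
With the machine-readable flag, the function above marshals allContainerStats, a map keyed by service name whose entries carry the fields assembled in the loop. A sketch of the resulting JSON shape, with purely illustrative values:

{
  "app": {
    "chaos": "false",
    "created": "2 hours ago",
    "image": "nginx:1.25",
    "ports": "80/tcp",
    "service": "app",
    "state": "running",
    "status": "Up 2 hours (healthy)",
    "version": "1.2.3+3.2.1"
  }
}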

View File

@ -8,81 +8,71 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/log"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/AlecAivazis/survey/v2"
"github.com/docker/docker/api/types"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// Volumes stores the variable from VolumesFlag
var Volumes bool
// VolumesFlag is used to specify if volumes should be deleted when deleting an app
var VolumesFlag = &cli.BoolFlag{
Name: "volumes, V",
Destination: &Volumes,
}
var appRemoveCommand = cli.Command{
Name: "remove",
Aliases: []string{"rm"},
ArgsUsage: "<domain>",
Usage: "Remove all app data, locally and remotely",
Description: `
This command removes everything related to an app which is already undeployed.
By default, it will prompt for confirmation before proceeding. All secrets,
volumes and the local app env file will be deleted.
Only run this command when you are sure you want to completely remove the app
and all associated app data. This is a destructive action, so be careful!
If you would like to delete specific volumes or secrets, please use removal
sub-commands under "app volume" and "app secret" instead.
Please note, if you delete the local app env file without removing volumes and
secrets first, Abra will *not* be able to help you remove them afterwards.
To delete everything without prompt, use the "--force/-f" or the "--no-input/n"
flag.`,
Usage: "Remove an already undeployed app",
Flags: []cli.Flag{
VolumesFlag,
internal.ForceFlag,
internal.DebugFlag,
internal.NoInputFlag,
internal.OfflineFlag,
},
BashComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if !internal.Force && !internal.NoInput {
log.Warnf("ALERTA ALERTA: this will completely remove %s data and config locally and remotely", app.Name)
response := false
prompt := &survey.Confirm{Message: "are you sure?"}
if err := survey.AskOne(prompt, &response); err != nil {
log.Fatal(err)
prompt := &survey.Confirm{
Message: fmt.Sprintf("about to remove %s, are you sure?", app.Name),
}
if err := survey.AskOne(prompt, &response); err != nil {
logrus.Fatal(err)
}
if !response {
log.Fatal("aborting as requested")
logrus.Fatal("aborting as requested")
}
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
isDeployed, _, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if deployMeta.IsDeployed {
log.Fatalf("%s is still deployed. Run \"abra app undeploy %s\"", app.Name, app.Name)
if isDeployed {
logrus.Fatalf("%s is still deployed. Run \"abra app undeploy %s\"", app.Name, app.Name)
}
fs, err := app.Filters(false, false)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: fs})
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
secrets := make(map[string]string)
@ -94,45 +84,91 @@ flag.`,
}
if len(secrets) > 0 {
for _, name := range secretNames {
var secretNamesToRemove []string
if !internal.Force && !internal.NoInput {
secretsPrompt := &survey.MultiSelect{
Message: "which secrets do you want to remove?",
Help: "'x' indicates selected, enter / return to confirm, ctrl-c to exit, vim mode is enabled",
VimMode: true,
Options: secretNames,
Default: secretNames,
}
if err := survey.AskOne(secretsPrompt, &secretNamesToRemove); err != nil {
logrus.Fatal(err)
}
}
if internal.Force || internal.NoInput {
secretNamesToRemove = secretNames
}
for _, name := range secretNamesToRemove {
err := cl.SecretRemove(context.Background(), secrets[name])
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
log.Info(fmt.Sprintf("secret: %s removed", name))
logrus.Info(fmt.Sprintf("secret: %s removed", name))
}
} else {
log.Info("no secrets to remove")
logrus.Info("no secrets to remove")
}
fs, err = app.Filters(false, true)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
volumeList, err := client.GetVolumes(cl, context.Background(), app.Server, fs)
volumeListOKBody, err := cl.VolumeList(context.Background(), fs)
volumeList := volumeListOKBody.Volumes
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
volumeNames := client.GetVolumeNames(volumeList)
if len(volumeNames) > 0 {
err := client.RemoveVolumes(cl, context.Background(), volumeNames, internal.Force, 5)
if err != nil {
log.Fatalf("removing volumes failed: %s", err)
var vols []string
for _, vol := range volumeList {
vols = append(vols, vol.Name)
}
if len(vols) > 0 {
if Volumes {
var removeVols []string
if !internal.Force && !internal.NoInput {
volumesPrompt := &survey.MultiSelect{
Message: "which volumes do you want to remove?",
Help: "'x' indicates selected, enter / return to confirm, ctrl-c to exit, vim mode is enabled",
VimMode: true,
Options: vols,
Default: vols,
}
if err := survey.AskOne(volumesPrompt, &removeVols); err != nil {
logrus.Fatal(err)
}
}
for _, vol := range removeVols {
err := cl.VolumeRemove(context.Background(), vol, internal.Force) // last argument is for force removing
if err != nil {
logrus.Fatal(err)
}
logrus.Info(fmt.Sprintf("volume %s removed", vol))
}
} else {
logrus.Info("no volumes were removed")
}
log.Infof("%d volumes removed successfully", len(volumeNames))
} else {
log.Info("no volumes to remove")
if Volumes {
logrus.Info("no volumes to remove")
}
}
if err = os.Remove(app.Path); err != nil {
log.Fatal(err)
err = os.Remove(app.Path)
if err != nil {
logrus.Fatal(err)
}
log.Info(fmt.Sprintf("file: %s removed", app.Path))
logrus.Info(fmt.Sprintf("file: %s removed", app.Path))
return nil
},
BashComplete: autocomplete.AppNameComplete,
}

View File

@ -6,12 +6,11 @@ import (
"fmt"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/log"
upstream "coopcloud.tech/abra/pkg/upstream/service"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -19,93 +18,53 @@ var appRestartCommand = cli.Command{
Name: "restart",
Aliases: []string{"re"},
Usage: "Restart an app",
ArgsUsage: "<domain> [<service>]",
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
internal.AllServicesFlag,
},
Before: internal.SubCommandBefore,
Description: `
This command restarts services within a deployed app.
Run "abra app ps <domain>" to see a list of service names.
Pass "--all-services/-a" to restart all services.
EXAMPLE:
abra app restart example.com app`,
Before: internal.SubCommandBefore,
Description: `This command restarts a service within a deployed app.`,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.Ensure(false, false); err != nil {
log.Fatal(err)
}
serviceName := c.Args().Get(1)
if serviceName == "" && !internal.AllServices {
err := errors.New("missing <service>")
serviceNameShort := c.Args().Get(1)
if serviceNameShort == "" {
err := errors.New("missing service?")
internal.ShowSubcommandHelpAndError(c, err)
}
if serviceName != "" && internal.AllServices {
log.Fatal("cannot use <service> and --all-services together")
}
var serviceNames []string
if internal.AllServices {
var err error
serviceNames, err = appPkg.GetAppServiceNames(app.Name)
if err != nil {
log.Fatal(err)
}
} else {
serviceNames = append(serviceNames, serviceName)
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
log.Fatal(err)
serviceName := fmt.Sprintf("%s_%s", app.StackName(), serviceNameShort)
logrus.Debugf("attempting to scale %s to 0 (restart logic)", serviceName)
if err := upstream.RunServiceScale(context.Background(), cl, serviceName, 0); err != nil {
logrus.Fatal(err)
}
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
if err := stack.WaitOnService(context.Background(), cl, serviceName, app.Name); err != nil {
logrus.Fatal(err)
}
for _, serviceName := range serviceNames {
stackServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)
logrus.Debugf("%s has been scaled to 0 (restart logic)", serviceName)
log.Debugf("attempting to scale %s to 0", stackServiceName)
if err := upstream.RunServiceScale(context.Background(), cl, stackServiceName, 0); err != nil {
log.Fatal(err)
}
if err := stack.WaitOnService(context.Background(), cl, stackServiceName, app.Name); err != nil {
log.Fatal(err)
}
log.Debugf("%s has been scaled to 0", stackServiceName)
log.Debugf("attempting to scale %s to 1", stackServiceName)
if err := upstream.RunServiceScale(context.Background(), cl, stackServiceName, 1); err != nil {
log.Fatal(err)
}
if err := stack.WaitOnService(context.Background(), cl, stackServiceName, app.Name); err != nil {
log.Fatal(err)
}
log.Debugf("%s has been scaled to 1", stackServiceName)
log.Infof("%s service successfully restarted", serviceName)
logrus.Debugf("attempting to scale %s to 1 (restart logic)", serviceName)
if err := upstream.RunServiceScale(context.Background(), cl, serviceName, 1); err != nil {
logrus.Fatal(err)
}
if err := stack.WaitOnService(context.Background(), cl, serviceName, app.Name); err != nil {
logrus.Fatal(err)
}
logrus.Debugf("%s has been scaled to 1 (restart logic)", serviceName)
logrus.Infof("%s service successfully restarted", serviceNameShort)
return nil
},
}

View File

@ -1,64 +1,201 @@
package app
import (
"context"
"errors"
"fmt"
"os"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var targetPath string
var targetPathFlag = &cli.StringFlag{
Name: "target, t",
Usage: "Target path",
Destination: &targetPath,
type restoreConfig struct {
preHookCmd string
postHookCmd string
}
var appRestoreCommand = cli.Command{
Name: "restore",
Aliases: []string{"rs"},
Usage: "Restore an app backup",
ArgsUsage: "<domain> <service>",
Usage: "Run app restore",
ArgsUsage: "<domain> <service> <file>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
targetPathFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Description: `
Run an app restore.
Pre/post hook commands are defined in the recipe configuration. Abra reads this
configuration and runs the commands in the context of the service before
restoring the backup.
Unlike "abra app backup", restore must be run on a per-service basis. You can
not restore all services in one go. Backup files produced by Abra are
compressed archives which use absolute paths. This allows Abra to restore
according to standard tar command logic.
Example:
abra app restore example.com app ~/.abra/backups/example_com_app_609341138.tar.gz
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
serviceName := c.Args().Get(1)
if serviceName == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("missing <service>?"))
}
cl, err := client.New(app.Server)
backupPath := c.Args().Get(2)
if backupPath == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("missing <file>?"))
}
if _, err := os.Stat(backupPath); err != nil {
if os.IsNotExist(err) {
logrus.Fatalf("%s doesn't exist?", backupPath)
}
}
recipe, err := recipe.Get(app.Recipe)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
log.Fatal(err)
restoreConfigs := make(map[string]restoreConfig)
for _, service := range recipe.Config.Services {
if restoreEnabled, ok := service.Deploy.Labels["backupbot.restore"]; ok {
if restoreEnabled == "true" {
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), service.Name)
rsConfig := restoreConfig{}
logrus.Debugf("restore config detected for %s", fullServiceName)
if preHookCmd, ok := service.Deploy.Labels["backupbot.restore.pre-hook"]; ok {
logrus.Debugf("detected pre-hook command for %s: %s", fullServiceName, preHookCmd)
rsConfig.preHookCmd = preHookCmd
}
if postHookCmd, ok := service.Deploy.Labels["backupbot.restore.post-hook"]; ok {
logrus.Debugf("detected post-hook command for %s: %s", fullServiceName, postHookCmd)
rsConfig.postHookCmd = postHookCmd
}
restoreConfigs[service.Name] = rsConfig
}
}
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
log.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
rsConfig, ok := restoreConfigs[serviceName]
if !ok {
rsConfig = restoreConfig{}
}
if targetPath != "" {
log.Debugf("including TARGET=%s in backupbot exec invocation", targetPath)
execEnv = append(execEnv, fmt.Sprintf("TARGET=%s", targetPath))
}
if err := internal.RunBackupCmdRemote(cl, "restore", targetContainer.ID, execEnv); err != nil {
log.Fatal(err)
if err := runRestore(app, backupPath, serviceName, rsConfig); err != nil {
logrus.Fatal(err)
}
return nil
},
}
// runRestore does the actual restore logic.
func runRestore(app config.App, backupPath, serviceName string, rsConfig restoreConfig) error {
cl, err := client.New(app.Server)
if err != nil {
return err
}
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, true)
if err != nil {
return err
}
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)
if rsConfig.preHookCmd != "" {
splitCmd := internal.SafeSplit(rsConfig.preHookCmd)
logrus.Debugf("split pre-hook command for %s into %s", fullServiceName, splitCmd)
preHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &preHookExecOpts); err != nil {
return err
}
logrus.Infof("successfully ran %s pre-hook command: %s", fullServiceName, rsConfig.preHookCmd)
}
backupReader, err := os.Open(backupPath)
if err != nil {
return err
}
content, err := archive.DecompressStream(backupReader)
if err != nil {
return err
}
// We use absolute paths so tar knows what to do: it will restore files
// according to the paths set in the compressed archive.
restorePath := "/"
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), targetContainer.ID, restorePath, content, copyOpts); err != nil {
return err
}
logrus.Infof("restored %s to %s", backupPath, fullServiceName)
if rsConfig.postHookCmd != "" {
splitCmd := internal.SafeSplit(rsConfig.postHookCmd)
logrus.Debugf("split post-hook command for %s into %s", fullServiceName, splitCmd)
postHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &postHookExecOpts); err != nil {
return err
}
logrus.Infof("successfully ran %s post-hook command: %s", fullServiceName, rsConfig.postHookCmd)
}
return nil
}
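
The restore command only records pre/post hooks for services whose recipe sets the backupbot deploy labels scanned for in the Action above. Expressed as the label map the compose loader would hand back (the label keys are taken from the code; the hook commands are purely illustrative):

// deploy labels as scanned by the restore command; the values shown
// here are only illustrative examples, not taken from any real recipe
labels := map[string]string{
	"backupbot.restore":           "true",
	"backupbot.restore.pre-hook":  "mkdir -p /var/lib/postgresql/restore",
	"backupbot.restore.post-hook": "chown -R postgres:postgres /var/lib/postgresql/data",
}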

View File

@ -4,18 +4,17 @@ import (
"context"
"fmt"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/recipe"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"coopcloud.tech/tagcmp"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/log"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -23,210 +22,171 @@ var appRollbackCommand = cli.Command{
Name: "rollback",
Aliases: []string{"rl"},
Usage: "Roll an app back to a previous version",
ArgsUsage: "<domain> [<version>]",
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.ForceFlag,
internal.ChaosFlag,
internal.NoDomainChecksFlag,
internal.DontWaitConvergeFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Description: `
This command rolls an app back to a previous version.
This command rolls an app back to a previous version if one exists.
Unlike "deploy", chaos operations are not supported here. Only recipe versions
are supported values for "[<version>]".
You may pass "--force/-f" to downgrade to the same version again. This can be
useful if the container runtime has gotten into a weird state.
A rollback can be destructive, please ensure you have a copy of your app data
beforehand.
This action could be destructive, please ensure you have a copy of your app
data beforehand.
EXAMPLE:
abra app rollback foo.example.com
abra app rollback foo.example.com 1.2.3+3.2.1`,
Chaos mode ("--chaos") will deploy your local checkout of a recipe as-is,
including unstaged changes and can be useful for live hacking and testing new
recipes.
`,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
var warnMessages []string
app := internal.ValidateApp(c)
stackName := app.StackName()
specificVersion := c.Args().Get(1)
if specificVersion != "" {
log.Debugf("overriding env file version (%s) with %s", app.Recipe.Version, specificVersion)
app.Recipe.Version = specificVersion
if !internal.Chaos {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
r, err := recipe.Get(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
if err := lint.LintForErrors(app.Recipe); err != nil {
log.Fatal(err)
if err := lint.LintForErrors(r); err != nil {
logrus.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
log.Debugf("checking whether %s is already deployed", stackName)
logrus.Debugf("checking whether %s is already deployed", stackName)
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
}
versions, err := app.Recipe.Tags()
catl, err := recipe.ReadRecipeCatalogue()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
versions, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
if err != nil {
logrus.Fatal(err)
}
if len(versions) == 0 && !internal.Chaos {
logrus.Fatalf("no published releases for %s in the recipe catalogue?", app.Recipe)
}
var availableDowngrades []string
if deployMeta.Version == "unknown" {
if deployedVersion == "unknown" {
availableDowngrades = versions
warnMessages = append(warnMessages, fmt.Sprintf("failed to determine deployed version of %s", app.Name))
logrus.Warnf("failed to determine version of deployed %s", app.Name)
}
if specificVersion != "" {
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
if err != nil {
log.Fatalf("'%s' is not a known version for %s", deployMeta.Version, app.Recipe.Name)
}
parsedSpecificVersion, err := tagcmp.Parse(specificVersion)
if err != nil {
log.Fatalf("'%s' is not a known version for %s", specificVersion, app.Recipe.Name)
}
if parsedSpecificVersion.IsGreaterThan(parsedDeployedVersion) && !parsedSpecificVersion.Equals(parsedDeployedVersion) {
log.Fatalf("%s is not a downgrade for %s?", deployMeta.Version, specificVersion)
}
if parsedSpecificVersion.Equals(parsedDeployedVersion) && !internal.Force {
log.Fatalf("%s is not a downgrade for %s?", deployMeta.Version, specificVersion)
}
availableDowngrades = append(availableDowngrades, specificVersion)
}
if deployMeta.Version != "unknown" && specificVersion == "" {
if deployMeta.IsChaos {
warnMessages = append(warnMessages, fmt.Sprintf("attempting to rollback a chaos deployment"))
}
if deployedVersion != "unknown" && !internal.Chaos {
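// walk the published recipe versions and keep only those strictly older than
// the deployed version as downgrade candidates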
for _, version := range versions {
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if parsedVersion.IsLessThan(parsedDeployedVersion) && !(parsedVersion.Equals(parsedDeployedVersion)) {
if parsedVersion != parsedDeployedVersion && parsedVersion.IsLessThan(parsedDeployedVersion) {
availableDowngrades = append(availableDowngrades, version)
}
}
if len(availableDowngrades) == 0 && !internal.Force {
log.Info("no available downgrades")
if len(availableDowngrades) == 0 {
logrus.Info("no available downgrades, you're on oldest ✌️")
return nil
}
}
availableDowngrades = internal.ReverseStringList(availableDowngrades)
var chosenDowngrade string
if len(availableDowngrades) > 0 {
if internal.Force || internal.NoInput || specificVersion != "" {
chosenDowngrade = availableDowngrades[len(availableDowngrades)-1]
log.Debugf("choosing %s as version to downgrade to (--force/--no-input)", chosenDowngrade)
if !internal.Chaos {
if internal.Force || internal.NoInput {
chosenDowngrade = availableDowngrades[0]
logrus.Debugf("choosing %s as version to downgrade to (--force)", chosenDowngrade)
} else {
msg := fmt.Sprintf("please select a downgrade (version: %s):", deployMeta.Version)
if deployMeta.IsChaos {
msg = fmt.Sprintf("please select a downgrade (version: %s, chaosVersion: %s):", deployMeta.Version, deployMeta.ChaosVersion)
}
prompt := &survey.Select{
Message: msg,
Options: internal.ReverseStringList(availableDowngrades),
Message: fmt.Sprintf("Please select a downgrade (current version: %s):", deployedVersion),
Options: availableDowngrades,
}
if err := survey.AskOne(prompt, &chosenDowngrade); err != nil {
return err
}
}
}
log.Debugf("choosing %s as version to rollback", chosenDowngrade)
if _, err := app.Recipe.EnsureVersion(chosenDowngrade); err != nil {
log.Fatal(err)
if !internal.Chaos {
if err := recipe.EnsureVersion(app.Recipe, chosenDowngrade); err != nil {
logrus.Fatal(err)
}
}
abraShEnv, err := envfile.ReadAbraShEnvVars(app.Recipe.AbraShPath)
if internal.Chaos {
logrus.Warn("chaos mode engaged")
var err error
chosenDowngrade, err = recipe.ChaosVersion(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
for k, v := range abraShEnv {
app.Env[k] = v
}
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
composeFiles, err := config.GetAppComposeFiles(app.Recipe, app.Env)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: stackName,
Prune: false,
ResolveImage: stack.ResolveImageAlways,
Detach: false,
}
compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env)
compose, err := config.GetAppComposeConfig(app.Name, deployOpts, app.Env)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
appPkg.ExposeAllEnv(stackName, compose, app.Env)
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
appPkg.SetChaosVersionLabel(compose, stackName, chosenDowngrade)
appPkg.SetUpdateLabel(compose, stackName, app.Env)
chaosVersion := config.CHAOS_DEFAULT
if deployMeta.IsChaos {
chaosVersion = deployMeta.ChaosVersion
if !internal.Force {
if err := internal.NewVersionOverview(app, deployedVersion, chosenDowngrade, ""); err != nil {
logrus.Fatal(err)
}
}
// NOTE(d1): no release notes implemented for rolling back
if err := internal.NewVersionOverview(
app,
warnMessages,
"rollback",
deployMeta.Version,
chaosVersion,
chosenDowngrade,
""); err != nil {
log.Fatal(err)
}
if err := stack.RunDeploy(cl, deployOpts, compose, stackName, internal.DontWaitConverge); err != nil {
log.Fatal(err)
}
app.Recipe.Version = chosenDowngrade
log.Debugf("choosing %s as version to save to env file", app.Recipe.Version)
if err := app.WriteRecipeVersion(app.Recipe.Version, false); err != nil {
log.Fatalf("writing new recipe version in env file: %s", err)
if err := stack.RunDeploy(cl, deployOpts, compose, app.StackName(), internal.DontWaitConverge); err != nil {
logrus.Fatal(err)
}
return nil

View File

@ -9,11 +9,11 @@ import (
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -55,7 +55,7 @@ var appRunCommand = cli.Command{
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
serviceName := c.Args().Get(1)
@ -65,7 +65,7 @@ var appRunCommand = cli.Command{
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, false)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
cmd := c.Args()[2:]
@ -88,11 +88,11 @@ var appRunCommand = cli.Command{
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
log.Fatal(err)
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
logrus.Fatal(err)
}
return nil

View File

@ -6,37 +6,32 @@ import (
"fmt"
"os"
"strconv"
"strings"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/secret"
"github.com/docker/docker/api/types"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var (
allSecrets bool
allSecretsFlag = &cli.BoolFlag{
Name: "all, a",
Destination: &allSecrets,
Usage: "Generate all secrets",
}
)
var allSecrets bool
var allSecretsFlag = &cli.BoolFlag{
Name: "all, a",
Destination: &allSecrets,
Usage: "Generate all secrets",
}
var (
rmAllSecrets bool
rmAllSecretsFlag = &cli.BoolFlag{
Name: "all, a",
Destination: &rmAllSecrets,
Usage: "Remove all secrets",
}
)
var rmAllSecrets bool
var rmAllSecretsFlag = &cli.BoolFlag{
Name: "all, a",
Destination: &rmAllSecrets,
Usage: "Remove all secrets",
}
var appSecretGenerateCommand = cli.Command{
Name: "generate",
@ -47,17 +42,11 @@ var appSecretGenerateCommand = cli.Command{
internal.DebugFlag,
allSecretsFlag,
internal.PassFlag,
internal.MachineReadableFlag,
internal.OfflineFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
if len(c.Args()) == 1 && !allSecrets {
err := errors.New("missing arguments <secret>/<version> or '--all'")
@ -69,83 +58,52 @@ var appSecretGenerateCommand = cli.Command{
internal.ShowSubcommandHelpAndError(c, err)
}
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
log.Fatal(err)
}
secrets, err := secret.ReadSecretsConfig(app.Path, composeFiles, app.StackName())
if err != nil {
log.Fatal(err)
}
if !allSecrets {
secretsToCreate := make(map[string]string)
secretEnvVars := secret.ReadSecretEnvVars(app.Env)
if allSecrets {
secretsToCreate = secretEnvVars
} else {
secretName := c.Args().Get(1)
secretVersion := c.Args().Get(2)
s, ok := secrets[secretName]
if !ok {
log.Fatalf("%s doesn't exist in the env config?", secretName)
matches := false
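// compare the requested secret name against each parsed secret env var;
// only matching entries are scheduled for generation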
for sec := range secretEnvVars {
parsed := secret.ParseSecretEnvVarName(sec)
if secretName == parsed {
secretsToCreate[sec] = secretVersion
matches = true
}
}
s.Version = secretVersion
secrets = map[string]secret.Secret{
secretName: s,
if !matches {
logrus.Fatalf("%s doesn't exist in the env config?", secretName)
}
}
cl, err := client.New(app.Server)
secretVals, err := secret.GenerateSecrets(secretsToCreate, app.StackName(), app.Server)
if err != nil {
log.Fatal(err)
}
secretVals, err := secret.GenerateSecrets(cl, secrets, app.Server)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if internal.Pass {
for name, data := range secretVals {
if err := secret.PassInsertSecret(data, name, app.Name, app.Server); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
}
if len(secretVals) == 0 {
log.Warn("no secrets generated")
logrus.Warn("no secrets generated")
os.Exit(1)
}
headers := []string{"NAME", "VALUE"}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
table.Headers(headers...)
var rows [][]string
tableCol := []string{"name", "value"}
table := formatter.CreateTable(tableCol)
for name, val := range secretVals {
row := []string{name, val}
rows = append(rows, row)
table.Row(row...)
table.Append([]string{name, val})
}
if internal.MachineReadable {
out, err := formatter.ToJSON(headers, rows)
if err != nil {
log.Fatal("unable to render to JSON: %s", err)
}
fmt.Println(out)
return nil
}
fmt.Println(table)
log.Warnf(
"generated secrets %s shown again, please take note of them %s",
formatter.BoldStyle.Render("NOT"),
formatter.BoldStyle.Render("NOW"),
)
table.Render()
logrus.Warn("generated secrets are not shown again, please take note of them *now*")
return nil
},
@ -158,9 +116,6 @@ var appSecretInsertCommand = cli.Command{
Flags: []cli.Flag{
internal.DebugFlag,
internal.PassFlag,
internal.FileFlag,
internal.TrimFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
ArgsUsage: "<domain> <secret-name> <version> <data>",
@ -179,45 +134,25 @@ Example:
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
if len(c.Args()) != 4 {
internal.ShowSubcommandHelpAndError(c, errors.New("missing arguments?"))
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
name := c.Args().Get(1)
version := c.Args().Get(2)
data := c.Args().Get(3)
if internal.File {
raw, err := os.ReadFile(data)
if err != nil {
log.Fatalf("reading secret from file: %s", err)
}
data = string(raw)
}
if internal.Trim {
data = strings.TrimSpace(data)
}
secretName := fmt.Sprintf("%s_%s_%s", app.StackName(), name, version)
if err := client.StoreSecret(cl, secretName, data, app.Server); err != nil {
log.Fatal(err)
if err := client.StoreSecret(secretName, data, app.Server); err != nil {
logrus.Fatal(err)
}
log.Infof("%s successfully stored on server", secretName)
logrus.Infof("%s successfully stored on server", secretName)
if internal.Pass {
if err := secret.PassInsertSecret(data, name, app.Name, app.Server); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
@ -226,19 +161,19 @@ Example:
}
// secretRm removes a secret.
func secretRm(cl *dockerClient.Client, app appPkg.App, secretName, parsed string) error {
func secretRm(cl *dockerClient.Client, app config.App, secretName, parsed string) error {
if err := cl.SecretRemove(context.Background(), secretName); err != nil {
return err
}
log.Infof("deleted %s successfully from server", secretName)
logrus.Infof("deleted %s successfully from server", secretName)
if internal.PassRemove {
if err := secret.PassRmSecret(parsed, app.StackName(), app.Server); err != nil {
return err
}
log.Infof("deleted %s successfully from local pass store", secretName)
logrus.Infof("deleted %s successfully from local pass store", secretName)
}
return nil
@ -253,8 +188,6 @@ var appSecretRmCommand = cli.Command{
internal.NoInputFlag,
rmAllSecretsFlag,
internal.PassRemoveFlag,
internal.OfflineFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
ArgsUsage: "<domain> [<secret-name>]",
@ -268,19 +201,7 @@ Example:
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
log.Fatal(err)
}
secrets, err := secret.ReadSecretsConfig(app.Path, composeFiles, app.StackName())
if err != nil {
log.Fatal(err)
}
secrets := secret.ReadSecretEnvVars(app.Env)
if c.Args().Get(1) != "" && rmAllSecrets {
internal.ShowSubcommandHelpAndError(c, errors.New("cannot use '<secret-name>' and '--all' together"))
@ -292,17 +213,17 @@ Example:
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
filters, err := app.Filters(false, false)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: filters})
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
remoteSecretNames := make(map[string]bool)
@ -312,13 +233,20 @@ Example:
match := false
secretToRm := c.Args().Get(1)
for secretName, val := range secrets {
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, val.Version)
for sec := range secrets {
secretName := secret.ParseSecretEnvVarName(sec)
secVal, err := secret.ParseSecretEnvVarValue(secrets[sec])
if err != nil {
logrus.Fatal(err)
}
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, secVal.Version)
if _, ok := remoteSecretNames[secretRemoteName]; ok {
if secretToRm != "" {
if secretName == secretToRm {
if err := secretRm(cl, app, secretRemoteName, secretName); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
return nil
@ -327,18 +255,18 @@ Example:
match = true
if err := secretRm(cl, app, secretRemoteName, secretName); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
}
}
if !match && secretToRm != "" {
log.Fatalf("%s doesn't exist on server?", secretToRm)
logrus.Fatalf("%s doesn't exist on server?", secretToRm)
}
if !match {
log.Fatal("no secrets to remove?")
logrus.Fatal("no secrets to remove?")
}
return nil
@ -350,68 +278,60 @@ var appSecretLsCommand = cli.Command{
Aliases: []string{"ls"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
internal.ChaosFlag,
internal.MachineReadableFlag,
},
Before: internal.SubCommandBefore,
Usage: "List all secrets",
BashComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
Usage: "List all secrets",
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
secrets := secret.ReadSecretEnvVars(app.Env)
tableCol := []string{"Name", "Version", "Generated Name", "Created On Server"}
table := formatter.CreateTable(tableCol)
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
headers := []string{"NAME", "VERSION", "GENERATED NAME", "CREATED ON SERVER"}
table, err := formatter.CreateTable()
filters, err := app.Filters(false, false)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
table.Headers(headers...)
secStats, err := secret.PollSecretsStatus(cl, app)
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: filters})
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
var rows [][]string
for _, secStat := range secStats {
row := []string{
secStat.LocalName,
secStat.Version,
secStat.RemoteName,
strconv.FormatBool(secStat.CreatedOnRemote),
remoteSecretNames := make(map[string]bool)
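// index the secrets that already exist on the server by name so each local
// secret can be marked as created (or not) in the table below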
for _, cont := range secretList {
remoteSecretNames[cont.Spec.Annotations.Name] = true
}
for sec := range secrets {
createdRemote := false
secretName := secret.ParseSecretEnvVarName(sec)
secVal, err := secret.ParseSecretEnvVarValue(secrets[sec])
if err != nil {
logrus.Fatal(err)
}
rows = append(rows, row)
table.Row(row...)
}
if len(rows) > 0 {
if internal.MachineReadable {
out, err := formatter.ToJSON(headers, rows)
if err != nil {
log.Fatal("unable to render to JSON: %s", err)
}
fmt.Println(out)
return nil
secretRemoteName := fmt.Sprintf("%s_%s_%s", app.StackName(), secretName, secVal.Version)
if _, ok := remoteSecretNames[secretRemoteName]; ok {
createdRemote = true
}
fmt.Println(table)
return nil
tableRow := []string{secretName, secVal.Version, secretRemoteName, strconv.FormatBool(createdRemote)}
table.Append(tableRow)
}
log.Warnf("no secrets stored for %s", app.Name)
if table.NumLines() > 0 {
table.Render()
} else {
logrus.Warnf("no secrets stored for %s", app.Name)
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
var appSecretCommand = cli.Command{

View File

@ -1,95 +0,0 @@
package app
import (
"context"
"fmt"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/service"
stack "coopcloud.tech/abra/pkg/upstream/stack"
containerTypes "github.com/docker/docker/api/types/container"
"github.com/urfave/cli"
)
var appServicesCommand = cli.Command{
Name: "services",
Aliases: []string{"sr"},
Usage: "Display all services of an app",
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
log.Fatal(err)
}
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
}
filters, err := app.Filters(true, true)
if err != nil {
log.Fatal(err)
}
containers, err := cl.ContainerList(context.Background(), containerTypes.ListOptions{Filters: filters})
if err != nil {
log.Fatal(err)
}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
headers := []string{"SERVICE (SHORT)", "SERVICE (LONG)", "IMAGE"}
table.Headers(headers...)
var rows [][]string
for _, container := range containers {
var containerNames []string
for _, containerName := range container.Names {
trimmed := strings.TrimPrefix(containerName, "/")
containerNames = append(containerNames, trimmed)
}
serviceShortName := service.ContainerToServiceName(container.Names, app.StackName())
serviceLongName := fmt.Sprintf("%s_%s", app.StackName(), serviceShortName)
row := []string{
serviceShortName,
serviceLongName,
formatter.RemoveSha(container.Image),
}
rows = append(rows, row)
}
table.Rows(rows...)
if len(rows) > 0 {
fmt.Println(table)
}
return nil
},
}

View File

@ -2,65 +2,15 @@ package app
import (
"context"
"fmt"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var prune bool
var pruneFlag = &cli.BoolFlag{
Name: "prune, p",
Destination: &prune,
Usage: "Prunes unused containers, networks, and dangling images for an app",
}
// pruneApp runs the equivalent of a "docker system prune" but only filtering
// against resources connected with the app deployment. It is not a system-wide
// prune. Volumes are not pruned to avoid unwanted data loss.
func pruneApp(cl *dockerClient.Client, app appPkg.App) error {
stackName := app.StackName()
ctx := context.Background()
pruneFilters := filters.NewArgs()
stackSearch := fmt.Sprintf("%s*", stackName)
pruneFilters.Add("label", stackSearch)
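// every prune call below is scoped to this label filter, so only resources
// belonging to the app's stack are considered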
cr, err := cl.ContainersPrune(ctx, pruneFilters)
if err != nil {
return err
}
cntSpaceReclaimed := formatter.ByteCountSI(cr.SpaceReclaimed)
log.Infof("containers pruned: %d; space reclaimed: %s", len(cr.ContainersDeleted), cntSpaceReclaimed)
nr, err := cl.NetworksPrune(ctx, pruneFilters)
if err != nil {
return err
}
log.Infof("networks pruned: %d", len(nr.NetworksDeleted))
ir, err := cl.ImagesPrune(ctx, pruneFilters)
if err != nil {
return err
}
imgSpaceReclaimed := formatter.ByteCountSI(ir.SpaceReclaimed)
log.Infof("images pruned: %d; space reclaimed: %s", len(ir.ImagesDeleted), imgSpaceReclaimed)
return nil
}
var appUndeployCommand = cli.Command{
Name: "undeploy",
Aliases: []string{"un"},
@ -68,62 +18,44 @@ var appUndeployCommand = cli.Command{
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.OfflineFlag,
pruneFlag,
},
Before: internal.SubCommandBefore,
Usage: "Undeploy an app",
BashComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
Usage: "Undeploy an app",
Description: `
This does not destroy any of the application data.
However, you should remain vigilant, as your swarm installation will consider
any previously attached volumes as eligible for pruning once undeployed.
Passing "-p/--prune" does not remove those volumes.`,
This does not destroy any of the application data. However, you should remain
vigilant, as your swarm installation will consider any previously attached
volumes as eligible for pruning once undeployed.
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
stackName := app.StackName()
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
log.Debugf("checking whether %s is already deployed", stackName)
logrus.Debugf("checking whether %s is already deployed", stackName)
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
}
chaosVersion := config.CHAOS_DEFAULT
if deployMeta.IsChaos {
chaosVersion = deployMeta.ChaosVersion
if err := internal.DeployOverview(app, deployedVersion, "continue with undeploy?"); err != nil {
logrus.Fatal(err)
}
if err := internal.DeployOverview(app, []string{}, deployMeta.Version, chaosVersion); err != nil {
log.Fatal(err)
}
rmOpts := stack.Remove{
Namespaces: []string{app.StackName()},
Detach: false,
}
rmOpts := stack.Remove{Namespaces: []string{app.StackName()}}
if err := stack.RunRemove(context.Background(), cl, rmOpts); err != nil {
log.Fatal(err)
}
if prune {
if err := pruneApp(cl, app); err != nil {
log.Fatal(err)
}
logrus.Fatal(err)
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}

View File

@ -5,16 +5,15 @@ import (
"fmt"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -22,274 +21,184 @@ var appUpgradeCommand = cli.Command{
Name: "upgrade",
Aliases: []string{"up"},
Usage: "Upgrade an app",
ArgsUsage: "<domain> [<version>]",
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.ForceFlag,
internal.ChaosFlag,
internal.NoDomainChecksFlag,
internal.DontWaitConvergeFlag,
internal.OfflineFlag,
internal.ReleaseNotesFlag,
},
Before: internal.SubCommandBefore,
Description: `
Upgrade an app.
Upgrade an app. You can use it to choose and roll out a new version of an
existing app.
Unlike "deploy", chaos operations are not supported here. Only recipe versions
are supported values for "[<version>]".
This command specifically supports incrementing the version of running apps, as
opposed to "abra app deploy <domain>" which will not change the version of a
deployed app.
An upgrade can be destructive, please ensure you have a copy of your app data
beforehand.
You may pass "--force/-f" to upgrade to the same version again. This can be
useful if the container runtime has gotten into a weird state.
EXAMPLE:
This action could be destructive, please ensure you have a copy of your app
data beforehand.
abra app upgrade foo.example.com
abra app upgrade foo.example.com 1.2.3+3.2.1`,
BashComplete: autocomplete.AppNameComplete,
Chaos mode ("--chaos") will deploy your local checkout of a recipe as-is,
including unstaged changes, and can be useful for live hacking and testing new
recipes.
`,
Action: func(c *cli.Context) error {
var warnMessages []string
app := internal.ValidateApp(c)
stackName := app.StackName()
specificVersion := c.Args().Get(1)
if specificVersion != "" {
log.Debugf("overriding env file version (%s) with %s", app.Recipe.Version, specificVersion)
app.Recipe.Version = specificVersion
if !internal.Chaos {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := app.Recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
r, err := recipe.Get(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
if err := lint.LintForErrors(app.Recipe); err != nil {
log.Fatal(err)
if err := lint.LintForErrors(r); err != nil {
logrus.Fatal(err)
}
log.Debugf("checking whether %s is already deployed", stackName)
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
logrus.Debugf("checking whether %s is already deployed", stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if !deployMeta.IsDeployed {
log.Fatalf("%s is not deployed?", app.Name)
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
}
versions, err := app.Recipe.Tags()
catl, err := recipe.ReadRecipeCatalogue()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
versions, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
if err != nil {
logrus.Fatal(err)
}
if len(versions) == 0 && !internal.Chaos {
logrus.Fatalf("no published releases for %s in the recipe catalogue?", app.Recipe)
}
var availableUpgrades []string
if deployMeta.Version == "unknown" {
if deployedVersion == "unknown" {
availableUpgrades = versions
warnMessages = append(warnMessages, fmt.Sprintf("failed to determine deployed version of %s", app.Name))
logrus.Warnf("failed to determine version of deployed %s", app.Name)
}
if specificVersion != "" {
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
if err != nil {
log.Fatalf("'%s' is not a known version for %s", deployMeta.Version, app.Recipe.Name)
}
parsedSpecificVersion, err := tagcmp.Parse(specificVersion)
if err != nil {
log.Fatalf("'%s' is not a known version for %s", specificVersion, app.Recipe.Name)
}
if parsedSpecificVersion.IsLessThan(parsedDeployedVersion) && !parsedSpecificVersion.Equals(parsedDeployedVersion) {
log.Fatalf("%s is not an upgrade for %s?", deployMeta.Version, specificVersion)
}
if parsedSpecificVersion.Equals(parsedDeployedVersion) && !internal.Force {
log.Fatalf("%s is not an upgrade for %s?", deployMeta.Version, specificVersion)
}
availableUpgrades = append(availableUpgrades, specificVersion)
}
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
if err != nil {
log.Fatal(err)
}
if deployMeta.Version != "unknown" && specificVersion == "" {
if deployMeta.IsChaos {
warnMessages = append(warnMessages, fmt.Sprintf("attempting to upgrade a chaos deployment"))
}
if deployedVersion != "unknown" && !internal.Chaos {
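// keep only recipe versions strictly newer than the deployed version as
// upgrade candidates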
for _, version := range versions {
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil {
logrus.Fatal(err)
}
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if parsedVersion.IsGreaterThan(parsedDeployedVersion) && !(parsedVersion.Equals(parsedDeployedVersion)) {
if parsedVersion.IsGreaterThan(parsedDeployedVersion) {
availableUpgrades = append(availableUpgrades, version)
}
}
if len(availableUpgrades) == 0 && !internal.Force {
log.Info("no available upgrades")
logrus.Infof("no available upgrades, you're on latest (%s) ✌️", deployedVersion)
return nil
}
}
availableUpgrades = internal.ReverseStringList(availableUpgrades)
var chosenUpgrade string
if len(availableUpgrades) > 0 {
if internal.Force || internal.NoInput || specificVersion != "" {
if len(availableUpgrades) > 0 && !internal.Chaos {
if internal.Force || internal.NoInput {
chosenUpgrade = availableUpgrades[len(availableUpgrades)-1]
log.Debugf("choosing %s as version to upgrade to", chosenUpgrade)
logrus.Debugf("choosing %s as version to upgrade to", chosenUpgrade)
} else {
msg := fmt.Sprintf("please select an upgrade (version: %s):", deployMeta.Version)
if deployMeta.IsChaos {
msg = fmt.Sprintf("please select an upgrade (version: %s, chaosVersion: %s):", deployMeta.Version, deployMeta.ChaosVersion)
}
prompt := &survey.Select{
Message: msg,
Options: internal.ReverseStringList(availableUpgrades),
Message: fmt.Sprintf("Please select an upgrade (current version: %s):", deployedVersion),
Options: availableUpgrades,
}
if err := survey.AskOne(prompt, &chosenUpgrade); err != nil {
return err
}
}
}
if internal.Force && chosenUpgrade == "" {
warnMessages = append(warnMessages, fmt.Sprintf("%s is already upgraded to latest", app.Name))
chosenUpgrade = deployMeta.Version
}
// if release notes written after git tag published, read them before we
// check out the tag and then they'll appear to be missing. this covers
// when we obviously will forget to write release notes before publishing
var releaseNotes string
if chosenUpgrade != "" {
parsedChosenUpgrade, err := tagcmp.Parse(chosenUpgrade)
if err != nil {
log.Fatal(err)
}
for _, version := range versions {
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
log.Fatal(err)
}
if parsedVersion.IsGreaterThan(parsedDeployedVersion) && parsedVersion.IsLessThan(parsedChosenUpgrade) {
note, err := app.Recipe.GetReleaseNotes(version)
if err != nil {
return err
}
if note != "" {
releaseNotes += fmt.Sprintf("%s\n", note)
}
}
}
}
log.Debugf("choosing %s as version to upgrade", chosenUpgrade)
if _, err := app.Recipe.EnsureVersion(chosenUpgrade); err != nil {
log.Fatal(err)
}
abraShEnv, err := envfile.ReadAbraShEnvVars(app.Recipe.AbraShPath)
releaseNotes, err := internal.GetReleaseNotes(app.Recipe, chosenUpgrade)
if err != nil {
log.Fatal(err)
return err
}
if !internal.Chaos {
if err := recipe.EnsureVersion(app.Recipe, chosenUpgrade); err != nil {
logrus.Fatal(err)
}
}
if internal.Chaos {
logrus.Warn("chaos mode engaged")
var err error
chosenUpgrade, err = recipe.ChaosVersion(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
if err != nil {
logrus.Fatal(err)
}
for k, v := range abraShEnv {
app.Env[k] = v
}
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
composeFiles, err := config.GetAppComposeFiles(app.Recipe, app.Env)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: stackName,
Prune: false,
ResolveImage: stack.ResolveImageAlways,
Detach: false,
}
compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env)
compose, err := config.GetAppComposeConfig(app.Name, deployOpts, app.Env)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
appPkg.ExposeAllEnv(stackName, compose, app.Env)
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
appPkg.SetChaosVersionLabel(compose, stackName, chosenUpgrade)
appPkg.SetUpdateLabel(compose, stackName, app.Env)
envVars, err := appPkg.CheckEnv(app)
if err != nil {
log.Fatal(err)
if err := internal.NewVersionOverview(app, deployedVersion, chosenUpgrade, releaseNotes); err != nil {
logrus.Fatal(err)
}
for _, envVar := range envVars {
if !envVar.Present {
warnMessages = append(warnMessages,
fmt.Sprintf("env var %s missing from %s.env, present in recipe .env.sample", envVar.Name, app.Domain),
)
}
}
if internal.ReleaseNotes {
fmt.Println()
fmt.Print(releaseNotes)
return nil
}
chaosVersion := config.CHAOS_DEFAULT
if deployMeta.IsChaos {
chaosVersion = deployMeta.ChaosVersion
}
if err := internal.NewVersionOverview(
app,
warnMessages,
"upgrade",
deployMeta.Version,
chaosVersion,
chosenUpgrade,
releaseNotes); err != nil {
log.Fatal(err)
}
stack.WaitTimeout, err = appPkg.GetTimeoutFromLabel(compose, stackName)
if err != nil {
log.Fatal(err)
}
log.Debugf("set waiting timeout to %d s", stack.WaitTimeout)
if err := stack.RunDeploy(cl, deployOpts, compose, stackName, internal.DontWaitConverge); err != nil {
log.Fatal(err)
}
postDeployCmds, ok := app.Env["POST_UPGRADE_CMDS"]
if ok && !internal.DontWaitConverge {
log.Debugf("run the following post-deploy commands: %s", postDeployCmds)
if err := internal.PostCmds(cl, app, postDeployCmds); err != nil {
log.Fatalf("attempting to run post deploy commands, saw: %s", err)
}
}
app.Recipe.Version = chosenUpgrade
log.Debugf("choosing %s as version to save to env file", app.Recipe.Version)
if err := app.WriteRecipeVersion(app.Recipe.Version, false); err != nil {
log.Fatalf("writing new recipe version in env file: %s", err)
if err := stack.RunDeploy(cl, deployOpts, compose, app.StackName(), internal.DontWaitConverge); err != nil {
logrus.Fatal(err)
}
return nil
},
BashComplete: autocomplete.AppNameComplete,
}

101
cli/app/version.go Normal file
View File

@ -0,0 +1,101 @@
package app
import (
"context"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/distribution/reference"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// getImagePath returns the image name
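// e.g. reference.ParseNormalizedNamed("nginx:1.21") normalizes to
// "docker.io/library/nginx:1.21", whose repository path is "library/nginx"
// (the path is then cleaned up further by StripTagMeta)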
func getImagePath(image string) (string, error) {
img, err := reference.ParseNormalizedNamed(image)
if err != nil {
return "", err
}
path := reference.Path(img)
path = formatter.StripTagMeta(path)
logrus.Debugf("parsed %s from %s", path, image)
return path, nil
}
var appVersionCommand = cli.Command{
Name: "version",
Aliases: []string{"v"},
ArgsUsage: "<domain>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
Usage: "Show app versions",
Description: `
Show all information about versioning related to a deployed app. This includes
the individual image names, tags and digests. But also the Co-op Cloud recipe
version.
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
stackName := app.StackName()
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("checking whether %s is already deployed", stackName)
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
logrus.Fatal(err)
}
if deployedVersion == "unknown" {
logrus.Fatalf("failed to determine version of deployed %s", app.Name)
}
if !isDeployed {
logrus.Fatalf("%s is not deployed?", app.Name)
}
recipeMeta, err := recipe.GetRecipeMeta(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
versionsMeta := make(map[string]recipe.ServiceMeta)
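// find the catalogue entry matching the deployed version; it maps service
// names to their pinned image and digest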
for _, recipeVersion := range recipeMeta.Versions {
if currentVersion, exists := recipeVersion[deployedVersion]; exists {
versionsMeta = currentVersion
}
}
if len(versionsMeta) == 0 {
logrus.Fatalf("could not retrieve deployed version (%s) from recipe catalogue?", deployedVersion)
}
tableCol := []string{"version", "service", "image", "digest"}
table := formatter.CreateTable(tableCol)
table.SetAutoMergeCellsByColumnIndex([]int{0})
for serviceName, versionMeta := range versionsMeta {
table.Append([]string{deployedVersion, serviceName, versionMeta.Image, versionMeta.Digest})
}
table.Render()
return nil
},
BashComplete: autocomplete.AppNameComplete,
}

View File

@ -2,15 +2,13 @@ package app
import (
"context"
"fmt"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -28,45 +26,31 @@ var appVolumeListCommand = cli.Command{
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
filters, err := app.Filters(false, true)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
volumes, err := client.GetVolumes(cl, context.Background(), app.Server, filters)
volumeList, err := client.GetVolumes(context.Background(), app.Server, filters)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
headers := []string{"name", "created", "mounted"}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
table := formatter.CreateTable([]string{"name", "created", "mounted"})
var volTable [][]string
for _, volume := range volumeList {
volRow := []string{volume.Name, volume.CreatedAt, volume.Mountpoint}
volTable = append(volTable, volRow)
}
table.Headers(headers...)
table.AppendBulk(volTable)
var rows [][]string
for _, volume := range volumes {
row := []string{volume.Name, volume.CreatedAt, volume.Mountpoint}
rows = append(rows, row)
if table.NumLines() > 0 {
table.Render()
} else {
logrus.Warnf("no volumes created for %s", app.Name)
}
table.Rows(rows...)
if len(rows) > 0 {
fmt.Println(table)
return nil
}
log.Warnf("no volumes created for %s", app.Name)
return nil
},
}
@ -83,7 +67,8 @@ The command is interactive and will show a multiple select input which allows
you to make a selection. Use the "?" key to see more help on navigating this
interface.
Passing "--force/-f" will select all volumes for removal. Be careful.`,
Passing "--force/-f" will select all volumes for removal. Be careful.
`,
ArgsUsage: "<domain>",
Aliases: []string{"rm"},
Flags: []cli.Flag{
@ -91,33 +76,18 @@ Passing "--force/-f" will select all volumes for removal. Be careful.`,
internal.NoInputFlag,
internal.ForceFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
deployMeta, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
log.Fatal(err)
}
if deployMeta.IsDeployed {
log.Fatalf("%s is still deployed. Run \"abra app undeploy %s\"", app.Name, app.Name)
}
filters, err := app.Filters(false, true)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
volumeList, err := client.GetVolumes(cl, context.Background(), app.Server, filters)
volumeList, err := client.GetVolumes(context.Background(), app.Server, filters)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
volumeNames := client.GetVolumeNames(volumeList)
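// volume removal is interactive by default; passing --force/-f selects every
// volume for removal without prompting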
@ -131,7 +101,7 @@ Passing "--force/-f" will select all volumes for removal. Be careful.`,
Default: volumeNames,
}
if err := survey.AskOne(volumesPrompt, &volumesToRemove); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
@ -139,19 +109,16 @@ Passing "--force/-f" will select all volumes for removal. Be careful.`,
volumesToRemove = volumeNames
}
if len(volumesToRemove) > 0 {
err := client.RemoveVolumes(cl, context.Background(), volumesToRemove, internal.Force, 5)
if err != nil {
log.Fatalf("removing volumes failed: %s", err)
}
log.Infof("%d volumes removed successfully", len(volumesToRemove))
} else {
log.Info("no volumes removed")
err = client.RemoveVolumes(context.Background(), app.Server, volumesToRemove, internal.Force)
if err != nil {
logrus.Fatal(err)
}
logrus.Info("volumes removed successfully")
return nil
},
BashComplete: autocomplete.AppNameComplete,
}
var appVolumeCommand = cli.Command{

View File

@ -8,16 +8,56 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/catalogue"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/limit"
"coopcloud.tech/abra/pkg/recipe"
"github.com/go-git/go-git/v5"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// CatalogueSkipList is all the repos that are not recipes.
var CatalogueSkipList = map[string]bool{
"abra": true,
"abra-apps": true,
"abra-aur": true,
"abra-bash": true,
"abra-capsul": true,
"abra-gandi": true,
"abra-hetzner": true,
"apps": true,
"aur-abra-git": true,
"auto-apps-json": true,
"auto-mirror": true,
"backup-bot": true,
"backup-bot-two": true,
"beta.coopcloud.tech": true,
"comrade-renovate-bot": true,
"coopcloud.tech": true,
"coturn": true,
"docker-cp-deploy": true,
"docker-dind-bats-kcov": true,
"docs.coopcloud.tech": true,
"drone-abra": true,
"example": true,
"gardening": true,
"go-abra": true,
"organising": true,
"outline-with-patch": true,
"pyabra": true,
"radicle-seed-node": true,
"recipes-catalogue-json": true,
"recipes-wishlist": true,
"recipes.coopcloud.tech": true,
"stack-ssh-deploy": true,
"swarm-cronjob": true,
"tagcmp": true,
"traefik-cert-dumper": true,
"tyop": true,
}
var catalogueGenerateCommand = cli.Command{
Name: "generate",
Aliases: []string{"g"},
@ -28,12 +68,17 @@ var catalogueGenerateCommand = cli.Command{
internal.PublishFlag,
internal.DryFlag,
internal.SkipUpdatesFlag,
internal.ChaosFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Description: `
Generate a new copy of the recipe catalogue.
Generate a new copy of the recipe catalogue which can be found on:
https://recipes.coopcloud.tech (website that humans read)
https://recipes.coopcloud.tech/recipes.json (JSON that Abra reads)
It polls the entire git.coopcloud.tech/coop-cloud/... recipe repository
listing, parses README.md and git tags to produce recipe metadata which is
loaded into the catalogue JSON file.
It is possible to generate new metadata for a single recipe by passing
<recipe>. The existing local catalogue will be updated, not overwritten.
@ -42,28 +87,20 @@ It is quite easy to get rate limited by Docker Hub when running this command.
If you have a Hub account you can have Abra log you in to avoid this. Pass
"--user" and "--pass".
Push your new release to git.coopcloud.tech with "-p/--publish". This requires
Push your new release to git.coopcloud.tech with "-p/--publish". This requires
that you have permission to git push to these repositories and have your SSH
keys configured on your account.`,
ArgsUsage: "[<recipe>]",
BashComplete: autocomplete.RecipeNameComplete,
keys configured on your account.
`,
ArgsUsage: "[<recipe>]",
Action: func(c *cli.Context) error {
recipeName := c.Args().First()
r := recipe.Get(recipeName)
if recipeName != "" {
internal.ValidateRecipe(c)
}
if !internal.Chaos {
if err := catalogue.EnsureIsClean(); err != nil {
log.Fatal(err)
}
internal.ValidateRecipe(c, true)
}
repos, err := recipe.ReadReposMetadata()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
var barLength int
@ -77,9 +114,9 @@ keys configured on your account.`,
}
if !internal.SkipUpdates {
log.Warn(logMsg)
if err := recipe.UpdateRepositories(repos, recipeName); err != nil {
log.Fatal(err)
logrus.Warn(logMsg)
if err := updateRepositories(repos, recipeName); err != nil {
logrus.Fatal(err)
}
}
@ -91,14 +128,19 @@ keys configured on your account.`,
continue
}
versions, err := r.GetRecipeVersions()
if err != nil {
log.Warn(err)
if _, exists := CatalogueSkipList[recipeMeta.Name]; exists {
catlBar.Add(1)
continue
}
features, category, err := recipe.GetRecipeFeaturesAndCategory(r)
versions, err := recipe.GetRecipeVersions(recipeMeta.Name)
if err != nil {
log.Warn(err)
logrus.Warn(err)
}
features, category, err := recipe.GetRecipeFeaturesAndCategory(recipeMeta.Name)
if err != nil {
logrus.Warn(err)
}
catl[recipeMeta.Name] = recipe.RecipeMeta{
@ -119,97 +161,158 @@ keys configured on your account.`,
recipesJSON, err := json.MarshalIndent(catl, "", " ")
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if recipeName == "" {
if err := ioutil.WriteFile(config.RECIPES_JSON, recipesJSON, 0764); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
} else {
catlFS, err := recipe.ReadRecipeCatalogue(internal.Offline)
catlFS, err := recipe.ReadRecipeCatalogue()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
catlFS[recipeName] = catl[recipeName]
updatedRecipesJSON, err := json.MarshalIndent(catlFS, "", " ")
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if err := ioutil.WriteFile(config.RECIPES_JSON, updatedRecipesJSON, 0764); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
log.Infof("generated new recipe catalogue in %s", config.RECIPES_JSON)
logrus.Infof("generated new recipe catalogue in %s", config.RECIPES_JSON)
cataloguePath := path.Join(config.ABRA_DIR, "catalogue")
if internal.Publish {
isClean, err := gitPkg.IsClean(cataloguePath)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if isClean {
if !internal.Dry {
log.Fatalf("no changes discovered in %s, nothing to publish?", cataloguePath)
logrus.Fatalf("no changes discovered in %s, nothing to publish?", cataloguePath)
}
}
msg := "chore: publish new catalogue release changes"
if err := gitPkg.Commit(cataloguePath, msg, internal.Dry); err != nil {
log.Fatal(err)
if err := gitPkg.Commit(cataloguePath, "**.json", msg, internal.Dry); err != nil {
logrus.Fatal(err)
}
repo, err := git.PlainOpen(cataloguePath)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
sshURL := fmt.Sprintf(config.SSH_URL_TEMPLATE, config.CATALOGUE_JSON_REPO_NAME)
if err := gitPkg.CreateRemote(repo, "origin-ssh", sshURL, internal.Dry); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if err := gitPkg.Push(cataloguePath, "origin-ssh", false, internal.Dry); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
repo, err := git.PlainOpen(cataloguePath)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
head, err := repo.Head()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if !internal.Dry && internal.Publish {
url := fmt.Sprintf("%s/%s/commit/%s", config.REPOS_BASE_URL, config.CATALOGUE_JSON_REPO_NAME, head.Hash())
log.Infof("new changes published: %s", url)
logrus.Infof("new changes published: %s", url)
}
if internal.Dry {
log.Info("dry run: no changes published")
logrus.Info("dry run: no changes published")
}
return nil
},
BashComplete: autocomplete.RecipeNameComplete,
}
// CatalogueCommand defines the `abra catalogue` command and sub-commands.
var CatalogueCommand = cli.Command{
Name: "catalogue",
Usage: "Manage the recipe catalogue",
Aliases: []string{"c"},
ArgsUsage: "<recipe>",
Name: "catalogue",
Usage: "Manage the recipe catalogue",
Aliases: []string{"c"},
ArgsUsage: "<recipe>",
Description: "This command helps recipe packagers interact with the recipe catalogue",
Subcommands: []cli.Command{
catalogueGenerateCommand,
},
}
func updateRepositories(repos recipe.RepoCatalogue, recipeName string) error {
var barLength int
if recipeName != "" {
barLength = 1
} else {
barLength = len(repos)
}
cloneLimiter := limit.New(10)
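// clone/update at most 10 recipe repositories concurrently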
retrieveBar := formatter.CreateProgressbar(barLength, "ensuring recipes are cloned & up-to-date...")
ch := make(chan string, barLength)
for _, repoMeta := range repos {
go func(rm recipe.RepoMeta) {
cloneLimiter.Begin()
defer cloneLimiter.End()
if recipeName != "" && recipeName != rm.Name {
ch <- rm.Name
retrieveBar.Add(1)
return
}
if _, exists := CatalogueSkipList[rm.Name]; exists {
ch <- rm.Name
retrieveBar.Add(1)
return
}
recipeDir := path.Join(config.RECIPES_DIR, rm.Name)
if err := gitPkg.Clone(recipeDir, rm.CloneURL); err != nil {
logrus.Fatal(err)
}
isClean, err := gitPkg.IsClean(recipeDir)
if err != nil {
logrus.Fatal(err)
}
if !isClean {
logrus.Fatalf("%s has locally unstaged changes", rm.Name)
}
if err := recipe.EnsureUpToDate(rm.Name); err != nil {
logrus.Fatal(err)
}
ch <- rm.Name
retrieveBar.Add(1)
}(repoMeta)
}
for range repos {
<-ch // wait for everything
}
return nil
}

View File

@ -12,12 +12,12 @@ import (
"coopcloud.tech/abra/cli/catalogue"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/cli/recipe"
"coopcloud.tech/abra/cli/record"
"coopcloud.tech/abra/cli/server"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/web"
charmLog "github.com/charmbracelet/log"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -25,15 +25,16 @@ import (
var AutoCompleteCommand = cli.Command{
Name: "autocomplete",
Aliases: []string{"ac"},
Usage: "Configure shell autocompletion",
Usage: "Configure shell autocompletion (recommended)",
Description: `
Set up shell auto-completion.
Set up auto-completion in your shell by downloading the relevant files and
showing what additional configuration must be loaded. Supported shells are as
follows: bash, fizsh & zsh.
Supported shells are: bash, fish, fizsh & zsh.
Example:
EXAMPLE:
abra autocomplete bash`,
abra autocomplete bash
`,
ArgsUsage: "<shell>",
Flags: []cli.Flag{
internal.DebugFlag,
@ -49,11 +50,10 @@ EXAMPLE:
"bash": true,
"zsh": true,
"fizsh": true,
"fish": true,
}
if _, ok := supportedShells[shellType]; !ok {
log.Fatalf("%s is not a supported shell right now, sorry", shellType)
logrus.Fatalf("%s is not a supported shell right now, sorry", shellType)
}
if shellType == "fizsh" {
@ -63,47 +63,36 @@ EXAMPLE:
autocompletionDir := path.Join(config.ABRA_DIR, "autocompletion")
if err := os.Mkdir(autocompletionDir, 0764); err != nil {
if !os.IsExist(err) {
log.Fatal(err)
logrus.Fatal(err)
}
log.Debugf("%s already created", autocompletionDir)
logrus.Debugf("%s already created", autocompletionDir)
}
autocompletionFile := path.Join(config.ABRA_DIR, "autocompletion", shellType)
if _, err := os.Stat(autocompletionFile); err != nil && os.IsNotExist(err) {
url := fmt.Sprintf("https://git.coopcloud.tech/coop-cloud/abra/raw/branch/main/scripts/autocomplete/%s", shellType)
log.Infof("fetching %s", url)
logrus.Infof("fetching %s", url)
if err := web.GetFile(autocompletionFile, url); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
switch shellType {
case "bash":
fmt.Println(fmt.Sprintf(`
# run the following commands once to install auto-completion
sudo mkdir -p /etc/bash_completion.d/
# Run the following commands to install auto-completion
sudo mkdir /etc/bash_completion.d/
sudo cp %s /etc/bash_completion.d/abra
echo "source /etc/bash_completion.d/abra" >> ~/.bashrc
source /etc/bash_completion.d/abra
# To test, run the following: "abra app <hit tab key>" - you should see command completion!
# And finally run "abra app ps <hit tab key>" to test things are working, you should see app domains listed!
`, autocompletionFile))
case "zsh":
fmt.Println(fmt.Sprintf(`
# run the following commands to once install auto-completion
sudo mkdir -p /etc/zsh/completion.d/
# Run the following commands to install auto-completion
sudo mkdir /etc/zsh/completion.d/
sudo cp %s /etc/zsh/completion.d/abra
echo "PROG=abra\n_CLI_ZSH_AUTOCOMPLETE_HACK=1\nsource /etc/zsh/completion.d/abra" >> ~/.zshrc
source /etc/zsh/completion.d/abra
# to test, run the following: "abra app <hit tab key>" - you should see command completion!
`, autocompletionFile))
case "fish":
fmt.Println(fmt.Sprintf(`
# run the following commands once to install auto-completion
sudo mkdir -p /etc/fish/completions
sudo cp %s /etc/fish/completions/abra
echo "source /etc/fish/completions/abra" >> ~/.config/fish/config.fish
source /etc/fish/completions/abra
# to test, run the following: "abra app <hit tab key>" - you should see command completion!
# And finally run "abra app ps <hit tab key>" to test things are working, you should see app domains listed!
`, autocompletionFile))
}
@ -115,18 +104,14 @@ source /etc/fish/completions/abra
var UpgradeCommand = cli.Command{
Name: "upgrade",
Aliases: []string{"u"},
Usage: "Upgrade abra",
Usage: "Upgrade Abra itself",
Description: `
Upgrade abra in-place with the latest stable or release candidate.
Upgrade Abra in-place with the latest stable or release candidate.
Use "-r/--rc" to install the latest release candidate. Please bear in mind that
it may contain absolutely catastrophic deal-breaker bugs. Thank you very much
for the testing efforts 💗
EXAMPLE:
abra upgrade
abra upgrade --rc`,
Pass "-r/--rc" to install the latest release candidate. Please bear in mind
that it may contain catastrophic bugs. Thank you very much for the testing
efforts!
`,
Flags: []cli.Flag{internal.RCFlag},
Action: func(c *cli.Context) error {
mainURL := "https://install.abra.coopcloud.tech"
@ -137,10 +122,10 @@ EXAMPLE:
cmd = exec.Command("bash", "-c", fmt.Sprintf("wget -q -O- %s | bash -s -- --rc", releaseCandidateURL))
}
log.Debugf("attempting to run %s", cmd)
logrus.Debugf("attempting to run %s", cmd)
if err := internal.RunCmd(cmd); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
return nil
@ -150,7 +135,7 @@ EXAMPLE:
func newAbraApp(version, commit string) *cli.App {
app := &cli.App{
Name: "abra",
Usage: `the Co-op Cloud command-line utility belt 🎩🐇
Usage: `The Co-op Cloud command-line utility belt 🎩🐇
____ ____ _ _
/ ___|___ ___ _ __ / ___| | ___ _ _ __| |
| | / _ \ _____ / _ \| '_ \ | | | |/ _ \| | | |/ _' |
@ -164,6 +149,7 @@ func newAbraApp(version, commit string) *cli.App {
server.ServerCommand,
recipe.RecipeCommand,
catalogue.CatalogueCommand,
record.RecordCommand,
UpgradeCommand,
AutoCompleteCommand,
},
@ -175,29 +161,25 @@ func newAbraApp(version, commit string) *cli.App {
app.Before = func(c *cli.Context) error {
paths := []string{
config.ABRA_DIR,
config.SERVERS_DIR,
config.RECIPES_DIR,
config.VENDOR_DIR,
config.BACKUP_DIR,
path.Join(config.SERVERS_DIR),
path.Join(config.RECIPES_DIR),
path.Join(config.VENDOR_DIR),
path.Join(config.BACKUP_DIR),
}
for _, path := range paths {
if err := os.Mkdir(path, 0764); err != nil {
if !os.IsExist(err) {
log.Fatal(err)
logrus.Fatal(err)
}
continue
}
}
log.Logger.SetStyles(log.Styles())
charmLog.SetDefault(log.Logger)
log.Debugf("abra version %s, commit %s", version, commit)
logrus.Debugf("abra version %s, commit %s", version, commit)
return nil
}
return app
}
@ -206,6 +188,6 @@ func RunApp(version, commit string) {
app := newAbraApp(version, commit)
if err := app.Run(os.Args); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}

View File

@ -1,67 +1,35 @@
package internal
import (
"context"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/service"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"strings"
)
// RetrieveBackupBotContainer gets the deployed backupbot container.
func RetrieveBackupBotContainer(cl *dockerClient.Client) (types.Container, error) {
ctx := context.Background()
chosenService, err := service.GetServiceByLabel(ctx, cl, config.BackupbotLabel, NoInput)
if err != nil {
return types.Container{}, err
// SafeSplit splits up a string into a list of commands safely.
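// For example, SafeSplit(`sh -c 'echo hello world'`) returns
// []string{"sh", "-c", "echo hello world"}.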
func SafeSplit(s string) []string {
split := strings.Split(s, " ")
var result []string
var inquote string
var block string
for _, i := range split {
if inquote == "" {
if strings.HasPrefix(i, "'") || strings.HasPrefix(i, "\"") {
inquote = string(i[0])
block = strings.TrimPrefix(i, inquote) + " "
} else {
result = append(result, i)
}
} else {
if !strings.HasSuffix(i, inquote) {
block += i + " "
} else {
block += strings.TrimSuffix(i, inquote)
inquote = ""
result = append(result, block)
block = ""
}
}
}
log.Debugf("retrieved %s as backup enabled service", chosenService.Spec.Name)
filters := filters.NewArgs()
filters.Add("name", chosenService.Spec.Name)
targetContainer, err := containerPkg.GetContainer(
ctx,
cl,
filters,
NoInput,
)
if err != nil {
return types.Container{}, err
}
return targetContainer, nil
}
// RunBackupCmdRemote runs a backup related command on a remote backupbot container.
func RunBackupCmdRemote(cl *dockerClient.Client, backupCmd string, containerID string, execEnv []string) error {
execBackupListOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: []string{"/usr/bin/backup", "--", backupCmd},
Detach: false,
Env: execEnv,
Tty: true,
}
log.Debugf("running backup %s on %s with exec config %v", backupCmd, containerID, execBackupListOpts)
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &execBackupListOpts); err != nil {
return err
}
return nil
return result
}

View File

@ -3,7 +3,8 @@ package internal
import (
"os"
"coopcloud.tech/abra/pkg/log"
logrusStack "github.com/Gurpartap/logrus-stack"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -37,20 +38,6 @@ var PassRemoveFlag = &cli.BoolFlag{
Destination: &PassRemove,
}
var File bool
var FileFlag = &cli.BoolFlag{
Name: "file, f",
Usage: "Treat input as a file",
Destination: &File,
}
var Trim bool
var TrimFlag = &cli.BoolFlag{
Name: "trim, t",
Usage: "Trim input",
Destination: &Trim,
}
// Force force functionality without asking.
var Force bool
@ -67,18 +54,19 @@ var Chaos bool
// ChaosFlag turns on/off chaos functionality.
var ChaosFlag = &cli.BoolFlag{
Name: "chaos, C",
Usage: "Proceed with uncommitted recipes changes. Use with care!",
Usage: "Deploy uncommitted recipes changes. Use with care!",
Destination: &Chaos,
}
// Disable tty to run commands from script
var Tty bool
// DNSProvider specifies a DNS provider.
var DNSProvider string
// TtyFlag turns on/off tty mode.
var TtyFlag = &cli.BoolFlag{
Name: "tty, T",
Usage: "Disables TTY mode to run this command from a script.",
Destination: &Tty,
// DNSProviderFlag selects a DNS provider.
var DNSProviderFlag = &cli.StringFlag{
Name: "provider, p",
Value: "",
Usage: "DNS provider",
Destination: &DNSProvider,
}
var NoInput bool
@ -88,6 +76,163 @@ var NoInputFlag = &cli.BoolFlag{
Destination: &NoInput,
}
var DNSType string
var DNSTypeFlag = &cli.StringFlag{
Name: "record-type, rt",
Value: "",
Usage: "Domain name record type (e.g. A)",
Destination: &DNSType,
}
var DNSName string
var DNSNameFlag = &cli.StringFlag{
Name: "record-name, rn",
Value: "",
Usage: "Domain name record name (e.g. mysubdomain)",
Destination: &DNSName,
}
var DNSValue string
var DNSValueFlag = &cli.StringFlag{
Name: "record-value, rv",
Value: "",
Usage: "Domain name record value (e.g. 192.168.1.1)",
Destination: &DNSValue,
}
var DNSTTL string
var DNSTTLFlag = &cli.StringFlag{
Name: "record-ttl, rl",
Value: "600s",
Usage: "Domain name TTL value (seconds)",
Destination: &DNSTTL,
}
var DNSPriority int
var DNSPriorityFlag = &cli.IntFlag{
Name: "record-priority, rp",
Value: 10,
Usage: "Domain name priority value",
Destination: &DNSPriority,
}
var ServerProvider string
var ServerProviderFlag = &cli.StringFlag{
Name: "provider, p",
Usage: "3rd party server provider",
Destination: &ServerProvider,
}
var CapsulInstanceURL string
var CapsulInstanceURLFlag = &cli.StringFlag{
Name: "capsul-url, cu",
Value: "yolo.servers.coop",
Usage: "capsul instance URL",
Destination: &CapsulInstanceURL,
}
var CapsulName string
var CapsulNameFlag = &cli.StringFlag{
Name: "capsul-name, cn",
Value: "",
Usage: "capsul name",
Destination: &CapsulName,
}
var CapsulType string
var CapsulTypeFlag = &cli.StringFlag{
Name: "capsul-type, ct",
Value: "f1-xs",
Usage: "capsul type",
Destination: &CapsulType,
}
var CapsulImage string
var CapsulImageFlag = &cli.StringFlag{
Name: "capsul-image, ci",
Value: "debian10",
Usage: "capsul image",
Destination: &CapsulImage,
}
var CapsulSSHKeys cli.StringSlice
var CapsulSSHKeysFlag = &cli.StringSliceFlag{
Name: "capsul-ssh-keys, cs",
Usage: "capsul SSH key",
Value: &CapsulSSHKeys,
}
var CapsulAPIToken string
var CapsulAPITokenFlag = &cli.StringFlag{
Name: "capsul-token, ca",
Usage: "capsul API token",
EnvVar: "CAPSUL_TOKEN",
Destination: &CapsulAPIToken,
}
var HetznerCloudName string
var HetznerCloudNameFlag = &cli.StringFlag{
Name: "hetzner-name, hn",
Value: "",
Usage: "hetzner cloud name",
Destination: &HetznerCloudName,
}
var HetznerCloudType string
var HetznerCloudTypeFlag = &cli.StringFlag{
Name: "hetzner-type, ht",
Usage: "hetzner cloud type",
Destination: &HetznerCloudType,
Value: "cx11",
}
var HetznerCloudImage string
var HetznerCloudImageFlag = &cli.StringFlag{
Name: "hetzner-image, hi",
Usage: "hetzner cloud image",
Value: "debian-10",
Destination: &HetznerCloudImage,
}
var HetznerCloudSSHKeys cli.StringSlice
var HetznerCloudSSHKeysFlag = &cli.StringSliceFlag{
Name: "hetzner-ssh-keys, hs",
Usage: "hetzner cloud SSH keys (e.g. me@foo.com)",
Value: &HetznerCloudSSHKeys,
}
var HetznerCloudLocation string
var HetznerCloudLocationFlag = &cli.StringFlag{
Name: "hetzner-location, hl",
Usage: "hetzner cloud server location",
Value: "hel1",
Destination: &HetznerCloudLocation,
}
var HetznerCloudAPIToken string
var HetznerCloudAPITokenFlag = &cli.StringFlag{
Name: "hetzner-token, ha",
Usage: "hetzner cloud API token",
EnvVar: "HCLOUD_TOKEN",
Destination: &HetznerCloudAPIToken,
}
// Debug stores the variable from DebugFlag.
var Debug bool
@ -98,36 +243,6 @@ var DebugFlag = &cli.BoolFlag{
Usage: "Show DEBUG messages",
}
// Offline stores the variable from OfflineFlag.
var Offline bool
// OfflineFlag turns on/off offline mode.
var OfflineFlag = &cli.BoolFlag{
Name: "offline, o",
Destination: &Offline,
Usage: "Prefer offline & filesystem access when possible",
}
// ReleaseNotes stores the variable from ReleaseNotesFlag.
var ReleaseNotes bool
// ReleaseNotesFlag turns on/off printing only release notes when upgrading.
var ReleaseNotesFlag = &cli.BoolFlag{
Name: "releasenotes, r",
Destination: &ReleaseNotes,
Usage: "Only show release notes",
}
// MachineReadable stores the variable from MachineReadableFlag
var MachineReadable bool
// MachineReadableFlag turns on/off machine readable output where supported
var MachineReadableFlag = &cli.BoolFlag{
Name: "machine, m",
Destination: &MachineReadable,
Usage: "Output in a machine-readable format (where supported)",
}
// RC signifies the latest release candidate
var RC bool
@ -135,7 +250,7 @@ var RC bool
var RCFlag = &cli.BoolFlag{
Name: "rc, r",
Destination: &RC,
Usage: "Install the latest release candidate",
Usage: "Insatll the latest release candidate",
}
var Major bool
@ -192,7 +307,7 @@ var NewAppServerFlag = &cli.StringFlag{
var NoDomainChecks bool
var NoDomainChecksFlag = &cli.BoolFlag{
Name: "no-domain-checks, D",
Usage: "Disable public DNS checks",
Usage: "Disable app domain sanity checks",
Destination: &NoDomainChecks,
}
@ -203,14 +318,6 @@ var StdErrOnlyFlag = &cli.BoolFlag{
Destination: &StdErrOnly,
}
var SinceLogs string
var SinceLogsFlag = &cli.StringFlag{
Name: "since, S",
Value: "",
Usage: "tail logs since YYYY-MM-DDTHH:MM:SSZ",
Destination: &SinceLogs,
}
var DontWaitConverge bool
var DontWaitConvergeFlag = &cli.BoolFlag{
Name: "no-converge-checks, c",
@ -246,50 +353,80 @@ var AllTagsFlag = &cli.BoolFlag{
Destination: &AllTags,
}
var LocalCmd bool
var LocalCmdFlag = &cli.BoolFlag{
Name: "local, l",
Usage: "Run command locally",
Destination: &LocalCmd,
}
// SSHFailMsg is a hopefully helpful SSH failure message
var SSHFailMsg = `
Woops, Abra is unable to connect to %s.
var RemoteUser string
var RemoteUserFlag = &cli.StringFlag{
Name: "user, u",
Value: "",
Usage: "User to run command within a service context",
Destination: &RemoteUser,
}
Here are a few tips for debugging your local SSH config. Abra uses plain ol'
SSH to make connections to servers, so if your SSH config is working, Abra is
working.
var GitName string
var GitNameFlag = &cli.StringFlag{
Name: "git-name, gn",
Value: "",
Usage: "Git (user) name to do commits with",
Destination: &GitName,
}
In the first place, Abra will always try to read your Docker context connection
string for SSH connection details. You can view your server context configs
with the following command. Are they correct?
var GitEmail string
var GitEmailFlag = &cli.StringFlag{
Name: "git-email, ge",
Value: "",
Usage: "Git email name to do commits with",
Destination: &GitEmail,
}
abra server ls
var AllServices bool
var AllServicesFlag = &cli.BoolFlag{
Name: "all-services, a",
Usage: "Restart all services",
Destination: &AllServices,
}
Is your ssh-agent running? You can start it by running the following command:
eval "$(ssh-agent)"
Is your SSH private key loaded? You can check by running the following command:
ssh-add -L
If not, you can add it with:
ssh-add ~/.ssh/<private-key-part>
If you are using a non-default public/private key, you can configure this in
your ~/.ssh/config file which Abra will read in order to figure out connection
details:
Host foo.coopcloud.tech
Hostname foo.coopcloud.tech
User bar
Port 12345
IdentityFile ~/.ssh/bar@foo.coopcloud.tech
If you're only using password authentication, you can use the following config:
Host foo.coopcloud.tech
Hostname foo.coopcloud.tech
User bar
Port 12345
PreferredAuthentications=password
PubkeyAuthentication=no
Good luck!
`
var ServerAddFailMsg = `
Failed to add server %s.
This could be caused by two things.
Abra isn't picking up your SSH configuration or you need to specify it on the
command-line (e.g you use a non-standard port or username to connect). Run
"server add" with "-d/--debug" to learn more about what Abra is doing under the
hood.
Docker is not installed on your server. You can pass "-p/--provision" to
install Docker and initialise Docker Swarm mode. See help output for "server
add"
See "abra server add -h" for more.
`
// SubCommandBefore wires up pre-action machinery (e.g. --debug handling).
func SubCommandBefore(c *cli.Context) error {
if Debug {
log.SetLevel(log.DebugLevel)
log.SetOutput(os.Stderr)
log.SetReportCaller(true)
logrus.SetLevel(logrus.DebugLevel)
logrus.SetFormatter(&logrus.TextFormatter{})
logrus.SetOutput(os.Stderr)
logrus.AddHook(logrusStack.StandardHook())
}
return nil

View File

@ -2,109 +2,10 @@ package internal
import (
"bufio"
"context"
"fmt"
"io/ioutil"
"os/exec"
"strings"
appPkg "coopcloud.tech/abra/pkg/app"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
)
// RunCmdRemote executes an abra.sh command in the target service
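// The command is run by sourcing the copied abra.sh inside the target
// container; e.g. a (hypothetical) cmdName "migrate" with cmdArgs "--force"
// ends up as: TARGET=<service>; APP_NAME=<app>; STACK_NAME=<stack>; . /tmp/abra.sh; migrate --force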
func RunCmdRemote(cl *dockerClient.Client, app appPkg.App, abraSh, serviceName, cmdName, cmdArgs string) error {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, false)
if err != nil {
return err
}
log.Debugf("retrieved %s as target container on %s", formatter.ShortenID(targetContainer.ID), app.Server)
toTarOpts := &archive.TarOptions{NoOverwriteDirNonDir: true, Compression: archive.Gzip}
content, err := archive.TarWithOptions(abraSh, toTarOpts)
if err != nil {
return err
}
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), targetContainer.ID, "/tmp", content, copyOpts); err != nil {
return err
}
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
shell := "/bin/bash"
findShell := []string{"test", "-e", shell}
execCreateOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: findShell,
Detach: false,
Tty: false,
}
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
log.Infof("%s does not exist for %s, use /bin/sh as fallback", shell, app.Name)
shell = "/bin/sh"
}
var cmd []string
if cmdArgs != "" {
cmd = []string{shell, "-c", fmt.Sprintf("TARGET=%s; APP_NAME=%s; STACK_NAME=%s; . /tmp/abra.sh; %s %s", serviceName, app.Name, app.StackName(), cmdName, cmdArgs)}
} else {
cmd = []string{shell, "-c", fmt.Sprintf("TARGET=%s; APP_NAME=%s; STACK_NAME=%s; . /tmp/abra.sh; %s", serviceName, app.Name, app.StackName(), cmdName)}
}
log.Debugf("running command: %s", strings.Join(cmd, " "))
if RemoteUser != "" {
log.Debugf("running command with user %s", RemoteUser)
execCreateOpts.User = RemoteUser
}
execCreateOpts.Cmd = cmd
execCreateOpts.Tty = true
if Tty {
execCreateOpts.Tty = false
}
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
return err
}
return nil
}
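// EnsureCommand checks that the recipe's abra.sh defines the named command,
// using a simple substring match over the file contents (see below).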
func EnsureCommand(abraSh, recipeName, execCmd string) error {
bytes, err := ioutil.ReadFile(abraSh)
if err != nil {
return err
}
if !strings.Contains(string(bytes), execCmd) {
return fmt.Errorf("%s doesn't have a %s function", recipeName, execCmd)
}
return nil
}
// RunCmd runs a shell command and streams stdout/stderr in real-time.
func RunCmd(cmd *exec.Cmd) error {
r, err := cmd.StdoutPipe()

View File

@ -1,107 +1,165 @@
package internal
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/dns"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/AlecAivazis/survey/v2"
"github.com/charmbracelet/lipgloss"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var borderStyle = lipgloss.NewStyle().
BorderStyle(lipgloss.ThickBorder()).
Padding(0, 1, 0, 1).
MaxWidth(79).
BorderForeground(lipgloss.Color("63"))
// DeployAction is the main command-line action for this package
func DeployAction(c *cli.Context) error {
app := ValidateApp(c)
var headerStyle = lipgloss.NewStyle().
Underline(true).
Bold(true)
var leftStyle = lipgloss.NewStyle().
Bold(true)
var rightStyle = lipgloss.NewStyle()
// horizontal is a JoinHorizontal helper function.
func horizontal(left, mid, right string) string {
return lipgloss.JoinHorizontal(lipgloss.Left, left, mid, right)
}
// NewVersionOverview shows an upgrade or downgrade overview
func NewVersionOverview(
app appPkg.App,
warnMessages []string,
kind,
currentVersion,
chaosVersion,
newVersion,
releaseNotes string) error {
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = strings.Join(strings.Split(composeFiles, ":"), "\n")
if !Chaos {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
server := app.Server
if app.Server == "default" {
server = "local"
r, err := recipe.Get(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
body := strings.Builder{}
body.WriteString(
borderStyle.Render(
lipgloss.JoinVertical(
lipgloss.Center,
headerStyle.Render(fmt.Sprintf("%s OVERVIEW", strings.ToUpper(kind))),
lipgloss.JoinVertical(
lipgloss.Left,
horizontal(leftStyle.Render("SERVER"), " ", rightStyle.Render(server)),
horizontal(leftStyle.Render("DOMAIN"), " ", rightStyle.Render(app.Domain)),
horizontal(leftStyle.Render("RECIPE"), " ", rightStyle.Render(app.Recipe.Name)),
horizontal(leftStyle.Render("CONFIG"), " ", rightStyle.Render(deployConfig)),
horizontal(leftStyle.Render("VERSION"), " ", rightStyle.Render(currentVersion)),
horizontal(leftStyle.Render("CHAOS"), " ", rightStyle.Render(chaosVersion)),
horizontal(leftStyle.Render("DEPLOY"), " ", rightStyle.Padding(0).Render(newVersion)),
),
),
),
)
fmt.Println(body.String())
if err := lint.LintForErrors(r); err != nil {
logrus.Fatal(err)
}
if releaseNotes != "" && newVersion != "" {
fmt.Println()
fmt.Print(releaseNotes)
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
logrus.Debugf("checking whether %s is already deployed", app.StackName())
isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, app.StackName())
if err != nil {
logrus.Fatal(err)
}
if isDeployed {
if Force || Chaos {
logrus.Warnf("%s is already deployed but continuing (--force/--chaos)", app.Name)
} else {
logrus.Fatalf("%s is already deployed", app.Name)
}
}
version := deployedVersion
if version == "unknown" && !Chaos {
catl, err := recipe.ReadRecipeCatalogue()
if err != nil {
logrus.Fatal(err)
}
versions, err := recipe.GetRecipeCatalogueVersions(app.Recipe, catl)
if err != nil {
logrus.Fatal(err)
}
if len(versions) > 0 {
version = versions[len(versions)-1]
logrus.Debugf("choosing %s as version to deploy", version)
if err := recipe.EnsureVersion(app.Recipe, version); err != nil {
logrus.Fatal(err)
}
} else {
head, err := git.GetRecipeHead(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
version = formatter.SmallSHA(head.String())
logrus.Warn("no versions detected, using latest commit")
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
}
if version == "unknown" && !Chaos {
logrus.Debugf("choosing %s as version to deploy", version)
if err := recipe.EnsureVersion(app.Recipe, version); err != nil {
logrus.Fatal(err)
}
}
if version != "unknown" && !Chaos {
if err := recipe.EnsureVersion(app.Recipe, version); err != nil {
logrus.Fatal(err)
}
}
if Chaos {
logrus.Warnf("chaos mode engaged")
var err error
version, err = recipe.ChaosVersion(app.Recipe)
if err != nil {
logrus.Fatal(err)
}
}
abraShPath := fmt.Sprintf("%s/%s/%s", config.RECIPES_DIR, app.Recipe, "abra.sh")
abraShEnv, err := config.ReadAbraShEnvVars(abraShPath)
if err != nil {
logrus.Fatal(err)
}
for k, v := range abraShEnv {
app.Env[k] = v
}
composeFiles, err := config.GetAppComposeFiles(app.Recipe, app.Env)
if err != nil {
logrus.Fatal(err)
}
deployOpts := stack.Deploy{
Composefiles: composeFiles,
Namespace: app.StackName(),
Prune: false,
ResolveImage: stack.ResolveImageAlways,
}
compose, err := config.GetAppComposeConfig(app.Name, deployOpts, app.Env)
if err != nil {
logrus.Fatal(err)
}
if err := DeployOverview(app, version, "continue with deployment?"); err != nil {
logrus.Fatal(err)
}
if !NoDomainChecks {
domainName := app.Env["DOMAIN"]
if _, err = dns.EnsureDomainsResolveSameIPv4(domainName, app.Server); err != nil {
logrus.Fatal(err)
}
} else {
warnMessages = append(warnMessages, fmt.Sprintf("no release notes available for %s", newVersion))
logrus.Warn("skipping domain checks as requested")
}
for _, msg := range warnMessages {
log.Warn(msg)
}
if NoInput {
return nil
}
response := false
prompt := &survey.Confirm{Message: "proceed?"}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
log.Fatal("deployment cancelled")
if err := stack.RunDeploy(cl, deployOpts, compose, app.Name, DontWaitConverge); err != nil {
logrus.Fatal(err)
}
return nil
}
// DeployOverview shows a deployment overview
func DeployOverview(app appPkg.App, warnMessages []string, version, chaosVersion string) error {
func DeployOverview(app config.App, version, message string) error {
tableCol := []string{"server", "recipe", "config", "domain", "version"}
table := formatter.CreateTable(tableCol)
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = strings.Join(strings.Split(composeFiles, ":"), "\n")
@ -112,28 +170,60 @@ func DeployOverview(app appPkg.App, warnMessages []string, version, chaosVersion
server = "local"
}
body := strings.Builder{}
body.WriteString(
borderStyle.Render(
lipgloss.JoinVertical(
lipgloss.Center,
headerStyle.Render("DEPLOY OVERVIEW"),
lipgloss.JoinVertical(
lipgloss.Left,
horizontal(leftStyle.Render("SERVER"), " ", rightStyle.Render(server)),
horizontal(leftStyle.Render("DOMAIN"), " ", rightStyle.Render(app.Domain)),
horizontal(leftStyle.Render("RECIPE"), " ", rightStyle.Render(app.Recipe.Name)),
horizontal(leftStyle.Render("CONFIG"), " ", rightStyle.Render(deployConfig)),
horizontal(leftStyle.Render("VERSION"), " ", rightStyle.Render(version)),
horizontal(leftStyle.Render("CHAOS"), " ", rightStyle.Padding(0).Render(chaosVersion)),
),
),
),
)
fmt.Println(body.String())
table.Append([]string{server, app.Recipe, deployConfig, app.Domain, version})
table.Render()
for _, msg := range warnMessages {
log.Warn(msg)
if NoInput {
return nil
}
response := false
prompt := &survey.Confirm{
Message: message,
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
return nil
}
// NewVersionOverview shows an upgrade or downgrade overview
func NewVersionOverview(app config.App, currentVersion, newVersion, releaseNotes string) error {
tableCol := []string{"server", "recipe", "config", "domain", "current version", "to be deployed"}
table := formatter.CreateTable(tableCol)
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = strings.Join(strings.Split(composeFiles, ":"), "\n")
}
server := app.Server
if app.Server == "default" {
server = "local"
}
table.Append([]string{server, app.Recipe, deployConfig, app.Domain, currentVersion, newVersion})
table.Render()
if releaseNotes == "" {
var err error
releaseNotes, err = GetReleaseNotes(app.Recipe, newVersion)
if err != nil {
return err
}
}
if releaseNotes != "" && newVersion != "" {
fmt.Println()
fmt.Println(fmt.Sprintf("%s release notes:\n\n%s", newVersion, releaseNotes))
} else {
logrus.Warnf("no release notes available for %s", newVersion)
}
if NoInput {
@ -141,68 +231,36 @@ func DeployOverview(app appPkg.App, warnMessages []string, version, chaosVersion
}
response := false
prompt := &survey.Confirm{Message: "proceed?"}
prompt := &survey.Confirm{
Message: "continue with deployment?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
log.Fatal("deployment cancelled")
logrus.Fatal("exiting as requested")
}
return nil
}
// PostCmds parses a string of commands and executes them inside of the respective services
// the commands string must have the following format:
// "<service> <command> <arguments>|<service> <command> <arguments>|... "
func PostCmds(cl *dockerClient.Client, app appPkg.App, commands string) error {
if _, err := os.Stat(app.Recipe.AbraShPath); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf(fmt.Sprintf("%s does not exist for %s?", app.Recipe.AbraShPath, app.Name))
}
return err
// GetReleaseNotes prints release notes for a recipe version
func GetReleaseNotes(recipeName, version string) (string, error) {
if version == "" {
return "", nil
}
for _, command := range strings.Split(commands, "|") {
commandParts := strings.Split(command, " ")
if len(commandParts) < 2 {
return fmt.Errorf(fmt.Sprintf("not enough arguments: %s", command))
}
targetServiceName := commandParts[0]
cmdName := commandParts[1]
parsedCmdArgs := ""
if len(commandParts) > 2 {
parsedCmdArgs = fmt.Sprintf("%s ", strings.Join(commandParts[2:], " "))
}
log.Infof("running post-command '%s %s' in container %s", cmdName, parsedCmdArgs, targetServiceName)
fpath := path.Join(config.RECIPES_DIR, recipeName, "release", version)
if err := EnsureCommand(app.Recipe.AbraShPath, app.Recipe.Name, cmdName); err != nil {
return err
}
serviceNames, err := appPkg.GetAppServiceNames(app.Name)
if _, err := os.Stat(fpath); !os.IsNotExist(err) {
releaseNotes, err := ioutil.ReadFile(fpath)
if err != nil {
return err
}
matchingServiceName := false
for _, serviceName := range serviceNames {
if serviceName == targetServiceName {
matchingServiceName = true
}
}
if !matchingServiceName {
return fmt.Errorf(fmt.Sprintf("no service %s for %s?", targetServiceName, app.Name))
}
log.Debugf("running command %s %s within the context of %s_%s", cmdName, parsedCmdArgs, app.StackName(), targetServiceName)
Tty = true
if err := RunCmdRemote(cl, app, app.Recipe.AbraShPath, targetServiceName, cmdName, parsedCmdArgs); err != nil {
return err
return "", err
}
return string(releaseNotes), nil
}
return nil
return "", nil
}

View File

@ -3,7 +3,7 @@ package internal
import (
"os"
"coopcloud.tech/abra/pkg/log"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -11,8 +11,8 @@ import (
// terminal, and shows the help command.
func ShowSubcommandHelpAndError(c *cli.Context, err interface{}) {
if err2 := cli.ShowSubcommandHelp(c); err2 != nil {
log.Error(err2)
logrus.Error(err2)
}
log.Error(err)
logrus.Error(err)
os.Exit(1)
}

196
cli/internal/new.go Normal file
View File

@ -0,0 +1,196 @@
package internal
import (
"fmt"
"path"
"coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/secret"
"coopcloud.tech/abra/pkg/ssh"
"github.com/AlecAivazis/survey/v2"
"github.com/olekukonko/tablewriter"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// AppSecrets represents all app secrets
type AppSecrets map[string]string
// RecipeName is used for configuring recipe name programmatically
var RecipeName string
// createSecrets creates all secrets for a new app.
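// It reads the secret env var definitions from the new app's env file, generates
// a value for each, and, when Pass is set, also inserts them into the pass store.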
func createSecrets(sanitisedAppName string) (AppSecrets, error) {
appEnvPath := path.Join(config.ABRA_DIR, "servers", NewAppServer, fmt.Sprintf("%s.env", Domain))
appEnv, err := config.ReadEnv(appEnvPath)
if err != nil {
return nil, err
}
secretEnvVars := secret.ReadSecretEnvVars(appEnv)
secrets, err := secret.GenerateSecrets(secretEnvVars, sanitisedAppName, NewAppServer)
if err != nil {
return nil, err
}
if Pass {
for secretName := range secrets {
secretValue := secrets[secretName]
if err := secret.PassInsertSecret(secretValue, secretName, Domain, NewAppServer); err != nil {
return nil, err
}
}
}
return secrets, nil
}
// ensureDomainFlag checks if the domain flag was used. If not, it asks the user for it.
func ensureDomainFlag(recipe recipe.Recipe, server string) error {
if Domain == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify app domain",
Default: fmt.Sprintf("%s.%s", recipe.Name, server),
}
if err := survey.AskOne(prompt, &Domain); err != nil {
return err
}
}
if Domain == "" {
return fmt.Errorf("no domain provided")
}
return nil
}
// promptForSecrets asks if we should generate secrets for a new app.
func promptForSecrets(appName string) error {
app, err := app.Get(appName)
if err != nil {
return err
}
secretEnvVars := secret.ReadSecretEnvVars(app.Env)
if len(secretEnvVars) == 0 {
logrus.Debugf("%s has no secrets to generate, skipping...", app.Recipe)
return nil
}
if !Secrets && !NoInput {
prompt := &survey.Confirm{
Message: "Generate app secrets?",
}
if err := survey.AskOne(prompt, &Secrets); err != nil {
return err
}
}
return nil
}
// ensureServerFlag checks if the server flag was used. If not, it asks the user for it.
func ensureServerFlag() error {
servers, err := config.GetServers()
if err != nil {
return err
}
if NewAppServer == "" && !NoInput {
prompt := &survey.Select{
Message: "Select app server:",
Options: servers,
}
if err := survey.AskOne(prompt, &NewAppServer); err != nil {
return err
}
}
if NewAppServer == "" {
return fmt.Errorf("no server provided")
}
return nil
}
// NewAction is the new app creation logic
func NewAction(c *cli.Context) error {
recipe := ValidateRecipeWithPrompt(c, false)
if err := recipePkg.EnsureUpToDate(recipe.Name); err != nil {
logrus.Fatal(err)
}
if err := ensureServerFlag(); err != nil {
logrus.Fatal(err)
}
if err := ensureDomainFlag(recipe, NewAppServer); err != nil {
logrus.Fatal(err)
}
sanitisedAppName := config.SanitiseAppName(Domain)
logrus.Debugf("%s sanitised as %s for new app", Domain, sanitisedAppName)
if err := config.TemplateAppEnvSample(recipe.Name, Domain, NewAppServer, Domain); err != nil {
logrus.Fatal(err)
}
if err := promptForSecrets(Domain); err != nil {
logrus.Fatal(err)
}
var secrets AppSecrets
var secretTable *tablewriter.Table
if Secrets {
if err := ssh.EnsureHostKey(NewAppServer); err != nil {
logrus.Fatal(err)
}
var err error
secrets, err = createSecrets(sanitisedAppName)
if err != nil {
logrus.Fatal(err)
}
secretCols := []string{"Name", "Value"}
secretTable = formatter.CreateTable(secretCols)
for secret := range secrets {
secretTable.Append([]string{secret, secrets[secret]})
}
}
if NewAppServer == "default" {
NewAppServer = "local"
}
tableCol := []string{"server", "recipe", "domain"}
table := formatter.CreateTable(tableCol)
table.Append([]string{NewAppServer, recipe.Name, Domain})
fmt.Println("")
fmt.Println(fmt.Sprintf("A new %s app has been created! Here is an overview:", recipe.Name))
fmt.Println("")
table.Render()
fmt.Println("")
fmt.Println("You can configure this app by running the following:")
fmt.Println(fmt.Sprintf("\n abra app config %s", Domain))
fmt.Println("")
fmt.Println("You can deploy this app by running the following:")
fmt.Println(fmt.Sprintf("\n abra app deploy %s", Domain))
fmt.Println("")
if len(secrets) > 0 {
fmt.Println("Here are your generated secrets:")
fmt.Println("")
secretTable.Render()
fmt.Println("")
logrus.Warn("generated secrets are not shown again, please take note of them *now*")
}
return nil
}

View File

@ -4,10 +4,10 @@ import (
"fmt"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/AlecAivazis/survey/v2"
"github.com/distribution/reference"
"github.com/docker/distribution/reference"
"github.com/sirupsen/logrus"
)
// PromptBumpType prompts for version bump type
@ -65,7 +65,7 @@ func GetBumpType() string {
} else if Patch {
bumpType = "patch"
} else {
log.Fatal("no version bump type specififed?")
logrus.Fatal("no version bump type specififed?")
}
return bumpType
@ -80,7 +80,7 @@ func SetBumpType(bumpType string) {
} else if bumpType == "patch" {
Patch = true
} else {
log.Fatal("no version bump type specififed?")
logrus.Fatal("no version bump type specififed?")
}
}
@ -88,11 +88,7 @@ func SetBumpType(bumpType string) {
func GetMainAppImage(recipe recipe.Recipe) (string, error) {
var path string
config, err := recipe.GetComposeConfig(nil)
if err != nil {
return "", err
}
for _, service := range config.Services {
for _, service := range recipe.Config.Services {
if service.Name == "app" {
img, err := reference.ParseNormalizedNamed(service.Image)
if err != nil {

View File

@ -2,26 +2,64 @@ package internal
import (
"errors"
"fmt"
"os"
"strings"
"coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/ssh"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// AppName is used for configuring app name programmatically
var AppName string
// ValidateRecipe ensures the recipe arg is valid.
func ValidateRecipe(c *cli.Context) recipe.Recipe {
func ValidateRecipe(c *cli.Context, ensureLatest bool) recipe.Recipe {
recipeName := c.Args().First()
if recipeName == "" {
ShowSubcommandHelpAndError(c, errors.New("no recipe name provided"))
}
chosenRecipe, err := recipe.Get(recipeName)
if err != nil {
if c.Command.Name == "generate" {
if strings.Contains(err.Error(), "missing a compose") {
logrus.Fatal(err)
}
logrus.Warn(err)
} else {
logrus.Fatal(err)
}
}
if ensureLatest {
if err := recipe.EnsureLatest(recipeName); err != nil {
logrus.Fatal(err)
}
}
logrus.Debugf("validated %s as recipe argument", recipeName)
return chosenRecipe
}
// ValidateRecipeWithPrompt ensures a recipe argument is present before
// validating, asking for input if required.
func ValidateRecipeWithPrompt(c *cli.Context, ensureLatest bool) recipe.Recipe {
recipeName := c.Args().First()
if recipeName == "" && !NoInput {
var recipes []string
catl, err := recipe.ReadRecipeCatalogue(Offline)
catl, err := recipe.ReadRecipeCatalogue()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
knownRecipes := make(map[string]bool)
@ -31,7 +69,7 @@ func ValidateRecipe(c *cli.Context) recipe.Recipe {
localRecipes, err := recipe.GetRecipesLocal()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
for _, recipeLocal := range localRecipes {
@ -49,53 +87,62 @@ func ValidateRecipe(c *cli.Context) recipe.Recipe {
Options: recipes,
}
if err := survey.AskOne(prompt, &recipeName); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
if RecipeName != "" {
recipeName = RecipeName
logrus.Debugf("programmatically setting recipe name to %s", recipeName)
}
if recipeName == "" {
ShowSubcommandHelpAndError(c, errors.New("no recipe name provided"))
}
chosenRecipe := recipe.Get(recipeName)
err := chosenRecipe.EnsureExists()
chosenRecipe, err := recipe.Get(recipeName)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
_, err = chosenRecipe.GetComposeConfig(nil)
if err != nil {
if c.Command.Name == "generate" {
if strings.Contains(err.Error(), "missing a compose") {
log.Fatal(err)
}
log.Warn(err)
} else {
if strings.Contains(err.Error(), "template_driver is not allowed") {
log.Warnf("ensure %s recipe compose.* files include \"version: '3.8'\"", recipeName)
}
log.Fatalf("unable to validate recipe: %s", err)
if ensureLatest {
if err := recipe.EnsureLatest(recipeName); err != nil {
logrus.Fatal(err)
}
}
log.Debugf("validated %s as recipe argument", recipeName)
logrus.Debugf("validated %s as recipe argument", recipeName)
return chosenRecipe
}
// ValidateApp ensures the app name arg is valid.
func ValidateApp(c *cli.Context) app.App {
func ValidateApp(c *cli.Context) config.App {
appName := c.Args().First()
if AppName != "" {
appName = AppName
logrus.Debugf("programmatically setting app name to %s", appName)
}
if appName == "" {
ShowSubcommandHelpAndError(c, errors.New("no app provided"))
}
app, err := app.Get(appName)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
log.Debugf("validated %s as app argument", appName)
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if err := ssh.EnsureHostKey(app.Server); err != nil {
logrus.Fatal(err)
}
logrus.Debugf("validated %s as app argument", appName)
return app
}
@ -110,7 +157,7 @@ func ValidateDomain(c *cli.Context) string {
Default: "example.com",
}
if err := survey.AskOne(prompt, &domainName); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
@ -118,7 +165,7 @@ func ValidateDomain(c *cli.Context) string {
ShowSubcommandHelpAndError(c, errors.New("no domain provided"))
}
log.Debugf("validated %s as domain argument", domainName)
logrus.Debugf("validated %s as domain argument", domainName)
return domainName
}
@ -143,7 +190,7 @@ func ValidateServer(c *cli.Context) string {
serverNames, err := config.ReadServerNames()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if serverName == "" && !NoInput {
@ -152,7 +199,7 @@ func ValidateServer(c *cli.Context) string {
Options: serverNames,
}
if err := survey.AskOne(prompt, &serverName); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
@ -163,15 +210,309 @@ func ValidateServer(c *cli.Context) string {
}
}
if serverName == "" {
ShowSubcommandHelpAndError(c, errors.New("no server provided"))
}
if !matched {
ShowSubcommandHelpAndError(c, errors.New("server doesn't exist?"))
}
log.Debugf("validated %s as server argument", serverName)
if serverName == "" {
ShowSubcommandHelpAndError(c, errors.New("no server provided"))
}
logrus.Debugf("validated %s as server argument", serverName)
return serverName
}
// EnsureDNSProvider ensures a DNS provider is chosen.
func EnsureDNSProvider() error {
if DNSProvider == "" && !NoInput {
prompt := &survey.Select{
Message: "Select DNS provider",
Options: []string{"gandi"},
}
if err := survey.AskOne(prompt, &DNSProvider); err != nil {
return err
}
}
if DNSProvider == "" {
return fmt.Errorf("missing DNS provider?")
}
return nil
}
// EnsureDNSTypeFlag ensures a DNS type flag is present.
func EnsureDNSTypeFlag(c *cli.Context) error {
if DNSType == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify DNS record type",
Default: "A",
}
if err := survey.AskOne(prompt, &DNSType); err != nil {
return err
}
}
if DNSType == "" {
ShowSubcommandHelpAndError(c, errors.New("no record type provided"))
}
return nil
}
// EnsureDNSNameFlag ensures a DNS name flag is present.
func EnsureDNSNameFlag(c *cli.Context) error {
if DNSName == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify DNS record name",
Default: "mysubdomain",
}
if err := survey.AskOne(prompt, &DNSName); err != nil {
return err
}
}
if DNSName == "" {
ShowSubcommandHelpAndError(c, errors.New("no record name provided"))
}
return nil
}
// EnsureDNSValueFlag ensures a DNS value flag is present.
func EnsureDNSValueFlag(c *cli.Context) error {
if DNSValue == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify DNS record value",
Default: "192.168.1.2",
}
if err := survey.AskOne(prompt, &DNSValue); err != nil {
return err
}
}
if DNSValue == "" {
ShowSubcommandHelpAndError(c, errors.New("no record value provided"))
}
return nil
}
// EnsureZoneArgument ensures a zone argument is present.
func EnsureZoneArgument(c *cli.Context) (string, error) {
zone := c.Args().First()
if zone == "" && !NoInput {
prompt := &survey.Input{
Message: "Specify a domain name zone",
Default: "example.com",
}
if err := survey.AskOne(prompt, &zone); err != nil {
return zone, err
}
}
if zone == "" {
ShowSubcommandHelpAndError(c, errors.New("no zone value provided"))
}
return zone, nil
}
// EnsureServerProvider ensures a 3rd party server provider is chosen.
func EnsureServerProvider() error {
if ServerProvider == "" && !NoInput {
prompt := &survey.Select{
Message: "Select server provider",
Options: []string{"capsul", "hetzner-cloud"},
}
if err := survey.AskOne(prompt, &ServerProvider); err != nil {
return err
}
}
if ServerProvider == "" {
return fmt.Errorf("missing server provider?")
}
return nil
}
// EnsureNewCapsulVPSFlags ensure all flags are present.
func EnsureNewCapsulVPSFlags(c *cli.Context) error {
if CapsulName == "" && !NoInput {
prompt := &survey.Input{
Message: "specify capsul name",
}
if err := survey.AskOne(prompt, &CapsulName); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify capsul instance URL",
Default: CapsulInstanceURL,
}
if err := survey.AskOne(prompt, &CapsulInstanceURL); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify capsul type",
Default: CapsulType,
}
if err := survey.AskOne(prompt, &CapsulType); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify capsul image",
Default: CapsulImage,
}
if err := survey.AskOne(prompt, &CapsulImage); err != nil {
return err
}
}
if len(CapsulSSHKeys.Value()) == 0 && !NoInput {
var sshKeys string
prompt := &survey.Input{
Message: "specify capsul SSH keys (e.g. me@foo.com)",
Default: "",
}
if err := survey.AskOne(prompt, &sshKeys); err != nil {
return err
}
CapsulSSHKeys = cli.StringSlice(strings.Split(sshKeys, ","))
}
if CapsulAPIToken == "" && !NoInput {
token, ok := os.LookupEnv("CAPSUL_TOKEN")
if !ok {
prompt := &survey.Input{
Message: "specify capsul API token",
}
if err := survey.AskOne(prompt, &CapsulAPIToken); err != nil {
return err
}
} else {
CapsulAPIToken = token
}
}
if CapsulName == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul name?"))
}
if CapsulInstanceURL == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul instance url?"))
}
if CapsulType == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul type?"))
}
if CapsulImage == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul image?"))
}
if len(CapsulSSHKeys.Value()) == 0 {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul ssh keys?"))
}
if CapsulAPIToken == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing capsul API token?"))
}
return nil
}
// EnsureNewHetznerCloudVPSFlags ensure all flags are present.
func EnsureNewHetznerCloudVPSFlags(c *cli.Context) error {
if HetznerCloudName == "" && !NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS name",
}
if err := survey.AskOne(prompt, &HetznerCloudName); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS type",
Default: HetznerCloudType,
}
if err := survey.AskOne(prompt, &HetznerCloudType); err != nil {
return err
}
}
if !NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS image",
Default: HetznerCloudImage,
}
if err := survey.AskOne(prompt, &HetznerCloudImage); err != nil {
return err
}
}
if len(HetznerCloudSSHKeys.Value()) == 0 && !NoInput {
var sshKeys string
prompt := &survey.Input{
Message: "specify hetzner cloud SSH keys (e.g. me@foo.com)",
Default: "",
}
if err := survey.AskOne(prompt, &sshKeys); err != nil {
return err
}
HetznerCloudSSHKeys = cli.StringSlice(strings.Split(sshKeys, ","))
}
if !NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS location",
Default: HetznerCloudLocation,
}
if err := survey.AskOne(prompt, &HetznerCloudLocation); err != nil {
return err
}
}
if HetznerCloudAPIToken == "" && !NoInput {
token, ok := os.LookupEnv("HCLOUD_TOKEN")
if !ok {
prompt := &survey.Input{
Message: "specify hetzner cloud API token",
}
if err := survey.AskOne(prompt, &HetznerCloudAPIToken); err != nil {
return err
}
} else {
HetznerCloudAPIToken = token
}
}
if HetznerCloudName == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud VPS name?"))
}
if HetznerCloudType == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud VPS type?"))
}
if HetznerCloudImage == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud image?"))
}
if HetznerCloudLocation == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud VPS location?"))
}
if HetznerCloudAPIToken == "" {
ShowSubcommandHelpAndError(c, fmt.Errorf("missing hetzner cloud API token?"))
}
return nil
}

View File

@ -1,32 +0,0 @@
package recipe
import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
"github.com/urfave/cli"
)
var recipeDiffCommand = cli.Command{
Name: "diff",
Usage: "Show unstaged changes in recipe config",
Description: "This command requires /usr/bin/git.",
Aliases: []string{"d"},
ArgsUsage: "<recipe>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
r := internal.ValidateRecipe(c)
if err := gitPkg.DiffUnstaged(r.Dir); err != nil {
log.Fatal(err)
}
return nil
},
}

View File

@ -1,52 +0,0 @@
package recipe
import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/urfave/cli"
)
var recipeFetchCommand = cli.Command{
Name: "fetch",
Usage: "Fetch recipe(s)",
Aliases: []string{"f"},
ArgsUsage: "[<recipe>]",
Description: "Retrieves all recipes if no <recipe> argument is passed",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipeName := c.Args().First()
r := recipe.Get(recipeName)
if recipeName != "" {
internal.ValidateRecipe(c)
if err := r.Ensure(false, false); err != nil {
log.Fatal(err)
}
return nil
}
catalogue, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
log.Fatal(err)
}
catlBar := formatter.CreateProgressbar(len(catalogue), "fetching latest recipes...")
for recipeName := range catalogue {
r := recipe.Get(recipeName)
if err := r.Ensure(false, false); err != nil {
log.Error(err)
}
catlBar.Add(1)
}
return nil
},
}

View File

@ -7,7 +7,8 @@ import (
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/log"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -19,103 +20,58 @@ var recipeLintCommand = cli.Command{
Flags: []cli.Flag{
internal.DebugFlag,
internal.OnlyErrorFlag,
internal.OfflineFlag,
internal.NoInputFlag,
internal.ChaosFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipe(c)
recipe := internal.ValidateRecipe(c, true)
if err := recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
if err := recipePkg.EnsureUpToDate(recipe.Name); err != nil {
logrus.Fatal(err)
}
headers := []string{
"ref",
"rule",
"severity",
"satisfied",
"skipped",
"resolve",
}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
table.Headers(headers...)
tableCol := []string{"ref", "rule", "satisfied", "severity", "resolve"}
table := formatter.CreateTable(tableCol)
hasError := false
var rows [][]string
var warnMessages []string
bar := formatter.CreateProgressbar(-1, "running recipe lint rules...")
for level := range lint.LintRules {
for _, rule := range lint.LintRules[level] {
if internal.OnlyErrors && rule.Level != "error" {
log.Debugf("skipping %s, does not have level \"error\"", rule.Ref)
continue
ok, err := rule.Function(recipe)
if err != nil {
logrus.Warn(err)
}
skipped := false
if rule.Skip(recipe) {
skipped = true
if !ok && rule.Level == "error" {
hasError = true
}
skippedOutput := "-"
if skipped {
skippedOutput = ""
var result string
if ok {
result = "yes"
} else {
result = "NO"
}
satisfied := false
if !skipped {
ok, err := rule.Function(recipe)
if err != nil {
warnMessages = append(warnMessages, err.Error())
}
if internal.OnlyErrors {
if !ok && rule.Level == "error" {
hasError = true
}
if ok {
satisfied = true
table.Append([]string{rule.Ref, rule.Description, result, rule.Level, rule.HowToResolve})
bar.Add(1)
}
} else {
table.Append([]string{rule.Ref, rule.Description, result, rule.Level, rule.HowToResolve})
bar.Add(1)
}
satisfiedOutput := "✅"
if !satisfied {
satisfiedOutput = "❌"
if skipped {
satisfiedOutput = "-"
}
}
row := []string{
rule.Ref,
rule.Description,
rule.Level,
satisfiedOutput,
skippedOutput,
rule.HowToResolve,
}
rows = append(rows, row)
table.Row(row...)
}
}
if len(rows) > 0 {
fmt.Println(table)
if table.NumLines() > 0 {
fmt.Println()
table.Render()
}
for _, warnMsg := range warnMessages {
log.Warn(warnMsg)
}
if hasError {
log.Warnf("critical errors present in %s config", recipe.Name)
}
if hasError {
logrus.Warn("watch out, some critical errors are present in your recipe config")
}
return nil

View File

@ -8,8 +8,8 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -27,41 +27,24 @@ var recipeListCommand = cli.Command{
Aliases: []string{"ls"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.MachineReadableFlag,
patternFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
catl, err := recipe.ReadRecipeCatalogue(internal.Offline)
catl, err := recipe.ReadRecipeCatalogue()
if err != nil {
log.Fatal(err.Error())
logrus.Fatal(err.Error())
}
recipes := catl.Flatten()
sort.Sort(recipe.ByRecipeName(recipes))
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
tableCol := []string{"name", "category", "status", "healthcheck", "backups", "email", "tests", "SSO"}
table := formatter.CreateTable(tableCol)
headers := []string{
"name",
"category",
"status",
"healthcheck",
"backups",
"email",
"tests",
"SSO",
}
table.Headers(headers...)
var rows [][]string
len := 0
for _, recipe := range recipes {
row := []string{
tableRow := []string{
recipe.Name,
recipe.Category,
strconv.Itoa(recipe.Features.Status),
@ -74,27 +57,19 @@ var recipeListCommand = cli.Command{
if pattern != "" {
if strings.Contains(recipe.Name, pattern) {
table.Row(row...)
rows = append(rows, row)
table.Append(tableRow)
len++
}
} else {
table.Row(row...)
rows = append(rows, row)
table.Append(tableRow)
len++
}
}
if len(rows) > 0 {
if internal.MachineReadable {
out, err := formatter.ToJSON(headers, rows)
if err != nil {
log.Fatal("unable to render to JSON: %s", err)
}
fmt.Println(out)
return nil
}
table.SetCaption(true, fmt.Sprintf("total recipes: %v", len))
fmt.Println(table)
log.Infof("total recipes: %v", len(rows))
if table.NumLines() > 0 {
table.Render()
}
return nil

View File

@ -4,6 +4,7 @@ import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"text/template"
@ -11,8 +12,7 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -36,9 +36,6 @@ var recipeNewCommand = cli.Command{
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.OfflineFlag,
internal.GitNameFlag,
internal.GitEmailFlag,
},
Before: internal.SubCommandBefore,
Usage: "Create a new recipe",
@ -48,55 +45,79 @@ Create a new recipe.
Abra uses the built-in example repository which is available here:
https://git.coopcloud.tech/coop-cloud/example`,
https://git.coopcloud.tech/coop-cloud/example
Files within the example repository make use of the Golang templating system
which Abra uses to inject values into the generated recipe folder (e.g. name of
recipe and domain in the sample environment config).
`,
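// A minimal sketch of the templating step used below (placeholder and field
// names are illustrative only, not necessarily those from newRecipeMeta):
//
//   tpl, _ := template.New("readme").Parse("# {{ .Name }}")
//   _ = tpl.Execute(os.Stdout, struct{ Name string }{"myrecipe"}) // prints "# myrecipe"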
Action: func(c *cli.Context) error {
recipeName := c.Args().First()
r := recipe.Get(recipeName)
if recipeName == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("no recipe name provided"))
}
if _, err := os.Stat(r.Dir); !os.IsNotExist(err) {
log.Fatalf("%s recipe directory already exists?", r.Dir)
directory := path.Join(config.RECIPES_DIR, recipeName)
if _, err := os.Stat(directory); !os.IsNotExist(err) {
logrus.Fatalf("%s recipe directory already exists?", directory)
}
url := fmt.Sprintf("%s/example.git", config.REPOS_BASE_URL)
if err := git.Clone(r.Dir, url); err != nil {
log.Fatal(err)
if err := git.Clone(directory, url); err != nil {
logrus.Fatal(err)
}
gitRepo := path.Join(r.Dir, ".git")
gitRepo := path.Join(config.RECIPES_DIR, recipeName, ".git")
if err := os.RemoveAll(gitRepo); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
log.Debugf("removed example git repo in %s", gitRepo)
logrus.Debugf("removed example git repo in %s", gitRepo)
meta := newRecipeMeta(recipeName)
for _, path := range []string{r.ReadmePath, r.SampleEnvPath} {
toParse := []string{
path.Join(config.RECIPES_DIR, recipeName, "README.md"),
path.Join(config.RECIPES_DIR, recipeName, ".env.sample"),
}
for _, path := range toParse {
tpl, err := template.ParseFiles(path)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
var templated bytes.Buffer
if err := tpl.Execute(&templated, meta); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if err := os.WriteFile(path, templated.Bytes(), 0o644); err != nil {
log.Fatal(err)
if err := ioutil.WriteFile(path, templated.Bytes(), 0644); err != nil {
logrus.Fatal(err)
}
}
if err := git.Init(r.Dir, true, internal.GitName, internal.GitEmail); err != nil {
log.Fatal(err)
newGitRepo := path.Join(config.RECIPES_DIR, recipeName)
if err := git.Init(newGitRepo, true); err != nil {
logrus.Fatal(err)
}
log.Infof("new recipe '%s' created: %s", recipeName, path.Join(r.Dir))
log.Info("happy hacking 🎉")
fmt.Print(fmt.Sprintf(`
Your new %s recipe has been created in %s.
In order to share your recipe, you can upload it to the git repository at:
https://git.coopcloud.tech/coop-cloud/%s
If you're not sure how to do that, come chat with us:
https://docs.coopcloud.tech/intro/contact
See "abra recipe -h" for additional recipe maintainer commands.
Happy Hacking!
`, recipeName, path.Join(config.RECIPES_DIR, recipeName), recipeName))
return nil
},

View File

@ -18,17 +18,16 @@ for you.
Anyone who uses a recipe can become a maintainer. Maintainers typically make
sure the recipe is in good working order and the config upgraded in a timely
manner.`,
manner. Abra supports convenient automation for recipe maintenance; see the
"abra recipe upgrade", "abra recipe sync" and "abra recipe release" commands.
`,
Subcommands: []cli.Command{
recipeFetchCommand,
recipeLintCommand,
recipeListCommand,
recipeNewCommand,
recipeReleaseCommand,
recipeSyncCommand,
recipeUpgradeCommand,
recipeVersionCommand,
recipeResetCommand,
recipeDiffCommand,
recipeReleaseCommand,
recipeNewCommand,
recipeUpgradeCommand,
recipeSyncCommand,
recipeLintCommand,
},
}

View File

@ -1,23 +1,23 @@
package recipe
import (
"errors"
"fmt"
"os"
"path"
"strconv"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
"github.com/distribution/reference"
"github.com/docker/distribution/reference"
"github.com/go-git/go-git/v5"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -44,7 +44,8 @@ major and therefore require intervention while doing the upgrade work.
Publish your new release to git.coopcloud.tech with "-p/--publish". This
requires that you have permission to git push to these repositories and have
your SSH keys configured on your account.`,
your SSH keys configured on your account.
`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
@ -53,83 +54,70 @@ your SSH keys configured on your account.`,
internal.MinorFlag,
internal.PatchFlag,
internal.PublishFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipe(c)
recipe := internal.ValidateRecipeWithPrompt(c, false)
imagesTmp, err := getImageVersions(recipe)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
mainApp, err := internal.GetMainAppImage(recipe)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
mainAppVersion := imagesTmp[mainApp]
if mainAppVersion == "" {
log.Fatalf("main app service version for %s is empty?", recipe.Name)
logrus.Fatalf("main app service version for %s is empty?", recipe.Name)
}
tagString := c.Args().Get(1)
if tagString != "" {
if _, err := tagcmp.Parse(tagString); err != nil {
log.Fatalf("cannot parse %s, invalid tag specified?", tagString)
logrus.Fatalf("cannot parse %s, invalid tag specified?", tagString)
}
}
if (internal.Major || internal.Minor || internal.Patch) && tagString != "" {
log.Fatal("cannot specify tag and bump type at the same time")
logrus.Fatal("cannot specify tag and bump type at the same time")
}
if tagString != "" {
if err := createReleaseFromTag(recipe, tagString, mainAppVersion); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
tags, err := recipe.Tags()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if tagString == "" && (!internal.Major && !internal.Minor && !internal.Patch) {
var err error
tagString, err = getLabelVersion(recipe, false)
if err != nil {
log.Fatal(err)
}
}
isClean, err := gitPkg.IsClean(recipe.Dir)
if err != nil {
log.Fatal(err)
}
if !isClean {
log.Infof("%s currently has these unstaged changes 👇", recipe.Name)
if err := gitPkg.DiffUnstaged(recipe.Dir); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
if len(tags) > 0 {
log.Warnf("previous git tags detected, assuming this is a new semver release")
logrus.Warnf("previous git tags detected, assuming this is a new semver release")
if err := createReleaseFromPreviousTag(tagString, mainAppVersion, recipe, tags); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
} else {
log.Warnf("no tag specified and no previous tag available for %s, assuming this is the initial release", recipe.Name)
logrus.Warnf("no tag specified and no previous tag available for %s, assuming this is the initial release", recipe.Name)
if err := createReleaseFromTag(recipe, tagString, mainAppVersion); err != nil {
if cleanUpErr := cleanUpTag(recipe, tagString); err != nil {
log.Fatal(cleanUpErr)
if cleanUpErr := cleanUpTag(tagString, recipe.Name); err != nil {
logrus.Fatal(cleanUpErr)
}
log.Fatal(err)
logrus.Fatal(err)
}
}
@ -139,14 +127,10 @@ your SSH keys configured on your account.`,
// getImageVersions retrieves image versions for a recipe
func getImageVersions(recipe recipe.Recipe) (map[string]string, error) {
services := make(map[string]string)
var services = make(map[string]string)
config, err := recipe.GetComposeConfig(nil)
if err != nil {
return nil, err
}
missingTag := false
for _, service := range config.Services {
for _, service := range recipe.Config.Services {
if service.Image == "" {
continue
}
@ -185,7 +169,8 @@ func getImageVersions(recipe recipe.Recipe) (map[string]string, error) {
func createReleaseFromTag(recipe recipe.Recipe, tagString, mainAppVersion string) error {
var err error
repo, err := git.PlainOpen(recipe.Dir)
directory := path.Join(config.RECIPES_DIR, recipe.Name)
repo, err := git.PlainOpen(directory)
if err != nil {
return err
}
@ -209,20 +194,16 @@ func createReleaseFromTag(recipe recipe.Recipe, tagString, mainAppVersion string
tagString = fmt.Sprintf("%s+%s", tag.String(), mainAppVersion)
}
if err := addReleaseNotes(recipe, tagString); err != nil {
log.Fatal(err)
}
if err := commitRelease(recipe, tagString); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if err := tagRelease(tagString, repo); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if err := pushRelease(recipe, tagString); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
return nil
@ -243,100 +224,26 @@ func getTagCreateOptions(tag string) (git.CreateTagOptions, error) {
return git.CreateTagOptions{Message: msg}, nil
}
// addReleaseNotes checks if the release/next release note exists and moves the
// file to release/<tag>.
func addReleaseNotes(recipe recipe.Recipe, tag string) error {
tagReleaseNotePath := path.Join(recipe.Dir, "release", tag)
if _, err := os.Stat(tagReleaseNotePath); err == nil {
// Release note for the current tag already exists.
return nil
} else if !errors.Is(err, os.ErrNotExist) {
return err
}
nextReleaseNotePath := path.Join(recipe.Dir, "release", "next")
if _, err := os.Stat(nextReleaseNotePath); err == nil {
// release/next note exists. Move it to release/<tag>
if internal.Dry {
log.Debugf("dry run: move release note from 'next' to %s", tag)
return nil
}
if !internal.NoInput {
prompt := &survey.Input{
Message: "Use release note in release/next?",
}
var addReleaseNote bool
if err := survey.AskOne(prompt, &addReleaseNote); err != nil {
return err
}
if !addReleaseNote {
return nil
}
}
err := os.Rename(nextReleaseNotePath, tagReleaseNotePath)
if err != nil {
return err
}
err = gitPkg.Add(recipe.Dir, path.Join("release", "next"), internal.Dry)
if err != nil {
return err
}
err = gitPkg.Add(recipe.Dir, path.Join("release", tag), internal.Dry)
if err != nil {
return err
}
} else if !errors.Is(err, os.ErrNotExist) {
return err
}
// No release note exists for the current release.
if internal.NoInput {
return nil
}
prompt := &survey.Input{
Message: "Release Note (leave empty for no release note)",
}
var releaseNote string
if err := survey.AskOne(prompt, &releaseNote); err != nil {
return err
}
if releaseNote == "" {
return nil
}
err := os.WriteFile(tagReleaseNotePath, []byte(releaseNote), 0o644)
if err != nil {
return err
}
err = gitPkg.Add(recipe.Dir, path.Join("release", tag), internal.Dry)
if err != nil {
return err
}
return nil
}
func commitRelease(recipe recipe.Recipe, tag string) error {
if internal.Dry {
log.Debugf("dry run: no changes committed")
logrus.Debugf("dry run: no changes committed")
return nil
}
isClean, err := gitPkg.IsClean(recipe.Dir)
isClean, err := gitPkg.IsClean(recipe.Dir())
if err != nil {
return err
}
if isClean {
if !internal.Dry {
return fmt.Errorf("no changes discovered in %s, nothing to publish?", recipe.Dir)
return fmt.Errorf("no changes discovered in %s, nothing to publish?", recipe.Dir())
}
}
msg := fmt.Sprintf("chore: publish %s release", tag)
if err := gitPkg.Commit(recipe.Dir, msg, internal.Dry); err != nil {
repoPath := path.Join(config.RECIPES_DIR, recipe.Name)
if err := gitPkg.Commit(repoPath, ".", msg, internal.Dry); err != nil {
return err
}
@ -345,7 +252,7 @@ func commitRelease(recipe recipe.Recipe, tag string) error {
func tagRelease(tagString string, repo *git.Repository) error {
if internal.Dry {
log.Debugf("dry run: no git tag created (%s)", tagString)
logrus.Debugf("dry run: no git tag created (%s)", tagString)
return nil
}
@ -365,14 +272,14 @@ func tagRelease(tagString string, repo *git.Repository) error {
}
hash := formatter.SmallSHA(head.Hash().String())
log.Debugf(fmt.Sprintf("created tag %s at %s", tagString, hash))
logrus.Debugf(fmt.Sprintf("created tag %s at %s", tagString, hash))
return nil
}
func pushRelease(recipe recipe.Recipe, tagString string) error {
if internal.Dry {
log.Info("dry run: no changes published")
logrus.Info("dry run: no changes published")
return nil
}
@ -390,17 +297,18 @@ func pushRelease(recipe recipe.Recipe, tagString string) error {
if err := recipe.Push(internal.Dry); err != nil {
return err
}
url := fmt.Sprintf("%s/src/tag/%s", recipe.GitURL, tagString)
log.Infof("new release published: %s", url)
url := fmt.Sprintf("%s/%s/src/tag/%s", config.REPOS_BASE_URL, recipe.Name, tagString)
logrus.Infof("new release published: %s", url)
} else {
log.Info("no -p/--publish passed, not publishing")
logrus.Info("no -p/--publish passed, not publishing")
}
return nil
}
func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recipe.Recipe, tags []string) error {
repo, err := git.PlainOpen(recipe.Dir)
directory := path.Join(config.RECIPES_DIR, recipe.Name)
repo, err := git.PlainOpen(directory)
if err != nil {
return err
}
@ -465,7 +373,7 @@ func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recip
}
if lastGitTag.String() == tagString {
log.Fatalf("latest git tag (%s) and synced label (%s) are the same?", lastGitTag, tagString)
logrus.Fatalf("latest git tag (%s) and synced label (%s) are the same?", lastGitTag, tagString)
}
if !internal.NoInput {
@ -475,36 +383,33 @@ func createReleaseFromPreviousTag(tagString, mainAppVersion string, recipe recip
var ok bool
if err := survey.AskOne(prompt, &ok); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if !ok {
log.Fatal("exiting as requested")
logrus.Fatal("exiting as requested")
}
}
if err := addReleaseNotes(recipe, tagString); err != nil {
log.Fatal(err)
}
if err := commitRelease(recipe, tagString); err != nil {
log.Fatalf("failed to commit changes: %s", err.Error())
logrus.Fatalf("failed to commit changes: %s", err.Error())
}
if err := tagRelease(tagString, repo); err != nil {
log.Fatalf("failed to tag release: %s", err.Error())
logrus.Fatalf("failed to tag release: %s", err.Error())
}
if err := pushRelease(recipe, tagString); err != nil {
log.Fatalf("failed to publish new release: %s", err.Error())
logrus.Fatalf("failed to publish new release: %s", err.Error())
}
return nil
}
// cleanUpTag removes a freshly created tag
func cleanUpTag(recipe recipe.Recipe, tag string) error {
repo, err := git.PlainOpen(recipe.Dir)
func cleanUpTag(tag, recipeName string) error {
directory := path.Join(config.RECIPES_DIR, recipeName)
repo, err := git.PlainOpen(directory)
if err != nil {
return err
}
@ -515,22 +420,22 @@ func cleanUpTag(recipe recipe.Recipe, tag string) error {
}
}
log.Debugf("removed freshly created tag %s", tag)
logrus.Debugf("removed freshly created tag %s", tag)
return nil
}
func getLabelVersion(recipe recipe.Recipe, prompt bool) (string, error) {
initTag, err := recipe.GetVersionLabelLocal()
initTag, err := recipePkg.GetVersionLabelLocal(recipe)
if err != nil {
return "", err
}
if initTag == "" {
log.Fatalf("unable to read version for %s from synced label. Did you try running \"abra recipe sync %s\" already?", recipe.Name, recipe.Name)
logrus.Fatalf("unable to read version for %s from synced label. Did you try running \"abra recipe sync %s\" already?", recipe.Name, recipe.Name)
}
log.Warnf("discovered %s as currently synced recipe label", initTag)
logrus.Warnf("discovered %s as currently synced recipe label", initTag)
if prompt && !internal.NoInput {
var response bool

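A note on the version string composed above: "abra recipe release" builds tags of the form <semver>+<main app image tag>, parsing and re-serialising the recipe version through tagcmp. A minimal sketch of that composition, using the same tagcmp calls that appear in this diff (the version numbers are illustrative only):

package main

import (
	"fmt"

	"coopcloud.tech/tagcmp"
)

func main() {
	// Parse a recipe version the same way the release command validates
	// a user-supplied tag argument.
	tag, err := tagcmp.Parse("1.2.0")
	if err != nil {
		panic(err)
	}

	// The main app service image version is appended as build metadata,
	// mirroring createReleaseFromTag above.
	mainAppVersion := "24.0.1"
	fmt.Printf("%s+%s\n", tag.String(), mainAppVersion) // 1.2.0+24.0.1
}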
View File

@ -1,54 +0,0 @@
package recipe
import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/go-git/go-git/v5"
"github.com/urfave/cli"
)
var recipeResetCommand = cli.Command{
Name: "reset",
Usage: "Remove all unstaged changes from recipe config",
Description: "WARNING: this will delete your changes. Be Careful.",
Aliases: []string{"rs"},
ArgsUsage: "<recipe>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
recipeName := c.Args().First()
r := recipe.Get(recipeName)
if recipeName != "" {
internal.ValidateRecipe(c)
}
repo, err := git.PlainOpen(r.Dir)
if err != nil {
log.Fatal(err)
}
ref, err := repo.Head()
if err != nil {
log.Fatal(err)
}
worktree, err := repo.Worktree()
if err != nil {
log.Fatal(err)
}
opts := &git.ResetOptions{Commit: ref.Hash(), Mode: git.HardReset}
if err := worktree.Reset(opts); err != nil {
log.Fatal(err)
}
return nil
},
}

View File

@ -2,16 +2,17 @@ package recipe
import (
"fmt"
"path"
"strconv"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -37,34 +38,31 @@ named "app") which corresponds to the following format:
Where <version> can be specified on the command-line or Abra can attempt to
auto-generate it for you. The <recipe> configuration will be updated on the
local file system.`,
BashComplete: autocomplete.RecipeNameComplete,
local file system.
`,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipe(c)
recipe := internal.ValidateRecipeWithPrompt(c, false)
mainApp, err := internal.GetMainAppImage(recipe)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
imagesTmp, err := getImageVersions(recipe)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
mainAppVersion := imagesTmp[mainApp]
tags, err := recipe.Tags()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
nextTag := c.Args().Get(1)
if len(tags) == 0 && nextTag == "" {
log.Warnf("no git tags found for %s", recipe.Name)
if internal.NoInput {
log.Fatalf("unable to continue, input required for initial version")
}
logrus.Warnf("no git tags found for %s", recipe.Name)
fmt.Println(fmt.Sprintf(`
The following options are two types of initial semantic version that you can
pick for %s that will be published in the recipe catalogue. This follows the
@ -90,7 +88,7 @@ likely to change.
}
if err := survey.AskOne(edPrompt, &chosenVersion); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
nextTag = fmt.Sprintf("%s+%s", chosenVersion, mainAppVersion)
@ -99,26 +97,26 @@ likely to change.
if nextTag == "" && (!internal.Major && !internal.Minor && !internal.Patch) {
latestRelease := tags[len(tags)-1]
if err := internal.PromptBumpType("", latestRelease); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
}
if nextTag == "" {
repo, err := git.PlainOpen(recipe.Dir)
recipeDir := path.Join(config.RECIPES_DIR, recipe.Name)
repo, err := git.PlainOpen(recipeDir)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
var lastGitTag tagcmp.Tag
iter, err := repo.Tags()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if err := iter.ForEach(func(ref *plumbing.Reference) error {
obj, err := repo.TagObject(ref.Hash())
if err != nil {
log.Fatal("Tag at commit ", ref.Hash(), " is unannotated or otherwise broken. Please fix it.")
return err
}
@ -135,7 +133,7 @@ likely to change.
return nil
}); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
// bumpType is used to decide what part of the tag should be incremented
@ -143,7 +141,7 @@ likely to change.
if bumpType != 0 {
// a bitwise check if the number is a power of 2
if (bumpType & (bumpType - 1)) != 0 {
log.Fatal("you can only use one version flag: --major, --minor or --patch")
logrus.Fatal("you can only use one version flag: --major, --minor or --patch")
}
}
@ -152,14 +150,14 @@ likely to change.
if internal.Patch {
now, err := strconv.Atoi(newTag.Patch)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
newTag.Patch = strconv.Itoa(now + 1)
} else if internal.Minor {
now, err := strconv.Atoi(newTag.Minor)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
newTag.Patch = "0"
@ -167,7 +165,7 @@ likely to change.
} else if internal.Major {
now, err := strconv.Atoi(newTag.Major)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
newTag.Patch = "0"
@ -177,35 +175,25 @@ likely to change.
}
newTag.Metadata = mainAppVersion
log.Debugf("choosing %s as new version for %s", newTag.String(), recipe.Name)
logrus.Debugf("choosing %s as new version for %s", newTag.String(), recipe.Name)
nextTag = newTag.String()
}
if _, err := tagcmp.Parse(nextTag); err != nil {
log.Fatalf("invalid version %s specified", nextTag)
logrus.Fatalf("invalid version %s specified", nextTag)
}
mainService := "app"
label := fmt.Sprintf("coop-cloud.${STACK_NAME}.version=%s", nextTag)
if !internal.Dry {
if err := recipe.UpdateLabel("compose.y*ml", mainService, label); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
} else {
log.Infof("dry run: not syncing label %s for recipe %s", nextTag, recipe.Name)
}
isClean, err := gitPkg.IsClean(recipe.Dir)
if err != nil {
log.Fatal(err)
}
if !isClean {
log.Infof("%s currently has these unstaged changes 👇", recipe.Name)
if err := gitPkg.DiffUnstaged(recipe.Dir); err != nil {
log.Fatal(err)
}
logrus.Infof("dry run: not syncing label %s for recipe %s", nextTag, recipe.Name)
}
return nil
},
BashComplete: autocomplete.RecipeNameComplete,
}

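The sync command packs --major/--minor/--patch into a single integer (4, 2 and 1) and rejects combined flags with a bitwise power-of-two test. A standalone sketch of that check; the btoi helper here is assumed to behave like the one used elsewhere in this codebase:

package main

import "fmt"

// btoi encodes a boolean flag as 1 or 0, as assumed from its usage above.
func btoi(b bool) int {
	if b {
		return 1
	}
	return 0
}

func main() {
	major, minor, patch := false, true, false

	// Pack the three flags into one integer: 4*major + 2*minor + 1*patch.
	bumpType := btoi(major)*4 + btoi(minor)*2 + btoi(patch)

	// Exactly one flag set means bumpType is a power of two.
	if bumpType != 0 && (bumpType&(bumpType-1)) != 0 {
		fmt.Println("you can only use one version flag: --major, --minor or --patch")
		return
	}

	fmt.Println("bump type:", bumpType) // 2, i.e. a minor bump
}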
View File

@ -2,7 +2,6 @@ package recipe
import (
"bufio"
"encoding/json"
"fmt"
"os"
"path"
@ -12,13 +11,13 @@ import (
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
"github.com/distribution/reference"
"github.com/docker/distribution/reference"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -27,15 +26,6 @@ type imgPin struct {
version tagcmp.Tag
}
// anUpgrade represents a single service upgrade (as within a recipe), and the list of tags that it can be upgraded to,
// for serialization purposes.
type anUpgrade struct {
Service string `json:"service"`
Image string `json:"image"`
Tag string `json:"tag"`
UpgradeTags []string `json:"upgrades"`
}
var recipeUpgradeCommand = cli.Command{
Name: "upgrade",
Aliases: []string{"u"},
@ -53,66 +43,58 @@ The command is interactive and will show a select input which allows you to
make a selection. Use the "?" key to see more help on navigating this
interface.
You may invoke this command in "wizard" mode and be prompted for input.
You may invoke this command in "wizard" mode and be prompted for input:
EXAMPLE:
abra recipe upgrade`,
ArgsUsage: "<recipe>",
abra recipe upgrade
`,
BashComplete: autocomplete.RecipeNameComplete,
ArgsUsage: "<recipe>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.PatchFlag,
internal.MinorFlag,
internal.MajorFlag,
internal.MachineReadableFlag,
internal.AllTagsFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
recipe := internal.ValidateRecipe(c)
recipe := internal.ValidateRecipeWithPrompt(c, true)
if err := recipe.Ensure(internal.Chaos, internal.Offline); err != nil {
log.Fatal(err)
if err := recipePkg.EnsureUpToDate(recipe.Name); err != nil {
logrus.Fatal(err)
}
bumpType := btoi(internal.Major)*4 + btoi(internal.Minor)*2 + btoi(internal.Patch)
if bumpType != 0 {
// a bitwise check if the number is a power of 2
if (bumpType & (bumpType - 1)) != 0 {
log.Fatal("you can only use one of: --major, --minor, --patch.")
logrus.Fatal("you can only use one of: --major, --minor, --patch.")
}
}
if internal.MachineReadable {
// -m implies -n in this case
internal.NoInput = true
}
upgradeList := make(map[string]anUpgrade)
// check for versions file and load pinned versions
versionsPresent := false
versionsPath := path.Join(recipe.Dir, "versions")
servicePins := make(map[string]imgPin)
recipeDir := path.Join(config.RECIPES_DIR, recipe.Name)
versionsPath := path.Join(recipeDir, "versions")
var servicePins = make(map[string]imgPin)
if _, err := os.Stat(versionsPath); err == nil {
log.Debugf("found versions file for %s", recipe.Name)
logrus.Debugf("found versions file for %s", recipe.Name)
file, err := os.Open(versionsPath)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
splitLine := strings.Split(line, " ")
if splitLine[0] != "pin" || len(splitLine) != 3 {
log.Fatalf("malformed version pin specification: %s", line)
logrus.Fatalf("malformed version pin specification: %s", line)
}
pinSlice := strings.Split(splitLine[2], ":")
pinTag, err := tagcmp.Parse(pinSlice[1])
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
pin := imgPin{
image: pinSlice[0],
@ -121,50 +103,45 @@ EXAMPLE:
servicePins[splitLine[1]] = pin
}
if err := scanner.Err(); err != nil {
log.Error(err)
logrus.Error(err)
}
versionsPresent = true
} else {
log.Debugf("did not find versions file for %s", recipe.Name)
logrus.Debugf("did not find versions file for %s", recipe.Name)
}
config, err := recipe.GetComposeConfig(nil)
if err != nil {
log.Fatal(err)
}
for _, service := range config.Services {
for _, service := range recipe.Config.Services {
img, err := reference.ParseNormalizedNamed(service.Image)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
regVersions, err := client.GetRegistryTags(img)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
image := reference.Path(img)
log.Debugf("retrieved %s from remote registry for %s", regVersions, image)
logrus.Debugf("retrieved %s from remote registry for %s", regVersions, image)
image = formatter.StripTagMeta(image)
switch img.(type) {
case reference.NamedTagged:
if !tagcmp.IsParsable(img.(reference.NamedTagged).Tag()) {
log.Debugf("%s not considered semver-like", img.(reference.NamedTagged).Tag())
logrus.Debugf("%s not considered semver-like", img.(reference.NamedTagged).Tag())
}
default:
log.Warnf("unable to read tag for image %s, is it missing? skipping upgrade for %s", image, service.Name)
logrus.Warnf("unable to read tag for image %s, is it missing? skipping upgrade for %s", image, service.Name)
continue
}
tag, err := tagcmp.Parse(img.(reference.NamedTagged).Tag())
if err != nil {
log.Warnf("unable to parse %s, error was: %s, skipping upgrade for %s", image, err.Error(), service.Name)
logrus.Warnf("unable to parse %s, error was: %s, skipping upgrade for %s", image, err.Error(), service.Name)
continue
}
log.Debugf("parsed %s for %s", tag, service.Name)
logrus.Debugf("parsed %s for %s", tag, service.Name)
var compatible []tagcmp.Tag
for _, regVersion := range regVersions {
@ -178,18 +155,18 @@ EXAMPLE:
}
}
log.Debugf("detected potential upgradable tags %s for %s", compatible, service.Name)
logrus.Debugf("detected potential upgradable tags %s for %s", compatible, service.Name)
sort.Sort(tagcmp.ByTagDesc(compatible))
if len(compatible) == 0 && !internal.AllTags {
log.Info(fmt.Sprintf("no new versions available for %s, assuming %s is the latest (use -a/--all-tags to see all anyway)", image, tag))
logrus.Info(fmt.Sprintf("no new versions available for %s, assuming %s is the latest (use -a/--all-tags to see all anyway)", image, tag))
continue // skip on to the next tag and don't update any compose files
}
catlVersions, err := recipePkg.VersionsOfService(recipe.Name, service.Name, internal.Offline)
catlVersions, err := recipePkg.VersionsOfService(recipe.Name, service.Name)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
compatibleStrings := []string{"skip"}
@ -205,7 +182,7 @@ EXAMPLE:
}
}
log.Debugf("detected compatible upgradable tags %s for %s", compatibleStrings, service.Name)
logrus.Debugf("detected compatible upgradable tags %s for %s", compatibleStrings, service.Name)
var upgradeTag string
_, ok := servicePins[service.Name]
@ -222,13 +199,13 @@ EXAMPLE:
}
}
if contains {
log.Infof("upgrading service %s from %s to %s (pinned tag: %s)", service.Name, tag.String(), upgradeTag, pinnedTagString)
logrus.Infof("upgrading service %s from %s to %s (pinned tag: %s)", service.Name, tag.String(), upgradeTag, pinnedTagString)
} else {
log.Infof("service %s, image %s pinned to %s, no compatible upgrade found", service.Name, servicePins[service.Name].image, pinnedTagString)
logrus.Infof("service %s, image %s pinned to %s, no compatible upgrade found", service.Name, servicePins[service.Name].image, pinnedTagString)
continue
}
} else {
log.Fatalf("service %s is at version %s, but pinned to %s, please correct your compose.yml file manually!", service.Name, tag.String(), pinnedTag.String())
logrus.Fatalf("service %s is at version %s, but pinned to %s, please correct your compose.yml file manually!", service.Name, tag.String(), pinnedTag.String())
continue
}
} else {
@ -245,7 +222,7 @@ EXAMPLE:
}
}
if upgradeTag == "" {
log.Warnf("not upgrading from %s to %s for %s, because the upgrade type is more serious than what user wants", tag.String(), compatible[0].String(), image)
logrus.Warnf("not upgrading from %s to %s for %s, because the upgrade type is more serious than what user wants", tag.String(), compatible[0].String(), image)
continue
}
} else {
@ -253,7 +230,7 @@ EXAMPLE:
if !tagcmp.IsParsable(img.(reference.NamedTagged).Tag()) || internal.AllTags {
tag := img.(reference.NamedTagged).Tag()
if !internal.AllTags {
log.Warn(fmt.Sprintf("unable to determine versioning semantics of %s, listing all tags", tag))
logrus.Warning(fmt.Sprintf("unable to determine versioning semantics of %s, listing all tags", tag))
}
msg = fmt.Sprintf("upgrade to which tag? (service: %s, tag: %s)", service.Name, tag)
compatibleStrings = []string{"skip"}
@ -262,83 +239,27 @@ EXAMPLE:
}
}
// there is always at least the item "skip" in compatibleStrings (a list of
// possible upgradable tags) and at least one other tag.
upgradableTags := compatibleStrings[1:]
upgrade := anUpgrade{
Service: service.Name,
Image: image,
Tag: tag.String(),
UpgradeTags: make([]string, len(upgradableTags)),
prompt := &survey.Select{
Message: msg,
Help: "enter / return to confirm, choose 'skip' to not upgrade this tag, vim mode is enabled",
VimMode: true,
Options: compatibleStrings,
}
for n, s := range upgradableTags {
var sb strings.Builder
if _, err := sb.WriteString(s); err != nil {
}
upgrade.UpgradeTags[n] = sb.String()
}
upgradeList[upgrade.Service] = upgrade
if internal.NoInput {
upgradeTag = "skip"
} else {
prompt := &survey.Select{
Message: msg,
Help: "enter / return to confirm, choose 'skip' to not upgrade this tag, vim mode is enabled",
VimMode: true,
Options: compatibleStrings,
}
if err := survey.AskOne(prompt, &upgradeTag); err != nil {
log.Fatal(err)
}
if err := survey.AskOne(prompt, &upgradeTag); err != nil {
logrus.Fatal(err)
}
}
}
if upgradeTag != "skip" {
ok, err := recipe.UpdateTag(image, upgradeTag)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if ok {
log.Infof("tag upgraded from %s to %s for %s", tag.String(), upgradeTag, image)
logrus.Infof("tag upgraded from %s to %s for %s", tag.String(), upgradeTag, image)
}
} else {
if !internal.NoInput {
log.Warnf("not upgrading %s, skipping as requested", image)
}
}
}
if internal.NoInput {
if internal.MachineReadable {
jsonstring, err := json.Marshal(upgradeList)
if err != nil {
log.Fatal(err)
}
fmt.Println(string(jsonstring))
return nil
}
for _, upgrade := range upgradeList {
log.Infof("can upgrade service: %s, image: %s, tag: %s ::", upgrade.Service, upgrade.Image, upgrade.Tag)
for _, utag := range upgrade.UpgradeTags {
log.Infof(" %s", utag)
}
}
}
isClean, err := gitPkg.IsClean(recipe.Dir)
if err != nil {
log.Fatal(err)
}
if !isClean {
log.Infof("%s currently has these unstaged changes 👇", recipe.Name)
if err := gitPkg.DiffUnstaged(recipe.Dir); err != nil {
log.Fatal(err)
logrus.Warnf("not upgrading %s, skipping as requested", image)
}
}

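The optional "versions" file read above pins a service to an image tag with lines of exactly three fields: pin <service> <image>:<tag>. A small sketch of parsing one such line, reusing the same splitting and tagcmp calls as the upgrade command (the service and image names are made up for illustration):

package main

import (
	"fmt"
	"strings"

	"coopcloud.tech/tagcmp"
)

func main() {
	// Hypothetical line from a recipe's "versions" file.
	line := "pin app nextcloud:24.0.1"

	splitLine := strings.Split(line, " ")
	if splitLine[0] != "pin" || len(splitLine) != 3 {
		panic("malformed version pin specification: " + line)
	}

	// The third field is split into image and tag.
	pinSlice := strings.Split(splitLine[2], ":")
	pinTag, err := tagcmp.Parse(pinSlice[1])
	if err != nil {
		panic(err)
	}

	fmt.Printf("service %s pinned to image %s at %s\n",
		splitLine[1], pinSlice[0], pinTag.String())
}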
View File

@ -1,27 +1,14 @@
package recipe
import (
"fmt"
"sort"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
func sortServiceByName(versions [][]string) func(i, j int) bool {
return func(i, j int) bool {
// NOTE(d1): corresponds to the `tableCol` definition below
if versions[i][1] == "app" {
return true
}
return versions[i][1] < versions[j][1]
}
}
var recipeVersionCommand = cli.Command{
Name: "versions",
Aliases: []string{"v"},
@ -29,83 +16,39 @@ var recipeVersionCommand = cli.Command{
ArgsUsage: "<recipe>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
internal.NoInputFlag,
internal.MachineReadableFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.RecipeNameComplete,
Action: func(c *cli.Context) error {
var warnMessages []string
recipe := internal.ValidateRecipe(c, false)
recipe := internal.ValidateRecipe(c)
catl, err := recipePkg.ReadRecipeCatalogue(internal.Offline)
catalogue, err := recipePkg.ReadRecipeCatalogue()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
recipeMeta, ok := catl[recipe.Name]
recipeMeta, ok := catalogue[recipe.Name]
if !ok {
warnMessages = append(warnMessages, "retrieved versions from local recipe repository")
recipeVersions, err := recipe.GetRecipeVersions()
if err != nil {
warnMessages = append(warnMessages, err.Error())
}
recipeMeta = recipePkg.RecipeMeta{Versions: recipeVersions}
logrus.Fatalf("%s recipe doesn't exist?", recipe.Name)
}
if len(recipeMeta.Versions) == 0 {
log.Fatalf("%s has no published versions?", recipe.Name)
}
tableCol := []string{"Version", "Service", "Image", "Tag", "Digest"}
table := formatter.CreateTable(tableCol)
for i := len(recipeMeta.Versions) - 1; i >= 0; i-- {
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
table.Headers("SERVICE", "NAME", "TAG")
for version, meta := range recipeMeta.Versions[i] {
var allRows [][]string
var rows [][]string
for tag, meta := range recipeMeta.Versions[i] {
for service, serviceMeta := range meta {
rows = append(rows, []string{service, serviceMeta.Image, serviceMeta.Tag})
allRows = append(allRows, []string{version, service, serviceMeta.Image, serviceMeta.Tag})
}
sort.Slice(rows, sortServiceByName(rows))
table.Rows(rows...)
if !internal.MachineReadable {
fmt.Println(table)
log.Infof("VERSION: %s", version)
fmt.Println()
continue
}
if internal.MachineReadable {
sort.Slice(allRows, sortServiceByName(allRows))
headers := []string{"VERSION", "SERVICE", "NAME", "TAG"}
out, err := formatter.ToJSON(headers, allRows)
if err != nil {
log.Fatal("unable to render to JSON: %s", err)
}
fmt.Println(out)
continue
table.Append([]string{tag, service, serviceMeta.Image, serviceMeta.Tag, serviceMeta.Digest})
}
}
}
if !internal.MachineReadable {
for _, warnMsg := range warnMessages {
log.Warn(warnMsg)
}
table.SetAutoMergeCells(true)
if table.NumLines() > 0 {
table.Render()
} else {
logrus.Fatalf("%s has no published versions?", recipe.Name)
}
return nil

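The versions table is built from the catalogue structure walked above: a slice (one entry per published version) of maps from version string to per-service metadata. A hedged sketch of that shape; the ServiceMeta type and sample values are illustrative, based only on the fields read in this diff (Image, Tag, Digest):

package main

import "fmt"

// ServiceMeta stands in for the catalogue type used above; only the fields
// accessed by the versions command are assumed here.
type ServiceMeta struct {
	Image  string
	Tag    string
	Digest string
}

func main() {
	// Mirrors recipeMeta.Versions: one map per published version.
	versions := []map[string]map[string]ServiceMeta{
		{"1.0.0+24.0.1": {
			"app": {Image: "nextcloud", Tag: "24.0.1", Digest: "sha256:..."},
			"db":  {Image: "mariadb", Tag: "10.8", Digest: "sha256:..."},
		}},
	}

	// Iterate newest-first, as the command does.
	for i := len(versions) - 1; i >= 0; i-- {
		for version, services := range versions[i] {
			for service, meta := range services {
				fmt.Println(version, service, meta.Image, meta.Tag, meta.Digest)
			}
		}
	}
}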
82 cli/record/list.go Normal file
View File

@ -0,0 +1,82 @@
package record
import (
"context"
"fmt"
"strconv"
"coopcloud.tech/abra/cli/internal"
gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
"coopcloud.tech/abra/pkg/formatter"
"github.com/libdns/gandi"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// RecordListCommand lists domains.
var RecordListCommand = cli.Command{
Name: "list",
Usage: "List domain name records",
Aliases: []string{"ls"},
ArgsUsage: "<zone>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.DNSProviderFlag,
},
Before: internal.SubCommandBefore,
Description: `
List all domain name records managed by a 3rd party provider for a specific
zone.
You must specify a zone (e.g. example.com) under which your domain name records
are listed. This zone must already be created on your provider account.
`,
Action: func(c *cli.Context) error {
if err := internal.EnsureDNSProvider(); err != nil {
logrus.Fatal(err)
}
zone, err := internal.EnsureZoneArgument(c)
if err != nil {
logrus.Fatal(err)
}
var provider gandi.Provider
switch internal.DNSProvider {
case "gandi":
provider, err = gandiPkg.New()
if err != nil {
logrus.Fatal(err)
}
default:
logrus.Fatalf("%s is not a supported DNS provider", internal.DNSProvider)
}
records, err := provider.GetRecords(context.Background(), zone)
if err != nil {
logrus.Fatal(err)
}
tableCol := []string{"type", "name", "value", "TTL", "priority"}
table := formatter.CreateTable(tableCol)
for _, record := range records {
value := record.Value
if len(record.Value) > 30 {
value = fmt.Sprintf("%s...", record.Value[:30])
}
table.Append([]string{
record.Type,
record.Name,
value,
record.TTL.String(),
strconv.Itoa(record.Priority),
})
}
table.Render()
return nil
},
}

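A usage sketch for the new list command, assuming a zone already managed on a Gandi account and the provider token configured as the gandi provider package expects:

    abra record list example.com -p gandi

The records are rendered as a table of type, name, value, TTL and priority, with long values truncated to 30 characters.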
148 cli/record/new.go Normal file
View File

@ -0,0 +1,148 @@
package record
import (
"context"
"fmt"
"strconv"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/dns"
gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
"coopcloud.tech/abra/pkg/formatter"
"github.com/libdns/gandi"
"github.com/libdns/libdns"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// RecordNewCommand creates a new domain name record.
var RecordNewCommand = cli.Command{
Name: "new",
Usage: "Create a new domain record",
Aliases: []string{"n"},
ArgsUsage: "<zone>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.DNSProviderFlag,
internal.DNSTypeFlag,
internal.DNSNameFlag,
internal.DNSValueFlag,
internal.DNSTTLFlag,
internal.DNSPriorityFlag,
},
Before: internal.SubCommandBefore,
Description: `
Create a new domain name record for a specific zone.
You must specify a zone (e.g. example.com) under which your domain name records
are listed. This zone must already be created on your provider account.
Example:
abra record new foo.com -p gandi -t A -n myapp -v 192.168.178.44
You may also invoke this command in "wizard" mode and be prompted for input:
abra record new
`,
Action: func(c *cli.Context) error {
zone, err := internal.EnsureZoneArgument(c)
if err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSProvider(); err != nil {
logrus.Fatal(err)
}
var provider gandi.Provider
switch internal.DNSProvider {
case "gandi":
provider, err = gandiPkg.New()
if err != nil {
logrus.Fatal(err)
}
default:
logrus.Fatalf("%s is not a supported DNS provider", internal.DNSProvider)
}
if err := internal.EnsureDNSTypeFlag(c); err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSNameFlag(c); err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSValueFlag(c); err != nil {
logrus.Fatal(err)
}
ttl, err := dns.GetTTL(internal.DNSTTL)
if err != nil {
return err
}
record := libdns.Record{
Type: internal.DNSType,
Name: internal.DNSName,
Value: internal.DNSValue,
TTL: ttl,
}
if internal.DNSType == "MX" || internal.DNSType == "SRV" || internal.DNSType == "URI" {
record.Priority = internal.DNSPriority
}
records, err := provider.GetRecords(context.Background(), zone)
if err != nil {
logrus.Fatal(err)
}
for _, existingRecord := range records {
if existingRecord.Type == record.Type &&
existingRecord.Name == record.Name &&
existingRecord.Value == record.Value {
logrus.Fatalf("%s record for %s already exists?", record.Type, zone)
}
}
createdRecords, err := provider.SetRecords(
context.Background(),
zone,
[]libdns.Record{record},
)
if err != nil {
logrus.Fatal(err)
}
if len(createdRecords) == 0 {
logrus.Fatal("provider library reports that no record was created?")
}
createdRecord := createdRecords[0]
tableCol := []string{"type", "name", "value", "TTL", "priority"}
table := formatter.CreateTable(tableCol)
value := createdRecord.Value
if len(createdRecord.Value) > 30 {
value = fmt.Sprintf("%s...", createdRecord.Value[:30])
}
table.Append([]string{
createdRecord.Type,
createdRecord.Name,
value,
createdRecord.TTL.String(),
strconv.Itoa(createdRecord.Priority),
})
table.Render()
logrus.Info("record created")
return nil
},
}

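The new command ultimately hands the provider a single libdns.Record built from the flags. A minimal sketch of constructing such a record with the libdns types used above; the name and address are illustrative, and Priority would additionally be set for MX, SRV and URI records:

package main

import (
	"fmt"
	"time"

	"github.com/libdns/libdns"
)

func main() {
	// Roughly equivalent to:
	//   abra record new example.com -p gandi -t A -n myapp -v 192.0.2.10
	record := libdns.Record{
		Type:  "A",
		Name:  "myapp",
		Value: "192.0.2.10",
		TTL:   1800 * time.Second,
	}

	fmt.Printf("%s %s -> %s (TTL %s)\n",
		record.Type, record.Name, record.Value, record.TTL)
}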
37 cli/record/record.go Normal file
View File

@ -0,0 +1,37 @@
package record
import (
"github.com/urfave/cli"
)
// RecordCommand supports managing DNS entries.
var RecordCommand = cli.Command{
Name: "record",
Usage: "Manage domain name records",
Aliases: []string{"rc"},
ArgsUsage: "<record>",
Description: `
Manage domain name records via 3rd party providers such as Gandi DNS. It
supports listing, creating and removing all types of records that you might
need for managing Co-op Cloud apps.
The following providers are supported:
Gandi DNS https://www.gandi.net
You need an account with such a provider already. Typically, you need to
provide an API token on the Abra command-line when using these commands so that
you can authenticate with your provider account.
New providers can be integrated; we welcome change sets. See the underlying DNS
library documentation for more. It supports many existing providers and makes it
easy to implement support for new providers.
https://pkg.go.dev/github.com/libdns/libdns
`,
Subcommands: []cli.Command{
RecordListCommand,
RecordNewCommand,
RecordRemoveCommand,
},
}

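For integrating further providers, the record commands only rely on a handful of libdns interfaces. A hedged sketch of the surface a new provider package needs to cover; the interface names come from the libdns library, and only GetRecords, SetRecords and DeleteRecords are exercised by the commands in this diff:

package dnsutil

import (
	"context"

	"github.com/libdns/libdns"
)

// Provider groups the libdns capabilities the record commands rely on.
type Provider interface {
	libdns.RecordGetter  // GetRecords(ctx, zone)
	libdns.RecordSetter  // SetRecords(ctx, zone, records)
	libdns.RecordDeleter // DeleteRecords(ctx, zone, records)
}

// ListZone shows how any conforming provider can be used generically.
func ListZone(ctx context.Context, p Provider, zone string) ([]libdns.Record, error) {
	return p.GetRecords(ctx, zone)
}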
136 cli/record/remove.go Normal file
View File

@ -0,0 +1,136 @@
package record
import (
"context"
"fmt"
"strconv"
"coopcloud.tech/abra/cli/internal"
gandiPkg "coopcloud.tech/abra/pkg/dns/gandi"
"coopcloud.tech/abra/pkg/formatter"
"github.com/AlecAivazis/survey/v2"
"github.com/libdns/gandi"
"github.com/libdns/libdns"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// RecordRemoveCommand lists domains.
var RecordRemoveCommand = cli.Command{
Name: "remove",
Usage: "Remove a domain name record",
Aliases: []string{"rm"},
ArgsUsage: "<zone>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.DNSProviderFlag,
internal.DNSTypeFlag,
internal.DNSNameFlag,
},
Before: internal.SubCommandBefore,
Description: `
Remove a domain name record for a specific zone.
It uses the type of record and name to match existing records and choose one
for deletion. You must specify a zone (e.g. example.com) under which your
domain name records are listed. This zone must already be created on your
provider account.
Example:
abra record remove foo.com -p gandi -t A -n myapp
You may also invoke this command in "wizard" mode and be prompted for input:
abra record rm
`,
Action: func(c *cli.Context) error {
zone, err := internal.EnsureZoneArgument(c)
if err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSProvider(); err != nil {
logrus.Fatal(err)
}
var provider gandi.Provider
switch internal.DNSProvider {
case "gandi":
provider, err = gandiPkg.New()
if err != nil {
logrus.Fatal(err)
}
default:
logrus.Fatalf("%s is not a supported DNS provider", internal.DNSProvider)
}
if err := internal.EnsureDNSTypeFlag(c); err != nil {
logrus.Fatal(err)
}
if err := internal.EnsureDNSNameFlag(c); err != nil {
logrus.Fatal(err)
}
records, err := provider.GetRecords(context.Background(), zone)
if err != nil {
logrus.Fatal(err)
}
var toDelete libdns.Record
for _, record := range records {
if record.Type == internal.DNSType && record.Name == internal.DNSName {
toDelete = record
break
}
}
if (libdns.Record{}) == toDelete {
logrus.Fatal("provider library reports no matching record?")
}
tableCol := []string{"type", "name", "value", "TTL", "priority"}
table := formatter.CreateTable(tableCol)
value := toDelete.Value
if len(toDelete.Value) > 30 {
value = fmt.Sprintf("%s...", toDelete.Value[:30])
}
table.Append([]string{
toDelete.Type,
toDelete.Name,
value,
toDelete.TTL.String(),
strconv.Itoa(toDelete.Priority),
})
table.Render()
if !internal.NoInput {
response := false
prompt := &survey.Confirm{
Message: "continue with record deletion?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
}
_, err = provider.DeleteRecords(context.Background(), zone, []libdns.Record{toDelete})
if err != nil {
logrus.Fatal(err)
}
logrus.Info("record successfully deleted")
return nil
},
}

View File

@ -1,21 +1,49 @@
package server
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"os/user"
"path/filepath"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
contextPkg "coopcloud.tech/abra/pkg/context"
"coopcloud.tech/abra/pkg/dns"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/server"
sshPkg "coopcloud.tech/abra/pkg/ssh"
"coopcloud.tech/abra/pkg/ssh"
"github.com/AlecAivazis/survey/v2"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var (
dockerInstallMsg = `
A docker installation cannot be found on %s. This is a required system
dependency for running Co-op Cloud apps on your server. If you would like, Abra
can attempt to install Docker for you using the upstream non-interactive
installation script.
See the following documentation for more:
https://docs.docker.com/engine/install/debian/#install-using-the-convenience-script
N.B Docker doesn't recommend it for production environments but many use it for
such purposes. Docker stable is now installed by default by this script. The
source for this script can be seen here:
https://github.com/docker/docker-install
`
)
var local bool
var localFlag = &cli.BoolFlag{
Name: "local, l",
@ -23,182 +51,461 @@ var localFlag = &cli.BoolFlag{
Destination: &local,
}
// cleanUp cleans up the partially created context/client details for a failed
// "server add" attempt.
func cleanUp(name string) {
if name != "default" {
log.Debugf("serverAdd: cleanUp: cleaning up context for %s", name)
if err := client.DeleteContext(name); err != nil {
log.Fatal(err)
}
var provision bool
var provisionFlag = &cli.BoolFlag{
Name: "provision, p",
Usage: "Provision server so it can deploy apps",
Destination: &provision,
}
var sshAuth string
var sshAuthFlag = &cli.StringFlag{
Name: "ssh-auth, s",
Value: "identity-file",
Usage: "Select SSH authentication method (identity-file, password)",
Destination: &sshAuth,
}
var askSudoPass bool
var askSudoPassFlag = &cli.BoolFlag{
Name: "ask-sudo-pass, a",
Usage: "Ask for sudo password",
Destination: &askSudoPass,
}
func cleanUp(domainName string) {
logrus.Warnf("cleaning up context for %s", domainName)
if err := client.DeleteContext(domainName); err != nil {
logrus.Fatal(err)
}
serverDir := filepath.Join(config.SERVERS_DIR, name)
files, err := config.GetAllFilesInDirectory(serverDir)
if err != nil {
log.Fatalf("serverAdd: cleanUp: unable to list files in %s: %s", serverDir, err)
}
if len(files) > 0 {
log.Debugf("serverAdd: cleanUp: %s is not empty, aborting cleanup", serverDir)
return
}
if err := os.RemoveAll(serverDir); err != nil {
log.Fatalf("serverAdd: cleanUp: failed to remove %s: %s", serverDir, err)
logrus.Warnf("cleaning up server directory for %s", domainName)
if err := os.RemoveAll(filepath.Join(config.SERVERS_DIR, domainName)); err != nil {
logrus.Fatal(err)
}
}
// newContext creates a new internal Docker context for a server. This is how
// Docker manages SSH connection details. These are stored to disk in
// ~/.docker. Abra can manage this completely for the user, so it's an
// implementation detail.
func newContext(name string) (bool, error) {
func installDockerLocal(c *cli.Context) error {
fmt.Println(fmt.Sprintf(dockerInstallMsg, "this local server"))
response := false
prompt := &survey.Confirm{
Message: fmt.Sprintf("attempt install docker on local server?"),
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
for _, exe := range []string{"wget", "bash"} {
exists, err := ensureLocalExecutable(exe)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("%s missing, please install it", exe)
}
}
cmd := exec.Command("bash", "-c", "wget -O- https://get.docker.com | bash")
if err := internal.RunCmd(cmd); err != nil {
return err
}
return nil
}
func newLocalServer(c *cli.Context, domainName string) error {
if err := createServerDir(domainName); err != nil {
return err
}
cl, err := newClient(c, domainName)
if err != nil {
return err
}
if provision {
exists, err := ensureLocalExecutable("docker")
if err != nil {
return err
}
if !exists {
if err := installDockerLocal(c); err != nil {
return err
}
}
if err := initSwarmLocal(c, cl, domainName); err != nil {
if !strings.Contains(err.Error(), "proxy already exists") {
logrus.Fatal(err)
}
}
}
logrus.Info("local server has been added")
return nil
}
func newContext(c *cli.Context, domainName, username, port string) error {
store := contextPkg.NewDefaultDockerContextStore()
contexts, err := store.Store.List()
if err != nil {
return false, err
return err
}
for _, context := range contexts {
if context.Name == name {
log.Debugf("context for %s already exists", name)
return false, nil
if context.Name == domainName {
logrus.Debugf("context for %s already exists", domainName)
return nil
}
}
log.Debugf("creating context with domain %s", name)
logrus.Debugf("creating context with domain %s, username %s and port %s", domainName, username, port)
if err := client.CreateContext(name); err != nil {
return false, nil
if err := client.CreateContext(domainName, username, port); err != nil {
return err
}
return true, nil
return nil
}
// createServerDir creates the ~/.abra/servers/... directory for a new server.
func createServerDir(name string) (bool, error) {
if err := server.CreateServerDir(name); err != nil {
if !os.IsExist(err) {
return false, err
}
func newClient(c *cli.Context, domainName string) (*dockerClient.Client, error) {
cl, err := client.New(domainName)
if err != nil {
return &dockerClient.Client{}, err
}
return cl, nil
}
log.Debugf("server dir for %s already created", name)
return false, nil
func installDocker(c *cli.Context, cl *dockerClient.Client, sshCl *ssh.Client, domainName string) error {
exists, err := ensureRemoteExecutable("docker", sshCl)
if err != nil {
return err
}
return true, nil
if !exists {
fmt.Println(fmt.Sprintf(dockerInstallMsg, domainName))
response := false
prompt := &survey.Confirm{
Message: fmt.Sprintf("attempt install docker on %s?", domainName),
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
exes := []string{"wget", "bash"}
if askSudoPass {
exes = append(exes, "ssh-askpass")
}
for _, exe := range exes {
exists, err := ensureRemoteExecutable(exe, sshCl)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("%s missing on remote, please install it", exe)
}
}
var sudoPass string
if askSudoPass {
cmd := "wget -O- https://get.docker.com | bash"
prompt := &survey.Password{
Message: "sudo password?",
}
if err := survey.AskOne(prompt, &sudoPass); err != nil {
return err
}
logrus.Debugf("running %s on %s now with sudo password", cmd, domainName)
if sudoPass == "" {
return fmt.Errorf("missing sudo password but requested --ask-sudo-pass?")
}
logrus.Warn("installing docker, this could take some time...")
if err := ssh.RunSudoCmd(cmd, sudoPass, sshCl); err != nil {
fmt.Print(fmt.Sprintf(`
Abra was unable to bootstrap Docker, see below for logs:
%s
If nothing works, you can try running the Docker install script manually on your server:
wget -O- https://get.docker.com | bash
`, string(err.Error())))
logrus.Fatal("Process exited with status 1")
}
logrus.Infof("docker is installed on %s", domainName)
remoteUser := sshCl.SSHClient.Conn.User()
logrus.Infof("adding %s to docker group", remoteUser)
permsCmd := fmt.Sprintf("sudo usermod -aG docker %s", remoteUser)
if err := ssh.RunSudoCmd(permsCmd, sudoPass, sshCl); err != nil {
return err
}
} else {
cmd := "wget -O- https://get.docker.com | bash"
logrus.Debugf("running %s on %s now without sudo password", cmd, domainName)
logrus.Warn("installing docker, this could take some time...")
if out, err := sshCl.Exec(cmd); err != nil {
fmt.Print(fmt.Sprintf(`
Abra was unable to bootstrap Docker, see below for logs:
%s
This could be due to several reasons. One of the most common is that your
server user account does not have sudo access, and if it does, you need to pass
"--ask-sudo-pass" in order to supply Abra with your password.
If nothing works, you can try running the Docker install script manually on your server:
wget -O- https://get.docker.com | bash
`, string(out)))
logrus.Fatal(err)
}
logrus.Infof("docker is installed on %s", domainName)
}
}
return nil
}
func initSwarmLocal(c *cli.Context, cl *dockerClient.Client, domainName string) error {
initReq := swarm.InitRequest{ListenAddr: "0.0.0.0:2377"}
if _, err := cl.SwarmInit(context.Background(), initReq); err != nil {
if strings.Contains(err.Error(), "is already part of a swarm") ||
strings.Contains(err.Error(), "must specify a listening address") {
logrus.Infof("swarm mode already initialised on %s", domainName)
} else {
return err
}
} else {
logrus.Infof("initialised swarm mode on local server")
}
netOpts := types.NetworkCreate{Driver: "overlay", Scope: "swarm"}
if _, err := cl.NetworkCreate(context.Background(), "proxy", netOpts); err != nil {
if !strings.Contains(err.Error(), "proxy already exists") {
return err
}
logrus.Info("swarm overlay network already created on local server")
} else {
logrus.Infof("swarm overlay network created on local server")
}
return nil
}
func initSwarm(c *cli.Context, cl *dockerClient.Client, domainName string) error {
ipv4, err := dns.EnsureIPv4(domainName)
if err != nil {
return err
}
initReq := swarm.InitRequest{
ListenAddr: "0.0.0.0:2377",
AdvertiseAddr: ipv4,
}
if _, err := cl.SwarmInit(context.Background(), initReq); err != nil {
if strings.Contains(err.Error(), "is already part of a swarm") ||
strings.Contains(err.Error(), "must specify a listening address") {
logrus.Infof("swarm mode already initialised on %s", domainName)
} else {
return err
}
} else {
logrus.Infof("initialised swarm mode on %s", domainName)
}
netOpts := types.NetworkCreate{Driver: "overlay", Scope: "swarm"}
if _, err := cl.NetworkCreate(context.Background(), "proxy", netOpts); err != nil {
if !strings.Contains(err.Error(), "proxy already exists") {
return err
}
logrus.Infof("swarm overlay network already created on %s", domainName)
} else {
logrus.Infof("swarm overlay network created on %s", domainName)
}
return nil
}
func createServerDir(domainName string) error {
if err := server.CreateServerDir(domainName); err != nil {
if !os.IsExist(err) {
return err
}
logrus.Debugf("server dir for %s already created", domainName)
}
return nil
}
var serverAddCommand = cli.Command{
Name: "add",
Aliases: []string{"a"},
Usage: "Add a new server to your configuration",
Usage: "Add a server to your configuration",
Description: `
Add a new server to your configuration so that it can be managed by Abra.
Add a new server to your configuration so that it can be managed by Abra. This
command can also provision your server ("--provision/-p") with a Docker
installation so that it is capable of hosting Co-op Cloud apps.
Abra relies on the standard SSH command-line and ~/.ssh/config for client
connection details. You must configure an entry per-host in your ~/.ssh/config
for each server. For example:
Abra will default to expecting that you have a running ssh-agent and are using
SSH keys to connect to your new server. Abra will also read your SSH config
(matching "Host" as <domain>). SSH connection details precedence follows as
such: command-line > SSH config > guessed defaults.
Host example.com example
Hostname example.com
User exampleUser
Port 12345
IdentityFile ~/.ssh/example@somewhere
If you have no SSH key configured for this host and are instead using password
authentication, you may pass "--ssh-auth password" to have Abra ask you for the
password. "--ask-sudo-pass" may be passed if you run your provisioning commands
via sudo privilege escalation.
You can then add a server like so:
The <domain> argument must be a publicly accessible domain name which points to
your server. You should have working SSH access to this server already, Abra
will assume port 22 and will use your current system username to make an
initial connection. You can use the <user> and <port> arguments to adjust this.
abra server add example.com
Example:
abra server add varia.zone globemodem 12345 -p
Abra will construct the following SSH connection and Docker context:
ssh://globemodem@varia.zone:12345
All communication between Abra and the server will use this SSH connection.
If "--local" is passed, then Abra assumes that the current local server is
intended as the target server. This is useful when you want to have your entire
Co-op Cloud config located on the server itself, and not on your local
developer machine. The domain is then set to "default".
You can also pass the "--no-domain-checks/-D" flag to use any arbitrary name
instead of a real domain. The host will be resolved with the "Hostname" entry
of your ~/.ssh/config. Checks for a valid online domain will be skipped:
abra server add -D example`,
developer machine.
`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.NoDomainChecksFlag,
localFlag,
provisionFlag,
sshAuthFlag,
askSudoPassFlag,
},
Before: internal.SubCommandBefore,
ArgsUsage: "<name>",
ArgsUsage: "<domain> [<user>] [<port>]",
Action: func(c *cli.Context) error {
if len(c.Args()) > 0 && local || !internal.ValidateSubCmdFlags(c) {
err := errors.New("cannot use <name> and --local together")
err := errors.New("cannot use <domain> and --local together")
internal.ShowSubcommandHelpAndError(c, err)
}
var name string
if local {
name = "default"
} else {
name = internal.ValidateDomain(c)
if sshAuth != "password" && sshAuth != "identity-file" {
err := errors.New("--ssh-auth only accepts identity-file or password")
internal.ShowSubcommandHelpAndError(c, err)
}
// NOTE(d1): reasonable 5 second timeout for connections which can't
// succeed. The connection is attempted twice, so this results in 10
// seconds.
timeout := client.WithTimeout(5)
domainName := internal.ValidateDomain(c)
if local {
created, err := createServerDir(name)
if err != nil {
log.Fatal(err)
if err := newLocalServer(c, "default"); err != nil {
logrus.Fatal(err)
}
log.Debugf("attempting to create client for %s", name)
if _, err := client.New(name, timeout); err != nil {
cleanUp(name)
log.Fatal(err)
}
if created {
log.Info("local server successfully added")
} else {
log.Warn("local server already exists")
}
return nil
}
if !internal.NoDomainChecks {
if _, err := dns.EnsureIPv4(name); err != nil {
log.Fatal(err)
username := c.Args().Get(1)
if username == "" {
systemUser, err := user.Current()
if err != nil {
return err
}
username = systemUser.Username
}
port := c.Args().Get(2)
if port == "" {
port = "22"
}
if err := createServerDir(domainName); err != nil {
logrus.Fatal(err)
}
if err := newContext(c, domainName, username, port); err != nil {
logrus.Fatal(err)
}
cl, err := newClient(c, domainName)
if err != nil {
cleanUp(domainName)
logrus.Debugf("failed to construct client for %s, saw %s", domainName, err.Error())
logrus.Fatalf(fmt.Sprintf(internal.ServerAddFailMsg, domainName))
}
if provision {
logrus.Debugf("attempting to construct SSH client for %s", domainName)
sshCl, err := ssh.New(domainName, sshAuth, username, port)
if err != nil {
cleanUp(domainName)
logrus.Fatalf(fmt.Sprintf(internal.ServerAddFailMsg, domainName))
}
defer sshCl.Close()
logrus.Debugf("successfully created SSH client for %s", domainName)
if err := installDocker(c, cl, sshCl, domainName); err != nil {
logrus.Fatal(err)
}
if err := initSwarm(c, cl, domainName); err != nil {
logrus.Fatal(err)
}
}
_, err := createServerDir(name)
if err != nil {
log.Fatal(err)
}
created, err := newContext(name)
if err != nil {
cleanUp(name)
log.Fatal(err)
}
log.Debugf("attempting to create client for %s", name)
if _, err := client.New(name, timeout); err != nil {
cleanUp(name)
log.Fatal(sshPkg.Fatal(name, err))
}
if created {
log.Infof("%s successfully added", name)
} else {
log.Warnf("%s already exists", name)
if _, err := cl.Info(context.Background()); err != nil {
cleanUp(domainName)
logrus.Fatalf(fmt.Sprintf(internal.ServerAddFailMsg, domainName))
}
return nil
},
}
// ensureLocalExecutable ensures that an executable is present on the local machine
func ensureLocalExecutable(exe string) (bool, error) {
out, err := exec.Command("which", exe).Output()
if err != nil {
return false, err
}
return string(out) != "", nil
}
// ensureRemoteExecutable ensures that an executable is present on a remote machine
func ensureRemoteExecutable(exe string, sshCl *ssh.Client) (bool, error) {
out, err := sshCl.Exec(fmt.Sprintf("which %s", exe))
if err != nil && string(out) != "" {
return false, err
}
return string(out) != "", nil
}

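For reference, the Docker context that "abra server add" ends up creating points at an SSH endpoint assembled from the domain, user and port arguments. A tiny sketch of that composition, using the example values from the command help above:

package main

import "fmt"

func main() {
	// abra server add varia.zone globemodem 12345 -p
	domain, user, port := "varia.zone", "globemodem", "12345"

	// All communication between abra and the server goes over this endpoint.
	endpoint := fmt.Sprintf("ssh://%s@%s:%s", user, domain, port)

	fmt.Println(endpoint) // ssh://globemodem@varia.zone:12345
}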
View File

@ -1,15 +1,14 @@
package server
import (
"fmt"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/context"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"github.com/docker/cli/cli/connhelper/ssh"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -19,31 +18,24 @@ var serverListCommand = cli.Command{
Usage: "List managed servers",
Flags: []cli.Flag{
internal.DebugFlag,
internal.MachineReadableFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
dockerContextStore := context.NewDefaultDockerContextStore()
contexts, err := dockerContextStore.Store.List()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
table, err := formatter.CreateTable()
if err != nil {
log.Fatal(err)
}
headers := []string{"NAME", "HOST"}
table.Headers(headers...)
tableColumns := []string{"name", "host", "user", "port"}
table := formatter.CreateTable(tableColumns)
defer table.Render()
serverNames, err := config.ReadServerNames()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
var rows [][]string
for _, serverName := range serverNames {
var row []string
for _, ctx := range contexts {
@ -52,45 +44,24 @@ var serverListCommand = cli.Command{
// No local context found, we can continue safely
continue
}
if ctx.Name == serverName {
sp, err := ssh.ParseURL(endpoint)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if sp.Host == "" {
sp.Host = "unknown"
}
row = []string{serverName, sp.Host}
rows = append(rows, row)
row = []string{serverName, sp.Host, sp.User, sp.Port}
}
}
if len(row) == 0 {
if serverName == "default" {
row = []string{serverName, "local"}
row = []string{serverName, "local", "n/a", "n/a"}
} else {
row = []string{serverName, "unknown"}
row = []string{serverName, "unknown", "unknown", "unknown"}
}
rows = append(rows, row)
}
table.Row(row...)
table.Append(row)
}
if internal.MachineReadable {
out, err := formatter.ToJSON(headers, rows)
if err != nil {
log.Fatal("unable to render to JSON: %s", err)
}
fmt.Println(out)
return nil
}
fmt.Println(table)
return nil
},
}

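"abra server ls" recovers the host, user and port by parsing each context's SSH endpoint with the docker/cli helper imported above. A small sketch of that round-trip; the endpoint value is illustrative:

package main

import (
	"fmt"

	"github.com/docker/cli/cli/connhelper/ssh"
)

func main() {
	sp, err := ssh.ParseURL("ssh://globemodem@varia.zone:12345")
	if err != nil {
		panic(err)
	}

	// These are the values rendered in the server list table above.
	fmt.Println(sp.Host, sp.User, sp.Port)
}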
260 cli/server/new.go Normal file
View File

@ -0,0 +1,260 @@
package server
import (
"context"
"fmt"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/libcapsul"
"github.com/AlecAivazis/survey/v2"
"github.com/hetznercloud/hcloud-go/hcloud"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
func newHetznerCloudVPS(c *cli.Context) error {
if err := internal.EnsureNewHetznerCloudVPSFlags(c); err != nil {
return err
}
client := hcloud.NewClient(hcloud.WithToken(internal.HetznerCloudAPIToken))
var sshKeysRaw []string
var sshKeys []*hcloud.SSHKey
for _, sshKey := range c.StringSlice("hetzner-ssh-keys") {
if sshKey == "" {
continue
}
sshKey, _, err := client.SSHKey.GetByName(context.Background(), sshKey)
if err != nil {
return err
}
sshKeys = append(sshKeys, sshKey)
sshKeysRaw = append(sshKeysRaw, sshKey.Name)
}
serverOpts := hcloud.ServerCreateOpts{
Name: internal.HetznerCloudName,
ServerType: &hcloud.ServerType{Name: internal.HetznerCloudType},
Image: &hcloud.Image{Name: internal.HetznerCloudImage},
SSHKeys: sshKeys,
Location: &hcloud.Location{Name: internal.HetznerCloudLocation},
}
sshKeyIDs := strings.Join(sshKeysRaw, "\n")
if sshKeyIDs == "" {
sshKeyIDs = "N/A (password auth)"
}
tableColumns := []string{"name", "type", "image", "ssh-keys", "location"}
table := formatter.CreateTable(tableColumns)
table.Append([]string{
internal.HetznerCloudName,
internal.HetznerCloudType,
internal.HetznerCloudImage,
sshKeyIDs,
internal.HetznerCloudLocation,
})
table.Render()
response := false
prompt := &survey.Confirm{
Message: "continue with hetzner cloud VPS creation?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
res, _, err := client.Server.Create(context.Background(), serverOpts)
if err != nil {
return err
}
var rootPassword string
if len(sshKeys) > 0 {
rootPassword = "N/A (using SSH keys)"
} else {
rootPassword = res.RootPassword
}
ip := res.Server.PublicNet.IPv4.IP.String()
fmt.Println(fmt.Sprintf(`
Your new Hetzner Cloud VPS has successfully been created! Here are the details:
name: %s
IP address: %s
root password: %s
You can access this new VPS via SSH using the following command:
ssh root@%s
Please note, this server is not managed by Abra yet (i.e. "abra server ls" will
not list this server)! You will need to assign a domain name record (manually
or by using "abra record new") and add the server to your Abra configuration
("abra server add") to have a working server that you can deploy Co-op Cloud
apps to.
When setting up domain name records, you probably want to set up the following
2 A records. This supports deploying apps to your root domain (e.g.
example.com) and other apps on sub-domains (e.g. foo.example.com,
bar.example.com).
@ 1800 IN A %s
* 1800 IN A %s
`,
internal.HetznerCloudName, ip, rootPassword,
ip, ip, ip,
))
return nil
}
func newCapsulVPS(c *cli.Context) error {
if err := internal.EnsureNewCapsulVPSFlags(c); err != nil {
return err
}
capsulCreateURL := fmt.Sprintf("https://%s/api/capsul/create", internal.CapsulInstanceURL)
var sshKeys []string
for _, sshKey := range c.StringSlice("capsul-ssh-keys") {
if sshKey == "" {
continue
}
sshKeys = append(sshKeys, sshKey)
}
tableColumns := []string{"instance", "name", "type", "image", "ssh-keys"}
table := formatter.CreateTable(tableColumns)
table.Append([]string{
internal.CapsulInstanceURL,
internal.CapsulName,
internal.CapsulType,
internal.CapsulImage,
strings.Join(sshKeys, "\n"),
})
table.Render()
response := false
prompt := &survey.Confirm{
Message: "continue with capsul creation?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
capsulClient := libcapsul.New(capsulCreateURL, internal.CapsulAPIToken)
resp, err := capsulClient.Create(
internal.CapsulName,
internal.CapsulType,
internal.CapsulImage,
sshKeys,
)
if err != nil {
return err
}
fmt.Println(fmt.Sprintf(`
Your new Capsul has successfully been created! Here are the details:
Capsul name: %s
Capsul ID: %v
You will need to log into your Capsul instance web interface to retrieve the IP
address. You can learn all about how to get SSH access to your new Capsul on:
%s/about-ssh
Please note, this server is not managed by Abra yet (i.e. "abra server ls" will
not list this server)! You will need to assign a domain name record (manually
or by using "abra record new") and add the server to your Abra configuration
("abra server add") to have a working server that you can deploy Co-op Cloud
apps to.
When setting up domain name records, you probably want to set up the following
2 A records. This supports deploying apps to your root domain (e.g.
example.com) and other apps on sub-domains (e.g. foo.example.com,
bar.example.com).
@ 1800 IN A <your-capsul-ip>
* 1800 IN A <your-capsul-ip>
`, internal.CapsulName, resp.ID, internal.CapsulInstanceURL))
return nil
}
var serverNewCommand = cli.Command{
Name: "new",
Aliases: []string{"n"},
Usage: "Create a new server using a 3rd party provider",
Description: `
Create a new server via a 3rd party provider.
The following providers are supported:
Capsul https://git.cyberia.club/Cyberia/capsul-flask
Hetzner Cloud https://docs.hetzner.com/cloud
You may invoke this command in "wizard" mode and be prompted for input:
abra server new
API tokens are read from the environment if specified, e.g.
export HCLOUD_TOKEN=...
`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.ServerProviderFlag,
// Capsul
internal.CapsulInstanceURLFlag,
internal.CapsulTypeFlag,
internal.CapsulImageFlag,
internal.CapsulSSHKeysFlag,
internal.CapsulAPITokenFlag,
// Hetzner
internal.HetznerCloudNameFlag,
internal.HetznerCloudTypeFlag,
internal.HetznerCloudImageFlag,
internal.HetznerCloudSSHKeysFlag,
internal.HetznerCloudLocationFlag,
internal.HetznerCloudAPITokenFlag,
},
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
if err := internal.EnsureServerProvider(); err != nil {
logrus.Fatal(err)
}
switch internal.ServerProvider {
case "capsul":
if err := newCapsulVPS(c); err != nil {
logrus.Fatal(err)
}
case "hetzner-cloud":
if err := newHetznerCloudVPS(c); err != nil {
logrus.Fatal(err)
}
}
return nil
},
}
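// Illustrative sketch (not part of the original diff): the description above
// notes that API tokens are read from the environment when present. The same
// pattern appears in the server remove command later in this diff; a
// hypothetical fallback might look like:
//
//	if internal.HetznerCloudAPIToken == "" {
//		if token, ok := os.LookupEnv("HCLOUD_TOKEN"); ok {
//			internal.HetznerCloudAPIToken = token
//		}
//	}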


@ -1,102 +0,0 @@
package server
import (
"context"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"github.com/docker/docker/api/types/filters"
"github.com/urfave/cli"
)
var allFilter bool
var allFilterFlag = &cli.BoolFlag{
Name: "all, a",
Usage: "Remove all unused images not just dangling ones",
Destination: &allFilter,
}
var volumesFilter bool
var volumesFilterFlag = &cli.BoolFlag{
Name: "volumes, v",
Usage: "Prune volumes. This will remove app data, Be Careful!",
Destination: &volumesFilter,
}
var serverPruneCommand = cli.Command{
Name: "prune",
Aliases: []string{"p"},
Usage: "Prune resources on a server",
Description: `
Prunes unused containers, networks, and dangling images.
Use "-v/--volumes" to remove volumes that are not associated with a deployed
app. This can result in unwanted data loss if not used carefully.`,
ArgsUsage: "[<server>]",
Flags: []cli.Flag{
allFilterFlag,
volumesFilterFlag,
internal.DebugFlag,
internal.OfflineFlag,
internal.NoInputFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.ServerNameComplete,
Action: func(c *cli.Context) error {
serverName := internal.ValidateServer(c)
cl, err := client.New(serverName)
if err != nil {
log.Fatal(err)
}
var args filters.Args
ctx := context.Background()
cr, err := cl.ContainersPrune(ctx, args)
if err != nil {
log.Fatal(err)
}
cntSpaceReclaimed := formatter.ByteCountSI(cr.SpaceReclaimed)
log.Infof("containers pruned: %d; space reclaimed: %s", len(cr.ContainersDeleted), cntSpaceReclaimed)
nr, err := cl.NetworksPrune(ctx, args)
if err != nil {
log.Fatal(err)
}
log.Infof("networks pruned: %d", len(nr.NetworksDeleted))
pruneFilters := filters.NewArgs()
if allFilter {
log.Debugf("removing all images, not only dangling ones")
pruneFilters.Add("dangling", "false")
}
ir, err := cl.ImagesPrune(ctx, pruneFilters)
if err != nil {
log.Fatal(err)
}
imgSpaceReclaimed := formatter.ByteCountSI(ir.SpaceReclaimed)
log.Infof("images pruned: %d; space reclaimed: %s", len(ir.ImagesDeleted), imgSpaceReclaimed)
if volumesFilter {
vr, err := cl.VolumesPrune(ctx, args)
if err != nil {
log.Fatal(err)
}
volSpaceReclaimed := formatter.ByteCountSI(vr.SpaceReclaimed)
log.Infof("volumes pruned: %d; space reclaimed: %s", len(vr.VolumesDeleted), volSpaceReclaimed)
}
return nil
},
}


@ -1,47 +1,183 @@
package server
import (
"context"
"fmt"
"os"
"path/filepath"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/formatter"
"github.com/AlecAivazis/survey/v2"
"github.com/hetznercloud/hcloud-go/hcloud"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var rmServer bool
var rmServerFlag = &cli.BoolFlag{
Name: "server, s",
Usage: "remove the actual server also",
Destination: &rmServer,
}
func rmHetznerCloudVPS(c *cli.Context) error {
if internal.HetznerCloudName == "" && !internal.NoInput {
prompt := &survey.Input{
Message: "specify hetzner cloud VPS name",
}
if err := survey.AskOne(prompt, &internal.HetznerCloudName); err != nil {
return err
}
}
if internal.HetznerCloudAPIToken == "" && !internal.NoInput {
token, ok := os.LookupEnv("HCLOUD_TOKEN")
if !ok {
prompt := &survey.Input{
Message: "specify hetzner cloud API token",
}
if err := survey.AskOne(prompt, &internal.HetznerCloudAPIToken); err != nil {
return err
}
} else {
internal.HetznerCloudAPIToken = token
}
}
client := hcloud.NewClient(hcloud.WithToken(internal.HetznerCloudAPIToken))
server, _, err := client.Server.Get(context.Background(), internal.HetznerCloudName)
if err != nil {
return err
}
if server == nil {
logrus.Fatalf("library provider reports that %s doesn't exist?", internal.HetznerCloudName)
}
fmt.Println(fmt.Sprintf(`
You have requested that Abra delete the following server (%s). Please be
absolutely sure that this is indeed the server that you would like to have
removed. There will be no going back once you confirm, the server will be
destroyed.
`, server.Name))
tableColumns := []string{"name", "type", "image", "location"}
table := formatter.CreateTable(tableColumns)
table.Append([]string{
server.Name,
server.ServerType.Name,
server.Image.Name,
server.Datacenter.Name,
})
table.Render()
response := false
prompt := &survey.Confirm{
Message: "continue with hetzner cloud VPS removal?",
}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}
if !response {
logrus.Fatal("exiting as requested")
}
_, err = client.Server.Delete(context.Background(), server)
if err != nil {
return err
}
logrus.Infof("%s has been deleted from your hetzner cloud account", internal.HetznerCloudName)
return nil
}
var serverRemoveCommand = cli.Command{
Name: "remove",
Aliases: []string{"rm"},
ArgsUsage: "<server>",
ArgsUsage: "[<server>]",
Usage: "Remove a managed server",
Description: `
Remove a managed server.
Remove a server from Abra management.
Abra will remove the internal bookkeeping (~/.abra/servers/...) and underlying
client connection context. This server will then be lost in time, like tears in
rain.`,
Depending on whether you used a 3rd party provider to create this server ("abra
server new"), you can also destroy the virtual server as well. Pass
"--server/-s" to tell Abra to try to delete this VPS.
Otherwise, Abra will remove the internal bookkeeping (~/.abra/servers/...) and
underlying client connection context. This server will then be lost in time,
like tears in rain.
`,
Flags: []cli.Flag{
internal.DebugFlag,
internal.NoInputFlag,
internal.OfflineFlag,
rmServerFlag,
internal.ServerProviderFlag,
// Hetzner
internal.HetznerCloudNameFlag,
internal.HetznerCloudAPITokenFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.ServerNameComplete,
Before: internal.SubCommandBefore,
Action: func(c *cli.Context) error {
serverName := internal.ValidateServer(c)
warnMsg := `Did not pass -s/--server for actual server deletion, prompting!
Abra doesn't currently know if it helped you create this server with one of the
3rd party integrations (e.g. Capsul). You have a choice here to actually,
really and finally destroy this server using those integrations. If you want to
do this, choose Yes.
If you just want to remove the server config files & context, choose No.
`
if !rmServer {
logrus.Warn(warnMsg)
response := false
prompt := &survey.Confirm{
Message: "delete actual live server?",
}
if err := survey.AskOne(prompt, &response); err != nil {
logrus.Fatal(err)
}
if response {
logrus.Info("setting -s/--server and attempting to remove actual server")
rmServer = true
}
}
if rmServer {
if err := internal.EnsureServerProvider(); err != nil {
logrus.Fatal(err)
}
switch internal.ServerProvider {
case "capsul":
logrus.Warn("capsul provider does not support automatic removal yet, sorry!")
case "hetzner-cloud":
if err := rmHetznerCloudVPS(c); err != nil {
logrus.Fatal(err)
}
}
}
if err := client.DeleteContext(serverName); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
if err := os.RemoveAll(filepath.Join(config.SERVERS_DIR, serverName)); err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
log.Infof("%s is now lost in time, like tears in rain", serverName)
logrus.Infof("server at %s has been lost in time, like tears in rain", serverName)
return nil
},


@ -9,10 +9,18 @@ var ServerCommand = cli.Command{
Name: "server",
Aliases: []string{"s"},
Usage: "Manage servers",
Description: `
Create, manage and remove servers using 3rd party integrations.
Servers can be created from scratch using the "abra server new" command. If you
already have a server, you can add it to your configuration using "abra server
add". Abra can provision servers so that they are ready to deploy Co-op Cloud
recipes, see available flags on "abra server add" for more.
`,
Subcommands: []cli.Command{
serverNewCommand,
serverAddCommand,
serverListCommand,
serverRemoveCommand,
serverPruneCommand,
},
}


@ -1,508 +0,0 @@
package updater
import (
"context"
"fmt"
"os"
"strconv"
"strings"
"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/convert"
"coopcloud.tech/abra/pkg/upstream/stack"
"coopcloud.tech/tagcmp"
charmLog "github.com/charmbracelet/log"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerclient "github.com/docker/docker/client"
"coopcloud.tech/abra/pkg/log"
"github.com/urfave/cli"
)
const SERVER = "localhost"
var majorUpdate bool
var majorFlag = &cli.BoolFlag{
Name: "major, m",
Usage: "Also check for major updates",
Destination: &majorUpdate,
}
var updateAll bool
var allFlag = &cli.BoolFlag{
Name: "all, a",
Usage: "Update all deployed apps",
Destination: &updateAll,
}
// Notify checks for available upgrades
var Notify = cli.Command{
Name: "notify",
Aliases: []string{"n"},
Usage: "Check for available upgrades",
Flags: []cli.Flag{
internal.DebugFlag,
majorFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Description: `
Read the deployed app versions and look for new versions in the recipe
catalogue.
If a new patch/minor version is available, a notification is printed.
Use "--major" to include new major versions.`,
Action: func(c *cli.Context) error {
cl, err := client.New("default")
if err != nil {
log.Fatal(err)
}
stacks, err := stack.GetStacks(cl)
if err != nil {
log.Fatal(err)
}
for _, stackInfo := range stacks {
stackName := stackInfo.Name
recipeName, err := getLabel(cl, stackName, "recipe")
if err != nil {
log.Fatal(err)
}
if recipeName != "" {
_, err = getLatestUpgrade(cl, stackName, recipeName)
if err != nil {
log.Fatal(err)
}
}
}
return nil
},
}
// UpgradeApp upgrades apps.
var UpgradeApp = cli.Command{
Name: "upgrade",
Aliases: []string{"u"},
Usage: "Upgrade apps",
ArgsUsage: "<stack-name> <recipe>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.ChaosFlag,
majorFlag,
allFlag,
internal.OfflineFlag,
},
Before: internal.SubCommandBefore,
Description: `
Upgrade an app by specifying stack name and recipe.
Use "--all" to upgrade every deployed app.
For each app with auto updates enabled, the deployed version is compared with
the current recipe catalogue version. If a new patch/minor version is
available, the app is upgraded.
To include major versions use the "--major" flag. You probably don't want that
as it will break things. Only apps that are not deployed with "--chaos" are
upgraded, to update chaos deployments use the "--chaos" flag. Use it with care.`,
Action: func(c *cli.Context) error {
cl, err := client.New("default")
if err != nil {
log.Fatal(err)
}
if !updateAll {
stackName := c.Args().Get(0)
recipeName := c.Args().Get(1)
err = tryUpgrade(cl, stackName, recipeName)
if err != nil {
log.Fatal(err)
}
return nil
}
stacks, err := stack.GetStacks(cl)
if err != nil {
log.Fatal(err)
}
for _, stackInfo := range stacks {
stackName := stackInfo.Name
recipeName, err := getLabel(cl, stackName, "recipe")
if err != nil {
log.Fatal(err)
}
err = tryUpgrade(cl, stackName, recipeName)
if err != nil {
log.Fatal(err)
}
}
return nil
},
}
// getLabel reads docker labels from running services in the format of "coop-cloud.${STACK_NAME}.${LABEL}".
func getLabel(cl *dockerclient.Client, stackName string, label string) (string, error) {
filter := filters.NewArgs()
filter.Add("label", fmt.Sprintf("%s=%s", convert.LabelNamespace, stackName))
services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: filter})
if err != nil {
return "", err
}
for _, service := range services {
labelKey := fmt.Sprintf("coop-cloud.%s.%s", stackName, label)
if labelValue, ok := service.Spec.Labels[labelKey]; ok {
return labelValue, nil
}
}
log.Debugf("no %s label found for %s", label, stackName)
return "", nil
}
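// Worked example (hypothetical stack name and label, for illustration only):
// a service carrying the label "coop-cloud.my_app_com.recipe=wordpress" would
// make getLabel(cl, "my_app_com", "recipe") return "wordpress".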
// getBoolLabel reads a boolean docker label from running services
func getBoolLabel(cl *dockerclient.Client, stackName string, label string) (bool, error) {
labelValue, err := getLabel(cl, stackName, label)
if err != nil {
return false, err
}
if labelValue != "" {
value, err := strconv.ParseBool(labelValue)
if err != nil {
return false, err
}
return value, nil
}
log.Debugf("boolean label %s could not be found for %s, set default to false.", label, stackName)
return false, nil
}
// getEnv reads env variables from docker services.
func getEnv(cl *dockerclient.Client, stackName string) (envfile.AppEnv, error) {
envMap := make(map[string]string)
filter := filters.NewArgs()
filter.Add("label", fmt.Sprintf("%s=%s", convert.LabelNamespace, stackName))
services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: filter})
if err != nil {
return nil, err
}
for _, service := range services {
envList := service.Spec.TaskTemplate.ContainerSpec.Env
for _, envString := range envList {
splitString := strings.SplitN(envString, "=", 2)
if len(splitString) != 2 {
log.Debugf("can't separate key from value: %s (this variable is probably unset)", envString)
continue
}
k := splitString[0]
v := splitString[1]
log.Debugf("for %s read env %s with value: %s from docker service", stackName, k, v)
envMap[k] = v
}
}
return envMap, nil
}
// getLatestUpgrade returns the latest available version for an app respecting
// the "--major" flag if it is newer than the currently deployed version.
func getLatestUpgrade(cl *dockerclient.Client, stackName string, recipeName string) (string, error) {
deployedVersion, err := getDeployedVersion(cl, stackName, recipeName)
if err != nil {
return "", err
}
availableUpgrades, err := getAvailableUpgrades(cl, stackName, recipeName, deployedVersion)
if err != nil {
return "", err
}
if len(availableUpgrades) == 0 {
log.Debugf("no available upgrades for %s", stackName)
return "", nil
}
var chosenUpgrade string
if len(availableUpgrades) > 0 {
chosenUpgrade = availableUpgrades[len(availableUpgrades)-1]
log.Infof("%s (%s) can be upgraded from version %s to %s", stackName, recipeName, deployedVersion, chosenUpgrade)
}
return chosenUpgrade, nil
}
// getDeployedVersion returns the currently deployed version of an app.
func getDeployedVersion(cl *dockerclient.Client, stackName string, recipeName string) (string, error) {
log.Debugf("retrieve deployed version whether %s is already deployed", stackName)
deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
if err != nil {
return "", err
}
if !deployMeta.IsDeployed {
return "", fmt.Errorf("%s is not deployed?", stackName)
}
if deployMeta.Version == "unknown" {
return "", fmt.Errorf("failed to determine deployed version of %s", stackName)
}
return deployMeta.Version, nil
}
// getAvailableUpgrades returns all available versions of an app that are newer
// than the deployed version. It only includes major upgrades if the "--major"
// flag is set.
func getAvailableUpgrades(cl *dockerclient.Client, stackName string, recipeName string,
deployedVersion string) ([]string, error) {
catl, err := recipe.ReadRecipeCatalogue(internal.Offline)
if err != nil {
return nil, err
}
versions, err := recipe.GetRecipeCatalogueVersions(recipeName, catl)
if err != nil {
return nil, err
}
if len(versions) == 0 {
log.Warnf("no published releases for %s in the recipe catalogue?", recipeName)
return nil, nil
}
var availableUpgrades []string
for _, version := range versions {
parsedDeployedVersion, err := tagcmp.Parse(deployedVersion)
if err != nil {
return nil, err
}
parsedVersion, err := tagcmp.Parse(version)
if err != nil {
return nil, err
}
versionDelta, err := parsedDeployedVersion.UpgradeDelta(parsedVersion)
if err != nil {
return nil, err
}
if 0 < versionDelta.UpgradeType() && (versionDelta.UpgradeType() < 4 || majorUpdate) {
availableUpgrades = append(availableUpgrades, version)
}
}
log.Debugf("available updates for %s: %s", stackName, availableUpgrades)
return availableUpgrades, nil
}
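// Hedged sketch (not in the original file) of the comparison used above. The
// reading of UpgradeType() (any value above 0 is an upgrade; values below 4
// are treated as patch/minor, 4 and above as major) is inferred from the
// check in getAvailableUpgrades, not from tagcmp documentation. The version
// strings are made up.
func exampleUpgradeDelta(includeMajor bool) (bool, error) {
	deployed, err := tagcmp.Parse("1.2.0")
	if err != nil {
		return false, err
	}
	candidate, err := tagcmp.Parse("1.3.0")
	if err != nil {
		return false, err
	}
	delta, err := deployed.UpgradeDelta(candidate)
	if err != nil {
		return false, err
	}
	t := delta.UpgradeType()
	// offered as an upgrade when there is any bump at all and it is either
	// small enough or "--major" was passed
	return 0 < t && (t < 4 || includeMajor), nil
}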
// processRecipeRepoVersion clones, pulls, checks out the version and lints the
// recipe repository.
func processRecipeRepoVersion(r recipe.Recipe, version string) error {
if err := r.EnsureExists(); err != nil {
return err
}
if err := r.EnsureUpToDate(); err != nil {
return err
}
if _, err := r.EnsureVersion(version); err != nil {
return err
}
if err := lint.LintForErrors(r); err != nil {
return err
}
return nil
}
// mergeAbraShEnv merges abra.sh env vars into the app env vars.
func mergeAbraShEnv(recipe recipe.Recipe, env envfile.AppEnv) error {
abraShEnv, err := envfile.ReadAbraShEnvVars(recipe.AbraShPath)
if err != nil {
return err
}
for k, v := range abraShEnv {
log.Debugf("read v:%s k: %s", v, k)
env[k] = v
}
return nil
}
// createDeployConfig merges and enriches the compose config for the deployment.
func createDeployConfig(r recipe.Recipe, stackName string, env envfile.AppEnv) (*composetypes.Config, stack.Deploy, error) {
env["STACK_NAME"] = stackName
deployOpts := stack.Deploy{
Namespace: stackName,
Prune: false,
ResolveImage: stack.ResolveImageAlways,
Detach: false,
}
composeFiles, err := r.GetComposeFiles(env)
if err != nil {
return nil, deployOpts, err
}
deployOpts.Composefiles = composeFiles
compose, err := appPkg.GetAppComposeConfig(stackName, deployOpts, env)
if err != nil {
return nil, deployOpts, err
}
appPkg.ExposeAllEnv(stackName, compose, env)
// after the upgrade the deployment won't be in chaos state anymore
appPkg.SetChaosLabel(compose, stackName, false)
appPkg.SetRecipeLabel(compose, stackName, r.Name)
appPkg.SetUpdateLabel(compose, stackName, env)
return compose, deployOpts, nil
}
// tryUpgrade performs the upgrade if all the requirements are fulfilled.
func tryUpgrade(cl *dockerclient.Client, stackName, recipeName string) error {
if recipeName == "" {
log.Debugf("don't update %s due to missing recipe name", stackName)
return nil
}
chaos, err := getBoolLabel(cl, stackName, "chaos")
if err != nil {
return err
}
if chaos && !internal.Chaos {
log.Debugf("don't update %s due to chaos deployment", stackName)
return nil
}
updatesEnabled, err := getBoolLabel(cl, stackName, "autoupdate")
if err != nil {
return err
}
if !updatesEnabled {
log.Debugf("don't update %s due to disabled auto updates or missing ENABLE_AUTO_UPDATE env", stackName)
return nil
}
upgradeVersion, err := getLatestUpgrade(cl, stackName, recipeName)
if err != nil {
return err
}
if upgradeVersion == "" {
log.Debugf("don't update %s due to no new version", stackName)
return nil
}
err = upgrade(cl, stackName, recipeName, upgradeVersion)
return err
}
// upgrade performs all necessary steps to upgrade an app.
func upgrade(cl *dockerclient.Client, stackName, recipeName, upgradeVersion string) error {
env, err := getEnv(cl, stackName)
if err != nil {
return err
}
app := appPkg.App{
Name: stackName,
Recipe: recipe.Get(recipeName),
Server: SERVER,
Env: env,
}
r := recipe.Get(recipeName)
if err = processRecipeRepoVersion(r, upgradeVersion); err != nil {
return err
}
if err = mergeAbraShEnv(app.Recipe, app.Env); err != nil {
return err
}
compose, deployOpts, err := createDeployConfig(r, stackName, app.Env)
if err != nil {
return err
}
log.Infof("upgrade %s (%s) to version %s", stackName, recipeName, upgradeVersion)
err = stack.RunDeploy(cl, deployOpts, compose, stackName, true)
return err
}
func newAbraApp(version, commit string) *cli.App {
app := &cli.App{
Name: "kadabra",
Usage: `The Co-op Cloud auto-updater
____ ____ _ _
/ ___|___ ___ _ __ / ___| | ___ _ _ __| |
| | / _ \ _____ / _ \| '_ \ | | | |/ _ \| | | |/ _' |
| |__| (_) |_____| (_) | |_) | | |___| | (_) | |_| | (_| |
\____\___/ \___/| .__/ \____|_|\___/ \__,_|\__,_|
|_|
`,
Version: fmt.Sprintf("%s-%s", version, commit[:7]),
Commands: []cli.Command{
Notify,
UpgradeApp,
},
}
app.Before = func(c *cli.Context) error {
log.Logger.SetStyles(log.Styles())
charmLog.SetDefault(log.Logger)
log.Debugf("kadabra version %s, commit %s", version, commit)
return nil
}
return app
}
// RunApp runs CLI abra app.
func RunApp(version, commit string) {
app := newAbraApp(version, commit)
if err := app.Run(os.Args); err != nil {
log.Fatal(err)
}
}


@ -5,10 +5,10 @@ import (
"coopcloud.tech/abra/cli"
)
// Version is the current version of Abra.
// Version is the current version of Abra
var Version string
// Commit is the current git commit of Abra.
// Commit is the current git commit of Abra
var Commit string
func main() {


@ -1,23 +0,0 @@
// Package main provides the command-line entrypoint.
package main
import (
"coopcloud.tech/abra/cli/updater"
)
// Version is the current version of Kadabra.
var Version string
// Commit is the current git commit of Kadabra.
var Commit string
func main() {
if Version == "" {
Version = "dev"
}
if Commit == "" {
Commit = " "
}
updater.RunApp(Version, Commit)
}

165
go.mod

@ -1,139 +1,54 @@
module coopcloud.tech/abra
go 1.21
go 1.16
require (
coopcloud.tech/tagcmp v0.0.0-20230809071031-eb3e7758d4eb
git.coopcloud.tech/coop-cloud/godotenv v1.5.2-0.20231130100509-01bff8284355
github.com/AlecAivazis/survey/v2 v2.3.7
github.com/charmbracelet/lipgloss v0.11.1
github.com/charmbracelet/log v0.4.0
github.com/distribution/reference v0.6.0
github.com/docker/cli v27.0.3+incompatible
github.com/docker/docker v27.0.3+incompatible
github.com/docker/go-units v0.5.0
github.com/go-git/go-git/v5 v5.12.0
github.com/google/go-cmp v0.6.0
coopcloud.tech/tagcmp v0.0.0-20211103052201-885b22f77d52
github.com/AlecAivazis/survey/v2 v2.3.4
github.com/Autonomic-Cooperative/godotenv v1.3.1-0.20210731094149-b031ea1211e7
github.com/Gurpartap/logrus-stack v0.0.0-20170710170904-89c00d8a28f4
github.com/docker/cli v20.10.16+incompatible
github.com/docker/distribution v2.8.1+incompatible
github.com/docker/docker v20.10.16+incompatible
github.com/docker/go-units v0.4.0
github.com/go-git/go-git/v5 v5.4.2
github.com/hetznercloud/hcloud-go v1.33.2
github.com/moby/sys/signal v0.7.0
github.com/moby/term v0.5.0
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
github.com/olekukonko/tablewriter v0.0.5
github.com/pkg/errors v0.9.1
github.com/schollz/progressbar/v3 v3.14.4
golang.org/x/term v0.22.0
gopkg.in/yaml.v3 v3.0.1
gotest.tools/v3 v3.5.1
github.com/schollz/progressbar/v3 v3.8.6
github.com/schultz-is/passgen v1.0.1
github.com/sirupsen/logrus v1.8.1
gotest.tools/v3 v3.2.0
)
require (
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/BurntSushi/toml v1.4.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/ProtonMail/go-crypto v1.0.0 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/charmbracelet/x/ansi v0.1.3 // indirect
github.com/cloudflare/circl v1.3.9 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/cyphar/filepath-securejoin v0.2.5 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.5.0 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-viper/mapstructure/v2 v2.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/muesli/termenv v0.15.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.1.13 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/skeema/knownhosts v1.2.2 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/crypto v0.25.0 // indirect
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
golang.org/x/net v0.27.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
require (
github.com/containerd/containerd v1.7.19 // indirect
coopcloud.tech/libcapsul v0.0.0-20211022074848-c35e78fe3f3e
github.com/ProtonMail/go-crypto v0.0.0-20211112122917-428f8eabeeb3 // indirect
github.com/buger/goterm v1.0.4
github.com/containerd/containerd v1.5.9 // indirect
github.com/containers/image v3.0.2+incompatible
github.com/containers/storage v1.38.2 // indirect
github.com/decentral1se/passgen v1.0.1
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/fvbommel/sortorder v1.1.0 // indirect
github.com/docker/docker-credential-helpers v0.6.4 // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect
github.com/gliderlabs/ssh v0.3.4
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/stretchr/testify v1.9.0
github.com/gorilla/mux v1.8.0 // indirect
github.com/hashicorp/go-retryablehttp v0.7.1
github.com/kevinburke/ssh_config v1.2.0
github.com/klauspost/pgzip v1.2.5
github.com/libdns/gandi v1.0.2
github.com/libdns/libdns v0.2.1
github.com/moby/sys/mount v0.2.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/spf13/cobra v1.3.0 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/urfave/cli v1.22.15
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
golang.org/x/sys v0.22.0
github.com/urfave/cli v1.22.9
github.com/xanzy/ssh-agent v0.3.1 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b // indirect
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27
)

771
go.sum

File diff suppressed because it is too large.


@ -1,624 +1,85 @@
package app
import (
"bufio"
"context"
"fmt"
"os"
"path"
"regexp"
"sort"
"strings"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/convert"
"coopcloud.tech/abra/pkg/upstream/stack"
"coopcloud.tech/abra/pkg/log"
loader "coopcloud.tech/abra/pkg/upstream/stack"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types/filters"
"github.com/schollz/progressbar/v3"
apiclient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
// Get retrieves an app
func Get(appName string) (App, error) {
files, err := LoadAppFiles("")
func Get(appName string) (config.App, error) {
files, err := config.LoadAppFiles("")
if err != nil {
return App{}, err
return config.App{}, err
}
app, err := GetApp(files, appName)
app, err := config.GetApp(files, appName)
if err != nil {
return App{}, err
return config.App{}, err
}
log.Debugf("retrieved %s for %s", app, appName)
logrus.Debugf("retrieved %s for %s", app, appName)
return app, nil
}
// GetApp loads an apps settings, reading it from file, in preparation to use
// it. It should only be used when ready to use the env file to keep IO
// operations down.
func GetApp(apps AppFiles, name AppName) (App, error) {
appFile, exists := apps[name]
if !exists {
return App{}, fmt.Errorf("cannot find app with name %s", name)
}
app, err := ReadAppEnvFile(appFile, name)
if err != nil {
return App{}, err
}
return app, nil
// deployedServiceSpec represents a deployed service of an app.
type deployedServiceSpec struct {
Name string
Version string
}
// GetApps returns a slice of Apps with their env files read from a given
// slice of AppFiles.
func GetApps(appFiles AppFiles, recipeFilter string) ([]App, error) {
var apps []App
// VersionSpec represents a deployed app and associated metadata.
type VersionSpec map[string]deployedServiceSpec
for name := range appFiles {
app, err := GetApp(appFiles, name)
if err != nil {
return nil, err
}
// DeployedVersions lists metadata (e.g. versions) for the deployed services of an app.
func DeployedVersions(ctx context.Context, cl *apiclient.Client, app config.App) (VersionSpec, bool, error) {
services, err := stack.GetStackServices(ctx, cl, app.StackName())
if err != nil {
return VersionSpec{}, false, err
}
if recipeFilter != "" {
if app.Recipe.Name == recipeFilter {
apps = append(apps, app)
}
} else {
apps = append(apps, app)
appSpec := make(VersionSpec)
for _, service := range services {
serviceName := ParseServiceName(service.Spec.Name)
label := fmt.Sprintf("coop-cloud.%s.%s.version", app.StackName(), serviceName)
if deployLabel, ok := service.Spec.Labels[label]; ok {
version, _ := ParseVersionLabel(deployLabel)
appSpec[serviceName] = deployedServiceSpec{Name: serviceName, Version: version}
}
}
return apps, nil
}
deployed := len(services) > 0
// App represents an app with its env file read into memory
type App struct {
Name AppName
Recipe recipe.Recipe
Domain string
Env envfile.AppEnv
Server string
Path string
}
// Type aliases to make code hints easier to understand
// AppName is AppName
type AppName = string
// AppFile represents app env files on disk without reading the contents
type AppFile struct {
Path string
Server string
}
// AppFiles is a slice of appfiles
type AppFiles map[AppName]AppFile
// See documentation of config.StackName
func (a App) StackName() string {
if _, exists := a.Env["STACK_NAME"]; exists {
return a.Env["STACK_NAME"]
}
stackName := StackName(a.Name)
a.Env["STACK_NAME"] = stackName
return stackName
}
// StackName gets the Docker-safe stack name (i.e. using the right delimiting
// character, e.g. "_") for the app. In general, you don't want to use this to
// show anything to end-users; use a.Name instead.
func StackName(appName string) string {
stackName := SanitiseAppName(appName)
if len(stackName) > config.MAX_SANITISED_APP_NAME_LENGTH {
log.Debugf("trimming %s to %s to avoid runtime limits", stackName, stackName[:config.MAX_SANITISED_APP_NAME_LENGTH])
stackName = stackName[:config.MAX_SANITISED_APP_NAME_LENGTH]
}
return stackName
}
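// Example (hypothetical app name): "cloud.example.com" becomes the stack name
// "cloud_example_com"; anything longer than config.MAX_SANITISED_APP_NAME_LENGTH
// is truncated before being handed to the swarm.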
// Filters retrieves app filters for querying the container runtime. By default
// it filters on all services in the app. It is also possible to pass an
// optional list of service names, which get filtered instead.
//
// Due to upstream issues, filtering works differently depending on what you're
// querying. So, for example, secrets don't work with regex! The caller needs
// to implement their own validation that the right secrets are matched. In
// order to handle these cases, we provide the `appendServiceNames` /
// `exactMatch` modifiers.
func (a App) Filters(appendServiceNames, exactMatch bool, services ...string) (filters.Args, error) {
filters := filters.NewArgs()
if len(services) > 0 {
for _, serviceName := range services {
filters.Add("name", ServiceFilter(a.StackName(), serviceName, exactMatch))
}
return filters, nil
}
// When not appending the service name, just add one filter for the whole
// stack.
if !appendServiceNames {
f := fmt.Sprintf("%s", a.StackName())
if exactMatch {
f = fmt.Sprintf("^%s", f)
}
filters.Add("name", f)
return filters, nil
}
composeFiles, err := a.Recipe.GetComposeFiles(a.Env)
if err != nil {
return filters, err
}
opts := stack.Deploy{Composefiles: composeFiles}
compose, err := GetAppComposeConfig(a.Recipe.Name, opts, a.Env)
if err != nil {
return filters, err
}
for _, service := range compose.Services {
f := ServiceFilter(a.StackName(), service.Name, exactMatch)
filters.Add("name", f)
}
return filters, nil
}
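// Illustrative summary (not in the original file) of how the modifiers above
// change the generated filters, using the hypothetical app "test.example.com"
// with services "foo" and "bar" from the test data in the app tests later in
// this diff:
//
//	Filters(false, false)        -> name=test_example_com
//	Filters(false, true)         -> name=^test_example_com
//	Filters(true, false)         -> name=test_example_com_foo, name=test_example_com_bar
//	Filters(true, true)          -> name=^test_example_com_foo, name=^test_example_com_bar
//	Filters(false, false, "foo") -> name=test_example_com_foo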
// ServiceFilter creates a filter string for filtering a service in the docker
// container runtime. When exact match is true, it uses regex to match the
// string exactly.
func ServiceFilter(stack, service string, exact bool) string {
if exact {
return fmt.Sprintf("^%s_%s", stack, service)
}
return fmt.Sprintf("%s_%s", stack, service)
}
// ByServer sort a slice of Apps
type ByServer []App
func (a ByServer) Len() int { return len(a) }
func (a ByServer) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByServer) Less(i, j int) bool {
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
}
// ByServerAndRecipe sort a slice of Apps
type ByServerAndRecipe []App
func (a ByServerAndRecipe) Len() int { return len(a) }
func (a ByServerAndRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByServerAndRecipe) Less(i, j int) bool {
if a[i].Server == a[j].Server {
return strings.ToLower(a[i].Recipe.Name) < strings.ToLower(a[j].Recipe.Name)
}
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
}
// ByRecipe sort a slice of Apps
type ByRecipe []App
func (a ByRecipe) Len() int { return len(a) }
func (a ByRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByRecipe) Less(i, j int) bool {
return strings.ToLower(a[i].Recipe.Name) < strings.ToLower(a[j].Recipe.Name)
}
// ByName sort a slice of Apps
type ByName []App
func (a ByName) Len() int { return len(a) }
func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByName) Less(i, j int) bool {
return strings.ToLower(a[i].Name) < strings.ToLower(a[j].Name)
}
func ReadAppEnvFile(appFile AppFile, name AppName) (App, error) {
env, err := envfile.ReadEnv(appFile.Path)
if err != nil {
return App{}, fmt.Errorf("env file for %s couldn't be read: %s", name, err.Error())
}
log.Debugf("read env %s from %s", env, appFile.Path)
app, err := NewApp(env, name, appFile)
if err != nil {
return App{}, fmt.Errorf("env file for %s has issues: %s", name, err.Error())
}
return app, nil
}
// NewApp creates new App object
func NewApp(env envfile.AppEnv, name string, appFile AppFile) (App, error) {
domain := env["DOMAIN"]
recipeName, exists := env["RECIPE"]
if !exists {
recipeName, exists = env["TYPE"]
if !exists {
return App{}, fmt.Errorf("%s is missing the TYPE env var?", name)
}
}
return App{
Name: name,
Domain: domain,
Recipe: recipe.Get(recipeName),
Env: env,
Server: appFile.Server,
Path: appFile.Path,
}, nil
}
// LoadAppFiles gets all app files for a given set of servers or all servers.
func LoadAppFiles(servers ...string) (AppFiles, error) {
appFiles := make(AppFiles)
if len(servers) == 1 {
if servers[0] == "" {
// Empty servers flag, one string will always be passed
var err error
servers, err = config.GetAllFoldersInDirectory(config.SERVERS_DIR)
if err != nil {
return appFiles, err
}
}
}
log.Debugf("collecting metadata from %v servers: %s", len(servers), strings.Join(servers, ", "))
for _, server := range servers {
serverDir := path.Join(config.SERVERS_DIR, server)
files, err := config.GetAllFilesInDirectory(serverDir)
if err != nil {
return appFiles, fmt.Errorf("server %s doesn't exist? Run \"abra server ls\" to check", server)
}
for _, file := range files {
appName := strings.TrimSuffix(file.Name(), ".env")
appFilePath := path.Join(config.SERVERS_DIR, server, file.Name())
appFiles[appName] = AppFile{
Path: appFilePath,
Server: server,
}
}
}
return appFiles, nil
}
// GetAppServiceNames retrieves a list of app service names.
func GetAppServiceNames(appName string) ([]string, error) {
var serviceNames []string
appFiles, err := LoadAppFiles("")
if err != nil {
return serviceNames, err
}
app, err := GetApp(appFiles, appName)
if err != nil {
return serviceNames, err
}
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
return serviceNames, err
}
opts := stack.Deploy{Composefiles: composeFiles}
compose, err := GetAppComposeConfig(app.Recipe.Name, opts, app.Env)
if err != nil {
return serviceNames, err
}
for _, service := range compose.Services {
serviceNames = append(serviceNames, service.Name)
}
return serviceNames, nil
}
// GetAppNames retrieves a list of app names.
func GetAppNames() ([]string, error) {
var appNames []string
appFiles, err := LoadAppFiles("")
if err != nil {
return appNames, err
}
apps, err := GetApps(appFiles, "")
if err != nil {
return appNames, err
}
for _, app := range apps {
appNames = append(appNames, app.Name)
}
return appNames, nil
}
// TemplateAppEnvSample copies the example env file for the app into the user's
// env files.
func TemplateAppEnvSample(r recipe.Recipe, appName, server, domain string) error {
envSample, err := os.ReadFile(r.SampleEnvPath)
if err != nil {
return err
}
appEnvPath := path.Join(config.ABRA_DIR, "servers", server, fmt.Sprintf("%s.env", appName))
if _, err := os.Stat(appEnvPath); !os.IsNotExist(err) {
return fmt.Errorf("%s already exists?", appEnvPath)
}
err = os.WriteFile(appEnvPath, envSample, 0o664)
if err != nil {
return err
}
read, err := os.ReadFile(appEnvPath)
if err != nil {
return err
}
newContents := strings.Replace(string(read), r.Name+".example.com", domain, -1)
err = os.WriteFile(appEnvPath, []byte(newContents), 0)
if err != nil {
return err
}
log.Debugf("copied & templated %s to %s", r.SampleEnvPath, appEnvPath)
return nil
}
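// Hypothetical usage sketch (recipe, app, server and domain names are
// illustrative, not taken from the original file):
//
//	r := recipe.Get("gitea")
//	if err := TemplateAppEnvSample(r, "git_example_com", "example.com", "git.example.com"); err != nil {
//		log.Fatal(err)
//	}
//
// This would copy the recipe's sample env file (r.SampleEnvPath) to
// <ABRA_DIR>/servers/example.com/git_example_com.env and substitute the domain.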
// SanitiseAppName makes an app name usable with Docker by replacing illegal
// characters.
func SanitiseAppName(name string) string {
return strings.ReplaceAll(name, ".", "_")
}
// GetAppStatuses queries servers to check the deployment status of given apps.
func GetAppStatuses(apps []App, MachineReadable bool) (map[string]map[string]string, error) {
statuses := make(map[string]map[string]string)
servers := make(map[string]struct{})
for _, app := range apps {
if _, ok := servers[app.Server]; !ok {
servers[app.Server] = struct{}{}
}
}
var bar *progressbar.ProgressBar
if !MachineReadable {
bar = formatter.CreateProgressbar(len(servers), "querying remote servers...")
}
ch := make(chan stack.StackStatus, len(servers))
for server := range servers {
cl, err := client.New(server)
if err != nil {
return statuses, err
}
go func(s string) {
ch <- stack.GetAllDeployedServices(cl, s)
if !MachineReadable {
bar.Add(1)
}
}(server)
}
for range servers {
status := <-ch
if status.Err != nil {
return statuses, status.Err
}
for _, service := range status.Services {
result := make(map[string]string)
name := service.Spec.Labels[convert.LabelNamespace]
if _, ok := statuses[name]; !ok {
result["status"] = "deployed"
}
labelKey := fmt.Sprintf("coop-cloud.%s.chaos", name)
chaos, ok := service.Spec.Labels[labelKey]
if ok {
result["chaos"] = chaos
}
labelKey = fmt.Sprintf("coop-cloud.%s.chaos-version", name)
if chaosVersion, ok := service.Spec.Labels[labelKey]; ok {
result["chaosVersion"] = chaosVersion
}
labelKey = fmt.Sprintf("coop-cloud.%s.autoupdate", name)
if autoUpdate, ok := service.Spec.Labels[labelKey]; ok {
result["autoUpdate"] = autoUpdate
} else {
result["autoUpdate"] = "false"
}
labelKey = fmt.Sprintf("coop-cloud.%s.version", name)
if version, ok := service.Spec.Labels[labelKey]; ok {
result["version"] = version
} else {
continue
}
statuses[name] = result
}
}
log.Debugf("retrieved app statuses: %s", statuses)
return statuses, nil
}
// GetAppComposeConfig retrieves a compose specification for a recipe. This
// specification is the result of a merge of all the compose.**.yml files in
// the recipe repository.
func GetAppComposeConfig(recipe string, opts stack.Deploy, appEnv envfile.AppEnv) (*composetypes.Config, error) {
compose, err := loader.LoadComposefile(opts, appEnv)
if err != nil {
return &composetypes.Config{}, err
}
log.Debugf("retrieved %s for %s", compose.Filename, recipe)
return compose, nil
}
// ExposeAllEnv exposes all env variables to the app container
func ExposeAllEnv(stackName string, compose *composetypes.Config, appEnv envfile.AppEnv) {
for _, service := range compose.Services {
if service.Name == "app" {
log.Debugf("add the following environment to the app service config of %s:", stackName)
for k, v := range appEnv {
_, exists := service.Environment[k]
if !exists {
value := v
service.Environment[k] = &value
log.Debugf("add env var: %s value: %s to %s", k, value, stackName)
}
}
}
}
}
func CheckEnv(app App) ([]envfile.EnvVar, error) {
var envVars []envfile.EnvVar
envSample, err := app.Recipe.SampleEnv()
if err != nil {
return envVars, err
}
var keys []string
for key := range envSample {
keys = append(keys, key)
}
sort.Strings(keys)
for _, key := range keys {
if _, ok := app.Env[key]; ok {
envVars = append(envVars, envfile.EnvVar{Name: key, Present: true})
} else {
envVars = append(envVars, envfile.EnvVar{Name: key, Present: false})
}
}
return envVars, nil
}
// ReadAbraShCmdNames reads the names of commands.
func ReadAbraShCmdNames(abraSh string) ([]string, error) {
var cmdNames []string
file, err := os.Open(abraSh)
if err != nil {
if os.IsNotExist(err) {
return cmdNames, nil
}
return cmdNames, err
}
defer file.Close()
cmdNameRegex, err := regexp.Compile(`(\w+)(\(\).*\{)`)
if err != nil {
return cmdNames, err
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
matches := cmdNameRegex.FindStringSubmatch(line)
if len(matches) > 0 {
cmdNames = append(cmdNames, matches[1])
}
}
if len(cmdNames) > 0 {
log.Debugf("read %s from %s", strings.Join(cmdNames, " "), abraSh)
if deployed {
logrus.Debugf("detected %s as deployed versions of %s", appSpec, app.Name)
} else {
log.Debugf("read 0 command names from %s", abraSh)
logrus.Debugf("detected %s as not deployed", app.Name)
}
return cmdNames, nil
return appSpec, len(services) > 0, nil
}
func (a App) WriteRecipeVersion(version string, dryRun bool) error {
file, err := os.Open(a.Path)
if err != nil {
return err
}
defer file.Close()
skipped := false
scanner := bufio.NewScanner(file)
lines := []string{}
for scanner.Scan() {
line := scanner.Text()
if !strings.HasPrefix(line, "RECIPE=") && !strings.HasPrefix(line, "TYPE=") {
lines = append(lines, line)
continue
}
if strings.HasPrefix(line, "#") {
lines = append(lines, line)
continue
}
if strings.Contains(line, version) {
skipped = true
lines = append(lines, line)
continue
}
splitted := strings.Split(line, ":")
line = fmt.Sprintf("%s:%s", splitted[0], version)
lines = append(lines, line)
}
if err := scanner.Err(); err != nil {
log.Fatal(err)
}
if !dryRun {
if err := os.WriteFile(a.Path, []byte(strings.Join(lines, "\n")), os.ModePerm); err != nil {
log.Fatal(err)
}
} else {
log.Debugf("skipping writing version %s because dry run", version)
}
if !skipped {
log.Infof("version %s saved to %s.env", version, a.Domain)
} else {
log.Debugf("skipping version %s write as already exists in %s.env", version, a.Domain)
}
return nil
// ParseVersionLabel parses a $VERSION-$DIGEST app service label.
func ParseVersionLabel(label string) (string, string) {
// versions may look like v4.2-abcd or v4.2-alpine-abcd
idx := strings.LastIndex(label, "-")
version := label[:idx]
digest := label[idx+1:]
logrus.Debugf("parsed %s as version from %s", version, label)
logrus.Debugf("parsed %s as digest from %s", digest, label)
return version, digest
}
// ParseServiceName parses a $STACK_NAME_$SERVICE_NAME service label.
func ParseServiceName(label string) string {
idx := strings.LastIndex(label, "_")
serviceName := label[idx+1:]
logrus.Debugf("parsed %s as service name from %s", serviceName, label)
return serviceName
}
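// Worked example (hypothetical labels, for illustration only):
//
//	ParseVersionLabel("v4.2-alpine-abcd") -> version "v4.2-alpine", digest "abcd"
//	ParseServiceName("my_app_com_app")    -> "app"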


@ -1,200 +0,0 @@
package app_test
import (
"encoding/json"
"fmt"
"reflect"
"testing"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/recipe"
testPkg "coopcloud.tech/abra/pkg/test"
"github.com/docker/docker/api/types/filters"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
)
func TestNewApp(t *testing.T) {
app, err := appPkg.NewApp(testPkg.ExpectedAppEnv, testPkg.AppName, testPkg.ExpectedAppFile)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, testPkg.ExpectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, testPkg.ExpectedApp)
}
}
func TestReadAppEnvFile(t *testing.T) {
app, err := appPkg.ReadAppEnvFile(testPkg.ExpectedAppFile, testPkg.AppName)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, testPkg.ExpectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, testPkg.ExpectedApp)
}
}
func TestGetApp(t *testing.T) {
app, err := appPkg.GetApp(testPkg.ExpectedAppFiles, testPkg.AppName)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, testPkg.ExpectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, testPkg.ExpectedApp)
}
}
func TestGetComposeFiles(t *testing.T) {
r := recipe.Get("abra-test-recipe")
err := r.EnsureExists()
if err != nil {
t.Fatal(err)
}
tests := []struct {
appEnv map[string]string
composeFiles []string
}{
{
map[string]string{},
[]string{
fmt.Sprintf("%s/compose.yml", r.Dir),
},
},
{
map[string]string{"COMPOSE_FILE": "compose.yml"},
[]string{
fmt.Sprintf("%s/compose.yml", r.Dir),
},
},
{
map[string]string{"COMPOSE_FILE": "compose.extra_secret.yml"},
[]string{
fmt.Sprintf("%s/compose.extra_secret.yml", r.Dir),
},
},
{
map[string]string{"COMPOSE_FILE": "compose.yml:compose.extra_secret.yml"},
[]string{
fmt.Sprintf("%s/compose.yml", r.Dir),
fmt.Sprintf("%s/compose.extra_secret.yml", r.Dir),
},
},
}
for _, test := range tests {
composeFiles, err := r.GetComposeFiles(test.appEnv)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, composeFiles, test.composeFiles)
}
}
func TestGetComposeFilesError(t *testing.T) {
r := recipe.Get("abra-test-recipe")
err := r.EnsureExists()
if err != nil {
t.Fatal(err)
}
tests := []struct{ appEnv map[string]string }{
{map[string]string{"COMPOSE_FILE": "compose.yml::compose.foo.yml"}},
{map[string]string{"COMPOSE_FILE": "doesnt.exist.yml"}},
}
for _, test := range tests {
_, err := r.GetComposeFiles(test.appEnv)
if err == nil {
t.Fatalf("should have failed: %v", test.appEnv)
}
}
}
func TestFilters(t *testing.T) {
oldDir := config.RECIPES_DIR
config.RECIPES_DIR = "./testdata"
defer func() {
config.RECIPES_DIR = oldDir
}()
app, err := appPkg.NewApp(envfile.AppEnv{
"DOMAIN": "test.example.com",
"RECIPE": "test-recipe",
}, "test_example_com", appPkg.AppFile{
Path: "./testdata/filtertest.end",
Server: "local",
})
if err != nil {
t.Fatal(err)
}
f, err := app.Filters(false, false)
if err != nil {
t.Error(err)
}
compareFilter(t, f, map[string]map[string]bool{
"name": {
"test_example_com": true,
},
})
f2, err := app.Filters(false, true)
if err != nil {
t.Error(err)
}
compareFilter(t, f2, map[string]map[string]bool{
"name": {
"^test_example_com": true,
},
})
f3, err := app.Filters(true, false)
if err != nil {
t.Error(err)
}
compareFilter(t, f3, map[string]map[string]bool{
"name": {
"test_example_com_bar": true,
"test_example_com_foo": true,
},
})
f4, err := app.Filters(true, true)
if err != nil {
t.Error(err)
}
compareFilter(t, f4, map[string]map[string]bool{
"name": {
"^test_example_com_bar": true,
"^test_example_com_foo": true,
},
})
f5, err := app.Filters(false, false, "foo")
if err != nil {
t.Error(err)
}
compareFilter(t, f5, map[string]map[string]bool{
"name": {
"test_example_com_foo": true,
},
})
}
func compareFilter(t *testing.T, f1 filters.Args, f2 map[string]map[string]bool) {
t.Helper()
j1, err := f1.MarshalJSON()
if err != nil {
t.Error(err)
}
j2, err := json.Marshal(f2)
if err != nil {
t.Error(err)
}
if diff := cmp.Diff(string(j2), string(j1)); diff != "" {
t.Errorf("filters mismatch (-want +got):\n%s", diff)
}
}


@ -1,88 +0,0 @@
package app
import (
"fmt"
"strconv"
"coopcloud.tech/abra/pkg/envfile"
"coopcloud.tech/abra/pkg/log"
composetypes "github.com/docker/cli/cli/compose/types"
)
// SetRecipeLabel adds the label 'coop-cloud.${STACK_NAME}.recipe=${RECIPE}' to the app container
// to signal which recipe is connected to the deployed app
func SetRecipeLabel(compose *composetypes.Config, stackName string, recipe string) {
for _, service := range compose.Services {
if service.Name == "app" {
log.Debugf("set recipe label 'coop-cloud.%s.recipe' to %s for %s", stackName, recipe, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.recipe", stackName)
service.Deploy.Labels[labelKey] = recipe
}
}
}
// SetChaosLabel adds the label 'coop-cloud.${STACK_NAME}.chaos=true/false' to the app container
// to signal if the app is deployed in chaos mode
func SetChaosLabel(compose *composetypes.Config, stackName string, chaos bool) {
for _, service := range compose.Services {
if service.Name == "app" {
log.Debugf("set label 'coop-cloud.%s.chaos' to %v for %s", stackName, chaos, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.chaos", stackName)
service.Deploy.Labels[labelKey] = strconv.FormatBool(chaos)
}
}
}
// SetChaosVersionLabel adds the label 'coop-cloud.${STACK_NAME}.chaos-version=$(GIT_COMMIT)' to the app container
func SetChaosVersionLabel(compose *composetypes.Config, stackName string, chaosVersion string) {
for _, service := range compose.Services {
if service.Name == "app" {
log.Debugf("set label 'coop-cloud.%s.chaos-version' to %v for %s", stackName, chaosVersion, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.chaos-version", stackName)
service.Deploy.Labels[labelKey] = chaosVersion
}
}
}
// SetUpdateLabel adds env ENABLE_AUTO_UPDATE as label to enable/disable the
// auto update process for this app. The default if this variable is not set is to disable
// the auto update process.
func SetUpdateLabel(compose *composetypes.Config, stackName string, appEnv envfile.AppEnv) {
for _, service := range compose.Services {
if service.Name == "app" {
enable_auto_update, exists := appEnv["ENABLE_AUTO_UPDATE"]
if !exists {
enable_auto_update = "false"
}
log.Debugf("set label 'coop-cloud.%s.autoupdate' to %s for %s", stackName, enable_auto_update, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.autoupdate", stackName)
service.Deploy.Labels[labelKey] = enable_auto_update
}
}
}
// GetLabel reads docker labels in the format of "coop-cloud.${STACK_NAME}.${LABEL}" from the local compose files
func GetLabel(compose *composetypes.Config, stackName string, label string) string {
for _, service := range compose.Services {
if service.Name == "app" {
labelKey := fmt.Sprintf("coop-cloud.%s.%s", stackName, label)
log.Debugf("get label '%s'", labelKey)
if labelValue, ok := service.Deploy.Labels[labelKey]; ok {
return labelValue
}
}
}
log.Debugf("no %s label found for %s", label, stackName)
return ""
}
// GetTimeoutFromLabel reads the timeout value from the docker label "coop-cloud.${STACK_NAME}.timeout" and returns 50 as the default value
func GetTimeoutFromLabel(compose *composetypes.Config, stackName string) (int, error) {
timeout := 50 // Default Timeout
var err error = nil
if timeoutLabel := GetLabel(compose, stackName, "timeout"); timeoutLabel != "" {
log.Debugf("timeout label: %s", timeoutLabel)
timeout, err = strconv.Atoi(timeoutLabel)
}
return timeout, err
}
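A minimal usage sketch for the label helpers above, assuming they are importable from coopcloud.tech/abra/pkg/app and that the compose config would normally come from the recipe loader; the stack and recipe names are made up:
package main

import (
	"fmt"

	appPkg "coopcloud.tech/abra/pkg/app"
	composetypes "github.com/docker/cli/cli/compose/types"
)

func main() {
	// normally the config comes from the recipe loader; an empty one keeps
	// this sketch self-contained
	compose := &composetypes.Config{}

	// stamp the deployment with the recipe name and the chaos flag
	appPkg.SetRecipeLabel(compose, "wiki_example_com", "mediawiki")
	appPkg.SetChaosLabel(compose, "wiki_example_com", false)

	// read the deploy timeout back; 50 is the default when no label is set
	timeout, err := appPkg.GetTimeoutFromLabel(compose, "wiki_example_com")
	if err != nil {
		fmt.Println("could not parse timeout label:", err)
	}
	fmt.Println("deploy timeout in seconds:", timeout)
}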

View File

@ -1,2 +0,0 @@
RECIPE=test-recipe
DOMAIN=test.example.com

View File

@ -1,6 +0,0 @@
version: "3.8"
services:
foo:
image: debian
bar:
image: debian

View File

@ -3,17 +3,17 @@ package autocomplete
import (
"fmt"
"coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// AppNameComplete completes app names.
// AppNameComplete completes app names
func AppNameComplete(c *cli.Context) {
appNames, err := app.GetAppNames()
appNames, err := config.GetAppNames()
if err != nil {
log.Warn(err)
logrus.Warn(err)
}
if c.NArg() > 0 {
@ -25,21 +25,11 @@ func AppNameComplete(c *cli.Context) {
}
}
func ServiceNameComplete(appName string) {
serviceNames, err := app.GetAppServiceNames(appName)
if err != nil {
return
}
for _, s := range serviceNames {
fmt.Println(s)
}
}
// RecipeNameComplete completes recipe names.
// RecipeNameComplete completes recipe names
func RecipeNameComplete(c *cli.Context) {
catl, err := recipe.ReadRecipeCatalogue(false)
catl, err := recipe.ReadRecipeCatalogue()
if err != nil {
log.Warn(err)
logrus.Warn(err)
}
if c.NArg() > 0 {
@ -51,37 +41,7 @@ func RecipeNameComplete(c *cli.Context) {
}
}
// RecipeVersionComplete completes versions for the recipe.
func RecipeVersionComplete(recipeName string) {
catl, err := recipe.ReadRecipeCatalogue(false)
if err != nil {
log.Warn(err)
}
for _, v := range catl[recipeName].Versions {
for v2 := range v {
fmt.Println(v2)
}
}
}
// ServerNameComplete completes server names.
func ServerNameComplete(c *cli.Context) {
files, err := app.LoadAppFiles("")
if err != nil {
log.Fatal(err)
}
if c.NArg() > 0 {
return
}
for _, appFile := range files {
fmt.Println(appFile.Server)
}
}
// SubcommandComplete completes sub-commands.
// SubcommandComplete completes subcommands.
func SubcommandComplete(c *cli.Context) {
if c.NArg() > 0 {
return
@ -92,6 +52,7 @@ func SubcommandComplete(c *cli.Context) {
"autocomplete",
"catalogue",
"recipe",
"record",
"server",
"upgrade",
}

View File

@ -1,88 +0,0 @@
package catalogue
import (
"fmt"
"os"
"path"
"strings"
"coopcloud.tech/abra/pkg/config"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
"github.com/go-git/go-git/v5"
)
// EnsureCatalogue ensures that the catalogue is cloned locally & present.
func EnsureCatalogue() error {
catalogueDir := path.Join(config.ABRA_DIR, "catalogue")
if _, err := os.Stat(catalogueDir); err != nil && os.IsNotExist(err) {
log.Warnf("local recipe catalogue is missing, retrieving now")
url := fmt.Sprintf("%s/%s.git", config.REPOS_BASE_URL, config.CATALOGUE_JSON_REPO_NAME)
if err := gitPkg.Clone(catalogueDir, url); err != nil {
return err
}
log.Debugf("cloned catalogue repository to %s", catalogueDir)
}
return nil
}
// EnsureIsClean makes sure that the catalogue has no unstaged changes.
func EnsureIsClean() error {
isClean, err := gitPkg.IsClean(config.CATALOGUE_DIR)
if err != nil {
return err
}
if !isClean {
msg := "%s has locally unstaged changes? please commit/remove your changes before proceeding"
return fmt.Errorf(msg, config.CATALOGUE_DIR)
}
return nil
}
// EnsureUpToDate ensures that the local catalogue is up to date.
func EnsureUpToDate() error {
repo, err := git.PlainOpen(config.CATALOGUE_DIR)
if err != nil {
return err
}
remotes, err := repo.Remotes()
if err != nil {
return err
}
if len(remotes) == 0 {
msg := "cannot ensure %s is up-to-date, no git remotes configured"
log.Debugf(msg, config.CATALOGUE_DIR)
return nil
}
worktree, err := repo.Worktree()
if err != nil {
return err
}
branch, err := gitPkg.CheckoutDefaultBranch(repo, config.CATALOGUE_DIR)
if err != nil {
return err
}
opts := &git.PullOptions{
Force: true,
ReferenceName: branch,
}
if err := worktree.Pull(opts); err != nil {
if !strings.Contains(err.Error(), "already up-to-date") {
return err
}
}
log.Debugf("fetched latest git changes for %s", config.CATALOGUE_DIR)
return nil
}
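A rough sketch of how the three helpers above might be chained before reading the recipe catalogue, assuming the import path coopcloud.tech/abra/pkg/catalogue; the call site itself is hypothetical:
package main

import (
	"coopcloud.tech/abra/pkg/catalogue"
	"coopcloud.tech/abra/pkg/log"
)

func main() {
	// clone the catalogue on first use, refuse to proceed on a dirty checkout,
	// then pull the latest changes from the default branch
	if err := catalogue.EnsureCatalogue(); err != nil {
		log.Fatal(err)
	}
	if err := catalogue.EnsureIsClean(); err != nil {
		log.Fatal(err)
	}
	if err := catalogue.EnsureUpToDate(); err != nil {
		log.Fatal(err)
	}
}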

View File

@ -2,46 +2,24 @@
package client
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"time"
contextPkg "coopcloud.tech/abra/pkg/context"
"coopcloud.tech/abra/pkg/log"
sshPkg "coopcloud.tech/abra/pkg/ssh"
commandconnPkg "coopcloud.tech/abra/pkg/upstream/commandconn"
"github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
// Conf is a Docker client configuration.
type Conf struct {
Timeout int
}
// Opt is a Docker client option.
type Opt func(c *Conf)
// WithTimeout specifies a timeout for a Docker client.
func WithTimeout(timeout int) Opt {
return func(c *Conf) {
c.Timeout = timeout
}
}
// New initiates a new Docker client. New client connections are validated so
// that we ensure connections via SSH to the daemon can succeed. It takes into
// account that you may only want the local client and not communicate via SSH.
// For this use-case, please pass "default" as the contextName.
func New(serverName string, opts ...Opt) (*client.Client, error) {
// New initiates a new Docker client.
func New(contextName string) (*client.Client, error) {
var clientOpts []client.Opt
if serverName != "default" {
context, err := GetContext(serverName)
if contextName != "default" {
context, err := GetContext(contextName)
if err != nil {
return nil, fmt.Errorf("unknown server, run \"abra server add %s\"?", serverName)
return nil, err
}
ctxEndpoint, err := contextPkg.GetContextEndpoint(context)
@ -49,17 +27,13 @@ func New(serverName string, opts ...Opt) (*client.Client, error) {
return nil, err
}
conf := &Conf{}
for _, opt := range opts {
opt(conf)
}
helper, err := commandconnPkg.NewConnectionHelper(ctxEndpoint, conf.Timeout)
helper, err := commandconnPkg.NewConnectionHelper(ctxEndpoint)
if err != nil {
return nil, err
}
httpClient := &http.Client{
// No tls, no proxy
Transport: &http.Transport{
DialContext: helper.Dialer,
IdleConnTimeout: 30 * time.Second,
@ -85,20 +59,7 @@ func New(serverName string, opts ...Opt) (*client.Client, error) {
return nil, err
}
log.Debugf("created client for %s", serverName)
info, err := cl.Info(context.Background())
if err != nil {
return cl, sshPkg.Fatal(serverName, err)
}
if info.Swarm.LocalNodeState == "inactive" {
if serverName != "default" {
return cl, fmt.Errorf("swarm mode not enabled on %s?", serverName)
}
return cl, errors.New("swarm mode not enabled on local server?")
}
logrus.Debugf("created client for %s", contextName)
return cl, nil
}
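A usage sketch for the variadic-options variant of New shown above, assuming the import path coopcloud.tech/abra/pkg/client; the server name and timeout value are illustrative:
package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/client"
	"coopcloud.tech/abra/pkg/log"
)

func main() {
	// dial the remote docker daemon over SSH with an explicit connection timeout
	cl, err := client.New("example.coopcloud.tech", client.WithTimeout(3))
	if err != nil {
		log.Fatal(err)
	}

	// the API version is negotiated while the client is constructed
	fmt.Println("negotiated API version:", cl.ClientVersion())
}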

View File

@ -5,25 +5,28 @@ import (
"fmt"
"coopcloud.tech/abra/pkg/context"
"coopcloud.tech/abra/pkg/log"
commandconnPkg "coopcloud.tech/abra/pkg/upstream/commandconn"
dConfig "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/context/docker"
contextStore "github.com/docker/cli/cli/context/store"
"github.com/sirupsen/logrus"
)
type Context = contextStore.Metadata
// CreateContext creates a new Docker context.
func CreateContext(contextName string) error {
host := fmt.Sprintf("ssh://%s", contextName)
func CreateContext(contextName string, user string, port string) error {
host := contextName
if user != "" {
host = fmt.Sprintf("%s@%s", user, host)
}
if port != "" {
host = fmt.Sprintf("%s:%s", host, port)
}
host = fmt.Sprintf("ssh://%s", host)
if err := createContext(contextName, host); err != nil {
return err
}
log.Debugf("created the %s context", contextName)
logrus.Debugf("created the %s context", contextName)
return nil
}
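For example, a hypothetical call CreateContext("example.com", "abra", "2222") in the three-argument variant above builds the host string ssh://abra@example.com:2222, while the single-argument variant always produces ssh://example.com.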

View File

@ -3,10 +3,13 @@ package client
import (
"context"
"fmt"
"strings"
"github.com/containers/image/docker"
"github.com/containers/image/types"
"github.com/distribution/reference"
"github.com/docker/distribution/reference"
"github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
// GetRegistryTags retrieves all tags of an image from a container registry.
@ -26,3 +29,29 @@ func GetRegistryTags(img reference.Named) ([]string, error) {
return tags, nil
}
// GetTagDigest retrieves an image digest from a container registry.
func GetTagDigest(cl *client.Client, image reference.Named) (string, error) {
target := fmt.Sprintf("//%s", reference.Path(image))
ref, err := docker.ParseReference(target)
if err != nil {
return "", fmt.Errorf("failed to parse image %s, saw: %s", image, err.Error())
}
ctx := context.Background()
img, err := ref.NewImage(ctx, nil)
if err != nil {
logrus.Debugf("failed to query remote registry for %s, saw: %s", image, err.Error())
return "", fmt.Errorf("unable to read digest for %s", image)
}
defer img.Close()
digest := img.ConfigInfo().Digest.String()
if digest == "" {
return digest, fmt.Errorf("unable to read digest for %s", image)
}
return strings.Split(digest, ":")[1][:7], nil
}
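A rough call-site sketch for GetRegistryTags and GetTagDigest, matching the signatures shown above and assuming the import path coopcloud.tech/abra/pkg/client; the image name is made up and the reference import follows one of the two module paths shown in this diff:
package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/client"
	"coopcloud.tech/abra/pkg/log"
	"github.com/docker/distribution/reference"
)

func main() {
	img, err := reference.ParseNormalizedNamed("nextcloud:25.0.1")
	if err != nil {
		log.Fatal(err)
	}

	// list every tag the registry knows about for this image
	tags, err := client.GetRegistryTags(img)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("tags:", tags)

	// resolve the short config digest for the pinned tag
	cl, err := client.New("default")
	if err != nil {
		log.Fatal(err)
	}
	digest, err := client.GetTagDigest(cl, img)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("digest:", digest)
}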

View File

@ -4,14 +4,20 @@ import (
"context"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
)
func StoreSecret(cl *client.Client, secretName, secretValue, server string) error {
func StoreSecret(secretName, secretValue, server string) error {
cl, err := New(server)
if err != nil {
return err
}
ctx := context.Background()
ann := swarm.Annotations{Name: secretName}
spec := swarm.SecretSpec{Annotations: ann, Data: []byte(secretValue)}
if _, err := cl.SecretCreate(context.Background(), spec); err != nil {
// We don't bother with the secret IDs for now
if _, err := cl.SecretCreate(ctx, spec); err != nil {
return err
}
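A hypothetical call site for the variant of StoreSecret above that opens its own client from the server name; the secret name, value and server are made up:
package main

import (
	"coopcloud.tech/abra/pkg/client"
	"coopcloud.tech/abra/pkg/log"
)

func main() {
	// the secret name and value here are illustrative only
	if err := client.StoreSecret("wiki_db_password_v1", "correct-horse-battery", "example.coopcloud.tech"); err != nil {
		log.Fatal(err)
	}
}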

View File

@ -2,17 +2,18 @@ package client
import (
"context"
"fmt"
"time"
"coopcloud.tech/abra/pkg/log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
)
func GetVolumes(cl *client.Client, ctx context.Context, server string, fs filters.Args) ([]*volume.Volume, error) {
volumeListOKBody, err := cl.VolumeList(ctx, volume.ListOptions{Filters: fs})
func GetVolumes(ctx context.Context, server string, fs filters.Args) ([]*types.Volume, error) {
cl, err := New(server)
if err != nil {
return nil, err
}
volumeListOKBody, err := cl.VolumeList(ctx, fs)
volumeList := volumeListOKBody.Volumes
if err != nil {
return volumeList, err
@ -21,7 +22,7 @@ func GetVolumes(cl *client.Client, ctx context.Context, server string, fs filter
return volumeList, nil
}
func GetVolumeNames(volumes []*volume.Volume) []string {
func GetVolumeNames(volumes []*types.Volume) []string {
var volumeNames []string
for _, vol := range volumes {
@ -31,32 +32,18 @@ func GetVolumeNames(volumes []*volume.Volume) []string {
return volumeNames
}
func RemoveVolumes(cl *client.Client, ctx context.Context, volumeNames []string, force bool, retries int) error {
func RemoveVolumes(ctx context.Context, server string, volumeNames []string, force bool) error {
cl, err := New(server)
if err != nil {
return err
}
for _, volName := range volumeNames {
err := retryFunc(5, func() error {
return cl.VolumeRemove(context.Background(), volName, force)
})
err := cl.VolumeRemove(ctx, volName, force)
if err != nil {
return fmt.Errorf("volume %s: %s", volName, err)
return err
}
}
return nil
}
// retryFunc retries the given function for the given retries. After the nth
// retry it waits (n + 1)^2 seconds before the next retry (starting with n=0).
// It returns an error if the function still failed after the last retry.
func retryFunc(retries int, fn func() error) error {
for i := 0; i < retries; i++ {
err := fn()
if err == nil {
return nil
}
if i+1 < retries {
sleep := time.Duration(i+1) * time.Duration(i+1)
log.Infof("%s: waiting %d seconds before next retry", err, sleep)
time.Sleep(sleep * time.Second)
}
}
return fmt.Errorf("%d retries failed", retries)
}
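Worked through for retries=5: a persistently failing function is attempted five times, with waits of 1, 4, 9 and 16 seconds between attempts and no wait after the final failure.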

View File

@ -1,26 +0,0 @@
package client
import (
"fmt"
"testing"
)
func TestRetryFunc(t *testing.T) {
err := retryFunc(1, func() error { return nil })
if err != nil {
t.Errorf("should not return an error: %s", err)
}
i := 0
fn := func() error {
i++
return fmt.Errorf("oh no, something went wrong!")
}
err = retryFunc(2, fn)
if err == nil {
t.Error("should return an error")
}
if i != 2 {
t.Errorf("The function should have been called 1 times, got %d", i)
}
}

pkg/compose/compose.go (new file, 158 lines)

@ -0,0 +1,158 @@
package compose
import (
"fmt"
"io/ioutil"
"path"
"path/filepath"
"strings"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/stack"
loader "coopcloud.tech/abra/pkg/upstream/stack"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/distribution/reference"
"github.com/sirupsen/logrus"
)
// UpdateTag updates an image tag in-place on file system local compose files.
func UpdateTag(pattern, image, tag, recipeName string) (bool, error) {
composeFiles, err := filepath.Glob(pattern)
if err != nil {
return false, err
}
logrus.Debugf("considering %s config(s) for tag update", strings.Join(composeFiles, ", "))
for _, composeFile := range composeFiles {
opts := stack.Deploy{Composefiles: []string{composeFile}}
envSamplePath := path.Join(config.RECIPES_DIR, recipeName, ".env.sample")
sampleEnv, err := config.ReadEnv(envSamplePath)
if err != nil {
return false, err
}
compose, err := loader.LoadComposefile(opts, sampleEnv)
if err != nil {
return false, err
}
for _, service := range compose.Services {
if service.Image == "" {
continue // may be a compose.$optional.yml file
}
img, err := reference.ParseNormalizedNamed(service.Image)
if err != nil {
return false, err
}
var composeTag string
switch img.(type) {
case reference.NamedTagged:
composeTag = img.(reference.NamedTagged).Tag()
default:
logrus.Debugf("unable to parse %s, skipping", img)
continue
}
composeImage := formatter.StripTagMeta(reference.Path(img))
logrus.Debugf("parsed %s from %s", composeTag, service.Image)
if image == composeImage {
bytes, err := ioutil.ReadFile(composeFile)
if err != nil {
return false, err
}
old := fmt.Sprintf("%s:%s", composeImage, composeTag)
new := fmt.Sprintf("%s:%s", composeImage, tag)
replacedBytes := strings.Replace(string(bytes), old, new, -1)
logrus.Debugf("updating %s to %s in %s", old, new, compose.Filename)
if err := ioutil.WriteFile(compose.Filename, []byte(replacedBytes), 0764); err != nil {
return false, err
}
}
}
}
return false, nil
}
// UpdateLabel updates a label in-place on file system local compose files.
func UpdateLabel(pattern, serviceName, label, recipeName string) error {
composeFiles, err := filepath.Glob(pattern)
if err != nil {
return err
}
logrus.Debugf("considering %s config(s) for label update", strings.Join(composeFiles, ", "))
for _, composeFile := range composeFiles {
opts := stack.Deploy{Composefiles: []string{composeFile}}
envSamplePath := path.Join(config.RECIPES_DIR, recipeName, ".env.sample")
sampleEnv, err := config.ReadEnv(envSamplePath)
if err != nil {
return err
}
compose, err := loader.LoadComposefile(opts, sampleEnv)
if err != nil {
return err
}
serviceExists := false
var service composetypes.ServiceConfig
for _, s := range compose.Services {
if s.Name == serviceName {
service = s
serviceExists = true
}
}
if !serviceExists {
continue
}
discovered := false
for oldLabel, value := range service.Deploy.Labels {
if strings.HasPrefix(oldLabel, "coop-cloud") {
discovered = true
bytes, err := ioutil.ReadFile(composeFile)
if err != nil {
return err
}
old := fmt.Sprintf("coop-cloud.${STACK_NAME}.version=%s", value)
replacedBytes := strings.Replace(string(bytes), old, label, -1)
if old == label {
logrus.Warnf("%s is already set, nothing to do?", label)
return nil
}
logrus.Debugf("updating %s to %s in %s", old, label, compose.Filename)
if err := ioutil.WriteFile(compose.Filename, []byte(replacedBytes), 0764); err != nil {
return err
}
logrus.Infof("synced label %s to service %s", label, serviceName)
}
}
if !discovered {
logrus.Warn("no existing label found, automagic insertion not supported yet")
logrus.Fatalf("add '- \"%s\"' manually to the 'app' service in %s", label, composeFile)
}
}
return nil
}
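Hypothetical call sites for UpdateTag and UpdateLabel, assuming the import path coopcloud.tech/abra/pkg/compose; the recipe name, image and version values are made up, and the image argument has to match the normalised image path used in the recipe's compose files:
package main

import (
	"path"

	"coopcloud.tech/abra/pkg/compose"
	"coopcloud.tech/abra/pkg/config"
	"coopcloud.tech/abra/pkg/log"
)

func main() {
	pattern := path.Join(config.RECIPES_DIR, "wordpress", "compose*.yml")

	// bump the image tag in every compose file of the recipe
	if _, err := compose.UpdateTag(pattern, "wordpress", "6.2.1", "wordpress"); err != nil {
		log.Fatal(err)
	}

	// sync the recipe version label on the app service
	label := "coop-cloud.${STACK_NAME}.version=6.2.1+1.0.0"
	if err := compose.UpdateLabel(pattern, "app", label, "wordpress"); err != nil {
		log.Fatal(err)
	}
}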

View File

@ -1,111 +0,0 @@
package config
import (
"os"
"path"
"path/filepath"
"coopcloud.tech/abra/pkg/log"
"gopkg.in/yaml.v3"
)
// LoadAbraConfig returns the abra configuration. It tries to find an abra
// configuration file (see findAbraConfig for lookup logic). When no
// configuration was found it returns the default config.
func LoadAbraConfig() Abra {
wd, _ := os.Getwd()
configFile := findAbraConfig(wd)
if configFile == "" {
log.Debugf("no config file found")
return Abra{}
}
data, err := os.ReadFile(configFile)
if err != nil {
// Do nothing when an error occurs
log.Debugf("error reading config file: %s", err)
return Abra{}
}
config := Abra{}
err = yaml.Unmarshal(data, &config)
if err != nil {
// Do nothing when an error occurs
log.Debugf("error loading config file: %s", err)
return Abra{}
}
log.Debugf("config file loaded from: %s", configFile)
config.configPath = filepath.Dir(configFile)
return config
}
// findAbraConfig recursively looks for an abra.y(a)ml file in the given directory.
// When the file was not found it calls the function again with the parent
// directory until the home directory is hit. When no abra config was found it
// returns an empty string.
func findAbraConfig(dir string) string {
dir, err := filepath.Abs(dir)
if err != nil {
return ""
}
if dir == os.ExpandEnv("$HOME") || dir == "/" {
return ""
}
p := path.Join(dir, "abra.yaml")
if _, err := os.Stat(p); err == nil {
return p
}
p = path.Join(dir, "abra.yml")
if _, err := os.Stat(p); err == nil {
return p
}
return findAbraConfig(filepath.Dir(dir))
}
// Abra defines the configuration file for abra.
type Abra struct {
configPath string
AbraDir string `yaml:"abraDir"`
}
// GetAbraDir returns the abra dir. It has the following logic:
// 1. check if $ABRA_DIR is set
// 2. check if abraDir was set in a config file
// 3. use $HOME/.abra when above two options failed
func (a Abra) GetAbraDir() string {
if dir, exists := os.LookupEnv("ABRA_DIR"); exists && dir != "" {
log.Debug("read abra dir from $ABRA_DIR")
return dir
}
if a.AbraDir != "" {
log.Debug("read abra dir from config file")
if path.IsAbs(a.AbraDir) {
return a.AbraDir
}
// Make the path absolute
return path.Join(a.configPath, a.AbraDir)
}
log.Debug("using default abra dir")
return os.ExpandEnv("$HOME/.abra")
}
func (a Abra) GetServersDir() string { return path.Join(a.GetAbraDir(), "servers") }
func (a Abra) GetRecipesDir() string { return path.Join(a.GetAbraDir(), "recipes") }
func (a Abra) GetVendorDir() string { return path.Join(a.GetAbraDir(), "vendor") }
func (a Abra) GetBackupDir() string { return path.Join(a.GetAbraDir(), "backups") }
func (a Abra) GetCatalogueDir() string { return path.Join(a.GetAbraDir(), "catalogue") }
var config = LoadAbraConfig()
var (
ABRA_DIR = config.GetAbraDir()
SERVERS_DIR = config.GetServersDir()
RECIPES_DIR = config.GetRecipesDir()
VENDOR_DIR = config.GetVendorDir()
BACKUP_DIR = config.GetBackupDir()
CATALOGUE_DIR = config.GetCatalogueDir()
RECIPES_JSON = path.Join(config.GetCatalogueDir(), "recipes.json")
REPOS_BASE_URL = "https://git.coopcloud.tech/coop-cloud"
CATALOGUE_JSON_REPO_NAME = "recipes-catalogue-json"
SSH_URL_TEMPLATE = "ssh://git@git.coopcloud.tech:2222/coop-cloud/%s.git"
CHAOS_DEFAULT = "false"
)
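For example, a hypothetical ~/co-op/abra.yaml containing the single line "abraDir: .abra" makes any abra invocation from ~/co-op or its subdirectories use ~/co-op/.abra, because a relative abraDir is joined onto the directory holding the config file; an exported $ABRA_DIR still takes precedence over the file.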

View File

@ -1,133 +0,0 @@
package config
import (
"log"
"os"
"path/filepath"
"testing"
)
func TestFindAbraConfig(t *testing.T) {
wd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
tests := []struct {
Dir string
Config string
}{
{
Dir: "testdata/abraconfig1",
Config: filepath.Join(wd, "testdata/abraconfig1/abra.yaml"),
},
{
Dir: "testdata/abraconfig1/subdir",
Config: filepath.Join(wd, "testdata/abraconfig1/abra.yaml"),
},
{
Dir: "testdata/abraconfig2",
Config: filepath.Join(wd, "testdata/abraconfig2/abra.yml"),
},
{
Dir: "testdata/abraconfig2/subdir",
Config: filepath.Join(wd, "testdata/abraconfig2/abra.yml"),
},
{
Dir: "testdata",
Config: "",
},
}
for _, tc := range tests {
t.Run(tc.Dir, func(t *testing.T) {
config := findAbraConfig(tc.Dir)
if config != tc.Config {
t.Errorf("\nwant: %s\ngot: %s", tc.Config, config)
}
})
}
}
func TestLoadAbraConfigGetAbraDir(t *testing.T) {
wd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
t.Setenv("ABRA_DIR", "")
t.Run("default", func(t *testing.T) {
cfg := LoadAbraConfig()
wantAbraDir := os.ExpandEnv("$HOME/.abra")
if cfg.GetAbraDir() != wantAbraDir {
t.Errorf("\nwant: %s\ngot: %s", wantAbraDir, cfg.GetAbraDir())
}
})
t.Run("from config file", func(t *testing.T) {
t.Cleanup(func() { os.Chdir(wd) })
err = os.Chdir(filepath.Join(wd, "testdata/abraconfig1"))
if err != nil {
log.Fatal(err)
}
cfg := LoadAbraConfig()
wantAbraDir := filepath.Join(wd, "testdata/abraconfig1/foobar")
if cfg.GetAbraDir() != wantAbraDir {
t.Errorf("\nwant: %s\ngot: %s", wantAbraDir, cfg.GetAbraDir())
}
})
t.Run("default when config file is empty", func(t *testing.T) {
t.Cleanup(func() { os.Chdir(wd) })
err := os.Chdir(filepath.Join(wd, "testdata/abraconfig2"))
if err != nil {
log.Fatal(err)
}
cfg := LoadAbraConfig()
wantAbraDir := os.ExpandEnv("$HOME/.abra")
if cfg.GetAbraDir() != wantAbraDir {
t.Errorf("\nwant: %s\ngot: %s", wantAbraDir, cfg.GetAbraDir())
}
})
t.Run("from env variable", func(t *testing.T) {
t.Setenv("ABRA_DIR", "foo")
cfg := LoadAbraConfig()
wantAbraDir := "foo"
if cfg.GetAbraDir() != wantAbraDir {
t.Errorf("\nwant: %s\ngot: %s", wantAbraDir, cfg.GetAbraDir())
}
})
}
func TestLoadAbraConfigServersDir(t *testing.T) {
wd, err := os.Getwd()
if err != nil {
log.Fatal(err)
}
t.Setenv("ABRA_DIR", "")
t.Run("default", func(t *testing.T) {
cfg := LoadAbraConfig()
wantServersDir := os.ExpandEnv("$HOME/.abra/servers")
if cfg.GetServersDir() != wantServersDir {
t.Errorf("\nwant: %s\ngot: %s", wantServersDir, cfg.GetServersDir())
}
})
t.Run("from config file", func(t *testing.T) {
t.Cleanup(func() { os.Chdir(wd) })
err = os.Chdir(filepath.Join(wd, "testdata/abraconfig1"))
if err != nil {
log.Fatal(err)
}
cfg := LoadAbraConfig()
log.Println(cfg)
wantServersDir := filepath.Join(wd, "testdata/abraconfig1/foobar/servers")
if cfg.GetServersDir() != wantServersDir {
t.Errorf("\nwant: %s\ngot: %s", wantServersDir, cfg.GetServersDir())
}
})
}

pkg/config/app.go (new file, 440 lines)

@ -0,0 +1,440 @@
package config
import (
"fmt"
"html/template"
"io/ioutil"
"os"
"path"
"strings"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/upstream/convert"
loader "coopcloud.tech/abra/pkg/upstream/stack"
stack "coopcloud.tech/abra/pkg/upstream/stack"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types/filters"
"github.com/sirupsen/logrus"
)
// Type aliases to make code hints easier to understand
// AppEnv is a map of the values in an app's env config
type AppEnv = map[string]string
// AppName is the name of an app
type AppName = string
// AppFile represents app env files on disk without reading the contents
type AppFile struct {
Path string
Server string
}
// AppFiles maps app names to their env files on disk
type AppFiles map[AppName]AppFile
// App represents an app with its env file read into memory
type App struct {
Name AppName
Recipe string
Domain string
Env AppEnv
Server string
Path string
}
// StackName gets the docker-safe stack name for the app. This should not be
// shown to the user; use a.Name for that. Give the output of this method to
// Docker only.
func (a App) StackName() string {
if _, exists := a.Env["STACK_NAME"]; exists {
return a.Env["STACK_NAME"]
}
stackName := SanitiseAppName(a.Name)
if len(stackName) > 45 {
logrus.Debugf("trimming %s to %s to avoid runtime limits", stackName, stackName[:45])
stackName = stackName[:45]
}
a.Env["STACK_NAME"] = stackName
return stackName
}
// Filters retrieves exact app filters for querying the container runtime. Due
// to upstream issues, filtering works differently depending on what you're
// querying. So, for example, secrets don't work with regex! The caller needs
// to implement their own validation that the right secrets are matched. In
// order to handle these cases, we provide the `appendServiceNames` /
// `exactMatch` modifiers.
func (a App) Filters(appendServiceNames, exactMatch bool) (filters.Args, error) {
filters := filters.NewArgs()
composeFiles, err := GetAppComposeFiles(a.Recipe, a.Env)
if err != nil {
return filters, err
}
opts := stack.Deploy{Composefiles: composeFiles}
compose, err := GetAppComposeConfig(a.Recipe, opts, a.Env)
if err != nil {
return filters, err
}
for _, service := range compose.Services {
var filter string
if appendServiceNames {
if exactMatch {
filter = fmt.Sprintf("^%s_%s", a.StackName(), service.Name)
} else {
filter = fmt.Sprintf("%s_%s", a.StackName(), service.Name)
}
} else {
if exactMatch {
filter = fmt.Sprintf("^%s", a.StackName())
} else {
filter = fmt.Sprintf("%s", a.StackName())
}
}
filters.Add("name", filter)
}
return filters, nil
}
// ByServer sorts a slice of Apps
type ByServer []App
func (a ByServer) Len() int { return len(a) }
func (a ByServer) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByServer) Less(i, j int) bool {
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
}
// ByServerAndRecipe sorts a slice of Apps
type ByServerAndRecipe []App
func (a ByServerAndRecipe) Len() int { return len(a) }
func (a ByServerAndRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByServerAndRecipe) Less(i, j int) bool {
if a[i].Server == a[j].Server {
return strings.ToLower(a[i].Recipe) < strings.ToLower(a[j].Recipe)
}
return strings.ToLower(a[i].Server) < strings.ToLower(a[j].Server)
}
// ByRecipe sorts a slice of Apps
type ByRecipe []App
func (a ByRecipe) Len() int { return len(a) }
func (a ByRecipe) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByRecipe) Less(i, j int) bool {
return strings.ToLower(a[i].Recipe) < strings.ToLower(a[j].Recipe)
}
// ByName sorts a slice of Apps
type ByName []App
func (a ByName) Len() int { return len(a) }
func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByName) Less(i, j int) bool {
return strings.ToLower(a[i].Name) < strings.ToLower(a[j].Name)
}
func readAppEnvFile(appFile AppFile, name AppName) (App, error) {
env, err := ReadEnv(appFile.Path)
if err != nil {
return App{}, fmt.Errorf("env file for %s couldn't be read: %s", name, err.Error())
}
logrus.Debugf("read env %s from %s", env, appFile.Path)
app, err := newApp(env, name, appFile)
if err != nil {
return App{}, fmt.Errorf("env file for %s has issues: %s", name, err.Error())
}
return app, nil
}
// newApp creates a new App object
func newApp(env AppEnv, name string, appFile AppFile) (App, error) {
domain := env["DOMAIN"]
recipe, exists := env["RECIPE"]
if !exists {
recipe, exists = env["TYPE"]
if !exists {
return App{}, fmt.Errorf("%s is missing the RECIPE env var", name)
}
}
return App{
Name: name,
Domain: domain,
Recipe: recipe,
Env: env,
Server: appFile.Server,
Path: appFile.Path,
}, nil
}
// LoadAppFiles gets all app files for a given set of servers or all servers
func LoadAppFiles(servers ...string) (AppFiles, error) {
appFiles := make(AppFiles)
if len(servers) == 1 {
if servers[0] == "" {
// Empty servers flag, one string will always be passed
var err error
servers, err = GetAllFoldersInDirectory(SERVERS_DIR)
if err != nil {
return nil, err
}
}
}
logrus.Debugf("collecting metadata from %v servers: %s", len(servers), strings.Join(servers, ", "))
for _, server := range servers {
serverDir := path.Join(SERVERS_DIR, server)
files, err := getAllFilesInDirectory(serverDir)
if err != nil {
return nil, fmt.Errorf("server %s doesn't exist? Run \"abra server ls\" to check", server)
}
for _, file := range files {
appName := strings.TrimSuffix(file.Name(), ".env")
appFilePath := path.Join(SERVERS_DIR, server, file.Name())
appFiles[appName] = AppFile{
Path: appFilePath,
Server: server,
}
}
}
return appFiles, nil
}
// GetApp loads an app's settings, reading it from file, in preparation to use it
//
// ONLY use when ready to use the env file to keep IO down
func GetApp(apps AppFiles, name AppName) (App, error) {
appFile, exists := apps[name]
if !exists {
return App{}, fmt.Errorf("cannot find app with name %s", name)
}
app, err := readAppEnvFile(appFile, name)
if err != nil {
return App{}, err
}
return app, nil
}
// GetApps returns a slice of Apps with their env files read from a given slice of AppFiles
func GetApps(appFiles AppFiles) ([]App, error) {
var apps []App
for name := range appFiles {
app, err := GetApp(appFiles, name)
if err != nil {
return nil, err
}
apps = append(apps, app)
}
return apps, nil
}
// GetAppServiceNames retrieves a list of app service names.
func GetAppServiceNames(appName string) ([]string, error) {
var serviceNames []string
appFiles, err := LoadAppFiles("")
if err != nil {
return serviceNames, err
}
app, err := GetApp(appFiles, appName)
if err != nil {
return serviceNames, err
}
composeFiles, err := GetAppComposeFiles(app.Recipe, app.Env)
if err != nil {
return serviceNames, err
}
opts := stack.Deploy{Composefiles: composeFiles}
compose, err := GetAppComposeConfig(app.Recipe, opts, app.Env)
if err != nil {
return serviceNames, err
}
for _, service := range compose.Services {
serviceNames = append(serviceNames, service.Name)
}
return serviceNames, nil
}
// GetAppNames retrieves a list of app names.
func GetAppNames() ([]string, error) {
var appNames []string
appFiles, err := LoadAppFiles("")
if err != nil {
return appNames, err
}
apps, err := GetApps(appFiles)
if err != nil {
return appNames, err
}
for _, app := range apps {
appNames = append(appNames, app.Name)
}
return appNames, nil
}
// TemplateAppEnvSample copies the example env file for the app into the user's env files
func TemplateAppEnvSample(recipeName, appName, server, domain string) error {
envSamplePath := path.Join(RECIPES_DIR, recipeName, ".env.sample")
envSample, err := ioutil.ReadFile(envSamplePath)
if err != nil {
return err
}
appEnvPath := path.Join(ABRA_DIR, "servers", server, fmt.Sprintf("%s.env", appName))
if _, err := os.Stat(appEnvPath); err == nil {
return fmt.Errorf("%s already exists?", appEnvPath)
}
err = ioutil.WriteFile(appEnvPath, envSample, 0664)
if err != nil {
return err
}
file, err := os.OpenFile(appEnvPath, os.O_RDWR, 0664)
if err != nil {
return err
}
defer file.Close()
tpl, err := template.ParseFiles(appEnvPath)
if err != nil {
return err
}
type templateVars struct {
Name string
Domain string
}
tvars := templateVars{Name: recipeName, Domain: domain}
if err := tpl.Execute(file, tvars); err != nil {
return err
}
logrus.Debugf("copied & templated %s to %s", envSamplePath, appEnvPath)
return nil
}
// SanitiseAppName makes an app name usable with Docker by replacing illegal characters
func SanitiseAppName(name string) string {
return strings.ReplaceAll(name, ".", "_")
}
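For example, a hypothetical app named wiki.example.coop gets the stack name wiki_example_coop; only when the sanitised name exceeds 45 characters does StackName truncate it to stay within Docker's limits.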
// GetAppStatuses queries servers to check the deployment status of given apps
func GetAppStatuses(appFiles AppFiles) (map[string]map[string]string, error) {
statuses := make(map[string]map[string]string)
var unique []string
servers := make(map[string]struct{})
for _, appFile := range appFiles {
if _, ok := servers[appFile.Server]; !ok {
servers[appFile.Server] = struct{}{}
unique = append(unique, appFile.Server)
}
}
bar := formatter.CreateProgressbar(len(servers), "querying remote servers...")
ch := make(chan stack.StackStatus, len(servers))
for server := range servers {
go func(s string) {
ch <- stack.GetAllDeployedServices(s)
bar.Add(1)
}(server)
}
for range servers {
status := <-ch
for _, service := range status.Services {
result := make(map[string]string)
name := service.Spec.Labels[convert.LabelNamespace]
if _, ok := statuses[name]; !ok {
result["status"] = "deployed"
}
labelKey := fmt.Sprintf("coop-cloud.%s.version", name)
if version, ok := service.Spec.Labels[labelKey]; ok {
result["version"] = version
} else {
continue
}
statuses[name] = result
}
}
logrus.Debugf("retrieved app statuses: %s", statuses)
return statuses, nil
}
// GetAppComposeFiles gets the list of compose files for an app which should be
// merged into a composetypes.Config while respecting the COMPOSE_FILE env var.
func GetAppComposeFiles(recipe string, appEnv AppEnv) ([]string, error) {
var composeFiles []string
if _, ok := appEnv["COMPOSE_FILE"]; !ok {
logrus.Debug("no COMPOSE_FILE detected, loading compose.yml")
path := fmt.Sprintf("%s/%s/compose.yml", RECIPES_DIR, recipe)
composeFiles = append(composeFiles, path)
return composeFiles, nil
}
composeFileEnvVar := appEnv["COMPOSE_FILE"]
envVars := strings.Split(composeFileEnvVar, ":")
logrus.Debugf("COMPOSE_FILE detected (%s), loading %s", composeFileEnvVar, strings.Join(envVars, ", "))
for _, file := range strings.Split(composeFileEnvVar, ":") {
path := fmt.Sprintf("%s/%s/%s", RECIPES_DIR, recipe, file)
composeFiles = append(composeFiles, path)
}
logrus.Debugf("retrieved %s configs for %s", strings.Join(composeFiles, ", "), recipe)
return composeFiles, nil
}
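For example, with a hypothetical COMPOSE_FILE="compose.yml:compose.smtp.yml" in the app env, both compose.yml and compose.smtp.yml from the recipe directory are returned for merging; without COMPOSE_FILE only compose.yml is loaded.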
// GetAppComposeConfig retrieves a compose specification for a recipe. This
// specification is the result of a merge of all the compose.**.yml files in
// the recipe repository.
func GetAppComposeConfig(recipe string, opts stack.Deploy, appEnv AppEnv) (*composetypes.Config, error) {
compose, err := loader.LoadComposefile(opts, appEnv)
if err != nil {
return &composetypes.Config{}, err
}
logrus.Debugf("retrieved %s for %s", compose.Filename, recipe)
return compose, nil
}

pkg/config/app_test.go (new file, 36 lines)

@ -0,0 +1,36 @@
package config
import (
"reflect"
"testing"
)
func TestNewApp(t *testing.T) {
app, err := newApp(expectedAppEnv, appName, expectedAppFile)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, expectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp)
}
}
func TestReadAppEnvFile(t *testing.T) {
app, err := readAppEnvFile(expectedAppFile, appName)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, expectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp)
}
}
func TestGetApp(t *testing.T) {
app, err := GetApp(expectedAppFiles, appName)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(app, expectedApp) {
t.Fatalf("did not get expected app type. Expected: %s; Got: %s", app, expectedApp)
}
}

View File

@ -1,6 +1,7 @@
package config
import (
"bufio"
"fmt"
"io/fs"
"io/ioutil"
@ -9,13 +10,19 @@ import (
"path/filepath"
"strings"
"coopcloud.tech/abra/pkg/log"
"github.com/Autonomic-Cooperative/godotenv"
"github.com/sirupsen/logrus"
)
const MAX_SANITISED_APP_NAME_LENGTH = 45
const MAX_DOCKER_SECRET_LENGTH = 64
var BackupbotLabel = "coop-cloud.backupbot.enabled"
var ABRA_DIR = os.ExpandEnv("$HOME/.abra")
var SERVERS_DIR = path.Join(ABRA_DIR, "servers")
var RECIPES_DIR = path.Join(ABRA_DIR, "recipes")
var VENDOR_DIR = path.Join(ABRA_DIR, "vendor")
var BACKUP_DIR = path.Join(ABRA_DIR, "backups")
var RECIPES_JSON = path.Join(ABRA_DIR, "catalogue", "recipes.json")
var REPOS_BASE_URL = "https://git.coopcloud.tech/coop-cloud"
var CATALOGUE_JSON_REPO_NAME = "recipes-catalogue-json"
var SSH_URL_TEMPLATE = "ssh://git@git.coopcloud.tech:2222/coop-cloud/%s.git"
// GetServers retrieves all servers.
func GetServers() ([]string, error) {
@ -26,11 +33,25 @@ func GetServers() ([]string, error) {
return servers, err
}
log.Debugf("retrieved %v servers: %s", len(servers), servers)
logrus.Debugf("retrieved %v servers: %s", len(servers), servers)
return servers, nil
}
// ReadEnv loads an app environment into a map.
func ReadEnv(filePath string) (AppEnv, error) {
var envFile AppEnv
envFile, err := godotenv.Read(filePath)
if err != nil {
return nil, err
}
logrus.Debugf("read %s from %s", envFile, filePath)
return envFile, nil
}
// ReadServerNames retrieves all server names.
func ReadServerNames() ([]string, error) {
serverNames, err := GetAllFoldersInDirectory(SERVERS_DIR)
@ -39,13 +60,13 @@ func ReadServerNames() ([]string, error) {
return nil, err
}
log.Debugf("read %s from %s", strings.Join(serverNames, ","), SERVERS_DIR)
logrus.Debugf("read %s from %s", strings.Join(serverNames, ","), SERVERS_DIR)
return serverNames, nil
}
// GetAllFilesInDirectory returns filenames of all files in directory
func GetAllFilesInDirectory(directory string) ([]fs.FileInfo, error) {
// getAllFilesInDirectory returns filenames of all files in directory
func getAllFilesInDirectory(directory string) ([]fs.FileInfo, error) {
var realFiles []fs.FileInfo
files, err := ioutil.ReadDir(directory)
@ -63,7 +84,7 @@ func GetAllFilesInDirectory(directory string) ([]fs.FileInfo, error) {
realPath, err := filepath.EvalSymlinks(filePath)
if err != nil {
log.Warnf("broken symlink in your abra config folders: %s", filePath)
logrus.Warningf("broken symlink in your abra config folders: %s", filePath)
} else {
realFile, err := os.Stat(realPath)
if err != nil {
@ -96,7 +117,7 @@ func GetAllFoldersInDirectory(directory string) ([]string, error) {
filePath := path.Join(directory, file.Name())
realDir, err := filepath.EvalSymlinks(filePath)
if err != nil {
log.Warnf("broken symlink in your abra config folders: %s", filePath)
logrus.Warningf("broken symlink in your abra config folders: %s", filePath)
} else if stat, err := os.Stat(realDir); err == nil && stat.IsDir() {
// path is a directory
folders = append(folders, file.Name())
@ -106,3 +127,34 @@ func GetAllFoldersInDirectory(directory string) ([]string, error) {
return folders, nil
}
// ReadAbraShEnvVars reads env vars from an abra.sh recipe file.
func ReadAbraShEnvVars(abraSh string) (map[string]string, error) {
envVars := make(map[string]string)
file, err := os.Open(abraSh)
if err != nil {
if os.IsNotExist(err) {
return envVars, nil
}
return envVars, err
}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
if strings.Contains(line, "export") {
splitVals := strings.Split(line, "export ")
envVarDef := splitVals[len(splitVals)-1]
keyVal := strings.Split(envVarDef, "=")
if len(keyVal) != 2 {
return envVars, fmt.Errorf("couldn't parse %s", line)
}
envVars[keyVal[0]] = keyVal[1]
}
}
logrus.Debugf("read %s from %s", envVars, abraSh)
return envVars, nil
}
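For example, a hypothetical abra.sh line "export APP_ENTRYPOINT_VERSION=v1" yields the map entry APP_ENTRYPOINT_VERSION=v1, while a line whose definition contains more than one "=" fails the two-part split and is reported as unparseable.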

pkg/config/env_test.go (new file, 84 lines)

@ -0,0 +1,84 @@
package config
import (
"os"
"path"
"reflect"
"strings"
"testing"
)
var testFolder = os.ExpandEnv("$PWD/../../tests/resources/test_folder")
var validAbraConf = os.ExpandEnv("$PWD/../../tests/resources/valid_abra_config")
// make sure these are in alphabetical order
var tFolders = []string{"folder1", "folder2"}
var tFiles = []string{"bar.env", "foo.env"}
var appName = "ecloud"
var serverName = "evil.corp"
var expectedAppEnv = AppEnv{
"DOMAIN": "ecloud.evil.corp",
"RECIPE": "ecloud",
}
var expectedApp = App{
Name: appName,
Recipe: expectedAppEnv["RECIPE"],
Domain: expectedAppEnv["DOMAIN"],
Env: expectedAppEnv,
Path: expectedAppFile.Path,
Server: expectedAppFile.Server,
}
var expectedAppFile = AppFile{
Path: path.Join(validAbraConf, "servers", serverName, appName+".env"),
Server: serverName,
}
var expectedAppFiles = map[string]AppFile{
appName: expectedAppFile,
}
// var expectedServerNames = []string{"evil.corp"}
func TestGetAllFoldersInDirectory(t *testing.T) {
folders, err := GetAllFoldersInDirectory(testFolder)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(folders, tFolders) {
t.Fatalf("did not get expected folders. Expected: (%s), Got: (%s)", strings.Join(tFolders, ","), strings.Join(folders, ","))
}
}
func TestGetAllFilesInDirectory(t *testing.T) {
files, err := getAllFilesInDirectory(testFolder)
if err != nil {
t.Fatal(err)
}
var fileNames []string
for _, file := range files {
fileNames = append(fileNames, file.Name())
}
if !reflect.DeepEqual(fileNames, tFiles) {
t.Fatalf("did not get expected files. Expected: (%s), Got: (%s)", strings.Join(tFiles, ","), strings.Join(fileNames, ","))
}
}
func TestReadEnv(t *testing.T) {
env, err := ReadEnv(expectedAppFile.Path)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(env, expectedAppEnv) {
t.Fatalf(
"did not get expected application settings. Expected: DOMAIN=%s RECIPE=%s; Got: DOMAIN=%s RECIPE=%s",
expectedAppEnv["DOMAIN"],
expectedAppEnv["RECIPE"],
env["DOMAIN"],
env["RECIPE"],
)
}
}

View File

@ -1 +0,0 @@
abraDir: foobar

View File

@ -6,19 +6,18 @@ import (
"strings"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"github.com/AlecAivazis/survey/v2"
"github.com/docker/docker/api/types"
containerTypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
// GetContainer retrieves a container. If noInput is false and the retrieved
// count of containers does not match 1, then a prompt is presented to let the
// user choose. A count of 0 is handled gracefully.
func GetContainer(c context.Context, cl *client.Client, filters filters.Args, noInput bool) (types.Container, error) {
containerOpts := containerTypes.ListOptions{Filters: filters}
containerOpts := types.ContainerListOptions{Filters: filters}
containers, err := cl.ContainerList(c, containerOpts)
if err != nil {
return types.Container{}, err
@ -29,7 +28,7 @@ func GetContainer(c context.Context, cl *client.Client, filters filters.Args, no
return types.Container{}, fmt.Errorf("no containers matching the %v filter found?", filter)
}
if len(containers) > 1 {
if len(containers) != 1 {
var containersRaw []string
for _, container := range containers {
containerName := strings.Join(container.Names, " ")
@ -43,7 +42,7 @@ func GetContainer(c context.Context, cl *client.Client, filters filters.Args, no
return types.Container{}, err
}
log.Warnf("ambiguous container list received, prompting for input")
logrus.Warnf("ambiguous container list received, prompting for input")
var response string
prompt := &survey.Select{
@ -64,20 +63,8 @@ func GetContainer(c context.Context, cl *client.Client, filters filters.Args, no
}
}
log.Fatal("failed to match chosen container")
logrus.Panic("failed to match chosen container")
}
return containers[0], nil
}
// GetContainerFromStackAndService retrieves the container for the given stack and service.
func GetContainerFromStackAndService(cl *client.Client, stack, service string) (types.Container, error) {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", stack, service))
container, err := GetContainer(context.Background(), cl, filters, true)
if err != nil {
return types.Container{}, err
}
return container, nil
}
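A hypothetical call site for GetContainerFromStackAndService, assuming the import paths coopcloud.tech/abra/pkg/client and coopcloud.tech/abra/pkg/container; the server, stack and service names are illustrative:
package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/client"
	"coopcloud.tech/abra/pkg/container"
	"coopcloud.tech/abra/pkg/log"
)

func main() {
	cl, err := client.New("example.coopcloud.tech")
	if err != nil {
		log.Fatal(err)
	}

	// resolves exactly one container for the app service of this stack
	ctr, err := container.GetContainerFromStackAndService(cl, "wiki_example_com", "app")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("container ID:", ctr.ID)
}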

View File

@ -8,19 +8,22 @@ import (
"github.com/docker/cli/cli/context"
contextStore "github.com/docker/cli/cli/context/store"
cliflags "github.com/docker/cli/cli/flags"
"github.com/moby/term"
)
func NewDefaultDockerContextStore() *command.ContextStoreWithDefault {
_, _, stderr := term.StdStreams()
dockerConfig := dConfig.LoadDefaultConfigFile(stderr)
contextDir := dConfig.ContextStoreDir()
storeConfig := command.DefaultContextStoreConfig()
store := newContextStore(contextDir, storeConfig)
opts := &cliflags.ClientOptions{Context: "default"}
opts := &cliflags.CommonOptions{Context: "default"}
dockerContextStore := &command.ContextStoreWithDefault{
Store: store,
Resolver: func() (*command.DefaultContext, error) {
return command.ResolveDefaultContext(opts, storeConfig)
return command.ResolveDefaultContext(opts, dockerConfig, storeConfig, stderr)
},
}

pkg/dns/common.go (new file, 103 lines)

@ -0,0 +1,103 @@
package dns
import (
"context"
"fmt"
"net"
"os"
"time"
"github.com/AlecAivazis/survey/v2"
"github.com/sirupsen/logrus"
)
// NewToken constructs a new DNS provider token.
func NewToken(provider, providerTokenEnvVar string) (string, error) {
if token, present := os.LookupEnv(providerTokenEnvVar); present {
return token, nil
}
logrus.Debugf("no %s in environment, asking via stdin", providerTokenEnvVar)
var token string
prompt := &survey.Input{
Message: fmt.Sprintf("%s API token?", provider),
}
if err := survey.AskOne(prompt, &token); err != nil {
return "", err
}
return token, nil
}
// EnsureIPv4 ensures that an ipv4 address is set for a domain name
func EnsureIPv4(domainName string) (string, error) {
var ipv4 string
// comrade librehosters DNS resolver -> https://www.privacy-handbuch.de/handbuch_93d.htm
freifunkDNS := "5.1.66.255:53"
resolver := &net.Resolver{
PreferGo: false,
Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
d := net.Dialer{
Timeout: time.Millisecond * time.Duration(10000),
}
return d.DialContext(ctx, "udp", freifunkDNS)
},
}
ctx := context.Background()
ips, err := resolver.LookupIPAddr(ctx, domainName)
if err != nil {
return ipv4, err
}
if len(ips) == 0 {
return ipv4, fmt.Errorf("unable to retrieve ipv4 address for %s", domainName)
}
ipv4 = ips[0].IP.To4().String()
logrus.Debugf("%s points to %s (resolver: %s)", domainName, ipv4, freifunkDNS)
return ipv4, nil
}
// EnsureDomainsResolveSameIPv4 ensures that domains resolve to the same ipv4 address
func EnsureDomainsResolveSameIPv4(domainName, server string) (string, error) {
var ipv4 string
domainIPv4, err := EnsureIPv4(domainName)
if err != nil {
return ipv4, err
}
if domainIPv4 == "" {
return ipv4, fmt.Errorf("cannot resolve ipv4 for %s?", domainName)
}
serverIPv4, err := EnsureIPv4(server)
if err != nil {
return ipv4, err
}
if serverIPv4 == "" {
return ipv4, fmt.Errorf("cannot resolve ipv4 for %s?", server)
}
if domainIPv4 != serverIPv4 {
err := "app domain %s (%s) does not appear to resolve to app server %s (%s)?"
return ipv4, fmt.Errorf(err, domainName, domainIPv4, server, serverIPv4)
}
return ipv4, nil
}
// GetTTL parses a ttl string into a duration
func GetTTL(ttl string) (time.Duration, error) {
val, err := time.ParseDuration(ttl)
if err != nil {
return val, err
}
return val, nil
}
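A short usage sketch for the DNS helpers above, assuming the import path coopcloud.tech/abra/pkg/dns; the domain, server and TTL values are made up:
package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/dns"
	"coopcloud.tech/abra/pkg/log"
)

func main() {
	// check that the app domain and the server resolve to the same IPv4 address
	ipv4, err := dns.EnsureDomainsResolveSameIPv4("wiki.example.coop", "example.coopcloud.tech")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("both names resolve to", ipv4)

	// parse a record TTL such as "300s" into a time.Duration
	ttl, err := dns.GetTTL("300s")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ttl:", ttl)
}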

View File

@ -1,55 +0,0 @@
package dns
import (
"fmt"
"net"
)
// EnsureIPv4 ensures that an ipv4 address is set for a domain name
func EnsureIPv4(domainName string) (string, error) {
ipv4, err := net.ResolveIPAddr("ip4", domainName)
if err != nil {
return "", fmt.Errorf("unable to resolve ipv4 address for %s, %s", domainName, err)
}
// NOTE(d1): e.g. when there is only an ipv6 record available
if ipv4 == nil {
return "", fmt.Errorf("unable to resolve ipv4 address for %s", domainName)
}
return ipv4.String(), nil
}
// EnsureDomainsResolveSameIPv4 ensures that domains resolve to the same ipv4 address
func EnsureDomainsResolveSameIPv4(domainName, server string) (string, error) {
if server == "default" || server == "local" {
return "", nil
}
var ipv4 string
domainIPv4, err := EnsureIPv4(domainName)
if err != nil {
return ipv4, err
}
if domainIPv4 == "" {
return ipv4, fmt.Errorf("cannot resolve ipv4 for %s?", domainName)
}
serverIPv4, err := EnsureIPv4(server)
if err != nil {
return ipv4, err
}
if serverIPv4 == "" {
return ipv4, fmt.Errorf("cannot resolve ipv4 for %s?", server)
}
if domainIPv4 != serverIPv4 {
err := "app domain %s (%s) does not appear to resolve to app server %s (%s)?"
return ipv4, fmt.Errorf(err, domainName, domainIPv4, server, serverIPv4)
}
return ipv4, nil
}

Some files were not shown because too many files have changed in this diff.