Compare commits: 0.10.0-rc1...main (58 commits)
Commit SHA1s (author and date columns were not captured):
7c3b740e14
2fbef41a3a
6fb41e5300
1432f480c7
83af39771b
4d1333202e
55c24f070c
229e8eb9da
b3ab95750e
de009921a2
d081bbaefa
515b5466ca
6965799bdc
f75c9a6259
a43a092ba7
fa084a61d2
895a7fe7d6
742a726778
2b9a185aff
b7c1e87c0b
cdfb8a08bb
8943cea13f
6d64e0edd3
47045ca8f1
d0f982456e
80ad6c6681
cb63cfe9c2
d1e49d17ce
1574aa0631
1723025fbf
a2b678caf6
0a371ec360
e58a716fe1
d09a19a385
cee808ff06
4326d1d259
b976872f77
7b6ea76437
9069758969
15d6b1a2a5
8a7fe4ca07
64ad60663f
cb3f46b46e
41e514ae9a
086b4828ff
ed263854d4
eb6fe4ba6e
993172d31b
c70b6e72a7
22e4dd7fca
b6009057a8
b978f04910
3ac29d54d9
877c17fab5
f01fd26ce3
273c165a41
c88fc66c99
9b271a6963
.drone.yml (35)
@@ -3,12 +3,12 @@ kind: pipeline
name: coopcloud.tech/abra
steps:
- name: make check
image: golang:1.22
image: golang:1.24
commands:
- make check

- name: make test
image: golang:1.22
image: golang:1.24
environment:
CATL_URL: https://git.coopcloud.tech/toolshed/recipes-catalogue-json.git
commands:
@@ -60,7 +60,31 @@ steps:
- make check
- make test

- name: integration test
- name: on-demand integration test
image: appleboy/drone-ssh
settings:
host:
- int.coopcloud.tech
username: abra
key:
from_secret: abra_int_private_key
port: 22
command_timeout: 60m
script_stop: true
request_pty: true
script:
- |
wget https://git.coopcloud.tech/toolshed/abra/raw/branch/main/scripts/tests/run-ci-int -O run-ci-int
chmod +x run-ci-int
sh run-ci-int
when:
ref:
- refs/heads/int-*
depends_on:
- make check
- make test

- name: nightly integration test
image: appleboy/drone-ssh
settings:
host:
@@ -87,3 +111,8 @@ steps:
volumes:
- name: deps
temp: {}

trigger:
action:
exclude:
- synchronized
@@ -4,6 +4,7 @@
> please do add yourself! This is a community project, let's show some 💞

- 3wordchant
- ammaratef45
- cassowary
- codegod100
- decentral1se
@@ -17,3 +18,5 @@
- roxxers
- vera
- yksflip
- basebuilder
- mayel
@@ -1,5 +1,5 @@
# Build image
FROM golang:1.22-alpine AS build
FROM golang:1.24-alpine AS build

ENV GOPRIVATE=coopcloud.tech
Makefile (2)
@@ -2,7 +2,7 @@ ABRA := ./cmd/abra
KADABRA := ./cmd/kadabra
COMMIT := $(shell git rev-list -1 HEAD)
GOPATH := $(shell go env GOPATH)
GOVERSION := 1.22
GOVERSION := 1.24
LDFLAGS := "-X 'main.Commit=$(COMMIT)'"
DIST_LDFLAGS := $(LDFLAGS)" -s -w"
GCFLAGS := "all=-l -B"
@@ -183,7 +183,7 @@ does not).`,
if err := internal.RunCmdRemote(
cl,
app,
requestTTY,
disableTTY,
app.Recipe.AbraShPath,
targetServiceName, cmdName, parsedCmdArgs, remoteUser); err != nil {
log.Fatal(err)
@@ -238,7 +238,7 @@ func parseCmdArgs(args []string, isLocal bool) (bool, string) {
var (
local bool
remoteUser string
requestTTY bool
disableTTY bool
)

func init() {
@@ -259,11 +259,11 @@ func init() {
)

AppCmdCommand.Flags().BoolVarP(
&requestTTY,
&disableTTY,
"tty",
"t",
"T",
false,
"request remote TTY",
"disable remote TTY",
)

AppCmdCommand.Flags().BoolVarP(
@@ -18,7 +18,7 @@ import (
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/archive"
@@ -33,7 +33,7 @@ var AppCpCommand = &cobra.Command{
abra app cp 1312.net myfile.txt app:/

# copy that file back to your current working directory locally
abra app cp 1312.net app:/myfile.txt`,
abra app cp 1312.net app:/myfile.txt ./`,
Args: cobra.ExactArgs(3),
ValidArgsFunction: func(
cmd *cobra.Command,
@@ -134,7 +134,7 @@ func CopyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath stri
if err != nil {
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
if _, err := container.RunExec(dcli, cl, containerID, &containertypes.ExecOptions{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
@@ -162,7 +162,7 @@ func CopyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath stri
}

log.Debugf("copy %s from local to %s on container", srcPath, dstPath)
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
copyOpts := containertypes.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), containerID, dstPath, content, copyOpts); err != nil {
return err
}
@@ -173,7 +173,7 @@ func CopyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath stri
if err != nil {
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
if _, err := container.RunExec(dcli, cl, containerID, &containertypes.ExecOptions{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
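The hunks above swap the deprecated types.ExecConfig and types.CopyToContainerOptions for the containertypes aliases from github.com/docker/docker/api/types/container, as required by the Docker SDK v28 bump. A minimal sketch of the copy-into-container pattern in isolation, assuming a Docker SDK v28 client and a tar stream built with archive.TarWithOptions; the containerID, srcPath and dstPath values are placeholders:

package main

import (
	"context"
	"log"

	containertypes "github.com/docker/docker/api/types/container"
	dockerClient "github.com/docker/docker/client"
	"github.com/docker/docker/pkg/archive"
)

func copyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
	// CopyToContainer expects a tar stream, so tar up the local path first.
	content, err := archive.TarWithOptions(srcPath, &archive.TarOptions{})
	if err != nil {
		return err
	}
	defer content.Close()

	// Same option struct the diff switches to.
	copyOpts := containertypes.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
	return cl.CopyToContainer(context.Background(), containerID, dstPath, content, copyOpts)
}

func main() {
	cl, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv, dockerClient.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	// Hypothetical values for illustration only.
	if err := copyToContainer(cl, "abcdef123456", "./myfile.txt", "/"); err != nil {
		log.Fatal(err)
	}
}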
@@ -3,6 +3,7 @@ package app
import (
"context"
"fmt"
"strings"

"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/app"
@@ -46,7 +47,8 @@ checkout as-is. Recipe commit hashes are also supported as values for
ValidArgsFunction: func(
cmd *cobra.Command,
args []string,
toComplete string) ([]string, cobra.ShellCompDirective) {
toComplete string,
) ([]string, cobra.ShellCompDirective) {
switch l := len(args); l {
case 0:
return autocomplete.AppNameComplete()
@@ -63,10 +65,8 @@ checkout as-is. Recipe commit hashes are also supported as values for
},
Run: func(cmd *cobra.Command, args []string) {
var (
deployWarnMessages []string
toDeployVersion string
isChaosCommit bool
toDeployChaosVersion = config.CHAOS_DEFAULT
deployWarnMessages []string
toDeployVersion string
)

app := internal.ValidateApp(args)
@@ -79,10 +79,6 @@ checkout as-is. Recipe commit hashes are also supported as values for
log.Fatal(err)
}

if err := lint.LintForErrors(app.Recipe); err != nil {
log.Fatal(err)
}

cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
@@ -99,46 +95,20 @@ checkout as-is. Recipe commit hashes are also supported as values for
log.Fatalf("%s is already deployed", app.Name)
}

if len(args) == 2 && args[1] != "" {
toDeployVersion = args[1]
}

if !deployMeta.IsDeployed &&
toDeployVersion == "" &&
app.Recipe.EnvVersion != "" && !internal.IgnoreEnvVersion {
log.Debugf("new deployment, choosing .env version: %s", app.Recipe.EnvVersion)
toDeployVersion = app.Recipe.EnvVersion
}

if !internal.Chaos && toDeployVersion == "" {
if err := getLatestVersionOrCommit(app, &toDeployVersion); err != nil {
log.Fatal(err)
}
}

if internal.Chaos {
if err := getChaosVersion(app, &toDeployVersion, &toDeployChaosVersion); err != nil {
log.Fatal(err)
}
toDeployVersion, err = getDeployVersion(args, deployMeta, app)
if err != nil {
log.Fatal(fmt.Errorf("get deploy version: %s", err))
}

if !internal.Chaos {
isChaosCommit, err = app.Recipe.EnsureVersion(toDeployVersion)
_, err = app.Recipe.EnsureVersion(toDeployVersion)
if err != nil {
log.Fatal(err)
log.Fatalf("ensure recipe: %s", err)
}
}

if isChaosCommit {
log.Debugf("assuming chaos commit: %s", toDeployVersion)

internal.Chaos = true
toDeployChaosVersion = toDeployVersion

toDeployVersion, err = app.Recipe.GetVersionLabelLocal()
if err != nil {
log.Fatal(err)
}
}
if err := lint.LintForErrors(app.Recipe); err != nil {
log.Fatal(err)
}

if err := validateSecrets(cl, app); err != nil {
@@ -171,16 +141,14 @@ checkout as-is. Recipe commit hashes are also supported as values for
log.Fatal(err)
}

toDeployChaosVersionLabel := toDeployChaosVersion
if app.Recipe.Dirty {
toDeployChaosVersionLabel = formatter.AddDirtyMarker(toDeployChaosVersionLabel)
}

appPkg.ExposeAllEnv(stackName, compose, app.Env)
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
appPkg.SetChaosVersionLabel(compose, stackName, toDeployChaosVersionLabel)
if internal.Chaos {
appPkg.SetChaosVersionLabel(compose, stackName, toDeployVersion)
}
appPkg.SetUpdateLabel(compose, stackName, app.Env)
appPkg.SetVersionLabel(compose, stackName, toDeployVersion)

envVars, err := appPkg.CheckEnv(app)
if err != nil {
@@ -212,19 +180,12 @@ checkout as-is. Recipe commit hashes are also supported as values for
deployedVersion = deployMeta.Version
}

toWriteVersion := toDeployVersion
if internal.Chaos || isChaosCommit {
toWriteVersion = toDeployChaosVersion
}

if err := internal.DeployOverview(
app,
deployWarnMessages,
deployedVersion,
deployMeta.ChaosVersion,
toDeployVersion,
toDeployChaosVersion,
toWriteVersion,
"",
deployWarnMessages,
); err != nil {
log.Fatal(err)
}
@@ -236,7 +197,25 @@ checkout as-is. Recipe commit hashes are also supported as values for

log.Debugf("set waiting timeout to %d second(s)", stack.WaitTimeout)

if err := stack.RunDeploy(cl, deployOpts, compose, app.Name, internal.DontWaitConverge); err != nil {
serviceNames, err := appPkg.GetAppServiceNames(app.Name)
if err != nil {
log.Fatal(err)
}

f, err := app.Filters(true, false, serviceNames...)
if err != nil {
log.Fatal(err)
}

if err := stack.RunDeploy(
cl,
deployOpts,
compose,
app.Name,
app.Server,
internal.DontWaitConverge,
f,
); err != nil {
log.Fatal(err)
}

@@ -248,53 +227,28 @@ checkout as-is. Recipe commit hashes are also supported as values for
}
}

if err := app.WriteRecipeVersion(toWriteVersion, false); err != nil {
if err := app.WriteRecipeVersion(toDeployVersion, false); err != nil {
log.Fatalf("writing recipe version failed: %s", err)
}
},
}

func getChaosVersion(app app.App, toDeployVersion, toDeployChaosVersion *string) error {
var err error
*toDeployChaosVersion, err = app.Recipe.ChaosVersion()
if err != nil {
return err
}

*toDeployVersion, err = app.Recipe.GetVersionLabelLocal()
if err != nil {
return err
}

return nil
}

func getLatestVersionOrCommit(app app.App, toDeployVersion *string) error {
func getLatestVersionOrCommit(app app.App) (string, error) {
versions, err := app.Recipe.Tags()
if err != nil {
return err
return "", err
}

if len(versions) > 0 && !internal.Chaos {
*toDeployVersion = versions[len(versions)-1]

log.Debugf("choosing %s as version to deploy", *toDeployVersion)

if _, err := app.Recipe.EnsureVersion(*toDeployVersion); err != nil {
return err
}

return nil
return versions[len(versions)-1], nil
}

head, err := app.Recipe.Head()
if err != nil {
return err
return "", err
}

*toDeployVersion = formatter.SmallSHA(head.String())

return nil
return formatter.SmallSHA(head.String()), nil
}

// validateArgsAndFlags ensures compatible args/flags.
@@ -321,6 +275,46 @@ func validateSecrets(cl *dockerClient.Client, app app.App) error {
return nil
}

func getDeployVersion(cliArgs []string, deployMeta stack.DeployMeta, app app.App) (string, error) {
// Chaos mode overrides everything
if internal.Chaos {
v, err := app.Recipe.ChaosVersion()
if err != nil {
return "", err
}
log.Debugf("version: taking chaos version: %s", v)
return v, nil
}

// Check if the deploy version is set with a cli argument
if len(cliArgs) == 2 && cliArgs[1] != "" {
log.Debugf("version: taking version from cli arg: %s", cliArgs[1])
return cliArgs[1], nil
}

// Check if the recipe has a version in the .env file
if app.Recipe.EnvVersion != "" && !internal.IgnoreEnvVersion {
if strings.HasSuffix(app.Recipe.EnvVersionRaw, "+U") {
return "", fmt.Errorf("version: can not redeploy chaos version %s", app.Recipe.EnvVersionRaw)
}
log.Debugf("version: taking version from .env file: %s", app.Recipe.EnvVersion)
return app.Recipe.EnvVersion, nil
}

// Take deployed version
if deployMeta.IsDeployed {
log.Debugf("version: taking deployed version: %s", deployMeta.Version)
return deployMeta.Version, nil
}

v, err := getLatestVersionOrCommit(app)
log.Debugf("version: taking new recipe version: %s", v)
if err != nil {
return "", err
}
return v, nil
}

func init() {
AppDeployCommand.Flags().BoolVarP(
&internal.Chaos,
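The new getDeployVersion helper above collapses the previous scattered version logic into one precedence chain: chaos mode, then an explicit CLI argument, then the .env pin (refusing to redeploy a "+U" chaos pin), then the currently deployed version, then the latest tag or commit. A minimal sketch of that precedence in isolation; the Inputs struct and example values are hypothetical, for illustration only:

package main

import "fmt"

// Inputs is a hypothetical bundle of the signals getDeployVersion consults.
type Inputs struct {
	Chaos        bool   // --chaos flag
	ChaosVersion string // local checkout version when in chaos mode
	CLIArg       string // optional [version] argument
	EnvVersion   string // version pinned in the app's .env file
	IsDeployed   bool   // app already running?
	Deployed     string // currently deployed version
	Latest       string // latest recipe tag or commit
}

// resolveVersion mirrors the precedence order added in the diff.
func resolveVersion(in Inputs) string {
	switch {
	case in.Chaos:
		return in.ChaosVersion
	case in.CLIArg != "":
		return in.CLIArg
	case in.EnvVersion != "":
		return in.EnvVersion
	case in.IsDeployed:
		return in.Deployed
	default:
		return in.Latest
	}
}

func main() {
	// An already-deployed app with a .env pin: the pin wins over the deployed version.
	fmt.Println(resolveVersion(Inputs{EnvVersion: "2.3.1+1.2.3", IsDeployed: true, Deployed: "2.3.0+1.2.2", Latest: "2.4.0+1.3.0"}))
}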
@@ -142,10 +142,14 @@ Use "--status/-S" flag to query all servers for the live deployment status.`,
appStats.AutoUpdate = autoUpdate

var newUpdates []string
if version != "unknown" {
if version != "unknown" && chaosVersion == "unknown" {
if err := app.Recipe.EnsureExists(); err != nil {
log.Fatalf("unable to clone %s: %s", app.Name, err)
}

updates, err := app.Recipe.Tags()
if err != nil {
log.Fatal(err)
log.Fatalf("unable to retrieve tags for %s: %s", app.Name, err)
}

parsedVersion, err := tagcmp.Parse(version)
@@ -3,23 +3,14 @@ package app
import (
"context"
"fmt"
"io"
"os"
"slices"
"sync"
"time"

"coopcloud.tech/abra/cli/internal"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/logs"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types"
containerTypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
dockerClient "github.com/docker/docker/client"
"github.com/spf13/cobra"
)

@@ -73,80 +64,25 @@ var AppLogsCommand = &cobra.Command{
serviceNames = []string{args[1]}
}

if err = tailLogs(cl, app, serviceNames); err != nil {
f, err := app.Filters(true, false, serviceNames...)
if err != nil {
log.Fatal(err)
}

opts := logs.TailOpts{
AppName: app.Name,
Services: serviceNames,
StdErr: stdErr,
Since: sinceLogs,
Filters: f,
}

if err := logs.TailLogs(cl, opts); err != nil {
log.Fatal(err)
}
},
}

// tailLogs prints logs for the given app with optional service names to be
// filtered on. It also checks if the latest task is not runnning and then
// prints the past tasks.
func tailLogs(cl *dockerClient.Client, app appPkg.App, serviceNames []string) error {
f, err := app.Filters(true, false, serviceNames...)
if err != nil {
return err
}

services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: f})
if err != nil {
return err
}

var wg sync.WaitGroup
for _, service := range services {
filters := filters.NewArgs()
filters.Add("name", service.Spec.Name)
tasks, err := cl.TaskList(context.Background(), types.TaskListOptions{Filters: f})
if err != nil {
return err
}
if len(tasks) > 0 {
// Need to sort the tasks by the CreatedAt field in the inverse order.
// Otherwise they are in the reversed order and not sorted properly.
slices.SortFunc[[]swarm.Task](tasks, func(t1, t2 swarm.Task) int {
return int(t2.Meta.CreatedAt.Unix() - t1.Meta.CreatedAt.Unix())
})
lastTask := tasks[0].Status
if lastTask.State != swarm.TaskStateRunning {
for _, task := range tasks {
log.Errorf("[%s] %s State %s: %s", service.Spec.Name, task.Meta.CreatedAt.Format(time.RFC3339), task.Status.State, task.Status.Err)
}
}
}

// Collect the logs in a go routine, so the logs from all services are
// collected in parallel.
wg.Add(1)
go func(serviceID string) {
logs, err := cl.ServiceLogs(context.Background(), serviceID, containerTypes.LogsOptions{
ShowStderr: true,
ShowStdout: !stdErr,
Since: sinceLogs,
Until: "",
Timestamps: true,
Follow: true,
Tail: "20",
Details: false,
})
if err != nil {
log.Fatal(err)
}
defer logs.Close()

_, err = io.Copy(os.Stdout, logs)
if err != nil && err != io.EOF {
log.Fatal(err)
}
}(service.ID)
}

// Wait for all log streams to be closed.
wg.Wait()

return nil
}

var (
stdErr bool
sinceLogs string
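The removed tailLogs helper above ordered swarm tasks newest-first with slices.SortFunc before checking whether the latest task was still running. A standalone sketch of that newest-first sort, using a hypothetical task type in place of swarm.Task:

package main

import (
	"fmt"
	"slices"
	"time"
)

// task is a stand-in for swarm.Task, keeping only the field the sort needs.
type task struct {
	Name      string
	CreatedAt time.Time
}

func main() {
	now := time.Now()
	tasks := []task{
		{"old", now.Add(-2 * time.Hour)},
		{"newest", now},
		{"older", now.Add(-4 * time.Hour)},
	}

	// Newest first: compare b against a so larger timestamps sort earlier.
	slices.SortFunc(tasks, func(a, b task) int {
		return b.CreatedAt.Compare(a.CreatedAt)
	})

	fmt.Println(tasks[0].Name) // "newest"
}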
@@ -75,43 +75,50 @@ var AppNewCommand = &cobra.Command{

chaosVersion := config.CHAOS_DEFAULT
if internal.Chaos {
recipeVersion = chaosVersion

if !internal.Offline {
if err := recipe.EnsureUpToDate(); err != nil {
log.Fatal(err)
}
}
}

if !internal.Chaos {
if err := recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}
}

var recipeVersions recipePkg.RecipeVersions
if recipeVersion == "" {
var err error
recipeVersions, _, err = recipe.GetRecipeVersions()
chaosVersion, err = recipe.ChaosVersion()
if err != nil {
log.Fatal(err)
}
}

if len(recipeVersions) > 0 {
latest := recipeVersions[len(recipeVersions)-1]
for tag := range latest {
recipeVersion = tag
}

if _, err := recipe.EnsureVersion(recipeVersion); err != nil {
log.Fatal(err)
}
recipeVersion = chaosVersion
} else {
if err := recipe.EnsureLatest(); err != nil {
if err := recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}

var recipeVersions recipePkg.RecipeVersions
if recipeVersion == "" {
var err error
recipeVersions, _, err = recipe.GetRecipeVersions()
if err != nil {
log.Fatal(err)
}
}

if len(recipeVersions) > 0 {
latest := recipeVersions[len(recipeVersions)-1]
for tag := range latest {
recipeVersion = tag
}

if _, err := recipe.EnsureVersion(recipeVersion); err != nil {
log.Fatal(err)
}
} else {
if err := recipe.EnsureLatest(); err != nil {
log.Fatal(err)
}

if recipeVersion == "" {
head, err := recipe.Head()
if err != nil {
log.Fatalf("failed to retrieve latest commit for %s: %s", recipe.Name, err)
}

recipeVersion = formatter.SmallSHA(head.String())
}
}
}

if err := ensureServerFlag(); err != nil {
@@ -187,7 +194,7 @@ var AppNewCommand = &cobra.Command{
newAppServer = "local"
}

log.Infof("%s created successfully (version: %s, chaos: %s)", appDomain, recipeVersion, chaosVersion)
log.Infof("%s created (version: %s)", appDomain, recipeVersion)

if len(appSecrets) > 0 {
rows := [][]string{}
@@ -295,6 +302,12 @@ func ensureServerFlag() error {
return err
}

if len(servers) == 1 {
newAppServer = servers[0]
log.Infof("single server detected, choosing %s automatically", newAppServer)
return nil
}

if newAppServer == "" && !internal.NoInput {
prompt := &survey.Select{
Message: "Select app server:",
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"sort"
"strings"

"coopcloud.tech/abra/cli/internal"
@@ -91,9 +92,14 @@ func showPSOutput(app appPkg.App, cl *dockerClient.Client, deployedVersion, chao
return
}

services := compose.Services
sort.Slice(services, func(i, j int) bool {
return services[i].Name < services[j].Name
})

var rows [][]string
allContainerStats := make(map[string]map[string]string)
for _, service := range compose.Services {
for _, service := range services {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name))

@@ -143,10 +149,10 @@ func showPSOutput(app appPkg.App, cl *dockerClient.Client, deployedVersion, chao

row := []string{
containerStats["service"],
containerStats["status"],
containerStats["image"],
dVersion,
cVersion,
containerStats["status"],
}

rows = append(rows, row)
@@ -170,10 +176,10 @@ func showPSOutput(app appPkg.App, cl *dockerClient.Client, deployedVersion, chao

headers := []string{
"SERVICE",
"STATUS",
"IMAGE",
"VERSION",
"CHAOS",
"STATUS",
}

table.
@@ -9,8 +9,10 @@ import (
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/ui"
upstream "coopcloud.tech/abra/pkg/upstream/service"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types"
"github.com/spf13/cobra"
)

@@ -93,13 +95,36 @@ Pass "--all-services/-a" to restart all services.`,
for _, serviceName := range serviceNames {
stackServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)

service, _, err := cl.ServiceInspectWithRaw(
context.Background(),
stackServiceName,
types.ServiceInspectOptions{},
)
if err != nil {
log.Fatal(err)
}

log.Debugf("attempting to scale %s to 0", stackServiceName)

if err := upstream.RunServiceScale(context.Background(), cl, stackServiceName, 0); err != nil {
log.Fatal(err)
}

if err := stack.WaitOnService(context.Background(), cl, stackServiceName, app.Name); err != nil {
f, err := app.Filters(true, false, serviceName)
if err != nil {
log.Fatal(err)
}

waitOpts := stack.WaitOpts{
Services: []ui.ServiceMeta{{Name: stackServiceName, ID: service.ID}},
AppName: app.Name,
ServerName: app.Server,
Filters: f,
NoLog: true,
Quiet: true,
}

if err := stack.WaitOnServices(cmd.Context(), cl, waitOpts); err != nil {
log.Fatal(err)
}

@@ -110,7 +135,7 @@ Pass "--all-services/-a" to restart all services.`,
log.Fatal(err)
}

if err := stack.WaitOnService(context.Background(), cl, stackServiceName, app.Name); err != nil {
if err := stack.WaitOnServices(cmd.Context(), cl, waitOpts); err != nil {
log.Fatal(err)
}

@@ -123,6 +148,13 @@ Pass "--all-services/-a" to restart all services.`,
var allServices bool

func init() {
AppRestartCommand.Flags().BoolVarP(
&internal.Chaos,
"chaos",
"C",
false,
"ignore uncommitted recipes changes",
)
AppRestartCommand.Flags().BoolVarP(
&allServices,
"all-services",
@@ -178,28 +178,48 @@ beforehand. See "abra app backup" for more.`,
appPkg.ExposeAllEnv(stackName, compose, app.Env)
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
appPkg.SetChaosVersionLabel(compose, stackName, chosenDowngrade)
if internal.Chaos {
appPkg.SetChaosVersionLabel(compose, stackName, chosenDowngrade)
}
appPkg.SetUpdateLabel(compose, stackName, app.Env)

chaosVersion := config.CHAOS_DEFAULT
if deployMeta.IsChaos {
chaosVersion = deployMeta.ChaosVersion
}

// NOTE(d1): no release notes implemeneted for rolling back
if err := internal.NewVersionOverview(
if err := internal.DeployOverview(
app,
downgradeWarnMessages,
"rollback",
deployMeta.Version,
chaosVersion,
chosenDowngrade,
"",
downgradeWarnMessages,
); err != nil {
log.Fatal(err)
}

if err := stack.RunDeploy(cl, deployOpts, compose, stackName, internal.DontWaitConverge); err != nil {
stack.WaitTimeout, err = appPkg.GetTimeoutFromLabel(compose, stackName)
if err != nil {
log.Fatal(err)
}

log.Debugf("set waiting timeout to %d second(s)", stack.WaitTimeout)

serviceNames, err := appPkg.GetAppServiceNames(app.Name)
if err != nil {
log.Fatal(err)
}

f, err := app.Filters(true, false, serviceNames...)
if err != nil {
log.Fatal(err)
}

if err := stack.RunDeploy(
cl,
deployOpts,
compose,
stackName,
app.Server,
internal.DontWaitConverge,
f,
); err != nil {
log.Fatal(err)
}

@@ -247,7 +267,7 @@ func validateDowngradeVersionArg(
) error {
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
if err != nil {
return fmt.Errorf("'%s' is not a known version for %s", deployMeta.Version, app.Recipe.Name)
return fmt.Errorf("current deployment '%s' is not a known version for %s", deployMeta.Version, app.Recipe.Name)
}

parsedSpecificVersion, err := tagcmp.Parse(specificVersion)
@@ -11,7 +11,7 @@ import (
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/spf13/cobra"
)
@@ -64,7 +64,7 @@ var AppRunCommand = &cobra.Command{
}

userCmd := args[2:]
execCreateOpts := types.ExecConfig{
execCreateOpts := containertypes.ExecOptions{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
@@ -49,11 +49,11 @@ var AppSecretGenerateCommand = &cobra.Command{
log.Fatal(err)
}

if len(args) == 1 && !generateAllSecrets {
if len(args) <= 2 && !generateAllSecrets {
log.Fatal("missing arguments [secret]/[version] or '--all'")
}

if len(args) > 1 && generateAllSecrets {
if len(args) > 2 && generateAllSecrets {
log.Fatal("cannot use '[secret] [version]' and '--all' together")
}

@@ -54,25 +54,34 @@ Passing "--prune/-p" does not remove those volumes.`,
log.Fatalf("%s is not deployed?", app.Name)
}

chaosVersion := config.CHAOS_DEFAULT
if deployMeta.IsChaos {
chaosVersion = deployMeta.ChaosVersion
}

toWriteVersion := deployMeta.Version
if deployMeta.IsChaos {
toWriteVersion = chaosVersion
}

if err := internal.UndeployOverview(
if err := internal.DeployOverview(
app,
deployMeta.Version,
chaosVersion,
toWriteVersion,
config.NO_DOMAIN_DEFAULT,
"",
nil,
); err != nil {
log.Fatal(err)
}

composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
log.Fatal(err)
}

opts := stack.Deploy{Composefiles: composeFiles, Namespace: stackName}
compose, err := appPkg.GetAppComposeConfig(app.Name, opts, app.Env)
if err != nil {
log.Fatal(err)
}

stack.WaitTimeout, err = appPkg.GetTimeoutFromLabel(compose, stackName)
if err != nil {
log.Fatal(err)
}

log.Info("initialising undeploy")

rmOpts := stack.Remove{
Namespaces: []string{stackName},
Detach: false,
@@ -87,7 +96,9 @@ Passing "--prune/-p" does not remove those volumes.`,
}
}

if err := app.WriteRecipeVersion(toWriteVersion, false); err != nil {
log.Info("undeploy succeeded 🟢")

if err := app.WriteRecipeVersion(deployMeta.Version, false); err != nil {
log.Fatalf("writing recipe version failed: %s", err)
}
},
@@ -3,6 +3,7 @@ package app
import (
"context"
"fmt"
"strings"

"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/app"
@@ -14,6 +15,7 @@ import (
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/lint"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
stack "coopcloud.tech/abra/pkg/upstream/stack"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
@@ -43,7 +45,8 @@ beforehand. See "abra app backup" for more.`,
ValidArgsFunction: func(
cmd *cobra.Command,
args []string,
toComplete string) ([]string, cobra.ShellCompDirective) {
toComplete string,
) ([]string, cobra.ShellCompDirective) {
switch l := len(args); l {
case 0:
return autocomplete.AppNameComplete()
@@ -68,7 +71,13 @@ beforehand. See "abra app backup" for more.`,

app := internal.ValidateApp(args)

if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
if err := app.Recipe.Ensure(recipe.EnsureContext{
Chaos: internal.Chaos,
Offline: internal.Offline,
// Ignore the env version for now, to make sure we are at the latest commit.
// This enables us to get release notes, that were added after a release.
IgnoreEnvVersion: true,
}); err != nil {
log.Fatal(err)
}

@@ -141,10 +150,9 @@ beforehand. See "abra app backup" for more.`,

log.Debugf("choosing %s as version to upgrade", chosenUpgrade)

// NOTE(d1): if release notes written after git tag published, read them
// before we check out the tag and then they'll appear to be missing. this
// covers when we obviously will forget to write release notes before
// publishing
// Get the release notes before checking out the new version in the
// recipe. This enables us to get release notes, that were added after
// a release.
if err := getReleaseNotes(app, versions, chosenUpgrade, deployMeta, &upgradeReleaseNotes); err != nil {
log.Fatal(err)
}
@@ -183,7 +191,9 @@ beforehand. See "abra app backup" for more.`,
appPkg.ExposeAllEnv(stackName, compose, app.Env)
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
appPkg.SetChaosVersionLabel(compose, stackName, chosenUpgrade)
if internal.Chaos {
appPkg.SetChaosVersionLabel(compose, stackName, chosenUpgrade)
}
appPkg.SetUpdateLabel(compose, stackName, app.Env)

envVars, err := appPkg.CheckEnv(app)
@@ -204,23 +214,19 @@ beforehand. See "abra app backup" for more.`,
return
}

chaosVersion := config.CHAOS_DEFAULT
if deployMeta.IsChaos {
chaosVersion = deployMeta.ChaosVersion

if deployMeta.ChaosVersion == "" {
chaosVersion = config.UNKNOWN_DEFAULT
}
if upgradeReleaseNotes == "" {
upgradeWarnMessages = append(
upgradeWarnMessages,
fmt.Sprintf("no release notes available for %s", chosenUpgrade),
)
}

if err := internal.NewVersionOverview(
if err := internal.DeployOverview(
app,
upgradeWarnMessages,
"upgrade",
deployMeta.Version,
chaosVersion,
chosenUpgrade,
upgradeReleaseNotes,
upgradeWarnMessages,
); err != nil {
log.Fatal(err)
}
@@ -232,7 +238,25 @@ beforehand. See "abra app backup" for more.`,

log.Debugf("set waiting timeout to %d second(s)", stack.WaitTimeout)

if err := stack.RunDeploy(cl, deployOpts, compose, stackName, internal.DontWaitConverge); err != nil {
serviceNames, err := appPkg.GetAppServiceNames(app.Name)
if err != nil {
log.Fatal(err)
}

f, err := app.Filters(true, false, serviceNames...)
if err != nil {
log.Fatal(err)
}

if err := stack.RunDeploy(
cl,
deployOpts,
compose,
stackName,
app.Server,
internal.DontWaitConverge,
f,
); err != nil {
log.Fatal(err)
}

@@ -312,6 +336,11 @@ func getReleaseNotes(
}

if note != "" {
// NOTE(d1): trim any final newline on the end of the note itself before
// we manually handle newlines (for multiple release notes and
// ensuring space between the warning messages)
note = strings.TrimSuffix(note, "\n")

*upgradeReleaseNotes += fmt.Sprintf("%s\n", note)
}
}
@@ -363,7 +392,7 @@ func validateUpgradeVersionArg(

parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
if err != nil {
return err
return fmt.Errorf("'%s' is not a known version", deployMeta.Version)
}

if parsedSpecificVersion.IsLessThan(parsedDeployedVersion) &&
@@ -395,9 +424,7 @@ func ensureDeployed(cl *dockerClient.Client, app app.App) (stack.DeployMeta, err
return deployMeta, nil
}

var (
showReleaseNotes bool
)
var showReleaseNotes bool

func init() {
AppUpgradeCommand.Flags().BoolVarP(
@@ -25,6 +25,11 @@ var CatalogueGenerateCommand = &cobra.Command{
Short: "Generate the recipe catalogue",
Long: `Generate a new copy of the recipe catalogue.

N.B. this command **will** wipe local unstaged changes from your local recipes
if present. "--chaos/-C" on this command refers to the catalogue repository
("$ABRA_DIR/catalogue") and not the recipes. Please take care not to lose your
changes.

It is possible to generate new metadata for a single recipe by passing
[recipe]. The existing local catalogue will be updated, not overwritten.

@@ -12,17 +12,16 @@ var AutocompleteCommand = &cobra.Command{
Long: `To load completions:

Bash:

# Load autocompletion for the current Bash session
$ source <(abra autocomplete bash)

# To load autocompletion for each session, execute once:
# Linux:
$ abra autocomplete bash > /etc/bash_completion.d/abra
$ abra autocomplete bash | sudo tee /etc/bash_completion.d/abra
# macOS:
$ abra autocomplete bash > $(brew --prefix)/etc/bash_completion.d/abra
$ abra autocomplete bash | sudo tee $(brew --prefix)/etc/bash_completion.d/abra

Zsh:

# If shell autocompletion is not already enabled in your environment,
# you will need to enable it. You can execute the following once:

@@ -34,14 +33,12 @@ Zsh:
# You will need to start a new shell for this setup to take effect.

fish:

$ abra autocomplete fish | source

# To load autocompletions for each session, execute once:
$ abra autocomplete fish > ~/.config/fish/completions/abra.fish

PowerShell:

PS> abra autocomplete powershell | Out-String | Invoke-Expression

# To load autocompletions for every new session, run:
@@ -12,6 +12,7 @@ import (
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
)
@@ -47,7 +48,7 @@ func RunBackupCmdRemote(
backupCmd string,
containerID string,
execEnv []string) (io.Writer, error) {
execBackupListOpts := types.ExecConfig{
execBackupListOpts := containertypes.ExecOptions{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
@@ -14,7 +14,7 @@ import (
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
@@ -24,7 +24,7 @@ import (
func RunCmdRemote(
cl *dockerClient.Client,
app appPkg.App,
requestTTY bool,
disableTTY bool,
abraSh, serviceName, cmdName, cmdArgs, remoteUser string) error {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
@@ -42,7 +42,7 @@ func RunCmdRemote(
return err
}

copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
copyOpts := containertypes.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), targetContainer.ID, "/tmp", content, copyOpts); err != nil {
return err
}
@@ -55,7 +55,7 @@ func RunCmdRemote(

shell := "/bin/bash"
findShell := []string{"test", "-e", shell}
execCreateOpts := types.ExecConfig{
execCreateOpts := containertypes.ExecOptions{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
@@ -84,8 +84,10 @@ func RunCmdRemote(
}

execCreateOpts.Cmd = cmd
execCreateOpts.Tty = requestTTY
if !requestTTY {

execCreateOpts.Tty = true
if disableTTY {
execCreateOpts.Tty = false
log.Debugf("not requesting a remote TTY")
}

@@ -10,7 +10,6 @@ import (
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
"github.com/charmbracelet/lipgloss"
@@ -38,100 +37,21 @@ func horizontal(left, mid, right string) string {
return lipgloss.JoinHorizontal(lipgloss.Left, left, mid, right)
}

// NewVersionOverview shows an upgrade or downgrade overview
func NewVersionOverview(
app appPkg.App,
warnMessages []string,
kind,
deployedVersion,
deployedChaosVersion,
toDeployVersion,
releaseNotes string) error {
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = composeFiles
}

server := app.Server
if app.Server == "default" {
server = "local"
}

domain := app.Domain
if domain == "" {
domain = config.NO_DOMAIN_DEFAULT
}

upperKind := strings.ToUpper(kind)

rows := [][]string{
{"DOMAIN", domain},
{"RECIPE", app.Recipe.Name},
{"SERVER", server},
{"CONFIG", deployConfig},

{"CURRENT DEPLOYMENT", "---"},
{"VERSION", formatter.BoldDirtyDefault(deployedVersion)},
{"CHAOS ", formatter.BoldDirtyDefault(deployedChaosVersion)},

{upperKind, "---"},
{"VERSION", formatter.BoldDirtyDefault(toDeployVersion)},

{fmt.Sprintf("%s.ENV", strings.ToUpper(app.Domain)), "---"},
{"CURRENT VERSION", formatter.BoldDirtyDefault(app.Recipe.EnvVersion)},
{"NEW VERSION", formatter.BoldDirtyDefault(toDeployVersion)},
}

overview := formatter.CreateOverview(
fmt.Sprintf("%s OVERVIEW", upperKind),
rows,
)

fmt.Println(overview)

if releaseNotes != "" && toDeployVersion != "" {
fmt.Print(releaseNotes)
} else {
warnMessages = append(
warnMessages,
fmt.Sprintf("no release notes available for %s", toDeployVersion),
)
}

for _, msg := range warnMessages {
log.Warn(msg)
}

if NoInput {
return nil
}

response := false
prompt := &survey.Confirm{Message: "proceed?"}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}

if !response {
log.Fatal("deployment cancelled")
}

return nil
func formatComposeFiles(composeFiles string) string {
return strings.ReplaceAll(composeFiles, ":", "\n")
}

// DeployOverview shows a deployment overview
func DeployOverview(
app appPkg.App,
warnMessages []string,
deployedVersion string,
deployedChaosVersion string,
toDeployVersion,
toDeployChaosVersion string,
toWriteVersion string,
toDeployVersion string,
releaseNotes string,
warnMessages []string,
) error {
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = composeFiles
deployConfig = formatComposeFiles(composeFiles)
}

server := app.Server
@@ -144,21 +64,7 @@ func DeployOverview(
domain = config.NO_DOMAIN_DEFAULT
}

if app.Recipe.Dirty {
toWriteVersion = formatter.AddDirtyMarker(toWriteVersion)
toDeployChaosVersion = formatter.AddDirtyMarker(toDeployChaosVersion)
}

recipeName, exists := app.Env["RECIPE"]
if !exists {
recipeName = app.Env["TYPE"]
}

envVersion, err := recipe.GetEnvVersionRaw(recipeName)
if err != nil {
return err
}

envVersion := app.Recipe.EnvVersionRaw
if envVersion == "" {
envVersion = config.NO_VERSION_DEFAULT
}
@@ -168,24 +74,21 @@ func DeployOverview(
{"RECIPE", app.Recipe.Name},
{"SERVER", server},
{"CONFIG", deployConfig},

{"CURRENT DEPLOYMENT", "---"},
{"VERSION", formatter.BoldDirtyDefault(deployedVersion)},
{"CHAOS", formatter.BoldDirtyDefault(deployedChaosVersion)},

{"NEW DEPLOYMENT", "---"},
{"VERSION", formatter.BoldDirtyDefault(toDeployVersion)},
{"CHAOS", formatter.BoldDirtyDefault(toDeployChaosVersion)},

{fmt.Sprintf("%s.ENV", strings.ToUpper(app.Name)), "---"},
{"CURRENT VERSION", formatter.BoldDirtyDefault(envVersion)},
{"NEW VERSION", formatter.BoldDirtyDefault(toWriteVersion)},
{"", ""},
{"CURRENT DEPLOYMENT", formatter.BoldDirtyDefault(deployedVersion)},
{"ENV VERSION", formatter.BoldDirtyDefault(envVersion)},
{"NEW DEPLOYMENT", formatter.BoldDirtyDefault(toDeployVersion)},
}

overview := formatter.CreateOverview("DEPLOY OVERVIEW", rows)
deployType := getDeployType(deployedVersion, toDeployVersion)
overview := formatter.CreateOverview(fmt.Sprintf("%s OVERVIEW", deployType), rows)

fmt.Println(overview)

if releaseNotes != "" {
fmt.Print(releaseNotes)
}

for _, msg := range warnMessages {
log.Warn(msg)
}
@@ -207,76 +110,34 @@ func DeployOverview(
return nil
}

// UndeployOverview shows an undeployment overview
func UndeployOverview(
app appPkg.App,
deployedVersion,
deployedChaosVersion,
toWriteVersion string,
) error {
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = composeFiles
func getDeployType(currentVersion, newVersion string) string {
if newVersion == config.NO_DOMAIN_DEFAULT {
return "UNDEPLOY"
}

server := app.Server
if app.Server == "default" {
server = "local"
if strings.Contains(newVersion, "+U") {
return "CHAOS DEPLOY"
}

domain := app.Domain
if domain == "" {
domain = config.NO_DOMAIN_DEFAULT
if strings.Contains(currentVersion, "+U") {
return "UNCHAOS DEPLOY"
}

recipeName, exists := app.Env["RECIPE"]
if !exists {
recipeName = app.Env["TYPE"]
if currentVersion == newVersion {
return "REDEPLOY"
}

envVersion, err := recipe.GetEnvVersionRaw(recipeName)
if currentVersion == config.NO_VERSION_DEFAULT {
return "NEW DEPLOY"
}
currentParsed, err := tagcmp.Parse(currentVersion)
if err != nil {
return err
return "DEPLOY"
}

if envVersion == "" {
envVersion = config.NO_VERSION_DEFAULT
newParsed, err := tagcmp.Parse(newVersion)
if err != nil {
return "DEPLOY"
}

rows := [][]string{
{"DOMAIN", domain},
{"RECIPE", app.Recipe.Name},
{"SERVER", server},
{"CONFIG", deployConfig},

{"CURRENT DEPLOYMENT", "---"},
{"VERSION", formatter.BoldDirtyDefault(deployedVersion)},
{"CHAOS", formatter.BoldDirtyDefault(deployedChaosVersion)},

{fmt.Sprintf("%s.ENV", strings.ToUpper(app.Name)), "---"},
{"CURRENT VERSION", formatter.BoldDirtyDefault(envVersion)},
{"NEW VERSION", formatter.BoldDirtyDefault(toWriteVersion)},
if currentParsed.IsLessThan(newParsed) {
return "UPGRADE"
}

overview := formatter.CreateOverview("UNDEPLOY OVERVIEW", rows)

fmt.Println(overview)

if NoInput {
return nil
}

response := false
prompt := &survey.Confirm{Message: "proceed?"}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}

if !response {
log.Fatal("undeploy cancelled")
}

return nil
return "DOWNGRADE"
}

// PostCmds parses a string of commands and executes them inside of the respective services
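The consolidated DeployOverview now derives its heading from getDeployType, which classifies the transition by comparing the current and new versions with tagcmp. A small sketch of the tag-comparison branch on its own; the example version strings are hypothetical, and the real helper handles the undeploy, chaos, redeploy and new-deploy cases before this comparison is reached:

package main

import (
	"fmt"

	"coopcloud.tech/tagcmp"
)

// label reduces getDeployType to the tagcmp comparison branch only.
func label(current, next string) string {
	currentParsed, err := tagcmp.Parse(current)
	if err != nil {
		return "DEPLOY"
	}
	newParsed, err := tagcmp.Parse(next)
	if err != nil {
		return "DEPLOY"
	}
	if currentParsed.IsLessThan(newParsed) {
		return "UPGRADE"
	}
	return "DOWNGRADE"
}

func main() {
	fmt.Println(label("2.0.1+1.26.0", "2.1.0+1.27.1")) // UPGRADE
	fmt.Println(label("2.1.0+1.27.1", "2.0.1+1.26.0")) // DOWNGRADE
}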
@@ -1,11 +1,15 @@
package recipe

import (
"os"

"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"github.com/go-git/go-git/v5"
gitCfg "github.com/go-git/go-git/v5/config"
"github.com/spf13/cobra"
)

@@ -13,7 +17,16 @@ var RecipeFetchCommand = &cobra.Command{
Use: "fetch [recipe | --all] [flags]",
Aliases: []string{"f"},
Short: "Clone recipe(s) locally",
Long: `Using "--force/-f" Git syncs an existing recipe. It does not erase unstaged changes.`,
Args: cobra.RangeArgs(0, 1),
Example: ` # fetch from recipe catalogue
abra recipe fetch gitea

# fetch from remote recipe
abra recipe fetch git.foo.org/recipes/myrecipe

# fetch with ssh remote for hacking
abra recipe fetch gitea --ssh`,
ValidArgsFunction: func(
cmd *cobra.Command,
args []string,
@@ -36,10 +49,39 @@ var RecipeFetchCommand = &cobra.Command{

ensureCtx := internal.GetEnsureContext()
if recipeName != "" {
r := internal.ValidateRecipe(args, cmd.Name())
if err := r.Ensure(ensureCtx); err != nil {
log.Fatal(err)
r := recipe.Get(recipeName)
if _, err := os.Stat(r.Dir); !os.IsNotExist(err) {
if !force {
log.Warnf("%s is already fetched", r.Name)
return
}
}

r = internal.ValidateRecipe(args, cmd.Name())

if sshRemote {
if r.SSHURL == "" {
log.Warnf("unable to discover SSH remote for %s", r.Name)
return
}

repo, err := git.PlainOpen(r.Dir)
if err != nil {
log.Fatalf("unable to open %s: %s", r.Dir, err)
}

if err = repo.DeleteRemote("origin"); err != nil {
log.Fatalf("unable to remove default remote in %s: %s", r.Dir, err)
}

if _, err := repo.CreateRemote(&gitCfg.RemoteConfig{
Name: "origin",
URLs: []string{r.SSHURL},
}); err != nil {
log.Fatalf("unable to set SSH remote in %s: %s", r.Dir, err)
}
}

return
}

@@ -61,6 +103,8 @@ var RecipeFetchCommand = &cobra.Command{

var (
fetchAllRecipes bool
sshRemote bool
force bool
)

func init() {
@@ -71,4 +115,20 @@ func init() {
false,
"fetch all recipes",
)

RecipeFetchCommand.Flags().BoolVarP(
&sshRemote,
"ssh",
"s",
false,
"automatically set ssh remote",
)

RecipeFetchCommand.Flags().BoolVarP(
&force,
"force",
"f",
false,
"force re-fetch",
)
}
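The new --ssh flag above swaps a fetched recipe's origin remote to its SSH URL using go-git. The same pattern in isolation, assuming a repository already cloned at repoDir; the repoDir and sshURL values are placeholders:

package main

import (
	"log"

	git "github.com/go-git/go-git/v5"
	gitCfg "github.com/go-git/go-git/v5/config"
)

func main() {
	repoDir := "/home/user/.abra/recipes/gitea"                        // placeholder path
	sshURL := "ssh://git@git.coopcloud.tech:2222/coop-cloud/gitea.git" // placeholder URL

	repo, err := git.PlainOpen(repoDir)
	if err != nil {
		log.Fatalf("unable to open %s: %s", repoDir, err)
	}

	// Replace the default HTTPS origin with the SSH remote.
	if err := repo.DeleteRemote("origin"); err != nil {
		log.Fatalf("unable to remove default remote: %s", err)
	}
	if _, err := repo.CreateRemote(&gitCfg.RemoteConfig{
		Name: "origin",
		URLs: []string{sshURL},
	}); err != nil {
		log.Fatalf("unable to set SSH remote: %s", err)
	}
}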
@@ -267,6 +267,8 @@ func addReleaseNotes(recipe recipe.Recipe, tag string) error {
return err
}

var addNextAsReleaseNotes bool

nextReleaseNotePath := path.Join(releaseDir, "next")
if _, err := os.Stat(nextReleaseNotePath); err == nil {
// release/next note exists. Move it to release/<tag>
@@ -276,38 +278,37 @@ func addReleaseNotes(recipe recipe.Recipe, tag string) error {
}

if !internal.NoInput {
prompt := &survey.Input{
prompt := &survey.Confirm{
Message: "Use release note in release/next?",
}
var addReleaseNote bool
if err := survey.AskOne(prompt, &addReleaseNote); err != nil {

if err := survey.AskOne(prompt, &addNextAsReleaseNotes); err != nil {
return err
}
if !addReleaseNote {

if !addNextAsReleaseNotes {
return nil
}
}

err := os.Rename(nextReleaseNotePath, tagReleaseNotePath)
if err != nil {
if err := os.Rename(nextReleaseNotePath, tagReleaseNotePath); err != nil {
return err
}

err = gitPkg.Add(recipe.Dir, path.Join("release", "next"), internal.Dry)
if err != nil {
if err := gitPkg.Add(recipe.Dir, path.Join("release", "next"), internal.Dry); err != nil {
return err
}

err = gitPkg.Add(recipe.Dir, path.Join("release", tag), internal.Dry)
if err != nil {
if err := gitPkg.Add(recipe.Dir, path.Join("release", tag), internal.Dry); err != nil {
return err
}
} else if !errors.Is(err, os.ErrNotExist) {
return err
}

// No release note exists for the current release.
if internal.NoInput {
// NOTE(d1): No release note exists for the current release. Or, we've
// already used release/next as the release note
if internal.NoInput || addNextAsReleaseNotes {
return nil
}
@@ -35,6 +35,7 @@ func Run(version, commit string) {
config.ABRA_DIR,
config.SERVERS_DIR,
config.RECIPES_DIR,
config.LOGS_DIR,
config.VENDOR_DIR, // TODO(d1): remove > 0.9.x
config.BACKUP_DIR, // TODO(d1): remove > 0.9.x
}
@@ -51,6 +52,10 @@ func Run(version, commit string) {
log.Logger.SetStyles(charmLog.DefaultStyles())
charmLog.SetDefault(log.Logger)

if internal.MachineReadable {
log.SetOutput(os.Stderr)
}

if internal.Debug {
log.SetLevel(log.DebugLevel)
log.SetOutput(os.Stderr)
@@ -103,8 +103,7 @@ developer machine. The domain is then set to "default".`,

if _, err := client.New(name, timeout); err != nil {
cleanUp(name)
log.Debugf("ssh %s error: %s", name, sshPkg.Fatal(name, err))
log.Fatalf("can't ssh to %s, make sure \"ssh %s\" works", name, name)
log.Fatalf("ssh %s error: %s", name, sshPkg.Fatal(name, err))
}

if created {
@@ -441,7 +441,25 @@ func upgrade(cl *dockerclient.Client, stackName, recipeName, upgradeVersion stri

log.Infof("upgrade %s (%s) to version %s", stackName, recipeName, upgradeVersion)

err = stack.RunDeploy(cl, deployOpts, compose, stackName, true)
serviceNames, err := appPkg.GetAppServiceNames(app.Name)
if err != nil {
return err
}

f, err := app.Filters(true, false, serviceNames...)
if err != nil {
return err
}

err = stack.RunDeploy(
cl,
deployOpts,
compose,
stackName,
app.Server,
true,
f,
)

return err
}
go.mod (110)
@ -1,6 +1,6 @@
module coopcloud.tech/abra

go 1.22.7
go 1.23.0

toolchain go1.23.1

@ -8,21 +8,22 @@ require (
coopcloud.tech/tagcmp v0.0.0-20230809071031-eb3e7758d4eb
git.coopcloud.tech/toolshed/godotenv v1.5.2-0.20250103171850-4d0ca41daa5c
github.com/AlecAivazis/survey/v2 v2.3.7
github.com/charmbracelet/lipgloss v1.0.0
github.com/charmbracelet/log v0.4.0
github.com/charmbracelet/bubbletea v1.3.4
github.com/charmbracelet/lipgloss v1.1.0
github.com/charmbracelet/log v0.4.1
github.com/distribution/reference v0.6.0
github.com/docker/cli v27.4.1+incompatible
github.com/docker/docker v27.4.1+incompatible
github.com/docker/cli v28.0.1+incompatible
github.com/docker/docker v28.0.1+incompatible
github.com/docker/go-units v0.5.0
github.com/go-git/go-git/v5 v5.13.1
github.com/google/go-cmp v0.6.0
github.com/go-git/go-git/v5 v5.14.0
github.com/google/go-cmp v0.7.0
github.com/moby/sys/signal v0.7.1
github.com/moby/term v0.5.0
github.com/moby/term v0.5.2
github.com/pkg/errors v0.9.1
github.com/schollz/progressbar/v3 v3.17.1
golang.org/x/term v0.27.0
github.com/schollz/progressbar/v3 v3.18.0
golang.org/x/term v0.30.0
gopkg.in/yaml.v3 v3.0.1
gotest.tools/v3 v3.5.1
gotest.tools/v3 v3.5.2
)

require (
@ -31,16 +32,19 @@ require (
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/BurntSushi/toml v1.4.0 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/ProtonMail/go-crypto v1.1.3 // indirect
github.com/ProtonMail/go-crypto v1.1.6 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/charmbracelet/x/ansi v0.6.0 // indirect
github.com/cloudflare/circl v1.5.0 // indirect
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
github.com/charmbracelet/x/ansi v0.8.0 // indirect
github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
github.com/cloudflare/circl v1.6.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
@ -48,11 +52,12 @@ require (
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.6.1 // indirect
github.com/go-git/go-billy/v5 v5.6.2 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@ -60,70 +65,71 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mmcloughlin/avo v0.6.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/moby/sys/user v0.3.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/muesli/termenv v0.15.2 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/termenv v0.16.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/runc v1.1.13 // indirect
github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pjbgf/sha1cd v0.3.1 // indirect
github.com/pjbgf/sha1cd v0.3.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.61.0 // indirect
github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/skeema/knownhosts v1.3.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/skeema/knownhosts v1.3.1 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
go.opentelemetry.io/otel v1.33.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
go.opentelemetry.io/otel v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect
go.opentelemetry.io/otel/metric v1.33.0 // indirect
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect
go.opentelemetry.io/otel/trace v1.33.0 // indirect
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.8.0 // indirect
golang.org/x/tools v0.28.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250102185135-69823020774d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
google.golang.org/grpc v1.69.2 // indirect
google.golang.org/protobuf v1.36.1 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/otel/sdk v1.35.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
go.opentelemetry.io/otel/trace v1.35.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
golang.org/x/net v0.37.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/time v0.11.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250313205543-e70fdf4c4cb4 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect
google.golang.org/grpc v1.71.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
@ -132,19 +138,19 @@ require (
github.com/containers/image v3.0.2+incompatible
github.com/containers/storage v1.38.2 // indirect
github.com/decentral1se/passgen v1.0.1
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/docker-credential-helpers v0.9.3 // indirect
github.com/fvbommel/sortorder v1.1.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/prometheus/client_golang v1.20.5 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/prometheus/client_golang v1.21.1 // indirect
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/spf13/cobra v1.8.1
github.com/spf13/cobra v1.9.1
github.com/stretchr/testify v1.10.0
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
golang.org/x/sys v0.28.0
golang.org/x/sys v0.31.0
)
227
go.sum
227
go.sum
@ -79,8 +79,8 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk=
github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=
github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
@ -135,14 +135,22 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/charmbracelet/lipgloss v1.0.0 h1:O7VkGDvqEdGi93X+DeqsQ7PKHDgtQfF8j8/O2qFMQNg=
|
||||
github.com/charmbracelet/lipgloss v1.0.0/go.mod h1:U5fy9Z+C38obMs+T+tJqst9VGzlOYGj4ri9reL3qUlo=
|
||||
github.com/charmbracelet/log v0.4.0 h1:G9bQAcx8rWA2T3pWvx7YtPTPwgqpk7D68BX21IRW8ZM=
|
||||
github.com/charmbracelet/log v0.4.0/go.mod h1:63bXt/djrizTec0l11H20t8FDSvA4CRZJ1KH22MdptM=
|
||||
github.com/charmbracelet/x/ansi v0.6.0 h1:qOznutrb93gx9oMiGf7caF7bqqubh6YIM0SWKyA08pA=
|
||||
github.com/charmbracelet/x/ansi v0.6.0/go.mod h1:KBUFw1la39nl0dLl10l5ORDAqGXaeurTQmwyyVKse/Q=
|
||||
github.com/charmbracelet/bubbletea v1.3.4 h1:kCg7B+jSCFPLYRA52SDZjr51kG/fMUEoPoZrkaDHyoI=
|
||||
github.com/charmbracelet/bubbletea v1.3.4/go.mod h1:dtcUCyCGEX3g9tosuYiut3MXgY/Jsv9nKVdibKKRRXo=
|
||||
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
|
||||
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
|
||||
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
|
||||
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
|
||||
github.com/charmbracelet/log v0.4.1 h1:6AYnoHKADkghm/vt4neaNEXkxcXLSV2g1rdyFDOpTyk=
|
||||
github.com/charmbracelet/log v0.4.1/go.mod h1:pXgyTsqsVu4N9hGdHmQ0xEA4RsXof402LX9ZgiITn2I=
|
||||
github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE=
|
||||
github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a h1:G99klV19u0QnhiizODirwVksQB91TJKV/UaTnACcG30=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
|
||||
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
|
||||
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
|
||||
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
|
||||
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
|
||||
@ -160,8 +168,8 @@ github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2u
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
|
||||
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
|
||||
github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys=
|
||||
github.com/cloudflare/circl v1.5.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
|
||||
github.com/cloudflare/circl v1.6.0 h1:cr5JKic4HI+LkINy2lg3W2jF8sHCVTBncJr5gIIq7qk=
|
||||
github.com/cloudflare/circl v1.6.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
@ -275,7 +283,6 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
@ -285,8 +292,8 @@ github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
|
||||
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||
github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM=
|
||||
github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
|
||||
github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
|
||||
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
|
||||
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
|
||||
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
|
||||
@ -305,19 +312,19 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v27.4.1+incompatible h1:VzPiUlRJ/xh+otB75gva3r05isHMo5wXDfPRi5/b4hI=
|
||||
github.com/docker/cli v27.4.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v28.0.1+incompatible h1:g0h5NQNda3/CxIsaZfH4Tyf6vpxFth7PYl3hgCPOKzs=
|
||||
github.com/docker/cli v28.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
|
||||
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4=
|
||||
github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0=
|
||||
github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
|
||||
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
|
||||
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
@ -340,8 +347,8 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/elazarl/goproxy v1.2.3 h1:xwIyKHbaP5yfT6O9KIeYJR5549MXRQkoQMRXGztz8YQ=
|
||||
github.com/elazarl/goproxy v1.2.3/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64=
|
||||
github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
|
||||
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
||||
@ -352,6 +359,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
@ -376,12 +385,12 @@ github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
|
||||
github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
|
||||
github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA=
|
||||
github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE=
|
||||
github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM=
|
||||
github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
|
||||
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
|
||||
github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M=
|
||||
github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc=
|
||||
github.com/go-git/go-git/v5 v5.14.0 h1:/MD3lCrGjCen5WfEAzKg00MJJffKhC8gzS80ycmCi60=
|
||||
github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj9ItW3Wk5k=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
@ -478,8 +487,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
|
||||
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@ -520,8 +529,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
|
||||
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
@ -583,8 +592,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
|
||||
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
@ -616,13 +625,14 @@ github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7
|
||||
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
|
||||
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
@ -649,8 +659,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
|
||||
github.com/mmcloughlin/avo v0.6.0 h1:QH6FU8SKoTLaVs80GA8TJuLNkUYl4VokHKlPhVDg4YY=
|
||||
github.com/mmcloughlin/avo v0.6.0/go.mod h1:8CoAGaCSYXtCPR+8y18Y9aB/kxb8JSS6FRI7mSkvD+8=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
|
||||
@ -671,8 +679,8 @@ github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85
|
||||
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
|
||||
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
|
||||
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
|
||||
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
@ -681,8 +689,12 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
|
||||
github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
|
||||
github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
|
||||
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
|
||||
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
||||
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
|
||||
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
@ -719,8 +731,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
@ -750,8 +762,8 @@ github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrap
|
||||
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
|
||||
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pjbgf/sha1cd v0.3.1 h1:Dh2GYdpJnO84lIw0LJwTFXjcNbasP/bklicSznyAaPI=
|
||||
github.com/pjbgf/sha1cd v0.3.1/go.mod h1:Y8t7jSB/dEI/lQE04A1HVKteqjj9bX5O4+Cex0TCu8s=
|
||||
github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4=
|
||||
github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
@ -767,8 +779,8 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
|
||||
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
|
||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@ -782,8 +794,8 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
|
||||
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
|
||||
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
|
||||
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
|
||||
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
@ -804,15 +816,15 @@ github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/schollz/progressbar/v3 v3.17.1 h1:bI1MTaoQO+v5kzklBjYNRQLoVpe0zbyRZNK6DFkVC5U=
|
||||
github.com/schollz/progressbar/v3 v3.17.1/go.mod h1:RzqpnsPQNjUyIgdglUjRLgD7sVnxN1wpmBMV+UiEbL4=
|
||||
github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA=
|
||||
github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec=
|
||||
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
|
||||
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
|
||||
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
|
||||
@ -829,8 +841,8 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY=
|
||||
github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M=
|
||||
github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=
|
||||
github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
@ -845,8 +857,8 @@ github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3
|
||||
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
@ -855,8 +867,9 @@ github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
|
||||
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
|
||||
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
@ -911,6 +924,8 @@ github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
@ -931,29 +946,29 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
||||
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
|
||||
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0 h1:7F29RDmnlqk6B5d+sUqemt8TBfDqxryYW5gX6L74RFA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0/go.mod h1:ZiGDq7xwDMKmWDrN1XsXAj0iC7hns+2DhxBFSncNHSE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
|
||||
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
|
||||
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
|
||||
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
|
||||
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
|
||||
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
|
||||
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q=
|
||||
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
|
||||
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
|
||||
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
|
||||
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
||||
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
|
||||
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
@ -978,8 +993,8 @@ golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWP
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@ -990,8 +1005,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588=
|
||||
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
@ -1014,8 +1029,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@ -1057,8 +1070,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
|
||||
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@ -1076,8 +1089,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -1147,6 +1160,7 @@ golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@ -1154,16 +1168,15 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
|
||||
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@ -1173,16 +1186,16 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
|
||||
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@ -1228,8 +1241,6 @@ golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4X
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
|
||||
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@ -1278,10 +1289,10 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250102185135-69823020774d h1:H8tOf8XM88HvKqLTxe755haY6r1fqqzLbEnfrmLXlSA=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250102185135-69823020774d/go.mod h1:2v7Z7gP2ZUOGsaFyxATQSRoBnKygqVq2Cwnvom7QiqY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250313205543-e70fdf4c4cb4 h1:IFnXJq3UPB3oBREOodn1v1aGQeZYQclEmvWRMN0PSsY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:c8q6Z6OCqnfVIqUFJkCzKcrj8eCvUrz+K4KRzSTuANg=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
|
||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
@ -1301,8 +1312,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
|
||||
google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
|
||||
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@ -1316,8 +1327,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
|
||||
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII=
|
||||
@ -1360,8 +1371,8 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
||||
gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
|
||||
gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
@ -655,19 +655,6 @@ func (a App) WriteRecipeVersion(version string, dryRun bool) error {

		splitted := strings.Split(line, ":")

		if a.Recipe.Dirty {
			dirtyVersion = fmt.Sprintf("%s%s", version, config.DIRTY_DEFAULT)
			if strings.Contains(line, dirtyVersion) {
				skipped = true
				lines = append(lines, line)
				continue
			}

			line = fmt.Sprintf("%s:%s", splitted[0], dirtyVersion)
			lines = append(lines, line)
			continue
		}

		line = fmt.Sprintf("%s:%s", splitted[0], version)
		lines = append(lines, line)
	}
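For reference, a minimal standalone sketch of the version-rewrite rule in the hunk above. The env line format ("RECIPE=gitea:...") is illustrative, and the "+U" dirty marker is an assumption based on the suffixes used elsewhere in this changeset.

package main

import (
	"fmt"
	"strings"
)

// dirtyDefault stands in for config.DIRTY_DEFAULT; "+U" is an assumption.
const dirtyDefault = "+U"

// pinVersion rewrites a "KEY=recipe:version" style line so that everything
// after the first ":" becomes the given version, appending the dirty marker
// when the recipe has unstaged changes.
func pinVersion(line, version string, dirty bool) string {
	splitted := strings.Split(line, ":")
	if dirty {
		return fmt.Sprintf("%s:%s%s", splitted[0], version, dirtyDefault)
	}
	return fmt.Sprintf("%s:%s", splitted[0], version)
}

func main() {
	fmt.Println(pinVersion("RECIPE=gitea:2.5.0", "2.6.0", false)) // RECIPE=gitea:2.6.0
	fmt.Println(pinVersion("RECIPE=gitea:2.5.0", "2.6.0", true))  // RECIPE=gitea:2.6.0+U
}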
@ -223,16 +223,4 @@ func TestWriteRecipeVersionOverwrite(t *testing.T) {
	}

	assert.Equal(t, "foo", app.Recipe.EnvVersion)

	app.Recipe.Dirty = true
	if err := app.WriteRecipeVersion("foo+U", false); err != nil {
		t.Fatal(err)
	}

	app, err = appPkg.GetApp(testPkg.ExpectedAppFiles, testPkg.AppName)
	if err != nil {
		t.Fatal(err)
	}

	assert.Equal(t, "foo+U", app.Recipe.EnvVersion)
}
@ -44,6 +44,16 @@ func SetChaosVersionLabel(compose *composetypes.Config, stackName string, chaosV
	}
}

func SetVersionLabel(compose *composetypes.Config, stackName string, version string) {
	for _, service := range compose.Services {
		if service.Name == "app" {
			log.Debugf("set label 'coop-cloud.%s.version' to %v for %s", stackName, version, stackName)
			labelKey := fmt.Sprintf("coop-cloud.%s.version", stackName)
			service.Deploy.Labels[labelKey] = version
		}
	}
}

// SetUpdateLabel adds env ENABLE_AUTO_UPDATE as label to enable/disable the
// auto update process for this app. The default if this variable is not set is to disable
// the auto update process.
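A small self-contained sketch of the label written by SetVersionLabel above: only the service named "app" receives the "coop-cloud.<stack name>.version" label. The stack name and version string below are illustrative.

package main

import "fmt"

// versionLabels mirrors the behaviour of SetVersionLabel using plain maps
// instead of the docker compose types.
func versionLabels(stackName, version string, services []string) map[string]map[string]string {
	labels := map[string]map[string]string{}
	for _, name := range services {
		labels[name] = map[string]string{}
		if name == "app" {
			key := fmt.Sprintf("coop-cloud.%s.version", stackName)
			labels[name][key] = version
		}
	}
	return labels
}

func main() {
	fmt.Println(versionLabels("gitea_example_com", "2.6.0+3.1.0", []string{"app", "db"}))
	// map[app:map[coop-cloud.gitea_example_com.version:2.6.0+3.1.0] db:map[]]
}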
@ -90,6 +90,7 @@ func (a Abra) GetAbraDir() string {

func (a Abra) GetServersDir() string   { return path.Join(a.GetAbraDir(), "servers") }
func (a Abra) GetRecipesDir() string   { return path.Join(a.GetAbraDir(), "recipes") }
func (a Abra) GetLogsDir() string      { return path.Join(a.GetAbraDir(), "logs") }
func (a Abra) GetVendorDir() string    { return path.Join(a.GetAbraDir(), "vendor") }
func (a Abra) GetBackupDir() string    { return path.Join(a.GetAbraDir(), "backups") }
func (a Abra) GetCatalogueDir() string { return path.Join(a.GetAbraDir(), "catalogue") }
@ -100,6 +101,7 @@ var (
	ABRA_DIR      = config.GetAbraDir()
	SERVERS_DIR   = config.GetServersDir()
	RECIPES_DIR   = config.GetRecipesDir()
	LOGS_DIR      = config.GetLogsDir()
	VENDOR_DIR    = config.GetVendorDir()
	BACKUP_DIR    = config.GetBackupDir()
	CATALOGUE_DIR = config.GetCatalogueDir()
@ -192,7 +192,7 @@ func TestEnvVarCommentsRemoved(t *testing.T) {

	envVar, exists = envSample["SECRET_TEST_PASS_TWO_VERSION"]
	if !exists {
		t.Fatal("WITH_COMMENT env var should be present in .env.sample")
		t.Fatal("SECRET_TEST_PASS_TWO_VERSION env var should be present in .env.sample")
	}

	if strings.Contains(envVar, "length") {
@ -1,7 +1,10 @@
|
||||
package git
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
@ -22,46 +25,81 @@ func gitCloneIgnoreErr(err error) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Clone runs a git clone which accounts for different default branches.
|
||||
// Clone runs a git clone which accounts for different default branches. This
|
||||
// function respects Ctrl+C (SIGINT) calls from the user, cancelling the
|
||||
// context and deleting the (typically) half-baked clone of the repository.
|
||||
// This avoids broken state for future clone / recipe ops.
|
||||
func Clone(dir, url string) error {
|
||||
if _, err := os.Stat(dir); os.IsNotExist(err) {
|
||||
log.Debugf("git clone: %s", dir, url)
|
||||
ctx := context.Background()
|
||||
ctx, cancelCtx := context.WithCancel(ctx)
|
||||
|
||||
_, err := git.PlainClone(dir, false, &git.CloneOptions{
|
||||
URL: url,
|
||||
Tags: git.AllTags,
|
||||
ReferenceName: plumbing.ReferenceName("refs/heads/main"),
|
||||
SingleBranch: true,
|
||||
})
|
||||
sigIntCh := make(chan os.Signal, 1)
|
||||
signal.Notify(sigIntCh, os.Interrupt)
|
||||
defer func() {
|
||||
signal.Stop(sigIntCh)
|
||||
cancelCtx()
|
||||
}()
|
||||
|
||||
if err != nil && gitCloneIgnoreErr(err) {
|
||||
log.Debugf("git clone: %s cloned successfully", dir)
|
||||
return nil
|
||||
}
|
||||
errCh := make(chan error)
|
||||
|
||||
if err != nil {
|
||||
log.Debug("git clone: main branch failed, attempting master branch")
|
||||
go func() {
|
||||
if _, err := os.Stat(dir); os.IsNotExist(err) {
|
||||
log.Debugf("git clone: %s", url)
|
||||
|
||||
_, err := git.PlainClone(dir, false, &git.CloneOptions{
|
||||
_, err := git.PlainCloneContext(ctx, dir, false, &git.CloneOptions{
|
||||
URL: url,
|
||||
Tags: git.AllTags,
|
||||
ReferenceName: plumbing.ReferenceName("refs/heads/master"),
|
||||
ReferenceName: plumbing.ReferenceName("refs/heads/main"),
|
||||
SingleBranch: true,
|
||||
})
|
||||
|
||||
if err != nil && gitCloneIgnoreErr(err) {
|
||||
log.Debugf("git clone: %s cloned successfully", dir)
|
||||
return nil
|
||||
errCh <- nil
|
||||
}
|
||||
|
||||
if err := ctx.Err(); err != nil {
|
||||
errCh <- fmt.Errorf("git clone %s: cancelled due to interrupt", dir)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
log.Debug("git clone: main branch failed, attempting master branch")
|
||||
|
||||
_, err := git.PlainCloneContext(ctx, dir, false, &git.CloneOptions{
|
||||
URL: url,
|
||||
Tags: git.AllTags,
|
||||
ReferenceName: plumbing.ReferenceName("refs/heads/master"),
|
||||
SingleBranch: true,
|
||||
})
|
||||
|
||||
if err != nil && gitCloneIgnoreErr(err) {
|
||||
log.Debugf("git clone: %s cloned successfully", dir)
|
||||
errCh <- nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("git clone: %s cloned successfully", dir)
|
||||
} else {
|
||||
log.Debugf("git clone: %s already exists", dir)
|
||||
}
|
||||
|
||||
log.Debugf("git clone: %s cloned successfully", dir)
|
||||
} else {
|
||||
log.Debugf("git clone: %s already exists", dir)
|
||||
errCh <- nil
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-sigIntCh:
|
||||
cancelCtx()
|
||||
fmt.Println() // NOTE(d1): newline after ^C
|
||||
if err := os.RemoveAll(dir); err != nil {
|
||||
return fmt.Errorf("unable to clean up git clone of %s: %s", dir, err)
|
||||
}
|
||||
return fmt.Errorf("git clone %s: cancelled due to interrupt", dir)
|
||||
case err := <-errCh:
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
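A condensed sketch of the interrupt-aware clone pattern introduced above, assuming go-git v5 as used in the hunk; the main/master fallback and debug logging are omitted, and the directory and URL in main are illustrative.

package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

// cloneWithInterrupt clones in a goroutine and races the result against
// SIGINT; on interrupt it cancels the context and removes the half-baked
// clone so that later recipe operations do not see broken state.
func cloneWithInterrupt(dir, url string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	sigIntCh := make(chan os.Signal, 1)
	signal.Notify(sigIntCh, os.Interrupt)
	defer signal.Stop(sigIntCh)

	errCh := make(chan error, 1)
	go func() {
		_, err := git.PlainCloneContext(ctx, dir, false, &git.CloneOptions{
			URL:           url,
			Tags:          git.AllTags,
			ReferenceName: plumbing.ReferenceName("refs/heads/main"),
			SingleBranch:  true,
		})
		errCh <- err
	}()

	select {
	case <-sigIntCh:
		cancel() // stop the in-flight clone
		if err := os.RemoveAll(dir); err != nil {
			return fmt.Errorf("unable to clean up git clone of %s: %s", dir, err)
		}
		return fmt.Errorf("git clone %s: cancelled due to interrupt", dir)
	case err := <-errCh:
		return err
	}
}

func main() {
	if err := cloneWithInterrupt("/tmp/gitea", "https://git.coopcloud.tech/coop-cloud/gitea.git"); err != nil {
		fmt.Println(err)
	}
}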
48
pkg/git/clone_test.go
Normal file
@ -0,0 +1,48 @@
package git

import (
	"fmt"
	"os"
	"path"
	"syscall"
	"testing"

	"coopcloud.tech/abra/pkg/config"
)

func TestClone(t *testing.T) {
	dir := path.Join(config.RECIPES_DIR, "gitea")
	os.RemoveAll(dir)

	gitURL := fmt.Sprintf("%s/%s.git", config.REPOS_BASE_URL, "gitea")
	if err := Clone(dir, gitURL); err != nil {
		t.Fatalf("unable to git clone gitea: %s", err)
	}

	if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) {
		t.Fatal("gitea repo was not cloned successfully")
	}
}

func TestCancelGitClone(t *testing.T) {
	dir := path.Join(config.RECIPES_DIR, "gitea")
	os.RemoveAll(dir)

	go func() {
		p, err := os.FindProcess(os.Getpid())
		if err != nil {
			t.Fatalf("unable to find current process: %s", err)
		}

		p.Signal(syscall.SIGINT)
	}()

	gitURL := fmt.Sprintf("%s/%s.git", config.REPOS_BASE_URL, "gitea")
	if err := Clone(dir, gitURL); err == nil {
		t.Fatal("cloning should have been interrupted")
	}

	if _, err := os.Stat(dir); err != nil && !os.IsNotExist(err) {
		t.Fatal("recipe repo was not deleted")
	}
}
@ -15,8 +15,10 @@ import (
	"github.com/go-git/go-git/v5/plumbing"
)

var Warn = "warn"
var Critical = "critical"
var (
	Warn     = "warn"
	Critical = "critical"
)

type LintFunction func(recipe.Recipe) (bool, error)

@ -194,7 +196,7 @@ func LintForErrors(recipe recipe.Recipe) error {

		ok, err := rule.Function(recipe)
		if err != nil {
			return err
			return fmt.Errorf("lint %s: %s", rule.Ref, err)
		}
		if !ok {
			return fmt.Errorf("lint error in %s configs: \"%s\" failed lint checks (%s)", recipe.Name, rule.Description, rule.Ref)
@ -2,8 +2,10 @@
package log

import (
	"math"
	"os"

	tea "github.com/charmbracelet/bubbletea"
	charmLog "github.com/charmbracelet/log"
)

@ -32,3 +34,13 @@ var SetLevel = Logger.SetLevel
var DebugLevel = charmLog.DebugLevel
var SetOutput = charmLog.SetOutput
var SetReportCaller = charmLog.SetReportCaller

type f func() (tea.Model, error)

func Without(fn f) (tea.Model, error) {
	l := Logger.GetLevel()
	Logger.SetLevel(math.MaxInt)
	m, err := fn()
	Logger.SetLevel(l)
	return m, err
}
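Hypothetical usage of the new log.Without helper above: it silences abra's logger for the lifetime of a bubbletea program so stray log lines do not corrupt the TUI. The trivial model here stands in for a real one such as the deploy progress model added in this changeset.

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/log"
	tea "github.com/charmbracelet/bubbletea"
)

// quitModel is a placeholder tea.Model that exits immediately.
type quitModel struct{}

func (m quitModel) Init() tea.Cmd                       { return tea.Quit }
func (m quitModel) Update(tea.Msg) (tea.Model, tea.Cmd) { return m, nil }
func (m quitModel) View() string                        { return "" }

func main() {
	m, err := log.Without(func() (tea.Model, error) {
		return tea.NewProgram(quitModel{}).Run()
	})
	if err != nil {
		log.Fatalf("%s", err)
	}
	fmt.Printf("%T ran with logging suppressed\n", m)
}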
104
pkg/logs/logs.go
Normal file
@ -0,0 +1,104 @@
|
||||
package logs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
containerTypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
type TailOpts struct {
|
||||
AppName string
|
||||
Services []string
|
||||
StdErr bool
|
||||
Since string
|
||||
Buffer *[]string
|
||||
ToBuffer bool
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// TailLogs gathers logs for the given app with optional service names to be
|
||||
// filtered on. These logs can be printed to os.Stdout or gathered to a buffer.
|
||||
func TailLogs(
|
||||
cl *dockerClient.Client,
|
||||
opts TailOpts,
|
||||
) error {
|
||||
sigIntCh := make(chan os.Signal, 1)
|
||||
signal.Notify(sigIntCh, os.Interrupt)
|
||||
defer signal.Stop(sigIntCh)
|
||||
|
||||
services, err := cl.ServiceList(
|
||||
context.Background(),
|
||||
types.ServiceListOptions{Filters: opts.Filters},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
errCh := make(chan error)
|
||||
waitCh := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
var wg sync.WaitGroup
|
||||
for _, service := range services {
|
||||
wg.Add(1)
|
||||
go func(serviceID string) {
|
||||
tail := "50"
|
||||
if opts.ToBuffer {
|
||||
// NOTE(d1): more logs from before deployment when analysing via file
|
||||
tail = "150"
|
||||
}
|
||||
|
||||
logs, err := cl.ServiceLogs(context.Background(), serviceID, containerTypes.LogsOptions{
|
||||
ShowStderr: true,
|
||||
ShowStdout: !opts.StdErr,
|
||||
Since: opts.Since,
|
||||
Until: "",
|
||||
Timestamps: true,
|
||||
Follow: true,
|
||||
Tail: tail,
|
||||
Details: false,
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
defer logs.Close()
|
||||
if opts.ToBuffer {
|
||||
buf := bufio.NewScanner(logs)
|
||||
for buf.Scan() {
|
||||
line := fmt.Sprintf("%s: %s", service.Spec.Name, buf.Text())
|
||||
*opts.Buffer = append(*opts.Buffer, line)
|
||||
}
|
||||
logs.Close()
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = io.Copy(os.Stdout, logs); err != nil && err != io.EOF {
|
||||
errCh <- fmt.Errorf("tailLogs: unable to copy buffer: %s", err)
|
||||
}
|
||||
}
|
||||
}(service.ID)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(waitCh)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-waitCh:
|
||||
return nil
|
||||
case <-sigIntCh:
|
||||
return nil
|
||||
case err := <-errCh:
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
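A hypothetical caller of the new logs.TailLogs helper above (package path assumed to be coopcloud.tech/abra/pkg/logs). The swarm label filter value is an assumption for illustration.

package main

import (
	"coopcloud.tech/abra/pkg/log"
	"coopcloud.tech/abra/pkg/logs"
	"github.com/docker/docker/api/types/filters"
	dockerClient "github.com/docker/docker/client"
)

func main() {
	cl, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv)
	if err != nil {
		log.Fatalf("%s", err)
	}

	f := filters.NewArgs()
	f.Add("label", "com.docker.stack.namespace=gitea_example_com") // assumed filter

	// Follows the last 50 lines of each matching service; with StdErr set,
	// only stderr output is streamed.
	if err := logs.TailLogs(cl, logs.TailOpts{
		AppName: "gitea.example.com",
		StdErr:  true,
		Since:   "10m",
		Filters: f,
	}); err != nil {
		log.Fatalf("%s", err)
	}
}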
@ -4,11 +4,14 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
gitPkg "coopcloud.tech/abra/pkg/git"
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/tagcmp"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
@ -43,6 +46,9 @@ func (r Recipe) Ensure(ctx EnsureContext) error {
|
||||
|
||||
if r.EnvVersion != "" && !ctx.IgnoreEnvVersion {
|
||||
log.Debugf("ensuring env version %s", r.EnvVersion)
|
||||
if strings.Contains(r.EnvVersion, "+U") {
|
||||
log.Fatalf("can not redeploy chaos version (%s) without --chaos", r.EnvVersion)
|
||||
}
|
||||
|
||||
if _, err := r.EnsureVersion(r.EnvVersion); err != nil {
|
||||
return err
|
||||
@ -272,19 +278,14 @@ func (r Recipe) EnsureUpToDate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDirty checks whether a recipe is dirty or not. N.B., if you call IsDirty
|
||||
// from another Recipe method, you should propagate the pointer reference (*).
|
||||
func (r *Recipe) IsDirty() error {
|
||||
// IsDirty checks whether a recipe is dirty or not.
|
||||
func (r *Recipe) IsDirty() (bool, error) {
|
||||
isClean, err := gitPkg.IsClean(r.Dir)
|
||||
if err != nil {
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !isClean {
|
||||
r.Dirty = true
|
||||
}
|
||||
|
||||
return nil
|
||||
return !isClean, nil
|
||||
}
|
||||
|
||||
// ChaosVersion constructs a chaos mode recipe version.
|
||||
@ -298,8 +299,12 @@ func (r *Recipe) ChaosVersion() (string, error) {
|
||||
|
||||
version = formatter.SmallSHA(head.String())
|
||||
|
||||
if err := r.IsDirty(); err != nil {
|
||||
return version, err
|
||||
dirty, err := r.IsDirty()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if dirty {
|
||||
return fmt.Sprintf("%s%s", version, config.DIRTY_DEFAULT), nil
|
||||
}
|
||||
|
||||
return version, nil
|
||||
@ -345,6 +350,18 @@ func (r Recipe) Tags() ([]string, error) {
|
||||
return tags, err
|
||||
}
|
||||
|
||||
sort.Slice(tags, func(i, j int) bool {
|
||||
version1, err := tagcmp.Parse(tags[i])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
version2, err := tagcmp.Parse(tags[j])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return version1.IsLessThan(version2)
|
||||
})
|
||||
|
||||
log.Debugf("detected %s as tags for recipe %s", strings.Join(tags, ", "), r.Name)
|
||||
|
||||
return tags, nil
|
||||
|
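Standalone illustration of the tag ordering added to Recipe.Tags above, using coopcloud.tech/tagcmp exactly as in the hunk; the tag values are illustrative.

package main

import (
	"fmt"
	"sort"

	"coopcloud.tech/tagcmp"
)

func main() {
	tags := []string{"2.1.0+1.20.2", "1.9.0+1.18.0", "2.0.0+1.19.1"}

	// Tags that fail to parse are treated as not-less-than, mirroring the hunk.
	sort.Slice(tags, func(i, j int) bool {
		v1, err := tagcmp.Parse(tags[i])
		if err != nil {
			return false
		}
		v2, err := tagcmp.Parse(tags[j])
		if err != nil {
			return false
		}
		return v1.IsLessThan(v2)
	})

	fmt.Println(tags) // [1.9.0+1.18.0 2.0.0+1.19.1 2.1.0+1.20.2]
}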
@ -15,10 +15,6 @@ func TestIsDirty(t *testing.T) {
		t.Fatal(err)
	}

	if err := r.IsDirty(); err != nil {
		t.Fatal(err)
	}

	assert.False(t, r.Dirty)

	fpath := filepath.Join(r.Dir, "foo.txt")
@ -31,9 +27,10 @@ func TestIsDirty(t *testing.T) {
		os.Remove(fpath)
	})

	if err := r.IsDirty(); err != nil {
	dirty, err := r.IsDirty()
	if err != nil {
		t.Fatal(err)
	}

	assert.True(t, r.Dirty)
	assert.True(t, dirty)
}
@ -12,6 +12,8 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-git/go-git/v5"
|
||||
|
||||
"coopcloud.tech/abra/pkg/catalogue"
|
||||
"coopcloud.tech/abra/pkg/config"
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
@ -20,7 +22,6 @@ import (
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"coopcloud.tech/abra/pkg/web"
|
||||
"coopcloud.tech/tagcmp"
|
||||
"github.com/go-git/go-git/v5"
|
||||
)
|
||||
|
||||
// RecipeCatalogueURL is the only current recipe catalogue available.
|
||||
@ -119,22 +120,9 @@ type Features struct {
|
||||
SSO string `json:"sso"`
|
||||
}
|
||||
|
||||
func GetEnvVersionRaw(name string) (string, error) {
|
||||
var version string
|
||||
|
||||
if strings.Contains(name, ":") {
|
||||
split := strings.Split(name, ":")
|
||||
if len(split) > 2 {
|
||||
return version, fmt.Errorf("version seems invalid: %s", name)
|
||||
}
|
||||
version = split[1]
|
||||
}
|
||||
|
||||
return version, nil
|
||||
}
|
||||
|
||||
func Get(name string) Recipe {
|
||||
version := ""
|
||||
versionRaw := ""
|
||||
if strings.Contains(name, ":") {
|
||||
split := strings.Split(name, ":")
|
||||
if len(split) > 2 {
|
||||
@ -143,6 +131,7 @@ func Get(name string) Recipe {
|
||||
name = split[0]
|
||||
|
||||
version = split[1]
|
||||
versionRaw = version
|
||||
if strings.HasSuffix(version, config.DIRTY_DEFAULT) {
|
||||
version = strings.Replace(split[1], config.DIRTY_DEFAULT, "", 1)
|
||||
log.Debugf("removed dirty suffix from .env version: %s -> %s", split[1], version)
|
||||
@ -167,11 +156,12 @@ func Get(name string) Recipe {
|
||||
dir := path.Join(config.RECIPES_DIR, escapeRecipeName(name))
|
||||
|
||||
r := Recipe{
|
||||
Name: name,
|
||||
EnvVersion: version,
|
||||
Dir: dir,
|
||||
GitURL: gitURL,
|
||||
SSHURL: sshURL,
|
||||
Name: name,
|
||||
EnvVersion: version,
|
||||
EnvVersionRaw: versionRaw,
|
||||
Dir: dir,
|
||||
GitURL: gitURL,
|
||||
SSHURL: sshURL,
|
||||
|
||||
ComposePath: path.Join(dir, "compose.yml"),
|
||||
ReadmePath: path.Join(dir, "README.md"),
|
||||
@ -179,20 +169,23 @@ func Get(name string) Recipe {
|
||||
AbraShPath: path.Join(dir, "abra.sh"),
|
||||
}
|
||||
|
||||
if err := r.IsDirty(); err != nil && !errors.Is(err, git.ErrRepositoryNotExists) {
|
||||
dirty, err := r.IsDirty()
|
||||
if err != nil && !errors.Is(err, git.ErrRepositoryNotExists) {
|
||||
log.Fatalf("failed to check git status of %s: %s", r.Name, err)
|
||||
}
|
||||
r.Dirty = dirty
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
type Recipe struct {
|
||||
Name string
|
||||
EnvVersion string
|
||||
Dirty bool // NOTE(d1): git terminology for unstaged changes
|
||||
Dir string
|
||||
GitURL string
|
||||
SSHURL string
|
||||
Name string
|
||||
EnvVersion string
|
||||
EnvVersionRaw string
|
||||
Dirty bool // NOTE(d1): git terminology for unstaged changes
|
||||
Dir string
|
||||
GitURL string
|
||||
SSHURL string
|
||||
|
||||
ComposePath string
|
||||
ReadmePath string
|
||||
|
@ -34,6 +34,7 @@ func TestGet(t *testing.T) {
|
||||
recipe: Recipe{
|
||||
Name: "foo",
|
||||
EnvVersion: "1.2.3",
|
||||
EnvVersionRaw: "1.2.3",
|
||||
Dir: path.Join(cfg.GetAbraDir(), "/recipes/foo"),
|
||||
GitURL: "https://git.coopcloud.tech/coop-cloud/foo.git",
|
||||
SSHURL: "ssh://git@git.coopcloud.tech:2222/coop-cloud/foo.git",
|
||||
@ -61,6 +62,22 @@ func TestGet(t *testing.T) {
|
||||
recipe: Recipe{
|
||||
Name: "mygit.org/myorg/cool-recipe",
|
||||
EnvVersion: "1.2.4",
|
||||
EnvVersionRaw: "1.2.4",
|
||||
Dir: path.Join(cfg.GetAbraDir(), "/recipes/mygit_org_myorg_cool-recipe"),
|
||||
GitURL: "https://mygit.org/myorg/cool-recipe.git",
|
||||
SSHURL: "ssh://git@mygit.org/myorg/cool-recipe.git",
|
||||
ComposePath: path.Join(cfg.GetAbraDir(), "recipes/mygit_org_myorg_cool-recipe/compose.yml"),
|
||||
ReadmePath: path.Join(cfg.GetAbraDir(), "recipes/mygit_org_myorg_cool-recipe/README.md"),
|
||||
SampleEnvPath: path.Join(cfg.GetAbraDir(), "recipes/mygit_org_myorg_cool-recipe/.env.sample"),
|
||||
AbraShPath: path.Join(cfg.GetAbraDir(), "recipes/mygit_org_myorg_cool-recipe/abra.sh"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mygit.org/myorg/cool-recipe:1e83340e+U",
|
||||
recipe: Recipe{
|
||||
Name: "mygit.org/myorg/cool-recipe",
|
||||
EnvVersion: "1e83340e",
|
||||
EnvVersionRaw: "1e83340e+U",
|
||||
Dir: path.Join(cfg.GetAbraDir(), "/recipes/mygit_org_myorg_cool-recipe"),
|
||||
GitURL: "https://mygit.org/myorg/cool-recipe.git",
|
||||
SSHURL: "ssh://git@mygit.org/myorg/cool-recipe.git",
|
||||
@ -105,16 +122,3 @@ func TestGetVersionLabelLocalDoesNotUseTimeoutLabel(t *testing.T) {
|
||||
assert.NotEqual(t, label, defaultTimeoutLabel)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirtyMarkerRemoved(t *testing.T) {
|
||||
r := Get("abra-test-recipe:1e83340e+U")
|
||||
assert.Equal(t, "1e83340e", r.EnvVersion)
|
||||
}
|
||||
|
||||
func TestGetEnvVersionRaw(t *testing.T) {
|
||||
v, err := GetEnvVersionRaw("abra-test-recipe:1e83340e+U")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Equal(t, "1e83340e+U", v)
|
||||
}
|
||||
|
@ -33,6 +33,10 @@ type Secret struct {
|
||||
// variable. For Example:
|
||||
// SECRET_FOO=v1 # length=12
|
||||
Length int
|
||||
// Charset comes from the charset modifier at the secret version environment
|
||||
// variable. For Example:
|
||||
// SECRET_FOO=v1 # charset=default,special
|
||||
Charset string
|
||||
// RemoteName is the name of the secret on the server. For example:
|
||||
// name: ${STACK_NAME}_test_pass_two_${SECRET_TEST_PASS_TWO_VERSION}
|
||||
// With the following:
|
||||
@ -43,38 +47,38 @@ type Secret struct {
|
||||
RemoteName string
|
||||
}
|
||||
|
||||
// GeneratePasswords generates passwords.
|
||||
func GeneratePasswords(count, length uint) ([]string, error) {
|
||||
// GeneratePassword generates passwords.
|
||||
func GeneratePassword(length uint, charset string) (string, error) {
|
||||
passwords, err := passgen.GeneratePasswords(
|
||||
count,
|
||||
1,
|
||||
length,
|
||||
passgen.AlphabetDefault,
|
||||
charset,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
|
||||
log.Debugf("generated %s", strings.Join(passwords, ", "))
|
||||
|
||||
return passwords, nil
|
||||
return passwords[0], nil
|
||||
}
|
||||
|
||||
// GeneratePassphrases generates human readable and rememberable passphrases.
|
||||
func GeneratePassphrases(count uint) ([]string, error) {
|
||||
// GeneratePassphrase generates human readable and rememberable passphrases.
|
||||
func GeneratePassphrase() (string, error) {
|
||||
passphrases, err := passgen.GeneratePassphrases(
|
||||
count,
|
||||
1,
|
||||
passgen.PassphraseWordCountDefault,
|
||||
rune('-'),
|
||||
passgen.PassphraseCasingDefault,
|
||||
passgen.WordListDefault,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
|
||||
log.Debugf("generated %s", strings.Join(passphrases, ", "))
|
||||
|
||||
return passphrases, nil
|
||||
return passphrases[0], nil
|
||||
}
|
||||
|
||||
// ReadSecretsConfig reads secret names/versions from the recipe config. The
|
||||
@ -150,6 +154,8 @@ func ReadSecretsConfig(appEnvPath string, composeFiles []string, stackName strin
|
||||
}
|
||||
value.Length = length
|
||||
}
|
||||
|
||||
value.Charset = resolveCharset(modifierValues["charset"])
|
||||
break
|
||||
}
|
||||
secretValues[secretId] = value
|
||||
@ -158,6 +164,22 @@ func ReadSecretsConfig(appEnvPath string, composeFiles []string, stackName strin
|
||||
return secretValues, nil
|
||||
}
|
||||
|
||||
// resolveCharset returns the passgen alphabet required for a secret
|
||||
func resolveCharset(input string) string {
|
||||
switch strings.ToLower(input) {
|
||||
case "special":
|
||||
return passgen.AlphabetSpecial
|
||||
case "safespecial":
|
||||
return "!@#%^&*_-+="
|
||||
case "default,special", "special,default":
|
||||
return passgen.AlphabetDefault + passgen.AlphabetSpecial
|
||||
case "default,safespecial", "safespecial,default":
|
||||
return passgen.AlphabetDefault + "!@#%^&*_-+="
|
||||
default:
|
||||
return passgen.AlphabetDefault // Fallback to default
|
||||
}
|
||||
}
|
||||
|
||||
// GenerateSecrets generates secrets locally and sends them to a remote server for storage.
|
||||
func GenerateSecrets(cl *dockerClient.Client, secrets map[string]Secret, server string) (map[string]string, error) {
|
||||
secretsGenerated := map[string]string{}
|
||||
@ -173,13 +195,13 @@ func GenerateSecrets(cl *dockerClient.Client, secrets map[string]Secret, server
|
||||
log.Debugf("attempting to generate and store %s on %s", secret.RemoteName, server)
|
||||
|
||||
if secret.Length > 0 {
|
||||
passwords, err := GeneratePasswords(1, uint(secret.Length))
|
||||
password, err := GeneratePassword(uint(secret.Length), secret.Charset)
|
||||
if err != nil {
|
||||
ch <- err
|
||||
return
|
||||
}
|
||||
|
||||
if err := client.StoreSecret(cl, secret.RemoteName, passwords[0], server); err != nil {
|
||||
if err := client.StoreSecret(cl, secret.RemoteName, password, server); err != nil {
|
||||
if strings.Contains(err.Error(), "AlreadyExists") {
|
||||
log.Warnf("%s already exists", secret.RemoteName)
|
||||
ch <- nil
|
||||
@ -191,15 +213,15 @@ func GenerateSecrets(cl *dockerClient.Client, secrets map[string]Secret, server
|
||||
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
secretsGenerated[secretName] = passwords[0]
|
||||
secretsGenerated[secretName] = password
|
||||
} else {
|
||||
passphrases, err := GeneratePassphrases(1)
|
||||
passphrase, err := GeneratePassphrase()
|
||||
if err != nil {
|
||||
ch <- err
|
||||
return
|
||||
}
|
||||
|
||||
if err := client.StoreSecret(cl, secret.RemoteName, passphrases[0], server); err != nil {
|
||||
if err := client.StoreSecret(cl, secret.RemoteName, passphrase, server); err != nil {
|
||||
if strings.Contains(err.Error(), "AlreadyExists") {
|
||||
log.Warnf("%s already exists", secret.RemoteName)
|
||||
ch <- nil
|
||||
@ -211,7 +233,7 @@ func GenerateSecrets(cl *dockerClient.Client, secrets map[string]Secret, server
|
||||
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
secretsGenerated[secretName] = passphrases[0]
|
||||
secretsGenerated[secretName] = passphrase
|
||||
}
|
||||
ch <- nil
|
||||
}(n, v)
|
||||
|
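Hypothetical direct use of the reworked generators above (package path assumed to be coopcloud.tech/abra/pkg/secret). The charset argument is the already-resolved alphabet; the one shown here is what the tests expect for "default,safespecial".

package main

import (
	"fmt"

	"coopcloud.tech/abra/pkg/log"
	"coopcloud.tech/abra/pkg/secret"
)

func main() {
	// 12 characters drawn from the default alphabet plus the "safe" specials.
	password, err := secret.GeneratePassword(12, "abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789!@#%^&*_-+=")
	if err != nil {
		log.Fatalf("%s", err)
	}

	// No length modifier: fall back to a human-readable passphrase.
	passphrase, err := secret.GeneratePassphrase()
	if err != nil {
		log.Fatalf("%s", err)
	}

	fmt.Println(password, passphrase)
}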
@ -17,16 +17,37 @@ func TestReadSecretsConfig(t *testing.T) {
|
||||
assert.Equal(t, "test_example_com_test_pass_one_v2", secretsFromConfig["test_pass_one"].RemoteName)
|
||||
assert.Equal(t, "v2", secretsFromConfig["test_pass_one"].Version)
|
||||
assert.Equal(t, 0, secretsFromConfig["test_pass_one"].Length)
|
||||
assert.Equal(t, "abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789", secretsFromConfig["test_pass_one"].Charset)
|
||||
|
||||
// Has a length modifier
|
||||
assert.Equal(t, "test_example_com_test_pass_two_v1", secretsFromConfig["test_pass_two"].RemoteName)
|
||||
assert.Equal(t, "v1", secretsFromConfig["test_pass_two"].Version)
|
||||
assert.Equal(t, 10, secretsFromConfig["test_pass_two"].Length)
|
||||
assert.Equal(t, "abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789", secretsFromConfig["test_pass_two"].Charset)
|
||||
|
||||
// Secret name does not include the secret id
|
||||
assert.Equal(t, "test_example_com_pass_three_v2", secretsFromConfig["test_pass_three"].RemoteName)
|
||||
assert.Equal(t, "v2", secretsFromConfig["test_pass_three"].Version)
|
||||
assert.Equal(t, 0, secretsFromConfig["test_pass_three"].Length)
|
||||
assert.Equal(t, "abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789", secretsFromConfig["test_pass_three"].Charset)
|
||||
|
||||
// Has a length modifier and a charset=default,safespecial modifier
|
||||
assert.Equal(t, "test_example_com_test_pass_four_v1", secretsFromConfig["test_pass_four"].RemoteName)
|
||||
assert.Equal(t, "v1", secretsFromConfig["test_pass_four"].Version)
|
||||
assert.Equal(t, 12, secretsFromConfig["test_pass_four"].Length)
|
||||
assert.Equal(t, "abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789!@#%^&*_-+=", secretsFromConfig["test_pass_four"].Charset)
|
||||
|
||||
// Has a length modifier and a charset=default,special modifier
|
||||
assert.Equal(t, "test_example_com_test_pass_five_v1", secretsFromConfig["test_pass_five"].RemoteName)
|
||||
assert.Equal(t, "v1", secretsFromConfig["test_pass_five"].Version)
|
||||
assert.Equal(t, 12, secretsFromConfig["test_pass_five"].Length)
|
||||
assert.Equal(t, "abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789!@#$%^&*_-+=", secretsFromConfig["test_pass_five"].Charset)
|
||||
|
||||
// Has only a charset=default,special modifier, which gets set but is ignored during generation
|
||||
assert.Equal(t, "test_example_com_test_pass_six_v1", secretsFromConfig["test_pass_six"].RemoteName)
|
||||
assert.Equal(t, "v1", secretsFromConfig["test_pass_six"].Version)
|
||||
assert.Equal(t, 0, secretsFromConfig["test_pass_six"].Length)
|
||||
assert.Equal(t, "abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789!@#$%^&*_-+=", secretsFromConfig["test_pass_six"].Charset)
|
||||
}
|
||||
|
||||
func TestReadSecretsConfigWithLongDomain(t *testing.T) {
|
||||
|
@ -1,3 +1,6 @@
SECRET_TEST_PASS_ONE_VERSION=v2
SECRET_TEST_PASS_TWO_VERSION=v1 # length=10
SECRET_TEST_PASS_THREE_VERSION=v2
SECRET_TEST_PASS_FOUR_VERSION=v1 # length=12 charset=default,safespecial
SECRET_TEST_PASS_FIVE_VERSION=v1 # length=12 charset=default,special
SECRET_TEST_PASS_SIX_VERSION=v1 # charset=default,special
@ -8,6 +8,9 @@ services:
      - test_pass_one
      - test_pass_two
      - test_pass_three
      - test_pass_four
      - test_pass_five
      - test_pass_six

secrets:
  test_pass_one:
@ -19,3 +22,12 @@ secrets:
  test_pass_three:
    external: true
    name: ${STACK_NAME}_pass_three_${SECRET_TEST_PASS_THREE_VERSION} # secretId and name don't match
  test_pass_four:
    external: true
    name: ${STACK_NAME}_test_pass_four_${SECRET_TEST_PASS_FOUR_VERSION}
  test_pass_five:
    external: true
    name: ${STACK_NAME}_test_pass_five_${SECRET_TEST_PASS_FIVE_VERSION}
  test_pass_six:
    external: true
    name: ${STACK_NAME}_test_pass_six_${SECRET_TEST_PASS_SIX_VERSION}
@ -20,6 +20,8 @@ func Fatal(hostname string, err error) error {
		return fmt.Errorf("ssh auth: permission denied for %s", hostname)
	} else if strings.Contains(out, "Network is unreachable") {
		return fmt.Errorf("unable to connect to %s, please check your SSH config", hostname)
	} else if strings.Contains(out, "Is the docker daemon running") {
		return fmt.Errorf("docker: is the daemon running / your user has docker permissions?")
	}

	return err
353
pkg/ui/deploy.go
Normal file
@ -0,0 +1,353 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"coopcloud.tech/abra/pkg/formatter"
|
||||
"coopcloud.tech/abra/pkg/logs"
|
||||
tea "github.com/charmbracelet/bubbletea"
|
||||
"github.com/docker/cli/cli/command/service/progress"
|
||||
containerTypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
dockerClient "github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
)
|
||||
|
||||
var IsRunning bool
|
||||
|
||||
type statusMsg struct {
|
||||
stream stream
|
||||
jsonMsg jsonmessage.JSONMessage
|
||||
}
|
||||
|
||||
type progressCompleteMsg struct {
|
||||
stream stream
|
||||
failed bool
|
||||
}
|
||||
|
||||
type healthcheckMsg struct {
|
||||
stream stream
|
||||
health string
|
||||
}
|
||||
|
||||
type ServiceMeta struct {
|
||||
Name string
|
||||
ID string
|
||||
}
|
||||
|
||||
type Model struct {
|
||||
appName string
|
||||
cl *dockerClient.Client
|
||||
count int
|
||||
ctx context.Context
|
||||
timeout time.Duration
|
||||
width int
|
||||
filters filters.Args
|
||||
|
||||
Streams *[]stream
|
||||
Logs *[]string
|
||||
Failed bool
|
||||
TimedOut bool
|
||||
Quit bool
|
||||
}
|
||||
|
||||
func (m Model) complete() bool {
|
||||
if m.count == len(*m.Streams) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type stream struct {
|
||||
Name string
|
||||
Err error
|
||||
|
||||
decoder *json.Decoder
|
||||
id string
|
||||
reader *io.PipeReader
|
||||
writer *io.PipeWriter
|
||||
status string
|
||||
retries int
|
||||
health string
|
||||
rollback bool
|
||||
}
|
||||
|
||||
func (s stream) String() string {
|
||||
out := fmt.Sprintf("{decoder: %v, ", s.decoder)
|
||||
out += fmt.Sprintf("err: %v, ", s.Err)
|
||||
out += fmt.Sprintf("id: %s, ", s.id)
|
||||
out += fmt.Sprintf("name: %s, ", s.Name)
|
||||
out += fmt.Sprintf("reader: %v, ", s.reader)
|
||||
out += fmt.Sprintf("writer: %v, ", s.writer)
|
||||
out += fmt.Sprintf("status: %s, ", s.status)
|
||||
return out
|
||||
}
|
||||
|
||||
func (s stream) progress(m Model) tea.Msg {
|
||||
if err := progress.ServiceProgress(m.ctx, m.cl, s.id, s.writer); err != nil {
|
||||
return progressCompleteMsg{
|
||||
stream: s,
|
||||
failed: true,
|
||||
}
|
||||
}
|
||||
|
||||
return progressCompleteMsg{stream: s}
|
||||
}
|
||||
|
||||
func (s stream) process() tea.Msg {
|
||||
var jsonMsg jsonmessage.JSONMessage
|
||||
|
||||
if err := s.decoder.Decode(&jsonMsg); err != nil {
|
||||
if err == io.EOF {
|
||||
// NOTE(d1): end processing messages
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return statusMsg{
|
||||
stream: s,
|
||||
jsonMsg: jsonMsg,
|
||||
}
|
||||
}
|
||||
|
||||
func (s stream) healthcheck(m Model) tea.Msg {
|
||||
filters := filters.NewArgs()
|
||||
filters.Add("name", fmt.Sprintf("^%s", s.Name))
|
||||
|
||||
containers, err := m.cl.ContainerList(m.ctx, containerTypes.ListOptions{Filters: filters})
|
||||
if err != nil {
|
||||
s.Err = err
|
||||
return healthcheckMsg{stream: s}
|
||||
}
|
||||
|
||||
if len(containers) == 0 {
|
||||
return healthcheckMsg{stream: s}
|
||||
}
|
||||
|
||||
container := containers[0]
|
||||
containerState, err := m.cl.ContainerInspect(m.ctx, container.ID)
|
||||
if err != nil {
|
||||
s.Err = err
|
||||
return healthcheckMsg{stream: s}
|
||||
}
|
||||
|
||||
var health string
|
||||
if containerState.State.Health != nil {
|
||||
health = containerState.State.Health.Status
|
||||
}
|
||||
|
||||
return healthcheckMsg{stream: s, health: health}
|
||||
}
|
||||
|
||||
func DeployInitialModel(
|
||||
ctx context.Context,
|
||||
cl *dockerClient.Client,
|
||||
services []ServiceMeta,
|
||||
appName string,
|
||||
timeout time.Duration,
|
||||
filters filters.Args,
|
||||
) Model {
|
||||
var streams []stream
|
||||
for _, service := range services {
|
||||
r, w := io.Pipe()
|
||||
d := json.NewDecoder(r)
|
||||
streams = append(streams, stream{
|
||||
Name: service.Name,
|
||||
id: service.ID,
|
||||
reader: r,
|
||||
writer: w,
|
||||
decoder: d,
|
||||
retries: -1, // NOTE(d1): skip first attempt
|
||||
health: "?",
|
||||
})
|
||||
}
|
||||
|
||||
sort.Slice(streams, func(i, j int) bool {
|
||||
return streams[i].Name < streams[j].Name
|
||||
})
|
||||
|
||||
return Model{
|
||||
ctx: ctx,
|
||||
cl: cl,
|
||||
appName: appName,
|
||||
timeout: timeout,
|
||||
filters: filters,
|
||||
Streams: &streams,
|
||||
Logs: &[]string{},
|
||||
}
|
||||
}
|
||||
|
||||
func (m Model) Init() tea.Cmd {
|
||||
var cmds []tea.Cmd
|
||||
|
||||
for _, stream := range *m.Streams {
|
||||
cmds = append(
|
||||
cmds,
|
||||
[]tea.Cmd{
|
||||
func() tea.Msg { return stream.progress(m) },
|
||||
func() tea.Msg { return stream.process() },
|
||||
func() tea.Msg { return stream.healthcheck(m) },
|
||||
}...,
|
||||
)
|
||||
}
|
||||
|
||||
cmds = append(cmds, func() tea.Msg { return deployTimeout(m) })
|
||||
cmds = append(cmds, func() tea.Msg { return m.gatherLogs() })
|
||||
|
||||
return tea.Batch(cmds...)
|
||||
}
|
||||
|
||||
func (m Model) gatherLogs() tea.Msg {
|
||||
var services []string
|
||||
for _, s := range *m.Streams {
|
||||
services = append(services, s.Name)
|
||||
}
|
||||
|
||||
opts := logs.TailOpts{
|
||||
AppName: m.appName,
|
||||
Services: services,
|
||||
StdErr: true,
|
||||
Buffer: m.Logs,
|
||||
ToBuffer: true,
|
||||
Filters: m.filters,
|
||||
}
|
||||
|
||||
// NOTE(d1): not interested in log polling errors. if we don't see logs it
|
||||
// will hopefully be self-evident based on what happened in the deployment
|
||||
logs.TailLogs(m.cl, opts)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type timeoutMsg struct{}
|
||||
|
||||
func deployTimeout(m Model) tea.Msg {
|
||||
<-time.After(m.timeout)
|
||||
return timeoutMsg{}
|
||||
}
|
||||
|
||||
func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
|
||||
var cmds []tea.Cmd
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case tea.KeyMsg:
|
||||
switch msg.String() {
|
||||
case "ctrl+c", "q":
|
||||
m.Quit = true
|
||||
return m, tea.Quit
|
||||
}
|
||||
|
||||
case tea.WindowSizeMsg:
|
||||
m.width = msg.Width
|
||||
|
||||
case progressCompleteMsg:
|
||||
if msg.failed {
|
||||
m.Failed = true
|
||||
}
|
||||
|
||||
m.count += 1
|
||||
|
||||
if m.complete() {
|
||||
return m, tea.Quit
|
||||
}
|
||||
|
||||
case timeoutMsg:
|
||||
m.TimedOut = true
|
||||
return m, tea.Quit
|
||||
|
||||
case healthcheckMsg:
|
||||
for idx, s := range *m.Streams {
|
||||
if s.id == msg.stream.id {
|
||||
h := "?"
|
||||
if s.health != "" {
|
||||
h = s.health
|
||||
}
|
||||
if msg.health != "" {
|
||||
h = msg.health
|
||||
}
|
||||
(*m.Streams)[idx].health = h
|
||||
}
|
||||
}
|
||||
|
||||
cmds = append(
|
||||
cmds,
|
||||
func() tea.Msg { return msg.stream.healthcheck(m) },
|
||||
)
|
||||
|
||||
case statusMsg:
|
||||
for idx, s := range *m.Streams {
|
||||
if s.id == msg.stream.id {
|
||||
|
||||
if msg.jsonMsg.ID == "rollback" {
|
||||
m.Failed = true
|
||||
(*m.Streams)[idx].rollback = true
|
||||
}
|
||||
|
||||
if msg.jsonMsg.ID != "overall progress" {
|
||||
newStatus := strings.ToLower(msg.jsonMsg.Status)
|
||||
currentStatus := (*m.Streams)[idx].status
|
||||
|
||||
if !strings.Contains(currentStatus, "starting") &&
|
||||
strings.Contains(newStatus, "starting") {
|
||||
(*m.Streams)[idx].retries += 1
|
||||
}
|
||||
|
||||
if (*m.Streams)[idx].rollback {
|
||||
if msg.jsonMsg.ID == "rollback" {
|
||||
(*m.Streams)[idx].status = newStatus
|
||||
}
|
||||
} else {
|
||||
(*m.Streams)[idx].status = newStatus
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cmds = append(
|
||||
cmds,
|
||||
func() tea.Msg { return msg.stream.process() },
|
||||
)
|
||||
}
|
||||
|
||||
return m, tea.Batch(cmds...)
|
||||
}
|
||||
|
||||
func (m Model) View() string {
|
||||
body := strings.Builder{}
|
||||
|
||||
for _, stream := range *m.Streams {
|
||||
split := strings.Split(stream.Name, "_")
|
||||
short := split[len(split)-1]
|
||||
|
||||
status := stream.status
|
||||
if strings.Contains(stream.status, "converged") && !stream.rollback {
|
||||
status = "succeeded"
|
||||
}
|
||||
if strings.Contains(stream.status, "rolled back") {
|
||||
status = "rolled back"
|
||||
}
|
||||
|
||||
retries := 0
|
||||
if stream.retries > 0 {
|
||||
retries = stream.retries
|
||||
}
|
||||
|
||||
output := fmt.Sprintf("%s: %s (retries: %v, healthcheck: %s)",
|
||||
formatter.BoldStyle.Render(short),
|
||||
status,
|
||||
retries,
|
||||
stream.health,
|
||||
)
|
||||
|
||||
body.WriteString(output)
|
||||
body.WriteString("\n")
|
||||
}
|
||||
|
||||
return body.String()
|
||||
}
|
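A hypothetical driver for the deploy progress model above (package path assumed to be coopcloud.tech/abra/pkg/ui); the service metadata and stack label filter are illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	"coopcloud.tech/abra/pkg/log"
	"coopcloud.tech/abra/pkg/ui"
	tea "github.com/charmbracelet/bubbletea"
	"github.com/docker/docker/api/types/filters"
	dockerClient "github.com/docker/docker/client"
)

func main() {
	cl, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv)
	if err != nil {
		log.Fatalf("%s", err)
	}

	f := filters.NewArgs()
	f.Add("label", "com.docker.stack.namespace=gitea_example_com") // assumed filter

	services := []ui.ServiceMeta{{Name: "gitea_example_com_app", ID: "abc123"}} // illustrative

	model := ui.DeployInitialModel(context.Background(), cl, services, "gitea.example.com", 5*time.Minute, f)

	// log.Without (added in this changeset) keeps log output from garbling the TUI.
	m, err := log.Without(func() (tea.Model, error) {
		return tea.NewProgram(model).Run()
	})
	if err != nil {
		log.Fatalf("%s", err)
	}

	deployed := m.(ui.Model)
	if deployed.Failed || deployed.TimedOut {
		fmt.Println("deploy did not converge; gathered service logs:", *deployed.Logs)
	}
}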
@ -9,14 +9,14 @@ import (
|
||||
"coopcloud.tech/abra/pkg/log"
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
apiclient "github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
// RunExec runs a command on a remote container. io.Writer corresponds to the
|
||||
// command output.
|
||||
func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string,
|
||||
execConfig *types.ExecConfig) (io.Writer, error) {
|
||||
execOptions *container.ExecOptions) (io.Writer, error) {
|
||||
ctx := context.Background()
|
||||
|
||||
// We need to check the tty _before_ we do the ContainerExecCreate, because
|
||||
@ -26,13 +26,13 @@ func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string
|
||||
if _, err := client.ContainerInspect(ctx, containerID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !execConfig.Detach {
|
||||
if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil {
|
||||
if !execOptions.Detach {
|
||||
if err := dockerCli.In().CheckTty(execOptions.AttachStdin, execOptions.Tty); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
response, err := client.ContainerExecCreate(ctx, containerID, *execConfig)
|
||||
response, err := client.ContainerExecCreate(ctx, containerID, *execOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -42,40 +42,40 @@ func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string
|
||||
return nil, errors.New("exec ID empty")
|
||||
}
|
||||
|
||||
if execConfig.Detach {
|
||||
execStartCheck := types.ExecStartCheck{
|
||||
Detach: execConfig.Detach,
|
||||
Tty: execConfig.Tty,
|
||||
if execOptions.Detach {
|
||||
execStartCheck := container.ExecStartOptions{
|
||||
Detach: execOptions.Detach,
|
||||
Tty: execOptions.Tty,
|
||||
}
|
||||
return nil, client.ContainerExecStart(ctx, execID, execStartCheck)
|
||||
}
|
||||
return interactiveExec(ctx, dockerCli, client, execConfig, execID)
|
||||
return interactiveExec(ctx, dockerCli, client, execOptions, execID)
|
||||
}
|
||||
|
||||
func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclient.Client,
|
||||
execConfig *types.ExecConfig, execID string) (io.Writer, error) {
|
||||
execOpts *container.ExecOptions, execID string) (io.Writer, error) {
|
||||
// Interactive exec requested.
|
||||
var (
|
||||
out, stderr io.Writer
|
||||
in io.ReadCloser
|
||||
)
|
||||
|
||||
if execConfig.AttachStdin {
|
||||
if execOpts.AttachStdin {
|
||||
in = dockerCli.In()
|
||||
}
|
||||
if execConfig.AttachStdout {
|
||||
if execOpts.AttachStdout {
|
||||
out = dockerCli.Out()
|
||||
}
|
||||
if execConfig.AttachStderr {
|
||||
if execConfig.Tty {
|
||||
if execOpts.AttachStderr {
|
||||
if execOpts.Tty {
|
||||
stderr = dockerCli.Out()
|
||||
} else {
|
||||
stderr = dockerCli.Err()
|
||||
}
|
||||
}
|
||||
|
||||
execStartCheck := types.ExecStartCheck{
|
||||
Tty: execConfig.Tty,
|
||||
execStartCheck := container.ExecStartOptions{
|
||||
Tty: execOpts.Tty,
|
||||
}
|
||||
resp, err := client.ContainerExecAttach(ctx, execID, execStartCheck)
|
||||
if err != nil {
|
||||
@ -94,15 +94,15 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclie
|
||||
outputStream: out,
|
||||
errorStream: stderr,
|
||||
resp: resp,
|
||||
tty: execConfig.Tty,
|
||||
detachKeys: execConfig.DetachKeys,
|
||||
tty: execOpts.Tty,
|
||||
detachKeys: execOpts.DetachKeys,
|
||||
}
|
||||
|
||||
return streamer.stream(ctx)
|
||||
}()
|
||||
}()
|
||||
|
||||
if execConfig.Tty && dockerCli.In().IsTerminal() {
|
||||
if execOpts.Tty && dockerCli.In().IsTerminal() {
|
||||
if err := MonitorTtySize(ctx, client, dockerCli, execID, true); err != nil {
|
||||
fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
|
||||
}
|
||||
|
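Minimal sketch of the types.ExecConfig to container.ExecOptions migration reflected above: the option fields keep their names, only the defining package changes. The command and user are illustrative; a value like this is what the reworked RunExec now takes as *container.ExecOptions.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	execOptions := container.ExecOptions{
		User:         "root",
		AttachStdin:  true,
		AttachStdout: true,
		AttachStderr: true,
		Tty:          true,
		Cmd:          []string{"sh", "-c", "echo hello"},
	}

	fmt.Printf("%+v\n", execOptions)
}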
@ -5,7 +5,6 @@ import (
|
||||
"strings"
|
||||
|
||||
composetypes "github.com/docker/cli/cli/compose/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
networktypes "github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
)
|
||||
@ -52,13 +51,13 @@ func AddStackLabel(namespace Namespace, labels map[string]string) map[string]str
|
||||
type networkMap map[string]composetypes.NetworkConfig
|
||||
|
||||
// Networks from the compose-file type to the engine API type
|
||||
func Networks(namespace Namespace, networks networkMap, servicesNetworks map[string]struct{}) (map[string]types.NetworkCreate, []string) {
|
||||
func Networks(namespace Namespace, networks networkMap, servicesNetworks map[string]struct{}) (map[string]networktypes.CreateOptions, []string) {
|
||||
if networks == nil {
|
||||
networks = make(map[string]composetypes.NetworkConfig)
|
||||
}
|
||||
|
||||
externalNetworks := []string{}
|
||||
result := make(map[string]types.NetworkCreate)
|
||||
result := make(map[string]networktypes.CreateOptions)
|
||||
for internalName := range servicesNetworks {
|
||||
network := networks[internalName]
|
||||
if network.External.External {
|
||||
@ -66,7 +65,7 @@ func Networks(namespace Namespace, networks networkMap, servicesNetworks map[str
|
||||
continue
|
||||
}
|
||||
|
||||
createOpts := types.NetworkCreate{
|
||||
createOpts := networktypes.CreateOptions{
|
||||
Labels: AddStackLabel(namespace, network.Labels),
|
||||
Driver: network.Driver,
|
||||
Options: network.DriverOpts,
|
||||
|
@ -4,7 +4,6 @@ import (
|
||||
"testing"
|
||||
|
||||
composetypes "github.com/docker/cli/cli/compose/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
@ -67,7 +66,7 @@ func TestNetworks(t *testing.T) {
|
||||
Name: "othername",
|
||||
},
|
||||
}
|
||||
expected := map[string]types.NetworkCreate{
|
||||
expected := map[string]network.CreateOptions{
|
||||
"foo_default": {
|
||||
Labels: map[string]string{
|
||||
LabelNamespace: "foo",
|
||||
|
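The analogous sketch for the network side of the same migration: stack networks are now described with network.CreateOptions rather than types.NetworkCreate. Driver and labels below are illustrative.

package main

import (
	"fmt"

	networktypes "github.com/docker/docker/api/types/network"
)

func main() {
	createOpts := networktypes.CreateOptions{
		Driver: "overlay",
		Labels: map[string]string{"com.docker.stack.namespace": "foo"},
	}

	fmt.Printf("%+v\n", createOpts)
}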
@@ -3,11 +3,15 @@ package stack // https://github.com/docker/cli/blob/master/cli/command/stack/rem
import (
"context"
"fmt"
"os"
"os/signal"
"sort"
"strings"
"time"

"coopcloud.tech/abra/pkg/log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
@@ -17,57 +21,87 @@ import (

// RunRemove is the swarm implementation of docker stack remove
func RunRemove(ctx context.Context, client *apiclient.Client, opts Remove) error {
var errs []string
for _, namespace := range opts.Namespaces {
services, err := GetStackServices(ctx, client, namespace)
if err != nil {
return err
}
sigIntCh := make(chan os.Signal, 1)
signal.Notify(sigIntCh, os.Interrupt)
defer signal.Stop(sigIntCh)

networks, err := getStackNetworks(ctx, client, namespace)
if err != nil {
return err
}
waitCh := make(chan struct{})
errCh := make(chan error)

var secrets []swarm.Secret
if versions.GreaterThanOrEqualTo(client.ClientVersion(), "1.25") {
secrets, err = getStackSecrets(ctx, client, namespace)
go func() {
var errs []string
for _, namespace := range opts.Namespaces {
services, err := GetStackServices(ctx, client, namespace)
if err != nil {
return err
errCh <- err
return
}

networks, err := getStackNetworks(ctx, client, namespace)
if err != nil {
errCh <- err
return
}

var secrets []swarm.Secret
if versions.GreaterThanOrEqualTo(client.ClientVersion(), "1.25") {
secrets, err = getStackSecrets(ctx, client, namespace)
if err != nil {
errCh <- err
return
}
}

var configs []swarm.Config
if versions.GreaterThanOrEqualTo(client.ClientVersion(), "1.30") {
configs, err = getStackConfigs(ctx, client, namespace)
if err != nil {
errCh <- err
return
}
}

if len(services)+len(networks)+len(secrets)+len(configs) == 0 {
log.Warnf("nothing found in stack: %s", namespace)
continue
}

hasError := removeServices(ctx, client, services)
hasError = removeSecrets(ctx, client, secrets) || hasError
hasError = removeConfigs(ctx, client, configs) || hasError
hasError = removeNetworks(ctx, client, networks) || hasError

if hasError {
errs = append(errs, fmt.Sprintf("failed to remove some resources from stack: %s", namespace))
continue
}

log.Info("polling undeploy status")
timeout, err := waitOnTasks(ctx, client, namespace)
if timeout {
errs = append(errs, err.Error())
} else {
if err != nil {
errs = append(errs, fmt.Sprintf("failed to wait on tasks of stack: %s: %s", namespace, err))
}
}
}

var configs []swarm.Config
if versions.GreaterThanOrEqualTo(client.ClientVersion(), "1.30") {
configs, err = getStackConfigs(ctx, client, namespace)
if err != nil {
return err
}
if len(errs) > 0 {
errCh <- errors.Errorf(strings.Join(errs, "\n"))
return
}

if len(services)+len(networks)+len(secrets)+len(configs) == 0 {
log.Warnf("nothing found in stack: %s", namespace)
continue
}
close(waitCh)
}()

hasError := removeServices(ctx, client, services)
hasError = removeSecrets(ctx, client, secrets) || hasError
hasError = removeConfigs(ctx, client, configs) || hasError
hasError = removeNetworks(ctx, client, networks) || hasError

if hasError {
errs = append(errs, fmt.Sprintf("failed to remove some resources from stack: %s", namespace))
continue
}

err = waitOnTasks(ctx, client, namespace)
if err != nil {
errs = append(errs, fmt.Sprintf("failed to wait on tasks of stack: %s: %s", namespace, err))
}
}

if len(errs) > 0 {
return errors.Errorf(strings.Join(errs, "\n"))
select {
case <-waitCh:
return nil
case <-sigIntCh:
return fmt.Errorf("skipping as requested, undeploy still in progress 🟠")
case err := <-errCh:
return err
}

return nil
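The rewritten RunRemove moves the removal work into a goroutine and then selects between completion, an error, and Ctrl-C, so the CLI can detach from a long-running undeploy. A stripped-down sketch of that pattern using only the standard library; `doRemove` is a hypothetical stand-in for the real removal loop.

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"os/signal"
)

// doRemove stands in for the goroutine body in RunRemove: do the work,
// report a failure on errCh, or close waitCh on success.
func doRemove(waitCh chan<- struct{}, errCh chan<- error) {
	// ... remove services, secrets, configs, networks ...
	close(waitCh)
}

func run() error {
	sigIntCh := make(chan os.Signal, 1)
	signal.Notify(sigIntCh, os.Interrupt)
	defer signal.Stop(sigIntCh)

	waitCh := make(chan struct{})
	errCh := make(chan error)

	go doRemove(waitCh, errCh)

	// Whichever happens first wins: done, interrupted, or failed.
	select {
	case <-waitCh:
		return nil
	case <-sigIntCh:
		return errors.New("skipping as requested, undeploy still in progress")
	case err := <-errCh:
		return err
	}
}

func main() {
	if err := run(); err != nil {
		fmt.Println(err)
	}
}
```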
@@ -87,7 +121,7 @@ func removeServices(
var hasError bool
sort.Slice(services, sortServiceByName(services))
for _, service := range services {
log.Infof("removing service %s", service.Spec.Name)
log.Debugf("removing service %s", service.Spec.Name)
if err := client.ServiceRemove(ctx, service.ID); err != nil {
hasError = true
log.Fatalf("failed to remove service %s: %s", service.ID, err)
@@ -99,11 +133,11 @@ func removeServices(
func removeNetworks(
ctx context.Context,
client *apiclient.Client,
networks []types.NetworkResource,
networks []network.Inspect,
) bool {
var hasError bool
for _, network := range networks {
log.Infof("removing network %s", network.Name)
log.Debugf("removing network %s", network.Name)
if err := client.NetworkRemove(ctx, network.ID); err != nil {
hasError = true
log.Fatalf("failed to remove network %s: %s", network.ID, err)
@@ -119,7 +153,7 @@ func removeSecrets(
) bool {
var hasError bool
for _, secret := range secrets {
log.Infof("removing secret %s", secret.Spec.Name)
log.Debugf("removing secret %s", secret.Spec.Name)
if err := client.SecretRemove(ctx, secret.ID); err != nil {
hasError = true
log.Fatalf("Failed to remove secret %s: %s", secret.ID, err)
@@ -135,7 +169,7 @@ func removeConfigs(
) bool {
var hasError bool
for _, config := range configs {
log.Infof("removing config %s", config.Spec.Name)
log.Debugf("removing config %s", config.Spec.Name)
if err := client.ConfigRemove(ctx, config.ID); err != nil {
hasError = true
log.Fatalf("failed to remove config %s: %s", config.ID, err)
@@ -169,12 +203,23 @@ func terminalState(state swarm.TaskState) bool {
return numberedStates[state] > numberedStates[swarm.TaskStateRunning]
}

func waitOnTasks(ctx context.Context, client apiclient.APIClient, namespace string) error {
func waitOnTasks(ctx context.Context, client apiclient.APIClient, namespace string) (bool, error) {
var timedOut bool

log.Debugf("waiting on undeploy tasks (timeout=%v secs)", WaitTimeout)

go func() {
t := time.Duration(WaitTimeout) * time.Second
<-time.After(t)
log.Debug("timed out on undeploy")
timedOut = true
}()

terminalStatesReached := 0
for {
tasks, err := getStackTasks(ctx, client, namespace)
if err != nil {
return fmt.Errorf("failed to get tasks: %w", err)
return false, fmt.Errorf("failed to get tasks: %w", err)
}

for _, task := range tasks {
@@ -187,6 +232,11 @@ func waitOnTasks(ctx context.Context, client apiclient.APIClient, namespace stri
if terminalStatesReached == len(tasks) {
break
}

if timedOut {
return true, fmt.Errorf("deployment timed out 🟠")
}
}
return nil

return false, nil
}

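The new `waitOnTasks` reports a timeout separately from other errors by flipping a flag from a timer goroutine. A sketch of the same "poll until done or deadline" idea using only the standard library and an explicit deadline check instead of a shared flag; `tasksRemaining` is a hypothetical stand-in for the real swarm task query.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// tasksRemaining stands in for querying swarm tasks that have not yet
// reached a terminal state.
func tasksRemaining() (int, error) {
	return 0, nil
}

// waitOnTasks polls until no tasks remain or the deadline passes, returning
// (timedOut, err) in the same spirit as the change above.
func waitOnTasks(timeout time.Duration) (bool, error) {
	deadline := time.Now().Add(timeout)
	for {
		remaining, err := tasksRemaining()
		if err != nil {
			return false, fmt.Errorf("failed to get tasks: %w", err)
		}
		if remaining == 0 {
			return false, nil
		}
		if time.Now().After(deadline) {
			return true, errors.New("undeploy timed out")
		}
		time.Sleep(time.Second)
	}
}

func main() {
	timedOut, err := waitOnTasks(10 * time.Second)
	fmt.Println(timedOut, err)
}
```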
@@ -3,25 +3,26 @@ package stack // https://github.com/docker/cli/blob/master/cli/command/stack/swa
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"time"

stdlibErr "errors"

tea "github.com/charmbracelet/bubbletea"

"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/ui"
"coopcloud.tech/abra/pkg/upstream/convert"
"github.com/docker/cli/cli/command/service/progress"
"github.com/docker/cli/cli/command/stack/formatter"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
networktypes "github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
@@ -176,7 +177,7 @@ func IsDeployed(ctx context.Context, cl *dockerClient.Client, stackName string)
func pruneServices(ctx context.Context, cl *dockerClient.Client, namespace convert.Namespace, services map[string]struct{}) {
oldServices, err := GetStackServices(ctx, cl, namespace.Name())
if err != nil {
log.Infof("failed to list services: %s", err)
log.Warnf("failed to list services: %s", err)
}

pruneServices := []swarm.Service{}
@@ -190,7 +191,17 @@ func pruneServices(ctx context.Context, cl *dockerClient.Client, namespace conve
}

// RunDeploy is the swarm implementation of docker stack deploy
func RunDeploy(cl *dockerClient.Client, opts Deploy, cfg *composetypes.Config, appName string, dontWait bool) error {
func RunDeploy(
cl *dockerClient.Client,
opts Deploy,
cfg *composetypes.Config,
appName string,
serverName string,
dontWait bool,
filters filters.Args,
) error {
log.Info("initialising deployment")

if err := validateResolveImageFlag(&opts); err != nil {
return err
}
@@ -200,7 +211,16 @@ func RunDeploy(cl *dockerClient.Client, opts Deploy, cfg *composetypes.Config, a
opts.ResolveImage = ResolveImageNever
}

return deployCompose(context.Background(), cl, opts, cfg, appName, dontWait)
return deployCompose(
context.Background(),
cl,
opts,
cfg,
appName,
serverName,
dontWait,
filters,
)
}

// validateResolveImageFlag validates the opts.resolveImage command line option
@@ -213,7 +233,16 @@ func validateResolveImageFlag(opts *Deploy) error {
}
}

func deployCompose(ctx context.Context, cl *dockerClient.Client, opts Deploy, config *composetypes.Config, appName string, dontWait bool) error {
func deployCompose(
ctx context.Context,
cl *dockerClient.Client,
opts Deploy,
config *composetypes.Config,
appName string,
serverName string,
dontWait bool,
filters filters.Args,
) error {
namespace := convert.NewNamespace(opts.Namespace)

if opts.Prune {
@@ -254,7 +283,14 @@ func deployCompose(ctx context.Context, cl *dockerClient.Client, opts Deploy, co
return err
}

serviceIDs, err := deployServices(ctx, cl, services, namespace, opts.SendRegistryAuth, opts.ResolveImage)
serviceIDs, err := deployServices(
ctx,
cl,
services,
namespace,
opts.SendRegistryAuth,
opts.ResolveImage,
)
if err != nil {
return err
}
@@ -264,13 +300,16 @@ func deployCompose(ctx context.Context, cl *dockerClient.Client, opts Deploy, co
return nil
}

log.Infof("waiting for %s to deploy... please hold 🤚", appName)

if err := waitOnServices(ctx, cl, serviceIDs, appName); err != nil {
return err
waitOpts := WaitOpts{
Services: serviceIDs,
AppName: appName,
ServerName: serverName,
Filters: filters,
}

log.Infof("successfully deployed %s", appName)
if err := WaitOnServices(ctx, cl, waitOpts); err != nil {
return err
}

return nil
}
@@ -296,7 +335,7 @@ func validateExternalNetworks(ctx context.Context, client dockerClient.NetworkAP
// local-scoped networks, so there's no need to inspect them.
continue
}
network, err := client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{})
network, err := client.NetworkInspect(ctx, networkName, networktypes.InspectOptions{})
switch {
case dockerClient.IsErrNotFound(err):
return errors.Errorf("network %q is declared as external, but could not be found. You need to create a swarm-scoped network before the stack is deployed, which you can do by running this on the server: docker network create -d overlay proxy", networkName)
@@ -342,7 +381,7 @@ func createConfigs(ctx context.Context, cl *dockerClient.Client, configs []swarm
}
case dockerClient.IsErrNotFound(err):
// config does not exist, then we create a new one.
log.Infof("creating config %s", configSpec.Name)
log.Debugf("creating config %s", configSpec.Name)
if _, err := cl.ConfigCreate(ctx, configSpec); err != nil {
return errors.Wrapf(err, "failed to create config %s", configSpec.Name)
}
@@ -353,13 +392,13 @@ func createConfigs(ctx context.Context, cl *dockerClient.Client, configs []swarm
return nil
}

func createNetworks(ctx context.Context, cl *dockerClient.Client, namespace convert.Namespace, networks map[string]types.NetworkCreate) error {
func createNetworks(ctx context.Context, cl *dockerClient.Client, namespace convert.Namespace, networks map[string]networktypes.CreateOptions) error {
existingNetworks, err := getStackNetworks(ctx, cl, namespace.Name())
if err != nil {
return err
}

existingNetworkMap := make(map[string]types.NetworkResource)
existingNetworkMap := make(map[string]networktypes.Inspect)
for _, network := range existingNetworks {
existingNetworkMap[network.Name] = network
}
@@ -373,7 +412,7 @@ func createNetworks(ctx context.Context, cl *dockerClient.Client, namespace conv
createOpts.Driver = defaultNetworkDriver
}

log.Infof("creating network %s", name)
log.Debugf("creating network %s", name)
if _, err := cl.NetworkCreate(ctx, name, createOpts); err != nil {
return errors.Wrapf(err, "failed to create network %s", name)
}
@@ -387,10 +426,12 @@ func deployServices(
services map[string]swarm.ServiceSpec,
namespace convert.Namespace,
sendAuth bool,
resolveImage string) ([]string, error) {
resolveImage string) ([]ui.ServiceMeta, error) {
var servicesMeta []ui.ServiceMeta

existingServices, err := GetStackServices(ctx, cl, namespace.Name())
if err != nil {
return nil, err
return servicesMeta, err
}

existingServiceMap := make(map[string]swarm.Service)
@@ -398,8 +439,6 @@ func deployServices(
existingServiceMap[service.Spec.Name] = service
}

var serviceIDs []string

for internalName, serviceSpec := range services {
var (
name = namespace.Scope(internalName)
@@ -408,7 +447,7 @@ func deployServices(
)

if service, exists := existingServiceMap[name]; exists {
log.Infof("updating %s", name)
log.Debugf("updating %s", name)

updateOpts := types.ServiceUpdateOptions{EncodedRegistryAuth: encodedAuth}

@@ -450,9 +489,12 @@ func deployServices(
log.Warn(warning)
}

serviceIDs = append(serviceIDs, service.ID)
servicesMeta = append(servicesMeta, ui.ServiceMeta{
Name: name,
ID: service.ID,
})
} else {
log.Infof("creating %s", name)
log.Debugf("creating %s", name)

createOpts := types.ServiceCreateOptions{EncodedRegistryAuth: encodedAuth}

@@ -466,15 +508,18 @@ func deployServices(
return nil, errors.Wrapf(err, "failed to create %s", name)
}

serviceIDs = append(serviceIDs, serviceCreateResponse.ID)
servicesMeta = append(servicesMeta, ui.ServiceMeta{
Name: name,
ID: serviceCreateResponse.ID,
})
}
}

return serviceIDs, nil
return servicesMeta, nil
}

func getStackNetworks(ctx context.Context, dockerclient client.APIClient, namespace string) ([]types.NetworkResource, error) {
return dockerclient.NetworkList(ctx, types.NetworkListOptions{Filters: getStackFilter(namespace)})
func getStackNetworks(ctx context.Context, dockerclient client.APIClient, namespace string) ([]networktypes.Inspect, error) {
return dockerclient.NetworkList(ctx, networktypes.ListOptions{Filters: getStackFilter(namespace)})
}

func getStackSecrets(ctx context.Context, dockerclient client.APIClient, namespace string) ([]swarm.Secret, error) {
@@ -485,67 +530,89 @@ func getStackConfigs(ctx context.Context, dockerclient client.APIClient, namespa
return dockerclient.ConfigList(ctx, types.ConfigListOptions{Filters: getStackFilter(namespace)})
}

func waitOnServices(ctx context.Context, cl *dockerClient.Client, serviceIDs []string, appName string) error {
var errs []error
func timestamp() string {
ts := time.Now().UTC().Format(time.RFC3339)
return strings.Replace(ts, ":", "", -1) // get rid of offensive colons
}

for _, serviceID := range serviceIDs {
if err := WaitOnService(ctx, cl, serviceID, appName); err != nil {
errs = append(errs, fmt.Errorf("%s: %w", serviceID, err))
}
type WaitOpts struct {
AppName string
Filters filters.Args
NoLog bool
Quiet bool
ServerName string
Services []ui.ServiceMeta
}

func WaitOnServices(ctx context.Context, cl *dockerClient.Client, opts WaitOpts) error {
timeout := time.Duration(WaitTimeout) * time.Second
model := ui.DeployInitialModel(ctx, cl, opts.Services, opts.AppName, timeout, opts.Filters)
tui := tea.NewProgram(model)

if !opts.Quiet {
log.Info("polling deployment status")
}

if len(errs) > 0 {
m, err := log.Without(
func() (tea.Model, error) {
return tui.Run()
},
)
if err != nil {
return fmt.Errorf("waitOnServices: error running TUI: %s", err)
}

deployModel := m.(ui.Model)
if deployModel.TimedOut || deployModel.Failed || deployModel.Quit {
var errs []error

if deployModel.Failed {
errs = append(errs, fmt.Errorf("deploy failed 🛑"))
} else if deployModel.TimedOut {
errs = append(errs, fmt.Errorf("deploy timed out 🟠"))
} else {
errs = append(errs, fmt.Errorf("deploy in progress 🟠"))
}

for _, s := range *deployModel.Streams {
if s.Err != nil {
errs = append(errs, fmt.Errorf("%s: %s", s.Name, s.Err))
}
}

if len(*deployModel.Logs) > 0 && !opts.NoLog {
logsPath := filepath.Join(
config.LOGS_DIR,
opts.ServerName,
fmt.Sprintf("%s_%s", opts.AppName, timestamp()),
)

if err := os.MkdirAll(filepath.Join(config.LOGS_DIR, opts.ServerName), 0764); err != nil {
return fmt.Errorf("waitOnServices: error creating log dir: %s", err)
}

file, err := os.Create(logsPath)
if err != nil {
return fmt.Errorf("waitOnServices: error opening file: %s", err)
}
defer file.Close()

s := strings.Join(*deployModel.Logs, "\n")
if _, err := file.WriteString(s); err != nil {
return fmt.Errorf("waitOnServices: writeFile: %s", err)
}

errs = append(errs, fmt.Errorf("logs: %s", logsPath))
}

return stdlibErr.Join(errs...)
}

return nil
}

// https://github.com/docker/cli/blob/master/cli/command/service/helpers.go
// https://github.com/docker/cli/blob/master/cli/command/service/progress/progress.go
func WaitOnService(ctx context.Context, cl *dockerClient.Client, serviceID, appName string) error {
errChan := make(chan error, 1)
pipeReader, pipeWriter := io.Pipe()

sigintChannel := make(chan os.Signal, 1)
signal.Notify(sigintChannel, os.Interrupt)
defer signal.Stop(sigintChannel)

go func() {
errChan <- progress.ServiceProgress(ctx, cl, serviceID, pipeWriter)
}()

go io.Copy(ioutil.Discard, pipeReader)

timeout := time.Duration(WaitTimeout) * time.Second

select {
case err := <-errChan:
return err
case <-sigintChannel:
return fmt.Errorf(`
Not waiting for %s to deploy. The deployment is ongoing...

If you want to stop the deployment, try:

abra app undeploy %s`, appName, appName)
case <-time.After(timeout):
return fmt.Errorf(`
%s has not converged (%s second timeout reached).

This does not necessarily mean your deployment has failed, it may just be that
the app is taking longer to deploy based on your server resources or network
latency.

You can track latest deployment status with:

abra app ps %s

And inspect the logs with:

abra app logs %s
`, appName, timeout, appName, appName)
if !opts.Quiet {
log.Info("deploy succeeded 🟢")
}

return nil
}

// Copypasta from https://github.com/docker/cli/blob/master/cli/command/stack/swarm/list.go

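When the deploy TUI ends in a failed, timed-out, or quit state, the new WaitOnServices collects the per-service errors, writes the gathered service logs under the logs directory and returns everything as a single error via the standard library's errors.Join. A small sketch of that aggregation-and-log-file pattern; the path, messages, and app name are illustrative, not abra's real defaults.

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"time"
)

func main() {
	var errs []error
	errs = append(errs, errors.New("deploy timed out"))
	errs = append(errs, fmt.Errorf("app: %s", "task exited non-zero"))

	// Illustrative log path: <dir>/<app>_<timestamp>, with colons stripped so
	// the name is safe on more filesystems (same trick as the timestamp() helper).
	ts := strings.ReplaceAll(time.Now().UTC().Format(time.RFC3339), ":", "")
	logsPath := filepath.Join(os.TempDir(), "example-logs", fmt.Sprintf("%s_%s", "myapp", ts))

	if err := os.MkdirAll(filepath.Dir(logsPath), 0o764); err != nil {
		fmt.Println(err)
		return
	}
	if err := os.WriteFile(logsPath, []byte("collected deploy logs\n"), 0o644); err != nil {
		fmt.Println(err)
		return
	}
	errs = append(errs, fmt.Errorf("logs: %s", logsPath))

	// errors.Join returns one error whose message joins all non-nil errors.
	fmt.Println(errors.Join(errs...))
}
```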
@@ -1,8 +1,8 @@
#!/usr/bin/env bash

ABRA_VERSION="0.9.0-beta"
ABRA_VERSION="0.10.0-beta"
ABRA_RELEASE_URL="https://git.coopcloud.tech/api/v1/repos/toolshed/abra/releases/tags/$ABRA_VERSION"
RC_VERSION="0.10.0-rc1-beta"
RC_VERSION="0.10.0-beta"
RC_VERSION_URL="https://git.coopcloud.tech/api/v1/repos/toolshed/abra/releases/tags/$RC_VERSION"

for arg in "$@"; do

@@ -3,5 +3,5 @@ STACK := abra_installer_script
default: deploy

deploy:
@DOCKER_CONTEXT=swarm.autonomic.zone docker stack rm $(STACK) && \
DOCKER_CONTEXT=swarm.autonomic.zone docker stack deploy -c compose.yml $(STACK)
@DOCKER_CONTEXT=swarm-0.coopcloud.tech docker stack rm $(STACK) && \
DOCKER_CONTEXT=swarm-0.coopcloud.tech docker stack deploy -c compose.yml $(STACK)

@@ -50,6 +50,9 @@ teardown(){
assert_failure
assert_output --partial 'locally unstaged changes'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -62,6 +65,9 @@ teardown(){
run $ABRA app check "$TEST_APP_DOMAIN" --chaos
assert_success

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}

@@ -53,6 +53,9 @@ teardown(){
assert_failure
assert_output --partial 'locally unstaged changes'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -66,6 +69,9 @@ teardown(){
assert_success
assert_output --partial 'baz'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}

@@ -24,6 +24,9 @@ teardown(){
_rm_remote "/etc/*.txt"

_rm "$BATS_TMPDIR/mydir"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}

@test "validate app argument" {
@@ -34,6 +37,42 @@ teardown(){
assert_failure
}

@test "bail if unstaged changes and no --chaos" {
_mkdir "$BATS_TMPDIR/mydir"
_mkfile "$BATS_TMPDIR/mydir/myfile.txt" "foo"

run bash -c "echo foo >> $ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_success
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"

run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/mydir" app:/etc
assert_failure

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}

@test "do not bail if unstaged changes and --chaos" {
_mkdir "$BATS_TMPDIR/mydir"
_mkfile "$BATS_TMPDIR/mydir/myfile.txt" "foo"

run bash -c "echo foo >> $ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_success
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"

run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/mydir" app:/etc --chaos
assert_success

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}

@test "error if missing src/dest arguments" {
run $ABRA app cp "$TEST_APP_DOMAIN"
assert_failure

@@ -21,8 +21,10 @@ setup(){

teardown(){
_reset_recipe
_reset_app
_undeploy_app
_undeploy_app2 "gitea.$TEST_SERVER"

_reset_app
_reset_tags

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
@@ -46,6 +48,9 @@ teardown(){
assert_success
assert_output --partial 'foo'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run $ABRA app deploy "$TEST_APP_DOMAIN" --no-input
assert_failure
assert_output --partial 'locally unstaged changes'
@@ -62,6 +67,9 @@ teardown(){
assert_success
assert_output --partial 'foo'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run $ABRA app deploy "$TEST_APP_DOMAIN" \
--chaos --no-input --no-converge-checks
assert_success
@@ -100,8 +108,6 @@ teardown(){

# bats test_tags=slow
@test "deploy latest commit if no published versions and no --chaos" {
latestCommit="$(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" rev-parse --short HEAD)"

_remove_tags
_wipe_env_version

@@ -109,7 +115,7 @@ teardown(){
run $ABRA app deploy "$TEST_APP_DOMAIN" \
--no-input --no-converge-checks --offline
assert_success
assert_output --partial "$latestCommit"
assert_output --partial "${_get_head_hash:0:8}"
}

# bats test_tags=slow
@@ -216,19 +222,6 @@ teardown(){
run $ABRA app deploy "gitea.$TEST_SERVER" --no-input --no-converge-checks
assert_success
assert_output --partial "$latestVersion"

run $ABRA app undeploy "gitea.$TEST_SERVER" --no-input
assert_success

run $ABRA app secret remove "gitea.$TEST_SERVER" --all --no-input
assert_success

run $ABRA app volume remove "gitea.$TEST_SERVER" --no-input
assert_success

run $ABRA app remove "gitea.$TEST_SERVER" --no-input
assert_success
assert_not_exists "$ABRA_DIR/servers/$TEST_SERVER/gitea.$TEST_SERVER.env"
}

# bats test_tags=slow
@@ -408,8 +401,6 @@ teardown(){

# bats test_tags=slow
@test "ignore env version on new deploy" {
tagHash=$(_get_tag_hash "0.1.0+1.20.0")

run $ABRA app deploy "$TEST_APP_DOMAIN" "0.1.0+1.20.0" \
--no-input --no-converge-checks
assert_success
@@ -423,3 +414,12 @@ teardown(){
assert_success
assert_output --partial "$latestRelease"
}

# bats test_tags=slow
@test "no chaos version label if no chaos" {
_deploy_app

run $ABRA app labels "$TEST_APP_DOMAIN" --no-input
assert_success
refute_output --regexp "coop-cloud.abra-test-recipe.$TEST_SERVER.chaos-version"
}

@@ -54,13 +54,21 @@ teardown(){
}

# bats test_tags=slow
@test "chaos commit written to env" {
@test "deploy commit written to env and redeploy keeps that version" {
run $ABRA app deploy "$TEST_APP_DOMAIN" "1e83340e" --no-input --no-converge-checks
assert_success

run grep -q "TYPE=$TEST_RECIPE:1e83340e" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success

run $ABRA app deploy "$TEST_APP_DOMAIN" \
--force --no-input --no-converge-checks
assert_success

run grep -q "TYPE=$TEST_RECIPE:1e83340e" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
}

# bats test_tags=slow
@@ -98,12 +106,15 @@ teardown(){
}

# bats test_tags=slow
@test "deploy overwrites chaos deploy" {
run $ABRA app deploy "$TEST_APP_DOMAIN" "1e83340e" \
--no-input --no-converge-checks
@test "takes deployed version when no .env version is present " {
run $ABRA app deploy "$TEST_APP_DOMAIN" "0.1.0+1.20.0" --no-input --no-converge-checks --ignore-env-version
assert_success

run grep -q "TYPE=$TEST_RECIPE:1e83340e" \
run grep -q "TYPE=$TEST_RECIPE:0.1.0+1.20.0" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success

run sed -i 's/TYPE=abra-test-recipe:.*/TYPE=abra-test-recipe/g' \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success

@@ -111,7 +122,7 @@ teardown(){
--force --no-input --no-converge-checks
assert_success

run grep -q "TYPE=$TEST_RECIPE:1e83340e" \
run grep -q "TYPE=$TEST_RECIPE:0.1.0+1.20.0" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_failure
assert_success
}

@@ -20,8 +20,8 @@ setup(){

teardown(){
_reset_recipe
_reset_app
_undeploy_app
_reset_app
_reset_tags

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
@@ -37,17 +37,10 @@ teardown(){
--no-input --no-converge-checks
assert_success

# current deployment
assert_output --regexp 'VERSION.*N/A'
assert_output --regexp 'CHAOS.*false'

# new deployment
assert_output --regexp 'VERSION.* ' + "${latestRelease}"
assert_output --regexp 'CHAOS.*false'

# env version
assert_output --regexp 'CURRENT VERSION.*N/A'
assert_output --regexp 'NEW VERSION.*' + "${latestRelease}"
assert_output --partial 'NEW DEPLOY OVERVIEW'
assert_output --partial 'CURRENT DEPLOYMENT N/A'
assert_output --partial 'ENV VERSION N/A'
assert_output --partial "NEW DEPLOYMENT ${latestRelease}"

run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -61,17 +54,10 @@ teardown(){
--no-input --no-converge-checks
assert_success

# current deployment
assert_output --regexp 'VERSION.*N/A'
assert_output --regexp 'CHAOS.*false'

# new deployment
assert_output --regexp 'VERSION.* ' + "${latestRelease}"
assert_output --regexp 'CHAOS.*false'

# env version
assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
assert_output --regexp 'NEW VERSION.*' + "${latestRelease}"
assert_output --partial 'NEW DEPLOY OVERVIEW'
assert_output --partial "CURRENT DEPLOYMENT N/A"
assert_output --partial "ENV VERSION ${latestRelease}"
assert_output --partial "NEW DEPLOYMENT ${latestRelease}"

run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -90,17 +76,10 @@ teardown(){
--no-input --no-converge-checks
assert_success

# current deployment
assert_output --regexp 'VERSION.*N/A'
assert_output --regexp 'CHAOS.*false'

# new deployment
assert_output --regexp 'VERSION.*' + "0.1.1+1.20.2"
assert_output --regexp 'CHAOS.*false'

# env version
assert_output --regexp 'CURRENT VERSION.*' + "0.1.1+1.20.2"
assert_output --regexp 'NEW VERSION.*' + "0.1.1+1.20.2"
assert_output --partial 'NEW DEPLOY OVERVIEW'
assert_output --partial "CURRENT DEPLOYMENT N/A"
assert_output --partial "ENV VERSION 0.1.1+1.20.2"
assert_output --partial "NEW DEPLOYMENT 0.1.1+1.20.2"

run grep -q "TYPE=$TEST_RECIPE:0.1.1+1.20.2" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -120,17 +99,10 @@ teardown(){
--no-input --no-converge-checks --ignore-env-version
assert_success

# current deployment
assert_output --regexp 'VERSION.*N/A'
assert_output --regexp 'CHAOS.*false'

# new deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*false'

# env version
assert_output --regexp 'CURRENT VERSION.*' + "0.1.1+1.20.2"
assert_output --regexp 'NEW VERSION.*' + "${latestRelease}"
assert_output --partial 'NEW DEPLOY OVERVIEW'
assert_output --partial "CURRENT DEPLOYMENT N/A"
assert_output --partial "ENV VERSION 0.1.1+1.20.2"
assert_output --partial "NEW DEPLOYMENT ${latestRelease}"

run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -153,17 +125,10 @@ teardown(){
--no-input --no-converge-checks --chaos
assert_success

# current deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*false'

# new deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*' + "${headHash:0:8}+U"

# env version
assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
assert_output --regexp 'NEW VERSION.*' + "${headHash:0:8}+U"
assert_output --partial 'CHAOS DEPLOY OVERVIEW'
assert_output --partial "CURRENT DEPLOYMENT ${latestRelease}"
assert_output --partial "ENV VERSION ${latestRelease}"
assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}+U"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
@@ -173,7 +138,7 @@ teardown(){
assert_success
}

@test "chaos deploy then force deploy" {
@test "can not redeploy chaos version without --chaos" {
headHash=$(_get_head_hash)
latestRelease=$(_latest_release)

@@ -189,27 +154,12 @@ teardown(){
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"

run $ABRA app deploy "$TEST_APP_DOMAIN" \
--no-input --no-converge-checks --force
assert_success

# current deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*' + "${headHash:0:8}+U"

# new deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*false'

# env version
assert_output --regexp 'CURRENT VERSION.*' + "${headHash:0:8}+U"
assert_output --regexp 'NEW VERSION.*' + "${latestRelease}"

run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
--no-input --no-converge-checks --force --debug
assert_failure
assert_output --regexp 'can not redeploy chaos version .*' + "${headHash:0:8}+U"
}

@test "deploy then force chaos commit deploy" {
@test "deploy then force commit deploy" {
headHash=$(_get_head_hash)
latestRelease=$(_latest_release)

@@ -225,17 +175,10 @@ teardown(){
--no-input --no-converge-checks --force
assert_success

# current deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*false'

# new deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*' + "${headHash:0:8}"

# env version
assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
assert_output --regexp 'NEW VERSION.*' + "${headHash:0:8}"
assert_output --partial 'DEPLOY OVERVIEW'
assert_output --partial "CURRENT DEPLOYMENT ${latestRelease}"
assert_output --partial "ENV VERSION ${latestRelease}"
assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}"

run grep -q "TYPE=$TEST_RECIPE:${headHash:0:8}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -250,17 +193,10 @@ teardown(){
--no-input --no-converge-checks --chaos
assert_success

# current deployment
assert_output --regexp 'VERSION.*N/A'
assert_output --regexp 'CHAOS.*false'

# new deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*' + "${headHash:0:8}"

# env version
assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
assert_output --regexp 'NEW VERSION.*' + "${headHash:0:8}"
assert_output --partial 'NEW DEPLOY OVERVIEW'
assert_output --partial "CURRENT DEPLOYMENT N/A"
assert_output --partial "ENV VERSION ${latestRelease}"
assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}"

run bash -c 'echo "unstaged changes" >> "$ABRA_DIR/recipes/$TEST_RECIPE/foo"'
assert_success
@@ -270,17 +206,28 @@ teardown(){
--no-input --no-converge-checks --chaos
assert_success

# current deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*' + "${headHash:0:8}"
assert_output --partial 'CHAOS DEPLOY OVERVIEW'
assert_output --partial "CURRENT DEPLOYMENT ${headHash:0:8}"
assert_output --partial "ENV VERSION ${headHash:0:8}"
assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}+U"

# new deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*' + "${headHash:0:8}+U"
run $ABRA app deploy "$TEST_APP_DOMAIN" \
--no-input --no-converge-checks --chaos
assert_success

# env version
assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
assert_output --regexp 'NEW VERSION.*' + "${headHash:0:8}+U"
assert_output --partial 'CHAOS DEPLOY OVERVIEW'
assert_output --partial "CURRENT DEPLOYMENT ${headHash:0:8}+U"
assert_output --partial "ENV VERSION ${headHash:0:8}+U"
assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}+U"

run $ABRA app deploy "$TEST_APP_DOMAIN" \
--no-input --no-converge-checks --chaos
assert_success

assert_output --partial 'CHAOS DEPLOY OVERVIEW'
assert_output --partial "CURRENT DEPLOYMENT ${headHash:0:8}+U"
assert_output --partial "ENV VERSION ${headHash:0:8}+U"
assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}+U"

run grep -q "TYPE=$TEST_RECIPE:${headHash:0:8}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -302,19 +249,8 @@ teardown(){
--no-input --no-converge-checks --force
assert_success

# current deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*' + "${headHash:0:8}"

# new deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*false'

# env version
assert_output --regexp 'CURRENT VERSION.*' + "${headHash:0:8}"
assert_output --regexp 'NEW VERSION.*' + "${latestRelease}"

run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
assert_output --partial 'REDEPLOY OVERVIEW'
assert_output --partial "CURRENT DEPLOYMENT ${headHash:0:8}"
assert_output --partial "ENV VERSION ${headHash:0:8}"
assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}"
}

@@ -20,8 +20,11 @@ setup(){

teardown(){
_reset_recipe
_reset_app
_undeploy_app
_reset_app

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}

@test "validate app argument" {
@@ -41,6 +44,16 @@ teardown(){
}

@test "show env version despite --chaos" {
run bash -c "echo foo >> $ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_success
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"

run $ABRA app env "$TEST_APP_DOMAIN"
assert_success

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}

@@ -21,8 +21,8 @@ setup(){

teardown(){
_reset_recipe
_reset_app
_undeploy_app
_reset_app
_reset_tags

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
@@ -46,6 +46,9 @@ teardown(){
assert_success
assert_output --partial 'foo'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run $ABRA app labels "$TEST_APP_DOMAIN" --no-input
assert_failure
}
@@ -59,6 +62,9 @@ teardown(){
assert_success
assert_output --partial 'foo'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run $ABRA app labels "$TEST_APP_DOMAIN" --chaos
assert_success
}

@@ -8,6 +8,7 @@ setup_file(){
}

teardown_file(){
_undeploy_app
_rm_app
_rm_server
}
@@ -18,6 +19,7 @@ setup(){
}

teardown(){
_reset_recipe
_undeploy_app
}

@@ -87,6 +89,10 @@ teardown(){
assert_success
refute_output --partial "$TEST_RECIPE"
assert_output --partial "foo-recipe"

run rm -rf "$ABRA_DIR/servers/foo.com"
assert_success
assert_not_exists "$ABRA_DIR/servers/foo.com"
}

@test "output is machine readable" {
@@ -98,3 +104,36 @@ teardown(){

assert_output --partial "$expectedOutput"
}

# bats test_tags=slow
@test "list with status fetches recipe" {
_deploy_app

run $ABRA app ls --status
assert_success

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE"
assert_success

run $ABRA app ls --status
assert_success
}

# bats test_tags=slow
@test "list with chaos version" {
run bash -c "echo foo >> $ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_success
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"

run $ABRA app deploy "$TEST_APP_DOMAIN" \
--no-input --no-converge-checks --chaos
assert_success

run $ABRA app ls --status
assert_success
assert_output --partial "+U"

run rm -rf "$ABRA_DIR/servers/foo.com"
assert_success
assert_not_exists "$ABRA_DIR/servers/foo.com"
}

@@ -20,6 +20,10 @@ setup(){
teardown(){
_rm_app
_reset_recipe
_reset_tags

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}

@test "create new app" {
@@ -47,25 +51,22 @@ teardown(){
assert_success
assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"

assert_equal $(_get_tag_hash 0.3.0+1.21.0) $(_get_current_hash)

run grep -q "TYPE=$TEST_RECIPE:0.3.0+1.21.0" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
}

@test "create new app with chaos commit" {
run $ABRA app new "$TEST_RECIPE" 1e83340e \
@test "create new app with version commit" {
tagHash=$(_get_tag_hash "0.3.0+1.21.0")

run $ABRA app new "$TEST_RECIPE" "$tagHash" \
--no-input \
--server "$TEST_SERVER" \
--domain "$TEST_APP_DOMAIN"
assert_success
assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"

currentHash=$(_get_current_hash)
assert_equal 1e83340e ${currentHash:0:8}

run grep -q "TYPE=$TEST_RECIPE:1e83340e" \
run grep -q "TYPE=$TEST_RECIPE:${tagHash}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
}
@@ -101,6 +102,9 @@ teardown(){
assert_failure
assert_output --partial 'locally unstaged changes'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -122,6 +126,13 @@ teardown(){
assert_success
assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_success
assert_output --partial 'foo'

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -167,6 +178,8 @@ teardown(){

# bats test_tags=slow
@test "generate secrets" {
latestRelease=$(_latest_release)

run $ABRA app new "$TEST_RECIPE" \
--no-input \
--server "$TEST_SERVER" \
@@ -178,4 +191,69 @@ teardown(){
run $ABRA app secret ls "$TEST_APP_DOMAIN"
assert_success
assert_output --partial 'test_pass_one'

run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
}

# bats test_tags=slow
@test "app new from chaos recipe" {
currentHash=$(_get_current_hash)
latestRelease=$(_latest_release)

run bash -c "echo foo >> $ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_success
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"

run $ABRA app new "$TEST_RECIPE" \
--no-input \
--server "$TEST_SERVER" \
--domain "$TEST_APP_DOMAIN" \
--secrets \
--chaos
assert_success
assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_output --partial "version: ${currentHash:0:8}+U"

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run grep -q "TYPE=$TEST_RECIPE:${currentHash:0:8}+U" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
}

# bats test_tags=slow
@test "app new, no releases, from chaos recipe" {
currentHash=$(_get_current_hash)
_remove_tags

run bash -c "echo foo >> $ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_success
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"

run $ABRA app new "$TEST_RECIPE" \
--no-input \
--server "$TEST_SERVER" \
--domain "$TEST_APP_DOMAIN" \
--secrets \
--chaos
assert_success
assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_output --partial "version: ${currentHash:0:8}+U"

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run grep -q "TYPE=$TEST_RECIPE:${currentHash:0:8}+U" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
}

@test "automatically select single server" {
# NOTE(d1): no --no-input required, single server available
run $ABRA app new "$TEST_RECIPE" --domain "$TEST_APP_DOMAIN"
assert_success
assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
}

@@ -55,6 +55,9 @@ teardown(){
assert_failure
assert_output --partial 'locally unstaged changes'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -70,6 +73,9 @@ teardown(){
run $ABRA app ps --chaos "$TEST_APP_DOMAIN"
assert_success

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -117,6 +123,8 @@ teardown(){
@test "show ps report" {
_deploy_app

_ensure_env_version "$(_latest_release)"

run $ABRA app ps "$TEST_APP_DOMAIN"
assert_success
assert_output --partial 'app'

@@ -20,6 +20,7 @@ setup(){

teardown(){
_undeploy_app
_reset_app
_reset_recipe
}

@@ -152,7 +153,7 @@ teardown(){
}

# bats test_tags=slow
@test "rollback chaos deployment" {
@test "rollback chaos deployment is not possible" {
tagHash=$(_get_tag_hash "0.2.0+1.21.0")
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" checkout "$tagHash"
assert_success
@@ -162,17 +163,8 @@ teardown(){
assert_output --partial "${tagHash:0:8}"

run $ABRA app rollback "$TEST_APP_DOMAIN" "0.1.1+1.20.2" --no-input --no-converge-checks
assert_success
assert_output --partial "0.1.1+1.20.2"
assert_output --partial "${tagHash:0:8}"

run $ABRA app rollback "$TEST_APP_DOMAIN" "0.1.0+1.20.0" --no-input --no-converge-checks
assert_success
assert_output --partial "0.1.0+1.20.0"

tagHash=$(_get_tag_hash "0.1.1+1.20.2")
refute_output --partial "${tagHash:0:8}"
assert_output --partial "false"
assert_failure
assert_output --partial 'current deployment' + "${tagHash:0:8}" + 'is not a known version'
}

# bats test_tags=slow
@@ -185,3 +177,16 @@ teardown(){
assert_failure
assert_output --partial "not a known version"
}

# bats test_tags=slow
@test "no chaos version label if no chaos" {
_deploy_app

run $ABRA app rollback "$TEST_APP_DOMAIN" \
--no-input --no-converge-checks
assert_success

run $ABRA app labels "$TEST_APP_DOMAIN" --no-input
assert_success
refute_output --regexp "coop-cloud.abra-test-recipe.$TEST_SERVER.chaos-version"
}

@@ -20,10 +20,11 @@ setup(){

teardown(){
_undeploy_app
_reset_app
_reset_recipe
}

@test "deploy then rollback" {
@test "deploy then rollback" {
run $ABRA app deploy "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
--no-input --no-converge-checks
assert_success
@@ -32,24 +33,17 @@ teardown(){
--no-input --no-converge-checks
assert_success

# current deployment
assert_output --regexp 'VERSION.*' + "0.2.0+1.21.0"
assert_output --regexp 'CHAOS.*false'

# new deployment
assert_output --regexp 'VERSION.*' + "0.1.0+1.20.0"
assert_output --regexp 'CHAOS.*false'

# env version
assert_output --regexp 'CURRENT VERSION.*' + "0.2.0+1.21.0"
assert_output --regexp 'NEW VERSION.*' + "0.1.0+1.20.0"
assert_output --partial 'DOWNGRADE OVERVIEW'
assert_output --partial 'CURRENT DEPLOYMENT 0.2.0+1.21.0'
assert_output --partial 'ENV VERSION 0.2.0+1.21.0'
assert_output --partial 'NEW DEPLOYMENT 0.1.0+1.20.0'

run grep -q "TYPE=$TEST_RECIPE:0.1.0+1.20.0" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
}

@test "force rollback" {
@test "force rollback" {
run $ABRA app deploy "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
--no-input --no-converge-checks
assert_success
@@ -58,19 +52,33 @@ teardown(){
--no-input --no-converge-checks --force
assert_success

# current deployment
assert_output --regexp 'VERSION.*' + "0.2.0+1.21.0"
assert_output --regexp 'CHAOS.*false'

# new deployment
assert_output --regexp 'VERSION.*' + "0.2.0+1.21.0"
assert_output --regexp 'CHAOS.*false'

# env version
assert_output --regexp 'CURRENT VERSION.*' + "0.2.0+1.21.0"
assert_output --regexp 'NEW VERSION.*' + "0.2.0+1.21.0"
assert_output --partial 'REDEPLOY OVERVIEW'
assert_output --partial 'CURRENT DEPLOYMENT 0.2.0+1.21.0'
assert_output --partial 'ENV VERSION 0.2.0+1.21.0'
assert_output --partial 'NEW DEPLOYMENT 0.2.0+1.21.0'

run grep -q "TYPE=$TEST_RECIPE:0.2.0+1.21.0" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
}

@test "app rollback no .env version" {
run $ABRA app deploy "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
--no-input --no-converge-checks
assert_success

_wipe_env_version

run $ABRA app rollback "$TEST_APP_DOMAIN" "0.1.0+1.20.0" \
--no-input --no-converge-checks
assert_success

assert_output --partial 'DOWNGRADE OVERVIEW'
assert_output --partial 'CURRENT DEPLOYMENT 0.2.0+1.21.0'
assert_output --partial 'ENV VERSION N/A'
assert_output --partial 'NEW DEPLOYMENT 0.1.0+1.20.0'

run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
}

@@ -41,6 +41,11 @@ teardown(){

run $ABRA app secret generate "$TEST_APP_DOMAIN"
assert_failure
assert_output --partial 'missing arguments'

run $ABRA app secret generate "$TEST_APP_DOMAIN" test_pass_one
assert_failure
assert_output --partial 'missing arguments'

run $ABRA app secret generate "$TEST_APP_DOMAIN" testSecret testVersion --all
assert_failure
@@ -131,6 +136,9 @@ teardown(){
assert_failure
assert_output --partial 'locally unstaged changes'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -271,6 +279,9 @@ teardown(){
assert_failure
assert_output --partial 'locally unstaged changes'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -319,6 +330,9 @@ teardown(){
assert_failure
assert_output --partial 'locally unstaged changes'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}

@@ -92,9 +92,6 @@ teardown(){

run $ABRA app undeploy "$TEST_APP_DOMAIN" --no-input
assert_success

# NOTE(d1): ensure not chaos undeploy
assert_output --partial 'false'
}

# bats test_tags=slow

@@ -33,13 +33,10 @@ teardown(){
run $ABRA app undeploy "$TEST_APP_DOMAIN" --no-input
assert_success

# current deployment
assert_output --regexp 'VERSION.*' + "0.1.0+1.20.0"
assert_output --regexp 'CHAOS.*false'

# env version
assert_output --regexp 'CURRENT VERSION.*' + "0.1.0+1.20.0"
assert_output --regexp 'NEW VERSION.*' + "0.1.0+1.20.0"
assert_output --partial 'UNDEPLOY OVERVIEW'
assert_output --partial 'CURRENT DEPLOYMENT 0.1.0+1.20.0'
assert_output --partial 'ENV VERSION 0.1.0+1.20.0'
assert_output --partial 'NEW DEPLOYMENT N/A'

run grep -q "TYPE=$TEST_RECIPE:0.1.0+1.20.0" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -57,13 +54,10 @@ teardown(){
run $ABRA app undeploy "$TEST_APP_DOMAIN" --no-input
assert_success

# current deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*' + "${headHash:0:8}"

# env version
assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
assert_output --regexp 'NEW VERSION.*' + "${headHash:0:8}"
assert_output --partial 'UNDEPLOY OVERVIEW'
assert_output --partial "CURRENT DEPLOYMENT ${headHash:0:8}"
assert_output --partial "ENV VERSION ${headHash:0:8}"
assert_output --partial 'NEW DEPLOYMENT N/A'

run grep -q "TYPE=$TEST_RECIPE:${headHash:0:8}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -72,7 +66,6 @@ teardown(){

@test "chaos deploy with unstaged commits and undeploy" {
headHash=$(_get_head_hash)
latestRelease=$(_latest_release)

run bash -c 'echo "unstaged changes" >> "$ABRA_DIR/recipes/$TEST_RECIPE/foo"'
assert_success
@@ -85,13 +78,10 @@ teardown(){
run $ABRA app undeploy "$TEST_APP_DOMAIN" --no-input
assert_success

# current deployment
assert_output --regexp 'VERSION.*' + "${latestRelease}"
assert_output --regexp 'CHAOS.*' + "${headHash:0:8}+U"

# env version
assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
assert_output --regexp 'NEW VERSION.*' + "${headHash:0:8}+U"
assert_output --partial 'UNDEPLOY OVERVIEW'
assert_output --partial "CURRENT DEPLOYMENT ${headHash:0:8}+U"
assert_output --partial "ENV VERSION ${headHash:0:8}+U"
assert_output --partial 'NEW DEPLOYMENT N/A'

run grep -q "TYPE=$TEST_RECIPE:${headHash:0:8}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"

@ -205,7 +205,19 @@ teardown(){
}

# bats test_tags=slow
@test "upgrade chaos deployment" {
@test "show release note added after release" {
run $ABRA app deploy "$TEST_APP_DOMAIN" "0.2.0+1.21.0" --no-input --no-converge-checks
assert_success
assert_output --partial '0.2.0+1.21.0'

run $ABRA app upgrade "$TEST_APP_DOMAIN" "0.3.0+1.21.0" --no-input --no-converge-checks
assert_success
assert_output --partial '0.3.0+1.21.0'
assert_output --partial 'A release note added after the release'
}

# bats test_tags=slow
@test "upgrade commit deployment not possible" {
tagHash=$(_get_tag_hash "0.1.0+1.20.0")
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" checkout "$tagHash"
assert_success

@ -215,17 +227,8 @@ teardown(){
assert_output --partial "${tagHash:0:8}"

run $ABRA app upgrade "$TEST_APP_DOMAIN" "0.1.1+1.20.2" --no-input --no-converge-checks
assert_success
assert_output --partial "0.1.1+1.20.2"
assert_output --partial "${tagHash:0:8}"

run $ABRA app upgrade "$TEST_APP_DOMAIN" "0.2.0+1.21.0" --no-input --no-converge-checks
assert_success
assert_output --partial "0.2.0+1.21.0"

tagHash=$(_get_tag_hash "0.1.1+1.20.2")
refute_output --partial "${tagHash:0:8}"
assert_output --partial "false"
assert_failure
assert_output --partial "not a known version"
}

@test "chaos commit upgrade not possible" {

@ -239,3 +242,16 @@ teardown(){
assert_failure
assert_output --partial "not a known version"
}

# bats test_tags=slow
@test "no chaos version label if no chaos" {
_deploy_app

run $ABRA app upgrade "$TEST_APP_DOMAIN" \
--no-input --no-converge-checks
assert_success

run $ABRA app labels "$TEST_APP_DOMAIN" --no-input
assert_success
refute_output --regexp "coop-cloud.abra-test-recipe.$TEST_SERVER.chaos-version"
}
@ -31,24 +31,17 @@ teardown(){
--no-input --no-converge-checks
assert_success

# current deployment
assert_output --regexp 'VERSION.*' + "0.1.0+1.20.0"
assert_output --regexp 'CHAOS.*false'

# new deployment
assert_output --regexp 'VERSION.*' + "0.2.0+1.21.0"
assert_output --regexp 'CHAOS.*false'

# env version
assert_output --regexp 'CURRENT VERSION.*' + "0.1.0+1.20.0"
assert_output --regexp 'NEW VERSION.*' + "0.2.0+1.21.0"
assert_output --partial 'UPGRADE OVERVIEW'
assert_output --partial 'CURRENT DEPLOYMENT 0.1.0+1.20.0'
assert_output --partial 'ENV VERSION 0.1.0+1.20.0'
assert_output --partial 'NEW DEPLOYMENT 0.2.0+1.21.0'

run grep -q "TYPE=$TEST_RECIPE:0.2.0+1.21.0" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
}

@test "force upgrade" {
@test "force upgrade" {
run $ABRA app deploy "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
--no-input --no-converge-checks
assert_success

@ -57,19 +50,35 @@ teardown(){
--no-input --no-converge-checks --force
assert_success

# current deployment
assert_output --regexp 'VERSION.*' + "0.2.0+1.21.0"
assert_output --regexp 'CHAOS.*false'

# new deployment
assert_output --regexp 'VERSION.*' + "0.2.0+1.21.0"
assert_output --regexp 'CHAOS.*false'

# env version
assert_output --regexp 'CURRENT VERSION.*' + "0.2.0+1.21.0"
assert_output --regexp 'NEW VERSION.*' + "0.2.0+1.21.0"
assert_output --partial 'REDEPLOY OVERVIEW'
assert_output --partial 'CURRENT DEPLOYMENT 0.2.0+1.21.0'
assert_output --partial 'ENV VERSION 0.2.0+1.21.0'
assert_output --partial 'NEW DEPLOYMENT 0.2.0+1.21.0'

run grep -q "TYPE=$TEST_RECIPE:0.2.0+1.21.0" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
}

@test "app upgrade no .env version" {
latestRelease=$(_latest_release)

run $ABRA app deploy "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
--no-input --no-converge-checks
assert_success

_wipe_env_version

run $ABRA app upgrade "$TEST_APP_DOMAIN" \
--no-input --no-converge-checks --force
assert_success

assert_output --partial 'UPGRADE OVERVIEW'
assert_output --partial 'CURRENT DEPLOYMENT 0.2.0+1.21.0'
assert_output --partial 'ENV VERSION N/A'
assert_output --partial 'NEW DEPLOYMENT 0.3.1+1.21.0'

run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
"$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
assert_success
}
@ -30,6 +30,15 @@ _undeploy_app() {
assert_output --partial 'unknown'
}

_undeploy_app2() {
run $ABRA app undeploy "$1" --no-input

run $ABRA app ls --server "$TEST_SERVER" --status
assert_success
assert_output --partial "$1"
assert_output --partial 'unknown'
}

_rm_app() {
# NOTE(d1): not asserting outcomes on teardown here since some might fail
# depending on what the test created. all commands run through anyway

@ -38,6 +38,8 @@ _set_git_author() {
}

_git_commit() {
_set_git_author

run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" add .
assert_success

@ -60,3 +62,7 @@ _get_current_hash() {
_get_n_hash() {
echo $(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" show -s --format="%H" "HEAD~$1")
}

_git_status() {
echo $(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status --porcelain)
}
@ -1,7 +1,7 @@
#!/usr/bin/env bash

_latest_release(){
echo $(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" tag -l | tail -n 1)
echo $(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" tag -l --sort=v:refname | tail -n 1)
}

_fetch_recipe() {

@ -22,15 +22,6 @@ _reset_recipe(){
_fetch_recipe
}

_ensure_latest_version(){
latestRelease=$(_latest_release)

if [ ! $latestRelease = "$1" ]; then
echo "expected latest recipe version of '$1', saw: $latestRelease"
return 1
fi
}

_ensure_catalogue(){
if [[ ! -d "$ABRA_DIR/catalogue" ]]; then
run git clone https://git.coopcloud.tech/toolshed/recipes-catalogue-json.git $ABRA_DIR/catalogue
@ -25,15 +25,3 @@ teardown(){
run "$HOME/.local/bin/abra" -v
assert_output --partial 'beta'
}

# bats test_tags=slow
@test "install release candidate from script" {
skip "current RC is brokenly specified in the installer script"

run bash -c 'curl https://install.abra.coopcloud.tech | bash -s -- --rc'
assert_success

assert_exists "$HOME/.local/bin/abra"
run "$HOME/.local/bin/abra" -v
assert_output --partial '-rc'
}
@ -5,6 +5,16 @@ setup() {
_common_setup
}

teardown(){
run rm -rf "$ABRA_DIR/recipes/matrix-synapse"
assert_success
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE"

run rm -rf "$ABRA_DIR/recipes/git_coopcloud_tech_coop-cloud_matrix-synapse"
assert_success
assert_not_exists "$ABRA_DIR/recipes/git_coopcloud_tech_coop-cloud_matrix-synapse"
}

# bats test_tags=slow
@test "recipe fetch all" {
run rm -rf "$ABRA_DIR/recipes/matrix-synapse"

@ -35,3 +45,81 @@ setup() {
run $ABRA recipe fetch matrix-synapse --all
assert_failure
}

@test "do not refetch without --force" {
run $ABRA recipe fetch matrix-synapse
assert_success
assert_exists "$ABRA_DIR/recipes/matrix-synapse"

run $ABRA recipe fetch matrix-synapse
assert_output --partial "already fetched"
}

@test "refetch with --force" {
run $ABRA recipe fetch matrix-synapse
assert_success
assert_exists "$ABRA_DIR/recipes/matrix-synapse"

run $ABRA recipe fetch matrix-synapse --force
assert_success
refute_output --partial "already fetched"
}

@test "refetch with --force does not erase unstaged changes" {
run $ABRA recipe fetch matrix-synapse
assert_success
assert_exists "$ABRA_DIR/recipes/matrix-synapse"

run bash -c "echo foo >> $ABRA_DIR/recipes/matrix-synapse/foo"
assert_success
assert_exists "$ABRA_DIR/recipes/matrix-synapse/foo"

run $ABRA recipe fetch matrix-synapse --force
assert_success
assert_exists "$ABRA_DIR/recipes/matrix-synapse"
assert_exists "$ABRA_DIR/recipes/matrix-synapse/foo"
}

@test "fetch with --ssh" {
run $ABRA recipe fetch matrix-synapse --ssh
assert_success
assert_exists "$ABRA_DIR/recipes/matrix-synapse"

run git -C "$ABRA_DIR/recipes/matrix-synapse" remote -v
assert_success
assert_output --partial "ssh://"
}

@test "re-fetch with --ssh/--force" {
run $ABRA recipe fetch matrix-synapse
assert_success
assert_exists "$ABRA_DIR/recipes/matrix-synapse"

run git -C "$ABRA_DIR/recipes/matrix-synapse" remote -v
assert_success
assert_output --partial "https://"

run $ABRA recipe fetch matrix-synapse --ssh --force
assert_success
assert_exists "$ABRA_DIR/recipes/matrix-synapse"

run git -C "$ABRA_DIR/recipes/matrix-synapse" remote -v
assert_success
assert_output --partial "ssh://"
}

@test "fetch remote recipe" {
run $ABRA recipe fetch git.coopcloud.tech/coop-cloud/matrix-synapse
assert_success
assert_exists "$ABRA_DIR/recipes/git_coopcloud_tech_coop-cloud_matrix-synapse"
}

@test "remote recipe do not refetch without --force" {
run $ABRA recipe fetch git.coopcloud.tech/coop-cloud/matrix-synapse
assert_success
assert_exists "$ABRA_DIR/recipes/git_coopcloud_tech_coop-cloud_matrix-synapse"

run $ABRA recipe fetch git.coopcloud.tech/coop-cloud/matrix-synapse
assert_success
assert_output --partial "already fetched"
}
@ -41,6 +41,9 @@ teardown(){
assert_success
assert_output --partial 'foo'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run $ABRA recipe lint "$TEST_RECIPE"
assert_failure
assert_output --partial 'locally unstaged changes'

@ -58,6 +61,9 @@ teardown(){
assert_success
assert_output --partial 'foo'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run $ABRA recipe lint "$TEST_RECIPE" --chaos
assert_success
@ -68,3 +68,27 @@ teardown(){
assert_output --partial 'fooUser'
assert_output --partial 'foo@example.com'
}

# bats test_tags=slow
@test "recipe new, app new, no releases, latest commit" {
recipeName="foobar"

run $ABRA recipe new "$recipeName"
assert_success
assert_exists "$ABRA_DIR/recipes/$recipeName"

currentHash=$(git -C "$ABRA_DIR/recipes/$recipeName" show -s --format="%H")
domain="$recipeName.$TEST_APP_SERVER"

run $ABRA app new "$recipeName" \
--no-input \
--server "$TEST_SERVER" \
--domain "$domain"
assert_success
assert_output --partial "version: ${currentHash:0:8}"
assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$domain.env"

run grep -q "TYPE=$recipeName:${currentHash:0:8}" \
"$ABRA_DIR/servers/$TEST_SERVER/$domain.env"
assert_success
}
@ -20,6 +20,7 @@ setup(){

teardown() {
_reset_recipe
_reset_tags
}

@test "validate recipe argument" {

@ -31,8 +32,6 @@ teardown() {
}

@test "release patch bump" {
_ensure_latest_version "0.3.0+1.21.0"

run $ABRA recipe upgrade "$TEST_RECIPE" --no-input --patch
assert_success

@ -40,6 +39,12 @@ teardown() {
assert_success
assert_output --partial 'image: nginx:1.21.6'

# NOTE(d1): ensure the latest tag is the one we expect
_remove_tags
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" tag \
-a "0.3.0+1.21.0" -m "fake: 0.3.0+1.21.0"
assert_success

run $ABRA recipe sync "$TEST_RECIPE" --no-input --patch
assert_success
assert_output --partial 'synced label'

@ -58,8 +63,6 @@ teardown() {
}

@test "release minor bump" {
_ensure_latest_version "0.3.0+1.21.0"

run $ABRA recipe upgrade "$TEST_RECIPE" --no-input --minor
assert_success

@ -67,6 +70,12 @@ teardown() {
assert_success
assert_output --regexp 'image: nginx:1.2.*'

# NOTE(d1): ensure the latest tag is the one we expect
_remove_tags
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" tag \
-a "0.3.0+1.21.0" -m "fake: 0.3.0+1.21.0"
assert_success

run $ABRA recipe sync "$TEST_RECIPE" --no-input --minor
assert_success
assert_output --partial 'synced label'

@ -102,8 +111,6 @@ teardown() {
}

@test "release with next release note" {
_ensure_latest_version "0.3.0+1.21.0"

_mkfile "$ABRA_DIR/recipes/$TEST_RECIPE/release/next" "those are some release notes for the next release"

run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" add release/next
@ -40,6 +40,9 @@ teardown(){
run $ABRA recipe sync "$TEST_RECIPE" --no-input --patch
assert_success

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "M compose.yml ?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_success
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"

@ -58,8 +61,6 @@ teardown(){
}

@test "sync patch label bump" {
_ensure_latest_version "0.3.0+1.21.0"

run $ABRA recipe upgrade "$TEST_RECIPE" --no-input --patch
assert_success

@ -67,6 +68,12 @@ teardown(){
assert_success
assert_output --partial 'image: nginx:1.21.6'

# NOTE(d1): ensure the latest tag is the one we expect
_remove_tags
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" tag \
-a "0.3.0+1.21.0" -m "fake: 0.3.0+1.21.0"
assert_success

run $ABRA recipe sync "$TEST_RECIPE" --no-input --patch
assert_success

@ -76,8 +83,6 @@ teardown(){
}

@test "sync minor label bump" {
_ensure_latest_version "0.3.0+1.21.0"

run $ABRA recipe upgrade "$TEST_RECIPE" --no-input --minor
assert_success

@ -85,6 +90,12 @@ teardown(){
assert_success
assert_output --regexp 'image: nginx:1.2.*'

# NOTE(d1): ensure the latest tag is the one we expect
_remove_tags
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" tag \
-a "0.3.0+1.21.0" -m "fake: 0.3.0+1.21.0"
assert_success

run $ABRA recipe sync "$TEST_RECIPE" --no-input --minor
assert_success
@ -54,6 +54,9 @@ teardown(){
assert_failure
assert_output --partial 'locally unstaged changes'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@ -26,16 +26,3 @@ teardown(){
run "$HOME/.local/bin/abra" -v
assert_output --partial 'beta'
}

# bats test_tags=slow
@test "abra upgrade release candidate" {
skip "TODO: RC publishing broke somehow, needs investigation"

run $ABRA upgrade --rc
assert_success
assert_output --partial 'Public interest infrastructure'

assert_exists "$HOME/.local/bin/abra"
run "$HOME/.local/bin/abra" -v
assert_output --partial '-rc'
}
8 vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go generated vendored

@ -49,16 +49,16 @@ func ShiftNBytesLeft(dst, x []byte, n int) {
dst = append(dst, make([]byte, n/8)...)
}

// XorBytesMut assumes equal input length, replaces X with X XOR Y
// XorBytesMut replaces X with X XOR Y. len(X) must be >= len(Y).
func XorBytesMut(X, Y []byte) {
for i := 0; i < len(X); i++ {
for i := 0; i < len(Y); i++ {
X[i] ^= Y[i]
}
}

// XorBytes assumes equal input length, puts X XOR Y into Z
// XorBytes puts X XOR Y into Z. len(Z) and len(X) must be >= len(Y).
func XorBytes(Z, X, Y []byte) {
for i := 0; i < len(X); i++ {
for i := 0; i < len(Y); i++ {
Z[i] = X[i] ^ Y[i]
}
}
55 vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go generated vendored

@ -109,8 +109,10 @@ func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte {
if len(nonce) > o.nonceSize {
panic("crypto/ocb: Incorrect nonce length given to OCB")
}
ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize)
o.crypt(enc, out, nonce, adata, plaintext)
sep := len(plaintext)
ret, out := byteutil.SliceForAppend(dst, sep+o.tagSize)
tag := o.crypt(enc, out[:sep], nonce, adata, plaintext)
copy(out[sep:], tag)
return ret
}

@ -122,12 +124,10 @@ func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
return nil, ocbError("Ciphertext shorter than tag length")
}
sep := len(ciphertext) - o.tagSize
ret, out := byteutil.SliceForAppend(dst, len(ciphertext))
ret, out := byteutil.SliceForAppend(dst, sep)
ciphertextData := ciphertext[:sep]
tag := ciphertext[sep:]
o.crypt(dec, out, nonce, adata, ciphertextData)
if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 {
ret = ret[:sep]
tag := o.crypt(dec, out, nonce, adata, ciphertextData)
if subtle.ConstantTimeCompare(tag, ciphertext[sep:]) == 1 {
return ret, nil
}
for i := range out {

@ -137,7 +137,8 @@ func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
}

// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt)
// function. It returns the resulting plain/ciphertext with the tag appended.
// function. It writes the resulting plain/ciphertext into Y and returns
// the tag.
func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte {
//
// Consider X as a sequence of 128-bit blocks

@ -194,13 +195,14 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte {
byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))])
blockX := X[i*blockSize : (i+1)*blockSize]
blockY := Y[i*blockSize : (i+1)*blockSize]
byteutil.XorBytes(blockY, blockX, offset)
switch instruction {
case enc:
byteutil.XorBytesMut(checksum, blockX)
byteutil.XorBytes(blockY, blockX, offset)
o.block.Encrypt(blockY, blockY)
byteutil.XorBytesMut(blockY, offset)
byteutil.XorBytesMut(checksum, blockX)
case dec:
byteutil.XorBytes(blockY, blockX, offset)
o.block.Decrypt(blockY, blockY)
byteutil.XorBytesMut(blockY, offset)
byteutil.XorBytesMut(checksum, blockY)

@ -216,31 +218,24 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte {
o.block.Encrypt(pad, offset)
chunkX := X[blockSize*m:]
chunkY := Y[blockSize*m : len(X)]
byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)])
// P_* || bit(1) || zeroes(127) - len(P_*)
switch instruction {
case enc:
paddedY := append(chunkX, byte(128))
paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...)
byteutil.XorBytesMut(checksum, paddedY)
byteutil.XorBytesMut(checksum, chunkX)
checksum[len(chunkX)] ^= 128
byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)])
// P_* || bit(1) || zeroes(127) - len(P_*)
case dec:
paddedX := append(chunkY, byte(128))
paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...)
byteutil.XorBytesMut(checksum, paddedX)
byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)])
// P_* || bit(1) || zeroes(127) - len(P_*)
byteutil.XorBytesMut(checksum, chunkY)
checksum[len(chunkY)] ^= 128
}
byteutil.XorBytes(tag, checksum, offset)
byteutil.XorBytesMut(tag, o.mask.lDol)
o.block.Encrypt(tag, tag)
byteutil.XorBytesMut(tag, o.hash(adata))
copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize])
} else {
byteutil.XorBytes(tag, checksum, offset)
byteutil.XorBytesMut(tag, o.mask.lDol)
o.block.Encrypt(tag, tag)
byteutil.XorBytesMut(tag, o.hash(adata))
copy(Y[blockSize*m:], tag[:o.tagSize])
}
return Y
byteutil.XorBytes(tag, checksum, offset)
byteutil.XorBytesMut(tag, o.mask.lDol)
o.block.Encrypt(tag, tag)
byteutil.XorBytesMut(tag, o.hash(adata))
return tag[:o.tagSize]
}

// This hash function is used to compute the tag. Per design, on empty input it
12 vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go generated vendored

@ -7,6 +7,7 @@ package armor
import (
"encoding/base64"
"io"
"sort"
)

var armorHeaderSep = []byte(": ")

@ -159,8 +160,15 @@ func encode(out io.Writer, blockType string, headers map[string]string, checksum
return
}

for k, v := range headers {
err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
keys := make([]string, len(headers))
i := 0
for k := range headers {
keys[i] = k
i++
}
sort.Strings(keys)
for _, k := range keys {
err = writeSlices(out, []byte(k), armorHeaderSep, []byte(headers[k]), newline)
if err != nil {
return
}
20 vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go generated vendored

@ -6,6 +6,7 @@
package errors // import "github.com/ProtonMail/go-crypto/openpgp/errors"

import (
"fmt"
"strconv"
)

@ -178,3 +179,22 @@ type ErrMalformedMessage string
func (dke ErrMalformedMessage) Error() string {
return "openpgp: malformed message " + string(dke)
}

// ErrEncryptionKeySelection is returned if encryption key selection fails (v2 API).
type ErrEncryptionKeySelection struct {
PrimaryKeyId string
PrimaryKeyErr error
EncSelectionKeyId *string
EncSelectionErr error
}

func (eks ErrEncryptionKeySelection) Error() string {
prefix := fmt.Sprintf("openpgp: key selection for primary key %s:", eks.PrimaryKeyId)
if eks.PrimaryKeyErr != nil {
return fmt.Sprintf("%s invalid primary key: %s", prefix, eks.PrimaryKeyErr)
}
if eks.EncSelectionKeyId != nil {
return fmt.Sprintf("%s invalid encryption key %s: %s", prefix, *eks.EncSelectionKeyId, eks.EncSelectionErr)
}
return fmt.Sprintf("%s no encryption key: %s", prefix, eks.EncSelectionErr)
}
120 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go generated vendored

@ -3,7 +3,6 @@
package packet

import (
"bytes"
"crypto/cipher"
"encoding/binary"
"io"

@ -15,12 +14,11 @@ import (
type aeadCrypter struct {
aead cipher.AEAD
chunkSize int
initialNonce []byte
nonce []byte
associatedData []byte // Chunk-independent associated data
chunkIndex []byte // Chunk counter
packetTag packetType // SEIP packet (v2) or AEAD Encrypted Data packet
bytesProcessed int // Amount of plaintext bytes encrypted/decrypted
buffer bytes.Buffer // Buffered bytes across chunks
}

// computeNonce takes the incremental index and computes an eXclusive OR with

@ -28,12 +26,12 @@ type aeadCrypter struct {
// 5.16.1 and 5.16.2). It returns the resulting nonce.
func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected {
return append(wo.initialNonce, wo.chunkIndex...)
return wo.nonce
}

nonce = make([]byte, len(wo.initialNonce))
copy(nonce, wo.initialNonce)
offset := len(wo.initialNonce) - 8
nonce = make([]byte, len(wo.nonce))
copy(nonce, wo.nonce)
offset := len(wo.nonce) - 8
for i := 0; i < 8; i++ {
nonce[i+offset] ^= wo.chunkIndex[i]
}

@ -62,8 +60,9 @@ func (wo *aeadCrypter) incrementIndex() error {
type aeadDecrypter struct {
aeadCrypter // Embedded ciphertext opener
reader io.Reader // 'reader' is a partialLengthReader
chunkBytes []byte
peekedBytes []byte // Used to detect last chunk
eof bool
buffer []byte // Buffered decrypted bytes
}

// Read decrypts bytes and reads them into dst. It decrypts when necessary and

@ -71,59 +70,44 @@ type aeadDecrypter struct {
// and an error.
func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) {
// Return buffered plaintext bytes from previous calls
if ar.buffer.Len() > 0 {
return ar.buffer.Read(dst)
}

// Return EOF if we've previously validated the final tag
if ar.eof {
return 0, io.EOF
if len(ar.buffer) > 0 {
n = copy(dst, ar.buffer)
ar.buffer = ar.buffer[n:]
return
}

// Read a chunk
tagLen := ar.aead.Overhead()
cipherChunkBuf := new(bytes.Buffer)
_, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen))
cipherChunk := cipherChunkBuf.Bytes()
if errRead != nil && errRead != io.EOF {
copy(ar.chunkBytes, ar.peekedBytes) // Copy bytes peeked in previous chunk or in initialization
bytesRead, errRead := io.ReadFull(ar.reader, ar.chunkBytes[tagLen:])
if errRead != nil && errRead != io.EOF && errRead != io.ErrUnexpectedEOF {
return 0, errRead
}

if len(cipherChunk) > 0 {
decrypted, errChunk := ar.openChunk(cipherChunk)
if bytesRead > 0 {
ar.peekedBytes = ar.chunkBytes[bytesRead:bytesRead+tagLen]

decrypted, errChunk := ar.openChunk(ar.chunkBytes[:bytesRead])
if errChunk != nil {
return 0, errChunk
}

// Return decrypted bytes, buffering if necessary
if len(dst) < len(decrypted) {
n = copy(dst, decrypted[:len(dst)])
ar.buffer.Write(decrypted[len(dst):])
} else {
n = copy(dst, decrypted)
}
n = copy(dst, decrypted)
ar.buffer = decrypted[n:]
return
}

// Check final authentication tag
if errRead == io.EOF {
errChunk := ar.validateFinalTag(ar.peekedBytes)
if errChunk != nil {
return n, errChunk
}
ar.eof = true // Mark EOF for when we've returned all buffered data
}
return
return 0, io.EOF
}

// Close is noOp. The final authentication tag of the stream was already
// checked in the last Read call. In the future, this function could be used to
// wipe the reader and peeked, decrypted bytes, if necessary.
// Close checks the final authentication tag of the stream.
// In the future, this function could also be used to wipe the reader
// and peeked & decrypted bytes, if necessary.
func (ar *aeadDecrypter) Close() (err error) {
if !ar.eof {
errChunk := ar.validateFinalTag(ar.peekedBytes)
if errChunk != nil {
return errChunk
}
errChunk := ar.validateFinalTag(ar.peekedBytes)
if errChunk != nil {
return errChunk
}
return nil
}

@ -132,20 +116,13 @@ func (ar *aeadDecrypter) Close() (err error) {
// the underlying plaintext and an error. It accesses peeked bytes from next
// chunk, to identify the last chunk and decrypt/validate accordingly.
func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) {
tagLen := ar.aead.Overhead()
// Restore carried bytes from last call
chunkExtra := append(ar.peekedBytes, data...)
// 'chunk' contains encrypted bytes, followed by an authentication tag.
chunk := chunkExtra[:len(chunkExtra)-tagLen]
ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:]

adata := ar.associatedData
if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted {
adata = append(ar.associatedData, ar.chunkIndex...)
}

nonce := ar.computeNextNonce()
plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata)
plainChunk, err := ar.aead.Open(data[:0:len(data)], nonce, data, adata)
if err != nil {
return nil, errors.ErrAEADTagVerification
}

@ -183,27 +160,29 @@ func (ar *aeadDecrypter) validateFinalTag(tag []byte) error {
type aeadEncrypter struct {
aeadCrypter // Embedded plaintext sealer
writer io.WriteCloser // 'writer' is a partialLengthWriter
chunkBytes []byte
offset int
}

// Write encrypts and writes bytes. It encrypts when necessary and buffers extra
// plaintext bytes for next call. When the stream is finished, Close() MUST be
// called to append the final tag.
func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) {
// Append plaintextBytes to existing buffered bytes
n, err = aw.buffer.Write(plaintextBytes)
if err != nil {
return n, err
}
// Encrypt and write chunks
for aw.buffer.Len() >= aw.chunkSize {
plainChunk := aw.buffer.Next(aw.chunkSize)
encryptedChunk, err := aw.sealChunk(plainChunk)
if err != nil {
return n, err
}
_, err = aw.writer.Write(encryptedChunk)
if err != nil {
return n, err
for n != len(plaintextBytes) {
copied := copy(aw.chunkBytes[aw.offset:aw.chunkSize], plaintextBytes[n:])
n += copied
aw.offset += copied

if aw.offset == aw.chunkSize {
encryptedChunk, err := aw.sealChunk(aw.chunkBytes[:aw.offset])
if err != nil {
return n, err
}
_, err = aw.writer.Write(encryptedChunk)
if err != nil {
return n, err
}
aw.offset = 0
}
}
return

@ -215,9 +194,8 @@ func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) {
func (aw *aeadEncrypter) Close() (err error) {
// Encrypt and write a chunk if there's buffered data left, or if we haven't
// written any chunks yet.
if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 {
plainChunk := aw.buffer.Bytes()
lastEncryptedChunk, err := aw.sealChunk(plainChunk)
if aw.offset > 0 || aw.bytesProcessed == 0 {
lastEncryptedChunk, err := aw.sealChunk(aw.chunkBytes[:aw.offset])
if err != nil {
return err
}

@ -263,7 +241,7 @@ func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) {
}

nonce := aw.computeNextNonce()
encrypted := aw.aead.Seal(nil, nonce, data, adata)
encrypted := aw.aead.Seal(data[:0], nonce, data, adata)
aw.bytesProcessed += len(data)
if err := aw.aeadCrypter.incrementIndex(); err != nil {
return nil, err
12 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go generated vendored

@ -65,24 +65,28 @@ func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) {
blockCipher := ae.cipher.new(key)
aead := ae.mode.new(blockCipher)
// Carry the first tagLen bytes
chunkSize := decodeAEADChunkSize(ae.chunkSizeByte)
tagLen := ae.mode.TagLength()
peekedBytes := make([]byte, tagLen)
chunkBytes := make([]byte, chunkSize+tagLen*2)
peekedBytes := chunkBytes[chunkSize+tagLen:]
n, err := io.ReadFull(ae.Contents, peekedBytes)
if n < tagLen || (err != nil && err != io.EOF) {
return nil, errors.AEADError("Not enough data to decrypt:" + err.Error())
}
chunkSize := decodeAEADChunkSize(ae.chunkSizeByte)

return &aeadDecrypter{
aeadCrypter: aeadCrypter{
aead: aead,
chunkSize: chunkSize,
initialNonce: ae.initialNonce,
nonce: ae.initialNonce,
associatedData: ae.associatedData(),
chunkIndex: make([]byte, 8),
packetTag: packetTypeAEADEncrypted,
},
reader: ae.Contents,
peekedBytes: peekedBytes}, nil
chunkBytes: chunkBytes,
peekedBytes: peekedBytes,
}, nil
}

// associatedData for chunks: tag, version, cipher, mode, chunk size byte
12 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go generated vendored

@ -173,6 +173,11 @@ type Config struct {
// weaknesses in the hash algo, potentially hindering e.g. some chosen-prefix attacks.
// The default behavior, when the config or flag is nil, is to enable the feature.
NonDeterministicSignaturesViaNotation *bool

// InsecureAllowAllKeyFlagsWhenMissing determines how a key without valid key flags is handled.
// When set to true, a key without flags is treated as if all flags are enabled.
// This behavior is consistent with GPG.
InsecureAllowAllKeyFlagsWhenMissing bool
}

func (c *Config) Random() io.Reader {

@ -403,6 +408,13 @@ func (c *Config) RandomizeSignaturesViaNotation() bool {
return *c.NonDeterministicSignaturesViaNotation
}

func (c *Config) AllowAllKeyFlagsWhenMissing() bool {
if c == nil {
return false
}
return c.InsecureAllowAllKeyFlagsWhenMissing
}

// BoolPointer is a helper function to set a boolean pointer in the Config.
// e.g., config.CheckPacketSequence = BoolPointer(true)
func BoolPointer(value bool) *bool {
7 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go generated vendored

@ -1048,12 +1048,17 @@ func (pk *PublicKey) VerifyDirectKeySignature(sig *Signature) (err error) {
// KeyIdString returns the public key's fingerprint in capital hex
// (e.g. "6C7EE1B8621CC013").
func (pk *PublicKey) KeyIdString() string {
return fmt.Sprintf("%X", pk.Fingerprint[12:20])
return fmt.Sprintf("%016X", pk.KeyId)
}

// KeyIdShortString returns the short form of public key's fingerprint
// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
// This function will return the full key id for v5 and v6 keys
// since the short key id is undefined for them.
func (pk *PublicKey) KeyIdShortString() string {
if pk.Version >= 5 {
return pk.KeyIdString()
}
return fmt.Sprintf("%X", pk.Fingerprint[16:20])
}
4 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go generated vendored

@ -1288,7 +1288,9 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp
if sig.IssuerKeyId != nil && sig.Version == 4 {
keyId := make([]byte, 8)
binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId})
// Note: making this critical breaks RPM <=4.16.
// See: https://github.com/ProtonMail/go-crypto/issues/263
subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
}
// Notation Data
for _, notation := range sig.Notations {
27 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go generated vendored

@ -70,8 +70,10 @@ func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e

aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData())
// Carry the first tagLen bytes
chunkSize := decodeAEADChunkSize(se.ChunkSizeByte)
tagLen := se.Mode.TagLength()
peekedBytes := make([]byte, tagLen)
chunkBytes := make([]byte, chunkSize+tagLen*2)
peekedBytes := chunkBytes[chunkSize+tagLen:]
n, err := io.ReadFull(se.Contents, peekedBytes)
if n < tagLen || (err != nil && err != io.EOF) {
return nil, errors.StructuralError("not enough data to decrypt:" + err.Error())

@ -81,12 +83,13 @@ func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e
aeadCrypter: aeadCrypter{
aead: aead,
chunkSize: decodeAEADChunkSize(se.ChunkSizeByte),
initialNonce: nonce,
nonce: nonce,
associatedData: se.associatedData(),
chunkIndex: make([]byte, 8),
chunkIndex: nonce[len(nonce)-8:],
packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected,
},
reader: se.Contents,
chunkBytes: chunkBytes,
peekedBytes: peekedBytes,
}, nil
}

@ -130,16 +133,20 @@ func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite

aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix)

chunkSize := decodeAEADChunkSize(chunkSizeByte)
tagLen := aead.Overhead()
chunkBytes := make([]byte, chunkSize+tagLen)
return &aeadEncrypter{
aeadCrypter: aeadCrypter{
aead: aead,
chunkSize: decodeAEADChunkSize(chunkSizeByte),
chunkSize: chunkSize,
associatedData: prefix,
chunkIndex: make([]byte, 8),
initialNonce: nonce,
nonce: nonce,
chunkIndex: nonce[len(nonce)-8:],
packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected,
},
writer: ciphertext,
writer: ciphertext,
chunkBytes: chunkBytes,
}, nil
}

@ -149,10 +156,10 @@ func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inpu
encryptionKey := make([]byte, c.KeySize())
_, _ = readFull(hkdfReader, encryptionKey)

// Last 64 bits of nonce are the counter
nonce = make([]byte, mode.IvLength()-8)
nonce = make([]byte, mode.IvLength())

_, _ = readFull(hkdfReader, nonce)
// Last 64 bits of nonce are the counter
_, _ = readFull(hkdfReader, nonce[:len(nonce)-8])

blockCipher := c.new(encryptionKey)
aead = mode.new(blockCipher)
1 vendor/github.com/charmbracelet/bubbletea/.gitattributes generated vendored Normal file

@ -0,0 +1 @@
*.golden -text
23 vendor/github.com/charmbracelet/bubbletea/.gitignore generated vendored Normal file

@ -0,0 +1,23 @@
.DS_Store
.envrc

examples/fullscreen/fullscreen
examples/help/help
examples/http/http
examples/list-default/list-default
examples/list-fancy/list-fancy
examples/list-simple/list-simple
examples/mouse/mouse
examples/pager/pager
examples/progress-download/color_vortex.blend
examples/progress-download/progress-download
examples/simple/simple
examples/spinner/spinner
examples/textinput/textinput
examples/textinputs/textinputs
examples/views/views
tutorials/basics/basics
tutorials/commands/commands
.idea
coverage.txt
dist/
@ -1,5 +1,6 @@
run:
tests: false
issues-exit-code: 0

issues:
include:

@ -36,5 +37,4 @@ linters:
- govet
- ineffassign
- staticcheck
- typecheck
- unused
28 vendor/github.com/charmbracelet/bubbletea/.golangci.yml generated vendored Normal file

@ -0,0 +1,28 @@
run:
tests: false

issues:
include:
- EXC0001
- EXC0005
- EXC0011
- EXC0012
- EXC0013

max-issues-per-linter: 0
max-same-issues: 0

linters:
enable:
- bodyclose
- gofumpt
- goimports
- gosec
- nilerr
- revive
- rowserrcheck
- sqlclosecheck
- tparallel
- unconvert
- unparam
- whitespace
5 vendor/github.com/charmbracelet/bubbletea/.goreleaser.yml generated vendored Normal file

@ -0,0 +1,5 @@
# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json
version: 2
includes:
  - from_url:
      url: charmbracelet/meta/main/goreleaser-lib.yaml
Some files were not shown because too many files have changed in this diff.