Compare commits


1 Commit

c23d2d34b9  feat: add git-user and git-email flags to recipe new  2024-06-22 17:36:24 +02:00
(continuous-integration/drone/pr: build passing)
32 changed files with 509 additions and 650 deletions

View File

@ -29,7 +29,7 @@ steps:
event: tag
- name: release
image: goreleaser/goreleaser:v1.24.0
image: goreleaser/goreleaser:v1.18.2
environment:
GITEA_TOKEN:
from_secret: goreleaser_gitea_token

View File

@ -51,6 +51,12 @@ builds:
- "-X 'main.Commit={{ .Commit }}'"
- "-X 'main.Version={{ .Version }}'"
archives:
- replacements:
386: i386
amd64: x86_64
format: binary
checksum:
name_template: "checksums.txt"

View File

@ -1,414 +1,296 @@
package app
import (
"archive/tar"
"context"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/klauspost/pgzip"
"coopcloud.tech/abra/pkg/recipe"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
type backupConfig struct {
preHookCmd string
postHookCmd string
backupPaths []string
var snapshot string
var snapshotFlag = &cli.StringFlag{
Name: "snapshot, s",
Usage: "Lists specific snapshot",
Destination: &snapshot,
}
var appBackupCommand = cli.Command{
Name: "backup",
Aliases: []string{"bk"},
Usage: "Run app backup",
ArgsUsage: "<domain> [<service>]",
var includePath string
var includePathFlag = &cli.StringFlag{
Name: "path, p",
Usage: "Include path",
Destination: &includePath,
}
var resticRepo string
var resticRepoFlag = &cli.StringFlag{
Name: "repo, r",
Usage: "Restic repository",
Destination: &resticRepo,
}
var appBackupListCommand = cli.Command{
Name: "list",
Aliases: []string{"ls"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
internal.ChaosFlag,
snapshotFlag,
includePathFlag,
},
Before: internal.SubCommandBefore,
Usage: "List all backups",
BashComplete: autocomplete.AppNameComplete,
Description: `
Run an app backup.
A backup command and pre/post hook commands are defined in the recipe
configuration. Abra reads this configuration and runs the commands in the context
of the deployed services. Pass <service> if you only want to back up a single
service. All backups are placed in the ~/.abra/backups directory.
A single backup file is produced for all backup paths specified for a service.
If we have the following backup configuration:
- "backupbot.backup.path=/var/lib/foo,/var/lib/bar"
And we run "abra app backup example.com app", Abra will produce a file that
looks like:
~/.abra/backups/example_com_app_609341138.tar.gz
This file is a compressed archive which contains all backup paths. To see paths, run:
tar -tf ~/.abra/backups/example_com_app_609341138.tar.gz
(Make sure to change the name of the backup file)
This single file can be used to restore your app. See "abra app restore" for more.
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
recipe, err := recipePkg.Get(app.Recipe, internal.Offline)
if err != nil {
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Chaos {
if err := recipePkg.EnsureIsClean(app.Recipe); err != nil {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipePkg.EnsureUpToDate(app.Recipe); err != nil {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipePkg.EnsureLatest(app.Recipe); err != nil {
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
backupConfigs := make(map[string]backupConfig)
for _, service := range recipe.Config.Services {
if backupsEnabled, ok := service.Deploy.Labels["backupbot.backup"]; ok {
if backupsEnabled == "true" {
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), service.Name)
bkConfig := backupConfig{}
logrus.Debugf("backup config detected for %s", fullServiceName)
if paths, ok := service.Deploy.Labels["backupbot.backup.path"]; ok {
logrus.Debugf("detected backup paths for %s: %s", fullServiceName, paths)
bkConfig.backupPaths = strings.Split(paths, ",")
}
if preHookCmd, ok := service.Deploy.Labels["backupbot.backup.pre-hook"]; ok {
logrus.Debugf("detected pre-hook command for %s: %s", fullServiceName, preHookCmd)
bkConfig.preHookCmd = preHookCmd
}
if postHookCmd, ok := service.Deploy.Labels["backupbot.backup.post-hook"]; ok {
logrus.Debugf("detected post-hook command for %s: %s", fullServiceName, postHookCmd)
bkConfig.postHookCmd = postHookCmd
}
backupConfigs[service.Name] = bkConfig
}
}
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
serviceName := c.Args().Get(1)
if serviceName != "" {
backupConfig, ok := backupConfigs[serviceName]
if !ok {
logrus.Fatalf("no backup config for %s? does %s exist?", serviceName, serviceName)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
logrus.Fatal(err)
}
logrus.Infof("running backup for the %s service", serviceName)
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
logrus.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if includePath != "" {
logrus.Debugf("including INCLUDE_PATH=%s in backupbot exec invocation", includePath)
execEnv = append(execEnv, fmt.Sprintf("INCLUDE_PATH=%s", includePath))
}
if err := runBackup(cl, app, serviceName, backupConfig); err != nil {
logrus.Fatal(err)
}
} else {
if len(backupConfigs) == 0 {
logrus.Fatalf("no backup configs discovered for %s?", app.Name)
}
for serviceName, backupConfig := range backupConfigs {
logrus.Infof("running backup for the %s service", serviceName)
if err := runBackup(cl, app, serviceName, backupConfig); err != nil {
logrus.Fatal(err)
}
}
if err := internal.RunBackupCmdRemote(cl, "ls", targetContainer.ID, execEnv); err != nil {
logrus.Fatal(err)
}
return nil
},
}
// TimeStamp generates a file name friendly timestamp.
func TimeStamp() string {
ts := time.Now().UTC().Format(time.RFC3339)
return strings.Replace(ts, ":", "-", -1)
}
var appBackupDownloadCommand = cli.Command{
Name: "download",
Aliases: []string{"d"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
snapshotFlag,
includePathFlag,
},
Before: internal.SubCommandBefore,
Usage: "Download a backup",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
// runBackup does the actual backup logic.
func runBackup(cl *dockerClient.Client, app config.App, serviceName string, bkConfig backupConfig) error {
if len(bkConfig.backupPaths) == 0 {
return fmt.Errorf("backup paths are empty for %s?", serviceName)
}
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, true)
if err != nil {
return err
}
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)
if bkConfig.preHookCmd != "" {
splitCmd := internal.SafeSplit(bkConfig.preHookCmd)
logrus.Debugf("split pre-hook command for %s into %s", fullServiceName, splitCmd)
preHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &preHookExecOpts); err != nil {
return fmt.Errorf("failed to run %s on %s: %s", bkConfig.preHookCmd, targetContainer.ID, err.Error())
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
logrus.Infof("succesfully ran %s pre-hook command: %s", fullServiceName, bkConfig.preHookCmd)
}
var tempBackupPaths []string
for _, remoteBackupPath := range bkConfig.backupPaths {
sanitisedPath := strings.ReplaceAll(remoteBackupPath, "/", "_")
localBackupPath := filepath.Join(config.BACKUP_DIR, fmt.Sprintf("%s%s_%s.tar.gz", fullServiceName, sanitisedPath, TimeStamp()))
logrus.Debugf("temporarily backing up %s:%s to %s", fullServiceName, remoteBackupPath, localBackupPath)
logrus.Infof("backing up %s:%s", fullServiceName, remoteBackupPath)
content, _, err := cl.CopyFromContainer(context.Background(), targetContainer.ID, remoteBackupPath)
cl, err := client.New(app.Server)
if err != nil {
logrus.Debugf("failed to copy %s from container: %s", remoteBackupPath, err.Error())
if err := cleanupTempArchives(tempBackupPaths); err != nil {
return fmt.Errorf("failed to clean up temporary archives: %s", err.Error())
logrus.Fatal(err)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
logrus.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
logrus.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if includePath != "" {
logrus.Debugf("including INCLUDE_PATH=%s in backupbot exec invocation", includePath)
execEnv = append(execEnv, fmt.Sprintf("INCLUDE_PATH=%s", includePath))
}
if err := internal.RunBackupCmdRemote(cl, "download", targetContainer.ID, execEnv); err != nil {
logrus.Fatal(err)
}
remoteBackupDir := "/tmp/backup.tar.gz"
currentWorkingDir := "."
if err = CopyFromContainer(cl, targetContainer.ID, remoteBackupDir, currentWorkingDir); err != nil {
logrus.Fatal(err)
}
fmt.Println("backup successfully downloaded to current working directory")
return nil
},
}
var appBackupCreateCommand = cli.Command{
Name: "create",
Aliases: []string{"c"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
resticRepoFlag,
},
Before: internal.SubCommandBefore,
Usage: "Create a new backup",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
return fmt.Errorf("failed to copy %s from container: %s", remoteBackupPath, err.Error())
}
defer content.Close()
_, srcBase := archive.SplitPathDirEntry(remoteBackupPath)
preArchive := archive.RebaseArchiveEntries(content, srcBase, remoteBackupPath)
if err := copyToFile(localBackupPath, preArchive); err != nil {
logrus.Debugf("failed to create tar archive (%s): %s", localBackupPath, err.Error())
if err := cleanupTempArchives(tempBackupPaths); err != nil {
return fmt.Errorf("failed to clean up temporary archives: %s", err.Error())
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
return fmt.Errorf("failed to create tar archive (%s): %s", localBackupPath, err.Error())
}
tempBackupPaths = append(tempBackupPaths, localBackupPath)
}
logrus.Infof("compressing and merging archives...")
if err := mergeArchives(tempBackupPaths, fullServiceName); err != nil {
logrus.Debugf("failed to merge archive files: %s", err.Error())
if err := cleanupTempArchives(tempBackupPaths); err != nil {
return fmt.Errorf("failed to clean up temporary archives: %s", err.Error())
}
return fmt.Errorf("failed to merge archive files: %s", err.Error())
}
if err := cleanupTempArchives(tempBackupPaths); err != nil {
return fmt.Errorf("failed to clean up temporary archives: %s", err.Error())
}
if bkConfig.postHookCmd != "" {
splitCmd := internal.SafeSplit(bkConfig.postHookCmd)
logrus.Debugf("split post-hook command for %s into %s", fullServiceName, splitCmd)
postHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &postHookExecOpts); err != nil {
return err
}
logrus.Infof("succesfully ran %s post-hook command: %s", fullServiceName, bkConfig.postHookCmd)
}
return nil
}
func copyToFile(outfile string, r io.Reader) error {
tmpFile, err := os.CreateTemp(filepath.Dir(outfile), ".tar_temp")
if err != nil {
return err
}
tmpPath := tmpFile.Name()
_, err = io.Copy(tmpFile, r)
tmpFile.Close()
if err != nil {
os.Remove(tmpPath)
return err
}
if err = os.Rename(tmpPath, outfile); err != nil {
os.Remove(tmpPath)
return err
}
return nil
}
func cleanupTempArchives(tarPaths []string) error {
for _, tarPath := range tarPaths {
if err := os.RemoveAll(tarPath); err != nil {
return err
}
logrus.Debugf("remove temporary archive file %s", tarPath)
}
return nil
}
func mergeArchives(tarPaths []string, serviceName string) error {
var out io.Writer
var cout *pgzip.Writer
localBackupPath := filepath.Join(config.BACKUP_DIR, fmt.Sprintf("%s_%s.tar.gz", serviceName, TimeStamp()))
fout, err := os.Create(localBackupPath)
if err != nil {
return fmt.Errorf("Failed to open %s: %s", localBackupPath, err)
}
defer fout.Close()
out = fout
cout = pgzip.NewWriter(out)
out = cout
tw := tar.NewWriter(out)
for _, tarPath := range tarPaths {
if err := addTar(tw, tarPath); err != nil {
return fmt.Errorf("failed to merge %s: %v", tarPath, err)
}
}
if err := tw.Close(); err != nil {
return fmt.Errorf("failed to close tar writer %v", err)
}
if cout != nil {
if err := cout.Flush(); err != nil {
return fmt.Errorf("failed to flush: %s", err)
} else if err = cout.Close(); err != nil {
return fmt.Errorf("failed to close compressed writer: %s", err)
}
}
logrus.Infof("backed up %s to %s", serviceName, localBackupPath)
return nil
}
func addTar(tw *tar.Writer, pth string) (err error) {
var tr *tar.Reader
var rc io.ReadCloser
var hdr *tar.Header
if tr, rc, err = openTarFile(pth); err != nil {
return
}
for {
if hdr, err = tr.Next(); err != nil {
if err == io.EOF {
err = nil
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
break
}
if err = tw.WriteHeader(hdr); err != nil {
break
} else if _, err = io.Copy(tw, tr); err != nil {
break
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
}
if err == nil {
err = rc.Close()
} else {
rc.Close()
}
return
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
logrus.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if resticRepo != "" {
logrus.Debugf("including RESTIC_REPO=%s in backupbot exec invocation", resticRepo)
execEnv = append(execEnv, fmt.Sprintf("RESTIC_REPO=%s", resticRepo))
}
if err := internal.RunBackupCmdRemote(cl, "create", targetContainer.ID, execEnv); err != nil {
logrus.Fatal(err)
}
return nil
},
}
func openTarFile(pth string) (tr *tar.Reader, rc io.ReadCloser, err error) {
var fin *os.File
var n int
buff := make([]byte, 1024)
var appBackupSnapshotsCommand = cli.Command{
Name: "snapshots",
Aliases: []string{"s"},
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
snapshotFlag,
},
Before: internal.SubCommandBefore,
Usage: "List backup snapshots",
BashComplete: autocomplete.AppNameComplete,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
if fin, err = os.Open(pth); err != nil {
return
}
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if n, err = fin.Read(buff); err != nil {
fin.Close()
return
} else if n == 0 {
fin.Close()
err = fmt.Errorf("%s is empty", pth)
return
}
if !internal.Chaos {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if _, err = fin.Seek(0, 0); err != nil {
fin.Close()
return
}
if !internal.Offline {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
rc = fin
tr = tar.NewReader(rc)
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
return tr, rc, nil
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
logrus.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
logrus.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if err := internal.RunBackupCmdRemote(cl, "snapshots", targetContainer.ID, execEnv); err != nil {
logrus.Fatal(err)
}
return nil
},
}
var appBackupCommand = cli.Command{
Name: "backup",
Aliases: []string{"b"},
Usage: "Manage app backups",
ArgsUsage: "<domain>",
Subcommands: []cli.Command{
appBackupListCommand,
appBackupSnapshotsCommand,
appBackupDownloadCommand,
appBackupCreateCommand,
},
}
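The subcommands above all follow the same remote-delegation pattern: instead of building tar.gz archives client-side (the code removed from this file), Abra now connects to the app's server, locates the deployed backupbot container and execs the requested backup command inside it. A minimal sketch of that shared flow, using only functions that appear in this diff (the wrapper name runRemoteBackupCmd is illustrative, not part of the change):

package app

import (
	"fmt"

	"coopcloud.tech/abra/cli/internal"
	"coopcloud.tech/abra/pkg/client"
	"coopcloud.tech/abra/pkg/config"
)

// runRemoteBackupCmd mirrors what each subcommand above does inline:
// connect, resolve the backupbot container, exec the backup command.
func runRemoteBackupCmd(app config.App, backupCmd string, extraEnv []string) error {
	cl, err := client.New(app.Server)
	if err != nil {
		return err
	}

	// resolve the deployed backupbot container via its service label
	targetContainer, err := internal.RetrieveBackupBotContainer(cl)
	if err != nil {
		return err
	}

	// SERVICE is always set; SNAPSHOT, INCLUDE_PATH, RESTIC_REPO etc. are optional
	execEnv := append([]string{fmt.Sprintf("SERVICE=%s", app.Domain)}, extraEnv...)

	return internal.RunBackupCmdRemote(cl, backupCmd, targetContainer.ID, execEnv)
}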

View File

@ -76,9 +76,9 @@ And if you want to copy that file back to your current working directory locally
logrus.Debugf("retrieved %s as target container on %s", formatter.ShortenID(container.ID), app.Server)
if toContainer {
err = copyToContainer(cl, container.ID, srcPath, dstPath)
err = CopyToContainer(cl, container.ID, srcPath, dstPath)
} else {
err = copyFromContainer(cl, container.ID, srcPath, dstPath)
err = CopyFromContainer(cl, container.ID, srcPath, dstPath)
}
if err != nil {
logrus.Fatal(err)
@ -106,9 +106,9 @@ func parseSrcAndDst(src, dst string) (srcPath string, dstPath string, service st
return "", "", "", false, errServiceMissing
}
// copyToContainer copies a file or directory from the local file system to the container.
// CopyToContainer copies a file or directory from the local file system to the container.
// See the possible copy modes and their documentation.
func copyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
func CopyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
srcStat, err := os.Stat(srcPath)
if err != nil {
return fmt.Errorf("local %s ", err)
@ -140,7 +140,7 @@ func copyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath stri
if err != nil {
return err
}
if err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
@ -179,7 +179,7 @@ func copyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath stri
if err != nil {
return err
}
if err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
if _, err := container.RunExec(dcli, cl, containerID, &types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
@ -194,9 +194,9 @@ func copyToContainer(cl *dockerClient.Client, containerID, srcPath, dstPath stri
return nil
}
// copyFromContainer copies a file or directory from the given container to the local file system.
// CopyFromContainer copies a file or directory from the given container to the local file system.
// See the possible copy modes and their documentation.
func copyFromContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
func CopyFromContainer(cl *dockerClient.Client, containerID, srcPath, dstPath string) error {
srcStat, err := cl.ContainerStatPath(context.Background(), containerID, srcPath)
if err != nil {
if errdefs.IsNotFound(err) {

View File

@ -1,223 +1,82 @@
package app
import (
"context"
"errors"
"fmt"
"os"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/recipe"
recipePkg "coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
type restoreConfig struct {
preHookCmd string
postHookCmd string
var targetPath string
var targetPathFlag = &cli.StringFlag{
Name: "target, t",
Usage: "Target path",
Destination: &targetPath,
}
var appRestoreCommand = cli.Command{
Name: "restore",
Aliases: []string{"rs"},
Usage: "Run app restore",
ArgsUsage: "<domain> <service> <file>",
Usage: "Restore an app backup",
ArgsUsage: "<domain> <service>",
Flags: []cli.Flag{
internal.DebugFlag,
internal.OfflineFlag,
internal.ChaosFlag,
targetPathFlag,
},
Before: internal.SubCommandBefore,
BashComplete: autocomplete.AppNameComplete,
Description: `
Run an app restore.
Pre/post hook commands are defined in the recipe configuration. Abra reads this
configuration and runs the commands in the context of the service before
restoring the backup.
Unlike "abra app backup", restore must be run on a per-service basis. You can
not restore all services in one go. Backup files produced by Abra are
compressed archives which use absolute paths. This allows Abra to restore
according to standard tar command logic, i.e. the backup will be restored to
the path it was originally backed up from.
Example:
abra app restore example.com app ~/.abra/backups/example_com_app_609341138.tar.gz
`,
Action: func(c *cli.Context) error {
app := internal.ValidateApp(c)
recipe, err := recipe.Get(app.Recipe, internal.Offline)
if err != nil {
if err := recipe.EnsureExists(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Chaos {
if err := recipePkg.EnsureIsClean(app.Recipe); err != nil {
if err := recipe.EnsureIsClean(app.Recipe); err != nil {
logrus.Fatal(err)
}
if !internal.Offline {
if err := recipePkg.EnsureUpToDate(app.Recipe); err != nil {
if err := recipe.EnsureUpToDate(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
if err := recipePkg.EnsureLatest(app.Recipe); err != nil {
if err := recipe.EnsureLatest(app.Recipe); err != nil {
logrus.Fatal(err)
}
}
serviceName := c.Args().Get(1)
if serviceName == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("missing <service>?"))
}
backupPath := c.Args().Get(2)
if backupPath == "" {
internal.ShowSubcommandHelpAndError(c, errors.New("missing <file>?"))
}
if _, err := os.Stat(backupPath); err != nil {
if os.IsNotExist(err) {
logrus.Fatalf("%s doesn't exist?", backupPath)
}
}
restoreConfigs := make(map[string]restoreConfig)
for _, service := range recipe.Config.Services {
if restoreEnabled, ok := service.Deploy.Labels["backupbot.restore"]; ok {
if restoreEnabled == "true" {
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), service.Name)
rsConfig := restoreConfig{}
logrus.Debugf("restore config detected for %s", fullServiceName)
if preHookCmd, ok := service.Deploy.Labels["backupbot.restore.pre-hook"]; ok {
logrus.Debugf("detected pre-hook command for %s: %s", fullServiceName, preHookCmd)
rsConfig.preHookCmd = preHookCmd
}
if postHookCmd, ok := service.Deploy.Labels["backupbot.restore.post-hook"]; ok {
logrus.Debugf("detected post-hook command for %s: %s", fullServiceName, postHookCmd)
rsConfig.postHookCmd = postHookCmd
}
restoreConfigs[service.Name] = rsConfig
}
}
}
rsConfig, ok := restoreConfigs[serviceName]
if !ok {
rsConfig = restoreConfig{}
}
cl, err := client.New(app.Server)
if err != nil {
logrus.Fatal(err)
}
if err := runRestore(cl, app, backupPath, serviceName, rsConfig); err != nil {
targetContainer, err := internal.RetrieveBackupBotContainer(cl)
if err != nil {
logrus.Fatal(err)
}
execEnv := []string{fmt.Sprintf("SERVICE=%s", app.Domain)}
if snapshot != "" {
logrus.Debugf("including SNAPSHOT=%s in backupbot exec invocation", snapshot)
execEnv = append(execEnv, fmt.Sprintf("SNAPSHOT=%s", snapshot))
}
if targetPath != "" {
logrus.Debugf("including TARGET=%s in backupbot exec invocation", targetPath)
execEnv = append(execEnv, fmt.Sprintf("TARGET=%s", targetPath))
}
if err := internal.RunBackupCmdRemote(cl, "restore", targetContainer.ID, execEnv); err != nil {
logrus.Fatal(err)
}
return nil
},
}
// runRestore does the actual restore logic.
func runRestore(cl *dockerClient.Client, app config.App, backupPath, serviceName string, rsConfig restoreConfig) error {
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), serviceName))
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, true)
if err != nil {
return err
}
fullServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)
if rsConfig.preHookCmd != "" {
splitCmd := internal.SafeSplit(rsConfig.preHookCmd)
logrus.Debugf("split pre-hook command for %s into %s", fullServiceName, splitCmd)
preHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &preHookExecOpts); err != nil {
return err
}
logrus.Infof("succesfully ran %s pre-hook command: %s", fullServiceName, rsConfig.preHookCmd)
}
backupReader, err := os.Open(backupPath)
if err != nil {
return err
}
content, err := archive.DecompressStream(backupReader)
if err != nil {
return err
}
// NOTE(d1): we use absolute paths so tar knows what to do. it will restore
// files according to the paths set in the compressed archive
restorePath := "/"
copyOpts := types.CopyToContainerOptions{AllowOverwriteDirWithFile: false, CopyUIDGID: false}
if err := cl.CopyToContainer(context.Background(), targetContainer.ID, restorePath, content, copyOpts); err != nil {
return err
}
logrus.Infof("restored %s to %s", backupPath, fullServiceName)
if rsConfig.postHookCmd != "" {
splitCmd := internal.SafeSplit(rsConfig.postHookCmd)
logrus.Debugf("split post-hook command for %s into %s", fullServiceName, splitCmd)
postHookExecOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: splitCmd,
Detach: false,
Tty: true,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &postHookExecOpts); err != nil {
return err
}
logrus.Infof("succesfully ran %s post-hook command: %s", fullServiceName, rsConfig.postHookCmd)
}
return nil
}

View File

@ -91,7 +91,7 @@ var appRunCommand = cli.Command{
logrus.Fatal(err)
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
logrus.Fatal(err)
}

View File

@ -1,35 +1,67 @@
package internal
import (
"strings"
"context"
"coopcloud.tech/abra/pkg/config"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/service"
"coopcloud.tech/abra/pkg/upstream/container"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
dockerClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
// SafeSplit splits up a string into a list of commands safely.
func SafeSplit(s string) []string {
split := strings.Split(s, " ")
var result []string
var inquote string
var block string
for _, i := range split {
if inquote == "" {
if strings.HasPrefix(i, "'") || strings.HasPrefix(i, "\"") {
inquote = string(i[0])
block = strings.TrimPrefix(i, inquote) + " "
} else {
result = append(result, i)
}
} else {
if !strings.HasSuffix(i, inquote) {
block += i + " "
} else {
block += strings.TrimSuffix(i, inquote)
inquote = ""
result = append(result, block)
block = ""
}
}
// RetrieveBackupBotContainer gets the deployed backupbot container.
func RetrieveBackupBotContainer(cl *dockerClient.Client) (types.Container, error) {
ctx := context.Background()
chosenService, err := service.GetServiceByLabel(ctx, cl, config.BackupbotLabel, NoInput)
if err != nil {
return types.Container{}, err
}
return result
logrus.Debugf("retrieved %s as backup enabled service", chosenService.Spec.Name)
filters := filters.NewArgs()
filters.Add("name", chosenService.Spec.Name)
targetContainer, err := containerPkg.GetContainer(
ctx,
cl,
filters,
NoInput,
)
if err != nil {
return types.Container{}, err
}
return targetContainer, nil
}
// RunBackupCmdRemote runs a backup related command on a remote backupbot container.
func RunBackupCmdRemote(cl *dockerClient.Client, backupCmd string, containerID string, execEnv []string) error {
execBackupListOpts := types.ExecConfig{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Cmd: []string{"/usr/bin/backup", "--", backupCmd},
Detach: false,
Env: execEnv,
Tty: true,
}
logrus.Debugf("running backup %s on %s with exec config %v", backupCmd, containerID, execBackupListOpts)
// FIXME: avoid instantiating a new CLI
dcli, err := command.NewDockerCli()
if err != nil {
return err
}
if _, err := container.RunExec(dcli, cl, containerID, &execBackupListOpts); err != nil {
return err
}
return nil
}
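Seen from the container side, every caller in this changeset ends up running /usr/bin/backup -- <command> inside the backupbot container, with behaviour selected purely through environment variables (SERVICE always, plus optional SNAPSHOT, INCLUDE_PATH, RESTIC_REPO or TARGET). A minimal sketch of a caller, assuming a connected client cl, a resolved backupbot container and an app value (the snapshot value is illustrative):

execEnv := []string{
	fmt.Sprintf("SERVICE=%s", app.Domain), // which app's backups to operate on
	"SNAPSHOT=4a3f9c1b",                   // optional: pin a specific snapshot
}

// executes `/usr/bin/backup -- snapshots` inside the backupbot container
if err := internal.RunBackupCmdRemote(cl, "snapshots", targetContainer.ID, execEnv); err != nil {
	return err
}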

View File

@ -238,6 +238,22 @@ var RemoteUserFlag = &cli.StringFlag{
Destination: &RemoteUser,
}
var GitName string
var GitNameFlag = &cli.StringFlag{
Name: "git-name, gn",
Value: "",
Usage: "Git (user) name to do commits with",
Destination: &GitName,
}
var GitEmail string
var GitEmailFlag = &cli.StringFlag{
Name: "git-email, ge",
Value: "",
Usage: "Git email name to do commits with",
Destination: &GitEmail,
}
// SubCommandBefore wires up pre-action machinery (e.g. --debug handling).
func SubCommandBefore(c *cli.Context) error {
if Debug {

View File

@ -60,7 +60,7 @@ func RunCmdRemote(cl *dockerClient.Client, app config.App, abraSh, serviceName,
Tty: false,
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
logrus.Infof("%s does not exist for %s, use /bin/sh as fallback", shell, app.Name)
shell = "/bin/sh"
}
@ -85,7 +85,7 @@ func RunCmdRemote(cl *dockerClient.Client, app config.App, abraSh, serviceName,
execCreateOpts.Tty = false
}
if err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
if _, err := container.RunExec(dcli, cl, targetContainer.ID, &execCreateOpts); err != nil {
return err
}

View File

@ -4,7 +4,6 @@ import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"text/template"
@ -37,6 +36,8 @@ var recipeNewCommand = cli.Command{
internal.DebugFlag,
internal.NoInputFlag,
internal.OfflineFlag,
internal.GitNameFlag,
internal.GitEmailFlag,
},
Before: internal.SubCommandBefore,
Usage: "Create a new recipe",
@ -92,14 +93,14 @@ recipe and domain in the sample environment config).
logrus.Fatal(err)
}
if err := ioutil.WriteFile(path, templated.Bytes(), 0644); err != nil {
if err := os.WriteFile(path, templated.Bytes(), 0644); err != nil {
logrus.Fatal(err)
}
}
newGitRepo := path.Join(config.RECIPES_DIR, recipeName)
if err := git.Init(newGitRepo, true); err != nil {
if err := git.Init(newGitRepo, true, internal.GitName, internal.GitEmail); err != nil {
logrus.Fatal(err)
}

View File

@ -36,6 +36,8 @@ var REPOS_BASE_URL = "https://git.coopcloud.tech/coop-cloud"
var CATALOGUE_JSON_REPO_NAME = "recipes-catalogue-json"
var SSH_URL_TEMPLATE = "ssh://git@git.coopcloud.tech:2222/coop-cloud/%s.git"
var BackupbotLabel = "coop-cloud.backupbot.enabled"
// envVarModifiers is a list of env var modifier strings. These are added to
// env vars as comments and modify their processing by Abra, e.g. determining
// how long secrets should be.

View File

@ -28,7 +28,7 @@ func GetContainer(c context.Context, cl *client.Client, filters filters.Args, no
return types.Container{}, fmt.Errorf("no containers matching the %v filter found?", filter)
}
if len(containers) != 1 {
if len(containers) > 1 {
var containersRaw []string
for _, container := range containers {
containerName := strings.Join(container.Names, " ")
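The relaxed check means GetContainer now treats only a more-than-one match as ambiguous: zero matches still errors, and exactly one match is returned without any prompt. A minimal sketch of a call in the style used by the backup code above (the service name is illustrative):

filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), "app")) // match the app service's container

// final flag is passed as in the callers above (no-input handling)
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, filters, true)
if err != nil {
	return err
}
logrus.Debugf("matched container %s", strings.Join(targetContainer.Names, " "))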

View File

@ -1,35 +1,41 @@
package git
import (
"fmt"
"github.com/go-git/go-git/v5"
gitPkg "github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/sirupsen/logrus"
)
// Init initialises a new repo and optionally commits all existing files to it.
func Init(repoPath string, commit bool) error {
if _, err := gitPkg.PlainInit(repoPath, false); err != nil {
logrus.Fatal(err)
func Init(repoPath string, commit bool, gitName, gitEmail string) error {
if _, err := git.PlainInit(repoPath, false); err != nil {
return fmt.Errorf("git init: %s", err)
}
logrus.Debugf("initialised new git repo in %s", repoPath)
if commit {
commitRepo, err := git.PlainOpen(repoPath)
if err != nil {
logrus.Fatal(err)
return fmt.Errorf("git open: %s", err)
}
commitWorktree, err := commitRepo.Worktree()
if err != nil {
logrus.Fatal(err)
return fmt.Errorf("git worktree: %s", err)
}
if err := commitWorktree.AddWithOptions(&git.AddOptions{All: true}); err != nil {
return err
return fmt.Errorf("git add: %s", err)
}
if _, err = commitWorktree.Commit("init", &git.CommitOptions{}); err != nil {
return err
var author *object.Signature
if gitName != "" && gitEmail != "" {
author = &object.Signature{Name: gitName, Email: gitEmail}
}
if _, err = commitWorktree.Commit("init", &git.CommitOptions{Author: author}); err != nil {
return fmt.Errorf("git commit: %s", err)
}
logrus.Debugf("init committed all files for new git repo in %s", repoPath)
}
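With the extended signature, the initial commit can be attributed explicitly; when either value is empty, author stays nil and go-git falls back to its default author resolution (typically the local git configuration). A minimal usage sketch matching the call added to "recipe new" above (recipe name and identity are illustrative):

newGitRepo := path.Join(config.RECIPES_DIR, "foobar")

// init the repo and commit all files, attributing the commit to the given identity
if err := git.Init(newGitRepo, true, "foo", "foo@example.com"); err != nil {
	logrus.Fatal(err)
}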

View File

@ -14,6 +14,70 @@ import (
"github.com/sirupsen/logrus"
)
// GetServiceByLabel retrieves a service based on a label. If prompt is true
// and the retrieved count of matching services does not match 1, then a prompt
// is presented to let the user choose. An error is returned when no matching
// service is found.
func GetServiceByLabel(c context.Context, cl *client.Client, label string, prompt bool) (swarm.Service, error) {
services, err := cl.ServiceList(c, types.ServiceListOptions{})
if err != nil {
return swarm.Service{}, err
}
if len(services) == 0 {
return swarm.Service{}, fmt.Errorf("no services deployed?")
}
var matchingServices []swarm.Service
for _, service := range services {
if enabled, exists := service.Spec.Labels[label]; exists && enabled == "true" {
matchingServices = append(matchingServices, service)
}
}
if len(matchingServices) == 0 {
return swarm.Service{}, fmt.Errorf("no services deployed matching label '%s'?", label)
}
if len(matchingServices) > 1 {
var servicesRaw []string
for _, service := range matchingServices {
serviceName := service.Spec.Name
created := formatter.HumanDuration(service.CreatedAt.Unix())
servicesRaw = append(servicesRaw, fmt.Sprintf("%s (created %v)", serviceName, created))
}
if !prompt {
err := fmt.Errorf("expected 1 service but found %v: %s", len(matchingServices), strings.Join(servicesRaw, " "))
return swarm.Service{}, err
}
logrus.Warnf("ambiguous service list received, prompting for input")
var response string
prompt := &survey.Select{
Message: "which service are you looking for?",
Options: servicesRaw,
}
if err := survey.AskOne(prompt, &response); err != nil {
return swarm.Service{}, err
}
chosenService := strings.TrimSpace(strings.Split(response, " ")[0])
for _, service := range matchingServices {
serviceName := strings.ToLower(service.Spec.Name)
if serviceName == chosenService {
return service, nil
}
}
logrus.Panic("failed to match chosen service")
}
return matchingServices[0], nil
}
// GetService retrieves a service container. If prompt is true and the retrieved
// count of service containers does not match 1, then a prompt is presented to
// let the user choose. A count of 0 is handled gracefully.
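The new GetServiceByLabel helper is what backs RetrieveBackupBotContainer above: it filters deployed swarm services by a boolean label and returns the single match, errors when none match, and errors or prompts (depending on the prompt flag) when several do. A minimal usage sketch against the backupbot label (variable names are illustrative):

ctx := context.Background()

// find the service labelled coop-cloud.backupbot.enabled=true; prompt on ambiguity
chosenService, err := service.GetServiceByLabel(ctx, cl, config.BackupbotLabel, true)
if err != nil {
	return err
}
logrus.Debugf("resolved backup service %s", chosenService.Spec.Name)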

View File

@ -13,7 +13,10 @@ import (
"github.com/sirupsen/logrus"
)
func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string, execConfig *types.ExecConfig) error {
// RunExec runs a command on a remote container. io.Writer corresponds to the
// command output.
func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string,
execConfig *types.ExecConfig) (io.Writer, error) {
ctx := context.Background()
// We need to check the tty _before_ we do the ContainerExecCreate, because
@ -21,22 +24,22 @@ func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string
// there's no easy way to clean those up). But also in order to make "not
// exist" errors take precedence we do a dummy inspect first.
if _, err := client.ContainerInspect(ctx, containerID); err != nil {
return err
return nil, err
}
if !execConfig.Detach {
if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil {
return err
return nil, err
}
}
response, err := client.ContainerExecCreate(ctx, containerID, *execConfig)
if err != nil {
return err
return nil, err
}
execID := response.ID
if execID == "" {
return errors.New("exec ID empty")
return nil, errors.New("exec ID empty")
}
if execConfig.Detach {
@ -44,13 +47,13 @@ func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string
Detach: execConfig.Detach,
Tty: execConfig.Tty,
}
return client.ContainerExecStart(ctx, execID, execStartCheck)
return nil, client.ContainerExecStart(ctx, execID, execStartCheck)
}
return interactiveExec(ctx, dockerCli, client, execConfig, execID)
}
func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclient.Client,
execConfig *types.ExecConfig, execID string) error {
execConfig *types.ExecConfig, execID string) (io.Writer, error) {
// Interactive exec requested.
var (
out, stderr io.Writer
@ -76,7 +79,7 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclie
}
resp, err := client.ContainerExecAttach(ctx, execID, execStartCheck)
if err != nil {
return err
return out, err
}
defer resp.Close()
@ -107,10 +110,10 @@ func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclie
if err := <-errCh; err != nil {
logrus.Debugf("Error hijack: %s", err)
return err
return out, err
}
return getExecExitStatus(ctx, client, execID)
return out, getExecExitStatus(ctx, client, execID)
}
func getExecExitStatus(ctx context.Context, client apiclient.ContainerAPIClient, execID string) error {
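RunExec's return type change ripples through this changeset: existing call sites now discard the writer with an underscore, while new callers can capture the command output. A minimal sketch of a caller under the new signature (the command and options are illustrative):

execOpts := types.ExecConfig{
	AttachStderr: true,
	AttachStdin:  true,
	AttachStdout: true,
	Cmd:          []string{"ls", "/"},
	Tty:          true,
}

// out is the writer the command output was attached to; nil for detached execs
out, err := container.RunExec(dcli, cl, targetContainer.ID, &execOpts)
if err != nil {
	logrus.Fatal(err)
}
_ = out // or inspect/forward the captured output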

View File

@ -1,8 +1,8 @@
#!/usr/bin/env bash
ABRA_VERSION="0.9.0-beta"
ABRA_VERSION="0.8.1-beta"
ABRA_RELEASE_URL="https://git.coopcloud.tech/api/v1/repos/coop-cloud/abra/releases/tags/$ABRA_VERSION"
RC_VERSION="0.8.0-rc1-beta"
RC_VERSION="0.8.1-beta"
RC_VERSION_URL="https://git.coopcloud.tech/api/v1/repos/coop-cloud/abra/releases/tags/$RC_VERSION"
for arg in "$@"; do

View File

@ -70,13 +70,13 @@ setup(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run $ABRA app check "$TEST_APP_DOMAIN"
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
refute_output --partial 'behind 3'
_reset_recipe
}
@ -86,7 +86,7 @@ setup(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --partial "Your branch is behind 'origin/main' by 1 commit"
assert_output --partial 'behind 1'
# NOTE(d1): we can't quite tell if this will fail or not in the future, so,
# since it isn't an important part of what we're testing here, we don't check
@ -94,7 +94,7 @@ setup(){
run $ABRA app check "$TEST_APP_DOMAIN" --offline
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --partial "Your branch is behind 'origin/main' by 1 commit"
assert_output --partial 'behind 1'
_reset_recipe
}

View File

@ -58,7 +58,7 @@ test_cmd_export"
assert_success
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE"
run $ABRA app cmd --local "$TEST_APP_DOMAIN" test_cmd
run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local
assert_success
assert_output --partial 'baz'
@ -70,7 +70,7 @@ test_cmd_export"
assert_success
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
run $ABRA app cmd --local "$TEST_APP_DOMAIN" test_cmd
run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local
assert_failure
assert_output --partial 'locally unstaged changes'
@ -83,7 +83,7 @@ test_cmd_export"
assert_success
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
run $ABRA app cmd --local --chaos "$TEST_APP_DOMAIN" test_cmd
run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local --chaos
assert_success
assert_output --partial 'baz'
@ -96,14 +96,14 @@ test_cmd_export"
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run $ABRA app cmd --local "$TEST_APP_DOMAIN" test_cmd
run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local
assert_success
assert_output --partial 'baz'
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --partial "up to date"
refute_output --partial 'behind 3'
_reset_recipe "$TEST_RECIPE"
}
@ -113,14 +113,14 @@ test_cmd_export"
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run $ABRA app cmd --local --offline "$TEST_APP_DOMAIN" test_cmd
run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local --offline
assert_success
assert_output --partial 'baz'
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
_reset_recipe "$TEST_RECIPE"
}
@ -132,13 +132,13 @@ test_cmd_export"
}
@test "error if missing arguments when passing --local" {
run $ABRA app cmd --local "$TEST_APP_DOMAIN"
run $ABRA app cmd "$TEST_APP_DOMAIN" --local
assert_failure
assert_output --partial 'missing arguments'
}
@test "cannot use --local and --user at same time" {
run $ABRA app cmd --local --user root "$TEST_APP_DOMAIN" test_cmd
run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local --user root
assert_failure
assert_output --partial 'cannot use --local & --user together'
}
@ -147,7 +147,7 @@ test_cmd_export"
run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/abra.sh"
assert_success
run $ABRA app cmd --local --chaos "$TEST_APP_DOMAIN" test_cmd
run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local --chaos
assert_failure
assert_output --partial "$ABRA_DIR/recipes/$TEST_RECIPE/abra.sh does not exist"
@ -155,25 +155,25 @@ test_cmd_export"
}
@test "error if missing command" {
run $ABRA app cmd --local "$TEST_APP_DOMAIN" doesnt_exist
run $ABRA app cmd "$TEST_APP_DOMAIN" doesnt_exist --local
assert_failure
assert_output --partial "doesn't have a doesnt_exist function"
}
@test "run --local command" {
run $ABRA app cmd --local "$TEST_APP_DOMAIN" test_cmd
run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd --local
assert_success
assert_output --partial 'baz'
}
@test "run command with single arg" {
run $ABRA app cmd --local "$TEST_APP_DOMAIN" test_cmd_arg -- bing
run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd_arg --local -- bing
assert_success
assert_output --partial 'bing'
}
@test "run command with several args" {
run $ABRA app cmd --local "$TEST_APP_DOMAIN" test_cmd_args -- bong bang
run $ABRA app cmd "$TEST_APP_DOMAIN" test_cmd_args --local -- bong bang
assert_success
assert_output --partial 'bong bang'
}

View File

@ -16,7 +16,6 @@ teardown_file(){
setup(){
load "$PWD/tests/integration/helpers/common"
_common_setup
_reset_recipe
}
teardown(){
@ -83,13 +82,13 @@ teardown(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run $ABRA app deploy "$TEST_APP_DOMAIN" --no-input --no-converge-checks
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
refute_output --regexp 'behind .* 3 commits'
refute_output --partial 'behind 3'
_reset_recipe
_undeploy_app
@ -101,7 +100,7 @@ teardown(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
# NOTE(d1): need to use --chaos to force same commit
run $ABRA app deploy "$TEST_APP_DOMAIN" \
@ -109,7 +108,7 @@ teardown(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
_undeploy_app
_reset_recipe
@ -117,9 +116,6 @@ teardown(){
# bats test_tags=slow
@test "deploy latest commit if no published versions and no --chaos" {
# TODO(d1): fix with a new test recipe which has no published versions?
skip "known issue, abra-test-recipe has published versions now"
latestCommit="$(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" rev-parse --short HEAD)"
_remove_tags
@ -144,7 +140,7 @@ teardown(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
threeCommitsBack="$(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" rev-parse --short HEAD)"
@ -277,10 +273,6 @@ teardown(){
}
@test "ensure domain is checked" {
if [[ "$TEST_SERVER" == "default" ]]; then
skip "domain checks are disabled for local server"
fi
appDomain="custom-html.DOESNTEXIST"
run $ABRA app new custom-html \

View File

@ -45,7 +45,7 @@ teardown(){
assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --partial "up to date"
assert_output --partial "Your branch is up to date with 'origin/main'."
}
@test "create new app with version" {
@ -121,7 +121,7 @@ teardown(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial "Your branch is behind 'origin/main' by 3 commits, and can be fast-forwarded."
run $ABRA app new "$TEST_RECIPE" \
--no-input \
@ -131,7 +131,7 @@ teardown(){
assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --partial "up to date"
assert_output --partial "Your branch is up to date with 'origin/main'."
_reset_recipe
}
@ -141,7 +141,7 @@ teardown(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial "Your branch is behind 'origin/main' by 3 commits, and can be fast-forwarded."
# NOTE(d1): need to use --chaos to force same commit
run $ABRA app new "$TEST_RECIPE" \
@ -154,7 +154,7 @@ teardown(){
assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial "Your branch is behind 'origin/main' by 3 commits, and can be fast-forwarded."
_reset_recipe
}

View File

@ -104,10 +104,7 @@ teardown(){
_undeploy_app
# TODO: should wait as long as volume is no longer in use
sleep 10
run $ABRA app volume rm "$TEST_APP_DOMAIN" --no-input
run $ABRA app volume rm "$TEST_APP_DOMAIN"
assert_success
run $ABRA app volume ls "$TEST_APP_DOMAIN"

View File

@ -109,13 +109,13 @@ teardown(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run $ABRA app restore "$TEST_APP_DOMAIN" app
run $ABRA app restore "$TEST_APP_DOMAIN" app DOESNTEXIST
assert_failure
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --partial "up to date"
refute_output --partial 'behind 3'
}
@test "ensure recipe not up to date if --offline" {
@ -126,19 +126,19 @@ teardown(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run $ABRA app restore "$TEST_APP_DOMAIN" app --offline
run $ABRA app restore "$TEST_APP_DOMAIN" app DOESNTEXIST --offline
assert_failure
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" checkout "$latestCommit"
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --partial "HEAD detached at $latestCommit"
refute_output --partial 'behind 3'
}
@test "error if missing service" {

View File

@ -50,13 +50,13 @@ teardown(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run $ABRA app rollback "$TEST_APP_DOMAIN" --no-input --no-converge-checks
assert_failure
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --partial "up to date"
refute_output --partial 'behind 3'
}
@test "ensure recipe not up to date if --offline" {
@ -67,14 +67,14 @@ teardown(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run $ABRA app rollback "$TEST_APP_DOMAIN" \
--no-input --no-converge-checks --offline
assert_failure
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" checkout "$latestCommit"
assert_success
@ -131,7 +131,7 @@ teardown(){
latestCommit="$(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" rev-parse --short HEAD)"
run $ABRA app deploy "$TEST_APP_DOMAIN" \
--no-input --chaos
--no-input --no-converge-checks --chaos
assert_success
assert_output --partial "$latestCommit"
assert_output --partial 'chaos'

View File

@ -8,7 +8,7 @@ setup_file(){
run $ABRA app new "$TEST_RECIPE" \
--no-input \
--server "$TEST_SERVER" \
--domain "$TEST_APP_DOMAIN"
--domain "$TEST_APP_DOMAIN" \
assert_success
assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
}
@ -19,6 +19,13 @@ teardown_file(){
_reset_recipe
}
teardown(){
# https://github.com/bats-core/bats-core/issues/383#issuecomment-738628888
if [[ -z "${BATS_TEST_COMPLETED}" ]]; then
_undeploy_app
fi
}
setup(){
load "$PWD/tests/integration/helpers/common"
_common_setup

View File

@ -59,8 +59,6 @@ teardown(){
# bats test_tags=slow
@test "error if not in catalogue" {
skip "known issue, see https://git.coopcloud.tech/coop-cloud/recipes-catalogue-json/issues/6"
_deploy_app
run $ABRA app version "$TEST_APP_DOMAIN"
@ -94,7 +92,7 @@ teardown(){
assert_success
# NOTE(d1): to let the stack come down before nuking volumes
sleep 5
sleep 3
run $ABRA app volume remove "$appDomain" --no-input
assert_success

View File

@ -79,7 +79,7 @@ teardown(){
_undeploy_app
# NOTE(d1): to let the stack come down before nuking volumes
sleep 10
sleep 5
run $ABRA app volume rm "$TEST_APP_DOMAIN" --force
assert_success
@ -93,7 +93,7 @@ teardown(){
_undeploy_app
# NOTE(d1): to let the stack come down before nuking volumes
sleep 10
sleep 5
run $ABRA app volume rm "$TEST_APP_DOMAIN" --force
assert_success

View File

@ -49,7 +49,7 @@ _reset_app(){
run $ABRA app new "$TEST_RECIPE" \
--no-input \
--server "$TEST_SERVER" \
--domain "$TEST_APP_DOMAIN"
--domain "$TEST_APP_DOMAIN" \
assert_success
assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
}

View File

@ -11,11 +11,7 @@ _add_server() {
}
_rm_server() {
if [[ "$TEST_SERVER" == "default" ]]; then
run rm -rf "$ABRA_DIR/servers/default"
else
run $ABRA server remove --no-input "$TEST_SERVER"
fi
run $ABRA server remove --no-input "$TEST_SERVER"
assert_success
assert_not_exists "$ABRA_DIR/servers/$TEST_SERVER"
}

View File

@ -66,13 +66,13 @@ setup() {
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run $ABRA recipe lint "$TEST_RECIPE"
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
refute_output --partial 'behind 3'
_reset_recipe
}
@ -82,13 +82,13 @@ setup() {
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run $ABRA recipe lint "$TEST_RECIPE" --offline
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
_reset_recipe
}

View File

@ -23,14 +23,14 @@ teardown(){
}
@test "create new recipe" {
run $ABRA recipe new foobar
run $ABRA recipe new foobar --git-name foo --git-email foo@example.com
assert_success
assert_output --partial 'Your new foobar recipe has been created'
assert_exists "$ABRA_DIR/recipes/foobar"
}
@test "create new app from new recipe" {
run $ABRA recipe new foobar
run $ABRA recipe new foobar --git-name foo --git-email foo@example.com
assert_success
run $ABRA app new foobar \

View File

@ -61,14 +61,14 @@ setup(){
assert_success
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
assert_output --partial 'behind 3'
run $ABRA recipe upgrade "$TEST_RECIPE" --no-input
assert_success
assert_output --partial 'can upgrade service: app'
run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
assert_output --regexp 'behind .* 3 commits'
refute_output --partial 'behind 3'
_reset_recipe
}

View File

@ -12,8 +12,6 @@ setup() {
}
@test "error if not present in catalogue" {
skip "known issue, see https://git.coopcloud.tech/coop-cloud/recipes-catalogue-json/issues/6"
run $ABRA recipe versions "$TEST_RECIPE"
assert_failure
assert_output --partial "is not published on the catalogue"