abra/cli/app/undeploy.go

package app

import (
	"context"
	"fmt"

	"coopcloud.tech/abra/cli/internal"
	appPkg "coopcloud.tech/abra/pkg/app"
	"coopcloud.tech/abra/pkg/autocomplete"
	"coopcloud.tech/abra/pkg/client"
	"coopcloud.tech/abra/pkg/config"
	"coopcloud.tech/abra/pkg/formatter"
	"coopcloud.tech/abra/pkg/log"
	stack "coopcloud.tech/abra/pkg/upstream/stack"

	"github.com/docker/docker/api/types/filters"
	dockerClient "github.com/docker/docker/client"
	"github.com/urfave/cli"
)
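
// prune stores the value of the -p/--prune flag; when set, leftover
// resources are cleaned up after the app is undeployed.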
var prune bool

var pruneFlag = &cli.BoolFlag{
	Name:        "prune, p",
	Destination: &prune,
	Usage:       "Prunes unused containers, networks, and dangling images for an app",
}

// pruneApp runs the equivalent of a "docker system prune", but only against
// resources connected with the app deployment. It is not a system-wide
// prune. Volumes are not pruned to avoid unwanted data loss.
func pruneApp(cl *dockerClient.Client, app appPkg.App) error {
	stackName := app.StackName()
	ctx := context.Background()
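
	// scope prune candidates to resources labelled with the app's stack name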
	pruneFilters := filters.NewArgs()
	stackSearch := fmt.Sprintf("%s*", stackName)
	pruneFilters.Add("label", stackSearch)
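
	// remove stopped containers belonging to the app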
	cr, err := cl.ContainersPrune(ctx, pruneFilters)
	if err != nil {
		return err
	}

	cntSpaceReclaimed := formatter.ByteCountSI(cr.SpaceReclaimed)
	log.Infof("containers pruned: %d; space reclaimed: %s", len(cr.ContainersDeleted), cntSpaceReclaimed)
	nr, err := cl.NetworksPrune(ctx, pruneFilters)
	if err != nil {
		return err
	}

	log.Infof("networks pruned: %d", len(nr.NetworksDeleted))
	ir, err := cl.ImagesPrune(ctx, pruneFilters)
	if err != nil {
		return err
	}

	imgSpaceReclaimed := formatter.ByteCountSI(ir.SpaceReclaimed)
	log.Infof("images pruned: %d; space reclaimed: %s", len(ir.ImagesDeleted), imgSpaceReclaimed)

	return nil
}
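
// appUndeployCommand is the "abra app undeploy" command. It removes the
// app's deployed stack but leaves volumes, and therefore app data, intact.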
var appUndeployCommand = cli.Command{
	Name:      "undeploy",
	Aliases:   []string{"un"},
	ArgsUsage: "<domain>",
	Flags: []cli.Flag{
		internal.DebugFlag,
		internal.NoInputFlag,
		internal.OfflineFlag,
		pruneFlag,
	},
	Before:       internal.SubCommandBefore,
	Usage:        "Undeploy an app",
	BashComplete: autocomplete.AppNameComplete,
	Description: `
This does not destroy any of the application data.
However, you should remain vigilant, as your swarm installation will consider
any previously attached volumes as eligible for pruning once undeployed.
Passing "-p/--prune" does not remove those volumes.`,
	Action: func(c *cli.Context) error {
		app := internal.ValidateApp(c)
		stackName := app.StackName()

		cl, err := client.New(app.Server)
		if err != nil {
			log.Fatal(err)
		}

		log.Debugf("checking whether %s is already deployed", stackName)

		deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
		if err != nil {
			log.Fatal(err)
		}

		if !deployMeta.IsDeployed {
			log.Fatalf("%s is not deployed?", app.Name)
		}
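
		// report the recorded chaos version if this is a chaos deployment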
		chaosVersion := config.CHAOS_DEFAULT
		if deployMeta.IsChaos {
			chaosVersion = deployMeta.ChaosVersion
		}
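
		// summarise the deployment and ask for confirmation before undeploying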
		if err := internal.DeployOverview(app, []string{}, deployMeta.Version, chaosVersion); err != nil {
			log.Fatal(err)
		}
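
		// remove the deployed stack; Detach: false means the removal is awaited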
		rmOpts := stack.Remove{
			Namespaces: []string{app.StackName()},
			Detach:     false,
		}
		if err := stack.RunRemove(context.Background(), cl, rmOpts); err != nil {
			log.Fatal(err)
		}
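
		// clean up leftover containers, networks and dangling images on request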
		if prune {
			if err := pruneApp(cl, app); err != nil {
				log.Fatal(err)
			}
		}

		return nil
	},
}