diff --git a/cli/app/undeploy.go b/cli/app/undeploy.go index 79836d24..3abf5304 100644 --- a/cli/app/undeploy.go +++ b/cli/app/undeploy.go @@ -8,8 +8,10 @@ import ( "coopcloud.tech/abra/cli/internal" "coopcloud.tech/abra/pkg/autocomplete" "coopcloud.tech/abra/pkg/client" + "coopcloud.tech/abra/pkg/config" stack "coopcloud.tech/abra/pkg/upstream/stack" "github.com/docker/docker/api/types/filters" + dockerClient "github.com/docker/docker/client" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) @@ -22,19 +24,19 @@ var pruneFlag = &cli.BoolFlag{ Usage: "Prunes unused containers, networks, and dangling images for an app", } -func cleanup(c *cli.Context) error { +// pruneSystem runs the equivalent of a "docker system prune" after undeploying +// in order to clean up leftover state related to the deployment. We must run +// this logic inside a loop as it may take some time for the undeployed app to come +// down. +func pruneSystem(c *cli.Context, cl *dockerClient.Client, app config.App) error { for { if !prune { return nil } - app := internal.ValidateApp(c) - stackName := app.StackName() - cl, err := client.New(app.Server) - if err != nil { - logrus.Fatal(err) - } - ctx := context.Background() + stackName := app.StackName() + + ctx := context.Background() pruneFilters := filters.NewArgs() stackSearch := fmt.Sprintf("%s*", stackName) pruneFilters.Add("label", stackSearch) @@ -44,7 +46,8 @@ func cleanup(c *cli.Context) error { time.Sleep(time.Second) continue } - logrus.Infof("Containers deleted: %s; Space reclaimed: %v", cr.ContainersDeleted, cr.SpaceReclaimed) + + logrus.Infof("containers deleted: %s; space reclaimed: %v", cr.ContainersDeleted, cr.SpaceReclaimed) nr, err := cl.NetworksPrune(ctx, pruneFilters) if err != nil { @@ -52,7 +55,8 @@ func cleanup(c *cli.Context) error { time.Sleep(time.Second) continue } - logrus.Infof("Networks deleted %s", nr.NetworksDeleted) + + logrus.Infof("networks deleted %s", nr.NetworksDeleted) ir, err := cl.ImagesPrune(ctx, 
pruneFilters) if err != nil { @@ -60,9 +64,12 @@ func cleanup(c *cli.Context) error { time.Sleep(time.Second) continue } - logrus.Infof("Images deleted: %s; Space reclaimed: %v", ir.ImagesDeleted, ir.SpaceReclaimed) + + logrus.Infof("images deleted: %s; space reclaimed: %v", ir.ImagesDeleted, ir.SpaceReclaimed) + break } + return nil } @@ -75,8 +82,9 @@ var appUndeployCommand = cli.Command{ internal.NoInputFlag, pruneFlag, }, - Before: internal.SubCommandBefore, - Usage: "Undeploy an app", + Before: internal.SubCommandBefore, + Usage: "Undeploy an app", + BashComplete: autocomplete.AppNameComplete, Description: ` This does not destroy any of the application data. However, you should remain vigilant, as your swarm installation will consider any previously attached @@ -109,8 +117,11 @@ volumes as eligible for pruning once undeployed. if err := stack.RunRemove(context.Background(), cl, rmOpts); err != nil { logrus.Fatal(err) } - cleanup(c) + + if err := pruneSystem(c, cl, app); err != nil { + logrus.Fatal(err) + } + return nil }, - BashComplete: autocomplete.AppNameComplete, }