package app

import (
	"context"
	"fmt"
	"time"

	"coopcloud.tech/abra/cli/internal"
	"coopcloud.tech/abra/pkg/autocomplete"
	"coopcloud.tech/abra/pkg/client"
	"coopcloud.tech/abra/pkg/config"
	stack "coopcloud.tech/abra/pkg/upstream/stack"
	"github.com/docker/docker/api/types/filters"
	dockerClient "github.com/docker/docker/client"
	"github.com/sirupsen/logrus"
	"github.com/urfave/cli"
)

var prune bool

var pruneFlag = &cli.BoolFlag{
	Name:        "prune, p",
	Destination: &prune,
	Usage:       "Prunes unused containers, networks, and dangling images for an app",
}

// pruneSystem runs the equivalent of a "docker system prune" after undeploying
// in order to clean up leftover state related to the deployment. We must run
// this logic inside a loop as it may take some time for the undeployed services
// to come down.
func pruneSystem(c *cli.Context, cl *dockerClient.Client, app config.App) error {
	for {
		if !prune {
			return nil
		}

		stackName := app.StackName()
		ctx := context.Background()

		// Only prune resources labelled with this app's stack name.
		pruneFilters := filters.NewArgs()
		stackSearch := fmt.Sprintf("%s*", stackName)
		pruneFilters.Add("label", stackSearch)

		cr, err := cl.ContainersPrune(ctx, pruneFilters)
		if err != nil {
			logrus.Errorf(err.Error())
			time.Sleep(time.Second)
			continue
		}
		logrus.Infof("containers deleted: %s; space reclaimed: %v", cr.ContainersDeleted, cr.SpaceReclaimed)

		nr, err := cl.NetworksPrune(ctx, pruneFilters)
		if err != nil {
			logrus.Errorf(err.Error())
			time.Sleep(time.Second)
			continue
		}
		logrus.Infof("networks deleted: %s", nr.NetworksDeleted)

		ir, err := cl.ImagesPrune(ctx, pruneFilters)
		if err != nil {
			logrus.Errorf(err.Error())
			time.Sleep(time.Second)
			continue
		}
		logrus.Infof("images deleted: %s; space reclaimed: %v", ir.ImagesDeleted, ir.SpaceReclaimed)

		break
	}

	return nil
}

var appUndeployCommand = cli.Command{
	Name:      "undeploy",
	Aliases:   []string{"un"},
	ArgsUsage: "",
	Flags: []cli.Flag{
		internal.DebugFlag,
		internal.NoInputFlag,
		pruneFlag,
	},
	Before:       internal.SubCommandBefore,
	Usage:        "Undeploy an app",
	BashComplete: autocomplete.AppNameComplete,
	Description: `
This does not destroy any of the application data.

However, you should remain vigilant, as your swarm installation will consider
any previously attached volumes as eligible for pruning once undeployed.
`,
	Action: func(c *cli.Context) error {
		app := internal.ValidateApp(c)
		stackName := app.StackName()

		cl, err := client.New(app.Server)
		if err != nil {
			logrus.Fatal(err)
		}

		logrus.Debugf("checking whether %s is already deployed", stackName)

		isDeployed, deployedVersion, err := stack.IsDeployed(context.Background(), cl, stackName)
		if err != nil {
			logrus.Fatal(err)
		}

		if !isDeployed {
			logrus.Fatalf("%s is not deployed?", app.Name)
		}

		// Ask for confirmation before removing the deployed stack.
		if err := internal.DeployOverview(app, deployedVersion, "continue with undeploy?"); err != nil {
			logrus.Fatal(err)
		}

		rmOpts := stack.Remove{Namespaces: []string{app.StackName()}}
		if err := stack.RunRemove(context.Background(), cl, rmOpts); err != nil {
			logrus.Fatal(err)
		}

		// Optionally clean up leftover containers, networks and images
		// if the --prune flag was passed.
		if err := pruneSystem(c, cl, app); err != nil {
			logrus.Fatal(err)
		}

		return nil
	},
}