package app

import (
	"context"
	"fmt"

	"coopcloud.tech/abra/cli/internal"
	appPkg "coopcloud.tech/abra/pkg/app"
	"coopcloud.tech/abra/pkg/autocomplete"
	"coopcloud.tech/abra/pkg/client"
	"coopcloud.tech/abra/pkg/config"
	"coopcloud.tech/abra/pkg/formatter"
	"coopcloud.tech/abra/pkg/log"
	stack "coopcloud.tech/abra/pkg/upstream/stack"
	"github.com/docker/docker/api/types/filters"
	dockerClient "github.com/docker/docker/client"
	"github.com/spf13/cobra"
)

// AppUndeployCommand removes an app's stack from the swarm. Application data
// (volumes, secrets) is left untouched; only the running services are removed.
var AppUndeployCommand = &cobra.Command{
	Use:     "undeploy <domain> [flags]",
	Aliases: []string{"un"},
	Short:   "Undeploy an app",
	Long: `This does not destroy any application data.

However, you should remain vigilant, as your swarm installation will consider
any previously attached volumes as eligible for pruning once undeployed.

Passing "--prune/-p" does not remove those volumes.`,
	Args: cobra.ExactArgs(1),
	// Shell completion: offer known app names for the <domain> argument.
	ValidArgsFunction: func(
		cmd *cobra.Command,
		args []string,
		toComplete string) ([]string, cobra.ShellCompDirective) {
		return autocomplete.AppNameComplete()
	},
	Run: func(cmd *cobra.Command, args []string) {
		// Resolve and validate the app from the <domain> argument.
		app := internal.ValidateApp(args)
		stackName := app.StackName()

		// Docker client connected to the server hosting this app.
		cl, err := client.New(app.Server)
		if err != nil {
			log.Fatal(err)
		}

		log.Debugf("checking whether %s is already deployed", stackName)

		deployMeta, err := stack.IsDeployed(context.Background(), cl, stackName)
		if err != nil {
			log.Fatal(err)
		}

		// Undeploying something that is not deployed is an error, not a no-op.
		if !deployMeta.IsDeployed {
			log.Fatalf("%s is not deployed?", app.Name)
		}

		// Show the user an overview of what is about to happen and let them
		// confirm before anything is removed.
		if err := internal.DeployOverview(
			app,
			deployMeta.Version,
			config.NO_DOMAIN_DEFAULT,
			"",
			nil,
		); err != nil {
			log.Fatal(err)
		}

		composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
		if err != nil {
			log.Fatal(err)
		}

		// Load the compose config so the recipe's timeout label (if any) can
		// override the default wait timeout for the removal below.
		opts := stack.Deploy{Composefiles: composeFiles, Namespace: stackName}
		compose, err := appPkg.GetAppComposeConfig(app.Name, opts, app.Env)
		if err != nil {
			log.Fatal(err)
		}

		stack.WaitTimeout, err = appPkg.GetTimeoutFromLabel(compose, stackName)
		if err != nil {
			log.Fatal(err)
		}

		log.Info("initialising undeploy")

		// Detach: false blocks until the stack is fully removed (or the wait
		// timeout set above expires).
		rmOpts := stack.Remove{
			Namespaces: []string{stackName},
			Detach:     false,
		}
		if err := stack.RunRemove(context.Background(), cl, rmOpts); err != nil {
			log.Fatal(err)
		}

		// Optional "--prune/-p": clean up leftover containers, networks and
		// dangling images belonging to this app. Volumes are never pruned.
		if prune {
			if err := pruneApp(cl, app); err != nil {
				log.Fatal(err)
			}
		}

		log.Info("undeploy succeeded 🟢")

		// Record the undeployed version in the local recipe env for later
		// reference (e.g. redeploying the same version).
		if err := app.WriteRecipeVersion(deployMeta.Version, false); err != nil {
			log.Fatalf("writing recipe version failed: %s", err)
		}
	},
}

// pruneApp runs the equivalent of a "docker system prune" but only filtering
// against resources connected with the app deployment. It is not a system wide
// prune. Volumes are not pruned to avoid unwated data loss.
// pruneApp runs the equivalent of a "docker system prune" but only filtering
// against resources connected with the app deployment. It is not a system wide
// prune. Volumes are not pruned to avoid unwanted data loss.
func pruneApp(cl *dockerClient.Client, app appPkg.App) error {
	stackName := app.StackName()
	ctx := context.Background()

	// Build a label filter scoped to this app's stack so only its resources
	// are prune candidates.
	// NOTE(review): Docker's "label" filter matches exact label keys or
	// key=value pairs; "*" globbing is not documented behavior — confirm this
	// filter actually restricts the prune to the stack's resources.
	pruneFilters := filters.NewArgs()
	stackSearch := fmt.Sprintf("%s*", stackName)
	pruneFilters.Add("label", stackSearch)
	cr, err := cl.ContainersPrune(ctx, pruneFilters)
	if err != nil {
		return err
	}

	// Report human-readable reclaimed disk space per resource type.
	cntSpaceReclaimed := formatter.ByteCountSI(cr.SpaceReclaimed)
	log.Infof("containers pruned: %d; space reclaimed: %s", len(cr.ContainersDeleted), cntSpaceReclaimed)

	nr, err := cl.NetworksPrune(ctx, pruneFilters)
	if err != nil {
		return err
	}

	log.Infof("networks pruned: %d", len(nr.NetworksDeleted))

	ir, err := cl.ImagesPrune(ctx, pruneFilters)
	if err != nil {
		return err
	}

	imgSpaceReclaimed := formatter.ByteCountSI(ir.SpaceReclaimed)
	log.Infof("images pruned: %d; space reclaimed: %s", len(ir.ImagesDeleted), imgSpaceReclaimed)

	return nil
}

// prune holds the value of the "--prune/-p" flag: when true, undeploy also
// removes this app's unused containers, networks and dangling images.
var prune bool

// init registers the command-line flags for "abra app undeploy".
func init() {
	flagSet := AppUndeployCommand.Flags()
	flagSet.BoolVarP(
		&prune,
		"prune",
		"p",
		false,
		"prune unused containers, networks, and dangling images",
	)
}