feat: Adds abra app move #605

Closed
p4u1 wants to merge 1 commit from p4u1/abra:abra-app-move into main
7 changed files with 373 additions and 5 deletions

313
cli/app/move.go Normal file
View File

@ -0,0 +1,313 @@
package app
import (
"context"
"fmt"
"os"
"os/exec"
"strings"
"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/app"
appPkg "coopcloud.tech/abra/pkg/app"
"coopcloud.tech/abra/pkg/autocomplete"
"coopcloud.tech/abra/pkg/client"
containerPkg "coopcloud.tech/abra/pkg/container"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/secret"
"coopcloud.tech/abra/pkg/upstream/stack"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/volume"
dockerclient "github.com/docker/docker/client"
"github.com/spf13/cobra"
)
var AppMoveCommand = &cobra.Command{
Use: "move <domain> <server> [flags]",
Short: "Moves an app to a different server",
Long: `Move an app to a differnt server.
This will copy secrets and volumes from the old server to the new one. It will also undeploy the app from old server but not deploy it on the new. You will have to do that your self, after the move finished.
Use "--dry-run/-r" to see which secrets and volumes will be moved.`,
Example: ` # moving an app
abra app move nextcloud.example.com myserver.com`,
Args: cobra.RangeArgs(1, 2),
ValidArgsFunction: func(
cmd *cobra.Command,
args []string,
toComplete string,
) ([]string, cobra.ShellCompDirective) {
switch l := len(args); l {
case 0:
return autocomplete.AppNameComplete()
case 1:
return autocomplete.ServerNameComplete()
default:
return nil, cobra.ShellCompDirectiveDefault
}
},
Run: func(cmd *cobra.Command, args []string) {
app := internal.ValidateApp(args)
if len(args) <= 1 {
log.Fatal("no server provided")
}
newServer := args[1]
if err := app.Recipe.Ensure(internal.GetEnsureContext()); err != nil {
log.Fatal(err)
}
cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
}
resources, err := getAppResources(cl, app)
if err != nil {
log.Fatal(err)
}
internal.MoveOverview(app, newServer, resources.SecretNames(), resources.VolumeNames())
if err := internal.PromptProcced(); err != nil {
return
}
// NOTE: wait timeout will be removed, until it actually is just set it to a high value.
stack.WaitTimeout = 500
rmOpts := stack.Remove{
Namespaces: []string{app.StackName()},
Detach: false,
}
if err := stack.RunRemove(context.Background(), cl, rmOpts); err != nil {
log.Fatal(err)
}
cl2, err := client.New(newServer)
if err != nil {
log.Fatal(err)
}
for _, s := range resources.SecretList {
sname := strings.Split(strings.TrimPrefix(s.Spec.Name, app.StackName()+"_"), "_")
secretName := strings.Join(sname[:len(sname)-1], "_")
data := resources.Secrets[secretName]
if err := client.StoreSecret(cl2, s.Spec.Name, data); err != nil {
log.Infof("creating secret: %s", s.Spec.Name)
decentral1se marked this conversation as resolved
Review

Is this log.Infof in the right place? Seems like it should log earlier in the success case?

Is this `log.Infof` in the right place? Seems like it should log earlier in the success case?
log.Errorf("failed to store secret on new server: %s", err)
}
}
for _, v := range resources.Volumes {
log.Infof("moving volume: %s", v.Name)
// Need to create the volume before copying the data, because when
// docker creates a new volume it set the folder permissions to
// root, which might be wrong. This ensures we always have the
// correct folder permissions inside the volume.
log.Debug("creating volume: %s", v.Name)
decentral1se marked this conversation as resolved
Review

log.Debugf? (there are more cases of this below)

`log.Debugf`? (there are more cases of this below)
_, err := cl2.VolumeCreate(context.Background(), volume.CreateOptions{
Name: v.Name,
Driver: v.Driver,
})
if err != nil {
log.Errorf("failed to create volume: %s", err)
}
fileName := fmt.Sprintf("%s.tar.gz", v.Name)
log.Debug("creating %s", fileName)
cmd := exec.Command("ssh", app.Server, "-tt", fmt.Sprintf("sudo tar --same-owner -czhpf %s -C /var/lib/docker/volumes %s", fileName, v.Name))
if out, err := cmd.CombinedOutput(); err != nil {
log.Errorf("failed to tar volume: %s", err)
fmt.Println(string(out))
}
log.Debug("copying %s to local machine", fileName)
cmd = exec.Command("scp", fmt.Sprintf("%s:%s", app.Server, fileName), fileName)
if out, err := cmd.CombinedOutput(); err != nil {
log.Errorf("failed to copy tar to local machine: %s", err)
fmt.Println(string(out))
}
log.Debug("copying %s to %s", fileName, newServer)
cmd = exec.Command("scp", fileName, fmt.Sprintf("%s:%s", newServer, fileName))
if out, err := cmd.CombinedOutput(); err != nil {
log.Errorf("failed to copy tar to new server: %s", err)
fmt.Println(string(out))
}
log.Debug("extracting %s on %s", fileName, newServer)
cmd = exec.Command("ssh", newServer, "-tt", fmt.Sprintf("sudo tar --same-owner -xzpf %s -C /var/lib/docker/volumes", fileName))
if out, err := cmd.CombinedOutput(); err != nil {
log.Errorf("failed to extract tar: %s", err)
fmt.Println(string(out))
}
// Remove tar files
cmd = exec.Command("ssh", newServer, "-tt", fmt.Sprintf("sudo rm %s", fileName))
if out, err := cmd.CombinedOutput(); err != nil {
log.Errorf("failed to remove tar from new server: %s", err)
fmt.Println(string(out))
}
cmd = exec.Command("ssh", app.Server, "-tt", fmt.Sprintf("sudo rm %s", fileName))
if out, err := cmd.CombinedOutput(); err != nil {
log.Errorf("failed to remove tar from old server: %s", err)
fmt.Println(string(out))
}
cmd = exec.Command("rm", fileName)
if out, err := cmd.CombinedOutput(); err != nil {
log.Errorf("failed to remove tar on local machine: %s", err)
fmt.Println(string(out))
}
}
log.Debug("moving app config to new server")
if err := copyFile(app.Path, strings.ReplaceAll(app.Path, app.Server, newServer)); err != nil {
log.Fatal(err)
}
if err := os.Remove(app.Path); err != nil {
log.Fatal(err)
}
fmt.Println("% was succefully moved to %s", app.Name, newServer)
fmt.Println("Run the following command to deploy the app", app.Name, newServer)
fmt.Println(" abra app deploy --no-domain-checks", app.Domain)
fmt.Println()
fmt.Println("And don't forget to update you DNS record. And don't panic, as it might take a bit for the dust to settle. Traefik for example might fail to obtain the lets encrypt certificate for a while.", app.Domain)
decentral1se marked this conversation as resolved
Review

I like this but I think the DNS record part should be in the main --help description. I wouldn't mention Traefik in the case in the future that other proxies are used (I think Caddy is already being used...) and this is out-of-date / potentially confusing.

I like this but I think the DNS record part should be in the main `--help` description. I wouldn't mention Traefik in the case in the future that other proxies are used (I think Caddy is already being used...) and this is out-of-date / potentially confusing.
fmt.Println()
fmt.Println("If anything goes wrong, you can always move the app config file to the original server and deploy it there again. There was no data removed on the old server")
decentral1se marked this conversation as resolved
Review

I would add this really centrally to the main --help output instead so people know beforehand!

I would add this really centrally to the main `--help` output instead so people know beforehand!
return
},
}
// AppResources holds the secrets and volumes that belong to a deployed app,
// as collected by getAppResources.
type AppResources struct {
	// Secrets maps a bare secret name (stack prefix and version suffix
	// stripped) to its plaintext value as read from the app server.
	Secrets map[string]string
	// SecretList holds the swarm secrets as returned by the docker API.
	SecretList []swarm.Secret
	// Volumes maps a volume name to its mount point on a service container.
	Volumes map[string]containertypes.MountPoint
}
// SecretNames returns the bare names of all collected secrets. Order is not
// deterministic (map iteration).
func (a *AppResources) SecretNames() []string {
	// Pre-size the slice: the final length is known up front.
	secrets := make([]string, 0, len(a.Secrets))
	for name := range a.Secrets {
		secrets = append(secrets, name)
	}
	return secrets
}
// VolumeNames returns the names of all collected volumes. Order is not
// deterministic (map iteration).
func (a *AppResources) VolumeNames() []string {
	// Pre-size the slice: the final length is known up front.
	volumes := make([]string, 0, len(a.Volumes))
	for name := range a.Volumes {
		volumes = append(volumes, name)
	}
	return volumes
}
func getAppResources(cl *dockerclient.Client, app app.App) (*AppResources, error) {
filter, err := app.Filters(false, false)
if err != nil {
return nil, err
}
services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: filter})
if err != nil {
return nil, err
}
composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
if err != nil {
return nil, err
}
secretList, err := cl.SecretList(context.Background(), types.SecretListOptions{Filters: filter})
if err != nil {
log.Fatal(err)
}
secretConfigs, err := secret.ReadSecretsConfig(app.Path, composeFiles, app.StackName())
if err != nil {
log.Fatal(err)
}
opts := stack.Deploy{Composefiles: composeFiles, Namespace: app.StackName()}
compose, err := appPkg.GetAppComposeConfig(app.Name, opts, app.Env)
if err != nil {
log.Fatal(err)
}
resources := &AppResources{
Secrets: make(map[string]string),
SecretList: secretList,
Volumes: make(map[string]containertypes.MountPoint),
}
for _, s := range services {
secretNames := map[string]string{}
for _, serviceCompose := range compose.Services {
if app.StackName()+"_"+serviceCompose.Name != s.Spec.Name {
continue
}
for _, secret := range serviceCompose.Secrets {
for _, s := range secretList {
if s.Spec.Name == app.StackName()+"_"+secret.Source+"_"+secretConfigs[secret.Source].Version {
secretNames[secret.Source] = s.ID
break
}
}
}
}
f := filters.NewArgs()
f.Add("name", s.Spec.Name)
targetContainer, err := containerPkg.GetContainer(context.Background(), cl, f, true)
if err != nil {
log.Error(err)
continue
decentral1se marked this conversation as resolved
Review

This code used to be in a for loop and now isn't? continue looks suspect here.

This code used to be in a for loop and now isn't? `continue` looks suspect here.
}
for _, m := range targetContainer.Mounts {
if m.Type == mount.TypeVolume {
resources.Volumes[m.Name] = m
}
}
for secretName, secretID := range secretNames {
if _, ok := resources.Secrets[secretName]; ok {
continue
}
log.Debugf("extracting secret %s", secretName)
out, err := exec.Command("ssh", app.Server, "-tt", fmt.Sprintf("sudo cat /var/lib/docker/containers/%s/mounts/secrets/%s", targetContainer.ID, secretID)).Output()
decentral1se marked this conversation as resolved
Review

I think it should mention that tar / cat are required on the target server in the main --help output. You might also consider doing a check up-front at the start of the command execution? It will be slower but less painful if things are missing...

I think it should mention that `tar` / `cat` are required on the target server in the main `--help` output. You might also consider doing a check up-front at the start of the command execution? It will be slower but less painful if things are missing...
if err != nil {
fmt.Println(string(out))
fmt.Println(err)
continue
}
resources.Secrets[secretName] = string(out)
}
}
return resources, nil
}
func copyFile(src string, dst string) error {
// Read all content of src to data, may cause OOM for a large file.
data, err := os.ReadFile(src)
if err != nil {
return err
}
// Write data to dst
err = os.WriteFile(dst, data, 0o644)
if err != nil {
return err
}
return nil
}
// init registers the --dry-run/-r flag on the move command.
func init() {
	flags := AppMoveCommand.Flags()
	flags.BoolVarP(&internal.Dry, "dry-run", "r", false, "report changes that would be made")
}

View File

@ -224,7 +224,7 @@ environment. Typically, you can let Abra generate them for you on app creation
}
secretName := fmt.Sprintf("%s_%s_%s", app.StackName(), name, version)
if err := client.StoreSecret(cl, secretName, data, app.Server); err != nil {
if err := client.StoreSecret(cl, secretName, data); err != nil {
log.Fatal(err)
}

View File

@ -1,6 +1,7 @@
package internal
import (
"errors"
"fmt"
"os"
"sort"
@ -140,6 +141,59 @@ func getDeployType(currentVersion, newVersion string) string {
return "DOWNGRADE"
}
// MoveOverview shows a overview before moving an app to a different server
func MoveOverview(
app appPkg.App,
newServer string,
secrets []string,
volumes []string,
) {
server := app.Server
if app.Server == "default" {
server = "local"
}
domain := app.Domain
if domain == "" {
domain = config.NO_DOMAIN_DEFAULT
}
rows := [][]string{
{"DOMAIN", domain},
{"RECIPE", app.Recipe.Name},
{"OLD SERVER", server},
{"New SERVER", newServer},
decentral1se marked this conversation as resolved
Review

NEW

`NEW`
{"SECRETS", strings.Join(secrets, "\n")},
{"VOLUMES", strings.Join(volumes, "\n")},
}
overview := formatter.CreateOverview("MOVE OVERVIEW", rows)
fmt.Println(overview)
}
// PromptProcced asks the user to confirm before proceeding. It returns nil
// when the user confirms or --no-input is set, and a non-nil error when the
// user cancels, a dry run is requested, or the prompt itself fails.
func PromptProcced() error {
	if NoInput {
		return nil
	}
	if Dry {
		return fmt.Errorf("dry run")
	}

	proceed := false
	confirm := &survey.Confirm{Message: "proceed?"}
	if err := survey.AskOne(confirm, &proceed); err != nil {
		return err
	}
	if !proceed {
		return errors.New("cancelled")
	}
	return nil
}
// PostCmds parses a string of commands and executes them inside of the respective services
// the commands string must have the following format:
// "<service> <command> <arguments>|<service> <command> <arguments>|... "

View File

@ -204,6 +204,7 @@ func Run(version, commit string) {
app.AppRestartCommand,
app.AppRestoreCommand,
app.AppRollbackCommand,
app.AppMoveCommand,
app.AppRunCommand,
app.AppSecretCommand,
app.AppServicesCommand,

View File

@ -7,7 +7,7 @@ import (
"github.com/docker/docker/client"
)
func StoreSecret(cl *client.Client, secretName, secretValue, server string) error {
func StoreSecret(cl *client.Client, secretName, secretValue string) error {
ann := swarm.Annotations{Name: secretName}
spec := swarm.SecretSpec{Annotations: ann, Data: []byte(secretValue)}

View File

@ -216,7 +216,7 @@ func GenerateSecrets(cl *dockerClient.Client, secrets map[string]Secret, server
return
}
if err := client.StoreSecret(cl, secret.RemoteName, password, server); err != nil {
if err := client.StoreSecret(cl, secret.RemoteName, password); err != nil {
if strings.Contains(err.Error(), "AlreadyExists") {
log.Warnf("%s already exists", secret.RemoteName)
ch <- nil
@ -236,7 +236,7 @@ func GenerateSecrets(cl *dockerClient.Client, secrets map[string]Secret, server
return
}
if err := client.StoreSecret(cl, secret.RemoteName, passphrase, server); err != nil {
if err := client.StoreSecret(cl, secret.RemoteName, passphrase); err != nil {
if strings.Contains(err.Error(), "AlreadyExists") {
log.Warnf("%s already exists", secret.RemoteName)
ch <- nil

View File

@ -76,7 +76,7 @@ func RunRemove(ctx context.Context, client *apiclient.Client, opts Remove) error
continue
}
log.Info("polling undeploy status")
log.Debug("polling undeploy status")
decentral1se marked this conversation as resolved
Review

That seems like a somewhat significant "off-topic" change to the undeploy screen for this PR. I have no strong feelings about it but we regularly see reported issues when the output of deploy/undeploy changes. If you are happy to deal with that, then I'm fine with it 🙃

That seems like a somewhat significant "off-topic" change to the undeploy screen for this PR. I have no strong feelings about it but we regularly see reported issues when the output of `deploy`/`undeploy` changes. If you are happy to deal with that, then I'm fine with it 🙃
timeout, err := waitOnTasks(ctx, client, namespace)
if timeout {
errs = append(errs, err.Error())