forked from toolshed/abra
@@ -197,7 +197,25 @@ checkout as-is. Recipe commit hashes are also supported as values for

		log.Debugf("set waiting timeout to %d second(s)", stack.WaitTimeout)

		if err := stack.RunDeploy(cl, deployOpts, compose, app.Name, internal.DontWaitConverge); err != nil {
		serviceNames, err := appPkg.GetAppServiceNames(app.Name)
		if err != nil {
			log.Fatal(err)
		}

		f, err := app.Filters(true, false, serviceNames...)
		if err != nil {
			log.Fatal(err)
		}

		if err := stack.RunDeploy(
			cl,
			deployOpts,
			compose,
			app.Name,
			app.Server,
			internal.DontWaitConverge,
			f,
		); err != nil {
			log.Fatal(err)
		}

@@ -3,23 +3,14 @@ package app
import (
	"context"
	"fmt"
	"io"
	"os"
	"slices"
	"sync"
	"time"

	"coopcloud.tech/abra/cli/internal"
	appPkg "coopcloud.tech/abra/pkg/app"
	"coopcloud.tech/abra/pkg/autocomplete"
	"coopcloud.tech/abra/pkg/client"
	"coopcloud.tech/abra/pkg/log"
	"coopcloud.tech/abra/pkg/logs"
	"coopcloud.tech/abra/pkg/upstream/stack"
	"github.com/docker/docker/api/types"
	containerTypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	dockerClient "github.com/docker/docker/client"
	"github.com/spf13/cobra"
)

@@ -73,80 +64,25 @@ var AppLogsCommand = &cobra.Command{
			serviceNames = []string{args[1]}
		}

		if err = tailLogs(cl, app, serviceNames); err != nil {
		f, err := app.Filters(true, false, serviceNames...)
		if err != nil {
			log.Fatal(err)
		}

		opts := logs.TailOpts{
			AppName:  app.Name,
			Services: serviceNames,
			StdErr:   stdErr,
			Since:    sinceLogs,
			Filters:  f,
		}

		if err := logs.TailLogs(cl, opts); err != nil {
			log.Fatal(err)
		}
	},
}

// tailLogs prints logs for the given app with optional service names to be
// filtered on. It also checks if the latest task is not running and then
// prints the past tasks.
func tailLogs(cl *dockerClient.Client, app appPkg.App, serviceNames []string) error {
	f, err := app.Filters(true, false, serviceNames...)
	if err != nil {
		return err
	}

	services, err := cl.ServiceList(context.Background(), types.ServiceListOptions{Filters: f})
	if err != nil {
		return err
	}

	var wg sync.WaitGroup
	for _, service := range services {
		filters := filters.NewArgs()
		filters.Add("name", service.Spec.Name)
		tasks, err := cl.TaskList(context.Background(), types.TaskListOptions{Filters: f})
		if err != nil {
			return err
		}
		if len(tasks) > 0 {
			// Need to sort the tasks by the CreatedAt field in the inverse order.
			// Otherwise they are in the reversed order and not sorted properly.
			slices.SortFunc[[]swarm.Task](tasks, func(t1, t2 swarm.Task) int {
				return int(t2.Meta.CreatedAt.Unix() - t1.Meta.CreatedAt.Unix())
			})
			lastTask := tasks[0].Status
			if lastTask.State != swarm.TaskStateRunning {
				for _, task := range tasks {
					log.Errorf("[%s] %s State %s: %s", service.Spec.Name, task.Meta.CreatedAt.Format(time.RFC3339), task.Status.State, task.Status.Err)
				}
			}
		}

		// Collect the logs in a go routine, so the logs from all services are
		// collected in parallel.
		wg.Add(1)
		go func(serviceID string) {
			logs, err := cl.ServiceLogs(context.Background(), serviceID, containerTypes.LogsOptions{
				ShowStderr: true,
				ShowStdout: !stdErr,
				Since:      sinceLogs,
				Until:      "",
				Timestamps: true,
				Follow:     true,
				Tail:       "20",
				Details:    false,
			})
			if err != nil {
				log.Fatal(err)
			}
			defer logs.Close()

			_, err = io.Copy(os.Stdout, logs)
			if err != nil && err != io.EOF {
				log.Fatal(err)
			}
		}(service.ID)
	}

	// Wait for all log streams to be closed.
	wg.Wait()

	return nil
}

var (
	stdErr    bool
	sinceLogs string

@@ -4,6 +4,7 @@ import (
	"context"
	"encoding/json"
	"fmt"
	"sort"
	"strings"

	"coopcloud.tech/abra/cli/internal"
@@ -91,9 +92,14 @@ func showPSOutput(app appPkg.App, cl *dockerClient.Client, deployedVersion, chao
		return
	}

	services := compose.Services
	sort.Slice(services, func(i, j int) bool {
		return services[i].Name < services[j].Name
	})

	var rows [][]string
	allContainerStats := make(map[string]map[string]string)
	for _, service := range compose.Services {
	for _, service := range services {
		filters := filters.NewArgs()
		filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name))

@@ -9,8 +9,10 @@ import (
	"coopcloud.tech/abra/pkg/autocomplete"
	"coopcloud.tech/abra/pkg/client"
	"coopcloud.tech/abra/pkg/log"
	"coopcloud.tech/abra/pkg/ui"
	upstream "coopcloud.tech/abra/pkg/upstream/service"
	stack "coopcloud.tech/abra/pkg/upstream/stack"
	"github.com/docker/docker/api/types"
	"github.com/spf13/cobra"
)

@@ -93,13 +95,36 @@ Pass "--all-services/-a" to restart all services.`,
		for _, serviceName := range serviceNames {
			stackServiceName := fmt.Sprintf("%s_%s", app.StackName(), serviceName)

			service, _, err := cl.ServiceInspectWithRaw(
				context.Background(),
				stackServiceName,
				types.ServiceInspectOptions{},
			)
			if err != nil {
				log.Fatal(err)
			}

			log.Debugf("attempting to scale %s to 0", stackServiceName)

			if err := upstream.RunServiceScale(context.Background(), cl, stackServiceName, 0); err != nil {
				log.Fatal(err)
			}

			if err := stack.WaitOnService(context.Background(), cl, stackServiceName, app.Name); err != nil {
			f, err := app.Filters(true, false, serviceName)
			if err != nil {
				log.Fatal(err)
			}

			waitOpts := stack.WaitOpts{
				Services:   []ui.ServiceMeta{{Name: stackServiceName, ID: service.ID}},
				AppName:    app.Name,
				ServerName: app.Server,
				Filters:    f,
				NoLog:      true,
				Quiet:      true,
			}

			if err := stack.WaitOnServices(cmd.Context(), cl, waitOpts); err != nil {
				log.Fatal(err)
			}

@@ -110,7 +135,7 @@ Pass "--all-services/-a" to restart all services.`,
				log.Fatal(err)
			}

			if err := stack.WaitOnService(context.Background(), cl, stackServiceName, app.Name); err != nil {
			if err := stack.WaitOnServices(cmd.Context(), cl, waitOpts); err != nil {
				log.Fatal(err)
			}

@@ -194,7 +194,32 @@ beforehand. See "abra app backup" for more.`,
			log.Fatal(err)
		}

		if err := stack.RunDeploy(cl, deployOpts, compose, stackName, internal.DontWaitConverge); err != nil {
		stack.WaitTimeout, err = appPkg.GetTimeoutFromLabel(compose, stackName)
		if err != nil {
			log.Fatal(err)
		}

		log.Debugf("set waiting timeout to %d second(s)", stack.WaitTimeout)

		serviceNames, err := appPkg.GetAppServiceNames(app.Name)
		if err != nil {
			log.Fatal(err)
		}

		f, err := app.Filters(true, false, serviceNames...)
		if err != nil {
			log.Fatal(err)
		}

		if err := stack.RunDeploy(
			cl,
			deployOpts,
			compose,
			stackName,
			app.Server,
			internal.DontWaitConverge,
			f,
		); err != nil {
			log.Fatal(err)
		}

@@ -64,6 +64,24 @@ Passing "--prune/-p" does not remove those volumes.`,
			log.Fatal(err)
		}

		composeFiles, err := app.Recipe.GetComposeFiles(app.Env)
		if err != nil {
			log.Fatal(err)
		}

		opts := stack.Deploy{Composefiles: composeFiles, Namespace: stackName}
		compose, err := appPkg.GetAppComposeConfig(app.Name, opts, app.Env)
		if err != nil {
			log.Fatal(err)
		}

		stack.WaitTimeout, err = appPkg.GetTimeoutFromLabel(compose, stackName)
		if err != nil {
			log.Fatal(err)
		}

		log.Info("initialising undeploy")

		rmOpts := stack.Remove{
			Namespaces: []string{stackName},
			Detach:     false,
@@ -78,6 +96,8 @@ Passing "--prune/-p" does not remove those volumes.`,
			}
		}

		log.Info("undeploy succeeded 🟢")

		if err := app.WriteRecipeVersion(deployMeta.Version, false); err != nil {
			log.Fatalf("writing recipe version failed: %s", err)
		}

@@ -239,7 +239,25 @@ beforehand. See "abra app backup" for more.`,

		log.Debugf("set waiting timeout to %d second(s)", stack.WaitTimeout)

		if err := stack.RunDeploy(cl, deployOpts, compose, stackName, internal.DontWaitConverge); err != nil {
		serviceNames, err := appPkg.GetAppServiceNames(app.Name)
		if err != nil {
			log.Fatal(err)
		}

		f, err := app.Filters(true, false, serviceNames...)
		if err != nil {
			log.Fatal(err)
		}

		if err := stack.RunDeploy(
			cl,
			deployOpts,
			compose,
			stackName,
			app.Server,
			internal.DontWaitConverge,
			f,
		); err != nil {
			log.Fatal(err)
		}