chore: migrate all upstream code to own dir

2021-10-21 19:35:13 +02:00
parent 80921c9f55
commit dc04cf5ff7
20 changed files with 15 additions and 15 deletions


@@ -1,130 +0,0 @@
package container
import (
"context"
"errors"
"fmt"
"io"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
apiclient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string, execConfig *types.ExecConfig) error {
ctx := context.Background()
// We need to check the tty _before_ we do the ContainerExecCreate, because
// otherwise if we error out we will leak execIDs on the server (and
// there's no easy way to clean those up). But also in order to make "not
// exist" errors take precedence we do a dummy inspect first.
if _, err := client.ContainerInspect(ctx, containerID); err != nil {
return err
}
if !execConfig.Detach {
if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil {
return err
}
}
response, err := client.ContainerExecCreate(ctx, containerID, *execConfig)
if err != nil {
return err
}
execID := response.ID
if execID == "" {
return errors.New("exec ID empty")
}
if execConfig.Detach {
execStartCheck := types.ExecStartCheck{
Detach: execConfig.Detach,
Tty: execConfig.Tty,
}
return client.ContainerExecStart(ctx, execID, execStartCheck)
}
return interactiveExec(ctx, dockerCli, client, execConfig, execID)
}
func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclient.Client,
execConfig *types.ExecConfig, execID string) error {
// Interactive exec requested.
var (
out, stderr io.Writer
in io.ReadCloser
)
if execConfig.AttachStdin {
in = dockerCli.In()
}
if execConfig.AttachStdout {
out = dockerCli.Out()
}
if execConfig.AttachStderr {
if execConfig.Tty {
stderr = dockerCli.Out()
} else {
stderr = dockerCli.Err()
}
}
execStartCheck := types.ExecStartCheck{
Tty: execConfig.Tty,
}
resp, err := client.ContainerExecAttach(ctx, execID, execStartCheck)
if err != nil {
return err
}
defer resp.Close()
errCh := make(chan error, 1)
go func() {
defer close(errCh)
errCh <- func() error {
streamer := hijackedIOStreamer{
streams: dockerCli,
inputStream: in,
outputStream: out,
errorStream: stderr,
resp: resp,
tty: execConfig.Tty,
detachKeys: execConfig.DetachKeys,
}
return streamer.stream(ctx)
}()
}()
if execConfig.Tty && dockerCli.In().IsTerminal() {
if err := MonitorTtySize(ctx, client, dockerCli, execID, true); err != nil {
fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
}
}
if err := <-errCh; err != nil {
logrus.Debugf("Error hijack: %s", err)
return err
}
return getExecExitStatus(ctx, client, execID)
}
func getExecExitStatus(ctx context.Context, client apiclient.ContainerAPIClient, execID string) error {
resp, err := client.ContainerExecInspect(ctx, execID)
if err != nil {
// If we can't connect, then the daemon probably died.
if !apiclient.IsErrConnectionFailed(err) {
return err
}
return cli.StatusError{StatusCode: -1}
}
status := resp.ExitCode
if status != 0 {
return cli.StatusError{StatusCode: status}
}
return nil
}
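
For orientation, a minimal sketch (not part of the diff) of how RunExec might be driven from within the same package; the helper name, the container ID parameter and the shell command are placeholders.

// runInteractiveShell is a hypothetical same-package helper, not part of the
// original file; dockerCli and cl are assumed to be constructed by the caller.
func runInteractiveShell(dockerCli command.Cli, cl *apiclient.Client, containerID string) error {
	execConfig := &types.ExecConfig{
		AttachStdin:  true,
		AttachStdout: true,
		AttachStderr: true,
		Tty:          true,
		Cmd:          []string{"/bin/sh"},
	}
	// Detach is false, so RunExec attaches to the exec instance and streams
	// until the command exits or the detach key sequence is pressed.
	return RunExec(dockerCli, cl, containerID, execConfig)
}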


@@ -1,208 +0,0 @@
package container
import (
"context"
"fmt"
"io"
"runtime"
"sync"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/stdcopy"
"github.com/moby/term"
"github.com/sirupsen/logrus"
)
// The default escape key sequence: ctrl-p, ctrl-q
// TODO: This could be moved to `pkg/term`.
var defaultEscapeKeys = []byte{16, 17}
// A hijackedIOStreamer handles copying input to and output from streams to the
// connection.
type hijackedIOStreamer struct {
streams command.Streams
inputStream io.ReadCloser
outputStream io.Writer
errorStream io.Writer
resp types.HijackedResponse
tty bool
detachKeys string
}
// stream handles setting up the IO and then begins streaming stdin/stdout
// to/from the hijacked connection, blocking until it is either done reading
// output, the user inputs the detach key sequence when in TTY mode, or when
// the given context is cancelled.
func (h *hijackedIOStreamer) stream(ctx context.Context) error {
restoreInput, err := h.setupInput()
if err != nil {
return fmt.Errorf("unable to setup input stream: %s", err)
}
defer restoreInput()
outputDone := h.beginOutputStream(restoreInput)
inputDone, detached := h.beginInputStream(restoreInput)
select {
case err := <-outputDone:
return err
case <-inputDone:
// Input stream has closed.
if h.outputStream != nil || h.errorStream != nil {
// Wait for output to complete streaming.
select {
case err := <-outputDone:
return err
case <-ctx.Done():
return ctx.Err()
}
}
return nil
case err := <-detached:
// Got a detach key sequence.
return err
case <-ctx.Done():
return ctx.Err()
}
}
func (h *hijackedIOStreamer) setupInput() (restore func(), err error) {
if h.inputStream == nil || !h.tty {
// No need to setup input TTY.
// The restore func is a nop.
return func() {}, nil
}
if err := setRawTerminal(h.streams); err != nil {
return nil, fmt.Errorf("unable to set IO streams as raw terminal: %s", err)
}
// Use sync.Once so we may call restore multiple times but ensure we
// only restore the terminal once.
var restoreOnce sync.Once
restore = func() {
restoreOnce.Do(func() {
restoreTerminal(h.streams, h.inputStream)
})
}
// Wrap the input to detect detach escape sequence.
// Use default escape keys if an invalid sequence is given.
escapeKeys := defaultEscapeKeys
if h.detachKeys != "" {
customEscapeKeys, err := term.ToBytes(h.detachKeys)
if err != nil {
logrus.Warnf("invalid detach escape keys, using default: %s", err)
} else {
escapeKeys = customEscapeKeys
}
}
h.inputStream = ioutils.NewReadCloserWrapper(term.NewEscapeProxy(h.inputStream, escapeKeys), h.inputStream.Close)
return restore, nil
}
func (h *hijackedIOStreamer) beginOutputStream(restoreInput func()) <-chan error {
if h.outputStream == nil && h.errorStream == nil {
// There is no need to copy output.
return nil
}
outputDone := make(chan error)
go func() {
var err error
// When TTY is ON, use regular copy
if h.outputStream != nil && h.tty {
_, err = io.Copy(h.outputStream, h.resp.Reader)
// We should restore the terminal as soon as possible
// once the connection ends so any following print
// messages will be in normal type.
restoreInput()
} else {
_, err = stdcopy.StdCopy(h.outputStream, h.errorStream, h.resp.Reader)
}
logrus.Debug("[hijack] End of stdout")
if err != nil {
logrus.Debugf("Error receiveStdout: %s", err)
}
outputDone <- err
}()
return outputDone
}
func (h *hijackedIOStreamer) beginInputStream(restoreInput func()) (doneC <-chan struct{}, detachedC <-chan error) {
inputDone := make(chan struct{})
detached := make(chan error)
go func() {
if h.inputStream != nil {
_, err := io.Copy(h.resp.Conn, h.inputStream)
// We should restore the terminal as soon as possible
// once the connection ends so any following print
// messages will be in normal type.
restoreInput()
logrus.Debug("[hijack] End of stdin")
if _, ok := err.(term.EscapeError); ok {
detached <- err
return
}
if err != nil {
// This error will also occur on the receive
// side (from stdout) where it will be
// propagated back to the caller.
logrus.Debugf("Error sendStdin: %s", err)
}
}
if err := h.resp.CloseWrite(); err != nil {
logrus.Debugf("Couldn't send EOF: %s", err)
}
close(inputDone)
}()
return inputDone, detached
}
func setRawTerminal(streams command.Streams) error {
if err := streams.In().SetRawTerminal(); err != nil {
return err
}
return streams.Out().SetRawTerminal()
}
// nolint: unparam
func restoreTerminal(streams command.Streams, in io.Closer) error {
streams.In().RestoreTerminal()
streams.Out().RestoreTerminal()
// WARNING: DO NOT REMOVE THE OS CHECKS !!!
// For some reason this Close call blocks on darwin..
// As the client exits right after, simply discard the close
// until we find a better solution.
//
// This can also cause the client on Windows to get stuck in Win32 CloseHandle()
// in some cases. See https://github.com/docker/docker/issues/28267#issuecomment-288237442
// Tracked internally at Microsoft by VSO #11352156. In the
// Windows case, you hit this if you are using the native/v2 console,
// not the "legacy" console, and you start the client in a new window. eg
// `start docker run --rm -it microsoft/nanoserver cmd /s /c echo foobar`
// will hang. Remove start, and it won't repro.
if in != nil && runtime.GOOS != "darwin" && runtime.GOOS != "windows" {
return in.Close()
}
return nil
}
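
As a side note, the detach-keys handling above is just term.ToBytes plus a fallback; a small same-package sketch (not from the original file) of that fallback behaviour.

// escapeKeysFor is a hypothetical helper mirroring the fallback in setupInput:
// an invalid sequence falls back to the default ctrl-p, ctrl-q bytes.
func escapeKeysFor(detachKeys string) []byte {
	keys, err := term.ToBytes(detachKeys)
	if err != nil {
		return defaultEscapeKeys
	}
	return keys
}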


@@ -1,98 +0,0 @@
package container
import (
"context"
"fmt"
"os"
gosignal "os/signal"
"runtime"
"time"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
apiclient "github.com/docker/docker/client"
"github.com/moby/sys/signal"
"github.com/sirupsen/logrus"
)
// resizeTtyTo resizes tty to specific height and width
func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) error {
if height == 0 && width == 0 {
return nil
}
options := types.ResizeOptions{
Height: height,
Width: width,
}
var err error
if isExec {
err = client.ContainerExecResize(ctx, id, options)
} else {
err = client.ContainerResize(ctx, id, options)
}
if err != nil {
logrus.Debugf("Error resize: %s\r", err)
}
return err
}
// resizeTty resizes the tty to match the size of the cli's output stream
func resizeTty(ctx context.Context, client *apiclient.Client, cli command.Cli, id string, isExec bool) error {
height, width := cli.Out().GetTtySize()
return resizeTtyTo(ctx, client, id, height, width, isExec)
}
// initTtySize initializes the tty size to match the window; if resizing fails, it retries up to 5 times.
func initTtySize(ctx context.Context, client *apiclient.Client, cli command.Cli, id string, isExec bool, resizeTtyFunc func(ctx context.Context, client *apiclient.Client, cli command.Cli, id string, isExec bool) error) {
rttyFunc := resizeTtyFunc
if rttyFunc == nil {
rttyFunc = resizeTty
}
if err := rttyFunc(ctx, client, cli, id, isExec); err != nil {
go func() {
var err error
for retry := 0; retry < 5; retry++ {
time.Sleep(10 * time.Millisecond)
if err = rttyFunc(ctx, client, cli, id, isExec); err == nil {
break
}
}
if err != nil {
fmt.Fprintln(cli.Err(), "failed to resize tty, using default size")
}
}()
}
}
// MonitorTtySize updates the container tty size when the terminal tty changes size
func MonitorTtySize(ctx context.Context, client *apiclient.Client, cli command.Cli, id string, isExec bool) error {
initTtySize(ctx, client, cli, id, isExec, resizeTty)
if runtime.GOOS == "windows" {
go func() {
prevH, prevW := cli.Out().GetTtySize()
for {
time.Sleep(time.Millisecond * 250)
h, w := cli.Out().GetTtySize()
if prevW != w || prevH != h {
resizeTty(ctx, client, cli, id, isExec)
}
prevH = h
prevW = w
}
}()
} else {
sigchan := make(chan os.Signal, 1)
gosignal.Notify(sigchan, signal.SIGWINCH)
go func() {
for range sigchan {
resizeTty(ctx, client, cli, id, isExec)
}
}()
}
return nil
}


@@ -1,199 +0,0 @@
package convert
import (
"io/ioutil"
"strings"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types"
networktypes "github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/swarm"
)
const (
// LabelNamespace is the label used to track stack resources
LabelNamespace = "com.docker.stack.namespace"
)
// Namespace mangles names by prepending the name
type Namespace struct {
name string
}
// Scope prepends the namespace to a name
func (n Namespace) Scope(name string) string {
return n.name + "_" + name
}
// Descope returns the name without the namespace prefix
func (n Namespace) Descope(name string) string {
return strings.TrimPrefix(name, n.name+"_")
}
// Name returns the name of the namespace
func (n Namespace) Name() string {
return n.name
}
// NewNamespace returns a new Namespace for scoping of names
func NewNamespace(name string) Namespace {
return Namespace{name: name}
}
// AddStackLabel returns labels with the namespace label added
func AddStackLabel(namespace Namespace, labels map[string]string) map[string]string {
if labels == nil {
labels = make(map[string]string)
}
labels[LabelNamespace] = namespace.name
return labels
}
type networkMap map[string]composetypes.NetworkConfig
// Networks converts networks from the compose-file type to the engine API type
func Networks(namespace Namespace, networks networkMap, servicesNetworks map[string]struct{}) (map[string]types.NetworkCreate, []string) {
if networks == nil {
networks = make(map[string]composetypes.NetworkConfig)
}
externalNetworks := []string{}
result := make(map[string]types.NetworkCreate)
for internalName := range servicesNetworks {
network := networks[internalName]
if network.External.External {
externalNetworks = append(externalNetworks, network.Name)
continue
}
createOpts := types.NetworkCreate{
Labels: AddStackLabel(namespace, network.Labels),
Driver: network.Driver,
Options: network.DriverOpts,
Internal: network.Internal,
Attachable: network.Attachable,
}
if network.Ipam.Driver != "" || len(network.Ipam.Config) > 0 {
createOpts.IPAM = &networktypes.IPAM{}
}
if network.Ipam.Driver != "" {
createOpts.IPAM.Driver = network.Ipam.Driver
}
for _, ipamConfig := range network.Ipam.Config {
config := networktypes.IPAMConfig{
Subnet: ipamConfig.Subnet,
}
createOpts.IPAM.Config = append(createOpts.IPAM.Config, config)
}
networkName := namespace.Scope(internalName)
if network.Name != "" {
networkName = network.Name
}
result[networkName] = createOpts
}
return result, externalNetworks
}
// Secrets converts secrets from the Compose type to the engine API type
func Secrets(namespace Namespace, secrets map[string]composetypes.SecretConfig) ([]swarm.SecretSpec, error) {
result := []swarm.SecretSpec{}
for name, secret := range secrets {
if secret.External.External {
continue
}
var obj swarmFileObject
var err error
if secret.Driver != "" {
obj = driverObjectConfig(namespace, name, composetypes.FileObjectConfig(secret))
} else {
obj, err = fileObjectConfig(namespace, name, composetypes.FileObjectConfig(secret))
}
if err != nil {
return nil, err
}
spec := swarm.SecretSpec{Annotations: obj.Annotations, Data: obj.Data}
if secret.Driver != "" {
spec.Driver = &swarm.Driver{
Name: secret.Driver,
Options: secret.DriverOpts,
}
}
if secret.TemplateDriver != "" {
spec.Templating = &swarm.Driver{
Name: secret.TemplateDriver,
}
}
result = append(result, spec)
}
return result, nil
}
// Configs converts config objects from the Compose type to the engine API type
func Configs(namespace Namespace, configs map[string]composetypes.ConfigObjConfig) ([]swarm.ConfigSpec, error) {
result := []swarm.ConfigSpec{}
for name, config := range configs {
if config.External.External {
continue
}
obj, err := fileObjectConfig(namespace, name, composetypes.FileObjectConfig(config))
if err != nil {
return nil, err
}
spec := swarm.ConfigSpec{Annotations: obj.Annotations, Data: obj.Data}
if config.TemplateDriver != "" {
spec.Templating = &swarm.Driver{
Name: config.TemplateDriver,
}
}
result = append(result, spec)
}
return result, nil
}
type swarmFileObject struct {
Annotations swarm.Annotations
Data []byte
}
func driverObjectConfig(namespace Namespace, name string, obj composetypes.FileObjectConfig) swarmFileObject {
if obj.Name != "" {
name = obj.Name
} else {
name = namespace.Scope(name)
}
return swarmFileObject{
Annotations: swarm.Annotations{
Name: name,
Labels: AddStackLabel(namespace, obj.Labels),
},
Data: []byte{},
}
}
func fileObjectConfig(namespace Namespace, name string, obj composetypes.FileObjectConfig) (swarmFileObject, error) {
data, err := ioutil.ReadFile(obj.File)
if err != nil {
return swarmFileObject{}, err
}
if obj.Name != "" {
name = obj.Name
} else {
name = namespace.Scope(name)
}
return swarmFileObject{
Annotations: swarm.Annotations{
Name: name,
Labels: AddStackLabel(namespace, obj.Labels),
},
Data: data,
}, nil
}
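
To make the scoping rules above concrete, a short same-package sketch (not part of the diff) for a stack named "myapp"; all names are placeholders.

// exampleNamespace is a hypothetical illustration of Scope, Descope and
// AddStackLabel for a stack called "myapp".
func exampleNamespace() {
	ns := NewNamespace("myapp")
	scoped := ns.Scope("db")   // "myapp_db"
	_ = ns.Descope(scoped)     // "db"
	_ = AddStackLabel(ns, nil) // adds "com.docker.stack.namespace": "myapp"
}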


@@ -1,861 +0,0 @@
package convert
import (
"context"
"fmt"
"os"
"sort"
"strings"
"time"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
"github.com/docker/go-units"
"github.com/pkg/errors"
)
const (
defaultNetwork = "default"
// LabelImage is the label used to store image name provided in the compose file
LabelImage = "com.docker.stack.image"
)
// ParseSecrets retrieves the secrets with the requested names and fills
// secret IDs into the secret references.
func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.SecretReference) ([]*swarmtypes.SecretReference, error) {
if len(requestedSecrets) == 0 {
return []*swarmtypes.SecretReference{}, nil
}
secretRefs := make(map[string]*swarmtypes.SecretReference)
ctx := context.Background()
for _, secret := range requestedSecrets {
if _, exists := secretRefs[secret.File.Name]; exists {
return nil, errors.Errorf("duplicate secret target for %s not allowed", secret.SecretName)
}
secretRef := new(swarmtypes.SecretReference)
*secretRef = *secret
secretRefs[secret.File.Name] = secretRef
}
args := filters.NewArgs()
for _, s := range secretRefs {
args.Add("name", s.SecretName)
}
secrets, err := client.SecretList(ctx, types.SecretListOptions{
Filters: args,
})
if err != nil {
return nil, err
}
foundSecrets := make(map[string]string)
for _, secret := range secrets {
foundSecrets[secret.Spec.Annotations.Name] = secret.ID
}
addedSecrets := []*swarmtypes.SecretReference{}
for _, ref := range secretRefs {
id, ok := foundSecrets[ref.SecretName]
if !ok {
return nil, errors.Errorf("secret not found: %s", ref.SecretName)
}
// set the id for the ref to properly assign in swarm
// since swarm needs the ID instead of the name
ref.SecretID = id
addedSecrets = append(addedSecrets, ref)
}
return addedSecrets, nil
}
// ParseConfigs retrieves the configs from the requested names and converts
// them to config references to use with the spec
func ParseConfigs(client client.ConfigAPIClient, requestedConfigs []*swarmtypes.ConfigReference) ([]*swarmtypes.ConfigReference, error) {
if len(requestedConfigs) == 0 {
return []*swarmtypes.ConfigReference{}, nil
}
// the configRefs map has two purposes: it prevents duplication of config
// target filenames, and it is used to get all configs so we can resolve
// their IDs. Unfortunately, there are other targets for ConfigReferences,
// besides just a File; specifically, the Runtime target, which is used for
// CredentialSpecs. Therefore, we need to have a list of ConfigReferences
// that are not File targets as well. At the time of writing, the only use
// for Runtime targets is CredentialSpecs. However, to future-proof this
// functionality, we should handle the case where multiple Runtime targets
// are in use for the same Config, and we should deduplicate
// such ConfigReferences, as no matter how many times the Config is used,
// it only needs to be referenced once.
configRefs := make(map[string]*swarmtypes.ConfigReference)
runtimeRefs := make(map[string]*swarmtypes.ConfigReference)
ctx := context.Background()
for _, config := range requestedConfigs {
// copy the config, so we don't mutate the args
configRef := new(swarmtypes.ConfigReference)
*configRef = *config
if config.Runtime != nil {
// by assigning to a map based on ConfigName, if the same Config
// is required as a Runtime target for multiple purposes, we only
// include it once in the final set of configs.
runtimeRefs[config.ConfigName] = config
// continue, so we skip the logic below for handling file-type
// configs
continue
}
if _, exists := configRefs[config.File.Name]; exists {
return nil, errors.Errorf("duplicate config target for %s not allowed", config.ConfigName)
}
configRefs[config.File.Name] = configRef
}
args := filters.NewArgs()
for _, s := range configRefs {
args.Add("name", s.ConfigName)
}
for _, s := range runtimeRefs {
args.Add("name", s.ConfigName)
}
configs, err := client.ConfigList(ctx, types.ConfigListOptions{
Filters: args,
})
if err != nil {
return nil, err
}
foundConfigs := make(map[string]string)
for _, config := range configs {
foundConfigs[config.Spec.Annotations.Name] = config.ID
}
addedConfigs := []*swarmtypes.ConfigReference{}
for _, ref := range configRefs {
id, ok := foundConfigs[ref.ConfigName]
if !ok {
return nil, errors.Errorf("config not found: %s", ref.ConfigName)
}
// set the id for the ref to properly assign in swarm
// since swarm needs the ID instead of the name
ref.ConfigID = id
addedConfigs = append(addedConfigs, ref)
}
// Unfortunately, because configRefs and runtimeRefs are keyed on different
// values that may collide, we can't simply merge the two maps; we need to
// do two separate loops.
for _, ref := range runtimeRefs {
id, ok := foundConfigs[ref.ConfigName]
if !ok {
return nil, errors.Errorf("config not found: %s", ref.ConfigName)
}
ref.ConfigID = id
addedConfigs = append(addedConfigs, ref)
}
return addedConfigs, nil
}
// Services converts services from compose-file types to engine API types
func Services(
namespace Namespace,
config *composetypes.Config,
client client.CommonAPIClient,
) (map[string]swarm.ServiceSpec, error) {
result := make(map[string]swarm.ServiceSpec)
services := config.Services
volumes := config.Volumes
networks := config.Networks
for _, service := range services {
secrets, err := convertServiceSecrets(client, namespace, service.Secrets, config.Secrets)
if err != nil {
return nil, errors.Wrapf(err, "service %s", service.Name)
}
configs, err := convertServiceConfigObjs(client, namespace, service, config.Configs)
if err != nil {
return nil, errors.Wrapf(err, "service %s", service.Name)
}
serviceSpec, err := Service(client.ClientVersion(), namespace, service, networks, volumes, secrets, configs)
if err != nil {
return nil, errors.Wrapf(err, "service %s", service.Name)
}
result[service.Name] = serviceSpec
}
return result, nil
}
// Service converts a ServiceConfig into a swarm ServiceSpec
func Service(
apiVersion string,
namespace Namespace,
service composetypes.ServiceConfig,
networkConfigs map[string]composetypes.NetworkConfig,
volumes map[string]composetypes.VolumeConfig,
secrets []*swarm.SecretReference,
configs []*swarm.ConfigReference,
) (swarm.ServiceSpec, error) {
name := namespace.Scope(service.Name)
endpoint := convertEndpointSpec(service.Deploy.EndpointMode, service.Ports)
mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas)
if err != nil {
return swarm.ServiceSpec{}, err
}
mounts, err := Volumes(service.Volumes, volumes, namespace)
if err != nil {
return swarm.ServiceSpec{}, err
}
resources, err := convertResources(service.Deploy.Resources)
if err != nil {
return swarm.ServiceSpec{}, err
}
restartPolicy, err := convertRestartPolicy(
service.Restart, service.Deploy.RestartPolicy)
if err != nil {
return swarm.ServiceSpec{}, err
}
healthcheck, err := convertHealthcheck(service.HealthCheck)
if err != nil {
return swarm.ServiceSpec{}, err
}
networks, err := convertServiceNetworks(service.Networks, networkConfigs, namespace, service.Name)
if err != nil {
return swarm.ServiceSpec{}, err
}
dnsConfig := convertDNSConfig(service.DNS, service.DNSSearch)
var privileges swarm.Privileges
privileges.CredentialSpec, err = convertCredentialSpec(
namespace, service.CredentialSpec, configs,
)
if err != nil {
return swarm.ServiceSpec{}, err
}
var logDriver *swarm.Driver
if service.Logging != nil {
logDriver = &swarm.Driver{
Name: service.Logging.Driver,
Options: service.Logging.Options,
}
}
capAdd, capDrop := opts.EffectiveCapAddCapDrop(service.CapAdd, service.CapDrop)
serviceSpec := swarm.ServiceSpec{
Annotations: swarm.Annotations{
Name: name,
Labels: AddStackLabel(namespace, service.Deploy.Labels),
},
TaskTemplate: swarm.TaskSpec{
ContainerSpec: &swarm.ContainerSpec{
Image: service.Image,
Command: service.Entrypoint,
Args: service.Command,
Hostname: service.Hostname,
Hosts: convertExtraHosts(service.ExtraHosts),
DNSConfig: dnsConfig,
Healthcheck: healthcheck,
Env: sortStrings(convertEnvironment(service.Environment)),
Labels: AddStackLabel(namespace, service.Labels),
Dir: service.WorkingDir,
User: service.User,
Mounts: mounts,
StopGracePeriod: composetypes.ConvertDurationPtr(service.StopGracePeriod),
StopSignal: service.StopSignal,
TTY: service.Tty,
OpenStdin: service.StdinOpen,
Secrets: secrets,
Configs: configs,
ReadOnly: service.ReadOnly,
Privileges: &privileges,
Isolation: container.Isolation(service.Isolation),
Init: service.Init,
Sysctls: service.Sysctls,
CapabilityAdd: capAdd,
CapabilityDrop: capDrop,
Ulimits: convertUlimits(service.Ulimits),
},
LogDriver: logDriver,
Resources: resources,
RestartPolicy: restartPolicy,
Placement: &swarm.Placement{
Constraints: service.Deploy.Placement.Constraints,
Preferences: getPlacementPreference(service.Deploy.Placement.Preferences),
MaxReplicas: service.Deploy.Placement.MaxReplicas,
},
},
EndpointSpec: endpoint,
Mode: mode,
UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig),
RollbackConfig: convertUpdateConfig(service.Deploy.RollbackConfig),
}
// add an image label to serviceSpec
serviceSpec.Labels[LabelImage] = service.Image
// ServiceSpec.Networks is deprecated and should not have been used by
// this package. It is possible to update TaskTemplate.Networks, but it
// is not possible to update ServiceSpec.Networks. Unfortunately, we
// can't unconditionally start using TaskTemplate.Networks, because that
// will break with older daemons that don't support migrating from
// ServiceSpec.Networks to TaskTemplate.Networks. So which field to use
// is conditional on daemon version.
if versions.LessThan(apiVersion, "1.29") {
serviceSpec.Networks = networks
} else {
serviceSpec.TaskTemplate.Networks = networks
}
return serviceSpec, nil
}
func getPlacementPreference(preferences []composetypes.PlacementPreferences) []swarm.PlacementPreference {
result := []swarm.PlacementPreference{}
for _, preference := range preferences {
spreadDescriptor := preference.Spread
result = append(result, swarm.PlacementPreference{
Spread: &swarm.SpreadOver{
SpreadDescriptor: spreadDescriptor,
},
})
}
return result
}
func sortStrings(strs []string) []string {
sort.Strings(strs)
return strs
}
func convertServiceNetworks(
networks map[string]*composetypes.ServiceNetworkConfig,
networkConfigs networkMap,
namespace Namespace,
name string,
) ([]swarm.NetworkAttachmentConfig, error) {
if len(networks) == 0 {
networks = map[string]*composetypes.ServiceNetworkConfig{
defaultNetwork: {},
}
}
nets := []swarm.NetworkAttachmentConfig{}
for networkName, network := range networks {
networkConfig, ok := networkConfigs[networkName]
if !ok && networkName != defaultNetwork {
return nil, errors.Errorf("undefined network %q", networkName)
}
var aliases []string
if network != nil {
aliases = network.Aliases
}
target := namespace.Scope(networkName)
if networkConfig.Name != "" {
target = networkConfig.Name
}
netAttachConfig := swarm.NetworkAttachmentConfig{
Target: target,
Aliases: aliases,
}
// Only add default aliases to user defined networks. Other networks do
// not support aliases.
if container.NetworkMode(target).IsUserDefined() {
netAttachConfig.Aliases = append(netAttachConfig.Aliases, name)
}
nets = append(nets, netAttachConfig)
}
sort.Slice(nets, func(i, j int) bool {
return nets[i].Target < nets[j].Target
})
return nets, nil
}
// TODO: fix secrets API so that SecretAPIClient is not required here
func convertServiceSecrets(
client client.SecretAPIClient,
namespace Namespace,
secrets []composetypes.ServiceSecretConfig,
secretSpecs map[string]composetypes.SecretConfig,
) ([]*swarm.SecretReference, error) {
refs := []*swarm.SecretReference{}
lookup := func(key string) (composetypes.FileObjectConfig, error) {
secretSpec, exists := secretSpecs[key]
if !exists {
return composetypes.FileObjectConfig{}, errors.Errorf("undefined secret %q", key)
}
return composetypes.FileObjectConfig(secretSpec), nil
}
for _, secret := range secrets {
obj, err := convertFileObject(namespace, composetypes.FileReferenceConfig(secret), lookup)
if err != nil {
return nil, err
}
file := swarm.SecretReferenceFileTarget(obj.File)
refs = append(refs, &swarm.SecretReference{
File: &file,
SecretName: obj.Name,
})
}
secrs, err := ParseSecrets(client, refs)
if err != nil {
return nil, err
}
// sort to ensure idempotence (don't restart services just because the entries are in different order)
sort.SliceStable(secrs, func(i, j int) bool { return secrs[i].SecretName < secrs[j].SecretName })
return secrs, err
}
// convertServiceConfigObjs takes an API client, a namespace, a ServiceConfig,
// and a set of compose Config specs, and creates the swarm ConfigReferences
// required by the service. Unlike convertServiceSecrets, this takes the whole
// ServiceConfig, because some Configs may be needed as a result of other
// fields (like CredentialSpecs).
//
// TODO: fix configs API so that ConfigsAPIClient is not required here
func convertServiceConfigObjs(
client client.ConfigAPIClient,
namespace Namespace,
service composetypes.ServiceConfig,
configSpecs map[string]composetypes.ConfigObjConfig,
) ([]*swarm.ConfigReference, error) {
refs := []*swarm.ConfigReference{}
lookup := func(key string) (composetypes.FileObjectConfig, error) {
configSpec, exists := configSpecs[key]
if !exists {
return composetypes.FileObjectConfig{}, errors.Errorf("undefined config %q", key)
}
return composetypes.FileObjectConfig(configSpec), nil
}
for _, config := range service.Configs {
obj, err := convertFileObject(namespace, composetypes.FileReferenceConfig(config), lookup)
if err != nil {
return nil, err
}
file := swarm.ConfigReferenceFileTarget(obj.File)
refs = append(refs, &swarm.ConfigReference{
File: &file,
ConfigName: obj.Name,
})
}
// finally, after converting all of the file objects, create any
// Runtime-type configs that are needed. these are configs that are not
// mounted into the container, but are used in some other way by the
// container runtime. Currently, this only means CredentialSpecs, but in
// the future it may be used for other fields
// grab the CredentialSpec out of the Service
credSpec := service.CredentialSpec
// if the credSpec uses a config, then we should grab the config name, and
// create a config reference for it. A File or Registry-type CredentialSpec
// does not need this operation.
if credSpec.Config != "" {
// look up the config in the configSpecs.
obj, err := lookup(credSpec.Config)
if err != nil {
return nil, err
}
// get the actual correct name.
name := namespace.Scope(credSpec.Config)
if obj.Name != "" {
name = obj.Name
}
// now append a Runtime-type config.
refs = append(refs, &swarm.ConfigReference{
ConfigName: name,
Runtime: &swarm.ConfigReferenceRuntimeTarget{},
})
}
confs, err := ParseConfigs(client, refs)
if err != nil {
return nil, err
}
// sort to ensure idempotence (don't restart services just because the entries are in different order)
sort.SliceStable(confs, func(i, j int) bool { return confs[i].ConfigName < confs[j].ConfigName })
return confs, err
}
type swarmReferenceTarget struct {
Name string
UID string
GID string
Mode os.FileMode
}
type swarmReferenceObject struct {
File swarmReferenceTarget
ID string
Name string
}
func convertFileObject(
namespace Namespace,
config composetypes.FileReferenceConfig,
lookup func(key string) (composetypes.FileObjectConfig, error),
) (swarmReferenceObject, error) {
obj, err := lookup(config.Source)
if err != nil {
return swarmReferenceObject{}, err
}
source := namespace.Scope(config.Source)
if obj.Name != "" {
source = obj.Name
}
target := config.Target
if target == "" {
target = config.Source
}
uid := config.UID
gid := config.GID
if uid == "" {
uid = "0"
}
if gid == "" {
gid = "0"
}
mode := config.Mode
if mode == nil {
mode = uint32Ptr(0444)
}
return swarmReferenceObject{
File: swarmReferenceTarget{
Name: target,
UID: uid,
GID: gid,
Mode: os.FileMode(*mode),
},
Name: source,
}, nil
}
func uint32Ptr(value uint32) *uint32 {
return &value
}
// convertExtraHosts converts <host>:<ip> mappings to SwarmKit notation:
// "IP-address hostname(s)". The original order of mappings is preserved.
func convertExtraHosts(extraHosts composetypes.HostsList) []string {
hosts := []string{}
for _, hostIP := range extraHosts {
if v := strings.SplitN(hostIP, ":", 2); len(v) == 2 {
// Convert to SwarmKit notation: IP-address hostname(s)
hosts = append(hosts, fmt.Sprintf("%s %s", v[1], v[0]))
}
}
return hosts
}
func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container.HealthConfig, error) {
if healthcheck == nil {
return nil, nil
}
var (
timeout, interval, startPeriod time.Duration
retries int
)
if healthcheck.Disable {
if len(healthcheck.Test) != 0 {
return nil, errors.Errorf("test and disable can't be set at the same time")
}
return &container.HealthConfig{
Test: []string{"NONE"},
}, nil
}
if healthcheck.Timeout != nil {
timeout = time.Duration(*healthcheck.Timeout)
}
if healthcheck.Interval != nil {
interval = time.Duration(*healthcheck.Interval)
}
if healthcheck.StartPeriod != nil {
startPeriod = time.Duration(*healthcheck.StartPeriod)
}
if healthcheck.Retries != nil {
retries = int(*healthcheck.Retries)
}
return &container.HealthConfig{
Test: healthcheck.Test,
Timeout: timeout,
Interval: interval,
Retries: retries,
StartPeriod: startPeriod,
}, nil
}
func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (*swarm.RestartPolicy, error) {
// TODO: log if restart is being ignored
if source == nil {
policy, err := opts.ParseRestartPolicy(restart)
if err != nil {
return nil, err
}
switch {
case policy.IsNone():
return nil, nil
case policy.IsAlways(), policy.IsUnlessStopped():
return &swarm.RestartPolicy{
Condition: swarm.RestartPolicyConditionAny,
}, nil
case policy.IsOnFailure():
attempts := uint64(policy.MaximumRetryCount)
return &swarm.RestartPolicy{
Condition: swarm.RestartPolicyConditionOnFailure,
MaxAttempts: &attempts,
}, nil
default:
return nil, errors.Errorf("unknown restart policy: %s", restart)
}
}
return &swarm.RestartPolicy{
Condition: swarm.RestartPolicyCondition(source.Condition),
Delay: composetypes.ConvertDurationPtr(source.Delay),
MaxAttempts: source.MaxAttempts,
Window: composetypes.ConvertDurationPtr(source.Window),
}, nil
}
func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig {
if source == nil {
return nil
}
parallel := uint64(1)
if source.Parallelism != nil {
parallel = *source.Parallelism
}
return &swarm.UpdateConfig{
Parallelism: parallel,
Delay: time.Duration(source.Delay),
FailureAction: source.FailureAction,
Monitor: time.Duration(source.Monitor),
MaxFailureRatio: source.MaxFailureRatio,
Order: source.Order,
}
}
func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) {
resources := &swarm.ResourceRequirements{}
var err error
if source.Limits != nil {
var cpus int64
if source.Limits.NanoCPUs != "" {
cpus, err = opts.ParseCPUs(source.Limits.NanoCPUs)
if err != nil {
return nil, err
}
}
resources.Limits = &swarm.Limit{
NanoCPUs: cpus,
MemoryBytes: int64(source.Limits.MemoryBytes),
Pids: source.Limits.Pids,
}
}
if source.Reservations != nil {
var cpus int64
if source.Reservations.NanoCPUs != "" {
cpus, err = opts.ParseCPUs(source.Reservations.NanoCPUs)
if err != nil {
return nil, err
}
}
var generic []swarm.GenericResource
for _, res := range source.Reservations.GenericResources {
var r swarm.GenericResource
if res.DiscreteResourceSpec != nil {
r.DiscreteResourceSpec = &swarm.DiscreteGenericResource{
Kind: res.DiscreteResourceSpec.Kind,
Value: res.DiscreteResourceSpec.Value,
}
}
generic = append(generic, r)
}
resources.Reservations = &swarm.Resources{
NanoCPUs: cpus,
MemoryBytes: int64(source.Reservations.MemoryBytes),
GenericResources: generic,
}
}
return resources, nil
}
func convertEndpointSpec(endpointMode string, source []composetypes.ServicePortConfig) *swarm.EndpointSpec {
portConfigs := []swarm.PortConfig{}
for _, port := range source {
portConfig := swarm.PortConfig{
Protocol: swarm.PortConfigProtocol(port.Protocol),
TargetPort: port.Target,
PublishedPort: port.Published,
PublishMode: swarm.PortConfigPublishMode(port.Mode),
}
portConfigs = append(portConfigs, portConfig)
}
sort.Slice(portConfigs, func(i, j int) bool {
return portConfigs[i].PublishedPort < portConfigs[j].PublishedPort
})
return &swarm.EndpointSpec{
Mode: swarm.ResolutionMode(strings.ToLower(endpointMode)),
Ports: portConfigs,
}
}
func convertEnvironment(source map[string]*string) []string {
var output []string
for name, value := range source {
switch value {
case nil:
output = append(output, name)
default:
output = append(output, fmt.Sprintf("%s=%s", name, *value))
}
}
return output
}
func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) {
serviceMode := swarm.ServiceMode{}
switch mode {
case "global":
if replicas != nil {
return serviceMode, errors.Errorf("replicas can only be used with replicated mode")
}
serviceMode.Global = &swarm.GlobalService{}
case "replicated", "":
serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas}
default:
return serviceMode, errors.Errorf("Unknown mode: %s", mode)
}
return serviceMode, nil
}
func convertDNSConfig(DNS []string, DNSSearch []string) *swarm.DNSConfig {
if DNS != nil || DNSSearch != nil {
return &swarm.DNSConfig{
Nameservers: DNS,
Search: DNSSearch,
}
}
return nil
}
func convertCredentialSpec(namespace Namespace, spec composetypes.CredentialSpecConfig, refs []*swarm.ConfigReference) (*swarm.CredentialSpec, error) {
var o []string
// Config was added in API v1.40
if spec.Config != "" {
o = append(o, `"Config"`)
}
if spec.File != "" {
o = append(o, `"File"`)
}
if spec.Registry != "" {
o = append(o, `"Registry"`)
}
l := len(o)
switch {
case l == 0:
return nil, nil
case l == 2:
return nil, errors.Errorf("invalid credential spec: cannot specify both %s and %s", o[0], o[1])
case l > 2:
return nil, errors.Errorf("invalid credential spec: cannot specify both %s, and %s", strings.Join(o[:l-1], ", "), o[l-1])
}
swarmCredSpec := swarm.CredentialSpec(spec)
// if we're using a swarm Config for the credential spec, over-write it
// here with the config ID
if swarmCredSpec.Config != "" {
for _, config := range refs {
if swarmCredSpec.Config == config.ConfigName {
swarmCredSpec.Config = config.ConfigID
return &swarmCredSpec, nil
}
}
// if none of the configs match, try namespacing
for _, config := range refs {
if namespace.Scope(swarmCredSpec.Config) == config.ConfigName {
swarmCredSpec.Config = config.ConfigID
return &swarmCredSpec, nil
}
}
return nil, errors.Errorf("invalid credential spec: spec specifies config %v, but no such config can be found", swarmCredSpec.Config)
}
return &swarmCredSpec, nil
}
func convertUlimits(origUlimits map[string]*composetypes.UlimitsConfig) []*units.Ulimit {
newUlimits := make(map[string]*units.Ulimit)
for name, u := range origUlimits {
if u.Single != 0 {
newUlimits[name] = &units.Ulimit{
Name: name,
Soft: int64(u.Single),
Hard: int64(u.Single),
}
} else {
newUlimits[name] = &units.Ulimit{
Name: name,
Soft: int64(u.Soft),
Hard: int64(u.Hard),
}
}
}
var ulimits []*units.Ulimit
for _, ulimit := range newUlimits {
ulimits = append(ulimits, ulimit)
}
sort.SliceStable(ulimits, func(i, j int) bool {
return ulimits[i].Name < ulimits[j].Name
})
return ulimits
}
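
A brief same-package sketch (not part of the diff) of what two of the smaller helpers above produce; the input values are placeholders.

// exampleConversions is a hypothetical illustration of convertExtraHosts and
// convertEnvironment.
func exampleConversions() {
	hosts := convertExtraHosts(composetypes.HostsList{"db:10.0.0.2"})
	// hosts == []string{"10.0.0.2 db"} (SwarmKit "IP hostname" notation)

	debug := "1"
	env := sortStrings(convertEnvironment(map[string]*string{"DEBUG": &debug, "TERM": nil}))
	// env == []string{"DEBUG=1", "TERM"}

	_, _ = hosts, env
}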


@@ -1,162 +0,0 @@
package convert
import (
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types/mount"
"github.com/pkg/errors"
)
type volumes map[string]composetypes.VolumeConfig
// Volumes converts volumes from compose-file types to engine API types
func Volumes(serviceVolumes []composetypes.ServiceVolumeConfig, stackVolumes volumes, namespace Namespace) ([]mount.Mount, error) {
var mounts []mount.Mount
for _, volumeConfig := range serviceVolumes {
mount, err := convertVolumeToMount(volumeConfig, stackVolumes, namespace)
if err != nil {
return nil, err
}
mounts = append(mounts, mount)
}
return mounts, nil
}
func createMountFromVolume(volume composetypes.ServiceVolumeConfig) mount.Mount {
return mount.Mount{
Type: mount.Type(volume.Type),
Target: volume.Target,
ReadOnly: volume.ReadOnly,
Source: volume.Source,
Consistency: mount.Consistency(volume.Consistency),
}
}
func handleVolumeToMount(
volume composetypes.ServiceVolumeConfig,
stackVolumes volumes,
namespace Namespace,
) (mount.Mount, error) {
result := createMountFromVolume(volume)
if volume.Tmpfs != nil {
return mount.Mount{}, errors.New("tmpfs options are incompatible with type volume")
}
if volume.Bind != nil {
return mount.Mount{}, errors.New("bind options are incompatible with type volume")
}
// Anonymous volumes
if volume.Source == "" {
return result, nil
}
stackVolume, exists := stackVolumes[volume.Source]
if !exists {
return mount.Mount{}, errors.Errorf("undefined volume %q", volume.Source)
}
result.Source = namespace.Scope(volume.Source)
result.VolumeOptions = &mount.VolumeOptions{}
if volume.Volume != nil {
result.VolumeOptions.NoCopy = volume.Volume.NoCopy
}
if stackVolume.Name != "" {
result.Source = stackVolume.Name
}
// External named volumes
if stackVolume.External.External {
return result, nil
}
result.VolumeOptions.Labels = AddStackLabel(namespace, stackVolume.Labels)
if stackVolume.Driver != "" || stackVolume.DriverOpts != nil {
result.VolumeOptions.DriverConfig = &mount.Driver{
Name: stackVolume.Driver,
Options: stackVolume.DriverOpts,
}
}
return result, nil
}
func handleBindToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, error) {
result := createMountFromVolume(volume)
if volume.Source == "" {
return mount.Mount{}, errors.New("invalid bind source, source cannot be empty")
}
if volume.Volume != nil {
return mount.Mount{}, errors.New("volume options are incompatible with type bind")
}
if volume.Tmpfs != nil {
return mount.Mount{}, errors.New("tmpfs options are incompatible with type bind")
}
if volume.Bind != nil {
result.BindOptions = &mount.BindOptions{
Propagation: mount.Propagation(volume.Bind.Propagation),
}
}
return result, nil
}
func handleTmpfsToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, error) {
result := createMountFromVolume(volume)
if volume.Source != "" {
return mount.Mount{}, errors.New("invalid tmpfs source, source must be empty")
}
if volume.Bind != nil {
return mount.Mount{}, errors.New("bind options are incompatible with type tmpfs")
}
if volume.Volume != nil {
return mount.Mount{}, errors.New("volume options are incompatible with type tmpfs")
}
if volume.Tmpfs != nil {
result.TmpfsOptions = &mount.TmpfsOptions{
SizeBytes: volume.Tmpfs.Size,
}
}
return result, nil
}
func handleNpipeToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, error) {
result := createMountFromVolume(volume)
if volume.Source == "" {
return mount.Mount{}, errors.New("invalid npipe source, source cannot be empty")
}
if volume.Volume != nil {
return mount.Mount{}, errors.New("volume options are incompatible with type npipe")
}
if volume.Tmpfs != nil {
return mount.Mount{}, errors.New("tmpfs options are incompatible with type npipe")
}
if volume.Bind != nil {
result.BindOptions = &mount.BindOptions{
Propagation: mount.Propagation(volume.Bind.Propagation),
}
}
return result, nil
}
func convertVolumeToMount(
volume composetypes.ServiceVolumeConfig,
stackVolumes volumes,
namespace Namespace,
) (mount.Mount, error) {
switch volume.Type {
case "volume", "":
return handleVolumeToMount(volume, stackVolumes, namespace)
case "bind":
return handleBindToMount(volume)
case "tmpfs":
return handleTmpfsToMount(volume)
case "npipe":
return handleNpipeToMount(volume)
}
return mount.Mount{}, errors.New("volume type must be volume, bind, tmpfs or npipe")
}
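
For reference, a minimal same-package sketch (not part of the diff) of converting a single bind mount; the socket path is a placeholder, and no stack-level volumes are needed for binds.

// exampleBindMount is a hypothetical illustration of Volumes with one bind mount.
func exampleBindMount(ns Namespace) ([]mount.Mount, error) {
	service := []composetypes.ServiceVolumeConfig{{
		Type:     "bind",
		Source:   "/var/run/docker.sock",
		Target:   "/var/run/docker.sock",
		ReadOnly: true,
	}}
	return Volumes(service, volumes{}, ns)
}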


@@ -1,131 +0,0 @@
package stack
import (
"fmt"
"io/ioutil"
"path/filepath"
"sort"
"strings"
"github.com/docker/cli/cli/compose/loader"
"github.com/docker/cli/cli/compose/schema"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/sirupsen/logrus"
)
// LoadComposefile parses the composefile specified in the cli and returns its Config and version.
func LoadComposefile(opts Deploy, appEnv map[string]string) (*composetypes.Config, error) {
configDetails, err := getConfigDetails(opts.Composefiles, appEnv)
if err != nil {
return nil, err
}
dicts := getDictsFrom(configDetails.ConfigFiles)
config, err := loader.Load(configDetails)
if err != nil {
if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok {
return nil, fmt.Errorf("compose file contains unsupported options:\n\n%s",
propertyWarnings(fpe.Properties))
}
return nil, err
}
unsupportedProperties := loader.GetUnsupportedProperties(dicts...)
if len(unsupportedProperties) > 0 {
logrus.Warnf("Ignoring unsupported options: %s\n\n",
strings.Join(unsupportedProperties, ", "))
}
deprecatedProperties := loader.GetDeprecatedProperties(dicts...)
if len(deprecatedProperties) > 0 {
logrus.Warnf("Ignoring deprecated options:\n\n%s\n\n",
propertyWarnings(deprecatedProperties))
}
return config, nil
}
func getDictsFrom(configFiles []composetypes.ConfigFile) []map[string]interface{} {
dicts := []map[string]interface{}{}
for _, configFile := range configFiles {
dicts = append(dicts, configFile.Config)
}
return dicts
}
func propertyWarnings(properties map[string]string) string {
var msgs []string
for name, description := range properties {
msgs = append(msgs, fmt.Sprintf("%s: %s", name, description))
}
sort.Strings(msgs)
return strings.Join(msgs, "\n\n")
}
func getConfigDetails(composefiles []string, appEnv map[string]string) (composetypes.ConfigDetails, error) {
var details composetypes.ConfigDetails
absPath, err := filepath.Abs(composefiles[0])
if err != nil {
return details, err
}
details.WorkingDir = filepath.Dir(absPath)
details.ConfigFiles, err = loadConfigFiles(composefiles)
if err != nil {
return details, err
}
// Take the first file's version (two files can't have different versions)
details.Version = schema.Version(details.ConfigFiles[0].Config)
details.Environment = appEnv
return details, err
}
func buildEnvironment(env []string) (map[string]string, error) {
result := make(map[string]string, len(env))
for _, s := range env {
// if value is empty, s is like "K=", not "K".
if !strings.Contains(s, "=") {
return result, fmt.Errorf("unexpected environment %q", s)
}
kv := strings.SplitN(s, "=", 2)
result[kv[0]] = kv[1]
}
return result, nil
}
func loadConfigFiles(filenames []string) ([]composetypes.ConfigFile, error) {
var configFiles []composetypes.ConfigFile
for _, filename := range filenames {
configFile, err := loadConfigFile(filename)
if err != nil {
return configFiles, err
}
configFiles = append(configFiles, *configFile)
}
return configFiles, nil
}
func loadConfigFile(filename string) (*composetypes.ConfigFile, error) {
var bytes []byte
var err error
bytes, err = ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
config, err := loader.ParseYAML(bytes)
if err != nil {
return nil, err
}
return &composetypes.ConfigFile{
Filename: filename,
Config: config,
}, nil
}
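
A minimal same-package sketch (not part of the diff) of calling LoadComposefile; the file name and environment values are placeholders.

// exampleLoad is a hypothetical illustration of loading one compose file with
// an app environment.
func exampleLoad() (*composetypes.Config, error) {
	opts := Deploy{Composefiles: []string{"compose.yml"}}
	appEnv := map[string]string{"DOMAIN": "example.com"}
	return LoadComposefile(opts, appEnv)
}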


@@ -1,15 +0,0 @@
package stack
// Deploy holds docker stack deploy options
type Deploy struct {
Composefiles []string
Namespace string
ResolveImage string
SendRegistryAuth bool
Prune bool
}
// Remove holds docker stack remove options
type Remove struct {
Namespaces []string
}


@@ -1,138 +0,0 @@
package stack
import (
"context"
"fmt"
"sort"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
apiclient "github.com/docker/docker/client"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// RunRemove is the swarm implementation of docker stack remove
func RunRemove(ctx context.Context, client *apiclient.Client, opts Remove) error {
var errs []string
for _, namespace := range opts.Namespaces {
services, err := GetStackServices(ctx, client, namespace)
if err != nil {
return err
}
networks, err := getStackNetworks(ctx, client, namespace)
if err != nil {
return err
}
var secrets []swarm.Secret
if versions.GreaterThanOrEqualTo(client.ClientVersion(), "1.25") {
secrets, err = getStackSecrets(ctx, client, namespace)
if err != nil {
return err
}
}
var configs []swarm.Config
if versions.GreaterThanOrEqualTo(client.ClientVersion(), "1.30") {
configs, err = getStackConfigs(ctx, client, namespace)
if err != nil {
return err
}
}
if len(services)+len(networks)+len(secrets)+len(configs) == 0 {
logrus.Warning(fmt.Errorf("nothing found in stack: %s", namespace))
continue
}
hasError := removeServices(ctx, client, services)
hasError = removeSecrets(ctx, client, secrets) || hasError
hasError = removeConfigs(ctx, client, configs) || hasError
hasError = removeNetworks(ctx, client, networks) || hasError
if hasError {
errs = append(errs, fmt.Sprintf("failed to remove some resources from stack: %s", namespace))
}
}
if len(errs) > 0 {
return errors.Errorf(strings.Join(errs, "\n"))
}
return nil
}
func sortServiceByName(services []swarm.Service) func(i, j int) bool {
return func(i, j int) bool {
return services[i].Spec.Name < services[j].Spec.Name
}
}
func removeServices(
ctx context.Context,
client *apiclient.Client,
services []swarm.Service,
) bool {
var hasError bool
sort.Slice(services, sortServiceByName(services))
for _, service := range services {
logrus.Infof("removing service %s\n", service.Spec.Name)
if err := client.ServiceRemove(ctx, service.ID); err != nil {
hasError = true
logrus.Fatalf("failed to remove service %s: %s", service.ID, err)
}
}
return hasError
}
func removeNetworks(
ctx context.Context,
client *apiclient.Client,
networks []types.NetworkResource,
) bool {
var hasError bool
for _, network := range networks {
logrus.Infof("removing network %s\n", network.Name)
if err := client.NetworkRemove(ctx, network.ID); err != nil {
hasError = true
logrus.Fatalf("failed to remove network %s: %s", network.ID, err)
}
}
return hasError
}
func removeSecrets(
ctx context.Context,
client *apiclient.Client,
secrets []swarm.Secret,
) bool {
var hasError bool
for _, secret := range secrets {
logrus.Infof("Removing secret %s\n", secret.Spec.Name)
if err := client.SecretRemove(ctx, secret.ID); err != nil {
hasError = true
logrus.Fatalf("Failed to remove secret %s: %s", secret.ID, err)
}
}
return hasError
}
func removeConfigs(
ctx context.Context,
client *apiclient.Client,
configs []swarm.Config,
) bool {
var hasError bool
for _, config := range configs {
logrus.Infof("removing config %s\n", config.Spec.Name)
if err := client.ConfigRemove(ctx, config.ID); err != nil {
hasError = true
logrus.Fatalf("failed to remove config %s: %s", config.ID, err)
}
}
return hasError
}
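
A minimal same-package sketch (not part of the diff) of calling RunRemove for two stacks; cl is assumed to be an already-initialised API client and the stack names are placeholders.

// exampleRemove is a hypothetical illustration of RunRemove.
func exampleRemove(ctx context.Context, cl *apiclient.Client) error {
	return RunRemove(ctx, cl, Remove{Namespaces: []string{"myapp", "otherapp"}})
}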


@@ -1,439 +0,0 @@
package stack
import (
"context"
"fmt"
"strings"
abraClient "coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/client/convert"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
dockerclient "github.com/docker/docker/client"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Resolve image constants
const (
defaultNetworkDriver = "overlay"
ResolveImageAlways = "always"
ResolveImageChanged = "changed"
ResolveImageNever = "never"
)
type StackStatus struct {
Services []swarm.Service
Err error
}
func getStackFilter(namespace string) filters.Args {
filter := filters.NewArgs()
filter.Add("label", convert.LabelNamespace+"="+namespace)
return filter
}
func getStackServiceFilter(namespace string) filters.Args {
return getStackFilter(namespace)
}
func getAllStacksFilter() filters.Args {
filter := filters.NewArgs()
filter.Add("label", convert.LabelNamespace)
return filter
}
func GetStackServices(ctx context.Context, dockerclient client.APIClient, namespace string) ([]swarm.Service, error) {
return dockerclient.ServiceList(ctx, types.ServiceListOptions{Filters: getStackServiceFilter(namespace)})
}
// GetDeployedServicesByLabel filters services by label
func GetDeployedServicesByLabel(contextName string, label string) StackStatus {
cl, err := abraClient.New(contextName)
if err != nil {
if strings.Contains(err.Error(), "does not exist") {
// No local context found, bail out gracefully
return StackStatus{[]swarm.Service{}, nil}
}
return StackStatus{[]swarm.Service{}, err}
}
ctx := context.Background()
filters := filters.NewArgs()
filters.Add("label", label)
services, err := cl.ServiceList(ctx, types.ServiceListOptions{Filters: filters})
if err != nil {
return StackStatus{[]swarm.Service{}, err}
}
return StackStatus{services, nil}
}
func GetAllDeployedServices(contextName string) StackStatus {
cl, err := abraClient.New(contextName)
if err != nil {
if strings.Contains(err.Error(), "does not exist") {
// No local context found, bail out gracefully
return StackStatus{[]swarm.Service{}, nil}
}
return StackStatus{[]swarm.Service{}, err}
}
ctx := context.Background()
services, err := cl.ServiceList(ctx, types.ServiceListOptions{Filters: getAllStacksFilter()})
if err != nil {
return StackStatus{[]swarm.Service{}, err}
}
return StackStatus{services, nil}
}
// GetDeployedServicesByName filters services by name
func GetDeployedServicesByName(ctx context.Context, cl *dockerclient.Client, stackName, serviceName string) StackStatus {
filters := filters.NewArgs()
filters.Add("name", fmt.Sprintf("%s_%s", stackName, serviceName))
services, err := cl.ServiceList(ctx, types.ServiceListOptions{Filters: filters})
if err != nil {
return StackStatus{[]swarm.Service{}, err}
}
return StackStatus{services, nil}
}
// IsDeployed checks whether an app is deployed or not.
func IsDeployed(ctx context.Context, cl *dockerclient.Client, stackName string) (bool, string, error) {
version := ""
isDeployed := false
filter := filters.NewArgs()
filter.Add("label", fmt.Sprintf("%s=%s", convert.LabelNamespace, stackName))
services, err := cl.ServiceList(ctx, types.ServiceListOptions{Filters: filter})
if err != nil {
return false, version, err
}
if len(services) > 0 {
for _, service := range services {
labelKey := fmt.Sprintf("coop-cloud.%s.version", stackName)
if deployedVersion, ok := service.Spec.Labels[labelKey]; ok {
version = deployedVersion
break
}
}
logrus.Debugf("'%s' has been detected as deployed with version '%s'", stackName, version)
return true, version, nil
}
logrus.Debugf("'%s' has been detected as not deployed", stackName)
return isDeployed, version, nil
}
// pruneServices removes services that are no longer referenced in the source
func pruneServices(ctx context.Context, cl *dockerclient.Client, namespace convert.Namespace, services map[string]struct{}) {
oldServices, err := GetStackServices(ctx, cl, namespace.Name())
if err != nil {
logrus.Infof("Failed to list services: %s\n", err)
}
pruneServices := []swarm.Service{}
for _, service := range oldServices {
if _, exists := services[namespace.Descope(service.Spec.Name)]; !exists {
pruneServices = append(pruneServices, service)
}
}
removeServices(ctx, cl, pruneServices)
}
// RunDeploy is the swarm implementation of docker stack deploy
func RunDeploy(cl *dockerclient.Client, opts Deploy, cfg *composetypes.Config) error {
ctx := context.Background()
if err := validateResolveImageFlag(&opts); err != nil {
return err
}
// client side image resolution should not be done when the supported
// server version is older than 1.30
if versions.LessThan(cl.ClientVersion(), "1.30") {
opts.ResolveImage = ResolveImageNever
}
return deployCompose(ctx, cl, opts, cfg)
}
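// The following is a hypothetical same-package sketch, not part of the
// original file: a plausible way to combine LoadComposefile with RunDeploy,
// since RunDeploy expects an already-parsed *composetypes.Config.
func exampleDeploy(cl *dockerclient.Client, opts Deploy, appEnv map[string]string) error {
	cfg, err := LoadComposefile(opts, appEnv)
	if err != nil {
		return err
	}
	return RunDeploy(cl, opts, cfg)
}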
// validateResolveImageFlag validates the opts.resolveImage command line option
func validateResolveImageFlag(opts *Deploy) error {
switch opts.ResolveImage {
case ResolveImageAlways, ResolveImageChanged, ResolveImageNever:
return nil
default:
return errors.Errorf("Invalid option %s for flag --resolve-image", opts.ResolveImage)
}
}
func deployCompose(ctx context.Context, cl *dockerclient.Client, opts Deploy, config *composetypes.Config) error {
namespace := convert.NewNamespace(opts.Namespace)
if opts.Prune {
services := map[string]struct{}{}
for _, service := range config.Services {
services[service.Name] = struct{}{}
}
pruneServices(ctx, cl, namespace, services)
}
serviceNetworks := getServicesDeclaredNetworks(config.Services)
networks, externalNetworks := convert.Networks(namespace, config.Networks, serviceNetworks)
if err := validateExternalNetworks(ctx, cl, externalNetworks); err != nil {
return err
}
if err := createNetworks(ctx, cl, namespace, networks); err != nil {
return err
}
secrets, err := convert.Secrets(namespace, config.Secrets)
if err != nil {
return err
}
if err := createSecrets(ctx, cl, secrets); err != nil {
return err
}
configs, err := convert.Configs(namespace, config.Configs)
if err != nil {
return err
}
if err := createConfigs(ctx, cl, configs); err != nil {
return err
}
services, err := convert.Services(namespace, config, cl)
if err != nil {
return err
}
return deployServices(ctx, cl, services, namespace, opts.SendRegistryAuth, opts.ResolveImage)
}
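// getServicesDeclaredNetworks returns the set of network names referenced by
// the given services, falling back to "default" for services with no networks.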
func getServicesDeclaredNetworks(serviceConfigs []composetypes.ServiceConfig) map[string]struct{} {
serviceNetworks := map[string]struct{}{}
for _, serviceConfig := range serviceConfigs {
if len(serviceConfig.Networks) == 0 {
serviceNetworks["default"] = struct{}{}
continue
}
for network := range serviceConfig.Networks {
serviceNetworks[network] = struct{}{}
}
}
return serviceNetworks
}
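// validateExternalNetworks checks that all declared external networks exist
// and are swarm-scoped.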
func validateExternalNetworks(ctx context.Context, client dockerclient.NetworkAPIClient, externalNetworks []string) error {
for _, networkName := range externalNetworks {
if !container.NetworkMode(networkName).IsUserDefined() {
// Networks that are not user defined always exist on all nodes as
// local-scoped networks, so there's no need to inspect them.
continue
}
network, err := client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{})
switch {
case dockerclient.IsErrNotFound(err):
return errors.Errorf("network %q is declared as external, but could not be found. You need to create a swarm-scoped network before the stack is deployed", networkName)
case err != nil:
return err
case network.Scope != "swarm":
return errors.Errorf("network %q is declared as external, but it is not in the right scope: %q instead of \"swarm\"", networkName, network.Scope)
}
}
return nil
}
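// createSecrets creates the given secrets, updating any that already exist.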
func createSecrets(ctx context.Context, cl *dockerclient.Client, secrets []swarm.SecretSpec) error {
for _, secretSpec := range secrets {
secret, _, err := cl.SecretInspectWithRaw(ctx, secretSpec.Name)
switch {
case err == nil:
// secret already exists, so we update it
if err := cl.SecretUpdate(ctx, secret.ID, secret.Meta.Version, secretSpec); err != nil {
return errors.Wrapf(err, "failed to update secret %s", secretSpec.Name)
}
case dockerclient.IsErrNotFound(err):
// secret does not exist, so we create a new one.
logrus.Infof("Creating secret %s\n", secretSpec.Name)
if _, err := cl.SecretCreate(ctx, secretSpec); err != nil {
return errors.Wrapf(err, "failed to create secret %s", secretSpec.Name)
}
default:
return err
}
}
return nil
}
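// createConfigs creates the given configs, updating any that already exist.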
func createConfigs(ctx context.Context, cl *dockerclient.Client, configs []swarm.ConfigSpec) error {
for _, configSpec := range configs {
config, _, err := cl.ConfigInspectWithRaw(ctx, configSpec.Name)
switch {
case err == nil:
// config already exists, so we update it
if err := cl.ConfigUpdate(ctx, config.ID, config.Meta.Version, configSpec); err != nil {
return errors.Wrapf(err, "failed to update config %s", configSpec.Name)
}
case dockerclient.IsErrNotFound(err):
// config does not exist, so we create a new one.
logrus.Infof("Creating config %s\n", configSpec.Name)
if _, err := cl.ConfigCreate(ctx, configSpec); err != nil {
return errors.Wrapf(err, "failed to create config %s", configSpec.Name)
}
default:
return err
}
}
return nil
}
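// createNetworks creates any stack networks that do not exist yet, falling
// back to the default driver when none is specified.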
func createNetworks(ctx context.Context, cl *dockerclient.Client, namespace convert.Namespace, networks map[string]types.NetworkCreate) error {
existingNetworks, err := getStackNetworks(ctx, cl, namespace.Name())
if err != nil {
return err
}
existingNetworkMap := make(map[string]types.NetworkResource)
for _, network := range existingNetworks {
existingNetworkMap[network.Name] = network
}
for name, createOpts := range networks {
if _, exists := existingNetworkMap[name]; exists {
continue
}
if createOpts.Driver == "" {
createOpts.Driver = defaultNetworkDriver
}
logrus.Infof("Creating network %s\n", name)
if _, err := cl.NetworkCreate(ctx, name, createOpts); err != nil {
return errors.Wrapf(err, "failed to create network %s", name)
}
}
return nil
}
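// deployServices creates new services and updates existing ones under the
// namespace, resolving image digests according to the resolve-image mode.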
func deployServices(
ctx context.Context,
cl *dockerclient.Client,
services map[string]swarm.ServiceSpec,
namespace convert.Namespace,
sendAuth bool,
resolveImage string) error {
existingServices, err := GetStackServices(ctx, cl, namespace.Name())
if err != nil {
return err
}
existingServiceMap := make(map[string]swarm.Service)
for _, service := range existingServices {
existingServiceMap[service.Spec.Name] = service
}
for internalName, serviceSpec := range services {
var (
name = namespace.Scope(internalName)
image = serviceSpec.TaskTemplate.ContainerSpec.Image
encodedAuth string
)
// FIXME: disabled for now. Retrieving the registry auth token needs a
// `dockerCli` instance here, and we would rather not pull that entire
// module in for a feature we do not support yet.
if sendAuth {
// Retrieve encoded auth token from the image reference
// encodedAuth, err = command.RetrieveAuthTokenFromImage(ctx, dockerCli, image)
// if err != nil {
// return err
// }
}
if service, exists := existingServiceMap[name]; exists {
logrus.Infof("Updating service %s (id: %s)\n", name, service.ID)
updateOpts := types.ServiceUpdateOptions{EncodedRegistryAuth: encodedAuth}
switch resolveImage {
case ResolveImageAlways:
// image should be updated by the server using QueryRegistry
updateOpts.QueryRegistry = true
case ResolveImageChanged:
if image != service.Spec.Labels[convert.LabelImage] {
// Query the registry to resolve digest for the updated image
updateOpts.QueryRegistry = true
} else {
// image has not changed; update the serviceSpec with the
// existing information that was set by QueryRegistry on the
// previous deploy. Otherwise this will trigger an incorrect
// service update.
serviceSpec.TaskTemplate.ContainerSpec.Image = service.Spec.TaskTemplate.ContainerSpec.Image
}
default:
if image == service.Spec.Labels[convert.LabelImage] {
// image has not changed; update the serviceSpec with the
// existing information that was set by QueryRegistry on the
// previous deploy. Otherwise this will trigger an incorrect
// service update.
serviceSpec.TaskTemplate.ContainerSpec.Image = service.Spec.TaskTemplate.ContainerSpec.Image
}
}
// Stack deploy does not have a `--force` option. Preserve existing
// ForceUpdate value so that tasks are not re-deployed if not updated.
// TODO move this to API client?
serviceSpec.TaskTemplate.ForceUpdate = service.Spec.TaskTemplate.ForceUpdate
response, err := cl.ServiceUpdate(ctx, service.ID, service.Version, serviceSpec, updateOpts)
if err != nil {
return errors.Wrapf(err, "failed to update service %s", name)
}
for _, warning := range response.Warnings {
logrus.Warn(warning)
}
} else {
logrus.Infof("Creating service %s\n", name)
createOpts := types.ServiceCreateOptions{EncodedRegistryAuth: encodedAuth}
// query the registry unless the flag disabling it was set
if resolveImage == ResolveImageAlways || resolveImage == ResolveImageChanged {
createOpts.QueryRegistry = true
}
if _, err := cl.ServiceCreate(ctx, serviceSpec, createOpts); err != nil {
return errors.Wrapf(err, "failed to create service %s", name)
}
}
}
return nil
}
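// getStackNetworks lists all networks belonging to the given stack namespace.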
func getStackNetworks(ctx context.Context, cl dockerclient.APIClient, namespace string) ([]types.NetworkResource, error) {
return cl.NetworkList(ctx, types.NetworkListOptions{Filters: getStackFilter(namespace)})
}
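// getStackSecrets lists all secrets belonging to the given stack namespace.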
func getStackSecrets(ctx context.Context, cl dockerclient.APIClient, namespace string) ([]swarm.Secret, error) {
return cl.SecretList(ctx, types.SecretListOptions{Filters: getStackFilter(namespace)})
}
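// getStackConfigs lists all configs belonging to the given stack namespace.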
func getStackConfigs(ctx context.Context, cl dockerclient.APIClient, namespace string) ([]swarm.Config, error) {
return cl.ConfigList(ctx, types.ConfigListOptions{Filters: getStackFilter(namespace)})
}