From 239c925d66a7663b23f3f2a219ac9d574c7aa69e Mon Sep 17 00:00:00 2001 From: Roxie Gibson Date: Tue, 3 Aug 2021 08:49:16 +0100 Subject: [PATCH] WIP: foundations for app deploy --- client/convert/LICENSE | 191 ++++++++ client/convert/README.md | 8 + client/convert/compose.go | 199 ++++++++ client/convert/compose_test.go | 171 +++++++ client/convert/service.go | 861 +++++++++++++++++++++++++++++++++ client/convert/service_test.go | 678 ++++++++++++++++++++++++++ client/convert/volume.go | 162 +++++++ client/convert/volume_test.go | 361 ++++++++++++++ client/stack.go | 36 -- client/stack/loader.go | 133 +++++ client/stack/stack.go | 104 ++++ config/app.go | 9 +- go.mod | 3 + go.sum | 5 + 14 files changed, 2881 insertions(+), 40 deletions(-) create mode 100644 client/convert/LICENSE create mode 100644 client/convert/README.md create mode 100644 client/convert/compose.go create mode 100644 client/convert/compose_test.go create mode 100644 client/convert/service.go create mode 100644 client/convert/service_test.go create mode 100644 client/convert/volume.go create mode 100644 client/convert/volume_test.go delete mode 100644 client/stack.go create mode 100644 client/stack/loader.go create mode 100644 client/stack/stack.go diff --git a/client/convert/LICENSE b/client/convert/LICENSE new file mode 100644 index 00000000..9c8e20ab --- /dev/null +++ b/client/convert/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
+      In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2013-2017 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/client/convert/README.md b/client/convert/README.md
new file mode 100644
index 00000000..8f075164
--- /dev/null
+++ b/client/convert/README.md
@@ -0,0 +1,8 @@
+# github.com/docker/cli/cli/compose/convert
+
+DISCLAIMER: This is essentially the entire `github.com/docker/cli/cli/compose/convert` package.
+This should be an easy import, but importing it creates DEPENDENCY HELL.
+I tried for an hour to fix it, but it wouldn't work. TRY TO FIX AT YOUR OWN RISK!!!
+
+Because this is copied straight from that library, the Apache license is included in this folder.
+The only edits to the source are renamed functions and the removal of parts we don't need.
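
A minimal sketch of how these copied converters are meant to be driven during a stack deploy, for orientation. The `deployStack` wrapper, the placeholder import path, and the way the compose file is loaded into `cfg` are assumptions and not part of this patch; only the `convert.NewNamespace`, `convert.Networks`, `convert.Secrets`, `convert.Configs`, and `convert.Services` calls correspond to code added here.

package stack // hypothetical caller package

import (
	"context"

	composetypes "github.com/docker/cli/cli/compose/types"
	"github.com/docker/docker/client"

	"example.com/app/client/convert" // placeholder: substitute this repository's module path
)

// deployStack sketches the intended call order: scope every name with a
// Namespace, then convert networks, secrets, configs, and finally services.
func deployStack(ctx context.Context, apiClient client.CommonAPIClient, cfg *composetypes.Config) error {
	ns := convert.NewNamespace("myapp") // resources are created as "myapp_<name>"

	// Networks referenced by at least one service (a simplified stand-in for
	// whatever lookup the real deploy code performs).
	used := map[string]struct{}{}
	for _, svc := range cfg.Services {
		for name := range svc.Networks {
			used[name] = struct{}{}
		}
	}

	networkCreates, externals := convert.Networks(ns, cfg.Networks, used)
	_ = externals // external networks are expected to exist already and are not created

	secretSpecs, err := convert.Secrets(ns, cfg.Secrets)
	if err != nil {
		return err
	}
	configSpecs, err := convert.Configs(ns, cfg.Configs)
	if err != nil {
		return err
	}
	serviceSpecs, err := convert.Services(ns, cfg, apiClient)
	if err != nil {
		return err
	}

	// The corresponding Engine API calls (NetworkCreate, SecretCreate,
	// ConfigCreate, ServiceCreate) are omitted; these specs are their inputs.
	_, _, _, _ = networkCreates, secretSpecs, configSpecs, serviceSpecs
	return nil
}

The conversion order mirrors how a stack deploy consumes the results: networks, secrets, and configs have to exist before the service specs that reference them can be created.
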
diff --git a/client/convert/compose.go b/client/convert/compose.go new file mode 100644 index 00000000..189a9096 --- /dev/null +++ b/client/convert/compose.go @@ -0,0 +1,199 @@ +package convert + +import ( + "io/ioutil" + "strings" + + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/docker/api/types" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/swarm" +) + +const ( + // LabelNamespace is the label used to track stack resources + LabelNamespace = "com.docker.stack.namespace" +) + +// Namespace mangles names by prepending the name +type Namespace struct { + name string +} + +// Scope prepends the namespace to a name +func (n Namespace) Scope(name string) string { + return n.name + "_" + name +} + +// Descope returns the name without the namespace prefix +func (n Namespace) Descope(name string) string { + return strings.TrimPrefix(name, n.name+"_") +} + +// Name returns the name of the namespace +func (n Namespace) Name() string { + return n.name +} + +// NewNamespace returns a new Namespace for scoping of names +func NewNamespace(name string) Namespace { + return Namespace{name: name} +} + +// AddStackLabel returns labels with the namespace label added +func AddStackLabel(namespace Namespace, labels map[string]string) map[string]string { + if labels == nil { + labels = make(map[string]string) + } + labels[LabelNamespace] = namespace.name + return labels +} + +type networkMap map[string]composetypes.NetworkConfig + +// Networks from the compose-file type to the engine API type +func Networks(namespace Namespace, networks networkMap, servicesNetworks map[string]struct{}) (map[string]types.NetworkCreate, []string) { + if networks == nil { + networks = make(map[string]composetypes.NetworkConfig) + } + + externalNetworks := []string{} + result := make(map[string]types.NetworkCreate) + for internalName := range servicesNetworks { + network := networks[internalName] + if network.External.External { + externalNetworks = append(externalNetworks, network.Name) + continue + } + + createOpts := types.NetworkCreate{ + Labels: AddStackLabel(namespace, network.Labels), + Driver: network.Driver, + Options: network.DriverOpts, + Internal: network.Internal, + Attachable: network.Attachable, + } + + if network.Ipam.Driver != "" || len(network.Ipam.Config) > 0 { + createOpts.IPAM = &networktypes.IPAM{} + } + + if network.Ipam.Driver != "" { + createOpts.IPAM.Driver = network.Ipam.Driver + } + for _, ipamConfig := range network.Ipam.Config { + config := networktypes.IPAMConfig{ + Subnet: ipamConfig.Subnet, + } + createOpts.IPAM.Config = append(createOpts.IPAM.Config, config) + } + + networkName := namespace.Scope(internalName) + if network.Name != "" { + networkName = network.Name + } + result[networkName] = createOpts + } + + return result, externalNetworks +} + +// Secrets converts secrets from the Compose type to the engine API type +func Secrets(namespace Namespace, secrets map[string]composetypes.SecretConfig) ([]swarm.SecretSpec, error) { + result := []swarm.SecretSpec{} + for name, secret := range secrets { + if secret.External.External { + continue + } + + var obj swarmFileObject + var err error + if secret.Driver != "" { + obj = driverObjectConfig(namespace, name, composetypes.FileObjectConfig(secret)) + } else { + obj, err = fileObjectConfig(namespace, name, composetypes.FileObjectConfig(secret)) + } + if err != nil { + return nil, err + } + spec := swarm.SecretSpec{Annotations: obj.Annotations, Data: obj.Data} + if 
secret.Driver != "" { + spec.Driver = &swarm.Driver{ + Name: secret.Driver, + Options: secret.DriverOpts, + } + } + if secret.TemplateDriver != "" { + spec.Templating = &swarm.Driver{ + Name: secret.TemplateDriver, + } + } + result = append(result, spec) + } + return result, nil +} + +// Configs converts config objects from the Compose type to the engine API type +func Configs(namespace Namespace, configs map[string]composetypes.ConfigObjConfig) ([]swarm.ConfigSpec, error) { + result := []swarm.ConfigSpec{} + for name, config := range configs { + if config.External.External { + continue + } + + obj, err := fileObjectConfig(namespace, name, composetypes.FileObjectConfig(config)) + if err != nil { + return nil, err + } + spec := swarm.ConfigSpec{Annotations: obj.Annotations, Data: obj.Data} + if config.TemplateDriver != "" { + spec.Templating = &swarm.Driver{ + Name: config.TemplateDriver, + } + } + result = append(result, spec) + } + return result, nil +} + +type swarmFileObject struct { + Annotations swarm.Annotations + Data []byte +} + +func driverObjectConfig(namespace Namespace, name string, obj composetypes.FileObjectConfig) swarmFileObject { + if obj.Name != "" { + name = obj.Name + } else { + name = namespace.Scope(name) + } + + return swarmFileObject{ + Annotations: swarm.Annotations{ + Name: name, + Labels: AddStackLabel(namespace, obj.Labels), + }, + Data: []byte{}, + } +} + +func fileObjectConfig(namespace Namespace, name string, obj composetypes.FileObjectConfig) (swarmFileObject, error) { + data, err := ioutil.ReadFile(obj.File) + if err != nil { + return swarmFileObject{}, err + } + + if obj.Name != "" { + name = obj.Name + } else { + name = namespace.Scope(name) + } + + return swarmFileObject{ + Annotations: swarm.Annotations{ + Name: name, + Labels: AddStackLabel(namespace, obj.Labels), + }, + Data: data, + }, nil +} diff --git a/client/convert/compose_test.go b/client/convert/compose_test.go new file mode 100644 index 00000000..0dfac14e --- /dev/null +++ b/client/convert/compose_test.go @@ -0,0 +1,171 @@ +package convert + +import ( + "testing" + + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "gotest.tools/v3/assert" + is "gotest.tools/v3/assert/cmp" + "gotest.tools/v3/fs" +) + +func TestNamespaceScope(t *testing.T) { + scoped := Namespace{name: "foo"}.Scope("bar") + assert.Check(t, is.Equal("foo_bar", scoped)) +} + +func TestAddStackLabel(t *testing.T) { + labels := map[string]string{ + "something": "labeled", + } + actual := AddStackLabel(Namespace{name: "foo"}, labels) + expected := map[string]string{ + "something": "labeled", + LabelNamespace: "foo", + } + assert.Check(t, is.DeepEqual(expected, actual)) +} + +func TestNetworks(t *testing.T) { + namespace := Namespace{name: "foo"} + serviceNetworks := map[string]struct{}{ + "normal": {}, + "outside": {}, + "default": {}, + "attachablenet": {}, + "named": {}, + } + source := networkMap{ + "normal": composetypes.NetworkConfig{ + Driver: "overlay", + DriverOpts: map[string]string{ + "opt": "value", + }, + Ipam: composetypes.IPAMConfig{ + Driver: "driver", + Config: []*composetypes.IPAMPool{ + { + Subnet: "10.0.0.0", + }, + }, + }, + Labels: map[string]string{ + "something": "labeled", + }, + }, + "outside": composetypes.NetworkConfig{ + External: composetypes.External{External: true}, + Name: "special", + }, + "attachablenet": composetypes.NetworkConfig{ + Driver: "overlay", + Attachable: true, + }, + "named": 
composetypes.NetworkConfig{ + Name: "othername", + }, + } + expected := map[string]types.NetworkCreate{ + "foo_default": { + Labels: map[string]string{ + LabelNamespace: "foo", + }, + }, + "foo_normal": { + Driver: "overlay", + IPAM: &network.IPAM{ + Driver: "driver", + Config: []network.IPAMConfig{ + { + Subnet: "10.0.0.0", + }, + }, + }, + Options: map[string]string{ + "opt": "value", + }, + Labels: map[string]string{ + LabelNamespace: "foo", + "something": "labeled", + }, + }, + "foo_attachablenet": { + Driver: "overlay", + Attachable: true, + Labels: map[string]string{ + LabelNamespace: "foo", + }, + }, + "othername": { + Labels: map[string]string{LabelNamespace: "foo"}, + }, + } + + networks, externals := Networks(namespace, source, serviceNetworks) + assert.DeepEqual(t, expected, networks) + assert.DeepEqual(t, []string{"special"}, externals) +} + +func TestSecrets(t *testing.T) { + namespace := Namespace{name: "foo"} + + secretText := "this is the first secret" + secretFile := fs.NewFile(t, "convert-secrets", fs.WithContent(secretText)) + defer secretFile.Remove() + + source := map[string]composetypes.SecretConfig{ + "one": { + File: secretFile.Path(), + Labels: map[string]string{"monster": "mash"}, + }, + "ext": { + External: composetypes.External{ + External: true, + }, + }, + } + + specs, err := Secrets(namespace, source) + assert.NilError(t, err) + assert.Assert(t, is.Len(specs, 1)) + secret := specs[0] + assert.Check(t, is.Equal("foo_one", secret.Name)) + assert.Check(t, is.DeepEqual(map[string]string{ + "monster": "mash", + LabelNamespace: "foo", + }, secret.Labels)) + assert.Check(t, is.DeepEqual([]byte(secretText), secret.Data)) +} + +func TestConfigs(t *testing.T) { + namespace := Namespace{name: "foo"} + + configText := "this is the first config" + configFile := fs.NewFile(t, "convert-configs", fs.WithContent(configText)) + defer configFile.Remove() + + source := map[string]composetypes.ConfigObjConfig{ + "one": { + File: configFile.Path(), + Labels: map[string]string{"monster": "mash"}, + }, + "ext": { + External: composetypes.External{ + External: true, + }, + }, + } + + specs, err := Configs(namespace, source) + assert.NilError(t, err) + assert.Assert(t, is.Len(specs, 1)) + config := specs[0] + assert.Check(t, is.Equal("foo_one", config.Name)) + assert.Check(t, is.DeepEqual(map[string]string{ + "monster": "mash", + LabelNamespace: "foo", + }, config.Labels)) + assert.Check(t, is.DeepEqual([]byte(configText), config.Data)) +} diff --git a/client/convert/service.go b/client/convert/service.go new file mode 100644 index 00000000..e23c5d97 --- /dev/null +++ b/client/convert/service.go @@ -0,0 +1,861 @@ +package convert + +import ( + "context" + "fmt" + "os" + "sort" + "strings" + "time" + + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/client" + "github.com/docker/go-units" + "github.com/pkg/errors" +) + +const ( + defaultNetwork = "default" + // LabelImage is the label used to store image name provided in the compose file + LabelImage = "com.docker.stack.image" +) + +// ParseSecrets retrieves the secrets with the requested names and fills +// secret IDs into the secret references. 
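+// For example, a reference whose SecretName is "myapp_db_password" (an
+// illustrative name) and whose SecretID is still empty comes back with
+// SecretID set to the ID that SecretList reports for that name; two
+// references sharing the same File.Name target are rejected as duplicates.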
+func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.SecretReference) ([]*swarmtypes.SecretReference, error) { + if len(requestedSecrets) == 0 { + return []*swarmtypes.SecretReference{}, nil + } + + secretRefs := make(map[string]*swarmtypes.SecretReference) + ctx := context.Background() + + for _, secret := range requestedSecrets { + if _, exists := secretRefs[secret.File.Name]; exists { + return nil, errors.Errorf("duplicate secret target for %s not allowed", secret.SecretName) + } + secretRef := new(swarmtypes.SecretReference) + *secretRef = *secret + secretRefs[secret.File.Name] = secretRef + } + + args := filters.NewArgs() + for _, s := range secretRefs { + args.Add("name", s.SecretName) + } + + secrets, err := client.SecretList(ctx, types.SecretListOptions{ + Filters: args, + }) + if err != nil { + return nil, err + } + + foundSecrets := make(map[string]string) + for _, secret := range secrets { + foundSecrets[secret.Spec.Annotations.Name] = secret.ID + } + + addedSecrets := []*swarmtypes.SecretReference{} + + for _, ref := range secretRefs { + id, ok := foundSecrets[ref.SecretName] + if !ok { + return nil, errors.Errorf("secret not found: %s", ref.SecretName) + } + + // set the id for the ref to properly assign in swarm + // since swarm needs the ID instead of the name + ref.SecretID = id + addedSecrets = append(addedSecrets, ref) + } + + return addedSecrets, nil +} + +// ParseConfigs retrieves the configs from the requested names and converts +// them to config references to use with the spec +func ParseConfigs(client client.ConfigAPIClient, requestedConfigs []*swarmtypes.ConfigReference) ([]*swarmtypes.ConfigReference, error) { + if len(requestedConfigs) == 0 { + return []*swarmtypes.ConfigReference{}, nil + } + + // the configRefs map has two purposes: it prevents duplication of config + // target filenames, and it it used to get all configs so we can resolve + // their IDs. unfortunately, there are other targets for ConfigReferences, + // besides just a File; specifically, the Runtime target, which is used for + // CredentialSpecs. Therefore, we need to have a list of ConfigReferences + // that are not File targets as well. at this time of writing, the only use + // for Runtime targets is CredentialSpecs. However, to future-proof this + // functionality, we should handle the case where multiple Runtime targets + // are in use for the same Config, and we should deduplicate + // such ConfigReferences, as no matter how many times the Config is used, + // it is only needed to be referenced once. + configRefs := make(map[string]*swarmtypes.ConfigReference) + runtimeRefs := make(map[string]*swarmtypes.ConfigReference) + ctx := context.Background() + + for _, config := range requestedConfigs { + // copy the config, so we don't mutate the args + configRef := new(swarmtypes.ConfigReference) + *configRef = *config + + if config.Runtime != nil { + // by assigning to a map based on ConfigName, if the same Config + // is required as a Runtime target for multiple purposes, we only + // include it once in the final set of configs. 
+ runtimeRefs[config.ConfigName] = config + // continue, so we skip the logic below for handling file-type + // configs + continue + } + + if _, exists := configRefs[config.File.Name]; exists { + return nil, errors.Errorf("duplicate config target for %s not allowed", config.ConfigName) + } + + configRefs[config.File.Name] = configRef + } + + args := filters.NewArgs() + for _, s := range configRefs { + args.Add("name", s.ConfigName) + } + for _, s := range runtimeRefs { + args.Add("name", s.ConfigName) + } + + configs, err := client.ConfigList(ctx, types.ConfigListOptions{ + Filters: args, + }) + if err != nil { + return nil, err + } + + foundConfigs := make(map[string]string) + for _, config := range configs { + foundConfigs[config.Spec.Annotations.Name] = config.ID + } + + addedConfigs := []*swarmtypes.ConfigReference{} + + for _, ref := range configRefs { + id, ok := foundConfigs[ref.ConfigName] + if !ok { + return nil, errors.Errorf("config not found: %s", ref.ConfigName) + } + + // set the id for the ref to properly assign in swarm + // since swarm needs the ID instead of the name + ref.ConfigID = id + addedConfigs = append(addedConfigs, ref) + } + + // unfortunately, because the key of configRefs and runtimeRefs is different + // values that may collide, we can't just do some fancy trickery to + // concat maps, we need to do two separate loops + for _, ref := range runtimeRefs { + id, ok := foundConfigs[ref.ConfigName] + if !ok { + return nil, errors.Errorf("config not found: %s", ref.ConfigName) + } + + ref.ConfigID = id + addedConfigs = append(addedConfigs, ref) + } + + return addedConfigs, nil +} + +// Services from compose-file types to engine API types +func Services( + namespace Namespace, + config *composetypes.Config, + client client.CommonAPIClient, +) (map[string]swarm.ServiceSpec, error) { + result := make(map[string]swarm.ServiceSpec) + + services := config.Services + volumes := config.Volumes + networks := config.Networks + + for _, service := range services { + secrets, err := convertServiceSecrets(client, namespace, service.Secrets, config.Secrets) + if err != nil { + return nil, errors.Wrapf(err, "service %s", service.Name) + } + configs, err := convertServiceConfigObjs(client, namespace, service, config.Configs) + if err != nil { + return nil, errors.Wrapf(err, "service %s", service.Name) + } + + serviceSpec, err := Service(client.ClientVersion(), namespace, service, networks, volumes, secrets, configs) + if err != nil { + return nil, errors.Wrapf(err, "service %s", service.Name) + } + result[service.Name] = serviceSpec + } + + return result, nil +} + +// Service converts a ServiceConfig into a swarm ServiceSpec +func Service( + apiVersion string, + namespace Namespace, + service composetypes.ServiceConfig, + networkConfigs map[string]composetypes.NetworkConfig, + volumes map[string]composetypes.VolumeConfig, + secrets []*swarm.SecretReference, + configs []*swarm.ConfigReference, +) (swarm.ServiceSpec, error) { + name := namespace.Scope(service.Name) + endpoint := convertEndpointSpec(service.Deploy.EndpointMode, service.Ports) + + mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas) + if err != nil { + return swarm.ServiceSpec{}, err + } + + mounts, err := Volumes(service.Volumes, volumes, namespace) + if err != nil { + return swarm.ServiceSpec{}, err + } + + resources, err := convertResources(service.Deploy.Resources) + if err != nil { + return swarm.ServiceSpec{}, err + } + + restartPolicy, err := convertRestartPolicy( + service.Restart, 
service.Deploy.RestartPolicy) + if err != nil { + return swarm.ServiceSpec{}, err + } + + healthcheck, err := convertHealthcheck(service.HealthCheck) + if err != nil { + return swarm.ServiceSpec{}, err + } + + networks, err := convertServiceNetworks(service.Networks, networkConfigs, namespace, service.Name) + if err != nil { + return swarm.ServiceSpec{}, err + } + + dnsConfig := convertDNSConfig(service.DNS, service.DNSSearch) + + var privileges swarm.Privileges + privileges.CredentialSpec, err = convertCredentialSpec( + namespace, service.CredentialSpec, configs, + ) + if err != nil { + return swarm.ServiceSpec{}, err + } + + var logDriver *swarm.Driver + if service.Logging != nil { + logDriver = &swarm.Driver{ + Name: service.Logging.Driver, + Options: service.Logging.Options, + } + } + + capAdd, capDrop := opts.EffectiveCapAddCapDrop(service.CapAdd, service.CapDrop) + + serviceSpec := swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: name, + Labels: AddStackLabel(namespace, service.Deploy.Labels), + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: service.Image, + Command: service.Entrypoint, + Args: service.Command, + Hostname: service.Hostname, + Hosts: convertExtraHosts(service.ExtraHosts), + DNSConfig: dnsConfig, + Healthcheck: healthcheck, + Env: sortStrings(convertEnvironment(service.Environment)), + Labels: AddStackLabel(namespace, service.Labels), + Dir: service.WorkingDir, + User: service.User, + Mounts: mounts, + StopGracePeriod: composetypes.ConvertDurationPtr(service.StopGracePeriod), + StopSignal: service.StopSignal, + TTY: service.Tty, + OpenStdin: service.StdinOpen, + Secrets: secrets, + Configs: configs, + ReadOnly: service.ReadOnly, + Privileges: &privileges, + Isolation: container.Isolation(service.Isolation), + Init: service.Init, + Sysctls: service.Sysctls, + CapabilityAdd: capAdd, + CapabilityDrop: capDrop, + Ulimits: convertUlimits(service.Ulimits), + }, + LogDriver: logDriver, + Resources: resources, + RestartPolicy: restartPolicy, + Placement: &swarm.Placement{ + Constraints: service.Deploy.Placement.Constraints, + Preferences: getPlacementPreference(service.Deploy.Placement.Preferences), + MaxReplicas: service.Deploy.Placement.MaxReplicas, + }, + }, + EndpointSpec: endpoint, + Mode: mode, + UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig), + RollbackConfig: convertUpdateConfig(service.Deploy.RollbackConfig), + } + + // add an image label to serviceSpec + serviceSpec.Labels[LabelImage] = service.Image + + // ServiceSpec.Networks is deprecated and should not have been used by + // this package. It is possible to update TaskTemplate.Networks, but it + // is not possible to update ServiceSpec.Networks. Unfortunately, we + // can't unconditionally start using TaskTemplate.Networks, because that + // will break with older daemons that don't support migrating from + // ServiceSpec.Networks to TaskTemplate.Networks. So which field to use + // is conditional on daemon version. 
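+	// apiVersion here is the negotiated client version that Services() passes
+	// in via client.ClientVersion(); 1.29 is the cut-off this code uses for
+	// moving the attachments onto the task template.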
+ if versions.LessThan(apiVersion, "1.29") { + serviceSpec.Networks = networks + } else { + serviceSpec.TaskTemplate.Networks = networks + } + return serviceSpec, nil +} + +func getPlacementPreference(preferences []composetypes.PlacementPreferences) []swarm.PlacementPreference { + result := []swarm.PlacementPreference{} + for _, preference := range preferences { + spreadDescriptor := preference.Spread + result = append(result, swarm.PlacementPreference{ + Spread: &swarm.SpreadOver{ + SpreadDescriptor: spreadDescriptor, + }, + }) + } + return result +} + +func sortStrings(strs []string) []string { + sort.Strings(strs) + return strs +} + +func convertServiceNetworks( + networks map[string]*composetypes.ServiceNetworkConfig, + networkConfigs networkMap, + namespace Namespace, + name string, +) ([]swarm.NetworkAttachmentConfig, error) { + if len(networks) == 0 { + networks = map[string]*composetypes.ServiceNetworkConfig{ + defaultNetwork: {}, + } + } + + nets := []swarm.NetworkAttachmentConfig{} + for networkName, network := range networks { + networkConfig, ok := networkConfigs[networkName] + if !ok && networkName != defaultNetwork { + return nil, errors.Errorf("undefined network %q", networkName) + } + var aliases []string + if network != nil { + aliases = network.Aliases + } + target := namespace.Scope(networkName) + if networkConfig.Name != "" { + target = networkConfig.Name + } + netAttachConfig := swarm.NetworkAttachmentConfig{ + Target: target, + Aliases: aliases, + } + // Only add default aliases to user defined networks. Other networks do + // not support aliases. + if container.NetworkMode(target).IsUserDefined() { + netAttachConfig.Aliases = append(netAttachConfig.Aliases, name) + } + nets = append(nets, netAttachConfig) + } + + sort.Slice(nets, func(i, j int) bool { + return nets[i].Target < nets[j].Target + }) + return nets, nil +} + +// TODO: fix secrets API so that SecretAPIClient is not required here +func convertServiceSecrets( + client client.SecretAPIClient, + namespace Namespace, + secrets []composetypes.ServiceSecretConfig, + secretSpecs map[string]composetypes.SecretConfig, +) ([]*swarm.SecretReference, error) { + refs := []*swarm.SecretReference{} + + lookup := func(key string) (composetypes.FileObjectConfig, error) { + secretSpec, exists := secretSpecs[key] + if !exists { + return composetypes.FileObjectConfig{}, errors.Errorf("undefined secret %q", key) + } + return composetypes.FileObjectConfig(secretSpec), nil + } + for _, secret := range secrets { + obj, err := convertFileObject(namespace, composetypes.FileReferenceConfig(secret), lookup) + if err != nil { + return nil, err + } + + file := swarm.SecretReferenceFileTarget(obj.File) + refs = append(refs, &swarm.SecretReference{ + File: &file, + SecretName: obj.Name, + }) + } + + secrs, err := ParseSecrets(client, refs) + if err != nil { + return nil, err + } + // sort to ensure idempotence (don't restart services just because the entries are in different order) + sort.SliceStable(secrs, func(i, j int) bool { return secrs[i].SecretName < secrs[j].SecretName }) + return secrs, err +} + +// convertServiceConfigObjs takes an API client, a namespace, a ServiceConfig, +// and a set of compose Config specs, and creates the swarm ConfigReferences +// required by the serivce. Unlike convertServiceSecrets, this takes the whole +// ServiceConfig, because some Configs may be needed as a result of other +// fields (like CredentialSpecs). 
+// +// TODO: fix configs API so that ConfigsAPIClient is not required here +func convertServiceConfigObjs( + client client.ConfigAPIClient, + namespace Namespace, + service composetypes.ServiceConfig, + configSpecs map[string]composetypes.ConfigObjConfig, +) ([]*swarm.ConfigReference, error) { + refs := []*swarm.ConfigReference{} + + lookup := func(key string) (composetypes.FileObjectConfig, error) { + configSpec, exists := configSpecs[key] + if !exists { + return composetypes.FileObjectConfig{}, errors.Errorf("undefined config %q", key) + } + return composetypes.FileObjectConfig(configSpec), nil + } + for _, config := range service.Configs { + obj, err := convertFileObject(namespace, composetypes.FileReferenceConfig(config), lookup) + if err != nil { + return nil, err + } + + file := swarm.ConfigReferenceFileTarget(obj.File) + refs = append(refs, &swarm.ConfigReference{ + File: &file, + ConfigName: obj.Name, + }) + } + + // finally, after converting all of the file objects, create any + // Runtime-type configs that are needed. these are configs that are not + // mounted into the container, but are used in some other way by the + // container runtime. Currently, this only means CredentialSpecs, but in + // the future it may be used for other fields + + // grab the CredentialSpec out of the Service + credSpec := service.CredentialSpec + // if the credSpec uses a config, then we should grab the config name, and + // create a config reference for it. A File or Registry-type CredentialSpec + // does not need this operation. + if credSpec.Config != "" { + // look up the config in the configSpecs. + obj, err := lookup(credSpec.Config) + if err != nil { + return nil, err + } + + // get the actual correct name. + name := namespace.Scope(credSpec.Config) + if obj.Name != "" { + name = obj.Name + } + + // now append a Runtime-type config. + refs = append(refs, &swarm.ConfigReference{ + ConfigName: name, + Runtime: &swarm.ConfigReferenceRuntimeTarget{}, + }) + + } + + confs, err := ParseConfigs(client, refs) + if err != nil { + return nil, err + } + // sort to ensure idempotence (don't restart services just because the entries are in different order) + sort.SliceStable(confs, func(i, j int) bool { return confs[i].ConfigName < confs[j].ConfigName }) + return confs, err +} + +type swarmReferenceTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +type swarmReferenceObject struct { + File swarmReferenceTarget + ID string + Name string +} + +func convertFileObject( + namespace Namespace, + config composetypes.FileReferenceConfig, + lookup func(key string) (composetypes.FileObjectConfig, error), +) (swarmReferenceObject, error) { + obj, err := lookup(config.Source) + if err != nil { + return swarmReferenceObject{}, err + } + + source := namespace.Scope(config.Source) + if obj.Name != "" { + source = obj.Name + } + + target := config.Target + if target == "" { + target = config.Source + } + + uid := config.UID + gid := config.GID + if uid == "" { + uid = "0" + } + if gid == "" { + gid = "0" + } + mode := config.Mode + if mode == nil { + mode = uint32Ptr(0444) + } + + return swarmReferenceObject{ + File: swarmReferenceTarget{ + Name: target, + UID: uid, + GID: gid, + Mode: os.FileMode(*mode), + }, + Name: source, + }, nil +} + +func uint32Ptr(value uint32) *uint32 { + return &value +} + +// convertExtraHosts converts : mappings to SwarmKit notation: +// "IP-address hostname(s)". The original order of mappings is preserved. 
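+// For example, the compose entry "somehost:162.242.195.82" becomes
+// "162.242.195.82 somehost"; entries without a ":" separator are dropped.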
+func convertExtraHosts(extraHosts composetypes.HostsList) []string { + hosts := []string{} + for _, hostIP := range extraHosts { + if v := strings.SplitN(hostIP, ":", 2); len(v) == 2 { + // Convert to SwarmKit notation: IP-address hostname(s) + hosts = append(hosts, fmt.Sprintf("%s %s", v[1], v[0])) + } + } + return hosts +} + +func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container.HealthConfig, error) { + if healthcheck == nil { + return nil, nil + } + var ( + timeout, interval, startPeriod time.Duration + retries int + ) + if healthcheck.Disable { + if len(healthcheck.Test) != 0 { + return nil, errors.Errorf("test and disable can't be set at the same time") + } + return &container.HealthConfig{ + Test: []string{"NONE"}, + }, nil + + } + if healthcheck.Timeout != nil { + timeout = time.Duration(*healthcheck.Timeout) + } + if healthcheck.Interval != nil { + interval = time.Duration(*healthcheck.Interval) + } + if healthcheck.StartPeriod != nil { + startPeriod = time.Duration(*healthcheck.StartPeriod) + } + if healthcheck.Retries != nil { + retries = int(*healthcheck.Retries) + } + return &container.HealthConfig{ + Test: healthcheck.Test, + Timeout: timeout, + Interval: interval, + Retries: retries, + StartPeriod: startPeriod, + }, nil +} + +func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (*swarm.RestartPolicy, error) { + // TODO: log if restart is being ignored + if source == nil { + policy, err := opts.ParseRestartPolicy(restart) + if err != nil { + return nil, err + } + switch { + case policy.IsNone(): + return nil, nil + case policy.IsAlways(), policy.IsUnlessStopped(): + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionAny, + }, nil + case policy.IsOnFailure(): + attempts := uint64(policy.MaximumRetryCount) + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionOnFailure, + MaxAttempts: &attempts, + }, nil + default: + return nil, errors.Errorf("unknown restart policy: %s", restart) + } + } + + return &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyCondition(source.Condition), + Delay: composetypes.ConvertDurationPtr(source.Delay), + MaxAttempts: source.MaxAttempts, + Window: composetypes.ConvertDurationPtr(source.Window), + }, nil +} + +func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig { + if source == nil { + return nil + } + parallel := uint64(1) + if source.Parallelism != nil { + parallel = *source.Parallelism + } + return &swarm.UpdateConfig{ + Parallelism: parallel, + Delay: time.Duration(source.Delay), + FailureAction: source.FailureAction, + Monitor: time.Duration(source.Monitor), + MaxFailureRatio: source.MaxFailureRatio, + Order: source.Order, + } +} + +func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) { + resources := &swarm.ResourceRequirements{} + var err error + if source.Limits != nil { + var cpus int64 + if source.Limits.NanoCPUs != "" { + cpus, err = opts.ParseCPUs(source.Limits.NanoCPUs) + if err != nil { + return nil, err + } + } + resources.Limits = &swarm.Limit{ + NanoCPUs: cpus, + MemoryBytes: int64(source.Limits.MemoryBytes), + Pids: source.Limits.Pids, + } + } + if source.Reservations != nil { + var cpus int64 + if source.Reservations.NanoCPUs != "" { + cpus, err = opts.ParseCPUs(source.Reservations.NanoCPUs) + if err != nil { + return nil, err + } + } + + var generic []swarm.GenericResource + for _, res := range source.Reservations.GenericResources { + var r swarm.GenericResource + + if 
res.DiscreteResourceSpec != nil { + r.DiscreteResourceSpec = &swarm.DiscreteGenericResource{ + Kind: res.DiscreteResourceSpec.Kind, + Value: res.DiscreteResourceSpec.Value, + } + } + + generic = append(generic, r) + } + + resources.Reservations = &swarm.Resources{ + NanoCPUs: cpus, + MemoryBytes: int64(source.Reservations.MemoryBytes), + GenericResources: generic, + } + } + return resources, nil +} + +func convertEndpointSpec(endpointMode string, source []composetypes.ServicePortConfig) *swarm.EndpointSpec { + portConfigs := []swarm.PortConfig{} + for _, port := range source { + portConfig := swarm.PortConfig{ + Protocol: swarm.PortConfigProtocol(port.Protocol), + TargetPort: port.Target, + PublishedPort: port.Published, + PublishMode: swarm.PortConfigPublishMode(port.Mode), + } + portConfigs = append(portConfigs, portConfig) + } + + sort.Slice(portConfigs, func(i, j int) bool { + return portConfigs[i].PublishedPort < portConfigs[j].PublishedPort + }) + + return &swarm.EndpointSpec{ + Mode: swarm.ResolutionMode(strings.ToLower(endpointMode)), + Ports: portConfigs, + } +} + +func convertEnvironment(source map[string]*string) []string { + var output []string + + for name, value := range source { + switch value { + case nil: + output = append(output, name) + default: + output = append(output, fmt.Sprintf("%s=%s", name, *value)) + } + } + + return output +} + +func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) { + serviceMode := swarm.ServiceMode{} + + switch mode { + case "global": + if replicas != nil { + return serviceMode, errors.Errorf("replicas can only be used with replicated mode") + } + serviceMode.Global = &swarm.GlobalService{} + case "replicated", "": + serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas} + default: + return serviceMode, errors.Errorf("Unknown mode: %s", mode) + } + return serviceMode, nil +} + +func convertDNSConfig(DNS []string, DNSSearch []string) *swarm.DNSConfig { + if DNS != nil || DNSSearch != nil { + return &swarm.DNSConfig{ + Nameservers: DNS, + Search: DNSSearch, + } + } + return nil +} + +func convertCredentialSpec(namespace Namespace, spec composetypes.CredentialSpecConfig, refs []*swarm.ConfigReference) (*swarm.CredentialSpec, error) { + var o []string + + // Config was added in API v1.40 + if spec.Config != "" { + o = append(o, `"Config"`) + } + if spec.File != "" { + o = append(o, `"File"`) + } + if spec.Registry != "" { + o = append(o, `"Registry"`) + } + l := len(o) + switch { + case l == 0: + return nil, nil + case l == 2: + return nil, errors.Errorf("invalid credential spec: cannot specify both %s and %s", o[0], o[1]) + case l > 2: + return nil, errors.Errorf("invalid credential spec: cannot specify both %s, and %s", strings.Join(o[:l-1], ", "), o[l-1]) + } + swarmCredSpec := swarm.CredentialSpec(spec) + // if we're using a swarm Config for the credential spec, over-write it + // here with the config ID + if swarmCredSpec.Config != "" { + for _, config := range refs { + if swarmCredSpec.Config == config.ConfigName { + swarmCredSpec.Config = config.ConfigID + return &swarmCredSpec, nil + } + } + // if none of the configs match, try namespacing + for _, config := range refs { + if namespace.Scope(swarmCredSpec.Config) == config.ConfigName { + swarmCredSpec.Config = config.ConfigID + return &swarmCredSpec, nil + } + } + return nil, errors.Errorf("invalid credential spec: spec specifies config %v, but no such config can be found", swarmCredSpec.Config) + } + return &swarmCredSpec, nil +} + +func 
convertUlimits(origUlimits map[string]*composetypes.UlimitsConfig) []*units.Ulimit { + newUlimits := make(map[string]*units.Ulimit) + for name, u := range origUlimits { + if u.Single != 0 { + newUlimits[name] = &units.Ulimit{ + Name: name, + Soft: int64(u.Single), + Hard: int64(u.Single), + } + } else { + newUlimits[name] = &units.Ulimit{ + Name: name, + Soft: int64(u.Soft), + Hard: int64(u.Hard), + } + } + } + var ulimits []*units.Ulimit + for _, ulimit := range newUlimits { + ulimits = append(ulimits, ulimit) + } + sort.SliceStable(ulimits, func(i, j int) bool { + return ulimits[i].Name < ulimits[j].Name + }) + return ulimits +} diff --git a/client/convert/service_test.go b/client/convert/service_test.go new file mode 100644 index 00000000..0607f395 --- /dev/null +++ b/client/convert/service_test.go @@ -0,0 +1,678 @@ +package convert + +import ( + "context" + "os" + "sort" + "strings" + "testing" + "time" + + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/pkg/errors" + "gotest.tools/v3/assert" + is "gotest.tools/v3/assert/cmp" +) + +func TestConvertRestartPolicyFromNone(t *testing.T) { + policy, err := convertRestartPolicy("no", nil) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual((*swarm.RestartPolicy)(nil), policy)) +} + +func TestConvertRestartPolicyFromUnknown(t *testing.T) { + _, err := convertRestartPolicy("unknown", nil) + assert.Error(t, err, "unknown restart policy: unknown") +} + +func TestConvertRestartPolicyFromAlways(t *testing.T) { + policy, err := convertRestartPolicy("always", nil) + expected := &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionAny, + } + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, policy)) +} + +func TestConvertRestartPolicyFromFailure(t *testing.T) { + policy, err := convertRestartPolicy("on-failure:4", nil) + attempts := uint64(4) + expected := &swarm.RestartPolicy{ + Condition: swarm.RestartPolicyConditionOnFailure, + MaxAttempts: &attempts, + } + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, policy)) +} + +func strPtr(val string) *string { + return &val +} + +func TestConvertEnvironment(t *testing.T) { + source := map[string]*string{ + "foo": strPtr("bar"), + "key": strPtr("value"), + } + env := convertEnvironment(source) + sort.Strings(env) + assert.Check(t, is.DeepEqual([]string{"foo=bar", "key=value"}, env)) +} + +func TestConvertExtraHosts(t *testing.T) { + source := composetypes.HostsList{ + "zulu:127.0.0.2", + "alpha:127.0.0.1", + "zulu:ff02::1", + } + assert.Check(t, is.DeepEqual([]string{"127.0.0.2 zulu", "127.0.0.1 alpha", "ff02::1 zulu"}, convertExtraHosts(source))) +} + +func TestConvertResourcesFull(t *testing.T) { + source := composetypes.Resources{ + Limits: &composetypes.ResourceLimit{ + NanoCPUs: "0.003", + MemoryBytes: composetypes.UnitBytes(300000000), + }, + Reservations: &composetypes.Resource{ + NanoCPUs: "0.002", + MemoryBytes: composetypes.UnitBytes(200000000), + }, + } + resources, err := convertResources(source) + assert.NilError(t, err) + + expected := &swarm.ResourceRequirements{ + Limits: &swarm.Limit{ + NanoCPUs: 3000000, + MemoryBytes: 300000000, + }, + Reservations: &swarm.Resources{ + NanoCPUs: 2000000, + MemoryBytes: 200000000, + }, + } + assert.Check(t, is.DeepEqual(expected, resources)) +} + +func TestConvertResourcesOnlyMemory(t *testing.T) { + source := 
composetypes.Resources{ + Limits: &composetypes.ResourceLimit{ + MemoryBytes: composetypes.UnitBytes(300000000), + }, + Reservations: &composetypes.Resource{ + MemoryBytes: composetypes.UnitBytes(200000000), + }, + } + resources, err := convertResources(source) + assert.NilError(t, err) + + expected := &swarm.ResourceRequirements{ + Limits: &swarm.Limit{ + MemoryBytes: 300000000, + }, + Reservations: &swarm.Resources{ + MemoryBytes: 200000000, + }, + } + assert.Check(t, is.DeepEqual(expected, resources)) +} + +func TestConvertHealthcheck(t *testing.T) { + retries := uint64(10) + timeout := composetypes.Duration(30 * time.Second) + interval := composetypes.Duration(2 * time.Millisecond) + source := &composetypes.HealthCheckConfig{ + Test: []string{"EXEC", "touch", "/foo"}, + Timeout: &timeout, + Interval: &interval, + Retries: &retries, + } + expected := &container.HealthConfig{ + Test: source.Test, + Timeout: time.Duration(timeout), + Interval: time.Duration(interval), + Retries: 10, + } + + healthcheck, err := convertHealthcheck(source) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, healthcheck)) +} + +func TestConvertHealthcheckDisable(t *testing.T) { + source := &composetypes.HealthCheckConfig{Disable: true} + expected := &container.HealthConfig{ + Test: []string{"NONE"}, + } + + healthcheck, err := convertHealthcheck(source) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, healthcheck)) +} + +func TestConvertHealthcheckDisableWithTest(t *testing.T) { + source := &composetypes.HealthCheckConfig{ + Disable: true, + Test: []string{"EXEC", "touch"}, + } + _, err := convertHealthcheck(source) + assert.Error(t, err, "test and disable can't be set at the same time") +} + +func TestConvertEndpointSpec(t *testing.T) { + source := []composetypes.ServicePortConfig{ + { + Protocol: "udp", + Target: 53, + Published: 1053, + Mode: "host", + }, + { + Target: 8080, + Published: 80, + }, + } + endpoint := convertEndpointSpec("vip", source) + + expected := swarm.EndpointSpec{ + Mode: swarm.ResolutionMode(strings.ToLower("vip")), + Ports: []swarm.PortConfig{ + { + TargetPort: 8080, + PublishedPort: 80, + }, + { + Protocol: "udp", + TargetPort: 53, + PublishedPort: 1053, + PublishMode: "host", + }, + }, + } + + assert.Check(t, is.DeepEqual(expected, *endpoint)) +} + +func TestConvertServiceNetworksOnlyDefault(t *testing.T) { + networkConfigs := networkMap{} + + configs, err := convertServiceNetworks( + nil, networkConfigs, NewNamespace("foo"), "service") + + expected := []swarm.NetworkAttachmentConfig{ + { + Target: "foo_default", + Aliases: []string{"service"}, + }, + } + + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, configs)) +} + +func TestConvertServiceNetworks(t *testing.T) { + networkConfigs := networkMap{ + "front": composetypes.NetworkConfig{ + External: composetypes.External{External: true}, + Name: "fronttier", + }, + "back": composetypes.NetworkConfig{}, + } + networks := map[string]*composetypes.ServiceNetworkConfig{ + "front": { + Aliases: []string{"something"}, + }, + "back": { + Aliases: []string{"other"}, + }, + } + + configs, err := convertServiceNetworks( + networks, networkConfigs, NewNamespace("foo"), "service") + + expected := []swarm.NetworkAttachmentConfig{ + { + Target: "foo_back", + Aliases: []string{"other", "service"}, + }, + { + Target: "fronttier", + Aliases: []string{"something", "service"}, + }, + } + + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, configs)) +} + +func 
TestConvertServiceNetworksCustomDefault(t *testing.T) { + networkConfigs := networkMap{ + "default": composetypes.NetworkConfig{ + External: composetypes.External{External: true}, + Name: "custom", + }, + } + networks := map[string]*composetypes.ServiceNetworkConfig{} + + configs, err := convertServiceNetworks( + networks, networkConfigs, NewNamespace("foo"), "service") + + expected := []swarm.NetworkAttachmentConfig{ + { + Target: "custom", + Aliases: []string{"service"}, + }, + } + + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, configs)) +} + +func TestConvertDNSConfigEmpty(t *testing.T) { + dnsConfig := convertDNSConfig(nil, nil) + assert.Check(t, is.DeepEqual((*swarm.DNSConfig)(nil), dnsConfig)) +} + +var ( + nameservers = []string{"8.8.8.8", "9.9.9.9"} + search = []string{"dc1.example.com", "dc2.example.com"} +) + +func TestConvertDNSConfigAll(t *testing.T) { + dnsConfig := convertDNSConfig(nameservers, search) + assert.Check(t, is.DeepEqual(&swarm.DNSConfig{ + Nameservers: nameservers, + Search: search, + }, dnsConfig)) +} + +func TestConvertDNSConfigNameservers(t *testing.T) { + dnsConfig := convertDNSConfig(nameservers, nil) + assert.Check(t, is.DeepEqual(&swarm.DNSConfig{ + Nameservers: nameservers, + Search: nil, + }, dnsConfig)) +} + +func TestConvertDNSConfigSearch(t *testing.T) { + dnsConfig := convertDNSConfig(nil, search) + assert.Check(t, is.DeepEqual(&swarm.DNSConfig{ + Nameservers: nil, + Search: search, + }, dnsConfig)) +} + +func TestConvertCredentialSpec(t *testing.T) { + tests := []struct { + name string + in composetypes.CredentialSpecConfig + out *swarm.CredentialSpec + configs []*swarm.ConfigReference + expectedErr string + }{ + { + name: "empty", + }, + { + name: "config-and-file", + in: composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", File: "somefile.json"}, + expectedErr: `invalid credential spec: cannot specify both "Config" and "File"`, + }, + { + name: "config-and-registry", + in: composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", Registry: "testing"}, + expectedErr: `invalid credential spec: cannot specify both "Config" and "Registry"`, + }, + { + name: "file-and-registry", + in: composetypes.CredentialSpecConfig{File: "somefile.json", Registry: "testing"}, + expectedErr: `invalid credential spec: cannot specify both "File" and "Registry"`, + }, + { + name: "config-and-file-and-registry", + in: composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", File: "somefile.json", Registry: "testing"}, + expectedErr: `invalid credential spec: cannot specify both "Config", "File", and "Registry"`, + }, + { + name: "missing-config-reference", + in: composetypes.CredentialSpecConfig{Config: "missing"}, + expectedErr: "invalid credential spec: spec specifies config missing, but no such config can be found", + configs: []*swarm.ConfigReference{ + { + ConfigName: "someName", + ConfigID: "missing", + }, + }, + }, + { + name: "namespaced-config", + in: composetypes.CredentialSpecConfig{Config: "name"}, + configs: []*swarm.ConfigReference{ + { + ConfigName: "namespaced-config_name", + ConfigID: "someID", + }, + }, + out: &swarm.CredentialSpec{Config: "someID"}, + }, + { + name: "config", + in: composetypes.CredentialSpecConfig{Config: "someName"}, + configs: []*swarm.ConfigReference{ + { + ConfigName: "someOtherName", + ConfigID: "someOtherID", + }, { + ConfigName: "someName", + ConfigID: "someID", + }, + }, + out: &swarm.CredentialSpec{Config: "someID"}, + }, + { + name: "file", + in: 
composetypes.CredentialSpecConfig{File: "somefile.json"}, + out: &swarm.CredentialSpec{File: "somefile.json"}, + }, + { + name: "registry", + in: composetypes.CredentialSpecConfig{Registry: "testing"}, + out: &swarm.CredentialSpec{Registry: "testing"}, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + namespace := NewNamespace(tc.name) + swarmSpec, err := convertCredentialSpec(namespace, tc.in, tc.configs) + + if tc.expectedErr != "" { + assert.Error(t, err, tc.expectedErr) + } else { + assert.NilError(t, err) + } + assert.DeepEqual(t, swarmSpec, tc.out) + }) + } +} + +func TestConvertUpdateConfigOrder(t *testing.T) { + // test default behavior + updateConfig := convertUpdateConfig(&composetypes.UpdateConfig{}) + assert.Check(t, is.Equal("", updateConfig.Order)) + + // test start-first + updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{ + Order: "start-first", + }) + assert.Check(t, is.Equal(updateConfig.Order, "start-first")) + + // test stop-first + updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{ + Order: "stop-first", + }) + assert.Check(t, is.Equal(updateConfig.Order, "stop-first")) +} + +func TestConvertFileObject(t *testing.T) { + namespace := NewNamespace("testing") + config := composetypes.FileReferenceConfig{ + Source: "source", + Target: "target", + UID: "user", + GID: "group", + Mode: uint32Ptr(0644), + } + swarmRef, err := convertFileObject(namespace, config, lookupConfig) + assert.NilError(t, err) + + expected := swarmReferenceObject{ + Name: "testing_source", + File: swarmReferenceTarget{ + Name: config.Target, + UID: config.UID, + GID: config.GID, + Mode: os.FileMode(0644), + }, + } + assert.Check(t, is.DeepEqual(expected, swarmRef)) +} + +func lookupConfig(key string) (composetypes.FileObjectConfig, error) { + if key != "source" { + return composetypes.FileObjectConfig{}, errors.New("bad key") + } + return composetypes.FileObjectConfig{}, nil +} + +func TestConvertFileObjectDefaults(t *testing.T) { + namespace := NewNamespace("testing") + config := composetypes.FileReferenceConfig{Source: "source"} + swarmRef, err := convertFileObject(namespace, config, lookupConfig) + assert.NilError(t, err) + + expected := swarmReferenceObject{ + Name: "testing_source", + File: swarmReferenceTarget{ + Name: config.Source, + UID: "0", + GID: "0", + Mode: os.FileMode(0444), + }, + } + assert.Check(t, is.DeepEqual(expected, swarmRef)) +} + +func TestServiceConvertsIsolation(t *testing.T) { + src := composetypes.ServiceConfig{ + Isolation: "hyperv", + } + result, err := Service("1.35", Namespace{name: "foo"}, src, nil, nil, nil, nil) + assert.NilError(t, err) + assert.Check(t, is.Equal(container.IsolationHyperV, result.TaskTemplate.ContainerSpec.Isolation)) +} + +func TestConvertServiceSecrets(t *testing.T) { + namespace := Namespace{name: "foo"} + secrets := []composetypes.ServiceSecretConfig{ + {Source: "foo_secret"}, + {Source: "bar_secret"}, + } + secretSpecs := map[string]composetypes.SecretConfig{ + "foo_secret": { + Name: "foo_secret", + }, + "bar_secret": { + Name: "bar_secret", + }, + } + client := &fakeClient{ + secretListFunc: func(opts types.SecretListOptions) ([]swarm.Secret, error) { + assert.Check(t, is.Contains(opts.Filters.Get("name"), "foo_secret")) + assert.Check(t, is.Contains(opts.Filters.Get("name"), "bar_secret")) + return []swarm.Secret{ + {Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "foo_secret"}}}, + {Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "bar_secret"}}}, + }, 
nil + }, + } + refs, err := convertServiceSecrets(client, namespace, secrets, secretSpecs) + assert.NilError(t, err) + expected := []*swarm.SecretReference{ + { + SecretName: "bar_secret", + File: &swarm.SecretReferenceFileTarget{ + Name: "bar_secret", + UID: "0", + GID: "0", + Mode: 0444, + }, + }, + { + SecretName: "foo_secret", + File: &swarm.SecretReferenceFileTarget{ + Name: "foo_secret", + UID: "0", + GID: "0", + Mode: 0444, + }, + }, + } + assert.DeepEqual(t, expected, refs) +} + +func TestConvertServiceConfigs(t *testing.T) { + namespace := Namespace{name: "foo"} + service := composetypes.ServiceConfig{ + Configs: []composetypes.ServiceConfigObjConfig{ + {Source: "foo_config"}, + {Source: "bar_config"}, + }, + CredentialSpec: composetypes.CredentialSpecConfig{ + Config: "baz_config", + }, + } + configSpecs := map[string]composetypes.ConfigObjConfig{ + "foo_config": { + Name: "foo_config", + }, + "bar_config": { + Name: "bar_config", + }, + "baz_config": { + Name: "baz_config", + }, + } + client := &fakeClient{ + configListFunc: func(opts types.ConfigListOptions) ([]swarm.Config, error) { + assert.Check(t, is.Contains(opts.Filters.Get("name"), "foo_config")) + assert.Check(t, is.Contains(opts.Filters.Get("name"), "bar_config")) + assert.Check(t, is.Contains(opts.Filters.Get("name"), "baz_config")) + return []swarm.Config{ + {Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "foo_config"}}}, + {Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "bar_config"}}}, + {Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "baz_config"}}}, + }, nil + }, + } + refs, err := convertServiceConfigObjs(client, namespace, service, configSpecs) + assert.NilError(t, err) + expected := []*swarm.ConfigReference{ + { + ConfigName: "bar_config", + File: &swarm.ConfigReferenceFileTarget{ + Name: "bar_config", + UID: "0", + GID: "0", + Mode: 0444, + }, + }, + { + ConfigName: "baz_config", + Runtime: &swarm.ConfigReferenceRuntimeTarget{}, + }, + { + ConfigName: "foo_config", + File: &swarm.ConfigReferenceFileTarget{ + Name: "foo_config", + UID: "0", + GID: "0", + Mode: 0444, + }, + }, + } + assert.DeepEqual(t, expected, refs) +} + +type fakeClient struct { + client.Client + secretListFunc func(types.SecretListOptions) ([]swarm.Secret, error) + configListFunc func(types.ConfigListOptions) ([]swarm.Config, error) +} + +func (c *fakeClient) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if c.secretListFunc != nil { + return c.secretListFunc(options) + } + return []swarm.Secret{}, nil +} + +func (c *fakeClient) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if c.configListFunc != nil { + return c.configListFunc(options) + } + return []swarm.Config{}, nil +} + +func TestConvertUpdateConfigParallelism(t *testing.T) { + parallel := uint64(4) + + // test default behavior + updateConfig := convertUpdateConfig(&composetypes.UpdateConfig{}) + assert.Check(t, is.Equal(uint64(1), updateConfig.Parallelism)) + + // Non default value + updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{ + Parallelism: ¶llel, + }) + assert.Check(t, is.Equal(parallel, updateConfig.Parallelism)) +} + +func TestConvertServiceCapAddAndCapDrop(t *testing.T) { + tests := []struct { + title string + in, out composetypes.ServiceConfig + }{ + { + title: "default behavior", + }, + { + title: "some values", + in: composetypes.ServiceConfig{ + CapAdd: []string{"SYS_NICE", "CAP_NET_ADMIN"}, + CapDrop: []string{"CHOWN", 
"CAP_NET_ADMIN", "DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER"}, + }, + out: composetypes.ServiceConfig{ + CapAdd: []string{"CAP_NET_ADMIN", "CAP_SYS_NICE"}, + CapDrop: []string{"CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FOWNER", "CAP_FSETID"}, + }, + }, + { + title: "adding ALL capabilities", + in: composetypes.ServiceConfig{ + CapAdd: []string{"ALL", "CAP_NET_ADMIN"}, + CapDrop: []string{"CHOWN", "CAP_NET_ADMIN", "DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER"}, + }, + out: composetypes.ServiceConfig{ + CapAdd: []string{"ALL"}, + CapDrop: []string{"CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FOWNER", "CAP_FSETID", "CAP_NET_ADMIN"}, + }, + }, + { + title: "dropping ALL capabilities", + in: composetypes.ServiceConfig{ + CapAdd: []string{"CHOWN", "CAP_NET_ADMIN", "DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER"}, + CapDrop: []string{"ALL", "CAP_NET_ADMIN", "CAP_FOO"}, + }, + out: composetypes.ServiceConfig{ + CapAdd: []string{"CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FOWNER", "CAP_FSETID", "CAP_NET_ADMIN"}, + CapDrop: []string{"ALL"}, + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.title, func(t *testing.T) { + result, err := Service("1.41", Namespace{name: "foo"}, tc.in, nil, nil, nil, nil) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(result.TaskTemplate.ContainerSpec.CapabilityAdd, tc.out.CapAdd)) + assert.Check(t, is.DeepEqual(result.TaskTemplate.ContainerSpec.CapabilityDrop, tc.out.CapDrop)) + }) + } +} diff --git a/client/convert/volume.go b/client/convert/volume.go new file mode 100644 index 00000000..c2bebe07 --- /dev/null +++ b/client/convert/volume.go @@ -0,0 +1,162 @@ +package convert + +import ( + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/docker/api/types/mount" + "github.com/pkg/errors" +) + +type volumes map[string]composetypes.VolumeConfig + +// Volumes from compose-file types to engine api types +func Volumes(serviceVolumes []composetypes.ServiceVolumeConfig, stackVolumes volumes, namespace Namespace) ([]mount.Mount, error) { + var mounts []mount.Mount + + for _, volumeConfig := range serviceVolumes { + mount, err := convertVolumeToMount(volumeConfig, stackVolumes, namespace) + if err != nil { + return nil, err + } + mounts = append(mounts, mount) + } + return mounts, nil +} + +func createMountFromVolume(volume composetypes.ServiceVolumeConfig) mount.Mount { + return mount.Mount{ + Type: mount.Type(volume.Type), + Target: volume.Target, + ReadOnly: volume.ReadOnly, + Source: volume.Source, + Consistency: mount.Consistency(volume.Consistency), + } +} + +func handleVolumeToMount( + volume composetypes.ServiceVolumeConfig, + stackVolumes volumes, + namespace Namespace, +) (mount.Mount, error) { + result := createMountFromVolume(volume) + + if volume.Tmpfs != nil { + return mount.Mount{}, errors.New("tmpfs options are incompatible with type volume") + } + if volume.Bind != nil { + return mount.Mount{}, errors.New("bind options are incompatible with type volume") + } + // Anonymous volumes + if volume.Source == "" { + return result, nil + } + + stackVolume, exists := stackVolumes[volume.Source] + if !exists { + return mount.Mount{}, errors.Errorf("undefined volume %q", volume.Source) + } + + result.Source = namespace.Scope(volume.Source) + result.VolumeOptions = &mount.VolumeOptions{} + + if volume.Volume != nil { + result.VolumeOptions.NoCopy = volume.Volume.NoCopy + } + + if stackVolume.Name != "" { + result.Source = stackVolume.Name + } + + // External named volumes + if stackVolume.External.External { + return result, nil + } + + 
result.VolumeOptions.Labels = AddStackLabel(namespace, stackVolume.Labels) + if stackVolume.Driver != "" || stackVolume.DriverOpts != nil { + result.VolumeOptions.DriverConfig = &mount.Driver{ + Name: stackVolume.Driver, + Options: stackVolume.DriverOpts, + } + } + + return result, nil +} + +func handleBindToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, error) { + result := createMountFromVolume(volume) + + if volume.Source == "" { + return mount.Mount{}, errors.New("invalid bind source, source cannot be empty") + } + if volume.Volume != nil { + return mount.Mount{}, errors.New("volume options are incompatible with type bind") + } + if volume.Tmpfs != nil { + return mount.Mount{}, errors.New("tmpfs options are incompatible with type bind") + } + if volume.Bind != nil { + result.BindOptions = &mount.BindOptions{ + Propagation: mount.Propagation(volume.Bind.Propagation), + } + } + return result, nil +} + +func handleTmpfsToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, error) { + result := createMountFromVolume(volume) + + if volume.Source != "" { + return mount.Mount{}, errors.New("invalid tmpfs source, source must be empty") + } + if volume.Bind != nil { + return mount.Mount{}, errors.New("bind options are incompatible with type tmpfs") + } + if volume.Volume != nil { + return mount.Mount{}, errors.New("volume options are incompatible with type tmpfs") + } + if volume.Tmpfs != nil { + result.TmpfsOptions = &mount.TmpfsOptions{ + SizeBytes: volume.Tmpfs.Size, + } + } + return result, nil +} + +func handleNpipeToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, error) { + result := createMountFromVolume(volume) + + if volume.Source == "" { + return mount.Mount{}, errors.New("invalid npipe source, source cannot be empty") + } + if volume.Volume != nil { + return mount.Mount{}, errors.New("volume options are incompatible with type npipe") + } + if volume.Tmpfs != nil { + return mount.Mount{}, errors.New("tmpfs options are incompatible with type npipe") + } + if volume.Bind != nil { + result.BindOptions = &mount.BindOptions{ + Propagation: mount.Propagation(volume.Bind.Propagation), + } + } + return result, nil +} + +func convertVolumeToMount( + volume composetypes.ServiceVolumeConfig, + stackVolumes volumes, + namespace Namespace, +) (mount.Mount, error) { + + switch volume.Type { + case "volume", "": + return handleVolumeToMount(volume, stackVolumes, namespace) + case "bind": + return handleBindToMount(volume) + case "tmpfs": + return handleTmpfsToMount(volume) + case "npipe": + return handleNpipeToMount(volume) + } + return mount.Mount{}, errors.New("volume type must be volume, bind, tmpfs or npipe") +} diff --git a/client/convert/volume_test.go b/client/convert/volume_test.go new file mode 100644 index 00000000..2b08357b --- /dev/null +++ b/client/convert/volume_test.go @@ -0,0 +1,361 @@ +package convert + +import ( + "testing" + + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/docker/docker/api/types/mount" + "gotest.tools/v3/assert" + is "gotest.tools/v3/assert/cmp" +) + +func TestConvertVolumeToMountAnonymousVolume(t *testing.T) { + config := composetypes.ServiceVolumeConfig{ + Type: "volume", + Target: "/foo/bar", + } + expected := mount.Mount{ + Type: mount.TypeVolume, + Target: "/foo/bar", + } + mount, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo")) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, mount)) +} + +func TestConvertVolumeToMountAnonymousBind(t *testing.T) { + config := 
composetypes.ServiceVolumeConfig{ + Type: "bind", + Target: "/foo/bar", + Bind: &composetypes.ServiceVolumeBind{ + Propagation: "slave", + }, + } + _, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo")) + assert.Error(t, err, "invalid bind source, source cannot be empty") +} + +func TestConvertVolumeToMountUnapprovedType(t *testing.T) { + config := composetypes.ServiceVolumeConfig{ + Type: "foo", + Target: "/foo/bar", + } + _, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo")) + assert.Error(t, err, "volume type must be volume, bind, tmpfs or npipe") +} + +func TestConvertVolumeToMountConflictingOptionsBindInVolume(t *testing.T) { + namespace := NewNamespace("foo") + + config := composetypes.ServiceVolumeConfig{ + Type: "volume", + Source: "foo", + Target: "/target", + Bind: &composetypes.ServiceVolumeBind{ + Propagation: "slave", + }, + } + _, err := convertVolumeToMount(config, volumes{}, namespace) + assert.Error(t, err, "bind options are incompatible with type volume") +} + +func TestConvertVolumeToMountConflictingOptionsTmpfsInVolume(t *testing.T) { + namespace := NewNamespace("foo") + + config := composetypes.ServiceVolumeConfig{ + Type: "volume", + Source: "foo", + Target: "/target", + Tmpfs: &composetypes.ServiceVolumeTmpfs{ + Size: 1000, + }, + } + _, err := convertVolumeToMount(config, volumes{}, namespace) + assert.Error(t, err, "tmpfs options are incompatible with type volume") +} + +func TestConvertVolumeToMountConflictingOptionsVolumeInBind(t *testing.T) { + namespace := NewNamespace("foo") + + config := composetypes.ServiceVolumeConfig{ + Type: "bind", + Source: "/foo", + Target: "/target", + Volume: &composetypes.ServiceVolumeVolume{ + NoCopy: true, + }, + } + _, err := convertVolumeToMount(config, volumes{}, namespace) + assert.Error(t, err, "volume options are incompatible with type bind") +} + +func TestConvertVolumeToMountConflictingOptionsTmpfsInBind(t *testing.T) { + namespace := NewNamespace("foo") + + config := composetypes.ServiceVolumeConfig{ + Type: "bind", + Source: "/foo", + Target: "/target", + Tmpfs: &composetypes.ServiceVolumeTmpfs{ + Size: 1000, + }, + } + _, err := convertVolumeToMount(config, volumes{}, namespace) + assert.Error(t, err, "tmpfs options are incompatible with type bind") +} + +func TestConvertVolumeToMountConflictingOptionsBindInTmpfs(t *testing.T) { + namespace := NewNamespace("foo") + + config := composetypes.ServiceVolumeConfig{ + Type: "tmpfs", + Target: "/target", + Bind: &composetypes.ServiceVolumeBind{ + Propagation: "slave", + }, + } + _, err := convertVolumeToMount(config, volumes{}, namespace) + assert.Error(t, err, "bind options are incompatible with type tmpfs") +} + +func TestConvertVolumeToMountConflictingOptionsVolumeInTmpfs(t *testing.T) { + namespace := NewNamespace("foo") + + config := composetypes.ServiceVolumeConfig{ + Type: "tmpfs", + Target: "/target", + Volume: &composetypes.ServiceVolumeVolume{ + NoCopy: true, + }, + } + _, err := convertVolumeToMount(config, volumes{}, namespace) + assert.Error(t, err, "volume options are incompatible with type tmpfs") +} + +func TestConvertVolumeToMountNamedVolume(t *testing.T) { + stackVolumes := volumes{ + "normal": composetypes.VolumeConfig{ + Driver: "glusterfs", + DriverOpts: map[string]string{ + "opt": "value", + }, + Labels: map[string]string{ + "something": "labeled", + }, + }, + } + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Source: "foo_normal", + Target: "/foo", + ReadOnly: true, + 
VolumeOptions: &mount.VolumeOptions{ + Labels: map[string]string{ + LabelNamespace: "foo", + "something": "labeled", + }, + DriverConfig: &mount.Driver{ + Name: "glusterfs", + Options: map[string]string{ + "opt": "value", + }, + }, + NoCopy: true, + }, + } + config := composetypes.ServiceVolumeConfig{ + Type: "volume", + Source: "normal", + Target: "/foo", + ReadOnly: true, + Volume: &composetypes.ServiceVolumeVolume{ + NoCopy: true, + }, + } + mount, err := convertVolumeToMount(config, stackVolumes, namespace) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, mount)) +} + +func TestConvertVolumeToMountNamedVolumeWithNameCustomizd(t *testing.T) { + stackVolumes := volumes{ + "normal": composetypes.VolumeConfig{ + Name: "user_specified_name", + Driver: "vsphere", + DriverOpts: map[string]string{ + "opt": "value", + }, + Labels: map[string]string{ + "something": "labeled", + }, + }, + } + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Source: "user_specified_name", + Target: "/foo", + ReadOnly: true, + VolumeOptions: &mount.VolumeOptions{ + Labels: map[string]string{ + LabelNamespace: "foo", + "something": "labeled", + }, + DriverConfig: &mount.Driver{ + Name: "vsphere", + Options: map[string]string{ + "opt": "value", + }, + }, + NoCopy: true, + }, + } + config := composetypes.ServiceVolumeConfig{ + Type: "volume", + Source: "normal", + Target: "/foo", + ReadOnly: true, + Volume: &composetypes.ServiceVolumeVolume{ + NoCopy: true, + }, + } + mount, err := convertVolumeToMount(config, stackVolumes, namespace) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, mount)) +} + +func TestConvertVolumeToMountNamedVolumeExternal(t *testing.T) { + stackVolumes := volumes{ + "outside": composetypes.VolumeConfig{ + Name: "special", + External: composetypes.External{External: true}, + }, + } + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Source: "special", + Target: "/foo", + VolumeOptions: &mount.VolumeOptions{NoCopy: false}, + } + config := composetypes.ServiceVolumeConfig{ + Type: "volume", + Source: "outside", + Target: "/foo", + } + mount, err := convertVolumeToMount(config, stackVolumes, namespace) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, mount)) +} + +func TestConvertVolumeToMountNamedVolumeExternalNoCopy(t *testing.T) { + stackVolumes := volumes{ + "outside": composetypes.VolumeConfig{ + Name: "special", + External: composetypes.External{External: true}, + }, + } + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeVolume, + Source: "special", + Target: "/foo", + VolumeOptions: &mount.VolumeOptions{ + NoCopy: true, + }, + } + config := composetypes.ServiceVolumeConfig{ + Type: "volume", + Source: "outside", + Target: "/foo", + Volume: &composetypes.ServiceVolumeVolume{ + NoCopy: true, + }, + } + mount, err := convertVolumeToMount(config, stackVolumes, namespace) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, mount)) +} + +func TestConvertVolumeToMountBind(t *testing.T) { + stackVolumes := volumes{} + namespace := NewNamespace("foo") + expected := mount.Mount{ + Type: mount.TypeBind, + Source: "/bar", + Target: "/foo", + ReadOnly: true, + BindOptions: &mount.BindOptions{Propagation: mount.PropagationShared}, + } + config := composetypes.ServiceVolumeConfig{ + Type: "bind", + Source: "/bar", + Target: "/foo", + ReadOnly: true, + Bind: &composetypes.ServiceVolumeBind{Propagation: "shared"}, + } + mount, err := 
convertVolumeToMount(config, stackVolumes, namespace) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, mount)) +} + +func TestConvertVolumeToMountVolumeDoesNotExist(t *testing.T) { + namespace := NewNamespace("foo") + config := composetypes.ServiceVolumeConfig{ + Type: "volume", + Source: "unknown", + Target: "/foo", + ReadOnly: true, + } + _, err := convertVolumeToMount(config, volumes{}, namespace) + assert.Error(t, err, "undefined volume \"unknown\"") +} + +func TestConvertTmpfsToMountVolume(t *testing.T) { + config := composetypes.ServiceVolumeConfig{ + Type: "tmpfs", + Target: "/foo/bar", + Tmpfs: &composetypes.ServiceVolumeTmpfs{ + Size: 1000, + }, + } + expected := mount.Mount{ + Type: mount.TypeTmpfs, + Target: "/foo/bar", + TmpfsOptions: &mount.TmpfsOptions{SizeBytes: 1000}, + } + mount, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo")) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, mount)) +} + +func TestConvertTmpfsToMountVolumeWithSource(t *testing.T) { + config := composetypes.ServiceVolumeConfig{ + Type: "tmpfs", + Source: "/bar", + Target: "/foo/bar", + Tmpfs: &composetypes.ServiceVolumeTmpfs{ + Size: 1000, + }, + } + + _, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo")) + assert.Error(t, err, "invalid tmpfs source, source must be empty") +} + +func TestConvertVolumeToMountAnonymousNpipe(t *testing.T) { + config := composetypes.ServiceVolumeConfig{ + Type: "npipe", + Source: `\\.\pipe\foo`, + Target: `\\.\pipe\foo`, + } + expected := mount.Mount{ + Type: mount.TypeNamedPipe, + Source: `\\.\pipe\foo`, + Target: `\\.\pipe\foo`, + } + mount, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo")) + assert.NilError(t, err) + assert.Check(t, is.DeepEqual(expected, mount)) +} diff --git a/client/stack.go b/client/stack.go deleted file mode 100644 index 1226c5df..00000000 --- a/client/stack.go +++ /dev/null @@ -1,36 +0,0 @@ -package client - -import ( - "context" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -const StackNamespace = "com.docker.stack.namespace" - -type StackStatus struct { - Services []swarm.Service - Err error -} - -func QueryStackStatus(contextName string) StackStatus { - cl, err := NewClientWithContext(contextName) - if err != nil { - if strings.Contains(err.Error(), "does not exist") { - // No local context found, bail out gracefully - return StackStatus{[]swarm.Service{}, nil} - } - return StackStatus{[]swarm.Service{}, err} - } - ctx := context.Background() - filter := filters.NewArgs() - filter.Add("label", StackNamespace) - services, err := cl.ServiceList(ctx, types.ServiceListOptions{Filters: filter}) - if err != nil { - return StackStatus{[]swarm.Service{}, err} - } - return StackStatus{services, nil} -} diff --git a/client/stack/loader.go b/client/stack/loader.go new file mode 100644 index 00000000..d3672b77 --- /dev/null +++ b/client/stack/loader.go @@ -0,0 +1,133 @@ +package stack + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/docker/cli/cli/command/stack/options" + "github.com/docker/cli/cli/compose/loader" + "github.com/docker/cli/cli/compose/schema" + composetypes "github.com/docker/cli/cli/compose/types" + "github.com/sirupsen/logrus" +) + +// LoadComposefile parse the composefile specified in the cli and returns its Config and version. 
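+// If any forbidden properties are present, the load fails with an error that
+// lists them; unsupported and deprecated properties are only reported as
+// warnings via logrus and are otherwise ignored.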
+func LoadComposefile(opts options.Deploy) (*composetypes.Config, error) { + configDetails, err := getConfigDetails(opts.Composefiles) + if err != nil { + return nil, err + } + + dicts := getDictsFrom(configDetails.ConfigFiles) + config, err := loader.Load(configDetails) + if err != nil { + if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok { + return nil, fmt.Errorf("compose file contains unsupported options:\n\n%s", + propertyWarnings(fpe.Properties)) + } + + return nil, err + } + + unsupportedProperties := loader.GetUnsupportedProperties(dicts...) + if len(unsupportedProperties) > 0 { + logrus.Warnf("Ignoring unsupported options: %s\n\n", + strings.Join(unsupportedProperties, ", ")) + } + + deprecatedProperties := loader.GetDeprecatedProperties(dicts...) + if len(deprecatedProperties) > 0 { + logrus.Warnf("Ignoring deprecated options:\n\n%s\n\n", + propertyWarnings(deprecatedProperties)) + } + return config, nil +} + +func getDictsFrom(configFiles []composetypes.ConfigFile) []map[string]interface{} { + dicts := []map[string]interface{}{} + + for _, configFile := range configFiles { + dicts = append(dicts, configFile.Config) + } + + return dicts +} + +func propertyWarnings(properties map[string]string) string { + var msgs []string + for name, description := range properties { + msgs = append(msgs, fmt.Sprintf("%s: %s", name, description)) + } + sort.Strings(msgs) + return strings.Join(msgs, "\n\n") +} + +func getConfigDetails(composefiles []string) (composetypes.ConfigDetails, error) { + var details composetypes.ConfigDetails + + absPath, err := filepath.Abs(composefiles[0]) + if err != nil { + return details, err + } + details.WorkingDir = filepath.Dir(absPath) + + details.ConfigFiles, err = loadConfigFiles(composefiles) + if err != nil { + return details, err + } + // Take the first file version (2 files can't have different version) + details.Version = schema.Version(details.ConfigFiles[0].Config) + details.Environment, err = buildEnvironment(os.Environ()) + return details, err +} + +func buildEnvironment(env []string) (map[string]string, error) { + result := make(map[string]string, len(env)) + for _, s := range env { + // if value is empty, s is like "K=", not "K". 
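+		// e.g. "PATH=/usr/bin" splits into ("PATH", "/usr/bin") and "EMPTY="
+		// into ("EMPTY", ""), while a bare "TERM" with no "=" is rejected below.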
+ if !strings.Contains(s, "=") { + return result, fmt.Errorf("unexpected environment %q", s) + } + kv := strings.SplitN(s, "=", 2) + result[kv[0]] = kv[1] + } + return result, nil +} + +func loadConfigFiles(filenames []string) ([]composetypes.ConfigFile, error) { + var configFiles []composetypes.ConfigFile + + for _, filename := range filenames { + configFile, err := loadConfigFile(filename) + if err != nil { + return configFiles, err + } + configFiles = append(configFiles, *configFile) + } + + return configFiles, nil +} + +func loadConfigFile(filename string) (*composetypes.ConfigFile, error) { + var bytes []byte + var err error + + bytes, err = ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + config, err := loader.ParseYAML(bytes) + if err != nil { + return nil, err + } + + return &composetypes.ConfigFile{ + Filename: filename, + Config: config, + }, nil +} diff --git a/client/stack/stack.go b/client/stack/stack.go new file mode 100644 index 00000000..776ac3e5 --- /dev/null +++ b/client/stack/stack.go @@ -0,0 +1,104 @@ +package stack + +import ( + "context" + "errors" + "fmt" + "strings" + "unicode" + + abraClient "coopcloud.tech/abra/client" + "coopcloud.tech/abra/client/convert" + "github.com/docker/cli/opts" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" +) + +type StackStatus struct { + Services []swarm.Service + Err error +} + +func getStackFilter(namespace string) filters.Args { + filter := filters.NewArgs() + filter.Add("label", convert.LabelNamespace+"="+namespace) + return filter +} + +func getStackServiceFilter(namespace string) filters.Args { + return getStackFilter(namespace) +} + +func getStackFilterFromOpt(namespace string, opt opts.FilterOpt) filters.Args { + filter := opt.Value() + filter.Add("label", convert.LabelNamespace+"="+namespace) + return filter +} + +func getAllStacksFilter() filters.Args { + filter := filters.NewArgs() + filter.Add("label", convert.LabelNamespace) + return filter +} + +func getStackServices(ctx context.Context, apiclient client.APIClient, namespace string) ([]swarm.Service, error) { + return apiclient.ServiceList(ctx, types.ServiceListOptions{Filters: getStackServiceFilter(namespace)}) +} + +func GetAllDeployedServices(contextName string) StackStatus { + cl, err := abraClient.NewClientWithContext(contextName) + if err != nil { + if strings.Contains(err.Error(), "does not exist") { + // No local context found, bail out gracefully + return StackStatus{[]swarm.Service{}, nil} + } + return StackStatus{[]swarm.Service{}, err} + } + ctx := context.Background() + services, err := cl.ServiceList(ctx, types.ServiceListOptions{Filters: getAllStacksFilter()}) + if err != nil { + return StackStatus{[]swarm.Service{}, err} + } + return StackStatus{services, nil} +} + +func checkDaemonIsSwarmManager(contextName string) error { + cl, err := abraClient.NewClientWithContext(contextName) + if err != nil { + return err + } + info, err := cl.Info(context.Background()) + if err != nil { + return err + } + if !info.Swarm.ControlAvailable { + return errors.New("this server is not a swarm manager. Did you run \"abra server init\"?") + } + return nil +} + +// validateStackName checks if the provided string is a valid stack name (namespace). +// It currently only does a rudimentary check if the string is empty, or consists +// of only whitespace and quoting characters. 
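+// For example "", "   " and "''" are rejected, while "my_app" is accepted;
+// no further validation of the characters in the name is performed here.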
+func validateStackName(namespace string) error { + v := strings.TrimFunc(namespace, quotesOrWhitespace) + if v == "" { + return fmt.Errorf("invalid stack name: %q", namespace) + } + return nil +} + +func quotesOrWhitespace(r rune) bool { + return unicode.IsSpace(r) || r == '"' || r == '\'' +} + +func DeployStack(namespace string) { +} + +// TODO: Prune services from stack + +func pruneServices() error { + return nil +} diff --git a/config/app.go b/config/app.go index 6c11d700..c9ea2920 100644 --- a/config/app.go +++ b/config/app.go @@ -8,7 +8,8 @@ import ( "path" "strings" - "coopcloud.tech/abra/client" + "coopcloud.tech/abra/client/convert" + "coopcloud.tech/abra/client/stack" ) // Type aliases to make code hints easier to understand @@ -200,10 +201,10 @@ func SanitiseAppName(name string) string { // GetAppStatuses queries servers to check the deployment status of given apps func GetAppStatuses(appFiles AppFiles) (map[string]string, error) { servers := appFiles.GetServers() - ch := make(chan client.StackStatus, len(servers)) + ch := make(chan stack.StackStatus, len(servers)) for _, server := range servers { go func(s string) { - ch <- client.QueryStackStatus(s) + ch <- stack.GetAllDeployedServices(s) }(server) } @@ -211,7 +212,7 @@ func GetAppStatuses(appFiles AppFiles) (map[string]string, error) { for range servers { status := <-ch for _, service := range status.Services { - name := service.Spec.Labels[client.StackNamespace] + name := service.Spec.Labels[convert.LabelNamespace] if _, ok := statuses[name]; !ok { statuses[name] = "deployed" } diff --git a/go.mod b/go.mod index 06f57e43..5095d62f 100644 --- a/go.mod +++ b/go.mod @@ -12,13 +12,16 @@ require ( github.com/docker/go-units v0.4.0 github.com/fvbommel/sortorder v1.0.2 // indirect github.com/go-git/go-git/v5 v5.4.2 + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/hetznercloud/hcloud-go v1.28.0 github.com/moby/sys/mount v0.2.0 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 github.com/morikuni/aec v1.0.0 // indirect github.com/olekukonko/tablewriter v0.0.5 + github.com/pkg/errors v0.9.1 github.com/schultz-is/passgen v1.0.1 github.com/sirupsen/logrus v1.8.1 github.com/theupdateframework/notary v0.7.0 // indirect github.com/urfave/cli/v2 v2.3.0 //ct + gotest.tools/v3 v3.0.3 ) diff --git a/go.sum b/go.sum index c0a8adea..aff827a9 100644 --- a/go.sum +++ b/go.sum @@ -400,6 +400,8 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -731,8 +733,11 @@ github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xanzy/ssh-agent v0.3.0 
h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f h1:mvXjJIHRZyhNuGassLTcXTwjiWq7NmjdavZsUnmFybQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=