refactor: use pkg directory structure

2021-09-05 21:37:03 +02:00
parent f59380a35e
commit b7742d5e18
55 changed files with 55 additions and 55 deletions

pkg/client/client.go

@@ -0,0 +1,52 @@
// Package client provides Docker client initialisation functions.
package client
import (
"net/http"
"os"
"github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
// New initiates a new Docker client.
func New(contextName string) (*client.Client, error) {
context, err := GetContext(contextName)
if err != nil {
return nil, err
}
ctxEndpoint, err := GetContextEndpoint(context)
if err != nil {
return nil, err
}
helper := newConnectionHelper(ctxEndpoint)
httpClient := &http.Client{
// No tls, no proxy
Transport: &http.Transport{
DialContext: helper.Dialer,
},
}
var clientOpts []client.Opt
clientOpts = append(clientOpts,
client.WithHTTPClient(httpClient),
client.WithHost(helper.Host),
client.WithDialContext(helper.Dialer),
)
version := os.Getenv("DOCKER_API_VERSION")
if version != "" {
clientOpts = append(clientOpts, client.WithVersion(version))
} else {
clientOpts = append(clientOpts, client.WithAPIVersionNegotiation())
}
cl, err := client.NewClientWithOpts(clientOpts...)
if err != nil {
logrus.Fatalf("unable to create Docker client: %s", err)
}
return cl, nil
}
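
For reference, a minimal sketch of how `New` might be called by a hypothetical caller. The import path `coopcloud.tech/abra/pkg/client` (module root plus the new `pkg/` layout) and the context name are assumptions.

```go
package main

import (
	"context"
	"fmt"
	"log"

	// Assumed import path after the pkg/ refactor.
	"coopcloud.tech/abra/pkg/client"
)

func main() {
	// "example.com" is a hypothetical context created beforehand with CreateContext.
	cl, err := client.New("example.com")
	if err != nil {
		log.Fatal(err)
	}
	// Query the daemon through the SSH connection helper to show the client works.
	info, err := cl.Info(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected to Docker", info.ServerVersion)
}
```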

pkg/client/connection.go

@@ -0,0 +1,45 @@
package client
import (
"github.com/docker/cli/cli/connhelper"
"github.com/docker/cli/cli/context/docker"
dCliContextStore "github.com/docker/cli/cli/context/store"
dClient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
func newConnectionHelper(daemonURL string) *connhelper.ConnectionHelper {
helper, err := connhelper.GetConnectionHelper(daemonURL)
if err != nil {
logrus.Fatal(err)
}
return helper
}
func getDockerEndpoint(host string) (docker.Endpoint, error) {
skipTLSVerify := false
ep := docker.Endpoint{
EndpointMeta: docker.EndpointMeta{
Host: host,
SkipTLSVerify: skipTLSVerify,
},
}
// try to resolve a docker client, validating the configuration
opts, err := ep.ClientOpts()
if err != nil {
return docker.Endpoint{}, err
}
if _, err := dClient.NewClientWithOpts(opts...); err != nil {
return docker.Endpoint{}, err
}
return ep, nil
}
func getDockerEndpointMetadataAndTLS(host string) (docker.EndpointMeta, *dCliContextStore.EndpointTLSData, error) {
ep, err := getDockerEndpoint(host)
if err != nil {
return docker.EndpointMeta{}, nil, err
}
return ep.EndpointMeta, ep.TLSData.ToStoreTLSData(), nil
}

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,7 @@
# github.com/docker/cli/cli/command/container
Since this is copy-pasted more or less verbatim from the upstream library, the
Apache license is included in this folder. The only small edits to the source
code are renamed functions and the removal of parts we don't need.
Same vibe as [../convert](../convert).

@@ -0,0 +1,130 @@
package container
import (
"context"
"errors"
"fmt"
"io"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
apiclient "github.com/docker/docker/client"
"github.com/sirupsen/logrus"
)
// RunExec creates an exec process in the given container, attaches to it and
// waits for its exit status, similar to `docker exec`.
func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string, execConfig *types.ExecConfig) error {
ctx := context.Background()
// We need to check the tty _before_ we do the ContainerExecCreate, because
// otherwise if we error out we will leak execIDs on the server (and
// there's no easy way to clean those up). But also in order to make "not
// exist" errors take precedence we do a dummy inspect first.
if _, err := client.ContainerInspect(ctx, containerID); err != nil {
return err
}
if !execConfig.Detach {
if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil {
return err
}
}
response, err := client.ContainerExecCreate(ctx, containerID, *execConfig)
if err != nil {
return err
}
execID := response.ID
if execID == "" {
return errors.New("exec ID empty")
}
if execConfig.Detach {
execStartCheck := types.ExecStartCheck{
Detach: execConfig.Detach,
Tty: execConfig.Tty,
}
return client.ContainerExecStart(ctx, execID, execStartCheck)
}
return interactiveExec(ctx, dockerCli, client, execConfig, execID)
}
func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclient.Client,
execConfig *types.ExecConfig, execID string) error {
// Interactive exec requested.
var (
out, stderr io.Writer
in io.ReadCloser
)
if execConfig.AttachStdin {
in = dockerCli.In()
}
if execConfig.AttachStdout {
out = dockerCli.Out()
}
if execConfig.AttachStderr {
if execConfig.Tty {
stderr = dockerCli.Out()
} else {
stderr = dockerCli.Err()
}
}
execStartCheck := types.ExecStartCheck{
Tty: execConfig.Tty,
}
resp, err := client.ContainerExecAttach(ctx, execID, execStartCheck)
if err != nil {
return err
}
defer resp.Close()
errCh := make(chan error, 1)
go func() {
defer close(errCh)
errCh <- func() error {
streamer := hijackedIOStreamer{
streams: dockerCli,
inputStream: in,
outputStream: out,
errorStream: stderr,
resp: resp,
tty: execConfig.Tty,
detachKeys: execConfig.DetachKeys,
}
return streamer.stream(ctx)
}()
}()
if execConfig.Tty && dockerCli.In().IsTerminal() {
if err := MonitorTtySize(ctx, client, dockerCli, execID, true); err != nil {
fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
}
}
if err := <-errCh; err != nil {
logrus.Debugf("Error hijack: %s", err)
return err
}
return getExecExitStatus(ctx, client, execID)
}
func getExecExitStatus(ctx context.Context, client apiclient.ContainerAPIClient, execID string) error {
resp, err := client.ContainerExecInspect(ctx, execID)
if err != nil {
// If we can't connect, then the daemon probably died.
if !apiclient.IsErrConnectionFailed(err) {
return err
}
return cli.StatusError{StatusCode: -1}
}
status := resp.ExitCode
if status != 0 {
return cli.StatusError{StatusCode: status}
}
return nil
}
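
A rough sketch of how `RunExec` might be wired up by a caller: attach stdout/stderr without a TTY and let it handle exec create, attach and exit-status propagation. The import path, helper name and command are assumptions; `dockerCli` is whatever `command.Cli` implementation the caller already holds.

```go
package example

import (
	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types"
	apiclient "github.com/docker/docker/client"

	// Assumed import path for this vendored package after the pkg/ refactor.
	"coopcloud.tech/abra/pkg/client/container"
)

// runLs is a hypothetical helper that lists the container's root directory.
func runLs(dockerCli command.Cli, cl *apiclient.Client, containerID string) error {
	execConfig := &types.ExecConfig{
		AttachStdout: true,
		AttachStderr: true,
		Cmd:          []string{"ls", "-l", "/"},
	}
	return container.RunExec(dockerCli, cl, containerID, execConfig)
}
```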

@@ -0,0 +1,208 @@
package container
import (
"context"
"fmt"
"io"
"runtime"
"sync"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/stdcopy"
"github.com/moby/term"
"github.com/sirupsen/logrus"
)
// The default escape key sequence: ctrl-p, ctrl-q
// TODO: This could be moved to `pkg/term`.
var defaultEscapeKeys = []byte{16, 17}
// A hijackedIOStreamer handles copying input to and output from streams to the
// connection.
type hijackedIOStreamer struct {
streams command.Streams
inputStream io.ReadCloser
outputStream io.Writer
errorStream io.Writer
resp types.HijackedResponse
tty bool
detachKeys string
}
// stream handles setting up the IO and then begins streaming stdin/stdout
// to/from the hijacked connection, blocking until it is either done reading
// output, the user inputs the detach key sequence when in TTY mode, or when
// the given context is cancelled.
func (h *hijackedIOStreamer) stream(ctx context.Context) error {
restoreInput, err := h.setupInput()
if err != nil {
return fmt.Errorf("unable to setup input stream: %s", err)
}
defer restoreInput()
outputDone := h.beginOutputStream(restoreInput)
inputDone, detached := h.beginInputStream(restoreInput)
select {
case err := <-outputDone:
return err
case <-inputDone:
// Input stream has closed.
if h.outputStream != nil || h.errorStream != nil {
// Wait for output to complete streaming.
select {
case err := <-outputDone:
return err
case <-ctx.Done():
return ctx.Err()
}
}
return nil
case err := <-detached:
// Got a detach key sequence.
return err
case <-ctx.Done():
return ctx.Err()
}
}
func (h *hijackedIOStreamer) setupInput() (restore func(), err error) {
if h.inputStream == nil || !h.tty {
// No need to setup input TTY.
// The restore func is a nop.
return func() {}, nil
}
if err := setRawTerminal(h.streams); err != nil {
return nil, fmt.Errorf("unable to set IO streams as raw terminal: %s", err)
}
// Use sync.Once so we may call restore multiple times but ensure we
// only restore the terminal once.
var restoreOnce sync.Once
restore = func() {
restoreOnce.Do(func() {
restoreTerminal(h.streams, h.inputStream)
})
}
// Wrap the input to detect detach escape sequence.
// Use default escape keys if an invalid sequence is given.
escapeKeys := defaultEscapeKeys
if h.detachKeys != "" {
customEscapeKeys, err := term.ToBytes(h.detachKeys)
if err != nil {
logrus.Warnf("invalid detach escape keys, using default: %s", err)
} else {
escapeKeys = customEscapeKeys
}
}
h.inputStream = ioutils.NewReadCloserWrapper(term.NewEscapeProxy(h.inputStream, escapeKeys), h.inputStream.Close)
return restore, nil
}
func (h *hijackedIOStreamer) beginOutputStream(restoreInput func()) <-chan error {
if h.outputStream == nil && h.errorStream == nil {
// There is no need to copy output.
return nil
}
outputDone := make(chan error)
go func() {
var err error
// When TTY is ON, use regular copy
if h.outputStream != nil && h.tty {
_, err = io.Copy(h.outputStream, h.resp.Reader)
// We should restore the terminal as soon as possible
// once the connection ends so any following print
// messages will be in normal type.
restoreInput()
} else {
_, err = stdcopy.StdCopy(h.outputStream, h.errorStream, h.resp.Reader)
}
logrus.Debug("[hijack] End of stdout")
if err != nil {
logrus.Debugf("Error receiveStdout: %s", err)
}
outputDone <- err
}()
return outputDone
}
func (h *hijackedIOStreamer) beginInputStream(restoreInput func()) (doneC <-chan struct{}, detachedC <-chan error) {
inputDone := make(chan struct{})
detached := make(chan error)
go func() {
if h.inputStream != nil {
_, err := io.Copy(h.resp.Conn, h.inputStream)
// We should restore the terminal as soon as possible
// once the connection ends so any following print
// messages will be in normal type.
restoreInput()
logrus.Debug("[hijack] End of stdin")
if _, ok := err.(term.EscapeError); ok {
detached <- err
return
}
if err != nil {
// This error will also occur on the receive
// side (from stdout) where it will be
// propagated back to the caller.
logrus.Debugf("Error sendStdin: %s", err)
}
}
if err := h.resp.CloseWrite(); err != nil {
logrus.Debugf("Couldn't send EOF: %s", err)
}
close(inputDone)
}()
return inputDone, detached
}
func setRawTerminal(streams command.Streams) error {
if err := streams.In().SetRawTerminal(); err != nil {
return err
}
return streams.Out().SetRawTerminal()
}
// nolint: unparam
func restoreTerminal(streams command.Streams, in io.Closer) error {
streams.In().RestoreTerminal()
streams.Out().RestoreTerminal()
// WARNING: DO NOT REMOVE THE OS CHECKS !!!
// For some reason this Close call blocks on darwin..
// As the client exits right after, simply discard the close
// until we find a better solution.
//
// This can also cause the client on Windows to get stuck in Win32 CloseHandle()
// in some cases. See https://github.com/docker/docker/issues/28267#issuecomment-288237442
// Tracked internally at Microsoft by VSO #11352156. In the
// Windows case, you hit this if you are using the native/v2 console,
// not the "legacy" console, and you start the client in a new window. eg
// `start docker run --rm -it microsoft/nanoserver cmd /s /c echo foobar`
// will hang. Remove start, and it won't repro.
if in != nil && runtime.GOOS != "darwin" && runtime.GOOS != "windows" {
return in.Close()
}
return nil
}

@@ -0,0 +1,98 @@
package container
import (
"context"
"fmt"
"os"
gosignal "os/signal"
"runtime"
"time"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
apiclient "github.com/docker/docker/client"
"github.com/moby/sys/signal"
"github.com/sirupsen/logrus"
)
// resizeTtyTo resizes tty to specific height and width
func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) error {
if height == 0 && width == 0 {
return nil
}
options := types.ResizeOptions{
Height: height,
Width: width,
}
var err error
if isExec {
err = client.ContainerExecResize(ctx, id, options)
} else {
err = client.ContainerResize(ctx, id, options)
}
if err != nil {
logrus.Debugf("Error resize: %s\r", err)
}
return err
}
// resizeTty is to resize the tty with cli out's tty size
func resizeTty(ctx context.Context, client *apiclient.Client, cli command.Cli, id string, isExec bool) error {
height, width := cli.Out().GetTtySize()
return resizeTtyTo(ctx, client, id, height, width, isExec)
}
// initTtySize initialises the tty's size to match the window; if there is an error, it retries up to 5 times.
func initTtySize(ctx context.Context, client *apiclient.Client, cli command.Cli, id string, isExec bool, resizeTtyFunc func(ctx context.Context, client *apiclient.Client, cli command.Cli, id string, isExec bool) error) {
rttyFunc := resizeTtyFunc
if rttyFunc == nil {
rttyFunc = resizeTty
}
if err := rttyFunc(ctx, client, cli, id, isExec); err != nil {
go func() {
var err error
for retry := 0; retry < 5; retry++ {
time.Sleep(10 * time.Millisecond)
if err = rttyFunc(ctx, client, cli, id, isExec); err == nil {
break
}
}
if err != nil {
fmt.Fprintln(cli.Err(), "failed to resize tty, using default size")
}
}()
}
}
// MonitorTtySize updates the container tty size when the terminal tty changes size
func MonitorTtySize(ctx context.Context, client *apiclient.Client, cli command.Cli, id string, isExec bool) error {
initTtySize(ctx, client, cli, id, isExec, resizeTty)
if runtime.GOOS == "windows" {
go func() {
prevH, prevW := cli.Out().GetTtySize()
for {
time.Sleep(time.Millisecond * 250)
h, w := cli.Out().GetTtySize()
if prevW != w || prevH != h {
resizeTty(ctx, client, cli, id, isExec)
}
prevH = h
prevW = w
}
}()
} else {
sigchan := make(chan os.Signal, 1)
gosignal.Notify(sigchan, signal.SIGWINCH)
go func() {
for range sigchan {
resizeTty(ctx, client, cli, id, isExec)
}
}()
}
return nil
}

pkg/client/context.go

@@ -0,0 +1,119 @@
package client
import (
"errors"
"fmt"
command "github.com/docker/cli/cli/command"
dConfig "github.com/docker/cli/cli/config"
context "github.com/docker/cli/cli/context"
"github.com/docker/cli/cli/context/docker"
contextStore "github.com/docker/cli/cli/context/store"
"github.com/moby/term"
)
// Context is an alias for the Docker CLI context store metadata.
type Context = contextStore.Metadata
// CreateContext creates a Docker context called contextName, building an SSH
// endpoint URL from the context name and the optional user and port.
func CreateContext(contextName string, user string, port string) error {
host := contextName
if user != "" {
host = fmt.Sprintf("%s@%s", user, host)
}
if port != "" {
host = fmt.Sprintf("%s:%s", host, port)
}
host = fmt.Sprintf("ssh://%s", host)
if err := createContext(contextName, host); err != nil {
return err
}
return nil
}
// createContext interacts with Docker Context to create a Docker context config
func createContext(name string, host string) error {
s := NewDefaultDockerContextStore()
contextMetadata := contextStore.Metadata{
Endpoints: make(map[string]interface{}),
Name: name,
}
contextTLSData := contextStore.ContextTLSData{
Endpoints: make(map[string]contextStore.EndpointTLSData),
}
dockerEP, dockerTLS, err := getDockerEndpointMetadataAndTLS(host)
if err != nil {
return err
}
contextMetadata.Endpoints[docker.DockerEndpoint] = dockerEP
if dockerTLS != nil {
contextTLSData.Endpoints[docker.DockerEndpoint] = *dockerTLS
}
if err := s.CreateOrUpdate(contextMetadata); err != nil {
return err
}
if err := s.ResetTLSMaterial(name, &contextTLSData); err != nil {
return err
}
return nil
}
func DeleteContext(name string) error {
if name == "default" {
return errors.New("context 'default' cannot be removed")
}
if _, err := GetContext(name); err != nil {
return err
}
// remove any context that might be loaded
// TODO: Check if the context we are removing is the active one rather than doing it all the time
cfg := dConfig.LoadDefaultConfigFile(nil)
cfg.CurrentContext = ""
if err := cfg.Save(); err != nil {
return err
}
return NewDefaultDockerContextStore().Remove(name)
}
func GetContext(contextName string) (contextStore.Metadata, error) {
ctx, err := NewDefaultDockerContextStore().GetMetadata(contextName)
if err != nil {
return contextStore.Metadata{}, err
}
return ctx, nil
}
func GetContextEndpoint(ctx contextStore.Metadata) (string, error) {
// safe to use docker key hardcoded since abra doesn't use k8s... yet...
endpointmeta, ok := ctx.Endpoints["docker"].(context.EndpointMetaBase)
if !ok {
err := errors.New("context lacks Docker endpoint")
return "", err
}
return endpointmeta.Host, nil
}
func newContextStore(dir string, config contextStore.Config) contextStore.Store {
return contextStore.New(dir, config)
}
func NewDefaultDockerContextStore() *command.ContextStoreWithDefault {
// Grabbing the stderr from Docker commands
// Much easier to fit this into the code we are using to replicate docker cli commands
_, _, stderr := term.StdStreams()
// TODO: Look into custom docker configs in case users want that
dockerConfig := dConfig.LoadDefaultConfigFile(stderr)
contextDir := dConfig.ContextStoreDir()
storeConfig := command.DefaultContextStoreConfig()
store := newContextStore(contextDir, storeConfig)
dockerContextStore := &command.ContextStoreWithDefault{
Store: store,
Resolver: func() (*command.DefaultContext, error) {
// nil for the Opts because it works without it and it's a cli thing
return command.ResolveDefaultContext(nil, dockerConfig, storeConfig, stderr)
},
}
return dockerContextStore
}
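
A short sketch of the context lifecycle these helpers expose: create, look up, resolve the endpoint, delete. The hostname, user, port and import path are assumptions.

```go
package example

import (
	"fmt"
	"log"

	// Assumed import path after the pkg/ refactor.
	"coopcloud.tech/abra/pkg/client"
)

func contextLifecycle() {
	// Stores an endpoint of ssh://abra@swarm.example.com:222 under the name "swarm.example.com".
	if err := client.CreateContext("swarm.example.com", "abra", "222"); err != nil {
		log.Fatal(err)
	}
	ctx, err := client.GetContext("swarm.example.com")
	if err != nil {
		log.Fatal(err)
	}
	endpoint, err := client.GetContextEndpoint(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(endpoint) // ssh://abra@swarm.example.com:222
	if err := client.DeleteContext("swarm.example.com"); err != nil {
		log.Fatal(err)
	}
}
```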

@@ -0,0 +1,52 @@
package client_test
import (
"testing"
"coopcloud.tech/abra/client"
dContext "github.com/docker/cli/cli/context"
dCliContextStore "github.com/docker/cli/cli/context/store"
)
type TestContext struct {
context dCliContextStore.Metadata
expectedEndpoint string
}
func dockerContext(host, key string) TestContext {
dockerContext := dCliContextStore.Metadata{
Name: "foo",
Metadata: nil,
Endpoints: map[string]interface{}{
key: dContext.EndpointMetaBase{
Host: host,
SkipTLSVerify: false,
},
},
}
return TestContext{
context: dockerContext,
expectedEndpoint: host,
}
}
func TestGetContextEndpoint(t *testing.T) {
var testDockerContexts = []TestContext{
dockerContext("ssh://foobar", "docker"),
dockerContext("ssh://foobar", "k8"),
}
for _, context := range testDockerContexts {
endpoint, err := client.GetContextEndpoint(context.context)
if err != nil {
if err.Error() != "context lacks Docker endpoint" {
t.Error(err)
}
} else {
if endpoint != context.expectedEndpoint {
t.Errorf("did not get correct context endpoint. Expected: %s, received: %s", context.expectedEndpoint, endpoint)
}
}
}
}

pkg/client/convert/LICENSE

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,10 @@
# github.com/docker/cli/cli/compose/convert
DISCLAIMER: This is essentially the entire `github.com/docker/cli/cli/compose/convert`
package. It should be an easy import, but importing it creates DEPENDENCY
HELL. I tried for an hour to fix it but it wouldn't work. TRY TO FIX AT YOUR OWN
RISK!!!
Since this is copy-pasted more or less verbatim from the upstream library, the
Apache license is included in this folder. The only small edits to the source
code are renamed functions and the removal of parts we don't need.

@@ -0,0 +1,199 @@
package convert
import (
"io/ioutil"
"strings"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types"
networktypes "github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/swarm"
)
const (
// LabelNamespace is the label used to track stack resources
LabelNamespace = "com.docker.stack.namespace"
)
// Namespace mangles names by prepending the name
type Namespace struct {
name string
}
// Scope prepends the namespace to a name
func (n Namespace) Scope(name string) string {
return n.name + "_" + name
}
// Descope returns the name without the namespace prefix
func (n Namespace) Descope(name string) string {
return strings.TrimPrefix(name, n.name+"_")
}
// Name returns the name of the namespace
func (n Namespace) Name() string {
return n.name
}
// NewNamespace returns a new Namespace for scoping of names
func NewNamespace(name string) Namespace {
return Namespace{name: name}
}
// AddStackLabel returns labels with the namespace label added
func AddStackLabel(namespace Namespace, labels map[string]string) map[string]string {
if labels == nil {
labels = make(map[string]string)
}
labels[LabelNamespace] = namespace.name
return labels
}
type networkMap map[string]composetypes.NetworkConfig
// Networks from the compose-file type to the engine API type
func Networks(namespace Namespace, networks networkMap, servicesNetworks map[string]struct{}) (map[string]types.NetworkCreate, []string) {
if networks == nil {
networks = make(map[string]composetypes.NetworkConfig)
}
externalNetworks := []string{}
result := make(map[string]types.NetworkCreate)
for internalName := range servicesNetworks {
network := networks[internalName]
if network.External.External {
externalNetworks = append(externalNetworks, network.Name)
continue
}
createOpts := types.NetworkCreate{
Labels: AddStackLabel(namespace, network.Labels),
Driver: network.Driver,
Options: network.DriverOpts,
Internal: network.Internal,
Attachable: network.Attachable,
}
if network.Ipam.Driver != "" || len(network.Ipam.Config) > 0 {
createOpts.IPAM = &networktypes.IPAM{}
}
if network.Ipam.Driver != "" {
createOpts.IPAM.Driver = network.Ipam.Driver
}
for _, ipamConfig := range network.Ipam.Config {
config := networktypes.IPAMConfig{
Subnet: ipamConfig.Subnet,
}
createOpts.IPAM.Config = append(createOpts.IPAM.Config, config)
}
networkName := namespace.Scope(internalName)
if network.Name != "" {
networkName = network.Name
}
result[networkName] = createOpts
}
return result, externalNetworks
}
// Secrets converts secrets from the Compose type to the engine API type
func Secrets(namespace Namespace, secrets map[string]composetypes.SecretConfig) ([]swarm.SecretSpec, error) {
result := []swarm.SecretSpec{}
for name, secret := range secrets {
if secret.External.External {
continue
}
var obj swarmFileObject
var err error
if secret.Driver != "" {
obj = driverObjectConfig(namespace, name, composetypes.FileObjectConfig(secret))
} else {
obj, err = fileObjectConfig(namespace, name, composetypes.FileObjectConfig(secret))
}
if err != nil {
return nil, err
}
spec := swarm.SecretSpec{Annotations: obj.Annotations, Data: obj.Data}
if secret.Driver != "" {
spec.Driver = &swarm.Driver{
Name: secret.Driver,
Options: secret.DriverOpts,
}
}
if secret.TemplateDriver != "" {
spec.Templating = &swarm.Driver{
Name: secret.TemplateDriver,
}
}
result = append(result, spec)
}
return result, nil
}
// Configs converts config objects from the Compose type to the engine API type
func Configs(namespace Namespace, configs map[string]composetypes.ConfigObjConfig) ([]swarm.ConfigSpec, error) {
result := []swarm.ConfigSpec{}
for name, config := range configs {
if config.External.External {
continue
}
obj, err := fileObjectConfig(namespace, name, composetypes.FileObjectConfig(config))
if err != nil {
return nil, err
}
spec := swarm.ConfigSpec{Annotations: obj.Annotations, Data: obj.Data}
if config.TemplateDriver != "" {
spec.Templating = &swarm.Driver{
Name: config.TemplateDriver,
}
}
result = append(result, spec)
}
return result, nil
}
type swarmFileObject struct {
Annotations swarm.Annotations
Data []byte
}
func driverObjectConfig(namespace Namespace, name string, obj composetypes.FileObjectConfig) swarmFileObject {
if obj.Name != "" {
name = obj.Name
} else {
name = namespace.Scope(name)
}
return swarmFileObject{
Annotations: swarm.Annotations{
Name: name,
Labels: AddStackLabel(namespace, obj.Labels),
},
Data: []byte{},
}
}
func fileObjectConfig(namespace Namespace, name string, obj composetypes.FileObjectConfig) (swarmFileObject, error) {
data, err := ioutil.ReadFile(obj.File)
if err != nil {
return swarmFileObject{}, err
}
if obj.Name != "" {
name = obj.Name
} else {
name = namespace.Scope(name)
}
return swarmFileObject{
Annotations: swarm.Annotations{
Name: name,
Labels: AddStackLabel(namespace, obj.Labels),
},
Data: data,
}, nil
}
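
A small sketch of how a caller might feed the output of `Networks` to the engine API. The namespace, network definitions and import paths are assumptions.

```go
package example

import (
	"context"
	"log"

	composetypes "github.com/docker/cli/cli/compose/types"
	apiclient "github.com/docker/docker/client"

	// Assumed import path after the pkg/ refactor.
	"coopcloud.tech/abra/pkg/client/convert"
)

func createStackNetworks(cl *apiclient.Client) {
	ns := convert.NewNamespace("myapp")
	nets := map[string]composetypes.NetworkConfig{
		"internal": {Driver: "overlay"},
	}
	used := map[string]struct{}{"internal": {}}
	// Networks returns "myapp_internal" -> types.NetworkCreate plus a list of
	// external networks, which must already exist and are not created here.
	creates, externals := convert.Networks(ns, nets, used)
	log.Println("external networks expected to exist:", externals)
	for name, createOpts := range creates {
		if _, err := cl.NetworkCreate(context.Background(), name, createOpts); err != nil {
			log.Fatal(err)
		}
	}
}
```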

@@ -0,0 +1,171 @@
package convert
import (
"testing"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/network"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"gotest.tools/v3/fs"
)
func TestNamespaceScope(t *testing.T) {
scoped := Namespace{name: "foo"}.Scope("bar")
assert.Check(t, is.Equal("foo_bar", scoped))
}
func TestAddStackLabel(t *testing.T) {
labels := map[string]string{
"something": "labeled",
}
actual := AddStackLabel(Namespace{name: "foo"}, labels)
expected := map[string]string{
"something": "labeled",
LabelNamespace: "foo",
}
assert.Check(t, is.DeepEqual(expected, actual))
}
func TestNetworks(t *testing.T) {
namespace := Namespace{name: "foo"}
serviceNetworks := map[string]struct{}{
"normal": {},
"outside": {},
"default": {},
"attachablenet": {},
"named": {},
}
source := networkMap{
"normal": composetypes.NetworkConfig{
Driver: "overlay",
DriverOpts: map[string]string{
"opt": "value",
},
Ipam: composetypes.IPAMConfig{
Driver: "driver",
Config: []*composetypes.IPAMPool{
{
Subnet: "10.0.0.0",
},
},
},
Labels: map[string]string{
"something": "labeled",
},
},
"outside": composetypes.NetworkConfig{
External: composetypes.External{External: true},
Name: "special",
},
"attachablenet": composetypes.NetworkConfig{
Driver: "overlay",
Attachable: true,
},
"named": composetypes.NetworkConfig{
Name: "othername",
},
}
expected := map[string]types.NetworkCreate{
"foo_default": {
Labels: map[string]string{
LabelNamespace: "foo",
},
},
"foo_normal": {
Driver: "overlay",
IPAM: &network.IPAM{
Driver: "driver",
Config: []network.IPAMConfig{
{
Subnet: "10.0.0.0",
},
},
},
Options: map[string]string{
"opt": "value",
},
Labels: map[string]string{
LabelNamespace: "foo",
"something": "labeled",
},
},
"foo_attachablenet": {
Driver: "overlay",
Attachable: true,
Labels: map[string]string{
LabelNamespace: "foo",
},
},
"othername": {
Labels: map[string]string{LabelNamespace: "foo"},
},
}
networks, externals := Networks(namespace, source, serviceNetworks)
assert.DeepEqual(t, expected, networks)
assert.DeepEqual(t, []string{"special"}, externals)
}
func TestSecrets(t *testing.T) {
namespace := Namespace{name: "foo"}
secretText := "this is the first secret"
secretFile := fs.NewFile(t, "convert-secrets", fs.WithContent(secretText))
defer secretFile.Remove()
source := map[string]composetypes.SecretConfig{
"one": {
File: secretFile.Path(),
Labels: map[string]string{"monster": "mash"},
},
"ext": {
External: composetypes.External{
External: true,
},
},
}
specs, err := Secrets(namespace, source)
assert.NilError(t, err)
assert.Assert(t, is.Len(specs, 1))
secret := specs[0]
assert.Check(t, is.Equal("foo_one", secret.Name))
assert.Check(t, is.DeepEqual(map[string]string{
"monster": "mash",
LabelNamespace: "foo",
}, secret.Labels))
assert.Check(t, is.DeepEqual([]byte(secretText), secret.Data))
}
func TestConfigs(t *testing.T) {
namespace := Namespace{name: "foo"}
configText := "this is the first config"
configFile := fs.NewFile(t, "convert-configs", fs.WithContent(configText))
defer configFile.Remove()
source := map[string]composetypes.ConfigObjConfig{
"one": {
File: configFile.Path(),
Labels: map[string]string{"monster": "mash"},
},
"ext": {
External: composetypes.External{
External: true,
},
},
}
specs, err := Configs(namespace, source)
assert.NilError(t, err)
assert.Assert(t, is.Len(specs, 1))
config := specs[0]
assert.Check(t, is.Equal("foo_one", config.Name))
assert.Check(t, is.DeepEqual(map[string]string{
"monster": "mash",
LabelNamespace: "foo",
}, config.Labels))
assert.Check(t, is.DeepEqual([]byte(configText), config.Data))
}

@@ -0,0 +1,861 @@
package convert
import (
"context"
"fmt"
"os"
"sort"
"strings"
"time"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
"github.com/docker/go-units"
"github.com/pkg/errors"
)
const (
defaultNetwork = "default"
// LabelImage is the label used to store image name provided in the compose file
LabelImage = "com.docker.stack.image"
)
// ParseSecrets retrieves the secrets with the requested names and fills
// secret IDs into the secret references.
func ParseSecrets(client client.SecretAPIClient, requestedSecrets []*swarmtypes.SecretReference) ([]*swarmtypes.SecretReference, error) {
if len(requestedSecrets) == 0 {
return []*swarmtypes.SecretReference{}, nil
}
secretRefs := make(map[string]*swarmtypes.SecretReference)
ctx := context.Background()
for _, secret := range requestedSecrets {
if _, exists := secretRefs[secret.File.Name]; exists {
return nil, errors.Errorf("duplicate secret target for %s not allowed", secret.SecretName)
}
secretRef := new(swarmtypes.SecretReference)
*secretRef = *secret
secretRefs[secret.File.Name] = secretRef
}
args := filters.NewArgs()
for _, s := range secretRefs {
args.Add("name", s.SecretName)
}
secrets, err := client.SecretList(ctx, types.SecretListOptions{
Filters: args,
})
if err != nil {
return nil, err
}
foundSecrets := make(map[string]string)
for _, secret := range secrets {
foundSecrets[secret.Spec.Annotations.Name] = secret.ID
}
addedSecrets := []*swarmtypes.SecretReference{}
for _, ref := range secretRefs {
id, ok := foundSecrets[ref.SecretName]
if !ok {
return nil, errors.Errorf("secret not found: %s", ref.SecretName)
}
// set the id for the ref to properly assign in swarm
// since swarm needs the ID instead of the name
ref.SecretID = id
addedSecrets = append(addedSecrets, ref)
}
return addedSecrets, nil
}
// ParseConfigs retrieves the configs from the requested names and converts
// them to config references to use with the spec
func ParseConfigs(client client.ConfigAPIClient, requestedConfigs []*swarmtypes.ConfigReference) ([]*swarmtypes.ConfigReference, error) {
if len(requestedConfigs) == 0 {
return []*swarmtypes.ConfigReference{}, nil
}
// the configRefs map has two purposes: it prevents duplication of config
// target filenames, and it is used to get all configs so we can resolve
// their IDs. unfortunately, there are other targets for ConfigReferences,
// besides just a File; specifically, the Runtime target, which is used for
// CredentialSpecs. Therefore, we need to have a list of ConfigReferences
// that are not File targets as well. at this time of writing, the only use
// for Runtime targets is CredentialSpecs. However, to future-proof this
// functionality, we should handle the case where multiple Runtime targets
// are in use for the same Config, and we should deduplicate
// such ConfigReferences, as no matter how many times the Config is used,
// it is only needed to be referenced once.
configRefs := make(map[string]*swarmtypes.ConfigReference)
runtimeRefs := make(map[string]*swarmtypes.ConfigReference)
ctx := context.Background()
for _, config := range requestedConfigs {
// copy the config, so we don't mutate the args
configRef := new(swarmtypes.ConfigReference)
*configRef = *config
if config.Runtime != nil {
// by assigning to a map based on ConfigName, if the same Config
// is required as a Runtime target for multiple purposes, we only
// include it once in the final set of configs.
runtimeRefs[config.ConfigName] = config
// continue, so we skip the logic below for handling file-type
// configs
continue
}
if _, exists := configRefs[config.File.Name]; exists {
return nil, errors.Errorf("duplicate config target for %s not allowed", config.ConfigName)
}
configRefs[config.File.Name] = configRef
}
args := filters.NewArgs()
for _, s := range configRefs {
args.Add("name", s.ConfigName)
}
for _, s := range runtimeRefs {
args.Add("name", s.ConfigName)
}
configs, err := client.ConfigList(ctx, types.ConfigListOptions{
Filters: args,
})
if err != nil {
return nil, err
}
foundConfigs := make(map[string]string)
for _, config := range configs {
foundConfigs[config.Spec.Annotations.Name] = config.ID
}
addedConfigs := []*swarmtypes.ConfigReference{}
for _, ref := range configRefs {
id, ok := foundConfigs[ref.ConfigName]
if !ok {
return nil, errors.Errorf("config not found: %s", ref.ConfigName)
}
// set the id for the ref to properly assign in swarm
// since swarm needs the ID instead of the name
ref.ConfigID = id
addedConfigs = append(addedConfigs, ref)
}
// unfortunately, because configRefs and runtimeRefs are keyed on different
// values that may collide, we can't just do some fancy trickery to
// concat maps; we need to do two separate loops
for _, ref := range runtimeRefs {
id, ok := foundConfigs[ref.ConfigName]
if !ok {
return nil, errors.Errorf("config not found: %s", ref.ConfigName)
}
ref.ConfigID = id
addedConfigs = append(addedConfigs, ref)
}
return addedConfigs, nil
}
// Services from compose-file types to engine API types
func Services(
namespace Namespace,
config *composetypes.Config,
client client.CommonAPIClient,
) (map[string]swarm.ServiceSpec, error) {
result := make(map[string]swarm.ServiceSpec)
services := config.Services
volumes := config.Volumes
networks := config.Networks
for _, service := range services {
secrets, err := convertServiceSecrets(client, namespace, service.Secrets, config.Secrets)
if err != nil {
return nil, errors.Wrapf(err, "service %s", service.Name)
}
configs, err := convertServiceConfigObjs(client, namespace, service, config.Configs)
if err != nil {
return nil, errors.Wrapf(err, "service %s", service.Name)
}
serviceSpec, err := Service(client.ClientVersion(), namespace, service, networks, volumes, secrets, configs)
if err != nil {
return nil, errors.Wrapf(err, "service %s", service.Name)
}
result[service.Name] = serviceSpec
}
return result, nil
}
// Service converts a ServiceConfig into a swarm ServiceSpec
func Service(
apiVersion string,
namespace Namespace,
service composetypes.ServiceConfig,
networkConfigs map[string]composetypes.NetworkConfig,
volumes map[string]composetypes.VolumeConfig,
secrets []*swarm.SecretReference,
configs []*swarm.ConfigReference,
) (swarm.ServiceSpec, error) {
name := namespace.Scope(service.Name)
endpoint := convertEndpointSpec(service.Deploy.EndpointMode, service.Ports)
mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas)
if err != nil {
return swarm.ServiceSpec{}, err
}
mounts, err := Volumes(service.Volumes, volumes, namespace)
if err != nil {
return swarm.ServiceSpec{}, err
}
resources, err := convertResources(service.Deploy.Resources)
if err != nil {
return swarm.ServiceSpec{}, err
}
restartPolicy, err := convertRestartPolicy(
service.Restart, service.Deploy.RestartPolicy)
if err != nil {
return swarm.ServiceSpec{}, err
}
healthcheck, err := convertHealthcheck(service.HealthCheck)
if err != nil {
return swarm.ServiceSpec{}, err
}
networks, err := convertServiceNetworks(service.Networks, networkConfigs, namespace, service.Name)
if err != nil {
return swarm.ServiceSpec{}, err
}
dnsConfig := convertDNSConfig(service.DNS, service.DNSSearch)
var privileges swarm.Privileges
privileges.CredentialSpec, err = convertCredentialSpec(
namespace, service.CredentialSpec, configs,
)
if err != nil {
return swarm.ServiceSpec{}, err
}
var logDriver *swarm.Driver
if service.Logging != nil {
logDriver = &swarm.Driver{
Name: service.Logging.Driver,
Options: service.Logging.Options,
}
}
capAdd, capDrop := opts.EffectiveCapAddCapDrop(service.CapAdd, service.CapDrop)
serviceSpec := swarm.ServiceSpec{
Annotations: swarm.Annotations{
Name: name,
Labels: AddStackLabel(namespace, service.Deploy.Labels),
},
TaskTemplate: swarm.TaskSpec{
ContainerSpec: &swarm.ContainerSpec{
Image: service.Image,
Command: service.Entrypoint,
Args: service.Command,
Hostname: service.Hostname,
Hosts: convertExtraHosts(service.ExtraHosts),
DNSConfig: dnsConfig,
Healthcheck: healthcheck,
Env: sortStrings(convertEnvironment(service.Environment)),
Labels: AddStackLabel(namespace, service.Labels),
Dir: service.WorkingDir,
User: service.User,
Mounts: mounts,
StopGracePeriod: composetypes.ConvertDurationPtr(service.StopGracePeriod),
StopSignal: service.StopSignal,
TTY: service.Tty,
OpenStdin: service.StdinOpen,
Secrets: secrets,
Configs: configs,
ReadOnly: service.ReadOnly,
Privileges: &privileges,
Isolation: container.Isolation(service.Isolation),
Init: service.Init,
Sysctls: service.Sysctls,
CapabilityAdd: capAdd,
CapabilityDrop: capDrop,
Ulimits: convertUlimits(service.Ulimits),
},
LogDriver: logDriver,
Resources: resources,
RestartPolicy: restartPolicy,
Placement: &swarm.Placement{
Constraints: service.Deploy.Placement.Constraints,
Preferences: getPlacementPreference(service.Deploy.Placement.Preferences),
MaxReplicas: service.Deploy.Placement.MaxReplicas,
},
},
EndpointSpec: endpoint,
Mode: mode,
UpdateConfig: convertUpdateConfig(service.Deploy.UpdateConfig),
RollbackConfig: convertUpdateConfig(service.Deploy.RollbackConfig),
}
// add an image label to serviceSpec
serviceSpec.Labels[LabelImage] = service.Image
// ServiceSpec.Networks is deprecated and should not have been used by
// this package. It is possible to update TaskTemplate.Networks, but it
// is not possible to update ServiceSpec.Networks. Unfortunately, we
// can't unconditionally start using TaskTemplate.Networks, because that
// will break with older daemons that don't support migrating from
// ServiceSpec.Networks to TaskTemplate.Networks. So which field to use
// is conditional on daemon version.
if versions.LessThan(apiVersion, "1.29") {
serviceSpec.Networks = networks
} else {
serviceSpec.TaskTemplate.Networks = networks
}
return serviceSpec, nil
}
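// getPlacementPreference converts compose placement preferences into swarm
// spread-over placement preferences, preserving their original order.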
func getPlacementPreference(preferences []composetypes.PlacementPreferences) []swarm.PlacementPreference {
result := []swarm.PlacementPreference{}
for _, preference := range preferences {
spreadDescriptor := preference.Spread
result = append(result, swarm.PlacementPreference{
Spread: &swarm.SpreadOver{
SpreadDescriptor: spreadDescriptor,
},
})
}
return result
}
func sortStrings(strs []string) []string {
sort.Strings(strs)
return strs
}
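// convertServiceNetworks maps the service's networks (or the default network
// when none are declared) to swarm network attachment configs, adding the
// service name as an alias on user-defined networks and sorting by target.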
func convertServiceNetworks(
networks map[string]*composetypes.ServiceNetworkConfig,
networkConfigs networkMap,
namespace Namespace,
name string,
) ([]swarm.NetworkAttachmentConfig, error) {
if len(networks) == 0 {
networks = map[string]*composetypes.ServiceNetworkConfig{
defaultNetwork: {},
}
}
nets := []swarm.NetworkAttachmentConfig{}
for networkName, network := range networks {
networkConfig, ok := networkConfigs[networkName]
if !ok && networkName != defaultNetwork {
return nil, errors.Errorf("undefined network %q", networkName)
}
var aliases []string
if network != nil {
aliases = network.Aliases
}
target := namespace.Scope(networkName)
if networkConfig.Name != "" {
target = networkConfig.Name
}
netAttachConfig := swarm.NetworkAttachmentConfig{
Target: target,
Aliases: aliases,
}
// Only add default aliases to user defined networks. Other networks do
// not support aliases.
if container.NetworkMode(target).IsUserDefined() {
netAttachConfig.Aliases = append(netAttachConfig.Aliases, name)
}
nets = append(nets, netAttachConfig)
}
sort.Slice(nets, func(i, j int) bool {
return nets[i].Target < nets[j].Target
})
return nets, nil
}
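// convertServiceSecrets resolves the service's secret references against the
// stack's secret specs and returns swarm SecretReferences, sorted by secret
// name for idempotence.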
// TODO: fix secrets API so that SecretAPIClient is not required here
func convertServiceSecrets(
client client.SecretAPIClient,
namespace Namespace,
secrets []composetypes.ServiceSecretConfig,
secretSpecs map[string]composetypes.SecretConfig,
) ([]*swarm.SecretReference, error) {
refs := []*swarm.SecretReference{}
lookup := func(key string) (composetypes.FileObjectConfig, error) {
secretSpec, exists := secretSpecs[key]
if !exists {
return composetypes.FileObjectConfig{}, errors.Errorf("undefined secret %q", key)
}
return composetypes.FileObjectConfig(secretSpec), nil
}
for _, secret := range secrets {
obj, err := convertFileObject(namespace, composetypes.FileReferenceConfig(secret), lookup)
if err != nil {
return nil, err
}
file := swarm.SecretReferenceFileTarget(obj.File)
refs = append(refs, &swarm.SecretReference{
File: &file,
SecretName: obj.Name,
})
}
secrs, err := ParseSecrets(client, refs)
if err != nil {
return nil, err
}
// sort to ensure idempotence (don't restart services just because the entries are in different order)
sort.SliceStable(secrs, func(i, j int) bool { return secrs[i].SecretName < secrs[j].SecretName })
return secrs, err
}
// convertServiceConfigObjs takes an API client, a namespace, a ServiceConfig,
// and a set of compose Config specs, and creates the swarm ConfigReferences
// required by the service. Unlike convertServiceSecrets, this takes the whole
// ServiceConfig, because some Configs may be needed as a result of other
// fields (like CredentialSpecs).
//
// TODO: fix configs API so that ConfigsAPIClient is not required here
func convertServiceConfigObjs(
client client.ConfigAPIClient,
namespace Namespace,
service composetypes.ServiceConfig,
configSpecs map[string]composetypes.ConfigObjConfig,
) ([]*swarm.ConfigReference, error) {
refs := []*swarm.ConfigReference{}
lookup := func(key string) (composetypes.FileObjectConfig, error) {
configSpec, exists := configSpecs[key]
if !exists {
return composetypes.FileObjectConfig{}, errors.Errorf("undefined config %q", key)
}
return composetypes.FileObjectConfig(configSpec), nil
}
for _, config := range service.Configs {
obj, err := convertFileObject(namespace, composetypes.FileReferenceConfig(config), lookup)
if err != nil {
return nil, err
}
file := swarm.ConfigReferenceFileTarget(obj.File)
refs = append(refs, &swarm.ConfigReference{
File: &file,
ConfigName: obj.Name,
})
}
// finally, after converting all of the file objects, create any
// Runtime-type configs that are needed. these are configs that are not
// mounted into the container, but are used in some other way by the
// container runtime. Currently, this only means CredentialSpecs, but in
// the future it may be used for other fields
// grab the CredentialSpec out of the Service
credSpec := service.CredentialSpec
// if the credSpec uses a config, then we should grab the config name, and
// create a config reference for it. A File or Registry-type CredentialSpec
// does not need this operation.
if credSpec.Config != "" {
// look up the config in the configSpecs.
obj, err := lookup(credSpec.Config)
if err != nil {
return nil, err
}
// get the actual correct name.
name := namespace.Scope(credSpec.Config)
if obj.Name != "" {
name = obj.Name
}
// now append a Runtime-type config.
refs = append(refs, &swarm.ConfigReference{
ConfigName: name,
Runtime: &swarm.ConfigReferenceRuntimeTarget{},
})
}
confs, err := ParseConfigs(client, refs)
if err != nil {
return nil, err
}
// sort to ensure idempotence (don't restart services just because the entries are in different order)
sort.SliceStable(confs, func(i, j int) bool { return confs[i].ConfigName < confs[j].ConfigName })
return confs, err
}
type swarmReferenceTarget struct {
Name string
UID string
GID string
Mode os.FileMode
}
type swarmReferenceObject struct {
File swarmReferenceTarget
ID string
Name string
}
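// convertFileObject resolves a secret or config file reference, scoping the
// source name to the stack namespace unless an explicit name is set, and
// applying defaults: target = source, UID/GID = "0", mode = 0444.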
func convertFileObject(
namespace Namespace,
config composetypes.FileReferenceConfig,
lookup func(key string) (composetypes.FileObjectConfig, error),
) (swarmReferenceObject, error) {
obj, err := lookup(config.Source)
if err != nil {
return swarmReferenceObject{}, err
}
source := namespace.Scope(config.Source)
if obj.Name != "" {
source = obj.Name
}
target := config.Target
if target == "" {
target = config.Source
}
uid := config.UID
gid := config.GID
if uid == "" {
uid = "0"
}
if gid == "" {
gid = "0"
}
mode := config.Mode
if mode == nil {
mode = uint32Ptr(0444)
}
return swarmReferenceObject{
File: swarmReferenceTarget{
Name: target,
UID: uid,
GID: gid,
Mode: os.FileMode(*mode),
},
Name: source,
}, nil
}
func uint32Ptr(value uint32) *uint32 {
return &value
}
// convertExtraHosts converts <host>:<ip> mappings to SwarmKit notation:
// "IP-address hostname(s)". The original order of mappings is preserved.
func convertExtraHosts(extraHosts composetypes.HostsList) []string {
hosts := []string{}
for _, hostIP := range extraHosts {
if v := strings.SplitN(hostIP, ":", 2); len(v) == 2 {
// Convert to SwarmKit notation: IP-address hostname(s)
hosts = append(hosts, fmt.Sprintf("%s %s", v[1], v[0]))
}
}
return hosts
}
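// convertHealthcheck translates the compose healthcheck config into a
// container HealthConfig; a disabled healthcheck maps to the special "NONE" test.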
func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container.HealthConfig, error) {
if healthcheck == nil {
return nil, nil
}
var (
timeout, interval, startPeriod time.Duration
retries int
)
if healthcheck.Disable {
if len(healthcheck.Test) != 0 {
return nil, errors.Errorf("test and disable can't be set at the same time")
}
return &container.HealthConfig{
Test: []string{"NONE"},
}, nil
}
if healthcheck.Timeout != nil {
timeout = time.Duration(*healthcheck.Timeout)
}
if healthcheck.Interval != nil {
interval = time.Duration(*healthcheck.Interval)
}
if healthcheck.StartPeriod != nil {
startPeriod = time.Duration(*healthcheck.StartPeriod)
}
if healthcheck.Retries != nil {
retries = int(*healthcheck.Retries)
}
return &container.HealthConfig{
Test: healthcheck.Test,
Timeout: timeout,
Interval: interval,
Retries: retries,
StartPeriod: startPeriod,
}, nil
}
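// convertRestartPolicy builds a swarm RestartPolicy from deploy.restart_policy,
// falling back to translating the legacy restart field when no deploy policy is set.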
func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (*swarm.RestartPolicy, error) {
// TODO: log if restart is being ignored
if source == nil {
policy, err := opts.ParseRestartPolicy(restart)
if err != nil {
return nil, err
}
switch {
case policy.IsNone():
return nil, nil
case policy.IsAlways(), policy.IsUnlessStopped():
return &swarm.RestartPolicy{
Condition: swarm.RestartPolicyConditionAny,
}, nil
case policy.IsOnFailure():
attempts := uint64(policy.MaximumRetryCount)
return &swarm.RestartPolicy{
Condition: swarm.RestartPolicyConditionOnFailure,
MaxAttempts: &attempts,
}, nil
default:
return nil, errors.Errorf("unknown restart policy: %s", restart)
}
}
return &swarm.RestartPolicy{
Condition: swarm.RestartPolicyCondition(source.Condition),
Delay: composetypes.ConvertDurationPtr(source.Delay),
MaxAttempts: source.MaxAttempts,
Window: composetypes.ConvertDurationPtr(source.Window),
}, nil
}
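// convertUpdateConfig maps compose update/rollback settings to a swarm
// UpdateConfig, defaulting parallelism to 1 when unset.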
func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig {
if source == nil {
return nil
}
parallel := uint64(1)
if source.Parallelism != nil {
parallel = *source.Parallelism
}
return &swarm.UpdateConfig{
Parallelism: parallel,
Delay: time.Duration(source.Delay),
FailureAction: source.FailureAction,
Monitor: time.Duration(source.Monitor),
MaxFailureRatio: source.MaxFailureRatio,
Order: source.Order,
}
}
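// convertResources parses CPU, memory and generic resource limits and
// reservations into swarm ResourceRequirements.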
func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) {
resources := &swarm.ResourceRequirements{}
var err error
if source.Limits != nil {
var cpus int64
if source.Limits.NanoCPUs != "" {
cpus, err = opts.ParseCPUs(source.Limits.NanoCPUs)
if err != nil {
return nil, err
}
}
resources.Limits = &swarm.Limit{
NanoCPUs: cpus,
MemoryBytes: int64(source.Limits.MemoryBytes),
Pids: source.Limits.Pids,
}
}
if source.Reservations != nil {
var cpus int64
if source.Reservations.NanoCPUs != "" {
cpus, err = opts.ParseCPUs(source.Reservations.NanoCPUs)
if err != nil {
return nil, err
}
}
var generic []swarm.GenericResource
for _, res := range source.Reservations.GenericResources {
var r swarm.GenericResource
if res.DiscreteResourceSpec != nil {
r.DiscreteResourceSpec = &swarm.DiscreteGenericResource{
Kind: res.DiscreteResourceSpec.Kind,
Value: res.DiscreteResourceSpec.Value,
}
}
generic = append(generic, r)
}
resources.Reservations = &swarm.Resources{
NanoCPUs: cpus,
MemoryBytes: int64(source.Reservations.MemoryBytes),
GenericResources: generic,
}
}
return resources, nil
}
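// convertEndpointSpec builds an EndpointSpec from the endpoint mode and
// published ports, sorted by published port for deterministic output.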
func convertEndpointSpec(endpointMode string, source []composetypes.ServicePortConfig) *swarm.EndpointSpec {
portConfigs := []swarm.PortConfig{}
for _, port := range source {
portConfig := swarm.PortConfig{
Protocol: swarm.PortConfigProtocol(port.Protocol),
TargetPort: port.Target,
PublishedPort: port.Published,
PublishMode: swarm.PortConfigPublishMode(port.Mode),
}
portConfigs = append(portConfigs, portConfig)
}
sort.Slice(portConfigs, func(i, j int) bool {
return portConfigs[i].PublishedPort < portConfigs[j].PublishedPort
})
return &swarm.EndpointSpec{
Mode: swarm.ResolutionMode(strings.ToLower(endpointMode)),
Ports: portConfigs,
}
}
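// convertEnvironment renders the environment map as KEY=VALUE strings; a nil
// value emits just the variable name.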
func convertEnvironment(source map[string]*string) []string {
var output []string
for name, value := range source {
switch value {
case nil:
output = append(output, name)
default:
output = append(output, fmt.Sprintf("%s=%s", name, *value))
}
}
return output
}
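// convertDeployMode returns a global or replicated ServiceMode; replicas are
// only valid with replicated mode.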
func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) {
serviceMode := swarm.ServiceMode{}
switch mode {
case "global":
if replicas != nil {
return serviceMode, errors.Errorf("replicas can only be used with replicated mode")
}
serviceMode.Global = &swarm.GlobalService{}
case "replicated", "":
serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas}
default:
return serviceMode, errors.Errorf("Unknown mode: %s", mode)
}
return serviceMode, nil
}
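// convertDNSConfig returns a DNSConfig when nameservers or search domains are
// set, and nil otherwise.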
func convertDNSConfig(DNS []string, DNSSearch []string) *swarm.DNSConfig {
if DNS != nil || DNSSearch != nil {
return &swarm.DNSConfig{
Nameservers: DNS,
Search: DNSSearch,
}
}
return nil
}
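// convertCredentialSpec validates that at most one of Config, File or Registry
// is set and, for a Config-based spec, resolves the config name (with or
// without namespace scoping) to its config ID.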
func convertCredentialSpec(namespace Namespace, spec composetypes.CredentialSpecConfig, refs []*swarm.ConfigReference) (*swarm.CredentialSpec, error) {
var o []string
// Config was added in API v1.40
if spec.Config != "" {
o = append(o, `"Config"`)
}
if spec.File != "" {
o = append(o, `"File"`)
}
if spec.Registry != "" {
o = append(o, `"Registry"`)
}
l := len(o)
switch {
case l == 0:
return nil, nil
case l == 2:
return nil, errors.Errorf("invalid credential spec: cannot specify both %s and %s", o[0], o[1])
case l > 2:
return nil, errors.Errorf("invalid credential spec: cannot specify both %s, and %s", strings.Join(o[:l-1], ", "), o[l-1])
}
swarmCredSpec := swarm.CredentialSpec(spec)
// if we're using a swarm Config for the credential spec, over-write it
// here with the config ID
if swarmCredSpec.Config != "" {
for _, config := range refs {
if swarmCredSpec.Config == config.ConfigName {
swarmCredSpec.Config = config.ConfigID
return &swarmCredSpec, nil
}
}
// if none of the configs match, try namespacing
for _, config := range refs {
if namespace.Scope(swarmCredSpec.Config) == config.ConfigName {
swarmCredSpec.Config = config.ConfigID
return &swarmCredSpec, nil
}
}
return nil, errors.Errorf("invalid credential spec: spec specifies config %v, but no such config can be found", swarmCredSpec.Config)
}
return &swarmCredSpec, nil
}
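// convertUlimits converts compose ulimits, treating a single value as both the
// soft and hard limit, and returns them sorted by name.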
func convertUlimits(origUlimits map[string]*composetypes.UlimitsConfig) []*units.Ulimit {
newUlimits := make(map[string]*units.Ulimit)
for name, u := range origUlimits {
if u.Single != 0 {
newUlimits[name] = &units.Ulimit{
Name: name,
Soft: int64(u.Single),
Hard: int64(u.Single),
}
} else {
newUlimits[name] = &units.Ulimit{
Name: name,
Soft: int64(u.Soft),
Hard: int64(u.Hard),
}
}
}
var ulimits []*units.Ulimit
for _, ulimit := range newUlimits {
ulimits = append(ulimits, ulimit)
}
sort.SliceStable(ulimits, func(i, j int) bool {
return ulimits[i].Name < ulimits[j].Name
})
return ulimits
}

View File

@ -0,0 +1,678 @@
package convert
import (
"context"
"os"
"sort"
"strings"
"testing"
"time"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"github.com/pkg/errors"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestConvertRestartPolicyFromNone(t *testing.T) {
policy, err := convertRestartPolicy("no", nil)
assert.NilError(t, err)
assert.Check(t, is.DeepEqual((*swarm.RestartPolicy)(nil), policy))
}
func TestConvertRestartPolicyFromUnknown(t *testing.T) {
_, err := convertRestartPolicy("unknown", nil)
assert.Error(t, err, "unknown restart policy: unknown")
}
func TestConvertRestartPolicyFromAlways(t *testing.T) {
policy, err := convertRestartPolicy("always", nil)
expected := &swarm.RestartPolicy{
Condition: swarm.RestartPolicyConditionAny,
}
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, policy))
}
func TestConvertRestartPolicyFromFailure(t *testing.T) {
policy, err := convertRestartPolicy("on-failure:4", nil)
attempts := uint64(4)
expected := &swarm.RestartPolicy{
Condition: swarm.RestartPolicyConditionOnFailure,
MaxAttempts: &attempts,
}
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, policy))
}
func strPtr(val string) *string {
return &val
}
func TestConvertEnvironment(t *testing.T) {
source := map[string]*string{
"foo": strPtr("bar"),
"key": strPtr("value"),
}
env := convertEnvironment(source)
sort.Strings(env)
assert.Check(t, is.DeepEqual([]string{"foo=bar", "key=value"}, env))
}
func TestConvertExtraHosts(t *testing.T) {
source := composetypes.HostsList{
"zulu:127.0.0.2",
"alpha:127.0.0.1",
"zulu:ff02::1",
}
assert.Check(t, is.DeepEqual([]string{"127.0.0.2 zulu", "127.0.0.1 alpha", "ff02::1 zulu"}, convertExtraHosts(source)))
}
func TestConvertResourcesFull(t *testing.T) {
source := composetypes.Resources{
Limits: &composetypes.ResourceLimit{
NanoCPUs: "0.003",
MemoryBytes: composetypes.UnitBytes(300000000),
},
Reservations: &composetypes.Resource{
NanoCPUs: "0.002",
MemoryBytes: composetypes.UnitBytes(200000000),
},
}
resources, err := convertResources(source)
assert.NilError(t, err)
expected := &swarm.ResourceRequirements{
Limits: &swarm.Limit{
NanoCPUs: 3000000,
MemoryBytes: 300000000,
},
Reservations: &swarm.Resources{
NanoCPUs: 2000000,
MemoryBytes: 200000000,
},
}
assert.Check(t, is.DeepEqual(expected, resources))
}
func TestConvertResourcesOnlyMemory(t *testing.T) {
source := composetypes.Resources{
Limits: &composetypes.ResourceLimit{
MemoryBytes: composetypes.UnitBytes(300000000),
},
Reservations: &composetypes.Resource{
MemoryBytes: composetypes.UnitBytes(200000000),
},
}
resources, err := convertResources(source)
assert.NilError(t, err)
expected := &swarm.ResourceRequirements{
Limits: &swarm.Limit{
MemoryBytes: 300000000,
},
Reservations: &swarm.Resources{
MemoryBytes: 200000000,
},
}
assert.Check(t, is.DeepEqual(expected, resources))
}
func TestConvertHealthcheck(t *testing.T) {
retries := uint64(10)
timeout := composetypes.Duration(30 * time.Second)
interval := composetypes.Duration(2 * time.Millisecond)
source := &composetypes.HealthCheckConfig{
Test: []string{"EXEC", "touch", "/foo"},
Timeout: &timeout,
Interval: &interval,
Retries: &retries,
}
expected := &container.HealthConfig{
Test: source.Test,
Timeout: time.Duration(timeout),
Interval: time.Duration(interval),
Retries: 10,
}
healthcheck, err := convertHealthcheck(source)
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, healthcheck))
}
func TestConvertHealthcheckDisable(t *testing.T) {
source := &composetypes.HealthCheckConfig{Disable: true}
expected := &container.HealthConfig{
Test: []string{"NONE"},
}
healthcheck, err := convertHealthcheck(source)
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, healthcheck))
}
func TestConvertHealthcheckDisableWithTest(t *testing.T) {
source := &composetypes.HealthCheckConfig{
Disable: true,
Test: []string{"EXEC", "touch"},
}
_, err := convertHealthcheck(source)
assert.Error(t, err, "test and disable can't be set at the same time")
}
func TestConvertEndpointSpec(t *testing.T) {
source := []composetypes.ServicePortConfig{
{
Protocol: "udp",
Target: 53,
Published: 1053,
Mode: "host",
},
{
Target: 8080,
Published: 80,
},
}
endpoint := convertEndpointSpec("vip", source)
expected := swarm.EndpointSpec{
Mode: swarm.ResolutionMode(strings.ToLower("vip")),
Ports: []swarm.PortConfig{
{
TargetPort: 8080,
PublishedPort: 80,
},
{
Protocol: "udp",
TargetPort: 53,
PublishedPort: 1053,
PublishMode: "host",
},
},
}
assert.Check(t, is.DeepEqual(expected, *endpoint))
}
func TestConvertServiceNetworksOnlyDefault(t *testing.T) {
networkConfigs := networkMap{}
configs, err := convertServiceNetworks(
nil, networkConfigs, NewNamespace("foo"), "service")
expected := []swarm.NetworkAttachmentConfig{
{
Target: "foo_default",
Aliases: []string{"service"},
},
}
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, configs))
}
func TestConvertServiceNetworks(t *testing.T) {
networkConfigs := networkMap{
"front": composetypes.NetworkConfig{
External: composetypes.External{External: true},
Name: "fronttier",
},
"back": composetypes.NetworkConfig{},
}
networks := map[string]*composetypes.ServiceNetworkConfig{
"front": {
Aliases: []string{"something"},
},
"back": {
Aliases: []string{"other"},
},
}
configs, err := convertServiceNetworks(
networks, networkConfigs, NewNamespace("foo"), "service")
expected := []swarm.NetworkAttachmentConfig{
{
Target: "foo_back",
Aliases: []string{"other", "service"},
},
{
Target: "fronttier",
Aliases: []string{"something", "service"},
},
}
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, configs))
}
func TestConvertServiceNetworksCustomDefault(t *testing.T) {
networkConfigs := networkMap{
"default": composetypes.NetworkConfig{
External: composetypes.External{External: true},
Name: "custom",
},
}
networks := map[string]*composetypes.ServiceNetworkConfig{}
configs, err := convertServiceNetworks(
networks, networkConfigs, NewNamespace("foo"), "service")
expected := []swarm.NetworkAttachmentConfig{
{
Target: "custom",
Aliases: []string{"service"},
},
}
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, configs))
}
func TestConvertDNSConfigEmpty(t *testing.T) {
dnsConfig := convertDNSConfig(nil, nil)
assert.Check(t, is.DeepEqual((*swarm.DNSConfig)(nil), dnsConfig))
}
var (
nameservers = []string{"8.8.8.8", "9.9.9.9"}
search = []string{"dc1.example.com", "dc2.example.com"}
)
func TestConvertDNSConfigAll(t *testing.T) {
dnsConfig := convertDNSConfig(nameservers, search)
assert.Check(t, is.DeepEqual(&swarm.DNSConfig{
Nameservers: nameservers,
Search: search,
}, dnsConfig))
}
func TestConvertDNSConfigNameservers(t *testing.T) {
dnsConfig := convertDNSConfig(nameservers, nil)
assert.Check(t, is.DeepEqual(&swarm.DNSConfig{
Nameservers: nameservers,
Search: nil,
}, dnsConfig))
}
func TestConvertDNSConfigSearch(t *testing.T) {
dnsConfig := convertDNSConfig(nil, search)
assert.Check(t, is.DeepEqual(&swarm.DNSConfig{
Nameservers: nil,
Search: search,
}, dnsConfig))
}
func TestConvertCredentialSpec(t *testing.T) {
tests := []struct {
name string
in composetypes.CredentialSpecConfig
out *swarm.CredentialSpec
configs []*swarm.ConfigReference
expectedErr string
}{
{
name: "empty",
},
{
name: "config-and-file",
in: composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", File: "somefile.json"},
expectedErr: `invalid credential spec: cannot specify both "Config" and "File"`,
},
{
name: "config-and-registry",
in: composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", Registry: "testing"},
expectedErr: `invalid credential spec: cannot specify both "Config" and "Registry"`,
},
{
name: "file-and-registry",
in: composetypes.CredentialSpecConfig{File: "somefile.json", Registry: "testing"},
expectedErr: `invalid credential spec: cannot specify both "File" and "Registry"`,
},
{
name: "config-and-file-and-registry",
in: composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", File: "somefile.json", Registry: "testing"},
expectedErr: `invalid credential spec: cannot specify both "Config", "File", and "Registry"`,
},
{
name: "missing-config-reference",
in: composetypes.CredentialSpecConfig{Config: "missing"},
expectedErr: "invalid credential spec: spec specifies config missing, but no such config can be found",
configs: []*swarm.ConfigReference{
{
ConfigName: "someName",
ConfigID: "missing",
},
},
},
{
name: "namespaced-config",
in: composetypes.CredentialSpecConfig{Config: "name"},
configs: []*swarm.ConfigReference{
{
ConfigName: "namespaced-config_name",
ConfigID: "someID",
},
},
out: &swarm.CredentialSpec{Config: "someID"},
},
{
name: "config",
in: composetypes.CredentialSpecConfig{Config: "someName"},
configs: []*swarm.ConfigReference{
{
ConfigName: "someOtherName",
ConfigID: "someOtherID",
}, {
ConfigName: "someName",
ConfigID: "someID",
},
},
out: &swarm.CredentialSpec{Config: "someID"},
},
{
name: "file",
in: composetypes.CredentialSpecConfig{File: "somefile.json"},
out: &swarm.CredentialSpec{File: "somefile.json"},
},
{
name: "registry",
in: composetypes.CredentialSpecConfig{Registry: "testing"},
out: &swarm.CredentialSpec{Registry: "testing"},
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
namespace := NewNamespace(tc.name)
swarmSpec, err := convertCredentialSpec(namespace, tc.in, tc.configs)
if tc.expectedErr != "" {
assert.Error(t, err, tc.expectedErr)
} else {
assert.NilError(t, err)
}
assert.DeepEqual(t, swarmSpec, tc.out)
})
}
}
func TestConvertUpdateConfigOrder(t *testing.T) {
// test default behavior
updateConfig := convertUpdateConfig(&composetypes.UpdateConfig{})
assert.Check(t, is.Equal("", updateConfig.Order))
// test start-first
updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{
Order: "start-first",
})
assert.Check(t, is.Equal(updateConfig.Order, "start-first"))
// test stop-first
updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{
Order: "stop-first",
})
assert.Check(t, is.Equal(updateConfig.Order, "stop-first"))
}
func TestConvertFileObject(t *testing.T) {
namespace := NewNamespace("testing")
config := composetypes.FileReferenceConfig{
Source: "source",
Target: "target",
UID: "user",
GID: "group",
Mode: uint32Ptr(0644),
}
swarmRef, err := convertFileObject(namespace, config, lookupConfig)
assert.NilError(t, err)
expected := swarmReferenceObject{
Name: "testing_source",
File: swarmReferenceTarget{
Name: config.Target,
UID: config.UID,
GID: config.GID,
Mode: os.FileMode(0644),
},
}
assert.Check(t, is.DeepEqual(expected, swarmRef))
}
func lookupConfig(key string) (composetypes.FileObjectConfig, error) {
if key != "source" {
return composetypes.FileObjectConfig{}, errors.New("bad key")
}
return composetypes.FileObjectConfig{}, nil
}
func TestConvertFileObjectDefaults(t *testing.T) {
namespace := NewNamespace("testing")
config := composetypes.FileReferenceConfig{Source: "source"}
swarmRef, err := convertFileObject(namespace, config, lookupConfig)
assert.NilError(t, err)
expected := swarmReferenceObject{
Name: "testing_source",
File: swarmReferenceTarget{
Name: config.Source,
UID: "0",
GID: "0",
Mode: os.FileMode(0444),
},
}
assert.Check(t, is.DeepEqual(expected, swarmRef))
}
func TestServiceConvertsIsolation(t *testing.T) {
src := composetypes.ServiceConfig{
Isolation: "hyperv",
}
result, err := Service("1.35", Namespace{name: "foo"}, src, nil, nil, nil, nil)
assert.NilError(t, err)
assert.Check(t, is.Equal(container.IsolationHyperV, result.TaskTemplate.ContainerSpec.Isolation))
}
func TestConvertServiceSecrets(t *testing.T) {
namespace := Namespace{name: "foo"}
secrets := []composetypes.ServiceSecretConfig{
{Source: "foo_secret"},
{Source: "bar_secret"},
}
secretSpecs := map[string]composetypes.SecretConfig{
"foo_secret": {
Name: "foo_secret",
},
"bar_secret": {
Name: "bar_secret",
},
}
client := &fakeClient{
secretListFunc: func(opts types.SecretListOptions) ([]swarm.Secret, error) {
assert.Check(t, is.Contains(opts.Filters.Get("name"), "foo_secret"))
assert.Check(t, is.Contains(opts.Filters.Get("name"), "bar_secret"))
return []swarm.Secret{
{Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "foo_secret"}}},
{Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "bar_secret"}}},
}, nil
},
}
refs, err := convertServiceSecrets(client, namespace, secrets, secretSpecs)
assert.NilError(t, err)
expected := []*swarm.SecretReference{
{
SecretName: "bar_secret",
File: &swarm.SecretReferenceFileTarget{
Name: "bar_secret",
UID: "0",
GID: "0",
Mode: 0444,
},
},
{
SecretName: "foo_secret",
File: &swarm.SecretReferenceFileTarget{
Name: "foo_secret",
UID: "0",
GID: "0",
Mode: 0444,
},
},
}
assert.DeepEqual(t, expected, refs)
}
func TestConvertServiceConfigs(t *testing.T) {
namespace := Namespace{name: "foo"}
service := composetypes.ServiceConfig{
Configs: []composetypes.ServiceConfigObjConfig{
{Source: "foo_config"},
{Source: "bar_config"},
},
CredentialSpec: composetypes.CredentialSpecConfig{
Config: "baz_config",
},
}
configSpecs := map[string]composetypes.ConfigObjConfig{
"foo_config": {
Name: "foo_config",
},
"bar_config": {
Name: "bar_config",
},
"baz_config": {
Name: "baz_config",
},
}
client := &fakeClient{
configListFunc: func(opts types.ConfigListOptions) ([]swarm.Config, error) {
assert.Check(t, is.Contains(opts.Filters.Get("name"), "foo_config"))
assert.Check(t, is.Contains(opts.Filters.Get("name"), "bar_config"))
assert.Check(t, is.Contains(opts.Filters.Get("name"), "baz_config"))
return []swarm.Config{
{Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "foo_config"}}},
{Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "bar_config"}}},
{Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "baz_config"}}},
}, nil
},
}
refs, err := convertServiceConfigObjs(client, namespace, service, configSpecs)
assert.NilError(t, err)
expected := []*swarm.ConfigReference{
{
ConfigName: "bar_config",
File: &swarm.ConfigReferenceFileTarget{
Name: "bar_config",
UID: "0",
GID: "0",
Mode: 0444,
},
},
{
ConfigName: "baz_config",
Runtime: &swarm.ConfigReferenceRuntimeTarget{},
},
{
ConfigName: "foo_config",
File: &swarm.ConfigReferenceFileTarget{
Name: "foo_config",
UID: "0",
GID: "0",
Mode: 0444,
},
},
}
assert.DeepEqual(t, expected, refs)
}
type fakeClient struct {
client.Client
secretListFunc func(types.SecretListOptions) ([]swarm.Secret, error)
configListFunc func(types.ConfigListOptions) ([]swarm.Config, error)
}
func (c *fakeClient) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) {
if c.secretListFunc != nil {
return c.secretListFunc(options)
}
return []swarm.Secret{}, nil
}
func (c *fakeClient) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) {
if c.configListFunc != nil {
return c.configListFunc(options)
}
return []swarm.Config{}, nil
}
func TestConvertUpdateConfigParallelism(t *testing.T) {
parallel := uint64(4)
// test default behavior
updateConfig := convertUpdateConfig(&composetypes.UpdateConfig{})
assert.Check(t, is.Equal(uint64(1), updateConfig.Parallelism))
// Non default value
updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{
Parallelism: &parallel,
})
assert.Check(t, is.Equal(parallel, updateConfig.Parallelism))
}
func TestConvertServiceCapAddAndCapDrop(t *testing.T) {
tests := []struct {
title string
in, out composetypes.ServiceConfig
}{
{
title: "default behavior",
},
{
title: "some values",
in: composetypes.ServiceConfig{
CapAdd: []string{"SYS_NICE", "CAP_NET_ADMIN"},
CapDrop: []string{"CHOWN", "CAP_NET_ADMIN", "DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER"},
},
out: composetypes.ServiceConfig{
CapAdd: []string{"CAP_NET_ADMIN", "CAP_SYS_NICE"},
CapDrop: []string{"CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FOWNER", "CAP_FSETID"},
},
},
{
title: "adding ALL capabilities",
in: composetypes.ServiceConfig{
CapAdd: []string{"ALL", "CAP_NET_ADMIN"},
CapDrop: []string{"CHOWN", "CAP_NET_ADMIN", "DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER"},
},
out: composetypes.ServiceConfig{
CapAdd: []string{"ALL"},
CapDrop: []string{"CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FOWNER", "CAP_FSETID", "CAP_NET_ADMIN"},
},
},
{
title: "dropping ALL capabilities",
in: composetypes.ServiceConfig{
CapAdd: []string{"CHOWN", "CAP_NET_ADMIN", "DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER"},
CapDrop: []string{"ALL", "CAP_NET_ADMIN", "CAP_FOO"},
},
out: composetypes.ServiceConfig{
CapAdd: []string{"CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FOWNER", "CAP_FSETID", "CAP_NET_ADMIN"},
CapDrop: []string{"ALL"},
},
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.title, func(t *testing.T) {
result, err := Service("1.41", Namespace{name: "foo"}, tc.in, nil, nil, nil, nil)
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(result.TaskTemplate.ContainerSpec.CapabilityAdd, tc.out.CapAdd))
assert.Check(t, is.DeepEqual(result.TaskTemplate.ContainerSpec.CapabilityDrop, tc.out.CapDrop))
})
}
}

View File

@ -0,0 +1,162 @@
package convert
import (
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types/mount"
"github.com/pkg/errors"
)
type volumes map[string]composetypes.VolumeConfig
// Volumes from compose-file types to engine api types
func Volumes(serviceVolumes []composetypes.ServiceVolumeConfig, stackVolumes volumes, namespace Namespace) ([]mount.Mount, error) {
var mounts []mount.Mount
for _, volumeConfig := range serviceVolumes {
mount, err := convertVolumeToMount(volumeConfig, stackVolumes, namespace)
if err != nil {
return nil, err
}
mounts = append(mounts, mount)
}
return mounts, nil
}
func createMountFromVolume(volume composetypes.ServiceVolumeConfig) mount.Mount {
return mount.Mount{
Type: mount.Type(volume.Type),
Target: volume.Target,
ReadOnly: volume.ReadOnly,
Source: volume.Source,
Consistency: mount.Consistency(volume.Consistency),
}
}
func handleVolumeToMount(
volume composetypes.ServiceVolumeConfig,
stackVolumes volumes,
namespace Namespace,
) (mount.Mount, error) {
result := createMountFromVolume(volume)
if volume.Tmpfs != nil {
return mount.Mount{}, errors.New("tmpfs options are incompatible with type volume")
}
if volume.Bind != nil {
return mount.Mount{}, errors.New("bind options are incompatible with type volume")
}
// Anonymous volumes
if volume.Source == "" {
return result, nil
}
stackVolume, exists := stackVolumes[volume.Source]
if !exists {
return mount.Mount{}, errors.Errorf("undefined volume %q", volume.Source)
}
result.Source = namespace.Scope(volume.Source)
result.VolumeOptions = &mount.VolumeOptions{}
if volume.Volume != nil {
result.VolumeOptions.NoCopy = volume.Volume.NoCopy
}
if stackVolume.Name != "" {
result.Source = stackVolume.Name
}
// External named volumes
if stackVolume.External.External {
return result, nil
}
result.VolumeOptions.Labels = AddStackLabel(namespace, stackVolume.Labels)
if stackVolume.Driver != "" || stackVolume.DriverOpts != nil {
result.VolumeOptions.DriverConfig = &mount.Driver{
Name: stackVolume.Driver,
Options: stackVolume.DriverOpts,
}
}
return result, nil
}
func handleBindToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, error) {
result := createMountFromVolume(volume)
if volume.Source == "" {
return mount.Mount{}, errors.New("invalid bind source, source cannot be empty")
}
if volume.Volume != nil {
return mount.Mount{}, errors.New("volume options are incompatible with type bind")
}
if volume.Tmpfs != nil {
return mount.Mount{}, errors.New("tmpfs options are incompatible with type bind")
}
if volume.Bind != nil {
result.BindOptions = &mount.BindOptions{
Propagation: mount.Propagation(volume.Bind.Propagation),
}
}
return result, nil
}
func handleTmpfsToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, error) {
result := createMountFromVolume(volume)
if volume.Source != "" {
return mount.Mount{}, errors.New("invalid tmpfs source, source must be empty")
}
if volume.Bind != nil {
return mount.Mount{}, errors.New("bind options are incompatible with type tmpfs")
}
if volume.Volume != nil {
return mount.Mount{}, errors.New("volume options are incompatible with type tmpfs")
}
if volume.Tmpfs != nil {
result.TmpfsOptions = &mount.TmpfsOptions{
SizeBytes: volume.Tmpfs.Size,
}
}
return result, nil
}
func handleNpipeToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, error) {
result := createMountFromVolume(volume)
if volume.Source == "" {
return mount.Mount{}, errors.New("invalid npipe source, source cannot be empty")
}
if volume.Volume != nil {
return mount.Mount{}, errors.New("volume options are incompatible with type npipe")
}
if volume.Tmpfs != nil {
return mount.Mount{}, errors.New("tmpfs options are incompatible with type npipe")
}
if volume.Bind != nil {
result.BindOptions = &mount.BindOptions{
Propagation: mount.Propagation(volume.Bind.Propagation),
}
}
return result, nil
}
func convertVolumeToMount(
volume composetypes.ServiceVolumeConfig,
stackVolumes volumes,
namespace Namespace,
) (mount.Mount, error) {
switch volume.Type {
case "volume", "":
return handleVolumeToMount(volume, stackVolumes, namespace)
case "bind":
return handleBindToMount(volume)
case "tmpfs":
return handleTmpfsToMount(volume)
case "npipe":
return handleNpipeToMount(volume)
}
return mount.Mount{}, errors.New("volume type must be volume, bind, tmpfs or npipe")
}

View File

@ -0,0 +1,361 @@
package convert
import (
"testing"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types/mount"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestConvertVolumeToMountAnonymousVolume(t *testing.T) {
config := composetypes.ServiceVolumeConfig{
Type: "volume",
Target: "/foo/bar",
}
expected := mount.Mount{
Type: mount.TypeVolume,
Target: "/foo/bar",
}
mount, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo"))
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, mount))
}
func TestConvertVolumeToMountAnonymousBind(t *testing.T) {
config := composetypes.ServiceVolumeConfig{
Type: "bind",
Target: "/foo/bar",
Bind: &composetypes.ServiceVolumeBind{
Propagation: "slave",
},
}
_, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo"))
assert.Error(t, err, "invalid bind source, source cannot be empty")
}
func TestConvertVolumeToMountUnapprovedType(t *testing.T) {
config := composetypes.ServiceVolumeConfig{
Type: "foo",
Target: "/foo/bar",
}
_, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo"))
assert.Error(t, err, "volume type must be volume, bind, tmpfs or npipe")
}
func TestConvertVolumeToMountConflictingOptionsBindInVolume(t *testing.T) {
namespace := NewNamespace("foo")
config := composetypes.ServiceVolumeConfig{
Type: "volume",
Source: "foo",
Target: "/target",
Bind: &composetypes.ServiceVolumeBind{
Propagation: "slave",
},
}
_, err := convertVolumeToMount(config, volumes{}, namespace)
assert.Error(t, err, "bind options are incompatible with type volume")
}
func TestConvertVolumeToMountConflictingOptionsTmpfsInVolume(t *testing.T) {
namespace := NewNamespace("foo")
config := composetypes.ServiceVolumeConfig{
Type: "volume",
Source: "foo",
Target: "/target",
Tmpfs: &composetypes.ServiceVolumeTmpfs{
Size: 1000,
},
}
_, err := convertVolumeToMount(config, volumes{}, namespace)
assert.Error(t, err, "tmpfs options are incompatible with type volume")
}
func TestConvertVolumeToMountConflictingOptionsVolumeInBind(t *testing.T) {
namespace := NewNamespace("foo")
config := composetypes.ServiceVolumeConfig{
Type: "bind",
Source: "/foo",
Target: "/target",
Volume: &composetypes.ServiceVolumeVolume{
NoCopy: true,
},
}
_, err := convertVolumeToMount(config, volumes{}, namespace)
assert.Error(t, err, "volume options are incompatible with type bind")
}
func TestConvertVolumeToMountConflictingOptionsTmpfsInBind(t *testing.T) {
namespace := NewNamespace("foo")
config := composetypes.ServiceVolumeConfig{
Type: "bind",
Source: "/foo",
Target: "/target",
Tmpfs: &composetypes.ServiceVolumeTmpfs{
Size: 1000,
},
}
_, err := convertVolumeToMount(config, volumes{}, namespace)
assert.Error(t, err, "tmpfs options are incompatible with type bind")
}
func TestConvertVolumeToMountConflictingOptionsBindInTmpfs(t *testing.T) {
namespace := NewNamespace("foo")
config := composetypes.ServiceVolumeConfig{
Type: "tmpfs",
Target: "/target",
Bind: &composetypes.ServiceVolumeBind{
Propagation: "slave",
},
}
_, err := convertVolumeToMount(config, volumes{}, namespace)
assert.Error(t, err, "bind options are incompatible with type tmpfs")
}
func TestConvertVolumeToMountConflictingOptionsVolumeInTmpfs(t *testing.T) {
namespace := NewNamespace("foo")
config := composetypes.ServiceVolumeConfig{
Type: "tmpfs",
Target: "/target",
Volume: &composetypes.ServiceVolumeVolume{
NoCopy: true,
},
}
_, err := convertVolumeToMount(config, volumes{}, namespace)
assert.Error(t, err, "volume options are incompatible with type tmpfs")
}
func TestConvertVolumeToMountNamedVolume(t *testing.T) {
stackVolumes := volumes{
"normal": composetypes.VolumeConfig{
Driver: "glusterfs",
DriverOpts: map[string]string{
"opt": "value",
},
Labels: map[string]string{
"something": "labeled",
},
},
}
namespace := NewNamespace("foo")
expected := mount.Mount{
Type: mount.TypeVolume,
Source: "foo_normal",
Target: "/foo",
ReadOnly: true,
VolumeOptions: &mount.VolumeOptions{
Labels: map[string]string{
LabelNamespace: "foo",
"something": "labeled",
},
DriverConfig: &mount.Driver{
Name: "glusterfs",
Options: map[string]string{
"opt": "value",
},
},
NoCopy: true,
},
}
config := composetypes.ServiceVolumeConfig{
Type: "volume",
Source: "normal",
Target: "/foo",
ReadOnly: true,
Volume: &composetypes.ServiceVolumeVolume{
NoCopy: true,
},
}
mount, err := convertVolumeToMount(config, stackVolumes, namespace)
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, mount))
}
func TestConvertVolumeToMountNamedVolumeWithNameCustomized(t *testing.T) {
stackVolumes := volumes{
"normal": composetypes.VolumeConfig{
Name: "user_specified_name",
Driver: "vsphere",
DriverOpts: map[string]string{
"opt": "value",
},
Labels: map[string]string{
"something": "labeled",
},
},
}
namespace := NewNamespace("foo")
expected := mount.Mount{
Type: mount.TypeVolume,
Source: "user_specified_name",
Target: "/foo",
ReadOnly: true,
VolumeOptions: &mount.VolumeOptions{
Labels: map[string]string{
LabelNamespace: "foo",
"something": "labeled",
},
DriverConfig: &mount.Driver{
Name: "vsphere",
Options: map[string]string{
"opt": "value",
},
},
NoCopy: true,
},
}
config := composetypes.ServiceVolumeConfig{
Type: "volume",
Source: "normal",
Target: "/foo",
ReadOnly: true,
Volume: &composetypes.ServiceVolumeVolume{
NoCopy: true,
},
}
mount, err := convertVolumeToMount(config, stackVolumes, namespace)
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, mount))
}
func TestConvertVolumeToMountNamedVolumeExternal(t *testing.T) {
stackVolumes := volumes{
"outside": composetypes.VolumeConfig{
Name: "special",
External: composetypes.External{External: true},
},
}
namespace := NewNamespace("foo")
expected := mount.Mount{
Type: mount.TypeVolume,
Source: "special",
Target: "/foo",
VolumeOptions: &mount.VolumeOptions{NoCopy: false},
}
config := composetypes.ServiceVolumeConfig{
Type: "volume",
Source: "outside",
Target: "/foo",
}
mount, err := convertVolumeToMount(config, stackVolumes, namespace)
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, mount))
}
func TestConvertVolumeToMountNamedVolumeExternalNoCopy(t *testing.T) {
stackVolumes := volumes{
"outside": composetypes.VolumeConfig{
Name: "special",
External: composetypes.External{External: true},
},
}
namespace := NewNamespace("foo")
expected := mount.Mount{
Type: mount.TypeVolume,
Source: "special",
Target: "/foo",
VolumeOptions: &mount.VolumeOptions{
NoCopy: true,
},
}
config := composetypes.ServiceVolumeConfig{
Type: "volume",
Source: "outside",
Target: "/foo",
Volume: &composetypes.ServiceVolumeVolume{
NoCopy: true,
},
}
mount, err := convertVolumeToMount(config, stackVolumes, namespace)
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, mount))
}
func TestConvertVolumeToMountBind(t *testing.T) {
stackVolumes := volumes{}
namespace := NewNamespace("foo")
expected := mount.Mount{
Type: mount.TypeBind,
Source: "/bar",
Target: "/foo",
ReadOnly: true,
BindOptions: &mount.BindOptions{Propagation: mount.PropagationShared},
}
config := composetypes.ServiceVolumeConfig{
Type: "bind",
Source: "/bar",
Target: "/foo",
ReadOnly: true,
Bind: &composetypes.ServiceVolumeBind{Propagation: "shared"},
}
mount, err := convertVolumeToMount(config, stackVolumes, namespace)
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, mount))
}
func TestConvertVolumeToMountVolumeDoesNotExist(t *testing.T) {
namespace := NewNamespace("foo")
config := composetypes.ServiceVolumeConfig{
Type: "volume",
Source: "unknown",
Target: "/foo",
ReadOnly: true,
}
_, err := convertVolumeToMount(config, volumes{}, namespace)
assert.Error(t, err, "undefined volume \"unknown\"")
}
func TestConvertTmpfsToMountVolume(t *testing.T) {
config := composetypes.ServiceVolumeConfig{
Type: "tmpfs",
Target: "/foo/bar",
Tmpfs: &composetypes.ServiceVolumeTmpfs{
Size: 1000,
},
}
expected := mount.Mount{
Type: mount.TypeTmpfs,
Target: "/foo/bar",
TmpfsOptions: &mount.TmpfsOptions{SizeBytes: 1000},
}
mount, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo"))
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, mount))
}
func TestConvertTmpfsToMountVolumeWithSource(t *testing.T) {
config := composetypes.ServiceVolumeConfig{
Type: "tmpfs",
Source: "/bar",
Target: "/foo/bar",
Tmpfs: &composetypes.ServiceVolumeTmpfs{
Size: 1000,
},
}
_, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo"))
assert.Error(t, err, "invalid tmpfs source, source must be empty")
}
func TestConvertVolumeToMountAnonymousNpipe(t *testing.T) {
config := composetypes.ServiceVolumeConfig{
Type: "npipe",
Source: `\\.\pipe\foo`,
Target: `\\.\pipe\foo`,
}
expected := mount.Mount{
Type: mount.TypeNamedPipe,
Source: `\\.\pipe\foo`,
Target: `\\.\pipe\foo`,
}
mount, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo"))
assert.NilError(t, err)
assert.Check(t, is.DeepEqual(expected, mount))
}

170
pkg/client/registry.go Normal file
View File

@ -0,0 +1,170 @@
package client
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"coopcloud.tech/abra/pkg/web"
"github.com/docker/distribution/reference"
)
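// RawTag is a tag entry as returned by the Docker Hub v1 tags endpoint.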
type RawTag struct {
Layer string
Name string
}
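// RawTags is a list of raw registry tags.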
type RawTags []RawTag
var registryURL = "https://registry.hub.docker.com/v1/repositories/%s/tags"
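// GetRegistryTags retrieves the tag list for an image from the Docker Hub v1 registry API.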
func GetRegistryTags(image string) (RawTags, error) {
var tags RawTags
tagsURL := fmt.Sprintf(registryURL, image)
if err := web.ReadJSON(tagsURL, &tags); err != nil {
return tags, err
}
return tags, nil
}
// getRegv2Token retrieves a registry v2 authentication token.
func getRegv2Token(image reference.Named) (string, error) {
img := reference.Path(image)
authTokenURL := fmt.Sprintf("https://auth.docker.io/token?service=registry.docker.io&scope=repository:%s:pull", img)
req, err := http.NewRequest("GET", authTokenURL, nil)
if err != nil {
return "", err
}
client := &http.Client{Timeout: web.Timeout}
res, err := client.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
_, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", nil
}
tokenRes := struct {
Token string
Expiry string
Issued string
}{}
if err := json.Unmarshal(body, &tokenRes); err != nil {
return "", err
}
return tokenRes.Token, nil
}
// GetTagDigest retrieves an image digest from a v2 registry
func GetTagDigest(image reference.Named) (string, error) {
img := reference.Path(image)
tag := image.(reference.NamedTagged).Tag()
manifestURL := fmt.Sprintf("https://index.docker.io/v2/%s/manifests/%s", img, tag)
req, err := http.NewRequest("GET", manifestURL, nil)
if err != nil {
return "", err
}
token, err := getRegv2Token(image)
if err != nil {
return "", err
}
req.Header = http.Header{
"Accept": []string{
"application/vnd.docker.distribution.manifest.v2+json",
"application/vnd.docker.distribution.manifest.list.v2+json",
},
"Authorization": []string{fmt.Sprintf("Bearer %s", token)},
}
client := &http.Client{Timeout: web.Timeout}
res, err := client.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
_, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
registryResT1 := struct {
SchemaVersion int
MediaType string
Manifests []struct {
MediaType string
Size int
Digest string
Platform struct {
Architecture string
Os string
}
}
}{}
registryResT2 := struct {
SchemaVersion int
MediaType string
Config struct {
MediaType string
Size int
Digest string
}
Layers []struct {
MediaType string
Size int
Digest string
}
}{}
if err := json.Unmarshal(body, &registryResT1); err != nil {
return "", err
}
var digest string
for _, manifest := range registryResT1.Manifests {
if string(manifest.Platform.Architecture) == "amd64" {
digest = strings.Split(manifest.Digest, ":")[1][:7]
}
}
if digest == "" {
if err := json.Unmarshal(body, &registryResT2); err != nil {
return "", err
}
digest = strings.Split(registryResT2.Config.Digest, ":")[1][:7]
}
if digest == "" {
return "", fmt.Errorf("Unable to retrieve amd64 digest for '%s'", image)
}
return digest, nil
}

25
pkg/client/secret.go Normal file
View File

@ -0,0 +1,25 @@
package client
import (
"context"
"github.com/docker/docker/api/types/swarm"
)
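// StoreSecret creates a Docker secret with the given name and value on the
// host referenced by the server context name.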
func StoreSecret(secretName, secretValue, server string) error {
cl, err := New(server)
if err != nil {
return err
}
ctx := context.Background()
ann := swarm.Annotations{Name: secretName}
spec := swarm.SecretSpec{Annotations: ann, Data: []byte(secretValue)}
// We don't bother with the secret IDs for now
if _, err := cl.SecretCreate(ctx, spec); err != nil {
return err
}
return nil
}

131
pkg/client/stack/loader.go Normal file
View File

@ -0,0 +1,131 @@
package stack
import (
"fmt"
"io/ioutil"
"path/filepath"
"sort"
"strings"
"github.com/docker/cli/cli/compose/loader"
"github.com/docker/cli/cli/compose/schema"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/sirupsen/logrus"
)
// LoadComposefile parses the compose files specified on the command line and returns the resulting Config.
func LoadComposefile(opts Deploy, appEnv map[string]string) (*composetypes.Config, error) {
configDetails, err := getConfigDetails(opts.Composefiles, appEnv)
if err != nil {
return nil, err
}
dicts := getDictsFrom(configDetails.ConfigFiles)
config, err := loader.Load(configDetails)
if err != nil {
if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok {
return nil, fmt.Errorf("compose file contains unsupported options:\n\n%s",
propertyWarnings(fpe.Properties))
}
return nil, err
}
unsupportedProperties := loader.GetUnsupportedProperties(dicts...)
if len(unsupportedProperties) > 0 {
logrus.Warnf("Ignoring unsupported options: %s\n\n",
strings.Join(unsupportedProperties, ", "))
}
deprecatedProperties := loader.GetDeprecatedProperties(dicts...)
if len(deprecatedProperties) > 0 {
logrus.Warnf("Ignoring deprecated options:\n\n%s\n\n",
propertyWarnings(deprecatedProperties))
}
return config, nil
}
func getDictsFrom(configFiles []composetypes.ConfigFile) []map[string]interface{} {
dicts := []map[string]interface{}{}
for _, configFile := range configFiles {
dicts = append(dicts, configFile.Config)
}
return dicts
}
func propertyWarnings(properties map[string]string) string {
var msgs []string
for name, description := range properties {
msgs = append(msgs, fmt.Sprintf("%s: %s", name, description))
}
sort.Strings(msgs)
return strings.Join(msgs, "\n\n")
}
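// getConfigDetails builds the loader input: the working directory derived from
// the first compose file, the parsed config files, the schema version and the
// app environment.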
func getConfigDetails(composefiles []string, appEnv map[string]string) (composetypes.ConfigDetails, error) {
var details composetypes.ConfigDetails
absPath, err := filepath.Abs(composefiles[0])
if err != nil {
return details, err
}
details.WorkingDir = filepath.Dir(absPath)
details.ConfigFiles, err = loadConfigFiles(composefiles)
if err != nil {
return details, err
}
// Take the version of the first file (multiple files can't have different versions)
details.Version = schema.Version(details.ConfigFiles[0].Config)
details.Environment = appEnv
return details, err
}
func buildEnvironment(env []string) (map[string]string, error) {
result := make(map[string]string, len(env))
for _, s := range env {
// if value is empty, s is like "K=", not "K".
if !strings.Contains(s, "=") {
return result, fmt.Errorf("unexpected environment %q", s)
}
kv := strings.SplitN(s, "=", 2)
result[kv[0]] = kv[1]
}
return result, nil
}
func loadConfigFiles(filenames []string) ([]composetypes.ConfigFile, error) {
var configFiles []composetypes.ConfigFile
for _, filename := range filenames {
configFile, err := loadConfigFile(filename)
if err != nil {
return configFiles, err
}
configFiles = append(configFiles, *configFile)
}
return configFiles, nil
}
func loadConfigFile(filename string) (*composetypes.ConfigFile, error) {
var bytes []byte
var err error
bytes, err = ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
config, err := loader.ParseYAML(bytes)
if err != nil {
return nil, err
}
return &composetypes.ConfigFile{
Filename: filename,
Config: config,
}, nil
}

View File

@ -0,0 +1,15 @@
package stack
// Deploy holds docker stack deploy options
type Deploy struct {
Composefiles []string
Namespace string
ResolveImage string
SendRegistryAuth bool
Prune bool
}
// Remove holds docker stack remove options
type Remove struct {
Namespaces []string
}

138
pkg/client/stack/remove.go Normal file
View File

@ -0,0 +1,138 @@
package stack
import (
"context"
"fmt"
"sort"
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
apiclient "github.com/docker/docker/client"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// RunRemove is the swarm implementation of docker stack remove
func RunRemove(ctx context.Context, client *apiclient.Client, opts Remove) error {
var errs []string
for _, namespace := range opts.Namespaces {
services, err := getStackServices(ctx, client, namespace)
if err != nil {
return err
}
networks, err := getStackNetworks(ctx, client, namespace)
if err != nil {
return err
}
var secrets []swarm.Secret
if versions.GreaterThanOrEqualTo(client.ClientVersion(), "1.25") {
secrets, err = getStackSecrets(ctx, client, namespace)
if err != nil {
return err
}
}
var configs []swarm.Config
if versions.GreaterThanOrEqualTo(client.ClientVersion(), "1.30") {
configs, err = getStackConfigs(ctx, client, namespace)
if err != nil {
return err
}
}
if len(services)+len(networks)+len(secrets)+len(configs) == 0 {
logrus.Warning(fmt.Errorf("nothing found in stack: %s", namespace))
continue
}
hasError := removeServices(ctx, client, services)
hasError = removeSecrets(ctx, client, secrets) || hasError
hasError = removeConfigs(ctx, client, configs) || hasError
hasError = removeNetworks(ctx, client, networks) || hasError
if hasError {
errs = append(errs, fmt.Sprintf("failed to remove some resources from stack: %s", namespace))
}
}
if len(errs) > 0 {
return errors.Errorf(strings.Join(errs, "\n"))
}
return nil
}
func sortServiceByName(services []swarm.Service) func(i, j int) bool {
return func(i, j int) bool {
return services[i].Spec.Name < services[j].Spec.Name
}
}
func removeServices(
ctx context.Context,
client *apiclient.Client,
services []swarm.Service,
) bool {
var hasError bool
sort.Slice(services, sortServiceByName(services))
for _, service := range services {
logrus.Infof("removing service %s\n", service.Spec.Name)
if err := client.ServiceRemove(ctx, service.ID); err != nil {
hasError = true
logrus.Fatalf("failed to remove service %s: %s", service.ID, err)
}
}
return hasError
}
func removeNetworks(
ctx context.Context,
client *apiclient.Client,
networks []types.NetworkResource,
) bool {
var hasError bool
for _, network := range networks {
logrus.Infof("removing network %s\n", network.Name)
if err := client.NetworkRemove(ctx, network.ID); err != nil {
hasError = true
logrus.Fatalf("failed to remove network %s: %s", network.ID, err)
}
}
return hasError
}
func removeSecrets(
ctx context.Context,
client *apiclient.Client,
secrets []swarm.Secret,
) bool {
var hasError bool
for _, secret := range secrets {
logrus.Infof("Removing secret %s\n", secret.Spec.Name)
if err := client.SecretRemove(ctx, secret.ID); err != nil {
hasError = true
logrus.Fatalf("Failed to remove secret %s: %s", secret.ID, err)
}
}
return hasError
}
func removeConfigs(
ctx context.Context,
client *apiclient.Client,
configs []swarm.Config,
) bool {
var hasError bool
for _, config := range configs {
logrus.Infof("removing config %s\n", config.Spec.Name)
if err := client.ConfigRemove(ctx, config.ID); err != nil {
hasError = true
logrus.Fatalf("failed to remove config %s: %s", config.ID, err)
}
}
return hasError
}

393
pkg/client/stack/stack.go Normal file
View File

@ -0,0 +1,393 @@
package stack
import (
"context"
"strings"
abraClient "coopcloud.tech/abra/pkg/client"
"coopcloud.tech/abra/pkg/client/convert"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
dockerclient "github.com/docker/docker/client"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Resolve image constants
const (
defaultNetworkDriver = "overlay"
ResolveImageAlways = "always"
ResolveImageChanged = "changed"
ResolveImageNever = "never"
)
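// StackStatus holds the deployed services of a stack and any error encountered
// while querying them.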
type StackStatus struct {
Services []swarm.Service
Err error
}
func getStackFilter(namespace string) filters.Args {
filter := filters.NewArgs()
filter.Add("label", convert.LabelNamespace+"="+namespace)
return filter
}
func getStackServiceFilter(namespace string) filters.Args {
return getStackFilter(namespace)
}
func getAllStacksFilter() filters.Args {
filter := filters.NewArgs()
filter.Add("label", convert.LabelNamespace)
return filter
}
func getStackServices(ctx context.Context, dockerclient client.APIClient, namespace string) ([]swarm.Service, error) {
return dockerclient.ServiceList(ctx, types.ServiceListOptions{Filters: getStackServiceFilter(namespace)})
}
// GetDeployedServicesByLabel filters services by label
func GetDeployedServicesByLabel(contextName string, label string) StackStatus {
cl, err := abraClient.New(contextName)
if err != nil {
if strings.Contains(err.Error(), "does not exist") {
// No local context found, bail out gracefully
return StackStatus{[]swarm.Service{}, nil}
}
return StackStatus{[]swarm.Service{}, err}
}
ctx := context.Background()
filters := filters.NewArgs()
filters.Add("label", label)
services, err := cl.ServiceList(ctx, types.ServiceListOptions{Filters: filters})
if err != nil {
return StackStatus{[]swarm.Service{}, err}
}
return StackStatus{services, nil}
}
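// GetAllDeployedServices returns all swarm services deployed by any stack on
// the given Docker context.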
func GetAllDeployedServices(contextName string) StackStatus {
cl, err := abraClient.New(contextName)
if err != nil {
if strings.Contains(err.Error(), "does not exist") {
// No local context found, bail out gracefully
return StackStatus{[]swarm.Service{}, nil}
}
return StackStatus{[]swarm.Service{}, err}
}
ctx := context.Background()
services, err := cl.ServiceList(ctx, types.ServiceListOptions{Filters: getAllStacksFilter()})
if err != nil {
return StackStatus{[]swarm.Service{}, err}
}
return StackStatus{services, nil}
}
// pruneServices removes services that are no longer referenced in the source
func pruneServices(ctx context.Context, cl *dockerclient.Client, namespace convert.Namespace, services map[string]struct{}) {
oldServices, err := getStackServices(ctx, cl, namespace.Name())
if err != nil {
logrus.Infof("Failed to list services: %s\n", err)
}
pruneServices := []swarm.Service{}
for _, service := range oldServices {
if _, exists := services[namespace.Descope(service.Spec.Name)]; !exists {
pruneServices = append(pruneServices, service)
}
}
removeServices(ctx, cl, pruneServices)
}
// RunDeploy is the swarm implementation of docker stack deploy
func RunDeploy(cl *dockerclient.Client, opts Deploy, cfg *composetypes.Config) error {
ctx := context.Background()
if err := validateResolveImageFlag(&opts); err != nil {
return err
}
// client side image resolution should not be done when the supported
// server version is older than 1.30
if versions.LessThan(cl.ClientVersion(), "1.30") {
opts.ResolveImage = ResolveImageNever
}
return deployCompose(ctx, cl, opts, cfg)
}
// validateResolveImageFlag validates the opts.resolveImage command line option
func validateResolveImageFlag(opts *Deploy) error {
switch opts.ResolveImage {
case ResolveImageAlways, ResolveImageChanged, ResolveImageNever:
return nil
default:
return errors.Errorf("Invalid option %s for flag --resolve-image", opts.ResolveImage)
}
}
func deployCompose(ctx context.Context, cl *dockerclient.Client, opts Deploy, config *composetypes.Config) error {
namespace := convert.NewNamespace(opts.Namespace)
if opts.Prune {
services := map[string]struct{}{}
for _, service := range config.Services {
services[service.Name] = struct{}{}
}
pruneServices(ctx, cl, namespace, services)
}
serviceNetworks := getServicesDeclaredNetworks(config.Services)
networks, externalNetworks := convert.Networks(namespace, config.Networks, serviceNetworks)
if err := validateExternalNetworks(ctx, cl, externalNetworks); err != nil {
return err
}
if err := createNetworks(ctx, cl, namespace, networks); err != nil {
return err
}
secrets, err := convert.Secrets(namespace, config.Secrets)
if err != nil {
return err
}
if err := createSecrets(ctx, cl, secrets); err != nil {
return err
}
configs, err := convert.Configs(namespace, config.Configs)
if err != nil {
return err
}
if err := createConfigs(ctx, cl, configs); err != nil {
return err
}
services, err := convert.Services(namespace, config, cl)
if err != nil {
return err
}
return deployServices(ctx, cl, services, namespace, opts.SendRegistryAuth, opts.ResolveImage)
}
func getServicesDeclaredNetworks(serviceConfigs []composetypes.ServiceConfig) map[string]struct{} {
serviceNetworks := map[string]struct{}{}
for _, serviceConfig := range serviceConfigs {
if len(serviceConfig.Networks) == 0 {
serviceNetworks["default"] = struct{}{}
continue
}
for network := range serviceConfig.Networks {
serviceNetworks[network] = struct{}{}
}
}
return serviceNetworks
}
func validateExternalNetworks(ctx context.Context, client dockerclient.NetworkAPIClient, externalNetworks []string) error {
for _, networkName := range externalNetworks {
if !container.NetworkMode(networkName).IsUserDefined() {
// Networks that are not user defined always exist on all nodes as
// local-scoped networks, so there's no need to inspect them.
continue
}
network, err := client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{})
switch {
case dockerclient.IsErrNotFound(err):
return errors.Errorf("network %q is declared as external, but could not be found. You need to create a swarm-scoped network before the stack is deployed", networkName)
case err != nil:
return err
case network.Scope != "swarm":
return errors.Errorf("network %q is declared as external, but it is not in the right scope: %q instead of \"swarm\"", networkName, network.Scope)
}
}
return nil
}
func createSecrets(ctx context.Context, cl *dockerclient.Client, secrets []swarm.SecretSpec) error {
for _, secretSpec := range secrets {
secret, _, err := cl.SecretInspectWithRaw(ctx, secretSpec.Name)
switch {
case err == nil:
// the secret already exists; update it in place
if err := cl.SecretUpdate(ctx, secret.ID, secret.Meta.Version, secretSpec); err != nil {
return errors.Wrapf(err, "failed to update secret %s", secretSpec.Name)
}
case dockerclient.IsErrNotFound(err):
// the secret does not exist yet; create it
logrus.Infof("Creating secret %s", secretSpec.Name)
if _, err := cl.SecretCreate(ctx, secretSpec); err != nil {
return errors.Wrapf(err, "failed to create secret %s", secretSpec.Name)
}
default:
return err
}
}
return nil
}
func createConfigs(ctx context.Context, cl *dockerclient.Client, configs []swarm.ConfigSpec) error {
for _, configSpec := range configs {
config, _, err := cl.ConfigInspectWithRaw(ctx, configSpec.Name)
switch {
case err == nil:
// the config already exists; update it in place
if err := cl.ConfigUpdate(ctx, config.ID, config.Meta.Version, configSpec); err != nil {
return errors.Wrapf(err, "failed to update config %s", configSpec.Name)
}
case dockerclient.IsErrNotFound(err):
// the config does not exist yet; create it
logrus.Infof("Creating config %s", configSpec.Name)
if _, err := cl.ConfigCreate(ctx, configSpec); err != nil {
return errors.Wrapf(err, "failed to create config %s", configSpec.Name)
}
default:
return err
}
}
return nil
}
func createNetworks(ctx context.Context, cl *dockerclient.Client, namespace convert.Namespace, networks map[string]types.NetworkCreate) error {
existingNetworks, err := getStackNetworks(ctx, cl, namespace.Name())
if err != nil {
return err
}
existingNetworkMap := make(map[string]types.NetworkResource)
for _, network := range existingNetworks {
existingNetworkMap[network.Name] = network
}
for name, createOpts := range networks {
if _, exists := existingNetworkMap[name]; exists {
continue
}
if createOpts.Driver == "" {
createOpts.Driver = defaultNetworkDriver
}
logrus.Infof("Creating network %s\n", name)
if _, err := cl.NetworkCreate(ctx, name, createOpts); err != nil {
return errors.Wrapf(err, "failed to create network %s", name)
}
}
return nil
}
func deployServices(
ctx context.Context,
cl *dockerclient.Client,
services map[string]swarm.ServiceSpec,
namespace convert.Namespace,
sendAuth bool,
resolveImage string) error {
existingServices, err := getStackServices(ctx, cl, namespace.Name())
if err != nil {
return err
}
existingServiceMap := make(map[string]swarm.Service)
for _, service := range existingServices {
existingServiceMap[service.Spec.Name] = service
}
for internalName, serviceSpec := range services {
var (
name = namespace.Scope(internalName)
image = serviceSpec.TaskTemplate.ContainerSpec.Image
encodedAuth string
)
// FIXME: registry auth is disabled for now. Supporting it would require a
// `dockerCli` instance here (or copying that whole module in), which is not
// worth pulling in for a feature we don't yet support.
if sendAuth {
// Retrieve encoded auth token from the image reference
// encodedAuth, err = command.RetrieveAuthTokenFromImage(ctx, dockerCli, image)
// if err != nil {
// return err
// }
}
if service, exists := existingServiceMap[name]; exists {
logrus.Infof("Updating service %s (id: %s)\n", name, service.ID)
updateOpts := types.ServiceUpdateOptions{EncodedRegistryAuth: encodedAuth}
switch resolveImage {
case ResolveImageAlways:
// image should be updated by the server using QueryRegistry
updateOpts.QueryRegistry = true
case ResolveImageChanged:
if image != service.Spec.Labels[convert.LabelImage] {
// Query the registry to resolve digest for the updated image
updateOpts.QueryRegistry = true
} else {
// image has not changed; update the serviceSpec with the
// existing information that was set by QueryRegistry on the
// previous deploy. Otherwise this will trigger an incorrect
// service update.
serviceSpec.TaskTemplate.ContainerSpec.Image = service.Spec.TaskTemplate.ContainerSpec.Image
}
default:
if image == service.Spec.Labels[convert.LabelImage] {
// image has not changed; update the serviceSpec with the
// existing information that was set by QueryRegistry on the
// previous deploy. Otherwise this will trigger an incorrect
// service update.
serviceSpec.TaskTemplate.ContainerSpec.Image = service.Spec.TaskTemplate.ContainerSpec.Image
}
}
// Stack deploy does not have a `--force` option. Preserve existing
// ForceUpdate value so that tasks are not re-deployed if not updated.
// TODO move this to API client?
serviceSpec.TaskTemplate.ForceUpdate = service.Spec.TaskTemplate.ForceUpdate
response, err := cl.ServiceUpdate(ctx, service.ID, service.Version, serviceSpec, updateOpts)
if err != nil {
return errors.Wrapf(err, "failed to update service %s", name)
}
for _, warning := range response.Warnings {
logrus.Warn(warning)
}
} else {
logrus.Infof("Creating service %s\n", name)
createOpts := types.ServiceCreateOptions{EncodedRegistryAuth: encodedAuth}
// query registry if flag disabling it was not set
if resolveImage == ResolveImageAlways || resolveImage == ResolveImageChanged {
createOpts.QueryRegistry = true
}
if _, err := cl.ServiceCreate(ctx, serviceSpec, createOpts); err != nil {
return errors.Wrapf(err, "failed to create service %s", name)
}
}
}
return nil
}
func getStackNetworks(ctx context.Context, dockerclient client.APIClient, namespace string) ([]types.NetworkResource, error) {
return dockerclient.NetworkList(ctx, types.NetworkListOptions{Filters: getStackFilter(namespace)})
}
func getStackSecrets(ctx context.Context, dockerclient client.APIClient, namespace string) ([]swarm.Secret, error) {
return dockerclient.SecretList(ctx, types.SecretListOptions{Filters: getStackFilter(namespace)})
}
func getStackConfigs(ctx context.Context, dockerclient client.APIClient, namespace string) ([]swarm.Config, error) {
return dockerclient.ConfigList(ctx, types.ConfigListOptions{Filters: getStackFilter(namespace)})
}
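As a usage sketch (not part of this commit), deploying a parsed compose configuration to a named Docker context could look like the following; it assumes the Deploy options struct is defined elsewhere in this package, with the field names taken from the references in RunDeploy and deployCompose above:

// deployExample is a hypothetical caller living in package stack.
func deployExample(contextName string, cfg *composetypes.Config) error {
    cl, err := abraClient.New(contextName)
    if err != nil {
        return err
    }
    opts := Deploy{
        Namespace:        "myapp_example_com", // stack namespace used to scope service names
        Prune:            true,                // remove services dropped from the compose config
        ResolveImage:     ResolveImageAlways,  // let the server resolve image digests
        SendRegistryAuth: false,               // registry auth is currently unsupported (see FIXME)
    }
    return RunDeploy(cl, opts, cfg)
}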

51
pkg/client/volumes.go Normal file
View File

@ -0,0 +1,51 @@
package client
import (
"context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
)
// GetVolumes lists volumes on the given server whose names match appName.
func GetVolumes(ctx context.Context, server string, appName string) ([]*types.Volume, error) {
cl, err := New(server)
if err != nil {
return nil, err
}
fs := filters.NewArgs()
fs.Add("name", appName)
volumeListOKBody, err := cl.VolumeList(ctx, fs)
if err != nil {
return nil, err
}
return volumeListOKBody.Volumes, nil
}
// GetVolumeNames extracts the names from a list of volumes.
func GetVolumeNames(volumes []*types.Volume) []string {
var volumeNames []string
for _, vol := range volumes {
volumeNames = append(volumeNames, vol.Name)
}
return volumeNames
}
// RemoveVolumes removes the named volumes on the given server, optionally forcing removal.
func RemoveVolumes(ctx context.Context, server string, volumeNames []string, force bool) error {
cl, err := New(server)
if err != nil {
return err
}
for _, volName := range volumeNames {
err := cl.VolumeRemove(ctx, volName, force)
if err != nil {
return err
}
}
return nil
}
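A minimal usage sketch (not part of this commit) of how these helpers chain together within the same package:

// removeAppVolumes is a hypothetical caller: list an app's volumes on a
// server, extract their names, then remove them without forcing.
func removeAppVolumes(ctx context.Context, server, appName string) error {
    volumes, err := GetVolumes(ctx, server, appName)
    if err != nil {
        return err
    }
    names := GetVolumeNames(volumes)
    return RemoveVolumes(ctx, server, names, false)
}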