Compare commits
173 Commits
| SHA1 |
|---|
| c89750f836 |
| c805ad2964 |
| d8c6c830f8 |
| f89d05edcb |
| e1fe8f3c45 |
| 356eda4028 |
| 85148aa3f1 |
| 19c0311d46 |
| 207ff0831d |
| 57b27434ea |
| 010c234a0d |
| 9a5296c8f1 |
| b59752479b |
| 8997667aa2 |
| bcae2c4408 |
| 079adf3f23 |
| f6693b0b25 |
| ed16a3136b |
| e63ac0ea35 |
| c1a4358ea4 |
| 27ab7cc3d6 |
| 74bd5f143f |
| 8dc400713f |
| 543f9b32ee |
| 1d314f2227 |
| 275ab1f063 |
| 4f6ab11ff4 |
| 537309a548 |
| 08714b4579 |
| 789a15bc73 |
| ce12ac2d14 |
| 4c94a0af75 |
| 0717f261ed |
| fc8717799f |
| 76f4876129 |
| 7ea48a16e3 |
| 75e9075591 |
| 69e1094f5a |
| 890e29da87 |
| 78d52ec5d4 |
| c0bbca75af |
| b666e9a090 |
| 9352be5341 |
| b4f607fb4f |
| af2647d55b |
| c71aa11c0a |
| 336b2a5cac |
| c462e06fcd |
| 719508a935 |
| 2fa3aae9ed |
| 6c3a10aaed |
| 3ee6755815 |
| 16349f6e33 |
| 2aa77af30f |
| 456c1ce695 |
| bcadc9061c |
| e05745b4a5 |
| b6ecef353f |
| e380ddaddf |
| 12834eeff6 |
| bb46da9fba |
| 871d24d3fc |
| 61a9096b8d |
| 2ac475cf97 |
| 2a36695037 |
| dc74fc81f2 |
| 7e90635652 |
| 3f7989903a |
| 7059d069c3 |
| 4a4a1f3615 |
| 1274f23252 |
| 3af1848dda |
| 6d91f5d55d |
| d56948c12c |
| 9b3eea87ee |
| 31c092e155 |
| 046ffa4e87 |
| 51668a30f2 |
| 5e7f9d3c84 |
| 72ddefbada |
| 135aa72476 |
| 7c7fe26a6f |
| 1df47ffb4d |
| 2e7e529a18 |
| f8f230181e |
| 0ee4693953 |
| cb4cd04c64 |
| d2e771fed6 |
| b8911a3b33 |
| ebe071a9b3 |
| ecb972ab38 |
| 4c68a9666f |
| e245b72381 |
| 0ff9e5cd10 |
| 8e565d0399 |
| 8a424333f9 |
| fde819236b |
| aa6314c663 |
| 81ee98e861 |
| 8ae4453d46 |
| aeea559129 |
| 22336b332c |
| 2961611fda |
| 17adf05188 |
| 39f1110308 |
| 3dfacb55a4 |
| e942084530 |
| 50f529fa47 |
| b4bee9be75 |
| 8b0d34a5a1 |
| f93908213a |
| 4280972d65 |
| 984bc7411e |
| 92932647d3 |
| dee37936e5 |
| 3e1a0bdc23 |
| f2b2061cc3 |
| 4925fd9c34 |
| 5d3ab5bc0c |
| c12e23a4c1 |
| aca3f2d382 |
| a7488d1bcd |
| 5a97a93ae1 |
| 41910b6d68 |
| 1a087e87c9 |
| 0b11120060 |
| e57b20642d |
| b8702b8a9a |
| a31b20d7db |
| 5ba5678898 |
| 9de1318e36 |
| 19e1ab273e |
| ec1812188f |
| 6004d74b1f |
| e79e591ee9 |
| 0f22d7e295 |
| f250152bf4 |
| f9d666b057 |
| 342afe44fb |
| cfec8027ed |
| 78c42cf031 |
| dd2f13bed4 |
| 3b991ec615 |
| 34ea8bb5a5 |
| afb17ec70b |
| 62aed95bc1 |
| 649e4916bb |
| 3597d75281 |
| 5673816fec |
| a8c69c8287 |
| fc3dc8f058 |
| 2a46a3d46c |
| b2cf18ac2e |
| 44371c7c34 |
| 4e6798794d |
| d8aefad94a |
| 3c37d6a034 |
| 9d43f1ed48 |
| a818677813 |
| c204959687 |
| 76c09259db |
| 0efb62cab1 |
| 8789e93d6e |
| 0ee05a6353 |
| 68be7cb376 |
| de805da04c |
| b75350de7a |
| f96ddaedf7 |
| 0fb6bb35a4 |
| 264ee43c2a |
| 7f4c842e8a |
| e25e9d68be |
| 6877dedeee |
Makefile (4 changed lines)
@@ -12,14 +12,14 @@ clean: ## remove build artifacts
.PHONY: test-unit
test-unit: ## run unit test
	./scripts/test/unit $(shell go list ./... | grep -vE '/vendor/|/e2e/|/e2eengine/')
	./scripts/test/unit $(shell go list ./... | grep -vE '/vendor/|/e2e/')
.PHONY: test
test: test-unit ## run tests
.PHONY: test-coverage
test-coverage: ## run test coverage
	./scripts/test/unit-with-coverage $(shell go list ./... | grep -vE '/vendor/|/e2e/|/e2eengine/')
	./scripts/test/unit-with-coverage $(shell go list ./... | grep -vE '/vendor/|/e2e/')
.PHONY: lint
lint: ## run all the lint tools

@@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\docker\cli
environment:
  GOPATH: c:\gopath
  GOVERSION: 1.10.3
  GOVERSION: 1.10.8
  DEPVERSION: v0.4.1
install:
circle.yml (20 changed lines)
@@ -16,9 +16,7 @@ jobs:
      - run:
          name: "Lint"
          command: |
            dockerfile=dockerfiles/Dockerfile.lint
            echo "COPY . ." >> $dockerfile
            docker build -f $dockerfile --tag cli-linter:$CIRCLE_BUILD_NUM .
            docker build -f dockerfiles/Dockerfile.lint --tag cli-linter:$CIRCLE_BUILD_NUM .
            docker run --rm cli-linter:$CIRCLE_BUILD_NUM
  cross:
@@ -34,9 +32,7 @@ jobs:
      - run:
          name: "Cross"
          command: |
            dockerfile=dockerfiles/Dockerfile.cross
            echo "COPY . ." >> $dockerfile
            docker build -f $dockerfile --tag cli-builder:$CIRCLE_BUILD_NUM .
            docker build -f dockerfiles/Dockerfile.cross --tag cli-builder:$CIRCLE_BUILD_NUM .
            name=cross-$CIRCLE_BUILD_NUM-$CIRCLE_NODE_INDEX
            docker run \
              -e CROSS_GROUP=$CIRCLE_NODE_INDEX \
@@ -60,9 +56,7 @@ jobs:
      - run:
          name: "Unit Test with Coverage"
          command: |
            dockerfile=dockerfiles/Dockerfile.dev
            echo "COPY . ." >> $dockerfile
            docker build -f $dockerfile --tag cli-builder:$CIRCLE_BUILD_NUM .
            docker build -f dockerfiles/Dockerfile.dev --tag cli-builder:$CIRCLE_BUILD_NUM .
            docker run --name \
              test-$CIRCLE_BUILD_NUM cli-builder:$CIRCLE_BUILD_NUM \
              make test-coverage
@@ -89,10 +83,8 @@ jobs:
      - run:
          name: "Validate Vendor, Docs, and Code Generation"
          command: |
            dockerfile=dockerfiles/Dockerfile.dev
            echo "COPY . ." >> $dockerfile
            rm -f .dockerignore # include .git
            docker build -f $dockerfile --tag cli-builder-with-git:$CIRCLE_BUILD_NUM .
            docker build -f dockerfiles/Dockerfile.dev --tag cli-builder-with-git:$CIRCLE_BUILD_NUM .
            docker run --rm cli-builder-with-git:$CIRCLE_BUILD_NUM \
              make ci-validate
  shellcheck:
@@ -107,9 +99,7 @@ jobs:
      - run:
          name: "Run shellcheck"
          command: |
            dockerfile=dockerfiles/Dockerfile.shellcheck
            echo "COPY . ." >> $dockerfile
            docker build -f $dockerfile --tag cli-validator:$CIRCLE_BUILD_NUM .
            docker build -f dockerfiles/Dockerfile.shellcheck --tag cli-validator:$CIRCLE_BUILD_NUM .
            docker run --rm cli-validator:$CIRCLE_BUILD_NUM \
              make shellcheck
workflows:
@@ -3,29 +3,94 @@ package builder
import (
	"context"
	"fmt"
	"strings"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/opts"
	"github.com/docker/docker/api/types"
	units "github.com/docker/go-units"
	"github.com/spf13/cobra"
)

type pruneOptions struct {
	force       bool
	all         bool
	filter      opts.FilterOpt
	keepStorage opts.MemBytes
}

// NewPruneCommand returns a new cobra prune command for images
func NewPruneCommand(dockerCli command.Cli) *cobra.Command {
	options := pruneOptions{filter: opts.NewFilterOpt()}

	cmd := &cobra.Command{
		Use:   "prune",
		Short: "Remove build cache",
		Args:  cli.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			report, err := dockerCli.Client().BuildCachePrune(context.Background())
			spaceReclaimed, output, err := runPrune(dockerCli, options)
			if err != nil {
				return err
			}
			fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(report.SpaceReclaimed)))
			if output != "" {
				fmt.Fprintln(dockerCli.Out(), output)
			}
			fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed)))
			return nil
		},
		Annotations: map[string]string{"version": "1.39"},
	}

	flags := cmd.Flags()
	flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
	flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
	flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'unused-for=24h')")
	flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")

	return cmd
}

const (
	normalWarning   = `WARNING! This will remove all dangling build cache. Are you sure you want to continue?`
	allCacheWarning = `WARNING! This will remove all build cache. Are you sure you want to continue?`
)

func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {
	pruneFilters := options.filter.Value()
	pruneFilters = command.PruneFilters(dockerCli, pruneFilters)

	warning := normalWarning
	if options.all {
		warning = allCacheWarning
	}
	if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
		return 0, "", nil
	}

	report, err := dockerCli.Client().BuildCachePrune(context.Background(), types.BuildCachePruneOptions{
		All:         options.all,
		KeepStorage: options.keepStorage.Value(),
		Filters:     pruneFilters,
	})
	if err != nil {
		return 0, "", err
	}

	if len(report.CachesDeleted) > 0 {
		var sb strings.Builder
		sb.WriteString("Deleted build cache objects:\n")
		for _, id := range report.CachesDeleted {
			sb.WriteString(id)
			sb.WriteByte('\n')
		}
		output = sb.String()
	}

	return report.SpaceReclaimed, output, nil
}

// CachePrune executes a prune command for build cache
func CachePrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
	return runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter})
}
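The new `builder prune` path above ultimately issues one `BuildCachePrune` API call whose options are wired to the `--all`, `--keep-storage`, and `--filter` flags. A minimal sketch of that call against a live daemon, assuming a docker/docker client at the API 1.39 level this change targets; the option struct and report fields come from the diff, the rest (filter value, keep-storage size) is illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	// Equivalent of `docker builder prune --all --keep-storage 1GB --filter unused-for=24h`.
	pruneFilters := filters.NewArgs()
	pruneFilters.Add("unused-for", "24h") // illustrative filter value

	report, err := cli.BuildCachePrune(context.Background(), types.BuildCachePruneOptions{
		All:         true,
		KeepStorage: 1024 * 1024 * 1024, // keep roughly 1 GiB of cache
		Filters:     pruneFilters,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("reclaimed bytes:", report.SpaceReclaimed)
	fmt.Println("deleted cache objects:", report.CachesDeleted)
}
```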
@@ -8,6 +8,7 @@ import (
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"time"

	"github.com/docker/cli/cli"
@@ -19,8 +20,8 @@ import (
	manifeststore "github.com/docker/cli/cli/manifest/store"
	registryclient "github.com/docker/cli/cli/registry/client"
	"github.com/docker/cli/cli/trust"
	"github.com/docker/cli/internal/containerizedengine"
	dopts "github.com/docker/cli/opts"
	clitypes "github.com/docker/cli/types"
	"github.com/docker/docker/api"
	"github.com/docker/docker/api/types"
	registrytypes "github.com/docker/docker/api/types/registry"
@@ -55,20 +56,21 @@ type Cli interface {
	ManifestStore() manifeststore.Store
	RegistryClient(bool) registryclient.RegistryClient
	ContentTrustEnabled() bool
	NewContainerizedEngineClient(sockPath string) (containerizedengine.Client, error)
	NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error)
}

// DockerCli is an instance the docker command line client.
// Instances of the client can be returned from NewDockerCli.
type DockerCli struct {
	configFile *configfile.ConfigFile
	in *InStream
	out *OutStream
	err io.Writer
	client client.APIClient
	serverInfo ServerInfo
	clientInfo ClientInfo
	contentTrust bool
	configFile *configfile.ConfigFile
	in *InStream
	out *OutStream
	err io.Writer
	client client.APIClient
	serverInfo ServerInfo
	clientInfo ClientInfo
	contentTrust bool
	newContainerizeClient func(string) (clitypes.ContainerizedClient, error)
}

// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified.
@@ -132,6 +134,20 @@ func (cli *DockerCli) ContentTrustEnabled() bool {
	return cli.contentTrust
}

// BuildKitEnabled returns whether buildkit is enabled either through a daemon setting
// or otherwise the client-side DOCKER_BUILDKIT environment variable
func BuildKitEnabled(si ServerInfo) (bool, error) {
	buildkitEnabled := si.BuildkitVersion == types.BuilderBuildKit
	if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" {
		var err error
		buildkitEnabled, err = strconv.ParseBool(buildkitEnv)
		if err != nil {
			return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
		}
	}
	return buildkitEnabled, nil
}

// ManifestStore returns a store for local manifests
func (cli *DockerCli) ManifestStore() manifeststore.Store {
	// TODO: support override default location from config file
@@ -233,8 +249,8 @@ func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions
}

// NewContainerizedEngineClient returns a containerized engine client
func (cli *DockerCli) NewContainerizedEngineClient(sockPath string) (containerizedengine.Client, error) {
	return containerizedengine.NewClient(sockPath)
func (cli *DockerCli) NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error) {
	return cli.newContainerizeClient(sockPath)
}

// ServerInfo stores details about the supported features and platform of the
@@ -252,27 +268,23 @@ type ClientInfo struct {
}

// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
func NewDockerCli(in io.ReadCloser, out, err io.Writer, isTrusted bool) *DockerCli {
	return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err, contentTrust: isTrusted}
func NewDockerCli(in io.ReadCloser, out, err io.Writer, isTrusted bool, containerizedFn func(string) (clitypes.ContainerizedClient, error)) *DockerCli {
	return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err, contentTrust: isTrusted, newContainerizeClient: containerizedFn}
}

// NewAPIClientFromFlags creates a new APIClient from command line flags
func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
	unparsedHost, err := getUnparsedServerHost(opts.Hosts)
	host, err := getServerHost(opts.Hosts, opts.TLSOptions)
	if err != nil {
		return &client.Client{}, err
	}
	var clientOpts []func(*client.Client) error
	helper, err := connhelper.GetConnectionHelper(unparsedHost)
	helper, err := connhelper.GetConnectionHelper(host)
	if err != nil {
		return &client.Client{}, err
	}
	if helper == nil {
		clientOpts = append(clientOpts, withHTTPClient(opts.TLSOptions))
		host, err := dopts.ParseHost(opts.TLSOptions != nil, unparsedHost)
		if err != nil {
			return &client.Client{}, err
		}
		clientOpts = append(clientOpts, client.WithHost(host))
	} else {
		clientOpts = append(clientOpts, func(c *client.Client) error {
@@ -305,7 +317,7 @@ func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.
	return client.NewClientWithOpts(clientOpts...)
}

func getUnparsedServerHost(hosts []string) (string, error) {
func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) {
	var host string
	switch len(hosts) {
	case 0:
@@ -315,7 +327,8 @@ func getUnparsedServerHost(hosts []string) {
	default:
		return "", errors.New("Please specify only one -H")
	}
	return host, nil

	return dopts.ParseHost(tlsOptions != nil, host)
}

func withHTTPClient(tlsOpts *tlsconfig.Options) func(*client.Client) error {
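The `BuildKitEnabled` helper added above lets the client-side `DOCKER_BUILDKIT` variable override whatever builder the daemon advertises. A hedged, dependency-free sketch of that precedence rule (the function and variable names other than the env var are illustrative, not the cli package's own):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// buildkitEnabled mirrors the override logic: start from the daemon's hint,
// then let DOCKER_BUILDKIT, if set, take precedence.
func buildkitEnabled(daemonSaysBuildKit bool) (bool, error) {
	enabled := daemonSaysBuildKit
	if v := os.Getenv("DOCKER_BUILDKIT"); v != "" {
		parsed, err := strconv.ParseBool(v)
		if err != nil {
			return false, fmt.Errorf("DOCKER_BUILDKIT environment variable expects boolean value: %v", err)
		}
		enabled = parsed
	}
	return enabled, nil
}

func main() {
	os.Setenv("DOCKER_BUILDKIT", "1")
	on, _ := buildkitEnabled(false)
	fmt.Println(on) // true: the env var overrides the daemon hint
}
```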
@@ -43,6 +43,26 @@ func TestNewAPIClientFromFlags(t *testing.T) {
	assert.Check(t, is.Equal(api.DefaultVersion, apiclient.ClientVersion()))
}

func TestNewAPIClientFromFlagsForDefaultSchema(t *testing.T) {
	host := ":2375"
	opts := &flags.CommonOptions{Hosts: []string{host}}
	configFile := &configfile.ConfigFile{
		HTTPHeaders: map[string]string{
			"My-Header": "Custom-Value",
		},
	}
	apiclient, err := NewAPIClientFromFlags(opts, configFile)
	assert.NilError(t, err)
	assert.Check(t, is.Equal("tcp://localhost"+host, apiclient.DaemonHost()))

	expectedHeaders := map[string]string{
		"My-Header":  "Custom-Value",
		"User-Agent": UserAgent(),
	}
	assert.Check(t, is.DeepEqual(expectedHeaders, apiclient.(*client.Client).CustomHTTPHeaders()))
	assert.Check(t, is.Equal(api.DefaultVersion, apiclient.ClientVersion()))
}

func TestNewAPIClientFromFlagsWithAPIVersionFromEnv(t *testing.T) {
	customVersion := "v3.3.3"
	defer env.Patch(t, "DOCKER_API_VERSION", customVersion)()
@@ -2,6 +2,7 @@ package commands

import (
	"os"
	"runtime"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/builder"
@@ -85,9 +86,6 @@ func AddCommands(cmd *cobra.Command, dockerCli command.Cli) {
		// volume
		volume.NewVolumeCommand(dockerCli),

		// engine
		engine.NewEngineCommand(dockerCli),

		// legacy commands may be hidden
		hide(system.NewEventsCommand(dockerCli)),
		hide(system.NewInfoCommand(dockerCli)),
@@ -124,7 +122,10 @@ func AddCommands(cmd *cobra.Command, dockerCli command.Cli) {
		hide(image.NewSaveCommand(dockerCli)),
		hide(image.NewTagCommand(dockerCli)),
	)

	if runtime.GOOS == "linux" {
		// engine
		cmd.AddCommand(engine.NewEngineCommand(dockerCli))
	}
}

func hide(cmd *cobra.Command) *cobra.Command {
@@ -40,7 +40,7 @@ func newConfigCreateCommand(dockerCli command.Cli) *cobra.Command {
	flags := cmd.Flags()
	flags.VarP(&createOpts.labels, "label", "l", "Config labels")
	flags.StringVar(&createOpts.templateDriver, "template-driver", "", "Template driver")
	flags.SetAnnotation("driver", "version", []string{"1.37"})
	flags.SetAnnotation("template-driver", "version", []string{"1.37"})

	return cmd
}
@@ -12,19 +12,24 @@ import (

type fakeClient struct {
	client.Client
	inspectFunc func(string) (types.ContainerJSON, error)
	execInspectFunc func(execID string) (types.ContainerExecInspect, error)
	execCreateFunc func(container string, config types.ExecConfig) (types.IDResponse, error)
	createContainerFunc func(config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error)
	containerStartFunc func(container string, options types.ContainerStartOptions) error
	imageCreateFunc func(parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
	infoFunc func() (types.Info, error)
	containerStatPathFunc func(container, path string) (types.ContainerPathStat, error)
	containerCopyFromFunc func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
	logFunc func(string, types.ContainerLogsOptions) (io.ReadCloser, error)
	waitFunc func(string) (<-chan container.ContainerWaitOKBody, <-chan error)
	containerListFunc func(types.ContainerListOptions) ([]types.Container, error)
	Version string
	inspectFunc func(string) (types.ContainerJSON, error)
	execInspectFunc func(execID string) (types.ContainerExecInspect, error)
	execCreateFunc func(container string, config types.ExecConfig) (types.IDResponse, error)
	createContainerFunc func(config *container.Config,
		hostConfig *container.HostConfig,
		networkingConfig *network.NetworkingConfig,
		containerName string) (container.ContainerCreateCreatedBody, error)
	containerStartFunc func(container string, options types.ContainerStartOptions) error
	imageCreateFunc func(parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
	infoFunc func() (types.Info, error)
	containerStatPathFunc func(container, path string) (types.ContainerPathStat, error)
	containerCopyFromFunc func(container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
	logFunc func(string, types.ContainerLogsOptions) (io.ReadCloser, error)
	waitFunc func(string) (<-chan container.ContainerWaitOKBody, <-chan error)
	containerListFunc func(types.ContainerListOptions) ([]types.Container, error)
	containerExportFunc func(string) (io.ReadCloser, error)
	containerExecResizeFunc func(id string, options types.ResizeOptions) error
	Version string
}

func (f *fakeClient) ContainerList(_ context.Context, options types.ContainerListOptions) ([]types.Container, error) {
@@ -124,3 +129,17 @@ func (f *fakeClient) ContainerStart(_ context.Context, container string, options
	}
	return nil
}

func (f *fakeClient) ContainerExport(_ context.Context, container string) (io.ReadCloser, error) {
	if f.containerExportFunc != nil {
		return f.containerExportFunc(container)
	}
	return nil, nil
}

func (f *fakeClient) ContainerExecResize(_ context.Context, id string, options types.ResizeOptions) error {
	if f.containerExecResizeFunc != nil {
		return f.containerExecResizeFunc(id, options)
	}
	return nil
}
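The fake client above follows the usual function-field test-double pattern: each optional hook overrides a single API call, so every test stubs only what it exercises. A self-contained sketch of the same pattern (the type and field names here are illustrative, not the cli package's own):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// fakeClient overrides ContainerExport only when the hook is set.
type fakeClient struct {
	containerExportFunc func(container string) (io.ReadCloser, error)
}

func (f *fakeClient) ContainerExport(container string) (io.ReadCloser, error) {
	if f.containerExportFunc != nil {
		return f.containerExportFunc(container)
	}
	return nil, nil // default: do nothing, like the real fake
}

func main() {
	f := &fakeClient{
		containerExportFunc: func(string) (io.ReadCloser, error) {
			return io.NopCloser(strings.NewReader("bar")), nil
		},
	}
	rc, _ := f.ContainerExport("some-container")
	data, _ := io.ReadAll(rc)
	fmt.Println(string(data)) // bar
}
```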
cli/command/container/export_test.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package container

import (
	"io"
	"io/ioutil"
	"strings"
	"testing"

	"github.com/docker/cli/internal/test"
	"gotest.tools/assert"
	"gotest.tools/fs"
)

func TestContainerExportOutputToFile(t *testing.T) {
	dir := fs.NewDir(t, "export-test")
	defer dir.Remove()

	cli := test.NewFakeCli(&fakeClient{
		containerExportFunc: func(container string) (io.ReadCloser, error) {
			return ioutil.NopCloser(strings.NewReader("bar")), nil
		},
	})
	cmd := NewExportCommand(cli)
	cmd.SetOutput(ioutil.Discard)
	cmd.SetArgs([]string{"-o", dir.Join("foo"), "container"})
	assert.NilError(t, cmd.Execute())

	expected := fs.Expected(t,
		fs.WithFile("foo", "bar", fs.MatchAnyFileMode),
	)

	assert.Assert(t, fs.Equal(dir.Path(), expected))
}
@@ -73,6 +73,6 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint6

// RunPrune calls the Container Prune API
// This returns the amount of space reclaimed and a detailed output string
func RunPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) {
func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
	return runPrune(dockerCli, pruneOptions{force: true, filter: filter})
}
@@ -16,9 +16,9 @@ import (
)

// resizeTtyTo resizes tty to specific height and width
func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) {
func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) error {
	if height == 0 && width == 0 {
		return
		return nil
	}

	options := types.ResizeOptions{
@@ -34,19 +34,42 @@ func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id strin
	}

	if err != nil {
		logrus.Debugf("Error resize: %s", err)
		logrus.Debugf("Error resize: %s\r", err)
	}
	return err
}

// resizeTty is to resize the tty with cli out's tty size
func resizeTty(ctx context.Context, cli command.Cli, id string, isExec bool) error {
	height, width := cli.Out().GetTtySize()
	return resizeTtyTo(ctx, cli.Client(), id, height, width, isExec)
}

// initTtySize is to init the tty's size to the same as the window, if there is an error, it will retry 5 times.
func initTtySize(ctx context.Context, cli command.Cli, id string, isExec bool, resizeTtyFunc func(ctx context.Context, cli command.Cli, id string, isExec bool) error) {
	rttyFunc := resizeTtyFunc
	if rttyFunc == nil {
		rttyFunc = resizeTty
	}
	if err := rttyFunc(ctx, cli, id, isExec); err != nil {
		go func() {
			var err error
			for retry := 0; retry < 5; retry++ {
				time.Sleep(10 * time.Millisecond)
				if err = rttyFunc(ctx, cli, id, isExec); err == nil {
					break
				}
			}
			if err != nil {
				fmt.Fprintln(cli.Err(), "failed to resize tty, using default size")
			}
		}()
	}
}

// MonitorTtySize updates the container tty size when the terminal tty changes size
func MonitorTtySize(ctx context.Context, cli command.Cli, id string, isExec bool) error {
	resizeTty := func() {
		height, width := cli.Out().GetTtySize()
		resizeTtyTo(ctx, cli.Client(), id, height, width, isExec)
	}

	resizeTty()

	initTtySize(ctx, cli, id, isExec, resizeTty)
	if runtime.GOOS == "windows" {
		go func() {
			prevH, prevW := cli.Out().GetTtySize()
@@ -55,7 +78,7 @@ func MonitorTtySize(ctx context.Context, cli command.Cli, id string, isExec bool
				h, w := cli.Out().GetTtySize()

				if prevW != w || prevH != h {
					resizeTty()
					resizeTty(ctx, cli, id, isExec)
				}
				prevH = h
				prevW = w
@@ -66,7 +89,7 @@ func MonitorTtySize(ctx context.Context, cli command.Cli, id string, isExec bool
		gosignal.Notify(sigchan, signal.SIGWINCH)
		go func() {
			for range sigchan {
				resizeTty()
				resizeTty(ctx, cli, id, isExec)
			}
		}()
	}
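`initTtySize` above turns the old fire-and-forget resize into a bounded retry: one synchronous attempt, then up to five background retries ten milliseconds apart before warning that the default size will be used. A standalone sketch of that retry shape, with illustrative names:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// initTTYSize tries once, then retries up to five times in the background.
func initTTYSize(resize func() error) {
	if err := resize(); err == nil {
		return
	}
	go func() {
		for retry := 0; retry < 5; retry++ {
			time.Sleep(10 * time.Millisecond)
			if err := resize(); err == nil {
				return
			}
		}
		fmt.Println("failed to resize tty, using default size")
	}()
}

func main() {
	attempts := 0
	initTTYSize(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready") // fail the first couple of attempts
		}
		return nil
	})
	// Give the background retries time to run, as the new unit test does.
	time.Sleep(100 * time.Millisecond)
	fmt.Println("attempts:", attempts) // 3
}
```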
cli/command/container/tty_test.go (new file, 30 lines)
@@ -0,0 +1,30 @@
package container

import (
	"context"
	"testing"
	"time"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/internal/test"
	"github.com/docker/docker/api/types"
	"github.com/pkg/errors"
	"gotest.tools/assert"
	is "gotest.tools/assert/cmp"
)

func TestInitTtySizeErrors(t *testing.T) {
	expectedError := "failed to resize tty, using default size\n"
	fakeContainerExecResizeFunc := func(id string, options types.ResizeOptions) error {
		return errors.Errorf("Error response from daemon: no such exec")
	}
	fakeResizeTtyFunc := func(ctx context.Context, cli command.Cli, id string, isExec bool) error {
		height, width := uint(1024), uint(768)
		return resizeTtyTo(ctx, cli.Client(), id, height, width, isExec)
	}
	ctx := context.Background()
	cli := test.NewFakeCli(&fakeClient{containerExecResizeFunc: fakeContainerExecResizeFunc})
	initTtySize(ctx, cli, "8mm8nn8tt8bb", true, fakeResizeTtyFunc)
	time.Sleep(100 * time.Millisecond)
	assert.Check(t, is.Equal(expectedError, cli.ErrBuffer().String()))
}
@@ -3,11 +3,12 @@ package engine
import (
	"context"
	"fmt"
	"strings"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/cli/internal/containerizedengine"
	"github.com/docker/cli/internal/licenseutils"
	clitypes "github.com/docker/cli/types"
	"github.com/docker/docker/api/types"
	"github.com/docker/licensing/model"
	"github.com/pkg/errors"
@@ -15,19 +16,21 @@ import (
)

type activateOptions struct {
	licenseFile string
	version string
	registryPrefix string
	format string
	image string
	quiet bool
	displayOnly bool
	sockPath string
	licenseFile string
	version string
	registryPrefix string
	format string
	image string
	quiet bool
	displayOnly bool
	sockPath string
	licenseLoginFunc func(ctx context.Context, authConfig *types.AuthConfig) (licenseutils.HubUser, error)
}

// newActivateCommand creates a new `docker engine activate` command
func newActivateCommand(dockerCli command.Cli) *cobra.Command {
	var options activateOptions
	options.licenseLoginFunc = licenseutils.Login

	cmd := &cobra.Command{
		Use: "activate [OPTIONS]",
@@ -56,10 +59,10 @@ https://hub.docker.com/ then specify the file with the '--license' flag.

	flags.StringVar(&options.licenseFile, "license", "", "License File")
	flags.StringVar(&options.version, "version", "", "Specify engine version (default is to use currently running version)")
	flags.StringVar(&options.registryPrefix, "registry-prefix", "docker.io/docker", "Override the default location where engine images are pulled")
	flags.StringVar(&options.image, "engine-image", containerizedengine.EnterpriseEngineImage, "Specify engine image")
	flags.StringVar(&options.registryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the default location where engine images are pulled")
	flags.StringVar(&options.image, "engine-image", "", "Specify engine image")
	flags.StringVar(&options.format, "format", "", "Pretty-print licenses using a Go template")
	flags.BoolVar(&options.displayOnly, "display-only", false, "only display the available licenses and exit")
	flags.BoolVar(&options.displayOnly, "display-only", false, "only display license information and exit")
	flags.BoolVar(&options.quiet, "quiet", false, "Only display available licenses by ID")
	flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")

@@ -67,6 +70,9 @@ https://hub.docker.com/ then specify the file with the '--license' flag.
}

func runActivate(cli command.Cli, options activateOptions) error {
	if !isRoot() {
		return errors.New("this command must be run as a privileged user")
	}
	ctx := context.Background()
	client, err := cli.NewContainerizedEngineClient(options.sockPath)
	if err != nil {
@@ -94,26 +100,48 @@ func runActivate(cli command.Cli, options activateOptions) error {
			return err
		}
	}
	if err = licenseutils.ApplyLicense(ctx, cli.Client(), license); err != nil {
	summary, err := licenseutils.GetLicenseSummary(ctx, *license)
	if err != nil {
		return err
	}
	fmt.Fprintf(cli.Out(), "License: %s\n", summary)
	if options.displayOnly {
		return nil
	}
	dclient := cli.Client()
	if err = licenseutils.ApplyLicense(ctx, dclient, license); err != nil {
		return err
	}

	opts := containerizedengine.EngineInitOptions{
	// Short circuit if the user didn't specify a version and we're already running enterprise
	if options.version == "" {
		serverVersion, err := dclient.ServerVersion(ctx)
		if err != nil {
			return err
		}
		if strings.Contains(strings.ToLower(serverVersion.Platform.Name), "enterprise") {
			fmt.Fprintln(cli.Out(), "Successfully activated engine license on existing enterprise engine.")
			return nil
		}
		options.version = serverVersion.Version
	}

	opts := clitypes.EngineInitOptions{
		RegistryPrefix: options.registryPrefix,
		EngineImage: options.image,
		EngineVersion: options.version,
	}

	return client.ActivateEngine(ctx, opts, cli.Out(), authConfig,
		func(ctx context.Context) error {
			client := cli.Client()
			_, err := client.Ping(ctx)
			return err
		})
	if err := client.ActivateEngine(ctx, opts, cli.Out(), authConfig); err != nil {
		return err
	}
	fmt.Fprintln(cli.Out(), `Successfully activated engine.
Restart docker with 'systemctl restart docker' to complete the activation.`)
	return nil
}

func getLicenses(ctx context.Context, authConfig *types.AuthConfig, cli command.Cli, options activateOptions) (*model.IssuedLicense, error) {
	user, err := licenseutils.Login(ctx, authConfig)
	user, err := options.licenseLoginFunc(ctx, authConfig)
	if err != nil {
		return nil, err
	}
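The reworked `runActivate` now asks the daemon for its version when `--version` is omitted and short-circuits when the reported platform name already contains "enterprise". A minimal sketch of that probe using the public client API (it assumes a daemon new enough to populate `Platform.Name`; everything outside the two fields read from `ServerVersion` is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	sv, err := cli.ServerVersion(context.Background())
	if err != nil {
		panic(err)
	}
	// Same check as the short circuit in runActivate.
	if strings.Contains(strings.ToLower(sv.Platform.Name), "enterprise") {
		fmt.Println("already running an enterprise engine; only the license needs to be applied")
		return
	}
	fmt.Println("would activate engine at version", sv.Version)
}
```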
@@ -1,19 +1,35 @@
package engine

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/docker/cli/internal/containerizedengine"
	"github.com/docker/cli/internal/licenseutils"
	"github.com/docker/cli/internal/test"
	clitypes "github.com/docker/cli/types"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/licensing"
	"github.com/docker/licensing/model"
	"gotest.tools/assert"
	"gotest.tools/fs"
	"gotest.tools/golden"
)

const (
	// nolint: lll
expiredLicense = `{"key_id":"irlYm3b9fdD8hMUXjazF39im7VQSSbAm9tfHK8cKUxJt","private_key":"aH5tTRDAVJpCRS2CRetTQVXIKgWUPfoCHODhDvNPvAbz","authorization":"ewogICAicGF5bG9hZCI6ICJleUpsZUhCcGNtRjBhVzl1SWpvaU1qQXhPQzB3TXkweE9GUXdOem93TURvd01Gb2lMQ0owYjJ0bGJpSTZJbkZtTVMxMlVtRmtialp5YjFaMldXdHJlVXN4VFdKMGNGUmpXR1ozVjA4MVRWZFFTM2cwUnpJd2NIYzlJaXdpYldGNFJXNW5hVzVsY3lJNk1Td2ljMk5oYm01cGJtZEZibUZpYkdWa0lqcDBjblZsTENKc2FXTmxibk5sVkhsd1pTSTZJazltWm14cGJtVWlMQ0owYVdWeUlqb2lVSEp2WkhWamRHbHZiaUo5IiwKICAgInNpZ25hdHVyZXMiOiBbCiAgICAgIHsKICAgICAgICAgImhlYWRlciI6IHsKICAgICAgICAgICAgImp3ayI6IHsKICAgICAgICAgICAgICAgImUiOiAiQVFBQiIsCiAgICAgICAgICAgICAgICJrZXlJRCI6ICJKN0xEOjY3VlI6TDVIWjpVN0JBOjJPNEc6NEFMMzpPRjJOOkpIR0I6RUZUSDo1Q1ZROk1GRU86QUVJVCIsCiAgICAgICAgICAgICAgICJraWQiOiAiSjdMRDo2N1ZSOkw1SFo6VTdCQToyTzRHOjRBTDM6T0YyTjpKSEdCOkVGVEg6NUNWUTpNRkVPOkFFSVQiLAogICAgICAgICAgICAgICAia3R5IjogIlJTQSIsCiAgICAgICAgICAgICAgICJuIjogInlkSXktbFU3bzdQY2VZLTQtcy1DUTVPRWdDeUY4Q3hJY1FJV3VLODRwSWlaY2lZNjczMHlDWW53TFNLVGx3LVU2VUNfUVJlV1Jpb01OTkU1RHM1VFlFWGJHRzZvbG0ycWRXYkJ3Y0NnLTJVVUhfT2NCOVd1UDZnUlBIcE1GTXN4RHpXd3ZheThKVXVIZ1lVTFVwbTFJdi1tcTdscDVuUV9SeHJUMEtaUkFRVFlMRU1FZkd3bTNoTU9fZ2VMUFMtaGdLUHRJSGxrZzZfV2NveFRHb0tQNzlkX3dhSFl4R05sN1doU25laUJTeGJwYlFBS2syMWxnNzk4WGI3dlp5RUFURE1yUlI5TWVFNkFkajVISnBZM0NveVJBUENtYUtHUkNLNHVvWlNvSXUwaEZWbEtVUHliYncwMDBHTy13YTJLTjhVd2dJSW0waTVJMXVXOUdrcTR6akJ5NXpoZ3F1VVhiRzliV1BBT1lycTVRYTgxRHhHY0JsSnlIWUFwLUREUEU5VEdnNHpZbVhqSm54WnFIRWR1R3FkZXZaOFhNSTB1a2ZrR0lJMTR3VU9pTUlJSXJYbEVjQmZfNDZJOGdRV0R6eHljWmVfSkdYLUxBdWF5WHJ5clVGZWhWTlVkWlVsOXdYTmFKQi1rYUNxejVRd2FSOTNzR3ctUVNmdEQwTnZMZTdDeU9ILUU2dmc2U3RfTmVUdmd2OFluaENpWElsWjhIT2ZJd05lN3RFRl9VY3o1T2JQeWttM3R5bHJOVWp0MFZ5QW10dGFjVkkyaUdpaGNVUHJtazRsVklaN1ZEX0xTVy1pN3lvU3VydHBzUFhjZTJwS0RJbzMwbEpHaE9fM0tVbWwyU1VaQ3F6SjF5RW1LcHlzSDVIRFc5Y3NJRkNBM2RlQWpmWlV2TjdVIgogICAgICAgICAgICB9LAogICAgICAgICAgICAiYWxnIjogIlJTMjU2IgogICAgICAgICB9LAogICAgICAgICAic2lnbmF0dXJlIjogIm5saTZIdzRrbW5KcTBSUmRXaGVfbkhZS2VJLVpKenM1U0d5SUpDakh1dWtnVzhBYklpVzFZYWJJR2NqWUt0QTY4dWN6T1hyUXZreGxWQXJLSlgzMDJzN0RpbzcxTlNPRzJVcnhsSjlibDFpd0F3a3ZyTEQ2T0p5MGxGLVg4WnRabXhPVmNQZmwzcmJwZFQ0dnlnWTdNcU1QRXdmb0IxTmlWZDYyZ1cxU2NSREZZcWw3R0FVaFVKNkp4QU15VzVaOXl5YVE0NV8wd0RMUk5mRjA5YWNXeVowTjRxVS1hZjhrUTZUUWZUX05ERzNCR3pRb2V3cHlEajRiMFBHb0diOFhLdDlwekpFdEdxM3lQM25VMFFBbk90a2gwTnZac1l1UFcyUnhDT3lRNEYzVlR3UkF2eF9HSTZrMVRpYmlKNnByUWluUy16Sjh6RE8zUjBuakE3OFBwNXcxcVpaUE9BdmtzZFNSYzJDcVMtcWhpTmF5YUhOVHpVNnpyOXlOZHR2S0o1QjNST0FmNUtjYXNiWURjTnVpeXBUNk90LUtqQ2I1dmYtWVpnc2FRNzJBdFBhSU4yeUpNREZHbmEwM0hpSjMxcTJRUlp5eTZrd3RYaGtwcDhTdEdIcHYxSWRaV09SVWttb0g5SFBzSGk4SExRLTZlM0tEY2x1RUQyMTNpZnljaVhtN0YzdHdaTTNHeDd1UXR1SldHaUlTZ2Z0QW9lVjZfUmI2VThkMmZxNzZuWHYxak5nckRRcE5waEZFd2tCdGRtZHZ2THByZVVYX3BWangza1AxN3pWbXFKNmNOOWkwWUc4WHg2VmRzcUxsRXUxQ2Rhd3Q0eko1M3VHMFlKTjRnUDZwc25yUS1uM0U1aFdlMDJ3d3dBZ3F3bGlPdmd4V1RTeXJyLXY2eDI0IiwKICAgICAgICAgInByb3RlY3RlZCI6ICJleUptYjNKdFlYUk1aVzVuZEdnaU9qRTNNeXdpWm05eWJXRjBWR0ZwYkNJNkltWlJJaXdpZEdsdFpTSTZJakl3TVRjdE1EVXRNRFZVTWpFNk5UYzZNek5hSW4wIgogICAgICB9CiAgIF0KfQ=="}`
)

func TestActivateNoContainerd(t *testing.T) {
	testCli.SetContainerizedEngineClient(
		func(string) (containerizedengine.Client, error) {
		func(string) (clitypes.ContainerizedClient, error) {
			return nil, fmt.Errorf("some error")
		},
	)
	isRoot = func() bool { return true }
	cmd := newActivateCommand(testCli)
	cmd.Flags().Set("license", "invalidpath")
	cmd.SilenceUsage = true
@@ -24,10 +40,11 @@ func TestActivateNoContainerd(t *testing.T) {

func TestActivateBadLicense(t *testing.T) {
	testCli.SetContainerizedEngineClient(
		func(string) (containerizedengine.Client, error) {
		func(string) (clitypes.ContainerizedClient, error) {
			return &fakeContainerizedEngineClient{}, nil
		},
	)
	isRoot = func() bool { return true }
	cmd := newActivateCommand(testCli)
	cmd.SilenceUsage = true
	cmd.SilenceErrors = true
@@ -35,3 +52,95 @@ func TestActivateBadLicense(t *testing.T) {
	err := cmd.Execute()
	assert.Error(t, err, "open invalidpath: no such file or directory")
}

func TestActivateExpiredLicenseDryRun(t *testing.T) {
	dir := fs.NewDir(t, "license", fs.WithFile("docker.lic", expiredLicense, fs.WithMode(0644)))
	defer dir.Remove()
	filename := dir.Join("docker.lic")
	isRoot = func() bool { return true }
	c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
	c.SetContainerizedEngineClient(
		func(string) (clitypes.ContainerizedClient, error) {
			return &fakeContainerizedEngineClient{}, nil
		},
	)
	cmd := newActivateCommand(c)
	cmd.SilenceUsage = true
	cmd.SilenceErrors = true
	cmd.Flags().Set("license", filename)
	cmd.Flags().Set("display-only", "true")
	c.OutBuffer().Reset()
	err := cmd.Execute()
	assert.NilError(t, err)
	golden.Assert(t, c.OutBuffer().String(), "expired-license-display-only.golden")
}

type mockLicenseClient struct{}

func (c mockLicenseClient) LoginViaAuth(ctx context.Context, username, password string) (authToken string, err error) {
	return "", fmt.Errorf("not implemented")
}

func (c mockLicenseClient) GetHubUserOrgs(ctx context.Context, authToken string) (orgs []model.Org, err error) {
	return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) GetHubUserByName(ctx context.Context, username string) (user *model.User, err error) {
	return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) VerifyLicense(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) {
	return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error) {
	return "", fmt.Errorf("not implemented")
}
func (c mockLicenseClient) ListSubscriptions(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
	expires := time.Date(2010, time.January, 1, 0, 0, 0, 0, time.UTC)
	return []*model.Subscription{
		{
			State:   "active",
			Expires: &expires,
		},
	}, nil
}
func (c mockLicenseClient) ListSubscriptionsDetails(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error) {
	return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) DownloadLicenseFromHub(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error) {
	return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) ParseLicense(license []byte) (parsedLicense *model.IssuedLicense, err error) {
	return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) StoreLicense(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error {
	return fmt.Errorf("not implemented")
}
func (c mockLicenseClient) LoadLocalLicense(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error) {
	return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) SummarizeLicense(res *model.CheckResponse, keyID string) *model.Subscription {
	return nil
}
func TestActivateDisplayOnlyHub(t *testing.T) {
	isRoot = func() bool { return true }
	c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
	c.SetContainerizedEngineClient(
		func(string) (clitypes.ContainerizedClient, error) {
			return &fakeContainerizedEngineClient{}, nil
		},
	)

	hubUser := licenseutils.HubUser{
		Client: mockLicenseClient{},
	}
	options := activateOptions{
		licenseLoginFunc: func(ctx context.Context, authConfig *types.AuthConfig) (licenseutils.HubUser, error) {
			return hubUser, nil
		},
		displayOnly: true,
	}
	c.OutBuffer().Reset()
	err := runActivate(c, options)

	assert.NilError(t, err)
	golden.Assert(t, c.OutBuffer().String(), "expired-hub-license-display-only.golden")
}
cli/command/engine/activate_unix.go (new file, 13 lines)
@@ -0,0 +1,13 @@
// +build !windows

package engine

import (
	"golang.org/x/sys/unix"
)

var (
	isRoot = func() bool {
		return unix.Geteuid() == 0
	}
)
cli/command/engine/activate_windows.go (new file, 9 lines)
@@ -0,0 +1,9 @@
// +build windows

package engine

var (
	isRoot = func() bool {
		return true
	}
)
@@ -5,6 +5,7 @@ import (

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/trust"
	clitypes "github.com/docker/cli/types"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	registrytypes "github.com/docker/docker/api/types/registry"
@@ -13,7 +14,7 @@ import (

func getRegistryAuth(cli command.Cli, registryPrefix string) (*types.AuthConfig, error) {
	if registryPrefix == "" {
		registryPrefix = "docker.io/docker"
		registryPrefix = clitypes.RegistryPrefix
	}
	distributionRef, err := reference.ParseNormalizedNamed(registryPrefix)
	if err != nil {
@@ -7,18 +7,16 @@ import (
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/cli/internal/containerizedengine"
	"github.com/docker/cli/internal/versions"
	clitypes "github.com/docker/cli/types"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

const (
	releaseNotePrefix = "https://docs.docker.com/releasenotes"
)

type checkOptions struct {
	registryPrefix string
	preReleases bool
	engineImage string
	downgrades bool
	upgrades bool
	format string
@@ -38,9 +36,10 @@ func newCheckForUpdatesCommand(dockerCli command.Cli) *cobra.Command {
		},
	}
	flags := cmd.Flags()
	flags.StringVar(&options.registryPrefix, "registry-prefix", "", "Override the existing location where engine images are pulled")
	flags.StringVar(&options.registryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the existing location where engine images are pulled")
	flags.BoolVar(&options.downgrades, "downgrades", false, "Report downgrades (default omits older versions)")
	flags.BoolVar(&options.preReleases, "pre-releases", false, "Include pre-release versions")
	flags.StringVar(&options.engineImage, "engine-image", "", "Specify engine image (default uses the same image as currently running)")
	flags.BoolVar(&options.upgrades, "upgrades", true, "Report available upgrades")
	flags.StringVar(&options.format, "format", "", "Pretty-print updates using a Go template")
	flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display available versions")
@@ -50,54 +49,47 @@ func newCheckForUpdatesCommand(dockerCli command.Cli) *cobra.Command {
}

func runCheck(dockerCli command.Cli, options checkOptions) error {
	if !isRoot() {
		return errors.New("this command must be run as a privileged user")
	}
	ctx := context.Background()
	client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
	if err != nil {
		return errors.Wrap(err, "unable to access local containerd")
	}
	defer client.Close()
	currentOpts, err := client.GetCurrentEngineVersion(ctx)
	client := dockerCli.Client()
	serverVersion, err := client.ServerVersion(ctx)
	if err != nil {
		return err
	}

	// override with user provided prefix if specified
	if options.registryPrefix != "" {
		currentOpts.RegistryPrefix = options.registryPrefix
	}
	imageName := currentOpts.RegistryPrefix + "/" + currentOpts.EngineImage
	currentVersion := currentOpts.EngineVersion
	versions, err := client.GetEngineVersions(ctx, dockerCli.RegistryClient(false), currentVersion, imageName)
	availVersions, err := versions.GetEngineVersions(ctx, dockerCli.RegistryClient(false), options.registryPrefix, options.engineImage, serverVersion.Version)
	if err != nil {
		return err
	}

	availUpdates := []containerizedengine.Update{
		{Type: "current", Version: currentVersion},
	availUpdates := []clitypes.Update{
		{Type: "current", Version: serverVersion.Version},
	}
	if len(versions.Patches) > 0 {
	if len(availVersions.Patches) > 0 {
		availUpdates = append(availUpdates,
			processVersions(
				currentVersion,
				serverVersion.Version,
				"patch",
				options.preReleases,
				versions.Patches)...)
				availVersions.Patches)...)
	}
	if options.upgrades {
		availUpdates = append(availUpdates,
			processVersions(
				currentVersion,
				serverVersion.Version,
				"upgrade",
				options.preReleases,
				versions.Upgrades)...)
				availVersions.Upgrades)...)
	}
	if options.downgrades {
		availUpdates = append(availUpdates,
			processVersions(
				currentVersion,
				serverVersion.Version,
				"downgrade",
				options.preReleases,
				versions.Downgrades)...)
				availVersions.Downgrades)...)
	}

	format := options.format
@@ -115,17 +107,17 @@ func runCheck(dockerCli command.Cli, options checkOptions) error {

func processVersions(currentVersion, verType string,
	includePrerelease bool,
	versions []containerizedengine.DockerVersion) []containerizedengine.Update {
	availUpdates := []containerizedengine.Update{}
	for _, ver := range versions {
	availVersions []clitypes.DockerVersion) []clitypes.Update {
	availUpdates := []clitypes.Update{}
	for _, ver := range availVersions {
		if !includePrerelease && ver.Prerelease() != "" {
			continue
		}
		if ver.Tag != currentVersion {
			availUpdates = append(availUpdates, containerizedengine.Update{
			availUpdates = append(availUpdates, clitypes.Update{
				Type:    verType,
				Version: ver.Tag,
				Notes:   fmt.Sprintf("%s/%s", releaseNotePrefix, ver.Tag),
				Notes:   fmt.Sprintf("%s?%s", clitypes.ReleaseNotePrefix, ver.Tag),
			})
		}
	}
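`runCheck` now reads the current version from `ServerVersion` and delegates tag discovery to the internal `versions` package, while `processVersions` filters pre-releases and labels each remaining tag as a patch, upgrade, or downgrade. The bucketing itself is not visible in this diff; the sketch below only illustrates, under that assumption, the kind of `hashicorp/go-version` comparisons involved (all names here are illustrative):

```go
package main

import (
	"fmt"

	ver "github.com/hashicorp/go-version"
)

func main() {
	current, _ := ver.NewVersion("1.1.0")
	for _, tag := range []string{"1.0.2", "1.1.2", "1.1.3-beta1", "2.0.0"} {
		v, err := ver.NewVersion(tag)
		if err != nil {
			continue // skip tags that are not parseable versions
		}
		switch {
		case v.Prerelease() != "":
			fmt.Println(tag, "=> pre-release (dropped unless --pre-releases is set)")
		case v.Segments()[0] == current.Segments()[0] && v.Segments()[1] == current.Segments()[1] && v.GreaterThan(current):
			fmt.Println(tag, "=> patch")
		case v.GreaterThan(current):
			fmt.Println(tag, "=> upgrade")
		case v.LessThan(current):
			fmt.Println(tag, "=> downgrade")
		}
	}
}
```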
@@ -5,11 +5,13 @@ import (
	"fmt"
	"testing"

	registryclient "github.com/docker/cli/cli/registry/client"
	"github.com/docker/cli/internal/containerizedengine"
	manifesttypes "github.com/docker/cli/cli/manifest/types"
	"github.com/docker/cli/internal/test"
	"github.com/docker/distribution"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	ver "github.com/hashicorp/go-version"
	"github.com/opencontainers/go-digest"
	"gotest.tools/assert"
	"gotest.tools/golden"
)
@@ -18,126 +20,95 @@ var (
	testCli = test.NewFakeCli(&client.Client{})
)

func TestCheckForUpdatesNoContainerd(t *testing.T) {
	testCli.SetContainerizedEngineClient(
		func(string) (containerizedengine.Client, error) {
			return nil, fmt.Errorf("some error")
		},
	)
	cmd := newCheckForUpdatesCommand(testCli)
	cmd.SilenceUsage = true
	cmd.SilenceErrors = true
	err := cmd.Execute()
	assert.ErrorContains(t, err, "unable to access local containerd")
type verClient struct {
	client.Client
	ver     types.Version
	verErr  error
	info    types.Info
	infoErr error
}

func (c *verClient) ServerVersion(ctx context.Context) (types.Version, error) {
	return c.ver, c.verErr
}

func (c *verClient) Info(ctx context.Context) (types.Info, error) {
	return c.info, c.infoErr
}

type testRegistryClient struct {
	tags []string
}

func (c testRegistryClient) GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) {
	return manifesttypes.ImageManifest{}, nil
}
func (c testRegistryClient) GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) {
	return nil, nil
}
func (c testRegistryClient) MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error {
	return nil
}

func (c testRegistryClient) PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error) {
	return "", nil
}
func (c testRegistryClient) GetTags(ctx context.Context, ref reference.Named) ([]string, error) {
	return c.tags, nil
}

func TestCheckForUpdatesNoCurrentVersion(t *testing.T) {
	retErr := fmt.Errorf("some failure")
	getCurrentEngineVersionFunc := func(ctx context.Context) (containerizedengine.EngineInitOptions, error) {
		return containerizedengine.EngineInitOptions{}, retErr
	}
	testCli.SetContainerizedEngineClient(
		func(string) (containerizedengine.Client, error) {
			return &fakeContainerizedEngineClient{
				getCurrentEngineVersionFunc: getCurrentEngineVersionFunc,
			}, nil
		},
	)
	cmd := newCheckForUpdatesCommand(testCli)
	isRoot = func() bool { return true }
	c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
	c.SetRegistryClient(testRegistryClient{})
	cmd := newCheckForUpdatesCommand(c)
	cmd.SilenceUsage = true
	cmd.SilenceErrors = true
	err := cmd.Execute()
	assert.Assert(t, err == retErr)
}

func TestCheckForUpdatesGetEngineVersionsFail(t *testing.T) {
	retErr := fmt.Errorf("some failure")
	getEngineVersionsFunc := func(ctx context.Context,
		registryClient registryclient.RegistryClient,
		currentVersion, imageName string) (containerizedengine.AvailableVersions, error) {
		return containerizedengine.AvailableVersions{}, retErr
	}
	testCli.SetContainerizedEngineClient(
		func(string) (containerizedengine.Client, error) {
			return &fakeContainerizedEngineClient{
				getEngineVersionsFunc: getEngineVersionsFunc,
			}, nil
		},
	)
	cmd := newCheckForUpdatesCommand(testCli)
	cmd.SilenceUsage = true
	cmd.SilenceErrors = true
	err := cmd.Execute()
	assert.Assert(t, err == retErr)
	assert.ErrorContains(t, err, "no such file or directory")
}

func TestCheckForUpdatesGetEngineVersionsHappy(t *testing.T) {
	getCurrentEngineVersionFunc := func(ctx context.Context) (containerizedengine.EngineInitOptions, error) {
		return containerizedengine.EngineInitOptions{
			EngineImage:   "current engine",
			EngineVersion: "1.1.0",
		}, nil
	}
	getEngineVersionsFunc := func(ctx context.Context,
		registryClient registryclient.RegistryClient,
		currentVersion, imageName string) (containerizedengine.AvailableVersions, error) {
		return containerizedengine.AvailableVersions{
			Downgrades: parseVersions(t, "1.0.1", "1.0.2", "1.0.3-beta1"),
			Patches:    parseVersions(t, "1.1.1", "1.1.2", "1.1.3-beta1"),
			Upgrades:   parseVersions(t, "1.2.0", "2.0.0", "2.1.0-beta1"),
		}, nil
	}
	testCli.SetContainerizedEngineClient(
		func(string) (containerizedengine.Client, error) {
			return &fakeContainerizedEngineClient{
				getEngineVersionsFunc:       getEngineVersionsFunc,
				getCurrentEngineVersionFunc: getCurrentEngineVersionFunc,
			}, nil
		},
	)
	cmd := newCheckForUpdatesCommand(testCli)
	c := test.NewFakeCli(&verClient{client.Client{}, types.Version{Version: "1.1.0"}, nil, types.Info{ServerVersion: "1.1.0"}, nil})
	c.SetRegistryClient(testRegistryClient{[]string{
		"1.0.1", "1.0.2", "1.0.3-beta1",
		"1.1.1", "1.1.2", "1.1.3-beta1",
		"1.2.0", "2.0.0", "2.1.0-beta1",
	}})

	isRoot = func() bool { return true }
	cmd := newCheckForUpdatesCommand(c)
	cmd.Flags().Set("pre-releases", "true")
	cmd.Flags().Set("downgrades", "true")
	cmd.Flags().Set("engine-image", "engine-community")
	cmd.SilenceUsage = true
	cmd.SilenceErrors = true
	err := cmd.Execute()
	assert.NilError(t, err)
	golden.Assert(t, testCli.OutBuffer().String(), "check-all.golden")
	golden.Assert(t, c.OutBuffer().String(), "check-all.golden")

	testCli.OutBuffer().Reset()
	c.OutBuffer().Reset()
	cmd.Flags().Set("pre-releases", "false")
	cmd.Flags().Set("downgrades", "true")
	err = cmd.Execute()
	assert.NilError(t, err)
	fmt.Println(testCli.OutBuffer().String())
	golden.Assert(t, testCli.OutBuffer().String(), "check-no-prerelease.golden")
	fmt.Println(c.OutBuffer().String())
	golden.Assert(t, c.OutBuffer().String(), "check-no-prerelease.golden")

	testCli.OutBuffer().Reset()
	c.OutBuffer().Reset()
	cmd.Flags().Set("pre-releases", "false")
	cmd.Flags().Set("downgrades", "false")
	err = cmd.Execute()
	assert.NilError(t, err)
	fmt.Println(testCli.OutBuffer().String())
	golden.Assert(t, testCli.OutBuffer().String(), "check-no-downgrades.golden")
	fmt.Println(c.OutBuffer().String())
	golden.Assert(t, c.OutBuffer().String(), "check-no-downgrades.golden")

	testCli.OutBuffer().Reset()
	c.OutBuffer().Reset()
	cmd.Flags().Set("pre-releases", "false")
	cmd.Flags().Set("downgrades", "false")
	cmd.Flags().Set("upgrades", "false")
	err = cmd.Execute()
	assert.NilError(t, err)
	fmt.Println(testCli.OutBuffer().String())
	golden.Assert(t, testCli.OutBuffer().String(), "check-patches-only.golden")
}

func makeVersion(t *testing.T, tag string) containerizedengine.DockerVersion {
	v, err := ver.NewVersion(tag)
	assert.NilError(t, err)
	return containerizedengine.DockerVersion{Version: *v, Tag: tag}
}

func parseVersions(t *testing.T, tags ...string) []containerizedengine.DockerVersion {
	ret := make([]containerizedengine.DockerVersion, len(tags))
	for i, tag := range tags {
		ret[i] = makeVersion(t, tag)
	}
	return ret
	fmt.Println(c.OutBuffer().String())
	golden.Assert(t, c.OutBuffer().String(), "check-patches-only.golden")
}
@ -5,7 +5,7 @@ import (
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
registryclient "github.com/docker/cli/cli/registry/client"
|
||||
"github.com/docker/cli/internal/containerizedengine"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
)
|
||||
|
||||
@ -13,28 +13,26 @@ type (
|
||||
fakeContainerizedEngineClient struct {
|
||||
closeFunc func() error
|
||||
activateEngineFunc func(ctx context.Context,
|
||||
opts containerizedengine.EngineInitOptions,
|
||||
out containerizedengine.OutStream,
|
||||
authConfig *types.AuthConfig,
|
||||
healthfn func(context.Context) error) error
|
||||
opts clitypes.EngineInitOptions,
|
||||
out clitypes.OutStream,
|
||||
authConfig *types.AuthConfig) error
|
||||
initEngineFunc func(ctx context.Context,
|
||||
opts containerizedengine.EngineInitOptions,
|
||||
out containerizedengine.OutStream,
|
||||
opts clitypes.EngineInitOptions,
|
||||
out clitypes.OutStream,
|
||||
authConfig *types.AuthConfig,
|
||||
healthfn func(context.Context) error) error
|
||||
doUpdateFunc func(ctx context.Context,
|
||||
opts containerizedengine.EngineInitOptions,
|
||||
out containerizedengine.OutStream,
|
||||
authConfig *types.AuthConfig,
|
||||
healthfn func(context.Context) error) error
|
||||
opts clitypes.EngineInitOptions,
|
||||
out clitypes.OutStream,
|
||||
authConfig *types.AuthConfig) error
|
||||
getEngineVersionsFunc func(ctx context.Context,
|
||||
registryClient registryclient.RegistryClient,
|
||||
currentVersion,
|
||||
imageName string) (containerizedengine.AvailableVersions, error)
|
||||
imageName string) (clitypes.AvailableVersions, error)
|
||||
|
||||
getEngineFunc func(ctx context.Context) (containerd.Container, error)
|
||||
removeEngineFunc func(ctx context.Context, engine containerd.Container) error
|
||||
getCurrentEngineVersionFunc func(ctx context.Context) (containerizedengine.EngineInitOptions, error)
|
||||
removeEngineFunc func(ctx context.Context) error
|
||||
getCurrentEngineVersionFunc func(ctx context.Context) (clitypes.EngineInitOptions, error)
|
||||
}
|
||||
)
|
||||
|
||||
@ -46,18 +44,17 @@ func (w *fakeContainerizedEngineClient) Close() error {
|
||||
}
|
||||
|
||||
func (w *fakeContainerizedEngineClient) ActivateEngine(ctx context.Context,
|
||||
opts containerizedengine.EngineInitOptions,
|
||||
out containerizedengine.OutStream,
|
||||
authConfig *types.AuthConfig,
|
||||
healthfn func(context.Context) error) error {
|
||||
opts clitypes.EngineInitOptions,
|
||||
out clitypes.OutStream,
|
||||
authConfig *types.AuthConfig) error {
|
||||
if w.activateEngineFunc != nil {
|
||||
return w.activateEngineFunc(ctx, opts, out, authConfig, healthfn)
|
||||
return w.activateEngineFunc(ctx, opts, out, authConfig)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (w *fakeContainerizedEngineClient) InitEngine(ctx context.Context,
|
||||
opts containerizedengine.EngineInitOptions,
|
||||
out containerizedengine.OutStream,
|
||||
opts clitypes.EngineInitOptions,
|
||||
out clitypes.OutStream,
|
||||
authConfig *types.AuthConfig,
|
||||
healthfn func(context.Context) error) error {
|
||||
if w.initEngineFunc != nil {
|
||||
@ -66,23 +63,22 @@ func (w *fakeContainerizedEngineClient) InitEngine(ctx context.Context,
|
||||
return nil
|
||||
}
|
||||
func (w *fakeContainerizedEngineClient) DoUpdate(ctx context.Context,
|
||||
opts containerizedengine.EngineInitOptions,
|
||||
out containerizedengine.OutStream,
|
||||
authConfig *types.AuthConfig,
|
||||
healthfn func(context.Context) error) error {
|
||||
opts clitypes.EngineInitOptions,
|
||||
out clitypes.OutStream,
|
||||
authConfig *types.AuthConfig) error {
|
||||
if w.doUpdateFunc != nil {
|
||||
return w.doUpdateFunc(ctx, opts, out, authConfig, healthfn)
|
||||
return w.doUpdateFunc(ctx, opts, out, authConfig)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (w *fakeContainerizedEngineClient) GetEngineVersions(ctx context.Context,
|
||||
registryClient registryclient.RegistryClient,
|
||||
currentVersion, imageName string) (containerizedengine.AvailableVersions, error) {
|
||||
currentVersion, imageName string) (clitypes.AvailableVersions, error) {
|
||||
|
||||
if w.getEngineVersionsFunc != nil {
|
||||
return w.getEngineVersionsFunc(ctx, registryClient, currentVersion, imageName)
|
||||
}
|
||||
return containerizedengine.AvailableVersions{}, nil
|
||||
return clitypes.AvailableVersions{}, nil
|
||||
}
|
||||
|
||||
func (w *fakeContainerizedEngineClient) GetEngine(ctx context.Context) (containerd.Container, error) {
|
||||
@ -91,15 +87,15 @@ func (w *fakeContainerizedEngineClient) GetEngine(ctx context.Context) (containe
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
func (w *fakeContainerizedEngineClient) RemoveEngine(ctx context.Context, engine containerd.Container) error {
|
||||
func (w *fakeContainerizedEngineClient) RemoveEngine(ctx context.Context) error {
|
||||
if w.removeEngineFunc != nil {
|
||||
return w.removeEngineFunc(ctx, engine)
|
||||
return w.removeEngineFunc(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (w *fakeContainerizedEngineClient) GetCurrentEngineVersion(ctx context.Context) (containerizedengine.EngineInitOptions, error) {
|
||||
func (w *fakeContainerizedEngineClient) GetCurrentEngineVersion(ctx context.Context) (clitypes.EngineInitOptions, error) {
|
||||
if w.getCurrentEngineVersionFunc != nil {
|
||||
return w.getCurrentEngineVersionFunc(ctx)
|
||||
}
|
||||
return containerizedengine.EngineInitOptions{}, nil
|
||||
return clitypes.EngineInitOptions{}, nil
|
||||
}
|
||||
|
||||
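As orientation for the type moves above (containerizedengine types becoming clitypes), a minimal hypothetical sketch of stubbing only the version lookup on the refactored fake client. The field name comes from the struct shown above, and the clitypes.ContainerizedClient constructor signature matches the one used in the update test further down in this diff; the concrete values are invented for illustration.

fake := &fakeContainerizedEngineClient{
    getCurrentEngineVersionFunc: func(ctx context.Context) (clitypes.EngineInitOptions, error) {
        // Illustrative values only.
        return clitypes.EngineInitOptions{
            EngineImage:    "engine-community",
            EngineVersion:  "18.09.0",
            RegistryPrefix: "docker.io/docker",
        }, nil
    },
}
testCli.SetContainerizedEngineClient(
    func(string) (clitypes.ContainerizedClient, error) { return fake, nil },
)
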
@ -15,11 +15,9 @@ func NewEngineCommand(dockerCli command.Cli) *cobra.Command {
|
||||
RunE: command.ShowHelp(dockerCli.Err()),
|
||||
}
|
||||
cmd.AddCommand(
|
||||
newInitCommand(dockerCli),
|
||||
newActivateCommand(dockerCli),
|
||||
newCheckForUpdatesCommand(dockerCli),
|
||||
newUpdateCommand(dockerCli),
|
||||
newRmCommand(dockerCli),
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
@ -10,5 +10,5 @@ func TestNewEngineCommand(t *testing.T) {
cmd := NewEngineCommand(testCli)

subcommands := cmd.Commands()
assert.Assert(t, len(subcommands) == 5)
assert.Assert(t, len(subcommands) == 3)
}

@ -1,62 +1,10 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/internal/containerizedengine"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
)
|
||||
|
||||
type extendedEngineInitOptions struct {
|
||||
containerizedengine.EngineInitOptions
|
||||
clitypes.EngineInitOptions
|
||||
sockPath string
|
||||
}
|
||||
|
||||
func newInitCommand(dockerCli command.Cli) *cobra.Command {
|
||||
var options extendedEngineInitOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "init [OPTIONS]",
|
||||
Short: "Initialize a local engine",
|
||||
Long: `This command will initialize a local engine running on containerd.
|
||||
|
||||
Configuration of the engine is managed through the daemon.json configuration
|
||||
file on the host and may be pre-created before running the 'init' command.
|
||||
`,
|
||||
Args: cli.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runInit(dockerCli, options)
|
||||
},
|
||||
Annotations: map[string]string{"experimentalCLI": ""},
|
||||
}
|
||||
flags := cmd.Flags()
|
||||
flags.StringVar(&options.EngineVersion, "version", cli.Version, "Specify engine version")
|
||||
flags.StringVar(&options.EngineImage, "engine-image", containerizedengine.CommunityEngineImage, "Specify engine image")
|
||||
flags.StringVar(&options.RegistryPrefix, "registry-prefix", "docker.io/docker", "Override the default location where engine images are pulled")
|
||||
flags.StringVar(&options.ConfigFile, "config-file", "/etc/docker/daemon.json", "Specify the location of the daemon configuration file on the host")
|
||||
flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runInit(dockerCli command.Cli, options extendedEngineInitOptions) error {
|
||||
ctx := context.Background()
|
||||
client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to access local containerd")
|
||||
}
|
||||
defer client.Close()
|
||||
authConfig, err := getRegistryAuth(dockerCli, options.RegistryPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return client.InitEngine(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig,
|
||||
func(ctx context.Context) error {
|
||||
client := dockerCli.Client()
|
||||
_, err := client.Ping(ctx)
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
@ -1,33 +0,0 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/internal/containerizedengine"
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
func TestInitNoContainerd(t *testing.T) {
|
||||
testCli.SetContainerizedEngineClient(
|
||||
func(string) (containerizedengine.Client, error) {
|
||||
return nil, fmt.Errorf("some error")
|
||||
},
|
||||
)
|
||||
cmd := newInitCommand(testCli)
|
||||
cmd.SilenceUsage = true
|
||||
cmd.SilenceErrors = true
|
||||
err := cmd.Execute()
|
||||
assert.ErrorContains(t, err, "unable to access local containerd")
|
||||
}
|
||||
|
||||
func TestInitHappy(t *testing.T) {
|
||||
testCli.SetContainerizedEngineClient(
|
||||
func(string) (containerizedengine.Client, error) {
|
||||
return &fakeContainerizedEngineClient{}, nil
|
||||
},
|
||||
)
|
||||
cmd := newInitCommand(testCli)
|
||||
err := cmd.Execute()
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
@ -1,54 +0,0 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// TODO - consider adding a "purge" flag that also removes
|
||||
// configuration files and the docker root dir.
|
||||
|
||||
type rmOptions struct {
|
||||
sockPath string
|
||||
}
|
||||
|
||||
func newRmCommand(dockerCli command.Cli) *cobra.Command {
|
||||
var options rmOptions
|
||||
cmd := &cobra.Command{
|
||||
Use: "rm [OPTIONS]",
|
||||
Short: "Remove the local engine",
|
||||
Long: `This command will remove the local engine running on containerd.
|
||||
|
||||
No state files will be removed from the host filesystem.
|
||||
`,
|
||||
Args: cli.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runRm(dockerCli, options)
|
||||
},
|
||||
Annotations: map[string]string{"experimentalCLI": ""},
|
||||
}
|
||||
flags := cmd.Flags()
|
||||
flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runRm(dockerCli command.Cli, options rmOptions) error {
|
||||
ctx := context.Background()
|
||||
client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to access local containerd")
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
engine, err := client.GetEngine(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return client.RemoveEngine(ctx, engine)
|
||||
}
|
||||
@ -1,33 +0,0 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/internal/containerizedengine"
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
func TestRmNoContainerd(t *testing.T) {
|
||||
testCli.SetContainerizedEngineClient(
|
||||
func(string) (containerizedengine.Client, error) {
|
||||
return nil, fmt.Errorf("some error")
|
||||
},
|
||||
)
|
||||
cmd := newRmCommand(testCli)
|
||||
cmd.SilenceUsage = true
|
||||
cmd.SilenceErrors = true
|
||||
err := cmd.Execute()
|
||||
assert.ErrorContains(t, err, "unable to access local containerd")
|
||||
}
|
||||
|
||||
func TestRmHappy(t *testing.T) {
|
||||
testCli.SetContainerizedEngineClient(
|
||||
func(string) (containerizedengine.Client, error) {
|
||||
return &fakeContainerizedEngineClient{}, nil
|
||||
},
|
||||
)
|
||||
cmd := newRmCommand(testCli)
|
||||
err := cmd.Execute()
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
18
cli/command/engine/testdata/check-all.golden
vendored
@ -1,11 +1,11 @@
|
||||
TYPE VERSION NOTES
|
||||
current 1.1.0
|
||||
patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1
|
||||
patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2
|
||||
patch 1.1.3-beta1 https://docs.docker.com/releasenotes/1.1.3-beta1
|
||||
upgrade 1.2.0 https://docs.docker.com/releasenotes/1.2.0
|
||||
upgrade 2.0.0 https://docs.docker.com/releasenotes/2.0.0
|
||||
upgrade 2.1.0-beta1 https://docs.docker.com/releasenotes/2.1.0-beta1
|
||||
downgrade 1.0.1 https://docs.docker.com/releasenotes/1.0.1
|
||||
downgrade 1.0.2 https://docs.docker.com/releasenotes/1.0.2
|
||||
downgrade 1.0.3-beta1 https://docs.docker.com/releasenotes/1.0.3-beta1
|
||||
patch 1.1.1 https://docker.com/engine/releasenotes?1.1.1
|
||||
patch 1.1.2 https://docker.com/engine/releasenotes?1.1.2
|
||||
patch 1.1.3-beta1 https://docker.com/engine/releasenotes?1.1.3-beta1
|
||||
upgrade 1.2.0 https://docker.com/engine/releasenotes?1.2.0
|
||||
upgrade 2.0.0 https://docker.com/engine/releasenotes?2.0.0
|
||||
upgrade 2.1.0-beta1 https://docker.com/engine/releasenotes?2.1.0-beta1
|
||||
downgrade 1.0.1 https://docker.com/engine/releasenotes?1.0.1
|
||||
downgrade 1.0.2 https://docker.com/engine/releasenotes?1.0.2
|
||||
downgrade 1.0.3-beta1 https://docker.com/engine/releasenotes?1.0.3-beta1
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
TYPE VERSION NOTES
|
||||
current 1.1.0
|
||||
patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1
|
||||
patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2
|
||||
upgrade 1.2.0 https://docs.docker.com/releasenotes/1.2.0
|
||||
upgrade 2.0.0 https://docs.docker.com/releasenotes/2.0.0
|
||||
patch 1.1.1 https://docker.com/engine/releasenotes?1.1.1
|
||||
patch 1.1.2 https://docker.com/engine/releasenotes?1.1.2
|
||||
upgrade 1.2.0 https://docker.com/engine/releasenotes?1.2.0
|
||||
upgrade 2.0.0 https://docker.com/engine/releasenotes?2.0.0
|
||||
|
||||
@ -1,8 +1,8 @@
|
||||
TYPE VERSION NOTES
|
||||
current 1.1.0
|
||||
patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1
|
||||
patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2
|
||||
upgrade 1.2.0 https://docs.docker.com/releasenotes/1.2.0
|
||||
upgrade 2.0.0 https://docs.docker.com/releasenotes/2.0.0
|
||||
downgrade 1.0.1 https://docs.docker.com/releasenotes/1.0.1
|
||||
downgrade 1.0.2 https://docs.docker.com/releasenotes/1.0.2
|
||||
patch 1.1.1 https://docker.com/engine/releasenotes?1.1.1
|
||||
patch 1.1.2 https://docker.com/engine/releasenotes?1.1.2
|
||||
upgrade 1.2.0 https://docker.com/engine/releasenotes?1.2.0
|
||||
upgrade 2.0.0 https://docker.com/engine/releasenotes?2.0.0
|
||||
downgrade 1.0.1 https://docker.com/engine/releasenotes?1.0.1
|
||||
downgrade 1.0.2 https://docker.com/engine/releasenotes?1.0.2
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
TYPE VERSION NOTES
|
||||
current 1.1.0
|
||||
patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1
|
||||
patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2
|
||||
patch 1.1.1 https://docker.com/engine/releasenotes?1.1.1
|
||||
patch 1.1.2 https://docker.com/engine/releasenotes?1.1.2
|
||||
|
||||
3
cli/command/engine/testdata/expired-hub-license-display-only.golden
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
Looking for existing licenses for ...
|
||||
NUM OWNER PRODUCT ID EXPIRES PRICING COMPONENTS
|
||||
0 2010-01-01 00:00:00 +0000 UTC
|
||||
1
cli/command/engine/testdata/expired-license-display-only.golden
vendored
Normal file
@ -0,0 +1 @@
|
||||
License: Quantity: 1 Nodes Expiration date: 2018-03-18 Expired! You will no longer receive updates. Please renew at https://docker.com/licensing
|
||||
@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@ -24,45 +25,31 @@ func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
|
||||
flags := cmd.Flags()
|
||||
|
||||
flags.StringVar(&options.EngineVersion, "version", "", "Specify engine version")
|
||||
flags.StringVar(&options.EngineImage, "engine-image", "", "Specify engine image")
|
||||
flags.StringVar(&options.RegistryPrefix, "registry-prefix", "", "Override the current location where engine images are pulled")
|
||||
flags.StringVar(&options.EngineImage, "engine-image", "", "Specify engine image (default uses the same image as currently running)")
|
||||
flags.StringVar(&options.RegistryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the current location where engine images are pulled")
|
||||
flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runUpdate(dockerCli command.Cli, options extendedEngineInitOptions) error {
|
||||
if !isRoot() {
|
||||
return errors.New("this command must be run as a privileged user")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to access local containerd")
|
||||
}
|
||||
defer client.Close()
|
||||
if options.EngineImage == "" || options.RegistryPrefix == "" {
|
||||
currentOpts, err := client.GetCurrentEngineVersion(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if options.EngineImage == "" {
|
||||
options.EngineImage = currentOpts.EngineImage
|
||||
}
|
||||
if options.RegistryPrefix == "" {
|
||||
options.RegistryPrefix = currentOpts.RegistryPrefix
|
||||
}
|
||||
}
|
||||
authConfig, err := getRegistryAuth(dockerCli, options.RegistryPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := client.DoUpdate(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig,
|
||||
func(ctx context.Context) error {
|
||||
client := dockerCli.Client()
|
||||
_, err := client.Ping(ctx)
|
||||
return err
|
||||
}); err != nil {
|
||||
if err := client.DoUpdate(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Out(), "Success! The docker engine is now running.")
|
||||
fmt.Fprintln(dockerCli.Out(), `Successfully updated engine.
|
||||
Restart docker with 'systemctl restart docker' to complete the update.`)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -4,13 +4,16 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/internal/containerizedengine"
|
||||
"github.com/docker/cli/internal/test"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/client"
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
func TestUpdateNoContainerd(t *testing.T) {
|
||||
testCli.SetContainerizedEngineClient(
|
||||
func(string) (containerizedengine.Client, error) {
|
||||
func(string) (clitypes.ContainerizedClient, error) {
|
||||
return nil, fmt.Errorf("some error")
|
||||
},
|
||||
)
|
||||
@ -22,14 +25,16 @@ func TestUpdateNoContainerd(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUpdateHappy(t *testing.T) {
|
||||
testCli.SetContainerizedEngineClient(
|
||||
func(string) (containerizedengine.Client, error) {
|
||||
c := test.NewFakeCli(&verClient{client.Client{}, types.Version{Version: "1.1.0"}, nil, types.Info{ServerVersion: "1.1.0"}, nil})
|
||||
c.SetContainerizedEngineClient(
|
||||
func(string) (clitypes.ContainerizedClient, error) {
|
||||
return &fakeContainerizedEngineClient{}, nil
|
||||
},
|
||||
)
|
||||
cmd := newUpdateCommand(testCli)
|
||||
cmd.Flags().Set("registry-prefix", "docker.io/docker")
|
||||
cmd := newUpdateCommand(c)
|
||||
cmd.Flags().Set("registry-prefix", clitypes.RegistryPrefix)
|
||||
cmd.Flags().Set("version", "someversion")
|
||||
cmd.Flags().Set("engine-image", "someimage")
|
||||
err := cmd.Execute()
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
||||
179
cli/command/formatter/buildcache.go
Normal file
@ -0,0 +1,179 @@
|
||||
package formatter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultBuildCacheTableFormat = "table {{.ID}}\t{{.Type}}\t{{.Size}}\t{{.CreatedSince}}\t{{.LastUsedSince}}\t{{.UsageCount}}\t{{.Shared}}\t{{.Description}}"
|
||||
|
||||
cacheIDHeader = "CACHE ID"
|
||||
cacheTypeHeader = "CACHE TYPE"
|
||||
parentHeader = "PARENT"
|
||||
lastUsedSinceHeader = "LAST USED"
|
||||
usageCountHeader = "USAGE"
|
||||
inUseHeader = "IN USE"
|
||||
sharedHeader = "SHARED"
|
||||
)
|
||||
|
||||
// NewBuildCacheFormat returns a Format for rendering using a Context
|
||||
func NewBuildCacheFormat(source string, quiet bool) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
if quiet {
|
||||
return defaultQuietFormat
|
||||
}
|
||||
return Format(defaultBuildCacheTableFormat)
|
||||
case RawFormatKey:
|
||||
if quiet {
|
||||
return `build_cache_id: {{.ID}}`
|
||||
}
|
||||
format := `build_cache_id: {{.ID}}
|
||||
parent_id: {{.Parent}}
|
||||
build_cache_type: {{.CacheType}}
|
||||
description: {{.Description}}
|
||||
created_at: {{.CreatedAt}}
|
||||
created_since: {{.CreatedSince}}
|
||||
last_used_at: {{.LastUsedAt}}
|
||||
last_used_since: {{.LastUsedSince}}
|
||||
usage_count: {{.UsageCount}}
|
||||
in_use: {{.InUse}}
|
||||
shared: {{.Shared}}
|
||||
`
|
||||
return Format(format)
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
func buildCacheSort(buildCache []*types.BuildCache) {
|
||||
sort.Slice(buildCache, func(i, j int) bool {
|
||||
lui, luj := buildCache[i].LastUsedAt, buildCache[j].LastUsedAt
|
||||
switch {
|
||||
case lui == nil && luj == nil:
|
||||
return strings.Compare(buildCache[i].ID, buildCache[j].ID) < 0
|
||||
case lui == nil:
|
||||
return true
|
||||
case luj == nil:
|
||||
return false
|
||||
case lui.Equal(*luj):
|
||||
return strings.Compare(buildCache[i].ID, buildCache[j].ID) < 0
|
||||
default:
|
||||
return lui.Before(*luj)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// BuildCacheWrite renders the context for a list of containers
|
||||
func BuildCacheWrite(ctx Context, buildCaches []*types.BuildCache) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
buildCacheSort(buildCaches)
|
||||
for _, bc := range buildCaches {
|
||||
err := format(&buildCacheContext{trunc: ctx.Trunc, v: bc})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return ctx.Write(newBuildCacheContext(), render)
|
||||
}
|
||||
|
||||
type buildCacheHeaderContext map[string]string
|
||||
|
||||
type buildCacheContext struct {
|
||||
HeaderContext
|
||||
trunc bool
|
||||
v *types.BuildCache
|
||||
}
|
||||
|
||||
func newBuildCacheContext() *buildCacheContext {
|
||||
buildCacheCtx := buildCacheContext{}
|
||||
buildCacheCtx.header = buildCacheHeaderContext{
|
||||
"ID": cacheIDHeader,
|
||||
"Parent": parentHeader,
|
||||
"CacheType": cacheTypeHeader,
|
||||
"Size": sizeHeader,
|
||||
"CreatedSince": createdSinceHeader,
|
||||
"LastUsedSince": lastUsedSinceHeader,
|
||||
"UsageCount": usageCountHeader,
|
||||
"InUse": inUseHeader,
|
||||
"Shared": sharedHeader,
|
||||
"Description": descriptionHeader,
|
||||
}
|
||||
return &buildCacheCtx
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) MarshalJSON() ([]byte, error) {
|
||||
return marshalJSON(c)
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) ID() string {
|
||||
id := c.v.ID
|
||||
if c.trunc {
|
||||
id = stringid.TruncateID(c.v.ID)
|
||||
}
|
||||
if c.v.InUse {
|
||||
return id + "*"
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) Parent() string {
|
||||
if c.trunc {
|
||||
return stringid.TruncateID(c.v.Parent)
|
||||
}
|
||||
return c.v.Parent
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) CacheType() string {
|
||||
return c.v.Type
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) Description() string {
|
||||
return c.v.Description
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) Size() string {
|
||||
return units.HumanSizeWithPrecision(float64(c.v.Size), 3)
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) CreatedAt() string {
|
||||
return c.v.CreatedAt.String()
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) CreatedSince() string {
|
||||
return units.HumanDuration(time.Now().UTC().Sub(c.v.CreatedAt)) + " ago"
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) LastUsedAt() string {
|
||||
if c.v.LastUsedAt == nil {
|
||||
return ""
|
||||
}
|
||||
return c.v.LastUsedAt.String()
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) LastUsedSince() string {
|
||||
if c.v.LastUsedAt == nil {
|
||||
return ""
|
||||
}
|
||||
return units.HumanDuration(time.Now().UTC().Sub(*c.v.LastUsedAt)) + " ago"
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) UsageCount() string {
|
||||
return fmt.Sprintf("%d", c.v.UsageCount)
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) InUse() string {
|
||||
return fmt.Sprintf("%t", c.v.InUse)
|
||||
}
|
||||
|
||||
func (c *buildCacheContext) Shared() string {
|
||||
return fmt.Sprintf("%t", c.v.Shared)
|
||||
}
|
||||
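A rough usage sketch of the new build-cache formatter, assuming the formatter.Context fields (Output, Format, Trunc) behave as they do for the other formatters in this package; dockerCli and the cache slice are placeholders for a real caller.

// Render build cache records as the default table on the CLI's output stream.
caches := []*types.BuildCache{} // e.g. from client.DiskUsage()
fmtCtx := formatter.Context{
    Output: dockerCli.Out(),
    Format: formatter.NewBuildCacheFormat(formatter.TableFormatKey, false),
    Trunc:  true,
}
if err := formatter.BuildCacheWrite(fmtCtx, caches); err != nil {
    return err
}
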
@ -12,19 +12,11 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}"
|
||||
defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}"
|
||||
defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}"
|
||||
defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"
|
||||
defaultBuildCacheVerboseFormat = `
|
||||
ID: {{.ID}}
|
||||
Description: {{.Description}}
|
||||
Mutable: {{.Mutable}}
|
||||
Size: {{.Size}}
|
||||
CreatedAt: {{.CreatedAt}}
|
||||
LastUsedAt: {{.LastUsedAt}}
|
||||
UsageCount: {{.UsageCount}}
|
||||
`
|
||||
defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}}\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}"
|
||||
defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}}\t{{.Status}}\t{{.Names}}"
|
||||
defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}"
|
||||
defaultDiskUsageBuildCacheTableFormat = "table {{.ID}}\t{{.CacheType}}\t{{.Size}}\t{{.CreatedSince}}\t{{.LastUsedSince}}\t{{.UsageCount}}\t{{.Shared}}"
|
||||
defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"
|
||||
|
||||
typeHeader = "TYPE"
|
||||
totalHeader = "TOTAL"
|
||||
@ -32,7 +24,7 @@ UsageCount: {{.UsageCount}}
|
||||
reclaimableHeader = "RECLAIMABLE"
|
||||
containersHeader = "CONTAINERS"
|
||||
sharedSizeHeader = "SHARED SIZE"
|
||||
uniqueSizeHeader = "UNIQUE SiZE"
|
||||
uniqueSizeHeader = "UNIQUE SIZE"
|
||||
)
|
||||
|
||||
// DiskUsageContext contains disk usage specific information required by the formatter, encapsulate a Context struct.
|
||||
@ -56,14 +48,26 @@ func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template,
|
||||
return ctx.parseFormat()
|
||||
}
|
||||
|
||||
//
|
||||
// NewDiskUsageFormat returns a format for rendering an DiskUsageContext
|
||||
func NewDiskUsageFormat(source string) Format {
|
||||
switch source {
|
||||
case TableFormatKey:
|
||||
format := defaultDiskUsageTableFormat
|
||||
return Format(format)
|
||||
case RawFormatKey:
|
||||
func NewDiskUsageFormat(source string, verbose bool) Format {
|
||||
switch {
|
||||
case verbose && source == RawFormatKey:
|
||||
format := `{{range .Images}}type: Image
|
||||
` + NewImageFormat(source, false, true) + `
|
||||
{{end -}}
|
||||
{{range .Containers}}type: Container
|
||||
` + NewContainerFormat(source, false, true) + `
|
||||
{{end -}}
|
||||
{{range .Volumes}}type: Volume
|
||||
` + NewVolumeFormat(source, false) + `
|
||||
{{end -}}
|
||||
{{range .BuildCache}}type: Build Cache
|
||||
` + NewBuildCacheFormat(source, false) + `
|
||||
{{end -}}`
|
||||
return format
|
||||
case !verbose && source == TableFormatKey:
|
||||
return Format(defaultDiskUsageTableFormat)
|
||||
case !verbose && source == RawFormatKey:
|
||||
format := `type: {{.Type}}
|
||||
total: {{.TotalCount}}
|
||||
active: {{.Active}}
|
||||
@ -71,8 +75,9 @@ size: {{.Size}}
|
||||
reclaimable: {{.Reclaimable}}
|
||||
`
|
||||
return Format(format)
|
||||
default:
|
||||
return Format(source)
|
||||
}
|
||||
return Format(source)
|
||||
}
|
||||
|
||||
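For context, a short sketch of how the reworked constructor is meant to be combined with DiskUsageContext, mirroring the test cases further down; the output writer is a stand-in for any io.Writer.

// Verbose JSON output: one document with Images, Containers, Volumes and BuildCache arrays.
duCtx := DiskUsageContext{
    Verbose: true,
    Context: Context{
        Output: dockerCli.Out(),
        Format: NewDiskUsageFormat("{{json .}}", true),
    },
}
err := duCtx.Write()
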
func (ctx *DiskUsageContext) Write() (err error) {
|
||||
@ -129,14 +134,23 @@ func (ctx *DiskUsageContext) Write() (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
func (ctx *DiskUsageContext) verboseWrite() error {
|
||||
// First images
|
||||
tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
type diskUsageContext struct {
|
||||
Images []*imageContext
|
||||
Containers []*containerContext
|
||||
Volumes []*volumeContext
|
||||
BuildCache []*buildCacheContext
|
||||
}
|
||||
|
||||
ctx.Output.Write([]byte("Images space usage:\n\n"))
|
||||
func (ctx *DiskUsageContext) verboseWrite() error {
|
||||
duc := &diskUsageContext{
|
||||
Images: make([]*imageContext, 0, len(ctx.Images)),
|
||||
Containers: make([]*containerContext, 0, len(ctx.Containers)),
|
||||
Volumes: make([]*volumeContext, 0, len(ctx.Volumes)),
|
||||
BuildCache: make([]*buildCacheContext, 0, len(ctx.BuildCache)),
|
||||
}
|
||||
trunc := ctx.Format.IsTable()
|
||||
|
||||
// First images
|
||||
for _, i := range ctx.Images {
|
||||
repo := "<none>"
|
||||
tag := "<none>"
|
||||
@ -152,55 +166,92 @@ func (ctx *DiskUsageContext) verboseWrite() error {
|
||||
}
|
||||
}
|
||||
|
||||
err := ctx.contextFormat(tmpl, &imageContext{
|
||||
duc.Images = append(duc.Images, &imageContext{
|
||||
repo: repo,
|
||||
tag: tag,
|
||||
trunc: true,
|
||||
trunc: trunc,
|
||||
i: *i,
|
||||
})
|
||||
if err != nil {
|
||||
}
|
||||
|
||||
// Now containers
|
||||
for _, c := range ctx.Containers {
|
||||
// Don't display the virtual size
|
||||
c.SizeRootFs = 0
|
||||
duc.Containers = append(duc.Containers, &containerContext{trunc: trunc, c: *c})
|
||||
}
|
||||
|
||||
// And volumes
|
||||
for _, v := range ctx.Volumes {
|
||||
duc.Volumes = append(duc.Volumes, &volumeContext{v: *v})
|
||||
}
|
||||
|
||||
// And build cache
|
||||
buildCacheSort(ctx.BuildCache)
|
||||
for _, v := range ctx.BuildCache {
|
||||
duc.BuildCache = append(duc.BuildCache, &buildCacheContext{v: v, trunc: trunc})
|
||||
}
|
||||
|
||||
if ctx.Format == TableFormatKey {
|
||||
return ctx.verboseWriteTable(duc)
|
||||
}
|
||||
|
||||
ctx.preFormat()
|
||||
tmpl, err := ctx.parseFormat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return tmpl.Execute(ctx.Output, duc)
|
||||
}
|
||||
|
||||
func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error {
|
||||
tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ctx.Output.Write([]byte("Images space usage:\n\n"))
|
||||
for _, img := range duc.Images {
|
||||
if err := ctx.contextFormat(tmpl, img); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ctx.postFormat(tmpl, newImageContext())
|
||||
|
||||
// Now containers
|
||||
ctx.Output.Write([]byte("\nContainers space usage:\n\n"))
|
||||
tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, c := range ctx.Containers {
|
||||
// Don't display the virtual size
|
||||
c.SizeRootFs = 0
|
||||
err := ctx.contextFormat(tmpl, &containerContext{trunc: true, c: *c})
|
||||
if err != nil {
|
||||
ctx.Output.Write([]byte("\nContainers space usage:\n\n"))
|
||||
for _, c := range duc.Containers {
|
||||
if err := ctx.contextFormat(tmpl, c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ctx.postFormat(tmpl, newContainerContext())
|
||||
|
||||
// And volumes
|
||||
ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n"))
|
||||
tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, v := range ctx.Volumes {
|
||||
if err := ctx.contextFormat(tmpl, &volumeContext{v: *v}); err != nil {
|
||||
ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n"))
|
||||
for _, v := range duc.Volumes {
|
||||
if err := ctx.contextFormat(tmpl, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ctx.postFormat(tmpl, newVolumeContext())
|
||||
|
||||
// And build cache
|
||||
fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize)))
|
||||
|
||||
t := template.Must(template.New("buildcache").Parse(defaultBuildCacheVerboseFormat))
|
||||
|
||||
for _, v := range ctx.BuildCache {
|
||||
t.Execute(ctx.Output, *v)
|
||||
tmpl, err = ctx.startSubsection(defaultDiskUsageBuildCacheTableFormat)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize)))
|
||||
for _, v := range duc.BuildCache {
|
||||
if err := ctx.contextFormat(tmpl, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ctx.postFormat(tmpl, newBuildCacheContext())
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -416,7 +467,7 @@ func (c *diskUsageBuilderContext) Size() string {
|
||||
func (c *diskUsageBuilderContext) Reclaimable() string {
|
||||
var inUseBytes int64
|
||||
for _, bc := range c.buildCache {
|
||||
if bc.InUse {
|
||||
if bc.InUse && !bc.Shared {
|
||||
inUseBytes += bc.Size
|
||||
}
|
||||
}
|
||||
|
||||
@ -18,7 +18,7 @@ func TestDiskUsageContextFormatWrite(t *testing.T) {
|
||||
{
|
||||
DiskUsageContext{
|
||||
Context: Context{
|
||||
Format: NewDiskUsageFormat("table"),
|
||||
Format: NewDiskUsageFormat("table", false),
|
||||
},
|
||||
Verbose: false},
|
||||
`TYPE TOTAL ACTIVE SIZE RECLAIMABLE
|
||||
@ -29,14 +29,14 @@ Build Cache 0 0 0B
|
||||
`,
|
||||
},
|
||||
{
|
||||
DiskUsageContext{Verbose: true},
|
||||
DiskUsageContext{Verbose: true, Context: Context{Format: NewDiskUsageFormat("table", true)}},
|
||||
`Images space usage:
|
||||
|
||||
REPOSITORY TAG IMAGE ID CREATED ago SIZE SHARED SIZE UNIQUE SiZE CONTAINERS
|
||||
REPOSITORY TAG IMAGE ID CREATED SIZE SHARED SIZE UNIQUE SIZE CONTAINERS
|
||||
|
||||
Containers space usage:
|
||||
|
||||
CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED ago STATUS NAMES
|
||||
CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES
|
||||
|
||||
Local Volumes space usage:
|
||||
|
||||
@ -44,8 +44,17 @@ VOLUME NAME LINKS SIZE
|
||||
|
||||
Build cache usage: 0B
|
||||
|
||||
CACHE ID CACHE TYPE SIZE CREATED LAST USED USAGE SHARED
|
||||
`,
|
||||
},
|
||||
{
|
||||
DiskUsageContext{Verbose: true, Context: Context{Format: NewDiskUsageFormat("raw", true)}},
|
||||
``,
|
||||
},
|
||||
{
|
||||
DiskUsageContext{Verbose: true, Context: Context{Format: NewDiskUsageFormat("{{json .}}", true)}},
|
||||
`{"Images":[],"Containers":[],"Volumes":[],"BuildCache":[]}`,
|
||||
},
|
||||
// Errors
|
||||
{
|
||||
DiskUsageContext{
|
||||
@ -69,7 +78,7 @@ Build cache usage: 0B
|
||||
{
|
||||
DiskUsageContext{
|
||||
Context: Context{
|
||||
Format: NewDiskUsageFormat("table"),
|
||||
Format: NewDiskUsageFormat("table", false),
|
||||
},
|
||||
},
|
||||
`TYPE TOTAL ACTIVE SIZE RECLAIMABLE
|
||||
@ -82,7 +91,7 @@ Build Cache 0 0 0B
|
||||
{
|
||||
DiskUsageContext{
|
||||
Context: Context{
|
||||
Format: NewDiskUsageFormat("table {{.Type}}\t{{.Active}}"),
|
||||
Format: NewDiskUsageFormat("table {{.Type}}\t{{.Active}}", false),
|
||||
},
|
||||
},
|
||||
string(golden.Get(t, "disk-usage-context-write-custom.golden")),
|
||||
@ -91,7 +100,7 @@ Build Cache 0 0 0B
|
||||
{
|
||||
DiskUsageContext{
|
||||
Context: Context{
|
||||
Format: NewDiskUsageFormat("raw"),
|
||||
Format: NewDiskUsageFormat("raw", false),
|
||||
},
|
||||
},
|
||||
string(golden.Get(t, "disk-usage-raw-format.golden")),
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
package formatter
|
||||
|
||||
import (
|
||||
"github.com/docker/cli/internal/containerizedengine"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -31,7 +31,7 @@ func NewUpdatesFormat(source string, quiet bool) Format {
|
||||
}
|
||||
|
||||
// UpdatesWrite writes the context
|
||||
func UpdatesWrite(ctx Context, availableUpdates []containerizedengine.Update) error {
|
||||
func UpdatesWrite(ctx Context, availableUpdates []clitypes.Update) error {
|
||||
render := func(format func(subContext subContext) error) error {
|
||||
for _, update := range availableUpdates {
|
||||
updatesCtx := &updateContext{trunc: ctx.Trunc, u: update}
|
||||
@ -53,7 +53,7 @@ func UpdatesWrite(ctx Context, availableUpdates []containerizedengine.Update) er
|
||||
type updateContext struct {
|
||||
HeaderContext
|
||||
trunc bool
|
||||
u containerizedengine.Update
|
||||
u clitypes.Update
|
||||
}
|
||||
|
||||
func (c *updateContext) MarshalJSON() ([]byte, error) {
|
||||
|
||||
@ -6,7 +6,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/internal/containerizedengine"
|
||||
clitypes "github.com/docker/cli/types"
|
||||
"gotest.tools/assert"
|
||||
is "gotest.tools/assert/cmp"
|
||||
)
|
||||
@ -84,7 +84,7 @@ version2
|
||||
}
|
||||
|
||||
for _, testcase := range cases {
|
||||
updates := []containerizedengine.Update{
|
||||
updates := []clitypes.Update{
|
||||
{Type: "updateType1", Version: "version1", Notes: "description 1"},
|
||||
{Type: "updateType2", Version: "version2", Notes: "description 2"},
|
||||
}
|
||||
@ -100,7 +100,7 @@ version2
|
||||
}
|
||||
|
||||
func TestUpdateContextWriteJSON(t *testing.T) {
|
||||
updates := []containerizedengine.Update{
|
||||
updates := []clitypes.Update{
|
||||
{Type: "updateType1", Version: "version1", Notes: "note1"},
|
||||
{Type: "updateType2", Version: "version2", Notes: "note2"},
|
||||
}
|
||||
@ -124,7 +124,7 @@ func TestUpdateContextWriteJSON(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUpdateContextWriteJSONField(t *testing.T) {
|
||||
updates := []containerizedengine.Update{
|
||||
updates := []clitypes.Update{
|
||||
{Type: "updateType1", Version: "version1"},
|
||||
{Type: "updateType2", Version: "version2"},
|
||||
}
|
||||
|
||||
@ -13,7 +13,6 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
@ -73,6 +72,7 @@ type buildOptions struct {
|
||||
platform string
|
||||
untrusted bool
|
||||
secrets []string
|
||||
ssh []string
|
||||
}
|
||||
|
||||
// dockerfileFromStdin returns true when the user specified that the Dockerfile
|
||||
@ -136,6 +136,8 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
|
||||
flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image")
|
||||
flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
|
||||
flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip")
|
||||
flags.SetAnnotation("compress", "no-buildkit", nil)
|
||||
|
||||
flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options")
|
||||
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
|
||||
flags.SetAnnotation("network", "version", []string{"1.25"})
|
||||
@ -153,11 +155,18 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
|
||||
flags.BoolVar(&options.stream, "stream", false, "Stream attaches to server to negotiate build context")
|
||||
flags.SetAnnotation("stream", "experimental", nil)
|
||||
flags.SetAnnotation("stream", "version", []string{"1.31"})
|
||||
flags.SetAnnotation("stream", "no-buildkit", nil)
|
||||
|
||||
flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (only if BuildKit enabled) (auto, plain, tty). Use plain to show container output")
|
||||
flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (auto, plain, tty). Use plain to show container output")
|
||||
flags.SetAnnotation("progress", "buildkit", nil)
|
||||
|
||||
flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build (only if BuildKit enabled): id=mysecret,src=/local/secret")
|
||||
flags.SetAnnotation("secret", "version", []string{"1.39"})
|
||||
flags.SetAnnotation("secret", "buildkit", nil)
|
||||
|
||||
flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (only if BuildKit enabled) (format: default|<id>[=<socket>|<key>[,<key>]])")
|
||||
flags.SetAnnotation("ssh", "version", []string{"1.39"})
|
||||
flags.SetAnnotation("ssh", "buildkit", nil)
|
||||
return cmd
|
||||
}
|
||||
|
||||
@ -179,22 +188,17 @@ func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error {
|
||||
|
||||
// nolint: gocyclo
|
||||
func runBuild(dockerCli command.Cli, options buildOptions) error {
|
||||
if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" {
|
||||
enableBuildkit, err := strconv.ParseBool(buildkitEnv)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
|
||||
}
|
||||
if enableBuildkit {
|
||||
return runBuildBuildKit(dockerCli, options)
|
||||
}
|
||||
} else if dockerCli.ServerInfo().BuildkitVersion == types.BuilderBuildKit {
|
||||
buildkitEnabled, err := command.BuildKitEnabled(dockerCli.ServerInfo())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if buildkitEnabled {
|
||||
return runBuildBuildKit(dockerCli, options)
|
||||
}
|
||||
|
||||
var (
|
||||
buildCtx io.ReadCloser
|
||||
dockerfileCtx io.ReadCloser
|
||||
err error
|
||||
contextDir string
|
||||
tempDir string
|
||||
relDockerfile string
|
||||
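The builder selection above now sits behind a single helper; a hedged sketch of roughly what that decision amounts to, reconstructed from the removed inline code (this is an assumption about command.BuildKitEnabled, not its actual body; imports of os, strconv, pkg/errors, the API types and cli/command are elided).

// buildKitEnabledSketch: an explicit DOCKER_BUILDKIT setting wins, otherwise
// follow the builder the daemon advertises.
func buildKitEnabledSketch(si command.ServerInfo) (bool, error) {
    if v := os.Getenv("DOCKER_BUILDKIT"); v != "" {
        enabled, err := strconv.ParseBool(v)
        if err != nil {
            return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
        }
        return enabled, nil
    }
    return si.BuildkitVersion == types.BuilderBuildKit, nil
}
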
@ -346,7 +350,7 @@ func runBuild(dockerCli command.Cli, options buildOptions) error {
|
||||
buildCtx = dockerfileCtx
|
||||
}
|
||||
|
||||
s, err := trySession(dockerCli, contextDir)
|
||||
s, err := trySession(dockerCli, contextDir, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -27,10 +27,11 @@ import (
|
||||
"github.com/moby/buildkit/session/auth/authprovider"
|
||||
"github.com/moby/buildkit/session/filesync"
|
||||
"github.com/moby/buildkit/session/secrets/secretsprovider"
|
||||
"github.com/moby/buildkit/session/sshforward/sshprovider"
|
||||
"github.com/moby/buildkit/util/appcontext"
|
||||
"github.com/moby/buildkit/util/progress/progressui"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/tonistiigi/fsutil"
|
||||
fsutiltypes "github.com/tonistiigi/fsutil/types"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
@ -42,7 +43,7 @@ var errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin
|
||||
func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
|
||||
ctx := appcontext.Context()
|
||||
|
||||
s, err := trySession(dockerCli, options.context)
|
||||
s, err := trySession(dockerCli, options.context, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -138,6 +139,13 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
|
||||
}
|
||||
s.Allow(sp)
|
||||
}
|
||||
if len(options.ssh) > 0 {
|
||||
sshp, err := parseSSHSpecs(options.ssh)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not parse ssh: %v", options.ssh)
|
||||
}
|
||||
s.Allow(sshp)
|
||||
}
|
||||
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
@ -291,7 +299,7 @@ func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, opt
|
||||
return err
|
||||
}
|
||||
|
||||
func resetUIDAndGID(s *fsutil.Stat) bool {
|
||||
func resetUIDAndGID(s *fsutiltypes.Stat) bool {
|
||||
s.Uid = 0
|
||||
s.Gid = 0
|
||||
return true
|
||||
@ -408,3 +416,26 @@ func parseSecret(value string) (*secretsprovider.FileSource, error) {
|
||||
}
|
||||
return &fs, nil
|
||||
}
|
||||
|
||||
func parseSSHSpecs(sl []string) (session.Attachable, error) {
|
||||
configs := make([]sshprovider.AgentConfig, 0, len(sl))
|
||||
for _, v := range sl {
|
||||
c, err := parseSSH(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configs = append(configs, *c)
|
||||
}
|
||||
return sshprovider.NewSSHAgentProvider(configs)
|
||||
}
|
||||
|
||||
func parseSSH(value string) (*sshprovider.AgentConfig, error) {
|
||||
parts := strings.SplitN(value, "=", 2)
|
||||
cfg := sshprovider.AgentConfig{
|
||||
ID: parts[0],
|
||||
}
|
||||
if len(parts) > 1 {
|
||||
cfg.Paths = strings.Split(parts[1], ",")
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
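For reference, a hedged illustration of what the --ssh parsing above accepts; the key name and paths are made-up examples, the behaviour follows parseSSH as defined above.

cfg, err := parseSSH("mykey=/home/me/.ssh/id_rsa,/home/me/.ssh/backup")
// err == nil
// cfg.ID    == "mykey"
// cfg.Paths == []string{"/home/me/.ssh/id_rsa", "/home/me/.ssh/backup"}

agent, err := parseSSH("default")
// agent.ID == "default", agent.Paths == nil: forward the default SSH agent socket.
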
@ -27,16 +27,16 @@ import (
|
||||
|
||||
const clientSessionRemote = "client-session"
|
||||
|
||||
func isSessionSupported(dockerCli command.Cli) bool {
|
||||
if versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.39") {
|
||||
func isSessionSupported(dockerCli command.Cli, forStream bool) bool {
|
||||
if !forStream && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.39") {
|
||||
return true
|
||||
}
|
||||
return dockerCli.ServerInfo().HasExperimental && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31")
|
||||
}
|
||||
|
||||
func trySession(dockerCli command.Cli, contextDir string) (*session.Session, error) {
|
||||
func trySession(dockerCli command.Cli, contextDir string, forStream bool) (*session.Session, error) {
|
||||
var s *session.Session
|
||||
if isSessionSupported(dockerCli) {
|
||||
if isSessionSupported(dockerCli, forStream) {
|
||||
sharedKey, err := getBuildSharedKey(contextDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get build shared key")
|
||||
|
||||
@ -19,6 +19,7 @@ type importOptions struct {
|
||||
reference string
|
||||
changes dockeropts.ListOpts
|
||||
message string
|
||||
platform string
|
||||
}
|
||||
|
||||
// NewImportCommand creates a new `docker import` command
|
||||
@ -43,6 +44,7 @@ func NewImportCommand(dockerCli command.Cli) *cobra.Command {
|
||||
options.changes = dockeropts.NewListOpts(nil)
|
||||
flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image")
|
||||
flags.StringVarP(&options.message, "message", "m", "", "Set commit message for imported image")
|
||||
command.AddPlatformFlag(flags, &options.platform)
|
||||
|
||||
return cmd
|
||||
}
|
||||
@ -71,8 +73,9 @@ func runImport(dockerCli command.Cli, options importOptions) error {
|
||||
}
|
||||
|
||||
importOptions := types.ImageImportOptions{
|
||||
Message: options.message,
|
||||
Changes: options.changes.GetAll(),
|
||||
Message: options.message,
|
||||
Changes: options.changes.GetAll(),
|
||||
Platform: options.platform,
|
||||
}
|
||||
|
||||
clnt := dockerCli.Client()
|
||||
|
||||
@ -3,11 +3,14 @@ package image
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/opts"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@ -54,8 +57,24 @@ Are you sure you want to continue?`
|
||||
Are you sure you want to continue?`
|
||||
)
|
||||
|
||||
// cloneFilter is a temporary workaround that uses existing public APIs from the filters package to clone a filter.
|
||||
// TODO(tiborvass): remove this once filters.Args.Clone() is added.
|
||||
func cloneFilter(args filters.Args) (newArgs filters.Args, err error) {
|
||||
if args.Len() == 0 {
|
||||
return filters.NewArgs(), nil
|
||||
}
|
||||
b, err := args.MarshalJSON()
|
||||
if err != nil {
|
||||
return newArgs, err
|
||||
}
|
||||
return filters.FromJSON(string(b))
|
||||
}
|
||||
|
||||
func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {
|
||||
pruneFilters := options.filter.Value()
|
||||
pruneFilters, err := cloneFilter(options.filter.Value())
|
||||
if err != nil {
|
||||
return 0, "", errors.Wrap(err, "could not copy filter in image prune")
|
||||
}
|
||||
pruneFilters.Add("dangling", fmt.Sprintf("%v", !options.all))
|
||||
pruneFilters = command.PruneFilters(dockerCli, pruneFilters)
|
||||
|
||||
@ -73,14 +92,20 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint6
|
||||
}
|
||||
|
||||
if len(report.ImagesDeleted) > 0 {
|
||||
output = "Deleted Images:\n"
|
||||
var sb strings.Builder
|
||||
sb.WriteString("Deleted Images:\n")
|
||||
for _, st := range report.ImagesDeleted {
|
||||
if st.Untagged != "" {
|
||||
output += fmt.Sprintln("untagged:", st.Untagged)
|
||||
sb.WriteString("untagged: ")
|
||||
sb.WriteString(st.Untagged)
|
||||
sb.WriteByte('\n')
|
||||
} else {
|
||||
output += fmt.Sprintln("deleted:", st.Deleted)
|
||||
sb.WriteString("deleted: ")
|
||||
sb.WriteString(st.Deleted)
|
||||
sb.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
output = sb.String()
|
||||
spaceReclaimed = report.SpaceReclaimed
|
||||
}
|
||||
|
||||
|
||||
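A short sketch of why the prune path clones the user's filter before adding to it, on the assumption that the caller's opts.FilterOpt value must not pick up the injected "dangling" key; the identifiers mirror the hunk above.

userFilters := options.filter.Value()         // e.g. label=foobar from --filter
pruneFilters, err := cloneFilter(userFilters)  // copy via the JSON round-trip above
if err != nil {
    return 0, "", errors.Wrap(err, "could not copy filter in image prune")
}
pruneFilters.Add("dangling", "true") // mutates only the copy
// userFilters still holds exactly what the user passed on the command line.
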
@ -70,6 +70,14 @@ func TestNewPruneCommandSuccess(t *testing.T) {
|
||||
}, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "label-filter",
|
||||
args: []string{"--force", "--filter", "label=foobar"},
|
||||
imagesPruneFunc: func(pruneFilter filters.Args) (types.ImagesPruneReport, error) {
|
||||
assert.Check(t, is.Equal("foobar", pruneFilter.Get("label")[0]))
|
||||
return types.ImagesPruneReport{}, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "force-untagged",
|
||||
args: []string{"--force"},
|
||||
|
||||
1
cli/command/image/testdata/prune-command-success.label-filter.golden
vendored
Normal file
@ -0,0 +1 @@
|
||||
Total reclaimed space: 0B
|
||||
@ -18,6 +18,7 @@ type osArch struct {
|
||||
// list of valid os/arch values (see "Optional Environment Variables" section
|
||||
// of https://golang.org/doc/install/source
|
||||
// Added linux/s390x as we know System z support already exists
|
||||
// Keep in sync with _docker_manifest_annotate in contrib/completion/bash/docker
|
||||
var validOSArches = map[osArch]bool{
|
||||
{os: "darwin", arch: "386"}: true,
|
||||
{os: "darwin", arch: "amd64"}: true,
|
||||
|
||||
@ -70,7 +70,7 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (output string, err e
|
||||
|
||||
// RunPrune calls the Network Prune API
|
||||
// This returns the amount of space reclaimed and a detailed output string
|
||||
func RunPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) {
|
||||
func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
|
||||
output, err := runPrune(dockerCli, pruneOptions{force: true, filter: filter})
|
||||
return 0, output, err
|
||||
}
|
||||
|
||||
@ -45,7 +45,7 @@ func newSecretCreateCommand(dockerCli command.Cli) *cobra.Command {
|
||||
flags.StringVarP(&options.driver, "driver", "d", "", "Secret driver")
|
||||
flags.SetAnnotation("driver", "version", []string{"1.31"})
|
||||
flags.StringVar(&options.templateDriver, "template-driver", "", "Template driver")
|
||||
flags.SetAnnotation("driver", "version", []string{"1.37"})
|
||||
flags.SetAnnotation("template-driver", "version", []string{"1.37"})
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
@ -302,6 +302,12 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
|
||||
if task.Resources == nil {
|
||||
task.Resources = &swarm.ResourceRequirements{}
|
||||
}
|
||||
if task.Resources.Limits == nil {
|
||||
task.Resources.Limits = &swarm.Resources{}
|
||||
}
|
||||
if task.Resources.Reservations == nil {
|
||||
task.Resources.Reservations = &swarm.Resources{}
|
||||
}
|
||||
return task.Resources
|
||||
}
|
||||
|
||||
|
||||
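The nil guards above matter for flag-driven updates: without them, setting a limit or reservation on a service that never carried resource settings could hit a nil Limits/Reservations pointer (moby/moby#38363, referenced in the test below). A minimal sketch of the now-working path, mirroring that test:

spec := swarm.ServiceSpec{
    TaskTemplate: swarm.TaskSpec{ContainerSpec: &swarm.ContainerSpec{}},
}
flags := newUpdateCommand(nil).Flags()
_ = flags.Set(flagLimitCPU, "2")
_ = flags.Set(flagLimitMemory, "200M")
err := updateService(context.Background(), nil, flags, &spec)
// err == nil; spec.TaskTemplate.Resources.Limits now carries both values.
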
@ -617,6 +617,38 @@ func TestUpdateIsolationValid(t *testing.T) {
|
||||
// and that values are not updated are not reset to their default value
|
||||
func TestUpdateLimitsReservations(t *testing.T) {
|
||||
spec := swarm.ServiceSpec{
|
||||
TaskTemplate: swarm.TaskSpec{
|
||||
ContainerSpec: &swarm.ContainerSpec{},
|
||||
},
|
||||
}
|
||||
|
||||
// test that updating works if the service did not previously
|
||||
// have limits set (https://github.com/moby/moby/issues/38363)
|
||||
flags := newUpdateCommand(nil).Flags()
|
||||
err := flags.Set(flagLimitCPU, "2")
|
||||
assert.NilError(t, err)
|
||||
err = flags.Set(flagLimitMemory, "200M")
|
||||
assert.NilError(t, err)
|
||||
err = updateService(context.Background(), nil, flags, &spec)
|
||||
assert.NilError(t, err)
|
||||
|
||||
spec = swarm.ServiceSpec{
|
||||
TaskTemplate: swarm.TaskSpec{
|
||||
ContainerSpec: &swarm.ContainerSpec{},
|
||||
},
|
||||
}
|
||||
|
||||
// test that updating works if the service did not previously
|
||||
// have reservations set (https://github.com/moby/moby/issues/38363)
|
||||
flags = newUpdateCommand(nil).Flags()
|
||||
err = flags.Set(flagReserveCPU, "2")
|
||||
assert.NilError(t, err)
|
||||
err = flags.Set(flagReserveMemory, "200M")
|
||||
assert.NilError(t, err)
|
||||
err = updateService(context.Background(), nil, flags, &spec)
|
||||
assert.NilError(t, err)
|
||||
|
||||
spec = swarm.ServiceSpec{
|
||||
TaskTemplate: swarm.TaskSpec{
|
||||
ContainerSpec: &swarm.ContainerSpec{},
|
||||
Resources: &swarm.ResourceRequirements{
|
||||
@ -632,8 +664,8 @@ func TestUpdateLimitsReservations(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
flags := newUpdateCommand(nil).Flags()
|
||||
err := flags.Set(flagLimitCPU, "2")
|
||||
flags = newUpdateCommand(nil).Flags()
|
||||
err = flags.Set(flagLimitCPU, "2")
|
||||
assert.NilError(t, err)
|
||||
err = flags.Set(flagReserveCPU, "2")
|
||||
assert.NilError(t, err)
|
||||
|
||||
@ -10,6 +10,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
runtimeutil "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
@ -240,12 +241,12 @@ func newStackInformer(stacksClient stackListWatch, stackName string) cache.Share
|
||||
return cache.NewSharedInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.LabelSelector = labels.SelectorForStack(stackName)
|
||||
options.FieldSelector = fields.OneTermEqualSelector("metadata.name", stackName).String()
|
||||
return stacksClient.List(options)
|
||||
},
|
||||
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.LabelSelector = labels.SelectorForStack(stackName)
|
||||
options.FieldSelector = fields.OneTermEqualSelector("metadata.name", stackName).String()
|
||||
return stacksClient.Watch(options)
|
||||
},
|
||||
},
|
||||
|
||||
@ -2,7 +2,6 @@ package system
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
@ -38,10 +37,6 @@ func newDiskUsageCommand(dockerCli command.Cli) *cobra.Command {
|
||||
}
|
||||
|
||||
func runDiskUsage(dockerCli command.Cli, opts diskUsageOptions) error {
|
||||
if opts.verbose && len(opts.format) != 0 {
|
||||
return errors.New("the verbose and the format options conflict")
|
||||
}
|
||||
|
||||
du, err := dockerCli.Client().DiskUsage(context.Background())
|
||||
if err != nil {
|
||||
return err
|
||||
@ -52,13 +47,20 @@ func runDiskUsage(dockerCli command.Cli, opts diskUsageOptions) error {
|
||||
format = formatter.TableFormatKey
|
||||
}
|
||||
|
||||
var bsz int64
|
||||
for _, bc := range du.BuildCache {
|
||||
if !bc.Shared {
|
||||
bsz += bc.Size
|
||||
}
|
||||
}
|
||||
|
||||
duCtx := formatter.DiskUsageContext{
|
||||
Context: formatter.Context{
|
||||
Output: dockerCli.Out(),
|
||||
Format: formatter.NewDiskUsageFormat(format),
|
||||
Format: formatter.NewDiskUsageFormat(format, opts.verbose),
|
||||
},
|
||||
LayersSize: du.LayersSize,
|
||||
BuilderSize: du.BuilderSize,
|
||||
BuilderSize: bsz,
|
||||
BuildCache: du.BuildCache,
|
||||
Images: du.Images,
|
||||
Containers: du.Containers,
|
||||
|
||||
@ -34,12 +34,20 @@ func runDialStdio(dockerCli command.Cli) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to open the raw stream connection")
|
||||
}
|
||||
connHalfCloser, ok := conn.(halfCloser)
|
||||
if !ok {
|
||||
defer conn.Close()
|
||||
|
||||
var connHalfCloser halfCloser
|
||||
switch t := conn.(type) {
|
||||
case halfCloser:
|
||||
connHalfCloser = t
|
||||
case halfReadWriteCloser:
|
||||
connHalfCloser = &nopCloseReader{t}
|
||||
default:
|
||||
return errors.New("the raw stream connection does not implement halfCloser")
|
||||
}
|
||||
stdin2conn := make(chan error)
|
||||
conn2stdout := make(chan error)
|
||||
|
||||
stdin2conn := make(chan error, 1)
|
||||
conn2stdout := make(chan error, 1)
|
||||
go func() {
|
||||
stdin2conn <- copier(connHalfCloser, &halfReadCloserWrapper{os.Stdin}, "stdin to stream")
|
||||
}()
|
||||
@ -90,6 +98,19 @@ type halfCloser interface {
|
||||
halfWriteCloser
|
||||
}
|
||||
|
||||
type halfReadWriteCloser interface {
|
||||
io.Reader
|
||||
halfWriteCloser
|
||||
}
|
||||
|
||||
type nopCloseReader struct {
|
||||
halfReadWriteCloser
|
||||
}
|
||||
|
||||
func (x *nopCloseReader) CloseRead() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type halfReadCloserWrapper struct {
|
||||
io.ReadCloser
|
||||
}
|
||||
|
||||
@ -204,6 +204,9 @@ func prettyPrintInfo(dockerCli command.Cli, info types.Info) error {
|
||||
}
|
||||
|
||||
fmt.Fprintln(dockerCli.Out(), "Live Restore Enabled:", info.LiveRestoreEnabled)
|
||||
if info.ProductLicense != "" {
|
||||
fmt.Fprintln(dockerCli.Out(), "Product License:", info.ProductLicense)
|
||||
}
|
||||
fmt.Fprint(dockerCli.Out(), "\n")
|
||||
|
||||
printWarnings(dockerCli, info)
|
||||
|
||||
@ -2,12 +2,12 @@ package system
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"text/template"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/builder"
|
||||
"github.com/docker/cli/cli/command/container"
|
||||
"github.com/docker/cli/cli/command/image"
|
||||
"github.com/docker/cli/cli/command/network"
|
||||
@ -21,20 +21,21 @@ import (
|
||||
type pruneOptions struct {
|
||||
force bool
|
||||
all bool
|
||||
pruneBuildCache bool
|
||||
pruneVolumes bool
|
||||
pruneBuildCache bool
|
||||
filter opts.FilterOpt
|
||||
}
|
||||
|
||||
// newPruneCommand creates a new cobra.Command for `docker prune`
|
||||
func newPruneCommand(dockerCli command.Cli) *cobra.Command {
|
||||
options := pruneOptions{filter: opts.NewFilterOpt(), pruneBuildCache: true}
|
||||
options := pruneOptions{filter: opts.NewFilterOpt()}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "prune [OPTIONS]",
|
||||
Short: "Remove unused data",
|
||||
Args: cli.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
options.pruneBuildCache = versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31")
|
||||
return runPrune(dockerCli, options)
|
||||
},
|
||||
Annotations: map[string]string{"version": "1.25"},
|
||||
@ -57,44 +58,29 @@ const confirmationTemplate = `WARNING! This will remove:
|
||||
{{- end }}
|
||||
Are you sure you want to continue?`
|
||||
|
||||
// runBuildCachePrune executes a prune command for build cache
|
||||
func runBuildCachePrune(dockerCli command.Cli, _ opts.FilterOpt) (uint64, string, error) {
|
||||
report, err := dockerCli.Client().BuildCachePrune(context.Background())
|
||||
if err != nil {
|
||||
return 0, "", err
|
||||
}
|
||||
return report.SpaceReclaimed, "", nil
|
||||
}
|
||||
|
||||
func runPrune(dockerCli command.Cli, options pruneOptions) error {
|
||||
// TODO version this once "until" filter is supported for volumes
|
||||
if options.pruneVolumes && options.filter.Value().Contains("until") {
|
||||
return fmt.Errorf(`ERROR: The "until" filter is not supported with "--volumes"`)
|
||||
}
|
||||
if versions.LessThan(dockerCli.Client().ClientVersion(), "1.31") {
|
||||
options.pruneBuildCache = false
|
||||
}
|
||||
if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), confirmationMessage(options)) {
|
||||
return nil
|
||||
}
|
||||
imagePrune := func(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) {
|
||||
return image.RunPrune(dockerCli, options.all, options.filter)
|
||||
}
|
||||
pruneFuncs := []func(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error){
|
||||
pruneFuncs := []func(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error){
|
||||
container.RunPrune,
|
||||
network.RunPrune,
|
||||
}
|
||||
if options.pruneVolumes {
|
||||
pruneFuncs = append(pruneFuncs, volume.RunPrune)
|
||||
}
|
||||
pruneFuncs = append(pruneFuncs, imagePrune)
|
||||
pruneFuncs = append(pruneFuncs, image.RunPrune)
|
||||
if options.pruneBuildCache {
|
||||
pruneFuncs = append(pruneFuncs, runBuildCachePrune)
|
||||
pruneFuncs = append(pruneFuncs, builder.CachePrune)
|
||||
}
|
||||
|
||||
var spaceReclaimed uint64
|
||||
for _, pruneFn := range pruneFuncs {
|
||||
spc, output, err := pruneFn(dockerCli, options.filter)
|
||||
spc, output, err := pruneFn(dockerCli, options.all, options.filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -126,7 +112,11 @@ func confirmationMessage(options pruneOptions) string {
|
||||
warnings = append(warnings, "all dangling images")
|
||||
}
|
||||
if options.pruneBuildCache {
|
||||
warnings = append(warnings, "all build cache")
|
||||
if options.all {
|
||||
warnings = append(warnings, "all build cache")
|
||||
} else {
|
||||
warnings = append(warnings, "all dangling build cache")
|
||||
}
|
||||
}
|
||||
if len(options.filter.String()) > 0 {
|
||||
warnings = append(warnings, "Elements to be pruned will be filtered with:")
|
||||
|
||||
@ -148,7 +148,7 @@ func TestAddStageSigners(t *testing.T) {
|
||||
assert.NilError(t, err)
|
||||
changeList := cl.List()
|
||||
assert.Check(t, is.Len(changeList, 4))
|
||||
// ordering is determinstic:
|
||||
// ordering is deterministic:
|
||||
|
||||
// first change is for targets/user key creation
|
||||
newSignerKeyChange := changeList[0]
|
||||
|
||||
@ -73,6 +73,6 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint6
|
||||
|
||||
// RunPrune calls the Volume Prune API
|
||||
// This returns the amount of space reclaimed and a detailed output string
|
||||
func RunPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) {
|
||||
func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
|
||||
return runPrune(dockerCli, pruneOptions{force: true, filter: filter})
|
||||
}
|
||||
|
||||
@ -176,15 +176,21 @@ func extractVariable(value interface{}, pattern *regexp.Regexp) ([]extractedValu
|
||||
|
||||
// Soft default (fall back if unset or empty)
|
||||
func softDefault(substitution string, mapping Mapping) (string, bool, error) {
|
||||
return withDefault(substitution, mapping, "-:")
|
||||
sep := ":-"
|
||||
if !strings.Contains(substitution, sep) {
|
||||
return "", false, nil
|
||||
}
|
||||
name, defaultValue := partition(substitution, sep)
|
||||
value, ok := mapping(name)
|
||||
if !ok || value == "" {
|
||||
return defaultValue, true, nil
|
||||
}
|
||||
return value, true, nil
|
||||
}
|
||||
|
||||
// Hard default (fall back if-and-only-if empty)
|
||||
func hardDefault(substitution string, mapping Mapping) (string, bool, error) {
|
||||
return withDefault(substitution, mapping, "-")
|
||||
}
|
||||
|
||||
func withDefault(substitution string, mapping Mapping, sep string) (string, bool, error) {
|
||||
sep := "-"
|
||||
if !strings.Contains(substitution, sep) {
|
||||
return "", false, nil
|
||||
}
|
||||
|
||||
@ -78,6 +78,12 @@ func TestEmptyValueWithSoftDefault(t *testing.T) {
|
||||
assert.Check(t, is.Equal("ok def", result))
|
||||
}
|
||||
|
||||
func TestValueWithSoftDefault(t *testing.T) {
|
||||
result, err := Substitute("ok ${FOO:-def}", defaultMapping)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal("ok first", result))
|
||||
}
|
||||
|
||||
func TestEmptyValueWithHardDefault(t *testing.T) {
|
||||
result, err := Substitute("ok ${BAR-def}", defaultMapping)
|
||||
assert.NilError(t, err)
|
||||
|
||||
@ -150,9 +150,8 @@ func TestOldValidAuth(t *testing.T) {
|
||||
|
||||
// defaultIndexserver is https://index.docker.io/v1/
|
||||
ac := config.AuthConfigs["https://index.docker.io/v1/"]
|
||||
if ac.Username != "joejoe" || ac.Password != "hello" {
|
||||
t.Fatalf("Missing data from parsing:\n%q", config)
|
||||
}
|
||||
assert.Equal(t, ac.Username, "joejoe")
|
||||
assert.Equal(t, ac.Password, "hello")
|
||||
|
||||
// Now save it and make sure it shows up in new form
|
||||
configStr := saveConfigAndValidateNewFormat(t, config, tmpHome)
|
||||
@ -213,9 +212,8 @@ func TestOldJSON(t *testing.T) {
|
||||
assert.NilError(t, err)
|
||||
|
||||
ac := config.AuthConfigs["https://index.docker.io/v1/"]
|
||||
if ac.Username != "joejoe" || ac.Password != "hello" {
|
||||
t.Fatalf("Missing data from parsing:\n%q", config)
|
||||
}
|
||||
assert.Equal(t, ac.Username, "joejoe")
|
||||
assert.Equal(t, ac.Password, "hello")
|
||||
|
||||
// Now save it and make sure it shows up in new form
|
||||
configStr := saveConfigAndValidateNewFormat(t, config, tmpHome)
|
||||
@ -249,9 +247,8 @@ func TestNewJSON(t *testing.T) {
|
||||
assert.NilError(t, err)
|
||||
|
||||
ac := config.AuthConfigs["https://index.docker.io/v1/"]
|
||||
if ac.Username != "joejoe" || ac.Password != "hello" {
|
||||
t.Fatalf("Missing data from parsing:\n%q", config)
|
||||
}
|
||||
assert.Equal(t, ac.Username, "joejoe")
|
||||
assert.Equal(t, ac.Password, "hello")
|
||||
|
||||
// Now save it and make sure it shows up in new form
|
||||
configStr := saveConfigAndValidateNewFormat(t, config, tmpHome)
|
||||
@ -284,9 +281,8 @@ func TestNewJSONNoEmail(t *testing.T) {
|
||||
assert.NilError(t, err)
|
||||
|
||||
ac := config.AuthConfigs["https://index.docker.io/v1/"]
|
||||
if ac.Username != "joejoe" || ac.Password != "hello" {
|
||||
t.Fatalf("Missing data from parsing:\n%q", config)
|
||||
}
|
||||
assert.Equal(t, ac.Username, "joejoe")
|
||||
assert.Equal(t, ac.Password, "hello")
|
||||
|
||||
// Now save it and make sure it shows up in new form
|
||||
configStr := saveConfigAndValidateNewFormat(t, config, tmpHome)
|
||||
@ -431,10 +427,8 @@ func TestJSONReaderNoFile(t *testing.T) {
|
||||
assert.NilError(t, err)
|
||||
|
||||
ac := config.AuthConfigs["https://index.docker.io/v1/"]
|
||||
if ac.Username != "joejoe" || ac.Password != "hello" {
|
||||
t.Fatalf("Missing data from parsing:\n%q", config)
|
||||
}
|
||||
|
||||
assert.Equal(t, ac.Username, "joejoe")
|
||||
assert.Equal(t, ac.Password, "hello")
|
||||
}
|
||||
|
||||
func TestOldJSONReaderNoFile(t *testing.T) {
|
||||
@ -444,9 +438,8 @@ func TestOldJSONReaderNoFile(t *testing.T) {
|
||||
assert.NilError(t, err)
|
||||
|
||||
ac := config.AuthConfigs["https://index.docker.io/v1/"]
|
||||
if ac.Username != "joejoe" || ac.Password != "hello" {
|
||||
t.Fatalf("Missing data from parsing:\n%q", config)
|
||||
}
|
||||
assert.Equal(t, ac.Username, "joejoe")
|
||||
assert.Equal(t, ac.Password, "hello")
|
||||
}
|
||||
|
||||
func TestJSONWithPsFormatNoFile(t *testing.T) {
|
||||
|
||||
@ -10,8 +10,10 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/cli/connhelper/ssh"
|
||||
@ -82,6 +84,9 @@ func newCommandConn(ctx context.Context, cmd string, args ...string) (net.Conn,
|
||||
// commandConn implements net.Conn
|
||||
type commandConn struct {
|
||||
cmd *exec.Cmd
|
||||
cmdExited bool
|
||||
cmdWaitErr error
|
||||
cmdMutex sync.Mutex
|
||||
stdin io.WriteCloser
|
||||
stdout io.ReadCloser
|
||||
stderrMu sync.Mutex
|
||||
@ -101,23 +106,75 @@ func (c *commandConn) killIfStdioClosed() error {
|
||||
if !stdioClosed {
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
// NOTE: maybe already killed here
|
||||
if err = c.cmd.Process.Kill(); err == nil {
|
||||
err = c.cmd.Wait()
|
||||
return c.kill()
|
||||
}
|
||||
|
||||
// killAndWait tries sending SIGTERM to the process before sending SIGKILL.
|
||||
func killAndWait(cmd *exec.Cmd) error {
|
||||
var werr error
|
||||
if runtime.GOOS != "windows" {
|
||||
werrCh := make(chan error)
|
||||
go func() { werrCh <- cmd.Wait() }()
|
||||
cmd.Process.Signal(syscall.SIGTERM)
|
||||
select {
|
||||
case werr = <-werrCh:
|
||||
case <-time.After(3 * time.Second):
|
||||
cmd.Process.Kill()
|
||||
werr = <-werrCh
|
||||
}
|
||||
} else {
|
||||
cmd.Process.Kill()
|
||||
werr = cmd.Wait()
|
||||
}
|
||||
if err != nil {
|
||||
// err is typically "os: process already finished".
|
||||
// we check ProcessState here instead of `strings.Contains(err, "os: process already finished")`
|
||||
if c.cmd.ProcessState.Exited() {
|
||||
err = nil
|
||||
return werr
|
||||
}
|
||||
|
||||
// kill returns nil if the command terminated, regardless of the exit status.
|
||||
func (c *commandConn) kill() error {
|
||||
var werr error
|
||||
c.cmdMutex.Lock()
|
||||
if c.cmdExited {
|
||||
werr = c.cmdWaitErr
|
||||
} else {
|
||||
werr = killAndWait(c.cmd)
|
||||
c.cmdWaitErr = werr
|
||||
c.cmdExited = true
|
||||
}
|
||||
c.cmdMutex.Unlock()
|
||||
if werr == nil {
|
||||
return nil
|
||||
}
|
||||
wExitErr, ok := werr.(*exec.ExitError)
|
||||
if ok {
|
||||
if wExitErr.ProcessState.Exited() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
return errors.Wrapf(werr, "connhelper: failed to wait")
|
||||
}
|
||||
|
||||
func (c *commandConn) onEOF(eof error) error {
|
||||
werr := c.cmd.Wait()
|
||||
// when we got EOF, the command is going to be terminated
|
||||
var werr error
|
||||
c.cmdMutex.Lock()
|
||||
if c.cmdExited {
|
||||
werr = c.cmdWaitErr
|
||||
} else {
|
||||
werrCh := make(chan error)
|
||||
go func() { werrCh <- c.cmd.Wait() }()
|
||||
select {
|
||||
case werr = <-werrCh:
|
||||
c.cmdWaitErr = werr
|
||||
c.cmdExited = true
|
||||
case <-time.After(10 * time.Second):
|
||||
c.cmdMutex.Unlock()
|
||||
c.stderrMu.Lock()
|
||||
stderr := c.stderr.String()
|
||||
c.stderrMu.Unlock()
|
||||
return errors.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, eof, stderr)
|
||||
}
|
||||
}
|
||||
c.cmdMutex.Unlock()
|
||||
if werr == nil {
|
||||
return eof
|
||||
}
|
||||
@ -148,7 +205,10 @@ func (c *commandConn) CloseRead() error {
|
||||
c.stdioClosedMu.Lock()
|
||||
c.stdoutClosed = true
|
||||
c.stdioClosedMu.Unlock()
|
||||
return c.killIfStdioClosed()
|
||||
if err := c.killIfStdioClosed(); err != nil {
|
||||
logrus.Warnf("commandConn.CloseRead: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *commandConn) Read(p []byte) (int, error) {
|
||||
@ -167,7 +227,10 @@ func (c *commandConn) CloseWrite() error {
|
||||
c.stdioClosedMu.Lock()
|
||||
c.stdinClosed = true
|
||||
c.stdioClosedMu.Unlock()
|
||||
return c.killIfStdioClosed()
|
||||
if err := c.killIfStdioClosed(); err != nil {
|
||||
logrus.Warnf("commandConn.CloseWrite: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *commandConn) Write(p []byte) (int, error) {
|
||||
|
||||
@ -200,7 +200,7 @@ func continueOnError(err error) bool {
|
||||
}
|
||||
|
||||
func (c *client) iterateEndpoints(ctx context.Context, namedRef reference.Named, each func(context.Context, distribution.Repository, reference.Named) (bool, error)) error {
|
||||
endpoints, err := allEndpoints(namedRef)
|
||||
endpoints, err := allEndpoints(namedRef, c.insecureRegistry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -262,12 +262,18 @@ func (c *client) iterateEndpoints(ctx context.Context, namedRef reference.Named,
|
||||
}
|
||||
|
||||
// allEndpoints returns a list of endpoints ordered by priority (v2, https, v1).
|
||||
func allEndpoints(namedRef reference.Named) ([]registry.APIEndpoint, error) {
|
||||
func allEndpoints(namedRef reference.Named, insecure bool) ([]registry.APIEndpoint, error) {
|
||||
repoInfo, err := registry.ParseRepositoryInfo(namedRef)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
registryService, err := registry.NewService(registry.ServiceOptions{})
|
||||
|
||||
var serviceOpts registry.ServiceOptions
|
||||
if insecure {
|
||||
logrus.Debugf("allowing insecure registry for: %s", reference.Domain(namedRef))
|
||||
serviceOpts.InsecureRegistries = []string{reference.Domain(namedRef)}
|
||||
}
|
||||
registryService, err := registry.NewService(serviceOpts)
|
||||
if err != nil {
|
||||
return []registry.APIEndpoint{}, err
|
||||
}
|
||||
|
||||
@ -13,6 +13,7 @@ import (
|
||||
cliconfig "github.com/docker/cli/cli/config"
|
||||
"github.com/docker/cli/cli/debug"
|
||||
cliflags "github.com/docker/cli/cli/flags"
|
||||
"github.com/docker/cli/internal/containerizedengine"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
@ -32,6 +33,9 @@ func newDockerCommand(dockerCli *command.DockerCli) *cobra.Command {
|
||||
SilenceErrors: true,
|
||||
TraverseChildren: true,
|
||||
Args: noArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return command.ShowHelp(dockerCli.Err())(cmd, args)
|
||||
},
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
// flags must be the top-level command flags, not cmd.Flags()
|
||||
opts.Common.SetDefaultOptions(flags)
|
||||
@ -99,8 +103,10 @@ func setHelpFunc(dockerCli *command.DockerCli, cmd *cobra.Command, flags *pflag.
|
||||
ccmd.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
hideUnsupportedFeatures(ccmd, dockerCli)
|
||||
if err := hideUnsupportedFeatures(ccmd, dockerCli); err != nil {
|
||||
ccmd.Println(err)
|
||||
return
|
||||
}
|
||||
defaultHelpFunc(ccmd, args)
|
||||
})
|
||||
}
|
||||
@ -168,7 +174,7 @@ func main() {
|
||||
stdin, stdout, stderr := term.StdStreams()
|
||||
logrus.SetOutput(stderr)
|
||||
|
||||
dockerCli := command.NewDockerCli(stdin, stdout, stderr, contentTrustEnabled())
|
||||
dockerCli := command.NewDockerCli(stdin, stdout, stderr, contentTrustEnabled(), containerizedengine.NewClient)
|
||||
cmd := newDockerCommand(dockerCli)
|
||||
|
||||
if err := cmd.Execute(); err != nil {
|
||||
@ -234,15 +240,21 @@ func hideFeatureSubCommand(subcmd *cobra.Command, hasFeature bool, annotation st
|
||||
}
|
||||
}
|
||||
|
||||
func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) {
|
||||
func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) error {
|
||||
clientVersion := details.Client().ClientVersion()
|
||||
osType := details.ServerInfo().OSType
|
||||
hasExperimental := details.ServerInfo().HasExperimental
|
||||
hasExperimentalCLI := details.ClientInfo().HasExperimental
|
||||
hasBuildKit, err := command.BuildKitEnabled(details.ServerInfo())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmd.Flags().VisitAll(func(f *pflag.Flag) {
|
||||
hideFeatureFlag(f, hasExperimental, "experimental")
|
||||
hideFeatureFlag(f, hasExperimentalCLI, "experimentalCLI")
|
||||
hideFeatureFlag(f, hasBuildKit, "buildkit")
|
||||
hideFeatureFlag(f, !hasBuildKit, "no-buildkit")
|
||||
// hide flags not supported by the server
|
||||
if !isOSTypeSupported(f, osType) || !isVersionSupported(f, clientVersion) {
|
||||
f.Hidden = true
|
||||
@ -258,6 +270,8 @@ func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) {
|
||||
for _, subcmd := range cmd.Commands() {
|
||||
hideFeatureSubCommand(subcmd, hasExperimental, "experimental")
|
||||
hideFeatureSubCommand(subcmd, hasExperimentalCLI, "experimentalCLI")
|
||||
hideFeatureSubCommand(subcmd, hasBuildKit, "buildkit")
|
||||
hideFeatureSubCommand(subcmd, !hasBuildKit, "no-buildkit")
|
||||
// hide subcommands not supported by the server
|
||||
if subcmdVersion, ok := subcmd.Annotations["version"]; ok && versions.LessThan(clientVersion, subcmdVersion) {
|
||||
subcmd.Hidden = true
|
||||
@ -266,6 +280,7 @@ func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) {
|
||||
subcmd.Hidden = true
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Checks if a command or one of its ancestors is in the list
|
||||
@ -312,6 +327,7 @@ func areFlagsSupported(cmd *cobra.Command, details versionDetails) error {
|
||||
if _, ok := f.Annotations["experimentalCLI"]; ok && !hasExperimentalCLI {
|
||||
errs = append(errs, fmt.Sprintf("\"--%s\" is on a Docker cli with experimental cli features enabled", f.Name))
|
||||
}
|
||||
// buildkit-specific flags are noop when buildkit is not enabled, so we do not add an error in that case
|
||||
}
|
||||
})
|
||||
if len(errs) > 0 {
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
@ -26,8 +27,25 @@ func TestClientDebugEnabled(t *testing.T) {
|
||||
|
||||
func TestExitStatusForInvalidSubcommandWithHelpFlag(t *testing.T) {
|
||||
discard := ioutil.Discard
|
||||
cmd := newDockerCommand(command.NewDockerCli(os.Stdin, discard, discard, false))
|
||||
cmd := newDockerCommand(command.NewDockerCli(os.Stdin, discard, discard, false, nil))
|
||||
cmd.SetArgs([]string{"help", "invalid"})
|
||||
err := cmd.Execute()
|
||||
assert.Error(t, err, "unknown help topic: invalid")
|
||||
}
|
||||
|
||||
func TestExitStatusForInvalidSubcommand(t *testing.T) {
|
||||
discard := ioutil.Discard
|
||||
cmd := newDockerCommand(command.NewDockerCli(os.Stdin, discard, discard, false, nil))
|
||||
cmd.SetArgs([]string{"invalid"})
|
||||
err := cmd.Execute()
|
||||
assert.Check(t, is.ErrorContains(err, "docker: 'invalid' is not a docker command."))
|
||||
}
|
||||
|
||||
func TestVersion(t *testing.T) {
|
||||
var b bytes.Buffer
|
||||
cmd := newDockerCommand(command.NewDockerCli(os.Stdin, &b, &b, false, nil))
|
||||
cmd.SetArgs([]string{"--version"})
|
||||
err := cmd.Execute()
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Contains(b.String(), "Docker version"))
|
||||
}
|
||||
|
||||
@ -1,12 +1,14 @@
|
||||
#!/usr/bin/env bash
|
||||
# shellcheck disable=SC2016,SC2119,SC2155
|
||||
# shellcheck disable=SC2016,SC2119,SC2155,SC2206,SC2207
|
||||
#
|
||||
# Shellcheck ignore list:
|
||||
# - SC2016: Expressions don't expand in single quotes, use double quotes for that.
|
||||
# - SC2119: Use foo "$@" if function's $1 should mean script's $1.
|
||||
# - SC2155: Declare and assign separately to avoid masking return values.
|
||||
#
|
||||
# You can find more details for each warning at the following page:
|
||||
# - SC2206: Quote to prevent word splitting, or split robustly with mapfile or read -a.
|
||||
# - SC2207: Prefer mapfile or read -a to split command output (or quote to avoid splitting).
|
||||
#
|
||||
# You can find more details for each warning at the following page:
|
||||
# https://github.com/koalaman/shellcheck/wiki/<SCXXXX>
|
||||
#
|
||||
# bash completion file for core docker commands
|
||||
@ -563,23 +565,39 @@ __docker_append_to_completions() {
|
||||
COMPREPLY=( ${COMPREPLY[@]/%/"$1"} )
|
||||
}
|
||||
|
||||
# __docker_daemon_is_experimental tests whether the currently configured Docker
|
||||
# daemon runs in experimental mode. If so, the function exits with 0 (true).
|
||||
# Otherwise, or if the result cannot be determined, the exit value is 1 (false).
|
||||
__docker_daemon_is_experimental() {
|
||||
[ "$(__docker_q version -f '{{.Server.Experimental}}')" = "true" ]
|
||||
# __docker_fetch_info fetches information about the configured Docker server and updates
|
||||
# several variables with the results.
|
||||
# The result is cached for the duration of one invocation of bash completion.
|
||||
__docker_fetch_info() {
|
||||
if [ -z "$info_fetched" ] ; then
|
||||
read -r client_experimental server_experimental server_os < <(__docker_q version -f '{{.Client.Experimental}} {{.Server.Experimental}} {{.Server.Os}}')
|
||||
info_fetched=true
|
||||
fi
|
||||
}
|
||||
|
||||
# __docker_daemon_os_is tests whether the currently configured Docker daemon runs
|
||||
# __docker_client_is_experimental tests whether the Docker cli is configured to support
|
||||
# experimental features. If so, the function exits with 0 (true).
|
||||
# Otherwise, or if the result cannot be determined, the exit value is 1 (false).
|
||||
__docker_client_is_experimental() {
|
||||
__docker_fetch_info
|
||||
[ "$client_experimental" = "true" ]
|
||||
}
|
||||
|
||||
# __docker_server_is_experimental tests whether the currently configured Docker
|
||||
# server runs in experimental mode. If so, the function exits with 0 (true).
|
||||
# Otherwise, or if the result cannot be determined, the exit value is 1 (false).
|
||||
__docker_server_is_experimental() {
|
||||
__docker_fetch_info
|
||||
[ "$server_experimental" = "true" ]
|
||||
}
|
||||
|
||||
# __docker_server_os_is tests whether the currently configured Docker server runs
|
||||
# on the operating system passed in as the first argument.
|
||||
# It does so by querying the daemon for its OS. The result is cached for the duration
|
||||
# of one invocation of bash completion so that this function can be used to test for
|
||||
# several different operating systems without additional costs.
|
||||
# Known operating systems: linux, windows.
|
||||
__docker_daemon_os_is() {
|
||||
__docker_server_os_is() {
|
||||
local expected_os="$1"
|
||||
local actual_os=${daemon_os=$(__docker_q version -f '{{.Server.Os}}')}
|
||||
[ "$actual_os" = "$expected_os" ]
|
||||
__docker_fetch_info
|
||||
[ "$server_os" = "$expected_os" ]
|
||||
}
|
||||
|
||||
# __docker_stack_orchestrator_is tests whether the client is configured to use
|
||||
@ -865,6 +883,7 @@ __docker_complete_log_drivers() {
|
||||
gelf
|
||||
journald
|
||||
json-file
|
||||
local
|
||||
logentries
|
||||
none
|
||||
splunk
|
||||
@ -888,7 +907,8 @@ __docker_complete_log_options() {
|
||||
local gcplogs_options="$common_options1 $common_options2 gcp-log-cmd gcp-meta-id gcp-meta-name gcp-meta-zone gcp-project"
|
||||
local gelf_options="$common_options1 $common_options2 gelf-address gelf-compression-level gelf-compression-type gelf-tcp-max-reconnect gelf-tcp-reconnect-delay tag"
|
||||
local journald_options="$common_options1 $common_options2 tag"
|
||||
local json_file_options="$common_options1 $common_options2 max-file max-size"
|
||||
local json_file_options="$common_options1 $common_options2 compress max-file max-size"
|
||||
local local_options="$common_options1 compress max-file max-size"
|
||||
local logentries_options="$common_options1 $common_options2 line-only logentries-token tag"
|
||||
local splunk_options="$common_options1 $common_options2 splunk-caname splunk-capath splunk-format splunk-gzip splunk-gzip-level splunk-index splunk-insecureskipverify splunk-source splunk-sourcetype splunk-token splunk-url splunk-verify-connection tag"
|
||||
local syslog_options="$common_options1 $common_options2 syslog-address syslog-facility syslog-format syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify tag"
|
||||
@ -917,6 +937,9 @@ __docker_complete_log_options() {
|
||||
json-file)
|
||||
COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
local)
|
||||
COMPREPLY=( $( compgen -W "$local_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
logentries)
|
||||
COMPREPLY=( $( compgen -W "$logentries_options" -S = -- "$cur" ) )
|
||||
;;
|
||||
@ -946,7 +969,7 @@ __docker_complete_log_driver_options() {
|
||||
__docker_nospace
|
||||
return
|
||||
;;
|
||||
fluentd-async-connect)
|
||||
compress|fluentd-async-connect)
|
||||
COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) )
|
||||
return
|
||||
;;
|
||||
@ -1128,7 +1151,8 @@ _docker_docker() {
|
||||
*)
|
||||
local counter=$( __docker_pos_first_nonflag "$(__docker_to_extglob "$global_options_with_args")" )
|
||||
if [ "$cword" -eq "$counter" ]; then
|
||||
__docker_daemon_is_experimental && commands+=(${experimental_commands[*]})
|
||||
__docker_client_is_experimental && commands+=(${experimental_client_commands[*]})
|
||||
__docker_server_is_experimental && commands+=(${experimental_server_commands[*]})
|
||||
COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
|
||||
fi
|
||||
;;
|
||||
@ -1837,14 +1861,14 @@ _docker_container_run_and_create() {
|
||||
--volume -v
|
||||
--workdir -w
|
||||
"
|
||||
__docker_daemon_os_is windows && options_with_args+="
|
||||
__docker_server_os_is windows && options_with_args+="
|
||||
--cpu-count
|
||||
--cpu-percent
|
||||
--io-maxbandwidth
|
||||
--io-maxiops
|
||||
--isolation
|
||||
"
|
||||
__docker_daemon_is_experimental && options_with_args+="
|
||||
__docker_server_is_experimental && options_with_args+="
|
||||
--platform
|
||||
"
|
||||
|
||||
@ -1960,7 +1984,7 @@ _docker_container_run_and_create() {
|
||||
return
|
||||
;;
|
||||
--isolation)
|
||||
if __docker_daemon_os_is windows ; then
|
||||
if __docker_server_os_is windows ; then
|
||||
__docker_complete_isolation
|
||||
return
|
||||
fi
|
||||
@ -2071,12 +2095,12 @@ _docker_container_start() {
|
||||
__docker_complete_detach_keys && return
|
||||
case "$prev" in
|
||||
--checkpoint)
|
||||
if __docker_daemon_is_experimental ; then
|
||||
if __docker_server_is_experimental ; then
|
||||
return
|
||||
fi
|
||||
;;
|
||||
--checkpoint-dir)
|
||||
if __docker_daemon_is_experimental ; then
|
||||
if __docker_server_is_experimental ; then
|
||||
_filedir -d
|
||||
return
|
||||
fi
|
||||
@ -2086,7 +2110,7 @@ _docker_container_start() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
local options="--attach -a --detach-keys --help --interactive -i"
|
||||
__docker_daemon_is_experimental && options+=" --checkpoint --checkpoint-dir"
|
||||
__docker_server_is_experimental && options+=" --checkpoint --checkpoint-dir"
|
||||
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
@ -2449,7 +2473,7 @@ _docker_daemon() {
|
||||
}
|
||||
|
||||
_docker_deploy() {
|
||||
__docker_daemon_is_experimental && _docker_stack_deploy
|
||||
__docker_server_is_experimental && _docker_stack_deploy
|
||||
}
|
||||
|
||||
_docker_diff() {
|
||||
@ -2535,7 +2559,7 @@ _docker_image_build() {
|
||||
--target
|
||||
--ulimit
|
||||
"
|
||||
__docker_daemon_os_is windows && options_with_args+="
|
||||
__docker_server_os_is windows && options_with_args+="
|
||||
--isolation
|
||||
"
|
||||
|
||||
@ -2549,7 +2573,7 @@ _docker_image_build() {
|
||||
--quiet -q
|
||||
--rm
|
||||
"
|
||||
if __docker_daemon_is_experimental ; then
|
||||
if __docker_server_is_experimental ; then
|
||||
options_with_args+="
|
||||
--platform
|
||||
"
|
||||
@ -2584,7 +2608,7 @@ _docker_image_build() {
|
||||
return
|
||||
;;
|
||||
--isolation)
|
||||
if __docker_daemon_os_is windows ; then
|
||||
if __docker_server_os_is windows ; then
|
||||
__docker_complete_isolation
|
||||
return
|
||||
fi
|
||||
@ -2664,14 +2688,16 @@ _docker_image_images() {
|
||||
|
||||
_docker_image_import() {
|
||||
case "$prev" in
|
||||
--change|-c|--message|-m)
|
||||
--change|-c|--message|-m|--platform)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--change -c --help --message -m" -- "$cur" ) )
|
||||
local options="--change -c --help --message -m"
|
||||
__docker_server_is_experimental && options+=" --platform"
|
||||
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
local counter=$(__docker_pos_first_nonflag '--change|-c|--message|-m')
|
||||
@ -2779,7 +2805,7 @@ _docker_image_pull() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
local options="--all-tags -a --disable-content-trust=false --help"
|
||||
__docker_daemon_is_experimental && options+=" --platform"
|
||||
__docker_server_is_experimental && options+=" --platform"
|
||||
|
||||
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
|
||||
;;
|
||||
@ -3395,7 +3421,6 @@ _docker_service_update_and_create() {
|
||||
local options_with_args="
|
||||
--endpoint-mode
|
||||
--entrypoint
|
||||
--force
|
||||
--health-cmd
|
||||
--health-interval
|
||||
--health-retries
|
||||
@ -3431,7 +3456,7 @@ _docker_service_update_and_create() {
|
||||
--user -u
|
||||
--workdir -w
|
||||
"
|
||||
__docker_daemon_os_is windows && options_with_args+="
|
||||
__docker_server_os_is windows && options_with_args+="
|
||||
--credential-spec
|
||||
"
|
||||
|
||||
@ -3520,6 +3545,10 @@ _docker_service_update_and_create() {
|
||||
--secret-rm
|
||||
"
|
||||
|
||||
boolean_options="$boolean_options
|
||||
--force
|
||||
"
|
||||
|
||||
case "$prev" in
|
||||
--env-rm)
|
||||
COMPREPLY=( $( compgen -e -- "$cur" ) )
|
||||
@ -3817,6 +3846,109 @@ _docker_swarm_update() {
|
||||
esac
|
||||
}
|
||||
|
||||
_docker_manifest() {
|
||||
local subcommands="
|
||||
annotate
|
||||
create
|
||||
inspect
|
||||
push
|
||||
"
|
||||
__docker_subcommands "$subcommands" && return
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_docker_manifest_annotate() {
|
||||
case "$prev" in
|
||||
--arch)
|
||||
COMPREPLY=( $( compgen -W "
|
||||
386
|
||||
amd64
|
||||
arm
|
||||
arm64
|
||||
mips64
|
||||
mips64le
|
||||
ppc64le
|
||||
s390x" -- "$cur" ) )
|
||||
return
|
||||
;;
|
||||
--os)
|
||||
COMPREPLY=( $( compgen -W "
|
||||
darwin
|
||||
dragonfly
|
||||
freebsd
|
||||
linux
|
||||
netbsd
|
||||
openbsd
|
||||
plan9
|
||||
solaris
|
||||
windows" -- "$cur" ) )
|
||||
return
|
||||
;;
|
||||
--os-features|--variant)
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--arch --help --os --os-features --variant" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
local counter=$( __docker_pos_first_nonflag "--arch|--os|--os-features|--variant" )
|
||||
if [ "$cword" -eq "$counter" ] || [ "$cword" -eq "$((counter + 1))" ]; then
|
||||
__docker_complete_images --force-tag --id
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_docker_manifest_create() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--amend -a --help --insecure" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
__docker_complete_images --force-tag --id
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_docker_manifest_inspect() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--help --insecure --verbose -v" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
local counter=$( __docker_pos_first_nonflag )
|
||||
if [ "$cword" -eq "$counter" ] || [ "$cword" -eq "$((counter + 1))" ]; then
|
||||
__docker_complete_images --force-tag --id
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_docker_manifest_push() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--help --insecure --purge -p" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
local counter=$( __docker_pos_first_nonflag )
|
||||
if [ "$cword" -eq "$counter" ]; then
|
||||
__docker_complete_images --force-tag --id
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
_docker_node() {
|
||||
local subcommands="
|
||||
demote
|
||||
@ -4451,7 +4583,7 @@ _docker_stack_deploy() {
|
||||
case "$cur" in
|
||||
-*)
|
||||
local options="--compose-file -c --help --orchestrator"
|
||||
__docker_daemon_is_experimental && __docker_stack_orchestrator_is swarm && options+=" --bundle-file"
|
||||
__docker_server_is_experimental && __docker_stack_orchestrator_is swarm && options+=" --bundle-file"
|
||||
__docker_stack_orchestrator_is kubernetes && options+=" --kubeconfig --namespace"
|
||||
__docker_stack_orchestrator_is swarm && options+=" --prune --resolve-image --with-registry-auth"
|
||||
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
|
||||
@ -5074,7 +5206,11 @@ _docker() {
|
||||
wait
|
||||
)
|
||||
|
||||
local experimental_commands=(
|
||||
local experimental_client_commands=(
|
||||
manifest
|
||||
)
|
||||
|
||||
local experimental_server_commands=(
|
||||
checkpoint
|
||||
deploy
|
||||
)
|
||||
@ -5098,10 +5234,12 @@ _docker() {
|
||||
--tlskey
|
||||
"
|
||||
|
||||
local host config daemon_os
|
||||
|
||||
# variables to cache server info, populated on demand for performance reasons
|
||||
local info_fetched server_experimental server_os
|
||||
# variables to cache client info, populated on demand for performance reasons
|
||||
local stack_orchestrator_is_kubernetes stack_orchestrator_is_swarm
|
||||
local client_experimental stack_orchestrator_is_kubernetes stack_orchestrator_is_swarm
|
||||
|
||||
local host config
|
||||
|
||||
COMPREPLY=()
|
||||
local cur prev words cword
|
||||
|
||||
@ -17,24 +17,29 @@ ENVVARS = -e VERSION=$(VERSION) -e GITCOMMIT -e PLATFORM
|
||||
# build docker image (dockerfiles/Dockerfile.build)
|
||||
.PHONY: build_docker_image
|
||||
build_docker_image:
|
||||
docker build ${DOCKER_BUILD_ARGS} -t $(DEV_DOCKER_IMAGE_NAME) -f ./dockerfiles/Dockerfile.dev .
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.dev | docker build ${DOCKER_BUILD_ARGS} -t $(DEV_DOCKER_IMAGE_NAME) -
|
||||
|
||||
# build docker image having the linting tools (dockerfiles/Dockerfile.lint)
|
||||
.PHONY: build_linter_image
|
||||
build_linter_image:
|
||||
docker build ${DOCKER_BUILD_ARGS} -t $(LINTER_IMAGE_NAME) -f ./dockerfiles/Dockerfile.lint .
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.lint | docker build ${DOCKER_BUILD_ARGS} -t $(LINTER_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_cross_image
|
||||
build_cross_image:
|
||||
docker build ${DOCKER_BUILD_ARGS} -t $(CROSS_IMAGE_NAME) -f ./dockerfiles/Dockerfile.cross .
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.cross | docker build ${DOCKER_BUILD_ARGS} -t $(CROSS_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_shell_validate_image
|
||||
build_shell_validate_image:
|
||||
docker build -t $(VALIDATE_IMAGE_NAME) -f ./dockerfiles/Dockerfile.shellcheck .
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.shellcheck | docker build -t $(VALIDATE_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_binary_native_image
|
||||
build_binary_native_image:
|
||||
docker build -t $(BINARY_NATIVE_IMAGE_NAME) -f ./dockerfiles/Dockerfile.binary-native .
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.binary-native | docker build -t $(BINARY_NATIVE_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_e2e_image
|
||||
build_e2e_image:
|
||||
@ -105,7 +110,7 @@ shellcheck: build_shell_validate_image ## run shellcheck validation
|
||||
docker run -ti --rm $(ENVVARS) $(MOUNTS) $(VALIDATE_IMAGE_NAME) make shellcheck
|
||||
|
||||
.PHONY: test-e2e ## run e2e tests
|
||||
test-e2e: test-e2e-non-experimental test-e2e-experimental test-e2e-containerized
|
||||
test-e2e: test-e2e-non-experimental test-e2e-experimental test-e2e-connhelper-ssh
|
||||
|
||||
.PHONY: test-e2e-experimental
|
||||
test-e2e-experimental: build_e2e_image
|
||||
@ -115,13 +120,9 @@ test-e2e-experimental: build_e2e_image
|
||||
test-e2e-non-experimental: build_e2e_image
|
||||
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock $(E2E_IMAGE_NAME)
|
||||
|
||||
.PHONY: test-e2e-containerized
|
||||
test-e2e-containerized: build_e2e_image
|
||||
docker run --rm --privileged \
|
||||
-v /var/lib/docker \
|
||||
-v /var/lib/containerd \
|
||||
-v /lib/modules:/lib/modules \
|
||||
$(E2E_IMAGE_NAME) /go/src/github.com/docker/cli/scripts/test/engine/entry
|
||||
.PHONY: test-e2e-connhelper-ssh
|
||||
test-e2e-connhelper-ssh: build_e2e_image
|
||||
docker run -e TEST_CONNHELPER=ssh -e DOCKERD_EXPERIMENTAL=1 --rm -v /var/run/docker.sock:/var/run/docker.sock $(E2E_IMAGE_NAME)
|
||||
|
||||
.PHONY: help
|
||||
help: ## print this help
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM golang:1.10.3-alpine
|
||||
FROM golang:1.10.8-alpine
|
||||
|
||||
RUN apk add -U git bash coreutils gcc musl-dev
|
||||
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
FROM dockercore/golang-cross:1.10.3@sha256:7671b4ed357fda50124e5679d36c4c3206ded4d43f1d2e0ff3d120a1e2bf94d7
|
||||
FROM dockercore/golang-cross:1.10.8@sha256:a93210f55a8137b4aa4b9f033ac7a80b66ab6337e98e7afb62abe93b4ad73cad
|
||||
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
COPY . .
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
|
||||
FROM golang:1.10.3-alpine
|
||||
FROM golang:1.10.8-alpine
|
||||
|
||||
RUN apk add -U git make bash coreutils ca-certificates curl
|
||||
|
||||
@ -22,3 +22,4 @@ ENV CGO_ENABLED=0 \
|
||||
DISABLE_WARN_OUTSIDE_CONTAINER=1
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
CMD sh
|
||||
COPY . .
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
ARG GO_VERSION=1.10.3
|
||||
ARG GO_VERSION=1.10.8
|
||||
|
||||
FROM docker/containerd-shim-process:a4d1531 AS containerd-shim-process
|
||||
|
||||
@ -13,30 +13,9 @@ RUN apt-get update && apt-get install -y \
|
||||
libapparmor-dev \
|
||||
libseccomp-dev \
|
||||
iptables \
|
||||
openssh-client \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# TODO - consider replacing with an official image and a multi-stage build to pluck the binaries out
|
||||
#ARG CONTAINERD_VERSION=v1.1.2
|
||||
#ARG CONTAINERD_VERSION=47a128d
|
||||
#ARG CONTAINERD_VERSION=6c3e782f
|
||||
ARG CONTAINERD_VERSION=65839a47a88b0a1c5dc34981f1741eccefc9f2b0
|
||||
RUN git clone https://github.com/containerd/containerd.git /go/src/github.com/containerd/containerd && \
|
||||
cd /go/src/github.com/containerd/containerd && \
|
||||
git checkout ${CONTAINERD_VERSION} && \
|
||||
make && \
|
||||
make install
|
||||
COPY e2eengine/config.toml /etc/containerd/config.toml
|
||||
COPY --from=containerd-shim-process /bin/containerd-shim-process-v1 /bin/
|
||||
|
||||
|
||||
# TODO - consider replacing with an official image and a multi-stage build to pluck the binaries out
|
||||
ARG RUNC_VERSION=v1.0.0-rc5
|
||||
RUN git clone https://github.com/opencontainers/runc.git /go/src/github.com/opencontainers/runc && \
|
||||
cd /go/src/github.com/opencontainers/runc && \
|
||||
git checkout ${RUNC_VERSION} && \
|
||||
make && \
|
||||
make install
|
||||
|
||||
ARG COMPOSE_VERSION=1.21.2
|
||||
RUN curl -L https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose \
|
||||
&& chmod +x /usr/local/bin/docker-compose
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM golang:1.10.3-alpine
|
||||
FROM golang:1.10.8-alpine
|
||||
|
||||
RUN apk add -U git
|
||||
|
||||
@ -15,3 +15,4 @@ ENV CGO_ENABLED=0
|
||||
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
|
||||
ENTRYPOINT ["/usr/local/bin/gometalinter"]
|
||||
CMD ["--config=gometalinter.json", "./..."]
|
||||
COPY . .
|
||||
|
||||
@ -1,9 +1,5 @@
|
||||
FROM debian:stretch-slim
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get -y install make shellcheck && \
|
||||
apt-get clean
|
||||
|
||||
FROM koalaman/shellcheck-alpine:v0.6.0
|
||||
RUN apk add --no-cache bash make
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
|
||||
CMD bash
|
||||
COPY . .
|
||||
|
||||
@ -19,6 +19,37 @@ The following list of features are deprecated in Engine.
|
||||
To learn more about Docker Engine's deprecation policy,
|
||||
see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy).
|
||||
|
||||
### Legacy "overlay" storage driver
|
||||
|
||||
**Deprecated in Release: v18.09.0**
|
||||
|
||||
The `overlay` storage driver is deprecated in favor of the `overlay2` storage
|
||||
driver, which has all the benefits of `overlay`, without its limitations (excessive
|
||||
inode consumption). The legacy `overlay` storage driver will be removed in a future
|
||||
release. Users of the `overlay` storage driver should migrate to the `overlay2`
|
||||
storage driver.
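
To see which storage driver a daemon currently uses before migrating, the following can help (a sketch; the output shown is just an example):

```
$ docker info --format '{{.Driver}}'
overlay2
```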
|
||||
|
||||
The legacy `overlay` storage driver allowed using overlayFS-backed filesystems
|
||||
on pre 4.x kernels. Now that all supported distributions are able to run `overlay2`
|
||||
(as they are either on kernel 4.x, or have support for multiple lowerdirs
|
||||
backported), there is no reason to keep maintaining the `overlay` storage driver.
|
||||
|
||||
### device mapper storage driver
|
||||
|
||||
**Deprecated in Release: v18.09.0**
|
||||
|
||||
The `devicemapper` storage driver is deprecated in favor of `overlay2`, and will
|
||||
be removed in a future release. Users of the `devicemapper` storage driver are
|
||||
recommended to migrate to a different storage driver, such as `overlay2`, which
|
||||
is now the default storage driver.
|
||||
|
||||
The `devicemapper` storage driver facilitates running Docker on older (3.x) kernels
|
||||
that have no support for other storage drivers (such as overlay2, or AUFS).
|
||||
|
||||
Now that support for `overlay2` has been added to all supported distros (as they are
|
||||
either on kernel 4.x, or have support for multiple lowerdirs backported), there
|
||||
is no reason to continue maintenance of the `devicemapper` storage driver.
|
||||
|
||||
### Reserved namespaces in engine labels
|
||||
|
||||
**Deprecated in Release: v18.06.0**
|
||||
@ -167,7 +198,7 @@ The docker login command is removing the ability to automatically register for a
|
||||
|
||||
**Target For Removal In Release: v17.06**
|
||||
|
||||
The flag `--security-opt` doesn't use the colon separator(`:`) anymore to divide keys and values, it uses the equal symbol(`=`) for consistency with other similar flags, like `--storage-opt`.
|
||||
The flag `--security-opt` doesn't use the colon separator (`:`) anymore to divide keys and values, it uses the equal symbol (`=`) for consistency with other similar flags, like `--storage-opt`.
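
For example (illustrative only; `seccomp` is one of several supported keys), the equal-sign form looks like this:

```
$ docker run --security-opt seccomp=unconfined busybox true
```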
|
||||
|
||||
### `/containers/(id or name)/copy` endpoint
|
||||
|
||||
|
||||
@ -121,6 +121,28 @@ registries.
|
||||
When you're done with your build, you're ready to look into [*Pushing a
|
||||
repository to its registry*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub).
|
||||
|
||||
|
||||
## BuildKit
|
||||
|
||||
Starting with version 18.09, Docker supports a new backend for executing your
|
||||
builds that is provided by the [moby/buildkit](https://github.com/moby/buildkit)
|
||||
project. The BuildKit backend provides many benefits compared to the old
|
||||
implementation. For example, BuildKit can:
|
||||
|
||||
* Detect and skip executing unused build stages
|
||||
* Parallelize building independent build stages
|
||||
* Incrementally transfer only the changed files in your build context between builds
|
||||
* Detect and skip transferring unused files in your build context
|
||||
* Use external Dockerfile implementations with many new features
|
||||
* Avoid side-effects with the rest of the API (intermediate images and containers)
|
||||
* Prioritize your build cache for automatic pruning
|
||||
|
||||
To use the BuildKit backend, you need to set an environment variable
|
||||
`DOCKER_BUILDKIT=1` on the CLI before invoking `docker build`.
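
As a quick illustration, the variable can be set for a single invocation; the image tag and build context below are placeholders:

```
$ DOCKER_BUILDKIT=1 docker build -t myimage:latest .
```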
|
||||
|
||||
To learn about the experimental Dockerfile syntax available to BuildKit-based
|
||||
builds [refer to the documentation in the BuildKit repository](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md).
|
||||
|
||||
## Format
|
||||
|
||||
Here is the format of the `Dockerfile`:
|
||||
@ -224,10 +246,64 @@ following lines are all treated identically:
|
||||
# dIrEcTiVe=value
|
||||
```
|
||||
|
||||
The following parser directive is supported:
|
||||
The following parser directives are supported:
|
||||
|
||||
* `syntax`
|
||||
* `escape`
|
||||
|
||||
## syntax
|
||||
|
||||
# syntax=[remote image reference]
|
||||
|
||||
For example:
|
||||
|
||||
# syntax=docker/dockerfile
|
||||
# syntax=docker/dockerfile:1.0
|
||||
# syntax=docker.io/docker/dockerfile:1
|
||||
# syntax=docker/dockerfile:1.0.0-experimental
|
||||
# syntax=example.com/user/repo:tag@sha256:abcdef...
|
||||
|
||||
This feature is only enabled if the [BuildKit](#buildkit) backend is used.
|
||||
|
||||
The syntax directive defines the location of the Dockerfile builder that is used for
|
||||
building the current Dockerfile. The BuildKit backend allows you to seamlessly use
|
||||
external implementations of builders that are distributed as Docker images and
|
||||
execute inside a container sandbox environment.
|
||||
|
||||
A custom Dockerfile implementation allows you to:
|
||||
- Automatically get bugfixes without updating the daemon
|
||||
- Make sure all users are using the same implementation to build your Dockerfile
|
||||
- Use the latest features without updating the daemon
|
||||
- Try out new experimental or third-party features
|
||||
|
||||
### Official releases
|
||||
|
||||
Docker distributes official versions of the images that can be used for building
|
||||
Dockerfiles under `docker/dockerfile` repository on Docker Hub. There are two
|
||||
channels where new images are released: stable and experimental.
|
||||
|
||||
The stable channel follows semantic versioning. For example:
|
||||
|
||||
- docker/dockerfile:1.0.0 - only allow immutable version 1.0.0
|
||||
- docker/dockerfile:1.0 - allow versions 1.0.*
|
||||
- docker/dockerfile:1 - allow versions 1.*.*
|
||||
- docker/dockerfile:latest - latest release on stable channel
|
||||
|
||||
The experimental channel uses incremental versioning with the major and minor
|
||||
component from the stable channel at the time of the release. For example:
|
||||
|
||||
- docker/dockerfile:1.0.1-experimental - only allow immutable version 1.0.1-experimental
|
||||
- docker/dockerfile:1.0-experimental - latest experimental releases after 1.0
|
||||
- docker/dockerfile:experimental - latest release on experimental channel
|
||||
|
||||
You should choose a channel that best fits your needs. If you only want
|
||||
bugfixes, you should use `docker/dockerfile:1.0`. If you want to benefit from
|
||||
experimental features, you should use the experimental channel. If you are using
|
||||
the experimental channel, newer releases may not be backwards compatible, so it
|
||||
is recommended to use an immutable full version variant.
|
||||
|
||||
For master builds and nightly feature releases refer to the description in [the source repository](https://github.com/moby/buildkit/blob/master/README.md).
|
||||
|
||||
## escape
|
||||
|
||||
# escape=\ (backslash)
|
||||
@ -1339,6 +1415,10 @@ The table below shows what command is executed for different `ENTRYPOINT` / `CMD
|
||||
| **CMD ["p1_cmd", "p2_cmd"]** | p1_cmd p2_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry p1_cmd p2_cmd |
|
||||
| **CMD exec_cmd p1_cmd** | /bin/sh -c exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry /bin/sh -c exec_cmd p1_cmd |
|
||||
|
||||
> **Note**: If `CMD` is defined from the base image, setting `ENTRYPOINT` will
|
||||
> reset `CMD` to an empty value. In this scenario, `CMD` must be defined in the
|
||||
> current image to have a value.
|
||||
|
||||
## VOLUME
|
||||
|
||||
VOLUME ["/data"]
|
||||
@ -1379,7 +1459,7 @@ Keep the following things in mind about volumes in the `Dockerfile`.
|
||||
data within the volume after it has been declared, those changes will be discarded.
|
||||
|
||||
- **JSON formatting**: The list is parsed as a JSON array.
|
||||
You must enclose words with double quotes (`"`)rather than single quotes (`'`).
|
||||
You must enclose words with double quotes (`"`) rather than single quotes (`'`).
|
||||
|
||||
- **The host directory is declared at container run-time**: The host directory
|
||||
(the mountpoint) is, by its nature, host-dependent. This is to preserve image
|
||||
@ -1623,6 +1703,38 @@ RUN echo "Hello World"
|
||||
When building this Dockerfile, the `HTTP_PROXY` is preserved in the
|
||||
`docker history`, and changing its value invalidates the build cache.
|
||||
|
||||
### Automatic platform ARGs in the global scope
|
||||
|
||||
This feature is only available when using the [BuildKit](#buildkit) backend.
|
||||
|
||||
Docker predefines a set of `ARG` variables with information on the platform of
|
||||
the node performing the build (build platform) and on the platform of the
|
||||
resulting image (target platform). The target platform can be specified with
|
||||
the `--platform` flag on `docker build`.
|
||||
|
||||
The following `ARG` variables are set automatically:
|
||||
|
||||
* `TARGETPLATFORM` - platform of the build result, e.g. `linux/amd64`, `linux/arm/v7`, `windows/amd64`.
|
||||
* `TARGETOS` - OS component of TARGETPLATFORM
|
||||
* `TARGETARCH` - architecture component of TARGETPLATFORM
|
||||
* `TARGETVARIANT` - variant component of TARGETPLATFORM
|
||||
* `BUILDPLATFORM` - platform of the node performing the build.
|
||||
* `BUILDOS` - OS component of BUILDPLATFORM
|
||||
* `BUILDARCH` - architecture component of BUILDPLATFORM
|
||||
* `BUILDVARIANT` - variant component of BUILDPLATFORM
|
||||
|
||||
These arguments are defined in the global scope, so they are not automatically
|
||||
available inside build stages or for your `RUN` commands. To expose one of
|
||||
these arguments inside the build stage, redefine it without a value.
|
||||
|
||||
For example:
|
||||
|
||||
```Dockerfile
|
||||
FROM alpine
|
||||
ARG TARGETPLATFORM
|
||||
RUN echo "I'm building for $TARGETPLATFORM"
|
||||
```
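
Building that Dockerfile for a specific target could then look like the following sketch; it assumes the BuildKit backend is enabled and uses `linux/arm/v7` purely as an example value:

```
$ DOCKER_BUILDKIT=1 docker build --platform linux/arm/v7 .
```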
|
||||
|
||||
### Impact on build caching
|
||||
|
||||
`ARG` variables are not persisted into the built image as `ENV` variables are.
|
||||
@ -1931,6 +2043,14 @@ required such as `zsh`, `csh`, `tcsh` and others.
|
||||
|
||||
The `SHELL` feature was added in Docker 1.12.
|
||||
|
||||
## External implementation features
|
||||
|
||||
This feature is only available when using the [BuildKit](#buildkit) backend.
|
||||
|
||||
Docker build supports experimental features like cache mounts, build secrets and
|
||||
ssh forwarding that are enabled by using an external implementation of the
|
||||
builder with a syntax directive. To learn about these features, [refer to the documentation in BuildKit repository](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md).
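
As a rough sketch of how such a build might be invoked (the secret id and source path are placeholders taken from the option descriptions elsewhere in this reference, and the Dockerfile is assumed to opt in via a `syntax` directive):

```
$ DOCKER_BUILDKIT=1 docker build --secret id=mysecret,src=/local/secret --ssh default .
```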
|
||||
|
||||
## Dockerfile examples
|
||||
|
||||
Below you can see some examples of Dockerfile syntax. If you're interested in
|
||||
|
||||
@ -44,8 +44,8 @@ from different sessions on the Docker host.
|
||||
|
||||
To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the
|
||||
container. If `--sig-proxy` is true (the default), `CTRL-c` sends a `SIGINT` to
|
||||
the container. You can detach from a container and leave it running using the
|
||||
`CTRL-p CTRL-q` key sequence.
|
||||
the container. If the container was run with `-i` and `-t`, you can detach from
|
||||
a container and leave it running using the `CTRL-p CTRL-q` key sequence.
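
A minimal way to try this (the container name and image are placeholders):

```
$ docker run -it --name detach-demo busybox sh
# press CTRL-p CTRL-q inside the container to detach, then re-attach with:
$ docker attach detach-demo
```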
|
||||
|
||||
> **Note:**
|
||||
> A process running as PID 1 inside a container is treated specially by
|
||||
|
||||
@ -48,14 +48,18 @@ Options:
|
||||
'<network-name>|<network-id>': connect to a user-defined network
|
||||
--no-cache Do not use cache when building the image
|
||||
--pull Always attempt to pull a newer version of the image
|
||||
--progress Set type of progress output (only if BuildKit enabled) (auto, plain, tty).
|
||||
Use plain to show container output
|
||||
-q, --quiet Suppress the build output and print image ID on success
|
||||
--rm Remove intermediate containers after a successful build (default true)
|
||||
--secret Secret file to expose to the build (only if BuildKit enabled): id=mysecret,src=/local/secret
|
||||
--security-opt value Security Options (default [])
|
||||
--shm-size bytes Size of /dev/shm
|
||||
The format is `<number><unit>`. `number` must be greater than `0`.
|
||||
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes),
|
||||
or `g` (gigabytes). If you omit the unit, the system uses bytes.
|
||||
--squash Squash newly built layers into a single new layer (**Experimental Only**)
|
||||
--ssh SSH agent socket or keys to expose to the build (only if BuildKit enabled) (format: default|<id>[=<socket>|<key>[,<key>]])
|
||||
-t, --tag value Name and optionally a tag in the 'name:tag' format (default [])
|
||||
--target string Set the target build stage to build.
|
||||
--ulimit value Ulimit options (default [])
|
||||

@ -500,13 +504,13 @@ stable.

Squashing layers can be beneficial if your Dockerfile produces multiple layers
modifying the same files, for example, file that are created in one step, and
modifying the same files, for example, files that are created in one step, and
removed in another step. For other use-cases, squashing images may actually have
a negative impact on performance; when pulling an image consisting of multiple
layers, layers can be pulled in parallel, and allows sharing layers between
images (saving space).

For most use cases, multi-stage are a better alternative, as they give more
For most use cases, multi-stage builds are a better alternative, as they give more
fine-grained control over your build, and can take advantage of future
optimizations in the builder. Refer to the [use multi-stage builds](https://docs.docker.com/develop/develop-images/multistage-build/)
section in the userguide for more information.

@ -527,7 +531,7 @@ The `--squash` option has a number of known limitations:
  downloading a single layer cannot be parallelized.
- When attempting to squash an image that does not make changes to the
  filesystem (for example, the Dockerfile only contains `ENV` instructions),
  the squash step will fail (see [issue #33823](https://github.com/moby/moby/issues/33823)
  the squash step will fail (see [issue #33823](https://github.com/moby/moby/issues/33823)).

#### Prerequisites

@ -85,7 +85,7 @@ Options:
--memory-reservation string Memory soft limit
--memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--memory-swappiness int Tune container memory swappiness (0 to 100) (default -1)
--mount value Attach a filesytem mount to the container (default [])
--mount value Attach a filesystem mount to the container (default [])
--name string Assign a name to the container
--network-alias value Add network-scoped alias for the container (default [])
--network string Connect a container to a network (default "default")

@ -256,5 +256,5 @@ docker create --device-cgroup-rule='c 42:* rmw' -name my-container my-image
Then, a user could ask `udev` to execute a script that would `docker exec my-container mknod newDevX c 42 <minor>`
the required device when it is added.

NOTE: initially present devices still need to be explicitely added to
NOTE: initially present devices still need to be explicitly added to
the create/run command

@ -191,7 +191,10 @@ $ docker -H ssh://example.com ps
```

To use SSH connection, you need to set up `ssh` so that it can reach the
remote host with public key authentication.
remote host with public key authentication. Password authentication is not
supported. If your key is protected with passphrase, you need to set up
`ssh-agent`.
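
A minimal sketch of that setup (the host name and key path are placeholders) could be:

```bash
$ eval "$(ssh-agent)"
$ ssh-add ~/.ssh/id_rsa
$ docker -H ssh://me@example.com ps
```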

Also, you need to have `docker` binary 18.09 or later on the daemon host.

#### Bind Docker to another host/port or a Unix socket

@ -300,7 +303,7 @@ the same file can share a single page cache entry (or entries), it makes
> **Note**: As promising as `overlay` is, the feature is still quite young and
> should not be used in production. Most notably, using `overlay` can cause
> excessive inode consumption (especially as the number of images grows), as
> well as > being incompatible with the use of RPMs.
> well as being incompatible with the use of RPMs.

The `overlay2` uses the same fast union filesystem but takes advantage of
[additional features](https://lkml.org/lkml/2015/2/11/106) added in Linux

@ -1228,10 +1231,14 @@ The `--metrics-addr` option takes a tcp address to serve the metrics API.
This feature is still experimental, therefore, the daemon must be running in experimental
mode for this feature to work.

To serve the metrics API on localhost:1337 you would specify `--metrics-addr 127.0.0.1:1337`
allowing you to make requests on the API at `127.0.0.1:1337/metrics` to receive metrics in the
To serve the metrics API on `localhost:9323` you would specify `--metrics-addr 127.0.0.1:9323`,
allowing you to make requests on the API at `127.0.0.1:9323/metrics` to receive metrics in the
[prometheus](https://prometheus.io/docs/instrumenting/exposition_formats/) format.
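
As a minimal sketch (assuming the daemon is started manually in experimental mode), exposing and querying the endpoint might look like:

```bash
$ dockerd --experimental --metrics-addr 127.0.0.1:9323
$ curl http://127.0.0.1:9323/metrics
```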

Port `9323` is the [default port associated with Docker
metrics](https://github.com/prometheus/prometheus/wiki/Default-port-allocations)
to avoid collisions with other prometheus exporters and services.

If you are running a prometheus server you can add this address to your scrape configs
to have prometheus collect metrics on Docker. For more information
on prometheus you can view the website [here](https://prometheus.io/).

@ -1240,7 +1247,7 @@ on prometheus you can view the website [here](https://prometheus.io/).
scrape_configs:
- job_name: 'docker'
static_configs:
- targets: ['127.0.0.1:1337']
- targets: ['127.0.0.1:9323']
```

Please note that this feature is still marked as experimental as metrics and metric

@ -1297,12 +1304,18 @@ This is a full example of the allowed configuration options on Linux:
"exec-opts": [],
"exec-root": "",
"experimental": false,
"features": {},
"storage-driver": "",
"storage-opts": [],
"labels": [],
"live-restore": true,
"log-driver": "",
"log-opts": {},
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-file":"5",
"labels": "somelabel",
"env": "os,customer"
},
"mtu": 0,
"pidfile": "",
"cluster-store": "",

@ -1326,7 +1339,13 @@ This is a full example of the allowed configuration options on Linux:
"userns-remap": "",
"group": "",
"cgroup-parent": "",
"default-ulimits": {},
"default-ulimits": {
"nofile": {
"Name": "nofile",
"Hard": 64000,
"Soft": 64000
}
},
"init": false,
"init-path": "/usr/libexec/docker-init",
"ipv6": false,

@ -1392,6 +1411,7 @@ This is a full example of the allowed configuration options on Windows:
"dns-search": [],
"exec-opts": [],
"experimental": false,
"features":{},
"storage-driver": "",
"storage-opts": [],
"labels": [],

@ -1422,6 +1442,16 @@ This is a full example of the allowed configuration options on Windows:
}
```
#### Feature options

The optional field `features` in `daemon.json` allows users to enable or disable specific
daemon features. For example, `{"features":{"buildkit": true}}` enables `buildkit` as the
default docker image builder (a minimal `daemon.json` sketch follows the list below).

The list of currently supported feature options:
- `buildkit`: It enables `buildkit` as the default builder when set to `true`, or disables it when
  set to `false`. Note that if this option is not explicitly set in the daemon config file, then it
  is up to the cli to determine which builder to invoke.
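
For instance, a minimal `daemon.json` fragment that turns BuildKit on for every build (assuming no other daemon options are needed) would look like this:

```json
{
  "features": {
    "buildkit": true
  }
}
```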

#### Configuration reload behavior

Some options can be reconfigured when the daemon is running without requiring

@ -1446,11 +1476,12 @@ The list of currently supported options that can be reconfigured is this:
  the runtime shipped with the official docker packages.
- `runtimes`: it updates the list of available OCI runtimes that can
  be used to run containers.
- `authorization-plugin`: specifies the authorization plugins to use.
- `authorization-plugin`: it specifies the authorization plugins to use.
- `allow-nondistributable-artifacts`: Replaces the set of registries to which the daemon will push nondistributable artifacts with a new set of registries.
- `insecure-registries`: it replaces the daemon insecure registries with a new set of insecure registries. If some existing insecure registries in daemon's configuration are not in newly reloaded insecure registries, these existing ones will be removed from daemon's config.
- `registry-mirrors`: it replaces the daemon registry mirrors with a new set of registry mirrors. If some existing registry mirrors in daemon's configuration are not in newly reloaded registry mirrors, these existing ones will be removed from daemon's config.
- `shutdown-timeout`: it replaces the daemon's existing configuration timeout with a new timeout for shutting down all containers.
- `features`: it explicitly enables or disables specific features.

Updating and reloading the cluster configurations such as `--cluster-store`,
`--cluster-advertise` and `--cluster-store-opts` will take effect only if

@ -24,6 +24,7 @@ Options:
-c, --change value Apply Dockerfile instruction to the created image (default [])
--help Print usage
-m, --message string Set commit message for imported image
--platform string Set platform if server is multi-platform capable
```

## Description

@ -87,3 +88,11 @@ Note the `sudo` in this example – you must preserve
the ownership of the files (especially root ownership) during the
archiving with tar. If you are not root (or the sudo command) when you
tar, then the ownerships might not get preserved.

## When the daemon supports multiple operating systems

If the daemon supports multiple operating systems, and the image being imported
does not match the default operating system, it may be necessary to add
`--platform`. This would be necessary when importing a Linux image into a Windows
daemon.

# docker import --platform=linux .\linuximage.tar

@ -85,6 +85,8 @@ you can download them from:
- Microsoft Windows Credential Manager: https://github.com/docker/docker-credential-helpers/releases
- [pass](https://www.passwordstore.org/): https://github.com/docker/docker-credential-helpers/releases

#### Configure the credentials store

You need to specify the credentials store in `$HOME/.docker/config.json`
to tell the docker engine to use it. The value of the config property should be
the suffix of the program to use (i.e. everything after `docker-credential-`).

@ -99,7 +101,7 @@ For example, to use `docker-credential-osxkeychain`:
If you are currently logged in, run `docker logout` to remove
the credentials from the file and run `docker login` again.

### Default behavior
#### Default behavior

By default, Docker looks for the native binary on each of the platforms, i.e.
"osxkeychain" on macOS, "wincred" on windows, and "pass" on Linux. A special

@ -108,7 +110,7 @@ it cannot find the "pass" binary. If none of these binaries are present, it
stores the credentials (i.e. password) in base64 encoding in the config files
described above.

### Credential helper protocol
#### Credential helper protocol

Credential helpers can be any program or script that follows a very simple protocol.
This protocol is heavily inspired by Git, but it differs in the information shared.
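
As a hedged sketch of that protocol (the payload fields follow the docker-credential-helpers convention; the username and secret below are placeholders), a `get` request passes the registry URL on standard input and the helper replies with a JSON object:

```bash
$ echo "https://index.docker.io/v1/" | docker-credential-osxkeychain get
{"ServerURL":"https://index.docker.io/v1/","Username":"<username>","Secret":"<secret>"}
```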

@ -162,7 +164,7 @@ designated programs to handle credentials for *specific registries*. The default
credential store (`credsStore` or the config file itself) will not be used for
operations concerning credentials of the specified registries.

### Logging out
#### Configure credential helpers

If you are currently logged in, run `docker logout` to remove
the credentials from the default store.

@ -182,3 +184,7 @@ For example:
  }
}
```

## Related commands

* [logout](logout.md)

@ -30,3 +30,7 @@ Options:
```bash
$ docker logout localhost:8080
```

## Related commands

* [login](login.md)

@ -177,7 +177,7 @@ This is similar to tagging an image and pushing it to a foreign registry.

After you have created your local copy of the manifest list, you may optionally
`annotate` it. Annotations allowed are the architecture and operating system (overriding the image's current values),
os features, and an archictecure variant.
os features, and an architecture variant.
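
As a hedged sketch (the image names here are placeholders), annotating one entry of a manifest list could look like:

```bash
$ docker manifest annotate myregistry/myimage:list myregistry/myimage:arm64 \
    --os linux --arch arm64 --variant v8
```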

Finally, you need to `push` your manifest list to the desired registry. Below are descriptions of these three commands,
and an example putting them all together.

@ -270,5 +270,5 @@ $ docker manifest create --insecure myprivateregistry.mycompany.com/repo/image:1
$ docker manifest push --insecure myprivateregistry.mycompany.com/repo/image:tag
```

Note that the `--insecure` flag is not required to annotate a manifest list, since annotations are to a locally-stored copy of a manifest list. You may also skip the `--insecure` flag if you are performaing a `docker manifest inspect` on a locally-stored manifest list. Be sure to keep in mind that locally-stored manifest lists are never used by the engine on a `docker pull`.
Note that the `--insecure` flag is not required to annotate a manifest list, since annotations are to a locally-stored copy of a manifest list. You may also skip the `--insecure` flag if you are performing a `docker manifest inspect` on a locally-stored manifest list. Be sure to keep in mind that locally-stored manifest lists are never used by the engine on a `docker pull`.

@ -116,6 +116,7 @@ Valid placeholders for the Go template are listed below:

Placeholder | Description
----------------|------------------------------------------------------------------------------------------
`.ID` | Task ID
`.Name` | Task name
`.Image` | Task image
`.Node` | Node ID

@ -26,6 +26,17 @@ Options:
--no-prune Do not delete untagged parents
```

## Description

Removes (and un-tags) one or more images from the host node. If an image has
multiple tags, using this command with the tag as a parameter only removes the
tag. If the tag is the only one for the image, both the image and the tag are
removed.

This does not remove images from a registry. You cannot remove an image of a
running container unless you use the `-f` option. To see all images on a host
use the [`docker image ls`](images.md) command.

## Examples

You can remove an image using its short or long ID, its tag, or its digest. If

@ -46,11 +57,11 @@ $ docker rmi fd484f19954f
Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force
2013/12/11 05:47:16 Error: failed to remove one or more images

$ docker rmi test1
$ docker rmi test1:latest

Untagged: test1:latest

$ docker rmi test2
$ docker rmi test2:latest

Untagged: test2:latest

@ -60,7 +71,7 @@ $ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)

$ docker rmi test
$ docker rmi test:latest

Untagged: test:latest
Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8

@ -418,7 +418,7 @@ $ docker run -l my-label --label com.example.foo=bar ubuntu bash
```

The `my-label` key doesn't specify a value so the label defaults to an empty
string(`""`). To add multiple labels, repeat the label flag (`-l` or `--label`).
string (`""`). To add multiple labels, repeat the label flag (`-l` or `--label`).

The `key=value` must be unique to avoid overwriting the label value. If you
specify labels with identical keys but different values, each subsequent value

@ -717,15 +717,15 @@ $ docker run -d --isolation default busybox top
On Windows, `--isolation` can take one of these values:

| Value | Description |
|:----------|:-------------------------------------------------------------------------------------------|
| `default` | Use the value specified by the Docker daemon's `--exec-opt` or system default (see below). |
| `process` | Shared-kernel namespace isolation (not supported on Windows client operating systems). |
| `hyperv` | Hyper-V hypervisor partition-based isolation. |
| Value | Description |
|:----------|:------------------------------------------------------------------------------------------------------------------|
| `default` | Use the value specified by the Docker daemon's `--exec-opt` or system default (see below). |
| `process` | Shared-kernel namespace isolation (not supported on Windows client operating systems older than Windows 10 1809). |
| `hyperv` | Hyper-V hypervisor partition-based isolation. |

The default isolation on Windows server operating systems is `process`. The default (and only supported)
The default isolation on Windows server operating systems is `process`. The default
isolation on Windows client operating systems is `hyperv`. An attempt to start a container on a client
operating system with `--isolation process` will fail.
operating system older than Windows 10 1809 with `--isolation process` will fail.

On Windows server, assuming the default configuration, these commands are equivalent
and result in `process` isolation:

@ -219,7 +219,7 @@ tutorial](https://docs.docker.com/engine/swarm/swarm-tutorial/rolling-update/).

### Set environment variables (-e, --env)

This sets an environmental variable for all tasks in a service. For example:
This sets an environment variable for all tasks in a service. For example:

```bash
$ docker service create \

@ -171,5 +171,5 @@ On Windows:
"table {{.ID}}\t{{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}"

> **Note**: On Docker 17.09 and older, the `{{.Container}}` column was used, in
> stead of `{{.ID}}\t{{.Name}}`.
> **Note**: On Docker 17.09 and older, the `{{.Container}}` column was used,
> instead of `{{.ID}}\t{{.Name}}`.

@ -1085,7 +1085,7 @@ per second from `/dev/sda`:

$ docker run -it --device-read-bps /dev/sda:1mb ubuntu

The `--device-write-bps` flag limits the write rate (bytes per second)to a device.
The `--device-write-bps` flag limits the write rate (bytes per second) to a device.
For example, this command creates a container and limits the write rate to `1mb`
per second for `/dev/sda`:
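
The command itself falls outside this hunk; a sketch mirroring the `--device-read-bps` example above would be:

```bash
$ docker run -it --device-write-bps /dev/sda:1mb ubuntu
```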

@ -1555,7 +1555,7 @@ The example below mounts an empty tmpfs into the container with the `rw`,
If neither 'rw' or 'ro' is specified then the volume is mounted in
read-write mode.

The `nocopy` modes is used to disable automatic copying requested volume
The `nocopy` mode is used to disable automatically copying the requested volume
path in the container to the volume storage location.
For named volumes, `copy` is the default mode. Copy modes are not supported
for bind-mounted volumes.
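
A hedged example of requesting the `nocopy` mode for a named volume (the volume and image names are placeholders):

```bash
$ docker run -v myvolume:/data:nocopy alpine true
```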

@ -19,9 +19,10 @@ const descriptionSourcePath = "docs/reference/commandline/"

func generateCliYaml(opts *options) error {
	stdin, stdout, stderr := term.StdStreams()
	dockerCli := command.NewDockerCli(stdin, stdout, stderr, false)
	dockerCli := command.NewDockerCli(stdin, stdout, stderr, false, nil)
	cmd := &cobra.Command{Use: "docker"}
	commands.AddCommands(cmd, dockerCli)
	disableFlagsInUseLine(cmd)
	source := filepath.Join(opts.source, descriptionSourcePath)
	if err := loadLongDescription(cmd, source); err != nil {
		return err

@ -31,6 +32,23 @@ func generateCliYaml(opts *options) error {
	return GenYamlTree(cmd, opts.target)
}

func disableFlagsInUseLine(cmd *cobra.Command) {
	visitAll(cmd, func(ccmd *cobra.Command) {
		// do not add a `[flags]` to the end of the usage line.
		ccmd.DisableFlagsInUseLine = true
	})
}

// visitAll will traverse all commands from the root.
// This is different from the VisitAll of cobra.Command where only parents
// are checked.
func visitAll(root *cobra.Command, fn func(*cobra.Command)) {
	for _, cmd := range root.Commands() {
		visitAll(cmd, fn)
	}
	fn(root)
}

func loadLongDescription(cmd *cobra.Command, path ...string) error {
	for _, cmd := range cmd.Commands() {
		if cmd.Name() == "" {
9
e2e/compose-env.connhelper-ssh.yaml
Normal file
@ -0,0 +1,9 @@
version: '2.1'

services:
  engine:
    build:
      context: ./testdata
      dockerfile: Dockerfile.connhelper-ssh
    environment:
      - TEST_CONNHELPER_SSH_ID_RSA_PUB

@ -106,7 +106,7 @@ func ensureBasicPluginBin() (string, error) {
	}
	installPath := filepath.Join(os.Getenv("GOPATH"), "bin", name)
	cmd := exec.Command(goBin, "build", "-o", installPath, "./basic")
	cmd.Env = append(cmd.Env, "CGO_ENABLED=0")
	cmd.Env = append(os.Environ(), "CGO_ENABLED=0")
	if out, err := cmd.CombinedOutput(); err != nil {
		return "", errors.Wrapf(err, "error building basic plugin bin: %s", string(out))
	}
12
e2e/testdata/Dockerfile.connhelper-ssh
vendored
Normal file
@ -0,0 +1,12 @@
FROM docker:test-dind
RUN apk --no-cache add shadow openssh-server && \
  groupadd -f docker && \
  useradd --create-home --shell /bin/sh --password $(head -c32 /dev/urandom | base64) penguin && \
  usermod -aG docker penguin && \
  ssh-keygen -A
# workaround: ssh session excludes /usr/local/bin from $PATH
RUN ln -s /usr/local/bin/docker /usr/bin/docker
COPY ./connhelper-ssh/entrypoint.sh /
EXPOSE 22
ENTRYPOINT ["/entrypoint.sh"]
# usage: docker run --privileged -e TEST_CONNHELPER_SSH_ID_RSA_PUB=$(cat ~/.ssh/id_rsa.pub) -p 22 $THIS_IMAGE
8
e2e/testdata/connhelper-ssh/entrypoint.sh
vendored
Executable file
@ -0,0 +1,8 @@
#!/bin/sh
set -ex
mkdir -m 0700 -p /home/penguin/.ssh
echo ${TEST_CONNHELPER_SSH_ID_RSA_PUB} > /home/penguin/.ssh/authorized_keys
chmod 0600 /home/penguin/.ssh/authorized_keys
chown -R penguin:penguin /home/penguin
/usr/sbin/sshd -E /var/log/sshd.log
exec dockerd-entrypoint.sh $@
@ -1,42 +0,0 @@
|
||||
package check
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/e2eengine"
|
||||
|
||||
"gotest.tools/icmd"
|
||||
)
|
||||
|
||||
func TestDockerEngineOnContainerdAltRootConfig(t *testing.T) {
|
||||
defer func() {
|
||||
err := e2eengine.CleanupEngine(t)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to cleanup engine: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
t.Log("First engine init")
|
||||
// First init
|
||||
result := icmd.RunCmd(icmd.Command("docker", "engine", "init", "--config-file", "/tmp/etc/docker/daemon.json"),
|
||||
func(c *icmd.Cmd) {
|
||||
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
|
||||
})
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "Success! The docker engine is now running.",
|
||||
Err: "",
|
||||
ExitCode: 0,
|
||||
})
|
||||
|
||||
// Make sure update doesn't blow up with alternate config path
|
||||
t.Log("perform update")
|
||||
// Now update and succeed
|
||||
targetVersion := os.Getenv("VERSION")
|
||||
result = icmd.RunCmd(icmd.Command("docker", "engine", "update", "--version", targetVersion))
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "Success! The docker engine is now running.",
|
||||
Err: "",
|
||||
ExitCode: 0,
|
||||
})
|
||||
}
|
||||
@ -1,14 +0,0 @@
|
||||
root = "/var/lib/containerd"
|
||||
state = "/run/containerd"
|
||||
oom_score = 0
|
||||
|
||||
[grpc]
|
||||
address = "/run/containerd/containerd.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
|
||||
[debug]
|
||||
address = "/run/containerd/debug.sock"
|
||||
uid = 0
|
||||
gid = 0
|
||||
level = "debug"
|
||||
@ -1,85 +0,0 @@
|
||||
package multi
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/e2eengine"
|
||||
|
||||
"gotest.tools/icmd"
|
||||
)
|
||||
|
||||
func TestDockerEngineOnContainerdMultiTest(t *testing.T) {
|
||||
defer func() {
|
||||
err := e2eengine.CleanupEngine(t)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to cleanup engine: %s", err)
|
||||
}
|
||||
}()
|
||||
|
||||
t.Log("Attempt engine init without experimental")
|
||||
// First init
|
||||
result := icmd.RunCmd(icmd.Command("docker", "engine", "init"),
|
||||
func(c *icmd.Cmd) {
|
||||
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=disabled")
|
||||
})
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "",
|
||||
Err: "docker engine init is only supported",
|
||||
ExitCode: 1,
|
||||
})
|
||||
|
||||
t.Log("First engine init")
|
||||
// First init
|
||||
result = icmd.RunCmd(icmd.Command("docker", "engine", "init"),
|
||||
func(c *icmd.Cmd) {
|
||||
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
|
||||
})
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "Success! The docker engine is now running.",
|
||||
Err: "",
|
||||
ExitCode: 0,
|
||||
})
|
||||
|
||||
t.Log("checking for updates")
|
||||
// Check for updates
|
||||
result = icmd.RunCmd(icmd.Command("docker", "engine", "check", "--downgrades", "--pre-releases"))
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "VERSION",
|
||||
Err: "",
|
||||
ExitCode: 0,
|
||||
})
|
||||
|
||||
t.Log("attempt second init (should fail)")
|
||||
// Attempt to init a second time and fail
|
||||
result = icmd.RunCmd(icmd.Command("docker", "engine", "init"),
|
||||
func(c *icmd.Cmd) {
|
||||
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
|
||||
})
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "",
|
||||
Err: "engine already present",
|
||||
ExitCode: 1,
|
||||
})
|
||||
|
||||
t.Log("perform update")
|
||||
// Now update and succeed
|
||||
targetVersion := os.Getenv("VERSION")
|
||||
result = icmd.RunCmd(icmd.Command("docker", "engine", "update", "--version", targetVersion))
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "Success! The docker engine is now running.",
|
||||
Err: "",
|
||||
ExitCode: 0,
|
||||
})
|
||||
|
||||
t.Log("remove engine")
|
||||
result = icmd.RunCmd(icmd.Command("docker", "engine", "rm"),
|
||||
func(c *icmd.Cmd) {
|
||||
c.Env = append(c.Env, "DOCKER_CLI_EXPERIMENTAL=enabled")
|
||||
})
|
||||
result.Assert(t, icmd.Expected{
|
||||
Out: "",
|
||||
Err: "",
|
||||
ExitCode: 0,
|
||||
})
|
||||
}
|
||||
@ -1,39 +0,0 @@
|
||||
package e2eengine
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/internal/containerizedengine"
|
||||
)
|
||||
|
||||
// CleanupEngine ensures the local engine has been removed between testcases
|
||||
func CleanupEngine(t *testing.T) error {
|
||||
t.Log("doing engine cleanup")
|
||||
ctx := context.Background()
|
||||
|
||||
client, err := containerizedengine.NewClient("")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// See if the engine exists first
|
||||
engine, err := client.GetEngine(ctx)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "not present") {
|
||||
t.Log("engine was not detected, no cleanup to perform")
|
||||
// Nothing to do, it's not defined
|
||||
return nil
|
||||
}
|
||||
t.Logf("failed to lookup engine: %s", err)
|
||||
// Any other error is not good...
|
||||
return err
|
||||
}
|
||||
// TODO Consider nuking the docker dir too so there's no cached content between test cases
|
||||
err = client.RemoveEngine(ctx, engine)
|
||||
if err != nil {
|
||||
t.Logf("Failed to remove engine: %s", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -1,12 +1,9 @@
|
||||
package containerizedengine
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
containerdtypes "github.com/containerd/containerd/api/types"
|
||||
"github.com/containerd/containerd/cio"
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/content"
|
||||
@ -14,7 +11,6 @@ import (
|
||||
prototypes "github.com/gogo/protobuf/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
type (
|
||||
@ -25,6 +21,8 @@ type (
|
||||
getImageFunc func(ctx context.Context, ref string) (containerd.Image, error)
|
||||
contentStoreFunc func() content.Store
|
||||
containerServiceFunc func() containers.Store
|
||||
installFunc func(context.Context, containerd.Image, ...containerd.InstallOpts) error
|
||||
versionFunc func(ctx context.Context) (containerd.Version, error)
|
||||
}
|
||||
fakeContainer struct {
|
||||
idFunc func() string
|
||||
@ -49,30 +47,6 @@ type (
|
||||
isUnpackedFunc func(context.Context, string) (bool, error)
|
||||
contentStoreFunc func() content.Store
|
||||
}
|
||||
fakeTask struct {
|
||||
idFunc func() string
|
||||
pidFunc func() uint32
|
||||
startFunc func(context.Context) error
|
||||
deleteFunc func(context.Context, ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error)
|
||||
killFunc func(context.Context, syscall.Signal, ...containerd.KillOpts) error
|
||||
waitFunc func(context.Context) (<-chan containerd.ExitStatus, error)
|
||||
closeIOFunc func(context.Context, ...containerd.IOCloserOpts) error
|
||||
resizeFunc func(ctx context.Context, w, h uint32) error
|
||||
ioFunc func() cio.IO
|
||||
statusFunc func(context.Context) (containerd.Status, error)
|
||||
pauseFunc func(context.Context) error
|
||||
resumeFunc func(context.Context) error
|
||||
execFunc func(context.Context, string, *specs.Process, cio.Creator) (containerd.Process, error)
|
||||
pidsFunc func(context.Context) ([]containerd.ProcessInfo, error)
|
||||
checkpointFunc func(context.Context, ...containerd.CheckpointTaskOpts) (containerd.Image, error)
|
||||
updateFunc func(context.Context, ...containerd.UpdateTaskOpts) error
|
||||
loadProcessFunc func(context.Context, string, cio.Attach) (containerd.Process, error)
|
||||
metricsFunc func(context.Context) (*containerdtypes.Metric, error)
|
||||
}
|
||||
|
||||
testOutStream struct {
|
||||
bytes.Buffer
|
||||
}
|
||||
)
|
||||
|
||||
func (w *fakeContainerdClient) Containers(ctx context.Context, filters ...string) ([]containerd.Container, error) {
|
||||
@ -114,6 +88,18 @@ func (w *fakeContainerdClient) ContainerService() containers.Store {
|
||||
func (w *fakeContainerdClient) Close() error {
|
||||
return nil
|
||||
}
|
||||
func (w *fakeContainerdClient) Install(ctx context.Context, image containerd.Image, args ...containerd.InstallOpts) error {
|
||||
if w.installFunc != nil {
|
||||
return w.installFunc(ctx, image, args...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (w *fakeContainerdClient) Version(ctx context.Context) (containerd.Version, error) {
|
||||
if w.versionFunc != nil {
|
||||
return w.versionFunc(ctx)
|
||||
}
|
||||
return containerd.Version{}, nil
|
||||
}
|
||||
|
||||
func (c *fakeContainer) ID() string {
|
||||
if c.idFunc != nil {
|
||||
@ -230,119 +216,3 @@ func (i *fakeImage) ContentStore() content.Store {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *fakeTask) ID() string {
|
||||
if t.idFunc != nil {
|
||||
return t.idFunc()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func (t *fakeTask) Pid() uint32 {
|
||||
if t.pidFunc != nil {
|
||||
return t.pidFunc()
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (t *fakeTask) Start(ctx context.Context) error {
|
||||
if t.startFunc != nil {
|
||||
return t.startFunc(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (t *fakeTask) Delete(ctx context.Context, opts ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error) {
|
||||
if t.deleteFunc != nil {
|
||||
return t.deleteFunc(ctx, opts...)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
func (t *fakeTask) Kill(ctx context.Context, signal syscall.Signal, opts ...containerd.KillOpts) error {
|
||||
if t.killFunc != nil {
|
||||
return t.killFunc(ctx, signal, opts...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (t *fakeTask) Wait(ctx context.Context) (<-chan containerd.ExitStatus, error) {
|
||||
if t.waitFunc != nil {
|
||||
return t.waitFunc(ctx)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
func (t *fakeTask) CloseIO(ctx context.Context, opts ...containerd.IOCloserOpts) error {
|
||||
if t.closeIOFunc != nil {
|
||||
return t.closeIOFunc(ctx, opts...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (t *fakeTask) Resize(ctx context.Context, w, h uint32) error {
|
||||
if t.resizeFunc != nil {
|
||||
return t.resizeFunc(ctx, w, h)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (t *fakeTask) IO() cio.IO {
|
||||
if t.ioFunc != nil {
|
||||
return t.ioFunc()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (t *fakeTask) Status(ctx context.Context) (containerd.Status, error) {
|
||||
if t.statusFunc != nil {
|
||||
return t.statusFunc(ctx)
|
||||
}
|
||||
return containerd.Status{}, nil
|
||||
}
|
||||
func (t *fakeTask) Pause(ctx context.Context) error {
|
||||
if t.pauseFunc != nil {
|
||||
return t.pauseFunc(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (t *fakeTask) Resume(ctx context.Context) error {
|
||||
if t.resumeFunc != nil {
|
||||
return t.resumeFunc(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (t *fakeTask) Exec(ctx context.Context, cmd string, proc *specs.Process, ioc cio.Creator) (containerd.Process, error) {
|
||||
if t.execFunc != nil {
|
||||
return t.execFunc(ctx, cmd, proc, ioc)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
func (t *fakeTask) Pids(ctx context.Context) ([]containerd.ProcessInfo, error) {
|
||||
if t.pidsFunc != nil {
|
||||
return t.pidsFunc(ctx)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
func (t *fakeTask) Checkpoint(ctx context.Context, opts ...containerd.CheckpointTaskOpts) (containerd.Image, error) {
|
||||
if t.checkpointFunc != nil {
|
||||
return t.checkpointFunc(ctx, opts...)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
func (t *fakeTask) Update(ctx context.Context, opts ...containerd.UpdateTaskOpts) error {
|
||||
if t.updateFunc != nil {
|
||||
return t.updateFunc(ctx, opts...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (t *fakeTask) LoadProcess(ctx context.Context, name string, attach cio.Attach) (containerd.Process, error) {
|
||||
if t.loadProcessFunc != nil {
|
||||
return t.loadProcessFunc(ctx, name, attach)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
func (t *fakeTask) Metrics(ctx context.Context) (*containerdtypes.Metric, error) {
|
||||
if t.metricsFunc != nil {
|
||||
return t.metricsFunc(ctx)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (o *testOutStream) FD() uintptr {
|
||||
return 0
|
||||
}
|
||||
func (o *testOutStream) IsTerminal() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.