Compare commits

..

55 Commits

Author SHA1 Message Date
67f9a3912c Merge pull request #1289 from andrewhsu/b
[18.06] vndr of buildkit to 98f1604
2018-08-14 16:54:37 +02:00
bb983ee75b vndr units to 6950e57
Signed-off-by: Andrew Hsu <andrewhsu@docker.com>
2018-08-14 06:22:59 +00:00
094ed67122 vndr containerd to b416337
Signed-off-by: Andrew Hsu <andrewhsu@docker.com>
2018-08-14 06:17:30 +00:00
946b13d2d9 vndr buildkit to 98f1604
Signed-off-by: Andrew Hsu <andrewhsu@docker.com>
2018-08-14 05:37:50 +00:00
2749843368 Merge pull request #1272 from thaJeztah/18.06-completion-updates
[18.06] backport various Bash completion script updates
2018-08-07 18:12:51 +02:00
c31d0ea160 Fix shellcheck warnings
Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit e587ec293b)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-04 01:42:58 +02:00
3c6ad54e95 Add bash completion for cp --archive
Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit b9b3754ad3)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-04 01:42:51 +02:00
b3f1fb5418 Remove outdated completion reviewers file
Clean maintainers and code owners files

Signed-off-by: Silvin Lubecki <silvin.lubecki@docker.com>
(cherry picked from commit 9022a00fbe)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-04 01:42:42 +02:00
37890ac58d Add bash completion for kubernetes orchestrator
Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit 08f8ee1320)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-04 01:42:35 +02:00
df806ee61c Only complete swarm specific options with orchestrator=swarm
Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit 8ef01e869e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-04 01:42:29 +02:00
ee7705035c Add support for orchestrator specific bash completions
Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit ff953751d3)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-04 01:42:22 +02:00
46d424b49b Add bash completion for service logs --details|--raw
Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit 4912846de2)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-04 01:42:13 +02:00
c2fa77357a Merge pull request #1229 from thaJeztah/18.06-backport-completion
[18.06] Added events filter scope for bash and zsh completion
2018-08-03 23:15:26 +02:00
3dbd9eaf78 Merge pull request #1267 from thaJeztah/18.06-backport-fix-stack-help-command
[18.06] backport "Fix help message flags on docker stack commands and sub-commands"
2018-08-03 23:07:31 +02:00
35d05c2de6 Merge pull request #1268 from thaJeztah/18.06-backport-1231-zsh-autocomplete-update-invalid
[18.06] backport "fixed typo breaking zsh docker update autocomplete"
2018-08-03 22:56:37 +02:00
911e86cb9f Fix help message flags on docker stack commands and sub-commands
PersistentPreRunE needs to be called within the help function to initialize all the flags (notably the orchestrator flag)
Add an e2e test as a regression test

Signed-off-by: Silvin Lubecki <silvin.lubecki@docker.com>
(cherry picked from commit 21cce52b30)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-03 19:27:31 +02:00
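The sketch below illustrates the approach this commit describes, using spf13/cobra: wrap the default help function so the root command's PersistentPreRunE runs (and initializes persistent flags such as the orchestrator flag) before help is printed. Command names and output are illustrative only, not the actual docker/cli patch.

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    func main() {
        root := &cobra.Command{
            Use: "docker",
            // In the real CLI this initializes client state and persistent
            // flags; here it only records that it ran.
            PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
                fmt.Println("persistent flags initialized")
                return nil
            },
        }
        defaultHelp := root.HelpFunc()
        // Run the root PersistentPreRunE before printing help, so flags set
        // up there are visible in `--help` output.
        root.SetHelpFunc(func(cmd *cobra.Command, args []string) {
            if err := cmd.Root().PersistentPreRunE(cmd, args); err != nil {
                fmt.Println(err)
                return
            }
            defaultHelp(cmd, args)
        })
        root.SetArgs([]string{"--help"})
        _ = root.Execute()
    }
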
050f334818 Update --compose-file flag description to mention stdin
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 2c7822b036)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-03 19:27:23 +02:00
2c9ca7465d Fixed typo breaking zsh docker update autocomplete
Signed-off-by: Tom Milligan <tommilligan@users.noreply.github.com>
(cherry picked from commit da00d1c49f)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-08-03 14:20:23 +02:00
11720451eb Merge pull request #1241 from thaJeztah/18.06-backport-1110-Fix-Warning-Message
[18.06] Fix: Warning Message
2018-07-30 15:46:24 +02:00
65a42c8236 Fix: Warning Message
Signed-off-by: Vimal-Raghubir <vraghubir0418@gmail.com>
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit ce3d069936)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-30 11:19:46 +02:00
ddad2f519a Added events filter scope zsh completion
Signed-off-by: Paweł Szczekutowicz <pszczekutowicz@gmail.com>
(cherry picked from commit 13db1bc95f)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-23 11:43:47 +02:00
4e6dbe6a5c Added events filter scope bash completion
Signed-off-by: Paweł Szczekutowicz <pszczekutowicz@gmail.com>
(cherry picked from commit c922ea2f45)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-23 11:43:40 +02:00
2cb2e84287 Merge pull request #1207 from thaJeztah/18.06-backport-improve-version-align
[18.06] Improve version output alignment
2018-07-18 08:22:13 -07:00
97dd580c86 Merge pull request #1214 from thaJeztah/18.06-docs-fixes
[18.06] documentation and man-page fixes
2018-07-18 08:10:21 -07:00
deacc39445 For docker/docker.github.io/issues/6987
Signed-off-by: Peter Kehl <peter.kehl@gmail.com>
(cherry picked from commit 249c8652a2)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-16 12:25:32 +02:00
e1ee48ab04 Fixing issue #1167 "Format example typo"
Signed-off-by: Silvin Lubecki <silvin.lubecki@docker.com>
(cherry picked from commit d0ddf91539)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-16 12:25:27 +02:00
254566169d Add: Add missing option
Signed-off-by: Vimal-Raghubir <vraghubir0418@gmail.com>
(cherry picked from commit a205aecb80)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-16 12:25:22 +02:00
9cb345caae Merge pull request #1211 from thaJeztah/18.06-backport-completion-service-create--init
[18.06] Add bash completion for `service create|update --init`
2018-07-13 14:10:55 +02:00
a92b4dc752 Add bash completion for service create|update --init
Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit 97d312e02a)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-13 11:05:14 +02:00
6f5a828403 Adapt min-column width to component information
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 0f7ae34ea9)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-12 11:37:51 +02:00
a33c562cf3 Extend version-align test with components
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 55ff66d967)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-12 11:37:45 +02:00
7178075fda Merge pull request #1205 from thaJeztah/18.06-bump-dockerd
[18.06] Bump engine to tip of 18.06 branch
2018-07-11 18:19:53 -07:00
744938f0b9 Bump engine to tip of 18.06 branch
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-12 02:53:31 +02:00
1d5e206fc2 Merge pull request #1199 from thaJeztah/18.06-backport-fix-swarm-ca-command
[18.06] Propagate the provided external CA certificate to the external CA object in swarm
2018-07-10 10:47:13 +02:00
31d6292458 Propagate the provided external CA certificate to the external CA object
in swarm.

Also, fix some CLI command confusions:
1. If the --external-ca flag is provided, require a --ca-cert flag as well, otherwise
   the external CA is set but the CA certificate is actually rotated to an internal
   cert
2. If a --ca-cert flag is provided, require a --ca-key or --external-ca flag be
   provided as well, otherwise either the server will say that the request is
   invalid, or if there was previously an external CA corresponding to the cert, it
   will succeed.  While that works, it's better to require the user to explicitly
   set all the parameters of the new desired root CA.

This also changes the `swarm update` function to set the external CA's CACert field,
which while not strictly necessary, makes the CA list more explicit.

Signed-off-by: Ying Li <ying.li@docker.com>
(cherry picked from commit 4243440e1f)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-10 01:32:52 +02:00
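A minimal Go sketch of the two flag rules described above, with hypothetical struct and function names; the real `docker swarm ca` command wires this through cobra flags rather than a plain struct.

    package main

    import (
        "errors"
        "fmt"
    )

    // caOptions stands in for the relevant `docker swarm ca` flags.
    type caOptions struct {
        caCert      string   // --ca-cert
        caKey       string   // --ca-key
        externalCAs []string // --external-ca
    }

    // validate encodes the rules from the commit message: an external CA needs
    // the matching certificate, and a certificate needs either its key or an
    // external CA able to sign with it.
    func validate(o caOptions) error {
        if len(o.externalCAs) > 0 && o.caCert == "" {
            return errors.New("--ca-cert is required when --external-ca is set")
        }
        if o.caCert != "" && o.caKey == "" && len(o.externalCAs) == 0 {
            return errors.New("--ca-cert requires either --ca-key or --external-ca")
        }
        return nil
    }

    func main() {
        fmt.Println(validate(caOptions{externalCAs: []string{"protocol=cfssl,url=https://ca.example.com"}}))
        fmt.Println(validate(caOptions{caCert: "ca.pem", caKey: "ca-key.pem"}))
    }
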
ebd85b10e2 Merge pull request #1190 from tiborvass/18.06-vndr
[18.06] vendor: docker/docker to docker/engine 0d029b0
2018-07-05 18:29:29 -07:00
8f5f3adf49 vendor: docker/docker moby/buildkit containerd/containerd containerd/console containerd/continuity opencontainers/runc
Signed-off-by: Tibor Vass <tibor@docker.com>
2018-07-05 18:23:07 +00:00
9fbab758a9 Merge pull request #1185 from tiborvass/18.06-cp
[18.06] build: --iidfile support with buildkit
2018-07-05 11:06:11 -07:00
b3d8fd5261 Merge pull request #1181 from thaJeztah/18.06-bump-kubernetes-1.8.14
[18.06] Bump kubernetes dependencies to 1.8.14
2018-07-05 08:47:14 -07:00
a7c8c474b9 Merge pull request #1174 from thaJeztah/18.06-backport-no-need-to-check-files
[18.06] Remove composefiles length check on k8s RunDeploy..
2018-07-05 16:35:54 +02:00
5f42140bab Merge pull request #1184 from thaJeztah/18.06-backport-credentials
[18.06] backport credential-helper fixes
2018-07-05 15:23:36 +02:00
ad80af43d0 Merge pull request #1187 from thaJeztah/18.06-buildkit-buildkit-envvar-zero
[18.06] build: use strconv.ParseBool to parse DOCKER_BUILDKIT to allow value "0"
2018-07-03 17:10:35 -07:00
359d5c8a76 build: use strconv.ParseBool to parse DOCKER_BUILDKIT to allow value "0"
Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit 721000e6c9)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-04 01:29:55 +02:00
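The same ParseBool logic appears in the BuildKitEnabled helper visible in one of the file diffs further down; the standalone sketch below shows why it matters: any value strconv.ParseBool accepts, including "0" and "false", can now disable BuildKit, whereas a simple non-empty check could not.

    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    // buildkitEnabled mirrors the shape of that helper: the daemon's default
    // applies unless DOCKER_BUILDKIT is set, in which case any value accepted
    // by strconv.ParseBool overrides it.
    func buildkitEnabled(daemonDefault bool) (bool, error) {
        enabled := daemonDefault
        if v := os.Getenv("DOCKER_BUILDKIT"); v != "" {
            var err error
            enabled, err = strconv.ParseBool(v)
            if err != nil {
                return false, fmt.Errorf("DOCKER_BUILDKIT environment variable expects boolean value: %v", err)
            }
        }
        return enabled, nil
    }

    func main() {
        os.Setenv("DOCKER_BUILDKIT", "0")
        fmt.Println(buildkitEnabled(true)) // false <nil>
    }
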
08479b0776 config/credentials: don't run 'pass' to detect it
'CheckInitialized' in the credential-helper library actually invokes
`pass`, which isn't desirable (see #699).

This simplifies the check, so `pass` is only invoked when it's actually
needed (such as for `docker login` or when pulling from a private
registry).

This logic could also reasonably live in the credential-helper library,
but it's simple enough it seems fine in either location.

Signed-off-by: Euan Kemp <euank@euank.com>
(cherry picked from commit 056015c3d8)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-04 00:51:28 +02:00
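A minimal illustration of the idea (an assumption about the shape of the fix, not the exact docker/cli code): check that the helper binary exists on PATH without executing it, and defer any actual invocation of `pass` until credentials are really read or written.

    package main

    import (
        "fmt"
        "os/exec"
    )

    // helperAvailable reports whether a credential-helper binary is on PATH
    // without invoking it, so simply starting the CLI has no side effects.
    func helperAvailable(binary string) bool {
        _, err := exec.LookPath(binary)
        return err == nil
    }

    func main() {
        // `pass` itself (and its GPG setup) is only exercised later, when
        // credentials are actually needed, e.g. during `docker login`.
        fmt.Println("pass available:", helperAvailable("pass"))
    }
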
00affb1dd5 Update docker-credential-helpers dependency
This is mainly for the `pass` helper; `pass` shouldn't be called on
every docker command anymore.

Signed-off-by: Vincent Demeester <vincent@sbr.pm>
(cherry picked from commit d9741fc96b)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-04 00:51:20 +02:00
0627568d60 Merge pull request #1183 from andrewhsu/manifest
[18.06] Fix manifest lists to always use correct size
2018-07-04 00:46:50 +02:00
aeceff447c build: --iidfile support with buildkit
Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit c7e85c09d2)
Signed-off-by: Tibor Vass <tibor@docker.com>
2018-07-03 22:44:22 +00:00
15695813a4 Fix manifest lists to always use correct size
Stores complete OCI descriptor instead of digest and platform
fields. This includes the size which was getting lost by not
storing the original manifest bytes.

Attempt to support existing cached files; if that isn't possible, output
the filename with the incorrect content.

Signed-off-by: Derek McGowan <derek@mcgstyle.net>
(cherry picked from commit 1fd2d66df8)
Signed-off-by: Andrew Hsu <andrewhsu@docker.com>
2018-07-03 22:23:13 +00:00
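A short sketch, using the OCI image-spec Go types, of what storing the complete descriptor buys: media type, digest, size, and platform travel together, so the size is not lost when the manifest is cached. The manifest-store code in docker/cli is more involved; this only shows the data shape.

    package main

    import (
        "fmt"

        "github.com/opencontainers/go-digest"
        ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
        raw := []byte(`{"schemaVersion":2}`) // stand-in for the original manifest bytes
        // A full descriptor keeps the size alongside digest and platform,
        // instead of only the digest and platform fields.
        desc := ocispec.Descriptor{
            MediaType: ocispec.MediaTypeImageManifest,
            Digest:    digest.FromBytes(raw),
            Size:      int64(len(raw)),
            Platform:  &ocispec.Platform{OS: "linux", Architecture: "amd64"},
        }
        fmt.Printf("%s %s %d bytes\n", desc.MediaType, desc.Digest, desc.Size)
    }
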
2ea275157d Bump kubernetes dependencies to 1.8.14
Signed-off-by: Vincent Demeester <vincent@sbr.pm>
(cherry picked from commit b59c41b2a7)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-03 23:09:21 +02:00
c6c52ae29a Merge pull request #1173 from thaJeztah/18.06-backport-bash-completion
[18.06] backport bash completion enhancements
2018-07-02 20:26:58 +02:00
8600d82bd7 Remove composefiles length check on k8s RunDeploy..
The compose file(s) are already loaded at that point.

Signed-off-by: Vincent Demeester <vincent@sbr.pm>
(cherry picked from commit 847e0c22d4)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-02 15:58:52 +02:00
74b30e7d58 Add bash completion for exec_die event
Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit 8443982188)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-02 15:12:31 +02:00
547b9a4aba Add bash completion for dockerd --default-address-pool
Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit 0e6d9dfe85)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-07-02 15:12:25 +02:00
9bea855d07 Merge pull request #1165 from thaJeztah/18.06-update_go_winio
[18.06] Updated the go-winio library to release 0.4.8 that has the fix for Windows containers
2018-07-02 14:38:18 +02:00
fe4d20bbb3 Updated the go-winio library to release 0.4.8 that has the fix for Windows containers
Signed-off-by: Tejaswini Duggaraju <naduggar@microsoft.com>
(cherry picked from commit c98c4080a3)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-06-30 00:09:18 +02:00
1818 changed files with 67410 additions and 233875 deletions

View File

@@ -46,6 +46,10 @@ binary-osx: ## build executable for macOS
dynbinary: ## build dynamically linked binary
./scripts/build/dynbinary
.PHONY: watch
watch: ## monitor file changes and run go test
./scripts/test/watch
vendor: vendor.conf ## check that vendor matches vendor.conf
rm -rf vendor
bash -c 'vndr |& grep -v -i clone'

View File

@@ -26,11 +26,13 @@ Test<Function Name><Test Case Name>
where appropriate, but may not be appropriate in all cases.
Assertions should be made using
[gotest.tools/assert](https://godoc.org/gotest.tools/assert).
[testify/assert](https://godoc.org/github.com/stretchr/testify/assert) and test
requirements should be verified using
[testify/require](https://godoc.org/github.com/stretchr/testify/require).
Fakes, and testing utilities can be found in
[internal/test](https://godoc.org/github.com/docker/cli/internal/test) and
[gotest.tools](https://godoc.org/gotest.tools).
[gotestyourself](https://godoc.org/github.com/gotestyourself/gotestyourself).
## End-to-End Test Suite

View File

@@ -1 +1 @@
18.09.0-dev
18.06.0-dev

View File

@@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\docker\cli
environment:
GOPATH: c:\gopath
GOVERSION: 1.10.5
GOVERSION: 1.10.3
DEPVERSION: v0.4.1
install:

View File

@@ -4,11 +4,10 @@ jobs:
lint:
working_directory: /work
docker: [{image: 'docker:18.03-git'}]
docker: [{image: 'docker:17.06-git'}]
steps:
- checkout
- setup_remote_docker:
version: 18.03.1-ce
reusable: true
exclusive: false
- run:
@@ -23,12 +22,11 @@ jobs:
cross:
working_directory: /work
docker: [{image: 'docker:18.03-git'}]
docker: [{image: 'docker:17.06-git'}]
parallelism: 3
steps:
- checkout
- setup_remote_docker:
version: 18.03.1-ce
reusable: true
exclusive: false
- run:
@@ -50,11 +48,10 @@ jobs:
test:
working_directory: /work
docker: [{image: 'docker:18.03-git'}]
docker: [{image: 'docker:17.06-git'}]
steps:
- checkout
- setup_remote_docker:
version: 18.03.1-ce
reusable: true
exclusive: false
- run:
@@ -79,11 +76,10 @@ jobs:
validate:
working_directory: /work
docker: [{image: 'docker:18.03-git'}]
docker: [{image: 'docker:17.06-git'}]
steps:
- checkout
- setup_remote_docker:
version: 18.03.1-ce
reusable: true
exclusive: false
- run:
@@ -97,13 +93,10 @@ jobs:
make ci-validate
shellcheck:
working_directory: /work
docker: [{image: 'docker:18.03-git'}]
docker: [{image: 'docker:17.06-git'}]
steps:
- checkout
- setup_remote_docker:
version: 18.03.1-ce
reusable: true
exclusive: false
- setup_remote_docker
- run:
name: "Run shellcheck"
command: |

View File

@@ -1,22 +0,0 @@
package builder
import (
"github.com/spf13/cobra"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
)
// NewBuilderCommand returns a cobra command for `builder` subcommands
func NewBuilderCommand(dockerCli command.Cli) *cobra.Command {
cmd := &cobra.Command{
Use: "builder",
Short: "Manage builds",
Args: cli.NoArgs,
RunE: command.ShowHelp(dockerCli.Err()),
}
cmd.AddCommand(
NewPruneCommand(dockerCli),
)
return cmd
}

View File

@@ -1,96 +0,0 @@
package builder
import (
"context"
"fmt"
"strings"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
units "github.com/docker/go-units"
"github.com/spf13/cobra"
)
type pruneOptions struct {
force bool
all bool
filter opts.FilterOpt
keepStorage opts.MemBytes
}
// NewPruneCommand returns a new cobra prune command for images
func NewPruneCommand(dockerCli command.Cli) *cobra.Command {
options := pruneOptions{filter: opts.NewFilterOpt()}
cmd := &cobra.Command{
Use: "prune",
Short: "Remove build cache",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
spaceReclaimed, output, err := runPrune(dockerCli, options)
if err != nil {
return err
}
if output != "" {
fmt.Fprintln(dockerCli.Out(), output)
}
fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed)))
return nil
},
Annotations: map[string]string{"version": "1.39"},
}
flags := cmd.Flags()
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'unused-for=24h')")
flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
return cmd
}
const (
normalWarning = `WARNING! This will remove all dangling build cache. Are you sure you want to continue?`
allCacheWarning = `WARNING! This will remove all build cache. Are you sure you want to continue?`
)
func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {
pruneFilters := options.filter.Value()
pruneFilters = command.PruneFilters(dockerCli, pruneFilters)
warning := normalWarning
if options.all {
warning = allCacheWarning
}
if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
return 0, "", nil
}
report, err := dockerCli.Client().BuildCachePrune(context.Background(), types.BuildCachePruneOptions{
All: options.all,
KeepStorage: options.keepStorage.Value(),
Filters: pruneFilters,
})
if err != nil {
return 0, "", err
}
if len(report.CachesDeleted) > 0 {
var sb strings.Builder
sb.WriteString("Deleted build cache objects:\n")
for _, id := range report.CachesDeleted {
sb.WriteString(id)
sb.WriteByte('\n')
}
output = sb.String()
}
return report.SpaceReclaimed, output, nil
}
// CachePrune executes a prune command for build cache
func CachePrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
return runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter})
}

View File

@@ -8,20 +8,17 @@ import (
"os"
"path/filepath"
"runtime"
"strconv"
"time"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/config"
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/connhelper"
cliflags "github.com/docker/cli/cli/flags"
manifeststore "github.com/docker/cli/cli/manifest/store"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/cli/cli/trust"
dopts "github.com/docker/cli/opts"
clitypes "github.com/docker/cli/types"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
@@ -56,21 +53,19 @@ type Cli interface {
ManifestStore() manifeststore.Store
RegistryClient(bool) registryclient.RegistryClient
ContentTrustEnabled() bool
NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error)
}
// DockerCli is an instance the docker command line client.
// Instances of the client can be returned from NewDockerCli.
type DockerCli struct {
configFile *configfile.ConfigFile
in *InStream
out *OutStream
err io.Writer
client client.APIClient
serverInfo ServerInfo
clientInfo ClientInfo
contentTrust bool
newContainerizeClient func(string) (clitypes.ContainerizedClient, error)
configFile *configfile.ConfigFile
in *InStream
out *OutStream
err io.Writer
client client.APIClient
serverInfo ServerInfo
clientInfo ClientInfo
contentTrust bool
}
// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified.
@@ -134,20 +129,6 @@ func (cli *DockerCli) ContentTrustEnabled() bool {
return cli.contentTrust
}
// BuildKitEnabled returns whether buildkit is enabled either through a daemon setting
// or otherwise the client-side DOCKER_BUILDKIT environment variable
func BuildKitEnabled(si ServerInfo) (bool, error) {
buildkitEnabled := si.BuildkitVersion == types.BuilderBuildKit
if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" {
var err error
buildkitEnabled, err = strconv.ParseBool(buildkitEnv)
if err != nil {
return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
}
}
return buildkitEnabled, nil
}
// ManifestStore returns a store for local manifests
func (cli *DockerCli) ManifestStore() manifeststore.Store {
// TODO: support override default location from config file
@@ -224,7 +205,6 @@ func (cli *DockerCli) initializeFromClient() {
cli.serverInfo = ServerInfo{
HasExperimental: ping.Experimental,
OSType: ping.OSType,
BuildkitVersion: ping.BuilderVersion,
}
cli.client.NegotiateAPIVersionPing(ping)
}
@@ -248,17 +228,11 @@ func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions
return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
}
// NewContainerizedEngineClient returns a containerized engine client
func (cli *DockerCli) NewContainerizedEngineClient(sockPath string) (clitypes.ContainerizedClient, error) {
return cli.newContainerizeClient(sockPath)
}
// ServerInfo stores details about the supported features and platform of the
// server
type ServerInfo struct {
HasExperimental bool
OSType string
BuildkitVersion types.BuilderVersion
}
// ClientInfo stores details about the supported features of the client
@@ -268,8 +242,8 @@ type ClientInfo struct {
}
// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
func NewDockerCli(in io.ReadCloser, out, err io.Writer, isTrusted bool, containerizedFn func(string) (clitypes.ContainerizedClient, error)) *DockerCli {
return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err, contentTrust: isTrusted, newContainerizeClient: containerizedFn}
func NewDockerCli(in io.ReadCloser, out, err io.Writer, isTrusted bool) *DockerCli {
return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err, contentTrust: isTrusted}
}
// NewAPIClientFromFlags creates a new APIClient from command line flags
@@ -278,43 +252,24 @@ func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.
if err != nil {
return &client.Client{}, err
}
var clientOpts []func(*client.Client) error
helper, err := connhelper.GetConnectionHelper(host)
if err != nil {
return &client.Client{}, err
}
if helper == nil {
clientOpts = append(clientOpts, withHTTPClient(opts.TLSOptions))
clientOpts = append(clientOpts, client.WithHost(host))
} else {
clientOpts = append(clientOpts, func(c *client.Client) error {
httpClient := &http.Client{
// No tls
// No proxy
Transport: &http.Transport{
DialContext: helper.Dialer,
},
}
return client.WithHTTPClient(httpClient)(c)
})
clientOpts = append(clientOpts, client.WithHost(helper.Host))
clientOpts = append(clientOpts, client.WithDialContext(helper.Dialer))
}
customHeaders := configFile.HTTPHeaders
if customHeaders == nil {
customHeaders = map[string]string{}
}
customHeaders["User-Agent"] = UserAgent()
clientOpts = append(clientOpts, client.WithHTTPHeaders(customHeaders))
verStr := api.DefaultVersion
if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" {
verStr = tmpStr
}
clientOpts = append(clientOpts, client.WithVersion(verStr))
return client.NewClientWithOpts(clientOpts...)
return client.NewClientWithOpts(
withHTTPClient(opts.TLSOptions),
client.WithHTTPHeaders(customHeaders),
client.WithVersion(verStr),
client.WithHost(host),
)
}
func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) {

View File

@@ -43,26 +43,6 @@ func TestNewAPIClientFromFlags(t *testing.T) {
assert.Check(t, is.Equal(api.DefaultVersion, apiclient.ClientVersion()))
}
func TestNewAPIClientFromFlagsForDefaultSchema(t *testing.T) {
host := ":2375"
opts := &flags.CommonOptions{Hosts: []string{host}}
configFile := &configfile.ConfigFile{
HTTPHeaders: map[string]string{
"My-Header": "Custom-Value",
},
}
apiclient, err := NewAPIClientFromFlags(opts, configFile)
assert.NilError(t, err)
assert.Check(t, is.Equal("tcp://localhost"+host, apiclient.DaemonHost()))
expectedHeaders := map[string]string{
"My-Header": "Custom-Value",
"User-Agent": UserAgent(),
}
assert.Check(t, is.DeepEqual(expectedHeaders, apiclient.(*client.Client).CustomHTTPHeaders()))
assert.Check(t, is.Equal(api.DefaultVersion, apiclient.ClientVersion()))
}
func TestNewAPIClientFromFlagsWithAPIVersionFromEnv(t *testing.T) {
customVersion := "v3.3.3"
defer env.Patch(t, "DOCKER_API_VERSION", customVersion)()

View File

@@ -2,14 +2,11 @@ package commands
import (
"os"
"runtime"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/builder"
"github.com/docker/cli/cli/command/checkpoint"
"github.com/docker/cli/cli/command/config"
"github.com/docker/cli/cli/command/container"
"github.com/docker/cli/cli/command/engine"
"github.com/docker/cli/cli/command/image"
"github.com/docker/cli/cli/command/manifest"
"github.com/docker/cli/cli/command/network"
@@ -43,9 +40,6 @@ func AddCommands(cmd *cobra.Command, dockerCli command.Cli) {
image.NewImageCommand(dockerCli),
image.NewBuildCommand(dockerCli),
// builder
builder.NewBuilderCommand(dockerCli),
// manifest
manifest.NewManifestCommand(dockerCli),
@@ -122,10 +116,7 @@ func AddCommands(cmd *cobra.Command, dockerCli command.Cli) {
hide(image.NewSaveCommand(dockerCli)),
hide(image.NewTagCommand(dockerCli)),
)
if runtime.GOOS == "linux" {
// engine
cmd.AddCommand(engine.NewEngineCommand(dockerCli))
}
}
func hide(cmd *cobra.Command) *cobra.Command {

View File

@@ -9,10 +9,19 @@ import (
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
"github.com/spf13/cobra"
"vbom.ml/util/sortorder"
)
type byConfigName []swarm.Config
func (r byConfigName) Len() int { return len(r) }
func (r byConfigName) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r byConfigName) Less(i, j int) bool {
return sortorder.NaturalLess(r[i].Spec.Name, r[j].Spec.Name)
}
type listOptions struct {
quiet bool
format string
@@ -58,9 +67,7 @@ func runConfigList(dockerCli command.Cli, options listOptions) error {
}
}
sort.Slice(configs, func(i, j int) bool {
return sortorder.NaturalLess(configs[i].Spec.Name, configs[j].Spec.Name)
})
sort.Sort(byConfigName(configs))
configCtx := formatter.Context{
Output: dockerCli.Out(),

View File

@@ -370,24 +370,9 @@ func parse(flags *pflag.FlagSet, copts *containerOptions) (*containerConfig, err
entrypoint = []string{""}
}
publishOpts := copts.publish.GetAll()
var ports map[nat.Port]struct{}
var portBindings map[nat.Port][]nat.PortBinding
ports, portBindings, err = nat.ParsePortSpecs(publishOpts)
// If simple port parsing fails try to parse as long format
ports, portBindings, err := nat.ParsePortSpecs(copts.publish.GetAll())
if err != nil {
publishOpts, err = parsePortOpts(publishOpts)
if err != nil {
return nil, err
}
ports, portBindings, err = nat.ParsePortSpecs(publishOpts)
if err != nil {
return nil, err
}
return nil, err
}
// Merge in exposed ports to the map of published ports
@@ -676,23 +661,6 @@ func parse(flags *pflag.FlagSet, copts *containerOptions) (*containerConfig, err
}, nil
}
func parsePortOpts(publishOpts []string) ([]string, error) {
optsList := []string{}
for _, publish := range publishOpts {
params := map[string]string{"protocol": "tcp"}
for _, param := range strings.Split(publish, ",") {
opt := strings.Split(param, "=")
if len(opt) < 2 {
return optsList, errors.Errorf("invalid publish opts format (should be name=value but got '%s')", param)
}
params[opt[0]] = opt[1]
}
optsList = append(optsList, fmt.Sprintf("%s:%s/%s", params["target"], params["published"], params["protocol"]))
}
return optsList, nil
}
func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) {
loggingOptsMap := opts.ConvertKVStringsToMap(loggingOpts)
if loggingDriver == "none" && len(loggingOpts) > 0 {

View File

@@ -42,6 +42,7 @@ func TestValidateAttach(t *testing.T) {
}
}
// nolint: unparam
func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) {
flags, copts := setupRunFlags()
if err := flags.Parse(args); err != nil {

View File

@@ -73,6 +73,6 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint6
// RunPrune calls the Container Prune API
// This returns the amount of space reclaimed and a detailed output string
func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
func RunPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) {
return runPrune(dockerCli, pruneOptions{force: true, filter: filter})
}

View File

@@ -1,209 +0,0 @@
package engine
import (
"context"
"fmt"
"strings"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/cli/internal/licenseutils"
clitypes "github.com/docker/cli/types"
"github.com/docker/docker/api/types"
"github.com/docker/licensing/model"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type activateOptions struct {
licenseFile string
version string
registryPrefix string
format string
image string
quiet bool
displayOnly bool
sockPath string
licenseLoginFunc func(ctx context.Context, authConfig *types.AuthConfig) (licenseutils.HubUser, error)
}
// newActivateCommand creates a new `docker engine activate` command
func newActivateCommand(dockerCli command.Cli) *cobra.Command {
var options activateOptions
options.licenseLoginFunc = licenseutils.Login
cmd := &cobra.Command{
Use: "activate [OPTIONS]",
Short: "Activate Enterprise Edition",
Long: `Activate Enterprise Edition.
With this command you may apply an existing Docker enterprise license, or
interactively download one from Docker. In the interactive exchange, you can
sign up for a new trial, or download an existing license. If you are
currently running a Community Edition engine, the daemon will be updated to
the Enterprise Edition Docker engine with additional capabilities and long
term support.
For more information about different Docker Enterprise license types visit
https://www.docker.com/licenses
For non-interactive scriptable deployments, download your license from
https://hub.docker.com/ then specify the file with the '--license' flag.
`,
RunE: func(cmd *cobra.Command, args []string) error {
return runActivate(dockerCli, options)
},
}
flags := cmd.Flags()
flags.StringVar(&options.licenseFile, "license", "", "License File")
flags.StringVar(&options.version, "version", "", "Specify engine version (default is to use currently running version)")
flags.StringVar(&options.registryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the default location where engine images are pulled")
flags.StringVar(&options.image, "engine-image", "", "Specify engine image")
flags.StringVar(&options.format, "format", "", "Pretty-print licenses using a Go template")
flags.BoolVar(&options.displayOnly, "display-only", false, "only display license information and exit")
flags.BoolVar(&options.quiet, "quiet", false, "Only display available licenses by ID")
flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")
return cmd
}
func runActivate(cli command.Cli, options activateOptions) error {
if !isRoot() {
return errors.New("this command must be run as a privileged user")
}
ctx := context.Background()
client, err := cli.NewContainerizedEngineClient(options.sockPath)
if err != nil {
return errors.Wrap(err, "unable to access local containerd")
}
defer client.Close()
authConfig, err := getRegistryAuth(cli, options.registryPrefix)
if err != nil {
return err
}
var license *model.IssuedLicense
// Lookup on hub if no license provided via params
if options.licenseFile == "" {
if license, err = getLicenses(ctx, authConfig, cli, options); err != nil {
return err
}
if options.displayOnly {
return nil
}
} else {
if license, err = licenseutils.LoadLocalIssuedLicense(ctx, options.licenseFile); err != nil {
return err
}
}
summary, err := licenseutils.GetLicenseSummary(ctx, *license)
if err != nil {
return err
}
fmt.Fprintf(cli.Out(), "License: %s\n", summary)
if options.displayOnly {
return nil
}
dclient := cli.Client()
if err = licenseutils.ApplyLicense(ctx, dclient, license); err != nil {
return err
}
// Short circuit if the user didn't specify a version and we're already running enterprise
if options.version == "" {
serverVersion, err := dclient.ServerVersion(ctx)
if err != nil {
return err
}
if strings.Contains(strings.ToLower(serverVersion.Platform.Name), "enterprise") {
fmt.Fprintln(cli.Out(), "Successfully activated engine license on existing enterprise engine.")
return nil
}
options.version = serverVersion.Version
}
opts := clitypes.EngineInitOptions{
RegistryPrefix: options.registryPrefix,
EngineImage: options.image,
EngineVersion: options.version,
}
if err := client.ActivateEngine(ctx, opts, cli.Out(), authConfig); err != nil {
return err
}
fmt.Fprintln(cli.Out(), `Successfully activated engine.
Restart docker with 'systemctl restart docker' to complete the activation.`)
return nil
}
func getLicenses(ctx context.Context, authConfig *types.AuthConfig, cli command.Cli, options activateOptions) (*model.IssuedLicense, error) {
user, err := options.licenseLoginFunc(ctx, authConfig)
if err != nil {
return nil, err
}
fmt.Fprintf(cli.Out(), "Looking for existing licenses for %s...\n", user.User.Username)
subs, err := user.GetAvailableLicenses(ctx)
if err != nil {
return nil, err
}
if len(subs) == 0 {
return doTrialFlow(ctx, cli, user)
}
format := options.format
if len(format) == 0 {
format = formatter.TableFormatKey
}
updatesCtx := formatter.Context{
Output: cli.Out(),
Format: formatter.NewSubscriptionsFormat(format, options.quiet),
Trunc: false,
}
if err := formatter.SubscriptionsWrite(updatesCtx, subs); err != nil {
return nil, err
}
if options.displayOnly {
return nil, nil
}
fmt.Fprintf(cli.Out(), "Please pick a license by number: ")
var num int
if _, err := fmt.Fscan(cli.In(), &num); err != nil {
return nil, errors.Wrap(err, "failed to read user input")
}
if num < 0 || num >= len(subs) {
return nil, fmt.Errorf("invalid choice")
}
return user.GetIssuedLicense(ctx, subs[num].ID)
}
func doTrialFlow(ctx context.Context, cli command.Cli, user licenseutils.HubUser) (*model.IssuedLicense, error) {
if !command.PromptForConfirmation(cli.In(), cli.Out(),
"No existing licenses found, would you like to set up a new Enterprise Basic Trial license?") {
return nil, fmt.Errorf("you must have an existing enterprise license or generate a new trial to use the Enterprise Docker Engine")
}
targetID := user.User.ID
// If the user is a member of any organizations, allow trials generated against them
if len(user.Orgs) > 0 {
fmt.Fprintf(cli.Out(), "%d\t%s\n", 0, user.User.Username)
for i, org := range user.Orgs {
fmt.Fprintf(cli.Out(), "%d\t%s\n", i+1, org.Orgname)
}
fmt.Fprintf(cli.Out(), "Please choose an account to generate the trial in:")
var num int
if _, err := fmt.Fscan(cli.In(), &num); err != nil {
return nil, errors.Wrap(err, "failed to read user input")
}
if num < 0 || num > len(user.Orgs) {
return nil, fmt.Errorf("invalid choice")
}
if num > 0 {
targetID = user.Orgs[num-1].ID
}
}
return user.GenerateTrialLicense(ctx, targetID)
}

View File

@@ -1,146 +0,0 @@
package engine
import (
"context"
"fmt"
"testing"
"time"
"github.com/docker/cli/internal/licenseutils"
"github.com/docker/cli/internal/test"
clitypes "github.com/docker/cli/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/docker/licensing"
"github.com/docker/licensing/model"
"gotest.tools/assert"
"gotest.tools/fs"
"gotest.tools/golden"
)
const (
// nolint: lll
expiredLicense = `{"key_id":"irlYm3b9fdD8hMUXjazF39im7VQSSbAm9tfHK8cKUxJt","private_key":"aH5tTRDAVJpCRS2CRetTQVXIKgWUPfoCHODhDvNPvAbz","authorization":"ewogICAicGF5bG9hZCI6ICJleUpsZUhCcGNtRjBhVzl1SWpvaU1qQXhPQzB3TXkweE9GUXdOem93TURvd01Gb2lMQ0owYjJ0bGJpSTZJbkZtTVMxMlVtRmtialp5YjFaMldXdHJlVXN4VFdKMGNGUmpXR1ozVjA4MVRWZFFTM2cwUnpJd2NIYzlJaXdpYldGNFJXNW5hVzVsY3lJNk1Td2ljMk5oYm01cGJtZEZibUZpYkdWa0lqcDBjblZsTENKc2FXTmxibk5sVkhsd1pTSTZJazltWm14cGJtVWlMQ0owYVdWeUlqb2lVSEp2WkhWamRHbHZiaUo5IiwKICAgInNpZ25hdHVyZXMiOiBbCiAgICAgIHsKICAgICAgICAgImhlYWRlciI6IHsKICAgICAgICAgICAgImp3ayI6IHsKICAgICAgICAgICAgICAgImUiOiAiQVFBQiIsCiAgICAgICAgICAgICAgICJrZXlJRCI6ICJKN0xEOjY3VlI6TDVIWjpVN0JBOjJPNEc6NEFMMzpPRjJOOkpIR0I6RUZUSDo1Q1ZROk1GRU86QUVJVCIsCiAgICAgICAgICAgICAgICJraWQiOiAiSjdMRDo2N1ZSOkw1SFo6VTdCQToyTzRHOjRBTDM6T0YyTjpKSEdCOkVGVEg6NUNWUTpNRkVPOkFFSVQiLAogICAgICAgICAgICAgICAia3R5IjogIlJTQSIsCiAgICAgICAgICAgICAgICJuIjogInlkSXktbFU3bzdQY2VZLTQtcy1DUTVPRWdDeUY4Q3hJY1FJV3VLODRwSWlaY2lZNjczMHlDWW53TFNLVGx3LVU2VUNfUVJlV1Jpb01OTkU1RHM1VFlFWGJHRzZvbG0ycWRXYkJ3Y0NnLTJVVUhfT2NCOVd1UDZnUlBIcE1GTXN4RHpXd3ZheThKVXVIZ1lVTFVwbTFJdi1tcTdscDVuUV9SeHJUMEtaUkFRVFlMRU1FZkd3bTNoTU9fZ2VMUFMtaGdLUHRJSGxrZzZfV2NveFRHb0tQNzlkX3dhSFl4R05sN1doU25laUJTeGJwYlFBS2syMWxnNzk4WGI3dlp5RUFURE1yUlI5TWVFNkFkajVISnBZM0NveVJBUENtYUtHUkNLNHVvWlNvSXUwaEZWbEtVUHliYncwMDBHTy13YTJLTjhVd2dJSW0waTVJMXVXOUdrcTR6akJ5NXpoZ3F1VVhiRzliV1BBT1lycTVRYTgxRHhHY0JsSnlIWUFwLUREUEU5VEdnNHpZbVhqSm54WnFIRWR1R3FkZXZaOFhNSTB1a2ZrR0lJMTR3VU9pTUlJSXJYbEVjQmZfNDZJOGdRV0R6eHljWmVfSkdYLUxBdWF5WHJ5clVGZWhWTlVkWlVsOXdYTmFKQi1rYUNxejVRd2FSOTNzR3ctUVNmdEQwTnZMZTdDeU9ILUU2dmc2U3RfTmVUdmd2OFluaENpWElsWjhIT2ZJd05lN3RFRl9VY3o1T2JQeWttM3R5bHJOVWp0MFZ5QW10dGFjVkkyaUdpaGNVUHJtazRsVklaN1ZEX0xTVy1pN3lvU3VydHBzUFhjZTJwS0RJbzMwbEpHaE9fM0tVbWwyU1VaQ3F6SjF5RW1LcHlzSDVIRFc5Y3NJRkNBM2RlQWpmWlV2TjdVIgogICAgICAgICAgICB9LAogICAgICAgICAgICAiYWxnIjogIlJTMjU2IgogICAgICAgICB9LAogICAgICAgICAic2lnbmF0dXJlIjogIm5saTZIdzRrbW5KcTBSUmRXaGVfbkhZS2VJLVpKenM1U0d5SUpDakh1dWtnVzhBYklpVzFZYWJJR2NqWUt0QTY4dWN6T1hyUXZreGxWQXJLSlgzMDJzN0RpbzcxTlNPRzJVcnhsSjlibDFpd0F3a3ZyTEQ2T0p5MGxGLVg4WnRabXhPVmNQZmwzcmJwZFQ0dnlnWTdNcU1QRXdmb0IxTmlWZDYyZ1cxU2NSREZZcWw3R0FVaFVKNkp4QU15VzVaOXl5YVE0NV8wd0RMUk5mRjA5YWNXeVowTjRxVS1hZjhrUTZUUWZUX05ERzNCR3pRb2V3cHlEajRiMFBHb0diOFhLdDlwekpFdEdxM3lQM25VMFFBbk90a2gwTnZac1l1UFcyUnhDT3lRNEYzVlR3UkF2eF9HSTZrMVRpYmlKNnByUWluUy16Sjh6RE8zUjBuakE3OFBwNXcxcVpaUE9BdmtzZFNSYzJDcVMtcWhpTmF5YUhOVHpVNnpyOXlOZHR2S0o1QjNST0FmNUtjYXNiWURjTnVpeXBUNk90LUtqQ2I1dmYtWVpnc2FRNzJBdFBhSU4yeUpNREZHbmEwM0hpSjMxcTJRUlp5eTZrd3RYaGtwcDhTdEdIcHYxSWRaV09SVWttb0g5SFBzSGk4SExRLTZlM0tEY2x1RUQyMTNpZnljaVhtN0YzdHdaTTNHeDd1UXR1SldHaUlTZ2Z0QW9lVjZfUmI2VThkMmZxNzZuWHYxak5nckRRcE5waEZFd2tCdGRtZHZ2THByZVVYX3BWangza1AxN3pWbXFKNmNOOWkwWUc4WHg2VmRzcUxsRXUxQ2Rhd3Q0eko1M3VHMFlKTjRnUDZwc25yUS1uM0U1aFdlMDJ3d3dBZ3F3bGlPdmd4V1RTeXJyLXY2eDI0IiwKICAgICAgICAgInByb3RlY3RlZCI6ICJleUptYjNKdFlYUk1aVzVuZEdnaU9qRTNNeXdpWm05eWJXRjBWR0ZwYkNJNkltWlJJaXdpZEdsdFpTSTZJakl3TVRjdE1EVXRNRFZVTWpFNk5UYzZNek5hSW4wIgogICAgICB9CiAgIF0KfQ=="}`
)
func TestActivateNoContainerd(t *testing.T) {
testCli.SetContainerizedEngineClient(
func(string) (clitypes.ContainerizedClient, error) {
return nil, fmt.Errorf("some error")
},
)
isRoot = func() bool { return true }
cmd := newActivateCommand(testCli)
cmd.Flags().Set("license", "invalidpath")
cmd.SilenceUsage = true
cmd.SilenceErrors = true
err := cmd.Execute()
assert.ErrorContains(t, err, "unable to access local containerd")
}
func TestActivateBadLicense(t *testing.T) {
testCli.SetContainerizedEngineClient(
func(string) (clitypes.ContainerizedClient, error) {
return &fakeContainerizedEngineClient{}, nil
},
)
isRoot = func() bool { return true }
cmd := newActivateCommand(testCli)
cmd.SilenceUsage = true
cmd.SilenceErrors = true
cmd.Flags().Set("license", "invalidpath")
err := cmd.Execute()
assert.Error(t, err, "open invalidpath: no such file or directory")
}
func TestActivateExpiredLicenseDryRun(t *testing.T) {
dir := fs.NewDir(t, "license", fs.WithFile("docker.lic", expiredLicense, fs.WithMode(0644)))
defer dir.Remove()
filename := dir.Join("docker.lic")
isRoot = func() bool { return true }
c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
c.SetContainerizedEngineClient(
func(string) (clitypes.ContainerizedClient, error) {
return &fakeContainerizedEngineClient{}, nil
},
)
cmd := newActivateCommand(c)
cmd.SilenceUsage = true
cmd.SilenceErrors = true
cmd.Flags().Set("license", filename)
cmd.Flags().Set("display-only", "true")
c.OutBuffer().Reset()
err := cmd.Execute()
assert.NilError(t, err)
golden.Assert(t, c.OutBuffer().String(), "expired-license-display-only.golden")
}
type mockLicenseClient struct{}
func (c mockLicenseClient) LoginViaAuth(ctx context.Context, username, password string) (authToken string, err error) {
return "", fmt.Errorf("not implemented")
}
func (c mockLicenseClient) GetHubUserOrgs(ctx context.Context, authToken string) (orgs []model.Org, err error) {
return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) GetHubUserByName(ctx context.Context, username string) (user *model.User, err error) {
return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) VerifyLicense(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) {
return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error) {
return "", fmt.Errorf("not implemented")
}
func (c mockLicenseClient) ListSubscriptions(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
expires := time.Date(2010, time.January, 1, 0, 0, 0, 0, time.UTC)
return []*model.Subscription{
{
State: "active",
Expires: &expires,
},
}, nil
}
func (c mockLicenseClient) ListSubscriptionsDetails(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error) {
return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) DownloadLicenseFromHub(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error) {
return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) ParseLicense(license []byte) (parsedLicense *model.IssuedLicense, err error) {
return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) StoreLicense(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error {
return fmt.Errorf("not implemented")
}
func (c mockLicenseClient) LoadLocalLicense(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error) {
return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) SummarizeLicense(res *model.CheckResponse, keyID string) *model.Subscription {
return nil
}
func TestActivateDisplayOnlyHub(t *testing.T) {
isRoot = func() bool { return true }
c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
c.SetContainerizedEngineClient(
func(string) (clitypes.ContainerizedClient, error) {
return &fakeContainerizedEngineClient{}, nil
},
)
hubUser := licenseutils.HubUser{
Client: mockLicenseClient{},
}
options := activateOptions{
licenseLoginFunc: func(ctx context.Context, authConfig *types.AuthConfig) (licenseutils.HubUser, error) {
return hubUser, nil
},
displayOnly: true,
}
c.OutBuffer().Reset()
err := runActivate(c, options)
assert.NilError(t, err)
golden.Assert(t, c.OutBuffer().String(), "expired-hub-license-display-only.golden")
}

View File

@@ -1,13 +0,0 @@
// +build !windows
package engine
import (
"golang.org/x/sys/unix"
)
var (
isRoot = func() bool {
return unix.Geteuid() == 0
}
)

View File

@@ -1,9 +0,0 @@
// +build windows
package engine
var (
isRoot = func() bool {
return true
}
)

View File

@@ -1,34 +0,0 @@
package engine
import (
"context"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/trust"
clitypes "github.com/docker/cli/types"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/pkg/errors"
)
func getRegistryAuth(cli command.Cli, registryPrefix string) (*types.AuthConfig, error) {
if registryPrefix == "" {
registryPrefix = clitypes.RegistryPrefix
}
distributionRef, err := reference.ParseNormalizedNamed(registryPrefix)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse image name: %s", registryPrefix)
}
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(context.Background(), nil, authResolver(cli), distributionRef.String())
if err != nil {
return nil, errors.Wrap(err, "failed to get imgRefAndAuth")
}
return imgRefAndAuth.AuthConfig(), nil
}
func authResolver(cli command.Cli) func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
return func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
return command.ResolveAuthConfig(ctx, cli, index)
}
}

View File

@@ -1,125 +0,0 @@
package engine
import (
"context"
"fmt"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/cli/internal/versions"
clitypes "github.com/docker/cli/types"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type checkOptions struct {
registryPrefix string
preReleases bool
engineImage string
downgrades bool
upgrades bool
format string
quiet bool
sockPath string
}
func newCheckForUpdatesCommand(dockerCli command.Cli) *cobra.Command {
var options checkOptions
cmd := &cobra.Command{
Use: "check [OPTIONS]",
Short: "Check for available engine updates",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runCheck(dockerCli, options)
},
}
flags := cmd.Flags()
flags.StringVar(&options.registryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the existing location where engine images are pulled")
flags.BoolVar(&options.downgrades, "downgrades", false, "Report downgrades (default omits older versions)")
flags.BoolVar(&options.preReleases, "pre-releases", false, "Include pre-release versions")
flags.StringVar(&options.engineImage, "engine-image", "", "Specify engine image (default uses the same image as currently running)")
flags.BoolVar(&options.upgrades, "upgrades", true, "Report available upgrades")
flags.StringVar(&options.format, "format", "", "Pretty-print updates using a Go template")
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display available versions")
flags.StringVar(&options.sockPath, "containerd", "", "override default location of containerd endpoint")
return cmd
}
func runCheck(dockerCli command.Cli, options checkOptions) error {
if !isRoot() {
return errors.New("this command must be run as a privileged user")
}
ctx := context.Background()
client := dockerCli.Client()
serverVersion, err := client.ServerVersion(ctx)
if err != nil {
return err
}
availVersions, err := versions.GetEngineVersions(ctx, dockerCli.RegistryClient(false), options.registryPrefix, options.engineImage, serverVersion.Version)
if err != nil {
return err
}
availUpdates := []clitypes.Update{
{Type: "current", Version: serverVersion.Version},
}
if len(availVersions.Patches) > 0 {
availUpdates = append(availUpdates,
processVersions(
serverVersion.Version,
"patch",
options.preReleases,
availVersions.Patches)...)
}
if options.upgrades {
availUpdates = append(availUpdates,
processVersions(
serverVersion.Version,
"upgrade",
options.preReleases,
availVersions.Upgrades)...)
}
if options.downgrades {
availUpdates = append(availUpdates,
processVersions(
serverVersion.Version,
"downgrade",
options.preReleases,
availVersions.Downgrades)...)
}
format := options.format
if len(format) == 0 {
format = formatter.TableFormatKey
}
updatesCtx := formatter.Context{
Output: dockerCli.Out(),
Format: formatter.NewUpdatesFormat(format, options.quiet),
Trunc: false,
}
return formatter.UpdatesWrite(updatesCtx, availUpdates)
}
func processVersions(currentVersion, verType string,
includePrerelease bool,
availVersions []clitypes.DockerVersion) []clitypes.Update {
availUpdates := []clitypes.Update{}
for _, ver := range availVersions {
if !includePrerelease && ver.Prerelease() != "" {
continue
}
if ver.Tag != currentVersion {
availUpdates = append(availUpdates, clitypes.Update{
Type: verType,
Version: ver.Tag,
Notes: fmt.Sprintf("%s?%s", clitypes.ReleaseNotePrefix, ver.Tag),
})
}
}
return availUpdates
}

View File

@@ -1,114 +0,0 @@
package engine
import (
"context"
"fmt"
"testing"
manifesttypes "github.com/docker/cli/cli/manifest/types"
"github.com/docker/cli/internal/test"
"github.com/docker/distribution"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/opencontainers/go-digest"
"gotest.tools/assert"
"gotest.tools/golden"
)
var (
testCli = test.NewFakeCli(&client.Client{})
)
type verClient struct {
client.Client
ver types.Version
verErr error
info types.Info
infoErr error
}
func (c *verClient) ServerVersion(ctx context.Context) (types.Version, error) {
return c.ver, c.verErr
}
func (c *verClient) Info(ctx context.Context) (types.Info, error) {
return c.info, c.infoErr
}
type testRegistryClient struct {
tags []string
}
func (c testRegistryClient) GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) {
return manifesttypes.ImageManifest{}, nil
}
func (c testRegistryClient) GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) {
return nil, nil
}
func (c testRegistryClient) MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error {
return nil
}
func (c testRegistryClient) PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error) {
return "", nil
}
func (c testRegistryClient) GetTags(ctx context.Context, ref reference.Named) ([]string, error) {
return c.tags, nil
}
func TestCheckForUpdatesNoCurrentVersion(t *testing.T) {
isRoot = func() bool { return true }
c := test.NewFakeCli(&verClient{client.Client{}, types.Version{}, nil, types.Info{}, nil})
c.SetRegistryClient(testRegistryClient{})
cmd := newCheckForUpdatesCommand(c)
cmd.SilenceUsage = true
cmd.SilenceErrors = true
err := cmd.Execute()
assert.ErrorContains(t, err, "no such file or directory")
}
func TestCheckForUpdatesGetEngineVersionsHappy(t *testing.T) {
c := test.NewFakeCli(&verClient{client.Client{}, types.Version{Version: "1.1.0"}, nil, types.Info{ServerVersion: "1.1.0"}, nil})
c.SetRegistryClient(testRegistryClient{[]string{
"1.0.1", "1.0.2", "1.0.3-beta1",
"1.1.1", "1.1.2", "1.1.3-beta1",
"1.2.0", "2.0.0", "2.1.0-beta1",
}})
isRoot = func() bool { return true }
cmd := newCheckForUpdatesCommand(c)
cmd.Flags().Set("pre-releases", "true")
cmd.Flags().Set("downgrades", "true")
cmd.Flags().Set("engine-image", "engine-community")
cmd.SilenceUsage = true
cmd.SilenceErrors = true
err := cmd.Execute()
assert.NilError(t, err)
golden.Assert(t, c.OutBuffer().String(), "check-all.golden")
c.OutBuffer().Reset()
cmd.Flags().Set("pre-releases", "false")
cmd.Flags().Set("downgrades", "true")
err = cmd.Execute()
assert.NilError(t, err)
fmt.Println(c.OutBuffer().String())
golden.Assert(t, c.OutBuffer().String(), "check-no-prerelease.golden")
c.OutBuffer().Reset()
cmd.Flags().Set("pre-releases", "false")
cmd.Flags().Set("downgrades", "false")
err = cmd.Execute()
assert.NilError(t, err)
fmt.Println(c.OutBuffer().String())
golden.Assert(t, c.OutBuffer().String(), "check-no-downgrades.golden")
c.OutBuffer().Reset()
cmd.Flags().Set("pre-releases", "false")
cmd.Flags().Set("downgrades", "false")
cmd.Flags().Set("upgrades", "false")
err = cmd.Execute()
assert.NilError(t, err)
fmt.Println(c.OutBuffer().String())
golden.Assert(t, c.OutBuffer().String(), "check-patches-only.golden")
}

View File

@@ -1,101 +0,0 @@
package engine
import (
"context"
"github.com/containerd/containerd"
registryclient "github.com/docker/cli/cli/registry/client"
clitypes "github.com/docker/cli/types"
"github.com/docker/docker/api/types"
)
type (
fakeContainerizedEngineClient struct {
closeFunc func() error
activateEngineFunc func(ctx context.Context,
opts clitypes.EngineInitOptions,
out clitypes.OutStream,
authConfig *types.AuthConfig) error
initEngineFunc func(ctx context.Context,
opts clitypes.EngineInitOptions,
out clitypes.OutStream,
authConfig *types.AuthConfig,
healthfn func(context.Context) error) error
doUpdateFunc func(ctx context.Context,
opts clitypes.EngineInitOptions,
out clitypes.OutStream,
authConfig *types.AuthConfig) error
getEngineVersionsFunc func(ctx context.Context,
registryClient registryclient.RegistryClient,
currentVersion,
imageName string) (clitypes.AvailableVersions, error)
getEngineFunc func(ctx context.Context) (containerd.Container, error)
removeEngineFunc func(ctx context.Context) error
getCurrentEngineVersionFunc func(ctx context.Context) (clitypes.EngineInitOptions, error)
}
)
func (w *fakeContainerizedEngineClient) Close() error {
if w.closeFunc != nil {
return w.closeFunc()
}
return nil
}
func (w *fakeContainerizedEngineClient) ActivateEngine(ctx context.Context,
opts clitypes.EngineInitOptions,
out clitypes.OutStream,
authConfig *types.AuthConfig) error {
if w.activateEngineFunc != nil {
return w.activateEngineFunc(ctx, opts, out, authConfig)
}
return nil
}
func (w *fakeContainerizedEngineClient) InitEngine(ctx context.Context,
opts clitypes.EngineInitOptions,
out clitypes.OutStream,
authConfig *types.AuthConfig,
healthfn func(context.Context) error) error {
if w.initEngineFunc != nil {
return w.initEngineFunc(ctx, opts, out, authConfig, healthfn)
}
return nil
}
func (w *fakeContainerizedEngineClient) DoUpdate(ctx context.Context,
opts clitypes.EngineInitOptions,
out clitypes.OutStream,
authConfig *types.AuthConfig) error {
if w.doUpdateFunc != nil {
return w.doUpdateFunc(ctx, opts, out, authConfig)
}
return nil
}
func (w *fakeContainerizedEngineClient) GetEngineVersions(ctx context.Context,
registryClient registryclient.RegistryClient,
currentVersion, imageName string) (clitypes.AvailableVersions, error) {
if w.getEngineVersionsFunc != nil {
return w.getEngineVersionsFunc(ctx, registryClient, currentVersion, imageName)
}
return clitypes.AvailableVersions{}, nil
}
func (w *fakeContainerizedEngineClient) GetEngine(ctx context.Context) (containerd.Container, error) {
if w.getEngineFunc != nil {
return w.getEngineFunc(ctx)
}
return nil, nil
}
func (w *fakeContainerizedEngineClient) RemoveEngine(ctx context.Context) error {
if w.removeEngineFunc != nil {
return w.removeEngineFunc(ctx)
}
return nil
}
func (w *fakeContainerizedEngineClient) GetCurrentEngineVersion(ctx context.Context) (clitypes.EngineInitOptions, error) {
if w.getCurrentEngineVersionFunc != nil {
return w.getCurrentEngineVersionFunc(ctx)
}
return clitypes.EngineInitOptions{}, nil
}

View File

@@ -1,23 +0,0 @@
package engine
import (
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/spf13/cobra"
)
// NewEngineCommand returns a cobra command for `engine` subcommands
func NewEngineCommand(dockerCli command.Cli) *cobra.Command {
cmd := &cobra.Command{
Use: "engine COMMAND",
Short: "Manage the docker engine",
Args: cli.NoArgs,
RunE: command.ShowHelp(dockerCli.Err()),
}
cmd.AddCommand(
newActivateCommand(dockerCli),
newCheckForUpdatesCommand(dockerCli),
newUpdateCommand(dockerCli),
)
return cmd
}

View File

@@ -1,14 +0,0 @@
package engine
import (
"testing"
"gotest.tools/assert"
)
func TestNewEngineCommand(t *testing.T) {
cmd := NewEngineCommand(testCli)
subcommands := cmd.Commands()
assert.Assert(t, len(subcommands) == 3)
}

View File

@@ -1,10 +0,0 @@
package engine
import (
clitypes "github.com/docker/cli/types"
)
type extendedEngineInitOptions struct {
clitypes.EngineInitOptions
sockPath string
}

View File

@ -1,11 +0,0 @@
TYPE VERSION NOTES
current 1.1.0
patch 1.1.1 https://docker.com/engine/releasenotes?1.1.1
patch 1.1.2 https://docker.com/engine/releasenotes?1.1.2
patch 1.1.3-beta1 https://docker.com/engine/releasenotes?1.1.3-beta1
upgrade 1.2.0 https://docker.com/engine/releasenotes?1.2.0
upgrade 2.0.0 https://docker.com/engine/releasenotes?2.0.0
upgrade 2.1.0-beta1 https://docker.com/engine/releasenotes?2.1.0-beta1
downgrade 1.0.1 https://docker.com/engine/releasenotes?1.0.1
downgrade 1.0.2 https://docker.com/engine/releasenotes?1.0.2
downgrade 1.0.3-beta1 https://docker.com/engine/releasenotes?1.0.3-beta1

View File

@@ -1,6 +0,0 @@
TYPE VERSION NOTES
current 1.1.0
patch 1.1.1 https://docker.com/engine/releasenotes?1.1.1
patch 1.1.2 https://docker.com/engine/releasenotes?1.1.2
upgrade 1.2.0 https://docker.com/engine/releasenotes?1.2.0
upgrade 2.0.0 https://docker.com/engine/releasenotes?2.0.0

View File

@ -1,8 +0,0 @@
TYPE VERSION NOTES
current 1.1.0
patch 1.1.1 https://docker.com/engine/releasenotes?1.1.1
patch 1.1.2 https://docker.com/engine/releasenotes?1.1.2
upgrade 1.2.0 https://docker.com/engine/releasenotes?1.2.0
upgrade 2.0.0 https://docker.com/engine/releasenotes?2.0.0
downgrade 1.0.1 https://docker.com/engine/releasenotes?1.0.1
downgrade 1.0.2 https://docker.com/engine/releasenotes?1.0.2

View File

@ -1,4 +0,0 @@
TYPE VERSION NOTES
current 1.1.0
patch 1.1.1 https://docker.com/engine/releasenotes?1.1.1
patch 1.1.2 https://docker.com/engine/releasenotes?1.1.2

View File

@ -1,3 +0,0 @@
Looking for existing licenses for ...
NUM OWNER PRODUCT ID EXPIRES PRICING COMPONENTS
0 2010-01-01 00:00:00 +0000 UTC

View File

@ -1 +0,0 @@
License: Quantity: 1 Nodes Expiration date: 2018-03-18 Expired! You will no longer receive updates. Please renew at https://docker.com/licensing

View File

@ -1,55 +0,0 @@
package engine
import (
"context"
"fmt"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
clitypes "github.com/docker/cli/types"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
var options extendedEngineInitOptions
cmd := &cobra.Command{
Use: "update [OPTIONS]",
Short: "Update a local engine",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runUpdate(dockerCli, options)
},
}
flags := cmd.Flags()
flags.StringVar(&options.EngineVersion, "version", "", "Specify engine version")
flags.StringVar(&options.EngineImage, "engine-image", "", "Specify engine image (default uses the same image as currently running)")
flags.StringVar(&options.RegistryPrefix, "registry-prefix", clitypes.RegistryPrefix, "Override the current location where engine images are pulled")
flags.StringVar(&options.sockPath, "containerd", "", "Override default location of containerd endpoint")
return cmd
}
func runUpdate(dockerCli command.Cli, options extendedEngineInitOptions) error {
if !isRoot() {
return errors.New("this command must be run as a privileged user")
}
ctx := context.Background()
client, err := dockerCli.NewContainerizedEngineClient(options.sockPath)
if err != nil {
return errors.Wrap(err, "unable to access local containerd")
}
defer client.Close()
authConfig, err := getRegistryAuth(dockerCli, options.RegistryPrefix)
if err != nil {
return err
}
if err := client.DoUpdate(ctx, options.EngineInitOptions, dockerCli.Out(), authConfig); err != nil {
return err
}
fmt.Fprintln(dockerCli.Out(), `Successfully updated engine.
Restart docker with 'systemctl restart docker' to complete the update.`)
return nil
}

View File

@ -1,40 +0,0 @@
package engine
import (
"fmt"
"testing"
"github.com/docker/cli/internal/test"
clitypes "github.com/docker/cli/types"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"gotest.tools/assert"
)
func TestUpdateNoContainerd(t *testing.T) {
testCli.SetContainerizedEngineClient(
func(string) (clitypes.ContainerizedClient, error) {
return nil, fmt.Errorf("some error")
},
)
cmd := newUpdateCommand(testCli)
cmd.SilenceUsage = true
cmd.SilenceErrors = true
err := cmd.Execute()
assert.ErrorContains(t, err, "unable to access local containerd")
}
func TestUpdateHappy(t *testing.T) {
c := test.NewFakeCli(&verClient{client.Client{}, types.Version{Version: "1.1.0"}, nil, types.Info{ServerVersion: "1.1.0"}, nil})
c.SetContainerizedEngineClient(
func(string) (clitypes.ContainerizedClient, error) {
return &fakeContainerizedEngineClient{}, nil
},
)
cmd := newUpdateCommand(c)
cmd.Flags().Set("registry-prefix", clitypes.RegistryPrefix)
cmd.Flags().Set("version", "someversion")
cmd.Flags().Set("engine-image", "someimage")
err := cmd.Execute()
assert.NilError(t, err)
}

View File

@ -1,179 +0,0 @@
package formatter
import (
"fmt"
"sort"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/go-units"
)
const (
defaultBuildCacheTableFormat = "table {{.ID}}\t{{.Type}}\t{{.Size}}\t{{.CreatedSince}}\t{{.LastUsedSince}}\t{{.UsageCount}}\t{{.Shared}}\t{{.Description}}"
cacheIDHeader = "CACHE ID"
cacheTypeHeader = "CACHE TYPE"
parentHeader = "PARENT"
lastUsedSinceHeader = "LAST USED"
usageCountHeader = "USAGE"
inUseHeader = "IN USE"
sharedHeader = "SHARED"
)
// NewBuildCacheFormat returns a Format for rendering using a Context
func NewBuildCacheFormat(source string, quiet bool) Format {
switch source {
case TableFormatKey:
if quiet {
return defaultQuietFormat
}
return Format(defaultBuildCacheTableFormat)
case RawFormatKey:
if quiet {
return `build_cache_id: {{.ID}}`
}
format := `build_cache_id: {{.ID}}
parent_id: {{.Parent}}
build_cache_type: {{.CacheType}}
description: {{.Description}}
created_at: {{.CreatedAt}}
created_since: {{.CreatedSince}}
last_used_at: {{.LastUsedAt}}
last_used_since: {{.LastUsedSince}}
usage_count: {{.UsageCount}}
in_use: {{.InUse}}
shared: {{.Shared}}
`
return Format(format)
}
return Format(source)
}
func buildCacheSort(buildCache []*types.BuildCache) {
sort.Slice(buildCache, func(i, j int) bool {
lui, luj := buildCache[i].LastUsedAt, buildCache[j].LastUsedAt
switch {
case lui == nil && luj == nil:
return strings.Compare(buildCache[i].ID, buildCache[j].ID) < 0
case lui == nil:
return true
case luj == nil:
return false
case lui.Equal(*luj):
return strings.Compare(buildCache[i].ID, buildCache[j].ID) < 0
default:
return lui.Before(*luj)
}
})
}
// BuildCacheWrite renders the context for a list of build caches
func BuildCacheWrite(ctx Context, buildCaches []*types.BuildCache) error {
render := func(format func(subContext subContext) error) error {
buildCacheSort(buildCaches)
for _, bc := range buildCaches {
err := format(&buildCacheContext{trunc: ctx.Trunc, v: bc})
if err != nil {
return err
}
}
return nil
}
return ctx.Write(newBuildCacheContext(), render)
}
type buildCacheHeaderContext map[string]string
type buildCacheContext struct {
HeaderContext
trunc bool
v *types.BuildCache
}
func newBuildCacheContext() *buildCacheContext {
buildCacheCtx := buildCacheContext{}
buildCacheCtx.header = buildCacheHeaderContext{
"ID": cacheIDHeader,
"Parent": parentHeader,
"CacheType": cacheTypeHeader,
"Size": sizeHeader,
"CreatedSince": createdSinceHeader,
"LastUsedSince": lastUsedSinceHeader,
"UsageCount": usageCountHeader,
"InUse": inUseHeader,
"Shared": sharedHeader,
"Description": descriptionHeader,
}
return &buildCacheCtx
}
func (c *buildCacheContext) MarshalJSON() ([]byte, error) {
return marshalJSON(c)
}
func (c *buildCacheContext) ID() string {
id := c.v.ID
if c.trunc {
id = stringid.TruncateID(c.v.ID)
}
if c.v.InUse {
return id + "*"
}
return id
}
func (c *buildCacheContext) Parent() string {
if c.trunc {
return stringid.TruncateID(c.v.Parent)
}
return c.v.Parent
}
func (c *buildCacheContext) CacheType() string {
return c.v.Type
}
func (c *buildCacheContext) Description() string {
return c.v.Description
}
func (c *buildCacheContext) Size() string {
return units.HumanSizeWithPrecision(float64(c.v.Size), 3)
}
func (c *buildCacheContext) CreatedAt() string {
return c.v.CreatedAt.String()
}
func (c *buildCacheContext) CreatedSince() string {
return units.HumanDuration(time.Now().UTC().Sub(c.v.CreatedAt)) + " ago"
}
func (c *buildCacheContext) LastUsedAt() string {
if c.v.LastUsedAt == nil {
return ""
}
return c.v.LastUsedAt.String()
}
func (c *buildCacheContext) LastUsedSince() string {
if c.v.LastUsedAt == nil {
return ""
}
return units.HumanDuration(time.Now().UTC().Sub(*c.v.LastUsedAt)) + " ago"
}
func (c *buildCacheContext) UsageCount() string {
return fmt.Sprintf("%d", c.v.UsageCount)
}
func (c *buildCacheContext) InUse() string {
return fmt.Sprintf("%t", c.v.InUse)
}
func (c *buildCacheContext) Shared() string {
return fmt.Sprintf("%t", c.v.Shared)
}
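For orientation, the formatter removed above is driven through a formatter.Context; below is a minimal sketch of a caller rendering the default table. The import path and the source of the caches slice are assumptions — in the CLI the data comes from the /system/df (DiskUsage) API:

package main

import (
	"os"

	"github.com/docker/cli/cli/command/formatter"
	"github.com/docker/docker/api/types"
)

// renderBuildCache writes a build-cache table to stdout.
func renderBuildCache(caches []*types.BuildCache) error {
	ctx := formatter.Context{
		Output: os.Stdout,
		Format: formatter.NewBuildCacheFormat(formatter.TableFormatKey, false),
		Trunc:  true,
	}
	return formatter.BuildCacheWrite(ctx, caches)
}

func main() {
	// With an empty slice only the table header is printed.
	_ = renderBuildCache(nil)
}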

View File

@ -269,10 +269,7 @@ func DisplayablePorts(ports []types.Port) string {
var result []string
var hostMappings []string
var groupMapKeys []string
sort.Slice(ports, func(i, j int) bool {
return comparePorts(ports[i], ports[j])
})
sort.Sort(byPortInfo(ports))
for _, port := range ports {
current := port.PrivatePort
portKey := port.Type
@ -325,18 +322,23 @@ func formGroup(key string, start, last uint16) string {
return fmt.Sprintf("%s/%s", group, groupType)
}
func comparePorts(i, j types.Port) bool {
if i.PrivatePort != j.PrivatePort {
return i.PrivatePort < j.PrivatePort
// byPortInfo is a temporary type used to sort types.Port by its fields
type byPortInfo []types.Port
func (r byPortInfo) Len() int { return len(r) }
func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r byPortInfo) Less(i, j int) bool {
if r[i].PrivatePort != r[j].PrivatePort {
return r[i].PrivatePort < r[j].PrivatePort
}
if i.IP != j.IP {
return i.IP < j.IP
if r[i].IP != r[j].IP {
return r[i].IP < r[j].IP
}
if i.PublicPort != j.PublicPort {
return i.PublicPort < j.PublicPort
if r[i].PublicPort != r[j].PublicPort {
return r[i].PublicPort < r[j].PublicPort
}
return i.Type < j.Type
return r[i].Type < r[j].Type
}

View File

@ -12,11 +12,19 @@ import (
)
const (
defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}}\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}"
defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}}\t{{.Status}}\t{{.Names}}"
defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}"
defaultDiskUsageBuildCacheTableFormat = "table {{.ID}}\t{{.CacheType}}\t{{.Size}}\t{{.CreatedSince}}\t{{.LastUsedSince}}\t{{.UsageCount}}\t{{.Shared}}"
defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"
defaultDiskUsageImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.VirtualSize}}\t{{.SharedSize}}\t{{.UniqueSize}}\t{{.Containers}}"
defaultDiskUsageContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.LocalVolumes}}\t{{.Size}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Names}}"
defaultDiskUsageVolumeTableFormat = "table {{.Name}}\t{{.Links}}\t{{.Size}}"
defaultDiskUsageTableFormat = "table {{.Type}}\t{{.TotalCount}}\t{{.Active}}\t{{.Size}}\t{{.Reclaimable}}"
defaultBuildCacheVerboseFormat = `
ID: {{.ID}}
Description: {{.Description}}
Mutable: {{.Mutable}}
Size: {{.Size}}
CreatedAt: {{.CreatedAt}}
LastUsedAt: {{.LastUsedAt}}
UsageCount: {{.UsageCount}}
`
typeHeader = "TYPE"
totalHeader = "TOTAL"
@ -24,7 +32,7 @@ const (
reclaimableHeader = "RECLAIMABLE"
containersHeader = "CONTAINERS"
sharedSizeHeader = "SHARED SIZE"
uniqueSizeHeader = "UNIQUE SIZE"
uniqueSizeHeader = "UNIQUE SiZE"
)
// DiskUsageContext contains disk usage specific information required by the formatter, encapsulating a Context struct.
@ -48,26 +56,14 @@ func (ctx *DiskUsageContext) startSubsection(format string) (*template.Template,
return ctx.parseFormat()
}
//
// NewDiskUsageFormat returns a format for rendering a DiskUsageContext
func NewDiskUsageFormat(source string, verbose bool) Format {
switch {
case verbose && source == RawFormatKey:
format := `{{range .Images}}type: Image
` + NewImageFormat(source, false, true) + `
{{end -}}
{{range .Containers}}type: Container
` + NewContainerFormat(source, false, true) + `
{{end -}}
{{range .Volumes}}type: Volume
` + NewVolumeFormat(source, false) + `
{{end -}}
{{range .BuildCache}}type: Build Cache
` + NewBuildCacheFormat(source, false) + `
{{end -}}`
return format
case !verbose && source == TableFormatKey:
return Format(defaultDiskUsageTableFormat)
case !verbose && source == RawFormatKey:
func NewDiskUsageFormat(source string) Format {
switch source {
case TableFormatKey:
format := defaultDiskUsageTableFormat
return Format(format)
case RawFormatKey:
format := `type: {{.Type}}
total: {{.TotalCount}}
active: {{.Active}}
@ -75,9 +71,8 @@ size: {{.Size}}
reclaimable: {{.Reclaimable}}
`
return Format(format)
default:
return Format(source)
}
return Format(source)
}
func (ctx *DiskUsageContext) Write() (err error) {
@ -134,23 +129,14 @@ func (ctx *DiskUsageContext) Write() (err error) {
return err
}
type diskUsageContext struct {
Images []*imageContext
Containers []*containerContext
Volumes []*volumeContext
BuildCache []*buildCacheContext
}
func (ctx *DiskUsageContext) verboseWrite() error {
duc := &diskUsageContext{
Images: make([]*imageContext, 0, len(ctx.Images)),
Containers: make([]*containerContext, 0, len(ctx.Containers)),
Volumes: make([]*volumeContext, 0, len(ctx.Volumes)),
BuildCache: make([]*buildCacheContext, 0, len(ctx.BuildCache)),
}
trunc := ctx.Format.IsTable()
// First images
tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat)
if err != nil {
return err
}
ctx.Output.Write([]byte("Images space usage:\n\n"))
for _, i := range ctx.Images {
repo := "<none>"
tag := "<none>"
@ -166,92 +152,55 @@ func (ctx *DiskUsageContext) verboseWrite() error {
}
}
duc.Images = append(duc.Images, &imageContext{
err := ctx.contextFormat(tmpl, &imageContext{
repo: repo,
tag: tag,
trunc: trunc,
trunc: true,
i: *i,
})
}
// Now containers
for _, c := range ctx.Containers {
// Don't display the virtual size
c.SizeRootFs = 0
duc.Containers = append(duc.Containers, &containerContext{trunc: trunc, c: *c})
}
// And volumes
for _, v := range ctx.Volumes {
duc.Volumes = append(duc.Volumes, &volumeContext{v: *v})
}
// And build cache
buildCacheSort(ctx.BuildCache)
for _, v := range ctx.BuildCache {
duc.BuildCache = append(duc.BuildCache, &buildCacheContext{v: v, trunc: trunc})
}
if ctx.Format == TableFormatKey {
return ctx.verboseWriteTable(duc)
}
ctx.preFormat()
tmpl, err := ctx.parseFormat()
if err != nil {
return err
}
return tmpl.Execute(ctx.Output, duc)
}
func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error {
tmpl, err := ctx.startSubsection(defaultDiskUsageImageTableFormat)
if err != nil {
return err
}
ctx.Output.Write([]byte("Images space usage:\n\n"))
for _, img := range duc.Images {
if err := ctx.contextFormat(tmpl, img); err != nil {
if err != nil {
return err
}
}
ctx.postFormat(tmpl, newImageContext())
// Now containers
ctx.Output.Write([]byte("\nContainers space usage:\n\n"))
tmpl, err = ctx.startSubsection(defaultDiskUsageContainerTableFormat)
if err != nil {
return err
}
ctx.Output.Write([]byte("\nContainers space usage:\n\n"))
for _, c := range duc.Containers {
if err := ctx.contextFormat(tmpl, c); err != nil {
for _, c := range ctx.Containers {
// Don't display the virtual size
c.SizeRootFs = 0
err := ctx.contextFormat(tmpl, &containerContext{trunc: true, c: *c})
if err != nil {
return err
}
}
ctx.postFormat(tmpl, newContainerContext())
// And volumes
ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n"))
tmpl, err = ctx.startSubsection(defaultDiskUsageVolumeTableFormat)
if err != nil {
return err
}
ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n"))
for _, v := range duc.Volumes {
if err := ctx.contextFormat(tmpl, v); err != nil {
for _, v := range ctx.Volumes {
if err := ctx.contextFormat(tmpl, &volumeContext{v: *v}); err != nil {
return err
}
}
ctx.postFormat(tmpl, newVolumeContext())
tmpl, err = ctx.startSubsection(defaultDiskUsageBuildCacheTableFormat)
if err != nil {
return err
}
// And build cache
fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize)))
for _, v := range duc.BuildCache {
if err := ctx.contextFormat(tmpl, v); err != nil {
return err
}
t := template.Must(template.New("buildcache").Parse(defaultBuildCacheVerboseFormat))
for _, v := range ctx.BuildCache {
t.Execute(ctx.Output, *v)
}
ctx.postFormat(tmpl, newBuildCacheContext())
return nil
}
@ -467,7 +416,7 @@ func (c *diskUsageBuilderContext) Size() string {
func (c *diskUsageBuilderContext) Reclaimable() string {
var inUseBytes int64
for _, bc := range c.buildCache {
if bc.InUse && !bc.Shared {
if bc.InUse {
inUseBytes += bc.Size
}
}

View File

@ -18,7 +18,7 @@ func TestDiskUsageContextFormatWrite(t *testing.T) {
{
DiskUsageContext{
Context: Context{
Format: NewDiskUsageFormat("table", false),
Format: NewDiskUsageFormat("table"),
},
Verbose: false},
`TYPE TOTAL ACTIVE SIZE RECLAIMABLE
@ -29,14 +29,14 @@ Build Cache 0 0 0B
`,
},
{
DiskUsageContext{Verbose: true, Context: Context{Format: NewDiskUsageFormat("table", true)}},
DiskUsageContext{Verbose: true},
`Images space usage:
REPOSITORY TAG IMAGE ID CREATED SIZE SHARED SIZE UNIQUE SIZE CONTAINERS
REPOSITORY TAG IMAGE ID CREATED ago SIZE SHARED SIZE UNIQUE SiZE CONTAINERS
Containers space usage:
CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED STATUS NAMES
CONTAINER ID IMAGE COMMAND LOCAL VOLUMES SIZE CREATED ago STATUS NAMES
Local Volumes space usage:
@ -44,17 +44,8 @@ VOLUME NAME LINKS SIZE
Build cache usage: 0B
CACHE ID CACHE TYPE SIZE CREATED LAST USED USAGE SHARED
`,
},
{
DiskUsageContext{Verbose: true, Context: Context{Format: NewDiskUsageFormat("raw", true)}},
``,
},
{
DiskUsageContext{Verbose: true, Context: Context{Format: NewDiskUsageFormat("{{json .}}", true)}},
`{"Images":[],"Containers":[],"Volumes":[],"BuildCache":[]}`,
},
// Errors
{
DiskUsageContext{
@ -78,7 +69,7 @@ CACHE ID CACHE TYPE SIZE CREATED
{
DiskUsageContext{
Context: Context{
Format: NewDiskUsageFormat("table", false),
Format: NewDiskUsageFormat("table"),
},
},
`TYPE TOTAL ACTIVE SIZE RECLAIMABLE
@ -91,7 +82,7 @@ Build Cache 0 0 0B
{
DiskUsageContext{
Context: Context{
Format: NewDiskUsageFormat("table {{.Type}}\t{{.Active}}", false),
Format: NewDiskUsageFormat("table {{.Type}}\t{{.Active}}"),
},
},
string(golden.Get(t, "disk-usage-context-write-custom.golden")),
@ -100,7 +91,7 @@ Build Cache 0 0 0B
{
DiskUsageContext{
Context: Context{
Format: NewDiskUsageFormat("raw", false),
Format: NewDiskUsageFormat("raw"),
},
},
string(golden.Get(t, "disk-usage-raw-format.golden")),

View File

@ -1,154 +0,0 @@
package formatter
import (
"time"
"github.com/docker/cli/internal/licenseutils"
"github.com/docker/licensing/model"
)
const (
defaultSubscriptionsTableFormat = "table {{.Num}}\t{{.Owner}}\t{{.ProductID}}\t{{.Expires}}\t{{.ComponentsString}}"
defaultSubscriptionsQuietFormat = "{{.Num}}:{{.Summary}}"
numHeader = "NUM"
ownerHeader = "OWNER"
licenseNameHeader = "NAME"
idHeader = "ID"
dockerIDHeader = "DOCKER ID"
productIDHeader = "PRODUCT ID"
productRatePlanHeader = "PRODUCT RATE PLAN"
productRatePlanIDHeader = "PRODUCT RATE PLAN ID"
startHeader = "START"
expiresHeader = "EXPIRES"
stateHeader = "STATE"
eusaHeader = "EUSA"
pricingComponentsHeader = "PRICING COMPONENTS"
)
// NewSubscriptionsFormat returns a Format for rendering using a license Context
func NewSubscriptionsFormat(source string, quiet bool) Format {
switch source {
case TableFormatKey:
if quiet {
return defaultSubscriptionsQuietFormat
}
return defaultSubscriptionsTableFormat
case RawFormatKey:
if quiet {
return `license: {{.ID}}`
}
return `license: {{.ID}}\nname: {{.Name}}\nowner: {{.Owner}}\ncomponents: {{.ComponentsString}}\n`
}
return Format(source)
}
// SubscriptionsWrite writes the context
func SubscriptionsWrite(ctx Context, subs []licenseutils.LicenseDisplay) error {
render := func(format func(subContext subContext) error) error {
for _, sub := range subs {
licenseCtx := &licenseContext{trunc: ctx.Trunc, l: sub}
if err := format(licenseCtx); err != nil {
return err
}
}
return nil
}
licenseCtx := licenseContext{}
licenseCtx.header = map[string]string{
"Num": numHeader,
"Owner": ownerHeader,
"Name": licenseNameHeader,
"ID": idHeader,
"DockerID": dockerIDHeader,
"ProductID": productIDHeader,
"ProductRatePlan": productRatePlanHeader,
"ProductRatePlanID": productRatePlanIDHeader,
"Start": startHeader,
"Expires": expiresHeader,
"State": stateHeader,
"Eusa": eusaHeader,
"ComponentsString": pricingComponentsHeader,
}
return ctx.Write(&licenseCtx, render)
}
type licenseContext struct {
HeaderContext
trunc bool
l licenseutils.LicenseDisplay
}
func (c *licenseContext) MarshalJSON() ([]byte, error) {
return marshalJSON(c)
}
func (c *licenseContext) Num() int {
return c.l.Num
}
func (c *licenseContext) Owner() string {
return c.l.Owner
}
func (c *licenseContext) ComponentsString() string {
return c.l.ComponentsString
}
func (c *licenseContext) Summary() string {
return c.l.String()
}
func (c *licenseContext) Name() string {
return c.l.Name
}
func (c *licenseContext) ID() string {
return c.l.ID
}
func (c *licenseContext) DockerID() string {
return c.l.DockerID
}
func (c *licenseContext) ProductID() string {
return c.l.ProductID
}
func (c *licenseContext) ProductRatePlan() string {
return c.l.ProductRatePlan
}
func (c *licenseContext) ProductRatePlanID() string {
return c.l.ProductRatePlanID
}
func (c *licenseContext) Start() *time.Time {
return c.l.Start
}
func (c *licenseContext) Expires() *time.Time {
return c.l.Expires
}
func (c *licenseContext) State() string {
return c.l.State
}
func (c *licenseContext) Eusa() *model.EusaState {
return c.l.Eusa
}
func (c *licenseContext) PricingComponents() []model.SubscriptionPricingComponent {
// Dereference the pricing component pointers in the pricing components
// so they can be rendered properly with the template formatter
var ret []model.SubscriptionPricingComponent
for _, spc := range c.l.PricingComponents {
if spc == nil {
continue
}
ret = append(ret, *spc)
}
return ret
}

View File

@ -1,256 +0,0 @@
package formatter
import (
"bytes"
"encoding/json"
"strings"
"testing"
"time"
"github.com/docker/cli/internal/licenseutils"
"github.com/docker/licensing/model"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)
func TestSubscriptionContextWrite(t *testing.T) {
cases := []struct {
context Context
expected string
}{
// Errors
{
Context{Format: "{{InvalidFunction}}"},
`Template parsing error: template: :1: function "InvalidFunction" not defined
`,
},
{
Context{Format: "{{nil}}"},
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
`,
},
// Table format
{
Context{Format: NewSubscriptionsFormat("table", false)},
`NUM OWNER PRODUCT ID EXPIRES PRICING COMPONENTS
1 owner1 productid1 2020-01-01 10:00:00 +0000 UTC compstring
2 owner2 productid2 2020-01-01 10:00:00 +0000 UTC compstring
`,
},
{
Context{Format: NewSubscriptionsFormat("table", true)},
`1:License Name: name1 Quantity: 10 nodes Expiration date: 2020-01-01
2:License Name: name2 Quantity: 20 nodes Expiration date: 2020-01-01
`,
},
{
Context{Format: NewSubscriptionsFormat("table {{.Owner}}", false)},
`OWNER
owner1
owner2
`,
},
{
Context{Format: NewSubscriptionsFormat("table {{.Owner}}", true)},
`OWNER
owner1
owner2
`,
},
// Raw Format
{
Context{Format: NewSubscriptionsFormat("raw", false)},
`license: id1
name: name1
owner: owner1
components: compstring
license: id2
name: name2
owner: owner2
components: compstring
`,
},
{
Context{Format: NewSubscriptionsFormat("raw", true)},
`license: id1
license: id2
`,
},
// Custom Format
{
Context{Format: NewSubscriptionsFormat("{{.Owner}}", false)},
`owner1
owner2
`,
},
}
expiration, _ := time.Parse(time.RFC822, "01 Jan 20 10:00 UTC")
for _, testcase := range cases {
subscriptions := []licenseutils.LicenseDisplay{
{
Num: 1,
Owner: "owner1",
Subscription: model.Subscription{
ID: "id1",
Name: "name1",
ProductID: "productid1",
Expires: &expiration,
PricingComponents: model.PricingComponents{
&model.SubscriptionPricingComponent{
Name: "nodes",
Value: 10,
},
},
},
ComponentsString: "compstring",
},
{
Num: 2,
Owner: "owner2",
Subscription: model.Subscription{
ID: "id2",
Name: "name2",
ProductID: "productid2",
Expires: &expiration,
PricingComponents: model.PricingComponents{
&model.SubscriptionPricingComponent{
Name: "nodes",
Value: 20,
},
},
},
ComponentsString: "compstring",
},
}
out := &bytes.Buffer{}
testcase.context.Output = out
err := SubscriptionsWrite(testcase.context, subscriptions)
if err != nil {
assert.Error(t, err, testcase.expected)
} else {
assert.Check(t, is.Equal(testcase.expected, out.String()))
}
}
}
func TestSubscriptionContextWriteJSON(t *testing.T) {
expiration, _ := time.Parse(time.RFC822, "01 Jan 20 10:00 UTC")
subscriptions := []licenseutils.LicenseDisplay{
{
Num: 1,
Owner: "owner1",
Subscription: model.Subscription{
ID: "id1",
Name: "name1",
ProductID: "productid1",
Expires: &expiration,
PricingComponents: model.PricingComponents{
&model.SubscriptionPricingComponent{
Name: "nodes",
Value: 10,
},
},
},
ComponentsString: "compstring",
},
{
Num: 2,
Owner: "owner2",
Subscription: model.Subscription{
ID: "id2",
Name: "name2",
ProductID: "productid2",
Expires: &expiration,
PricingComponents: model.PricingComponents{
&model.SubscriptionPricingComponent{
Name: "nodes",
Value: 20,
},
},
},
ComponentsString: "compstring",
},
}
expectedJSONs := []map[string]interface{}{
{
"Owner": "owner1",
"ComponentsString": "compstring",
"Expires": "2020-01-01T10:00:00Z",
"DockerID": "",
"Eusa": nil,
"ID": "id1",
"Start": nil,
"Name": "name1",
"Num": float64(1),
"PricingComponents": []interface{}{
map[string]interface{}{
"name": "nodes",
"value": float64(10),
},
},
"ProductID": "productid1",
"ProductRatePlan": "",
"ProductRatePlanID": "",
"State": "",
"Summary": "License Name: name1\tQuantity: 10 nodes\tExpiration date: 2020-01-01",
},
{
"Owner": "owner2",
"ComponentsString": "compstring",
"Expires": "2020-01-01T10:00:00Z",
"DockerID": "",
"Eusa": nil,
"ID": "id2",
"Start": nil,
"Name": "name2",
"Num": float64(2),
"PricingComponents": []interface{}{
map[string]interface{}{
"name": "nodes",
"value": float64(20),
},
},
"ProductID": "productid2",
"ProductRatePlan": "",
"ProductRatePlanID": "",
"State": "",
"Summary": "License Name: name2\tQuantity: 20 nodes\tExpiration date: 2020-01-01",
},
}
out := &bytes.Buffer{}
err := SubscriptionsWrite(Context{Format: "{{json .}}", Output: out}, subscriptions)
if err != nil {
t.Fatal(err)
}
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
var m map[string]interface{}
if err := json.Unmarshal([]byte(line), &m); err != nil {
t.Fatal(err)
}
assert.Check(t, is.DeepEqual(expectedJSONs[i], m))
}
}
func TestSubscriptionContextWriteJSONField(t *testing.T) {
subscriptions := []licenseutils.LicenseDisplay{
{Num: 1, Owner: "owner1"},
{Num: 2, Owner: "owner2"},
}
out := &bytes.Buffer{}
err := SubscriptionsWrite(Context{Format: "{{json .Owner}}", Output: out}, subscriptions)
if err != nil {
t.Fatal(err)
}
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
var s string
if err := json.Unmarshal([]byte(line), &s); err != nil {
t.Fatal(err)
}
assert.Check(t, is.Equal(subscriptions[i].Owner, s))
}
}

View File

@ -599,13 +599,7 @@ func (c *serviceContext) Ports() string {
pr := portRange{}
ports := []string{}
servicePorts := c.service.Endpoint.Ports
sort.Slice(servicePorts, func(i, j int) bool {
if servicePorts[i].Protocol == servicePorts[j].Protocol {
return servicePorts[i].PublishedPort < servicePorts[j].PublishedPort
}
return servicePorts[i].Protocol < servicePorts[j].Protocol
})
sort.Sort(byProtocolAndPublishedPort(c.service.Endpoint.Ports))
for _, p := range c.service.Endpoint.Ports {
if p.PublishMode == swarm.PortConfigPublishModeIngress {
@ -639,3 +633,14 @@ func (c *serviceContext) Ports() string {
}
return strings.Join(ports, ", ")
}
type byProtocolAndPublishedPort []swarm.PortConfig
func (a byProtocolAndPublishedPort) Len() int { return len(a) }
func (a byProtocolAndPublishedPort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byProtocolAndPublishedPort) Less(i, j int) bool {
if a[i].Protocol == a[j].Protocol {
return a[i].PublishedPort < a[j].PublishedPort
}
return a[i].Protocol < a[j].Protocol
}

View File

@ -133,3 +133,18 @@ func (c *signerInfoContext) Keys() string {
func (c *signerInfoContext) Signer() string {
return c.s.Name
}
// SignerInfoList helps sort []SignerInfo by signer names
type SignerInfoList []SignerInfo
func (signerInfoComp SignerInfoList) Len() int {
return len(signerInfoComp)
}
func (signerInfoComp SignerInfoList) Less(i, j int) bool {
return signerInfoComp[i].Name < signerInfoComp[j].Name
}
func (signerInfoComp SignerInfoList) Swap(i, j int) {
signerInfoComp[i], signerInfoComp[j] = signerInfoComp[j], signerInfoComp[i]
}
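Because SignerInfoList satisfies sort.Interface, callers in this package can order signer entries by name before rendering. A minimal sketch with placeholder entries (the sort package is assumed to be imported):

signers := SignerInfoList{
	{Name: "eve", Keys: []string{"key31"}},
	{Name: "alice", Keys: []string{"key11", "key12"}},
}
sort.Sort(signers) // entries are now ordered alice, eve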

View File

@ -222,7 +222,7 @@ eve foobarbazquxquux, key31, key32
}
for _, testcase := range cases {
signerInfo := []SignerInfo{
signerInfo := SignerInfoList{
{Name: "alice", Keys: []string{"key11", "key12"}},
{Name: "bob", Keys: []string{"key21"}},
{Name: "eve", Keys: []string{"key31", "key32", "foobarbazquxquux"}},

View File

@ -1,73 +0,0 @@
package formatter
import (
clitypes "github.com/docker/cli/types"
)
const (
defaultUpdatesTableFormat = "table {{.Type}}\t{{.Version}}\t{{.Notes}}"
defaultUpdatesQuietFormat = "{{.Version}}"
updatesTypeHeader = "TYPE"
versionHeader = "VERSION"
notesHeader = "NOTES"
)
// NewUpdatesFormat returns a Format for rendering using an updates context
func NewUpdatesFormat(source string, quiet bool) Format {
switch source {
case TableFormatKey:
if quiet {
return defaultUpdatesQuietFormat
}
return defaultUpdatesTableFormat
case RawFormatKey:
if quiet {
return `update_version: {{.Version}}`
}
return `update_version: {{.Version}}\ntype: {{.Type}}\nnotes: {{.Notes}}\n`
}
return Format(source)
}
// UpdatesWrite writes the context
func UpdatesWrite(ctx Context, availableUpdates []clitypes.Update) error {
render := func(format func(subContext subContext) error) error {
for _, update := range availableUpdates {
updatesCtx := &updateContext{trunc: ctx.Trunc, u: update}
if err := format(updatesCtx); err != nil {
return err
}
}
return nil
}
updatesCtx := updateContext{}
updatesCtx.header = map[string]string{
"Type": updatesTypeHeader,
"Version": versionHeader,
"Notes": notesHeader,
}
return ctx.Write(&updatesCtx, render)
}
type updateContext struct {
HeaderContext
trunc bool
u clitypes.Update
}
func (c *updateContext) MarshalJSON() ([]byte, error) {
return marshalJSON(c)
}
func (c *updateContext) Type() string {
return c.u.Type
}
func (c *updateContext) Version() string {
return c.u.Version
}
func (c *updateContext) Notes() string {
return c.u.Notes
}
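As with the other formatters removed in this comparison, UpdatesWrite is driven through a Context. A minimal sketch, assuming it sits in the formatter package so the unqualified names resolve and "io" plus the clitypes import above are available:

// writeUpdatesTable renders a TYPE / VERSION / NOTES table such as the
// engine-check golden fixtures earlier in this diff; out is any io.Writer.
func writeUpdatesTable(out io.Writer, updates []clitypes.Update) error {
	updatesCtx := Context{
		Output: out,
		Format: NewUpdatesFormat(TableFormatKey, false),
	}
	return UpdatesWrite(updatesCtx, updates)
}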

View File

@ -1,143 +0,0 @@
package formatter
import (
"bytes"
"encoding/json"
"strings"
"testing"
clitypes "github.com/docker/cli/types"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)
func TestUpdateContextWrite(t *testing.T) {
cases := []struct {
context Context
expected string
}{
// Errors
{
Context{Format: "{{InvalidFunction}}"},
`Template parsing error: template: :1: function "InvalidFunction" not defined
`,
},
{
Context{Format: "{{nil}}"},
`Template parsing error: template: :1:2: executing "" at <nil>: nil is not a command
`,
},
// Table format
{
Context{Format: NewUpdatesFormat("table", false)},
`TYPE VERSION NOTES
updateType1 version1 description 1
updateType2 version2 description 2
`,
},
{
Context{Format: NewUpdatesFormat("table", true)},
`version1
version2
`,
},
{
Context{Format: NewUpdatesFormat("table {{.Version}}", false)},
`VERSION
version1
version2
`,
},
{
Context{Format: NewUpdatesFormat("table {{.Version}}", true)},
`VERSION
version1
version2
`,
},
// Raw Format
{
Context{Format: NewUpdatesFormat("raw", false)},
`update_version: version1
type: updateType1
notes: description 1
update_version: version2
type: updateType2
notes: description 2
`,
},
{
Context{Format: NewUpdatesFormat("raw", true)},
`update_version: version1
update_version: version2
`,
},
// Custom Format
{
Context{Format: NewUpdatesFormat("{{.Version}}", false)},
`version1
version2
`,
},
}
for _, testcase := range cases {
updates := []clitypes.Update{
{Type: "updateType1", Version: "version1", Notes: "description 1"},
{Type: "updateType2", Version: "version2", Notes: "description 2"},
}
out := &bytes.Buffer{}
testcase.context.Output = out
err := UpdatesWrite(testcase.context, updates)
if err != nil {
assert.Error(t, err, testcase.expected)
} else {
assert.Check(t, is.Equal(testcase.expected, out.String()))
}
}
}
func TestUpdateContextWriteJSON(t *testing.T) {
updates := []clitypes.Update{
{Type: "updateType1", Version: "version1", Notes: "note1"},
{Type: "updateType2", Version: "version2", Notes: "note2"},
}
expectedJSONs := []map[string]interface{}{
{"Version": "version1", "Notes": "note1", "Type": "updateType1"},
{"Version": "version2", "Notes": "note2", "Type": "updateType2"},
}
out := &bytes.Buffer{}
err := UpdatesWrite(Context{Format: "{{json .}}", Output: out}, updates)
if err != nil {
t.Fatal(err)
}
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
var m map[string]interface{}
if err := json.Unmarshal([]byte(line), &m); err != nil {
t.Fatal(err)
}
assert.Check(t, is.DeepEqual(expectedJSONs[i], m))
}
}
func TestUpdateContextWriteJSONField(t *testing.T) {
updates := []clitypes.Update{
{Type: "updateType1", Version: "version1"},
{Type: "updateType2", Version: "version2"},
}
out := &bytes.Buffer{}
err := UpdatesWrite(Context{Format: "{{json .Type}}", Output: out}, updates)
if err != nil {
t.Fatal(err)
}
for i, line := range strings.Split(strings.TrimSpace(out.String()), "\n") {
var s string
if err := json.Unmarshal([]byte(line), &s); err != nil {
t.Fatal(err)
}
assert.Check(t, is.Equal(updates[i].Type, s))
}
}

View File

@ -13,6 +13,7 @@ import (
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"github.com/docker/cli/cli"
@ -57,7 +58,7 @@ type buildOptions struct {
isolation string
quiet bool
noCache bool
progress string
console opts.NullableBool
rm bool
forceRm bool
pull bool
@ -71,8 +72,6 @@ type buildOptions struct {
stream bool
platform string
untrusted bool
secrets []string
ssh []string
}
// dockerfileFromStdin returns true when the user specified that the Dockerfile
@ -136,8 +135,6 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image")
flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip")
flags.SetAnnotation("compress", "no-buildkit", nil)
flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options")
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
flags.SetAnnotation("network", "version", []string{"1.25"})
@ -155,18 +152,10 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
flags.BoolVar(&options.stream, "stream", false, "Stream attaches to server to negotiate build context")
flags.SetAnnotation("stream", "experimental", nil)
flags.SetAnnotation("stream", "version", []string{"1.31"})
flags.SetAnnotation("stream", "no-buildkit", nil)
flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (auto, plain, tty). Use plain to show container output")
flags.SetAnnotation("progress", "buildkit", nil)
flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build (only if BuildKit enabled): id=mysecret,src=/local/secret")
flags.SetAnnotation("secret", "version", []string{"1.39"})
flags.SetAnnotation("secret", "buildkit", nil)
flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (only if BuildKit enabled) (format: default|<id>[=<socket>|<key>[,<key>]])")
flags.SetAnnotation("ssh", "version", []string{"1.39"})
flags.SetAnnotation("ssh", "buildkit", nil)
flags.Var(&options.console, "console", "Show console output (with buildkit only) (true, false, auto)")
flags.SetAnnotation("console", "experimental", nil)
flags.SetAnnotation("console", "version", []string{"1.38"})
return cmd
}
@ -188,17 +177,20 @@ func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error {
// nolint: gocyclo
func runBuild(dockerCli command.Cli, options buildOptions) error {
buildkitEnabled, err := command.BuildKitEnabled(dockerCli.ServerInfo())
if err != nil {
return err
}
if buildkitEnabled {
return runBuildBuildKit(dockerCli, options)
if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" {
enableBuildkit, err := strconv.ParseBool(buildkitEnv)
if err != nil {
return errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
}
if enableBuildkit {
return runBuildBuildKit(dockerCli, options)
}
}
var (
buildCtx io.ReadCloser
dockerfileCtx io.ReadCloser
err error
contextDir string
tempDir string
relDockerfile string
@ -278,12 +270,15 @@ func runBuild(dockerCli command.Cli, options buildOptions) error {
}
// And canonicalize dockerfile name to a platform-independent one
relDockerfile = archive.CanonicalTarNameForPath(relDockerfile)
relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
if err != nil {
return errors.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
}
excludes = build.TrimBuildFilesFromExcludes(excludes, relDockerfile, options.dockerfileFromStdin())
buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
ExcludePatterns: excludes,
ChownOpts: &idtools.Identity{UID: 0, GID: 0},
ChownOpts: &idtools.IDPair{UID: 0, GID: 0},
})
if err != nil {
return err
@ -350,7 +345,7 @@ func runBuild(dockerCli command.Cli, options buildOptions) error {
buildCtx = dockerfileCtx
}
s, err := trySession(dockerCli, contextDir, true)
s, err := trySession(dockerCli, contextDir)
if err != nil {
return err
}

View File

@ -3,7 +3,6 @@ package image
import (
"bytes"
"context"
"encoding/csv"
"encoding/json"
"fmt"
"io"
@ -16,22 +15,18 @@ import (
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/image/build"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/urlutil"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/session/secrets/secretsprovider"
"github.com/moby/buildkit/session/sshforward/sshprovider"
"github.com/moby/buildkit/util/appcontext"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/pkg/errors"
fsutiltypes "github.com/tonistiigi/fsutil/types"
"github.com/tonistiigi/fsutil"
"golang.org/x/sync/errgroup"
)
@ -43,7 +38,7 @@ var errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin
func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
ctx := appcontext.Context()
s, err := trySession(dockerCli, options.context, false)
s, err := trySession(dockerCli, options.context)
if err != nil {
return err
}
@ -132,20 +127,6 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
}
s.Allow(authprovider.NewDockerAuthProvider())
if len(options.secrets) > 0 {
sp, err := parseSecretSpecs(options.secrets)
if err != nil {
return errors.Wrapf(err, "could not parse secrets: %v", options.secrets)
}
s.Allow(sp)
}
if len(options.ssh) > 0 {
sshp, err := parseSSHSpecs(options.ssh)
if err != nil {
return errors.Wrapf(err, "could not parse ssh: %v", options.ssh)
}
s.Allow(sshp)
}
eg, ctx := errgroup.WithContext(ctx)
@ -210,19 +191,16 @@ func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, opt
t := newTracer()
ssArr := []*client.SolveStatus{}
if err := opts.ValidateProgressOutput(options.progress); err != nil {
return err
}
displayStatus := func(out *os.File, displayCh chan *client.SolveStatus) {
var c console.Console
// TODO: Handle tty output in non-tty environment.
if cons, err := console.ConsoleFromFile(out); err == nil && (options.progress == "auto" || options.progress == "tty") {
// TODO: Handle interactive output in non-interactive environment.
consoleOpt := options.console.Value()
if cons, err := console.ConsoleFromFile(out); err == nil && (consoleOpt == nil || *consoleOpt) {
c = cons
}
// not using shared context to not disrupt display but let it finish reporting errors
eg.Go(func() error {
return progressui.DisplaySolveStatus(context.TODO(), "", c, out, displayCh)
return progressui.DisplaySolveStatus(context.TODO(), c, out, displayCh)
})
}
@ -299,7 +277,7 @@ func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, opt
return err
}
func resetUIDAndGID(s *fsutiltypes.Stat) bool {
func resetUIDAndGID(s *fsutil.Stat) bool {
s.Uid = 0
s.Gid = 0
return true
@ -366,76 +344,3 @@ func (t *tracer) write(msg jsonmessage.JSONMessage) {
t.displayCh <- &s
}
func parseSecretSpecs(sl []string) (session.Attachable, error) {
fs := make([]secretsprovider.FileSource, 0, len(sl))
for _, v := range sl {
s, err := parseSecret(v)
if err != nil {
return nil, err
}
fs = append(fs, *s)
}
store, err := secretsprovider.NewFileStore(fs)
if err != nil {
return nil, err
}
return secretsprovider.NewSecretProvider(store), nil
}
func parseSecret(value string) (*secretsprovider.FileSource, error) {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return nil, errors.Wrap(err, "failed to parse csv secret")
}
fs := secretsprovider.FileSource{}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
key := strings.ToLower(parts[0])
if len(parts) != 2 {
return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field)
}
value := parts[1]
switch key {
case "type":
if value != "file" {
return nil, errors.Errorf("unsupported secret type %q", value)
}
case "id":
fs.ID = value
case "source", "src":
fs.FilePath = value
default:
return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field)
}
}
return &fs, nil
}
func parseSSHSpecs(sl []string) (session.Attachable, error) {
configs := make([]sshprovider.AgentConfig, 0, len(sl))
for _, v := range sl {
c, err := parseSSH(v)
if err != nil {
return nil, err
}
configs = append(configs, *c)
}
return sshprovider.NewSSHAgentProvider(configs)
}
func parseSSH(value string) (*sshprovider.AgentConfig, error) {
parts := strings.SplitN(value, "=", 2)
cfg := sshprovider.AgentConfig{
ID: parts[0],
}
if len(parts) > 1 {
cfg.Paths = strings.Split(parts[1], ",")
}
return &cfg, nil
}
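For reference, the two spec shapes accepted by parseSSH and what they produce — a minimal sketch inside the same package, with placeholder paths and fmt assumed to be imported:

// "default" exposes the local SSH agent under the default ID.
cfg, _ := parseSSH("default")
fmt.Println(cfg.ID, cfg.Paths) // default []

// "<id>=<socket>|<key>[,<key>]" names the ID and lists explicit paths.
cfg, _ = parseSSH("mykey=/home/user/.ssh/id_rsa")
fmt.Println(cfg.ID, cfg.Paths) // mykey [/home/user/.ssh/id_rsa]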

View File

@ -27,16 +27,13 @@ import (
const clientSessionRemote = "client-session"
func isSessionSupported(dockerCli command.Cli, forStream bool) bool {
if !forStream && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.39") {
return true
}
func isSessionSupported(dockerCli command.Cli) bool {
return dockerCli.ServerInfo().HasExperimental && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31")
}
func trySession(dockerCli command.Cli, contextDir string, forStream bool) (*session.Session, error) {
func trySession(dockerCli command.Cli, contextDir string) (*session.Session, error) {
var s *session.Session
if isSessionSupported(dockerCli, forStream) {
if isSessionSupported(dockerCli) {
sharedKey, err := getBuildSharedKey(contextDir)
if err != nil {
return nil, errors.Wrap(err, "failed to get build shared key")

View File

@ -5,7 +5,6 @@ import (
"bytes"
"compress/gzip"
"context"
"fmt"
"io"
"io/ioutil"
"os"
@ -18,7 +17,6 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/archive"
"github.com/google/go-cmp/cmp"
"github.com/moby/buildkit/session/secrets/secretsprovider"
"gotest.tools/assert"
"gotest.tools/fs"
"gotest.tools/skip"
@ -175,66 +173,6 @@ RUN echo hello world
assert.DeepEqual(t, fakeBuild.filenames(t), []string{"Dockerfile"})
}
func TestParseSecret(t *testing.T) {
type testcase struct {
value string
errExpected bool
errMatch string
filesource *secretsprovider.FileSource
}
var testcases = []testcase{
{
value: "",
errExpected: true,
}, {
value: "foobar",
errExpected: true,
errMatch: "must be a key=value pair",
}, {
value: "foo,bar",
errExpected: true,
errMatch: "must be a key=value pair",
}, {
value: "foo=bar",
errExpected: true,
errMatch: "unexpected key",
}, {
value: "src=somefile",
filesource: &secretsprovider.FileSource{FilePath: "somefile"},
}, {
value: "source=somefile",
filesource: &secretsprovider.FileSource{FilePath: "somefile"},
}, {
value: "id=mysecret",
filesource: &secretsprovider.FileSource{ID: "mysecret"},
}, {
value: "id=mysecret,src=somefile",
filesource: &secretsprovider.FileSource{ID: "mysecret", FilePath: "somefile"},
}, {
value: "id=mysecret,source=somefile,type=file",
filesource: &secretsprovider.FileSource{ID: "mysecret", FilePath: "somefile"},
}, {
value: "id=mysecret,src=somefile,src=othersecretfile",
filesource: &secretsprovider.FileSource{ID: "mysecret", FilePath: "othersecretfile"},
}, {
value: "type=invalid",
errExpected: true,
errMatch: "unsupported secret type",
},
}
for _, tc := range testcases {
t.Run(tc.value, func(t *testing.T) {
secret, err := parseSecret(tc.value)
assert.Equal(t, err != nil, tc.errExpected, fmt.Sprintf("err=%v errExpected=%t", err, tc.errExpected))
if tc.errMatch != "" {
assert.ErrorContains(t, err, tc.errMatch)
}
assert.DeepEqual(t, secret, tc.filesource)
})
}
}
type fakeBuild struct {
context *tar.Reader
options types.ImageBuildOptions

View File

@ -19,7 +19,6 @@ type importOptions struct {
reference string
changes dockeropts.ListOpts
message string
platform string
}
// NewImportCommand creates a new `docker import` command
@ -44,7 +43,6 @@ func NewImportCommand(dockerCli command.Cli) *cobra.Command {
options.changes = dockeropts.NewListOpts(nil)
flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image")
flags.StringVarP(&options.message, "message", "m", "", "Set commit message for imported image")
command.AddPlatformFlag(flags, &options.platform)
return cmd
}
@ -73,9 +71,8 @@ func runImport(dockerCli command.Cli, options importOptions) error {
}
importOptions := types.ImageImportOptions{
Message: options.message,
Changes: options.changes.GetAll(),
Platform: options.platform,
Message: options.message,
Changes: options.changes.GetAll(),
}
clnt := dockerCli.Client()

View File

@ -3,14 +3,11 @@ package image
import (
"context"
"fmt"
"strings"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types/filters"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@ -57,25 +54,8 @@ Are you sure you want to continue?`
Are you sure you want to continue?`
)
// cloneFilter is a temporary workaround that uses existing public APIs from the filters package to clone a filter.
// TODO(tiborvass): remove this once filters.Args.Clone() is added.
func cloneFilter(args filters.Args) (newArgs filters.Args, err error) {
if args.Len() == 0 {
return filters.NewArgs(), nil
}
b, err := args.MarshalJSON()
if err != nil {
return newArgs, err
}
err = newArgs.UnmarshalJSON(b)
return newArgs, err
}
func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {
pruneFilters, err := cloneFilter(options.filter.Value())
if err != nil {
return 0, "", errors.Wrap(err, "could not copy filter in image prune")
}
pruneFilters := options.filter.Value()
pruneFilters.Add("dangling", fmt.Sprintf("%v", !options.all))
pruneFilters = command.PruneFilters(dockerCli, pruneFilters)
@ -93,20 +73,14 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint6
}
if len(report.ImagesDeleted) > 0 {
var sb strings.Builder
sb.WriteString("Deleted Images:\n")
output = "Deleted Images:\n"
for _, st := range report.ImagesDeleted {
if st.Untagged != "" {
sb.WriteString("untagged: ")
sb.WriteString(st.Untagged)
sb.WriteByte('\n')
output += fmt.Sprintln("untagged:", st.Untagged)
} else {
sb.WriteString("deleted: ")
sb.WriteString(st.Deleted)
sb.WriteByte('\n')
output += fmt.Sprintln("deleted:", st.Deleted)
}
}
output = sb.String()
spaceReclaimed = report.SpaceReclaimed
}

View File

@ -15,7 +15,6 @@ type fakeRegistryClient struct {
getManifestListFunc func(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error)
mountBlobFunc func(ctx context.Context, source reference.Canonical, target reference.Named) error
putManifestFunc func(ctx context.Context, source reference.Named, mf distribution.Manifest) (digest.Digest, error)
getTagsFunc func(ctx context.Context, ref reference.Named) ([]string, error)
}
func (c *fakeRegistryClient) GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) {
@ -46,11 +45,4 @@ func (c *fakeRegistryClient) PutManifest(ctx context.Context, ref reference.Name
return digest.Digest(""), nil
}
func (c *fakeRegistryClient) GetTags(ctx context.Context, ref reference.Named) ([]string, error) {
if c.getTagsFunc != nil {
return c.getTagsFunc(ctx, ref)
}
return nil, nil
}
var _ client.RegistryClient = &fakeRegistryClient{}

View File

@ -18,7 +18,6 @@ type osArch struct {
// list of valid os/arch values (see "Optional Environment Variables" section
// of https://golang.org/doc/install/source)
// Added linux/s390x as we know System z support already exists
// Keep in sync with _docker_manifest_annotate in contrib/completion/bash/docker
var validOSArches = map[osArch]bool{
{os: "darwin", arch: "386"}: true,
{os: "darwin", arch: "amd64"}: true,

View File

@ -10,9 +10,14 @@ import (
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/spf13/cobra"
"vbom.ml/util/sortorder"
)
type byNetworkName []types.NetworkResource
func (r byNetworkName) Len() int { return len(r) }
func (r byNetworkName) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name }
type listOptions struct {
quiet bool
noTrunc bool
@ -59,9 +64,7 @@ func runList(dockerCli command.Cli, options listOptions) error {
}
}
sort.Slice(networkResources, func(i, j int) bool {
return sortorder.NaturalLess(networkResources[i].Name, networkResources[j].Name)
})
sort.Sort(byNetworkName(networkResources))
networksCtx := formatter.Context{
Output: dockerCli.Out(),

View File

@ -3,6 +3,7 @@ package network
import (
"context"
"io/ioutil"
"strings"
"testing"
"github.com/docker/cli/internal/test"
@ -40,55 +41,23 @@ func TestNetworkListErrors(t *testing.T) {
}
}
func TestNetworkList(t *testing.T) {
testCases := []struct {
doc string
networkListFunc func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
flags map[string]string
golden string
}{
{
doc: "network list with flags",
flags: map[string]string{
"filter": "image.name=ubuntu",
},
golden: "network-list.golden",
networkListFunc: func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
expectedOpts := types.NetworkListOptions{
Filters: filters.NewArgs(filters.Arg("image.name", "ubuntu")),
}
assert.Check(t, is.DeepEqual(expectedOpts, options, cmp.AllowUnexported(filters.Args{})))
return []types.NetworkResource{*NetworkResource(NetworkResourceID("123454321"),
NetworkResourceName("network_1"),
NetworkResourceDriver("09.7.01"),
NetworkResourceScope("global"))}, nil
},
},
{
doc: "network list sort order",
flags: map[string]string{
"format": "{{ .Name }}",
},
golden: "network-list-sort.golden",
networkListFunc: func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
return []types.NetworkResource{
*NetworkResource(NetworkResourceName("network-2-foo")),
*NetworkResource(NetworkResourceName("network-1-foo")),
*NetworkResource(NetworkResourceName("network-10-foo"))}, nil
},
},
func TestNetworkListWithFlags(t *testing.T) {
expectedOpts := types.NetworkListOptions{
Filters: filters.NewArgs(filters.Arg("image.name", "ubuntu")),
}
for _, tc := range testCases {
t.Run(tc.doc, func(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{networkListFunc: tc.networkListFunc})
cmd := newListCommand(cli)
for key, value := range tc.flags {
cmd.Flags().Set(key, value)
}
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), tc.golden)
})
}
cli := test.NewFakeCli(&fakeClient{
networkListFunc: func(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
assert.Check(t, is.DeepEqual(expectedOpts, options, cmp.AllowUnexported(filters.Args{})))
return []types.NetworkResource{*NetworkResource(NetworkResourceID("123454321"),
NetworkResourceName("network_1"),
NetworkResourceDriver("09.7.01"),
NetworkResourceScope("global"))}, nil
},
})
cmd := newListCommand(cli)
cmd.Flags().Set("filter", "image.name=ubuntu")
assert.NilError(t, cmd.Execute())
golden.Assert(t, strings.TrimSpace(cli.OutBuffer().String()), "network-list.golden")
}

View File

@ -70,7 +70,7 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (output string, err e
// RunPrune calls the Network Prune API
// This returns the amount of space reclaimed and a detailed output string
func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
func RunPrune(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) {
output, err := runPrune(dockerCli, pruneOptions{force: true, filter: filter})
return 0, output, err
}

View File

@ -1,3 +0,0 @@
network-1-foo
network-2-foo
network-10-foo

View File

@ -1,2 +1,2 @@
NETWORK ID NAME DRIVER SCOPE
123454321 network_1 09.7.01 global
123454321 network_1 09.7.01 global

View File

@ -9,10 +9,19 @@ import (
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
"github.com/spf13/cobra"
"vbom.ml/util/sortorder"
)
type byHostname []swarm.Node
func (n byHostname) Len() int { return len(n) }
func (n byHostname) Swap(i, j int) { n[i], n[j] = n[j], n[i] }
func (n byHostname) Less(i, j int) bool {
return sortorder.NaturalLess(n[i].Description.Hostname, n[j].Description.Hostname)
}
type listOptions struct {
quiet bool
format string
@ -71,8 +80,6 @@ func runList(dockerCli command.Cli, options listOptions) error {
Output: dockerCli.Out(),
Format: formatter.NewNodeFormat(format, options.quiet),
}
sort.Slice(nodes, func(i, j int) bool {
return sortorder.NaturalLess(nodes[i].Description.Hostname, nodes[j].Description.Hostname)
})
sort.Sort(byHostname(nodes))
return formatter.NodeWrite(nodesCtx, nodes, info)
}

View File

@ -5,7 +5,6 @@ import (
"io"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
)
@ -15,8 +14,6 @@ type fakeClient struct {
pluginDisableFunc func(name string, disableOptions types.PluginDisableOptions) error
pluginEnableFunc func(name string, options types.PluginEnableOptions) error
pluginRemoveFunc func(name string, options types.PluginRemoveOptions) error
pluginInstallFunc func(name string, options types.PluginInstallOptions) (io.ReadCloser, error)
pluginListFunc func(filter filters.Args) (types.PluginsListResponse, error)
}
func (c *fakeClient) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error {
@ -46,18 +43,3 @@ func (c *fakeClient) PluginRemove(context context.Context, name string, removeOp
}
return nil
}
func (c *fakeClient) PluginInstall(context context.Context, name string, installOptions types.PluginInstallOptions) (io.ReadCloser, error) {
if c.pluginInstallFunc != nil {
return c.pluginInstallFunc(name, installOptions)
}
return nil, nil
}
func (c *fakeClient) PluginList(context context.Context, filter filters.Args) (types.PluginsListResponse, error) {
if c.pluginListFunc != nil {
return c.pluginListFunc(filter)
}
return types.PluginsListResponse{}, nil
}

View File

@ -151,7 +151,7 @@ func runInstall(dockerCli command.Cli, opts pluginOptions) error {
responseBody, err := dockerCli.Client().PluginInstall(ctx, localName, options)
if err != nil {
if strings.Contains(err.Error(), "(image) when fetching") {
return errors.New(err.Error() + " - Use \"docker image pull\"")
return errors.New(err.Error() + " - Use `docker image pull`")
}
return err
}

View File

@ -1,141 +0,0 @@
package plugin
import (
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
"github.com/docker/cli/internal/test"
"github.com/docker/cli/internal/test/notary"
"github.com/docker/docker/api/types"
"gotest.tools/assert"
)
func TestInstallErrors(t *testing.T) {
testCases := []struct {
description string
args []string
expectedError string
installFunc func(name string, options types.PluginInstallOptions) (io.ReadCloser, error)
}{
{
description: "insufficient number of arguments",
args: []string{},
expectedError: "requires at least 1 argument",
},
{
description: "invalid alias",
args: []string{"foo", "--alias", "UPPERCASE_ALIAS"},
expectedError: "invalid",
},
{
description: "invalid plugin name",
args: []string{"UPPERCASE_REPONAME"},
expectedError: "invalid",
},
{
description: "installation error",
args: []string{"foo"},
expectedError: "Error installing plugin",
installFunc: func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) {
return nil, fmt.Errorf("Error installing plugin")
},
},
{
description: "installation error due to missing image",
args: []string{"foo"},
expectedError: "docker image pull",
installFunc: func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) {
return nil, fmt.Errorf("(image) when fetching")
},
},
}
for _, tc := range testCases {
cli := test.NewFakeCli(&fakeClient{pluginInstallFunc: tc.installFunc})
cmd := newInstallCommand(cli)
cmd.SetArgs(tc.args)
cmd.SetOutput(ioutil.Discard)
assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
}
}
func TestInstallContentTrustErrors(t *testing.T) {
testCases := []struct {
description string
args []string
expectedError string
notaryFunc test.NotaryClientFuncType
}{
{
description: "install plugin, offline notary server",
args: []string{"plugin:tag"},
expectedError: "client is offline",
notaryFunc: notary.GetOfflineNotaryRepository,
},
{
description: "install plugin, uninitialized notary server",
args: []string{"plugin:tag"},
expectedError: "remote trust data does not exist",
notaryFunc: notary.GetUninitializedNotaryRepository,
},
{
description: "install plugin, empty notary server",
args: []string{"plugin:tag"},
expectedError: "No valid trust data for tag",
notaryFunc: notary.GetEmptyTargetsNotaryRepository,
},
}
for _, tc := range testCases {
cli := test.NewFakeCli(&fakeClient{
pluginInstallFunc: func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) {
return nil, fmt.Errorf("should not try to install plugin")
},
}, test.EnableContentTrust)
cli.SetNotaryClient(tc.notaryFunc)
cmd := newInstallCommand(cli)
cmd.SetArgs(tc.args)
cmd.SetOutput(ioutil.Discard)
assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
}
}
func TestInstall(t *testing.T) {
testCases := []struct {
description string
args []string
expectedOutput string
installFunc func(name string, options types.PluginInstallOptions) (io.ReadCloser, error)
}{
{
description: "install with no additional flags",
args: []string{"foo"},
expectedOutput: "Installed plugin foo\n",
installFunc: func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("")), nil
},
},
{
description: "install with disable flag",
args: []string{"--disable", "foo"},
expectedOutput: "Installed plugin foo\n",
installFunc: func(name string, options types.PluginInstallOptions) (io.ReadCloser, error) {
assert.Check(t, options.Disabled)
return ioutil.NopCloser(strings.NewReader("")), nil
},
},
}
for _, tc := range testCases {
cli := test.NewFakeCli(&fakeClient{pluginInstallFunc: tc.installFunc})
cmd := newInstallCommand(cli)
cmd.SetArgs(tc.args)
assert.NilError(t, cmd.Execute())
assert.Check(t, strings.Contains(cli.OutBuffer().String(), tc.expectedOutput))
}
}

View File
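The install_test.go shown above is a table-driven test that injects behaviour through the fakeClient's pluginInstallFunc field, so each case can make the "client" fail in a different way. A stripped-down sketch of that pattern, with made-up names and without the cobra/docker wiring:

package example

import (
	"errors"
	"strings"
	"testing"
)

// installFunc is a stand-in for the injectable pluginInstallFunc above.
type installFunc func(name string) error

// runInstall is a hypothetical command body that delegates to the injected function.
func runInstall(name string, fn installFunc) error {
	if name == "" {
		return errors.New("requires at least 1 argument")
	}
	return fn(name)
}

func TestInstallErrors(t *testing.T) {
	testCases := []struct {
		description   string
		name          string
		expectedError string
		install       installFunc
	}{
		{
			description:   "insufficient number of arguments",
			expectedError: "requires at least 1 argument",
			install:       func(string) error { return nil },
		},
		{
			description:   "installation error",
			name:          "foo",
			expectedError: "error installing plugin",
			install:       func(string) error { return errors.New("error installing plugin") },
		},
	}
	for _, tc := range testCases {
		err := runInstall(tc.name, tc.install)
		if err == nil || !strings.Contains(err.Error(), tc.expectedError) {
			t.Errorf("%s: got %v, want substring %q", tc.description, err, tc.expectedError)
		}
	}
}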

@ -2,14 +2,12 @@ package plugin
import (
"context"
"sort"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/cli/opts"
"github.com/spf13/cobra"
"vbom.ml/util/sortorder"
)
type listOptions struct {
@ -48,10 +46,6 @@ func runList(dockerCli command.Cli, options listOptions) error {
return err
}
sort.Slice(plugins, func(i, j int) bool {
return sortorder.NaturalLess(plugins[i].Name, plugins[j].Name)
})
format := options.format
if len(format) == 0 {
if len(dockerCli.ConfigFile().PluginsFormat) > 0 && !options.quiet {

View File

@ -1,174 +0,0 @@
package plugin
import (
"fmt"
"io/ioutil"
"testing"
"github.com/docker/cli/internal/test"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
"gotest.tools/golden"
)
func TestListErrors(t *testing.T) {
testCases := []struct {
description string
args []string
flags map[string]string
expectedError string
listFunc func(filter filters.Args) (types.PluginsListResponse, error)
}{
{
description: "too many arguments",
args: []string{"foo"},
expectedError: "accepts no arguments",
},
{
description: "error listing plugins",
args: []string{},
expectedError: "error listing plugins",
listFunc: func(filter filters.Args) (types.PluginsListResponse, error) {
return types.PluginsListResponse{}, fmt.Errorf("error listing plugins")
},
},
{
description: "invalid format",
args: []string{},
flags: map[string]string{
"format": "{{invalid format}}",
},
expectedError: "Template parsing error",
},
}
for _, tc := range testCases {
cli := test.NewFakeCli(&fakeClient{pluginListFunc: tc.listFunc})
cmd := newListCommand(cli)
cmd.SetArgs(tc.args)
for key, value := range tc.flags {
cmd.Flags().Set(key, value)
}
cmd.SetOutput(ioutil.Discard)
assert.ErrorContains(t, cmd.Execute(), tc.expectedError)
}
}
func TestList(t *testing.T) {
singlePluginListFunc := func(filter filters.Args) (types.PluginsListResponse, error) {
return types.PluginsListResponse{
{
ID: "id-foo",
Name: "name-foo",
Enabled: true,
Config: types.PluginConfig{
Description: "desc-bar",
},
},
}, nil
}
testCases := []struct {
description string
args []string
flags map[string]string
golden string
listFunc func(filter filters.Args) (types.PluginsListResponse, error)
}{
{
description: "list with no additional flags",
args: []string{},
golden: "plugin-list-without-format.golden",
listFunc: singlePluginListFunc,
},
{
description: "list with filters",
args: []string{},
flags: map[string]string{
"filter": "foo=bar",
},
golden: "plugin-list-without-format.golden",
listFunc: func(filter filters.Args) (types.PluginsListResponse, error) {
assert.Check(t, is.Equal("bar", filter.Get("foo")[0]))
return singlePluginListFunc(filter)
},
},
{
description: "list with quiet option",
args: []string{},
flags: map[string]string{
"quiet": "true",
},
golden: "plugin-list-with-quiet-option.golden",
listFunc: singlePluginListFunc,
},
{
description: "list with no-trunc option",
args: []string{},
flags: map[string]string{
"no-trunc": "true",
"format": "{{ .ID }}",
},
golden: "plugin-list-with-no-trunc-option.golden",
listFunc: func(filter filters.Args) (types.PluginsListResponse, error) {
return types.PluginsListResponse{
{
ID: "xyg4z2hiSLO5yTnBJfg4OYia9gKA6Qjd",
Name: "name-foo",
Enabled: true,
Config: types.PluginConfig{
Description: "desc-bar",
},
},
}, nil
},
},
{
description: "list with format",
args: []string{},
flags: map[string]string{
"format": "{{ .Name }}",
},
golden: "plugin-list-with-format.golden",
listFunc: singlePluginListFunc,
},
{
description: "list output is sorted based on plugin name",
args: []string{},
flags: map[string]string{
"format": "{{ .Name }}",
},
golden: "plugin-list-sort.golden",
listFunc: func(filter filters.Args) (types.PluginsListResponse, error) {
return types.PluginsListResponse{
{
ID: "id-1",
Name: "plugin-1-foo",
},
{
ID: "id-2",
Name: "plugin-10-foo",
},
{
ID: "id-3",
Name: "plugin-2-foo",
},
}, nil
},
},
}
for _, tc := range testCases {
cli := test.NewFakeCli(&fakeClient{pluginListFunc: tc.listFunc})
cmd := newListCommand(cli)
cmd.SetArgs(tc.args)
for key, value := range tc.flags {
cmd.Flags().Set(key, value)
}
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), tc.golden)
}
}

View File

@ -1,3 +0,0 @@
plugin-1-foo
plugin-2-foo
plugin-10-foo

View File

@ -1 +0,0 @@
name-foo

View File

@ -1 +0,0 @@
xyg4z2hiSLO5yTnBJfg4OYia9gKA6Qjd

View File

@ -1 +0,0 @@
id-foo

View File

@ -1,2 +0,0 @@
ID NAME DESCRIPTION ENABLED
id-foo name-foo desc-bar true

View File

@ -11,7 +11,6 @@ import (
"runtime"
"strings"
"github.com/docker/cli/cli/debug"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
@ -27,10 +26,9 @@ func ElectAuthServer(ctx context.Context, cli Cli) string {
// example a Linux client might be interacting with a Windows daemon, hence
// the default registry URL might be Windows specific.
serverAddress := registry.IndexServer
if info, err := cli.Client().Info(ctx); err != nil && debug.IsEnabled() {
// Only report the warning if we're in debug mode to prevent nagging during engine initialization workflows
if info, err := cli.Client().Info(ctx); err != nil {
fmt.Fprintf(cli.Err(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress)
} else if info.IndexServerAddress == "" && debug.IsEnabled() {
} else if info.IndexServerAddress == "" {
fmt.Fprintf(cli.Err(), "Warning: Empty registry endpoint from daemon. Using system default: %s\n", serverAddress)
} else {
serverAddress = info.IndexServerAddress

View File

@ -125,11 +125,6 @@ func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocycl
}
response, err = clnt.RegistryLogin(ctx, *authConfig)
if err != nil && client.IsErrConnectionFailed(err) {
// If the server isn't responding (yet), attempt to log in purely client-side
response, err = loginClientSide(ctx, *authConfig)
}
// If we (still) have an error, give up
if err != nil {
return err
}
@ -172,17 +167,3 @@ func loginWithCredStoreCreds(ctx context.Context, dockerCli command.Cli, authCon
}
return response, err
}
func loginClientSide(ctx context.Context, auth types.AuthConfig) (registrytypes.AuthenticateOKBody, error) {
svc, err := registry.NewService(registry.ServiceOptions{})
if err != nil {
return registrytypes.AuthenticateOKBody{}, err
}
status, token, err := svc.Auth(ctx, &auth, command.UserAgent())
return registrytypes.AuthenticateOKBody{
Status: status,
IdentityToken: token,
}, err
}

View File
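One side of the login hunk above adds a client-side fallback: the error from RegistryLogin is checked with client.IsErrConnectionFailed, and only that class of failure triggers the purely local login path; any other error is returned as-is. A compact sketch of the shape of that fallback, with placeholder functions standing in for the daemon and client-side calls:

package main

import (
	"errors"
	"fmt"
)

// errConnectionFailed stands in for the error class client.IsErrConnectionFailed detects.
var errConnectionFailed = errors.New("connection failed")

func serverLogin() (string, error)     { return "", errConnectionFailed }
func clientSideLogin() (string, error) { return "login succeeded (client side)", nil }

func login() (string, error) {
	status, err := serverLogin()
	if err != nil && errors.Is(err, errConnectionFailed) {
		// The daemon isn't responding (yet); attempt to log in purely client-side.
		status, err = clientSideLogin()
	}
	// If we (still) have an error, give up.
	if err != nil {
		return "", err
	}
	return status, nil
}

func main() {
	status, err := login()
	fmt.Println(status, err)
}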

@ -28,6 +28,7 @@ type fakeClient struct {
client.Client
}
// nolint: unparam
func (c fakeClient) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registrytypes.AuthenticateOKBody, error) {
if auth.Password == expiredPassword {
return registrytypes.AuthenticateOKBody{}, fmt.Errorf("Invalid Username or Password")

View File

@ -9,6 +9,7 @@ import (
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/registry"
"github.com/spf13/cobra"
)
@ -80,14 +81,13 @@ func runSearch(dockerCli command.Cli, options searchOptions) error {
clnt := dockerCli.Client()
results, err := clnt.ImageSearch(ctx, options.term, searchOptions)
unorderedResults, err := clnt.ImageSearch(ctx, options.term, searchOptions)
if err != nil {
return err
}
sort.Slice(results, func(i, j int) bool {
return results[j].StarCount < results[i].StarCount
})
results := searchResultsByStars(unorderedResults)
sort.Sort(results)
searchCtx := formatter.Context{
Output: dockerCli.Out(),
Format: formatter.NewSearchFormat(options.format),
@ -95,3 +95,10 @@ func runSearch(dockerCli command.Cli, options searchOptions) error {
}
return formatter.SearchWrite(searchCtx, results, options.automated, int(options.stars))
}
// searchResultsByStars sorts search results in descending order by number of stars.
type searchResultsByStars []registrytypes.SearchResult
func (r searchResultsByStars) Len() int { return len(r) }
func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount }

View File

@ -13,7 +13,6 @@ import (
// Prevents a circular import with "github.com/docker/cli/internal/test"
. "github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/debug"
"github.com/docker/cli/internal/test"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
@ -79,8 +78,6 @@ func TestElectAuthServer(t *testing.T) {
},
},
}
// Enable debug to see warnings we're checking for
debug.Enable()
for _, tc := range testCases {
cli := test.NewFakeCli(&fakeClient{infoFunc: tc.infoFunc})
server := ElectAuthServer(context.Background(), cli)

View File

@ -9,10 +9,19 @@ import (
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
"github.com/spf13/cobra"
"vbom.ml/util/sortorder"
)
type bySecretName []swarm.Secret
func (r bySecretName) Len() int { return len(r) }
func (r bySecretName) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r bySecretName) Less(i, j int) bool {
return sortorder.NaturalLess(r[i].Spec.Name, r[j].Spec.Name)
}
type listOptions struct {
quiet bool
format string
@ -57,9 +66,7 @@ func runSecretList(dockerCli command.Cli, options listOptions) error {
}
}
sort.Slice(secrets, func(i, j int) bool {
return sortorder.NaturalLess(secrets[i].Spec.Name, secrets[j].Spec.Name)
})
sort.Sort(bySecretName(secrets))
secretCtx := formatter.Context{
Output: dockerCli.Out(),

View File

@ -44,6 +44,12 @@ func newListCommand(dockerCli command.Cli) *cobra.Command {
return cmd
}
type byName []swarm.Service
func (n byName) Len() int { return len(n) }
func (n byName) Swap(i, j int) { n[i], n[j] = n[j], n[i] }
func (n byName) Less(i, j int) bool { return sortorder.NaturalLess(n[i].Spec.Name, n[j].Spec.Name) }
func runList(dockerCli command.Cli, options listOptions) error {
ctx := context.Background()
client := dockerCli.Client()
@ -54,9 +60,7 @@ func runList(dockerCli command.Cli, options listOptions) error {
return err
}
sort.Slice(services, func(i, j int) bool {
return sortorder.NaturalLess(services[i].Spec.Name, services[j].Spec.Name)
})
sort.Sort(byName(services))
info := map[string]formatter.ServiceListInfo{}
if len(services) > 0 && !options.quiet {
// only when there are services and quiet mode is off do we need to call the TaskList and NodeList APIs

View File

@ -598,9 +598,7 @@ func (options *serviceOptions) ToService(ctx context.Context, apiClient client.N
}
networks[i].Target = nwID
}
sort.Slice(networks, func(i, j int) bool {
return networks[i].Target < networks[j].Target
})
sort.Sort(byNetworkTarget(networks))
resources, err := options.resources.ToResourceRequirements()
if err != nil {

View File

@ -140,7 +140,7 @@ loop:
}
func updateNodeFilter(ctx context.Context, client client.APIClient, filter filters.Args) error {
if filter.Contains("node") {
if filter.Include("node") {
nodeFilters := filter.Get("node")
for _, nodeFilter := range nodeFilters {
nodeReference, err := node.Reference(ctx, client, nodeFilter)

View File
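This one-line hunk is a spelling difference in the vendored docker client: filters.Args exposes Contains, and older vendored copies also carried the since-deprecated alias Include with the same semantics. A small sketch of the surrounding usage (Add/Get/Contains), assuming a client version that has Contains:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("node", "node-1")
	args.Add("node", "node-2")

	// Contains reports whether any value was set for the key;
	// on older vendored clients the same check is spelled Include.
	if args.Contains("node") {
		fmt.Println(args.Get("node")) // all values registered for the "node" key
	}
}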

@ -753,6 +753,20 @@ func removeItems(
return newSeq
}
type byMountSource []mounttypes.Mount
func (m byMountSource) Len() int { return len(m) }
func (m byMountSource) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
func (m byMountSource) Less(i, j int) bool {
a, b := m[i], m[j]
if a.Source == b.Source {
return a.Target < b.Target
}
return a.Source < b.Source
}
func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) error {
mountsByTarget := map[string]mounttypes.Mount{}
@ -782,15 +796,7 @@ func updateMounts(flags *pflag.FlagSet, mounts *[]mounttypes.Mount) error {
newMounts = append(newMounts, mount)
}
}
sort.Slice(newMounts, func(i, j int) bool {
a, b := newMounts[i], newMounts[j]
if a.Source == b.Source {
return a.Target < b.Target
}
return a.Source < b.Source
})
sort.Sort(byMountSource(newMounts))
*mounts = newMounts
return nil
}
@ -880,6 +886,16 @@ func updateDNSConfig(flags *pflag.FlagSet, config **swarm.DNSConfig) error {
return nil
}
type byPortConfig []swarm.PortConfig
func (r byPortConfig) Len() int { return len(r) }
func (r byPortConfig) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r byPortConfig) Less(i, j int) bool {
// We convert PortConfig into `port/protocol`, e.g., `80/tcp`
// In updatePorts we already filter duplicates out with a map, so there are no duplicate entries here
return portConfigToString(&r[i]) < portConfigToString(&r[j])
}
func portConfigToString(portConfig *swarm.PortConfig) string {
protocol := portConfig.Protocol
mode := portConfig.PublishMode
@ -928,11 +944,7 @@ portLoop:
}
// Sort the PortConfig to avoid unnecessary updates
sort.Slice(newPorts, func(i, j int) bool {
// We convert PortConfig into `port/protocol`, e.g., `80/tcp`
// In updatePorts we already filter duplicates out with a map, so there are no duplicate entries here
return portConfigToString(&newPorts[i]) < portConfigToString(&newPorts[j])
})
sort.Sort(byPortConfig(newPorts))
*portConfig = newPorts
return nil
}
@ -1130,6 +1142,14 @@ func updateHealthcheck(flags *pflag.FlagSet, containerSpec *swarm.ContainerSpec)
return nil
}
type byNetworkTarget []swarm.NetworkAttachmentConfig
func (m byNetworkTarget) Len() int { return len(m) }
func (m byNetworkTarget) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
func (m byNetworkTarget) Less(i, j int) bool {
return m[i].Target < m[j].Target
}
func updateNetworks(ctx context.Context, apiClient client.NetworkAPIClient, flags *pflag.FlagSet, spec *swarm.ServiceSpec) error {
// spec.TaskTemplate.Networks takes precedence over the deprecated
// spec.Networks field. If spec.Network is in use, we'll migrate those
@ -1178,9 +1198,7 @@ func updateNetworks(ctx context.Context, apiClient client.NetworkAPIClient, flag
}
}
sort.Slice(newNetworks, func(i, j int) bool {
return newNetworks[i].Target < newNetworks[j].Target
})
sort.Sort(byNetworkTarget(newNetworks))
spec.TaskTemplate.Networks = newNetworks
return nil

View File
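The byPortConfig comparator above keys its ordering on a canonical "port/protocol" string so that repeated service updates always produce the same port order and never look like a spec change. A tiny, self-contained illustration of sorting on such a canonical key (field names simplified, not the repository's types):

package main

import (
	"fmt"
	"sort"
)

type portConfig struct {
	TargetPort int
	Protocol   string
}

// key builds a canonical "port/protocol" string, the same idea as portConfigToString above.
func key(p portConfig) string { return fmt.Sprintf("%d/%s", p.TargetPort, p.Protocol) }

func main() {
	ports := []portConfig{{80, "udp"}, {443, "tcp"}, {80, "tcp"}}
	sort.Slice(ports, func(i, j int) bool { return key(ports[i]) < key(ports[j]) })
	for _, p := range ports {
		fmt.Println(key(p)) // deterministic order, so re-running an update stays stable
	}
}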

@ -81,7 +81,7 @@ func RunDeploy(dockerCli command.Cli, flags *pflag.FlagSet, config *composetypes
case commonOrchestrator.HasKubernetes():
kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(flags, commonOrchestrator))
if err != nil {
return errors.Wrap(err, "unable to deploy to Kubernetes")
return err
}
return kubernetes.RunDeploy(kli, opts, config)
default:

View File

@ -4,12 +4,10 @@ import (
"fmt"
"net"
"net/url"
"os"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/kubernetes"
cliv1beta1 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1"
"github.com/pkg/errors"
flag "github.com/spf13/pflag"
kubeclient "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
@ -60,10 +58,7 @@ func WrapCli(dockerCli command.Cli, opts Options) (*KubeCli, error) {
cli.kubeNamespace = opts.Namespace
if opts.Namespace == "" {
configNamespace, _, err := clientConfig.Namespace()
switch {
case os.IsNotExist(err), os.IsPermission(err):
return nil, errors.Wrap(err, "unable to load configuration file")
case err != nil:
if err != nil {
return nil, err
}
cli.kubeNamespace = configNamespace

View File
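One side of the WrapCli hunk above distinguishes the error classes it can explain (a missing or unreadable kubeconfig, detected with os.IsNotExist and os.IsPermission) and wraps only those with a message, passing every other error through untouched. A sketch of that shape using github.com/pkg/errors, as the file above does; the path and function are illustrative:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/pkg/errors"
)

// loadConfig wraps only the failures it can say something useful about.
func loadConfig(path string) ([]byte, error) {
	data, err := ioutil.ReadFile(path)
	switch {
	case os.IsNotExist(err), os.IsPermission(err):
		return nil, errors.Wrap(err, "unable to load configuration file")
	case err != nil:
		return nil, err
	}
	return data, nil
}

func main() {
	_, err := loadConfig("/nonexistent/kubeconfig")
	fmt.Println(err) // unable to load configuration file: open /nonexistent/kubeconfig: ...
}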

@ -8,72 +8,13 @@ import (
"strings"
"github.com/docker/cli/cli/compose/loader"
"github.com/docker/cli/cli/compose/schema"
composeTypes "github.com/docker/cli/cli/compose/types"
composetypes "github.com/docker/cli/cli/compose/types"
"github.com/docker/cli/kubernetes/compose/v1beta1"
"github.com/docker/cli/kubernetes/compose/v1beta2"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NewStackConverter returns a converter from types.Config (compose) to the specified
// stack version, or errors out if the version is unsupported or does not exist.
func NewStackConverter(version string) (StackConverter, error) {
switch version {
case "v1beta1":
return stackV1Beta1Converter{}, nil
case "v1beta2":
return stackV1Beta2Converter{}, nil
default:
return nil, errors.Errorf("stack version %s unsupported", version)
}
}
// StackConverter converts a compose types.Config to a Stack
type StackConverter interface {
FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error)
}
type stackV1Beta1Converter struct{}
func (s stackV1Beta1Converter) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) {
cfg.Version = v1beta1.MaxComposeVersion
st, err := fromCompose(stderr, name, cfg)
if err != nil {
return Stack{}, err
}
res, err := yaml.Marshal(cfg)
if err != nil {
return Stack{}, err
}
// reload the result to check that it produced a valid 3.5 compose file
resparsedConfig, err := loader.ParseYAML(res)
if err != nil {
return Stack{}, err
}
if err = schema.Validate(resparsedConfig, v1beta1.MaxComposeVersion); err != nil {
return Stack{}, errors.Wrapf(err, "the compose yaml file is invalid with v%s", v1beta1.MaxComposeVersion)
}
st.ComposeFile = string(res)
return st, nil
}
type stackV1Beta2Converter struct{}
func (s stackV1Beta2Converter) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) {
return fromCompose(stderr, name, cfg)
}
func fromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (Stack, error) {
return Stack{
Name: name,
Spec: fromComposeConfig(stderr, cfg),
}, nil
}
func loadStackData(composefile string) (*composetypes.Config, error) {
parsed, err := loader.ParseYAML([]byte(composefile))
if err != nil {
@ -89,44 +30,44 @@ func loadStackData(composefile string) (*composetypes.Config, error) {
}
// Conversions from internal stack to different stack compose component versions.
func stackFromV1beta1(in *v1beta1.Stack) (Stack, error) {
func stackFromV1beta1(in *v1beta1.Stack) (stack, error) {
cfg, err := loadStackData(in.Spec.ComposeFile)
if err != nil {
return Stack{}, err
return stack{}, err
}
return Stack{
Name: in.ObjectMeta.Name,
Namespace: in.ObjectMeta.Namespace,
ComposeFile: in.Spec.ComposeFile,
Spec: fromComposeConfig(ioutil.Discard, cfg),
return stack{
name: in.ObjectMeta.Name,
namespace: in.ObjectMeta.Namespace,
composeFile: in.Spec.ComposeFile,
spec: fromComposeConfig(ioutil.Discard, cfg),
}, nil
}
func stackToV1beta1(s Stack) *v1beta1.Stack {
func stackToV1beta1(s stack) *v1beta1.Stack {
return &v1beta1.Stack{
ObjectMeta: metav1.ObjectMeta{
Name: s.Name,
Name: s.name,
},
Spec: v1beta1.StackSpec{
ComposeFile: s.ComposeFile,
ComposeFile: s.composeFile,
},
}
}
func stackFromV1beta2(in *v1beta2.Stack) Stack {
return Stack{
Name: in.ObjectMeta.Name,
Namespace: in.ObjectMeta.Namespace,
Spec: in.Spec,
func stackFromV1beta2(in *v1beta2.Stack) stack {
return stack{
name: in.ObjectMeta.Name,
namespace: in.ObjectMeta.Namespace,
spec: in.Spec,
}
}
func stackToV1beta2(s Stack) *v1beta2.Stack {
func stackToV1beta2(s stack) *v1beta2.Stack {
return &v1beta2.Stack{
ObjectMeta: metav1.ObjectMeta{
Name: s.Name,
Name: s.name,
},
Spec: s.Spec,
Spec: s.spec,
}
}

View File
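The NewStackConverter shown above is a small factory: a switch over the requested API version returns one of two implementations of a common interface, or an error for anything else. The same shape in miniature (the names here are illustrative, not the repository's):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// converter is a cut-down stand-in for the StackConverter interface above.
type converter interface {
	Version() string
}

type v1beta1Converter struct{}

func (v1beta1Converter) Version() string { return "v1beta1" }

type v1beta2Converter struct{}

func (v1beta2Converter) Version() string { return "v1beta2" }

func newConverter(version string) (converter, error) {
	switch version {
	case "v1beta1":
		return v1beta1Converter{}, nil
	case "v1beta2":
		return v1beta2Converter{}, nil
	default:
		return nil, errors.Errorf("stack version %s unsupported", version)
	}
}

func main() {
	if _, err := newConverter("v1alpha1"); err != nil {
		fmt.Println(err) // stack version v1alpha1 unsupported
	}
}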

@ -1,18 +0,0 @@
package kubernetes
import (
"testing"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)
func TestNewStackConverter(t *testing.T) {
_, err := NewStackConverter("v1alpha1")
assert.Check(t, is.ErrorContains(err, "stack version v1alpha1 unsupported"))
_, err = NewStackConverter("v1beta1")
assert.NilError(t, err)
_, err = NewStackConverter("v1beta2")
assert.NilError(t, err)
}

View File

@ -70,13 +70,13 @@ func RunDeploy(dockerCli *KubeCli, opts options.Deploy, cfg *composetypes.Config
}
}()
err = watcher.Watch(stack.Name, stack.getServices(), statusUpdates)
err = watcher.Watch(stack.name, stack.getServices(), statusUpdates)
close(statusUpdates)
<-displayDone
if err != nil {
return err
}
fmt.Fprintf(cmdOut, "\nStack %s is stable and running\n\n", stack.Name)
fmt.Fprintf(cmdOut, "\nStack %s is stable and running\n\n", stack.name)
return nil
}

View File

@ -48,10 +48,10 @@ func getStacks(kubeCli *KubeCli, opts options.List) ([]*formatter.Stack, error)
var formattedStacks []*formatter.Stack
for _, stack := range stacks {
formattedStacks = append(formattedStacks, &formatter.Stack{
Name: stack.Name,
Name: stack.name,
Services: len(stack.getServices()),
Orchestrator: "Kubernetes",
Namespace: stack.Namespace,
Namespace: stack.namespace,
})
}
return formattedStacks, nil

View File

@ -12,18 +12,18 @@ import (
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
// Stack is the main type used by stack commands so they remain independent from kubernetes compose component version.
type Stack struct {
Name string
Namespace string
ComposeFile string
Spec *v1beta2.StackSpec
// stack is the main type used by stack commands so they remain independent from kubernetes compose component version.
type stack struct {
name string
namespace string
composeFile string
spec *v1beta2.StackSpec
}
// getServices returns all the stack service names, sorted lexicographically
func (s *Stack) getServices() []string {
services := make([]string, len(s.Spec.Services))
for i, service := range s.Spec.Services {
func (s *stack) getServices() []string {
services := make([]string, len(s.spec.Services))
for i, service := range s.spec.Services {
services[i] = service.Name
}
sort.Strings(services)
@ -31,8 +31,8 @@ func (s *Stack) getServices() []string {
}
// createFileBasedConfigMaps creates a Kubernetes ConfigMap for each Compose global file-based config.
func (s *Stack) createFileBasedConfigMaps(configMaps corev1.ConfigMapInterface) error {
for name, config := range s.Spec.Configs {
func (s *stack) createFileBasedConfigMaps(configMaps corev1.ConfigMapInterface) error {
for name, config := range s.spec.Configs {
if config.File == "" {
continue
}
@ -43,7 +43,7 @@ func (s *Stack) createFileBasedConfigMaps(configMaps corev1.ConfigMapInterface)
return err
}
if _, err := configMaps.Create(toConfigMap(s.Name, name, fileName, content)); err != nil {
if _, err := configMaps.Create(toConfigMap(s.name, name, fileName, content)); err != nil {
return err
}
}
@ -71,8 +71,8 @@ func toConfigMap(stackName, name, key string, content []byte) *apiv1.ConfigMap {
}
// createFileBasedSecrets creates a Kubernetes Secret for each Compose global file-based secret.
func (s *Stack) createFileBasedSecrets(secrets corev1.SecretInterface) error {
for name, secret := range s.Spec.Secrets {
func (s *stack) createFileBasedSecrets(secrets corev1.SecretInterface) error {
for name, secret := range s.spec.Secrets {
if secret.File == "" {
continue
}
@ -83,7 +83,7 @@ func (s *Stack) createFileBasedSecrets(secrets corev1.SecretInterface) error {
return err
}
if _, err := secrets.Create(toSecret(s.Name, name, fileName, content)); err != nil {
if _, err := secrets.Create(toSecret(s.name, name, fileName, content)); err != nil {
return err
}
}

View File

@ -2,10 +2,17 @@ package kubernetes
import (
"fmt"
"io"
"github.com/docker/cli/cli/compose/loader"
"github.com/docker/cli/cli/compose/schema"
composetypes "github.com/docker/cli/cli/compose/types"
composev1beta1 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta1"
composev1beta2 "github.com/docker/cli/kubernetes/client/clientset/typed/compose/v1beta2"
v1beta1types "github.com/docker/cli/kubernetes/compose/v1beta1"
"github.com/docker/cli/kubernetes/labels"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
@ -13,17 +20,16 @@ import (
// StackClient talks to a kubernetes compose component.
type StackClient interface {
StackConverter
CreateOrUpdate(s Stack) error
CreateOrUpdate(s stack) error
Delete(name string) error
Get(name string) (Stack, error)
List(opts metav1.ListOptions) ([]Stack, error)
IsColliding(servicesClient corev1.ServiceInterface, s Stack) error
Get(name string) (stack, error)
List(opts metav1.ListOptions) ([]stack, error)
IsColliding(servicesClient corev1.ServiceInterface, s stack) error
FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (stack, error)
}
// stackV1Beta1 implements the StackClient interface and talks to the compose component v1beta1.
type stackV1Beta1 struct {
stackV1Beta1Converter
stacks composev1beta1.StackInterface
}
@ -35,10 +41,10 @@ func newStackV1Beta1(config *rest.Config, namespace string) (*stackV1Beta1, erro
return &stackV1Beta1{stacks: client.Stacks(namespace)}, nil
}
func (s *stackV1Beta1) CreateOrUpdate(internalStack Stack) error {
func (s *stackV1Beta1) CreateOrUpdate(internalStack stack) error {
// If it already exists, update the stack
if stackBeta1, err := s.stacks.Get(internalStack.Name, metav1.GetOptions{}); err == nil {
stackBeta1.Spec.ComposeFile = internalStack.ComposeFile
if stackBeta1, err := s.stacks.Get(internalStack.name, metav1.GetOptions{}); err == nil {
stackBeta1.Spec.ComposeFile = internalStack.composeFile
_, err := s.stacks.Update(stackBeta1)
return err
}
@ -51,20 +57,20 @@ func (s *stackV1Beta1) Delete(name string) error {
return s.stacks.Delete(name, &metav1.DeleteOptions{})
}
func (s *stackV1Beta1) Get(name string) (Stack, error) {
func (s *stackV1Beta1) Get(name string) (stack, error) {
stackBeta1, err := s.stacks.Get(name, metav1.GetOptions{})
if err != nil {
return Stack{}, err
return stack{}, err
}
return stackFromV1beta1(stackBeta1)
}
func (s *stackV1Beta1) List(opts metav1.ListOptions) ([]Stack, error) {
func (s *stackV1Beta1) List(opts metav1.ListOptions) ([]stack, error) {
list, err := s.stacks.List(opts)
if err != nil {
return nil, err
}
stacks := make([]Stack, len(list.Items))
stacks := make([]stack, len(list.Items))
for i := range list.Items {
stack, err := stackFromV1beta1(&list.Items[i])
if err != nil {
@ -76,9 +82,9 @@ func (s *stackV1Beta1) List(opts metav1.ListOptions) ([]Stack, error) {
}
// IsColliding verifies whether services defined in the stack collide with already deployed services
func (s *stackV1Beta1) IsColliding(servicesClient corev1.ServiceInterface, st Stack) error {
func (s *stackV1Beta1) IsColliding(servicesClient corev1.ServiceInterface, st stack) error {
for _, srv := range st.getServices() {
if err := verify(servicesClient, st.Name, srv); err != nil {
if err := verify(servicesClient, st.name, srv); err != nil {
return err
}
}
@ -102,9 +108,31 @@ func verify(services corev1.ServiceInterface, stackName string, service string)
return nil
}
func (s *stackV1Beta1) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (stack, error) {
cfg.Version = v1beta1types.MaxComposeVersion
st, err := fromCompose(stderr, name, cfg)
if err != nil {
return stack{}, err
}
res, err := yaml.Marshal(cfg)
if err != nil {
return stack{}, err
}
// reload the result to check that it produced a valid 3.5 compose file
resparsedConfig, err := loader.ParseYAML(res)
if err != nil {
return stack{}, err
}
if err = schema.Validate(resparsedConfig, v1beta1types.MaxComposeVersion); err != nil {
return stack{}, errors.Wrapf(err, "the compose yaml file is invalid with v%s", v1beta1types.MaxComposeVersion)
}
st.composeFile = string(res)
return st, nil
}
// stackV1Beta2 implements the StackClient interface and talks to the compose component v1beta2.
type stackV1Beta2 struct {
stackV1Beta2Converter
stacks composev1beta2.StackInterface
}
@ -116,10 +144,10 @@ func newStackV1Beta2(config *rest.Config, namespace string) (*stackV1Beta2, erro
return &stackV1Beta2{stacks: client.Stacks(namespace)}, nil
}
func (s *stackV1Beta2) CreateOrUpdate(internalStack Stack) error {
func (s *stackV1Beta2) CreateOrUpdate(internalStack stack) error {
// If it already exists, update the stack
if stackBeta2, err := s.stacks.Get(internalStack.Name, metav1.GetOptions{}); err == nil {
stackBeta2.Spec = internalStack.Spec
if stackBeta2, err := s.stacks.Get(internalStack.name, metav1.GetOptions{}); err == nil {
stackBeta2.Spec = internalStack.spec
_, err := s.stacks.Update(stackBeta2)
return err
}
@ -132,20 +160,20 @@ func (s *stackV1Beta2) Delete(name string) error {
return s.stacks.Delete(name, &metav1.DeleteOptions{})
}
func (s *stackV1Beta2) Get(name string) (Stack, error) {
func (s *stackV1Beta2) Get(name string) (stack, error) {
stackBeta2, err := s.stacks.Get(name, metav1.GetOptions{})
if err != nil {
return Stack{}, err
return stack{}, err
}
return stackFromV1beta2(stackBeta2), nil
}
func (s *stackV1Beta2) List(opts metav1.ListOptions) ([]Stack, error) {
func (s *stackV1Beta2) List(opts metav1.ListOptions) ([]stack, error) {
list, err := s.stacks.List(opts)
if err != nil {
return nil, err
}
stacks := make([]Stack, len(list.Items))
stacks := make([]stack, len(list.Items))
for i := range list.Items {
stacks[i] = stackFromV1beta2(&list.Items[i])
}
@ -153,6 +181,17 @@ func (s *stackV1Beta2) List(opts metav1.ListOptions) ([]Stack, error) {
}
// IsColliding is handled server-side by the compose api v1beta2, so there is nothing to do here
func (s *stackV1Beta2) IsColliding(servicesClient corev1.ServiceInterface, st Stack) error {
func (s *stackV1Beta2) IsColliding(servicesClient corev1.ServiceInterface, st stack) error {
return nil
}
func (s *stackV1Beta2) FromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (stack, error) {
return fromCompose(stderr, name, cfg)
}
func fromCompose(stderr io.Writer, name string, cfg *composetypes.Config) (stack, error) {
return stack{
name: name,
spec: fromComposeConfig(stderr, cfg),
}, nil
}

View File

@ -25,14 +25,18 @@ func TestFromCompose(t *testing.T) {
},
})
assert.NilError(t, err)
assert.Equal(t, "foo", s.Name)
assert.Equal(t, "foo", s.name)
assert.Equal(t, string(`version: "3.5"
services:
bar:
image: bar
foo:
image: foo
`), s.ComposeFile)
networks: {}
volumes: {}
secrets: {}
configs: {}
`), s.composeFile)
}
func TestFromComposeUnsupportedVersion(t *testing.T) {

View File

@ -51,134 +51,121 @@ func TestStackPsErrors(t *testing.T) {
}
}
func TestStackPs(t *testing.T) {
testCases := []struct {
doc string
taskListFunc func(types.TaskListOptions) ([]swarm.Task, error)
nodeInspectWithRaw func(string) (swarm.Node, []byte, error)
config configfile.ConfigFile
args []string
flags map[string]string
expectedErr string
golden string
}{
{
doc: "WithEmptyName",
args: []string{"' '"},
expectedErr: `invalid stack name: "' '"`,
},
{
doc: "WithEmptyStack",
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{}, nil
},
args: []string{"foo"},
expectedErr: "nothing found in stack: foo",
},
{
doc: "WithQuietOption",
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{*Task(TaskID("id-foo"))}, nil
},
args: []string{"foo"},
flags: map[string]string{
"quiet": "true",
},
golden: "stack-ps-with-quiet-option.golden",
},
{
doc: "WithNoTruncOption",
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{*Task(TaskID("xn4cypcov06f2w8gsbaf2lst3"))}, nil
},
args: []string{"foo"},
flags: map[string]string{
"no-trunc": "true",
"format": "{{ .ID }}",
},
golden: "stack-ps-with-no-trunc-option.golden",
},
{
doc: "WithNoResolveOption",
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{*Task(
TaskNodeID("id-node-foo"),
)}, nil
},
nodeInspectWithRaw: func(ref string) (swarm.Node, []byte, error) {
return *Node(NodeName("node-name-bar")), nil, nil
},
args: []string{"foo"},
flags: map[string]string{
"no-resolve": "true",
"format": "{{ .Node }}",
},
golden: "stack-ps-with-no-resolve-option.golden",
},
{
doc: "WithFormat",
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{*Task(TaskServiceID("service-id-foo"))}, nil
},
args: []string{"foo"},
flags: map[string]string{
"format": "{{ .Name }}",
},
golden: "stack-ps-with-format.golden",
},
{
doc: "WithConfigFormat",
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{*Task(TaskServiceID("service-id-foo"))}, nil
},
config: configfile.ConfigFile{
TasksFormat: "{{ .Name }}",
},
args: []string{"foo"},
golden: "stack-ps-with-config-format.golden",
},
{
doc: "WithoutFormat",
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{*Task(
TaskID("id-foo"),
TaskServiceID("service-id-foo"),
TaskNodeID("id-node"),
WithTaskSpec(TaskImage("myimage:mytag")),
TaskDesiredState(swarm.TaskStateReady),
WithStatus(TaskState(swarm.TaskStateFailed), Timestamp(time.Now().Add(-2*time.Hour))),
)}, nil
},
nodeInspectWithRaw: func(ref string) (swarm.Node, []byte, error) {
return *Node(NodeName("node-name-bar")), nil, nil
},
args: []string{"foo"},
golden: "stack-ps-without-format.golden",
},
}
func TestRunPSWithEmptyName(t *testing.T) {
cmd := newPsCommand(test.NewFakeCli(&fakeClient{}), &orchestrator)
cmd.SetArgs([]string{"' '"})
cmd.SetOutput(ioutil.Discard)
for _, tc := range testCases {
t.Run(tc.doc, func(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{
taskListFunc: tc.taskListFunc,
nodeInspectWithRaw: tc.nodeInspectWithRaw,
})
cli.SetConfigFile(&tc.config)
cmd := newPsCommand(cli, &orchestrator)
cmd.SetArgs(tc.args)
for key, value := range tc.flags {
cmd.Flags().Set(key, value)
}
cmd.SetOutput(ioutil.Discard)
if tc.expectedErr != "" {
assert.Error(t, cmd.Execute(), tc.expectedErr)
assert.Check(t, is.Equal("", cli.OutBuffer().String()))
return
}
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), tc.golden)
})
}
assert.ErrorContains(t, cmd.Execute(), `invalid stack name: "' '"`)
}
func TestStackPsEmptyStack(t *testing.T) {
fakeCli := test.NewFakeCli(&fakeClient{
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{}, nil
},
})
cmd := newPsCommand(fakeCli, &orchestrator)
cmd.SetArgs([]string{"foo"})
cmd.SetOutput(ioutil.Discard)
assert.Error(t, cmd.Execute(), "nothing found in stack: foo")
assert.Check(t, is.Equal("", fakeCli.OutBuffer().String()))
}
func TestStackPsWithQuietOption(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{*Task(TaskID("id-foo"))}, nil
},
})
cmd := newPsCommand(cli, &orchestrator)
cmd.SetArgs([]string{"foo"})
cmd.Flags().Set("quiet", "true")
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), "stack-ps-with-quiet-option.golden")
}
func TestStackPsWithNoTruncOption(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{*Task(TaskID("xn4cypcov06f2w8gsbaf2lst3"))}, nil
},
})
cmd := newPsCommand(cli, &orchestrator)
cmd.SetArgs([]string{"foo"})
cmd.Flags().Set("no-trunc", "true")
cmd.Flags().Set("format", "{{ .ID }}")
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), "stack-ps-with-no-trunc-option.golden")
}
func TestStackPsWithNoResolveOption(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{*Task(
TaskNodeID("id-node-foo"),
)}, nil
},
nodeInspectWithRaw: func(ref string) (swarm.Node, []byte, error) {
return *Node(NodeName("node-name-bar")), nil, nil
},
})
cmd := newPsCommand(cli, &orchestrator)
cmd.SetArgs([]string{"foo"})
cmd.Flags().Set("no-resolve", "true")
cmd.Flags().Set("format", "{{ .Node }}")
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), "stack-ps-with-no-resolve-option.golden")
}
func TestStackPsWithFormat(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{*Task(TaskServiceID("service-id-foo"))}, nil
},
})
cmd := newPsCommand(cli, &orchestrator)
cmd.SetArgs([]string{"foo"})
cmd.Flags().Set("format", "{{ .Name }}")
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), "stack-ps-with-format.golden")
}
func TestStackPsWithConfigFormat(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{*Task(TaskServiceID("service-id-foo"))}, nil
},
})
cli.SetConfigFile(&configfile.ConfigFile{
TasksFormat: "{{ .Name }}",
})
cmd := newPsCommand(cli, &orchestrator)
cmd.SetArgs([]string{"foo"})
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), "stack-ps-with-config-format.golden")
}
func TestStackPsWithoutFormat(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{
taskListFunc: func(options types.TaskListOptions) ([]swarm.Task, error) {
return []swarm.Task{*Task(
TaskID("id-foo"),
TaskServiceID("service-id-foo"),
TaskNodeID("id-node"),
WithTaskSpec(TaskImage("myimage:mytag")),
TaskDesiredState(swarm.TaskStateReady),
WithStatus(TaskState(swarm.TaskStateFailed), Timestamp(time.Now().Add(-2*time.Hour))),
)}, nil
},
nodeInspectWithRaw: func(ref string) (swarm.Node, []byte, error) {
return *Node(NodeName("node-name-bar")), nil, nil
},
})
cmd := newPsCommand(cli, &orchestrator)
cmd.SetArgs([]string{"foo"})
assert.NilError(t, cmd.Execute())
golden.Assert(t, cli.OutBuffer().String(), "stack-ps-without-format.golden")
}

View File

@ -3,7 +3,6 @@ package swarm
import (
"context"
"fmt"
"net"
"strings"
"github.com/docker/cli/cli"
@ -18,12 +17,10 @@ type initOptions struct {
swarmOptions
listenAddr NodeAddrOption
// Not a NodeAddrOption because it has no default port.
advertiseAddr string
dataPathAddr string
forceNewCluster bool
availability string
defaultAddrPools []net.IPNet
DefaultAddrPoolMaskLength uint32
advertiseAddr string
dataPathAddr string
forceNewCluster bool
availability string
}
func newInitCommand(dockerCli command.Cli) *cobra.Command {
@ -44,36 +41,24 @@ func newInitCommand(dockerCli command.Cli) *cobra.Command {
flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: <ip|interface>[:port])")
flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: <ip|interface>[:port])")
flags.StringVar(&opts.dataPathAddr, flagDataPathAddr, "", "Address or interface to use for data path traffic (format: <ip|interface>)")
flags.SetAnnotation(flagDataPathAddr, "version", []string{"1.31"})
flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state")
flags.BoolVar(&opts.autolock, flagAutolock, false, "Enable manager autolocking (requiring an unlock key to start a stopped manager)")
flags.StringVar(&opts.availability, flagAvailability, "active", `Availability of the node ("active"|"pause"|"drain")`)
flags.IPNetSliceVar(&opts.defaultAddrPools, flagDefaultAddrPool, []net.IPNet{}, "default address pool in CIDR format")
flags.SetAnnotation(flagDefaultAddrPool, "version", []string{"1.39"})
flags.Uint32Var(&opts.DefaultAddrPoolMaskLength, flagDefaultAddrPoolMaskLength, 24, "default address pool subnet mask length")
flags.SetAnnotation(flagDefaultAddrPoolMaskLength, "version", []string{"1.39"})
addSwarmFlags(flags, &opts.swarmOptions)
return cmd
}
func runInit(dockerCli command.Cli, flags *pflag.FlagSet, opts initOptions) error {
var defaultAddrPool []string
client := dockerCli.Client()
ctx := context.Background()
for _, p := range opts.defaultAddrPools {
defaultAddrPool = append(defaultAddrPool, p.String())
}
req := swarm.InitRequest{
ListenAddr: opts.listenAddr.String(),
AdvertiseAddr: opts.advertiseAddr,
DataPathAddr: opts.dataPathAddr,
DefaultAddrPool: defaultAddrPool,
ForceNewCluster: opts.forceNewCluster,
Spec: opts.swarmOptions.ToSpec(flags),
AutoLockManagers: opts.swarmOptions.autolock,
SubnetSize: opts.DefaultAddrPoolMaskLength,
}
if flags.Changed(flagAvailability) {
availability := swarm.NodeAvailability(strings.ToLower(opts.availability))

View File
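Several flags in the swarm init/join hunks above carry a "version" annotation (1.31 for data-path-addr, 1.39 for the default address pool flags); the CLI reads these annotations to hide flags that the connected daemon's API version cannot handle. A minimal sketch of setting and reading such an annotation with pflag alone; the flag name is borrowed from the hunk, the annotation value and the rest of the wiring are only illustrative:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("swarm-init", pflag.ContinueOnError)

	var advertiseAddr string
	flags.StringVar(&advertiseAddr, "advertise-addr", "", "Advertised address (format: <ip|interface>[:port])")

	// Attach metadata to the flag; the docker CLI uses the "version"
	// annotation to decide whether to expose the flag for a given daemon.
	flags.SetAnnotation("advertise-addr", "version", []string{"1.24"})

	f := flags.Lookup("advertise-addr")
	fmt.Println(f.Annotations["version"]) // [1.24]
}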

@ -42,7 +42,6 @@ func newJoinCommand(dockerCli command.Cli) *cobra.Command {
flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: <ip|interface>[:port])")
flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: <ip|interface>[:port])")
flags.StringVar(&opts.dataPathAddr, flagDataPathAddr, "", "Address or interface to use for data path traffic (format: <ip|interface>)")
flags.SetAnnotation(flagDataPathAddr, "version", []string{"1.31"})
flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm")
flags.StringVar(&opts.availability, flagAvailability, "active", `Availability of the node ("active"|"pause"|"drain")`)
return cmd

View File

@ -17,24 +17,22 @@ import (
const (
defaultListenAddr = "0.0.0.0:2377"
flagCertExpiry = "cert-expiry"
flagDispatcherHeartbeat = "dispatcher-heartbeat"
flagListenAddr = "listen-addr"
flagAdvertiseAddr = "advertise-addr"
flagDataPathAddr = "data-path-addr"
flagDefaultAddrPool = "default-addr-pool"
flagDefaultAddrPoolMaskLength = "default-addr-pool-mask-length"
flagQuiet = "quiet"
flagRotate = "rotate"
flagToken = "token"
flagTaskHistoryLimit = "task-history-limit"
flagExternalCA = "external-ca"
flagMaxSnapshots = "max-snapshots"
flagSnapshotInterval = "snapshot-interval"
flagAutolock = "autolock"
flagAvailability = "availability"
flagCACert = "ca-cert"
flagCAKey = "ca-key"
flagCertExpiry = "cert-expiry"
flagDispatcherHeartbeat = "dispatcher-heartbeat"
flagListenAddr = "listen-addr"
flagAdvertiseAddr = "advertise-addr"
flagDataPathAddr = "data-path-addr"
flagQuiet = "quiet"
flagRotate = "rotate"
flagToken = "token"
flagTaskHistoryLimit = "task-history-limit"
flagExternalCA = "external-ca"
flagMaxSnapshots = "max-snapshots"
flagSnapshotInterval = "snapshot-interval"
flagAutolock = "autolock"
flagAvailability = "availability"
flagCACert = "ca-cert"
flagCAKey = "ca-key"
)
type swarmOptions struct {

View File

@ -19,7 +19,6 @@ func NewSystemCommand(dockerCli command.Cli) *cobra.Command {
NewInfoCommand(dockerCli),
newDiskUsageCommand(dockerCli),
newPruneCommand(dockerCli),
newDialStdioCommand(dockerCli),
)
return cmd

View File

@ -2,6 +2,7 @@ package system
import (
"context"
"errors"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
@ -37,6 +38,10 @@ func newDiskUsageCommand(dockerCli command.Cli) *cobra.Command {
}
func runDiskUsage(dockerCli command.Cli, opts diskUsageOptions) error {
if opts.verbose && len(opts.format) != 0 {
return errors.New("the verbose and the format options conflict")
}
du, err := dockerCli.Client().DiskUsage(context.Background())
if err != nil {
return err
@ -47,20 +52,13 @@ func runDiskUsage(dockerCli command.Cli, opts diskUsageOptions) error {
format = formatter.TableFormatKey
}
var bsz int64
for _, bc := range du.BuildCache {
if !bc.Shared {
bsz += bc.Size
}
}
duCtx := formatter.DiskUsageContext{
Context: formatter.Context{
Output: dockerCli.Out(),
Format: formatter.NewDiskUsageFormat(format, opts.verbose),
Format: formatter.NewDiskUsageFormat(format),
},
LayersSize: du.LayersSize,
BuilderSize: bsz,
BuilderSize: du.BuilderSize,
BuildCache: du.BuildCache,
Images: du.Images,
Containers: du.Containers,

View File

@ -1,107 +0,0 @@
package system
import (
"context"
"io"
"os"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// newDialStdioCommand creates a new cobra.Command for `docker system dial-stdio`
func newDialStdioCommand(dockerCli command.Cli) *cobra.Command {
cmd := &cobra.Command{
Use: "dial-stdio",
Short: "Proxy the stdio stream to the daemon connection. Should not be invoked manually.",
Args: cli.NoArgs,
Hidden: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runDialStdio(dockerCli)
},
}
return cmd
}
func runDialStdio(dockerCli command.Cli) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
dialer := dockerCli.Client().Dialer()
conn, err := dialer(ctx)
if err != nil {
return errors.Wrap(err, "failed to open the raw stream connection")
}
connHalfCloser, ok := conn.(halfCloser)
if !ok {
return errors.New("the raw stream connection does not implement halfCloser")
}
stdin2conn := make(chan error)
conn2stdout := make(chan error)
go func() {
stdin2conn <- copier(connHalfCloser, &halfReadCloserWrapper{os.Stdin}, "stdin to stream")
}()
go func() {
conn2stdout <- copier(&halfWriteCloserWrapper{os.Stdout}, connHalfCloser, "stream to stdout")
}()
select {
case err = <-stdin2conn:
if err != nil {
return err
}
// wait for stdout
err = <-conn2stdout
case err = <-conn2stdout:
// return immediately without waiting for stdin to be closed.
// (stdin is never closed when tty)
}
return err
}
func copier(to halfWriteCloser, from halfReadCloser, debugDescription string) error {
defer func() {
if err := from.CloseRead(); err != nil {
logrus.Errorf("error while CloseRead (%s): %v", debugDescription, err)
}
if err := to.CloseWrite(); err != nil {
logrus.Errorf("error while CloseWrite (%s): %v", debugDescription, err)
}
}()
if _, err := io.Copy(to, from); err != nil {
return errors.Wrapf(err, "error while Copy (%s)", debugDescription)
}
return nil
}
type halfReadCloser interface {
io.Reader
CloseRead() error
}
type halfWriteCloser interface {
io.Writer
CloseWrite() error
}
type halfCloser interface {
halfReadCloser
halfWriteCloser
}
type halfReadCloserWrapper struct {
io.ReadCloser
}
func (x *halfReadCloserWrapper) CloseRead() error {
return x.Close()
}
type halfWriteCloserWrapper struct {
io.WriteCloser
}
func (x *halfWriteCloserWrapper) CloseWrite() error {
return x.Close()
}

View File
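The dial-stdio command above proxies stdio over the raw daemon connection and depends on half-closing each direction independently (CloseRead/CloseWrite) so that EOF propagates without tearing down the whole stream. The same half-close idea on a plain TCP connection, as a self-contained sketch; the host and request are only for demonstration:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"strings"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()

	// Write the request, then half-close only the write side: the peer sees
	// EOF on its read, while we can still read the full reply.
	io.Copy(conn, strings.NewReader("GET / HTTP/1.0\r\nHost: example.com\r\n\r\n"))
	conn.(*net.TCPConn).CloseWrite()

	reply, _ := ioutil.ReadAll(conn)
	fmt.Printf("read %d bytes after half-close\n", len(reply))
}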

@ -204,16 +204,50 @@ func prettyPrintInfo(dockerCli command.Cli, info types.Info) error {
}
fmt.Fprintln(dockerCli.Out(), "Live Restore Enabled:", info.LiveRestoreEnabled)
if info.ProductLicense != "" {
fmt.Fprintln(dockerCli.Out(), "Product License:", info.ProductLicense)
}
fmt.Fprint(dockerCli.Out(), "\n")
printWarnings(dockerCli, info)
// Only output these warnings if the server does not support these features
if info.OSType != "windows" {
printStorageDriverWarnings(dockerCli, info)
if !info.MemoryLimit {
fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support")
}
if !info.SwapLimit {
fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support")
}
if !info.KernelMemory {
fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support")
}
if !info.OomKillDisable {
fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support")
}
if !info.CPUCfsQuota {
fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support")
}
if !info.CPUCfsPeriod {
fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support")
}
if !info.CPUShares {
fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support")
}
if !info.CPUSet {
fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support")
}
if !info.IPv4Forwarding {
fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled")
}
if !info.BridgeNfIptables {
fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled")
}
if !info.BridgeNfIP6tables {
fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled")
}
}
return nil
}
// nolint: gocyclo
func printSwarmInfo(dockerCli command.Cli, info types.Info) {
if info.Swarm.LocalNodeState == swarm.LocalNodeStateInactive || info.Swarm.LocalNodeState == swarm.LocalNodeStateLocked {
return
@ -227,16 +261,7 @@ func printSwarmInfo(dockerCli command.Cli, info types.Info) {
fmt.Fprintln(dockerCli.Out(), " ClusterID:", info.Swarm.Cluster.ID)
fmt.Fprintln(dockerCli.Out(), " Managers:", info.Swarm.Managers)
fmt.Fprintln(dockerCli.Out(), " Nodes:", info.Swarm.Nodes)
var strAddrPool strings.Builder
if info.Swarm.Cluster.DefaultAddrPool != nil {
for _, p := range info.Swarm.Cluster.DefaultAddrPool {
strAddrPool.WriteString(p + " ")
}
fmt.Fprintln(dockerCli.Out(), " Default Address Pool:", strAddrPool.String())
fmt.Fprintln(dockerCli.Out(), " SubnetSize:", info.Swarm.Cluster.SubnetSize)
}
fmt.Fprintln(dockerCli.Out(), " Orchestration:")
taskHistoryRetentionLimit := int64(0)
if info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit != nil {
taskHistoryRetentionLimit = *info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit
@ -280,73 +305,11 @@ func printSwarmInfo(dockerCli command.Cli, info types.Info) {
}
}
func printWarnings(dockerCli command.Cli, info types.Info) {
if len(info.Warnings) > 0 {
fmt.Fprintln(dockerCli.Err(), strings.Join(info.Warnings, "\n"))
return
}
// the daemon didn't return warnings; fall back to the old behavior
printStorageDriverWarnings(dockerCli, info)
printWarningsLegacy(dockerCli, info)
}
// printWarningsLegacy generates warnings based on information returned by the daemon.
// DEPRECATED: warnings are now generated by the daemon, and returned in
// info.Warnings. This function is used to provide backward compatibility with
// daemons that do not provide these warnings. No new warnings should be added
// here.
func printWarningsLegacy(dockerCli command.Cli, info types.Info) {
if info.OSType == "windows" {
return
}
if !info.MemoryLimit {
fmt.Fprintln(dockerCli.Err(), "WARNING: No memory limit support")
}
if !info.SwapLimit {
fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support")
}
if !info.KernelMemory {
fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support")
}
if !info.OomKillDisable {
fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support")
}
if !info.CPUCfsQuota {
fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs quota support")
}
if !info.CPUCfsPeriod {
fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu cfs period support")
}
if !info.CPUShares {
fmt.Fprintln(dockerCli.Err(), "WARNING: No cpu shares support")
}
if !info.CPUSet {
fmt.Fprintln(dockerCli.Err(), "WARNING: No cpuset support")
}
if !info.IPv4Forwarding {
fmt.Fprintln(dockerCli.Err(), "WARNING: IPv4 forwarding is disabled")
}
if !info.BridgeNfIptables {
fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-iptables is disabled")
}
if !info.BridgeNfIP6tables {
fmt.Fprintln(dockerCli.Err(), "WARNING: bridge-nf-call-ip6tables is disabled")
}
}
// printStorageDriverWarnings generates warnings based on storage-driver information
// returned by the daemon.
// DEPRECATED: warnings are now generated by the daemon, and returned in
// info.Warnings. This function is used to provide backward compatibility with
// daemons that do not provide these warnings. No new warnings should be added
// here.
func printStorageDriverWarnings(dockerCli command.Cli, info types.Info) {
if info.OSType == "windows" {
return
}
if info.DriverStatus == nil {
return
}
for _, pair := range info.DriverStatus {
if pair[0] == "Data loop file" {
fmt.Fprintf(dockerCli.Err(), "WARNING: %s: usage of loopback devices is "+

View File

@ -207,59 +207,32 @@ func TestPrettyPrintInfo(t *testing.T) {
infoWithWarningsLinux.BridgeNfIptables = false
infoWithWarningsLinux.BridgeNfIP6tables = false
sampleInfoDaemonWarnings := sampleInfoNoSwarm
sampleInfoDaemonWarnings.Warnings = []string{
"WARNING: No memory limit support",
"WARNING: No swap limit support",
"WARNING: No kernel memory limit support",
"WARNING: No oom kill disable support",
"WARNING: No cpu cfs quota support",
"WARNING: No cpu cfs period support",
"WARNING: No cpu shares support",
"WARNING: No cpuset support",
"WARNING: IPv4 forwarding is disabled",
"WARNING: bridge-nf-call-iptables is disabled",
"WARNING: bridge-nf-call-ip6tables is disabled",
}
for _, tc := range []struct {
doc string
dockerInfo types.Info
expectedGolden string
warningsGolden string
}{
{
doc: "info without swarm",
dockerInfo: sampleInfoNoSwarm,
expectedGolden: "docker-info-no-swarm",
},
{
doc: "info with swarm",
dockerInfo: infoWithSwarm,
expectedGolden: "docker-info-with-swarm",
},
{
doc: "info with legacy warnings",
dockerInfo: infoWithWarningsLinux,
expectedGolden: "docker-info-no-swarm",
warningsGolden: "docker-info-warnings",
},
{
doc: "info with daemon warnings",
dockerInfo: sampleInfoDaemonWarnings,
expectedGolden: "docker-info-no-swarm",
warningsGolden: "docker-info-warnings",
},
} {
t.Run(tc.doc, func(t *testing.T) {
cli := test.NewFakeCli(&fakeClient{})
assert.NilError(t, prettyPrintInfo(cli, tc.dockerInfo))
golden.Assert(t, cli.OutBuffer().String(), tc.expectedGolden+".golden")
if tc.warningsGolden != "" {
golden.Assert(t, cli.ErrBuffer().String(), tc.warningsGolden+".golden")
} else {
assert.Check(t, is.Equal("", cli.ErrBuffer().String()))
}
})
cli := test.NewFakeCli(&fakeClient{})
assert.NilError(t, prettyPrintInfo(cli, tc.dockerInfo))
golden.Assert(t, cli.OutBuffer().String(), tc.expectedGolden+".golden")
if tc.warningsGolden != "" {
golden.Assert(t, cli.ErrBuffer().String(), tc.warningsGolden+".golden")
} else {
assert.Check(t, is.Equal("", cli.ErrBuffer().String()))
}
}
}

View File

@ -2,12 +2,12 @@ package system
import (
"bytes"
"context"
"fmt"
"text/template"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/builder"
"github.com/docker/cli/cli/command/container"
"github.com/docker/cli/cli/command/image"
"github.com/docker/cli/cli/command/network"
@ -21,21 +21,20 @@ import (
type pruneOptions struct {
force bool
all bool
pruneVolumes bool
pruneBuildCache bool
pruneVolumes bool
filter opts.FilterOpt
}
// newPruneCommand creates a new cobra.Command for `docker prune`
func newPruneCommand(dockerCli command.Cli) *cobra.Command {
options := pruneOptions{filter: opts.NewFilterOpt()}
options := pruneOptions{filter: opts.NewFilterOpt(), pruneBuildCache: true}
cmd := &cobra.Command{
Use: "prune [OPTIONS]",
Short: "Remove unused data",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
options.pruneBuildCache = versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31")
return runPrune(dockerCli, options)
},
Annotations: map[string]string{"version": "1.25"},
@ -58,29 +57,44 @@ const confirmationTemplate = `WARNING! This will remove:
{{- end }}
Are you sure you want to continue?`
// runBuildCachePrune executes a prune command for build cache
func runBuildCachePrune(dockerCli command.Cli, _ opts.FilterOpt) (uint64, string, error) {
report, err := dockerCli.Client().BuildCachePrune(context.Background())
if err != nil {
return 0, "", err
}
return report.SpaceReclaimed, "", nil
}
func runPrune(dockerCli command.Cli, options pruneOptions) error {
// TODO version this once "until" filter is supported for volumes
if options.pruneVolumes && options.filter.Value().Contains("until") {
if options.pruneVolumes && options.filter.Value().Include("until") {
return fmt.Errorf(`ERROR: The "until" filter is not supported with "--volumes"`)
}
if versions.LessThan(dockerCli.Client().ClientVersion(), "1.31") {
options.pruneBuildCache = false
}
if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), confirmationMessage(options)) {
return nil
}
pruneFuncs := []func(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error){
imagePrune := func(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error) {
return image.RunPrune(dockerCli, options.all, options.filter)
}
pruneFuncs := []func(dockerCli command.Cli, filter opts.FilterOpt) (uint64, string, error){
container.RunPrune,
network.RunPrune,
}
if options.pruneVolumes {
pruneFuncs = append(pruneFuncs, volume.RunPrune)
}
pruneFuncs = append(pruneFuncs, image.RunPrune)
pruneFuncs = append(pruneFuncs, imagePrune)
if options.pruneBuildCache {
pruneFuncs = append(pruneFuncs, builder.CachePrune)
pruneFuncs = append(pruneFuncs, runBuildCachePrune)
}
var spaceReclaimed uint64
for _, pruneFn := range pruneFuncs {
spc, output, err := pruneFn(dockerCli, options.all, options.filter)
spc, output, err := pruneFn(dockerCli, options.filter)
if err != nil {
return err
}
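
One side of this hunk composes the prune steps as functions with a shared (dockerCli, filter) signature and wraps image.RunPrune in a closure so it can still see options.all; the other passes all explicitly. A standalone sketch of the closure-capture pattern, with placeholder prune functions rather than the real docker/cli ones:

package example

import "fmt"

// pruneFunc is the shared signature used to compose prune steps: each step
// reports reclaimed space plus optional output.
type pruneFunc func(filter string) (spaceReclaimed uint64, output string, err error)

func pruneContainers(filter string) (uint64, string, error) { return 10, "containers", nil }
func pruneNetworks(filter string) (uint64, string, error)   { return 0, "networks", nil }
func pruneImages(all bool, filter string) (uint64, string, error) {
    if all {
        return 100, "all images", nil
    }
    return 20, "dangling images", nil
}

func main() {
    all := true // stand-in for options.all
    // Wrap pruneImages in a closure so it fits the shared signature while
    // still seeing the "all" option, mirroring the imagePrune closure above.
    imagePrune := func(filter string) (uint64, string, error) {
        return pruneImages(all, filter)
    }

    pruneFuncs := []pruneFunc{pruneContainers, pruneNetworks, imagePrune}

    var total uint64
    for _, fn := range pruneFuncs {
        spc, out, err := fn("until=24h")
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        total += spc
        fmt.Println(out)
    }
    fmt.Println("total reclaimed:", total)
}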
@@ -112,11 +126,7 @@ func confirmationMessage(options pruneOptions) string {
warnings = append(warnings, "all dangling images")
}
if options.pruneBuildCache {
if options.all {
warnings = append(warnings, "all build cache")
} else {
warnings = append(warnings, "all dangling build cache")
}
warnings = append(warnings, "all build cache")
}
if len(options.filter.String()) > 0 {
warnings = append(warnings, "Elements to be pruned will be filtered with:")


@@ -14,7 +14,6 @@ import (
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/client"
"github.com/theupdateframework/notary/tuf/data"
"vbom.ml/util/sortorder"
)
// trustTagKey represents a unique signed tag and hex-encoded hash pair
@@ -29,12 +28,26 @@ type trustTagRow struct {
Signers []string
}
type trustTagRowList []trustTagRow
func (tagComparator trustTagRowList) Len() int {
return len(tagComparator)
}
func (tagComparator trustTagRowList) Less(i, j int) bool {
return tagComparator[i].SignedTag < tagComparator[j].SignedTag
}
func (tagComparator trustTagRowList) Swap(i, j int) {
tagComparator[i], tagComparator[j] = tagComparator[j], tagComparator[i]
}
// trustRepo represents consumable information about a trusted repository
type trustRepo struct {
Name string
SignedTags []trustTagRow
Signers []trustSigner
AdministrativeKeys []trustSigner
Name string
SignedTags trustTagRowList
Signers []trustSigner
AdminstrativeKeys []trustSigner
}
// trustSigner represents a trusted signer in a trusted repository
@@ -51,20 +64,20 @@ type trustKey struct {
// lookupTrustInfo returns processed signature and role information about a notary repository.
// This information is to be pretty printed or serialized into a machine-readable format.
func lookupTrustInfo(cli command.Cli, remote string) ([]trustTagRow, []client.RoleWithSignatures, []data.Role, error) {
func lookupTrustInfo(cli command.Cli, remote string) (trustTagRowList, []client.RoleWithSignatures, []data.Role, error) {
ctx := context.Background()
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(cli), remote)
if err != nil {
return []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, err
return trustTagRowList{}, []client.RoleWithSignatures{}, []data.Role{}, err
}
tag := imgRefAndAuth.Tag()
notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPullOnly)
if err != nil {
return []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, trust.NotaryError(imgRefAndAuth.Reference().Name(), err)
return trustTagRowList{}, []client.RoleWithSignatures{}, []data.Role{}, trust.NotaryError(imgRefAndAuth.Reference().Name(), err)
}
if err = clearChangeList(notaryRepo); err != nil {
return []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, err
return trustTagRowList{}, []client.RoleWithSignatures{}, []data.Role{}, err
}
defer clearChangeList(notaryRepo)
@@ -74,7 +87,7 @@ func lookupTrustInfo(cli command.Cli, remote string) ([]trustTagRow, []client.Ro
logrus.Debug(trust.NotaryError(remote, err))
// print an empty table if we don't have signed targets, but have an initialized notary repo
if _, ok := err.(client.ErrNoSuchTarget); !ok {
return []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, fmt.Errorf("No signatures or cannot access %s", remote)
return trustTagRowList{}, []client.RoleWithSignatures{}, []data.Role{}, fmt.Errorf("No signatures or cannot access %s", remote)
}
}
signatureRows := matchReleasedSignatures(allSignedTargets)
@@ -82,7 +95,7 @@ func lookupTrustInfo(cli command.Cli, remote string) ([]trustTagRow, []client.Ro
// get the administrative roles
adminRolesWithSigs, err := notaryRepo.ListRoles()
if err != nil {
return []trustTagRow{}, []client.RoleWithSignatures{}, []data.Role{}, fmt.Errorf("No signers for %s", remote)
return trustTagRowList{}, []client.RoleWithSignatures{}, []data.Role{}, fmt.Errorf("No signers for %s", remote)
}
// get delegation roles with the canonical key IDs
@@ -125,8 +138,8 @@ func getDelegationRoleToKeyMap(rawDelegationRoles []data.Role) map[string][]stri
// aggregate all signers for a "released" hash+tagname pair. To be "released," the tag must have been
// signed into the "targets" or "targets/releases" role. Output is sorted by tag name
func matchReleasedSignatures(allTargets []client.TargetSignedStruct) []trustTagRow {
signatureRows := []trustTagRow{}
func matchReleasedSignatures(allTargets []client.TargetSignedStruct) trustTagRowList {
signatureRows := trustTagRowList{}
// do a first pass to get filter on tags signed into "targets" or "targets/releases"
releasedTargetRows := map[trustTagKey][]string{}
for _, tgt := range allTargets {
@@ -149,8 +162,6 @@ func matchReleasedSignatures(allTargets []client.TargetSignedStruct) []trustTagR
for targetKey, signers := range releasedTargetRows {
signatureRows = append(signatureRows, trustTagRow{targetKey, signers})
}
sort.Slice(signatureRows, func(i, j int) bool {
return sortorder.NaturalLess(signatureRows[i].SignedTag, signatureRows[j].SignedTag)
})
sort.Sort(signatureRows)
return signatureRows
}
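
In this file the diff moves from a sort.Slice call using sortorder.NaturalLess to sort.Sort on trustTagRowList, whose Less compares the tag strings lexically; the natural-order test below is deleted in the same change (its hunk header is -1,33 +0,0). The two orderings differ for tags with numeric suffixes. A small standalone sketch, with placeholder types and data, contrasting them:

package example

import (
    "fmt"
    "sort"

    "vbom.ml/util/sortorder"
)

type row struct{ SignedTag string }

// rowList implements sort.Interface with plain lexical ordering,
// like trustTagRowList.Less in the hunk above.
type rowList []row

func (r rowList) Len() int           { return len(r) }
func (r rowList) Less(i, j int) bool { return r[i].SignedTag < r[j].SignedTag }
func (r rowList) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }

func main() {
    tags := []string{"target10-foo", "target1-foo", "target2-foo"}

    lexical := rowList{}
    for _, t := range tags {
        lexical = append(lexical, row{SignedTag: t})
    }
    sort.Sort(lexical)
    fmt.Println("lexical:", lexical) // target1-foo, target10-foo, target2-foo

    natural := append([]string(nil), tags...)
    sort.Slice(natural, func(i, j int) bool { return sortorder.NaturalLess(natural[i], natural[j]) })
    fmt.Println("natural:", natural) // target1-foo, target2-foo, target10-foo
}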


@@ -1,33 +0,0 @@
package trust
import (
"testing"
"github.com/docker/cli/cli/trust"
"github.com/theupdateframework/notary/client"
"github.com/theupdateframework/notary/tuf/data"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
)
func TestMatchReleasedSignaturesSortOrder(t *testing.T) {
var releasesRole = data.DelegationRole{BaseRole: data.BaseRole{Name: trust.ReleasesRole}}
targets := []client.TargetSignedStruct{
{Target: client.Target{Name: "target10-foo"}, Role: releasesRole},
{Target: client.Target{Name: "target1-foo"}, Role: releasesRole},
{Target: client.Target{Name: "target2-foo"}, Role: releasesRole},
}
rows := matchReleasedSignatures(targets)
var targetNames []string
for _, r := range rows {
targetNames = append(targetNames, r.SignedTag)
}
expected := []string{
"target1-foo",
"target2-foo",
"target10-foo",
}
assert.Check(t, is.DeepEqual(expected, targetNames))
}


@@ -107,9 +107,9 @@ func getRepoTrustInfo(cli command.Cli, remote string) ([]byte, error) {
sort.Slice(adminList, func(i, j int) bool { return adminList[i].Name > adminList[j].Name })
return json.Marshal(trustRepo{
Name: remote,
SignedTags: signatureRows,
Signers: signerList,
AdministrativeKeys: adminList,
Name: remote,
SignedTags: signatureRows,
Signers: signerList,
AdminstrativeKeys: adminList,
})
}
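
The final hunk marshals the trustRepo struct directly with encoding/json and no struct tags, so whichever exported field spelling the struct ends up with (AdministrativeKeys vs. AdminstrativeKeys in the hunks above) is emitted verbatim as the JSON key. A cut-down sketch of that behavior, with a hypothetical repo type and values:

package example

import (
    "encoding/json"
    "fmt"
)

// repo is a stand-in for trustRepo: exported field names, no json tags, so
// encoding/json uses the names themselves as keys.
type repo struct {
    Name              string
    SignedTags        []string
    Signers           []string
    AdminstrativeKeys []string // one of the spellings shown in the diff above
}

func main() {
    out, err := json.Marshal(repo{
        Name:              "example.com/app",
        SignedTags:        []string{"latest"},
        AdminstrativeKeys: []string{"root"},
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
    // {"Name":"example.com/app","SignedTags":["latest"],"Signers":null,"AdminstrativeKeys":["root"]}
}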

Some files were not shown because too many files have changed in this diff.