Compare commits
176 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| a224086349 | |||
| a282e0c5d2 | |||
| f3764ff5f9 | |||
| 700364e304 | |||
| 62d27c32ff | |||
| c0e952cf04 | |||
| 04104a04d3 | |||
| b721998b7b | |||
| 4065e1246e | |||
| f1002eb9fb | |||
| e97c7b240e | |||
| aa78937634 | |||
| 40fe0573aa | |||
| c9737e1c37 | |||
| 5c6723d080 | |||
| 59a8a0906f | |||
| 3f9857e6a8 | |||
| fd5fc61ecd | |||
| 3624019d83 | |||
| 96f2cf80ab | |||
| f3ff8e6ad6 | |||
| ee1ac1b319 | |||
| e91ed5707e | |||
| 38dd744a11 | |||
| cedc602a78 | |||
| 4de40a825e | |||
| dea9396e18 | |||
| 03fa8f92c8 | |||
| b485636f4b | |||
| 241523cc1a | |||
| 9989fdbc40 | |||
| 0e20c1fd21 | |||
| c4cb29837f | |||
| 0b56256638 | |||
| 1c0927a041 | |||
| 82f9d5921b | |||
| adb01ca79d | |||
| 8260476a06 | |||
| aa5b6b7728 | |||
| 859e303655 | |||
| bce2e1f953 | |||
| 44064f51c8 | |||
| 292779add5 | |||
| f2e79b826c | |||
| fa46b92361 | |||
| 400f81089a | |||
| c72057c8db | |||
| 77db97d595 | |||
| cbf0d2b7b7 | |||
| d0014a86bc | |||
| 6c1c8b55aa | |||
| c2ea9bc90b | |||
| 44fdac11f5 | |||
| 893e52cf4b | |||
| 061051c24d | |||
| 62eae52c2a | |||
| 2012fbf111 | |||
| 42d1c02750 | |||
| 3967b7d28e | |||
| 0b924e51fc | |||
| 6288e8b1ac | |||
| 1e9575e81a | |||
| c98e9c47ca | |||
| a6f6b5fa34 | |||
| 8437cfefae | |||
| 68a5ca859f | |||
| f9d091f4b1 | |||
| e9b8231d6a | |||
| 8a64739631 | |||
| a555c853b0 | |||
| 260ba1a8a2 | |||
| f63cb8b97e | |||
| c1492eabde | |||
| 48e6b44379 | |||
| bfcd17b5b7 | |||
| 8279b718ea | |||
| 644c003606 | |||
| 0d17280a30 | |||
| eedfe50a99 | |||
| f3dd1ee6c1 | |||
| c7cf60f657 | |||
| 1d37fb3027 | |||
| 0793f96394 | |||
| b639ea8b89 | |||
| 063e3dd329 | |||
| 0168626037 | |||
| 00ea8bdc41 | |||
| e3a9a92b14 | |||
| 6da4ee40c7 | |||
| ab733b5564 | |||
| f0df35096d | |||
| f485f66943 | |||
| 746c553574 | |||
| 2945ba4f7a | |||
| 032e485e1c | |||
| 88de81ff21 | |||
| 706ca7985b | |||
| e0d47b1c0b | |||
| 54b529feae | |||
| c88e6432ec | |||
| f291a49ba5 | |||
| 78fcd905c6 | |||
| 12e2f94eba | |||
| 00755d7dba | |||
| 8264f5be8d | |||
| 9780f41efe | |||
| 4fbdf3f362 | |||
| 1ff45aac40 | |||
| bb03b9d3c2 | |||
| ed71df1b9f | |||
| ee20fa1ec4 | |||
| ffe40dc6b6 | |||
| 7ab2d19a1e | |||
| 4630fe0075 | |||
| fbbf1be52d | |||
| 3de2cc6efd | |||
| 234036d105 | |||
| 0c442dc179 | |||
| 6b48c78672 | |||
| 370c28948e | |||
| dc017bdda3 | |||
| feb6f439e3 | |||
| 8bc4062fc0 | |||
| 84cc7d87cc | |||
| c1c3d3b3aa | |||
| 048a846146 | |||
| 33dacda24f | |||
| fcc05e5ea1 | |||
| 58061d25f6 | |||
| 55b2abb0ca | |||
| 4c3b87d922 | |||
| e77605c83d | |||
| 0196098721 | |||
| 6ebf765040 | |||
| f508ce9db7 | |||
| 897293c7c7 | |||
| 2c04354315 | |||
| 9c8a91d22b | |||
| ff945151ef | |||
| 4571d90f20 | |||
| 55c4c88966 | |||
| f33a69f6ee | |||
| d3cb89ee53 | |||
| 433014bcfb | |||
| 61cb016851 | |||
| 53c4602909 | |||
| 830d6595e7 | |||
| 13048444cc | |||
| 3e293e6bd1 | |||
| 41b3ea7e47 | |||
| d6eeeb6259 | |||
| 3e157d5293 | |||
| 1fdf84b8e9 | |||
| 376b99c6d6 | |||
| 0de4e6e9a7 | |||
| 3c87f01b18 | |||
| de40c2b172 | |||
| d513e46bfc | |||
| 2b74b90efb | |||
| 05343b36a2 | |||
| f90db254d7 | |||
| 0dcfdde336 | |||
| 03cd1dc50a | |||
| 42811a7eb9 | |||
| be966aa194 | |||
| b22fe0fb14 | |||
| 4eb050071e | |||
| 08c4fdfa7a | |||
| 6aa1b37c8d | |||
| e82920d76d | |||
| 82123939f7 | |||
| 48d30b5b32 | |||
| 5941f4104a | |||
| 18f33b337d | |||
| 9ecc69d17e | |||
| 6f49197cab |
@ -4,13 +4,13 @@ jobs:
|
||||
|
||||
lint:
|
||||
working_directory: /work
|
||||
docker: [{image: 'docker:19.03-git'}]
|
||||
docker: [{image: 'docker:20.10-git'}]
|
||||
environment:
|
||||
DOCKER_BUILDKIT: 1
|
||||
steps:
|
||||
- checkout
|
||||
- setup_remote_docker:
|
||||
version: 19.03.12
|
||||
version: 20.10.6
|
||||
reusable: true
|
||||
exclusive: false
|
||||
- run:
|
||||
@ -39,14 +39,15 @@ jobs:
|
||||
|
||||
cross:
|
||||
working_directory: /work
|
||||
docker: [{image: 'docker:19.03-git'}]
|
||||
docker: [{image: 'docker:20.10-git'}]
|
||||
environment:
|
||||
DOCKER_BUILDKIT: 1
|
||||
BUILDX_VERSION: "v0.5.1"
|
||||
parallelism: 3
|
||||
steps:
|
||||
- checkout
|
||||
- setup_remote_docker:
|
||||
version: 19.03.12
|
||||
version: 20.10.6
|
||||
reusable: true
|
||||
exclusive: false
|
||||
- run:
|
||||
@ -55,33 +56,26 @@ jobs:
|
||||
- run:
|
||||
name: "Docker info"
|
||||
command: docker info
|
||||
- run:
|
||||
name: "Cross - build image"
|
||||
command: |
|
||||
docker build --progress=plain -f dockerfiles/Dockerfile.cross --tag cli-builder:$CIRCLE_BUILD_NUM .
|
||||
- run:
|
||||
name: "Cross"
|
||||
command: |
|
||||
name=cross-$CIRCLE_BUILD_NUM-$CIRCLE_NODE_INDEX
|
||||
docker run \
|
||||
-e CROSS_GROUP=$CIRCLE_NODE_INDEX \
|
||||
--name $name cli-builder:$CIRCLE_BUILD_NUM \
|
||||
make cross
|
||||
docker cp \
|
||||
$name:/go/src/github.com/docker/cli/build \
|
||||
/work/build
|
||||
- run: apk add make curl
|
||||
- run: mkdir -vp ~/.docker/cli-plugins/
|
||||
- run: curl -fsSL --output ~/.docker/cli-plugins/docker-buildx https://github.com/docker/buildx/releases/download/${BUILDX_VERSION}/buildx-${BUILDX_VERSION}.linux-amd64
|
||||
- run: chmod a+x ~/.docker/cli-plugins/docker-buildx
|
||||
- run: docker buildx version
|
||||
- run: docker context create buildctx
|
||||
- run: docker buildx create --use buildctx && docker buildx inspect --bootstrap
|
||||
- run: GROUP_INDEX=$CIRCLE_NODE_INDEX GROUP_TOTAL=$CIRCLE_NODE_TOTAL docker buildx bake cross --progress=plain
|
||||
- store_artifacts:
|
||||
path: /work/build
|
||||
|
||||
test:
|
||||
working_directory: /work
|
||||
docker: [{image: 'docker:19.03-git'}]
|
||||
docker: [{image: 'docker:20.10-git'}]
|
||||
environment:
|
||||
DOCKER_BUILDKIT: 1
|
||||
steps:
|
||||
- checkout
|
||||
- setup_remote_docker:
|
||||
version: 19.03.12
|
||||
version: 20.10.6
|
||||
reusable: true
|
||||
exclusive: false
|
||||
- run:
|
||||
@ -122,13 +116,13 @@ jobs:
|
||||
|
||||
validate:
|
||||
working_directory: /work
|
||||
docker: [{image: 'docker:19.03-git'}]
|
||||
docker: [{image: 'docker:20.10-git'}]
|
||||
environment:
|
||||
DOCKER_BUILDKIT: 1
|
||||
steps:
|
||||
- checkout
|
||||
- setup_remote_docker:
|
||||
version: 19.03.12
|
||||
version: 20.10.6
|
||||
reusable: true
|
||||
exclusive: false
|
||||
- run:
|
||||
|
||||
@ -1,6 +1,5 @@
|
||||
.circleci
|
||||
.dockerignore
|
||||
.git
|
||||
.github
|
||||
.gitignore
|
||||
appveyor.yml
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@ -8,8 +8,7 @@
|
||||
Thumbs.db
|
||||
.editorconfig
|
||||
/build/
|
||||
cli/winresources/rsrc_386.syso
|
||||
cli/winresources/rsrc_amd64.syso
|
||||
cli/winresources/rsrc_*.syso
|
||||
/man/man1/
|
||||
/man/man5/
|
||||
/man/man8/
|
||||
|
||||
62
Dockerfile
Normal file
62
Dockerfile
Normal file
@ -0,0 +1,62 @@
|
||||
# syntax=docker/dockerfile:1.3
|
||||
|
||||
ARG BASE_VARIANT=alpine
|
||||
ARG GO_VERSION=1.16.15
|
||||
ARG XX_VERSION=1.0.0-rc.2
|
||||
|
||||
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-${BASE_VARIANT} AS gostable
|
||||
FROM --platform=$BUILDPLATFORM golang:1.17rc1-${BASE_VARIANT} AS golatest
|
||||
|
||||
FROM gostable AS go-linux
|
||||
FROM gostable AS go-darwin
|
||||
FROM gostable AS go-windows-amd64
|
||||
FROM gostable AS go-windows-386
|
||||
FROM gostable AS go-windows-arm
|
||||
FROM golatest AS go-windows-arm64
|
||||
FROM go-windows-${TARGETARCH} AS go-windows
|
||||
|
||||
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
|
||||
|
||||
FROM go-${TARGETOS} AS build-base-alpine
|
||||
COPY --from=xx / /
|
||||
RUN apk add --no-cache clang lld llvm file git
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
|
||||
FROM build-base-alpine AS build-alpine
|
||||
ARG TARGETPLATFORM
|
||||
# gcc is installed for libgcc only
|
||||
RUN xx-apk add --no-cache musl-dev gcc
|
||||
|
||||
FROM go-${TARGETOS} AS build-base-buster
|
||||
COPY --from=xx / /
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y clang lld file
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
|
||||
FROM build-base-buster AS build-buster
|
||||
ARG TARGETPLATFORM
|
||||
RUN xx-apt install --no-install-recommends -y libc6-dev libgcc-8-dev
|
||||
|
||||
FROM build-${BASE_VARIANT} AS build
|
||||
# GO_LINKMODE defines if static or dynamic binary should be produced
|
||||
ARG GO_LINKMODE=static
|
||||
# GO_BUILDTAGS defines additional build tags
|
||||
ARG GO_BUILDTAGS
|
||||
# GO_STRIP strips debugging symbols if set
|
||||
ARG GO_STRIP
|
||||
# CGO_ENABLED manually sets if cgo is used
|
||||
ARG CGO_ENABLED
|
||||
# VERSION sets the version for the produced binary
|
||||
ARG VERSION
|
||||
RUN --mount=ro --mount=type=cache,target=/root/.cache \
|
||||
--mount=from=dockercore/golang-cross:xx-sdk-extras,target=/xx-sdk,src=/xx-sdk \
|
||||
--mount=type=tmpfs,target=cli/winresources \
|
||||
xx-go --wrap && \
|
||||
# export GOCACHE=$(go env GOCACHE)/$(xx-info)$([ -f /etc/alpine-release ] && echo "alpine") && \
|
||||
TARGET=/out ./scripts/build/binary && \
|
||||
xx-verify $([ "$GO_LINKMODE" = "static" ] && echo "--static") /out/docker
|
||||
|
||||
FROM build-base-${BASE_VARIANT} AS dev
|
||||
COPY . .
|
||||
|
||||
FROM scratch AS binary
|
||||
COPY --from=build /out .
|
||||
6
Jenkinsfile
vendored
6
Jenkinsfile
vendored
@ -1,6 +1,6 @@
|
||||
pipeline {
|
||||
agent {
|
||||
label "linux && x86_64"
|
||||
label "amd64 && ubuntu-1804 && overlay2"
|
||||
}
|
||||
|
||||
options {
|
||||
@ -21,9 +21,9 @@ pipeline {
|
||||
make -f docker.Makefile test-e2e-non-experimental"
|
||||
}
|
||||
}
|
||||
stage("e2e (non-experimental) - 18.09 engine") {
|
||||
stage("e2e (non-experimental) - 19.03 engine") {
|
||||
steps {
|
||||
sh "E2E_ENGINE_VERSION=18.09-dind \
|
||||
sh "E2E_ENGINE_VERSION=19.03-dind \
|
||||
E2E_UNIQUE_ID=clie2e${BUILD_NUMBER} \
|
||||
IMAGE_TAG=clie2e${BUILD_NUMBER} \
|
||||
make -f docker.Makefile test-e2e-non-experimental"
|
||||
|
||||
17
Makefile
17
Makefile
@ -30,8 +30,7 @@ lint: ## run all the lint tools
|
||||
gometalinter --config gometalinter.json ./...
|
||||
|
||||
.PHONY: binary
|
||||
binary: ## build executable for Linux
|
||||
@echo "WARNING: binary creates a Linux executable. Use cross for macOS or Windows."
|
||||
binary:
|
||||
./scripts/build/binary
|
||||
|
||||
.PHONY: plugins
|
||||
@ -39,28 +38,20 @@ plugins: ## build example CLI plugins
|
||||
./scripts/build/plugins
|
||||
|
||||
.PHONY: cross
|
||||
cross: ## build executable for macOS and Windows
|
||||
./scripts/build/cross
|
||||
|
||||
.PHONY: binary-windows
|
||||
binary-windows: ## build executable for Windows
|
||||
./scripts/build/windows
|
||||
cross:
|
||||
./scripts/build/binary
|
||||
|
||||
.PHONY: plugins-windows
|
||||
plugins-windows: ## build example CLI plugins for Windows
|
||||
./scripts/build/plugins-windows
|
||||
|
||||
.PHONY: binary-osx
|
||||
binary-osx: ## build executable for macOS
|
||||
./scripts/build/osx
|
||||
|
||||
.PHONY: plugins-osx
|
||||
plugins-osx: ## build example CLI plugins for macOS
|
||||
./scripts/build/plugins-osx
|
||||
|
||||
.PHONY: dynbinary
|
||||
dynbinary: ## build dynamically linked binary
|
||||
./scripts/build/dynbinary
|
||||
GO_LINKMODE=dynamic ./scripts/build/binary
|
||||
|
||||
vendor: vendor.conf ## check that vendor matches vendor.conf
|
||||
rm -rf vendor
|
||||
|
||||
25
README.md
25
README.md
@ -12,18 +12,31 @@ Development
|
||||
|
||||
`docker/cli` is developed using Docker.
|
||||
|
||||
Build a linux binary:
|
||||
Build CLI from source:
|
||||
|
||||
```
|
||||
$ make -f docker.Makefile binary
|
||||
$ docker buildx bake
|
||||
```
|
||||
|
||||
Build binaries for all supported platforms:
|
||||
|
||||
```
|
||||
$ make -f docker.Makefile cross
|
||||
$ docker buildx bake cross
|
||||
```
|
||||
|
||||
Build for a specific platform:
|
||||
|
||||
```
|
||||
$ docker buildx bake --set binary.platform=linux/arm64
|
||||
```
|
||||
|
||||
Build dynamic binary for glibc or musl:
|
||||
|
||||
```
|
||||
$ USE_GLIBC=1 docker buildx bake dynbinary
|
||||
```
|
||||
|
||||
|
||||
Run all linting:
|
||||
|
||||
```
|
||||
@ -44,12 +57,6 @@ Start an interactive development environment:
|
||||
$ make -f docker.Makefile shell
|
||||
```
|
||||
|
||||
In the development environment you can run many tasks, including build binaries:
|
||||
|
||||
```
|
||||
$ make binary
|
||||
```
|
||||
|
||||
Legal
|
||||
=====
|
||||
*Brought to you courtesy of our legal counsel. For more context,
|
||||
|
||||
@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\docker\cli
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
GOVERSION: 1.13.15
|
||||
GOVERSION: 1.16.15
|
||||
DEPVERSION: v0.4.1
|
||||
|
||||
install:
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
// Candidate represents a possible plugin candidate, for mocking purposes
|
||||
|
||||
@ -3,7 +3,6 @@ package manager
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
@ -12,6 +11,7 @@ import (
|
||||
"github.com/docker/cli/cli/config"
|
||||
"github.com/fvbommel/sortorder"
|
||||
"github.com/spf13/cobra"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
// ReexecEnvvar is the name of an ennvar which is set to the command
|
||||
|
||||
@ -255,7 +255,7 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...Initialize
|
||||
if tlsconfig.IsErrEncryptedKey(err) {
|
||||
passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil)
|
||||
newClient := func(password string) (client.APIClient, error) {
|
||||
cli.dockerEndpoint.TLSPassword = password
|
||||
cli.dockerEndpoint.TLSPassword = password //nolint: staticcheck // SA1019: cli.dockerEndpoint.TLSPassword is deprecated
|
||||
return newAPIClientFromEndpoint(cli.dockerEndpoint, cli.configFile)
|
||||
}
|
||||
cli.client, err = getClientWithPassword(passRetriever, newClient)
|
||||
|
||||
@ -6,7 +6,6 @@ import (
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"testing"
|
||||
@ -80,24 +79,6 @@ func TestNewAPIClientFromFlagsWithAPIVersionFromEnv(t *testing.T) {
|
||||
assert.Check(t, is.Equal(customVersion, apiclient.ClientVersion()))
|
||||
}
|
||||
|
||||
func TestNewAPIClientFromFlagsWithHttpProxyEnv(t *testing.T) {
|
||||
defer env.Patch(t, "HTTP_PROXY", "http://proxy.acme.com:1234")()
|
||||
defer env.Patch(t, "DOCKER_HOST", "tcp://docker.acme.com:2376")()
|
||||
|
||||
opts := &flags.CommonOptions{}
|
||||
configFile := &configfile.ConfigFile{}
|
||||
apiclient, err := NewAPIClientFromFlags(opts, configFile)
|
||||
assert.NilError(t, err)
|
||||
transport, ok := apiclient.HTTPClient().Transport.(*http.Transport)
|
||||
assert.Assert(t, ok)
|
||||
assert.Assert(t, transport.Proxy != nil)
|
||||
request, err := http.NewRequest(http.MethodGet, "tcp://docker.acme.com:2376", nil)
|
||||
assert.NilError(t, err)
|
||||
url, err := transport.Proxy(request)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal("http://proxy.acme.com:1234", url.String()))
|
||||
}
|
||||
|
||||
type fakeClient struct {
|
||||
client.Client
|
||||
pingFunc func() (types.Ping, error)
|
||||
|
||||
@ -97,7 +97,8 @@ func runAttach(dockerCli command.Cli, opts *attachOptions) error {
|
||||
}
|
||||
|
||||
if opts.proxy && !c.Config.Tty {
|
||||
sigc := ForwardAllSignals(ctx, dockerCli, opts.container)
|
||||
sigc := notfiyAllSignals()
|
||||
go ForwardAllSignals(ctx, dockerCli, opts.container, sigc)
|
||||
defer signal.StopCatch(sigc)
|
||||
}
|
||||
|
||||
|
||||
@ -32,6 +32,7 @@ type fakeClient struct {
|
||||
containerExportFunc func(string) (io.ReadCloser, error)
|
||||
containerExecResizeFunc func(id string, options types.ResizeOptions) error
|
||||
containerRemoveFunc func(ctx context.Context, container string, options types.ContainerRemoveOptions) error
|
||||
containerKillFunc func(ctx context.Context, container, signal string) error
|
||||
Version string
|
||||
}
|
||||
|
||||
@ -154,3 +155,10 @@ func (f *fakeClient) ContainerExecResize(_ context.Context, id string, options t
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeClient) ContainerKill(ctx context.Context, container, signal string) error {
|
||||
if f.containerKillFunc != nil {
|
||||
return f.containerKillFunc(ctx, container, signal)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -133,7 +133,7 @@ func TestCreateContainerImagePullPolicy(t *testing.T) {
|
||||
return ioutil.NopCloser(strings.NewReader("")), nil
|
||||
},
|
||||
infoFunc: func() (types.Info, error) {
|
||||
return types.Info{IndexServerAddress: "http://indexserver"}, nil
|
||||
return types.Info{IndexServerAddress: "https://indexserver.example.com"}, nil
|
||||
},
|
||||
}
|
||||
cli := test.NewFakeCli(client)
|
||||
|
||||
@ -131,7 +131,8 @@ func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptio
|
||||
return runStartContainerErr(err)
|
||||
}
|
||||
if opts.sigProxy {
|
||||
sigc := ForwardAllSignals(ctx, dockerCli, createResponse.ID)
|
||||
sigc := notfiyAllSignals()
|
||||
go ForwardAllSignals(ctx, dockerCli, createResponse.ID, sigc)
|
||||
defer signal.StopCatch(sigc)
|
||||
}
|
||||
|
||||
|
||||
61
cli/command/container/signals.go
Normal file
61
cli/command/container/signals.go
Normal file
@ -0,0 +1,61 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
gosignal "os/signal"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ForwardAllSignals forwards signals to the container
|
||||
//
|
||||
// The channel you pass in must already be setup to receive any signals you want to forward.
|
||||
func ForwardAllSignals(ctx context.Context, cli command.Cli, cid string, sigc <-chan os.Signal) {
|
||||
var (
|
||||
s os.Signal
|
||||
ok bool
|
||||
)
|
||||
for {
|
||||
select {
|
||||
case s, ok = <-sigc:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
if s == signal.SIGCHLD || s == signal.SIGPIPE {
|
||||
continue
|
||||
}
|
||||
|
||||
// In go1.14+, the go runtime issues SIGURG as an interrupt to support pre-emptable system calls on Linux.
|
||||
// Since we can't forward that along we'll check that here.
|
||||
if isRuntimeSig(s) {
|
||||
continue
|
||||
}
|
||||
var sig string
|
||||
for sigStr, sigN := range signal.SignalMap {
|
||||
if sigN == s {
|
||||
sig = sigStr
|
||||
break
|
||||
}
|
||||
}
|
||||
if sig == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := cli.Client().ContainerKill(ctx, cid, sig); err != nil {
|
||||
logrus.Debugf("Error sending signal: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func notfiyAllSignals() chan os.Signal {
|
||||
sigc := make(chan os.Signal, 128)
|
||||
gosignal.Notify(sigc)
|
||||
return sigc
|
||||
}
|
||||
48
cli/command/container/signals_test.go
Normal file
48
cli/command/container/signals_test.go
Normal file
@ -0,0 +1,48 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/internal/test"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
)
|
||||
|
||||
func TestForwardSignals(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
called := make(chan struct{})
|
||||
client := &fakeClient{containerKillFunc: func(ctx context.Context, container, signal string) error {
|
||||
close(called)
|
||||
return nil
|
||||
}}
|
||||
|
||||
cli := test.NewFakeCli(client)
|
||||
sigc := make(chan os.Signal)
|
||||
defer close(sigc)
|
||||
|
||||
go ForwardAllSignals(ctx, cli, t.Name(), sigc)
|
||||
|
||||
timer := time.NewTimer(30 * time.Second)
|
||||
defer timer.Stop()
|
||||
|
||||
select {
|
||||
case <-timer.C:
|
||||
t.Fatal("timeout waiting to send signal")
|
||||
case sigc <- signal.SignalMap["TERM"]:
|
||||
}
|
||||
if !timer.Stop() {
|
||||
<-timer.C
|
||||
}
|
||||
timer.Reset(30 * time.Second)
|
||||
|
||||
select {
|
||||
case <-called:
|
||||
case <-timer.C:
|
||||
t.Fatal("timeout waiting for signal to be processed")
|
||||
}
|
||||
|
||||
}
|
||||
13
cli/command/container/signals_unix.go
Normal file
13
cli/command/container/signals_unix.go
Normal file
@ -0,0 +1,13 @@
|
||||
// +build !windows
|
||||
|
||||
package container
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func isRuntimeSig(s os.Signal) bool {
|
||||
return s == unix.SIGURG
|
||||
}
|
||||
59
cli/command/container/signals_unix_test.go
Normal file
59
cli/command/container/signals_unix_test.go
Normal file
@ -0,0 +1,59 @@
|
||||
// +build !windows
|
||||
|
||||
package container
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/internal/test"
|
||||
"golang.org/x/sys/unix"
|
||||
"gotest.tools/v3/assert"
|
||||
)
|
||||
|
||||
func TestIgnoredSignals(t *testing.T) {
|
||||
ignoredSignals := []syscall.Signal{unix.SIGPIPE, unix.SIGCHLD, unix.SIGURG}
|
||||
|
||||
for _, s := range ignoredSignals {
|
||||
t.Run(unix.SignalName(s), func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var called bool
|
||||
client := &fakeClient{containerKillFunc: func(ctx context.Context, container, signal string) error {
|
||||
called = true
|
||||
return nil
|
||||
}}
|
||||
|
||||
cli := test.NewFakeCli(client)
|
||||
sigc := make(chan os.Signal)
|
||||
defer close(sigc)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
ForwardAllSignals(ctx, cli, t.Name(), sigc)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
timer := time.NewTimer(30 * time.Second)
|
||||
defer timer.Stop()
|
||||
|
||||
select {
|
||||
case <-timer.C:
|
||||
t.Fatal("timeout waiting to send signal")
|
||||
case sigc <- s:
|
||||
case <-done:
|
||||
}
|
||||
|
||||
// cancel the context so ForwardAllSignals will exit after it has processed the signal we sent.
|
||||
// This is how we know the signal was actually processed and are not introducing a flakey test.
|
||||
cancel()
|
||||
<-done
|
||||
|
||||
assert.Assert(t, !called, "kill was called")
|
||||
})
|
||||
}
|
||||
}
|
||||
7
cli/command/container/signals_windows.go
Normal file
7
cli/command/container/signals_windows.go
Normal file
@ -0,0 +1,7 @@
|
||||
package container
|
||||
|
||||
import "os"
|
||||
|
||||
func isRuntimeSig(_ os.Signal) bool {
|
||||
return false
|
||||
}
|
||||
@ -74,7 +74,8 @@ func runStart(dockerCli command.Cli, opts *startOptions) error {
|
||||
|
||||
// We always use c.ID instead of container to maintain consistency during `docker start`
|
||||
if !c.Config.Tty {
|
||||
sigc := ForwardAllSignals(ctx, dockerCli, c.ID)
|
||||
sigc := notfiyAllSignals()
|
||||
go ForwardAllSignals(ctx, dockerCli, c.ID, sigc)
|
||||
defer signal.StopCatch(sigc)
|
||||
}
|
||||
|
||||
|
||||
@ -95,32 +95,3 @@ func MonitorTtySize(ctx context.Context, cli command.Cli, id string, isExec bool
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForwardAllSignals forwards signals to the container
|
||||
func ForwardAllSignals(ctx context.Context, cli command.Cli, cid string) chan os.Signal {
|
||||
sigc := make(chan os.Signal, 128)
|
||||
signal.CatchAll(sigc)
|
||||
go func() {
|
||||
for s := range sigc {
|
||||
if s == signal.SIGCHLD || s == signal.SIGPIPE {
|
||||
continue
|
||||
}
|
||||
var sig string
|
||||
for sigStr, sigN := range signal.SignalMap {
|
||||
if sigN == s {
|
||||
sig = sigStr
|
||||
break
|
||||
}
|
||||
}
|
||||
if sig == "" {
|
||||
fmt.Fprintf(cli.Err(), "Unsupported signal: %v. Discarding.\n", s)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := cli.Client().ContainerKill(ctx, cid, sig); err != nil {
|
||||
logrus.Debugf("Error sending signal: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
return sigc
|
||||
}
|
||||
|
||||
@ -1,10 +1,6 @@
|
||||
package context
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/spf13/cobra"
|
||||
@ -30,20 +26,3 @@ func NewContextCommand(dockerCli command.Cli) *cobra.Command {
|
||||
)
|
||||
return cmd
|
||||
}
|
||||
|
||||
const restrictedNamePattern = "^[a-zA-Z0-9][a-zA-Z0-9_.+-]+$"
|
||||
|
||||
var restrictedNameRegEx = regexp.MustCompile(restrictedNamePattern)
|
||||
|
||||
func validateContextName(name string) error {
|
||||
if name == "" {
|
||||
return errors.New("context name cannot be empty")
|
||||
}
|
||||
if name == "default" {
|
||||
return errors.New(`"default" is a reserved context name`)
|
||||
}
|
||||
if !restrictedNameRegEx.MatchString(name) {
|
||||
return fmt.Errorf("context name %q is invalid, names are validated against regexp %q", name, restrictedNamePattern)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -62,8 +62,11 @@ func newCreateCommand(dockerCli command.Cli) *cobra.Command {
|
||||
&opts.DefaultStackOrchestrator,
|
||||
"default-stack-orchestrator", "",
|
||||
"Default orchestrator for stack operations to use with this context (swarm|kubernetes|all)")
|
||||
flags.SetAnnotation("default-stack-orchestrator", "deprecated", nil)
|
||||
flags.StringToStringVar(&opts.Docker, "docker", nil, "set the docker endpoint")
|
||||
flags.StringToStringVar(&opts.Kubernetes, "kubernetes", nil, "set the kubernetes endpoint")
|
||||
flags.SetAnnotation("kubernetes", "kubernetes", nil)
|
||||
flags.SetAnnotation("kubernetes", "deprecated", nil)
|
||||
flags.StringVar(&opts.From, "from", "", "create context from a named context")
|
||||
return cmd
|
||||
}
|
||||
@ -137,7 +140,7 @@ func createNewContext(o *CreateOptions, stackOrchestrator command.Orchestrator,
|
||||
}
|
||||
|
||||
func checkContextNameForCreation(s store.Reader, name string) error {
|
||||
if err := validateContextName(name); err != nil {
|
||||
if err := store.ValidateContextName(name); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := s.GetMetadata(name); !store.IsErrContextDoesNotExist(err) {
|
||||
|
||||
@ -169,7 +169,7 @@ func validateTestKubeEndpoint(t *testing.T, s store.Reader, name string) {
|
||||
kubeMeta := ctxMetadata.Endpoints[kubernetes.KubernetesEndpoint].(kubernetes.EndpointMeta)
|
||||
kubeEP, err := kubeMeta.WithTLSData(s, name)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, "https://someserver", kubeEP.Host)
|
||||
assert.Equal(t, "https://someserver.example.com", kubeEP.Host)
|
||||
assert.Equal(t, "the-ca", string(kubeEP.TLSData.CA))
|
||||
assert.Equal(t, "the-cert", string(kubeEP.TLSData.Cert))
|
||||
assert.Equal(t, "the-key", string(kubeEP.TLSData.Key))
|
||||
@ -287,7 +287,7 @@ func TestCreateFromContext(t *testing.T) {
|
||||
assert.Equal(t, newContextTyped.Description, c.expectedDescription)
|
||||
assert.Equal(t, newContextTyped.StackOrchestrator, c.expectedOrchestrator)
|
||||
assert.Equal(t, dockerEndpoint.Host, "tcp://42.42.42.42:2375")
|
||||
assert.Equal(t, kubeEndpoint.Host, "https://someserver")
|
||||
assert.Equal(t, kubeEndpoint.Host, "https://someserver.example.com")
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -361,7 +361,7 @@ func TestCreateFromCurrent(t *testing.T) {
|
||||
assert.Equal(t, newContextTyped.Description, c.expectedDescription)
|
||||
assert.Equal(t, newContextTyped.StackOrchestrator, c.expectedOrchestrator)
|
||||
assert.Equal(t, dockerEndpoint.Host, "tcp://42.42.42.42:2375")
|
||||
assert.Equal(t, kubeEndpoint.Host, "https://someserver")
|
||||
assert.Equal(t, kubeEndpoint.Host, "https://someserver.example.com")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -46,6 +46,8 @@ func newExportCommand(dockerCli command.Cli) *cobra.Command {
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVar(&opts.Kubeconfig, "kubeconfig", false, "Export as a kubeconfig file")
|
||||
flags.SetAnnotation("kubeconfig", "kubernetes", nil)
|
||||
flags.SetAnnotation("kubeconfig", "deprecated", nil)
|
||||
return cmd
|
||||
}
|
||||
|
||||
@ -77,7 +79,7 @@ func writeTo(dockerCli command.Cli, reader io.Reader, dest string) error {
|
||||
|
||||
// RunExport exports a Docker context
|
||||
func RunExport(dockerCli command.Cli, opts *ExportOptions) error {
|
||||
if err := validateContextName(opts.ContextName); err != nil && opts.ContextName != command.DefaultContextName {
|
||||
if err := store.ValidateContextName(opts.ContextName); err != nil && opts.ContextName != command.DefaultContextName {
|
||||
return err
|
||||
}
|
||||
ctxMeta, err := dockerCli.ContextStore().GetMetadata(opts.ContextName)
|
||||
|
||||
@ -18,7 +18,7 @@ func createTestContextWithKubeAndSwarm(t *testing.T, cli command.Cli, name strin
|
||||
DefaultStackOrchestrator: orchestrator,
|
||||
Description: "description of " + name,
|
||||
Kubernetes: map[string]string{keyFrom: "default"},
|
||||
Docker: map[string]string{keyHost: "https://someswarmserver"},
|
||||
Docker: map[string]string{keyHost: "https://someswarmserver.example.com"},
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
||||
4
cli/command/context/testdata/inspect.golden
vendored
4
cli/command/context/testdata/inspect.golden
vendored
@ -7,11 +7,11 @@
|
||||
},
|
||||
"Endpoints": {
|
||||
"docker": {
|
||||
"Host": "https://someswarmserver",
|
||||
"Host": "https://someswarmserver.example.com",
|
||||
"SkipTLSVerify": false
|
||||
},
|
||||
"kubernetes": {
|
||||
"Host": "https://someserver",
|
||||
"Host": "https://someserver.example.com",
|
||||
"SkipTLSVerify": false,
|
||||
"DefaultNamespace": "default"
|
||||
}
|
||||
|
||||
10
cli/command/context/testdata/list.golden
vendored
10
cli/command/context/testdata/list.golden
vendored
@ -1,5 +1,5 @@
|
||||
NAME DESCRIPTION DOCKER ENDPOINT KUBERNETES ENDPOINT ORCHESTRATOR
|
||||
current * description of current https://someswarmserver https://someserver (default) all
|
||||
default Current DOCKER_HOST based configuration unix:///var/run/docker.sock swarm
|
||||
other description of other https://someswarmserver https://someserver (default) all
|
||||
unset description of unset https://someswarmserver https://someserver (default)
|
||||
NAME DESCRIPTION DOCKER ENDPOINT KUBERNETES ENDPOINT ORCHESTRATOR
|
||||
current * description of current https://someswarmserver.example.com https://someserver.example.com (default) all
|
||||
default Current DOCKER_HOST based configuration unix:///var/run/docker.sock swarm
|
||||
other description of other https://someswarmserver.example.com https://someserver.example.com (default) all
|
||||
unset description of unset https://someswarmserver.example.com https://someserver.example.com (default)
|
||||
|
||||
2
cli/command/context/testdata/test-kubeconfig
vendored
2
cli/command/context/testdata/test-kubeconfig
vendored
@ -2,7 +2,7 @@ apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: dGhlLWNh
|
||||
server: https://someserver
|
||||
server: https://someserver.example.com
|
||||
name: test-cluster
|
||||
contexts:
|
||||
- context:
|
||||
|
||||
@ -61,14 +61,17 @@ func newUpdateCommand(dockerCli command.Cli) *cobra.Command {
|
||||
&opts.DefaultStackOrchestrator,
|
||||
"default-stack-orchestrator", "",
|
||||
"Default orchestrator for stack operations to use with this context (swarm|kubernetes|all)")
|
||||
flags.SetAnnotation("default-stack-orchestrator", "deprecated", nil)
|
||||
flags.StringToStringVar(&opts.Docker, "docker", nil, "set the docker endpoint")
|
||||
flags.StringToStringVar(&opts.Kubernetes, "kubernetes", nil, "set the kubernetes endpoint")
|
||||
flags.SetAnnotation("kubernetes", "kubernetes", nil)
|
||||
flags.SetAnnotation("kubernetes", "deprecated", nil)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// RunUpdate updates a Docker context
|
||||
func RunUpdate(cli command.Cli, o *UpdateOptions) error {
|
||||
if err := validateContextName(o.Name); err != nil {
|
||||
if err := store.ValidateContextName(o.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
s := cli.ContextStore()
|
||||
|
||||
@ -5,6 +5,7 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/context/store"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@ -23,7 +24,7 @@ func newUseCommand(dockerCli command.Cli) *cobra.Command {
|
||||
|
||||
// RunUse set the current Docker context
|
||||
func RunUse(dockerCli command.Cli, name string) error {
|
||||
if err := validateContextName(name); err != nil && name != "default" {
|
||||
if err := store.ValidateContextName(name); err != nil && name != "default" {
|
||||
return err
|
||||
}
|
||||
if _, err := dockerCli.ContextStore().GetMetadata(name); err != nil && name != "default" {
|
||||
|
||||
@ -9,7 +9,6 @@ import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
@ -24,6 +23,7 @@ import (
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/pkg/errors"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@ -78,7 +78,7 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
|
||||
if options.dockerfileFromStdin() {
|
||||
return errStdinConflict
|
||||
}
|
||||
rc, isArchive, err := build.DetectArchiveReader(os.Stdin)
|
||||
rc, isArchive, err := build.DetectArchiveReader(dockerCli.In())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -98,7 +98,7 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
|
||||
case isLocalDir(options.context):
|
||||
contextDir = options.context
|
||||
if options.dockerfileFromStdin() {
|
||||
dockerfileReader = os.Stdin
|
||||
dockerfileReader = dockerCli.In()
|
||||
} else if options.dockerfileName != "" {
|
||||
dockerfileName = filepath.Base(options.dockerfileName)
|
||||
dockerfileDir = filepath.Dir(options.dockerfileName)
|
||||
|
||||
@ -15,17 +15,17 @@ import (
|
||||
)
|
||||
|
||||
func TestENVTrustServer(t *testing.T) {
|
||||
defer env.PatchAll(t, map[string]string{"DOCKER_CONTENT_TRUST_SERVER": "https://notary-test.com:5000"})()
|
||||
defer env.PatchAll(t, map[string]string{"DOCKER_CONTENT_TRUST_SERVER": "https://notary-test.example.com:5000"})()
|
||||
indexInfo := ®istrytypes.IndexInfo{Name: "testserver"}
|
||||
output, err := trust.Server(indexInfo)
|
||||
expectedStr := "https://notary-test.com:5000"
|
||||
expectedStr := "https://notary-test.example.com:5000"
|
||||
if err != nil || output != expectedStr {
|
||||
t.Fatalf("Expected server to be %s, got %s", expectedStr, output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHTTPENVTrustServer(t *testing.T) {
|
||||
defer env.PatchAll(t, map[string]string{"DOCKER_CONTENT_TRUST_SERVER": "http://notary-test.com:5000"})()
|
||||
defer env.PatchAll(t, map[string]string{"DOCKER_CONTENT_TRUST_SERVER": "http://notary-test.example.com:5000"})()
|
||||
indexInfo := ®istrytypes.IndexInfo{Name: "testserver"}
|
||||
_, err := trust.Server(indexInfo)
|
||||
if err == nil {
|
||||
|
||||
@ -66,11 +66,11 @@ func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInf
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", indexServer, err)
|
||||
}
|
||||
err = ConfigureAuth(cli, "", "", authConfig, isDefaultRegistry)
|
||||
err = ConfigureAuth(cli, "", "", &authConfig, isDefaultRegistry)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return EncodeAuthToBase64(*authConfig)
|
||||
return EncodeAuthToBase64(authConfig)
|
||||
}
|
||||
}
|
||||
|
||||
@ -89,7 +89,7 @@ func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexI
|
||||
|
||||
// GetDefaultAuthConfig gets the default auth config given a serverAddress
|
||||
// If credentials for given serverAddress exists in the credential store, the configuration will be populated with values in it
|
||||
func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (*types.AuthConfig, error) {
|
||||
func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) {
|
||||
if !isDefaultRegistry {
|
||||
serverAddress = registry.ConvertToHostname(serverAddress)
|
||||
}
|
||||
@ -98,13 +98,15 @@ func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, is
|
||||
if checkCredStore {
|
||||
authconfig, err = cli.ConfigFile().GetAuthConfig(serverAddress)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return types.AuthConfig{
|
||||
ServerAddress: serverAddress,
|
||||
}, err
|
||||
}
|
||||
}
|
||||
authconfig.ServerAddress = serverAddress
|
||||
authconfig.IdentityToken = ""
|
||||
res := types.AuthConfig(authconfig)
|
||||
return &res, nil
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// ConfigureAuth handles prompting of user's username and password if needed
|
||||
|
||||
@ -111,24 +111,22 @@ func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocycl
|
||||
serverAddress = authServer
|
||||
}
|
||||
|
||||
var err error
|
||||
var authConfig *types.AuthConfig
|
||||
var response registrytypes.AuthenticateOKBody
|
||||
isDefaultRegistry := serverAddress == authServer
|
||||
authConfig, err = command.GetDefaultAuthConfig(dockerCli, opts.user == "" && opts.password == "", serverAddress, isDefaultRegistry)
|
||||
authConfig, err := command.GetDefaultAuthConfig(dockerCli, opts.user == "" && opts.password == "", serverAddress, isDefaultRegistry)
|
||||
if err == nil && authConfig.Username != "" && authConfig.Password != "" {
|
||||
response, err = loginWithCredStoreCreds(ctx, dockerCli, authConfig)
|
||||
response, err = loginWithCredStoreCreds(ctx, dockerCli, &authConfig)
|
||||
}
|
||||
if err != nil || authConfig.Username == "" || authConfig.Password == "" {
|
||||
err = command.ConfigureAuth(dockerCli, opts.user, opts.password, authConfig, isDefaultRegistry)
|
||||
err = command.ConfigureAuth(dockerCli, opts.user, opts.password, &authConfig, isDefaultRegistry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
response, err = clnt.RegistryLogin(ctx, *authConfig)
|
||||
response, err = clnt.RegistryLogin(ctx, authConfig)
|
||||
if err != nil && client.IsErrConnectionFailed(err) {
|
||||
// If the server isn't responding (yet) attempt to login purely client side
|
||||
response, err = loginClientSide(ctx, *authConfig)
|
||||
response, err = loginClientSide(ctx, authConfig)
|
||||
}
|
||||
// If we (still) have an error, give up
|
||||
if err != nil {
|
||||
@ -151,7 +149,7 @@ func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocycl
|
||||
}
|
||||
}
|
||||
|
||||
if err := creds.Store(configtypes.AuthConfig(*authConfig)); err != nil {
|
||||
if err := creds.Store(configtypes.AuthConfig(authConfig)); err != nil {
|
||||
return errors.Errorf("Error saving credentials: %v", err)
|
||||
}
|
||||
|
||||
|
||||
@ -66,10 +66,10 @@ func TestElectAuthServer(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
expectedAuthServer: "https://foo.bar",
|
||||
expectedAuthServer: "https://foo.example.com",
|
||||
expectedWarning: "",
|
||||
infoFunc: func() (types.Info, error) {
|
||||
return types.Info{IndexServerAddress: "https://foo.bar"}, nil
|
||||
return types.Info{IndexServerAddress: "https://foo.example.com"}, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -145,7 +145,21 @@ func TestGetDefaultAuthConfig(t *testing.T) {
|
||||
assert.Check(t, is.Equal(tc.expectedErr, err.Error()))
|
||||
} else {
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(tc.expectedAuthConfig, *authconfig))
|
||||
assert.Check(t, is.DeepEqual(tc.expectedAuthConfig, authconfig))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDefaultAuthConfig_HelperError(t *testing.T) {
|
||||
cli := test.NewFakeCli(&fakeClient{})
|
||||
errBuf := new(bytes.Buffer)
|
||||
cli.SetErr(errBuf)
|
||||
cli.ConfigFile().CredentialsStore = "fake-does-not-exist"
|
||||
serverAddress := "test-server-address"
|
||||
expectedAuthConfig := types.AuthConfig{
|
||||
ServerAddress: serverAddress,
|
||||
}
|
||||
authconfig, err := GetDefaultAuthConfig(cli, true, serverAddress, serverAddress == "https://index.docker.io/v1/")
|
||||
assert.Check(t, is.DeepEqual(expectedAuthConfig, authconfig))
|
||||
assert.Check(t, is.ErrorContains(err, "docker-credential-fake-does-not-exist"))
|
||||
}
|
||||
|
||||
@ -165,7 +165,7 @@ func updateConfigFromDefaults(defaultUpdateConfig *api.UpdateConfig) *swarm.Upda
|
||||
}
|
||||
|
||||
func (opts updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
|
||||
if !anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio) {
|
||||
if !anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateMonitor, flagUpdateFailureAction, flagUpdateMaxFailureRatio, flagUpdateOrder) {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -194,7 +194,7 @@ func (opts updateOptions) updateConfig(flags *pflag.FlagSet) *swarm.UpdateConfig
|
||||
}
|
||||
|
||||
func (opts updateOptions) rollbackConfig(flags *pflag.FlagSet) *swarm.UpdateConfig {
|
||||
if !anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio) {
|
||||
if !anyChanged(flags, flagRollbackParallelism, flagRollbackDelay, flagRollbackMonitor, flagRollbackFailureAction, flagRollbackMaxFailureRatio, flagRollbackOrder) {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@ -289,6 +289,22 @@ func TestToServiceUpdateRollback(t *testing.T) {
|
||||
assert.Check(t, is.DeepEqual(service.RollbackConfig, expected.RollbackConfig))
|
||||
}
|
||||
|
||||
func TestToServiceUpdateRollbackOrder(t *testing.T) {
|
||||
flags := newCreateCommand(nil).Flags()
|
||||
flags.Set("update-order", "start-first")
|
||||
flags.Set("rollback-order", "start-first")
|
||||
|
||||
o := newServiceOptions()
|
||||
o.mode = "replicated"
|
||||
o.update = updateOptions{order: "start-first"}
|
||||
o.rollback = updateOptions{order: "start-first"}
|
||||
|
||||
service, err := o.ToService(context.Background(), &fakeClient{}, flags)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal(service.UpdateConfig.Order, o.update.order))
|
||||
assert.Check(t, is.Equal(service.RollbackConfig.Order, o.rollback.order))
|
||||
}
|
||||
|
||||
func TestToServiceMaxReplicasGlobalModeConflict(t *testing.T) {
|
||||
opt := serviceOptions{
|
||||
mode: "global",
|
||||
|
||||
@ -99,6 +99,7 @@ func ServiceProgress(ctx context.Context, client client.APIClient, serviceID str
|
||||
convergedAt time.Time
|
||||
monitor = 5 * time.Second
|
||||
rollback bool
|
||||
message *progress.Progress
|
||||
)
|
||||
|
||||
for {
|
||||
@ -140,8 +141,9 @@ func ServiceProgress(ctx context.Context, client client.APIClient, serviceID str
|
||||
return fmt.Errorf("service rollback paused: %s", service.UpdateStatus.Message)
|
||||
case swarm.UpdateStateRollbackCompleted:
|
||||
if !converged {
|
||||
return fmt.Errorf("service rolled back: %s", service.UpdateStatus.Message)
|
||||
message = &progress.Progress{ID: "rollback", Message: service.UpdateStatus.Message}
|
||||
}
|
||||
rollback = true
|
||||
}
|
||||
}
|
||||
if converged && time.Since(convergedAt) >= monitor {
|
||||
@ -149,7 +151,9 @@ func ServiceProgress(ctx context.Context, client client.APIClient, serviceID str
|
||||
ID: "verify",
|
||||
Action: "Service converged",
|
||||
})
|
||||
|
||||
if message != nil {
|
||||
progressOut.WriteProgress(*message)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@ -69,7 +69,9 @@ func NewStackCommand(dockerCli command.Cli) *cobra.Command {
|
||||
flags := cmd.PersistentFlags()
|
||||
flags.String("kubeconfig", "", "Kubernetes config file")
|
||||
flags.SetAnnotation("kubeconfig", "kubernetes", nil)
|
||||
flags.SetAnnotation("kubeconfig", "deprecated", nil)
|
||||
flags.String("orchestrator", "", "Orchestrator to use (swarm|kubernetes|all)")
|
||||
flags.SetAnnotation("orchestrator", "deprecated", nil)
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
||||
@ -50,6 +50,7 @@ func NewOptions(flags *flag.FlagSet, orchestrator command.Orchestrator) Options
|
||||
func AddNamespaceFlag(flags *flag.FlagSet) {
|
||||
flags.String("namespace", "", "Kubernetes namespace to use")
|
||||
flags.SetAnnotation("namespace", "kubernetes", nil)
|
||||
flags.SetAnnotation("namespace", "deprecated", nil)
|
||||
}
|
||||
|
||||
// WrapCli wraps command.Cli with kubernetes specifics
|
||||
|
||||
@ -30,8 +30,10 @@ func newListCommand(dockerCli command.Cli, common *commonOptions) *cobra.Command
|
||||
flags.StringVar(&opts.Format, "format", "", "Pretty-print stacks using a Go template")
|
||||
flags.StringSliceVar(&opts.Namespaces, "namespace", []string{}, "Kubernetes namespaces to use")
|
||||
flags.SetAnnotation("namespace", "kubernetes", nil)
|
||||
flags.SetAnnotation("namespace", "deprecated", nil)
|
||||
flags.BoolVarP(&opts.AllNamespaces, "all-namespaces", "", false, "List stacks from all Kubernetes namespaces")
|
||||
flags.SetAnnotation("all-namespaces", "kubernetes", nil)
|
||||
flags.SetAnnotation("all-namespaces", "deprecated", nil)
|
||||
return cmd
|
||||
}
|
||||
|
||||
|
||||
@ -104,20 +104,20 @@ func TestDisplayTrustRootInvalidFlags(t *testing.T) {
|
||||
errorMsg: "flag requires the `--rotate` flag to update the CA",
|
||||
},
|
||||
{
|
||||
args: []string{"--external-ca=protocol=cfssl,url=https://some.com/https/url"},
|
||||
args: []string{"--external-ca=protocol=cfssl,url=https://some.example.com/https/url"},
|
||||
errorMsg: "flag requires the `--rotate` flag to update the CA",
|
||||
},
|
||||
{ // to make sure we're not erroring because we didn't provide a CA cert and external CA
|
||||
args: []string{
|
||||
"--ca-cert=" + tmpfile,
|
||||
"--external-ca=protocol=cfssl,url=https://some.com/https/url",
|
||||
"--external-ca=protocol=cfssl,url=https://some.example.com/https/url",
|
||||
},
|
||||
errorMsg: "flag requires the `--rotate` flag to update the CA",
|
||||
},
|
||||
{
|
||||
args: []string{
|
||||
"--rotate",
|
||||
"--external-ca=protocol=cfssl,url=https://some.com/https/url",
|
||||
"--external-ca=protocol=cfssl,url=https://some.example.com/https/url",
|
||||
},
|
||||
errorMsg: "rotating to an external CA requires the `--ca-cert` flag to specify the external CA's cert - " +
|
||||
"to add an external CA with the current root CA certificate, use the `update` command instead",
|
||||
@ -243,7 +243,7 @@ func TestUpdateSwarmSpecCertAndExternalCA(t *testing.T) {
|
||||
"--rotate",
|
||||
"--detach",
|
||||
"--ca-cert=" + certfile,
|
||||
"--external-ca=protocol=cfssl,url=https://some.external.ca"})
|
||||
"--external-ca=protocol=cfssl,url=https://some.external.ca.example.com"})
|
||||
cmd.SetOut(cli.OutBuffer())
|
||||
assert.NilError(t, cmd.Execute())
|
||||
|
||||
@ -253,7 +253,7 @@ func TestUpdateSwarmSpecCertAndExternalCA(t *testing.T) {
|
||||
expected.CAConfig.ExternalCAs = []*swarm.ExternalCA{
|
||||
{
|
||||
Protocol: swarm.ExternalCAProtocolCFSSL,
|
||||
URL: "https://some.external.ca",
|
||||
URL: "https://some.external.ca.example.com",
|
||||
CACert: cert,
|
||||
Options: make(map[string]string),
|
||||
},
|
||||
@ -281,7 +281,7 @@ func TestUpdateSwarmSpecCertAndKeyAndExternalCA(t *testing.T) {
|
||||
"--detach",
|
||||
"--ca-cert=" + certfile,
|
||||
"--ca-key=" + keyfile,
|
||||
"--external-ca=protocol=cfssl,url=https://some.external.ca"})
|
||||
"--external-ca=protocol=cfssl,url=https://some.external.ca.example.com"})
|
||||
cmd.SetOut(cli.OutBuffer())
|
||||
assert.NilError(t, cmd.Execute())
|
||||
|
||||
@ -291,7 +291,7 @@ func TestUpdateSwarmSpecCertAndKeyAndExternalCA(t *testing.T) {
|
||||
expected.CAConfig.ExternalCAs = []*swarm.ExternalCA{
|
||||
{
|
||||
Protocol: swarm.ExternalCAProtocolCFSSL,
|
||||
URL: "https://some.external.ca",
|
||||
URL: "https://some.external.ca.example.com",
|
||||
CACert: cert,
|
||||
Options: make(map[string]string),
|
||||
},
|
||||
|
||||
@ -14,6 +14,7 @@ import (
|
||||
"github.com/docker/cli/templates"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@ -211,9 +212,6 @@ func prettyPrintServerInfo(dockerCli command.Cli, info types.Info) []error {
|
||||
for _, o := range so.Options {
|
||||
switch o.Key {
|
||||
case "profile":
|
||||
if o.Value != "default" {
|
||||
fmt.Fprintln(dockerCli.Err(), " WARNING: You're not using the default seccomp profile")
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Out(), " Profile:", o.Value)
|
||||
}
|
||||
}
|
||||
@ -378,6 +376,9 @@ func printSwarmInfo(dockerCli command.Cli, info types.Info) {
|
||||
}
|
||||
|
||||
func printServerWarnings(dockerCli command.Cli, info types.Info) {
|
||||
if versions.LessThan(dockerCli.Client().ClientVersion(), "1.42") {
|
||||
printSecurityOptionsWarnings(dockerCli, info)
|
||||
}
|
||||
if len(info.Warnings) > 0 {
|
||||
fmt.Fprintln(dockerCli.Err(), strings.Join(info.Warnings, "\n"))
|
||||
return
|
||||
@ -387,6 +388,29 @@ func printServerWarnings(dockerCli command.Cli, info types.Info) {
|
||||
printServerWarningsLegacy(dockerCli, info)
|
||||
}
|
||||
|
||||
// printSecurityOptionsWarnings prints warnings based on the security options
|
||||
// returned by the daemon.
|
||||
// DEPRECATED: warnings are now generated by the daemon, and returned in
|
||||
// info.Warnings. This function is used to provide backward compatibility with
|
||||
// daemons that do not provide these warnings. No new warnings should be added
|
||||
// here.
|
||||
func printSecurityOptionsWarnings(dockerCli command.Cli, info types.Info) {
|
||||
if info.OSType == "windows" {
|
||||
return
|
||||
}
|
||||
kvs, _ := types.DecodeSecurityOptions(info.SecurityOptions)
|
||||
for _, so := range kvs {
|
||||
if so.Name != "seccomp" {
|
||||
continue
|
||||
}
|
||||
for _, o := range so.Options {
|
||||
if o.Key == "profile" && o.Value != "default" && o.Value != "builtin" {
|
||||
_, _ = fmt.Fprintln(dockerCli.Err(), "WARNING: You're not using the default seccomp profile")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// printServerWarningsLegacy generates warnings based on information returned by the daemon.
|
||||
// DEPRECATED: warnings are now generated by the daemon, and returned in
|
||||
// info.Warnings. This function is used to provide backward compatibility with
|
||||
@ -402,10 +426,7 @@ func printServerWarningsLegacy(dockerCli command.Cli, info types.Info) {
|
||||
if !info.SwapLimit {
|
||||
fmt.Fprintln(dockerCli.Err(), "WARNING: No swap limit support")
|
||||
}
|
||||
if !info.KernelMemory {
|
||||
fmt.Fprintln(dockerCli.Err(), "WARNING: No kernel memory limit support")
|
||||
}
|
||||
if !info.OomKillDisable {
|
||||
if !info.OomKillDisable && info.CgroupVersion != "2" {
|
||||
fmt.Fprintln(dockerCli.Err(), "WARNING: No oom kill disable support")
|
||||
}
|
||||
if !info.CPUCfsQuota {
|
||||
|
||||
@ -248,7 +248,6 @@ func TestPrettyPrintInfo(t *testing.T) {
|
||||
sampleInfoDaemonWarnings.Warnings = []string{
|
||||
"WARNING: No memory limit support",
|
||||
"WARNING: No swap limit support",
|
||||
"WARNING: No kernel memory limit support",
|
||||
"WARNING: No oom kill disable support",
|
||||
"WARNING: No cpu cfs quota support",
|
||||
"WARNING: No cpu cfs period support",
|
||||
|
||||
@ -1 +1 @@
|
||||
{"ID":"EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX","Containers":0,"ContainersRunning":0,"ContainersPaused":0,"ContainersStopped":0,"Images":0,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","0"],["Dirperm1 Supported","true"]],"Plugins":{"Volume":["local"],"Network":["bridge","host","macvlan","null","overlay"],"Authorization":null,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"MemoryLimit":true,"SwapLimit":true,"KernelMemory":true,"KernelMemoryTCP":false,"CpuCfsPeriod":true,"CpuCfsQuota":true,"CPUShares":true,"CPUSet":true,"PidsLimit":false,"IPv4Forwarding":true,"BridgeNfIptables":true,"BridgeNfIp6tables":true,"Debug":true,"NFd":33,"OomKillDisable":true,"NGoroutines":135,"SystemTime":"2017-08-24T17:44:34.077811894Z","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"4.4.0-87-generic","OperatingSystem":"Ubuntu 16.04.3 LTS","OSVersion":"","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":null,"Secure":true,"Official":true}},"Mirrors":null},"NCPU":2,"MemTotal":2097356800,"GenericResources":null,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"system-sample","Labels":["provider=digitalocean"],"ExperimentalBuild":false,"ServerVersion":"17.06.1-ce","Runtimes":{"runc":{"path":"docker-runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"","NodeAddr":"","LocalNodeState":"inactive","ControlAvailable":false,"Error":"","RemoteManagers":null},"LiveRestoreEnabled":false,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"6e23458c129b551d5c9871e5174f6b1b7f6d1170","Expected":"6e23458c129b551d5c9871e5174f6b1b7f6d1170"},"RuncCommit":{"ID":"810190ceaa507aa2727d7ae6f4790c76ec150bd2","Expected":"810190ceaa507aa2727d7ae6f4790c76ec150bd2"},"InitCommit":{"ID":"949e6fa","Expected":"949e6fa"},"SecurityOptions":["name=apparmor","name=seccomp,profile=default"],"DefaultAddressPools":[{"Base":"10.123.0.0/16","Size":24}],"Warnings":["WARNING: No memory limit support","WARNING: No swap limit support","WARNING: No kernel memory limit support","WARNING: No oom kill disable support","WARNING: No cpu cfs quota support","WARNING: No cpu cfs period support","WARNING: No cpu shares support","WARNING: No cpuset support","WARNING: IPv4 forwarding is disabled","WARNING: bridge-nf-call-iptables is disabled","WARNING: bridge-nf-call-ip6tables is disabled"],"ClientInfo":{"Debug":true,"Context":"default","Plugins":[],"Warnings":null}}
|
||||
{"ID":"EKHL:QDUU:QZ7U:MKGD:VDXK:S27Q:GIPU:24B7:R7VT:DGN6:QCSF:2UBX","Containers":0,"ContainersRunning":0,"ContainersPaused":0,"ContainersStopped":0,"Images":0,"Driver":"aufs","DriverStatus":[["Root Dir","/var/lib/docker/aufs"],["Backing Filesystem","extfs"],["Dirs","0"],["Dirperm1 Supported","true"]],"Plugins":{"Volume":["local"],"Network":["bridge","host","macvlan","null","overlay"],"Authorization":null,"Log":["awslogs","fluentd","gcplogs","gelf","journald","json-file","logentries","splunk","syslog"]},"MemoryLimit":true,"SwapLimit":true,"KernelMemory":true,"KernelMemoryTCP":false,"CpuCfsPeriod":true,"CpuCfsQuota":true,"CPUShares":true,"CPUSet":true,"PidsLimit":false,"IPv4Forwarding":true,"BridgeNfIptables":true,"BridgeNfIp6tables":true,"Debug":true,"NFd":33,"OomKillDisable":true,"NGoroutines":135,"SystemTime":"2017-08-24T17:44:34.077811894Z","LoggingDriver":"json-file","CgroupDriver":"cgroupfs","NEventsListener":0,"KernelVersion":"4.4.0-87-generic","OperatingSystem":"Ubuntu 16.04.3 LTS","OSVersion":"","OSType":"linux","Architecture":"x86_64","IndexServerAddress":"https://index.docker.io/v1/","RegistryConfig":{"AllowNondistributableArtifactsCIDRs":null,"AllowNondistributableArtifactsHostnames":null,"InsecureRegistryCIDRs":["127.0.0.0/8"],"IndexConfigs":{"docker.io":{"Name":"docker.io","Mirrors":null,"Secure":true,"Official":true}},"Mirrors":null},"NCPU":2,"MemTotal":2097356800,"GenericResources":null,"DockerRootDir":"/var/lib/docker","HttpProxy":"","HttpsProxy":"","NoProxy":"","Name":"system-sample","Labels":["provider=digitalocean"],"ExperimentalBuild":false,"ServerVersion":"17.06.1-ce","Runtimes":{"runc":{"path":"docker-runc"}},"DefaultRuntime":"runc","Swarm":{"NodeID":"","NodeAddr":"","LocalNodeState":"inactive","ControlAvailable":false,"Error":"","RemoteManagers":null},"LiveRestoreEnabled":false,"Isolation":"","InitBinary":"docker-init","ContainerdCommit":{"ID":"6e23458c129b551d5c9871e5174f6b1b7f6d1170","Expected":"6e23458c129b551d5c9871e5174f6b1b7f6d1170"},"RuncCommit":{"ID":"810190ceaa507aa2727d7ae6f4790c76ec150bd2","Expected":"810190ceaa507aa2727d7ae6f4790c76ec150bd2"},"InitCommit":{"ID":"949e6fa","Expected":"949e6fa"},"SecurityOptions":["name=apparmor","name=seccomp,profile=default"],"DefaultAddressPools":[{"Base":"10.123.0.0/16","Size":24}],"Warnings":["WARNING: No memory limit support","WARNING: No swap limit support","WARNING: No oom kill disable support","WARNING: No cpu cfs quota support","WARNING: No cpu cfs period support","WARNING: No cpu shares support","WARNING: No cpuset support","WARNING: IPv4 forwarding is disabled","WARNING: bridge-nf-call-iptables is disabled","WARNING: bridge-nf-call-ip6tables is disabled"],"ClientInfo":{"Debug":true,"Context":"default","Plugins":[],"Warnings":null}}
|
||||
|
||||
@ -1,6 +1,5 @@
|
||||
WARNING: No memory limit support
|
||||
WARNING: No swap limit support
|
||||
WARNING: No kernel memory limit support
|
||||
WARNING: No oom kill disable support
|
||||
WARNING: No cpu cfs quota support
|
||||
WARNING: No cpu cfs period support
|
||||
|
||||
@ -114,6 +114,7 @@ func NewVersionCommand(dockerCli command.Cli) *cobra.Command {
|
||||
flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template")
|
||||
flags.StringVar(&opts.kubeConfig, "kubeconfig", "", "Kubernetes config file")
|
||||
flags.SetAnnotation("kubeconfig", "kubernetes", nil)
|
||||
flags.SetAnnotation("kubeconfig", "deprecated", nil)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
@ -24,17 +24,38 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
initConfigDir sync.Once
|
||||
initConfigDir = new(sync.Once)
|
||||
configDir string
|
||||
homeDir string
|
||||
)
|
||||
|
||||
// resetHomeDir is used in testing to reset the "homeDir" package variable to
|
||||
// force re-lookup of the home directory between tests.
|
||||
func resetHomeDir() {
|
||||
homeDir = ""
|
||||
}
|
||||
|
||||
func getHomeDir() string {
|
||||
if homeDir == "" {
|
||||
homeDir = homedir.Get()
|
||||
}
|
||||
return homeDir
|
||||
}
|
||||
|
||||
// resetConfigDir is used in testing to reset the "configDir" package variable
|
||||
// and its sync.Once to force re-lookup between tests.
|
||||
func resetConfigDir() {
|
||||
configDir = ""
|
||||
initConfigDir = new(sync.Once)
|
||||
}
|
||||
|
||||
func setConfigDir() {
|
||||
if configDir != "" {
|
||||
return
|
||||
}
|
||||
configDir = os.Getenv("DOCKER_CONFIG")
|
||||
if configDir == "" {
|
||||
configDir = filepath.Join(homedir.Get(), configFileDir)
|
||||
configDir = filepath.Join(getHomeDir(), configFileDir)
|
||||
}
|
||||
}
|
||||
|
||||
@ -87,6 +108,15 @@ func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
|
||||
// the auth config information and returns values.
|
||||
// FIXME: use the internal golang config parser
|
||||
func Load(configDir string) (*configfile.ConfigFile, error) {
|
||||
cfg, _, err := load(configDir)
|
||||
return cfg, err
|
||||
}
|
||||
|
||||
// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file
|
||||
// so we can remove the bool return value and collapse this back into `Load`
|
||||
func load(configDir string) (*configfile.ConfigFile, bool, error) {
|
||||
printLegacyFileWarning := false
|
||||
|
||||
if configDir == "" {
|
||||
configDir = Dir()
|
||||
}
|
||||
@ -101,35 +131,35 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, filename)
|
||||
}
|
||||
return configFile, err
|
||||
return configFile, printLegacyFileWarning, err
|
||||
} else if !os.IsNotExist(err) {
|
||||
// if file is there but we can't stat it for any reason other
|
||||
// than it doesn't exist then stop
|
||||
return configFile, errors.Wrap(err, filename)
|
||||
return configFile, printLegacyFileWarning, errors.Wrap(err, filename)
|
||||
}
|
||||
|
||||
// Can't find latest config file so check for the old one
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return configFile, errors.Wrap(err, oldConfigfile)
|
||||
}
|
||||
filename = filepath.Join(home, oldConfigfile)
|
||||
filename = filepath.Join(getHomeDir(), oldConfigfile)
|
||||
if file, err := os.Open(filename); err == nil {
|
||||
printLegacyFileWarning = true
|
||||
defer file.Close()
|
||||
if err := configFile.LegacyLoadFromReader(file); err != nil {
|
||||
return configFile, errors.Wrap(err, filename)
|
||||
return configFile, printLegacyFileWarning, errors.Wrap(err, filename)
|
||||
}
|
||||
}
|
||||
return configFile, nil
|
||||
return configFile, printLegacyFileWarning, nil
|
||||
}
|
||||
|
||||
// LoadDefaultConfigFile attempts to load the default config file and returns
|
||||
// an initialized ConfigFile struct if none is found.
|
||||
func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile {
|
||||
configFile, err := Load(Dir())
|
||||
configFile, printLegacyFileWarning, err := load(Dir())
|
||||
if err != nil {
|
||||
fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err)
|
||||
}
|
||||
if printLegacyFileWarning {
|
||||
_, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release")
|
||||
}
|
||||
if !configFile.ContainsAuth() {
|
||||
configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore)
|
||||
}
|
||||
|
||||
@ -12,9 +12,11 @@ import (
|
||||
|
||||
"github.com/docker/cli/cli/config/configfile"
|
||||
"github.com/docker/cli/cli/config/credentials"
|
||||
"github.com/docker/cli/cli/config/types"
|
||||
"gotest.tools/v3/assert"
|
||||
is "gotest.tools/v3/assert/cmp"
|
||||
"gotest.tools/v3/env"
|
||||
"gotest.tools/v3/fs"
|
||||
)
|
||||
|
||||
var homeKey = "HOME"
|
||||
@ -115,6 +117,7 @@ password`: "Invalid Auth config file",
|
||||
email`: "Invalid auth configuration file",
|
||||
}
|
||||
|
||||
resetHomeDir()
|
||||
tmpHome, err := ioutil.TempDir("", "config-test")
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(tmpHome)
|
||||
@ -131,6 +134,7 @@ email`: "Invalid auth configuration file",
|
||||
}
|
||||
|
||||
func TestOldValidAuth(t *testing.T) {
|
||||
resetHomeDir()
|
||||
tmpHome, err := ioutil.TempDir("", "config-test")
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(tmpHome)
|
||||
@ -165,6 +169,7 @@ func TestOldValidAuth(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestOldJSONInvalid(t *testing.T) {
|
||||
resetHomeDir()
|
||||
tmpHome, err := ioutil.TempDir("", "config-test")
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(tmpHome)
|
||||
@ -184,6 +189,7 @@ func TestOldJSONInvalid(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestOldJSON(t *testing.T) {
|
||||
resetHomeDir()
|
||||
tmpHome, err := ioutil.TempDir("", "config-test")
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(tmpHome)
|
||||
@ -219,6 +225,31 @@ func TestOldJSON(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestOldJSONFallbackDeprecationWarning(t *testing.T) {
|
||||
js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}`
|
||||
tmpHome := fs.NewDir(t, t.Name(), fs.WithFile(oldConfigfile, js))
|
||||
defer tmpHome.Remove()
|
||||
defer env.PatchAll(t, map[string]string{homeKey: tmpHome.Path(), "DOCKER_CONFIG": ""})()
|
||||
|
||||
// reset the homeDir, configDir, and its sync.Once, to force them being resolved again
|
||||
resetHomeDir()
|
||||
resetConfigDir()
|
||||
|
||||
buffer := new(bytes.Buffer)
|
||||
configFile := LoadDefaultConfigFile(buffer)
|
||||
expected := configfile.New(tmpHome.Join(configFileDir, ConfigFileName))
|
||||
expected.AuthConfigs = map[string]types.AuthConfig{
|
||||
"https://index.docker.io/v1/": {
|
||||
Username: "joejoe",
|
||||
Password: "hello",
|
||||
Email: "user@example.com",
|
||||
ServerAddress: "https://index.docker.io/v1/",
|
||||
},
|
||||
}
|
||||
assert.Assert(t, strings.Contains(buffer.String(), "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release"))
|
||||
assert.Check(t, is.DeepEqual(expected, configFile))
|
||||
}
|
||||
|
||||
func TestNewJSON(t *testing.T) {
|
||||
tmpHome, err := ioutil.TempDir("", "config-test")
|
||||
assert.NilError(t, err)
|
||||
|
||||
@ -27,16 +27,19 @@ func TestEncodeAuth(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProxyConfig(t *testing.T) {
|
||||
httpProxy := "http://proxy.mycorp.com:3128"
|
||||
httpsProxy := "https://user:password@proxy.mycorp.com:3129"
|
||||
ftpProxy := "http://ftpproxy.mycorp.com:21"
|
||||
noProxy := "*.intra.mycorp.com"
|
||||
defaultProxyConfig := ProxyConfig{
|
||||
HTTPProxy: httpProxy,
|
||||
HTTPSProxy: httpsProxy,
|
||||
FTPProxy: ftpProxy,
|
||||
NoProxy: noProxy,
|
||||
}
|
||||
var (
|
||||
httpProxy = "http://proxy.mycorp.example.com:3128"
|
||||
httpsProxy = "https://user:password@proxy.mycorp.example.com:3129"
|
||||
ftpProxy = "http://ftpproxy.mycorp.example.com:21"
|
||||
noProxy = "*.intra.mycorp.example.com"
|
||||
|
||||
defaultProxyConfig = ProxyConfig{
|
||||
HTTPProxy: httpProxy,
|
||||
HTTPSProxy: httpsProxy,
|
||||
FTPProxy: ftpProxy,
|
||||
NoProxy: noProxy,
|
||||
}
|
||||
)
|
||||
|
||||
cfg := ConfigFile{
|
||||
Proxies: map[string]ProxyConfig{
|
||||
@ -59,18 +62,21 @@ func TestProxyConfig(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProxyConfigOverride(t *testing.T) {
|
||||
httpProxy := "http://proxy.mycorp.com:3128"
|
||||
overrideHTTPProxy := "http://proxy.example.com:3128"
|
||||
overrideNoProxy := ""
|
||||
httpsProxy := "https://user:password@proxy.mycorp.com:3129"
|
||||
ftpProxy := "http://ftpproxy.mycorp.com:21"
|
||||
noProxy := "*.intra.mycorp.com"
|
||||
defaultProxyConfig := ProxyConfig{
|
||||
HTTPProxy: httpProxy,
|
||||
HTTPSProxy: httpsProxy,
|
||||
FTPProxy: ftpProxy,
|
||||
NoProxy: noProxy,
|
||||
}
|
||||
var (
|
||||
httpProxy = "http://proxy.mycorp.example.com:3128"
|
||||
httpProxyOverride = "http://proxy.example.com:3128"
|
||||
httpsProxy = "https://user:password@proxy.mycorp.example.com:3129"
|
||||
ftpProxy = "http://ftpproxy.mycorp.example.com:21"
|
||||
noProxy = "*.intra.mycorp.example.com"
|
||||
noProxyOverride = ""
|
||||
|
||||
defaultProxyConfig = ProxyConfig{
|
||||
HTTPProxy: httpProxy,
|
||||
HTTPSProxy: httpsProxy,
|
||||
FTPProxy: ftpProxy,
|
||||
NoProxy: noProxy,
|
||||
}
|
||||
)
|
||||
|
||||
cfg := ConfigFile{
|
||||
Proxies: map[string]ProxyConfig{
|
||||
@ -84,46 +90,49 @@ func TestProxyConfigOverride(t *testing.T) {
|
||||
}
|
||||
|
||||
ropts := map[string]*string{
|
||||
"HTTP_PROXY": clone(overrideHTTPProxy),
|
||||
"NO_PROXY": clone(overrideNoProxy),
|
||||
"HTTP_PROXY": clone(httpProxyOverride),
|
||||
"NO_PROXY": clone(noProxyOverride),
|
||||
}
|
||||
proxyConfig := cfg.ParseProxyConfig("/var/run/docker.sock", ropts)
|
||||
expected := map[string]*string{
|
||||
"HTTP_PROXY": &overrideHTTPProxy,
|
||||
"HTTP_PROXY": &httpProxyOverride,
|
||||
"http_proxy": &httpProxy,
|
||||
"HTTPS_PROXY": &httpsProxy,
|
||||
"https_proxy": &httpsProxy,
|
||||
"FTP_PROXY": &ftpProxy,
|
||||
"ftp_proxy": &ftpProxy,
|
||||
"NO_PROXY": &overrideNoProxy,
|
||||
"NO_PROXY": &noProxyOverride,
|
||||
"no_proxy": &noProxy,
|
||||
}
|
||||
assert.Check(t, is.DeepEqual(expected, proxyConfig))
|
||||
}
|
||||
|
||||
func TestProxyConfigPerHost(t *testing.T) {
|
||||
httpProxy := "http://proxy.mycorp.com:3128"
|
||||
httpsProxy := "https://user:password@proxy.mycorp.com:3129"
|
||||
ftpProxy := "http://ftpproxy.mycorp.com:21"
|
||||
noProxy := "*.intra.mycorp.com"
|
||||
var (
|
||||
httpProxy = "http://proxy.mycorp.example.com:3128"
|
||||
httpsProxy = "https://user:password@proxy.mycorp.example.com:3129"
|
||||
ftpProxy = "http://ftpproxy.mycorp.example.com:21"
|
||||
noProxy = "*.intra.mycorp.example.com"
|
||||
|
||||
extHTTPProxy := "http://proxy.example.com:3128"
|
||||
extHTTPSProxy := "https://user:password@proxy.example.com:3129"
|
||||
extFTPProxy := "http://ftpproxy.example.com:21"
|
||||
extNoProxy := "*.intra.example.com"
|
||||
extHTTPProxy = "http://proxy.example.com:3128"
|
||||
extHTTPSProxy = "https://user:password@proxy.example.com:3129"
|
||||
extFTPProxy = "http://ftpproxy.example.com:21"
|
||||
extNoProxy = "*.intra.example.com"
|
||||
|
||||
defaultProxyConfig := ProxyConfig{
|
||||
HTTPProxy: httpProxy,
|
||||
HTTPSProxy: httpsProxy,
|
||||
FTPProxy: ftpProxy,
|
||||
NoProxy: noProxy,
|
||||
}
|
||||
externalProxyConfig := ProxyConfig{
|
||||
HTTPProxy: extHTTPProxy,
|
||||
HTTPSProxy: extHTTPSProxy,
|
||||
FTPProxy: extFTPProxy,
|
||||
NoProxy: extNoProxy,
|
||||
}
|
||||
defaultProxyConfig = ProxyConfig{
|
||||
HTTPProxy: httpProxy,
|
||||
HTTPSProxy: httpsProxy,
|
||||
FTPProxy: ftpProxy,
|
||||
NoProxy: noProxy,
|
||||
}
|
||||
|
||||
externalProxyConfig = ProxyConfig{
|
||||
HTTPProxy: extHTTPProxy,
|
||||
HTTPSProxy: extHTTPSProxy,
|
||||
FTPProxy: extFTPProxy,
|
||||
NoProxy: extNoProxy,
|
||||
}
|
||||
)
|
||||
|
||||
cfg := ConfigFile{
|
||||
Proxies: map[string]ProxyConfig{
|
||||
@ -226,9 +235,11 @@ func TestGetAllCredentialsCredsStore(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetAllCredentialsCredHelper(t *testing.T) {
|
||||
testCredHelperSuffix := "test_cred_helper"
|
||||
testCredHelperRegistryHostname := "credhelper.com"
|
||||
testExtraCredHelperRegistryHostname := "somethingweird.com"
|
||||
const (
|
||||
testCredHelperSuffix = "test_cred_helper"
|
||||
testCredHelperRegistryHostname = "credhelper.com"
|
||||
testExtraCredHelperRegistryHostname = "somethingweird.com"
|
||||
)
|
||||
|
||||
unexpectedCredHelperAuth := types.AuthConfig{
|
||||
Username: "file_store_user",
|
||||
@ -265,9 +276,11 @@ func TestGetAllCredentialsCredHelper(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetAllCredentialsFileStoreAndCredHelper(t *testing.T) {
|
||||
testFileStoreRegistryHostname := "example.com"
|
||||
testCredHelperSuffix := "test_cred_helper"
|
||||
testCredHelperRegistryHostname := "credhelper.com"
|
||||
const (
|
||||
testFileStoreRegistryHostname = "example.com"
|
||||
testCredHelperSuffix = "test_cred_helper"
|
||||
testCredHelperRegistryHostname = "credhelper.com"
|
||||
)
|
||||
|
||||
expectedFileStoreAuth := types.AuthConfig{
|
||||
Username: "file_store_user",
|
||||
@ -301,10 +314,12 @@ func TestGetAllCredentialsFileStoreAndCredHelper(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetAllCredentialsCredStoreAndCredHelper(t *testing.T) {
|
||||
testCredStoreSuffix := "test_creds_store"
|
||||
testCredStoreRegistryHostname := "credstore.com"
|
||||
testCredHelperSuffix := "test_cred_helper"
|
||||
testCredHelperRegistryHostname := "credhelper.com"
|
||||
const (
|
||||
testCredStoreSuffix = "test_creds_store"
|
||||
testCredStoreRegistryHostname = "credstore.com"
|
||||
testCredHelperSuffix = "test_cred_helper"
|
||||
testCredHelperRegistryHostname = "credhelper.com"
|
||||
)
|
||||
|
||||
configFile := New("filename")
|
||||
configFile.CredentialsStore = testCredStoreSuffix
|
||||
@ -343,9 +358,11 @@ func TestGetAllCredentialsCredStoreAndCredHelper(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetAllCredentialsCredHelperOverridesDefaultStore(t *testing.T) {
|
||||
testCredStoreSuffix := "test_creds_store"
|
||||
testCredHelperSuffix := "test_cred_helper"
|
||||
testRegistryHostname := "example.com"
|
||||
const (
|
||||
testCredStoreSuffix = "test_creds_store"
|
||||
testCredHelperSuffix = "test_cred_helper"
|
||||
testRegistryHostname = "example.com"
|
||||
)
|
||||
|
||||
configFile := New("filename")
|
||||
configFile.CredentialsStore = testCredStoreSuffix
|
||||
@ -424,38 +441,36 @@ func TestCheckKubernetesConfigurationRaiseAnErrorOnInvalidValue(t *testing.T) {
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
"no kubernetes config is valid",
|
||||
nil,
|
||||
false,
|
||||
name: "no kubernetes config is valid",
|
||||
},
|
||||
{
|
||||
"enabled is valid",
|
||||
&KubernetesConfig{AllNamespaces: "enabled"},
|
||||
false,
|
||||
name: "enabled is valid",
|
||||
config: &KubernetesConfig{AllNamespaces: "enabled"},
|
||||
},
|
||||
{
|
||||
"disabled is valid",
|
||||
&KubernetesConfig{AllNamespaces: "disabled"},
|
||||
false,
|
||||
name: "disabled is valid",
|
||||
config: &KubernetesConfig{AllNamespaces: "disabled"},
|
||||
},
|
||||
{
|
||||
"empty string is valid",
|
||||
&KubernetesConfig{AllNamespaces: ""},
|
||||
false,
|
||||
name: "empty string is valid",
|
||||
config: &KubernetesConfig{AllNamespaces: ""},
|
||||
},
|
||||
{
|
||||
"other value is invalid",
|
||||
&KubernetesConfig{AllNamespaces: "unknown"},
|
||||
true,
|
||||
name: "other value is invalid",
|
||||
config: &KubernetesConfig{AllNamespaces: "unknown"},
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
for _, test := range testCases {
|
||||
err := checkKubernetesConfiguration(test.config)
|
||||
if test.expectError {
|
||||
assert.Assert(t, err != nil, test.name)
|
||||
} else {
|
||||
assert.NilError(t, err, test.name)
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
test := tc
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
err := checkKubernetesConfiguration(test.config)
|
||||
if test.expectError {
|
||||
assert.Assert(t, err != nil, test.name)
|
||||
} else {
|
||||
assert.NilError(t, err, test.name)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
// DetectDefaultStore return the default credentials store for the platform if
|
||||
|
||||
@ -70,7 +70,7 @@ func TestFileStoreGet(t *testing.T) {
|
||||
|
||||
func TestFileStoreGetAll(t *testing.T) {
|
||||
s1 := "https://example.com"
|
||||
s2 := "https://example2.com"
|
||||
s2 := "https://example2.example.com"
|
||||
f := newStore(map[string]types.AuthConfig{
|
||||
s1: {
|
||||
Auth: "super_secret_token",
|
||||
@ -80,7 +80,7 @@ func TestFileStoreGetAll(t *testing.T) {
|
||||
s2: {
|
||||
Auth: "super_secret_token2",
|
||||
Email: "foo@example2.com",
|
||||
ServerAddress: "https://example2.com",
|
||||
ServerAddress: "https://example2.example.com",
|
||||
},
|
||||
})
|
||||
|
||||
|
||||
@ -20,7 +20,6 @@ import (
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -29,6 +28,7 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
// New returns net.Conn
|
||||
|
||||
@ -49,7 +49,7 @@ func getConnectionHelper(daemonURL string, sshFlags []string) (*ConnectionHelper
|
||||
Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||
return commandconn.New(ctx, "ssh", append(sshFlags, sp.Args("docker", "system", "dial-stdio")...)...)
|
||||
},
|
||||
Host: "http://docker",
|
||||
Host: "http://docker.example.com",
|
||||
}, nil
|
||||
}
|
||||
// Future version may support plugins via ~/.docker/config.json. e.g. "dind"
|
||||
@ -63,6 +63,6 @@ func GetCommandConnectionHelper(cmd string, flags ...string) (*ConnectionHelper,
|
||||
Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||
return commandconn.New(ctx, cmd, flags...)
|
||||
},
|
||||
Host: "http://docker",
|
||||
Host: "http://docker.example.com",
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -26,7 +26,12 @@ type EndpointMeta = context.EndpointMetaBase
|
||||
// a Docker Engine endpoint, with its tls data
|
||||
type Endpoint struct {
|
||||
EndpointMeta
|
||||
TLSData *context.TLSData
|
||||
TLSData *context.TLSData
|
||||
|
||||
// Deprecated: Use of encrypted TLS private keys has been deprecated, and
|
||||
// will be removed in a future release. Golang has deprecated support for
|
||||
// legacy PEM encryption (as specified in RFC 1423), as it is insecure by
|
||||
// design (see https://go-review.googlesource.com/c/go/+/264159).
|
||||
TLSPassword string
|
||||
}
|
||||
|
||||
@ -66,8 +71,9 @@ func (c *Endpoint) tlsConfig() (*tls.Config, error) {
|
||||
}
|
||||
|
||||
var err error
|
||||
if x509.IsEncryptedPEMBlock(pemBlock) {
|
||||
keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(c.TLSPassword))
|
||||
// TODO should we follow Golang, and deprecate RFC 1423 encryption, and produce a warning (or just error)? see https://github.com/docker/cli/issues/3212
|
||||
if x509.IsEncryptedPEMBlock(pemBlock) { //nolint: staticcheck // SA1019: x509.IsEncryptedPEMBlock is deprecated, and insecure by design
|
||||
keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(c.TLSPassword)) //nolint: staticcheck // SA1019: x509.IsEncryptedPEMBlock is deprecated, and insecure by design
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
|
||||
}
|
||||
|
||||
@ -7,19 +7,24 @@ import (
|
||||
"bytes"
|
||||
_ "crypto/sha256" // ensure ids can be computed
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/errdefs"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const restrictedNamePattern = "^[a-zA-Z0-9][a-zA-Z0-9_.+-]+$"
|
||||
|
||||
var restrictedNameRegEx = regexp.MustCompile(restrictedNamePattern)
|
||||
|
||||
// Store provides a context store for easily remembering endpoints configuration
|
||||
type Store interface {
|
||||
Reader
|
||||
@ -184,6 +189,20 @@ func (s *store) GetStorageInfo(contextName string) StorageInfo {
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateContextName checks a context name is valid.
|
||||
func ValidateContextName(name string) error {
|
||||
if name == "" {
|
||||
return errors.New("context name cannot be empty")
|
||||
}
|
||||
if name == "default" {
|
||||
return errors.New(`"default" is a reserved context name`)
|
||||
}
|
||||
if !restrictedNameRegEx.MatchString(name) {
|
||||
return fmt.Errorf("context name %q is invalid, names are validated against regexp %q", name, restrictedNamePattern)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Export exports an existing namespace into an opaque data stream
|
||||
// This stream is actually a tarball containing context metadata and TLS materials, but it does
|
||||
// not map 1:1 the layout of the context store (don't try to restore it manually without calling store.Import)
|
||||
@ -295,6 +314,19 @@ func Import(name string, s Writer, reader io.Reader) error {
|
||||
}
|
||||
}
|
||||
|
||||
func isValidFilePath(p string) error {
|
||||
if p != metaFile && !strings.HasPrefix(p, "tls/") {
|
||||
return errors.New("unexpected context file")
|
||||
}
|
||||
if path.Clean(p) != p {
|
||||
return errors.New("unexpected path format")
|
||||
}
|
||||
if strings.Contains(p, `\`) {
|
||||
return errors.New(`unexpected '\' in path`)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func importTar(name string, s Writer, reader io.Reader) error {
|
||||
tr := tar.NewReader(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport})
|
||||
tlsData := ContextTLSData{
|
||||
@ -309,10 +341,13 @@ func importTar(name string, s Writer, reader io.Reader) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeDir {
|
||||
if hdr.Typeflag != tar.TypeReg {
|
||||
// skip this entry, only taking files into account
|
||||
continue
|
||||
}
|
||||
if err := isValidFilePath(hdr.Name); err != nil {
|
||||
return errors.Wrap(err, hdr.Name)
|
||||
}
|
||||
if hdr.Name == metaFile {
|
||||
data, err := ioutil.ReadAll(tr)
|
||||
if err != nil {
|
||||
@ -358,10 +393,13 @@ func importZip(name string, s Writer, reader io.Reader) error {
|
||||
var importedMetaFile bool
|
||||
for _, zf := range zr.File {
|
||||
fi := zf.FileInfo()
|
||||
if fi.IsDir() {
|
||||
// skip this entry, only taking files into account
|
||||
if !fi.Mode().IsRegular() {
|
||||
// skip this entry, only taking regular files into account
|
||||
continue
|
||||
}
|
||||
if err := isValidFilePath(zf.Name); err != nil {
|
||||
return errors.Wrap(err, zf.Name)
|
||||
}
|
||||
if zf.Name == metaFile {
|
||||
f, err := zf.Open()
|
||||
if err != nil {
|
||||
@ -408,6 +446,9 @@ func parseMetadata(data []byte, name string) (Metadata, error) {
|
||||
if err := json.Unmarshal(data, &meta); err != nil {
|
||||
return meta, err
|
||||
}
|
||||
if err := ValidateContextName(name); err != nil {
|
||||
return Metadata{}, err
|
||||
}
|
||||
meta.Name = name
|
||||
return meta, nil
|
||||
}
|
||||
|
||||
@ -175,7 +175,7 @@ func TestImportTarInvalid(t *testing.T) {
|
||||
var r io.Reader = source
|
||||
s := New(testDir, testCfg)
|
||||
err = Import("tarInvalid", s, r)
|
||||
assert.ErrorContains(t, err, "invalid context: no metadata found")
|
||||
assert.ErrorContains(t, err, "unexpected context file")
|
||||
}
|
||||
|
||||
func TestImportZip(t *testing.T) {
|
||||
@ -254,5 +254,5 @@ func TestImportZipInvalid(t *testing.T) {
|
||||
var r io.Reader = source
|
||||
s := New(testDir, testCfg)
|
||||
err = Import("zipInvalid", s, r)
|
||||
assert.ErrorContains(t, err, "invalid context: no metadata found")
|
||||
assert.ErrorContains(t, err, "unexpected context file")
|
||||
}
|
||||
|
||||
@ -29,3 +29,32 @@ func TestConfigModification(t *testing.T) {
|
||||
assert.Equal(t, &testEP2{}, cfgCopy.endpointTypes["ep1"]())
|
||||
assert.Equal(t, &testEP3{}, cfgCopy.endpointTypes["ep2"]())
|
||||
}
|
||||
|
||||
func TestValidFilePaths(t *testing.T) {
|
||||
paths := map[string]bool{
|
||||
"tls/_/../../something": false,
|
||||
"tls/../../something": false,
|
||||
"../../something": false,
|
||||
"/tls/absolute/unix/path": false,
|
||||
`C:\tls\absolute\windows\path`: false,
|
||||
"C:/tls/absolute/windows/path": false,
|
||||
"tls/kubernetes/key.pem": true,
|
||||
}
|
||||
for p, expectedValid := range paths {
|
||||
err := isValidFilePath(p)
|
||||
assert.Equal(t, err == nil, expectedValid, "%q should report valid as: %v", p, expectedValid)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateContextName(t *testing.T) {
|
||||
names := map[string]bool{
|
||||
"../../invalid/escape": false,
|
||||
"/invalid/absolute": false,
|
||||
`\invalid\windows`: false,
|
||||
"validname": true,
|
||||
}
|
||||
for n, expectedValid := range names {
|
||||
err := ValidateContextName(n)
|
||||
assert.Equal(t, err == nil, expectedValid, "%q should report valid as: %v", n, expectedValid)
|
||||
}
|
||||
}
|
||||
|
||||
@ -5,7 +5,7 @@ These resources are used to provide
|
||||
* An icon
|
||||
* A Windows manifest declaring Windows version support
|
||||
|
||||
The resource object files are generated with go generate.
|
||||
The resource object files are generated when building with scripts/build/binary.
|
||||
The resource source files are located in scripts/winresources.
|
||||
This occurs automatically when you run scripts/build/windows.
|
||||
|
||||
@ -14,5 +14,3 @@ is included.
|
||||
|
||||
*/
|
||||
package winresources
|
||||
|
||||
//go:generate ../../scripts/gen/windows-resources
|
||||
|
||||
@ -17,7 +17,9 @@ import (
|
||||
func TestClientDebugEnabled(t *testing.T) {
|
||||
defer debug.Disable()
|
||||
|
||||
tcmd := newDockerCommand(&command.DockerCli{})
|
||||
cli, err := command.NewDockerCli()
|
||||
assert.NilError(t, err)
|
||||
tcmd := newDockerCommand(cli)
|
||||
tcmd.SetFlag("debug", "true")
|
||||
cmd, _, err := tcmd.HandleGlobalFlags()
|
||||
assert.NilError(t, err)
|
||||
|
||||
@ -935,7 +935,7 @@ __docker_complete_log_options() {
|
||||
# awslogs does not implement the $common_options2.
|
||||
local awslogs_options="$common_options1 awslogs-create-group awslogs-credentials-endpoint awslogs-datetime-format awslogs-group awslogs-multiline-pattern awslogs-region awslogs-stream tag"
|
||||
|
||||
local fluentd_options="$common_options1 $common_options2 fluentd-address fluentd-async-connect fluentd-buffer-limit fluentd-retry-wait fluentd-max-retries fluentd-sub-second-precision tag"
|
||||
local fluentd_options="$common_options1 $common_options2 fluentd-address fluentd-async fluentd-buffer-limit fluentd-request-ack fluentd-retry-wait fluentd-max-retries fluentd-sub-second-precision tag"
|
||||
local gcplogs_options="$common_options1 $common_options2 gcp-log-cmd gcp-meta-id gcp-meta-name gcp-meta-zone gcp-project"
|
||||
local gelf_options="$common_options1 $common_options2 gelf-address gelf-compression-level gelf-compression-type gelf-tcp-max-reconnect gelf-tcp-reconnect-delay tag"
|
||||
local journald_options="$common_options1 $common_options2 tag"
|
||||
@ -1955,6 +1955,7 @@ _docker_container_run_and_create() {
|
||||
--pid
|
||||
--pids-limit
|
||||
--publish -p
|
||||
--pull
|
||||
--restart
|
||||
--runtime
|
||||
--security-opt
|
||||
@ -2153,6 +2154,10 @@ _docker_container_run_and_create() {
|
||||
esac
|
||||
return
|
||||
;;
|
||||
--pull)
|
||||
COMPREPLY=( $( compgen -W 'always missing never' -- "$cur" ) )
|
||||
return
|
||||
;;
|
||||
--runtime)
|
||||
__docker_complete_runtimes
|
||||
return
|
||||
@ -2548,6 +2553,7 @@ _docker_daemon() {
|
||||
--ip-forward=false
|
||||
--ip-masq=false
|
||||
--iptables=false
|
||||
--ip6tables
|
||||
--ipv6
|
||||
--live-restore
|
||||
--no-new-privileges
|
||||
@ -3601,7 +3607,7 @@ _docker_service_ls() {
|
||||
return
|
||||
;;
|
||||
mode)
|
||||
COMPREPLY=( $( compgen -W "global replicated" -- "${cur##*=}" ) )
|
||||
COMPREPLY=( $( compgen -W "global global-job replicated replicated-job" -- "${cur##*=}" ) )
|
||||
return
|
||||
;;
|
||||
name)
|
||||
@ -3731,6 +3737,7 @@ _docker_service_update_and_create() {
|
||||
--limit-pids
|
||||
--log-driver
|
||||
--log-opt
|
||||
--max-replicas
|
||||
--replicas
|
||||
--replicas-max-per-node
|
||||
--reserve-cpu
|
||||
@ -3804,7 +3811,7 @@ _docker_service_update_and_create() {
|
||||
return
|
||||
;;
|
||||
--mode)
|
||||
COMPREPLY=( $( compgen -W "global replicated" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "global global-job replicated replicated-job" -- "$cur" ) )
|
||||
return
|
||||
;;
|
||||
esac
|
||||
@ -4348,6 +4355,9 @@ _docker_node_ls() {
|
||||
__docker_complete_nodes --cur "${cur##*=}" --id
|
||||
return
|
||||
;;
|
||||
label|node.label)
|
||||
return
|
||||
;;
|
||||
membership)
|
||||
COMPREPLY=( $( compgen -W "accepted pending" -- "${cur##*=}" ) )
|
||||
return
|
||||
@ -4364,7 +4374,7 @@ _docker_node_ls() {
|
||||
|
||||
case "$prev" in
|
||||
--filter|-f)
|
||||
COMPREPLY=( $( compgen -W "id label membership name role" -S = -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "id label membership name node.label role" -S = -- "$cur" ) )
|
||||
__docker_nospace
|
||||
return
|
||||
;;
|
||||
|
||||
@ -1343,7 +1343,7 @@ __docker_node_complete_ls_filters() {
|
||||
;;
|
||||
esac
|
||||
else
|
||||
opts=('id' 'label' 'membership' 'name' 'role')
|
||||
opts=('id' 'label' 'membership' 'name' 'node.label' 'role')
|
||||
_describe -t filter-opts "filter options" opts -qS "=" && ret=0
|
||||
fi
|
||||
|
||||
@ -2544,6 +2544,82 @@ __docker_volume_subcommand() {
|
||||
|
||||
# EO volume
|
||||
|
||||
# BO context
|
||||
|
||||
__docker_complete_contexts() {
|
||||
[[ $PREFIX = -* ]] && return 1
|
||||
integer ret=1
|
||||
declare -a contexts
|
||||
|
||||
contexts=(${(f)${:-"$(_call_program commands docker $docker_options context ls -q)"$'\n'}})
|
||||
|
||||
_describe -t context-list "context" contexts && ret=0
|
||||
return ret
|
||||
}
|
||||
|
||||
__docker_context_commands() {
|
||||
local -a _docker_context_subcommands
|
||||
_docker_context_subcommands=(
|
||||
"create:Create new context"
|
||||
"inspect:Display detailed information on one or more contexts"
|
||||
"list:List available contexts"
|
||||
"rm:Remove one or more contexts"
|
||||
"show:Print the current context"
|
||||
"update:Update a context"
|
||||
"use:Set the default context"
|
||||
)
|
||||
_describe -t docker-context-commands "docker context command" _docker_context_subcommands
|
||||
}
|
||||
|
||||
__docker_context_subcommand() {
|
||||
local -a _command_args opts_help
|
||||
local expl help="--help"
|
||||
integer ret=1
|
||||
|
||||
opts_help=("(: -)--help[Print usage]")
|
||||
|
||||
case "$words[1]" in
|
||||
(create)
|
||||
_arguments $(__docker_arguments) \
|
||||
$opts_help \
|
||||
"($help)--default-stack-orchestrator=[Default orchestrator for stack operations to use with this context]:default-stack-orchestrator:(swarm kubernetes all)" \
|
||||
"($help)--description=[Description of the context]:description:" \
|
||||
"($help)--docker=[Set the docker endpoint]:docker:" \
|
||||
"($help)--kubernetes=[Set the kubernetes endpoint]:kubernetes:" \
|
||||
"($help)--from=[Create context from a named context]:from:__docker_complete_contexts" \
|
||||
"($help -):name: " && ret=0
|
||||
;;
|
||||
(use)
|
||||
_arguments $(__docker_arguments) \
|
||||
$opts_help \
|
||||
"($help -)1:context:__docker_complete_contexts" && ret=0
|
||||
;;
|
||||
(inspect)
|
||||
_arguments $(__docker_arguments) \
|
||||
$opts_help \
|
||||
"($help -)1:context:__docker_complete_contexts" && ret=0
|
||||
;;
|
||||
(rm)
|
||||
_arguments $(__docker_arguments) \
|
||||
$opts_help \
|
||||
"($help -)1:context:__docker_complete_contexts" && ret=0
|
||||
;;
|
||||
(update)
|
||||
_arguments $(__docker_arguments) \
|
||||
$opts_help \
|
||||
"($help)--default-stack-orchestrator=[Default orchestrator for stack operations to use with this context]:default-stack-orchestrator:(swarm kubernetes all)" \
|
||||
"($help)--description=[Description of the context]:description:" \
|
||||
"($help)--docker=[Set the docker endpoint]:docker:" \
|
||||
"($help)--kubernetes=[Set the kubernetes endpoint]:kubernetes:" \
|
||||
"($help -):name:" && ret=0
|
||||
;;
|
||||
esac
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
# EO context
|
||||
|
||||
__docker_caching_policy() {
|
||||
oldp=( "$1"(Nmh+1) ) # 1 hour
|
||||
(( $#oldp ))
|
||||
@ -2631,6 +2707,23 @@ __docker_subcommand() {
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
(context)
|
||||
local curcontext="$curcontext" state
|
||||
_arguments $(__docker_arguments) \
|
||||
$opts_help \
|
||||
"($help -): :->command" \
|
||||
"($help -)*:: :->option-or-argument" && ret=0
|
||||
|
||||
case $state in
|
||||
(command)
|
||||
__docker_context_commands && ret=0
|
||||
;;
|
||||
(option-or-argument)
|
||||
curcontext=${curcontext%:*:*}:docker-${words[-1]}:
|
||||
__docker_context_subcommand && ret=0
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
(daemon)
|
||||
_arguments $(__docker_arguments) \
|
||||
$opts_help \
|
||||
|
||||
63
docker-bake.hcl
Normal file
63
docker-bake.hcl
Normal file
@ -0,0 +1,63 @@
|
||||
variable "VERSION" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "USE_GLIBC" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "STRIP_TARGET" {
|
||||
default = ""
|
||||
}
|
||||
|
||||
group "default" {
|
||||
targets = ["binary"]
|
||||
}
|
||||
|
||||
target "binary" {
|
||||
target = "binary"
|
||||
platforms = ["local"]
|
||||
output = ["build"]
|
||||
args = {
|
||||
BASE_VARIANT = USE_GLIBC != "" ? "buster" : "alpine"
|
||||
VERSION = VERSION
|
||||
GO_STRIP = STRIP_TARGET
|
||||
}
|
||||
}
|
||||
|
||||
target "dynbinary" {
|
||||
inherits = ["binary"]
|
||||
args = {
|
||||
GO_LINKMODE = "dynamic"
|
||||
}
|
||||
}
|
||||
|
||||
variable "GROUP_TOTAL" {
|
||||
default = "1"
|
||||
}
|
||||
|
||||
variable "GROUP_INDEX" {
|
||||
default = "0"
|
||||
}
|
||||
|
||||
function "platforms" {
|
||||
params = []
|
||||
result = ["linux/amd64", "linux/386", "linux/arm64", "linux/arm", "linux/ppc64le", "linux/s390x", "darwin/amd64", "darwin/arm64", "windows/amd64"]
|
||||
}
|
||||
|
||||
function "glen" {
|
||||
params = [platforms, GROUP_TOTAL]
|
||||
result = ceil(length(platforms)/GROUP_TOTAL)
|
||||
}
|
||||
|
||||
target "_all_platforms" {
|
||||
platforms = slice(platforms(), GROUP_INDEX*glen(platforms(), GROUP_TOTAL),min(length(platforms()), (GROUP_INDEX+1)*glen(platforms(), GROUP_TOTAL)))
|
||||
}
|
||||
|
||||
target "cross" {
|
||||
inherits = ["binary", "_all_platforms"]
|
||||
}
|
||||
|
||||
target "dynbinary-cross" {
|
||||
inherits = ["dynbinary", "_all_platforms"]
|
||||
}
|
||||
@ -38,11 +38,6 @@ build_linter_image:
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.lint | docker build ${DOCKER_BUILD_ARGS} --build-arg=GO_VERSION -t $(LINTER_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_cross_image
|
||||
build_cross_image:
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
cat ./dockerfiles/Dockerfile.cross | docker build ${DOCKER_BUILD_ARGS} --build-arg=GO_VERSION -t $(CROSS_IMAGE_NAME) -
|
||||
|
||||
.PHONY: build_shell_validate_image
|
||||
build_shell_validate_image:
|
||||
# build dockerfile from stdin so that we don't send the build-context; source is bind-mounted in the development environment
|
||||
@ -80,22 +75,10 @@ test-unit: build_docker_image ## run unit tests (using go test)
|
||||
.PHONY: test ## run unit and e2e tests
|
||||
test: test-unit test-e2e
|
||||
|
||||
.PHONY: cross
|
||||
cross: build_cross_image ## build the CLI for macOS and Windows
|
||||
$(DOCKER_RUN) $(CROSS_IMAGE_NAME) make cross
|
||||
|
||||
.PHONY: binary-windows
|
||||
binary-windows: build_cross_image ## build the CLI for Windows
|
||||
$(DOCKER_RUN) $(CROSS_IMAGE_NAME) make $@
|
||||
|
||||
.PHONY: plugins-windows
|
||||
plugins-windows: build_cross_image ## build the example CLI plugins for Windows
|
||||
$(DOCKER_RUN) $(CROSS_IMAGE_NAME) make $@
|
||||
|
||||
.PHONY: binary-osx
|
||||
binary-osx: build_cross_image ## build the CLI for macOS
|
||||
$(DOCKER_RUN) $(CROSS_IMAGE_NAME) make $@
|
||||
|
||||
.PHONY: plugins-osx
|
||||
plugins-osx: build_cross_image ## build the example CLI plugins for macOS
|
||||
$(DOCKER_RUN) $(CROSS_IMAGE_NAME) make $@
|
||||
@ -120,9 +103,6 @@ fmt: ## run gofmt
|
||||
vendor: build_docker_image vendor.conf ## download dependencies (vendor/) listed in vendor.conf
|
||||
$(DOCKER_RUN) -it $(DEV_DOCKER_IMAGE_NAME) make vendor
|
||||
|
||||
dynbinary: build_cross_image ## build the CLI dynamically linked
|
||||
$(DOCKER_RUN) -it $(CROSS_IMAGE_NAME) make dynbinary
|
||||
|
||||
.PHONY: authors
|
||||
authors: ## generate AUTHORS file from git history
|
||||
$(DOCKER_RUN) -it $(DEV_DOCKER_IMAGE_NAME) make authors
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
ARG GO_VERSION=1.13.15
|
||||
ARG GO_VERSION=1.16.10
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine
|
||||
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
# syntax=docker/dockerfile:1.1.7-experimental
|
||||
ARG GO_VERSION=1.13.15
|
||||
# syntax=docker/dockerfile:1.3
|
||||
|
||||
ARG GO_VERSION=1.16.15
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine AS golang
|
||||
ENV CGO_ENABLED=0
|
||||
@ -9,21 +10,21 @@ ARG ESC_VERSION=v0.2.0
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
--mount=type=tmpfs,target=/go/src/ \
|
||||
GO111MODULE=on go get github.com/mjibson/esc@${ESC_VERSION}
|
||||
GO111MODULE=on go install github.com/mjibson/esc@${ESC_VERSION}
|
||||
|
||||
FROM golang AS gotestsum
|
||||
ARG GOTESTSUM_VERSION=v0.4.0
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
--mount=type=tmpfs,target=/go/src/ \
|
||||
GO111MODULE=on go get gotest.tools/gotestsum@${GOTESTSUM_VERSION}
|
||||
GO111MODULE=on go install gotest.tools/gotestsum@${GOTESTSUM_VERSION}
|
||||
|
||||
FROM golang AS vndr
|
||||
ARG VNDR_VERSION=v0.1.2
|
||||
RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
--mount=type=cache,target=/go/pkg/mod \
|
||||
--mount=type=tmpfs,target=/go/src/ \
|
||||
GO111MODULE=on go get github.com/LK4D4/vndr@${VNDR_VERSION}
|
||||
GO111MODULE=on go install github.com/LK4D4/vndr@${VNDR_VERSION}
|
||||
|
||||
FROM golang AS dev
|
||||
RUN apk add --no-cache \
|
||||
@ -43,4 +44,5 @@ COPY --from=vndr /go/bin/* /go/bin/
|
||||
COPY --from=gotestsum /go/bin/* /go/bin/
|
||||
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
ENV GO111MODULE=auto
|
||||
COPY . .
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
ARG GO_VERSION=1.13.15
|
||||
ARG GO_VERSION=1.16.10
|
||||
|
||||
# Use Debian based image as docker-compose requires glibc.
|
||||
FROM golang:${GO_VERSION}-buster
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
# syntax=docker/dockerfile:1.1.3-experimental
|
||||
# syntax=docker/dockerfile:1.3
|
||||
|
||||
ARG GO_VERSION=1.13.15
|
||||
ARG GO_VERSION=1.16.15
|
||||
ARG GOLANGCI_LINTER_SHA="v1.21.0"
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine AS build
|
||||
@ -13,6 +13,7 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
go get github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINTER_SHA}
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine AS lint
|
||||
ENV GO111MODULE=off
|
||||
ENV CGO_ENABLED=0
|
||||
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
|
||||
COPY --from=build /go/bin/golangci-lint /usr/local/bin
|
||||
|
||||
@ -50,8 +50,11 @@ The table below provides an overview of the current status of deprecated feature
|
||||
|
||||
Status | Feature | Deprecated | Remove
|
||||
-----------|------------------------------------------------------------------------------------------------------------------------------------|------------|------------
|
||||
Deprecated | [Support for encrypted TLS private keys](#support-for-encrypted-tls-private-keys) | v20.10 | -
|
||||
Deprecated | [Kubernetes stack and context support](#kubernetes-stack-and-context-support) | v20.10 | -
|
||||
Deprecated | [Pulling images from non-compliant image registries](#pulling-images-from-non-compliant-image-registries) | v20.10 | -
|
||||
Deprecated | [Linux containers on Windows (LCOW)](#linux-containers-on-windows-lcow-experimental) | v20.10 | -
|
||||
Deprecated | [BLKIO weight options with cgroups v1](#blkio-weight-options-with-cgroups-v1) | v20.10 | -
|
||||
Deprecated | [Kernel memory limit](#kernel-memory-limit) | v20.10 | -
|
||||
Deprecated | [Classic Swarm and overlay networks using external key/value stores](#classic-swarm-and-overlay-networks-using-cluster-store) | v20.10 | -
|
||||
Deprecated | [Support for the legacy `~/.dockercfg` configuration file for authentication](#support-for-legacy-dockercfg-configuration-files) | v20.10 | -
|
||||
@ -96,6 +99,22 @@ Removed | [`--api-enable-cors` flag on `dockerd`](#--api-enable-cors-flag-on-
|
||||
Removed | [`--run` flag on `docker commit`](#--run-flag-on-docker-commit) | v0.10 | v1.13
|
||||
Removed | [Three arguments form in `docker import`](#three-arguments-form-in-docker-import) | v0.6.7 | v1.12
|
||||
|
||||
### Support for encrypted TLS private keys
|
||||
|
||||
**Deprecated in Release: v20.10**
|
||||
|
||||
Use of encrypted TLS private keys has been deprecated, and will be removed in a
|
||||
future release. Golang has deprecated support for legacy PEM encryption (as
|
||||
specified in [RFC 1423](https://datatracker.ietf.org/doc/html/rfc1423)), as it
|
||||
is insecure by design (see [https://go-review.googlesource.com/c/go/+/264159](https://go-review.googlesource.com/c/go/+/264159)).
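If you rely on an encrypted key today, one option is to store the private key unencrypted before support is removed. A minimal sketch, assuming an RSA key named `key.pem` (adjust the file name and key type to your setup):

```console
$ openssl rsa -in key.pem -out key.decrypted.pem
Enter pass phrase for key.pem:
writing RSA key
$ mv key.decrypted.pem key.pem
```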
|
||||
|
||||
### Kubernetes stack and context support
|
||||
|
||||
**Deprecated in Release: v20.10**
|
||||
|
||||
Following the deprecation of [Compose on Kubernetes](https://github.com/docker/compose-on-kubernetes), support for
|
||||
Kubernetes in the `stack` and `context` commands in the docker CLI is now marked as deprecated as well.
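As a rough illustration of the functionality this deprecation covers, using the flags exposed by the CLI in this release (the endpoint values below are placeholders, not a recommended configuration):

```console
$ docker context create k8s-test \
    --default-stack-orchestrator=kubernetes \
    --docker "host=unix:///var/run/docker.sock" \
    --kubernetes "config-file=$HOME/.kube/config"
$ docker stack deploy --orchestrator=kubernetes -c docker-compose.yml mystack
```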
|
||||
|
||||
### Pulling images from non-compliant image registries
|
||||
|
||||
**Deprecated in Release: v20.10**
|
||||
@ -140,6 +159,16 @@ now stopped in favor of running docker natively on Linux in WSL2.
|
||||
Developers who want to run Linux workloads on a Windows host are encouraged to use
|
||||
[Docker Desktop with WSL2](https://docs.docker.com/docker-for-windows/wsl/) instead.
|
||||
|
||||
### BLKIO weight options with cgroups v1
|
||||
|
||||
**Deprecated in Release: v20.10**
|
||||
|
||||
Specifying blkio weight (`docker run --blkio-weight` and `docker run --blkio-weight-device`)
|
||||
is now marked as deprecated when using cgroups v1 because the corresponding features
|
||||
were [removed in Linux kernel v5.0 and up](https://github.com/torvalds/linux/commit/f382fb0bcef4c37dc049e9f6963e3baf204d815c).
|
||||
When using cgroups v2, the `--blkio-weight` options are implemented using
|
||||
[`io.weight`](https://github.com/torvalds/linux/blob/v5.0/Documentation/admin-guide/cgroup-v2.rst#io).
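For reference, a minimal sketch of the cgroups v1 usage that this deprecation covers (the weight values and device path are placeholders):

```console
$ docker run --rm --blkio-weight 300 --blkio-weight-device "/dev/sda:200" busybox true
```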
|
||||
|
||||
### Kernel memory limit
|
||||
|
||||
**Deprecated in Release: v20.10**
|
||||
@ -559,9 +588,9 @@ Log tags are now generated in a standard way across different logging drivers.
|
||||
As a result, the driver-specific log tag options `syslog-tag`, `gelf-tag` and
|
||||
`fluentd-tag` have been deprecated in favor of the generic `tag` option.
|
||||
|
||||
```bash
|
||||
```console
|
||||
{% raw %}
|
||||
docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}"
|
||||
$ docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}"
|
||||
{% endraw %}
|
||||
```
|
||||
|
||||
|
||||
@ -55,7 +55,7 @@ enabled, and use it to create a volume.
|
||||
|
||||
1. Install the `sshfs` plugin.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker plugin install vieux/sshfs
|
||||
|
||||
Plugin "vieux/sshfs" is requesting the following privileges:
|
||||
@ -74,7 +74,7 @@ enabled, and use it to create a volume.
|
||||
|
||||
2. Check that the plugin is enabled in the output of `docker plugin ls`.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker plugin ls
|
||||
|
||||
ID NAME TAG DESCRIPTION ENABLED
|
||||
@ -87,7 +87,7 @@ enabled, and use it to create a volume.
|
||||
|
||||
This volume can now be mounted into containers.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker volume create \
|
||||
-d vieux/sshfs \
|
||||
--name sshvolume \
|
||||
@ -96,9 +96,10 @@ enabled, and use it to create a volume.
|
||||
|
||||
sshvolume
|
||||
```
|
||||
|
||||
4. Verify that the volume was created successfully.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker volume ls
|
||||
|
||||
DRIVER NAME
|
||||
@ -107,22 +108,23 @@ enabled, and use it to create a volume.
|
||||
|
||||
5. Start a container that uses the volume `sshvolume`.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker run --rm -v sshvolume:/data busybox ls /data
|
||||
|
||||
<content of /remote on machine 1.2.3.4>
|
||||
```
|
||||
|
||||
6. Remove the volume `sshvolume`
|
||||
```bash
|
||||
docker volume rm sshvolume
|
||||
```console
|
||||
$ docker volume rm sshvolume
|
||||
|
||||
sshvolume
|
||||
```
|
||||
|
||||
To disable a plugin, use the `docker plugin disable` command. To completely
|
||||
remove it, use the `docker plugin remove` command. For other available
|
||||
commands and options, see the
|
||||
[command line reference](../reference/commandline/index.md).
|
||||
[command line reference](https://docs.docker.com/engine/reference/commandline/cli/).
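As a rough sketch using the `vieux/sshfs` plugin from the steps above (any volumes created with the plugin must be removed first):

```console
$ docker plugin disable vieux/sshfs
$ docker plugin rm vieux/sshfs
```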
|
||||
|
||||
|
||||
## Developing a plugin
|
||||
@ -134,7 +136,7 @@ example, it was created from a Dockerfile:
|
||||
>**Note:** The `/run/docker/plugins` directory is mandatory inside the
|
||||
plugin's filesystem for docker to communicate with the plugin.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ git clone https://github.com/vieux/docker-volume-sshfs
|
||||
$ cd docker-volume-sshfs
|
||||
$ docker build -t rootfsimage .
|
||||
@ -193,13 +195,13 @@ Stdout of a plugin is redirected to dockerd logs. Such entries have a
|
||||
`f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62` and their
|
||||
corresponding log entries in the docker daemon logs.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker plugin install tiborvass/sample-volume-plugin
|
||||
|
||||
INFO[0036] Starting... Found 0 volumes on startup plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
|
||||
```
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker volume create -d tiborvass/sample-volume-plugin samplevol
|
||||
|
||||
INFO[0193] Create Called... Ensuring directory /data/samplevol exists on host... plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
|
||||
@ -208,7 +210,7 @@ INFO[0193] Created volume samplevol with mountpoint /data/samp
|
||||
INFO[0193] Path Called... Returned path /data/samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
|
||||
```
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker run -v samplevol:/tmp busybox sh
|
||||
|
||||
INFO[0421] Get Called... Found samplevol plugin=f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62
|
||||
@ -223,7 +225,7 @@ INFO[0421] Unmount Called... Unmounted samplevol plugin=f52a3df433b9a
|
||||
plugins. This is specifically useful to collect plugin logs if they are
|
||||
redirected to a file.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ sudo docker-runc --root /var/run/docker/plugins/runtime-root/moby-plugins list
|
||||
|
||||
ID PID STATUS BUNDLE CREATED OWNER
|
||||
@ -232,13 +234,14 @@ ID PID S
|
||||
c5bb4b90941efcaccca999439ed06d6a6affdde7081bb34dc84126b57b3e793d 14984 running /run/docker/containerd/daemon/io.containerd.runtime.v1.linux/moby-plugins/c5bb4b90941efcaccca999439ed06d6a6affdde7081bb34dc84126b57b3e793d 2018-02-08T21:35:12.321288966Z root
|
||||
```
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ sudo docker-runc --root /var/run/docker/plugins/runtime-root/moby-plugins exec 93f1e7dbfe11c938782c2993628c895cf28e2274072c4a346a6002446c949b25 cat /var/log/plugin.log
|
||||
```
|
||||
|
||||
If the plugin has a built-in shell, then exec into the plugin can be done as
|
||||
follows:
|
||||
```bash
|
||||
|
||||
```console
|
||||
$ sudo docker-runc --root /var/run/docker/plugins/runtime-root/moby-plugins exec -t 93f1e7dbfe11c938782c2993628c895cf28e2274072c4a346a6002446c949b25 sh
|
||||
```
|
||||
|
||||
@ -251,17 +254,18 @@ the plugin is listening on the said socket. For a well functioning plugin,
|
||||
these basic requests should work. Note that plugin sockets are available on the host under `/var/run/docker/plugins/<pluginID>`
|
||||
|
||||
|
||||
```bash
|
||||
curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/e8a37ba56fc879c991f7d7921901723c64df6b42b87e6a0b055771ecf8477a6d/plugin.sock http:/VolumeDriver.List
|
||||
```console
|
||||
$ curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/e8a37ba56fc879c991f7d7921901723c64df6b42b87e6a0b055771ecf8477a6d/plugin.sock http:/VolumeDriver.List
|
||||
|
||||
{"Mountpoint":"","Err":"","Volumes":[{"Name":"myvol1","Mountpoint":"/data/myvol1"},{"Name":"myvol2","Mountpoint":"/data/myvol2"}],"Volume":null}
|
||||
```
|
||||
|
||||
```bash
|
||||
curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/45e00a7ce6185d6e365904c8bcf62eb724b1fe307e0d4e7ecc9f6c1eb7bcdb70/plugin.sock http:/NetworkDriver.GetCapabilities
|
||||
```console
|
||||
$ curl -H "Content-Type: application/json" -XPOST -d '{}' --unix-socket /var/run/docker/plugins/45e00a7ce6185d6e365904c8bcf62eb724b1fe307e0d4e7ecc9f6c1eb7bcdb70/plugin.sock http:/NetworkDriver.GetCapabilities
|
||||
|
||||
{"Scope":"local"}
|
||||
```
|
||||
|
||||
When using curl 7.5 and above, the URL should be of the form
|
||||
`http://hostname/APICall`, where `hostname` is the valid hostname where the
|
||||
plugin is installed and `APICall` is the call to the plugin API.
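For example, the `VolumeDriver.List` request shown above could be written as follows with a recent curl; `localhost` is only a placeholder hostname to satisfy the URL syntax, since the request is still sent over the Unix socket:

```console
$ curl -H "Content-Type: application/json" -XPOST -d '{}' \
    --unix-socket /var/run/docker/plugins/<pluginID>/plugin.sock \
    http://localhost/VolumeDriver.List
```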
|
||||
|
||||
@ -72,7 +72,7 @@ The sections below provide an inexhaustive overview of available plugins.
|
||||
| [Horcrux Volume Plugin](https://github.com/muthu-r/horcrux) | A volume plugin that allows on-demand, version controlled access to your data. Horcrux is an open-source plugin, written in Go, and supports SCP, [Minio](https://www.minio.io) and Amazon S3. |
|
||||
| [HPE 3Par Volume Plugin](https://github.com/hpe-storage/python-hpedockerplugin/) | A volume plugin that supports HPE 3Par and StoreVirtual iSCSI storage arrays. |
|
||||
| [Infinit volume plugin](https://infinit.sh/documentation/docker/volume-plugin) | A volume plugin that makes it easy to mount and manage Infinit volumes using Docker. |
|
||||
| [IPFS Volume Plugin](http://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume. |
|
||||
| [IPFS Volume Plugin](https://github.com/vdemeester/docker-volume-ipfs) | An open source volume plugin that allows using an [ipfs](https://ipfs.io/) filesystem as a volume. |
|
||||
| [Keywhiz plugin](https://github.com/calavera/docker-volume-keywhiz) | A plugin that provides credentials and secret management using Keywhiz as a central repository. |
|
||||
| [Local Persist Plugin](https://github.com/CWSpear/local-persist) | A volume plugin that extends the default `local` driver's functionality by allowing you specify a mountpoint anywhere on the host, which enables the files to *always persist*, even if the volume is removed via `docker volume rm`. |
|
||||
| [NetApp Plugin](https://github.com/NetApp/netappdvp) (nDVP) | A volume plugin that provides direct integration with the Docker ecosystem for the NetApp storage portfolio. The nDVP package supports the provisioning and management of storage resources from the storage platform to Docker hosts, with a robust framework for adding additional platforms in the future. |
|
||||
@ -80,7 +80,7 @@ The sections below provide an inexhaustive overview of available plugins.
|
||||
| [Nimble Storage Volume Plugin](https://connect.nimblestorage.com/community/app-integration/docker) | A volume plug-in that integrates with Nimble Storage Unified Flash Fabric arrays. The plug-in abstracts array volume capabilities to the Docker administrator to allow self-provisioning of secure multi-tenant volumes and clones. |
|
||||
| [OpenStorage Plugin](https://github.com/libopenstorage/openstorage) | A cluster-aware volume plugin that provides volume management for file and block storage solutions. It implements a vendor neutral specification for implementing extensions such as CoS, encryption, and snapshots. It has example drivers based on FUSE, NFS, NBD and EBS to name a few. |
|
||||
| [Portworx Volume Plugin](https://github.com/portworx/px-dev) | A volume plugin that turns any server into a scale-out converged compute/storage node, providing container granular storage and highly available volumes across any node, using a shared-nothing storage backend that works with any docker scheduler. |
|
||||
| [Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](http://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform. |
|
||||
| [Quobyte Volume Plugin](https://github.com/quobyte/docker-volume) | A volume plugin that connects Docker to [Quobyte](https://www.quobyte.com/containers)'s data center file system, a general-purpose scalable and fault-tolerant storage platform. |
|
||||
| [REX-Ray plugin](https://github.com/emccode/rexray) | A volume plugin which is written in Go and provides advanced storage functionality for many platforms including VirtualBox, EC2, Google Compute Engine, OpenStack, and EMC. |
|
||||
| [Virtuozzo Storage and Ploop plugin](https://github.com/virtuozzo/docker-volume-ploop) | A volume plugin with support for Virtuozzo Storage distributed cloud file system as well as ploop devices. |
|
||||
| [VMware vSphere Storage Plugin](https://github.com/vmware/docker-volume-vsphere) | Docker Volume Driver for vSphere enables customers to address persistent storage requirements for Docker containers in vSphere environments. |
|
||||
|
||||
@ -90,7 +90,7 @@ The `TLSConfig` field is optional and TLS will only be verified if this configur
|
||||
Plugins should be started before Docker, and stopped after Docker. For
|
||||
example, when packaging a plugin for a platform which supports `systemd`, you
|
||||
might use [`systemd` dependencies](
|
||||
http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to
|
||||
https://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=) to
|
||||
manage startup and shutdown order.
|
||||
|
||||
When upgrading a plugin, you should first stop the Docker daemon, upgrade the
|
||||
@ -114,7 +114,7 @@ a `service` file and a `socket` file.
|
||||
|
||||
The `service` file (for example `/lib/systemd/system/your-plugin.service`):
|
||||
|
||||
```
|
||||
```systemd
|
||||
[Unit]
|
||||
Description=Your plugin
|
||||
Before=docker.service
|
||||
@ -127,9 +127,10 @@ ExecStart=/usr/lib/docker/your-plugin
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
The `socket` file (for example `/lib/systemd/system/your-plugin.socket`):
|
||||
|
||||
```
|
||||
```systemd
|
||||
[Unit]
|
||||
Description=Your plugin
|
||||
|
||||
@ -166,7 +167,8 @@ Plugins are activated via the following "handshake" API call.
|
||||
**Request:** empty body
|
||||
|
||||
**Response:**
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"Implements": ["VolumeDriver"]
|
||||
}
|
||||
|
||||
@ -114,9 +114,9 @@ Enable the authorization plugin with a dedicated command line flag in the
|
||||
`--authorization-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID`
|
||||
value. This value can be the plugin’s socket or a path to a specification file.
|
||||
Authorization plugins can be loaded without restarting the daemon. Refer
|
||||
to the [`dockerd` documentation](../reference/commandline/dockerd.md#configuration-reloading) for more information.
|
||||
to the [`dockerd` documentation](../reference/commandline/dockerd.md#configuration-reload-behavior) for more information.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
|
||||
```
|
||||
|
||||
@ -124,26 +124,26 @@ Docker's authorization subsystem supports multiple `--authorization-plugin` para
|
||||
|
||||
### Calling authorized command (allow)
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker pull centos
|
||||
...
|
||||
<...>
|
||||
f1b10cd84249: Pull complete
|
||||
...
|
||||
<...>
|
||||
```
|
||||
|
||||
### Calling unauthorized command (deny)
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker pull centos
|
||||
...
|
||||
<...>
|
||||
docker: Error response from daemon: authorization denied by plugin PLUGIN_NAME: volumes are not allowed.
|
||||
```
|
||||
|
||||
### Error from plugins
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker pull centos
|
||||
...
|
||||
<...>
|
||||
docker: Error response from daemon: plugin PLUGIN_NAME failed with error: AuthZPlugin.AuthZReq: Cannot connect to the Docker daemon. Is the docker daemon running on this host?.
|
||||
```
|
||||
|
||||
@ -180,6 +180,7 @@ should implement the following two methods:
|
||||
"Err": "The error message if things go wrong"
|
||||
}
|
||||
```
|
||||
|
||||
#### /AuthZPlugin.AuthZRes
|
||||
|
||||
**Request**:
|
||||
|
||||
@ -31,7 +31,7 @@ You need to install and enable the plugin and then restart the Docker daemon
|
||||
before using the plugin. See the following example for the correct ordering
|
||||
of steps.
|
||||
|
||||
```
|
||||
```console
|
||||
$ docker plugin install cpuguy83/docker-overlay2-graphdriver-plugin # this command also enables the driver
|
||||
<output suppressed>
|
||||
$ pkill dockerd
|
||||
@ -309,6 +309,7 @@ Get an archive of the changes between the filesystem layers specified by the `ID
|
||||
and `Parent`. `Parent` may be an empty string, in which case there is no parent.
|
||||
|
||||
**Response**:
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
{{ TAR STREAM }}
|
||||
@ -354,6 +355,7 @@ Respond with a non-empty string error if an error occurred.
|
||||
### /GraphDriver.ApplyDiff
|
||||
|
||||
**Request**:
|
||||
|
||||
```
|
||||
{% raw %}
|
||||
{{ TAR STREAM }}
|
||||
|
||||
@ -211,6 +211,7 @@ as they come in once the existing logs have been read.
|
||||
to determine what set of logs to read.
|
||||
|
||||
**Response**:
|
||||
|
||||
```
|
||||
{% raw %}{{ log stream }}{% endraw %}
|
||||
```
|
||||
|
||||
@ -42,7 +42,7 @@ Once running however, network driver plugins are used just like the built-in
|
||||
network drivers: by being mentioned as a driver in network-oriented Docker
|
||||
commands. For example,
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker network create --driver weave mynet
|
||||
```
|
||||
|
||||
@ -51,7 +51,7 @@ Some network driver plugins are listed in [plugins](legacy_plugins.md)
|
||||
The `mynet` network is now owned by `weave`, so subsequent commands
|
||||
referring to that network will be sent to the plugin,
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker run --network=mynet busybox top
|
||||
```
|
||||
|
||||
|
||||
@ -29,20 +29,20 @@ node1 is the manager and node2 is the worker.
|
||||
|
||||
1. Prepare manager. In node 1:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker swarm init
|
||||
Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager.
|
||||
```
|
||||
|
||||
2. Join swarm, install plugin and create volume on worker. In node 2:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker swarm join \
|
||||
--token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
|
||||
192.168.99.100:2377
|
||||
--token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
|
||||
192.168.99.100:2377
|
||||
```
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker plugin install tiborvass/sample-volume-plugin
|
||||
latest: Pulling from tiborvass/sample-volume-plugin
|
||||
eb9c16fbdc53: Download complete
|
||||
@ -51,23 +51,24 @@ node1 is the manager and node2 is the worker.
|
||||
Installed plugin tiborvass/sample-volume-plugin
|
||||
```
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker volume create -d tiborvass/sample-volume-plugin --name pluginVol
|
||||
```
|
||||
|
||||
3. Create a service using the plugin and volume. In node1:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker service create --name my-service --mount type=volume,volume-driver=tiborvass/sample-volume-plugin,source=pluginVol,destination=/tmp busybox top
|
||||
|
||||
$ docker service ls
|
||||
z1sj8bb8jnfn my-service replicated 1/1 busybox:latest
|
||||
```
|
||||
docker service ls shows service 1 instance of service running.
|
||||
|
||||
`docker service ls` shows that 1 instance of the service is running.
|
||||
|
||||
4. Observe the task getting scheduled in node 2:
|
||||
|
||||
```bash
|
||||
```console
|
||||
{% raw %}
|
||||
$ docker ps --format '{{.ID}}\t {{.Status}} {{.Names}} {{.Command}}'
|
||||
83fc1e842599 Up 2 days my-service.1.9jn59qzn7nbc3m0zt1hij12xs "top"
|
||||
@ -87,7 +88,7 @@ Note that node1 is the manager and node2 is the worker.
|
||||
1. Install a global scoped network plugin on both manager and worker. On node1
|
||||
and node2:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker plugin install bboreham/weave2
|
||||
Plugin "bboreham/weave2" is requesting the following privileges:
|
||||
- network: [host]
|
||||
@ -102,7 +103,7 @@ Note that node1 is the manager and node2 is the worker.
|
||||
|
||||
2. Create a network using plugin on manager. On node1:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker network create --driver=bboreham/weave2:latest globalnet
|
||||
|
||||
$ docker network ls
|
||||
@ -115,12 +116,12 @@ containers get scheduled on both manager and worker.
|
||||
|
||||
On node 1:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker service create --network globalnet --name myservice --replicas=8 mrjana/simpleweb simpleweb
|
||||
w90drnfzw85nygbie9kb89vpa
|
||||
```
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
87520965206a mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 5 seconds ago Up 4 seconds myservice.4.ytdzpktmwor82zjxkh118uf1v
|
||||
@ -131,7 +132,7 @@ w90drnfzw85nygbie9kb89vpa
|
||||
|
||||
On node 2:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
53c0ae7c1dae mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 2 seconds ago Up Less than a second myservice.7.x44tvvdm3iwkt9kif35f7ykz1
|
||||
@ -142,14 +143,14 @@ w90drnfzw85nygbie9kb89vpa
|
||||
|
||||
4. Scale down the number of instances. On node1:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker service scale myservice=0
|
||||
myservice scaled to 0
|
||||
```
|
||||
|
||||
5. Disable and uninstall the plugin on the worker. On node2:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker plugin rm -f bboreham/weave2
|
||||
bboreham/weave2
|
||||
```
|
||||
@ -159,12 +160,12 @@ scheduled on the master and not on the worker, because the plugin is not availab
|
||||
|
||||
On node 1:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker service scale myservice=8
|
||||
myservice scaled to 8
|
||||
```
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
cf4b0ec2415e mrjana/simpleweb@sha256:317d7f221d68c86d503119b0ea12c29de42af0a22ca087d522646ad1069a47a4 "simpleweb" 39 seconds ago Up 36 seconds myservice.3.r7p5o208jmlzpcbm2ytl3q6n1
|
||||
@ -179,7 +180,7 @@ scheduled on the master and not on the worker, because the plugin is not availab
|
||||
|
||||
On node 2:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
```
|
||||
|
||||
@ -54,7 +54,7 @@ flags on the `docker container run` command. The `--volume` (or `-v`) flag
|
||||
accepts a volume name and path on the host, and the `--volume-driver` flag
|
||||
accepts a driver type.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker volume create --driver=flocker volumename
|
||||
|
||||
$ docker container run -it --volume volumename:/data busybox sh
|
||||
|
||||
@ -33,11 +33,11 @@ a `Dockerfile` and a *context*. The build's context is the set of files at a
|
||||
specified location `PATH` or `URL`. The `PATH` is a directory on your local
|
||||
filesystem. The `URL` is a Git repository location.
|
||||
|
||||
A context is processed recursively. So, a `PATH` includes any subdirectories and
|
||||
the `URL` includes the repository and its submodules. This example shows a
|
||||
build command that uses the current directory as context:
|
||||
The build context is processed recursively. So, a `PATH` includes any subdirectories
|
||||
and the `URL` includes the repository and its submodules. This example shows a
|
||||
build command that uses the current directory (`.`) as build context:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build .
|
||||
|
||||
Sending build context to Docker daemon 6.51 MB
|
||||
@ -52,8 +52,9 @@ Dockerfile.
|
||||
|
||||
> **Warning**
|
||||
>
|
||||
> Do not use your root directory, `/`, as the `PATH` as it causes the build to
|
||||
> transfer the entire contents of your hard drive to the Docker daemon.
|
||||
> Do not use your root directory, `/`, as the `PATH` for your build context, as
|
||||
> it causes the build to transfer the entire contents of your hard drive to the
|
||||
> Docker daemon.
|
||||
{:.warning}
|
||||
|
||||
To use a file in the build context, the `Dockerfile` refers to the file specified
|
||||
@ -66,32 +67,37 @@ Traditionally, the `Dockerfile` is called `Dockerfile` and located in the root
|
||||
of the context. You use the `-f` flag with `docker build` to point to a Dockerfile
|
||||
anywhere in your file system.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -f /path/to/a/Dockerfile .
|
||||
```
|
||||
|
||||
You can specify a repository and tag at which to save the new image if
|
||||
the build succeeds:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -t shykes/myapp .
|
||||
```
|
||||
|
||||
To tag the image into multiple repositories after the build,
|
||||
add multiple `-t` parameters when you run the `build` command:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -t shykes/myapp:1.0.2 -t shykes/myapp:latest .
|
||||
```
|
||||
|
||||
Before the Docker daemon runs the instructions in the `Dockerfile`, it performs
|
||||
a preliminary validation of the `Dockerfile` and returns an error if the syntax is incorrect:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -t test/myapp .
|
||||
|
||||
Sending build context to Docker daemon 2.048 kB
|
||||
Error response from daemon: Unknown instruction: RUNCMD
|
||||
[+] Building 0.3s (2/2) FINISHED
|
||||
=> [internal] load build definition from Dockerfile 0.1s
|
||||
=> => transferring dockerfile: 60B 0.0s
|
||||
=> [internal] load .dockerignore 0.1s
|
||||
=> => transferring context: 2B 0.0s
|
||||
error: failed to solve: rpc error: code = Unknown desc = failed to solve with frontend dockerfile.v0: failed to create LLB definition:
|
||||
dockerfile parse error line 2: unknown instruction: RUNCMD
|
||||
```
|
||||
|
||||
The Docker daemon runs the instructions in the `Dockerfile` one-by-one,
|
||||
@ -104,38 +110,35 @@ Note that each instruction is run independently, and causes a new image
|
||||
to be created - so `RUN cd /tmp` will not have any effect on the next
|
||||
instructions.
|
||||
|
||||
Whenever possible, Docker will re-use the intermediate images (cache),
|
||||
to accelerate the `docker build` process significantly. This is indicated by
|
||||
the `Using cache` message in the console output.
|
||||
(For more information, see the [`Dockerfile` best practices guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/):
|
||||
Whenever possible, Docker uses a build-cache to accelerate the `docker build`
|
||||
process significantly. This is indicated by the `CACHED` message in the console
|
||||
output. (For more information, see the [`Dockerfile` best practices guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/)):
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -t svendowideit/ambassador .
|
||||
|
||||
Sending build context to Docker daemon 15.36 kB
|
||||
Step 1/4 : FROM alpine:3.2
|
||||
---> 31f630c65071
|
||||
Step 2/4 : MAINTAINER SvenDowideit@home.org.au
|
||||
---> Using cache
|
||||
---> 2a1c91448f5f
|
||||
Step 3/4 : RUN apk update && apk add socat && rm -r /var/cache/
|
||||
---> Using cache
|
||||
---> 21ed6e7fbb73
|
||||
Step 4/4 : CMD env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh
|
||||
---> Using cache
|
||||
---> 7ea8aef582cc
|
||||
Successfully built 7ea8aef582cc
|
||||
[+] Building 0.7s (6/6) FINISHED
|
||||
=> [internal] load build definition from Dockerfile 0.1s
|
||||
=> => transferring dockerfile: 286B 0.0s
|
||||
=> [internal] load .dockerignore 0.1s
|
||||
=> => transferring context: 2B 0.0s
|
||||
=> [internal] load metadata for docker.io/library/alpine:3.2 0.4s
|
||||
=> CACHED [1/2] FROM docker.io/library/alpine:3.2@sha256:e9a2035f9d0d7ce 0.0s
|
||||
=> CACHED [2/2] RUN apk add --no-cache socat 0.0s
|
||||
=> exporting to image 0.0s
|
||||
=> => exporting layers 0.0s
|
||||
=> => writing image sha256:1affb80ca37018ac12067fa2af38cc5bcc2a8f09963de 0.0s
|
||||
=> => naming to docker.io/svendowideit/ambassador 0.0s
|
||||
```
|
||||
|
||||
Build cache is only used from images that have a local parent chain. This means
|
||||
that these images were created by previous builds or the whole chain of images
|
||||
was loaded with `docker load`. If you wish to use build cache of a specific
|
||||
image you can specify it with `--cache-from` option. Images specified with
|
||||
`--cache-from` do not need to have a parent chain and may be pulled from other
|
||||
registries.
|
||||
By default, the build cache is based on results from previous builds on the machine
on which you are building. The `--cache-from` option also allows you to use a
build-cache that's distributed through an image registry; refer to the
[specifying external cache sources](commandline/build.md#specifying-external-cache-sources)
section in the `docker build` command reference.
|
||||
|
||||
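For example, a build that reuses a cache image previously pushed to a registry might look like the following sketch (the image name is illustrative):

```console
$ docker build --cache-from myorg/myapp:latest -t myorg/myapp:latest .
```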
When you're done with your build, you're ready to look into [*Pushing a
|
||||
repository to its registry*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub).
|
||||
When you're done with your build, you're ready to look into [scanning your image with `docker scan`](https://docs.docker.com/engine/scan/),
|
||||
and [pushing your image to Docker Hub](https://docs.docker.com/docker-hub/repos/).
|
||||
|
||||
|
||||
## BuildKit
|
||||
@ -156,8 +159,8 @@ implementation. For example, BuildKit can:
|
||||
To use the BuildKit backend, you need to set an environment variable
|
||||
`DOCKER_BUILDKIT=1` on the CLI before invoking `docker build`.
|
||||
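As a minimal illustration, BuildKit can be enabled for a single build by setting the variable inline:

```console
$ DOCKER_BUILDKIT=1 docker build .
```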
|
||||
To learn about the experimental Dockerfile syntax available to BuildKit-based
|
||||
builds [refer to the documentation in the BuildKit repository](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md).
|
||||
To learn about the Dockerfile syntax available to BuildKit-based
|
||||
builds [refer to the documentation in the BuildKit repository](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md).
|
||||
|
||||
## Format
|
||||
|
||||
@ -176,7 +179,7 @@ Docker runs instructions in a `Dockerfile` in order. A `Dockerfile` **must
|
||||
begin with a `FROM` instruction**. This may be after [parser
|
||||
directives](#parser-directives), [comments](#format), and globally scoped
|
||||
[ARGs](#arg). The `FROM` instruction specifies the [*Parent
|
||||
Image*](https://docs.docker.com/glossary/#parent_image) from which you are
|
||||
Image*](https://docs.docker.com/glossary/#parent-image) from which you are
|
||||
building. `FROM` may only be preceded by one or more `ARG` instructions, which
|
||||
declare arguments that are used in `FROM` lines in the `Dockerfile`.
|
||||
|
||||
@ -315,6 +318,8 @@ The following parser directives are supported:
|
||||
|
||||
## syntax
|
||||
|
||||
<a name="external-implementation-features"><!-- included for deep-links to old section --></a>
|
||||
|
||||
```dockerfile
|
||||
# syntax=[remote image reference]
|
||||
```
|
||||
@ -322,55 +327,73 @@ The following parser directives are supported:
|
||||
For example:
|
||||
|
||||
```dockerfile
|
||||
# syntax=docker/dockerfile
|
||||
# syntax=docker/dockerfile:1.0
|
||||
# syntax=docker/dockerfile:1
|
||||
# syntax=docker.io/docker/dockerfile:1
|
||||
# syntax=docker/dockerfile:1.0.0-experimental
|
||||
# syntax=example.com/user/repo:tag@sha256:abcdef...
|
||||
```
|
||||
|
||||
This feature is only enabled if the [BuildKit](#buildkit) backend is used.
|
||||
This feature is only available when using the [BuildKit](#buildkit) backend, and
|
||||
is ignored when using the classic builder backend.
|
||||
|
||||
The syntax directive defines the location of the Dockerfile builder that is used for
|
||||
building the current Dockerfile. The BuildKit backend allows to seamlessly use
|
||||
external implementations of builders that are distributed as Docker images and
|
||||
execute inside a container sandbox environment.
|
||||
The syntax directive defines the location of the Dockerfile syntax that is used
|
||||
to build the Dockerfile. The BuildKit backend allows you to seamlessly use external
|
||||
implementations that are distributed as Docker images and execute inside a
|
||||
container sandbox environment.
|
||||
|
||||
Custom Dockerfile implementation allows you to:
|
||||
Custom Dockerfile implementations allow you to:
|
||||
|
||||
- Automatically get bugfixes without updating the daemon
|
||||
- Automatically get bugfixes without updating the Docker daemon
|
||||
- Make sure all users are using the same implementation to build your Dockerfile
|
||||
- Use the latest features without updating the daemon
|
||||
- Try out new experimental or third-party features
|
||||
- Use the latest features without updating the Docker daemon
|
||||
- Try out new features or third-party features before they are integrated in the Docker daemon
|
||||
- Use [alternative build definitions, or create your own](https://github.com/moby/buildkit#exploring-llb)
|
||||
|
||||
### Official releases
|
||||
|
||||
Docker distributes official versions of the images that can be used for building
|
||||
Dockerfiles under `docker/dockerfile` repository on Docker Hub. There are two
|
||||
channels where new images are released: stable and experimental.
|
||||
channels where new images are released: `stable` and `labs`.
|
||||
|
||||
Stable channel follows semantic versioning. For example:
|
||||
Stable channel follows [semantic versioning](https://semver.org). For example:
|
||||
|
||||
- `docker/dockerfile:1.0.0` - only allow immutable version `1.0.0`
|
||||
- `docker/dockerfile:1.0` - allow versions `1.0.*`
|
||||
- `docker/dockerfile:1` - allow versions `1.*.*`
|
||||
- `docker/dockerfile:latest` - latest release on stable channel
|
||||
- `docker/dockerfile:1` - kept updated with the latest `1.x.x` minor _and_ patch release
|
||||
- `docker/dockerfile:1.2` - kept updated with the latest `1.2.x` patch release,
|
||||
and stops receiving updates once version `1.3.0` is released.
|
||||
- `docker/dockerfile:1.2.1` - immutable: never updated
|
||||
|
||||
The experimental channel uses incremental versioning with the major and minor
|
||||
component from the stable channel on the time of the release. For example:
|
||||
We recommend using `docker/dockerfile:1`, which always points to the latest stable
|
||||
release of the version 1 syntax, and receives both "minor" and "patch" updates
|
||||
for the version 1 release cycle. BuildKit automatically checks for updates of the
|
||||
syntax when performing a build, making sure you are using the most current version.
|
||||
|
||||
- `docker/dockerfile:1.0.1-experimental` - only allow immutable version `1.0.1-experimental`
|
||||
- `docker/dockerfile:1.0-experimental` - latest experimental releases after `1.0`
|
||||
- `docker/dockerfile:experimental` - latest release on experimental channel
|
||||
If a specific version is used, such as `1.2` or `1.2.1`, the Dockerfile needs to
|
||||
be updated manually to continue receiving bugfixes and new features. Old versions
|
||||
of the Dockerfile remain compatible with the new versions of the builder.
|
||||
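As a minimal sketch of the recommendation above, a Dockerfile that opts in to the latest stable version 1 syntax starts with a `syntax` directive on its first line (the base image and command are only illustrative):

```dockerfile
# syntax=docker/dockerfile:1
FROM alpine
RUN echo "hello world"
```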
|
||||
You should choose a channel that best fits your needs. If you only want
|
||||
bugfixes, you should use `docker/dockerfile:1.0`. If you want to benefit from
|
||||
experimental features, you should use the experimental channel. If you are using
|
||||
the experimental channel, newer releases may not be backwards compatible, so it
|
||||
**labs channel**
|
||||
|
||||
The "labs" channel provides early access to Dockerfile features that are not yet
|
||||
available in the stable channel. Labs channel images are released in conjunction
|
||||
with the stable releases, and follow the same versioning with the `-labs` suffix,
|
||||
for example:
|
||||
|
||||
- `docker/dockerfile:labs` - latest release on labs channel
|
||||
- `docker/dockerfile:1-labs` - same as `dockerfile:1` in the stable channel, with labs features enabled
|
||||
- `docker/dockerfile:1.2-labs` - same as `dockerfile:1.2` in the stable channel, with labs features enabled
|
||||
- `docker/dockerfile:1.2.1-labs` - immutable: never updated. Same as `dockerfile:1.2.1` in the stable channel, with labs features enabled
|
||||
|
||||
Choose a channel that best fits your needs; if you want to benefit from
|
||||
new features, use the labs channel. Images in the labs channel provide a superset
|
||||
of the features in the stable channel; note that `stable` features in the labs
|
||||
channel images follow [semantic versioning](https://semver.org), but "labs"
|
||||
features do not, and newer releases may not be backwards compatible, so it
|
||||
is recommended to use an immutable full version variant.
|
||||
|
||||
For master builds and nightly feature releases refer to the description in
|
||||
[the source repository](https://github.com/moby/buildkit/blob/master/README.md).
|
||||
For documentation on "labs" features, master builds, and nightly feature releases,
|
||||
refer to the description in [the BuildKit source repository on GitHub](https://github.com/moby/buildkit/blob/master/README.md).
|
||||
For a full list of available images, visit the [image repository on Docker Hub](https://hub.docker.com/r/docker/dockerfile),
|
||||
and the [docker/dockerfile-upstream image repository](https://hub.docker.com/r/docker/dockerfile-upstream)
|
||||
for development builds.
|
||||
|
||||
## escape
|
||||
|
||||
@ -413,14 +436,15 @@ RUN dir c:\
|
||||
|
||||
Results in:
|
||||
|
||||
```powershell
|
||||
PS C:\John> docker build -t cmd .
|
||||
```console
|
||||
PS E:\myproject> docker build -t cmd .
|
||||
|
||||
Sending build context to Docker daemon 3.072 kB
|
||||
Step 1/2 : FROM microsoft/nanoserver
|
||||
---> 22738ff49c6d
|
||||
Step 2/2 : COPY testfile.txt c:\RUN dir c:
|
||||
GetFileAttributesEx c:RUN: The system cannot find the file specified.
|
||||
PS C:\John>
|
||||
PS E:\myproject>
|
||||
```
|
||||
|
||||
One solution to the above would be to use `/` as the target of both the `COPY`
|
||||
@ -441,8 +465,9 @@ RUN dir c:\
|
||||
|
||||
Results in:
|
||||
|
||||
```powershell
|
||||
PS C:\John> docker build -t succeeds --no-cache=true .
|
||||
```console
|
||||
PS E:\myproject> docker build -t succeeds --no-cache=true .
|
||||
|
||||
Sending build context to Docker daemon 3.072 kB
|
||||
Step 1/3 : FROM microsoft/nanoserver
|
||||
---> 22738ff49c6d
|
||||
@ -467,7 +492,7 @@ Step 3/3 : RUN dir c:\
|
||||
---> 01c7f3bef04f
|
||||
Removing intermediate container a2c157f842f5
|
||||
Successfully built 01c7f3bef04f
|
||||
PS C:\John>
|
||||
PS E:\myproject>
|
||||
```
|
||||
|
||||
## Environment replacement
|
||||
@ -574,10 +599,10 @@ This file causes the following build behavior:
|
||||
|
||||
|
||||
Matching is done using Go's
|
||||
[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. A
|
||||
[filepath.Match](https://golang.org/pkg/path/filepath#Match) rules. A
|
||||
preprocessing step removes leading and trailing whitespace and
|
||||
eliminates `.` and `..` elements using Go's
|
||||
[filepath.Clean](http://golang.org/pkg/path/filepath/#Clean). Lines
|
||||
[filepath.Clean](https://golang.org/pkg/path/filepath/#Clean). Lines
|
||||
that are blank after preprocessing are ignored.
|
||||
|
||||
Beyond Go's filepath.Match rules, Docker also supports a special
|
||||
@ -652,7 +677,7 @@ FROM [--platform=<platform>] <image>[@<digest>] [AS <name>]
|
||||
```
|
||||
|
||||
The `FROM` instruction initializes a new build stage and sets the
|
||||
[*Base Image*](https://docs.docker.com/glossary/#base_image) for subsequent instructions. As such, a
|
||||
[*Base Image*](https://docs.docker.com/glossary/#base-image) for subsequent instructions. As such, a
|
||||
valid `Dockerfile` must start with a `FROM` instruction. The image can be
|
||||
any valid image – it is especially easy to start by **pulling an image** from
|
||||
the [*Public Repositories*](https://docs.docker.com/docker-hub/repos/).
|
||||
@ -734,6 +759,7 @@ RUN instruction onto the next line. For example, consider these two lines:
|
||||
RUN /bin/bash -c 'source $HOME/.bashrc; \
|
||||
echo $HOME'
|
||||
```
|
||||
|
||||
Together they are equivalent to this single line:
|
||||
|
||||
```dockerfile
|
||||
@ -910,9 +936,10 @@ the most-recently-applied value overrides any previously-set value.
|
||||
To view an image's labels, use the `docker image inspect` command. You can use
|
||||
the `--format` option to show just the labels;
|
||||
|
||||
```bash
|
||||
docker image inspect --format='{{json .Config.Labels}}' myimage
|
||||
```console
|
||||
$ docker image inspect --format='{{json .Config.Labels}}' myimage
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"com.example.vendor": "ACME Incorporated",
|
||||
@ -938,7 +965,7 @@ easily, for example with `docker inspect`. To set a label corresponding to the
|
||||
`MAINTAINER` field you could use:
|
||||
|
||||
```dockerfile
|
||||
LABEL maintainer="SvenDowideit@home.org.au"
|
||||
LABEL org.opencontainers.image.authors="SvenDowideit@home.org.au"
|
||||
```
|
||||
|
||||
This will then be visible from `docker inspect` with the other labels.
|
||||
@ -980,8 +1007,8 @@ port on the host, so the port will not be the same for TCP and UDP.
|
||||
Regardless of the `EXPOSE` settings, you can override them at runtime by using
|
||||
the `-p` flag. For example
|
||||
|
||||
```bash
|
||||
docker run -p 80:80/tcp -p 80:80/udp ...
|
||||
```console
|
||||
$ docker run -p 80:80/tcp -p 80:80/udp ...
|
||||
```
|
||||
|
||||
To set up port redirection on the host system, see [using the -P flag](run.md#expose-incoming-ports).
|
||||
@ -1090,7 +1117,7 @@ directories, their paths are interpreted as relative to the source of
|
||||
the context of the build.
|
||||
|
||||
Each `<src>` may contain wildcards and matching will be done using Go's
|
||||
[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example:
|
||||
[filepath.Match](https://golang.org/pkg/path/filepath#Match) rules. For example:
|
||||
|
||||
To add all files starting with "hom":
|
||||
|
||||
@ -1266,7 +1293,7 @@ directories will be interpreted as relative to the source of the context
|
||||
of the build.
|
||||
|
||||
Each `<src>` may contain wildcards and matching will be done using Go's
|
||||
[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. For example:
|
||||
[filepath.Match](https://golang.org/pkg/path/filepath#Match) rules. For example:
|
||||
|
||||
To add all files starting with "hom":
|
||||
|
||||
@ -1397,7 +1424,7 @@ An `ENTRYPOINT` allows you to configure a container that will run as an executab
|
||||
For example, the following starts nginx with its default content, listening
|
||||
on port 80:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker run -i -t --rm -p 80:80 nginx
|
||||
```
|
||||
|
||||
@ -1432,7 +1459,7 @@ CMD ["-c"]
|
||||
|
||||
When you run the container, you can see that `top` is the only process:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker run -it --rm --name test top -H
|
||||
|
||||
top - 08:25:00 up 7:27, 0 users, load average: 0.00, 0.01, 0.05
|
||||
@ -1447,7 +1474,7 @@ KiB Swap: 1441840 total, 0 used, 1441840 free. 1324440 cached Mem
|
||||
|
||||
To examine the result further, you can use `docker exec`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker exec -it test ps aux
|
||||
|
||||
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
|
||||
@ -1519,7 +1546,7 @@ If you run this image with `docker run -it --rm -p 80:80 --name test apache`,
|
||||
you can then examine the container's processes with `docker exec`, or `docker top`,
|
||||
and then ask the script to stop Apache:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker exec -it test ps aux
|
||||
|
||||
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
|
||||
@ -1579,7 +1606,7 @@ ENTRYPOINT exec top -b
|
||||
|
||||
When you run this image, you'll see the single `PID 1` process:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker run -it --rm --name test top
|
||||
|
||||
Mem: 1704520K used, 352148K free, 0K shrd, 0K buff, 140368121167873K cached
|
||||
@ -1591,7 +1618,7 @@ Load average: 0.08 0.03 0.05 2/98 6
|
||||
|
||||
Which exits cleanly on `docker stop`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ /usr/bin/time docker stop test
|
||||
|
||||
test
|
||||
@ -1610,7 +1637,7 @@ CMD --ignored-param1
|
||||
|
||||
You can then run it (giving it a name for the next step):
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker run -it --name test top --ignored-param2
|
||||
|
||||
Mem: 1704184K used, 352484K free, 0K shrd, 0K buff, 140621524238337K cached
|
||||
@ -1626,7 +1653,7 @@ You can see from the output of `top` that the specified `ENTRYPOINT` is not `PID
|
||||
If you then run `docker stop test`, the container will not exit cleanly - the
|
||||
`stop` command will be forced to send a `SIGKILL` after the timeout:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker exec -it test ps aux
|
||||
|
||||
PID USER COMMAND
|
||||
@ -1797,6 +1824,11 @@ RUN pwd
|
||||
The output of the final `pwd` command in this `Dockerfile` would be
|
||||
`/path/$DIRNAME`
|
||||
|
||||
If not specified, the default working directory is `/`. In practice, if you aren't building a Dockerfile from scratch (`FROM scratch`),
|
||||
the `WORKDIR` is likely to have been set by the base image you're using.
|
||||
|
||||
Therefore, to avoid unintended operations in unknown directories, it is best practice to set your `WORKDIR` explicitly.
|
||||
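A minimal sketch of setting `WORKDIR` explicitly near the top of a Dockerfile (the paths here are illustrative):

```dockerfile
FROM alpine
# Make the working directory explicit before copying or running anything
WORKDIR /app
COPY . .
RUN ls -la
```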
|
||||
## ARG
|
||||
|
||||
```dockerfile
|
||||
@ -1859,9 +1891,10 @@ ARG user
|
||||
USER $user
|
||||
# ...
|
||||
```
|
||||
|
||||
A user builds this file by calling:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build --build-arg user=what_user .
|
||||
```
|
||||
|
||||
@ -1900,7 +1933,7 @@ RUN echo $CONT_IMG_VER
|
||||
|
||||
Then, assume this image is built with this command:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build --build-arg CONT_IMG_VER=v2.0.1 .
|
||||
```
|
||||
|
||||
@ -1922,7 +1955,7 @@ RUN echo $CONT_IMG_VER
|
||||
Unlike an `ARG` instruction, `ENV` values are always persisted in the built
|
||||
image. Consider a docker build without the `--build-arg` flag:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build .
|
||||
```
|
||||
|
||||
@ -1948,10 +1981,11 @@ corresponding `ARG` instruction in the Dockerfile.
|
||||
- `NO_PROXY`
|
||||
- `no_proxy`
|
||||
|
||||
To use these, simply pass them on the command line using the flag:
|
||||
To use these, pass them on the command line using the `--build-arg` flag, for
|
||||
example:
|
||||
|
||||
```bash
|
||||
--build-arg <varname>=<value>
|
||||
```console
|
||||
$ docker build --build-arg HTTPS_PROXY=https://my-proxy.example.com .
|
||||
```
|
||||
|
||||
By default, these pre-defined variables are excluded from the output of
|
||||
@ -2144,9 +2178,14 @@ ONBUILD RUN /usr/local/bin/python-build --dir /app/src
|
||||
STOPSIGNAL signal
|
||||
```
|
||||
|
||||
The `STOPSIGNAL` instruction sets the system call signal that will be sent to the container to exit.
|
||||
This signal can be a valid unsigned number that matches a position in the kernel's syscall table, for instance 9,
|
||||
or a signal name in the format SIGNAME, for instance SIGKILL.
|
||||
The `STOPSIGNAL` instruction sets the system call signal that will be sent to the
|
||||
container to exit. This signal can be a signal name in the format `SIG<NAME>`,
|
||||
for instance `SIGKILL`, or an unsigned number that matches a position in the
|
||||
kernel's syscall table, for instance `9`. The default is `SIGTERM` if not
|
||||
defined.
|
||||
|
||||
The image's default stopsignal can be overridden per container, using the
|
||||
`--stop-signal` flag on `docker run` and `docker create`.
|
||||
|
||||
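For example, an image can declare an alternative stop signal in its Dockerfile (the choice of `SIGQUIT` here is only illustrative):

```dockerfile
FROM nginx
STOPSIGNAL SIGQUIT
```

Containers started from this image then receive `SIGQUIT` on `docker stop`, unless overridden with `--stop-signal` as described above.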
## HEALTHCHECK
|
||||
|
||||
@ -2298,8 +2337,9 @@ RUN c:\example\Execute-MyCmdlet -sample 'hello world'
|
||||
|
||||
Resulting in:
|
||||
|
||||
```powershell
|
||||
PS E:\docker\build\shell> docker build -t shell .
|
||||
```console
|
||||
PS E:\myproject> docker build -t shell .
|
||||
|
||||
Sending build context to Docker daemon 4.096 kB
|
||||
Step 1/5 : FROM microsoft/nanoserver
|
||||
---> 22738ff49c6d
|
||||
@ -2314,9 +2354,9 @@ Step 3/5 : RUN New-Item -ItemType Directory C:\Example
|
||||
Directory: C:\
|
||||
|
||||
|
||||
Mode LastWriteTime Length Name
|
||||
---- ------------- ------ ----
|
||||
d----- 10/28/2016 11:26 AM Example
|
||||
Mode LastWriteTime Length Name
|
||||
---- ------------- ------ ----
|
||||
d----- 10/28/2016 11:26 AM Example
|
||||
|
||||
|
||||
---> 3f2fbf1395d9
|
||||
@ -2330,7 +2370,7 @@ hello world
|
||||
---> 8e559e9bf424
|
||||
Removing intermediate container be6d8e63fe75
|
||||
Successfully built 8e559e9bf424
|
||||
PS E:\docker\build\shell>
|
||||
PS E:\myproject>
|
||||
```
|
||||
|
||||
The `SHELL` instruction could also be used to modify the way in which
|
||||
@ -2340,61 +2380,10 @@ environment variable expansion semantics could be modified.
|
||||
The `SHELL` instruction can also be used on Linux should an alternate shell be
|
||||
required such as `zsh`, `csh`, `tcsh` and others.
|
||||
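A sketch of switching the default shell on Linux, assuming the base image installs `zsh` first:

```dockerfile
FROM ubuntu
RUN apt-get update && apt-get install -y zsh
# Subsequent RUN instructions use zsh instead of /bin/sh -c
SHELL ["/bin/zsh", "-c"]
RUN echo "now running under $0"
```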
|
||||
## External implementation features
|
||||
|
||||
This feature is only available when using the [BuildKit](#buildkit) backend.
|
||||
|
||||
Docker build supports experimental features like cache mounts, build secrets and
|
||||
ssh forwarding that are enabled by using an external implementation of the
|
||||
builder with a syntax directive. To learn about these features,
|
||||
[refer to the documentation in BuildKit repository](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md).
|
||||
|
||||
## Dockerfile examples
|
||||
|
||||
Below you can see some examples of Dockerfile syntax.
|
||||
For examples of Dockerfiles, refer to:
|
||||
|
||||
```dockerfile
|
||||
# Nginx
|
||||
#
|
||||
# VERSION 0.0.1
|
||||
|
||||
FROM ubuntu
|
||||
LABEL Description="This image is used to start the foobar executable" Vendor="ACME Products" Version="1.0"
|
||||
RUN apt-get update && apt-get install -y inotify-tools nginx apache2 openssh-server
|
||||
```
|
||||
|
||||
```dockerfile
|
||||
# Firefox over VNC
|
||||
#
|
||||
# VERSION 0.3
|
||||
|
||||
FROM ubuntu
|
||||
|
||||
# Install vnc, xvfb in order to create a 'fake' display and firefox
|
||||
RUN apt-get update && apt-get install -y x11vnc xvfb firefox
|
||||
RUN mkdir ~/.vnc
|
||||
# Setup a password
|
||||
RUN x11vnc -storepasswd 1234 ~/.vnc/passwd
|
||||
# Autostart firefox (might not be the best way, but it does the trick)
|
||||
RUN bash -c 'echo "firefox" >> /.bashrc'
|
||||
|
||||
EXPOSE 5900
|
||||
CMD ["x11vnc", "-forever", "-usepw", "-create"]
|
||||
```
|
||||
|
||||
```dockerfile
|
||||
# Multiple images example
|
||||
#
|
||||
# VERSION 0.1
|
||||
|
||||
FROM ubuntu
|
||||
RUN echo foo > bar
|
||||
# Will output something like ===> 907ad6c2736f
|
||||
|
||||
FROM ubuntu
|
||||
RUN echo moo > oink
|
||||
# Will output something like ===> 695d7793cbe4
|
||||
|
||||
# You'll now have two images, 907ad6c2736f with /bar, and 695d7793cbe4 with
|
||||
# /oink.
|
||||
```
|
||||
- The ["build images" section](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/)
|
||||
- The ["get started](https://docs.docker.com/get-started/)
|
||||
- The [language-specific getting started guides](https://docs.docker.com/language/)
|
||||
|
||||
@ -84,7 +84,7 @@ containers, see [**Configuration file** section](cli.md#configuration-files).
|
||||
|
||||
### Attach to and detach from a running container
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker run -d --name topdemo ubuntu /usr/bin/top -b
|
||||
|
||||
$ docker attach topdemo
|
||||
@ -130,22 +130,19 @@ $ docker ps -a | grep topdemo
|
||||
And in this second example, you can see the exit code returned by the `bash`
|
||||
process is returned by the `docker attach` command to its caller too:
|
||||
|
||||
```bash
|
||||
$ docker run --name test -d -it debian
|
||||
```console
|
||||
$ docker run --name test -d -it debian
|
||||
275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab
|
||||
|
||||
275c44472aebd77c926d4527885bb09f2f6db21d878c75f0a1c212c03d3bcfab
|
||||
$ docker attach test
|
||||
root@f38c87f2a42d:/# exit 13
|
||||
|
||||
$ docker attach test
|
||||
exit
|
||||
|
||||
root@f38c87f2a42d:/# exit 13
|
||||
$ echo $?
|
||||
13
|
||||
|
||||
exit
|
||||
$ docker ps -a | grep test
|
||||
|
||||
$ echo $?
|
||||
|
||||
13
|
||||
|
||||
$ docker ps -a | grep test
|
||||
|
||||
275c44472aeb debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test
|
||||
275c44472aeb debian:7 "/bin/bash" 26 seconds ago Exited (13) 17 seconds ago test
|
||||
```
|
||||
|
||||
@ -92,7 +92,7 @@ context.
|
||||
For example, run this command to use a directory called `docker` in the branch
|
||||
`container`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build https://github.com/docker/rootfs.git#container:docker
|
||||
```
|
||||
|
||||
@ -120,7 +120,7 @@ Build Syntax Suffix | Commit Used | Build Context Used
|
||||
|
||||
If you pass an URL to a remote tarball, the URL itself is sent to the daemon:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build http://server/context.tar.gz
|
||||
```
|
||||
|
||||
@ -136,7 +136,7 @@ build context. Tarball contexts must be tar archives conforming to the standard
|
||||
Instead of specifying a context, you can pass a single `Dockerfile` in the
|
||||
`URL` or pipe the file in via `STDIN`. To pipe a `Dockerfile` from `STDIN`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build - < Dockerfile
|
||||
```
|
||||
|
||||
@ -176,7 +176,7 @@ build fails, a non-zero failure code will be returned.
|
||||
There should be informational output of the reason for failure output to
|
||||
`STDERR`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -t fail .
|
||||
|
||||
Sending build context to Docker daemon 2.048 kB
|
||||
@ -198,7 +198,7 @@ See also:
|
||||
|
||||
### Build with PATH
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build .
|
||||
|
||||
Uploading context 10240 bytes
|
||||
@ -243,7 +243,7 @@ you must use `--rm=false`. This does not affect the build cache.
|
||||
|
||||
### Build with URL
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build github.com/creack/docker-firefox
|
||||
```
|
||||
|
||||
@ -251,7 +251,7 @@ This will clone the GitHub repository and use the cloned repository as context.
|
||||
The Dockerfile at the root of the repository is used as Dockerfile. You can
|
||||
specify an arbitrary Git repository by using the `git://` or `git@` scheme.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -f ctx/Dockerfile http://server/ctx.tar.gz
|
||||
|
||||
Downloading context: http://server/ctx.tar.gz [===================>] 240 B/240 B
|
||||
@ -277,7 +277,7 @@ ctx/container.cfg /` operation works as expected.
|
||||
|
||||
### Build with -
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build - < Dockerfile
|
||||
```
|
||||
|
||||
@ -286,7 +286,7 @@ context, no contents of any local directory will be sent to the Docker daemon.
|
||||
Since there is no context, a Dockerfile `ADD` only works if it refers to a
|
||||
remote URL.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build - < context.tar.gz
|
||||
```
|
||||
|
||||
@ -295,7 +295,7 @@ formats are: bzip2, gzip and xz.
|
||||
|
||||
### Use a .dockerignore file
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build .
|
||||
|
||||
Uploading context 18.829 MB
|
||||
@ -334,7 +334,7 @@ files.
|
||||
|
||||
### Tag an image (-t)
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -t vieux/apache:2.0 .
|
||||
```
|
||||
|
||||
@ -348,27 +348,27 @@ version.
|
||||
For example, to tag an image both as `whenry/fedora-jboss:latest` and
|
||||
`whenry/fedora-jboss:v2.1`, use the following:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 .
|
||||
```
|
||||
|
||||
### Specify a Dockerfile (-f)
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -f Dockerfile.debug .
|
||||
```
|
||||
|
||||
This will use a file called `Dockerfile.debug` for the build instructions
|
||||
instead of `Dockerfile`.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ curl example.com/remote/Dockerfile | docker build -f - .
|
||||
```
|
||||
|
||||
The above command will use the current directory as the build context and read
|
||||
a Dockerfile from stdin.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -f dockerfiles/Dockerfile.debug -t myapp_debug .
|
||||
$ docker build -f dockerfiles/Dockerfile.prod -t myapp_prod .
|
||||
```
|
||||
@ -377,7 +377,7 @@ The above commands will build the current build context (as specified by the
|
||||
`.`) twice, once using a debug version of a `Dockerfile` and once using a
|
||||
production version.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ cd /home/me/myapp/some/dir/really/deep
|
||||
$ docker build -f /home/me/myapp/dockerfiles/debug /home/me/myapp
|
||||
$ docker build -f ../../../../dockerfiles/debug /home/me/myapp
|
||||
@ -420,7 +420,7 @@ A good example is `http_proxy` or source versions for pulling intermediate
|
||||
files. The `ARG` instruction lets Dockerfile authors define values that users
|
||||
can set at build-time using the `--build-arg` flag:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 --build-arg FTP_PROXY=http://40.50.60.5:4567 .
|
||||
```
|
||||
|
||||
@ -439,7 +439,7 @@ You may also use the `--build-arg` flag without a value, in which case the value
|
||||
from the local environment will be propagated into the Docker container being
|
||||
built:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ export HTTP_PROXY=http://10.20.30.2:1234
|
||||
$ docker build --build-arg HTTP_PROXY .
|
||||
```
|
||||
@ -491,7 +491,7 @@ FROM alpine AS production-env
|
||||
...
|
||||
```
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -t mybuildimage --target build-env .
|
||||
```
|
||||
|
||||
@ -516,7 +516,7 @@ The following example builds an image using the current directory (`.`) as build
|
||||
context, and exports the files to a directory named `out` in the current directory.
|
||||
If the directory does not exist, Docker creates the directory automatically:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -o out .
|
||||
```
|
||||
|
||||
@ -525,13 +525,13 @@ thus uses the default (`local`) exporter. The example below shows the equivalent
|
||||
using the long-hand CSV syntax, specifying both `type` and `dest` (destination
|
||||
path):
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build --output type=local,dest=out .
|
||||
```
|
||||
|
||||
Use the `tar` type to export the files as a `.tar` archive:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build --output type=tar,dest=out.tar .
|
||||
```
|
||||
|
||||
@ -540,8 +540,8 @@ case, `-` is specified as destination, which automatically selects the `tar` typ
|
||||
and writes the output tarball to standard output, which is then redirected to
|
||||
the `out.tar` file:
|
||||
|
||||
```bash
|
||||
docker build -o - . > out.tar
|
||||
```console
|
||||
$ docker build -o - . > out.tar
|
||||
```
|
||||
|
||||
The `--output` option exports all files from the target stage. A common pattern
|
||||
@ -562,7 +562,7 @@ COPY --from=build-stage /go/bin/vndr /
|
||||
When building the Dockerfile with the `-o` option, only the files from the final
|
||||
stage are exported to the `out` directory, in this case, the `vndr` binary:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -o out .
|
||||
|
||||
[+] Building 2.3s (7/7) FINISHED
|
||||
@ -610,7 +610,7 @@ options) allow pulling layer data for intermediate stages in multi-stage builds.
|
||||
The following example builds an image with inline-cache metadata and pushes it
|
||||
to a registry, then uses the image as a cache source on another machine:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build -t myname/myapp --build-arg BUILDKIT_INLINE_CACHE=1 .
|
||||
$ docker push myname/myapp
|
||||
```
|
||||
@ -618,8 +618,9 @@ $ docker push myname/myapp
|
||||
After pushing the image, the image is used as cache source on another machine.
|
||||
BuildKit automatically pulls the image from the registry if needed.
|
||||
|
||||
```bash
|
||||
# on another machine
|
||||
On another machine:
|
||||
|
||||
```console
|
||||
$ docker build --cache-from myname/myapp .
|
||||
```
|
||||
|
||||
@ -666,7 +667,7 @@ The `--squash` option has a number of known limitations:
|
||||
base image is still supported.
|
||||
- When using this option you may see significantly more space used due to
|
||||
storing two copies of the image, one for the build cache with all the cache
|
||||
layers in tact, and one for the squashed version.
|
||||
layers intact, and one for the squashed version.
|
||||
- While squashing layers may produce smaller images, it may have a negative
|
||||
impact on performance, as a single layer takes longer to extract, and
|
||||
downloading a single layer cannot be parallelized.
|
||||
@ -725,7 +726,7 @@ To enable experimental features, you need to start the Docker daemon with
|
||||
|
||||
Then make sure the experimental flag is enabled:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker version -f '{{.Server.Experimental}}'
|
||||
true
|
||||
```
|
||||
@ -745,15 +746,15 @@ RUN rm /remove_me
|
||||
|
||||
An image named `test` is built with `--squash` argument.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build --squash -t test .
|
||||
|
||||
[...]
|
||||
<...>
|
||||
```
|
||||
|
||||
If everything is right, the history looks like this:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker history test
|
||||
|
||||
IMAGE CREATED CREATED BY SIZE COMMENT
|
||||
|
||||
102
docs/reference/commandline/checkpoint.md
Normal file
102
docs/reference/commandline/checkpoint.md
Normal file
@ -0,0 +1,102 @@
|
||||
---
|
||||
title: docker checkpoint
|
||||
description: "The checkpoint command description and usage"
|
||||
keywords: experimental, checkpoint, restore, criu
|
||||
experimental: true
|
||||
---
|
||||
|
||||
## Description
|
||||
|
||||
Checkpoint and Restore is an experimental feature that allows you to freeze a running
|
||||
container by checkpointing it, which turns its state into a collection of files
|
||||
on disk. Later, the container can be restored from the point it was frozen.
|
||||
|
||||
This is accomplished using a tool called [CRIU](https://criu.org), which is an
|
||||
external dependency of this feature. A good overview of the history of
|
||||
checkpoint and restore in Docker is available in this
|
||||
[Kubernetes blog post](https://kubernetes.io/blog/2015/07/how-did-quake-demo-from-dockercon-work/).
|
||||
|
||||
### Installing CRIU
|
||||
|
||||
If you use a Debian system, you can add the CRIU PPA and install with `apt-get`
|
||||
[from the criu launchpad](https://launchpad.net/~criu/+archive/ubuntu/ppa).
|
||||
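The exact steps depend on your distribution; on Ubuntu, the PPA-based flow typically looks something like the sketch below (the PPA name is taken from the linked Launchpad page, the package name is assumed to be `criu`):

```console
$ sudo add-apt-repository ppa:criu/ppa
$ sudo apt-get update
$ sudo apt-get install criu
```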
|
||||
Alternatively, you can [build CRIU from source](https://criu.org/Installation).
|
||||
|
||||
You need at least version 2.0 of CRIU to run checkpoint and restore in Docker.
|
||||
|
||||
### Use cases for checkpoint and restore
|
||||
|
||||
This feature is currently focused on single-host use cases for checkpoint and
|
||||
restore. Here are a few:
|
||||
|
||||
- Restarting the host machine without stopping/starting containers
|
||||
- Speeding up the start time of slow start applications
|
||||
- "Rewinding" processes to an earlier point in time
|
||||
- "Forensic debugging" of running processes
|
||||
|
||||
Another primary use case of checkpoint and restore outside of Docker is the live
|
||||
migration of a server from one machine to another. This is possible with the
|
||||
current implementation, but not currently a priority (and so the workflow is
|
||||
not optimized for the task).
|
||||
|
||||
### Using checkpoint and restore
|
||||
|
||||
A new top level command `docker checkpoint` is introduced, with three subcommands:
|
||||
|
||||
- `docker checkpoint create` (creates a new checkpoint)
|
||||
- `docker checkpoint ls` (lists existing checkpoints)
|
||||
- `docker checkpoint rm` (deletes an existing checkpoint)
|
||||
|
||||
Additionally, a `--checkpoint` flag is added to the `docker container start` command.
|
||||
|
||||
The options for `docker checkpoint create`:
|
||||
|
||||
```console
|
||||
Usage: docker checkpoint create [OPTIONS] CONTAINER CHECKPOINT
|
||||
|
||||
Create a checkpoint from a running container
|
||||
|
||||
--leave-running=false Leave the container running after checkpoint
|
||||
--checkpoint-dir Use a custom checkpoint storage directory
|
||||
```
|
||||
|
||||
And to restore a container:
|
||||
|
||||
```console
|
||||
Usage: docker start --checkpoint CHECKPOINT_ID [OTHER OPTIONS] CONTAINER
|
||||
```
|
||||
|
||||
Example of using checkpoint and restore on a container:
|
||||
|
||||
```console
|
||||
$ docker run --security-opt=seccomp:unconfined --name cr -d busybox /bin/sh -c 'i=0; while true; do echo $i; i=$(expr $i + 1); sleep 1; done'
|
||||
abc0123
|
||||
|
||||
$ docker checkpoint create cr checkpoint1
|
||||
|
||||
# <later>
|
||||
$ docker start --checkpoint checkpoint1 cr
|
||||
abc0123
|
||||
```
|
||||
|
||||
This process just logs an incrementing counter to stdout. If you run `docker logs`
|
||||
in between running/checkpoint/restoring you should see that the counter
|
||||
increases while the process is running, stops while it's checkpointed, and
|
||||
resumes from the point it left off once you restore.
|
||||
|
||||
### Known limitations
|
||||
|
||||
seccomp is only supported by CRIU in very up to date kernels.
|
||||
|
||||
External terminal (i.e. `docker run -t ..`) is not supported at the moment.
|
||||
If you try to create a checkpoint for a container with an external terminal,
|
||||
it would fail:
|
||||
|
||||
```console
|
||||
$ docker checkpoint create cr checkpoint1
|
||||
Error response from daemon: Cannot checkpoint container c1: rpc error: code = 2 desc = exit status 1: "criu failed: type NOTIFY errno 0\nlog file: /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log\n"
|
||||
|
||||
$ cat /var/lib/docker/containers/eb62ebdbf237ce1a8736d2ae3c7d88601fc0a50235b0ba767b559a1f3c5a600b/checkpoints/checkpoint1/criu.work/dump.log
|
||||
Error (mount.c:740): mnt: 126:./dev/console doesn't have a proper root mount
|
||||
```
|
||||
@ -1,9 +1,9 @@
|
||||
---
|
||||
title: "Use the Docker command line"
|
||||
description: "Docker's CLI command description and usage"
|
||||
keywords: "Docker, Docker documentation, CLI, command line"
|
||||
keywords: "Docker, Docker documentation, CLI, command line, config.json, CLI configuration file"
|
||||
redirect_from:
|
||||
- /go/experimental/
|
||||
- /reference/commandline/cli/
|
||||
- /engine/reference/commandline/engine/
|
||||
- /engine/reference/commandline/engine_activate/
|
||||
- /engine/reference/commandline/engine_check/
|
||||
@ -24,7 +24,7 @@ redirect_from:
|
||||
To list available commands, either run `docker` with no parameters
|
||||
or execute `docker help`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker
|
||||
Usage: docker [OPTIONS] COMMAND [ARG...]
|
||||
docker [ --help | -v | --version ]
|
||||
@ -62,30 +62,23 @@ the [installation](https://docs.docker.com/install/) instructions for your opera
|
||||
|
||||
## Environment variables
|
||||
|
||||
For easy reference, the following list of environment variables are supported
|
||||
by the `docker` command line:
|
||||
The following list of environment variables are supported by the `docker` command
|
||||
line:
|
||||
|
||||
* `DOCKER_API_VERSION` The API version to use (e.g. `1.19`)
|
||||
* `DOCKER_CONFIG` The location of your client configuration files.
|
||||
* `DOCKER_HOST` Daemon socket to connect to.
|
||||
* `DOCKER_STACK_ORCHESTRATOR` Configure the default orchestrator to use when using `docker stack` management commands.
|
||||
* `DOCKER_CONTENT_TRUST` When set Docker uses notary to sign and verify images.
|
||||
Equates to `--disable-content-trust=false` for build, create, pull, push, run.
|
||||
* `DOCKER_CONTENT_TRUST_SERVER` The URL of the Notary server to use. This defaults
|
||||
to the same URL as the registry.
|
||||
* `DOCKER_HIDE_LEGACY_COMMANDS` When set, Docker hides "legacy" top-level commands (such as `docker rm`, and
|
||||
`docker pull`) in `docker help` output, and only `Management commands` per object-type (e.g., `docker container`) are
|
||||
printed. This may become the default in a future release, at which point this environment-variable is removed.
|
||||
* `DOCKER_CONTEXT` Specify the context to use (overrides DOCKER_HOST env var and default context set with "docker context use")
|
||||
* `DOCKER_DEFAULT_PLATFORM` Specify the default platform for the commands that take the `--platform` flag.
|
||||
|
||||
#### Shared Environment variables
|
||||
|
||||
These environment variables can be used both with the `docker` command line and
|
||||
`dockerd` command line:
|
||||
|
||||
* `DOCKER_CERT_PATH` The location of your authentication keys.
|
||||
* `DOCKER_TLS_VERIFY` When set Docker uses TLS and verifies the remote.
|
||||
| Variable | Description |
|
||||
|:------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `DOCKER_API_VERSION` | Override the negotiated API version to use for debugging (e.g. `1.19`) |
|
||||
| `DOCKER_CERT_PATH` | Location of your authentication keys. This variable is used both by the `docker` CLI and the [`dockerd` daemon](dockerd.md) |
|
||||
| `DOCKER_CONFIG` | The location of your client configuration files. |
|
||||
| `DOCKER_CONTENT_TRUST_SERVER` | The URL of the Notary server to use. Defaults to the same URL as the registry. |
|
||||
| `DOCKER_CONTENT_TRUST` | When set Docker uses notary to sign and verify images. Equates to `--disable-content-trust=false` for build, create, pull, push, run. |
|
||||
| `DOCKER_CONTEXT` | Name of the `docker context` to use (overrides `DOCKER_HOST` env var and default context set with `docker context use`) |
|
||||
| `DOCKER_DEFAULT_PLATFORM` | Default platform for commands that take the `--platform` flag. |
|
||||
| `DOCKER_HIDE_LEGACY_COMMANDS` | When set, Docker hides "legacy" top-level commands (such as `docker rm`, and `docker pull`) in `docker help` output, and only `Management commands` per object-type (e.g., `docker container`) are printed. This may become the default in a future release, at which point this environment-variable is removed. |
|
||||
| `DOCKER_HOST` | Daemon socket to connect to. |
|
||||
| `DOCKER_STACK_ORCHESTRATOR` | Configure the default orchestrator to use when using `docker stack` management commands. |
|
||||
| `DOCKER_TLS_VERIFY` | When set Docker uses TLS and verifies the remote. This variable is used both by the `docker` CLI and the [`dockerd` daemon](dockerd.md) |
|
||||
| `BUILDKIT_PROGRESS` | Set type of progress output (`auto`, `plain`, `tty`) when [building](build.md) with [BuildKit backend](../builder.md#buildkit). Use plain to show container output (default `auto`). |
|
||||
|
||||
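Any of these can also be scoped to a single invocation by prefixing the command. A minimal sketch, assuming a hypothetical remote daemon reachable at `tcp://remote-host:2376`:

```console
$ DOCKER_HOST=tcp://remote-host:2376 docker version
$ DOCKER_HIDE_LEGACY_COMMANDS=1 docker help
```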
Because Docker is developed using Go, you can also use any environment
|
||||
variables used by the Go runtime. In particular, you may find these useful:
|
||||
@ -95,10 +88,10 @@ variables used by the Go runtime. In particular, you may find these useful:
|
||||
* `NO_PROXY`
|
||||
|
||||
These Go environment variables are case-insensitive. See the
|
||||
[Go specification](http://golang.org/pkg/net/http/) for details on these
|
||||
[Go specification](https://golang.org/pkg/net/http/) for details on these
|
||||
variables.
|
||||
|
||||
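As a quick illustration, the proxy variables can be exported before invoking the client; they affect the CLI's own outbound connections (for example to a remote daemon), not the daemon itself. The proxy address and daemon host below are placeholders:

```console
$ export HTTPS_PROXY=https://proxy.example.com:3129
$ export NO_PROXY=localhost,127.0.0.1
$ docker -H tcp://remote-host:2376 version
```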
### Configuration files
|
||||
## Configuration files
|
||||
|
||||
By default, the Docker command line stores its configuration files in a
|
||||
directory called `.docker` within your `$HOME` directory.
|
||||
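The exact contents of that directory vary with the features in use; an illustrative listing might look like this:

```console
$ ls ~/.docker
config.json  contexts
```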
@ -124,7 +117,7 @@ specified, then the `--config` option overrides the `DOCKER_CONFIG` environment
|
||||
variable. The example below overrides the `docker ps` command using a
|
||||
`config.json` file located in the `~/testconfigs/` directory.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker --config ~/testconfigs/ ps
|
||||
```
|
||||
|
||||
@ -133,79 +126,56 @@ configuration, you can set the `DOCKER_CONFIG` environment variable in your
|
||||
shell (e.g. `~/.profile` or `~/.bashrc`). The example below sets the new
|
||||
directory to be `HOME/newdir/.docker`.
|
||||
|
||||
```bash
|
||||
echo export DOCKER_CONFIG=$HOME/newdir/.docker > ~/.profile
|
||||
```console
|
||||
$ echo export DOCKER_CONFIG=$HOME/newdir/.docker > ~/.profile
|
||||
```
|
||||
|
||||
### `config.json` properties
|
||||
## Docker CLI configuration file (`config.json`) properties
|
||||
|
||||
The `config.json` file stores a JSON encoding of several properties:
|
||||
<a name="configjson-properties"><!-- included for deep-links to old section --></a>
|
||||
|
||||
Use the Docker CLI configuration to customize settings for the `docker` CLI. The
|
||||
configuration file uses JSON formatting, and supports the properties described below.
|
||||
|
||||
By default, the configuration file is stored in `~/.docker/config.json`. Refer to the
|
||||
[change the `.docker` directory](#change-the-docker-directory) section to use a
|
||||
different location.
|
||||
|
||||
> **Warning**
|
||||
>
|
||||
> The configuration file and other files inside the `~/.docker` configuration
|
||||
> directory may contain sensitive information, such as authentication information
|
||||
> for proxies or, depending on your credential store, credentials for your image
|
||||
> registries. Review your configuration file's content before sharing with others,
|
||||
> and prevent committing the file to version control.
|
||||
|
||||
### Customize the default output format for commands
|
||||
|
||||
These fields allow you to customize the default output format for some commands
|
||||
if no `--format` flag is provided.
|
||||
|
||||
| Property | Description |
|
||||
|:-----------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `configFormat` | Custom default format for `docker config ls` output. Refer to the [**format the output** section in the `docker config ls` documentation](config_ls.md#format-the-output) for a list of supported formatting directives. |
|
||||
| `imagesFormat` | Custom default format for `docker images` / `docker image ls` output. Refer to the [**format the output** section in the `docker images` documentation](images.md#format-the-output) for a list of supported formatting directives. |
|
||||
| `nodesFormat` | Custom default format for `docker node ls` output. Refer to the [**formatting** section in the `docker node ls` documentation](node_ls.md#formatting) for a list of supported formatting directives. |
|
||||
| `pluginsFormat` | Custom default format for `docker plugin ls` output. Refer to the [**formatting** section in the `docker plugin ls` documentation](plugin_ls.md#formatting) for a list of supported formatting directives. |
|
||||
| `psFormat` | Custom default format for `docker ps` / `docker container ps` output. Refer to the [**formatting** section in the `docker ps` documentation](ps.md#formatting) for a list of supported formatting directives. |
|
||||
| `secretFormat` | Custom default format for `docker secret ls` output. Refer to the [**format the output** section in the `docker secret ls` documentation](secret_ls.md#format-the-output) for a list of supported formatting directives. |
|
||||
| `serviceInspectFormat` | Custom default format for `docker service inspect` output. Refer to the [**formatting** section in the `docker service inspect` documentation](service_inspect.md#formatting) for a list of supported formatting directives. |
|
||||
| `servicesFormat` | Custom default format for `docker service ls` output. Refer to the [**formatting** section in the `docker service ls` documentation](service_ls.md#formatting) for a list of supported formatting directives. |
|
||||
| `statsFormat` | Custom default format for `docker stats` output. Refer to the [**formatting** section in the `docker stats` documentation](stats.md#formatting) for a list of supported formatting directives. |
|
||||
|
||||
|
||||
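For example, a default format for `docker ps` could be declared in the configuration file. This is a minimal sketch; the format string is only an illustration:

```console
$ cat ~/.docker/config.json
{
  "psFormat": "table {{.ID}}\\t{{.Names}}\\t{{.Status}}"
}
```

With such an entry in place, `docker ps` uses these columns whenever no `--format` flag is given.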
### Custom HTTP headers
|
||||
|
||||
The property `HttpHeaders` specifies a set of headers to include in all messages
|
||||
sent from the Docker client to the daemon. Docker does not try to interpret or
|
||||
understand these header; it simply puts them into the messages. Docker does
|
||||
understand these headers; it simply puts them into the messages. Docker does
|
||||
not allow these headers to change any headers it sets for itself.
|
||||
|
||||
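A minimal sketch of such a configuration (the header name and value are placeholders):

```console
$ cat ~/.docker/config.json
{
  "HttpHeaders": {
    "MyHeader": "MyHeader Value"
  }
}
```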
The property `psFormat` specifies the default format for `docker ps` output.
|
||||
When the `--format` flag is not provided with the `docker ps` command,
|
||||
Docker's client uses this property. If this property is not set, the client
|
||||
falls back to the default table format. For a list of supported formatting
|
||||
directives, see the
|
||||
[**Formatting** section in the `docker ps` documentation](ps.md)
|
||||
|
||||
The property `imagesFormat` specifies the default format for `docker images` output.
|
||||
When the `--format` flag is not provided with the `docker images` command,
|
||||
Docker's client uses this property. If this property is not set, the client
|
||||
falls back to the default table format. For a list of supported formatting
|
||||
directives, see the [**Formatting** section in the `docker images` documentation](images.md)
|
||||
|
||||
The property `pluginsFormat` specifies the default format for `docker plugin ls` output.
|
||||
When the `--format` flag is not provided with the `docker plugin ls` command,
|
||||
Docker's client uses this property. If this property is not set, the client
|
||||
falls back to the default table format. For a list of supported formatting
|
||||
directives, see the [**Formatting** section in the `docker plugin ls` documentation](plugin_ls.md)
|
||||
|
||||
The property `servicesFormat` specifies the default format for `docker
|
||||
service ls` output. When the `--format` flag is not provided with the
|
||||
`docker service ls` command, Docker's client uses this property. If this
|
||||
property is not set, the client falls back to the default json format. For a
|
||||
list of supported formatting directives, see the
|
||||
[**Formatting** section in the `docker service ls` documentation](service_ls.md)
|
||||
|
||||
The property `serviceInspectFormat` specifies the default format for `docker
|
||||
service inspect` output. When the `--format` flag is not provided with the
|
||||
`docker service inspect` command, Docker's client uses this property. If this
|
||||
property is not set, the client falls back to the default json format. For a
|
||||
list of supported formatting directives, see the
|
||||
[**Formatting** section in the `docker service inspect` documentation](service_inspect.md)
|
||||
|
||||
The property `statsFormat` specifies the default format for `docker
|
||||
stats` output. When the `--format` flag is not provided with the
|
||||
`docker stats` command, Docker's client uses this property. If this
|
||||
property is not set, the client falls back to the default table
|
||||
format. For a list of supported formatting directives, see
|
||||
[**Formatting** section in the `docker stats` documentation](stats.md)
|
||||
|
||||
The property `secretFormat` specifies the default format for `docker
|
||||
secret ls` output. When the `--format` flag is not provided with the
|
||||
`docker secret ls` command, Docker's client uses this property. If this
|
||||
property is not set, the client falls back to the default table
|
||||
format. For a list of supported formatting directives, see
|
||||
[**Formatting** section in the `docker secret ls` documentation](secret_ls.md)
|
||||
|
||||
|
||||
The property `nodesFormat` specifies the default format for `docker node ls` output.
|
||||
When the `--format` flag is not provided with the `docker node ls` command,
|
||||
Docker's client uses the value of `nodesFormat`. If the value of `nodesFormat` is not set,
|
||||
the client uses the default table format. For a list of supported formatting
|
||||
directives, see the [**Formatting** section in the `docker node ls` documentation](node_ls.md)
|
||||
|
||||
The property `configFormat` specifies the default format for `docker
|
||||
config ls` output. When the `--format` flag is not provided with the
|
||||
`docker config ls` command, Docker's client uses this property. If this
|
||||
property is not set, the client falls back to the default table
|
||||
format. For a list of supported formatting directives, see
|
||||
[**Formatting** section in the `docker config ls` documentation](config_ls.md)
|
||||
### Credential store options
|
||||
|
||||
The property `credsStore` specifies an external binary to serve as the default
|
||||
credential store. When this property is set, `docker login` will attempt to
|
||||
@ -221,11 +191,17 @@ credentials for specific registries. If this property is set, the binary
|
||||
for a specific registry. For more information, see the
|
||||
[**Credential helpers** section in the `docker login` documentation](login.md#credential-helpers)
|
||||
|
||||
|
||||
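A configuration combining both properties might look like the sketch below; `osxkeychain` and `ecr-login` are only examples and must correspond to `docker-credential-*` binaries that are actually installed:

```console
$ cat ~/.docker/config.json
{
  "credsStore": "osxkeychain",
  "credHelpers": {
    "registry.example.com": "ecr-login"
  }
}
```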
### Orchestrator options for docker stacks
|
||||
|
||||
The property `stackOrchestrator` specifies the default orchestrator to use when
|
||||
running `docker stack` management commands. Valid values are `"swarm"`,
|
||||
`"kubernetes"`, and `"all"`. This property can be overridden with the
|
||||
`DOCKER_STACK_ORCHESTRATOR` environment variable, or the `--orchestrator` flag.
|
||||
|
||||
|
||||
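For example, the following sketch of a `config.json` fragment makes Swarm the default orchestrator for `docker stack` commands:

```console
$ cat ~/.docker/config.json
{
  "stackOrchestrator": "swarm"
}
```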
### Automatic proxy configuration for containers
|
||||
|
||||
The property `proxies` specifies proxy environment variables to be automatically
|
||||
set on containers, and set as `--build-arg` on containers used during `docker build`.
|
||||
A `"default"` set of proxies can be configured, and will be used for any docker
|
||||
@ -233,15 +209,26 @@ daemon that the client connects to, or a configuration per host (docker daemon),
|
||||
for example, "https://docker-daemon1.example.com". The following properties can
|
||||
be set for each environment:
|
||||
|
||||
* `httpProxy` (sets the value of `HTTP_PROXY` and `http_proxy`)
|
||||
* `httpsProxy` (sets the value of `HTTPS_PROXY` and `https_proxy`)
|
||||
* `ftpProxy` (sets the value of `FTP_PROXY` and `ftp_proxy`)
|
||||
* `noProxy` (sets the value of `NO_PROXY` and `no_proxy`)
|
||||
| Property | Description |
|
||||
|:---------------|:--------------------------------------------------------------------------------------------------------|
|
||||
| `httpProxy` | Default value of `HTTP_PROXY` and `http_proxy` for containers, and as `--build-arg` on `docker build` |
|
||||
| `httpsProxy` | Default value of `HTTPS_PROXY` and `https_proxy` for containers, and as `--build-arg` on `docker build` |
|
||||
| `ftpProxy` | Default value of `FTP_PROXY` and `ftp_proxy` for containers, and as `--build-arg` on `docker build` |
|
||||
| `noProxy` | Default value of `NO_PROXY` and `no_proxy` for containers, and as `--build-arg` on `docker build` |
|
||||
|
||||
> **Warning**: Proxy settings may contain sensitive information (for example,
|
||||
> if the proxy requires authentication). Environment variables are stored as
|
||||
> plain text in the container's configuration, and as such can be inspected
|
||||
> through the remote API or committed to an image when using `docker commit`.
|
||||
These settings are used to configure proxy settings for containers only, and not
|
||||
used as proxy settings for the `docker` CLI or the `dockerd` daemon. Refer to the
|
||||
[environment variables](#environment-variables) and [HTTP/HTTPS proxy](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy)
|
||||
sections for configuring proxy settings for the cli and daemon.
|
||||
|
||||
> **Warning**
|
||||
>
|
||||
> Proxy settings may contain sensitive information (for example, if the proxy
|
||||
> requires authentication). Environment variables are stored as plain text in
|
||||
> the container's configuration, and as such can be inspected through the remote
|
||||
> API or committed to an image when using `docker commit`.
|
||||
|
||||
### Default key-sequence to detach from containers
|
||||
|
||||
Once attached to a container, users detach from it and leave it running using
|
||||
the `CTRL-p CTRL-q` key sequence. This detach key sequence is customizable
|
||||
@ -261,11 +248,17 @@ Users can override your custom or the default key sequence on a per-container
|
||||
basis. To do this, the user specifies the `--detach-keys` flag with the `docker
|
||||
attach`, `docker exec`, `docker run` or `docker start` command.
|
||||
|
||||
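For instance, a user could pick a different sequence for a single container (the sequence shown is only an example):

```console
$ docker run -it --detach-keys="ctrl-e,e" ubuntu bash
```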
### CLI Plugin options
|
||||
|
||||
The property `plugins` contains settings specific to CLI plugins. The
|
||||
key is the plugin name, while the value is a further map of options,
|
||||
which are specific to that plugin.
|
||||
|
||||
Following is a sample `config.json` file:
|
||||
|
||||
### Sample configuration file
|
||||
|
||||
Following is a sample `config.json` file to illustrate the format used for
|
||||
various fields:
|
||||
|
||||
```json
|
||||
{% raw %}
|
||||
@ -301,14 +294,14 @@ Following is a sample `config.json` file:
|
||||
"proxies": {
|
||||
"default": {
|
||||
"httpProxy": "http://user:pass@example.com:3128",
|
||||
"httpsProxy": "http://user:pass@example.com:3128",
|
||||
"noProxy": "http://user:pass@example.com:3128",
|
||||
"httpsProxy": "https://my-proxy.example.com:3129",
|
||||
"noProxy": "intra.mycorp.example.com",
|
||||
"ftpProxy": "http://user:pass@example.com:3128"
|
||||
},
|
||||
"https://manager1.mycorp.example.com:2377": {
|
||||
"httpProxy": "http://user:pass@example.com:3128",
|
||||
"httpsProxy": "http://user:pass@example.com:3128"
|
||||
},
|
||||
"httpsProxy": "https://my-proxy.example.com:3129"
|
||||
}
|
||||
}
|
||||
}
|
||||
{% endraw %}
|
||||
@ -320,6 +313,9 @@ Experimental features provide early access to future product functionality.
|
||||
These features are intended for testing and feedback, and they may change
|
||||
between releases without warning or can be removed from a future release.
|
||||
|
||||
Starting with Docker 20.10, experimental CLI features are enabled by default,
|
||||
and require no configuration to enable them.
|
||||
|
||||
### Notary
|
||||
|
||||
If using your own notary server and a self-signed certificate or an internal
|
||||
@ -336,16 +332,18 @@ list of root Certificate Authorities.
|
||||
To list the help on any command, just execute the command followed by the
|
||||
`--help` option.
|
||||
|
||||
$ docker run --help
|
||||
```console
|
||||
$ docker run --help
|
||||
|
||||
Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
|
||||
Usage: docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
|
||||
|
||||
Run a command in a new container
|
||||
Run a command in a new container
|
||||
|
||||
Options:
|
||||
--add-host value Add a custom host-to-IP mapping (host:ip) (default [])
|
||||
-a, --attach value Attach to STDIN, STDOUT or STDERR (default [])
|
||||
...
|
||||
Options:
|
||||
--add-host value Add a custom host-to-IP mapping (host:ip) (default [])
|
||||
-a, --attach value Attach to STDIN, STDOUT or STDERR (default [])
|
||||
<...>
|
||||
```
|
||||
|
||||
### Option types
|
||||
|
||||
@ -366,7 +364,7 @@ container **will** run in "detached" mode, in the background.
|
||||
Options which default to `true` (e.g., `docker build --rm=true`) can only be
|
||||
set to the non-default value by explicitly setting them to `false`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker build --rm=false .
|
||||
```
|
||||
|
||||
@ -375,7 +373,7 @@ $ docker build --rm=false .
|
||||
You can specify options like `-a=[]` multiple times in a single command line,
|
||||
for example in these commands:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker run -a stdin -a stdout -i -t ubuntu /bin/bash
|
||||
|
||||
$ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls
|
||||
@ -384,7 +382,7 @@ $ docker run -a stdin -a stdout -a stderr ubuntu /bin/ls
|
||||
Sometimes, multiple options can call for a more complex value string as for
|
||||
`-v`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker run -v /host:/container example/mysql
|
||||
```
|
||||
|
||||
|
||||
@ -43,7 +43,7 @@ created. Supported `Dockerfile` instructions:
|
||||
|
||||
### Commit a container
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker ps
|
||||
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
@ -62,7 +62,7 @@ svendowideit/testimage version3 f5283438590d 16 sec
|
||||
|
||||
### Commit a container with new configurations
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker ps
|
||||
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
@ -84,7 +84,7 @@ $ docker inspect -f "{{ .Config.Env }}" f5283438590d
|
||||
|
||||
### Commit a container with new `CMD` and `EXPOSE` instructions
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker ps
|
||||
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
|
||||
35
docs/reference/commandline/config.md
Normal file
@ -0,0 +1,35 @@
|
||||
---
|
||||
title: "config"
|
||||
description: "The config command description and usage"
|
||||
keywords: "config"
|
||||
---
|
||||
|
||||
# config
|
||||
|
||||
```markdown
|
||||
Usage: docker config COMMAND
|
||||
|
||||
Manage Docker configs
|
||||
|
||||
Options:
|
||||
--help Print usage
|
||||
|
||||
Commands:
|
||||
create Create a config from a file or STDIN
|
||||
inspect Display detailed information on one or more configs
|
||||
ls List configs
|
||||
rm Remove one or more configs
|
||||
|
||||
Run 'docker config COMMAND --help' for more information on a command.
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
Manage configs.
|
||||
|
||||
## Related commands
|
||||
|
||||
* [config create](config_create.md)
|
||||
* [config inspect](config_inspect.md)
|
||||
* [config list](config_ls.md)
|
||||
* [config rm](config_rm.md)
|
||||
99
docs/reference/commandline/config_create.md
Normal file
@ -0,0 +1,99 @@
|
||||
---
|
||||
title: "config create"
|
||||
description: "The config create command description and usage"
|
||||
keywords: ["config, create"]
|
||||
---
|
||||
|
||||
# config create
|
||||
|
||||
```Markdown
|
||||
Usage: docker config create [OPTIONS] CONFIG [file|-]
|
||||
|
||||
Create a config from a file or STDIN as content
|
||||
|
||||
Options:
|
||||
-l, --label list Config labels
|
||||
--template-driver string Template driver
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
Creates a config using standard input or from a file for the config content.
|
||||
|
||||
For detailed information about using configs, refer to [store configuration data using Docker Configs](https://docs.docker.com/engine/swarm/configs/).
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> This is a cluster management command, and must be executed on a swarm
|
||||
> manager node. To learn about managers and workers, refer to the
|
||||
> [Swarm mode section](https://docs.docker.com/engine/swarm/) in the
|
||||
> documentation.
|
||||
|
||||
## Examples
|
||||
|
||||
### Create a config
|
||||
|
||||
```console
|
||||
$ printf <config> | docker config create my_config -
|
||||
|
||||
onakdyv307se2tl7nl20anokv
|
||||
|
||||
$ docker config ls
|
||||
|
||||
ID NAME CREATED UPDATED
|
||||
onakdyv307se2tl7nl20anokv my_config 6 seconds ago 6 seconds ago
|
||||
```
|
||||
|
||||
### Create a config with a file
|
||||
|
||||
```console
|
||||
$ docker config create my_config ./config.json
|
||||
|
||||
dg426haahpi5ezmkkj5kyl3sn
|
||||
|
||||
$ docker config ls
|
||||
|
||||
ID NAME CREATED UPDATED
|
||||
dg426haahpi5ezmkkj5kyl3sn my_config 7 seconds ago 7 seconds ago
|
||||
```
|
||||
|
||||
### Create a config with labels
|
||||
|
||||
```console
|
||||
$ docker config create \
|
||||
--label env=dev \
|
||||
--label rev=20170324 \
|
||||
my_config ./config.json
|
||||
|
||||
eo7jnzguqgtpdah3cm5srfb97
|
||||
```
|
||||
|
||||
```console
|
||||
$ docker config inspect my_config
|
||||
|
||||
[
|
||||
{
|
||||
"ID": "eo7jnzguqgtpdah3cm5srfb97",
|
||||
"Version": {
|
||||
"Index": 17
|
||||
},
|
||||
"CreatedAt": "2017-03-24T08:15:09.735271783Z",
|
||||
"UpdatedAt": "2017-03-24T08:15:09.735271783Z",
|
||||
"Spec": {
|
||||
"Name": "my_config",
|
||||
"Labels": {
|
||||
"env": "dev",
|
||||
"rev": "20170324"
|
||||
},
|
||||
"Data": "aGVsbG8K"
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
|
||||
## Related commands
|
||||
|
||||
* [config inspect](config_inspect.md)
|
||||
* [config ls](config_ls.md)
|
||||
* [config rm](config_rm.md)
|
||||
97
docs/reference/commandline/config_inspect.md
Normal file
@ -0,0 +1,97 @@
|
||||
---
|
||||
title: "config inspect"
|
||||
description: "The config inspect command description and usage"
|
||||
keywords: ["config, inspect"]
|
||||
---
|
||||
|
||||
# config inspect
|
||||
|
||||
```Markdown
|
||||
Usage: docker config inspect [OPTIONS] CONFIG [CONFIG...]
|
||||
|
||||
Display detailed information on one or more configs
|
||||
|
||||
Options:
|
||||
-f, --format string Format the output using the given Go template
|
||||
--help Print usage
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
Inspects the specified config.
|
||||
|
||||
By default, this renders all results in a JSON array. If a format is specified,
|
||||
the given template will be executed for each result.
|
||||
|
||||
Go's [text/template](https://golang.org/pkg/text/template/) package
|
||||
describes all the details of the format.
|
||||
|
||||
For detailed information about using configs, refer to [store configuration data using Docker Configs](https://docs.docker.com/engine/swarm/configs/).
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> This is a cluster management command, and must be executed on a swarm
|
||||
> manager node. To learn about managers and workers, refer to the
|
||||
> [Swarm mode section](https://docs.docker.com/engine/swarm/) in the
|
||||
> documentation.
|
||||
|
||||
## Examples
|
||||
|
||||
### Inspect a config by name or ID
|
||||
|
||||
You can inspect a config, either by its *name* or *ID*.
|
||||
|
||||
For example, given the following config:
|
||||
|
||||
```console
|
||||
$ docker config ls
|
||||
|
||||
ID NAME CREATED UPDATED
|
||||
eo7jnzguqgtpdah3cm5srfb97 my_config 3 minutes ago 3 minutes ago
|
||||
```
|
||||
|
||||
```console
|
||||
$ docker config inspect my_config
|
||||
```
|
||||
|
||||
The output is in JSON format, for example:
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"ID": "eo7jnzguqgtpdah3cm5srfb97",
|
||||
"Version": {
|
||||
"Index": 17
|
||||
},
|
||||
"CreatedAt": "2017-03-24T08:15:09.735271783Z",
|
||||
"UpdatedAt": "2017-03-24T08:15:09.735271783Z",
|
||||
"Spec": {
|
||||
"Name": "my_config",
|
||||
"Labels": {
|
||||
"env": "dev",
|
||||
"rev": "20170324"
|
||||
},
|
||||
"Data": "aGVsbG8K"
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### Formatting
|
||||
|
||||
You can use the `--format` option to obtain specific information about a
|
||||
config. The following example command outputs the creation time of the
|
||||
config.
|
||||
|
||||
```console
|
||||
$ docker config inspect --format='{{.CreatedAt}}' eo7jnzguqgtpdah3cm5srfb97
|
||||
|
||||
2017-03-24 08:15:09.735271783 +0000 UTC
|
||||
```
|
||||
|
||||
|
||||
## Related commands
|
||||
|
||||
* [config create](config_create.md)
|
||||
* [config ls](config_ls.md)
|
||||
* [config rm](config_rm.md)
|
||||
155
docs/reference/commandline/config_ls.md
Normal file
@ -0,0 +1,155 @@
|
||||
---
|
||||
title: "config ls"
|
||||
description: "The config ls command description and usage"
|
||||
keywords: ["config, ls"]
|
||||
---
|
||||
|
||||
# config ls
|
||||
|
||||
```Markdown
|
||||
Usage: docker config ls [OPTIONS]
|
||||
|
||||
List configs
|
||||
|
||||
Aliases:
|
||||
ls, list
|
||||
|
||||
Options:
|
||||
-f, --filter filter Filter output based on conditions provided
|
||||
--format string Pretty-print configs using a Go template
|
||||
--help Print usage
|
||||
-q, --quiet Only display IDs
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
Run this command on a manager node to list the configs in the swarm.
|
||||
|
||||
For detailed information about using configs, refer to [store configuration data using Docker Configs](https://docs.docker.com/engine/swarm/configs/).
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> This is a cluster management command, and must be executed on a swarm
|
||||
> manager node. To learn about managers and workers, refer to the
|
||||
> [Swarm mode section](https://docs.docker.com/engine/swarm/) in the
|
||||
> documentation.
|
||||
|
||||
## Examples
|
||||
|
||||
```console
|
||||
$ docker config ls
|
||||
|
||||
ID NAME CREATED UPDATED
|
||||
6697bflskwj1998km1gnnjr38 q5s5570vtvnimefos1fyeo2u2 6 weeks ago 6 weeks ago
|
||||
9u9hk4br2ej0wgngkga6rp4hq my_config 5 weeks ago 5 weeks ago
|
||||
mem02h8n73mybpgqjf0kfi1n0 test_config 3 seconds ago 3 seconds ago
|
||||
```
|
||||
|
||||
### Filtering
|
||||
|
||||
The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there is more
|
||||
than one filter, then pass multiple flags (e.g., `--filter "foo=bar" --filter "bif=baz"`).
|
||||
|
||||
The currently supported filters are:
|
||||
|
||||
- [id](#id) (config's ID)
|
||||
- [label](#label) (`label=<key>` or `label=<key>=<value>`)
|
||||
- [name](#name) (config's name)
|
||||
|
||||
#### id
|
||||
|
||||
The `id` filter matches all or a prefix of a config's ID.
|
||||
|
||||
```console
|
||||
$ docker config ls -f "id=6697bflskwj1998km1gnnjr38"
|
||||
|
||||
ID NAME CREATED UPDATED
|
||||
6697bflskwj1998km1gnnjr38 q5s5570vtvnimefos1fyeo2u2 6 weeks ago 6 weeks ago
|
||||
```
|
||||
|
||||
#### label
|
||||
|
||||
The `label` filter matches configs based on the presence of a `label` alone or
|
||||
a `label` and a value.
|
||||
|
||||
The following filter matches all configs with a `project` label regardless of
|
||||
its value:
|
||||
|
||||
```console
|
||||
$ docker config ls --filter label=project
|
||||
|
||||
ID NAME CREATED UPDATED
|
||||
mem02h8n73mybpgqjf0kfi1n0 test_config About an hour ago About an hour ago
|
||||
```
|
||||
|
||||
The following filter matches only configs with the `project` label set to the
|
||||
`test` value.
|
||||
|
||||
```console
|
||||
$ docker config ls --filter label=project=test
|
||||
|
||||
ID NAME CREATED UPDATED
|
||||
mem02h8n73mybpgqjf0kfi1n0 test_config About an hour ago About an hour ago
|
||||
```
|
||||
|
||||
#### name
|
||||
|
||||
The `name` filter matches on all or a prefix of a config's name.
|
||||
|
||||
The following filter matches configs with a name containing the prefix `test`.
|
||||
|
||||
```console
|
||||
$ docker config ls --filter name=test_config
|
||||
|
||||
ID NAME CREATED UPDATED
|
||||
mem02h8n73mybpgqjf0kfi1n0 test_config About an hour ago About an hour ago
|
||||
```
|
||||
|
||||
### Format the output
|
||||
|
||||
The formatting option (`--format`) pretty prints configs output
|
||||
using a Go template.
|
||||
|
||||
Valid placeholders for the Go template are listed below:
|
||||
|
||||
| Placeholder | Description |
|
||||
| ------------ | ------------------------------------------------------------------------------------ |
|
||||
| `.ID` | Config ID |
|
||||
| `.Name` | Config name |
|
||||
| `.CreatedAt` | Time when the config was created |
|
||||
| `.UpdatedAt` | Time when the config was updated |
|
||||
| `.Labels` | All labels assigned to the config |
|
||||
| `.Label` | Value of a specific label for this config. For example `{{.Label "my-label"}}` |
|
||||
|
||||
When using the `--format` option, the `config ls` command will either
|
||||
output the data exactly as the template declares or, when using the
|
||||
`table` directive, will include column headers as well.
|
||||
|
||||
The following example uses a template without headers and outputs the
|
||||
`ID` and `Name` entries separated by a colon (`:`) for all configs:
|
||||
|
||||
```console
|
||||
$ docker config ls --format "{{.ID}}: {{.Name}}"
|
||||
|
||||
77af4d6b9913: config-1
|
||||
b6fa739cedf5: config-2
|
||||
78a85c484f71: config-3
|
||||
```
|
||||
|
||||
To list all configs with their name and created date in a table format you
|
||||
can use:
|
||||
|
||||
```console
|
||||
$ docker config ls --format "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}"
|
||||
|
||||
ID NAME CREATED
|
||||
77af4d6b9913 config-1 5 minutes ago
|
||||
b6fa739cedf5 config-2 3 hours ago
|
||||
78a85c484f71 config-3 10 days ago
|
||||
```
|
||||
|
||||
## Related commands
|
||||
|
||||
* [config create](config_create.md)
|
||||
* [config inspect](config_inspect.md)
|
||||
* [config rm](config_rm.md)
|
||||
53
docs/reference/commandline/config_rm.md
Normal file
@ -0,0 +1,53 @@
|
||||
---
|
||||
title: "config rm"
|
||||
description: "The config rm command description and usage"
|
||||
keywords: ["config, rm"]
|
||||
---
|
||||
|
||||
# config rm
|
||||
|
||||
```Markdown
|
||||
Usage: docker config rm CONFIG [CONFIG...]
|
||||
|
||||
Remove one or more configs
|
||||
|
||||
Aliases:
|
||||
rm, remove
|
||||
|
||||
Options:
|
||||
--help Print usage
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
Removes the specified configs from the swarm.
|
||||
|
||||
For detailed information about using configs, refer to [store configuration data using Docker Configs](https://docs.docker.com/engine/swarm/configs/).
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> This is a cluster management command, and must be executed on a swarm
|
||||
> manager node. To learn about managers and workers, refer to the
|
||||
> [Swarm mode section](https://docs.docker.com/engine/swarm/) in the
|
||||
> documentation.
|
||||
|
||||
## Examples
|
||||
|
||||
This example removes a config:
|
||||
|
||||
```console
|
||||
$ docker config rm my_config
|
||||
sapth4csdo5b6wz2p5uimh5xg
|
||||
```
|
||||
|
||||
> **Warning**
|
||||
>
|
||||
> Unlike `docker rm`, this command does not ask for confirmation before removing
|
||||
> a config.
|
||||
|
||||
|
||||
## Related commands
|
||||
|
||||
* [config create](config_create.md)
|
||||
* [config inspect](config_inspect.md)
|
||||
* [config ls](config_ls.md)
|
||||
@ -26,7 +26,7 @@ Removes all stopped containers.
|
||||
|
||||
### Prune containers
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker container prune
|
||||
WARNING! This will remove all stopped containers.
|
||||
Are you sure you want to continue? [y/N] y
|
||||
@ -66,7 +66,7 @@ containers without the specified labels.
|
||||
|
||||
The following removes containers created more than 5 minutes ago:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}'
|
||||
|
||||
CONTAINER ID IMAGE COMMAND CREATED AT STATUS
|
||||
@ -88,7 +88,7 @@ CONTAINER ID IMAGE COMMAND CREATED AT
|
||||
|
||||
The following removes containers created before `2017-01-04T13:10:00`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker ps -a --format 'table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.CreatedAt}}\t{{.Status}}'
|
||||
|
||||
CONTAINER ID IMAGE COMMAND CREATED AT STATUS
|
||||
|
||||
@ -62,7 +62,7 @@ kubernetes options. The example below creates the context `my-context`
|
||||
with a docker endpoint of `/var/run/docker.sock` and a kubernetes configuration
|
||||
sourced from the file `/home/me/my-kube-config`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker context create \
|
||||
--docker host=unix:///var/run/docker.sock \
|
||||
--kubernetes config-file=/home/me/my-kube-config \
|
||||
@ -75,19 +75,19 @@ Use the `--from=<context-name>` option to create a new context from
|
||||
an existing context. The example below creates a new context named `my-context`
|
||||
from the existing context `existing-context`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker context create --from existing-context my-context
|
||||
```
|
||||
|
||||
If the `--from` option is not set, the `context` is created from the current context:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker context create my-context
|
||||
```
|
||||
|
||||
This can be used to create a context out of an existing `DOCKER_HOST` based script:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ source my-setup-script.sh
|
||||
$ docker context create my-context
|
||||
```
|
||||
@ -98,7 +98,7 @@ new context named `my-context` using the docker endpoint configuration from
|
||||
the existing context `existing-context` and a kubernetes configuration sourced
|
||||
from the file `/home/me/my-kube-config`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker context create \
|
||||
--docker from=existing-context \
|
||||
--kubernetes config-file=/home/me/my-kube-config \
|
||||
@ -110,7 +110,7 @@ To source only the `kubernetes` configuration from an existing context use the
|
||||
context named `my-context` using the kubernetes configuration from the existing
|
||||
context `existing-context` and a docker endpoint of `/var/run/docker.sock`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker context create \
|
||||
--docker host=unix:///var/run/docker.sock \
|
||||
--kubernetes from=existing-context \
|
||||
|
||||
@ -23,7 +23,7 @@ Inspects one or more contexts.
|
||||
|
||||
### Inspect a context by name
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker context inspect "local+aks"
|
||||
|
||||
[
|
||||
|
||||
@ -25,7 +25,9 @@ Options:
|
||||
Use `docker context ls` to print all contexts. The currently active context is
|
||||
indicated with an `*`:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker context ls
|
||||
|
||||
NAME DESCRIPTION DOCKER ENDPOINT KUBERNETES ENDPOINT ORCHESTRATOR
|
||||
default * Current DOCKER_HOST based configuration unix:///var/run/docker.sock swarm
|
||||
production tcp:///prod.corp.example.com:2376
|
||||
|
||||
@ -54,7 +54,7 @@ See [context create](context_create.md).
|
||||
|
||||
### Update an existing context
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker context update \
|
||||
--description "some description" \
|
||||
--docker "host=tcp://myserver:2376,ca=~/ca-file,cert=~/cert-file,key=~/key-file" \
|
||||
|
||||
@ -95,11 +95,11 @@ the user in the container. However, you can still copy such files by manually
|
||||
running `tar` in `docker exec`. Both of the following examples do the same thing
|
||||
in different ways (consider `SRC_PATH` and `DEST_PATH` are directories):
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker exec CONTAINER tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH -
|
||||
```
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i CONTAINER tar Cxf DEST_PATH -
|
||||
```
|
||||
|
||||
|
||||
@ -109,7 +109,7 @@ Options:
|
||||
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes),
|
||||
or `g` (gigabytes). If you omit the unit, the system uses bytes.
|
||||
--stop-signal string Signal to stop a container (default "SIGTERM")
|
||||
--stop-timeout=10 Timeout (in seconds) to stop a container
|
||||
--stop-timeout int Timeout (in seconds) to stop a container
|
||||
--storage-opt value Storage driver options for the container (default [])
|
||||
--sysctl value Sysctl options (default map[])
|
||||
--tmpfs value Mount a tmpfs directory (default [])
|
||||
@ -131,6 +131,7 @@ Options:
|
||||
--volumes-from value Mount volumes from the specified container(s) (default [])
|
||||
-w, --workdir string Working directory inside the container
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
The `docker create` command creates a writeable container layer over the
|
||||
@ -149,7 +150,7 @@ Please see the [run command](run.md) section and the [Docker run reference](../r
|
||||
|
||||
### Create and start a container
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker create -t -i fedora bash
|
||||
|
||||
6d8af538ec541dd581ebc2a24153a28329acb5268abe5ef868c1f1a261221752
|
||||
@ -165,7 +166,7 @@ As of v1.4.0 container volumes are initialized during the `docker create` phase
|
||||
(i.e., `docker run` too). For example, this allows you to `create` the `data`
|
||||
volume container, and then use it from another container:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker create -v /data --name data ubuntu
|
||||
|
||||
240633dfbb98128fa77473d3d9018f6123b99c454b3251427ae190a7d951ad57
|
||||
@ -180,7 +181,7 @@ drwxr-xr-x 48 root root 4096 Dec 5 04:11 ..
|
||||
Similarly, `create` a host directory bind mounted volume container, which can
|
||||
then be used from the subsequent container:
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker create -v /home/docker:/docker --name docker ubuntu
|
||||
|
||||
9aa88c08f319cd1e4515c3c46b0de7cc9aa75e878357b1e96f91e2c773029f03
|
||||
@ -202,7 +203,7 @@ drwxr-xr-x 32 1000 staff 1140 Dec 5 04:01 docker
|
||||
|
||||
Set storage driver options per container.
|
||||
|
||||
```bash
|
||||
```console
|
||||
$ docker create -it --storage-opt size=120G fedora /bin/bash
|
||||
```
|
||||
|
||||
@ -223,12 +224,11 @@ technology. On Linux, the only supported is the `default` option which uses
|
||||
Linux namespaces. On Microsoft Windows, you can specify these values:
|
||||
|
||||
|
||||
| Value | Description |
|
||||
|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value if the
|
||||
daemon is running on Windows server, or `hyperv` if running on Windows client. |
|
||||
| `process` | Namespace isolation only. |
|
||||
| `hyperv` | Hyper-V hypervisor partition-based isolation. |
|
||||
| Value | Description |
|
||||
| --------- | ------------------------------------------------------------ |
|
||||
| `default` | Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value if the daemon is running on Windows server, or `hyperv` if running on Windows client. |
|
||||
| `process` | Namespace isolation only. |
|
||||
| `hyperv` | Hyper-V hypervisor partition-based isolation. |
|
||||
|
||||
Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`.
|
||||
|
||||
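For example, on a Windows daemon the isolation mode could be selected explicitly when creating a container; the image name below is illustrative:

```console
$ docker create -it --isolation=process --name win-test mcr.microsoft.com/windows/servercore cmd
```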
@ -239,14 +239,14 @@ assigned devices will both be added to the cgroup.allow file and
|
||||
created into the container once it is run. This poses a problem when
|
||||
a new device needs to be added to a running container.
|
||||
|
||||
One of the solution is to add a more permissive rule to a container
|
||||
One of the solutions is to add a more permissive rule to a container
|
||||
allowing it access to a wider range of devices. For example, supposing
|
||||
our container needs access to a character device with major `42` and
|
||||
any minor number (added as new devices appear), the
|
||||
following rule would be added:
|
||||
|
||||
```
|
||||
docker create --device-cgroup-rule='c 42:* rmw' -name my-container my-image
|
||||
```console
|
||||
$ docker create --device-cgroup-rule='c 42:* rmw' --name my-container my-image
|
||||
```
|
||||
|
||||
Then, a user could ask `udev` to execute a script that would `docker exec my-container mknod newDevX c 42 <minor>`
|
||||
|
||||
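A sketch of what such a hook might run, assuming the new device appeared with minor number `1` (both the device path and the minor number are placeholders):

```console
$ docker exec my-container mknod /dev/newDev1 c 42 1
```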
Some files were not shown because too many files have changed in this diff.