Merge component 'engine' from git@github.com:docker/engine 18.09
@@ -77,7 +77,7 @@ RUN set -x \

 FROM base AS docker-py
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT 8b246db271a85d6541dc458838627e89c683e42f
+ENV DOCKER_PY_COMMIT ac922192959870774ad8428344d9faa0555f7ba6
 RUN git clone https://github.com/docker/docker-py.git /build \
     && cd /build \
     && git checkout -q $DOCKER_PY_COMMIT
@@ -187,6 +187,9 @@ RUN apt-get update && apt-get install -y \
     jq \
     libcap2-bin \
     libdevmapper-dev \
+    # libffi-dev and libssl-dev appear to be required for compiling paramiko on s390x/ppc64le
+    libffi-dev \
+    libssl-dev \
     libudev-dev \
     libsystemd-dev \
     binutils-mingw-w64 \
@@ -195,6 +198,8 @@ RUN apt-get update && apt-get install -y \
     pigz \
     python-backports.ssl-match-hostname \
     python-dev \
+    # python-cffi appears to be required for compiling paramiko on s390x/ppc64le
+    python-cffi \
    python-mock \
    python-pip \
    python-requests \
@@ -227,7 +232,8 @@ COPY --from=docker-py /build/ /docker-py
 # split out into a separate image, including all the `python-*` deps installed
 # above.
 RUN cd /docker-py \
-    && pip install docker-pycreds==0.2.1 \
+    && pip install docker-pycreds==0.4.0 \
+    && pip install paramiko==2.4.2 \
     && pip install yamllint==1.5.0 \
     && pip install -r test-requirements.txt
@@ -239,5 +245,7 @@ WORKDIR /go/src/github.com/docker/docker
 VOLUME /var/lib/docker
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]

+FROM dev AS final
 # Upload docker source
 COPY . /go/src/github.com/docker/docker
@@ -1,10 +1,8 @@
-.PHONY: all binary dynbinary build cross help init-go-pkg-cache install manpages run shell test test-docker-py test-integration test-unit validate win
+.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate win

 # set the graph driver as the current graphdriver if not set
 DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
 export DOCKER_GRAPHDRIVER
-DOCKER_INCREMENTAL_BINARY := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_INCREMENTAL_BINARY),1)
-export DOCKER_INCREMENTAL_BINARY

 # get OS/Arch of docker engine
 DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}')
@@ -36,6 +34,7 @@ DOCKER_ENVS := \
     -e KEEPBUNDLE \
     -e DOCKER_BUILD_ARGS \
     -e DOCKER_BUILD_GOGC \
     -e DOCKER_BUILD_OPTS \
     -e DOCKER_BUILD_PKGS \
+    -e DOCKER_BUILDKIT \
     -e DOCKER_BASH_COMPLETION_PATH \
@@ -44,7 +43,6 @@ DOCKER_ENVS := \
     -e DOCKER_EXPERIMENTAL \
     -e DOCKER_GITCOMMIT \
     -e DOCKER_GRAPHDRIVER \
-    -e DOCKER_INCREMENTAL_BINARY \
     -e DOCKER_LDFLAGS \
     -e DOCKER_PORT \
     -e DOCKER_REMAP_ROOT \
@@ -74,6 +72,9 @@ DOCKER_ENVS := \
 # (default to no bind mount if DOCKER_HOST is set)
 # note: BINDDIR is supported for backwards-compatibility here
 BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))

+# DOCKER_MOUNT can be overriden, but use at your own risk!
+ifndef DOCKER_MOUNT
 DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")

 # This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs.
@@ -81,17 +82,14 @@ DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/do
 # Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set.
 DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) -v "$(CURDIR)/.git:/go/src/github.com/docker/docker/.git"

-# This allows to set the docker-dev container name
-DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)
-
-# enable package cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e.DOCKER_HOST) are set
-PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64 goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo
-PKGCACHE_VOLROOT := dockerdev-go-pkg-cache
-PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-)
-DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),)
+DOCKER_MOUNT_CACHE := -v docker-dev-cache:/root/.cache
 DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
 DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,)
-DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)
+DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_CACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)
+endif # ifndef DOCKER_MOUNT
+
+# This allows to set the docker-dev container name
+DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)

 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
 GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
@@ -119,6 +117,9 @@ INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0)
 ifeq ($(INTERACTIVE), 1)
 	DOCKER_FLAGS += -t
 endif
+ifeq ($(BIND_DIR), .)
+	DOCKER_BUILD_OPTS += --target=dev
+endif

 DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
@@ -133,28 +134,26 @@ binary: build ## build the linux binaries
 dynbinary: build ## build the linux dynbinaries
 	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary

-build: bundles init-go-pkg-cache
+build: DOCKER_BUILDKIT ?= 1
+build: bundles
 	$(warning The docker client CLI has moved to github.com/docker/cli. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n})
-	docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .
+	DOCKER_BUILDKIT="${DOCKER_BUILDKIT}" docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .

 bundles:
 	mkdir bundles

-clean: clean-pkg-cache-vol ## clean up cached resources
+.PHONY: clean
+clean: clean-cache

-clean-pkg-cache-vol:
-	@- $(foreach mapping,$(PKGCACHE_MAP), \
-		$(shell docker volume rm $(PKGCACHE_VOLROOT)-$(shell echo $(mapping) | awk -F':/' '{ print $$1 }') > /dev/null 2>&1) \
-	)
+.PHONY: clean-cache
+clean-cache:
+	docker volume rm -f docker-dev-cache

 cross: build ## cross build the binaries for darwin, freebsd and\nwindows
 	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross

 help: ## this help
-	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
-
-init-go-pkg-cache:
-	$(if $(PKGCACHE_DIR), mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g'))
+	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

 install: ## install the linux binaries
 	KEEPBUNDLE=1 hack/make.sh install-binary
@@ -176,6 +175,9 @@ test-integration-cli: test-integration ## (DEPRECATED) use test-integration
 test-integration: build ## run the integration tests
 	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration

+test-integration-flaky: build ## run the stress test for all new integration tests
+	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-flaky
+
 test-unit: build ## run the unit tests
 	$(DOCKER_RUN_DOCKER) hack/test/unit
@@ -206,12 +208,11 @@ build-integration-cli-on-swarm: build ## build images and binary for running int
 	go build -buildmode=pie -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host
 	@echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)"
 	docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent
 # For worker, we don't use `docker build` so as to enable DOCKER_INCREMENTAL_BINARY and so on
 	@echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)"
 	$(eval tmp := integration-cli-worker-tmp)
-	# We mount pkgcache, but not bundle (bundle needs to be baked into the image)
 	# For avoiding bakings DOCKER_GRAPHDRIVER and so on to image, we cannot use $(DOCKER_ENVS) here
-	docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS -e DOCKER_INCREMENTAL_BINARY --privileged $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_IMAGE) top
+	docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS --privileged $(DOCKER_IMAGE) top
 	docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary
 	docker exec $(tmp) go build -buildmode=pie -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker
 	docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE)
@@ -8,6 +8,7 @@ import (
 	"io/ioutil"
 	"runtime"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/containerd/containerd/content"
@@ -57,13 +58,15 @@ type SourceOpt struct {

 type imageSource struct {
 	SourceOpt
-	g flightcontrol.Group
+	g             flightcontrol.Group
+	resolverCache *resolverCache
 }

 // NewSource creates a new image source
 func NewSource(opt SourceOpt) (source.Source, error) {
 	is := &imageSource{
-		SourceOpt: opt,
+		SourceOpt:     opt,
+		resolverCache: newResolverCache(),
 	}

 	return is, nil
@@ -74,6 +77,9 @@ func (is *imageSource) ID() string {
 }

 func (is *imageSource) getResolver(ctx context.Context, rfn resolver.ResolveOptionsFunc, ref string) remotes.Resolver {
+	if res := is.resolverCache.Get(ctx, ref); res != nil {
+		return res
+	}
 	opt := docker.ResolverOptions{
 		Client: tracing.DefaultClient,
 	}
@@ -82,6 +88,7 @@ func (is *imageSource) getResolver(ctx context.Context, rfn resolver.ResolveOpti
 	}
 	opt.Credentials = is.getCredentialsFromSession(ctx)
 	r := docker.NewResolver(opt)
+	r = is.resolverCache.Add(ctx, ref, r)
 	return r
 }
@@ -380,6 +387,11 @@ func (p *puller) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
 		return nil, err
 	}

+	// workaround for GCR bug that requires a request to manifest endpoint for authentication to work.
+	// if current resolver has not used manifests do a dummy request.
+	// in most cases resolver should be cached and extra request is not needed.
+	ensureManifestRequested(ctx, p.resolver, p.ref)
+
 	var (
 		schema1Converter *schema1.Converter
 		handlers         []images.Handler
@@ -791,3 +803,90 @@ func resolveModeToString(rm source.ResolveMode) string {
 	}
 	return ""
 }
+
+type resolverCache struct {
+	mu sync.Mutex
+	m  map[string]cachedResolver
+}
+
+type cachedResolver struct {
+	timeout time.Time
+	remotes.Resolver
+	counter int64
+}
+
+func (cr *cachedResolver) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
+	atomic.AddInt64(&cr.counter, 1)
+	return cr.Resolver.Resolve(ctx, ref)
+}
+
+func (r *resolverCache) Add(ctx context.Context, ref string, resolver remotes.Resolver) remotes.Resolver {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	ref = r.repo(ref) + "-" + session.FromContext(ctx)
+
+	cr, ok := r.m[ref]
+	cr.timeout = time.Now().Add(time.Minute)
+	if ok {
+		return &cr
+	}
+
+	cr.Resolver = resolver
+	r.m[ref] = cr
+	return &cr
+}
+
+func (r *resolverCache) repo(refStr string) string {
+	ref, err := distreference.ParseNormalizedNamed(refStr)
+	if err != nil {
+		return refStr
+	}
+	return ref.Name()
+}
+
+func (r *resolverCache) Get(ctx context.Context, ref string) remotes.Resolver {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	ref = r.repo(ref) + "-" + session.FromContext(ctx)
+
+	cr, ok := r.m[ref]
+	if !ok {
+		return nil
+	}
+	return &cr
+}
+
+func (r *resolverCache) clean(now time.Time) {
+	r.mu.Lock()
+	for k, cr := range r.m {
+		if now.After(cr.timeout) {
+			delete(r.m, k)
+		}
+	}
+	r.mu.Unlock()
+}
+
+func newResolverCache() *resolverCache {
+	rc := &resolverCache{
+		m: map[string]cachedResolver{},
+	}
+	t := time.NewTicker(time.Minute)
+	go func() {
+		for {
+			rc.clean(<-t.C)
+		}
+	}()
+	return rc
+}
+
+func ensureManifestRequested(ctx context.Context, res remotes.Resolver, ref string) {
+	cr, ok := res.(*cachedResolver)
+	if !ok {
+		return
+	}
+	if atomic.LoadInt64(&cr.counter) == 0 {
+		res.Resolve(ctx, ref)
+	}
+}
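The cache added above keys resolvers by repository name plus build-session ID, refreshes a one-minute deadline on every Add/Get, and evicts stale entries from a ticker goroutine. A minimal sketch of that TTL-refresh pattern, reduced to plain strings (the keys and session suffixes below are illustrative, not taken from the engine):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlCache mirrors resolverCache's bookkeeping: every access pushes the
// entry's deadline out, and clean drops entries whose deadline has passed.
type ttlCache struct {
	mu  sync.Mutex
	m   map[string]time.Time // key -> expiry deadline
	ttl time.Duration
}

func (c *ttlCache) touch(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[key] = time.Now().Add(c.ttl)
}

func (c *ttlCache) clean(now time.Time) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for k, deadline := range c.m {
		if now.After(deadline) {
			delete(c.m, k)
		}
	}
}

func main() {
	c := &ttlCache{m: map[string]time.Time{}, ttl: time.Minute}
	// resolverCache keys on repository plus session ID, so the same repo
	// used in two build sessions gets two independent entries.
	c.touch("docker.io/library/alpine-s1")
	c.touch("docker.io/library/alpine-s2")

	c.clean(time.Now())                      // nothing expired yet
	fmt.Println(len(c.m))                    // 2
	c.clean(time.Now().Add(2 * time.Minute)) // simulate the ticker firing later
	fmt.Println(len(c.m))                    // 0
}
```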
@@ -12,6 +12,7 @@ import (
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builder/dockerignore"
+	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/moby/buildkit/frontend/dockerfile/parser"
@@ -34,8 +35,9 @@ func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *pars
 	case remoteURL == ClientSessionRemote:
 		res, err := parser.Parse(config.Source)
 		if err != nil {
-			return nil, nil, err
+			return nil, nil, errdefs.InvalidParameter(err)
 		}
+
 		return nil, res, nil
 	case urlutil.IsGitURL(remoteURL):
 		remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath)
@@ -106,7 +108,7 @@ func newURLRemote(url string, dockerfilePath string, progressReader func(in io.R
 	switch contentType {
 	case mimeTypes.TextPlain:
 		res, err := parser.Parse(progressReader(content))
-		return nil, res, err
+		return nil, res, errdefs.InvalidParameter(err)
 	default:
 		source, err := FromArchive(progressReader(content))
 		if err != nil {
@@ -146,11 +148,17 @@ func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) {
 	br := bufio.NewReader(rc)
 	if _, err := br.Peek(1); err != nil {
 		if err == io.EOF {
-			return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name)
+			return nil, errdefs.InvalidParameter(errors.Errorf("the Dockerfile (%s) cannot be empty", name))
 		}
 		return nil, errors.Wrap(err, "unexpected error reading Dockerfile")
 	}
-	return parser.Parse(br)
+
+	dockerfile, err := parser.Parse(br)
+	if err != nil {
+		return nil, errdefs.InvalidParameter(errors.Wrapf(err, "failed to parse %s", name))
+	}
+
+	return dockerfile, nil
 }

 func openAt(remote builder.Source, path string) (driver.File, error) {
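These `errdefs.InvalidParameter` wrappings matter because the daemon's HTTP layer maps errdefs classes to status codes, so a malformed Dockerfile now surfaces as a client error (400) rather than a server error. A short sketch of how a caller checks the classification; the wrapped message is just an example:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/docker/docker/errdefs"
)

func main() {
	// Wrap a plain error the way Detect now does for parse failures.
	err := errdefs.InvalidParameter(errors.New("the Dockerfile (Dockerfile) cannot be empty"))

	// Callers can branch on the class instead of matching error strings.
	fmt.Println(errdefs.IsInvalidParameter(err)) // true
}
```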
@@ -136,7 +136,7 @@ func (container *Container) CopyImagePathContent(v volume.Volume, destination st
 		return err
 	}

-	id := stringid.GenerateNonCryptoID()
+	id := stringid.GenerateRandomID()
 	path, err := v.Mount(id)
 	if err != nil {
 		return err
@@ -6,7 +6,6 @@ import (
 	"net"
 	"strconv"
 	"strings"
-	"time"

 	"github.com/sirupsen/logrus"
@@ -31,10 +30,6 @@ import (
 )

 const (
-	// Explicitly use the kernel's default setting for CPU quota of 100ms.
-	// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
-	cpuQuotaPeriod = 100 * time.Millisecond
-
 	// systemLabelPrefix represents the reserved namespace for system labels.
 	systemLabelPrefix = "com.docker.swarm"
 )
@@ -448,9 +443,7 @@ func (c *containerConfig) resources() enginecontainer.Resources {
 	}

 	if r.Limits.NanoCPUs > 0 {
-		// CPU Period must be set in microseconds.
-		resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond)
-		resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9
+		resources.NanoCPUs = r.Limits.NanoCPUs
 	}

 	return resources
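The deleted arithmetic deserves a worked example: under the old code a service limit of 2.5 CPUs (NanoCPUs = 2500000000) became CPUPeriod = 100000µs and CPUQuota = 250000µs, while the new code hands NanoCPUs to the engine to do the same conversion in one place. A small sketch reproducing the removed computation with those values:

```go
package main

import (
	"fmt"
	"time"
)

// Reproduces the deleted conversion so the equivalence is visible.
const cpuQuotaPeriod = 100 * time.Millisecond // kernel's default CFS period

func main() {
	nanoCPUs := int64(2500000000) // a 2.5-CPU limit

	period := int64(cpuQuotaPeriod / time.Microsecond) // 100000 µs
	quota := nanoCPUs * period / 1e9                   // 250000 µs

	fmt.Println(period, quota) // 100000 250000 -> quota/period = 2.5 CPUs
}
```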
@@ -41,7 +41,7 @@ func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Con
 	}

 	for spec := range config.Volumes {
-		name := stringid.GenerateNonCryptoID()
+		name := stringid.GenerateRandomID()
 		destination := filepath.Clean(spec)

 		// Skip volumes for which we already have something mounted on that
@@ -38,7 +38,7 @@ func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Con

 	// If the mountpoint doesn't have a name, generate one.
 	if len(mp.Name) == 0 {
-		mp.Name = stringid.GenerateNonCryptoID()
+		mp.Name = stringid.GenerateRandomID()
 	}

 	// Skip volumes for which we already have something mounted on that
@@ -11,6 +11,7 @@ import (
 	"io/ioutil"
 	"math/rand"
 	"net"
+	"net/url"
 	"os"
 	"path"
 	"path/filepath"
@@ -157,15 +158,18 @@ func (daemon *Daemon) NewResolveOptionsFunc() resolver.ResolveOptionsFunc {
 	)
 	// must trim "https://" or "http://" prefix
 	for i, v := range daemon.configStore.Mirrors {
 		v = strings.TrimPrefix(v, "https://")
 		v = strings.TrimPrefix(v, "http://")
+		if uri, err := url.Parse(v); err == nil {
+			v = uri.Host
+		}
 		mirrors[i] = v
 	}
 	// set "registry-mirrors"
 	m[registryKey] = resolver.RegistryConf{Mirrors: mirrors}
 	// set "insecure-registries"
 	for _, v := range daemon.configStore.InsecureRegistries {
 		v = strings.TrimPrefix(v, "http://")
+		if uri, err := url.Parse(v); err == nil {
+			v = uri.Host
+		}
 		m[v] = resolver.RegistryConf{
 			PlainHTTP: true,
 		}
@@ -39,7 +39,7 @@ type Config struct {
 // NewConfig initializes the a new exec configuration
 func NewConfig() *Config {
 	return &Config{
-		ID:           stringid.GenerateNonCryptoID(),
+		ID:           stringid.GenerateRandomID(),
 		StreamConfig: stream.NewConfig(),
 		Started:      make(chan struct{}),
 	}
@@ -731,7 +731,7 @@ func BenchmarkConcurrentAccess(b *testing.B) {
 	// create a bunch of ids
 	var ids []string
 	for i := 0; i < numConcurrent; i++ {
-		ids = append(ids, stringid.GenerateNonCryptoID())
+		ids = append(ids, stringid.GenerateRandomID())
 	}

 	if err := d.Create(ids[0], "", nil); err != nil {
@@ -39,6 +39,13 @@ func (a *pluginAdapter) Log(msg *Message) error {
 	a.buf.TimeNano = msg.Timestamp.UnixNano()
 	a.buf.Partial = msg.PLogMetaData != nil
 	a.buf.Source = msg.Source
+	if msg.PLogMetaData != nil {
+		a.buf.PartialLogMetadata = &logdriver.PartialLogEntryMetadata{
+			Id:      msg.PLogMetaData.ID,
+			Last:    msg.PLogMetaData.Last,
+			Ordinal: int32(msg.PLogMetaData.Ordinal),
+		}
+	}

 	err := a.enc.Encode(&a.buf)
 	a.buf.Reset()
@@ -81,7 +81,7 @@ func makePluginCreator(name string, l logPlugin, scopePath func(s string) string
 		return nil, err
 	}

-	id := stringid.GenerateNonCryptoID()
+	id := stringid.GenerateRandomID()
 	a := &pluginAdapter{
 		driverName: name,
 		id:         id,
@@ -38,7 +38,7 @@ func (daemon *Daemon) registerName(container *container.Container) error {
 func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
 	var (
 		err error
-		id  = stringid.GenerateNonCryptoID()
+		id  = stringid.GenerateRandomID()
 	)

 	if name == "" {
@@ -130,7 +130,7 @@ can take over 15 minutes to complete.
 ```none
 Successfully built 3d872560918e
 Successfully tagged docker-dev:dry-run-test
-docker run --rm -i --privileged -e BUILDFLAGS -e KEEPBUNDLE -e DOCKER_BUILD_GOGC -e DOCKER_BUILD_PKGS -e DOCKER_CLIENTONLY -e DOCKER_DEBUG -e DOCKER_EXPERIMENTAL -e DOCKER_GITCOMMIT -e DOCKER_GRAPHDRIVER=devicemapper -e DOCKER_INCREMENTAL_BINARY -e DOCKER_REMAP_ROOT -e DOCKER_STORAGE_OPTS -e DOCKER_USERLANDPROXY -e TESTDIRS -e TESTFLAGS -e TIMEOUT -v "home/ubuntu/repos/docker/bundles:/go/src/github.com/docker/docker/bundles" -t "docker-dev:dry-run-test" bash
+docker run --rm -i --privileged -e BUILDFLAGS -e KEEPBUNDLE -e DOCKER_BUILD_GOGC -e DOCKER_BUILD_PKGS -e DOCKER_CLIENTONLY -e DOCKER_DEBUG -e DOCKER_EXPERIMENTAL -e DOCKER_GITCOMMIT -e DOCKER_GRAPHDRIVER=devicemapper -e DOCKER_REMAP_ROOT -e DOCKER_STORAGE_OPTS -e DOCKER_USERLANDPROXY -e TESTDIRS -e TESTFLAGS -e TIMEOUT -v "home/ubuntu/repos/docker/bundles:/go/src/github.com/docker/docker/bundles" -t "docker-dev:dry-run-test" bash
 #
 ```
@@ -13,5 +13,6 @@ hack/make.sh \
 	binary-daemon \
 	dynbinary \
 	test-docker-py \
+	test-integration-flaky \
 	test-integration \
 	cross
@@ -4,7 +4,7 @@
 # containerd is also pinned in vendor.conf. When updating the binary
 # version you may also need to update the vendor version to pick up bug
 # fixes or new APIs.
-CONTAINERD_COMMIT=bb71b10fd8f58240ca47fbb579b9d1028eea7c84 # v1.2.5
+CONTAINERD_COMMIT=894b81a4b802e4eb2a91d1ce216b8817763c29fb # v1.2.6

 install_containerd() {
 	echo "Install containerd version $CONTAINERD_COMMIT"
@@ -4,7 +4,7 @@
 # The version of runc should match the version that is used by the containerd
 # version that is used. If you need to update runc, open a pull request in
 # the containerd project first, and update both after that is merged.
-RUNC_COMMIT=2b18fe1d885ee5083ef9f0838fee39b62d653e30
+RUNC_COMMIT=425e105d5a03fabd737a126ad93d62a9eeede87f # v1.0.0-rc8

 install_runc() {
 	# If using RHEL7 kernels (3.10.0 el7), disable kmem accounting/limiting
@@ -36,7 +36,6 @@ while the client is supposed to be running on a laptop, e.g. Docker for Mac/Wind
 Following environment variables are known to work in this step:

 - `BUILDFLAGS`
-- `DOCKER_INCREMENTAL_BINARY`

 Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
@@ -148,14 +148,6 @@ EXTLDFLAGS_STATIC='-static'
 ORIG_BUILDFLAGS=( -tags "autogen netgo osusergo static_build $DOCKER_BUILDTAGS" -installsuffix netgo )
 # see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here

-# When $DOCKER_INCREMENTAL_BINARY is set in the environment, enable incremental
-# builds by installing dependent packages to the GOPATH.
-REBUILD_FLAG="-a"
-if [ "$DOCKER_INCREMENTAL_BINARY" == "1" ] || [ "$DOCKER_INCREMENTAL_BINARY" == "true" ]; then
-	REBUILD_FLAG="-i"
-fi
-ORIG_BUILDFLAGS+=( $REBUILD_FLAG )
-
 BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" )

 # Test timeout.
new file: components/engine/hack/make/test-integration-flaky (+37 lines)
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+set -e -o pipefail
+
+source hack/validate/.validate
+new_tests=$(
+	validate_diff --diff-filter=ACMR --unified=0 -- 'integration/*_test.go' |
+	grep -E '^(\+func )(.*)(\*testing)' || true
+)
+
+if [ -z "$new_tests" ]; then
+	echo 'No new tests added to integration.'
+	return
+fi
+
+echo
+echo "Found new integrations tests:"
+echo "$new_tests"
+echo "Running stress test for them."
+
+(
+	TESTARRAY=$(echo "$new_tests" | sed 's/+func //' | awk -F'\\(' '{print $1}' | tr '\n' '|')
+	# Note: TEST_REPEAT will make the test suite run 5 times, restarting the daemon
+	# whereas testcount will make each test run 5 times in a row under the same daemon.
+	# This will make a total of 25 runs for each test in TESTARRAY.
+	export TEST_REPEAT=5
+	local testcount=5
+	# However, TIMEOUT needs to take testcount into account, or a premature time out may happen.
+	# The following ugliness will:
+	# - remove last character (usually 'm' from '10m')
+	# - multiply by testcount
+	# - add last character back
+	export TIMEOUT=$((${TIMEOUT::-1} * $testcount))${TIMEOUT:$((${#TIMEOUT}-1)):1}
+
+	export TESTFLAGS="-test.count $testcount -test.run ${TESTARRAY%?}"
+	echo "Using test flags: $TESTFLAGS"
+	source hack/make/test-integration
+)
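A worked example of the TIMEOUT slicing above: with the script's testcount of 5, a TIMEOUT of `10m` expands to `50m`. The same arithmetic sketched in Go (the function name is mine, not part of the script):

```go
package main

import (
	"fmt"
	"strconv"
)

// scaleTimeout mirrors the bash slicing: strip the trailing unit character,
// multiply the numeric part, then re-attach the unit.
func scaleTimeout(timeout string, count int) string {
	unit := timeout[len(timeout)-1:]
	n, err := strconv.Atoi(timeout[:len(timeout)-1])
	if err != nil {
		panic(err)
	}
	return strconv.Itoa(n*count) + unit
}

func main() {
	fmt.Println(scaleTimeout("10m", 5)) // 50m
}
```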
@@ -19,10 +19,6 @@ TESTDIRS="${TESTDIRS:-"./..."}"
 exclude_paths="/vendor/|/integration"
 pkg_list=$(go list $TESTDIRS | grep -vE "($exclude_paths)")

-# install test dependencies once before running tests for each package. This
-# significantly reduces the runtime.
-go test -i "${BUILDFLAGS[@]}" $pkg_list
-
 for pkg in $pkg_list; do
 	go test "${BUILDFLAGS[@]}" \
 		-cover \
@@ -558,7 +558,7 @@ func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *chec
 }

 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *check.C) {
-	driverName := stringid.GenerateNonCryptoID()
+	driverName := stringid.GenerateRandomID()
 	p := newVolumePlugin(c, driverName)
 	defer p.Close()
@@ -474,6 +474,61 @@ RUN for g in $(seq 0 8); do dd if=/dev/urandom of=rnd bs=1K count=1 seek=$((1024
 	assert.Check(t, is.Contains(out.String(), "Successfully built"))
 }

+func TestBuildWithEmptyDockerfile(t *testing.T) {
+	ctx := context.TODO()
+	defer setupTest(t)()
+
+	tests := []struct {
+		name        string
+		dockerfile  string
+		expectedErr string
+	}{
+		{
+			name:        "empty-dockerfile",
+			dockerfile:  "",
+			expectedErr: "cannot be empty",
+		},
+		{
+			name: "empty-lines-dockerfile",
+			dockerfile: `
+
+
+
+			`,
+			expectedErr: "file with no instructions",
+		},
+		{
+			name:        "comment-only-dockerfile",
+			dockerfile:  `# this is a comment`,
+			expectedErr: "file with no instructions",
+		},
+	}
+
+	apiclient := testEnv.APIClient()
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			buf := bytes.NewBuffer(nil)
+			w := tar.NewWriter(buf)
+			writeTarRecord(t, w, "Dockerfile", tc.dockerfile)
+			err := w.Close()
+			assert.NilError(t, err)
+
+			_, err = apiclient.ImageBuild(ctx,
+				buf,
+				types.ImageBuildOptions{
+					Remove:      true,
+					ForceRemove: true,
+				})
+
+			assert.Check(t, is.Contains(err.Error(), tc.expectedErr))
+		})
+	}
+}
+
 func writeTarRecord(t *testing.T, w *tar.Writer, fn, contents string) {
 	err := w.WriteHeader(&tar.Header{
 		Name: fn,
@@ -61,7 +61,7 @@ func TestRenameStoppedContainer(t *testing.T) {
 	assert.NilError(t, err)
 	assert.Check(t, is.Equal("/"+oldName, inspect.Name))

-	newName := "new_name" + stringid.GenerateNonCryptoID()
+	newName := "new_name" + stringid.GenerateRandomID()
 	err = client.ContainerRename(ctx, oldName, newName)
 	assert.NilError(t, err)
@@ -79,7 +79,7 @@ func TestRenameRunningContainerAndReuse(t *testing.T) {
 	cID := container.Run(t, ctx, client, container.WithName(oldName))
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))

-	newName := "new_name" + stringid.GenerateNonCryptoID()
+	newName := "new_name" + stringid.GenerateRandomID()
 	err := client.ContainerRename(ctx, oldName, newName)
 	assert.NilError(t, err)
@@ -75,10 +75,13 @@ func getPlatformDefaults(info types.Info, osType string) PlatformDefaults {
 		}
 	case "windows":
 		baseImage := "microsoft/windowsservercore"
-		if override := os.Getenv("WINDOWS_BASE_IMAGE"); override != "" {
-			baseImage = override
-			fmt.Println("INFO: Windows Base image is ", baseImage)
+		if overrideBaseImage := os.Getenv("WINDOWS_BASE_IMAGE"); overrideBaseImage != "" {
+			baseImage = overrideBaseImage
+			if overrideBaseImageTag := os.Getenv("WINDOWS_BASE_IMAGE_TAG"); overrideBaseImageTag != "" {
+				baseImage = baseImage + ":" + overrideBaseImageTag
+			}
 		}
+		fmt.Println("INFO: Windows Base image is ", baseImage)
 		return PlatformDefaults{
 			BaseImage:         baseImage,
 			VolumesConfigPath: filepath.FromSlash(volumesPath),
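With both variables set, the two overrides now compose into a single image reference. A tiny illustrative sketch (the values are made up, not defaults of the test environment):

```go
package main

import "fmt"

func main() {
	// Hypothetical override values, mirroring the env-var logic above.
	base := "mcr.microsoft.com/windows/servercore" // WINDOWS_BASE_IMAGE
	tag := "ltsc2019"                              // WINDOWS_BASE_IMAGE_TAG
	if tag != "" {
		base = base + ":" + tag
	}
	fmt.Println(base) // mcr.microsoft.com/windows/servercore:ltsc2019
}
```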
@@ -4,7 +4,6 @@ import (
 	"bufio"
 	"fmt"
 	"os"
-	"sort"
 	"strconv"
 	"strings"
 )
@@ -203,8 +202,6 @@ func (i *IdentityMapping) GIDs() []IDMap {
 func createIDMap(subidRanges ranges) []IDMap {
 	idMap := []IDMap{}

-	// sort the ranges by lowest ID first
-	sort.Sort(subidRanges)
 	containerID := 0
 	for _, idrange := range subidRanges {
 		idMap = append(idMap, IDMap{
new file: components/engine/pkg/idtools/idtools_test.go (+28 lines)
@@ -0,0 +1,28 @@
+package idtools // import "github.com/docker/docker/pkg/idtools"
+
+import (
+	"testing"
+
+	"gotest.tools/assert"
+)
+
+func TestCreateIDMapOrder(t *testing.T) {
+	subidRanges := ranges{
+		{100000, 1000},
+		{1000, 1},
+	}
+
+	idMap := createIDMap(subidRanges)
+	assert.DeepEqual(t, idMap, []IDMap{
+		{
+			ContainerID: 0,
+			HostID:      100000,
+			Size:        1000,
+		},
+		{
+			ContainerID: 1000,
+			HostID:      1000,
+			Size:        1,
+		},
+	})
+}
@@ -2,17 +2,12 @@
 package stringid // import "github.com/docker/docker/pkg/stringid"

 import (
-	cryptorand "crypto/rand"
+	"crypto/rand"
 	"encoding/hex"
 	"fmt"
-	"io"
-	"math"
-	"math/big"
-	"math/rand"
 	"regexp"
 	"strconv"
 	"strings"
-	"time"
 )

 const shortLen = 12
@@ -41,10 +36,11 @@ func TruncateID(id string) string {
 	return id
 }

-func generateID(r io.Reader) string {
+// GenerateRandomID returns a unique id.
+func GenerateRandomID() string {
 	b := make([]byte, 32)
 	for {
-		if _, err := io.ReadFull(r, b); err != nil {
+		if _, err := rand.Read(b); err != nil {
 			panic(err) // This shouldn't happen
 		}
 		id := hex.EncodeToString(b)
@@ -58,18 +54,6 @@ func generateID(r io.Reader) string {
 	}
 }

-// GenerateRandomID returns a unique id.
-func GenerateRandomID() string {
-	return generateID(cryptorand.Reader)
-}
-
-// GenerateNonCryptoID generates unique id without using cryptographically
-// secure sources of random.
-// It helps you to save entropy.
-func GenerateNonCryptoID() string {
-	return generateID(readerFunc(rand.Read))
-}
-
 // ValidateID checks whether an ID string is a valid image ID.
 func ValidateID(id string) error {
 	if ok := validHex.MatchString(id); !ok {
@@ -77,23 +61,3 @@ func ValidateID(id string) error {
 	}
 	return nil
 }
-
-func init() {
-	// safely set the seed globally so we generate random ids. Tries to use a
-	// crypto seed before falling back to time.
-	var seed int64
-	if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
-		// This should not happen, but worst-case fallback to time-based seed.
-		seed = time.Now().UnixNano()
-	} else {
-		seed = cryptoseed.Int64()
-	}
-
-	rand.Seed(seed)
-}
-
-type readerFunc func(p []byte) (int, error)
-
-func (fn readerFunc) Read(p []byte) (int, error) {
-	return fn(p)
-}
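After this consolidation every caller gets a 64-character hex ID drawn from crypto/rand, and the non-crypto variant plus its math/rand seeding disappear. A minimal sketch of the core of GenerateRandomID (omitting the retry condition applied in the portion of the loop elided above):

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// Simplified sketch: 32 random bytes -> 64 hex characters, as in
// GenerateRandomID (minus the real loop's retry condition).
func generateID() string {
	b := make([]byte, 32)
	if _, err := rand.Read(b); err != nil {
		panic(err) // crypto/rand failing is unrecoverable here
	}
	return hex.EncodeToString(b)
}

func main() {
	fmt.Println(generateID()) // e.g. "90435eec5c4e..."
}
```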
@@ -13,14 +13,6 @@ func TestGenerateRandomID(t *testing.T) {
 	}
 }

-func TestGenerateNonCryptoID(t *testing.T) {
-	id := GenerateNonCryptoID()
-
-	if len(id) != 64 {
-		t.Fatalf("Id returned is incorrect: %s", id)
-	}
-}
-
 func TestShortenId(t *testing.T) {
 	id := "90435eec5c4e124e741ef731e118be2fc799a68aba0466ec17717f24ce2ae6a2"
 	truncID := TruncateID(id)
@@ -158,7 +158,7 @@ func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult strin
 func BenchmarkTruncIndexAdd100(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 100; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -174,7 +174,7 @@ func BenchmarkTruncIndexAdd100(b *testing.B) {
 func BenchmarkTruncIndexAdd250(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 250; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -190,7 +190,7 @@ func BenchmarkTruncIndexAdd250(b *testing.B) {
 func BenchmarkTruncIndexAdd500(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 500; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -207,7 +207,7 @@ func BenchmarkTruncIndexGet100(b *testing.B) {
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 100; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	index := NewTruncIndex([]string{})
 	for _, id := range testSet {
@@ -231,7 +231,7 @@ func BenchmarkTruncIndexGet250(b *testing.B) {
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 250; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	index := NewTruncIndex([]string{})
 	for _, id := range testSet {
@@ -255,7 +255,7 @@ func BenchmarkTruncIndexGet500(b *testing.B) {
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 500; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	index := NewTruncIndex([]string{})
 	for _, id := range testSet {
@@ -278,7 +278,7 @@ func BenchmarkTruncIndexGet500(b *testing.B) {
 func BenchmarkTruncIndexDelete100(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 100; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -301,7 +301,7 @@ func BenchmarkTruncIndexDelete100(b *testing.B) {
 func BenchmarkTruncIndexDelete250(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 250; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -324,7 +324,7 @@ func BenchmarkTruncIndexDelete250(b *testing.B) {
 func BenchmarkTruncIndexDelete500(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 500; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -347,7 +347,7 @@ func BenchmarkTruncIndexDelete500(b *testing.B) {
 func BenchmarkTruncIndexNew100(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 100; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -358,7 +358,7 @@ func BenchmarkTruncIndexNew100(b *testing.B) {
 func BenchmarkTruncIndexNew250(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 250; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -369,7 +369,7 @@ func BenchmarkTruncIndexNew250(b *testing.B) {
 func BenchmarkTruncIndexNew500(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 500; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -381,7 +381,7 @@ func BenchmarkTruncIndexAddGet100(b *testing.B) {
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 500; i++ {
-		id := stringid.GenerateNonCryptoID()
+		id := stringid.GenerateRandomID()
 		testSet = append(testSet, id)
 		l := rand.Intn(12) + 12
 		testKeys = append(testKeys, id[:l])
@@ -406,7 +406,7 @@ func BenchmarkTruncIndexAddGet250(b *testing.B) {
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 500; i++ {
-		id := stringid.GenerateNonCryptoID()
+		id := stringid.GenerateRandomID()
 		testSet = append(testSet, id)
 		l := rand.Intn(12) + 12
 		testKeys = append(testKeys, id[:l])
@@ -431,7 +431,7 @@ func BenchmarkTruncIndexAddGet500(b *testing.B) {
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 500; i++ {
-		id := stringid.GenerateNonCryptoID()
+		id := stringid.GenerateRandomID()
 		testSet = append(testSet, id)
 		l := rand.Intn(12) + 12
 		testKeys = append(testKeys, id[:l])
@@ -70,7 +70,7 @@ func TestManagerWithPluginMounts(t *testing.T) {
 }

 func newTestPlugin(t *testing.T, name, cap, root string) *v2.Plugin {
-	id := stringid.GenerateNonCryptoID()
+	id := stringid.GenerateRandomID()
 	rootfs := filepath.Join(root, id)
 	if err := os.MkdirAll(rootfs, 0755); err != nil {
 		t.Fatal(err)
@@ -26,7 +26,7 @@ github.com/imdario/mergo v0.3.6
 golang.org/x/sync 1d60e4601c6fd243af51cc01ddf169918a5407ca

 # buildkit
-github.com/moby/buildkit ed4da8b4a9661f278ae8433056ca37d0727c408b # docker-18.09 branch
+github.com/moby/buildkit 05766c5c21a1e528eeb1c3522b2f05493fe9ac47 # docker-18.09 branch
 github.com/tonistiigi/fsutil 2862f6bc5ac9b97124e552a5c108230b38a1b0ca
 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
@@ -121,7 +121,7 @@ google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
 github.com/containerd/containerd 9754871865f7fe2f4e74d43e2fc7ccd237edcbce # v1.2.2
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
-github.com/containerd/cgroups dbea6f2bd41658b84b00417ceefa416b979cbf10
+github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1
 github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
 github.com/containerd/cri 0d5cabd006cb5319dc965046067b8432d9fa5ef8 # release/1.2 branch
 github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
vendored (generated): components/engine/vendor/github.com/containerd/cgroups/cgroup.go
@@ -105,6 +105,10 @@ func Load(hierarchy Hierarchy, path Path, opts ...InitOpts) (Cgroup, error) {
 		}
 		activeSubsystems = append(activeSubsystems, s)
 	}
+	// if we do not have any active systems then the cgroup is deleted
+	if len(activeSubsystems) == 0 {
+		return nil, ErrCgroupDeleted
+	}
 	return &cgroup{
 		path:       path,
 		subsystems: activeSubsystems,
@@ -275,6 +275,11 @@ func Parse(rwc io.Reader) (*Result, error) {
 	if len(warnings) > 0 {
 		warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
 	}
+
+	if root.StartLine < 0 {
+		return nil, errors.New("file with no instructions.")
+	}
+
 	return &Result{
 		AST:      root,
 		Warnings: warnings,
@@ -82,7 +82,10 @@ func (p *linuxParser) validateMountConfigImpl(mnt *mount.Mount, validateBindSour
 	}

 	if validateBindSourceExists {
-		exists, _, _ := currentFileInfoProvider.fileInfo(mnt.Source)
+		exists, _, err := currentFileInfoProvider.fileInfo(mnt.Source)
+		if err != nil {
+			return &errMountConfig{mnt, err}
+		}
 		if !exists {
 			return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)}
 		}
@@ -292,7 +295,7 @@ func (p *linuxParser) parseMountSpec(cfg mount.Mount, validateBindSourceExists b
 	switch cfg.Type {
 	case mount.TypeVolume:
 		if cfg.Source == "" {
-			mp.Name = stringid.GenerateNonCryptoID()
+			mp.Name = stringid.GenerateRandomID()
 		} else {
 			mp.Name = cfg.Source
 		}
@@ -125,7 +125,7 @@ func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.Identity, checkFun
 	if m.Volume != nil {
 		id := m.ID
 		if id == "" {
-			id = stringid.GenerateNonCryptoID()
+			id = stringid.GenerateRandomID()
 		}
 		path, err := m.Volume.Mount(id)
 		if err != nil {
@@ -1,6 +1,7 @@
 package mounts // import "github.com/docker/docker/volume/mounts"

 import (
+	"errors"
 	"io/ioutil"
 	"os"
 	"runtime"
@@ -8,6 +9,8 @@ import (
 	"testing"

 	"github.com/docker/docker/api/types/mount"
+	"gotest.tools/assert"
+	"gotest.tools/assert/cmp"
 )

 type parseMountRawTestSet struct {
@@ -477,4 +480,51 @@ func TestParseMountSpec(t *testing.T) {
 			t.Errorf("Expected mount copy data to match. Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData)
 		}
 	}
 }
+
+// always returns the configured error
+// this is used to test error handling
+type mockFiProviderWithError struct{ err error }
+
+func (m mockFiProviderWithError) fileInfo(path string) (bool, bool, error) {
+	return false, false, m.err
+}
+
+// TestParseMountSpecBindWithFileinfoError makes sure that the parser returns
+// the error produced by the fileinfo provider.
+//
+// Some extra context for the future in case of changes and possible wtf are we
+// testing this for:
+//
+// Currently this "fileInfoProvider" returns (bool, bool, error)
+// The 1st bool is "does this path exist"
+// The 2nd bool is "is this path a dir"
+// Then of course the error is an error.
+//
+// The issue is the parser was ignoring the error and only looking at the
+// "does this path exist" boolean, which is always false if there is an error.
+// Then the error returned to the caller was a (slightly, maybe) friendlier
+// error string than what comes from `os.Stat`
+// So ...the caller was always getting an error saying the path doesn't exist
+// even if it does exist but got some other error (like a permission error).
+// This is confusing to users.
+func TestParseMountSpecBindWithFileinfoError(t *testing.T) {
+	previousProvider := currentFileInfoProvider
+	defer func() { currentFileInfoProvider = previousProvider }()
+
+	testErr := errors.New("some crazy error")
+	currentFileInfoProvider = &mockFiProviderWithError{err: testErr}
+
+	p := "/bananas"
+	if runtime.GOOS == "windows" {
+		p = `c:\bananas`
+	}
+	m := mount.Mount{Type: mount.TypeBind, Source: p, Target: p}
+
+	parser := NewParser(runtime.GOOS)
+
+	_, err := parser.ParseMountSpec(m)
+	assert.Assert(t, err != nil)
+	assert.Assert(t, cmp.Contains(err.Error(), "some crazy error"))
+}
@@ -385,7 +385,7 @@ func (p *windowsParser) parseMountSpec(cfg mount.Mount, destRegex string, conver
 	switch cfg.Type {
 	case mount.TypeVolume:
 		if cfg.Source == "" {
-			mp.Name = stringid.GenerateNonCryptoID()
+			mp.Name = stringid.GenerateRandomID()
 		} else {
 			mp.Name = cfg.Source
 		}
@@ -56,7 +56,7 @@ func (s *VolumesService) GetDriverList() []string {
 // Create creates a volume
 func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) {
 	if name == "" {
-		name = stringid.GenerateNonCryptoID()
+		name = stringid.GenerateRandomID()
 	}
 	v, err := s.vs.Create(ctx, name, driverName, opts...)
 	if err != nil {