Merge component 'engine' from git@github.com:moby/moby master
@@ -23,7 +23,7 @@
# the case. Therefore, you don't have to disable it anymore.
#

FROM debian:jessie
FROM debian:stretch

# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
@@ -51,21 +51,28 @@ RUN apt-get update && apt-get install -y \
less \
libapparmor-dev \
libcap-dev \
libdevmapper-dev \
libnl-3-dev \
libprotobuf-c0-dev \
libprotobuf-dev \
libsystemd-journal-dev \
libsystemd-dev \
libtool \
libudev-dev \
mercurial \
net-tools \
pkg-config \
protobuf-compiler \
protobuf-c-compiler \
python-backports.ssl-match-hostname \
python-dev \
python-mock \
python-pip \
python-requests \
python-setuptools \
python-websocket \
python-wheel \
tar \
thin-provisioning-tools \
vim \
vim-common \
xfsprogs \
@@ -73,21 +80,6 @@ RUN apt-get update && apt-get install -y \
--no-install-recommends \
&& pip install awscli==1.10.15

# Get lvm2 sources to build statically linked devmapper library
ENV LVM2_VERSION 2.02.173
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1

# Compile and install (only the needed library)
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
--enable-pkgconfig \
&& make -C include \
&& make -C libdm install_device-mapper

# Install seccomp: the version shipped upstream is too old
ENV SECCOMP_VERSION 2.3.2
RUN set -x \
@@ -157,9 +149,6 @@ RUN set -x \
# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
# To run integration tests docker-pycreds is required.
# Before running the integration tests conftest.py is
# loaded which results in loads auth.py that
# imports the docker-pycreds module.
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
@@ -216,3 +205,6 @@ ENTRYPOINT ["hack/dind"]

# Upload docker source
COPY . /go/src/github.com/docker/docker

# Options for hack/validate/gometalinter
ENV GOMETALINTER_OPTS="--deadline 2m"
@@ -15,14 +15,20 @@
# the case. Therefore, you don't have to disable it anymore.
#

FROM aarch64/ubuntu:xenial
FROM arm64v8/debian:stretch

# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list

# Packaged dependencies
RUN apt-get update && apt-get install -y \
apparmor \
apt-utils \
aufs-tools \
automake \
bash-completion \
bsdmainutils \
btrfs-tools \
build-essential \
cmake \
@@ -32,43 +38,40 @@ RUN apt-get update && apt-get install -y \
g++ \
gcc \
git \
golang \
iptables \
jq \
less \
libapparmor-dev \
libc6-dev \
libcap-dev \
libdevmapper-dev \
libnl-3-dev \
libprotobuf-c0-dev \
libprotobuf-dev \
libsystemd-dev \
libyaml-dev \
libtool \
libudev-dev \
mercurial \
net-tools \
parallel \
pkg-config \
protobuf-compiler \
protobuf-c-compiler \
python-backports.ssl-match-hostname \
python-dev \
python-mock \
python-pip \
python-requests \
python-setuptools \
python-websocket \
golang-go \
iproute2 \
iputils-ping \
python-wheel \
tar \
thin-provisioning-tools \
vim \
vim-common \
xfsprogs \
zip \
--no-install-recommends

# Get lvm2 sources to build statically linked devmapper library
ENV LVM2_VERSION 2.02.173
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1

# Compile and install (only the needed library)
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
--enable-pkgconfig \
&& make -C include \
&& make -C libdm install_device-mapper

# Install seccomp: the version shipped upstream is too old
ENV SECCOMP_VERSION 2.3.2
RUN set -x \
@@ -86,9 +89,7 @@ RUN set -x \

# Install Go
# We don't have official binary golang 1.7.5 tarballs for ARM64, either for Go or
# bootstrap, so we use golang-go (1.6) as bootstrap to build Go from source code.
# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because
# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8.
# bootstrap, so we use Debian golang (1.7) as bootstrap to build Go from source code.
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
ENV GO_VERSION 1.8.3
RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
@@ -124,13 +125,10 @@ RUN set -x \

# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
# Before running the integration tests conftest.py is
# loaded which results in loads auth.py that
# imports the docker-pycreds module.
# To run integration tests docker-pycreds is required.
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install wheel \
&& pip install docker-pycreds==0.2.1 \
&& pip install -r test-requirements.txt

@@ -173,7 +171,7 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli gometalinter
ENV PATH=/usr/local/cli:$PATH

# Wrap all commands in the "docker-in-docker" script to allow nested containers
@@ -181,3 +179,6 @@ ENTRYPOINT ["hack/dind"]

# Upload docker source
COPY . /go/src/github.com/docker/docker

# Options for hack/validate/gometalinter
ENV GOMETALINTER_OPTS="--deadline 4m -j2"
@@ -15,7 +15,7 @@
# the case. Therefore, you don't have to disable it anymore.
#

FROM armhf/debian:jessie
FROM arm32v7/debian:stretch

# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
@@ -39,36 +39,27 @@ RUN apt-get update && apt-get install -y \
net-tools \
libapparmor-dev \
libcap-dev \
libsystemd-journal-dev \
libdevmapper-dev \
libsystemd-dev \
libtool \
libudev-dev \
mercurial \
pkg-config \
python-backports.ssl-match-hostname \
python-dev \
python-mock \
python-pip \
python-requests \
python-setuptools \
python-websocket \
python-wheel \
xfsprogs \
tar \
thin-provisioning-tools \
vim-common \
--no-install-recommends \
&& pip install awscli==1.10.15

# Get lvm2 sources to build statically linked devmapper library
ENV LVM2_VERSION 2.02.173
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1

# Compile and install (only the needed library)
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
--enable-pkgconfig \
&& make -C include \
&& make -C libdm install_device-mapper


# Install Go
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
ENV GO_VERSION 1.8.3
@@ -127,9 +118,11 @@ RUN set -x \

# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
# To run integration tests docker-pycreds is required.
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install docker-pycreds==0.2.1 \
&& pip install -r test-requirements.txt

# Set user.email so crosbymichael's in-container merge commits go smoothly
@@ -162,10 +155,13 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli gometalinter
ENV PATH=/usr/local/cli:$PATH

ENTRYPOINT ["hack/dind"]

# Upload docker source
COPY . /go/src/github.com/docker/docker

# Options for hack/validate/gometalinter
ENV GOMETALINTER_OPTS="--deadline 10m -j2"
components/engine/Dockerfile.e2e (new file, 70 lines added)
@@ -0,0 +1,70 @@
## Step 1: Build tests
FROM golang:1.8.3-alpine3.6 as builder

RUN apk add --update \
bash \
build-base \
curl \
lvm2-dev \
jq \
&& rm -rf /var/cache/apk/*

RUN mkdir -p /go/src/github.com/docker/docker/
WORKDIR /go/src/github.com/docker/docker/

# Generate frozen images
COPY contrib/download-frozen-image-v2.sh contrib/download-frozen-image-v2.sh
RUN contrib/download-frozen-image-v2.sh /output/docker-frozen-images \
buildpack-deps:jessie@sha256:85b379ec16065e4fe4127eb1c5fb1bcc03c559bd36dbb2e22ff496de55925fa6 \
busybox:latest@sha256:32f093055929dbc23dec4d03e09dfe971f5973a9ca5cf059cbfb644c206aa83f \
debian:jessie@sha256:72f784399fd2719b4cb4e16ef8e369a39dc67f53d978cd3e2e7bf4e502c7b793 \
hello-world:latest@sha256:c5515758d4c5e1e838e9cd307f6c6a0d620b5e07e6f927b07d05f6d12a1ac8d7

# Download Docker CLI binary
COPY hack/dockerfile hack/dockerfile
RUN hack/dockerfile/install-binaries.sh dockercli

# Set tag and add sources
ARG DOCKER_GITCOMMIT
ENV DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT
ADD . .

# Build DockerSuite.TestBuild* dependency
RUN CGO_ENABLED=0 go build -o /output/httpserver github.com/docker/docker/contrib/httpserver

# Build the integration tests and copy the resulting binaries to /output/tests
RUN hack/make.sh build-integration-test-binary
RUN mkdir -p /output/tests && find . -name test.main -exec cp --parents '{}' /output/tests \;

## Step 2: Generate testing image
FROM alpine:3.6 as runner

# GNU tar is used for generating the emptyfs image
RUN apk add --update \
bash \
ca-certificates \
g++ \
git \
iptables \
tar \
xz \
&& rm -rf /var/cache/apk/*

# Add an unprivileged user to be used for tests which need it
RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash

COPY contrib/httpserver/Dockerfile /tests/contrib/httpserver/Dockerfile
COPY contrib/syscall-test /tests/contrib/syscall-test
COPY integration-cli/fixtures /tests/integration-cli/fixtures

COPY hack/test/e2e-run.sh /scripts/run.sh
COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh

COPY --from=builder /output/docker-frozen-images /docker-frozen-images
COPY --from=builder /output/httpserver /tests/contrib/httpserver/httpserver
COPY --from=builder /output/tests /tests
COPY --from=builder /usr/local/bin/docker /usr/bin/docker

ENV DOCKER_REMOTE_DAEMON=1 DOCKER_INTEGRATION_DAEMON_DEST=/

ENTRYPOINT ["/scripts/run.sh"]
@@ -15,7 +15,7 @@
# the case. Therefore, you don't have to disable it anymore.
#

FROM ppc64le/debian:jessie
FROM ppc64le/debian:stretch

# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
@@ -40,34 +40,26 @@ RUN apt-get update && apt-get install -y \
net-tools \
libapparmor-dev \
libcap-dev \
libsystemd-journal-dev \
libdevmapper-dev \
libsystemd-dev \
libtool \
libudev-dev \
mercurial \
pkg-config \
python-backports.ssl-match-hostname \
python-dev \
python-mock \
python-pip \
python-requests \
python-setuptools \
python-websocket \
python-wheel \
xfsprogs \
tar \
thin-provisioning-tools \
vim-common \
--no-install-recommends

# Get lvm2 sources to build statically linked devmapper library
ENV LVM2_VERSION 2.02.173
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1

# Compile and install (only the needed library)
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
--enable-pkgconfig \
&& make -C include \
&& make -C libdm install_device-mapper

# Install seccomp: the version shipped upstream is too old
ENV SECCOMP_VERSION 2.3.2
RUN set -x \
@@ -125,9 +117,11 @@ RUN set -x \

# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
# To run integration tests docker-pycreds is required.
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install docker-pycreds==0.2.1 \
&& pip install -r test-requirements.txt

# Set user.email so crosbymichael's in-container merge commits go smoothly
@@ -160,7 +154,7 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli gometalinter
ENV PATH=/usr/local/cli:$PATH

# Wrap all commands in the "docker-in-docker" script to allow nested containers
@@ -15,7 +15,7 @@
# the case. Therefore, you don't have to disable it anymore.
#

FROM s390x/debian:jessie
FROM s390x/debian:stretch

# Packaged dependencies
RUN apt-get update && apt-get install -y \
@@ -36,16 +36,23 @@ RUN apt-get update && apt-get install -y \
net-tools \
libapparmor-dev \
libcap-dev \
libsystemd-journal-dev \
libdevmapper-dev \
libsystemd-dev \
libtool \
libudev-dev \
mercurial \
pkg-config \
python-backports.ssl-match-hostname \
python-dev \
python-mock \
python-pip \
python-requests \
python-setuptools \
python-websocket \
python-wheel \
xfsprogs \
tar \
thin-provisioning-tools \
vim-common \
--no-install-recommends

@@ -64,21 +71,6 @@ RUN set -x \
) \
&& rm -rf "$SECCOMP_PATH"

# Get lvm2 sources to build statically linked devmapper library
ENV LVM2_VERSION 2.02.173
RUN mkdir -p /usr/local/lvm2 \
&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
| tar -xzC /usr/local/lvm2 --strip-components=1

# Compile and install (only the needed library)
RUN cd /usr/local/lvm2 \
&& ./configure \
--build="$(gcc -print-multiarch)" \
--enable-static_link \
--enable-pkgconfig \
&& make -C include \
&& make -C libdm install_device-mapper

# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
ENV GO_VERSION 1.8.3
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \
@@ -118,9 +110,11 @@ RUN set -x \

# Get the "docker-py" source so we can run their integration tests
ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
# To run integration tests docker-pycreds is required.
RUN git clone https://github.com/docker/docker-py.git /docker-py \
&& cd /docker-py \
&& git checkout -q $DOCKER_PY_COMMIT \
&& pip install docker-pycreds==0.2.1 \
&& pip install -r test-requirements.txt

# Set user.email so crosbymichael's in-container merge commits go smoothly
@@ -153,7 +147,7 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
# Please edit hack/dockerfile/install-binaries.sh to update them.
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli gometalinter
ENV PATH=/usr/local/cli:$PATH

# Wrap all commands in the "docker-in-docker" script to allow nested containers
@@ -5,7 +5,7 @@

# This represents the bare minimum required to build and test Docker.

FROM debian:jessie
FROM debian:stretch

# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
@@ -35,6 +35,7 @@ DOCKER_ENVS := \
-e DOCKER_REMAP_ROOT \
-e DOCKER_STORAGE_OPTS \
-e DOCKER_USERLANDPROXY \
-e TEST_INTEGRATION_DIR \
-e TESTDIRS \
-e TESTFLAGS \
-e TIMEOUT \
@@ -43,7 +44,8 @@ DOCKER_ENVS := \
-e NO_PROXY \
-e http_proxy \
-e https_proxy \
-e no_proxy
-e no_proxy \
-e VERSION
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds

# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
@@ -1,17 +1,5 @@
package api

import (
"encoding/json"
"encoding/pem"
"fmt"
"os"
"path/filepath"

"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/system"
"github.com/docker/libtrust"
)

// Common constants for daemon and client.
const (
// DefaultVersion of Current REST API
@@ -21,45 +9,3 @@ const (
// command to specify that no base image is to be used.
NoBaseImageSpecifier string = "scratch"
)

// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
// otherwise generates a new one
func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "")
if err != nil {
return nil, err
}
trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
if err == libtrust.ErrKeyFileDoesNotExist {
trustKey, err = libtrust.GenerateECP256PrivateKey()
if err != nil {
return nil, fmt.Errorf("Error generating key: %s", err)
}
encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
if err != nil {
return nil, fmt.Errorf("Error serializing key: %s", err)
}
if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
return nil, fmt.Errorf("Error saving key file: %s", err)
}
} else if err != nil {
return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
}
return trustKey, nil
}

func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
if ext == ".json" || ext == ".jwk" {
encoded, err = json.Marshal(key)
if err != nil {
return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
}
} else {
pemBlock, err := key.PEMBlock()
if err != nil {
return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
}
encoded = pem.EncodeToMemory(pemBlock)
}
return
}
@@ -1,77 +0,0 @@
package api

import (
"io/ioutil"
"path/filepath"
"testing"

"os"
)

// LoadOrCreateTrustKey
func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) {
tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpKeyFolderPath)

tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile")
if err != nil {
t.Fatal(err)
}

if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil {
t.Fatal("expected an error, got nothing.")
}

}

func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) {
tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpKeyFolderPath)

// Without the need to create the folder hierarchy
tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile")

if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
t.Fatalf("expected a new key file, got : %v and %v", err, key)
}

if _, err := os.Stat(tmpKeyFile); err != nil {
t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err)
}

// With the need to create the folder hierarchy as tmpKeyFie is in a path
// where some folders do not exist.
tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile")

if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
t.Fatalf("expected a new key file, got : %v and %v", err, key)
}

if _, err := os.Stat(tmpKeyFile); err != nil {
t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err)
}

// With no path at all
defer os.Remove("keyfile")
if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil {
t.Fatalf("expected a new key file, got : %v and %v", err, key)
}

if _, err := os.Stat("keyfile"); err != nil {
t.Fatalf("Expected to find a file keyfile, got %v", err)
}
}

func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) {
tmpKeyFile := filepath.Join("fixtures", "keyfile")

if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
t.Fatalf("expected a key file, got : %v and %v", err, key)
}
}
@@ -2,7 +2,6 @@ package httputils

import (
"net/http"
"path/filepath"
"strconv"
"strings"
)
@@ -69,8 +68,7 @@ func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions,
if name == "" {
return ArchiveOptions{}, badParameterError{"name"}
}

path := filepath.FromSlash(r.Form.Get("path"))
path := r.Form.Get("path")
if path == "" {
return ArchiveOptions{}, badParameterError{"path"}
}
@@ -17,20 +17,13 @@ import (

// WriteLogStream writes an encoded byte stream of log messages from the
// messages channel, multiplexing them with a stdcopy.Writer if mux is true
func WriteLogStream(ctx context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) {
func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) {
wf := ioutils.NewWriteFlusher(w)
defer wf.Close()

wf.Flush()

// this might seem like doing below is clear:
// var outStream io.Writer = wf
// however, this GREATLY DISPLEASES golint, and if you do that, it will
// fail CI. we need outstream to be type writer because if we mux streams,
// we will need to reassign all of the streams to be stdwriters, which only
// conforms to the io.Writer interface.
var outStream io.Writer
outStream = wf
outStream := io.Writer(wf)
errStream := outStream
sysErrStream := errStream
if mux {
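Aside on the outStream change above: the two-line var declaration plus assignment is collapsed into a short variable declaration with an explicit conversion to io.Writer, which keeps golint quiet while still allowing the variable to be reassigned to a stdcopy writer when muxing. A minimal, self-contained sketch of the same idiom, with illustrative names that are not taken from the daemon:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	var buf bytes.Buffer

	// Instead of:
	//   var out io.Writer
	//   out = &buf
	// declare the variable with an explicit conversion so its static type is
	// the interface, which still allows reassigning it later to any other
	// io.Writer (e.g. a stdcopy wrapper when streams are muxed).
	out := io.Writer(&buf)

	fmt.Fprintln(out, "hello")
	fmt.Print(buf.String())
}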
@@ -427,11 +427,7 @@ func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter,
}

id := vars["id"]
if err := sr.backend.UpdateSecret(id, version, secret); err != nil {
return err
}

return nil
return sr.backend.UpdateSecret(id, version, secret)
}

func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -498,9 +494,5 @@ func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter,
}

id := vars["id"]
if err := sr.backend.UpdateConfig(id, version, config); err != nil {
return err
}

return nil
return sr.backend.UpdateConfig(id, version, config)
}
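Both hunks above apply the same mechanical simplification: when an error would be returned unmodified, return the backend call directly. A generic before/after sketch with hypothetical names (the real router calls sr.backend.UpdateSecret and sr.backend.UpdateConfig):

package main

import "errors"

// updater stands in for the swarm backend used by the router.
type updater interface {
	Update(id string) error
}

// Before: the error is checked only to be returned unchanged.
func updateBefore(b updater, id string) error {
	if err := b.Update(id); err != nil {
		return err
	}
	return nil
}

// After: return the call directly; behaviour is identical.
func updateAfter(b updater, id string) error {
	return b.Update(id)
}

type fakeBackend struct{ err error }

func (f fakeBackend) Update(string) error { return f.err }

func main() {
	f := fakeBackend{err: errors.New("boom")}
	println(updateBefore(f, "id") != nil, updateAfter(f, "id") != nil) // true true
}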
@@ -23,7 +23,6 @@ const versionMatcher = "/v{version:[0-9.]+}"
// Config provides the configuration for the API server
type Config struct {
Logging bool
EnableCors bool
CorsHeaders string
Version string
SocketGroup string
@@ -144,6 +144,10 @@ tags:
x-displayName: "Secrets"
description: |
Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work.
- name: "Config"
x-displayName: "Configs"
description: |
Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work.
# System things
- name: "Plugin"
x-displayName: "Plugins"
@@ -181,7 +181,7 @@ type ImageBuildOptions struct {
SessionID string

// TODO @jhowardmsft LCOW Support: This will require extending to include
// `Platform string`, but is ommited for now as it's hard-coded temporarily
// `Platform string`, but is omitted for now as it's hard-coded temporarily
// to avoid API changes.
}
components/engine/api/types/filters/example_test.go (new file, 24 lines added)
@@ -0,0 +1,24 @@
package filters

func ExampleArgs_MatchKVList() {
args := NewArgs(
Arg("label", "image=foo"),
Arg("label", "state=running"))

// returns true because there are no values for bogus
args.MatchKVList("bogus", nil)

// returns false because there are no sources
args.MatchKVList("label", nil)

// returns true because all sources are matched
args.MatchKVList("label", map[string]string{
"image": "foo",
"state": "running",
})

// returns false because the values do not match
args.MatchKVList("label", map[string]string{
"image": "other",
})
}
@@ -1,5 +1,6 @@
// Package filters provides helper function to parse and handle command line
// filter, used for example in docker ps or docker images commands.
/*Package filters provides tools for encoding a mapping of keys to a set of
multiple values.
*/
package filters

import (
@@ -11,27 +12,34 @@ import (
"github.com/docker/docker/api/types/versions"
)

// Args stores filter arguments as map key:{map key: bool}.
// It contains an aggregation of the map of arguments (which are in the form
// of -f 'key=value') based on the key, and stores values for the same key
// in a map with string keys and boolean values.
// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
// Args stores a mapping of keys to a set of multiple values.
type Args struct {
fields map[string]map[string]bool
}

// NewArgs initializes a new Args struct.
func NewArgs() Args {
return Args{fields: map[string]map[string]bool{}}
// KeyValuePair are used to initialize a new Args
type KeyValuePair struct {
Key string
Value string
}

// ParseFlag parses the argument to the filter flag. Like
// Arg creates a new KeyValuePair for initializing Args
func Arg(key, value string) KeyValuePair {
return KeyValuePair{Key: key, Value: value}
}

// NewArgs returns a new Args populated with the initial args
func NewArgs(initialArgs ...KeyValuePair) Args {
args := Args{fields: map[string]map[string]bool{}}
for _, arg := range initialArgs {
args.Add(arg.Key, arg.Value)
}
return args
}

// ParseFlag parses a key=value string and adds it to an Args.
//
// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
//
// If prev map is provided, then it is appended to, and returned. By default a new
// map is created.
// Deprecated: Use Args.Add()
func ParseFlag(arg string, prev Args) (Args, error) {
filters := prev
if len(arg) == 0 {
@@ -52,74 +60,95 @@ func ParseFlag(arg string, prev Args) (Args, error) {
return filters, nil
}

// ErrBadFormat is an error returned in case of bad format for a filter.
// ErrBadFormat is an error returned when a filter is not in the form key=value
//
// Deprecated: this error will be removed in a future version
var ErrBadFormat = errors.New("bad format of filter (expected name=value)")

// ToParam packs the Args into a string for easy transport from client to server.
// ToParam encodes the Args as args JSON encoded string
//
// Deprecated: use ToJSON
func ToParam(a Args) (string, error) {
// this way we don't URL encode {}, just empty space
return ToJSON(a)
}

// MarshalJSON returns a JSON byte representation of the Args
func (args Args) MarshalJSON() ([]byte, error) {
if len(args.fields) == 0 {
return []byte{}, nil
}
return json.Marshal(args.fields)
}

// ToJSON returns the Args as a JSON encoded string
func ToJSON(a Args) (string, error) {
if a.Len() == 0 {
return "", nil
}

buf, err := json.Marshal(a.fields)
if err != nil {
return "", err
}
return string(buf), nil
buf, err := json.Marshal(a)
return string(buf), err
}

// ToParamWithVersion packs the Args into a string for easy transport from client to server.
// The generated string will depend on the specified version (corresponding to the API version).
// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
// then the encoded format will use an older legacy format where the values are a
// list of strings, instead of a set.
//
// Deprecated: Use ToJSON
func ToParamWithVersion(version string, a Args) (string, error) {
// this way we don't URL encode {}, just empty space
if a.Len() == 0 {
return "", nil
}

// for daemons older than v1.10, filter must be of the form map[string][]string
var buf []byte
var err error
if version != "" && versions.LessThan(version, "1.22") {
buf, err = json.Marshal(convertArgsToSlice(a.fields))
} else {
buf, err = json.Marshal(a.fields)
buf, err := json.Marshal(convertArgsToSlice(a.fields))
return string(buf), err
}
if err != nil {
return "", err
}
return string(buf), nil

return ToJSON(a)
}

// FromParam unpacks the filter Args.
// FromParam decodes a JSON encoded string into Args
//
// Deprecated: use FromJSON
func FromParam(p string) (Args, error) {
if len(p) == 0 {
return NewArgs(), nil
}

r := strings.NewReader(p)
d := json.NewDecoder(r)

m := map[string]map[string]bool{}
if err := d.Decode(&m); err != nil {
r.Seek(0, 0)

// Allow parsing old arguments in slice format.
// Because other libraries might be sending them in this format.
deprecated := map[string][]string{}
if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
m = deprecatedArgs(deprecated)
} else {
return NewArgs(), err
}
}
return Args{m}, nil
return FromJSON(p)
}

// Get returns the list of values associates with a field.
// It returns a slice of strings to keep backwards compatibility with old code.
func (filters Args) Get(field string) []string {
values := filters.fields[field]
// FromJSON decodes a JSON encoded string into Args
func FromJSON(p string) (Args, error) {
args := NewArgs()

if p == "" {
return args, nil
}

raw := []byte(p)
err := json.Unmarshal(raw, &args)
if err == nil {
return args, nil
}

// Fallback to parsing arguments in the legacy slice format
deprecated := map[string][]string{}
if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
return args, err
}

args.fields = deprecatedArgs(deprecated)
return args, nil
}

// UnmarshalJSON populates the Args from JSON encode bytes
func (args Args) UnmarshalJSON(raw []byte) error {
if len(raw) == 0 {
return nil
}
return json.Unmarshal(raw, &args.fields)
}

// Get returns the list of values associated with the key
func (args Args) Get(key string) []string {
values := args.fields[key]
if values == nil {
return make([]string, 0)
}
@@ -130,37 +159,34 @@ func (filters Args) Get(field string) []string {
return slice
}

// Add adds a new value to a filter field.
func (filters Args) Add(name, value string) {
if _, ok := filters.fields[name]; ok {
filters.fields[name][value] = true
// Add a new value to the set of values
func (args Args) Add(key, value string) {
if _, ok := args.fields[key]; ok {
args.fields[key][value] = true
} else {
filters.fields[name] = map[string]bool{value: true}
args.fields[key] = map[string]bool{value: true}
}
}

// Del removes a value from a filter field.
func (filters Args) Del(name, value string) {
if _, ok := filters.fields[name]; ok {
delete(filters.fields[name], value)
if len(filters.fields[name]) == 0 {
delete(filters.fields, name)
// Del removes a value from the set
func (args Args) Del(key, value string) {
if _, ok := args.fields[key]; ok {
delete(args.fields[key], value)
if len(args.fields[key]) == 0 {
delete(args.fields, key)
}
}
}

// Len returns the number of fields in the arguments.
func (filters Args) Len() int {
return len(filters.fields)
// Len returns the number of keys in the mapping
func (args Args) Len() int {
return len(args.fields)
}

// MatchKVList returns true if the values for the specified field matches the ones
// from the sources.
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
// field is 'label' and sources are {'label1': '1', 'label2': '2'}
// it returns true.
func (filters Args) MatchKVList(field string, sources map[string]string) bool {
fieldValues := filters.fields[field]
// MatchKVList returns true if all the pairs in sources exist as key=value
// pairs in the mapping at key, or if there are no values at key.
func (args Args) MatchKVList(key string, sources map[string]string) bool {
fieldValues := args.fields[key]

//do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
@@ -171,8 +197,8 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool {
return false
}

for name2match := range fieldValues {
testKV := strings.SplitN(name2match, "=", 2)
for value := range fieldValues {
testKV := strings.SplitN(value, "=", 2)

v, ok := sources[testKV[0]]
if !ok {
@@ -186,16 +212,13 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool {
return true
}

// Match returns true if the values for the specified field matches the source string
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
// field is 'image.name' and source is 'ubuntu'
// it returns true.
func (filters Args) Match(field, source string) bool {
if filters.ExactMatch(field, source) {
// Match returns true if any of the values at key match the source string
func (args Args) Match(field, source string) bool {
if args.ExactMatch(field, source) {
return true
}

fieldValues := filters.fields[field]
fieldValues := args.fields[field]
for name2match := range fieldValues {
match, err := regexp.MatchString(name2match, source)
if err != nil {
@@ -208,9 +231,9 @@ func (filters Args) Match(field, source string) bool {
return false
}

// ExactMatch returns true if the source matches exactly one of the filters.
func (filters Args) ExactMatch(field, source string) bool {
fieldValues, ok := filters.fields[field]
// ExactMatch returns true if the source matches exactly one of the values.
func (args Args) ExactMatch(key, source string) bool {
fieldValues, ok := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
if !ok || len(fieldValues) == 0 {
return true
@@ -220,14 +243,15 @@ func (filters Args) ExactMatch(field, source string) bool {
return fieldValues[source]
}

// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
func (filters Args) UniqueExactMatch(field, source string) bool {
fieldValues := filters.fields[field]
// UniqueExactMatch returns true if there is only one value and the source
// matches exactly the value.
func (args Args) UniqueExactMatch(key, source string) bool {
fieldValues := args.fields[key]
//do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}
if len(filters.fields[field]) != 1 {
if len(args.fields[key]) != 1 {
return false
}

@@ -235,14 +259,14 @@ func (filters Args) UniqueExactMatch(field, source string) bool {
return fieldValues[source]
}

// FuzzyMatch returns true if the source matches exactly one of the filters,
// or the source has one of the filters as a prefix.
func (filters Args) FuzzyMatch(field, source string) bool {
if filters.ExactMatch(field, source) {
// FuzzyMatch returns true if the source matches exactly one value, or the
// source has one of the values as a prefix.
func (args Args) FuzzyMatch(key, source string) bool {
if args.ExactMatch(key, source) {
return true
}

fieldValues := filters.fields[field]
fieldValues := args.fields[key]
for prefix := range fieldValues {
if strings.HasPrefix(source, prefix) {
return true
@@ -251,9 +275,17 @@ func (filters Args) FuzzyMatch(field, source string) bool {
return false
}

// Include returns true if the name of the field to filter is in the filters.
func (filters Args) Include(field string) bool {
_, ok := filters.fields[field]
// Include returns true if the key exists in the mapping
//
// Deprecated: use Contains
func (args Args) Include(field string) bool {
_, ok := args.fields[field]
return ok
}

// Contains returns true if the key exists in the mapping
func (args Args) Contains(field string) bool {
_, ok := args.fields[field]
return ok
}

@@ -265,10 +297,10 @@ func (e invalidFilter) Error() string {

func (invalidFilter) InvalidParameter() {}

// Validate ensures that all the fields in the filter are valid.
// It returns an error as soon as it finds an invalid field.
func (filters Args) Validate(accepted map[string]bool) error {
for name := range filters.fields {
// Validate compared the set of accepted keys against the keys in the mapping.
// An error is returned if any mapping keys are not in the accepted set.
func (args Args) Validate(accepted map[string]bool) error {
for name := range args.fields {
if !accepted[name] {
return invalidFilter(name)
}
@@ -276,13 +308,14 @@ func (filters Args) Validate(accepted map[string]bool) error {
return nil
}

// WalkValues iterates over the list of filtered values for a field.
// It stops the iteration if it finds an error and it returns that error.
func (filters Args) WalkValues(field string, op func(value string) error) error {
if _, ok := filters.fields[field]; !ok {
// WalkValues iterates over the list of values for a key in the mapping and calls
// op() for each value. If op returns an error the iteration stops and the
// error is returned.
func (args Args) WalkValues(field string, op func(value string) error) error {
if _, ok := args.fields[field]; !ok {
return nil
}
for v := range filters.fields[field] {
for v := range args.fields[field] {
if err := op(v); err != nil {
return err
}
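For orientation, here is a minimal sketch of how the reworked filters API reads once this change is vendored, using only functions that appear in the diff above (NewArgs, Arg, Add, Contains, ExactMatch, ToJSON, FromJSON, Get); the import path follows the api/types/filters location shown in the new example_test.go:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Build a filter set with the constructor-style API added in this change.
	args := filters.NewArgs(
		filters.Arg("label", "env=prod"),
		filters.Arg("dangling", "true"),
	)
	args.Add("label", "team=core")

	fmt.Println(args.Contains("label"))              // true
	fmt.Println(args.ExactMatch("dangling", "true")) // true

	// Encode for transport and decode again with the new JSON helpers.
	encoded, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}
	decoded, err := filters.FromJSON(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Get("label")) // both label values, in unspecified order
}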
@@ -3,6 +3,9 @@ package filters
import (
"errors"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestParseArgs(t *testing.T) {
@@ -16,23 +19,18 @@ func TestParseArgs(t *testing.T) {
args = NewArgs()
err error
)

for i := range flagArgs {
args, err = ParseFlag(flagArgs[i], args)
if err != nil {
t.Errorf("failed to parse %s: %s", flagArgs[i], err)
}
}
if len(args.Get("created")) != 1 {
t.Error("failed to set this arg")
}
if len(args.Get("image.name")) != 2 {
t.Error("the args should have collapsed")
require.NoError(t, err)
}
assert.Len(t, args.Get("created"), 1)
assert.Len(t, args.Get("image.name"), 2)
}

func TestParseArgsEdgeCase(t *testing.T) {
var filters Args
args, err := ParseFlag("", filters)
var args Args
args, err := ParseFlag("", args)
if err != nil {
t.Fatal(err)
}
@@ -184,7 +182,7 @@ func TestArgsMatchKVList(t *testing.T) {
}

for args, field := range matches {
if args.MatchKVList(field, sources) != true {
if !args.MatchKVList(field, sources) {
t.Fatalf("Expected true for %v on %v, got false", sources, args)
}
}
@@ -204,7 +202,7 @@ func TestArgsMatchKVList(t *testing.T) {
}

for args, field := range differs {
if args.MatchKVList(field, sources) != false {
if args.MatchKVList(field, sources) {
t.Fatalf("Expected false for %v on %v, got true", sources, args)
}
}
@@ -233,9 +231,8 @@ func TestArgsMatch(t *testing.T) {
}

for args, field := range matches {
if args.Match(field, source) != true {
t.Fatalf("Expected true for %v on %v, got false", source, args)
}
assert.True(t, args.Match(field, source),
"Expected field %s to match %s", field, source)
}

differs := map[*Args]string{
@@ -258,9 +255,8 @@ func TestArgsMatch(t *testing.T) {
}

for args, field := range differs {
if args.Match(field, source) != false {
t.Fatalf("Expected false for %v on %v, got true", source, args)
}
assert.False(t, args.Match(field, source),
"Expected field %s to not match %s", field, source)
}
}
@@ -29,10 +29,8 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
}

var format string
var parseInLocation bool

// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)

if strings.Contains(value, ".") {
if parseInLocation {
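The hunk above only merges the declaration and assignment of parseInLocation; the heuristic itself is unchanged. A standalone sketch of that check, copied from the expression in the diff and run against illustrative inputs:

package main

import (
	"fmt"
	"strings"
)

// parseInLocation mirrors the check in GetTimestamp: values that carry explicit
// zone information (a 'Z', a '+' offset, or three '-' characters, i.e. a full
// date plus a negative offset) are parsed as absolute times; everything else
// is parsed in the local location.
func parseInLocation(value string) bool {
	return !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
}

func main() {
	fmt.Println(parseInLocation("2017-08-01T10:00:00Z"))      // false: explicit UTC
	fmt.Println(parseInLocation("2017-08-01T10:00:00+02:00")) // false: explicit offset
	fmt.Println(parseInLocation("10:00:00"))                  // true: no zone info
}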
@@ -12,6 +12,7 @@ import (
"github.com/docker/docker/api/types/container"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/containerfs"
"golang.org/x/net/context"
)

@@ -24,7 +25,7 @@ const (
// instructions in the builder.
type Source interface {
// Root returns root path for accessing source
Root() string
Root() containerfs.ContainerFS
// Close allows to signal that the filesystem tree won't be used anymore.
// For Context implementations using a temporary directory, it is recommended to
// delete the temporary directory in Close().
@@ -99,7 +100,7 @@ type Image interface {
// ReleaseableLayer is an image layer that can be mounted and released
type ReleaseableLayer interface {
Release() error
Mount() (string, error)
Mount() (containerfs.ContainerFS, error)
Commit(platform string) (ReleaseableLayer, error)
DiffID() layer.DiffID
}
@@ -34,10 +34,10 @@ func TestBuilderFlags(t *testing.T) {
t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err)
}

if flStr1.IsUsed() == true {
if flStr1.IsUsed() {
t.Fatal("Test3 - str1 was not used!")
}
if flBool1.IsUsed() == true {
if flBool1.IsUsed() {
t.Fatal("Test3 - bool1 was not used!")
}

@@ -58,10 +58,10 @@ func TestBuilderFlags(t *testing.T) {
if flBool1.IsTrue() {
t.Fatal("Bool1 was supposed to default to: false")
}
if flStr1.IsUsed() == true {
if flStr1.IsUsed() {
t.Fatal("Str1 was not used!")
}
if flBool1.IsUsed() == true {
if flBool1.IsUsed() {
t.Fatal("Bool1 was not used!")
}
@@ -17,8 +17,6 @@ import (
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/stringid"
@@ -50,21 +48,21 @@ type SessionGetter interface {

// BuildManager is shared across all Builder objects
type BuildManager struct {
archiver *archive.Archiver
backend builder.Backend
pathCache pathCache // TODO: make this persistent
sg SessionGetter
fsCache *fscache.FSCache
idMappings *idtools.IDMappings
backend builder.Backend
pathCache pathCache // TODO: make this persistent
sg SessionGetter
fsCache *fscache.FSCache
}

// NewBuildManager creates a BuildManager
func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, idMappings *idtools.IDMappings) (*BuildManager, error) {
bm := &BuildManager{
backend: b,
pathCache: &syncmap.Map{},
sg: sg,
archiver: chrootarchive.NewArchiver(idMappings),
fsCache: fsCache,
backend: b,
pathCache: &syncmap.Map{},
sg: sg,
idMappings: idMappings,
fsCache: fsCache,
}
if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil {
return nil, err
@@ -114,7 +112,7 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
ProgressWriter: config.ProgressWriter,
Backend: bm.backend,
PathCache: bm.pathCache,
Archiver: bm.archiver,
IDMappings: bm.idMappings,
Platform: dockerfile.Platform,
}

@@ -160,7 +158,7 @@ type builderOptions struct {
Backend builder.Backend
ProgressWriter backend.ProgressWriter
PathCache pathCache
Archiver *archive.Archiver
IDMappings *idtools.IDMappings
Platform string
}

@@ -177,7 +175,7 @@ type Builder struct {
docker builder.Backend
clientCtx context.Context

archiver *archive.Archiver
idMappings *idtools.IDMappings
buildStages *buildStages
disableCommit bool
buildArgs *buildArgs
@@ -219,7 +217,7 @@ func newBuilder(clientCtx context.Context, options builderOptions) *Builder {
Aux: options.ProgressWriter.AuxFormatter,
Output: options.ProgressWriter.Output,
docker: options.Backend,
archiver: options.Archiver,
idMappings: options.IDMappings,
buildArgs: newBuildArgs(config.BuildArgs),
buildStages: newBuildStages(),
imageSources: newImageSources(clientCtx, options),
@ -1,12 +1,15 @@
|
||||
package dockerfile
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@ -14,16 +17,18 @@ import (
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/docker/pkg/urlutil"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const unnamedFilename = "__unnamed__"
|
||||
|
||||
type pathCache interface {
|
||||
Load(key interface{}) (value interface{}, ok bool)
|
||||
Store(key, value interface{})
|
||||
@ -32,14 +37,14 @@ type pathCache interface {
|
||||
// copyInfo is a data object which stores the metadata about each source file in
|
||||
// a copyInstruction
|
||||
type copyInfo struct {
|
||||
root string
|
||||
root containerfs.ContainerFS
|
||||
path string
|
||||
hash string
|
||||
noDecompress bool
|
||||
}
|
||||
|
||||
func (c copyInfo) fullPath() (string, error) {
|
||||
return symlink.FollowSymlinkInScope(filepath.Join(c.root, c.path), c.root)
|
||||
return c.root.ResolveScopedPath(c.path, true)
|
||||
}
|
||||
|
||||
func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
|
||||
@ -68,6 +73,7 @@ type copier struct {
|
||||
pathCache pathCache
|
||||
download sourceDownloader
|
||||
tmpPaths []string
|
||||
platform string
|
||||
}
|
||||
|
||||
func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
|
||||
@ -76,6 +82,7 @@ func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, i
|
||||
pathCache: req.builder.pathCache,
|
||||
download: download,
|
||||
imageSource: imageSource,
|
||||
platform: req.builder.platform,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -83,14 +90,14 @@ func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstr
	inst := copyInstruction{cmdName: cmdName}
	last := len(args) - 1

	// Work in daemon-specific filepath semantics
	inst.dest = filepath.FromSlash(args[last])

	infos, err := o.getCopyInfosForSourcePaths(args[0:last])
	// Work in platform-specific filepath semantics
	inst.dest = fromSlash(args[last], o.platform)
	separator := string(separator(o.platform))
	infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest)
	if err != nil {
		return inst, errors.Wrapf(err, "%s failed", cmdName)
	}
	if len(infos) > 1 && !strings.HasSuffix(inst.dest, string(os.PathSeparator)) {
	if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) {
		return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}
	inst.infos = infos
@@ -99,10 +106,11 @@ func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstr

// getCopyInfosForSourcePaths iterates over the source files and calculates the info
// needed to copy (e.g. hash value if cached)
func (o *copier) getCopyInfosForSourcePaths(sources []string) ([]copyInfo, error) {
// The dest is used in case source is URL (and ends with "/")
func (o *copier) getCopyInfosForSourcePaths(sources []string, dest string) ([]copyInfo, error) {
	var infos []copyInfo
	for _, orig := range sources {
		subinfos, err := o.getCopyInfoForSourcePath(orig)
		subinfos, err := o.getCopyInfoForSourcePath(orig, dest)
		if err != nil {
			return nil, err
		}
@@ -115,15 +123,24 @@ func (o *copier) getCopyInfosForSourcePaths(sources []string) ([]copyInfo, error
	return infos, nil
}

func (o *copier) getCopyInfoForSourcePath(orig string) ([]copyInfo, error) {
func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error) {
	if !urlutil.IsURL(orig) {
		return o.calcCopyInfo(orig, true)
	}

	remote, path, err := o.download(orig)
	if err != nil {
		return nil, err
	}
	o.tmpPaths = append(o.tmpPaths, remote.Root())
	// If path == "" then we are unable to determine filename from src
	// We have to make sure dest is available
	if path == "" {
		if strings.HasSuffix(dest, "/") {
			return nil, errors.Errorf("cannot determine filename for source %s", orig)
		}
		path = unnamedFilename
	}
	o.tmpPaths = append(o.tmpPaths, remote.Root().Path())

	hash, err := remote.Hash(path)
	ci := newCopyInfoFromSource(remote, path, hash)
@@ -143,14 +160,6 @@ func (o *copier) Cleanup() {
// TODO: allowWildcards can probably be removed by refactoring this function further.
func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) {
	imageSource := o.imageSource
	if err := validateCopySourcePath(imageSource, origPath); err != nil {
		return nil, err
	}

	// Work in daemon-specific OS filepath semantics
	origPath = filepath.FromSlash(origPath)
	origPath = strings.TrimPrefix(origPath, string(os.PathSeparator))
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// TODO: do this when creating copier. Requires validateCopySourcePath
	// (and others below) to be aware of the different sources. Why is it only
@@ -167,8 +176,20 @@ func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo,
		return nil, errors.Errorf("missing build context")
	}

	root := o.source.Root()

	if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil {
		return nil, err
	}

	// Work in source OS specific filepath semantics
	// For LCOW, this is NOT the daemon OS.
	origPath = root.FromSlash(origPath)
	origPath = strings.TrimPrefix(origPath, string(root.Separator()))
	origPath = strings.TrimPrefix(origPath, "."+string(root.Separator()))

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
	if allowWildcards && containsWildcards(origPath, root.OS()) {
		return o.copyWithWildcards(origPath)
	}

@@ -200,6 +221,19 @@ func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo,
	return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil
}

func containsWildcards(name, platform string) bool {
	isWindows := platform == "windows"
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' && !isWindows {
			i++
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}

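A quick sketch of what the new platform argument changes in wildcard detection; the literals are illustrative, the behaviour follows directly from containsWildcards above:

	// On Windows '\' is a path separator, so the '*' stays a live wildcard;
	// on any other platform '\' escapes the character that follows it.
	containsWildcards(`foo\*`, "windows") // true
	containsWildcards(`foo\*`, "linux")   // false
	containsWildcards("foo*", "linux")    // true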
func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
	if im != nil {
		o.pathCache.Store(im.ImageID()+path, hash)
@@ -207,12 +241,13 @@ func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
}

func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
	root := o.source.Root()
	var copyInfos []copyInfo
	if err := filepath.Walk(o.source.Root(), func(path string, info os.FileInfo, err error) error {
	if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := remotecontext.Rel(o.source.Root(), path)
		rel, err := remotecontext.Rel(root, path)
		if err != nil {
			return err
		}
@@ -220,7 +255,7 @@ func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
		if rel == "." {
			return nil
		}
		if match, _ := filepath.Match(origPath, rel); !match {
		if match, _ := root.Match(origPath, rel); !match {
			return nil
		}

@@ -262,7 +297,7 @@ func walkSource(source builder.Source, origPath string) ([]string, error) {
	}
	// Must be a dir
	var subfiles []string
	err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error {
	err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
@@ -301,22 +336,40 @@ func errOnSourceDownload(_ string) (builder.Source, string, error) {
	return nil, "", errors.New("source can't be a URL for COPY")
}

func getFilenameForDownload(path string, resp *http.Response) string {
	// Guess filename based on source
	if path != "" && !strings.HasSuffix(path, "/") {
		if filename := filepath.Base(filepath.FromSlash(path)); filename != "" {
			return filename
		}
	}

	// Guess filename based on Content-Disposition
	if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" {
		if _, params, err := mime.ParseMediaType(contentDisposition); err == nil {
			if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") {
				if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" {
					return filename
				}
			}
		}
	}
	return ""
}

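A minimal usage sketch for getFilenameForDownload; the header value is an assumption for illustration, the results follow from the code above:

	resp := &http.Response{Header: http.Header{}}
	resp.Header.Set("Content-Disposition", `attachment; filename="layer.tar.gz"`)

	getFilenameForDownload("/downloads/app.bin", resp) // "app.bin" (taken from the URL path)
	getFilenameForDownload("/downloads/", resp)        // "layer.tar.gz" (falls back to Content-Disposition)
	getFilenameForDownload("/", &http.Response{Header: http.Header{}}) // "" (downloadSource then uses __unnamed__)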
func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) {
	u, err := url.Parse(srcURL)
	if err != nil {
		return
	}
	filename := filepath.Base(filepath.FromSlash(u.Path)) // Ensure in platform semantics
	if filename == "" {
		err = errors.Errorf("cannot determine filename from url: %s", u)
		return
	}

	resp, err := remotecontext.GetWithStatusError(srcURL)
	if err != nil {
		return
	}

	filename := getFilenameForDownload(u.Path, resp)

	// Prepare file in a tmp dir
	tmpDir, err := ioutils.TempDir("", "docker-remote")
	if err != nil {
@@ -327,7 +380,13 @@ func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote b
			os.RemoveAll(tmpDir)
		}
	}()
	tmpFileName := filepath.Join(tmpDir, filename)
	// If filename is empty, the returned filename will be "" but
	// the tmp filename will be created as "__unnamed__"
	tmpFileName := filename
	if filename == "" {
		tmpFileName = unnamedFilename
	}
	tmpFileName = filepath.Join(tmpDir, tmpFileName)
	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return
@@ -363,14 +422,19 @@ func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote b
		return
	}

	lc, err := remotecontext.NewLazySource(tmpDir)
	lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir))
	return lc, filename, err
}

type copyFileOptions struct {
	decompress bool
	archiver   *archive.Archiver
	chownPair  idtools.IDPair
	archiver   Archiver
}

type copyEndpoint struct {
	driver containerfs.Driver
	path   string
}

func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error {
@@ -378,6 +442,7 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions)
	if err != nil {
		return err
	}

	destPath, err := dest.fullPath()
	if err != nil {
		return err
@@ -385,59 +450,90 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions)

	archiver := options.archiver

	src, err := os.Stat(srcPath)
	srcEndpoint := &copyEndpoint{driver: source.root, path: srcPath}
	destEndpoint := &copyEndpoint{driver: dest.root, path: destPath}

	src, err := source.root.Stat(srcPath)
	if err != nil {
		return errors.Wrapf(err, "source path not found")
	}
	if src.IsDir() {
		return copyDirectory(archiver, srcPath, destPath, options.chownPair)
		return copyDirectory(archiver, srcEndpoint, destEndpoint, options.chownPair)
	}
	if options.decompress && archive.IsArchivePath(srcPath) && !source.noDecompress {
	if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress {
		return archiver.UntarPath(srcPath, destPath)
	}

	destExistsAsDir, err := isExistingDirectory(destPath)
	destExistsAsDir, err := isExistingDirectory(destEndpoint)
	if err != nil {
		return err
	}
	// dest.path must be used because destPath has already been cleaned of any
	// trailing slash
	if endsInSlash(dest.path) || destExistsAsDir {
	if endsInSlash(dest.root, dest.path) || destExistsAsDir {
		// source.path must be used to get the correct filename when the source
		// is a symlink
		destPath = filepath.Join(destPath, filepath.Base(source.path))
		destPath = dest.root.Join(destPath, source.root.Base(source.path))
		destEndpoint = &copyEndpoint{driver: dest.root, path: destPath}
	}
	return copyFile(archiver, srcPath, destPath, options.chownPair)
	return copyFile(archiver, srcEndpoint, destEndpoint, options.chownPair)
}

func copyDirectory(archiver *archive.Archiver, source, dest string, chownPair idtools.IDPair) error {
func isArchivePath(driver containerfs.ContainerFS, path string) bool {
	file, err := driver.Open(path)
	if err != nil {
		return false
	}
	defer file.Close()
	rdr, err := archive.DecompressStream(file)
	if err != nil {
		return false
	}
	r := tar.NewReader(rdr)
	_, err = r.Next()
	return err == nil
}

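A usage sketch for isArchivePath; the context root and file path are hypothetical:

	// Only plain files that really decompress into a readable tar stream are
	// auto-extracted by ADD; everything else is copied as-is.
	root := containerfs.NewLocalContainerFS("/tmp/build-context")
	if isArchivePath(root, "/tmp/build-context/rootfs.tar.gz") {
		// performCopyForInfo will hand this file to archiver.UntarPath
	}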
func copyDirectory(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
	destExists, err := isExistingDirectory(dest)
	if err != nil {
		return errors.Wrapf(err, "failed to query destination path")
	}
	if err := archiver.CopyWithTar(source, dest); err != nil {

	if err := archiver.CopyWithTar(source.path, dest.path); err != nil {
		return errors.Wrapf(err, "failed to copy directory")
	}
	return fixPermissions(source, dest, chownPair, !destExists)
	// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
	return fixPermissions(source.path, dest.path, chownPair, !destExists)
}

func copyFile(archiver *archive.Archiver, source, dest string, chownPair idtools.IDPair) error {
	if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest), 0755, chownPair); err != nil {
		return errors.Wrapf(err, "failed to create new directory")
func copyFile(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
	if runtime.GOOS == "windows" && dest.driver.OS() == "linux" {
		// LCOW
		if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil {
			return errors.Wrapf(err, "failed to create new directory")
		}
	} else {
		if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, chownPair); err != nil {
			// Normal containers
			return errors.Wrapf(err, "failed to create new directory")
		}
	}
	if err := archiver.CopyFileWithTar(source, dest); err != nil {

	if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil {
		return errors.Wrapf(err, "failed to copy file")
	}
	return fixPermissions(source, dest, chownPair, false)
	// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
	return fixPermissions(source.path, dest.path, chownPair, false)
}

func endsInSlash(path string) bool {
	return strings.HasSuffix(path, string(os.PathSeparator))
func endsInSlash(driver containerfs.Driver, path string) bool {
	return strings.HasSuffix(path, string(driver.Separator()))
}

// isExistingDirectory returns true if the path exists and is a directory
func isExistingDirectory(path string) (bool, error) {
	destStat, err := os.Stat(path)
func isExistingDirectory(point *copyEndpoint) (bool, error) {
	destStat, err := point.driver.Stat(point.path)
	switch {
	case os.IsNotExist(err):
		return false, nil

@ -1,8 +1,10 @@
|
||||
package dockerfile
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/gotestyourself/gotestyourself/fs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
@ -36,10 +38,110 @@ func TestIsExistingDirectory(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, testcase := range testcases {
|
||||
result, err := isExistingDirectory(testcase.path)
|
||||
result, err := isExistingDirectory(©Endpoint{driver: containerfs.NewLocalDriver(), path: testcase.path})
|
||||
if !assert.NoError(t, err) {
|
||||
continue
|
||||
}
|
||||
assert.Equal(t, testcase.expected, result, testcase.doc)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetFilenameForDownload(t *testing.T) {
|
||||
var testcases = []struct {
|
||||
path string
|
||||
disposition string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
path: "http://www.example.com/",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
path: "http://www.example.com/xyz",
|
||||
expected: "xyz",
|
||||
},
|
||||
{
|
||||
path: "http://www.example.com/xyz.html",
|
||||
expected: "xyz.html",
|
||||
},
|
||||
{
|
||||
path: "http://www.example.com/xyz/",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
path: "http://www.example.com/xyz/uvw",
|
||||
expected: "uvw",
|
||||
},
|
||||
{
|
||||
path: "http://www.example.com/xyz/uvw.html",
|
||||
expected: "uvw.html",
|
||||
},
|
||||
{
|
||||
path: "http://www.example.com/xyz/uvw/",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
path: "/",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
path: "/xyz",
|
||||
expected: "xyz",
|
||||
},
|
||||
{
|
||||
path: "/xyz.html",
|
||||
expected: "xyz.html",
|
||||
},
|
||||
{
|
||||
path: "/xyz/",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
path: "/xyz/",
|
||||
disposition: "attachment; filename=xyz.html",
|
||||
expected: "xyz.html",
|
||||
},
|
||||
{
|
||||
disposition: "",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
disposition: "attachment; filename=xyz",
|
||||
expected: "xyz",
|
||||
},
|
||||
{
|
||||
disposition: "attachment; filename=xyz.html",
|
||||
expected: "xyz.html",
|
||||
},
|
||||
{
|
||||
disposition: "attachment; filename=\"xyz\"",
|
||||
expected: "xyz",
|
||||
},
|
||||
{
|
||||
disposition: "attachment; filename=\"xyz.html\"",
|
||||
expected: "xyz.html",
|
||||
},
|
||||
{
|
||||
disposition: "attachment; filename=\"/xyz.html\"",
|
||||
expected: "xyz.html",
|
||||
},
|
||||
{
|
||||
disposition: "attachment; filename=\"/xyz/uvw\"",
|
||||
expected: "uvw",
|
||||
},
|
||||
{
|
||||
disposition: "attachment; filename=\"Naïve file.txt\"",
|
||||
expected: "Naïve file.txt",
|
||||
},
|
||||
}
|
||||
for _, testcase := range testcases {
|
||||
resp := http.Response{
|
||||
Header: make(map[string][]string),
|
||||
}
|
||||
if testcase.disposition != "" {
|
||||
resp.Header.Add("Content-Disposition", testcase.disposition)
|
||||
}
|
||||
filename := getFilenameForDownload(testcase.path, &resp)
|
||||
assert.Equal(t, testcase.expected, filename)
|
||||
}
|
||||
}
|
||||
|
||||
@ -6,6 +6,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
)
|
||||
|
||||
@ -15,7 +16,8 @@ func fixPermissions(source, destination string, rootIDs idtools.IDPair, override
|
||||
err error
|
||||
)
|
||||
if !overrideSkip {
|
||||
skipChownRoot, err = isExistingDirectory(destination)
|
||||
destEndpoint := ©Endpoint{driver: containerfs.NewLocalDriver(), path: destination}
|
||||
skipChownRoot, err = isExistingDirectory(destEndpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -40,3 +42,7 @@ func fixPermissions(source, destination string, rootIDs idtools.IDPair, override
|
||||
return os.Lchown(fullpath, rootIDs.UID, rootIDs.GID)
|
||||
})
|
||||
}
|
||||
|
||||
func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -1,8 +1,38 @@
|
||||
package dockerfile
|
||||
|
||||
import "github.com/docker/docker/pkg/idtools"
|
||||
import (
|
||||
"errors"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
)
|
||||
|
||||
func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error {
|
||||
// chown is not supported on Windows
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error {
|
||||
// validate windows paths from other images + LCOW
|
||||
if imageSource == nil || platform != "windows" {
|
||||
return nil
|
||||
}
|
||||
|
||||
origPath = filepath.FromSlash(origPath)
|
||||
p := strings.ToLower(filepath.Clean(origPath))
|
||||
if !filepath.IsAbs(p) {
|
||||
if filepath.VolumeName(p) != "" {
|
||||
if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
|
||||
p = p[:len(p)-1]
|
||||
}
|
||||
p += "\\"
|
||||
} else {
|
||||
p = filepath.Join("c:\\", p)
|
||||
}
|
||||
}
|
||||
if _, blacklisted := pathBlacklist[p]; blacklisted {
|
||||
return errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -9,6 +9,7 @@ import (
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/builder/dockerfile/parser"
|
||||
"github.com/docker/docker/builder/remotecontext"
|
||||
"github.com/docker/docker/internal/testutil"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
)
|
||||
@ -197,14 +198,6 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) {
|
||||
shlex: shlex,
|
||||
source: context,
|
||||
}
|
||||
state, err = b.dispatch(opts)
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("No error when executing test %s", testCase.name)
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), testCase.expectedError) {
|
||||
t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", testCase.expectedError, err.Error())
|
||||
}
|
||||
|
||||
_, err = b.dispatch(opts)
|
||||
testutil.ErrorContains(t, err, testCase.expectedError)
|
||||
}
|
||||
|
||||
@ -7,6 +7,9 @@ import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -15,13 +18,69 @@ import (
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
lcUser "github.com/opencontainers/runc/libcontainer/user"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// For Windows only
var pathBlacklist = map[string]bool{
	"c:\\":        true,
	"c:\\windows": true,
}

// Archiver defines an interface for copying files from one destination to
// another using Tar/Untar.
type Archiver interface {
	TarUntar(src, dst string) error
	UntarPath(src, dst string) error
	CopyWithTar(src, dst string) error
	CopyFileWithTar(src, dst string) error
	IDMappings() *idtools.IDMappings
}

// The builder will use the following interfaces if the container fs implements
// these for optimized copies to and from the container.
type extractor interface {
	ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error
}

type archiver interface {
	ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error)
}

// helper functions to get tar/untar func
func untarFunc(i interface{}) containerfs.UntarFunc {
	if ea, ok := i.(extractor); ok {
		return ea.ExtractArchive
	}
	return chrootarchive.Untar
}

func tarFunc(i interface{}) containerfs.TarFunc {
	if ap, ok := i.(archiver); ok {
		return ap.ArchivePath
	}
	return archive.TarWithOptions
}

func (b *Builder) getArchiver(src, dst containerfs.Driver) Archiver {
	t, u := tarFunc(src), untarFunc(dst)
	return &containerfs.Archiver{
		SrcDriver:     src,
		DstDriver:     dst,
		Tar:           t,
		Untar:         u,
		IDMappingsVar: b.idMappings,
	}
}

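For context, a sketch of how performCopy is expected to wire this up per copy source (identifiers match the surrounding diff; treat it as illustrative rather than the exact call site):

	// One archiver per source/destination pair: the tar side comes from the
	// source driver and the untar side from the destination driver, so a
	// copy can cross filesystem implementations (e.g. LCOW).
	archiver := b.getArchiver(info.root, destInfo.root)
	if err := archiver.CopyWithTar(srcPath, destPath); err != nil {
		return errors.Wrap(err, "failed to copy files")
	}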
func (b *Builder) commit(dispatchState *dispatchState, comment string) error {
|
||||
if b.disableCommit {
|
||||
return nil
|
||||
@ -131,28 +190,29 @@ func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to get destination image %q", state.imageID)
|
||||
}
|
||||
destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount)
|
||||
|
||||
destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount, b.platform)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
chownPair := b.archiver.IDMappings.RootPair()
|
||||
chownPair := b.idMappings.RootPair()
|
||||
// if a chown was requested, perform the steps to get the uid, gid
|
||||
// translated (if necessary because of user namespaces), and replace
|
||||
// the root pair with the chown pair for copy operations
|
||||
if inst.chownStr != "" {
|
||||
chownPair, err = parseChownFlag(inst.chownStr, destInfo.root, b.archiver.IDMappings)
|
||||
chownPair, err = parseChownFlag(inst.chownStr, destInfo.root.Path(), b.idMappings)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to convert uid/gid chown string to host mapping")
|
||||
}
|
||||
}
|
||||
|
||||
opts := copyFileOptions{
|
||||
decompress: inst.allowLocalDecompression,
|
||||
archiver: b.archiver,
|
||||
chownPair: chownPair,
|
||||
}
|
||||
for _, info := range inst.infos {
|
||||
opts := copyFileOptions{
|
||||
decompress: inst.allowLocalDecompression,
|
||||
archiver: b.getArchiver(info.root, destInfo.root),
|
||||
chownPair: chownPair,
|
||||
}
|
||||
if err := performCopyForInfo(destInfo, info, opts); err != nil {
|
||||
return errors.Wrapf(err, "failed to copy files")
|
||||
}
|
||||
@ -206,10 +266,7 @@ func lookupUser(userStr, filepath string) (int, error) {
|
||||
return uid, nil
|
||||
}
|
||||
users, err := lcUser.ParsePasswdFileFilter(filepath, func(u lcUser.User) bool {
|
||||
if u.Name == userStr {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return u.Name == userStr
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
@ -228,10 +285,7 @@ func lookupGroup(groupStr, filepath string) (int, error) {
|
||||
return gid, nil
|
||||
}
|
||||
groups, err := lcUser.ParseGroupFileFilter(filepath, func(g lcUser.Group) bool {
|
||||
if g.Name == groupStr {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
return g.Name == groupStr
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
@ -242,10 +296,10 @@ func lookupGroup(groupStr, filepath string) (int, error) {
|
||||
return groups[0].Gid, nil
|
||||
}
|
||||
|
||||
func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount) (copyInfo, error) {
|
||||
func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount, platform string) (copyInfo, error) {
|
||||
// Twiddle the destination when it's a relative path - meaning, make it
|
||||
// relative to the WORKINGDIR
|
||||
dest, err := normalizeDest(workingDir, inst.dest)
|
||||
dest, err := normalizeDest(workingDir, inst.dest, platform)
|
||||
if err != nil {
|
||||
return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName)
|
||||
}
|
||||
@ -258,6 +312,63 @@ func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMo
|
||||
return newCopyInfoFromSource(destMount, dest, ""), nil
|
||||
}
|
||||
|
||||
// normalizeDest normalizes the destination of a COPY/ADD command in a
// platform semantically consistent way.
func normalizeDest(workingDir, requested string, platform string) (string, error) {
	dest := fromSlash(requested, platform)
	endsInSlash := strings.HasSuffix(dest, string(separator(platform)))

	if platform != "windows" {
		if !path.IsAbs(requested) {
			dest = path.Join("/", filepath.ToSlash(workingDir), dest)
			// Make sure we preserve any trailing slash
			if endsInSlash {
				dest += "/"
			}
		}
		return dest, nil
	}

	// We are guaranteed that the working directory is already consistent,
	// However, Windows also has, for now, the limitation that ADD/COPY can
	// only be done to the system drive, not any drives that might be present
	// as a result of a bind mount.
	//
	// So... if the path requested is Linux-style absolute (/foo or \\foo),
	// we assume it is the system drive. If it is a Windows-style absolute
	// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
	// strip any configured working directories drive letter so that it
	// can be subsequently legitimately converted to a Windows volume-style
	// pathname.

	// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
	// we only want to validate where the DriveColon part has been supplied.
	if filepath.IsAbs(dest) {
		if strings.ToUpper(string(dest[0])) != "C" {
			return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)")
		}
		dest = dest[2:] // Strip the drive letter
	}

	// Cannot handle relative where WorkingDir is not the system drive.
	if len(workingDir) > 0 {
		if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) {
			return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir)
		}
		if !system.IsAbs(dest) {
			if string(workingDir[0]) != "C" {
				return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive")
			}
			dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest)
			// Make sure we preserve any trailing slash
			if endsInSlash {
				dest += string(os.PathSeparator)
			}
		}
	}
	return dest, nil
}

// For backwards compat, if there's just one info then use it as the
// cache look-up string, otherwise hash 'em all into one
func getSourceHashFromInfos(infos []copyInfo) string {
@@ -403,3 +514,19 @@ func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConf
		ExtraHosts: options.ExtraHosts,
	}
}

// fromSlash works like filepath.FromSlash but with a given OS platform field
func fromSlash(path, platform string) string {
	if platform == "windows" {
		return strings.Replace(path, "/", "\\", -1)
	}
	return path
}

// separator returns an OS path separator for the given OS platform
func separator(platform string) byte {
	if platform == "windows" {
		return '\\'
	}
	return '/'
}
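The behaviour of these two helpers, spelled out; the values follow directly from the code above:

	fromSlash("a/b/c", "windows") // `a\b\c`
	fromSlash("a/b/c", "linux")   // "a/b/c" (any non-Windows platform is returned untouched)
	string(separator("windows"))  // `\`
	string(separator("linux"))    // "/"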
@ -1,42 +0,0 @@
|
||||
// +build !windows
|
||||
|
||||
package dockerfile
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
// normalizeDest normalizes the destination of a COPY/ADD command in a
|
||||
// platform semantically consistent way.
|
||||
func normalizeDest(workingDir, requested string) (string, error) {
|
||||
dest := filepath.FromSlash(requested)
|
||||
endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator))
|
||||
if !system.IsAbs(requested) {
|
||||
dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest)
|
||||
// Make sure we preserve any trailing slash
|
||||
if endsInSlash {
|
||||
dest += string(os.PathSeparator)
|
||||
}
|
||||
}
|
||||
return dest, nil
|
||||
}
|
||||
|
||||
func containsWildcards(name string) bool {
|
||||
for i := 0; i < len(name); i++ {
|
||||
ch := name[i]
|
||||
if ch == '\\' {
|
||||
i++
|
||||
} else if ch == '*' || ch == '?' || ch == '[' {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func validateCopySourcePath(imageSource *imageMount, origPath string) error {
|
||||
return nil
|
||||
}
|
||||
@ -1,95 +0,0 @@
|
||||
package dockerfile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// normalizeDest normalizes the destination of a COPY/ADD command in a
|
||||
// platform semantically consistent way.
|
||||
func normalizeDest(workingDir, requested string) (string, error) {
|
||||
dest := filepath.FromSlash(requested)
|
||||
endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator))
|
||||
|
||||
// We are guaranteed that the working directory is already consistent,
|
||||
// However, Windows also has, for now, the limitation that ADD/COPY can
|
||||
// only be done to the system drive, not any drives that might be present
|
||||
// as a result of a bind mount.
|
||||
//
|
||||
// So... if the path requested is Linux-style absolute (/foo or \\foo),
|
||||
// we assume it is the system drive. If it is a Windows-style absolute
|
||||
// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
|
||||
// strip any configured working directories drive letter so that it
|
||||
// can be subsequently legitimately converted to a Windows volume-style
|
||||
// pathname.
|
||||
|
||||
// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
|
||||
// we only want to validate where the DriveColon part has been supplied.
|
||||
if filepath.IsAbs(dest) {
|
||||
if strings.ToUpper(string(dest[0])) != "C" {
|
||||
return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)")
|
||||
}
|
||||
dest = dest[2:] // Strip the drive letter
|
||||
}
|
||||
|
||||
// Cannot handle relative where WorkingDir is not the system drive.
|
||||
if len(workingDir) > 0 {
|
||||
if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) {
|
||||
return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir)
|
||||
}
|
||||
if !system.IsAbs(dest) {
|
||||
if string(workingDir[0]) != "C" {
|
||||
return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive")
|
||||
}
|
||||
dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest)
|
||||
// Make sure we preserve any trailing slash
|
||||
if endsInSlash {
|
||||
dest += string(os.PathSeparator)
|
||||
}
|
||||
}
|
||||
}
|
||||
return dest, nil
|
||||
}
|
||||
|
||||
func containsWildcards(name string) bool {
|
||||
for i := 0; i < len(name); i++ {
|
||||
ch := name[i]
|
||||
if ch == '*' || ch == '?' || ch == '[' {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var pathBlacklist = map[string]bool{
|
||||
"c:\\": true,
|
||||
"c:\\windows": true,
|
||||
}
|
||||
|
||||
func validateCopySourcePath(imageSource *imageMount, origPath string) error {
|
||||
// validate windows paths from other images
|
||||
if imageSource == nil {
|
||||
return nil
|
||||
}
|
||||
origPath = filepath.FromSlash(origPath)
|
||||
p := strings.ToLower(filepath.Clean(origPath))
|
||||
if !filepath.IsAbs(p) {
|
||||
if filepath.VolumeName(p) != "" {
|
||||
if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
|
||||
p = p[:len(p)-1]
|
||||
}
|
||||
p += "\\"
|
||||
} else {
|
||||
p = filepath.Join("c:\\", p)
|
||||
}
|
||||
}
|
||||
if _, blacklisted := pathBlacklist[p]; blacklisted {
|
||||
return errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -40,7 +40,7 @@ func TestNormalizeDest(t *testing.T) {
|
||||
}
|
||||
for _, testcase := range tests {
|
||||
msg := fmt.Sprintf("Input: %s, %s", testcase.current, testcase.requested)
|
||||
actual, err := normalizeDest(testcase.current, testcase.requested)
|
||||
actual, err := normalizeDest(testcase.current, testcase.requested, "windows")
|
||||
if testcase.etext == "" {
|
||||
if !assert.NoError(t, err, msg) {
|
||||
continue
|
||||
|
||||
@ -10,6 +10,7 @@ import (
|
||||
"github.com/docker/docker/builder"
|
||||
containerpkg "github.com/docker/docker/container"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@ -117,8 +118,8 @@ func (l *mockLayer) Release() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *mockLayer) Mount() (string, error) {
|
||||
return "mountPath", nil
|
||||
func (l *mockLayer) Mount() (containerfs.ContainerFS, error) {
|
||||
return containerfs.NewLocalContainerFS("mountPath"), nil
|
||||
}
|
||||
|
||||
func (l *mockLayer) Commit(string) (builder.ReleaseableLayer, error) {
|
||||
|
||||
@ -143,7 +143,7 @@ func (d *Directive) possibleParserDirective(line string) error {
|
||||
if len(tecMatch) != 0 {
|
||||
for i, n := range tokenEscapeCommand.SubexpNames() {
|
||||
if n == "escapechar" {
|
||||
if d.escapeSeen == true {
|
||||
if d.escapeSeen {
|
||||
return errors.New("only one escape parser directive can be used")
|
||||
}
|
||||
d.escapeSeen = true
|
||||
@ -159,7 +159,7 @@ func (d *Directive) possibleParserDirective(line string) error {
|
||||
if len(tpcMatch) != 0 {
|
||||
for i, n := range tokenPlatformCommand.SubexpNames() {
|
||||
if n == "platform" {
|
||||
if d.platformSeen == true {
|
||||
if d.platformSeen {
|
||||
return errors.New("only one platform parser directive can be used")
|
||||
}
|
||||
d.platformSeen = true
|
||||
|
||||
@ -36,25 +36,25 @@ func TestFSCache(t *testing.T) {
|
||||
src1, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data", "bar"})
|
||||
assert.Nil(t, err)
|
||||
|
||||
dt, err := ioutil.ReadFile(filepath.Join(src1.Root(), "foo"))
|
||||
dt, err := ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo"))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, string(dt), "data")
|
||||
|
||||
// same id doesn't recalculate anything
|
||||
src2, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data2", "bar"})
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, src1.Root(), src2.Root())
|
||||
assert.Equal(t, src1.Root().Path(), src2.Root().Path())
|
||||
|
||||
dt, err = ioutil.ReadFile(filepath.Join(src1.Root(), "foo"))
|
||||
dt, err = ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo"))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, string(dt), "data")
|
||||
assert.Nil(t, src2.Close())
|
||||
|
||||
src3, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo2", "data2", "bar"})
|
||||
assert.Nil(t, err)
|
||||
assert.NotEqual(t, src1.Root(), src3.Root())
|
||||
assert.NotEqual(t, src1.Root().Path(), src3.Root().Path())
|
||||
|
||||
dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo2"))
|
||||
dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo2"))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, string(dt), "data2")
|
||||
|
||||
@ -71,12 +71,12 @@ func TestFSCache(t *testing.T) {
|
||||
// new upload with the same shared key should overwrite
|
||||
src4, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo3", "data3", "bar"})
|
||||
assert.Nil(t, err)
|
||||
assert.NotEqual(t, src1.Root(), src3.Root())
|
||||
assert.NotEqual(t, src1.Root().Path(), src3.Root().Path())
|
||||
|
||||
dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo3"))
|
||||
dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo3"))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, string(dt), "data3")
|
||||
assert.Equal(t, src4.Root(), src3.Root())
|
||||
assert.Equal(t, src4.Root().Path(), src3.Root().Path())
|
||||
assert.Nil(t, src4.Close())
|
||||
|
||||
s, err = fscache.DiskUsage()
|
||||
|
||||
@ -8,19 +8,19 @@ import (
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"github.com/docker/docker/pkg/tarsum"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type archiveContext struct {
|
||||
root string
|
||||
root containerfs.ContainerFS
|
||||
sums tarsum.FileInfoSums
|
||||
}
|
||||
|
||||
func (c *archiveContext) Close() error {
|
||||
return os.RemoveAll(c.root)
|
||||
return c.root.RemoveAll(c.root.Path())
|
||||
}
|
||||
|
||||
func convertPathError(err error, cleanpath string) error {
|
||||
@ -52,7 +52,8 @@ func FromArchive(tarStream io.Reader) (builder.Source, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tsc := &archiveContext{root: root}
|
||||
// Assume local file system. Since it's coming from a tar file.
|
||||
tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)}
|
||||
|
||||
// Make sure we clean-up upon error. In the happy case the caller
|
||||
// is expected to manage the clean-up
|
||||
@ -82,7 +83,7 @@ func FromArchive(tarStream io.Reader) (builder.Source, error) {
|
||||
return tsc, nil
|
||||
}
|
||||
|
||||
func (c *archiveContext) Root() string {
|
||||
func (c *archiveContext) Root() containerfs.ContainerFS {
|
||||
return c.root
|
||||
}
|
||||
|
||||
@ -91,7 +92,7 @@ func (c *archiveContext) Remove(path string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.RemoveAll(fullpath)
|
||||
return c.root.RemoveAll(fullpath)
|
||||
}
|
||||
|
||||
func (c *archiveContext) Hash(path string) (string, error) {
|
||||
@ -100,7 +101,7 @@ func (c *archiveContext) Hash(path string) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
rel, err := filepath.Rel(c.root, fullpath)
|
||||
rel, err := c.root.Rel(c.root.Path(), fullpath)
|
||||
if err != nil {
|
||||
return "", convertPathError(err, cleanpath)
|
||||
}
|
||||
@ -115,13 +116,13 @@ func (c *archiveContext) Hash(path string) (string, error) {
|
||||
return path, nil // backwards compat TODO: see if really needed
|
||||
}
|
||||
|
||||
func normalize(path, root string) (cleanPath, fullPath string, err error) {
|
||||
cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:]
|
||||
fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(root, path), root)
|
||||
func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) {
|
||||
cleanPath = root.Clean(string(root.Separator()) + path)[1:]
|
||||
fullPath, err = root.ResolveScopedPath(path, true)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath)
|
||||
}
|
||||
if _, err := os.Lstat(fullPath); err != nil {
|
||||
if _, err := root.Lstat(fullPath); err != nil {
|
||||
return "", "", errors.WithStack(convertPathError(err, path))
|
||||
}
|
||||
return
|
||||
|
||||
@ -5,15 +5,14 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/continuity/driver"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/dockerfile/parser"
|
||||
"github.com/docker/docker/builder/dockerignore"
|
||||
"github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"github.com/docker/docker/pkg/urlutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
@ -157,12 +156,12 @@ func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) {
|
||||
return parser.Parse(br)
|
||||
}
|
||||
|
||||
func openAt(remote builder.Source, path string) (*os.File, error) {
|
||||
func openAt(remote builder.Source, path string) (driver.File, error) {
|
||||
fullPath, err := FullPath(remote, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return os.Open(fullPath)
|
||||
return remote.Root().Open(fullPath)
|
||||
}
|
||||
|
||||
// StatAt is a helper for calling Stat on a path from a source
|
||||
@ -171,12 +170,12 @@ func StatAt(remote builder.Source, path string) (os.FileInfo, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return os.Stat(fullPath)
|
||||
return remote.Root().Stat(fullPath)
|
||||
}
|
||||
|
||||
// FullPath is a helper for getting a full path for a path from a source
|
||||
func FullPath(remote builder.Source, path string) (string, error) {
|
||||
fullPath, err := symlink.FollowSymlinkInScope(filepath.Join(remote.Root(), path), remote.Root())
|
||||
fullPath, err := remote.Root().ResolveScopedPath(path, true)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error
|
||||
}
|
||||
|
||||
@ -5,11 +5,11 @@ import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -21,7 +21,7 @@ const (
|
||||
const shouldStayFilename = "should_stay"
|
||||
|
||||
func extractFilenames(files []os.FileInfo) []string {
|
||||
filenames := make([]string, len(files), len(files))
|
||||
filenames := make([]string, len(files))
|
||||
|
||||
for i, file := range files {
|
||||
filenames[i] = file.Name()
|
||||
@ -53,7 +53,7 @@ func checkDirectory(t *testing.T, dir string, expectedFiles []string) {
|
||||
}
|
||||
|
||||
func executeProcess(t *testing.T, contextDir string) {
|
||||
modifiableCtx := &stubRemote{root: contextDir}
|
||||
modifiableCtx := &stubRemote{root: containerfs.NewLocalContainerFS(contextDir)}
|
||||
|
||||
err := removeDockerfile(modifiableCtx, builder.DefaultDockerfileName)
|
||||
|
||||
@ -105,19 +105,19 @@ func TestProcessShouldLeaveAllFiles(t *testing.T) {
|
||||
|
||||
// TODO: remove after moving to a separate pkg
|
||||
type stubRemote struct {
|
||||
root string
|
||||
root containerfs.ContainerFS
|
||||
}
|
||||
|
||||
func (r *stubRemote) Hash(path string) (string, error) {
|
||||
return "", errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (r *stubRemote) Root() string {
|
||||
func (r *stubRemote) Root() containerfs.ContainerFS {
|
||||
return r.root
|
||||
}
|
||||
func (r *stubRemote) Close() error {
|
||||
return errors.New("not implemented")
|
||||
}
|
||||
func (r *stubRemote) Remove(p string) error {
|
||||
return os.Remove(filepath.Join(r.root, p))
|
||||
return r.root.Remove(r.root.Join(r.root.Path(), p))
|
||||
}
|
||||
|
||||
@ -3,11 +3,10 @@ package remotecontext
|
||||
import (
|
||||
"encoding/hex"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/pools"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@ -15,7 +14,7 @@ import (
|
||||
// NewLazySource creates a new LazyContext. LazyContext defines a hashed build
|
||||
// context based on a root directory. Individual files are hashed first time
|
||||
// they are asked. It is not safe to call methods of LazyContext concurrently.
|
||||
func NewLazySource(root string) (builder.Source, error) {
|
||||
func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) {
|
||||
return &lazySource{
|
||||
root: root,
|
||||
sums: make(map[string]string),
|
||||
@ -23,11 +22,11 @@ func NewLazySource(root string) (builder.Source, error) {
|
||||
}
|
||||
|
||||
type lazySource struct {
|
||||
root string
|
||||
root containerfs.ContainerFS
|
||||
sums map[string]string
|
||||
}
|
||||
|
||||
func (c *lazySource) Root() string {
|
||||
func (c *lazySource) Root() containerfs.ContainerFS {
|
||||
return c.root
|
||||
}
|
||||
|
||||
@ -41,7 +40,7 @@ func (c *lazySource) Hash(path string) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
fi, err := os.Lstat(fullPath)
|
||||
fi, err := c.root.Lstat(fullPath)
|
||||
if err != nil {
|
||||
return "", errors.WithStack(err)
|
||||
}
|
||||
@ -63,13 +62,13 @@ func (c *lazySource) Hash(path string) (string, error) {
|
||||
}
|
||||
|
||||
func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) {
|
||||
p := filepath.Join(c.root, relPath)
|
||||
p := c.root.Join(c.root.Path(), relPath)
|
||||
h, err := NewFileHash(p, relPath, fi)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "failed to create hash for %s", relPath)
|
||||
}
|
||||
if fi.Mode().IsRegular() && fi.Size() > 0 {
|
||||
f, err := os.Open(p)
|
||||
f, err := c.root.Open(p)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "failed to open %s", relPath)
|
||||
}
|
||||
@ -85,10 +84,10 @@ func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error)
|
||||
|
||||
// Rel makes a path relative to base path. Same as `filepath.Rel` but can also
|
||||
// handle UUID paths in windows.
|
||||
func Rel(basepath, targpath string) (string, error) {
|
||||
func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) {
|
||||
// filepath.Rel can't handle UUID paths in windows
|
||||
if runtime.GOOS == "windows" {
|
||||
pfx := basepath + `\`
|
||||
if basepath.OS() == "windows" {
|
||||
pfx := basepath.Path() + `\`
|
||||
if strings.HasPrefix(targpath, pfx) {
|
||||
p := strings.TrimPrefix(targpath, pfx)
|
||||
if p == "" {
|
||||
@ -97,5 +96,5 @@ func Rel(basepath, targpath string) (string, error) {
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
return filepath.Rel(basepath, targpath)
|
||||
return basepath.Rel(basepath.Path(), targpath)
|
||||
}
|
||||
|
||||
@ -116,7 +116,7 @@ func inspectResponse(ct string, r io.Reader, clen int64) (string, io.ReadCloser,
|
||||
plen = maxPreambleLength
|
||||
}
|
||||
|
||||
preamble := make([]byte, plen, plen)
|
||||
preamble := make([]byte, plen)
|
||||
rlen, err := r.Read(preamble)
|
||||
if rlen == 0 {
|
||||
return ct, ioutil.NopCloser(r), errors.New("empty response")
|
||||
|
||||
@ -3,11 +3,11 @@ package remotecontext
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
iradix "github.com/hashicorp/go-immutable-radix"
|
||||
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/tonistiigi/fsutil"
|
||||
)
|
||||
@ -19,7 +19,7 @@ type hashed interface {
|
||||
// CachableSource is a source that contains cache records for its contents
|
||||
type CachableSource struct {
|
||||
mu sync.Mutex
|
||||
root string
|
||||
root containerfs.ContainerFS
|
||||
tree *iradix.Tree
|
||||
txn *iradix.Txn
|
||||
}
|
||||
@ -28,7 +28,7 @@ type CachableSource struct {
|
||||
func NewCachableSource(root string) *CachableSource {
|
||||
ts := &CachableSource{
|
||||
tree: iradix.New(),
|
||||
root: root,
|
||||
root: containerfs.NewLocalContainerFS(root),
|
||||
}
|
||||
return ts
|
||||
}
|
||||
@ -67,7 +67,7 @@ func (cs *CachableSource) Scan() error {
|
||||
return err
|
||||
}
|
||||
txn := iradix.New().Txn()
|
||||
err = filepath.Walk(cs.root, func(path string, info os.FileInfo, err error) error {
|
||||
err = cs.root.Walk(cs.root.Path(), func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to walk %s", path)
|
||||
}
|
||||
@ -134,12 +134,12 @@ func (cs *CachableSource) Close() error {
|
||||
}
|
||||
|
||||
func (cs *CachableSource) normalize(path string) (cleanpath, fullpath string, err error) {
|
||||
cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:]
|
||||
fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(cs.root, path), cs.root)
|
||||
cleanpath = cs.root.Clean(string(cs.root.Separator()) + path)[1:]
|
||||
fullpath, err = cs.root.ResolveScopedPath(path, true)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("Forbidden path outside the context: %s (%s)", path, fullpath)
|
||||
}
|
||||
_, err = os.Lstat(fullpath)
|
||||
_, err = cs.root.Lstat(fullpath)
|
||||
if err != nil {
|
||||
return "", "", convertPathError(err, path)
|
||||
}
|
||||
@ -149,19 +149,16 @@ func (cs *CachableSource) normalize(path string) (cleanpath, fullpath string, er
|
||||
// Hash returns a hash for a single file in the source
|
||||
func (cs *CachableSource) Hash(path string) (string, error) {
|
||||
n := cs.getRoot()
|
||||
sum := ""
|
||||
// TODO: check this for symlinks
|
||||
v, ok := n.Get([]byte(path))
|
||||
if !ok {
|
||||
sum = path
|
||||
} else {
|
||||
sum = v.(*fileInfo).sum
|
||||
return path, nil
|
||||
}
|
||||
return sum, nil
|
||||
return v.(*fileInfo).sum, nil
|
||||
}
|
||||
|
||||
// Root returns a root directory for the source
|
||||
func (cs *CachableSource) Root() string {
|
||||
func (cs *CachableSource) Root() containerfs.ContainerFS {
|
||||
return cs.root
|
||||
}
|
||||
|
||||
|
||||
@ -94,7 +94,7 @@ func (this *TarsumBackup) GoString() string {
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&remotecontext.TarsumBackup{")
|
||||
keysForHashes := make([]string, 0, len(this.Hashes))
|
||||
for k, _ := range this.Hashes {
|
||||
for k := range this.Hashes {
|
||||
keysForHashes = append(keysForHashes, k)
|
||||
}
|
||||
github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)
|
||||
@ -133,7 +133,7 @@ func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) {
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Hashes) > 0 {
|
||||
for k, _ := range m.Hashes {
|
||||
for k := range m.Hashes {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
v := m.Hashes[k]
|
||||
@ -211,7 +211,7 @@ func (this *TarsumBackup) String() string {
|
||||
return "nil"
|
||||
}
|
||||
keysForHashes := make([]string, 0, len(this.Hashes))
|
||||
for k, _ := range this.Hashes {
|
||||
for k := range this.Hashes {
|
||||
keysForHashes = append(keysForHashes, k)
|
||||
}
|
||||
github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)
|
||||
|
||||
@ -35,7 +35,7 @@ func TestCloseRootDirectory(t *testing.T) {
|
||||
t.Fatalf("Error while executing Close: %s", err)
|
||||
}
|
||||
|
||||
_, err = os.Stat(src.Root())
|
||||
_, err = os.Stat(src.Root().Path())
|
||||
|
||||
if !os.IsNotExist(err) {
|
||||
t.Fatal("Directory should not exist at this point")
|
||||
|
||||
@ -2,7 +2,6 @@ package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
@ -20,10 +19,7 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options
|
||||
|
||||
resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
|
||||
if err != nil {
|
||||
if resp.statusCode == http.StatusNotFound {
|
||||
return checkpoints, containerNotFoundError{container}
|
||||
}
|
||||
return checkpoints, err
|
||||
return checkpoints, wrapResponseError(err, resp, "container", container)
|
||||
}
|
||||
|
||||
err = json.NewDecoder(resp.body).Decode(&checkpoints)
|
||||
|
||||
@ -1,10 +1,6 @@
|
||||
/*
|
||||
Package client is a Go client for the Docker Engine API.
|
||||
|
||||
The "docker" command uses this package to communicate with the daemon. It can also
|
||||
be used by your own Go applications to do anything the command-line interface does
|
||||
- running containers, pulling images, managing swarms, etc.
|
||||
|
||||
For more information about the Engine API, see the documentation:
|
||||
https://docs.docker.com/engine/reference/api/
|
||||
|
||||
@ -160,7 +156,7 @@ func NewEnvClient() (*Client, error) {
|
||||
// highly recommended that you set a version or your client may break if the
|
||||
// server is upgraded.
|
||||
func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
|
||||
proto, addr, basePath, err := ParseHost(host)
|
||||
hostURL, err := ParseHostURL(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -171,7 +167,7 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map
|
||||
}
|
||||
} else {
|
||||
transport := new(http.Transport)
|
||||
sockets.ConfigureTransport(transport, proto, addr)
|
||||
sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
|
||||
client = &http.Client{
|
||||
Transport: transport,
|
||||
CheckRedirect: CheckRedirect,
|
||||
@ -189,28 +185,24 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map
|
||||
scheme = "https"
|
||||
}
|
||||
|
||||
// TODO: store URL instead of proto/addr/basePath
|
||||
return &Client{
|
||||
scheme: scheme,
|
||||
host: host,
|
||||
proto: proto,
|
||||
addr: addr,
|
||||
basePath: basePath,
|
||||
proto: hostURL.Scheme,
|
||||
addr: hostURL.Host,
|
||||
basePath: hostURL.Path,
|
||||
client: client,
|
||||
version: version,
|
||||
customHTTPHeaders: httpHeaders,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close ensures that transport.Client is closed
|
||||
// especially needed while using NewClient with *http.Client = nil
|
||||
// for example
|
||||
// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"})
|
||||
// Close the transport used by the client
|
||||
func (cli *Client) Close() error {
|
||||
|
||||
if t, ok := cli.client.Transport.(*http.Transport); ok {
|
||||
t.CloseIdleConnections()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -220,37 +212,27 @@ func (cli *Client) getAPIPath(p string, query url.Values) string {
|
||||
var apiPath string
|
||||
if cli.version != "" {
|
||||
v := strings.TrimPrefix(cli.version, "v")
|
||||
apiPath = path.Join(cli.basePath, "/v"+v+p)
|
||||
apiPath = path.Join(cli.basePath, "/v"+v, p)
|
||||
} else {
|
||||
apiPath = path.Join(cli.basePath, p)
|
||||
}
|
||||
|
||||
u := &url.URL{
|
||||
Path: apiPath,
|
||||
}
|
||||
if len(query) > 0 {
|
||||
u.RawQuery = query.Encode()
|
||||
}
|
||||
return u.String()
|
||||
return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String()
|
||||
}
|
||||
|
||||
// ClientVersion returns the version string associated with this
|
||||
// instance of the Client. Note that this value can be changed
|
||||
// via the DOCKER_API_VERSION env var.
|
||||
// This operation doesn't acquire a mutex.
|
||||
// ClientVersion returns the API version used by this client.
|
||||
func (cli *Client) ClientVersion() string {
|
||||
return cli.version
|
||||
}
|
||||
|
||||
// NegotiateAPIVersion updates the version string associated with this
|
||||
// instance of the Client to match the latest version the server supports
|
||||
// NegotiateAPIVersion queries the API and updates the version to match the
|
||||
// API version. Any errors are silently ignored.
|
||||
func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
|
||||
ping, _ := cli.Ping(ctx)
|
||||
cli.NegotiateAPIVersionPing(ping)
|
||||
}
|
||||
|
||||
// NegotiateAPIVersionPing updates the version string associated with this
|
||||
// instance of the Client to match the latest version the server supports
|
||||
// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion
|
||||
// if the ping version is less than the default version.
|
||||
func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
|
||||
if cli.manualOverride {
|
||||
return
|
||||
@ -272,17 +254,28 @@ func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
|
||||
}
|
||||
}
|
||||
|
||||
// DaemonHost returns the host associated with this instance of the Client.
// This operation doesn't acquire a mutex.
// DaemonHost returns the host address used by the client
func (cli *Client) DaemonHost() string {
return cli.host
}

// ParseHost verifies that the given host strings is valid.
// ParseHost parses a url string, validates the strings is a host url, and returns
// the parsed host as: protocol, address, and base path
// Deprecated: use ParseHostURL
func ParseHost(host string) (string, string, string, error) {
hostURL, err := ParseHostURL(host)
if err != nil {
return "", "", "", err
}
return hostURL.Scheme, hostURL.Host, hostURL.Path, nil
}

// ParseHostURL parses a url string, validates the string is a host url, and
// returns the parsed URL
func ParseHostURL(host string) (*url.URL, error) {
protoAddrParts := strings.SplitN(host, "://", 2)
if len(protoAddrParts) == 1 {
return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
return nil, fmt.Errorf("unable to parse docker host `%s`", host)
}

var basePath string
@ -290,16 +283,19 @@ func ParseHost(host string) (string, string, string, error) {
if proto == "tcp" {
parsed, err := url.Parse("tcp://" + addr)
if err != nil {
return "", "", "", err
return nil, err
}
addr = parsed.Host
basePath = parsed.Path
}
return proto, addr, basePath, nil
return &url.URL{
Scheme: proto,
Host: addr,
Path: basePath,
}, nil
}

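An illustrative call to the new ParseHostURL helper; the host strings are examples only, mirroring the test cases added later in this commit.

package main

import (
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// A tcp host may carry a base path, which ends up in URL.Path and is
	// later used as the API base path.
	u, err := client.ParseHostURL("tcp://localhost:2376/custom")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme, u.Host, u.Path) // tcp localhost:2376 /custom

	// Strings without a "://" separator are rejected.
	if _, err := client.ParseHostURL("localhost:2376"); err != nil {
		fmt.Println(err) // unable to parse docker host `localhost:2376`
	}
}
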
// CustomHTTPHeaders returns the custom http headers associated with this
// instance of the Client. This operation doesn't acquire a mutex.
// CustomHTTPHeaders returns the custom http headers stored by the client.
func (cli *Client) CustomHTTPHeaders() map[string]string {
m := make(map[string]string)
for k, v := range cli.customHTTPHeaders {
@ -308,8 +304,7 @@ func (cli *Client) CustomHTTPHeaders() map[string]string {
return m
}

// SetCustomHTTPHeaders updates the custom http headers associated with this
// instance of the Client. This operation doesn't acquire a mutex.
// SetCustomHTTPHeaders that will be set on every HTTP request made by the client.
func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
cli.customHTTPHeaders = headers
}

@ -11,6 +11,7 @@ import (
|
||||
|
||||
"github.com/docker/docker/api"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/internal/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@ -104,11 +105,11 @@ func TestNewEnvClient(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetAPIPath(t *testing.T) {
|
||||
cases := []struct {
|
||||
v string
|
||||
p string
|
||||
q url.Values
|
||||
e string
|
||||
testcases := []struct {
|
||||
version string
|
||||
path string
|
||||
query url.Values
|
||||
expected string
|
||||
}{
|
||||
{"", "/containers/json", nil, "/containers/json"},
|
||||
{"", "/containers/json", url.Values{}, "/containers/json"},
|
||||
@ -122,16 +123,10 @@ func TestGetAPIPath(t *testing.T) {
|
||||
{"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"},
|
||||
}
|
||||
|
||||
for _, cs := range cases {
|
||||
c, err := NewClient("unix:///var/run/docker.sock", cs.v, nil, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
g := c.getAPIPath(cs.p, cs.q)
|
||||
assert.Equal(t, g, cs.e)
|
||||
|
||||
err = c.Close()
|
||||
assert.NoError(t, err)
|
||||
for _, testcase := range testcases {
|
||||
c := Client{version: testcase.version, basePath: "/"}
|
||||
actual := c.getAPIPath(testcase.path, testcase.query)
|
||||
assert.Equal(t, actual, testcase.expected)
|
||||
}
|
||||
}
|
||||
|
||||
@ -152,7 +147,6 @@ func TestParseHost(t *testing.T) {
|
||||
|
||||
for _, cs := range cases {
|
||||
p, a, b, e := ParseHost(cs.host)
|
||||
// if we expected an error to be returned...
|
||||
if cs.err {
|
||||
assert.Error(t, e)
|
||||
}
|
||||
@ -162,6 +156,43 @@ func TestParseHost(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseHostURL(t *testing.T) {
|
||||
testcases := []struct {
|
||||
host string
|
||||
expected *url.URL
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
host: "",
|
||||
expectedErr: "unable to parse docker host",
|
||||
},
|
||||
{
|
||||
host: "foobar",
|
||||
expectedErr: "unable to parse docker host",
|
||||
},
|
||||
{
|
||||
host: "foo://bar",
|
||||
expected: &url.URL{Scheme: "foo", Host: "bar"},
|
||||
},
|
||||
{
|
||||
host: "tcp://localhost:2476",
|
||||
expected: &url.URL{Scheme: "tcp", Host: "localhost:2476"},
|
||||
},
|
||||
{
|
||||
host: "tcp://localhost:2476/path",
|
||||
expected: &url.URL{Scheme: "tcp", Host: "localhost:2476", Path: "/path"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testcase := range testcases {
|
||||
actual, err := ParseHostURL(testcase.host)
|
||||
if testcase.expectedErr != "" {
|
||||
testutil.ErrorContains(t, err, testcase.expectedErr)
|
||||
}
|
||||
assert.Equal(t, testcase.expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewEnvClientSetsDefaultVersion(t *testing.T) {
|
||||
env := envToMap()
|
||||
defer mapToEnv(env)
|
||||
|
||||
@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"

"github.com/docker/docker/api/types/swarm"
"golang.org/x/net/context"
@ -17,10 +16,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C
}
resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
if err != nil {
if resp.statusCode == http.StatusNotFound {
return swarm.Config{}, nil, configNotFoundError{id}
}
return swarm.Config{}, nil, err
return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id)
}
defer ensureReaderClosed(resp)

@ -9,5 +9,5 @@ func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
}
resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
ensureReaderClosed(resp)
return err
return wrapResponseError(err, resp, "config", id)
}

@ -39,7 +39,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option
for _, change := range options.Changes {
query.Add("changes", change)
}
if options.Pause != true {
if !options.Pause {
query.Set("pause", "0")
}

@ -45,7 +45,7 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
if err != nil {
if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
return response, imageNotFoundError{config.Image}
return response, objectNotFoundError{object: "image", id: config.Image}
}
return response, err
}

@ -4,7 +4,6 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"net/url"

"github.com/docker/docker/api/types"
@ -15,10 +14,7 @@ import (
func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
if err != nil {
if serverResp.statusCode == http.StatusNotFound {
return types.ContainerJSON{}, containerNotFoundError{containerID}
}
return types.ContainerJSON{}, err
return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID)
}

var response types.ContainerJSON
@ -35,10 +31,7 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri
}
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
if err != nil {
if serverResp.statusCode == http.StatusNotFound {
return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
}
return types.ContainerJSON{}, nil, err
return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID)
}
defer ensureReaderClosed(serverResp)

@ -23,5 +23,5 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti

resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
ensureReaderClosed(resp)
return err
return wrapResponseError(err, resp, "container", containerID)
}

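With inspect and remove now funnelled through wrapResponseError, callers separate a missing object from other failures via IsErrNotFound; a sketch, using an invented container name:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	err = cli.ContainerRemove(context.Background(), "does-not-exist", types.ContainerRemoveOptions{})
	switch {
	case err == nil:
		fmt.Println("removed")
	case client.IsErrNotFound(err):
		// A 404 now surfaces as "Error: No such container: does-not-exist".
		fmt.Println("nothing to do:", err)
	default:
		fmt.Println("unexpected failure:", err)
	}
}
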
@ -9,6 +9,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@ -17,9 +18,16 @@ func TestContainerRemoveError(t *testing.T) {
|
||||
client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
|
||||
}
|
||||
err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{})
|
||||
if err == nil || err.Error() != "Error response from daemon: Server error" {
|
||||
t.Fatalf("expected a Server Error, got %v", err)
|
||||
assert.EqualError(t, err, "Error response from daemon: Server error")
|
||||
}
|
||||
|
||||
func TestContainerRemoveNotFoundError(t *testing.T) {
|
||||
client := &Client{
|
||||
client: newMockClient(errorMock(http.StatusNotFound, "missing")),
|
||||
}
|
||||
err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{})
|
||||
assert.EqualError(t, err, "Error: No such container: container_id")
|
||||
assert.True(t, IsErrNotFound(err))
|
||||
}
|
||||
|
||||
func TestContainerRemove(t *testing.T) {
|
||||
@ -53,7 +61,5 @@ func TestContainerRemove(t *testing.T) {
|
||||
RemoveVolumes: true,
|
||||
Force: true,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
@ -3,6 +3,8 @@ package client
import (
"fmt"

"net/http"

"github.com/docker/docker/api/types/versions"
"github.com/pkg/errors"
)
@ -36,93 +38,65 @@ type notFound interface {
NotFound() bool // Is the error a NotFound error
}

// IsErrNotFound returns true if the error is caused with an
// object (image, container, network, volume, …) is not found in the docker host.
// IsErrNotFound returns true if the error is a NotFound error, which is returned
// by the API when some object is not found.
func IsErrNotFound(err error) bool {
te, ok := err.(notFound)
return ok && te.NotFound()
}

// imageNotFoundError implements an error returned when an image is not in the docker host.
type imageNotFoundError struct {
imageID string
type objectNotFoundError struct {
object string
id string
}

// NotFound indicates that this error type is of NotFound
func (e imageNotFoundError) NotFound() bool {
func (e objectNotFoundError) NotFound() bool {
return true
}

// Error returns a string representation of an imageNotFoundError
func (e imageNotFoundError) Error() string {
return fmt.Sprintf("Error: No such image: %s", e.imageID)
func (e objectNotFoundError) Error() string {
return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
}

func wrapResponseError(err error, resp serverResponse, object, id string) error {
switch {
case err == nil:
return nil
case resp.statusCode == http.StatusNotFound:
return objectNotFoundError{object: object, id: id}
default:
return err
}
}

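The classification relies only on the small notFound interface, so any error exposing NotFound() bool is recognised. A self-contained illustration of the same pattern; the type and helper names here are local to the example, not part of the package:

package main

import (
	"errors"
	"fmt"
)

// notFound mirrors the unexported interface used by the client package.
type notFound interface {
	NotFound() bool
}

// missingThing plays the role of objectNotFoundError in this sketch.
type missingThing struct{ object, id string }

func (e missingThing) Error() string  { return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) }
func (e missingThing) NotFound() bool { return true }

func isNotFound(err error) bool {
	te, ok := err.(notFound)
	return ok && te.NotFound()
}

func main() {
	fmt.Println(isNotFound(missingThing{"volume", "data"})) // true
	fmt.Println(isNotFound(errors.New("boom")))             // false
}
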
// IsErrImageNotFound returns true if the error is caused
|
||||
// when an image is not found in the docker host.
|
||||
//
|
||||
// Deprecated: Use IsErrNotFound
|
||||
func IsErrImageNotFound(err error) bool {
|
||||
return IsErrNotFound(err)
|
||||
}
|
||||
|
||||
// containerNotFoundError implements an error returned when a container is not in the docker host.
|
||||
type containerNotFoundError struct {
|
||||
containerID string
|
||||
}
|
||||
|
||||
// NotFound indicates that this error type is of NotFound
|
||||
func (e containerNotFoundError) NotFound() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Error returns a string representation of a containerNotFoundError
|
||||
func (e containerNotFoundError) Error() string {
|
||||
return fmt.Sprintf("Error: No such container: %s", e.containerID)
|
||||
}
|
||||
|
||||
// IsErrContainerNotFound returns true if the error is caused
|
||||
// when a container is not found in the docker host.
|
||||
//
|
||||
// Deprecated: Use IsErrNotFound
|
||||
func IsErrContainerNotFound(err error) bool {
|
||||
return IsErrNotFound(err)
|
||||
}
|
||||
|
||||
// networkNotFoundError implements an error returned when a network is not in the docker host.
|
||||
type networkNotFoundError struct {
|
||||
networkID string
|
||||
}
|
||||
|
||||
// NotFound indicates that this error type is of NotFound
|
||||
func (e networkNotFoundError) NotFound() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Error returns a string representation of a networkNotFoundError
|
||||
func (e networkNotFoundError) Error() string {
|
||||
return fmt.Sprintf("Error: No such network: %s", e.networkID)
|
||||
}
|
||||
|
||||
// IsErrNetworkNotFound returns true if the error is caused
|
||||
// when a network is not found in the docker host.
|
||||
//
|
||||
// Deprecated: Use IsErrNotFound
|
||||
func IsErrNetworkNotFound(err error) bool {
|
||||
return IsErrNotFound(err)
|
||||
}
|
||||
|
||||
// volumeNotFoundError implements an error returned when a volume is not in the docker host.
|
||||
type volumeNotFoundError struct {
|
||||
volumeID string
|
||||
}
|
||||
|
||||
// NotFound indicates that this error type is of NotFound
|
||||
func (e volumeNotFoundError) NotFound() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Error returns a string representation of a volumeNotFoundError
|
||||
func (e volumeNotFoundError) Error() string {
|
||||
return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
|
||||
}
|
||||
|
||||
// IsErrVolumeNotFound returns true if the error is caused
|
||||
// when a volume is not found in the docker host.
|
||||
//
|
||||
// Deprecated: Use IsErrNotFound
|
||||
func IsErrVolumeNotFound(err error) bool {
|
||||
return IsErrNotFound(err)
|
||||
}
|
||||
@ -144,70 +118,28 @@ func IsErrUnauthorized(err error) bool {
|
||||
return ok
|
||||
}
|
||||
|
||||
// nodeNotFoundError implements an error returned when a node is not found.
|
||||
type nodeNotFoundError struct {
|
||||
nodeID string
|
||||
}
|
||||
|
||||
// Error returns a string representation of a nodeNotFoundError
|
||||
func (e nodeNotFoundError) Error() string {
|
||||
return fmt.Sprintf("Error: No such node: %s", e.nodeID)
|
||||
}
|
||||
|
||||
// NotFound indicates that this error type is of NotFound
|
||||
func (e nodeNotFoundError) NotFound() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// IsErrNodeNotFound returns true if the error is caused
|
||||
// when a node is not found.
|
||||
//
|
||||
// Deprecated: Use IsErrNotFound
|
||||
func IsErrNodeNotFound(err error) bool {
|
||||
_, ok := err.(nodeNotFoundError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// serviceNotFoundError implements an error returned when a service is not found.
|
||||
type serviceNotFoundError struct {
|
||||
serviceID string
|
||||
}
|
||||
|
||||
// Error returns a string representation of a serviceNotFoundError
|
||||
func (e serviceNotFoundError) Error() string {
|
||||
return fmt.Sprintf("Error: No such service: %s", e.serviceID)
|
||||
}
|
||||
|
||||
// NotFound indicates that this error type is of NotFound
|
||||
func (e serviceNotFoundError) NotFound() bool {
|
||||
return true
|
||||
return IsErrNotFound(err)
|
||||
}
|
||||
|
||||
// IsErrServiceNotFound returns true if the error is caused
|
||||
// when a service is not found.
|
||||
//
|
||||
// Deprecated: Use IsErrNotFound
|
||||
func IsErrServiceNotFound(err error) bool {
|
||||
_, ok := err.(serviceNotFoundError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// taskNotFoundError implements an error returned when a task is not found.
|
||||
type taskNotFoundError struct {
|
||||
taskID string
|
||||
}
|
||||
|
||||
// Error returns a string representation of a taskNotFoundError
|
||||
func (e taskNotFoundError) Error() string {
|
||||
return fmt.Sprintf("Error: No such task: %s", e.taskID)
|
||||
}
|
||||
|
||||
// NotFound indicates that this error type is of NotFound
|
||||
func (e taskNotFoundError) NotFound() bool {
|
||||
return true
|
||||
return IsErrNotFound(err)
|
||||
}
|
||||
|
||||
// IsErrTaskNotFound returns true if the error is caused
|
||||
// when a task is not found.
|
||||
//
|
||||
// Deprecated: Use IsErrNotFound
|
||||
func IsErrTaskNotFound(err error) bool {
|
||||
_, ok := err.(taskNotFoundError)
|
||||
return ok
|
||||
return IsErrNotFound(err)
|
||||
}
|
||||
|
||||
type pluginPermissionDenied struct {
|
||||
@ -234,67 +166,26 @@ func (cli *Client) NewVersionError(APIrequired, feature string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// secretNotFoundError implements an error returned when a secret is not found.
|
||||
type secretNotFoundError struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// Error returns a string representation of a secretNotFoundError
|
||||
func (e secretNotFoundError) Error() string {
|
||||
return fmt.Sprintf("Error: no such secret: %s", e.name)
|
||||
}
|
||||
|
||||
// NotFound indicates that this error type is of NotFound
|
||||
func (e secretNotFoundError) NotFound() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// IsErrSecretNotFound returns true if the error is caused
|
||||
// when a secret is not found.
|
||||
//
|
||||
// Deprecated: Use IsErrNotFound
|
||||
func IsErrSecretNotFound(err error) bool {
|
||||
_, ok := err.(secretNotFoundError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// configNotFoundError implements an error returned when a config is not found.
|
||||
type configNotFoundError struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// Error returns a string representation of a configNotFoundError
|
||||
func (e configNotFoundError) Error() string {
|
||||
return fmt.Sprintf("Error: no such config: %s", e.name)
|
||||
}
|
||||
|
||||
// NotFound indicates that this error type is of NotFound
|
||||
func (e configNotFoundError) NotFound() bool {
|
||||
return true
|
||||
return IsErrNotFound(err)
|
||||
}
|
||||
|
||||
// IsErrConfigNotFound returns true if the error is caused
|
||||
// when a config is not found.
|
||||
//
|
||||
// Deprecated: Use IsErrNotFound
|
||||
func IsErrConfigNotFound(err error) bool {
|
||||
_, ok := err.(configNotFoundError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// pluginNotFoundError implements an error returned when a plugin is not in the docker host.
|
||||
type pluginNotFoundError struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// NotFound indicates that this error type is of NotFound
|
||||
func (e pluginNotFoundError) NotFound() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Error returns a string representation of a pluginNotFoundError
|
||||
func (e pluginNotFoundError) Error() string {
|
||||
return fmt.Sprintf("Error: No such plugin: %s", e.name)
|
||||
return IsErrNotFound(err)
|
||||
}
|
||||
|
||||
// IsErrPluginNotFound returns true if the error is caused
|
||||
// when a plugin is not found in the docker host.
|
||||
//
|
||||
// Deprecated: Use IsErrNotFound
|
||||
func IsErrPluginNotFound(err error) bool {
|
||||
return IsErrNotFound(err)
|
||||
}
|
||||
|
||||
@ -12,7 +12,6 @@ import (
"time"

"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/tlsconfig"
"github.com/docker/go-connections/sockets"
"github.com/pkg/errors"
"golang.org/x/net/context"
@ -71,7 +70,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
timeout := dialer.Timeout

if !dialer.Deadline.IsZero() {
deadlineTimeout := dialer.Deadline.Sub(time.Now())
deadlineTimeout := time.Until(dialer.Deadline)
if timeout == 0 || deadlineTimeout < timeout {
timeout = deadlineTimeout
}
@ -115,7 +114,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con
// from the hostname we're connecting to.
if config.ServerName == "" {
// Make a copy to avoid polluting argument or default.
config = tlsconfig.Clone(config)
config = tlsConfigClone(config)
config.ServerName = hostname
}

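time.Until(t) is shorthand for t.Sub(time.Now()) and has been available since Go 1.8; a quick equivalence check:

package main

import (
	"fmt"
	"time"
)

func main() {
	deadline := time.Now().Add(3 * time.Second)

	// The two expressions compute the same duration, modulo the nanoseconds
	// that pass between the two time.Now() calls.
	a := deadline.Sub(time.Now())
	b := time.Until(deadline)
	fmt.Println(a.Round(time.Millisecond), b.Round(time.Millisecond)) // ~3s ~3s
}
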
@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"golang.org/x/net/context"
|
||||
@ -14,10 +13,7 @@ import (
|
||||
func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) {
|
||||
serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil)
|
||||
if err != nil {
|
||||
if serverResp.statusCode == http.StatusNotFound {
|
||||
return types.ImageInspect{}, nil, imageNotFoundError{imageID}
|
||||
}
|
||||
return types.ImageInspect{}, nil, err
|
||||
return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID)
|
||||
}
|
||||
defer ensureReaderClosed(serverResp)
|
||||
|
||||
|
||||
@ -2,7 +2,6 @@ package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
@ -20,15 +19,12 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type
|
||||
query.Set("noprune", "1")
|
||||
}
|
||||
|
||||
var dels []types.ImageDeleteResponseItem
|
||||
resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
|
||||
if err != nil {
|
||||
if resp.statusCode == http.StatusNotFound {
|
||||
return nil, imageNotFoundError{imageID}
|
||||
}
|
||||
return nil, err
|
||||
return dels, wrapResponseError(err, resp, "image", imageID)
|
||||
}
|
||||
|
||||
var dels []types.ImageDeleteResponseItem
|
||||
err = json.NewDecoder(resp.body).Decode(&dels)
|
||||
ensureReaderClosed(resp)
|
||||
return dels, err
|
||||
|
||||
@ -10,6 +10,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@ -19,20 +20,17 @@ func TestImageRemoveError(t *testing.T) {
|
||||
}
|
||||
|
||||
_, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{})
|
||||
if err == nil || err.Error() != "Error response from daemon: Server error" {
|
||||
t.Fatalf("expected a Server Error, got %v", err)
|
||||
}
|
||||
assert.EqualError(t, err, "Error response from daemon: Server error")
|
||||
}
|
||||
|
||||
func TestImageRemoveImageNotFound(t *testing.T) {
|
||||
client := &Client{
|
||||
client: newMockClient(errorMock(http.StatusNotFound, "Server error")),
|
||||
client: newMockClient(errorMock(http.StatusNotFound, "missing")),
|
||||
}
|
||||
|
||||
_, err := client.ImageRemove(context.Background(), "unknown", types.ImageRemoveOptions{})
|
||||
if err == nil || !IsErrNotFound(err) {
|
||||
t.Fatalf("expected an imageNotFoundError error, got %v", err)
|
||||
}
|
||||
assert.EqualError(t, err, "Error: No such image: unknown")
|
||||
assert.True(t, IsErrNotFound(err))
|
||||
}
|
||||
|
||||
func TestImageRemove(t *testing.T) {
|
||||
|
||||
@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
@ -33,10 +32,7 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string,
|
||||
}
|
||||
resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
|
||||
if err != nil {
|
||||
if resp.statusCode == http.StatusNotFound {
|
||||
return networkResource, nil, networkNotFoundError{networkID}
|
||||
}
|
||||
return networkResource, nil, err
|
||||
return networkResource, nil, wrapResponseError(err, resp, "network", networkID)
|
||||
}
|
||||
defer ensureReaderClosed(resp)
|
||||
|
||||
|
||||
@ -21,20 +21,17 @@ func TestNetworkInspectError(t *testing.T) {
|
||||
}
|
||||
|
||||
_, err := client.NetworkInspect(context.Background(), "nothing", types.NetworkInspectOptions{})
|
||||
if err == nil || err.Error() != "Error response from daemon: Server error" {
|
||||
t.Fatalf("expected a Server Error, got %v", err)
|
||||
}
|
||||
assert.EqualError(t, err, "Error response from daemon: Server error")
|
||||
}
|
||||
|
||||
func TestNetworkInspectContainerNotFound(t *testing.T) {
|
||||
func TestNetworkInspectNotFoundError(t *testing.T) {
|
||||
client := &Client{
|
||||
client: newMockClient(errorMock(http.StatusNotFound, "Server error")),
|
||||
client: newMockClient(errorMock(http.StatusNotFound, "missing")),
|
||||
}
|
||||
|
||||
_, err := client.NetworkInspect(context.Background(), "unknown", types.NetworkInspectOptions{})
|
||||
if err == nil || !IsErrNetworkNotFound(err) {
|
||||
t.Fatalf("expected a networkNotFound error, got %v", err)
|
||||
}
|
||||
assert.EqualError(t, err, "Error: No such network: unknown")
|
||||
assert.True(t, IsErrNotFound(err))
|
||||
}
|
||||
|
||||
func TestNetworkInspect(t *testing.T) {
|
||||
|
||||
@ -6,5 +6,5 @@ import "golang.org/x/net/context"
|
||||
func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
|
||||
resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
|
||||
ensureReaderClosed(resp)
|
||||
return err
|
||||
return wrapResponseError(err, resp, "network", networkID)
|
||||
}
|
||||
|
||||
@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"golang.org/x/net/context"
|
||||
@ -14,10 +13,7 @@ import (
|
||||
func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
|
||||
serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
|
||||
if err != nil {
|
||||
if serverResp.statusCode == http.StatusNotFound {
|
||||
return swarm.Node{}, nil, nodeNotFoundError{nodeID}
|
||||
}
|
||||
return swarm.Node{}, nil, err
|
||||
return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID)
|
||||
}
|
||||
defer ensureReaderClosed(serverResp)
|
||||
|
||||
|
||||
@ -17,5 +17,5 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.
|
||||
|
||||
resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
|
||||
ensureReaderClosed(resp)
|
||||
return err
|
||||
return wrapResponseError(err, resp, "node", nodeID)
|
||||
}
|
||||
|
||||
@ -1,41 +0,0 @@
package client

// parse_logs.go contains utility helpers for getting information out of docker
// log lines. really, it only contains ParseDetails right now. maybe in the
// future there will be some desire to parse log messages back into a struct?
// that would go here if we did

import (
"net/url"
"strings"

"github.com/pkg/errors"
)

// ParseLogDetails takes a details string of key value pairs in the form
// "k=v,l=w", where the keys and values are url query escaped, and each pair
// is separated by a comma, returns a map. returns an error if the details
// string is not in a valid format
// the exact form of details encoding is implemented in
// api/server/httputils/write_log_stream.go
func ParseLogDetails(details string) (map[string]string, error) {
pairs := strings.Split(details, ",")
detailsMap := make(map[string]string, len(pairs))
for _, pair := range pairs {
p := strings.SplitN(pair, "=", 2)
// if there is no equals sign, we will only get 1 part back
if len(p) != 2 {
return nil, errors.New("invalid details format")
}
k, err := url.QueryUnescape(p[0])
if err != nil {
return nil, err
}
v, err := url.QueryUnescape(p[1])
if err != nil {
return nil, err
}
detailsMap[k] = v
}
return detailsMap, nil
}
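
ParseLogDetails is deleted from this package by the hunk above (its destination is not shown in this diff); for reference, a standalone re-illustration of the format it handled, comma-separated and query-escaped key=value pairs:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseDetails re-implements the removed helper for illustration only.
func parseDetails(details string) (map[string]string, error) {
	out := map[string]string{}
	for _, pair := range strings.Split(details, ",") {
		p := strings.SplitN(pair, "=", 2)
		if len(p) != 2 {
			return nil, fmt.Errorf("invalid details format")
		}
		k, err := url.QueryUnescape(p[0])
		if err != nil {
			return nil, err
		}
		v, err := url.QueryUnescape(p[1])
		if err != nil {
			return nil, err
		}
		out[k] = v
	}
	return out, nil
}

func main() {
	m, _ := parseDetails("key+with+spaces=value%3Dequals,asdf%2C=")
	fmt.Println(m["key with spaces"], m["asdf,"]) // prints "value=equals" and an empty string
}
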
@ -1,36 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func TestParseLogDetails(t *testing.T) {
|
||||
testCases := []struct {
|
||||
line string
|
||||
expected map[string]string
|
||||
err error
|
||||
}{
|
||||
{"key=value", map[string]string{"key": "value"}, nil},
|
||||
{"key1=value1,key2=value2", map[string]string{"key1": "value1", "key2": "value2"}, nil},
|
||||
{"key+with+spaces=value%3Dequals,asdf%2C=", map[string]string{"key with spaces": "value=equals", "asdf,": ""}, nil},
|
||||
{"key=,=nothing", map[string]string{"key": "", "": "nothing"}, nil},
|
||||
{"=", map[string]string{"": ""}, nil},
|
||||
{"errors", nil, errors.New("invalid details format")},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
tc := tc // capture range variable
|
||||
t.Run(tc.line, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
res, err := ParseLogDetails(tc.line)
|
||||
if err != nil && (err.Error() != tc.err.Error()) {
|
||||
t.Fatalf("unexpected error parsing logs:\nExpected:\n\t%v\nActual:\n\t%v", tc.err, err)
|
||||
}
|
||||
if !reflect.DeepEqual(tc.expected, res) {
|
||||
t.Errorf("result does not match expected:\nExpected:\n\t%#v\nActual:\n\t%#v", tc.expected, res)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -28,7 +28,5 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
|
||||
}
|
||||
ping.OSType = serverResp.header.Get("OSType")
|
||||
}
|
||||
|
||||
err = cli.checkResponseErr(serverResp)
|
||||
return ping, err
|
||||
return ping, cli.checkResponseErr(serverResp)
|
||||
}
|
||||
|
||||
@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"golang.org/x/net/context"
|
||||
@ -14,10 +13,7 @@ import (
|
||||
func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
|
||||
resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
|
||||
if err != nil {
|
||||
if resp.statusCode == http.StatusNotFound {
|
||||
return nil, nil, pluginNotFoundError{name}
|
||||
}
|
||||
return nil, nil, err
|
||||
return nil, nil, wrapResponseError(err, resp, "plugin", name)
|
||||
}
|
||||
|
||||
defer ensureReaderClosed(resp)
|
||||
|
||||
@ -16,5 +16,5 @@ func (cli *Client) PluginRemove(ctx context.Context, name string, options types.
|
||||
|
||||
resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
|
||||
ensureReaderClosed(resp)
|
||||
return err
|
||||
return wrapResponseError(err, resp, "plugin", name)
|
||||
}
|
||||
|
||||
@ -203,7 +203,7 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error {
|
||||
return err
|
||||
}
|
||||
if len(body) == 0 {
|
||||
return fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
|
||||
return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
|
||||
}
|
||||
|
||||
var ct string
|
||||
|
||||
@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"golang.org/x/net/context"
|
||||
@ -17,10 +16,7 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S
|
||||
}
|
||||
resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
|
||||
if err != nil {
|
||||
if resp.statusCode == http.StatusNotFound {
|
||||
return swarm.Secret{}, nil, secretNotFoundError{id}
|
||||
}
|
||||
return swarm.Secret{}, nil, err
|
||||
return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id)
|
||||
}
|
||||
defer ensureReaderClosed(resp)
|
||||
|
||||
|
||||
@ -9,5 +9,5 @@ func (cli *Client) SecretRemove(ctx context.Context, id string) error {
|
||||
}
|
||||
resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
|
||||
ensureReaderClosed(resp)
|
||||
return err
|
||||
return wrapResponseError(err, resp, "secret", id)
|
||||
}
|
||||
|
||||
@ -3,6 +3,7 @@ package client
import (
"encoding/json"
"fmt"
"strings"

"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
@ -87,19 +88,28 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec,

func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
imageWithDigest := image
var platforms []swarm.Platform
if err != nil {
return "", nil, err
}

imageWithDigest = imageWithDigestString(image, distributionInspect.Descriptor.Digest)
imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest)

if len(distributionInspect.Platforms) > 0 {
platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
for _, p := range distributionInspect.Platforms {
// clear architecture field for arm. This is a temporary patch to address
// https://github.com/docker/swarmkit/issues/2294. The issue is that while
// image manifests report "arm" as the architecture, the node reports
// something like "armv7l" (includes the variant), which causes arm images
// to stop working with swarm mode. This patch removes the architecture
// constraint for arm images to ensure tasks get scheduled.
arch := p.Architecture
if strings.ToLower(arch) == "arm" {
arch = ""
}
platforms = append(platforms, swarm.Platform{
Architecture: p.Architecture,
Architecture: arch,
OS: p.OS,
})
}

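The arm workaround only rewrites the architecture field; a standalone sketch of the same filtering over an invented platform list:

package main

import (
	"fmt"
	"strings"
)

type platform struct{ Architecture, OS string }

func main() {
	reported := []platform{{"amd64", "linux"}, {"arm", "linux"}, {"arm64", "linux"}}

	cleaned := make([]platform, 0, len(reported))
	for _, p := range reported {
		arch := p.Architecture
		// Drop the constraint only for plain "arm", since nodes report
		// variant-qualified values such as "armv7l" that would never match.
		if strings.ToLower(arch) == "arm" {
			arch = ""
		}
		cleaned = append(cleaned, platform{Architecture: arch, OS: p.OS})
	}
	fmt.Println(cleaned) // [{amd64 linux} { linux} {arm64 linux}]
}
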
@ -5,7 +5,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
@ -19,10 +18,7 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string,
|
||||
query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
|
||||
serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
|
||||
if err != nil {
|
||||
if serverResp.statusCode == http.StatusNotFound {
|
||||
return swarm.Service{}, nil, serviceNotFoundError{serviceID}
|
||||
}
|
||||
return swarm.Service{}, nil, err
|
||||
return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID)
|
||||
}
|
||||
defer ensureReaderClosed(serverResp)
|
||||
|
||||
|
||||
@ -6,5 +6,5 @@ import "golang.org/x/net/context"
|
||||
func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
|
||||
resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
|
||||
ensureReaderClosed(resp)
|
||||
return err
|
||||
return wrapResponseError(err, resp, "service", serviceID)
|
||||
}
|
||||
|
||||
@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@ -17,9 +18,17 @@ func TestServiceRemoveError(t *testing.T) {
|
||||
}
|
||||
|
||||
err := client.ServiceRemove(context.Background(), "service_id")
|
||||
if err == nil || err.Error() != "Error response from daemon: Server error" {
|
||||
t.Fatalf("expected a Server Error, got %v", err)
|
||||
assert.EqualError(t, err, "Error response from daemon: Server error")
|
||||
}
|
||||
|
||||
func TestServiceRemoveNotFoundError(t *testing.T) {
|
||||
client := &Client{
|
||||
client: newMockClient(errorMock(http.StatusNotFound, "missing")),
|
||||
}
|
||||
|
||||
err := client.ServiceRemove(context.Background(), "service_id")
|
||||
assert.EqualError(t, err, "Error: No such service: service_id")
|
||||
assert.True(t, IsErrNotFound(err))
|
||||
}
|
||||
|
||||
func TestServiceRemove(t *testing.T) {
|
||||
|
||||
@ -4,10 +4,8 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@ -15,10 +13,7 @@ import (
|
||||
func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
|
||||
serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
|
||||
if err != nil {
|
||||
if serverResp.statusCode == http.StatusNotFound {
|
||||
return swarm.Task{}, nil, taskNotFoundError{taskID}
|
||||
}
|
||||
return swarm.Task{}, nil, err
|
||||
return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID)
|
||||
}
|
||||
defer ensureReaderClosed(serverResp)
|
||||
|
||||
|
||||
11
components/engine/client/tlsconfig_clone.go
Normal file
@ -0,0 +1,11 @@
// +build go1.8

package client

import "crypto/tls"

// tlsConfigClone returns a clone of tls.Config. This function is provided for
// compatibility for go1.7 that doesn't include this method in stdlib.
func tlsConfigClone(c *tls.Config) *tls.Config {
return c.Clone()
}
@ -1,12 +1,12 @@
// +build go1.7,!go1.8

package tlsconfig
package client

import "crypto/tls"

// Clone returns a clone of tls.Config. This function is provided for
// tlsConfigClone returns a clone of tls.Config. This function is provided for
// compatibility for go1.7 that doesn't include this method in stdlib.
func Clone(c *tls.Config) *tls.Config {
func tlsConfigClone(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
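
The go1.8 build of tlsConfigClone just forwards to the stdlib Clone; a small runnable illustration of why the dial path clones before overriding ServerName (hostnames are placeholders):

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	orig := &tls.Config{ServerName: "example.com"}

	// Clone, then mutate the copy: the caller's config is left untouched,
	// which is the point of cloning before setting ServerName in
	// tlsDialWithDialer.
	copyCfg := orig.Clone()
	copyCfg.ServerName = "other.example.com"

	fmt.Println(orig.ServerName, copyCfg.ServerName) // example.com other.example.com
}
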
@ -4,7 +4,7 @@ import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"path"

"github.com/docker/docker/api/types"
"golang.org/x/net/context"
@ -18,13 +18,17 @@ func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Vo

// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
// The empty ID needs to be handled here because with an empty ID the
// request url will not contain a trailing / which calls the volume list API
// instead of volume inspect
if volumeID == "" {
return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID}
}

var volume types.Volume
resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
resp, err := cli.get(ctx, path.Join("/volumes", volumeID), nil, nil)
if err != nil {
if resp.statusCode == http.StatusNotFound {
return volume, nil, volumeNotFoundError{volumeID}
}
return volume, nil, err
return volume, nil, wrapResponseError(err, resp, "volume", volumeID)
}
defer ensureReaderClosed(resp)

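The empty-ID guard exists because path.Join drops the trailing slash, which would turn a volume inspect into a volume list request; a quick demonstration:

package main

import (
	"fmt"
	"path"
)

func main() {
	// With a real ID the two forms agree.
	fmt.Println("/volumes/"+"abc", path.Join("/volumes", "abc")) // /volumes/abc /volumes/abc

	// With an empty ID, concatenation keeps the trailing slash (the inspect
	// route) while path.Join collapses it to the list route, hence the
	// explicit volumeID == "" check before the request is built.
	fmt.Println("/volumes/"+"", path.Join("/volumes", "")) // /volumes/ /volumes
}
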
@ -10,6 +10,9 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/internal/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@ -19,9 +22,7 @@ func TestVolumeInspectError(t *testing.T) {
|
||||
}
|
||||
|
||||
_, err := client.VolumeInspect(context.Background(), "nothing")
|
||||
if err == nil || err.Error() != "Error response from daemon: Server error" {
|
||||
t.Fatalf("expected a Server Error, got %v", err)
|
||||
}
|
||||
testutil.ErrorContains(t, err, "Error response from daemon: Server error")
|
||||
}
|
||||
|
||||
func TestVolumeInspectNotFound(t *testing.T) {
|
||||
@ -30,13 +31,34 @@ func TestVolumeInspectNotFound(t *testing.T) {
|
||||
}
|
||||
|
||||
_, err := client.VolumeInspect(context.Background(), "unknown")
|
||||
if err == nil || !IsErrVolumeNotFound(err) {
|
||||
t.Fatalf("expected a volumeNotFound error, got %v", err)
|
||||
assert.True(t, IsErrNotFound(err))
|
||||
}
|
||||
|
||||
func TestVolumeInspectWithEmptyID(t *testing.T) {
|
||||
expectedURL := "/volumes/"
|
||||
|
||||
client := &Client{
|
||||
client: newMockClient(func(req *http.Request) (*http.Response, error) {
|
||||
assert.Equal(t, req.URL.Path, expectedURL)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusNotFound,
|
||||
Body: ioutil.NopCloser(bytes.NewReader(nil)),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
_, err := client.VolumeInspect(context.Background(), "")
|
||||
testutil.ErrorContains(t, err, "No such volume: ")
|
||||
|
||||
}
|
||||
|
||||
func TestVolumeInspect(t *testing.T) {
|
||||
expectedURL := "/volumes/volume_id"
|
||||
expected := types.Volume{
|
||||
Name: "name",
|
||||
Driver: "driver",
|
||||
Mountpoint: "mountpoint",
|
||||
}
|
||||
|
||||
client := &Client{
|
||||
client: newMockClient(func(req *http.Request) (*http.Response, error) {
|
||||
if !strings.HasPrefix(req.URL.Path, expectedURL) {
|
||||
@ -45,11 +67,7 @@ func TestVolumeInspect(t *testing.T) {
|
||||
if req.Method != "GET" {
|
||||
return nil, fmt.Errorf("expected GET method, got %s", req.Method)
|
||||
}
|
||||
content, err := json.Marshal(types.Volume{
|
||||
Name: "name",
|
||||
Driver: "driver",
|
||||
Mountpoint: "mountpoint",
|
||||
})
|
||||
content, err := json.Marshal(expected)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -60,17 +78,7 @@ func TestVolumeInspect(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
|
||||
v, err := client.VolumeInspect(context.Background(), "volume_id")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if v.Name != "name" {
|
||||
t.Fatalf("expected `name`, got %s", v.Name)
|
||||
}
|
||||
if v.Driver != "driver" {
|
||||
t.Fatalf("expected `driver`, got %s", v.Driver)
|
||||
}
|
||||
if v.Mountpoint != "mountpoint" {
|
||||
t.Fatalf("expected `mountpoint`, got %s", v.Mountpoint)
|
||||
}
|
||||
volume, err := client.VolumeInspect(context.Background(), "volume_id")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expected, volume)
|
||||
}
|
||||
|
||||
@ -17,5 +17,5 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool
|
||||
}
|
||||
resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
|
||||
ensureReaderClosed(resp)
|
||||
return err
|
||||
return wrapResponseError(err, resp, "volume", volumeID)
|
||||
}
|
||||
|
||||
@ -33,8 +33,6 @@ func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) {
|
||||
flags.StringVar(&conf.BridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs")
|
||||
flags.BoolVar(&conf.BridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic")
|
||||
flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary")
|
||||
flags.BoolVar(&conf.EnableCors, "api-enable-cors", false, "Enable CORS headers in the Engine API, this is deprecated by --api-cors-header")
|
||||
flags.MarkDeprecated("api-enable-cors", "Please use --api-cors-header")
|
||||
flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers")
|
||||
flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces")
|
||||
flags.StringVar(&conf.ContainerdAddr, "containerd", "", "Path to containerd socket")
|
||||
|
||||
@ -99,6 +99,8 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
|
||||
FullTimestamp: true,
|
||||
})
|
||||
|
||||
system.InitLCOW(cli.Config.Experimental)
|
||||
|
||||
if err := setDefaultUmask(); err != nil {
|
||||
return fmt.Errorf("Failed to set umask: %v", err)
|
||||
}
|
||||
@ -132,7 +134,6 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
|
||||
Logging: true,
|
||||
SocketGroup: cli.Config.SocketGroup,
|
||||
Version: dockerversion.Version,
|
||||
EnableCors: cli.Config.EnableCors,
|
||||
CorsHeaders: cli.Config.CorsHeaders,
|
||||
}
|
||||
|
||||
@ -198,7 +199,11 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
|
||||
cli.api.Accept(addr, ls...)
|
||||
}
|
||||
|
||||
registryService := registry.NewService(cli.Config.ServiceOptions)
|
||||
registryService, err := registry.NewService(cli.Config.ServiceOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -467,7 +472,7 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if conf.V2Only == false {
|
||||
if !conf.V2Only {
|
||||
logrus.Warnf(`The "disable-legacy-registry" option is deprecated and wil be removed in Docker v17.12. Interacting with legacy (v1) registries will no longer be supported in Docker v17.12"`)
|
||||
}
|
||||
|
||||
@ -548,7 +553,7 @@ func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config
|
||||
vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion)
|
||||
s.UseMiddleware(vm)
|
||||
|
||||
if cfg.EnableCors || cfg.CorsHeaders != "" {
|
||||
if cfg.CorsHeaders != "" {
|
||||
c := middleware.NewCORSMiddleware(cfg.CorsHeaders)
|
||||
s.UseMiddleware(c)
|
||||
}
|
||||
|
||||
@ -2,7 +2,6 @@ package container
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
@ -15,17 +14,20 @@ import (
|
||||
// an error if the path points to outside the container's rootfs.
|
||||
func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
|
||||
// Check if a drive letter supplied, it must be the system drive. No-op except on Windows
|
||||
path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
|
||||
path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, container.BaseFS)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// Consider the given path as an absolute path in the container.
|
||||
absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
|
||||
absPath = archive.PreserveTrailingDotOrSeparator(
|
||||
container.BaseFS.Join(string(container.BaseFS.Separator()), path),
|
||||
path,
|
||||
container.BaseFS.Separator())
|
||||
|
||||
// Split the absPath into its Directory and Base components. We will
|
||||
// resolve the dir in the scope of the container then append the base.
|
||||
dirPath, basePath := filepath.Split(absPath)
|
||||
dirPath, basePath := container.BaseFS.Split(absPath)
|
||||
|
||||
resolvedDirPath, err := container.GetResourcePath(dirPath)
|
||||
if err != nil {
|
||||
@ -34,8 +36,7 @@ func (container *Container) ResolvePath(path string) (resolvedPath, absPath stri
|
||||
|
||||
// resolvedDirPath will have been cleaned (no trailing path separators) so
|
||||
// we can manually join it with the base path element.
|
||||
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
|
||||
|
||||
resolvedPath = resolvedDirPath + string(container.BaseFS.Separator()) + basePath
|
||||
return resolvedPath, absPath, nil
|
||||
}
|
||||
|
||||
@ -44,7 +45,9 @@ func (container *Container) ResolvePath(path string) (resolvedPath, absPath stri
|
||||
// resolved to a path on the host corresponding to the given absolute path
|
||||
// inside the container.
|
||||
func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
|
||||
lstat, err := os.Lstat(resolvedPath)
|
||||
driver := container.BaseFS
|
||||
|
||||
lstat, err := driver.Lstat(resolvedPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -57,17 +60,17 @@ func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
linkTarget, err = filepath.Rel(container.BaseFS, hostPath)
|
||||
linkTarget, err = driver.Rel(driver.Path(), hostPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Make it an absolute path.
|
||||
linkTarget = filepath.Join(string(filepath.Separator), linkTarget)
|
||||
linkTarget = driver.Join(string(driver.Separator()), linkTarget)
|
||||
}
|
||||
|
||||
return &types.ContainerPathStat{
|
||||
Name: filepath.Base(absPath),
|
||||
Name: driver.Base(absPath),
|
||||
Size: lstat.Size(),
|
||||
Mode: lstat.Mode(),
|
||||
Mtime: lstat.ModTime(),
|
||||
|
||||
@ -28,6 +28,7 @@ import (
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/docker/docker/libcontainerd"
|
||||
"github.com/docker/docker/opts"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/signal"
|
||||
@ -64,10 +65,10 @@ var (
|
||||
type Container struct {
|
||||
StreamConfig *stream.Config
|
||||
// embed for Container to support states directly.
|
||||
*State `json:"State"` // Needed for Engine API version <= 1.11
|
||||
Root string `json:"-"` // Path to the "home" of the container, including metadata.
|
||||
BaseFS string `json:"-"` // Path to the graphdriver mountpoint
|
||||
RWLayer layer.RWLayer `json:"-"`
|
||||
*State `json:"State"` // Needed for Engine API version <= 1.11
|
||||
Root string `json:"-"` // Path to the "home" of the container, including metadata.
|
||||
BaseFS containerfs.ContainerFS `json:"-"` // interface containing graphdriver mount
|
||||
RWLayer layer.RWLayer `json:"-"`
|
||||
ID string
|
||||
Created time.Time
|
||||
Managed bool
|
||||
@ -305,15 +306,13 @@ func (container *Container) SetupWorkingDirectory(rootIDs idtools.IDPair) error
|
||||
func (container *Container) GetResourcePath(path string) (string, error) {
|
||||
// IMPORTANT - These are paths on the OS where the daemon is running, hence
|
||||
// any filepath operations must be done in an OS agnostic way.
|
||||
|
||||
cleanPath := cleanResourcePath(path)
|
||||
r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS)
|
||||
r, e := container.BaseFS.ResolveScopedPath(path, false)
|
||||
|
||||
// Log this here on the daemon side as there's otherwise no indication apart
|
||||
// from the error being propagated all the way back to the client. This makes
|
||||
// debugging significantly easier and clearly indicates the error comes from the daemon.
|
||||
if e != nil {
|
||||
logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e)
|
||||
logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS.Path(), path, e)
|
||||
}
|
||||
return r, e
|
||||
}
|
||||
@ -435,6 +434,11 @@ func (container *Container) ShouldRestart() bool {
|
||||
|
||||
// AddMountPointWithVolume adds a new mount point configured with a volume to the container.
|
||||
func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
|
||||
operatingSystem := container.Platform
|
||||
if operatingSystem == "" {
|
||||
operatingSystem = runtime.GOOS
|
||||
}
|
||||
volumeParser := volume.NewParser(operatingSystem)
|
||||
container.MountPoints[destination] = &volume.MountPoint{
|
||||
Type: mounttypes.TypeVolume,
|
||||
Name: vol.Name(),
|
||||
@ -442,7 +446,7 @@ func (container *Container) AddMountPointWithVolume(destination string, vol volu
|
||||
Destination: destination,
|
||||
RW: rw,
|
||||
Volume: vol,
|
||||
CopyData: volume.DefaultCopyMode,
|
||||
CopyData: volumeParser.DefaultCopyMode(),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -5,7 +5,6 @@ package container
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
@ -13,7 +12,6 @@ import (
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/docker/volume"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
@ -68,6 +66,7 @@ func (container *Container) BuildHostnameFile() error {
|
||||
func (container *Container) NetworkMounts() []Mount {
|
||||
var mounts []Mount
|
||||
shared := container.HostConfig.NetworkMode.IsContainer()
|
||||
parser := volume.NewParser(container.Platform)
|
||||
if container.ResolvConfPath != "" {
|
||||
if _, err := os.Stat(container.ResolvConfPath); err != nil {
|
||||
logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err)
|
||||
@ -83,7 +82,7 @@ func (container *Container) NetworkMounts() []Mount {
|
||||
Source: container.ResolvConfPath,
|
||||
Destination: "/etc/resolv.conf",
|
||||
Writable: writable,
|
||||
Propagation: string(volume.DefaultPropagationMode),
|
||||
Propagation: string(parser.DefaultPropagationMode()),
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -102,7 +101,7 @@ func (container *Container) NetworkMounts() []Mount {
|
||||
Source: container.HostnamePath,
|
||||
Destination: "/etc/hostname",
|
||||
Writable: writable,
|
||||
Propagation: string(volume.DefaultPropagationMode),
|
||||
Propagation: string(parser.DefaultPropagationMode()),
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -121,7 +120,7 @@ func (container *Container) NetworkMounts() []Mount {
|
||||
Source: container.HostsPath,
|
||||
Destination: "/etc/hosts",
|
||||
Writable: writable,
|
||||
Propagation: string(volume.DefaultPropagationMode),
|
||||
Propagation: string(parser.DefaultPropagationMode()),
|
||||
})
|
||||
}
|
||||
}
@ -130,7 +129,7 @@ func (container *Container) NetworkMounts() []Mount {

// CopyImagePathContent copies files in destination to the volume.
func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS)
rootfs, err := container.GetResourcePath(destination)
if err != nil {
return err
}
@ -196,6 +195,7 @@ func (container *Container) UnmountIpcMount(unmount func(pth string) error) erro
// IpcMounts returns the list of IPC mounts
func (container *Container) IpcMounts() []Mount {
var mounts []Mount
parser := volume.NewParser(container.Platform)

if container.HasMountFor("/dev/shm") {
return mounts
@ -209,7 +209,7 @@ func (container *Container) IpcMounts() []Mount {
Source: container.ShmPath,
Destination: "/dev/shm",
Writable: true,
Propagation: string(volume.DefaultPropagationMode),
Propagation: string(parser.DefaultPropagationMode()),
})

return mounts
@ -429,6 +429,7 @@ func copyOwnership(source, destination string) error {

// TmpfsMounts returns the list of tmpfs mounts
func (container *Container) TmpfsMounts() ([]Mount, error) {
parser := volume.NewParser(container.Platform)
var mounts []Mount
for dest, data := range container.HostConfig.Tmpfs {
mounts = append(mounts, Mount{
@ -439,7 +440,7 @@ func (container *Container) TmpfsMounts() ([]Mount, error) {
}
for dest, mnt := range container.MountPoints {
if mnt.Type == mounttypes.TypeTmpfs {
data, err := volume.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly)
data, err := parser.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly)
if err != nil {
return nil, err
}
@ -453,11 +454,6 @@ func (container *Container) TmpfsMounts() ([]Mount, error) {
return mounts, nil
}

// cleanResourcePath cleans a resource path and prepares to combine with mnt path
func cleanResourcePath(path string) string {
return filepath.Join(string(os.PathSeparator), path)
}

// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network
func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool {
return false

@ -172,18 +172,6 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi
return nil
}

// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares
// to combine with a volume path
func cleanResourcePath(path string) string {
if len(path) >= 2 {
c := path[0]
if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
path = path[2:]
}
}
return filepath.Join(string(os.PathSeparator), path)
}
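The Windows variant of cleanResourcePath above strips a leading drive letter ("C:") before re-rooting the path. A minimal standalone sketch of the same behaviour (a hypothetical helper for illustration only, not the daemon's code; the sample outputs assume a Windows path separator):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// stripDriveLetter mirrors the logic shown above: drop a leading "X:" pair,
// then anchor the remainder at the filesystem root.
func stripDriveLetter(path string) string {
	if len(path) >= 2 {
		c := path[0]
		if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
			path = path[2:]
		}
	}
	return filepath.Join(string(os.PathSeparator), path)
}

func main() {
	// On Windows this prints \Windows\System32 and \data.
	// On other platforms the joined separator is the local one, so the
	// output is only meaningful as an illustration.
	fmt.Println(stripDriveLetter(`C:\Windows\System32`))
	fmt.Println(stripDriveLetter(`d:\data`))
}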

// BuildHostnameFile writes the container's hostname file.
func (container *Container) BuildHostnameFile() error {
return nil

@ -203,10 +203,7 @@ func (db *memDB) ReserveName(name, containerID string) error {
// Once released, a name can be reserved again
func (db *memDB) ReleaseName(name string) error {
return db.withTxn(func(txn *memdb.Txn) error {
if err := txn.Delete(memdbNamesTable, nameAssociation{name: name}); err != nil {
return err
}
return nil
return txn.Delete(memdbNamesTable, nameAssociation{name: name})
})
}
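The ReleaseName hunk above is a pure simplification: when the only thing done with an error is returning it (or nil), the call can be returned directly. A tiny self-contained sketch of the before/after shape, using hypothetical names:

package main

import (
	"errors"
	"fmt"
)

var errEmptyName = errors.New("empty name")

// deleteName stands in for the transaction delete call.
func deleteName(name string) error {
	if name == "" {
		return errEmptyName
	}
	return nil
}

// releaseVerbose checks the error only to pass it through unchanged.
func releaseVerbose(name string) error {
	if err := deleteName(name); err != nil {
		return err
	}
	return nil
}

// release returns the call's error directly, matching the hunk above.
func release(name string) error {
	return deleteName(name)
}

func main() {
	fmt.Println(releaseVerbose("web"), release("web")) // <nil> <nil>
}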

@ -90,14 +90,12 @@ func main() {
fmt.Printf("Sector size: %d\n", status.SectorSize)
fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total))
fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total))
break
case "list":
ids := devices.List()
sort.Strings(ids)
for _, id := range ids {
fmt.Println(id)
}
break
case "device":
if flag.NArg() < 2 {
usage()
@ -113,7 +111,6 @@ func main() {
fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors)
fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors)
fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector)
break
case "resize":
if flag.NArg() < 2 {
usage()
@ -131,7 +128,6 @@ func main() {
os.Exit(1)
}

break
case "snap":
if flag.NArg() < 3 {
usage()
@ -142,7 +138,6 @@ func main() {
fmt.Println("Can't create snap device: ", err)
os.Exit(1)
}
break
case "remove":
if flag.NArg() < 2 {
usage()
@ -153,7 +148,6 @@ func main() {
fmt.Println("Can't remove device: ", err)
os.Exit(1)
}
break
case "mount":
if flag.NArg() < 3 {
usage()
@ -164,13 +158,10 @@ func main() {
fmt.Println("Can't mount device: ", err)
os.Exit(1)
}
break
default:
fmt.Printf("Unknown command %s\n", args[0])
usage()

os.Exit(1)
}

return
}
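The deleted break statements in this hunk were redundant: Go switch cases never fall through on their own, so control leaves the switch at the end of the matching case unless fallthrough is written explicitly. A short standalone example (hypothetical command names):

package main

import "fmt"

func describe(cmd string) string {
	// No trailing break is needed; each case ends on its own.
	switch cmd {
	case "status":
		return "print pool status"
	case "list":
		return "list device ids"
	default:
		return fmt.Sprintf("unknown command %q", cmd)
	}
}

func main() {
	fmt.Println(describe("list"))
	fmt.Println(describe("bogus"))
}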

@ -3,7 +3,6 @@ package daemon
import (
"io"
"os"
"path/filepath"
"strings"

"github.com/docker/docker/api/types"
@ -20,6 +19,31 @@ import (
// path does not refer to a directory.
var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory")

// The daemon will use the following interfaces if the container fs implements
// these for optimized copies to and from the container.
type extractor interface {
ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error
}

type archiver interface {
ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error)
}

// helper functions to extract or archive
func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error {
if ea, ok := i.(extractor); ok {
return ea.ExtractArchive(src, dst, opts)
}
return chrootarchive.Untar(src, dst, opts)
}

func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) {
if ap, ok := i.(archiver); ok {
return ap.ArchivePath(src, opts)
}
return archive.TarWithOptions(src, opts)
}
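extractArchive and archivePath probe the value for an optional interface with a type assertion and fall back to the generic chrootarchive/archive implementations when it is absent, much like the standard library's optional-interface checks (for example, io.Copy looking for io.WriterTo). A minimal self-contained sketch of the pattern with hypothetical names:

package main

import (
	"fmt"
	"strings"
)

// upperCaser is an optional capability a value may or may not implement.
type upperCaser interface {
	Upper(s string) string
}

// toUpper uses the optimized implementation when available and falls back
// to a generic one otherwise, mirroring the helpers above.
func toUpper(v interface{}, s string) string {
	if u, ok := v.(upperCaser); ok {
		return u.Upper(s)
	}
	return strings.ToUpper(s)
}

type shouty struct{}

func (shouty) Upper(s string) string { return "!" + strings.ToUpper(s) + "!" }

func main() {
	fmt.Println(toUpper(struct{}{}, "tar")) // TAR (generic fallback)
	fmt.Println(toUpper(shouty{}, "tar"))   // !TAR! (optimized path)
}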

// ContainerCopy performs a deprecated operation of archiving the resource at
// the specified path in the container identified by the given name.
func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
@ -138,6 +162,9 @@ func (daemon *Daemon) containerStatPath(container *container.Container, path str
return nil, err
}

// Normalize path before sending to rootfs
path = container.BaseFS.FromSlash(path)

resolvedPath, absPath, err := container.ResolvePath(path)
if err != nil {
return nil, err
@ -178,6 +205,9 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path
return nil, nil, err
}

// Normalize path before sending to rootfs
path = container.BaseFS.FromSlash(path)

resolvedPath, absPath, err := container.ResolvePath(path)
if err != nil {
return nil, nil, err
@ -196,7 +226,18 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path
// also catches the case when the root directory of the container is
// requested: we want the archive entries to start with "/" and not the
// container ID.
data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath))
driver := container.BaseFS

// Get the source and the base paths of the container resolved path in order
// to get the proper tar options for the rebase tar.
resolvedPath = driver.Clean(resolvedPath)
if driver.Base(resolvedPath) == "." {
resolvedPath += string(driver.Separator()) + "."
}
sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath)
opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath))

data, err := archivePath(driver, sourceDir, opts)
if err != nil {
return nil, nil, err
}
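This hunk stops calling path/filepath directly and instead asks driver (container.BaseFS) to clean and split the resolved path, so the path rules of the container's filesystem apply rather than the host's. A rough sketch of that idea with a hypothetical, trimmed-down interface (not the real containerfs API):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// pathDriver is a hypothetical stand-in for the kind of path operations
// the driver above exposes.
type pathDriver interface {
	Clean(p string) string
	Dir(p string) string
	Base(p string) string
}

// hostPaths follows the host's path/filepath rules.
type hostPaths struct{}

func (hostPaths) Clean(p string) string { return filepath.Clean(p) }
func (hostPaths) Dir(p string) string   { return filepath.Dir(p) }
func (hostPaths) Base(p string) string  { return filepath.Base(p) }

// windowsPaths applies backslash rules even on a non-Windows host.
type windowsPaths struct{}

func (windowsPaths) Clean(p string) string { return strings.TrimRight(p, `\`) }
func (windowsPaths) Dir(p string) string {
	if i := strings.LastIndexByte(p, '\\'); i > 0 {
		return p[:i]
	}
	return `\`
}
func (windowsPaths) Base(p string) string {
	return p[strings.LastIndexByte(p, '\\')+1:]
}

// splitSource mirrors the sourceDir/sourceBase computation in the hunk above.
func splitSource(d pathDriver, resolved string) (dir, base string) {
	resolved = d.Clean(resolved)
	return d.Dir(resolved), d.Base(resolved)
}

func main() {
	fmt.Println(splitSource(hostPaths{}, "/var/lib/app/data/"))
	fmt.Println(splitSource(windowsPaths{}, `C:\app\data\`))
}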
@ -235,8 +276,12 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
return err
}

// Normalize path before sending to rootfs
path = container.BaseFS.FromSlash(path)
driver := container.BaseFS

// Check if a drive letter supplied, it must be the system drive. No-op except on Windows
path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, driver)
if err != nil {
return err
}
@ -248,7 +293,10 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
// that you can extract an archive to a symlink that points to a directory.

// Consider the given path as an absolute path in the container.
absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
absPath := archive.PreserveTrailingDotOrSeparator(
driver.Join(string(driver.Separator()), path),
path,
driver.Separator())

// This will evaluate the last path element if it is a symlink.
resolvedPath, err := container.GetResourcePath(absPath)
@ -256,7 +304,7 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
return err
}

stat, err := os.Lstat(resolvedPath)
stat, err := driver.Lstat(resolvedPath)
if err != nil {
return err
}
@ -279,21 +327,24 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
// a volume file path.
var baseRel string
if strings.HasPrefix(resolvedPath, `\\?\Volume{`) {
if strings.HasPrefix(resolvedPath, container.BaseFS) {
baseRel = resolvedPath[len(container.BaseFS):]
if strings.HasPrefix(resolvedPath, driver.Path()) {
baseRel = resolvedPath[len(driver.Path()):]
if baseRel[:1] == `\` {
baseRel = baseRel[1:]
}
}
} else {
baseRel, err = filepath.Rel(container.BaseFS, resolvedPath)
baseRel, err = driver.Rel(driver.Path(), resolvedPath)
}
if err != nil {
return err
}
// Make it an absolute path.
absPath = filepath.Join(string(filepath.Separator), baseRel)
absPath = driver.Join(string(driver.Separator()), baseRel)

// @ TODO: gupta-ak: Technically, this works since it no-ops
// on Windows and the file system is local anyway on linux.
// But eventually, it should be made driver aware.
toVolume, err := checkIfPathIsInAVolume(container, absPath)
if err != nil {
return err
@ -315,7 +366,7 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
}
}

if err := chrootarchive.Untar(content, resolvedPath, options); err != nil {
if err := extractArchive(driver, content, resolvedPath, options); err != nil {
return err
}

@ -356,24 +407,28 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
return nil, err
}

// Normalize path before sending to rootfs
resource = container.BaseFS.FromSlash(resource)
driver := container.BaseFS

basePath, err := container.GetResourcePath(resource)
if err != nil {
return nil, err
}
stat, err := os.Stat(basePath)
stat, err := driver.Stat(basePath)
if err != nil {
return nil, err
}
var filter []string
if !stat.IsDir() {
d, f := filepath.Split(basePath)
d, f := driver.Split(basePath)
basePath = d
filter = []string{f}
} else {
filter = []string{filepath.Base(basePath)}
basePath = filepath.Dir(basePath)
filter = []string{driver.Base(basePath)}
basePath = driver.Dir(basePath)
}
archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
archive, err := archivePath(driver, basePath, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeFiles: filter,
})

@ -4,6 +4,7 @@ package daemon

import (
"github.com/docker/docker/container"
"github.com/docker/docker/volume"
)

// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it
@ -11,8 +12,9 @@ import (
// cannot be configured with a read-only rootfs.
func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) {
var toVolume bool
parser := volume.NewParser(container.Platform)
for _, mnt := range container.MountPoints {
if toVolume = mnt.HasResource(absPath); toVolume {
if toVolume = parser.HasResource(mnt, absPath); toVolume {
if mnt.RW {
break
}

@ -168,7 +168,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach
// Wait for the container to stop before returning.
waitChan := c.Wait(context.Background(), container.WaitConditionNotRunning)
defer func() {
_ = <-waitChan // Ignore returned exit code.
<-waitChan // Ignore returned exit code.
}()
}

@ -10,6 +10,7 @@ import (
"github.com/docker/docker/builder"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/registry"
@ -25,9 +26,9 @@ type releaseableLayer struct {
rwLayer layer.RWLayer
}

func (rl *releaseableLayer) Mount() (string, error) {
func (rl *releaseableLayer) Mount() (containerfs.ContainerFS, error) {
var err error
var mountPath string
var mountPath containerfs.ContainerFS
var chainID layer.ChainID
if rl.roLayer != nil {
chainID = rl.roLayer.ChainID()
@ -36,7 +37,7 @@ func (rl *releaseableLayer) Mount() (string, error) {
mountID := stringid.GenerateRandomID()
rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, chainID, nil)
if err != nil {
return "", errors.Wrap(err, "failed to create rwlayer")
return nil, errors.Wrap(err, "failed to create rwlayer")
}

mountPath, err = rl.rwLayer.Mount("")
@ -48,7 +49,7 @@ func (rl *releaseableLayer) Mount() (string, error) {
logrus.Errorf("Failed to release RWLayer: %s", err)
}
rl.rwLayer = nil
return "", err
return nil, err
}

return mountPath, nil

@ -7,13 +7,13 @@ import (
"os"
"path/filepath"

"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/daemon/names"
)

var (
validCheckpointNameChars = api.RestrictedNameChars
validCheckpointNamePattern = api.RestrictedNamePattern
validCheckpointNameChars = names.RestrictedNameChars
validCheckpointNamePattern = names.RestrictedNamePattern
)

// getCheckpointDir verifies checkpoint directory for create, remove, list options and checks if checkpoint already exists

@ -15,6 +15,7 @@ import (
swarmtypes "github.com/docker/docker/api/types/swarm"
containerpkg "github.com/docker/docker/container"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
networkSettings "github.com/docker/docker/daemon/network"
"github.com/docker/docker/plugin"
"github.com/docker/libnetwork"
"github.com/docker/libnetwork/cluster"
@ -61,4 +62,5 @@ type Backend interface {
LookupImage(name string) (*types.ImageInspect, error)
PluginManager() *plugin.Manager
PluginGetter() *plugin.Store
GetLBAttachmentStore() *networkSettings.LBAttachmentStore
}

@ -41,8 +41,8 @@ type containerAdapter struct {
dependencies exec.DependencyGetter
}

func newContainerAdapter(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*containerAdapter, error) {
ctnr, err := newContainerConfig(task)
func newContainerAdapter(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) {
ctnr, err := newContainerConfig(task, node)
if err != nil {
return nil, err
}

@ -20,8 +20,8 @@ type networkAttacherController struct {
closed chan struct{}
}

func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*networkAttacherController, error) {
adapter, err := newContainerAdapter(b, task, dependencies)
func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*networkAttacherController, error) {
adapter, err := newContainerAdapter(b, task, node, dependencies)
if err != nil {
return nil, err
}
@ -40,11 +40,7 @@ func (nc *networkAttacherController) Update(ctx context.Context, t *api.Task) er

func (nc *networkAttacherController) Prepare(ctx context.Context) error {
// Make sure all the networks that the task needs are created.
if err := nc.adapter.createNetworks(ctx); err != nil {
return err
}

return nil
return nc.adapter.createNetworks(ctx)
}

func (nc *networkAttacherController) Start(ctx context.Context) error {
@ -69,11 +65,7 @@ func (nc *networkAttacherController) Terminate(ctx context.Context) error {
func (nc *networkAttacherController) Remove(ctx context.Context) error {
// Try removing the network referenced in this task in case this
// task is the last one referencing it
if err := nc.adapter.removeNetworks(ctx); err != nil {
return err
}

return nil
return nc.adapter.removeNetworks(ctx)
}

func (nc *networkAttacherController) Close() error {

@ -48,12 +48,12 @@ type containerConfig struct {

// newContainerConfig returns a validated container config. No methods should
// return an error if this function returns without error.
func newContainerConfig(t *api.Task) (*containerConfig, error) {
func newContainerConfig(t *api.Task, node *api.NodeDescription) (*containerConfig, error) {
var c containerConfig
return &c, c.setTask(t)
return &c, c.setTask(t, node)
}

func (c *containerConfig) setTask(t *api.Task) error {
func (c *containerConfig) setTask(t *api.Task, node *api.NodeDescription) error {
if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil {
return exec.ErrRuntimeUnsupported
}
@ -78,7 +78,7 @@ func (c *containerConfig) setTask(t *api.Task) error {
c.task = t

if t.Spec.GetContainer() != nil {
preparedSpec, err := template.ExpandContainerSpec(nil, t)
preparedSpec, err := template.ExpandContainerSpec(node, t)
if err != nil {
return err
}
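Passing node into template.ExpandContainerSpec lets a task's spec be expanded against the node it lands on. As a rough illustration of that kind of expansion using only the standard library (the payload type here is hypothetical, not swarmkit's actual one):

package main

import (
	"fmt"
	"os"
	"text/template"
)

// payload is a made-up expansion context carrying a node description.
type payload struct {
	Node struct {
		Hostname string
	}
}

func main() {
	var p payload
	p.Node.Hostname = "worker-03"

	// A spec value, e.g. an environment variable, written as a template.
	tmpl := template.Must(template.New("spec").Parse("LOG_PREFIX={{.Node.Hostname}}\n"))
	if err := tmpl.Execute(os.Stdout, p); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}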

@ -40,8 +40,8 @@ type controller struct {
var _ exec.Controller = &controller{}

// NewController returns a docker exec runner for the provided task.
func newController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*controller, error) {
adapter, err := newContainerAdapter(b, task, dependencies)
func newController(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*controller, error) {
adapter, err := newContainerAdapter(b, task, node, dependencies)
if err != nil {
return nil, err
}

@ -4,6 +4,7 @@ import (
"fmt"
"sort"
"strings"
"sync"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
@ -26,6 +27,8 @@ type executor struct {
backend executorpkg.Backend
pluginBackend plugin.Backend
dependencies exec.DependencyManager
mutex sync.Mutex // This mutex protects the following node field
node *api.NodeDescription
}

// NewExecutor returns an executor from the docker client.
@ -124,27 +127,41 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
},
}

// Save the node information in the executor field
e.mutex.Lock()
e.node = description
e.mutex.Unlock()

return description, nil
}
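Describe now stores the freshly built node description in e.node under e.mutex, and Controller (further down) reads it back under the same lock before handing it to the task controllers. A minimal self-contained sketch of that cache-under-mutex pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

// nodeDescription stands in for api.NodeDescription.
type nodeDescription struct {
	Hostname string
}

type executor struct {
	mu   sync.Mutex
	node *nodeDescription // guarded by mu
}

// describe builds a fresh description and caches it for later readers.
func (e *executor) describe() *nodeDescription {
	desc := &nodeDescription{Hostname: "node-1"}
	e.mu.Lock()
	e.node = desc
	e.mu.Unlock()
	return desc
}

// controller reads the cached description under the same lock.
func (e *executor) controller() *nodeDescription {
	e.mu.Lock()
	defer e.mu.Unlock()
	return e.node
}

func main() {
	e := &executor{}
	e.describe()
	fmt.Println(e.controller().Hostname) // node-1
}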

func (e *executor) Configure(ctx context.Context, node *api.Node) error {
na := node.Attachment
if na == nil {
var ingressNA *api.NetworkAttachment
lbAttachments := make(map[string]string)

for _, na := range node.LbAttachments {
if na.Network.Spec.Ingress {
ingressNA = na
}
lbAttachments[na.Network.ID] = na.Addresses[0]
}

if ingressNA == nil {
e.backend.ReleaseIngress()
return nil
return e.backend.GetLBAttachmentStore().ResetLBAttachments(lbAttachments)
}

options := types.NetworkCreate{
Driver: na.Network.DriverState.Name,
Driver: ingressNA.Network.DriverState.Name,
IPAM: &network.IPAM{
Driver: na.Network.IPAM.Driver.Name,
Driver: ingressNA.Network.IPAM.Driver.Name,
},
Options: na.Network.DriverState.Options,
Options: ingressNA.Network.DriverState.Options,
Ingress: true,
CheckDuplicate: true,
}

for _, ic := range na.Network.IPAM.Configs {
for _, ic := range ingressNA.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
@ -154,22 +171,30 @@ func (e *executor) Configure(ctx context.Context, node *api.Node) error {
}

_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
ID: na.Network.ID,
ID: ingressNA.Network.ID,
NetworkCreateRequest: types.NetworkCreateRequest{
Name: na.Network.Spec.Annotations.Name,
Name: ingressNA.Network.Spec.Annotations.Name,
NetworkCreate: options,
},
}, na.Addresses[0])
}, ingressNA.Addresses[0])
if err != nil {
return err
}

return err
return e.backend.GetLBAttachmentStore().ResetLBAttachments(lbAttachments)
}

// Controller returns a docker container runner.
func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
dependencyGetter := agent.Restrict(e.dependencies, t)

// Get the node description from the executor field
e.mutex.Lock()
nodeDescription := e.node
e.mutex.Unlock()

if t.Spec.GetAttachment() != nil {
return newNetworkAttacherController(e.backend, t, dependencyGetter)
return newNetworkAttacherController(e.backend, t, nodeDescription, dependencyGetter)
}

var ctlr exec.Controller
@ -198,7 +223,7 @@ func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind)
}
case *api.TaskSpec_Container:
c, err := newController(e.backend, t, dependencyGetter)
c, err := newController(e.backend, t, nodeDescription, dependencyGetter)
if err != nil {
return ctlr, err
}