diff --git a/components/engine/Dockerfile b/components/engine/Dockerfile index 8971edf6b8..82375a78ef 100644 --- a/components/engine/Dockerfile +++ b/components/engine/Dockerfile @@ -23,7 +23,7 @@ # the case. Therefore, you don't have to disable it anymore. # -FROM debian:jessie +FROM debian:stretch # allow replacing httpredir or deb mirror ARG APT_MIRROR=deb.debian.org @@ -51,21 +51,28 @@ RUN apt-get update && apt-get install -y \ less \ libapparmor-dev \ libcap-dev \ + libdevmapper-dev \ libnl-3-dev \ libprotobuf-c0-dev \ libprotobuf-dev \ - libsystemd-journal-dev \ + libsystemd-dev \ libtool \ + libudev-dev \ mercurial \ net-tools \ pkg-config \ protobuf-compiler \ protobuf-c-compiler \ + python-backports.ssl-match-hostname \ python-dev \ python-mock \ python-pip \ + python-requests \ + python-setuptools \ python-websocket \ + python-wheel \ tar \ + thin-provisioning-tools \ vim \ vim-common \ xfsprogs \ @@ -73,21 +80,6 @@ RUN apt-get update && apt-get install -y \ --no-install-recommends \ && pip install awscli==1.10.15 -# Get lvm2 sources to build statically linked devmapper library -ENV LVM2_VERSION 2.02.173 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 - -# Compile and install (only the needed library) -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - --enable-pkgconfig \ - && make -C include \ - && make -C libdm install_device-mapper - # Install seccomp: the version shipped upstream is too old ENV SECCOMP_VERSION 2.3.2 RUN set -x \ @@ -157,9 +149,6 @@ RUN set -x \ # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef # To run integration tests docker-pycreds is required. -# Before running the integration tests conftest.py is -# loaded which results in loads auth.py that -# imports the docker-pycreds module. RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ @@ -216,3 +205,6 @@ ENTRYPOINT ["hack/dind"] # Upload docker source COPY . /go/src/github.com/docker/docker + +# Options for hack/validate/gometalinter +ENV GOMETALINTER_OPTS="--deadline 2m" diff --git a/components/engine/Dockerfile.aarch64 b/components/engine/Dockerfile.aarch64 index 5f3b25ce59..493145174d 100644 --- a/components/engine/Dockerfile.aarch64 +++ b/components/engine/Dockerfile.aarch64 @@ -15,14 +15,20 @@ # the case. Therefore, you don't have to disable it anymore. 
# -FROM aarch64/ubuntu:xenial +FROM arm64v8/debian:stretch + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list # Packaged dependencies RUN apt-get update && apt-get install -y \ apparmor \ + apt-utils \ aufs-tools \ automake \ bash-completion \ + bsdmainutils \ btrfs-tools \ build-essential \ cmake \ @@ -32,43 +38,40 @@ RUN apt-get update && apt-get install -y \ g++ \ gcc \ git \ + golang \ iptables \ jq \ + less \ libapparmor-dev \ - libc6-dev \ libcap-dev \ + libdevmapper-dev \ + libnl-3-dev \ + libprotobuf-c0-dev \ + libprotobuf-dev \ libsystemd-dev \ - libyaml-dev \ + libtool \ + libudev-dev \ mercurial \ net-tools \ - parallel \ pkg-config \ + protobuf-compiler \ + protobuf-c-compiler \ + python-backports.ssl-match-hostname \ python-dev \ python-mock \ python-pip \ + python-requests \ python-setuptools \ python-websocket \ - golang-go \ - iproute2 \ - iputils-ping \ + python-wheel \ + tar \ + thin-provisioning-tools \ + vim \ vim-common \ + xfsprogs \ + zip \ --no-install-recommends -# Get lvm2 sources to build statically linked devmapper library -ENV LVM2_VERSION 2.02.173 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 - -# Compile and install (only the needed library) -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - --enable-pkgconfig \ - && make -C include \ - && make -C libdm install_device-mapper - # Install seccomp: the version shipped upstream is too old ENV SECCOMP_VERSION 2.3.2 RUN set -x \ @@ -86,9 +89,7 @@ RUN set -x \ # Install Go # We don't have official binary golang 1.7.5 tarballs for ARM64, either for Go or -# bootstrap, so we use golang-go (1.6) as bootstrap to build Go from source code. -# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because -# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8. +# bootstrap, so we use Debian golang (1.7) as bootstrap to build Go from source code. # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ENV GO_VERSION 1.8.3 RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ @@ -124,13 +125,10 @@ RUN set -x \ # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef -# Before running the integration tests conftest.py is -# loaded which results in loads auth.py that -# imports the docker-pycreds module. +# To run integration tests docker-pycreds is required. RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ - && pip install wheel \ && pip install docker-pycreds==0.2.1 \ && pip install -r test-requirements.txt @@ -173,7 +171,7 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ # Please edit hack/dockerfile/install-binaries.sh to update them. 
COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli gometalinter ENV PATH=/usr/local/cli:$PATH # Wrap all commands in the "docker-in-docker" script to allow nested containers @@ -181,3 +179,6 @@ ENTRYPOINT ["hack/dind"] # Upload docker source COPY . /go/src/github.com/docker/docker + +# Options for hack/validate/gometalinter +ENV GOMETALINTER_OPTS="--deadline 4m -j2" diff --git a/components/engine/Dockerfile.armhf b/components/engine/Dockerfile.armhf index 18cd74f12f..9818c34cc8 100644 --- a/components/engine/Dockerfile.armhf +++ b/components/engine/Dockerfile.armhf @@ -15,7 +15,7 @@ # the case. Therefore, you don't have to disable it anymore. # -FROM armhf/debian:jessie +FROM arm32v7/debian:stretch # allow replacing httpredir or deb mirror ARG APT_MIRROR=deb.debian.org @@ -39,36 +39,27 @@ RUN apt-get update && apt-get install -y \ net-tools \ libapparmor-dev \ libcap-dev \ - libsystemd-journal-dev \ + libdevmapper-dev \ + libsystemd-dev \ libtool \ + libudev-dev \ mercurial \ pkg-config \ + python-backports.ssl-match-hostname \ python-dev \ python-mock \ python-pip \ + python-requests \ + python-setuptools \ python-websocket \ + python-wheel \ xfsprogs \ tar \ + thin-provisioning-tools \ vim-common \ --no-install-recommends \ && pip install awscli==1.10.15 -# Get lvm2 sources to build statically linked devmapper library -ENV LVM2_VERSION 2.02.173 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 - -# Compile and install (only the needed library) -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - --enable-pkgconfig \ - && make -C include \ - && make -C libdm install_device-mapper - - # Install Go # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ENV GO_VERSION 1.8.3 @@ -127,9 +118,11 @@ RUN set -x \ # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef +# To run integration tests docker-pycreds is required. RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ + && pip install docker-pycreds==0.2.1 \ && pip install -r test-requirements.txt # Set user.email so crosbymichael's in-container merge commits go smoothly @@ -162,10 +155,13 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ # Please edit hack/dockerfile/install-binaries.sh to update them. COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli gometalinter ENV PATH=/usr/local/cli:$PATH ENTRYPOINT ["hack/dind"] # Upload docker source COPY . 
/go/src/github.com/docker/docker + +# Options for hack/validate/gometalinter +ENV GOMETALINTER_OPTS="--deadline 10m -j2" diff --git a/components/engine/Dockerfile.e2e b/components/engine/Dockerfile.e2e new file mode 100644 index 0000000000..3d700fab51 --- /dev/null +++ b/components/engine/Dockerfile.e2e @@ -0,0 +1,70 @@ +## Step 1: Build tests +FROM golang:1.8.3-alpine3.6 as builder + +RUN apk add --update \ + bash \ + build-base \ + curl \ + lvm2-dev \ + jq \ + && rm -rf /var/cache/apk/* + +RUN mkdir -p /go/src/github.com/docker/docker/ +WORKDIR /go/src/github.com/docker/docker/ + +# Generate frozen images +COPY contrib/download-frozen-image-v2.sh contrib/download-frozen-image-v2.sh +RUN contrib/download-frozen-image-v2.sh /output/docker-frozen-images \ + buildpack-deps:jessie@sha256:85b379ec16065e4fe4127eb1c5fb1bcc03c559bd36dbb2e22ff496de55925fa6 \ + busybox:latest@sha256:32f093055929dbc23dec4d03e09dfe971f5973a9ca5cf059cbfb644c206aa83f \ + debian:jessie@sha256:72f784399fd2719b4cb4e16ef8e369a39dc67f53d978cd3e2e7bf4e502c7b793 \ + hello-world:latest@sha256:c5515758d4c5e1e838e9cd307f6c6a0d620b5e07e6f927b07d05f6d12a1ac8d7 + +# Download Docker CLI binary +COPY hack/dockerfile hack/dockerfile +RUN hack/dockerfile/install-binaries.sh dockercli + +# Set tag and add sources +ARG DOCKER_GITCOMMIT +ENV DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT +ADD . . + +# Build DockerSuite.TestBuild* dependency +RUN CGO_ENABLED=0 go build -o /output/httpserver github.com/docker/docker/contrib/httpserver + +# Build the integration tests and copy the resulting binaries to /output/tests +RUN hack/make.sh build-integration-test-binary +RUN mkdir -p /output/tests && find . -name test.main -exec cp --parents '{}' /output/tests \; + +## Step 2: Generate testing image +FROM alpine:3.6 as runner + +# GNU tar is used for generating the emptyfs image +RUN apk add --update \ + bash \ + ca-certificates \ + g++ \ + git \ + iptables \ + tar \ + xz \ + && rm -rf /var/cache/apk/* + +# Add an unprivileged user to be used for tests which need it +RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash + +COPY contrib/httpserver/Dockerfile /tests/contrib/httpserver/Dockerfile +COPY contrib/syscall-test /tests/contrib/syscall-test +COPY integration-cli/fixtures /tests/integration-cli/fixtures + +COPY hack/test/e2e-run.sh /scripts/run.sh +COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh + +COPY --from=builder /output/docker-frozen-images /docker-frozen-images +COPY --from=builder /output/httpserver /tests/contrib/httpserver/httpserver +COPY --from=builder /output/tests /tests +COPY --from=builder /usr/local/bin/docker /usr/bin/docker + +ENV DOCKER_REMOTE_DAEMON=1 DOCKER_INTEGRATION_DAEMON_DEST=/ + +ENTRYPOINT ["/scripts/run.sh"] diff --git a/components/engine/Dockerfile.ppc64le b/components/engine/Dockerfile.ppc64le index 3234239ea6..91d2bd2476 100644 --- a/components/engine/Dockerfile.ppc64le +++ b/components/engine/Dockerfile.ppc64le @@ -15,7 +15,7 @@ # the case. Therefore, you don't have to disable it anymore. 
# -FROM ppc64le/debian:jessie +FROM ppc64le/debian:stretch # allow replacing httpredir or deb mirror ARG APT_MIRROR=deb.debian.org @@ -40,34 +40,26 @@ RUN apt-get update && apt-get install -y \ net-tools \ libapparmor-dev \ libcap-dev \ - libsystemd-journal-dev \ + libdevmapper-dev \ + libsystemd-dev \ libtool \ + libudev-dev \ mercurial \ pkg-config \ + python-backports.ssl-match-hostname \ python-dev \ python-mock \ python-pip \ + python-requests \ + python-setuptools \ python-websocket \ + python-wheel \ xfsprogs \ tar \ + thin-provisioning-tools \ vim-common \ --no-install-recommends -# Get lvm2 sources to build statically linked devmapper library -ENV LVM2_VERSION 2.02.173 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 - -# Compile and install (only the needed library) -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - --enable-pkgconfig \ - && make -C include \ - && make -C libdm install_device-mapper - # Install seccomp: the version shipped upstream is too old ENV SECCOMP_VERSION 2.3.2 RUN set -x \ @@ -125,9 +117,11 @@ RUN set -x \ # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef +# To run integration tests docker-pycreds is required. RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ + && pip install docker-pycreds==0.2.1 \ && pip install -r test-requirements.txt # Set user.email so crosbymichael's in-container merge commits go smoothly @@ -160,7 +154,7 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ # Please edit hack/dockerfile/install-binaries.sh to update them. COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli gometalinter ENV PATH=/usr/local/cli:$PATH # Wrap all commands in the "docker-in-docker" script to allow nested containers diff --git a/components/engine/Dockerfile.s390x b/components/engine/Dockerfile.s390x index 14dfd12bdb..9ad5d5aa59 100644 --- a/components/engine/Dockerfile.s390x +++ b/components/engine/Dockerfile.s390x @@ -15,7 +15,7 @@ # the case. Therefore, you don't have to disable it anymore. 
# -FROM s390x/debian:jessie +FROM s390x/debian:stretch # Packaged dependencies RUN apt-get update && apt-get install -y \ @@ -36,16 +36,23 @@ RUN apt-get update && apt-get install -y \ net-tools \ libapparmor-dev \ libcap-dev \ - libsystemd-journal-dev \ + libdevmapper-dev \ + libsystemd-dev \ libtool \ + libudev-dev \ mercurial \ pkg-config \ + python-backports.ssl-match-hostname \ python-dev \ python-mock \ python-pip \ + python-requests \ + python-setuptools \ python-websocket \ + python-wheel \ xfsprogs \ tar \ + thin-provisioning-tools \ vim-common \ --no-install-recommends @@ -64,21 +71,6 @@ RUN set -x \ ) \ && rm -rf "$SECCOMP_PATH" -# Get lvm2 sources to build statically linked devmapper library -ENV LVM2_VERSION 2.02.173 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 - -# Compile and install (only the needed library) -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - --enable-pkgconfig \ - && make -C include \ - && make -C libdm install_device-mapper - # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ENV GO_VERSION 1.8.3 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ @@ -118,9 +110,11 @@ RUN set -x \ # Get the "docker-py" source so we can run their integration tests ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef +# To run integration tests docker-pycreds is required. RUN git clone https://github.com/docker/docker-py.git /docker-py \ && cd /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ + && pip install docker-pycreds==0.2.1 \ && pip install -r test-requirements.txt # Set user.email so crosbymichael's in-container merge commits go smoothly @@ -153,7 +147,7 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ # Please edit hack/dockerfile/install-binaries.sh to update them. COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli gometalinter ENV PATH=/usr/local/cli:$PATH # Wrap all commands in the "docker-in-docker" script to allow nested containers diff --git a/components/engine/Dockerfile.simple b/components/engine/Dockerfile.simple index f84f3c565c..cdbc242a75 100644 --- a/components/engine/Dockerfile.simple +++ b/components/engine/Dockerfile.simple @@ -5,7 +5,7 @@ # This represents the bare minimum required to build and test Docker. -FROM debian:jessie +FROM debian:stretch # allow replacing httpredir or deb mirror ARG APT_MIRROR=deb.debian.org diff --git a/components/engine/Makefile b/components/engine/Makefile index 4cf8c02cf7..d9fb68b1de 100644 --- a/components/engine/Makefile +++ b/components/engine/Makefile @@ -35,6 +35,7 @@ DOCKER_ENVS := \ -e DOCKER_REMAP_ROOT \ -e DOCKER_STORAGE_OPTS \ -e DOCKER_USERLANDPROXY \ + -e TEST_INTEGRATION_DIR \ -e TESTDIRS \ -e TESTFLAGS \ -e TIMEOUT \ @@ -43,7 +44,8 @@ DOCKER_ENVS := \ -e NO_PROXY \ -e http_proxy \ -e https_proxy \ - -e no_proxy + -e no_proxy \ + -e VERSION # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds # to allow `make BIND_DIR=. 
shell` or `make BIND_DIR= test` diff --git a/components/engine/api/common.go b/components/engine/api/common.go index 6e462aeda7..ff87a94b58 100644 --- a/components/engine/api/common.go +++ b/components/engine/api/common.go @@ -1,17 +1,5 @@ package api -import ( - "encoding/json" - "encoding/pem" - "fmt" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/system" - "github.com/docker/libtrust" -) - // Common constants for daemon and client. const ( // DefaultVersion of Current REST API @@ -21,45 +9,3 @@ const ( // command to specify that no base image is to be used. NoBaseImageSpecifier string = "scratch" ) - -// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, -// otherwise generates a new one -func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { - err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "") - if err != nil { - return nil, err - } - trustKey, err := libtrust.LoadKeyFile(trustKeyPath) - if err == libtrust.ErrKeyFileDoesNotExist { - trustKey, err = libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("Error generating key: %s", err) - } - encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) - if err != nil { - return nil, fmt.Errorf("Error serializing key: %s", err) - } - if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { - return nil, fmt.Errorf("Error saving key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) - } - return trustKey, nil -} - -func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { - if ext == ".json" || ext == ".jwk" { - encoded, err = json.Marshal(key) - if err != nil { - return nil, fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - pemBlock, err := key.PEMBlock() - if err != nil { - return nil, fmt.Errorf("unable to encode private key PEM: %s", err) - } - encoded = pem.EncodeToMemory(pemBlock) - } - return -} diff --git a/components/engine/api/common_test.go b/components/engine/api/common_test.go deleted file mode 100644 index f466616b0f..0000000000 --- a/components/engine/api/common_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package api - -import ( - "io/ioutil" - "path/filepath" - "testing" - - "os" -) - -// LoadOrCreateTrustKey -func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { - tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpKeyFolderPath) - - tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") - if err != nil { - t.Fatal(err) - } - - if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil { - t.Fatal("expected an error, got nothing.") - } - -} - -func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) { - tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpKeyFolderPath) - - // Without the need to create the folder hierarchy - tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile") - - if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { - t.Fatalf("expected a new key file, got : %v and %v", err, key) - } - - if _, err := os.Stat(tmpKeyFile); err != nil { - t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) - } - - // With the need to create the folder hierarchy as tmpKeyFie is in a path - // where some folders do not 
exist. - tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile") - - if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { - t.Fatalf("expected a new key file, got : %v and %v", err, key) - } - - if _, err := os.Stat(tmpKeyFile); err != nil { - t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) - } - - // With no path at all - defer os.Remove("keyfile") - if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil { - t.Fatalf("expected a new key file, got : %v and %v", err, key) - } - - if _, err := os.Stat("keyfile"); err != nil { - t.Fatalf("Expected to find a file keyfile, got %v", err) - } -} - -func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { - tmpKeyFile := filepath.Join("fixtures", "keyfile") - - if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { - t.Fatalf("expected a key file, got : %v and %v", err, key) - } -} diff --git a/components/engine/api/server/httputils/form.go b/components/engine/api/server/httputils/form.go index a5f62287ec..1ce822ed94 100644 --- a/components/engine/api/server/httputils/form.go +++ b/components/engine/api/server/httputils/form.go @@ -2,7 +2,6 @@ package httputils import ( "net/http" - "path/filepath" "strconv" "strings" ) @@ -69,8 +68,7 @@ func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, if name == "" { return ArchiveOptions{}, badParameterError{"name"} } - - path := filepath.FromSlash(r.Form.Get("path")) + path := r.Form.Get("path") if path == "" { return ArchiveOptions{}, badParameterError{"path"} } diff --git a/components/engine/api/server/httputils/write_log_stream.go b/components/engine/api/server/httputils/write_log_stream.go index fd024e196e..e90e610da6 100644 --- a/components/engine/api/server/httputils/write_log_stream.go +++ b/components/engine/api/server/httputils/write_log_stream.go @@ -17,20 +17,13 @@ import ( // WriteLogStream writes an encoded byte stream of log messages from the // messages channel, multiplexing them with a stdcopy.Writer if mux is true -func WriteLogStream(ctx context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) { +func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) { wf := ioutils.NewWriteFlusher(w) defer wf.Close() wf.Flush() - // this might seem like doing below is clear: - // var outStream io.Writer = wf - // however, this GREATLY DISPLEASES golint, and if you do that, it will - // fail CI. we need outstream to be type writer because if we mux streams, - // we will need to reassign all of the streams to be stdwriters, which only - // conforms to the io.Writer interface. 
- var outStream io.Writer - outStream = wf + outStream := io.Writer(wf) errStream := outStream sysErrStream := errStream if mux { diff --git a/components/engine/api/server/router/swarm/cluster_routes.go b/components/engine/api/server/router/swarm/cluster_routes.go index 7bd3aff182..55743a6218 100644 --- a/components/engine/api/server/router/swarm/cluster_routes.go +++ b/components/engine/api/server/router/swarm/cluster_routes.go @@ -427,11 +427,7 @@ func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, } id := vars["id"] - if err := sr.backend.UpdateSecret(id, version, secret); err != nil { - return err - } - - return nil + return sr.backend.UpdateSecret(id, version, secret) } func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { @@ -498,9 +494,5 @@ func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter, } id := vars["id"] - if err := sr.backend.UpdateConfig(id, version, config); err != nil { - return err - } - - return nil + return sr.backend.UpdateConfig(id, version, config) } diff --git a/components/engine/api/server/server.go b/components/engine/api/server/server.go index 7ba8a6ce39..bf3774bba0 100644 --- a/components/engine/api/server/server.go +++ b/components/engine/api/server/server.go @@ -23,7 +23,6 @@ const versionMatcher = "/v{version:[0-9.]+}" // Config provides the configuration for the API server type Config struct { Logging bool - EnableCors bool CorsHeaders string Version string SocketGroup string diff --git a/components/engine/api/swagger.yaml b/components/engine/api/swagger.yaml index 0c9ee9515a..75276c1b93 100644 --- a/components/engine/api/swagger.yaml +++ b/components/engine/api/swagger.yaml @@ -144,6 +144,10 @@ tags: x-displayName: "Secrets" description: | Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. # System things - name: "Plugin" x-displayName: "Plugins" diff --git a/components/engine/api/types/client.go b/components/engine/api/types/client.go index 18a1263f10..4ca9ccac72 100644 --- a/components/engine/api/types/client.go +++ b/components/engine/api/types/client.go @@ -181,7 +181,7 @@ type ImageBuildOptions struct { SessionID string // TODO @jhowardmsft LCOW Support: This will require extending to include - // `Platform string`, but is ommited for now as it's hard-coded temporarily + // `Platform string`, but is omitted for now as it's hard-coded temporarily // to avoid API changes. 
} diff --git a/components/engine/api/types/filters/example_test.go b/components/engine/api/types/filters/example_test.go new file mode 100644 index 0000000000..6529be3250 --- /dev/null +++ b/components/engine/api/types/filters/example_test.go @@ -0,0 +1,24 @@ +package filters + +func ExampleArgs_MatchKVList() { + args := NewArgs( + Arg("label", "image=foo"), + Arg("label", "state=running")) + + // returns true because there are no values for bogus + args.MatchKVList("bogus", nil) + + // returns false because there are no sources + args.MatchKVList("label", nil) + + // returns true because all sources are matched + args.MatchKVList("label", map[string]string{ + "image": "foo", + "state": "running", + }) + + // returns false because the values do not match + args.MatchKVList("label", map[string]string{ + "image": "other", + }) +} diff --git a/components/engine/api/types/filters/parse.go b/components/engine/api/types/filters/parse.go index 363d4540bb..d45d0528fb 100644 --- a/components/engine/api/types/filters/parse.go +++ b/components/engine/api/types/filters/parse.go @@ -1,5 +1,6 @@ -// Package filters provides helper function to parse and handle command line -// filter, used for example in docker ps or docker images commands. +/*Package filters provides tools for encoding a mapping of keys to a set of +multiple values. +*/ package filters import ( @@ -11,27 +12,34 @@ import ( "github.com/docker/docker/api/types/versions" ) -// Args stores filter arguments as map key:{map key: bool}. -// It contains an aggregation of the map of arguments (which are in the form -// of -f 'key=value') based on the key, and stores values for the same key -// in a map with string keys and boolean values. -// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' -// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} +// Args stores a mapping of keys to a set of multiple values. type Args struct { fields map[string]map[string]bool } -// NewArgs initializes a new Args struct. -func NewArgs() Args { - return Args{fields: map[string]map[string]bool{}} +// KeyValuePair are used to initialize a new Args +type KeyValuePair struct { + Key string + Value string } -// ParseFlag parses the argument to the filter flag. Like +// Arg creates a new KeyValuePair for initializing Args +func Arg(key, value string) KeyValuePair { + return KeyValuePair{Key: key, Value: value} +} + +// NewArgs returns a new Args populated with the initial args +func NewArgs(initialArgs ...KeyValuePair) Args { + args := Args{fields: map[string]map[string]bool{}} + for _, arg := range initialArgs { + args.Add(arg.Key, arg.Value) + } + return args +} + +// ParseFlag parses a key=value string and adds it to an Args. // -// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` -// -// If prev map is provided, then it is appended to, and returned. By default a new -// map is created. +// Deprecated: Use Args.Add() func ParseFlag(arg string, prev Args) (Args, error) { filters := prev if len(arg) == 0 { @@ -52,74 +60,95 @@ func ParseFlag(arg string, prev Args) (Args, error) { return filters, nil } -// ErrBadFormat is an error returned in case of bad format for a filter. +// ErrBadFormat is an error returned when a filter is not in the form key=value +// +// Deprecated: this error will be removed in a future version var ErrBadFormat = errors.New("bad format of filter (expected name=value)") -// ToParam packs the Args into a string for easy transport from client to server. 
+// ToParam encodes the Args as args JSON encoded string +// +// Deprecated: use ToJSON func ToParam(a Args) (string, error) { - // this way we don't URL encode {}, just empty space + return ToJSON(a) +} + +// MarshalJSON returns a JSON byte representation of the Args +func (args Args) MarshalJSON() ([]byte, error) { + if len(args.fields) == 0 { + return []byte{}, nil + } + return json.Marshal(args.fields) +} + +// ToJSON returns the Args as a JSON encoded string +func ToJSON(a Args) (string, error) { if a.Len() == 0 { return "", nil } - - buf, err := json.Marshal(a.fields) - if err != nil { - return "", err - } - return string(buf), nil + buf, err := json.Marshal(a) + return string(buf), err } -// ToParamWithVersion packs the Args into a string for easy transport from client to server. -// The generated string will depend on the specified version (corresponding to the API version). +// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 +// then the encoded format will use an older legacy format where the values are a +// list of strings, instead of a set. +// +// Deprecated: Use ToJSON func ToParamWithVersion(version string, a Args) (string, error) { - // this way we don't URL encode {}, just empty space if a.Len() == 0 { return "", nil } - // for daemons older than v1.10, filter must be of the form map[string][]string - var buf []byte - var err error if version != "" && versions.LessThan(version, "1.22") { - buf, err = json.Marshal(convertArgsToSlice(a.fields)) - } else { - buf, err = json.Marshal(a.fields) + buf, err := json.Marshal(convertArgsToSlice(a.fields)) + return string(buf), err } - if err != nil { - return "", err - } - return string(buf), nil + + return ToJSON(a) } -// FromParam unpacks the filter Args. +// FromParam decodes a JSON encoded string into Args +// +// Deprecated: use FromJSON func FromParam(p string) (Args, error) { - if len(p) == 0 { - return NewArgs(), nil - } - - r := strings.NewReader(p) - d := json.NewDecoder(r) - - m := map[string]map[string]bool{} - if err := d.Decode(&m); err != nil { - r.Seek(0, 0) - - // Allow parsing old arguments in slice format. - // Because other libraries might be sending them in this format. - deprecated := map[string][]string{} - if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { - m = deprecatedArgs(deprecated) - } else { - return NewArgs(), err - } - } - return Args{m}, nil + return FromJSON(p) } -// Get returns the list of values associates with a field. -// It returns a slice of strings to keep backwards compatibility with old code. 
-func (filters Args) Get(field string) []string { - values := filters.fields[field] +// FromJSON decodes a JSON encoded string into Args +func FromJSON(p string) (Args, error) { + args := NewArgs() + + if p == "" { + return args, nil + } + + raw := []byte(p) + err := json.Unmarshal(raw, &args) + if err == nil { + return args, nil + } + + // Fallback to parsing arguments in the legacy slice format + deprecated := map[string][]string{} + if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { + return args, err + } + + args.fields = deprecatedArgs(deprecated) + return args, nil +} + +// UnmarshalJSON populates the Args from JSON encode bytes +func (args Args) UnmarshalJSON(raw []byte) error { + if len(raw) == 0 { + return nil + } + return json.Unmarshal(raw, &args.fields) +} + +// Get returns the list of values associated with the key +func (args Args) Get(key string) []string { + values := args.fields[key] if values == nil { return make([]string, 0) } @@ -130,37 +159,34 @@ func (filters Args) Get(field string) []string { return slice } -// Add adds a new value to a filter field. -func (filters Args) Add(name, value string) { - if _, ok := filters.fields[name]; ok { - filters.fields[name][value] = true +// Add a new value to the set of values +func (args Args) Add(key, value string) { + if _, ok := args.fields[key]; ok { + args.fields[key][value] = true } else { - filters.fields[name] = map[string]bool{value: true} + args.fields[key] = map[string]bool{value: true} } } -// Del removes a value from a filter field. -func (filters Args) Del(name, value string) { - if _, ok := filters.fields[name]; ok { - delete(filters.fields[name], value) - if len(filters.fields[name]) == 0 { - delete(filters.fields, name) +// Del removes a value from the set +func (args Args) Del(key, value string) { + if _, ok := args.fields[key]; ok { + delete(args.fields[key], value) + if len(args.fields[key]) == 0 { + delete(args.fields, key) } } } -// Len returns the number of fields in the arguments. -func (filters Args) Len() int { - return len(filters.fields) +// Len returns the number of keys in the mapping +func (args Args) Len() int { + return len(args.fields) } -// MatchKVList returns true if the values for the specified field matches the ones -// from the sources. -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'label' and sources are {'label1': '1', 'label2': '2'} -// it returns true. -func (filters Args) MatchKVList(field string, sources map[string]string) bool { - fieldValues := filters.fields[field] +// MatchKVList returns true if all the pairs in sources exist as key=value +// pairs in the mapping at key, or if there are no values at key. +func (args Args) MatchKVList(key string, sources map[string]string) bool { + fieldValues := args.fields[key] //do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { @@ -171,8 +197,8 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool { return false } - for name2match := range fieldValues { - testKV := strings.SplitN(name2match, "=", 2) + for value := range fieldValues { + testKV := strings.SplitN(value, "=", 2) v, ok := sources[testKV[0]] if !ok { @@ -186,16 +212,13 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool { return true } -// Match returns true if the values for the specified field matches the source string -// e.g. 
given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'image.name' and source is 'ubuntu' -// it returns true. -func (filters Args) Match(field, source string) bool { - if filters.ExactMatch(field, source) { +// Match returns true if any of the values at key match the source string +func (args Args) Match(field, source string) bool { + if args.ExactMatch(field, source) { return true } - fieldValues := filters.fields[field] + fieldValues := args.fields[field] for name2match := range fieldValues { match, err := regexp.MatchString(name2match, source) if err != nil { @@ -208,9 +231,9 @@ func (filters Args) Match(field, source string) bool { return false } -// ExactMatch returns true if the source matches exactly one of the filters. -func (filters Args) ExactMatch(field, source string) bool { - fieldValues, ok := filters.fields[field] +// ExactMatch returns true if the source matches exactly one of the values. +func (args Args) ExactMatch(key, source string) bool { + fieldValues, ok := args.fields[key] //do not filter if there is no filter set or cannot determine filter if !ok || len(fieldValues) == 0 { return true @@ -220,14 +243,15 @@ func (filters Args) ExactMatch(field, source string) bool { return fieldValues[source] } -// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. -func (filters Args) UniqueExactMatch(field, source string) bool { - fieldValues := filters.fields[field] +// UniqueExactMatch returns true if there is only one value and the source +// matches exactly the value. +func (args Args) UniqueExactMatch(key, source string) bool { + fieldValues := args.fields[key] //do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { return true } - if len(filters.fields[field]) != 1 { + if len(args.fields[key]) != 1 { return false } @@ -235,14 +259,14 @@ func (filters Args) UniqueExactMatch(field, source string) bool { return fieldValues[source] } -// FuzzyMatch returns true if the source matches exactly one of the filters, -// or the source has one of the filters as a prefix. -func (filters Args) FuzzyMatch(field, source string) bool { - if filters.ExactMatch(field, source) { +// FuzzyMatch returns true if the source matches exactly one value, or the +// source has one of the values as a prefix. +func (args Args) FuzzyMatch(key, source string) bool { + if args.ExactMatch(key, source) { return true } - fieldValues := filters.fields[field] + fieldValues := args.fields[key] for prefix := range fieldValues { if strings.HasPrefix(source, prefix) { return true @@ -251,9 +275,17 @@ func (filters Args) FuzzyMatch(field, source string) bool { return false } -// Include returns true if the name of the field to filter is in the filters. -func (filters Args) Include(field string) bool { - _, ok := filters.fields[field] +// Include returns true if the key exists in the mapping +// +// Deprecated: use Contains +func (args Args) Include(field string) bool { + _, ok := args.fields[field] + return ok +} + +// Contains returns true if the key exists in the mapping +func (args Args) Contains(field string) bool { + _, ok := args.fields[field] return ok } @@ -265,10 +297,10 @@ func (e invalidFilter) Error() string { func (invalidFilter) InvalidParameter() {} -// Validate ensures that all the fields in the filter are valid. -// It returns an error as soon as it finds an invalid field. 
-func (filters Args) Validate(accepted map[string]bool) error { - for name := range filters.fields { +// Validate compared the set of accepted keys against the keys in the mapping. +// An error is returned if any mapping keys are not in the accepted set. +func (args Args) Validate(accepted map[string]bool) error { + for name := range args.fields { if !accepted[name] { return invalidFilter(name) } @@ -276,13 +308,14 @@ func (filters Args) Validate(accepted map[string]bool) error { return nil } -// WalkValues iterates over the list of filtered values for a field. -// It stops the iteration if it finds an error and it returns that error. -func (filters Args) WalkValues(field string, op func(value string) error) error { - if _, ok := filters.fields[field]; !ok { +// WalkValues iterates over the list of values for a key in the mapping and calls +// op() for each value. If op returns an error the iteration stops and the +// error is returned. +func (args Args) WalkValues(field string, op func(value string) error) error { + if _, ok := args.fields[field]; !ok { return nil } - for v := range filters.fields[field] { + for v := range args.fields[field] { if err := op(v); err != nil { return err } diff --git a/components/engine/api/types/filters/parse_test.go b/components/engine/api/types/filters/parse_test.go index ccd1684a07..5279054854 100644 --- a/components/engine/api/types/filters/parse_test.go +++ b/components/engine/api/types/filters/parse_test.go @@ -3,6 +3,9 @@ package filters import ( "errors" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestParseArgs(t *testing.T) { @@ -16,23 +19,18 @@ func TestParseArgs(t *testing.T) { args = NewArgs() err error ) + for i := range flagArgs { args, err = ParseFlag(flagArgs[i], args) - if err != nil { - t.Errorf("failed to parse %s: %s", flagArgs[i], err) - } - } - if len(args.Get("created")) != 1 { - t.Error("failed to set this arg") - } - if len(args.Get("image.name")) != 2 { - t.Error("the args should have collapsed") + require.NoError(t, err) } + assert.Len(t, args.Get("created"), 1) + assert.Len(t, args.Get("image.name"), 2) } func TestParseArgsEdgeCase(t *testing.T) { - var filters Args - args, err := ParseFlag("", filters) + var args Args + args, err := ParseFlag("", args) if err != nil { t.Fatal(err) } @@ -184,7 +182,7 @@ func TestArgsMatchKVList(t *testing.T) { } for args, field := range matches { - if args.MatchKVList(field, sources) != true { + if !args.MatchKVList(field, sources) { t.Fatalf("Expected true for %v on %v, got false", sources, args) } } @@ -204,7 +202,7 @@ func TestArgsMatchKVList(t *testing.T) { } for args, field := range differs { - if args.MatchKVList(field, sources) != false { + if args.MatchKVList(field, sources) { t.Fatalf("Expected false for %v on %v, got true", sources, args) } } @@ -233,9 +231,8 @@ func TestArgsMatch(t *testing.T) { } for args, field := range matches { - if args.Match(field, source) != true { - t.Fatalf("Expected true for %v on %v, got false", source, args) - } + assert.True(t, args.Match(field, source), + "Expected field %s to match %s", field, source) } differs := map[*Args]string{ @@ -258,9 +255,8 @@ func TestArgsMatch(t *testing.T) { } for args, field := range differs { - if args.Match(field, source) != false { - t.Fatalf("Expected false for %v on %v, got true", source, args) - } + assert.False(t, args.Match(field, source), + "Expected field %s to not match %s", field, source) } } diff --git a/components/engine/api/types/time/timestamp.go 
b/components/engine/api/types/time/timestamp.go index 9aa9702dad..ed9c1168b7 100644 --- a/components/engine/api/types/time/timestamp.go +++ b/components/engine/api/types/time/timestamp.go @@ -29,10 +29,8 @@ func GetTimestamp(value string, reference time.Time) (string, error) { } var format string - var parseInLocation bool - // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) if strings.Contains(value, ".") { if parseInLocation { diff --git a/components/engine/builder/builder.go b/components/engine/builder/builder.go index e480601d46..f376f530cd 100644 --- a/components/engine/builder/builder.go +++ b/components/engine/builder/builder.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/api/types/container" containerpkg "github.com/docker/docker/container" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/containerfs" "golang.org/x/net/context" ) @@ -24,7 +25,7 @@ const ( // instructions in the builder. type Source interface { // Root returns root path for accessing source - Root() string + Root() containerfs.ContainerFS // Close allows to signal that the filesystem tree won't be used anymore. // For Context implementations using a temporary directory, it is recommended to // delete the temporary directory in Close(). @@ -99,7 +100,7 @@ type Image interface { // ReleaseableLayer is an image layer that can be mounted and released type ReleaseableLayer interface { Release() error - Mount() (string, error) + Mount() (containerfs.ContainerFS, error) Commit(platform string) (ReleaseableLayer, error) DiffID() layer.DiffID } diff --git a/components/engine/builder/dockerfile/bflag_test.go b/components/engine/builder/dockerfile/bflag_test.go index ac07e48c14..4ea10fff4b 100644 --- a/components/engine/builder/dockerfile/bflag_test.go +++ b/components/engine/builder/dockerfile/bflag_test.go @@ -34,10 +34,10 @@ func TestBuilderFlags(t *testing.T) { t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err) } - if flStr1.IsUsed() == true { + if flStr1.IsUsed() { t.Fatal("Test3 - str1 was not used!") } - if flBool1.IsUsed() == true { + if flBool1.IsUsed() { t.Fatal("Test3 - bool1 was not used!") } @@ -58,10 +58,10 @@ func TestBuilderFlags(t *testing.T) { if flBool1.IsTrue() { t.Fatal("Bool1 was supposed to default to: false") } - if flStr1.IsUsed() == true { + if flStr1.IsUsed() { t.Fatal("Str1 was not used!") } - if flBool1.IsUsed() == true { + if flBool1.IsUsed() { t.Fatal("Bool1 was not used!") } diff --git a/components/engine/builder/dockerfile/builder.go b/components/engine/builder/dockerfile/builder.go index 34db3786f1..46a5af7395 100644 --- a/components/engine/builder/dockerfile/builder.go +++ b/components/engine/builder/dockerfile/builder.go @@ -17,8 +17,6 @@ import ( "github.com/docker/docker/builder/dockerfile/parser" "github.com/docker/docker/builder/fscache" "github.com/docker/docker/builder/remotecontext" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/stringid" @@ -50,21 +48,21 @@ type SessionGetter interface { // BuildManager is shared across all Builder objects type BuildManager struct { - archiver *archive.Archiver - backend builder.Backend - pathCache pathCache // TODO: make this persistent - sg 
SessionGetter - fsCache *fscache.FSCache + idMappings *idtools.IDMappings + backend builder.Backend + pathCache pathCache // TODO: make this persistent + sg SessionGetter + fsCache *fscache.FSCache } // NewBuildManager creates a BuildManager func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, idMappings *idtools.IDMappings) (*BuildManager, error) { bm := &BuildManager{ - backend: b, - pathCache: &syncmap.Map{}, - sg: sg, - archiver: chrootarchive.NewArchiver(idMappings), - fsCache: fsCache, + backend: b, + pathCache: &syncmap.Map{}, + sg: sg, + idMappings: idMappings, + fsCache: fsCache, } if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil { return nil, err @@ -114,7 +112,7 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) ( ProgressWriter: config.ProgressWriter, Backend: bm.backend, PathCache: bm.pathCache, - Archiver: bm.archiver, + IDMappings: bm.idMappings, Platform: dockerfile.Platform, } @@ -160,7 +158,7 @@ type builderOptions struct { Backend builder.Backend ProgressWriter backend.ProgressWriter PathCache pathCache - Archiver *archive.Archiver + IDMappings *idtools.IDMappings Platform string } @@ -177,7 +175,7 @@ type Builder struct { docker builder.Backend clientCtx context.Context - archiver *archive.Archiver + idMappings *idtools.IDMappings buildStages *buildStages disableCommit bool buildArgs *buildArgs @@ -219,7 +217,7 @@ func newBuilder(clientCtx context.Context, options builderOptions) *Builder { Aux: options.ProgressWriter.AuxFormatter, Output: options.ProgressWriter.Output, docker: options.Backend, - archiver: options.Archiver, + idMappings: options.IDMappings, buildArgs: newBuildArgs(config.BuildArgs), buildStages: newBuildStages(), imageSources: newImageSources(clientCtx, options), diff --git a/components/engine/builder/dockerfile/copy.go b/components/engine/builder/dockerfile/copy.go index 223623ccd6..f4b703d8d1 100644 --- a/components/engine/builder/dockerfile/copy.go +++ b/components/engine/builder/dockerfile/copy.go @@ -1,12 +1,15 @@ package dockerfile import ( + "archive/tar" "fmt" "io" + "mime" "net/http" "net/url" "os" "path/filepath" + "runtime" "sort" "strings" "time" @@ -14,16 +17,18 @@ import ( "github.com/docker/docker/builder" "github.com/docker/docker/builder/remotecontext" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/urlutil" "github.com/pkg/errors" ) +const unnamedFilename = "__unnamed__" + type pathCache interface { Load(key interface{}) (value interface{}, ok bool) Store(key, value interface{}) @@ -32,14 +37,14 @@ type pathCache interface { // copyInfo is a data object which stores the metadata about each source file in // a copyInstruction type copyInfo struct { - root string + root containerfs.ContainerFS path string hash string noDecompress bool } func (c copyInfo) fullPath() (string, error) { - return symlink.FollowSymlinkInScope(filepath.Join(c.root, c.path), c.root) + return c.root.ResolveScopedPath(c.path, true) } func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo { @@ -68,6 +73,7 @@ type copier struct { pathCache pathCache download sourceDownloader tmpPaths []string + platform string 
} func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier { @@ -76,6 +82,7 @@ func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, i pathCache: req.builder.pathCache, download: download, imageSource: imageSource, + platform: req.builder.platform, } } @@ -83,14 +90,14 @@ func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstr inst := copyInstruction{cmdName: cmdName} last := len(args) - 1 - // Work in daemon-specific filepath semantics - inst.dest = filepath.FromSlash(args[last]) - - infos, err := o.getCopyInfosForSourcePaths(args[0:last]) + // Work in platform-specific filepath semantics + inst.dest = fromSlash(args[last], o.platform) + separator := string(separator(o.platform)) + infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest) if err != nil { return inst, errors.Wrapf(err, "%s failed", cmdName) } - if len(infos) > 1 && !strings.HasSuffix(inst.dest, string(os.PathSeparator)) { + if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) { return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) } inst.infos = infos @@ -99,10 +106,11 @@ func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstr // getCopyInfosForSourcePaths iterates over the source files and calculate the info // needed to copy (e.g. hash value if cached) -func (o *copier) getCopyInfosForSourcePaths(sources []string) ([]copyInfo, error) { +// The dest is used in case source is URL (and ends with "/") +func (o *copier) getCopyInfosForSourcePaths(sources []string, dest string) ([]copyInfo, error) { var infos []copyInfo for _, orig := range sources { - subinfos, err := o.getCopyInfoForSourcePath(orig) + subinfos, err := o.getCopyInfoForSourcePath(orig, dest) if err != nil { return nil, err } @@ -115,15 +123,24 @@ func (o *copier) getCopyInfosForSourcePaths(sources []string) ([]copyInfo, error return infos, nil } -func (o *copier) getCopyInfoForSourcePath(orig string) ([]copyInfo, error) { +func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error) { if !urlutil.IsURL(orig) { return o.calcCopyInfo(orig, true) } + remote, path, err := o.download(orig) if err != nil { return nil, err } - o.tmpPaths = append(o.tmpPaths, remote.Root()) + // If path == "" then we are unable to determine filename from src + // We have to make sure dest is available + if path == "" { + if strings.HasSuffix(dest, "/") { + return nil, errors.Errorf("cannot determine filename for source %s", orig) + } + path = unnamedFilename + } + o.tmpPaths = append(o.tmpPaths, remote.Root().Path()) hash, err := remote.Hash(path) ci := newCopyInfoFromSource(remote, path, hash) @@ -143,14 +160,6 @@ func (o *copier) Cleanup() { // TODO: allowWildcards can probably be removed by refactoring this function further. func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) { imageSource := o.imageSource - if err := validateCopySourcePath(imageSource, origPath); err != nil { - return nil, err - } - - // Work in daemon-specific OS filepath semantics - origPath = filepath.FromSlash(origPath) - origPath = strings.TrimPrefix(origPath, string(os.PathSeparator)) - origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) // TODO: do this when creating copier. Requires validateCopySourcePath // (and other below) to be aware of the difference sources. 
Why is it only @@ -167,8 +176,20 @@ func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, return nil, errors.Errorf("missing build context") } + root := o.source.Root() + + if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil { + return nil, err + } + + // Work in source OS specific filepath semantics + // For LCOW, this is NOT the daemon OS. + origPath = root.FromSlash(origPath) + origPath = strings.TrimPrefix(origPath, string(root.Separator())) + origPath = strings.TrimPrefix(origPath, "."+string(root.Separator())) + // Deal with wildcards - if allowWildcards && containsWildcards(origPath) { + if allowWildcards && containsWildcards(origPath, root.OS()) { return o.copyWithWildcards(origPath) } @@ -200,6 +221,19 @@ func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil } +func containsWildcards(name, platform string) bool { + isWindows := platform == "windows" + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' && !isWindows { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} + func (o *copier) storeInPathCache(im *imageMount, path string, hash string) { if im != nil { o.pathCache.Store(im.ImageID()+path, hash) @@ -207,12 +241,13 @@ func (o *copier) storeInPathCache(im *imageMount, path string, hash string) { } func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) { + root := o.source.Root() var copyInfos []copyInfo - if err := filepath.Walk(o.source.Root(), func(path string, info os.FileInfo, err error) error { + if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error { if err != nil { return err } - rel, err := remotecontext.Rel(o.source.Root(), path) + rel, err := remotecontext.Rel(root, path) if err != nil { return err } @@ -220,7 +255,7 @@ func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) { if rel == "." 
{ return nil } - if match, _ := filepath.Match(origPath, rel); !match { + if match, _ := root.Match(origPath, rel); !match { return nil } @@ -262,7 +297,7 @@ func walkSource(source builder.Source, origPath string) ([]string, error) { } // Must be a dir var subfiles []string - err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error { + err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error { if err != nil { return err } @@ -301,22 +336,40 @@ func errOnSourceDownload(_ string) (builder.Source, string, error) { return nil, "", errors.New("source can't be a URL for COPY") } +func getFilenameForDownload(path string, resp *http.Response) string { + // Guess filename based on source + if path != "" && !strings.HasSuffix(path, "/") { + if filename := filepath.Base(filepath.FromSlash(path)); filename != "" { + return filename + } + } + + // Guess filename based on Content-Disposition + if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" { + if _, params, err := mime.ParseMediaType(contentDisposition); err == nil { + if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") { + if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" { + return filename + } + } + } + } + return "" +} + func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) { u, err := url.Parse(srcURL) if err != nil { return } - filename := filepath.Base(filepath.FromSlash(u.Path)) // Ensure in platform semantics - if filename == "" { - err = errors.Errorf("cannot determine filename from url: %s", u) - return - } resp, err := remotecontext.GetWithStatusError(srcURL) if err != nil { return } + filename := getFilenameForDownload(u.Path, resp) + // Prepare file in a tmp dir tmpDir, err := ioutils.TempDir("", "docker-remote") if err != nil { @@ -327,7 +380,13 @@ func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote b os.RemoveAll(tmpDir) } }() - tmpFileName := filepath.Join(tmpDir, filename) + // If filename is empty, the returned filename will be "" but + // the tmp filename will be created as "__unnamed__" + tmpFileName := filename + if filename == "" { + tmpFileName = unnamedFilename + } + tmpFileName = filepath.Join(tmpDir, tmpFileName) tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) if err != nil { return @@ -363,14 +422,19 @@ func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote b return } - lc, err := remotecontext.NewLazySource(tmpDir) + lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir)) return lc, filename, err } type copyFileOptions struct { decompress bool - archiver *archive.Archiver chownPair idtools.IDPair + archiver Archiver +} + +type copyEndpoint struct { + driver containerfs.Driver + path string } func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error { @@ -378,6 +442,7 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) if err != nil { return err } + destPath, err := dest.fullPath() if err != nil { return err @@ -385,59 +450,90 @@ func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) archiver := options.archiver - src, err := os.Stat(srcPath) + srcEndpoint := ©Endpoint{driver: source.root, path: srcPath} + destEndpoint := ©Endpoint{driver: dest.root, path: destPath} + + src, err := source.root.Stat(srcPath) if err != nil { return 
errors.Wrapf(err, "source path not found") } if src.IsDir() { - return copyDirectory(archiver, srcPath, destPath, options.chownPair) + return copyDirectory(archiver, srcEndpoint, destEndpoint, options.chownPair) } - if options.decompress && archive.IsArchivePath(srcPath) && !source.noDecompress { + if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress { return archiver.UntarPath(srcPath, destPath) } - destExistsAsDir, err := isExistingDirectory(destPath) + destExistsAsDir, err := isExistingDirectory(destEndpoint) if err != nil { return err } // dest.path must be used because destPath has already been cleaned of any // trailing slash - if endsInSlash(dest.path) || destExistsAsDir { + if endsInSlash(dest.root, dest.path) || destExistsAsDir { // source.path must be used to get the correct filename when the source // is a symlink - destPath = filepath.Join(destPath, filepath.Base(source.path)) + destPath = dest.root.Join(destPath, source.root.Base(source.path)) + destEndpoint = ©Endpoint{driver: dest.root, path: destPath} } - return copyFile(archiver, srcPath, destPath, options.chownPair) + return copyFile(archiver, srcEndpoint, destEndpoint, options.chownPair) } -func copyDirectory(archiver *archive.Archiver, source, dest string, chownPair idtools.IDPair) error { +func isArchivePath(driver containerfs.ContainerFS, path string) bool { + file, err := driver.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := archive.DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +func copyDirectory(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error { destExists, err := isExistingDirectory(dest) if err != nil { return errors.Wrapf(err, "failed to query destination path") } - if err := archiver.CopyWithTar(source, dest); err != nil { + + if err := archiver.CopyWithTar(source.path, dest.path); err != nil { return errors.Wrapf(err, "failed to copy directory") } - return fixPermissions(source, dest, chownPair, !destExists) + // TODO: @gupta-ak. Investigate how LCOW permission mappings will work. + return fixPermissions(source.path, dest.path, chownPair, !destExists) } -func copyFile(archiver *archive.Archiver, source, dest string, chownPair idtools.IDPair) error { - if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest), 0755, chownPair); err != nil { - return errors.Wrapf(err, "failed to create new directory") +func copyFile(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error { + if runtime.GOOS == "windows" && dest.driver.OS() == "linux" { + // LCOW + if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil { + return errors.Wrapf(err, "failed to create new directory") + } + } else { + if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, chownPair); err != nil { + // Normal containers + return errors.Wrapf(err, "failed to create new directory") + } } - if err := archiver.CopyFileWithTar(source, dest); err != nil { + + if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil { return errors.Wrapf(err, "failed to copy file") } - return fixPermissions(source, dest, chownPair, false) + // TODO: @gupta-ak. Investigate how LCOW permission mappings will work. 
+ return fixPermissions(source.path, dest.path, chownPair, false) } -func endsInSlash(path string) bool { - return strings.HasSuffix(path, string(os.PathSeparator)) +func endsInSlash(driver containerfs.Driver, path string) bool { + return strings.HasSuffix(path, string(driver.Separator())) } // isExistingDirectory returns true if the path exists and is a directory -func isExistingDirectory(path string) (bool, error) { - destStat, err := os.Stat(path) +func isExistingDirectory(point *copyEndpoint) (bool, error) { + destStat, err := point.driver.Stat(point.path) switch { case os.IsNotExist(err): return false, nil diff --git a/components/engine/builder/dockerfile/copy_test.go b/components/engine/builder/dockerfile/copy_test.go index ed384fd20b..87c5675d90 100644 --- a/components/engine/builder/dockerfile/copy_test.go +++ b/components/engine/builder/dockerfile/copy_test.go @@ -1,8 +1,10 @@ package dockerfile import ( + "net/http" "testing" + "github.com/docker/docker/pkg/containerfs" "github.com/gotestyourself/gotestyourself/fs" "github.com/stretchr/testify/assert" ) @@ -36,10 +38,110 @@ func TestIsExistingDirectory(t *testing.T) { } for _, testcase := range testcases { - result, err := isExistingDirectory(testcase.path) + result, err := isExistingDirectory(©Endpoint{driver: containerfs.NewLocalDriver(), path: testcase.path}) if !assert.NoError(t, err) { continue } assert.Equal(t, testcase.expected, result, testcase.doc) } } + +func TestGetFilenameForDownload(t *testing.T) { + var testcases = []struct { + path string + disposition string + expected string + }{ + { + path: "http://www.example.com/", + expected: "", + }, + { + path: "http://www.example.com/xyz", + expected: "xyz", + }, + { + path: "http://www.example.com/xyz.html", + expected: "xyz.html", + }, + { + path: "http://www.example.com/xyz/", + expected: "", + }, + { + path: "http://www.example.com/xyz/uvw", + expected: "uvw", + }, + { + path: "http://www.example.com/xyz/uvw.html", + expected: "uvw.html", + }, + { + path: "http://www.example.com/xyz/uvw/", + expected: "", + }, + { + path: "/", + expected: "", + }, + { + path: "/xyz", + expected: "xyz", + }, + { + path: "/xyz.html", + expected: "xyz.html", + }, + { + path: "/xyz/", + expected: "", + }, + { + path: "/xyz/", + disposition: "attachment; filename=xyz.html", + expected: "xyz.html", + }, + { + disposition: "", + expected: "", + }, + { + disposition: "attachment; filename=xyz", + expected: "xyz", + }, + { + disposition: "attachment; filename=xyz.html", + expected: "xyz.html", + }, + { + disposition: "attachment; filename=\"xyz\"", + expected: "xyz", + }, + { + disposition: "attachment; filename=\"xyz.html\"", + expected: "xyz.html", + }, + { + disposition: "attachment; filename=\"/xyz.html\"", + expected: "xyz.html", + }, + { + disposition: "attachment; filename=\"/xyz/uvw\"", + expected: "uvw", + }, + { + disposition: "attachment; filename=\"Naïve file.txt\"", + expected: "Naïve file.txt", + }, + } + for _, testcase := range testcases { + resp := http.Response{ + Header: make(map[string][]string), + } + if testcase.disposition != "" { + resp.Header.Add("Content-Disposition", testcase.disposition) + } + filename := getFilenameForDownload(testcase.path, &resp) + assert.Equal(t, testcase.expected, filename) + } +} diff --git a/components/engine/builder/dockerfile/copy_unix.go b/components/engine/builder/dockerfile/copy_unix.go index a4a5e05235..8833700554 100644 --- a/components/engine/builder/dockerfile/copy_unix.go +++ b/components/engine/builder/dockerfile/copy_unix.go @@ 
-6,6 +6,7 @@ import ( "os" "path/filepath" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" ) @@ -15,7 +16,8 @@ func fixPermissions(source, destination string, rootIDs idtools.IDPair, override err error ) if !overrideSkip { - skipChownRoot, err = isExistingDirectory(destination) + destEndpoint := ©Endpoint{driver: containerfs.NewLocalDriver(), path: destination} + skipChownRoot, err = isExistingDirectory(destEndpoint) if err != nil { return err } @@ -40,3 +42,7 @@ func fixPermissions(source, destination string, rootIDs idtools.IDPair, override return os.Lchown(fullpath, rootIDs.UID, rootIDs.GID) }) } + +func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error { + return nil +} diff --git a/components/engine/builder/dockerfile/copy_windows.go b/components/engine/builder/dockerfile/copy_windows.go index e4b15bcc10..dcf4c5acfb 100644 --- a/components/engine/builder/dockerfile/copy_windows.go +++ b/components/engine/builder/dockerfile/copy_windows.go @@ -1,8 +1,38 @@ package dockerfile -import "github.com/docker/docker/pkg/idtools" +import ( + "errors" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/idtools" +) func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error { // chown is not supported on Windows return nil } + +func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error { + // validate windows paths from other images + LCOW + if imageSource == nil || platform != "windows" { + return nil + } + + origPath = filepath.FromSlash(origPath) + p := strings.ToLower(filepath.Clean(origPath)) + if !filepath.IsAbs(p) { + if filepath.VolumeName(p) != "" { + if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths + p = p[:len(p)-1] + } + p += "\\" + } else { + p = filepath.Join("c:\\", p) + } + } + if _, blacklisted := pathBlacklist[p]; blacklisted { + return errors.New("copy from c:\\ or c:\\windows is not allowed on windows") + } + return nil +} diff --git a/components/engine/builder/dockerfile/evaluator_test.go b/components/engine/builder/dockerfile/evaluator_test.go index 72d7ce10e3..b64e21e625 100644 --- a/components/engine/builder/dockerfile/evaluator_test.go +++ b/components/engine/builder/dockerfile/evaluator_test.go @@ -9,6 +9,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder/dockerfile/parser" "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/internal/testutil" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" ) @@ -197,14 +198,6 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) { shlex: shlex, source: context, } - state, err = b.dispatch(opts) - - if err == nil { - t.Fatalf("No error when executing test %s", testCase.name) - } - - if !strings.Contains(err.Error(), testCase.expectedError) { - t.Fatalf("Wrong error message. Should be \"%s\". 
Got \"%s\"", testCase.expectedError, err.Error()) - } - + _, err = b.dispatch(opts) + testutil.ErrorContains(t, err, testCase.expectedError) } diff --git a/components/engine/builder/dockerfile/internals.go b/components/engine/builder/dockerfile/internals.go index b18118ce36..04ed6dc337 100644 --- a/components/engine/builder/dockerfile/internals.go +++ b/components/engine/builder/dockerfile/internals.go @@ -7,6 +7,9 @@ import ( "crypto/sha256" "encoding/hex" "fmt" + "io" + "os" + "path" "path/filepath" "strconv" "strings" @@ -15,13 +18,69 @@ import ( "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" lcUser "github.com/opencontainers/runc/libcontainer/user" "github.com/pkg/errors" ) +// For Windows only +var pathBlacklist = map[string]bool{ + "c:\\": true, + "c:\\windows": true, +} + +// Archiver defines an interface for copying files from one destination to +// another using Tar/Untar. +type Archiver interface { + TarUntar(src, dst string) error + UntarPath(src, dst string) error + CopyWithTar(src, dst string) error + CopyFileWithTar(src, dst string) error + IDMappings() *idtools.IDMappings +} + +// The builder will use the following interfaces if the container fs implements +// these for optimized copies to and from the container. +type extractor interface { + ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error +} + +type archiver interface { + ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) +} + +// helper functions to get tar/untar func +func untarFunc(i interface{}) containerfs.UntarFunc { + if ea, ok := i.(extractor); ok { + return ea.ExtractArchive + } + return chrootarchive.Untar +} + +func tarFunc(i interface{}) containerfs.TarFunc { + if ap, ok := i.(archiver); ok { + return ap.ArchivePath + } + return archive.TarWithOptions +} + +func (b *Builder) getArchiver(src, dst containerfs.Driver) Archiver { + t, u := tarFunc(src), untarFunc(dst) + return &containerfs.Archiver{ + SrcDriver: src, + DstDriver: dst, + Tar: t, + Untar: u, + IDMappingsVar: b.idMappings, + } +} + func (b *Builder) commit(dispatchState *dispatchState, comment string) error { if b.disableCommit { return nil @@ -131,28 +190,29 @@ func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error if err != nil { return errors.Wrapf(err, "failed to get destination image %q", state.imageID) } - destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount) + + destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount, b.platform) if err != nil { return err } - chownPair := b.archiver.IDMappings.RootPair() + chownPair := b.idMappings.RootPair() // if a chown was requested, perform the steps to get the uid, gid // translated (if necessary because of user namespaces), and replace // the root pair with the chown pair for copy operations if inst.chownStr != "" { - chownPair, err = parseChownFlag(inst.chownStr, destInfo.root, b.archiver.IDMappings) + chownPair, err = parseChownFlag(inst.chownStr, destInfo.root.Path(), b.idMappings) if err != nil { return errors.Wrapf(err, "unable to convert uid/gid chown string to host mapping") } } - opts := copyFileOptions{ - decompress: 
inst.allowLocalDecompression, - archiver: b.archiver, - chownPair: chownPair, - } for _, info := range inst.infos { + opts := copyFileOptions{ + decompress: inst.allowLocalDecompression, + archiver: b.getArchiver(info.root, destInfo.root), + chownPair: chownPair, + } if err := performCopyForInfo(destInfo, info, opts); err != nil { return errors.Wrapf(err, "failed to copy files") } @@ -206,10 +266,7 @@ func lookupUser(userStr, filepath string) (int, error) { return uid, nil } users, err := lcUser.ParsePasswdFileFilter(filepath, func(u lcUser.User) bool { - if u.Name == userStr { - return true - } - return false + return u.Name == userStr }) if err != nil { return 0, err @@ -228,10 +285,7 @@ func lookupGroup(groupStr, filepath string) (int, error) { return gid, nil } groups, err := lcUser.ParseGroupFileFilter(filepath, func(g lcUser.Group) bool { - if g.Name == groupStr { - return true - } - return false + return g.Name == groupStr }) if err != nil { return 0, err @@ -242,10 +296,10 @@ func lookupGroup(groupStr, filepath string) (int, error) { return groups[0].Gid, nil } -func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount) (copyInfo, error) { +func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount, platform string) (copyInfo, error) { // Twiddle the destination when it's a relative path - meaning, make it // relative to the WORKINGDIR - dest, err := normalizeDest(workingDir, inst.dest) + dest, err := normalizeDest(workingDir, inst.dest, platform) if err != nil { return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName) } @@ -258,6 +312,63 @@ func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMo return newCopyInfoFromSource(destMount, dest, ""), nil } +// normalizeDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. +func normalizeDest(workingDir, requested string, platform string) (string, error) { + dest := fromSlash(requested, platform) + endsInSlash := strings.HasSuffix(dest, string(separator(platform))) + + if platform != "windows" { + if !path.IsAbs(requested) { + dest = path.Join("/", filepath.ToSlash(workingDir), dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += "/" + } + } + return dest, nil + } + + // We are guaranteed that the working directory is already consistent, + // However, Windows also has, for now, the limitation that ADD/COPY can + // only be done to the system drive, not any drives that might be present + // as a result of a bind mount. + // + // So... if the path requested is Linux-style absolute (/foo or \\foo), + // we assume it is the system drive. If it is a Windows-style absolute + // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we + // strip any configured working directories drive letter so that it + // can be subsequently legitimately converted to a Windows volume-style + // pathname. + + // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as + // we only want to validate where the DriveColon part has been supplied. + if filepath.IsAbs(dest) { + if strings.ToUpper(string(dest[0])) != "C" { + return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)") + } + dest = dest[2:] // Strip the drive letter + } + + // Cannot handle relative where WorkingDir is not the system drive. 
+ if len(workingDir) > 0 { + if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { + return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) + } + if !system.IsAbs(dest) { + if string(workingDir[0]) != "C" { + return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive") + } + dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + } + return dest, nil +} + // For backwards compat, if there's just one info then use it as the // cache look-up string, otherwise hash 'em all into one func getSourceHashFromInfos(infos []copyInfo) string { @@ -403,3 +514,19 @@ func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConf ExtraHosts: options.ExtraHosts, } } + +// fromSlash works like filepath.FromSlash but with a given OS platform field +func fromSlash(path, platform string) string { + if platform == "windows" { + return strings.Replace(path, "/", "\\", -1) + } + return path +} + +// separator returns a OS path separator for the given OS platform +func separator(platform string) byte { + if platform == "windows" { + return '\\' + } + return '/' +} diff --git a/components/engine/builder/dockerfile/internals_unix.go b/components/engine/builder/dockerfile/internals_unix.go deleted file mode 100644 index 533735c960..0000000000 --- a/components/engine/builder/dockerfile/internals_unix.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build !windows - -package dockerfile - -import ( - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// normalizeDest normalizes the destination of a COPY/ADD command in a -// platform semantically consistent way. -func normalizeDest(workingDir, requested string) (string, error) { - dest := filepath.FromSlash(requested) - endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator)) - if !system.IsAbs(requested) { - dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest) - // Make sure we preserve any trailing slash - if endsInSlash { - dest += string(os.PathSeparator) - } - } - return dest, nil -} - -func containsWildcards(name string) bool { - for i := 0; i < len(name); i++ { - ch := name[i] - if ch == '\\' { - i++ - } else if ch == '*' || ch == '?' || ch == '[' { - return true - } - } - return false -} - -func validateCopySourcePath(imageSource *imageMount, origPath string) error { - return nil -} diff --git a/components/engine/builder/dockerfile/internals_windows.go b/components/engine/builder/dockerfile/internals_windows.go deleted file mode 100644 index 57f83296ab..0000000000 --- a/components/engine/builder/dockerfile/internals_windows.go +++ /dev/null @@ -1,95 +0,0 @@ -package dockerfile - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" -) - -// normalizeDest normalizes the destination of a COPY/ADD command in a -// platform semantically consistent way. -func normalizeDest(workingDir, requested string) (string, error) { - dest := filepath.FromSlash(requested) - endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator)) - - // We are guaranteed that the working directory is already consistent, - // However, Windows also has, for now, the limitation that ADD/COPY can - // only be done to the system drive, not any drives that might be present - // as a result of a bind mount. 
- // - // So... if the path requested is Linux-style absolute (/foo or \\foo), - // we assume it is the system drive. If it is a Windows-style absolute - // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we - // strip any configured working directories drive letter so that it - // can be subsequently legitimately converted to a Windows volume-style - // pathname. - - // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as - // we only want to validate where the DriveColon part has been supplied. - if filepath.IsAbs(dest) { - if strings.ToUpper(string(dest[0])) != "C" { - return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)") - } - dest = dest[2:] // Strip the drive letter - } - - // Cannot handle relative where WorkingDir is not the system drive. - if len(workingDir) > 0 { - if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { - return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) - } - if !system.IsAbs(dest) { - if string(workingDir[0]) != "C" { - return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive") - } - dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) - // Make sure we preserve any trailing slash - if endsInSlash { - dest += string(os.PathSeparator) - } - } - } - return dest, nil -} - -func containsWildcards(name string) bool { - for i := 0; i < len(name); i++ { - ch := name[i] - if ch == '*' || ch == '?' || ch == '[' { - return true - } - } - return false -} - -var pathBlacklist = map[string]bool{ - "c:\\": true, - "c:\\windows": true, -} - -func validateCopySourcePath(imageSource *imageMount, origPath string) error { - // validate windows paths from other images - if imageSource == nil { - return nil - } - origPath = filepath.FromSlash(origPath) - p := strings.ToLower(filepath.Clean(origPath)) - if !filepath.IsAbs(p) { - if filepath.VolumeName(p) != "" { - if p[len(p)-2:] == ":." { // case where clean returns weird c:. 
paths - p = p[:len(p)-1] - } - p += "\\" - } else { - p = filepath.Join("c:\\", p) - } - } - if _, blacklisted := pathBlacklist[p]; blacklisted { - return errors.New("copy from c:\\ or c:\\windows is not allowed on windows") - } - return nil -} diff --git a/components/engine/builder/dockerfile/internals_windows_test.go b/components/engine/builder/dockerfile/internals_windows_test.go index ca6920c3de..6ecc37ba63 100644 --- a/components/engine/builder/dockerfile/internals_windows_test.go +++ b/components/engine/builder/dockerfile/internals_windows_test.go @@ -40,7 +40,7 @@ func TestNormalizeDest(t *testing.T) { } for _, testcase := range tests { msg := fmt.Sprintf("Input: %s, %s", testcase.current, testcase.requested) - actual, err := normalizeDest(testcase.current, testcase.requested) + actual, err := normalizeDest(testcase.current, testcase.requested, "windows") if testcase.etext == "" { if !assert.NoError(t, err, msg) { continue diff --git a/components/engine/builder/dockerfile/mockbackend_test.go b/components/engine/builder/dockerfile/mockbackend_test.go index adc22762e0..0f076b5326 100644 --- a/components/engine/builder/dockerfile/mockbackend_test.go +++ b/components/engine/builder/dockerfile/mockbackend_test.go @@ -10,6 +10,7 @@ import ( "github.com/docker/docker/builder" containerpkg "github.com/docker/docker/container" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/containerfs" "golang.org/x/net/context" ) @@ -117,8 +118,8 @@ func (l *mockLayer) Release() error { return nil } -func (l *mockLayer) Mount() (string, error) { - return "mountPath", nil +func (l *mockLayer) Mount() (containerfs.ContainerFS, error) { + return containerfs.NewLocalContainerFS("mountPath"), nil } func (l *mockLayer) Commit(string) (builder.ReleaseableLayer, error) { diff --git a/components/engine/builder/dockerfile/parser/parser.go b/components/engine/builder/dockerfile/parser/parser.go index 7f07ff2150..4003cba465 100644 --- a/components/engine/builder/dockerfile/parser/parser.go +++ b/components/engine/builder/dockerfile/parser/parser.go @@ -143,7 +143,7 @@ func (d *Directive) possibleParserDirective(line string) error { if len(tecMatch) != 0 { for i, n := range tokenEscapeCommand.SubexpNames() { if n == "escapechar" { - if d.escapeSeen == true { + if d.escapeSeen { return errors.New("only one escape parser directive can be used") } d.escapeSeen = true @@ -159,7 +159,7 @@ func (d *Directive) possibleParserDirective(line string) error { if len(tpcMatch) != 0 { for i, n := range tokenPlatformCommand.SubexpNames() { if n == "platform" { - if d.platformSeen == true { + if d.platformSeen { return errors.New("only one platform parser directive can be used") } d.platformSeen = true diff --git a/components/engine/builder/fscache/fscache_test.go b/components/engine/builder/fscache/fscache_test.go index 3f6a1b02af..c327ec72d3 100644 --- a/components/engine/builder/fscache/fscache_test.go +++ b/components/engine/builder/fscache/fscache_test.go @@ -36,25 +36,25 @@ func TestFSCache(t *testing.T) { src1, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data", "bar"}) assert.Nil(t, err) - dt, err := ioutil.ReadFile(filepath.Join(src1.Root(), "foo")) + dt, err := ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo")) assert.Nil(t, err) assert.Equal(t, string(dt), "data") // same id doesn't recalculate anything src2, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data2", "bar"}) assert.Nil(t, err) - assert.Equal(t, src1.Root(), src2.Root()) + assert.Equal(t, 
src1.Root().Path(), src2.Root().Path()) - dt, err = ioutil.ReadFile(filepath.Join(src1.Root(), "foo")) + dt, err = ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo")) assert.Nil(t, err) assert.Equal(t, string(dt), "data") assert.Nil(t, src2.Close()) src3, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo2", "data2", "bar"}) assert.Nil(t, err) - assert.NotEqual(t, src1.Root(), src3.Root()) + assert.NotEqual(t, src1.Root().Path(), src3.Root().Path()) - dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo2")) + dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo2")) assert.Nil(t, err) assert.Equal(t, string(dt), "data2") @@ -71,12 +71,12 @@ func TestFSCache(t *testing.T) { // new upload with the same shared key shoutl overwrite src4, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo3", "data3", "bar"}) assert.Nil(t, err) - assert.NotEqual(t, src1.Root(), src3.Root()) + assert.NotEqual(t, src1.Root().Path(), src3.Root().Path()) - dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo3")) + dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo3")) assert.Nil(t, err) assert.Equal(t, string(dt), "data3") - assert.Equal(t, src4.Root(), src3.Root()) + assert.Equal(t, src4.Root().Path(), src3.Root().Path()) assert.Nil(t, src4.Close()) s, err = fscache.DiskUsage() diff --git a/components/engine/builder/remotecontext/archive.go b/components/engine/builder/remotecontext/archive.go index f48cafecd4..fc18c5da31 100644 --- a/components/engine/builder/remotecontext/archive.go +++ b/components/engine/builder/remotecontext/archive.go @@ -8,19 +8,19 @@ import ( "github.com/docker/docker/builder" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/tarsum" "github.com/pkg/errors" ) type archiveContext struct { - root string + root containerfs.ContainerFS sums tarsum.FileInfoSums } func (c *archiveContext) Close() error { - return os.RemoveAll(c.root) + return c.root.RemoveAll(c.root.Path()) } func convertPathError(err error, cleanpath string) error { @@ -52,7 +52,8 @@ func FromArchive(tarStream io.Reader) (builder.Source, error) { return nil, err } - tsc := &archiveContext{root: root} + // Assume local file system. Since it's coming from a tar file. + tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)} // Make sure we clean-up upon error. 
In the happy case the caller // is expected to manage the clean-up @@ -82,7 +83,7 @@ func FromArchive(tarStream io.Reader) (builder.Source, error) { return tsc, nil } -func (c *archiveContext) Root() string { +func (c *archiveContext) Root() containerfs.ContainerFS { return c.root } @@ -91,7 +92,7 @@ func (c *archiveContext) Remove(path string) error { if err != nil { return err } - return os.RemoveAll(fullpath) + return c.root.RemoveAll(fullpath) } func (c *archiveContext) Hash(path string) (string, error) { @@ -100,7 +101,7 @@ func (c *archiveContext) Hash(path string) (string, error) { return "", err } - rel, err := filepath.Rel(c.root, fullpath) + rel, err := c.root.Rel(c.root.Path(), fullpath) if err != nil { return "", convertPathError(err, cleanpath) } @@ -115,13 +116,13 @@ func (c *archiveContext) Hash(path string) (string, error) { return path, nil // backwards compat TODO: see if really needed } -func normalize(path, root string) (cleanPath, fullPath string, err error) { - cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:] - fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(root, path), root) +func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) { + cleanPath = root.Clean(string(root.Separator()) + path)[1:] + fullPath, err = root.ResolveScopedPath(path, true) if err != nil { return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath) } - if _, err := os.Lstat(fullPath); err != nil { + if _, err := root.Lstat(fullPath); err != nil { return "", "", errors.WithStack(convertPathError(err, path)) } return diff --git a/components/engine/builder/remotecontext/detect.go b/components/engine/builder/remotecontext/detect.go index ec32dbed7a..38aff67985 100644 --- a/components/engine/builder/remotecontext/detect.go +++ b/components/engine/builder/remotecontext/detect.go @@ -5,15 +5,14 @@ import ( "fmt" "io" "os" - "path/filepath" "strings" + "github.com/containerd/continuity/driver" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/builder" "github.com/docker/docker/builder/dockerfile/parser" "github.com/docker/docker/builder/dockerignore" "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/urlutil" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -157,12 +156,12 @@ func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) { return parser.Parse(br) } -func openAt(remote builder.Source, path string) (*os.File, error) { +func openAt(remote builder.Source, path string) (driver.File, error) { fullPath, err := FullPath(remote, path) if err != nil { return nil, err } - return os.Open(fullPath) + return remote.Root().Open(fullPath) } // StatAt is a helper for calling Stat on a path from a source @@ -171,12 +170,12 @@ func StatAt(remote builder.Source, path string) (os.FileInfo, error) { if err != nil { return nil, err } - return os.Stat(fullPath) + return remote.Root().Stat(fullPath) } // FullPath is a helper for getting a full path for a path from a source func FullPath(remote builder.Source, path string) (string, error) { - fullPath, err := symlink.FollowSymlinkInScope(filepath.Join(remote.Root(), path), remote.Root()) + fullPath, err := remote.Root().ResolveScopedPath(path, true) if err != nil { return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error } diff --git 
a/components/engine/builder/remotecontext/detect_test.go b/components/engine/builder/remotecontext/detect_test.go index 6b47ac2274..3d1ebd1c3f 100644 --- a/components/engine/builder/remotecontext/detect_test.go +++ b/components/engine/builder/remotecontext/detect_test.go @@ -5,11 +5,11 @@ import ( "io/ioutil" "log" "os" - "path/filepath" "sort" "testing" "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/containerfs" ) const ( @@ -21,7 +21,7 @@ const ( const shouldStayFilename = "should_stay" func extractFilenames(files []os.FileInfo) []string { - filenames := make([]string, len(files), len(files)) + filenames := make([]string, len(files)) for i, file := range files { filenames[i] = file.Name() @@ -53,7 +53,7 @@ func checkDirectory(t *testing.T, dir string, expectedFiles []string) { } func executeProcess(t *testing.T, contextDir string) { - modifiableCtx := &stubRemote{root: contextDir} + modifiableCtx := &stubRemote{root: containerfs.NewLocalContainerFS(contextDir)} err := removeDockerfile(modifiableCtx, builder.DefaultDockerfileName) @@ -105,19 +105,19 @@ func TestProcessShouldLeaveAllFiles(t *testing.T) { // TODO: remove after moving to a separate pkg type stubRemote struct { - root string + root containerfs.ContainerFS } func (r *stubRemote) Hash(path string) (string, error) { return "", errors.New("not implemented") } -func (r *stubRemote) Root() string { +func (r *stubRemote) Root() containerfs.ContainerFS { return r.root } func (r *stubRemote) Close() error { return errors.New("not implemented") } func (r *stubRemote) Remove(p string) error { - return os.Remove(filepath.Join(r.root, p)) + return r.root.Remove(r.root.Join(r.root.Path(), p)) } diff --git a/components/engine/builder/remotecontext/lazycontext.go b/components/engine/builder/remotecontext/lazycontext.go index b29c413fac..66f36defd4 100644 --- a/components/engine/builder/remotecontext/lazycontext.go +++ b/components/engine/builder/remotecontext/lazycontext.go @@ -3,11 +3,10 @@ package remotecontext import ( "encoding/hex" "os" - "path/filepath" - "runtime" "strings" "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/pools" "github.com/pkg/errors" ) @@ -15,7 +14,7 @@ import ( // NewLazySource creates a new LazyContext. LazyContext defines a hashed build // context based on a root directory. Individual files are hashed first time // they are asked. It is not safe to call methods of LazyContext concurrently. 
-func NewLazySource(root string) (builder.Source, error) { +func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) { return &lazySource{ root: root, sums: make(map[string]string), @@ -23,11 +22,11 @@ func NewLazySource(root string) (builder.Source, error) { } type lazySource struct { - root string + root containerfs.ContainerFS sums map[string]string } -func (c *lazySource) Root() string { +func (c *lazySource) Root() containerfs.ContainerFS { return c.root } @@ -41,7 +40,7 @@ func (c *lazySource) Hash(path string) (string, error) { return "", err } - fi, err := os.Lstat(fullPath) + fi, err := c.root.Lstat(fullPath) if err != nil { return "", errors.WithStack(err) } @@ -63,13 +62,13 @@ func (c *lazySource) Hash(path string) (string, error) { } func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) { - p := filepath.Join(c.root, relPath) + p := c.root.Join(c.root.Path(), relPath) h, err := NewFileHash(p, relPath, fi) if err != nil { return "", errors.Wrapf(err, "failed to create hash for %s", relPath) } if fi.Mode().IsRegular() && fi.Size() > 0 { - f, err := os.Open(p) + f, err := c.root.Open(p) if err != nil { return "", errors.Wrapf(err, "failed to open %s", relPath) } @@ -85,10 +84,10 @@ func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) // Rel makes a path relative to base path. Same as `filepath.Rel` but can also // handle UUID paths in windows. -func Rel(basepath, targpath string) (string, error) { +func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) { // filepath.Rel can't handle UUID paths in windows - if runtime.GOOS == "windows" { - pfx := basepath + `\` + if basepath.OS() == "windows" { + pfx := basepath.Path() + `\` if strings.HasPrefix(targpath, pfx) { p := strings.TrimPrefix(targpath, pfx) if p == "" { @@ -97,5 +96,5 @@ func Rel(basepath, targpath string) (string, error) { return p, nil } } - return filepath.Rel(basepath, targpath) + return basepath.Rel(basepath.Path(), targpath) } diff --git a/components/engine/builder/remotecontext/remote.go b/components/engine/builder/remotecontext/remote.go index 6733ff9e54..6c4073bd4c 100644 --- a/components/engine/builder/remotecontext/remote.go +++ b/components/engine/builder/remotecontext/remote.go @@ -116,7 +116,7 @@ func inspectResponse(ct string, r io.Reader, clen int64) (string, io.ReadCloser, plen = maxPreambleLength } - preamble := make([]byte, plen, plen) + preamble := make([]byte, plen) rlen, err := r.Read(preamble) if rlen == 0 { return ct, ioutil.NopCloser(r), errors.New("empty response") diff --git a/components/engine/builder/remotecontext/tarsum.go b/components/engine/builder/remotecontext/tarsum.go index 3ae9d82427..6770eed871 100644 --- a/components/engine/builder/remotecontext/tarsum.go +++ b/components/engine/builder/remotecontext/tarsum.go @@ -3,11 +3,11 @@ package remotecontext import ( "fmt" "os" - "path/filepath" "sync" - "github.com/docker/docker/pkg/symlink" iradix "github.com/hashicorp/go-immutable-radix" + + "github.com/docker/docker/pkg/containerfs" "github.com/pkg/errors" "github.com/tonistiigi/fsutil" ) @@ -19,7 +19,7 @@ type hashed interface { // CachableSource is a source that contains cache records for its contents type CachableSource struct { mu sync.Mutex - root string + root containerfs.ContainerFS tree *iradix.Tree txn *iradix.Txn } @@ -28,7 +28,7 @@ type CachableSource struct { func NewCachableSource(root string) *CachableSource { ts := &CachableSource{ tree: iradix.New(), - root: root, + root: 
containerfs.NewLocalContainerFS(root), } return ts } @@ -67,7 +67,7 @@ func (cs *CachableSource) Scan() error { return err } txn := iradix.New().Txn() - err = filepath.Walk(cs.root, func(path string, info os.FileInfo, err error) error { + err = cs.root.Walk(cs.root.Path(), func(path string, info os.FileInfo, err error) error { if err != nil { return errors.Wrapf(err, "failed to walk %s", path) } @@ -134,12 +134,12 @@ func (cs *CachableSource) Close() error { } func (cs *CachableSource) normalize(path string) (cleanpath, fullpath string, err error) { - cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] - fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(cs.root, path), cs.root) + cleanpath = cs.root.Clean(string(cs.root.Separator()) + path)[1:] + fullpath, err = cs.root.ResolveScopedPath(path, true) if err != nil { return "", "", fmt.Errorf("Forbidden path outside the context: %s (%s)", path, fullpath) } - _, err = os.Lstat(fullpath) + _, err = cs.root.Lstat(fullpath) if err != nil { return "", "", convertPathError(err, path) } @@ -149,19 +149,16 @@ func (cs *CachableSource) normalize(path string) (cleanpath, fullpath string, er // Hash returns a hash for a single file in the source func (cs *CachableSource) Hash(path string) (string, error) { n := cs.getRoot() - sum := "" // TODO: check this for symlinks v, ok := n.Get([]byte(path)) if !ok { - sum = path - } else { - sum = v.(*fileInfo).sum + return path, nil } - return sum, nil + return v.(*fileInfo).sum, nil } // Root returns a root directory for the source -func (cs *CachableSource) Root() string { +func (cs *CachableSource) Root() containerfs.ContainerFS { return cs.root } diff --git a/components/engine/builder/remotecontext/tarsum.pb.go b/components/engine/builder/remotecontext/tarsum.pb.go index 561a7f6367..1d23bbe65b 100644 --- a/components/engine/builder/remotecontext/tarsum.pb.go +++ b/components/engine/builder/remotecontext/tarsum.pb.go @@ -94,7 +94,7 @@ func (this *TarsumBackup) GoString() string { s := make([]string, 0, 5) s = append(s, "&remotecontext.TarsumBackup{") keysForHashes := make([]string, 0, len(this.Hashes)) - for k, _ := range this.Hashes { + for k := range this.Hashes { keysForHashes = append(keysForHashes, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) @@ -133,7 +133,7 @@ func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) { var l int _ = l if len(m.Hashes) > 0 { - for k, _ := range m.Hashes { + for k := range m.Hashes { dAtA[i] = 0xa i++ v := m.Hashes[k] @@ -211,7 +211,7 @@ func (this *TarsumBackup) String() string { return "nil" } keysForHashes := make([]string, 0, len(this.Hashes)) - for k, _ := range this.Hashes { + for k := range this.Hashes { keysForHashes = append(keysForHashes, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) diff --git a/components/engine/builder/remotecontext/tarsum_test.go b/components/engine/builder/remotecontext/tarsum_test.go index 8a9d69bb73..6d2b41d3d4 100644 --- a/components/engine/builder/remotecontext/tarsum_test.go +++ b/components/engine/builder/remotecontext/tarsum_test.go @@ -35,7 +35,7 @@ func TestCloseRootDirectory(t *testing.T) { t.Fatalf("Error while executing Close: %s", err) } - _, err = os.Stat(src.Root()) + _, err = os.Stat(src.Root().Path()) if !os.IsNotExist(err) { t.Fatal("Directory should not exist at this point") diff --git a/components/engine/client/checkpoint_list.go b/components/engine/client/checkpoint_list.go index ffe44bc976..9835bad5ca 100644 --- a/components/engine/client/checkpoint_list.go +++ 
b/components/engine/client/checkpoint_list.go @@ -2,7 +2,6 @@ package client import ( "encoding/json" - "net/http" "net/url" "github.com/docker/docker/api/types" @@ -20,10 +19,7 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return checkpoints, containerNotFoundError{container} - } - return checkpoints, err + return checkpoints, wrapResponseError(err, resp, "container", container) } err = json.NewDecoder(resp.body).Decode(&checkpoints) diff --git a/components/engine/client/client.go b/components/engine/client/client.go index f7a8c07d3a..2072b2f806 100644 --- a/components/engine/client/client.go +++ b/components/engine/client/client.go @@ -1,10 +1,6 @@ /* Package client is a Go client for the Docker Engine API. -The "docker" command uses this package to communicate with the daemon. It can also -be used by your own Go applications to do anything the command-line interface does -- running containers, pulling images, managing swarms, etc. - For more information about the Engine API, see the documentation: https://docs.docker.com/engine/reference/api/ @@ -160,7 +156,7 @@ func NewEnvClient() (*Client, error) { // highly recommended that you set a version or your client may break if the // server is upgraded. func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - proto, addr, basePath, err := ParseHost(host) + hostURL, err := ParseHostURL(host) if err != nil { return nil, err } @@ -171,7 +167,7 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map } } else { transport := new(http.Transport) - sockets.ConfigureTransport(transport, proto, addr) + sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) client = &http.Client{ Transport: transport, CheckRedirect: CheckRedirect, @@ -189,28 +185,24 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map scheme = "https" } + // TODO: store URL instead of proto/addr/basePath return &Client{ scheme: scheme, host: host, - proto: proto, - addr: addr, - basePath: basePath, + proto: hostURL.Scheme, + addr: hostURL.Host, + basePath: hostURL.Path, client: client, version: version, customHTTPHeaders: httpHeaders, }, nil } -// Close ensures that transport.Client is closed -// especially needed while using NewClient with *http.Client = nil -// for example -// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"}) +// Close the transport used by the client func (cli *Client) Close() error { - if t, ok := cli.client.Transport.(*http.Transport); ok { t.CloseIdleConnections() } - return nil } @@ -220,37 +212,27 @@ func (cli *Client) getAPIPath(p string, query url.Values) string { var apiPath string if cli.version != "" { v := strings.TrimPrefix(cli.version, "v") - apiPath = path.Join(cli.basePath, "/v"+v+p) + apiPath = path.Join(cli.basePath, "/v"+v, p) } else { apiPath = path.Join(cli.basePath, p) } - - u := &url.URL{ - Path: apiPath, - } - if len(query) > 0 { - u.RawQuery = query.Encode() - } - return u.String() + return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() } -// ClientVersion returns the version string associated with this -// instance of the Client. Note that this value can be changed -// via the DOCKER_API_VERSION env var. -// This operation doesn't acquire a mutex. 
+// ClientVersion returns the API version used by this client. func (cli *Client) ClientVersion() string { return cli.version } -// NegotiateAPIVersion updates the version string associated with this -// instance of the Client to match the latest version the server supports +// NegotiateAPIVersion queries the API and updates the version to match the +// API version. Any errors are silently ignored. func (cli *Client) NegotiateAPIVersion(ctx context.Context) { ping, _ := cli.Ping(ctx) cli.NegotiateAPIVersionPing(ping) } -// NegotiateAPIVersionPing updates the version string associated with this -// instance of the Client to match the latest version the server supports +// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion +// if the ping version is less than the default version. func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { if cli.manualOverride { return @@ -272,17 +254,28 @@ func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { } } -// DaemonHost returns the host associated with this instance of the Client. -// This operation doesn't acquire a mutex. +// DaemonHost returns the host address used by the client func (cli *Client) DaemonHost() string { return cli.host } -// ParseHost verifies that the given host strings is valid. +// ParseHost parses a url string, validates the strings is a host url, and returns +// the parsed host as: protocol, address, and base path +// Deprecated: use ParseHostURL func ParseHost(host string) (string, string, string, error) { + hostURL, err := ParseHostURL(host) + if err != nil { + return "", "", "", err + } + return hostURL.Scheme, hostURL.Host, hostURL.Path, nil +} + +// ParseHostURL parses a url string, validates the string is a host url, and +// returns the parsed URL +func ParseHostURL(host string) (*url.URL, error) { protoAddrParts := strings.SplitN(host, "://", 2) if len(protoAddrParts) == 1 { - return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host) + return nil, fmt.Errorf("unable to parse docker host `%s`", host) } var basePath string @@ -290,16 +283,19 @@ func ParseHost(host string) (string, string, string, error) { if proto == "tcp" { parsed, err := url.Parse("tcp://" + addr) if err != nil { - return "", "", "", err + return nil, err } addr = parsed.Host basePath = parsed.Path } - return proto, addr, basePath, nil + return &url.URL{ + Scheme: proto, + Host: addr, + Path: basePath, + }, nil } -// CustomHTTPHeaders returns the custom http headers associated with this -// instance of the Client. This operation doesn't acquire a mutex. +// CustomHTTPHeaders returns the custom http headers stored by the client. func (cli *Client) CustomHTTPHeaders() map[string]string { m := make(map[string]string) for k, v := range cli.customHTTPHeaders { @@ -308,8 +304,7 @@ func (cli *Client) CustomHTTPHeaders() map[string]string { return m } -// SetCustomHTTPHeaders updates the custom http headers associated with this -// instance of the Client. This operation doesn't acquire a mutex. +// SetCustomHTTPHeaders that will be set on every HTTP request made by the client. 
func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { cli.customHTTPHeaders = headers } diff --git a/components/engine/client/client_test.go b/components/engine/client/client_test.go index bc911c0c4a..9bde777f14 100644 --- a/components/engine/client/client_test.go +++ b/components/engine/client/client_test.go @@ -11,6 +11,7 @@ import ( "github.com/docker/docker/api" "github.com/docker/docker/api/types" + "github.com/docker/docker/internal/testutil" "github.com/stretchr/testify/assert" ) @@ -104,11 +105,11 @@ func TestNewEnvClient(t *testing.T) { } func TestGetAPIPath(t *testing.T) { - cases := []struct { - v string - p string - q url.Values - e string + testcases := []struct { + version string + path string + query url.Values + expected string }{ {"", "/containers/json", nil, "/containers/json"}, {"", "/containers/json", url.Values{}, "/containers/json"}, @@ -122,16 +123,10 @@ func TestGetAPIPath(t *testing.T) { {"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"}, } - for _, cs := range cases { - c, err := NewClient("unix:///var/run/docker.sock", cs.v, nil, nil) - if err != nil { - t.Fatal(err) - } - g := c.getAPIPath(cs.p, cs.q) - assert.Equal(t, g, cs.e) - - err = c.Close() - assert.NoError(t, err) + for _, testcase := range testcases { + c := Client{version: testcase.version, basePath: "/"} + actual := c.getAPIPath(testcase.path, testcase.query) + assert.Equal(t, actual, testcase.expected) } } @@ -152,7 +147,6 @@ func TestParseHost(t *testing.T) { for _, cs := range cases { p, a, b, e := ParseHost(cs.host) - // if we expected an error to be returned... if cs.err { assert.Error(t, e) } @@ -162,6 +156,43 @@ func TestParseHost(t *testing.T) { } } +func TestParseHostURL(t *testing.T) { + testcases := []struct { + host string + expected *url.URL + expectedErr string + }{ + { + host: "", + expectedErr: "unable to parse docker host", + }, + { + host: "foobar", + expectedErr: "unable to parse docker host", + }, + { + host: "foo://bar", + expected: &url.URL{Scheme: "foo", Host: "bar"}, + }, + { + host: "tcp://localhost:2476", + expected: &url.URL{Scheme: "tcp", Host: "localhost:2476"}, + }, + { + host: "tcp://localhost:2476/path", + expected: &url.URL{Scheme: "tcp", Host: "localhost:2476", Path: "/path"}, + }, + } + + for _, testcase := range testcases { + actual, err := ParseHostURL(testcase.host) + if testcase.expectedErr != "" { + testutil.ErrorContains(t, err, testcase.expectedErr) + } + assert.Equal(t, testcase.expected, actual) + } +} + func TestNewEnvClientSetsDefaultVersion(t *testing.T) { env := envToMap() defer mapToEnv(env) diff --git a/components/engine/client/config_inspect.go b/components/engine/client/config_inspect.go index ebb6d636c2..b44d6fdd7e 100644 --- a/components/engine/client/config_inspect.go +++ b/components/engine/client/config_inspect.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types/swarm" "golang.org/x/net/context" @@ -17,10 +16,7 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C } resp, err := cli.get(ctx, "/configs/"+id, nil, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return swarm.Config{}, nil, configNotFoundError{id} - } - return swarm.Config{}, nil, err + return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) } defer ensureReaderClosed(resp) diff --git a/components/engine/client/config_remove.go b/components/engine/client/config_remove.go index 726b5c8530..e025d44f79 100644 --- 
a/components/engine/client/config_remove.go +++ b/components/engine/client/config_remove.go @@ -9,5 +9,5 @@ func (cli *Client) ConfigRemove(ctx context.Context, id string) error { } resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "config", id) } diff --git a/components/engine/client/container_commit.go b/components/engine/client/container_commit.go index 531d796ee7..b3b16abfd1 100644 --- a/components/engine/client/container_commit.go +++ b/components/engine/client/container_commit.go @@ -39,7 +39,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option for _, change := range options.Changes { query.Add("changes", change) } - if options.Pause != true { + if !options.Pause { query.Set("pause", "0") } diff --git a/components/engine/client/container_create.go b/components/engine/client/container_create.go index 6841b0b282..bd817e7fd1 100644 --- a/components/engine/client/container_create.go +++ b/components/engine/client/container_create.go @@ -45,7 +45,7 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) if err != nil { if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { - return response, imageNotFoundError{config.Image} + return response, objectNotFoundError{object: "image", id: config.Image} } return response, err } diff --git a/components/engine/client/container_inspect.go b/components/engine/client/container_inspect.go index 17f1809747..a15db14be8 100644 --- a/components/engine/client/container_inspect.go +++ b/components/engine/client/container_inspect.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "io/ioutil" - "net/http" "net/url" "github.com/docker/docker/api/types" @@ -15,10 +14,7 @@ import ( func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, err + return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) } var response types.ContainerJSON @@ -35,10 +31,7 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri } serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, nil, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, nil, err + return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) } defer ensureReaderClosed(serverResp) diff --git a/components/engine/client/container_remove.go b/components/engine/client/container_remove.go index 3a79590ced..070108bf3e 100644 --- a/components/engine/client/container_remove.go +++ b/components/engine/client/container_remove.go @@ -23,5 +23,5 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "container", containerID) } diff --git a/components/engine/client/container_remove_test.go b/components/engine/client/container_remove_test.go index 798c08b333..0eab7ee518 100644 --- 
a/components/engine/client/container_remove_test.go +++ b/components/engine/client/container_remove_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -17,9 +18,16 @@ func TestContainerRemoveError(t *testing.T) { client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), } err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) + assert.EqualError(t, err, "Error response from daemon: Server error") +} + +func TestContainerRemoveNotFoundError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "missing")), } + err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{}) + assert.EqualError(t, err, "Error: No such container: container_id") + assert.True(t, IsErrNotFound(err)) } func TestContainerRemove(t *testing.T) { @@ -53,7 +61,5 @@ func TestContainerRemove(t *testing.T) { RemoveVolumes: true, Force: true, }) - if err != nil { - t.Fatal(err) - } + assert.NoError(t, err) } diff --git a/components/engine/client/errors.go b/components/engine/client/errors.go index fc7df9f1eb..cad4521757 100644 --- a/components/engine/client/errors.go +++ b/components/engine/client/errors.go @@ -3,6 +3,8 @@ package client import ( "fmt" + "net/http" + "github.com/docker/docker/api/types/versions" "github.com/pkg/errors" ) @@ -36,93 +38,65 @@ type notFound interface { NotFound() bool // Is the error a NotFound error } -// IsErrNotFound returns true if the error is caused with an -// object (image, container, network, volume, …) is not found in the docker host. +// IsErrNotFound returns true if the error is a NotFound error, which is returned +// by the API when some object is not found. func IsErrNotFound(err error) bool { te, ok := err.(notFound) return ok && te.NotFound() } -// imageNotFoundError implements an error returned when an image is not in the docker host. -type imageNotFoundError struct { - imageID string +type objectNotFoundError struct { + object string + id string } -// NotFound indicates that this error type is of NotFound -func (e imageNotFoundError) NotFound() bool { +func (e objectNotFoundError) NotFound() bool { return true } -// Error returns a string representation of an imageNotFoundError -func (e imageNotFoundError) Error() string { - return fmt.Sprintf("Error: No such image: %s", e.imageID) +func (e objectNotFoundError) Error() string { + return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) +} + +func wrapResponseError(err error, resp serverResponse, object, id string) error { + switch { + case err == nil: + return nil + case resp.statusCode == http.StatusNotFound: + return objectNotFoundError{object: object, id: id} + default: + return err + } } // IsErrImageNotFound returns true if the error is caused // when an image is not found in the docker host. +// +// Deprecated: Use IsErrNotFound func IsErrImageNotFound(err error) bool { return IsErrNotFound(err) } -// containerNotFoundError implements an error returned when a container is not in the docker host. 
-type containerNotFoundError struct { - containerID string -} - -// NotFound indicates that this error type is of NotFound -func (e containerNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a containerNotFoundError -func (e containerNotFoundError) Error() string { - return fmt.Sprintf("Error: No such container: %s", e.containerID) -} - // IsErrContainerNotFound returns true if the error is caused // when a container is not found in the docker host. +// +// Deprecated: Use IsErrNotFound func IsErrContainerNotFound(err error) bool { return IsErrNotFound(err) } -// networkNotFoundError implements an error returned when a network is not in the docker host. -type networkNotFoundError struct { - networkID string -} - -// NotFound indicates that this error type is of NotFound -func (e networkNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a networkNotFoundError -func (e networkNotFoundError) Error() string { - return fmt.Sprintf("Error: No such network: %s", e.networkID) -} - // IsErrNetworkNotFound returns true if the error is caused // when a network is not found in the docker host. +// +// Deprecated: Use IsErrNotFound func IsErrNetworkNotFound(err error) bool { return IsErrNotFound(err) } -// volumeNotFoundError implements an error returned when a volume is not in the docker host. -type volumeNotFoundError struct { - volumeID string -} - -// NotFound indicates that this error type is of NotFound -func (e volumeNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a volumeNotFoundError -func (e volumeNotFoundError) Error() string { - return fmt.Sprintf("Error: No such volume: %s", e.volumeID) -} - // IsErrVolumeNotFound returns true if the error is caused // when a volume is not found in the docker host. +// +// Deprecated: Use IsErrNotFound func IsErrVolumeNotFound(err error) bool { return IsErrNotFound(err) } @@ -144,70 +118,28 @@ func IsErrUnauthorized(err error) bool { return ok } -// nodeNotFoundError implements an error returned when a node is not found. -type nodeNotFoundError struct { - nodeID string -} - -// Error returns a string representation of a nodeNotFoundError -func (e nodeNotFoundError) Error() string { - return fmt.Sprintf("Error: No such node: %s", e.nodeID) -} - -// NotFound indicates that this error type is of NotFound -func (e nodeNotFoundError) NotFound() bool { - return true -} - // IsErrNodeNotFound returns true if the error is caused // when a node is not found. +// +// Deprecated: Use IsErrNotFound func IsErrNodeNotFound(err error) bool { - _, ok := err.(nodeNotFoundError) - return ok -} - -// serviceNotFoundError implements an error returned when a service is not found. -type serviceNotFoundError struct { - serviceID string -} - -// Error returns a string representation of a serviceNotFoundError -func (e serviceNotFoundError) Error() string { - return fmt.Sprintf("Error: No such service: %s", e.serviceID) -} - -// NotFound indicates that this error type is of NotFound -func (e serviceNotFoundError) NotFound() bool { - return true + return IsErrNotFound(err) } // IsErrServiceNotFound returns true if the error is caused // when a service is not found. +// +// Deprecated: Use IsErrNotFound func IsErrServiceNotFound(err error) bool { - _, ok := err.(serviceNotFoundError) - return ok -} - -// taskNotFoundError implements an error returned when a task is not found. 
-type taskNotFoundError struct { - taskID string -} - -// Error returns a string representation of a taskNotFoundError -func (e taskNotFoundError) Error() string { - return fmt.Sprintf("Error: No such task: %s", e.taskID) -} - -// NotFound indicates that this error type is of NotFound -func (e taskNotFoundError) NotFound() bool { - return true + return IsErrNotFound(err) } // IsErrTaskNotFound returns true if the error is caused // when a task is not found. +// +// Deprecated: Use IsErrNotFound func IsErrTaskNotFound(err error) bool { - _, ok := err.(taskNotFoundError) - return ok + return IsErrNotFound(err) } type pluginPermissionDenied struct { @@ -234,67 +166,26 @@ func (cli *Client) NewVersionError(APIrequired, feature string) error { return nil } -// secretNotFoundError implements an error returned when a secret is not found. -type secretNotFoundError struct { - name string -} - -// Error returns a string representation of a secretNotFoundError -func (e secretNotFoundError) Error() string { - return fmt.Sprintf("Error: no such secret: %s", e.name) -} - -// NotFound indicates that this error type is of NotFound -func (e secretNotFoundError) NotFound() bool { - return true -} - // IsErrSecretNotFound returns true if the error is caused // when a secret is not found. +// +// Deprecated: Use IsErrNotFound func IsErrSecretNotFound(err error) bool { - _, ok := err.(secretNotFoundError) - return ok -} - -// configNotFoundError implements an error returned when a config is not found. -type configNotFoundError struct { - name string -} - -// Error returns a string representation of a configNotFoundError -func (e configNotFoundError) Error() string { - return fmt.Sprintf("Error: no such config: %s", e.name) -} - -// NotFound indicates that this error type is of NotFound -func (e configNotFoundError) NotFound() bool { - return true + return IsErrNotFound(err) } // IsErrConfigNotFound returns true if the error is caused // when a config is not found. +// +// Deprecated: Use IsErrNotFound func IsErrConfigNotFound(err error) bool { - _, ok := err.(configNotFoundError) - return ok -} - -// pluginNotFoundError implements an error returned when a plugin is not in the docker host. -type pluginNotFoundError struct { - name string -} - -// NotFound indicates that this error type is of NotFound -func (e pluginNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a pluginNotFoundError -func (e pluginNotFoundError) Error() string { - return fmt.Sprintf("Error: No such plugin: %s", e.name) + return IsErrNotFound(err) } // IsErrPluginNotFound returns true if the error is caused // when a plugin is not found in the docker host. 
+// +// Deprecated: Use IsErrNotFound func IsErrPluginNotFound(err error) bool { return IsErrNotFound(err) } diff --git a/components/engine/client/hijack.go b/components/engine/client/hijack.go index 8cf0119f3d..d04cebdcf4 100644 --- a/components/engine/client/hijack.go +++ b/components/engine/client/hijack.go @@ -12,7 +12,6 @@ import ( "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/tlsconfig" "github.com/docker/go-connections/sockets" "github.com/pkg/errors" "golang.org/x/net/context" @@ -71,7 +70,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con timeout := dialer.Timeout if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) + deadlineTimeout := time.Until(dialer.Deadline) if timeout == 0 || deadlineTimeout < timeout { timeout = deadlineTimeout } @@ -115,7 +114,7 @@ func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Con // from the hostname we're connecting to. if config.ServerName == "" { // Make a copy to avoid polluting argument or default. - config = tlsconfig.Clone(config) + config = tlsConfigClone(config) config.ServerName = hostname } diff --git a/components/engine/client/image_inspect.go b/components/engine/client/image_inspect.go index b3a64ce2f8..1bc5919907 100644 --- a/components/engine/client/image_inspect.go +++ b/components/engine/client/image_inspect.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types" "golang.org/x/net/context" @@ -14,10 +13,7 @@ import ( func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ImageInspect{}, nil, imageNotFoundError{imageID} - } - return types.ImageInspect{}, nil, err + return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) } defer ensureReaderClosed(serverResp) diff --git a/components/engine/client/image_remove.go b/components/engine/client/image_remove.go index 94e4b74ec3..81d6c5438d 100644 --- a/components/engine/client/image_remove.go +++ b/components/engine/client/image_remove.go @@ -2,7 +2,6 @@ package client import ( "encoding/json" - "net/http" "net/url" "github.com/docker/docker/api/types" @@ -20,15 +19,12 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type query.Set("noprune", "1") } + var dels []types.ImageDeleteResponseItem resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return nil, imageNotFoundError{imageID} - } - return nil, err + return dels, wrapResponseError(err, resp, "image", imageID) } - var dels []types.ImageDeleteResponseItem err = json.NewDecoder(resp.body).Decode(&dels) ensureReaderClosed(resp) return dels, err diff --git a/components/engine/client/image_remove_test.go b/components/engine/client/image_remove_test.go index 5a391b0ae6..6149db7cb6 100644 --- a/components/engine/client/image_remove_test.go +++ b/components/engine/client/image_remove_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -19,20 +20,17 @@ func TestImageRemoveError(t *testing.T) { } _, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{}) - if err == nil || err.Error() != "Error 
response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } + assert.EqualError(t, err, "Error response from daemon: Server error") } func TestImageRemoveImageNotFound(t *testing.T) { client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + client: newMockClient(errorMock(http.StatusNotFound, "missing")), } _, err := client.ImageRemove(context.Background(), "unknown", types.ImageRemoveOptions{}) - if err == nil || !IsErrNotFound(err) { - t.Fatalf("expected an imageNotFoundError error, got %v", err) - } + assert.EqualError(t, err, "Error: No such image: unknown") + assert.True(t, IsErrNotFound(err)) } func TestImageRemove(t *testing.T) { diff --git a/components/engine/client/network_inspect.go b/components/engine/client/network_inspect.go index 848c9799fb..afabe65970 100644 --- a/components/engine/client/network_inspect.go +++ b/components/engine/client/network_inspect.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "io/ioutil" - "net/http" "net/url" "github.com/docker/docker/api/types" @@ -33,10 +32,7 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, } resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return networkResource, nil, networkNotFoundError{networkID} - } - return networkResource, nil, err + return networkResource, nil, wrapResponseError(err, resp, "network", networkID) } defer ensureReaderClosed(resp) diff --git a/components/engine/client/network_inspect_test.go b/components/engine/client/network_inspect_test.go index 9bfb55d74e..56399c7bc2 100644 --- a/components/engine/client/network_inspect_test.go +++ b/components/engine/client/network_inspect_test.go @@ -21,20 +21,17 @@ func TestNetworkInspectError(t *testing.T) { } _, err := client.NetworkInspect(context.Background(), "nothing", types.NetworkInspectOptions{}) - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } + assert.EqualError(t, err, "Error response from daemon: Server error") } -func TestNetworkInspectContainerNotFound(t *testing.T) { +func TestNetworkInspectNotFoundError(t *testing.T) { client := &Client{ - client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + client: newMockClient(errorMock(http.StatusNotFound, "missing")), } _, err := client.NetworkInspect(context.Background(), "unknown", types.NetworkInspectOptions{}) - if err == nil || !IsErrNetworkNotFound(err) { - t.Fatalf("expected a networkNotFound error, got %v", err) - } + assert.EqualError(t, err, "Error: No such network: unknown") + assert.True(t, IsErrNotFound(err)) } func TestNetworkInspect(t *testing.T) { diff --git a/components/engine/client/network_remove.go b/components/engine/client/network_remove.go index 6bd6748924..0811b5b51c 100644 --- a/components/engine/client/network_remove.go +++ b/components/engine/client/network_remove.go @@ -6,5 +6,5 @@ import "golang.org/x/net/context" func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "network", networkID) } diff --git a/components/engine/client/node_inspect.go b/components/engine/client/node_inspect.go index abf505d29c..791d2c0066 100644 --- a/components/engine/client/node_inspect.go +++ b/components/engine/client/node_inspect.go @@ -4,7 +4,6 @@ import ( 
"bytes" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types/swarm" "golang.org/x/net/context" @@ -14,10 +13,7 @@ import ( func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return swarm.Node{}, nil, nodeNotFoundError{nodeID} - } - return swarm.Node{}, nil, err + return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) } defer ensureReaderClosed(serverResp) diff --git a/components/engine/client/node_remove.go b/components/engine/client/node_remove.go index 0a77f3d578..adbf52febb 100644 --- a/components/engine/client/node_remove.go +++ b/components/engine/client/node_remove.go @@ -17,5 +17,5 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types. resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "node", nodeID) } diff --git a/components/engine/client/parse_logs.go b/components/engine/client/parse_logs.go deleted file mode 100644 index e427f80a77..0000000000 --- a/components/engine/client/parse_logs.go +++ /dev/null @@ -1,41 +0,0 @@ -package client - -// parse_logs.go contains utility helpers for getting information out of docker -// log lines. really, it only contains ParseDetails right now. maybe in the -// future there will be some desire to parse log messages back into a struct? -// that would go here if we did - -import ( - "net/url" - "strings" - - "github.com/pkg/errors" -) - -// ParseLogDetails takes a details string of key value pairs in the form -// "k=v,l=w", where the keys and values are url query escaped, and each pair -// is separated by a comma, returns a map. 
returns an error if the details -// string is not in a valid format -// the exact form of details encoding is implemented in -// api/server/httputils/write_log_stream.go -func ParseLogDetails(details string) (map[string]string, error) { - pairs := strings.Split(details, ",") - detailsMap := make(map[string]string, len(pairs)) - for _, pair := range pairs { - p := strings.SplitN(pair, "=", 2) - // if there is no equals sign, we will only get 1 part back - if len(p) != 2 { - return nil, errors.New("invalid details format") - } - k, err := url.QueryUnescape(p[0]) - if err != nil { - return nil, err - } - v, err := url.QueryUnescape(p[1]) - if err != nil { - return nil, err - } - detailsMap[k] = v - } - return detailsMap, nil -} diff --git a/components/engine/client/parse_logs_test.go b/components/engine/client/parse_logs_test.go deleted file mode 100644 index ac7f61679d..0000000000 --- a/components/engine/client/parse_logs_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package client - -import ( - "reflect" - "testing" - - "github.com/pkg/errors" -) - -func TestParseLogDetails(t *testing.T) { - testCases := []struct { - line string - expected map[string]string - err error - }{ - {"key=value", map[string]string{"key": "value"}, nil}, - {"key1=value1,key2=value2", map[string]string{"key1": "value1", "key2": "value2"}, nil}, - {"key+with+spaces=value%3Dequals,asdf%2C=", map[string]string{"key with spaces": "value=equals", "asdf,": ""}, nil}, - {"key=,=nothing", map[string]string{"key": "", "": "nothing"}, nil}, - {"=", map[string]string{"": ""}, nil}, - {"errors", nil, errors.New("invalid details format")}, - } - for _, tc := range testCases { - tc := tc // capture range variable - t.Run(tc.line, func(t *testing.T) { - t.Parallel() - res, err := ParseLogDetails(tc.line) - if err != nil && (err.Error() != tc.err.Error()) { - t.Fatalf("unexpected error parsing logs:\nExpected:\n\t%v\nActual:\n\t%v", tc.err, err) - } - if !reflect.DeepEqual(tc.expected, res) { - t.Errorf("result does not match expected:\nExpected:\n\t%#v\nActual:\n\t%#v", tc.expected, res) - } - }) - } -} diff --git a/components/engine/client/ping.go b/components/engine/client/ping.go index 8501375c88..0b6e450da7 100644 --- a/components/engine/client/ping.go +++ b/components/engine/client/ping.go @@ -28,7 +28,5 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { } ping.OSType = serverResp.header.Get("OSType") } - - err = cli.checkResponseErr(serverResp) - return ping, err + return ping, cli.checkResponseErr(serverResp) } diff --git a/components/engine/client/plugin_inspect.go b/components/engine/client/plugin_inspect.go index 89f39ee2c6..6a6fc18dfe 100644 --- a/components/engine/client/plugin_inspect.go +++ b/components/engine/client/plugin_inspect.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types" "golang.org/x/net/context" @@ -14,10 +13,7 @@ import ( func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return nil, nil, pluginNotFoundError{name} - } - return nil, nil, err + return nil, nil, wrapResponseError(err, resp, "plugin", name) } defer ensureReaderClosed(resp) diff --git a/components/engine/client/plugin_remove.go b/components/engine/client/plugin_remove.go index b017e4d348..b498c48203 100644 --- a/components/engine/client/plugin_remove.go +++ 
b/components/engine/client/plugin_remove.go @@ -16,5 +16,5 @@ func (cli *Client) PluginRemove(ctx context.Context, name string, options types. resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "plugin", name) } diff --git a/components/engine/client/request.go b/components/engine/client/request.go index 3e7d43feac..615d0b989d 100644 --- a/components/engine/client/request.go +++ b/components/engine/client/request.go @@ -203,7 +203,7 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error { return err } if len(body) == 0 { - return fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) + return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) } var ct string diff --git a/components/engine/client/secret_inspect.go b/components/engine/client/secret_inspect.go index 9b602972bc..6927ea96fa 100644 --- a/components/engine/client/secret_inspect.go +++ b/components/engine/client/secret_inspect.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types/swarm" "golang.org/x/net/context" @@ -17,10 +16,7 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S } resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return swarm.Secret{}, nil, secretNotFoundError{id} - } - return swarm.Secret{}, nil, err + return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) } defer ensureReaderClosed(resp) diff --git a/components/engine/client/secret_remove.go b/components/engine/client/secret_remove.go index c5e37af17d..9b4ee71e2c 100644 --- a/components/engine/client/secret_remove.go +++ b/components/engine/client/secret_remove.go @@ -9,5 +9,5 @@ func (cli *Client) SecretRemove(ctx context.Context, id string) error { } resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "secret", id) } diff --git a/components/engine/client/service_create.go b/components/engine/client/service_create.go index 6b9364d6f2..834709d1f3 100644 --- a/components/engine/client/service_create.go +++ b/components/engine/client/service_create.go @@ -3,6 +3,7 @@ package client import ( "encoding/json" "fmt" + "strings" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" @@ -87,19 +88,28 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) - imageWithDigest := image var platforms []swarm.Platform if err != nil { return "", nil, err } - imageWithDigest = imageWithDigestString(image, distributionInspect.Descriptor.Digest) + imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) if len(distributionInspect.Platforms) > 0 { platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) for _, p := range distributionInspect.Platforms { + // clear architecture field for arm. 
This is a temporary patch to address + // https://github.com/docker/swarmkit/issues/2294. The issue is that while + // image manifests report "arm" as the architecture, the node reports + // something like "armv7l" (includes the variant), which causes arm images + // to stop working with swarm mode. This patch removes the architecture + // constraint for arm images to ensure tasks get scheduled. + arch := p.Architecture + if strings.ToLower(arch) == "arm" { + arch = "" + } platforms = append(platforms, swarm.Platform{ - Architecture: p.Architecture, + Architecture: arch, OS: p.OS, }) } diff --git a/components/engine/client/service_inspect.go b/components/engine/client/service_inspect.go index d7e051e3a4..3e9699e5e0 100644 --- a/components/engine/client/service_inspect.go +++ b/components/engine/client/service_inspect.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "net/http" "net/url" "github.com/docker/docker/api/types" @@ -19,10 +18,7 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return swarm.Service{}, nil, serviceNotFoundError{serviceID} - } - return swarm.Service{}, nil, err + return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) } defer ensureReaderClosed(serverResp) diff --git a/components/engine/client/service_remove.go b/components/engine/client/service_remove.go index a9331f92c2..ad992c01d0 100644 --- a/components/engine/client/service_remove.go +++ b/components/engine/client/service_remove.go @@ -6,5 +6,5 @@ import "golang.org/x/net/context" func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "service", serviceID) } diff --git a/components/engine/client/service_remove_test.go b/components/engine/client/service_remove_test.go index 8e2ac259c1..d7c51ef3dc 100644 --- a/components/engine/client/service_remove_test.go +++ b/components/engine/client/service_remove_test.go @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -17,9 +18,17 @@ func TestServiceRemoveError(t *testing.T) { } err := client.ServiceRemove(context.Background(), "service_id") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) + assert.EqualError(t, err, "Error response from daemon: Server error") +} + +func TestServiceRemoveNotFoundError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "missing")), } + + err := client.ServiceRemove(context.Background(), "service_id") + assert.EqualError(t, err, "Error: No such service: service_id") + assert.True(t, IsErrNotFound(err)) } func TestServiceRemove(t *testing.T) { diff --git a/components/engine/client/task_inspect.go b/components/engine/client/task_inspect.go index bc8058fc32..dc08cedb96 100644 --- a/components/engine/client/task_inspect.go +++ b/components/engine/client/task_inspect.go @@ -4,10 +4,8 @@ import ( "bytes" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) @@ -15,10 +13,7 @@ import ( func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) 
(swarm.Task, []byte, error) { serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return swarm.Task{}, nil, taskNotFoundError{taskID} - } - return swarm.Task{}, nil, err + return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) } defer ensureReaderClosed(serverResp) diff --git a/components/engine/client/tlsconfig_clone.go b/components/engine/client/tlsconfig_clone.go new file mode 100644 index 0000000000..99b6be1cea --- /dev/null +++ b/components/engine/client/tlsconfig_clone.go @@ -0,0 +1,11 @@ +// +build go1.8 + +package client + +import "crypto/tls" + +// tlsConfigClone returns a clone of tls.Config. This function is provided for +// compatibility for go1.7 that doesn't include this method in stdlib. +func tlsConfigClone(c *tls.Config) *tls.Config { + return c.Clone() +} diff --git a/components/engine/pkg/tlsconfig/tlsconfig_clone_go17.go b/components/engine/client/tlsconfig_clone_go17.go similarity index 89% rename from components/engine/pkg/tlsconfig/tlsconfig_clone_go17.go rename to components/engine/client/tlsconfig_clone_go17.go index 0d5b448fec..b837b2ade0 100644 --- a/components/engine/pkg/tlsconfig/tlsconfig_clone_go17.go +++ b/components/engine/client/tlsconfig_clone_go17.go @@ -1,12 +1,12 @@ // +build go1.7,!go1.8 -package tlsconfig +package client import "crypto/tls" -// Clone returns a clone of tls.Config. This function is provided for +// tlsConfigClone returns a clone of tls.Config. This function is provided for // compatibility for go1.7 that doesn't include this method in stdlib. -func Clone(c *tls.Config) *tls.Config { +func tlsConfigClone(c *tls.Config) *tls.Config { return &tls.Config{ Rand: c.Rand, Time: c.Time, diff --git a/components/engine/client/volume_inspect.go b/components/engine/client/volume_inspect.go index 3860e9b22c..9889343849 100644 --- a/components/engine/client/volume_inspect.go +++ b/components/engine/client/volume_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "encoding/json" "io/ioutil" - "net/http" + "path" "github.com/docker/docker/api/types" "golang.org/x/net/context" @@ -18,13 +18,17 @@ func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Vo // VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + // The empty ID needs to be handled here because with an empty ID the + // request url will not contain a trailing / which calls the volume list API + // instead of volume inspect + if volumeID == "" { + return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + } + var volume types.Volume - resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + resp, err := cli.get(ctx, path.Join("/volumes", volumeID), nil, nil) if err != nil { - if resp.statusCode == http.StatusNotFound { - return volume, nil, volumeNotFoundError{volumeID} - } - return volume, nil, err + return volume, nil, wrapResponseError(err, resp, "volume", volumeID) } defer ensureReaderClosed(resp) diff --git a/components/engine/client/volume_inspect_test.go b/components/engine/client/volume_inspect_test.go index 0d1d118828..7d01f44ed2 100644 --- a/components/engine/client/volume_inspect_test.go +++ b/components/engine/client/volume_inspect_test.go @@ -10,6 +10,9 @@ import ( "testing" "github.com/docker/docker/api/types" + "github.com/docker/docker/internal/testutil" + 
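The empty-ID guard added to VolumeInspectWithRaw above exists because the request path is now built with path.Join, which strips the trailing separator; with an empty ID the request would otherwise land on the volume list route rather than volume inspect. A standalone sketch of that behaviour, standard library only:

package main

import (
	"fmt"
	"path"
)

func main() {
	// With a real ID the joined path is the inspect route.
	fmt.Println(path.Join("/volumes", "volume_id")) // /volumes/volume_id

	// With an empty ID path.Join drops the trailing slash, so the request
	// would hit the volume *list* endpoint instead of volume inspect,
	// hence the explicit objectNotFoundError returned before any request.
	fmt.Println(path.Join("/volumes", "")) // /volumes
}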
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/net/context" ) @@ -19,9 +22,7 @@ func TestVolumeInspectError(t *testing.T) { } _, err := client.VolumeInspect(context.Background(), "nothing") - if err == nil || err.Error() != "Error response from daemon: Server error" { - t.Fatalf("expected a Server Error, got %v", err) - } + testutil.ErrorContains(t, err, "Error response from daemon: Server error") } func TestVolumeInspectNotFound(t *testing.T) { @@ -30,13 +31,34 @@ func TestVolumeInspectNotFound(t *testing.T) { } _, err := client.VolumeInspect(context.Background(), "unknown") - if err == nil || !IsErrVolumeNotFound(err) { - t.Fatalf("expected a volumeNotFound error, got %v", err) + assert.True(t, IsErrNotFound(err)) +} + +func TestVolumeInspectWithEmptyID(t *testing.T) { + expectedURL := "/volumes/" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + assert.Equal(t, req.URL.Path, expectedURL) + return &http.Response{ + StatusCode: http.StatusNotFound, + Body: ioutil.NopCloser(bytes.NewReader(nil)), + }, nil + }), } + _, err := client.VolumeInspect(context.Background(), "") + testutil.ErrorContains(t, err, "No such volume: ") + } func TestVolumeInspect(t *testing.T) { expectedURL := "/volumes/volume_id" + expected := types.Volume{ + Name: "name", + Driver: "driver", + Mountpoint: "mountpoint", + } + client := &Client{ client: newMockClient(func(req *http.Request) (*http.Response, error) { if !strings.HasPrefix(req.URL.Path, expectedURL) { @@ -45,11 +67,7 @@ func TestVolumeInspect(t *testing.T) { if req.Method != "GET" { return nil, fmt.Errorf("expected GET method, got %s", req.Method) } - content, err := json.Marshal(types.Volume{ - Name: "name", - Driver: "driver", - Mountpoint: "mountpoint", - }) + content, err := json.Marshal(expected) if err != nil { return nil, err } @@ -60,17 +78,7 @@ func TestVolumeInspect(t *testing.T) { }), } - v, err := client.VolumeInspect(context.Background(), "volume_id") - if err != nil { - t.Fatal(err) - } - if v.Name != "name" { - t.Fatalf("expected `name`, got %s", v.Name) - } - if v.Driver != "driver" { - t.Fatalf("expected `driver`, got %s", v.Driver) - } - if v.Mountpoint != "mountpoint" { - t.Fatalf("expected `mountpoint`, got %s", v.Mountpoint) - } + volume, err := client.VolumeInspect(context.Background(), "volume_id") + require.NoError(t, err) + assert.Equal(t, expected, volume) } diff --git a/components/engine/client/volume_remove.go b/components/engine/client/volume_remove.go index 6c26575b49..3ffb8bcf2e 100644 --- a/components/engine/client/volume_remove.go +++ b/components/engine/client/volume_remove.go @@ -17,5 +17,5 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool } resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) ensureReaderClosed(resp) - return err + return wrapResponseError(err, resp, "volume", volumeID) } diff --git a/components/engine/cmd/dockerd/config_unix.go b/components/engine/cmd/dockerd/config_unix.go index 148fa87459..ad27a46726 100644 --- a/components/engine/cmd/dockerd/config_unix.go +++ b/components/engine/cmd/dockerd/config_unix.go @@ -33,8 +33,6 @@ func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) { flags.StringVar(&conf.BridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs") flags.BoolVar(&conf.BridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic") flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, 
"userland-proxy-path", "", "Path to the userland proxy binary") - flags.BoolVar(&conf.EnableCors, "api-enable-cors", false, "Enable CORS headers in the Engine API, this is deprecated by --api-cors-header") - flags.MarkDeprecated("api-enable-cors", "Please use --api-cors-header") flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers") flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces") flags.StringVar(&conf.ContainerdAddr, "containerd", "", "Path to containerd socket") diff --git a/components/engine/cmd/dockerd/daemon.go b/components/engine/cmd/dockerd/daemon.go index c2f3781d88..2e8acf97da 100644 --- a/components/engine/cmd/dockerd/daemon.go +++ b/components/engine/cmd/dockerd/daemon.go @@ -99,6 +99,8 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { FullTimestamp: true, }) + system.InitLCOW(cli.Config.Experimental) + if err := setDefaultUmask(); err != nil { return fmt.Errorf("Failed to set umask: %v", err) } @@ -132,7 +134,6 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { Logging: true, SocketGroup: cli.Config.SocketGroup, Version: dockerversion.Version, - EnableCors: cli.Config.EnableCors, CorsHeaders: cli.Config.CorsHeaders, } @@ -198,7 +199,11 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { cli.api.Accept(addr, ls...) } - registryService := registry.NewService(cli.Config.ServiceOptions) + registryService, err := registry.NewService(cli.Config.ServiceOptions) + if err != nil { + return err + } + containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...) if err != nil { return err @@ -467,7 +472,7 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { return nil, err } - if conf.V2Only == false { + if !conf.V2Only { logrus.Warnf(`The "disable-legacy-registry" option is deprecated and wil be removed in Docker v17.12. Interacting with legacy (v1) registries will no longer be supported in Docker v17.12"`) } @@ -548,7 +553,7 @@ func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) s.UseMiddleware(vm) - if cfg.EnableCors || cfg.CorsHeaders != "" { + if cfg.CorsHeaders != "" { c := middleware.NewCORSMiddleware(cfg.CorsHeaders) s.UseMiddleware(c) } diff --git a/components/engine/container/archive.go b/components/engine/container/archive.go index 56e6598b9c..ec4c236fe6 100644 --- a/components/engine/container/archive.go +++ b/components/engine/container/archive.go @@ -2,7 +2,6 @@ package container import ( "os" - "path/filepath" "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/archive" @@ -15,17 +14,20 @@ import ( // an error if the path points to outside the container's rootfs. func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) { // Check if a drive letter supplied, it must be the system drive. No-op except on Windows - path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, container.BaseFS) if err != nil { return "", "", err } // Consider the given path as an absolute path in the container. 
- absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + absPath = archive.PreserveTrailingDotOrSeparator( + container.BaseFS.Join(string(container.BaseFS.Separator()), path), + path, + container.BaseFS.Separator()) // Split the absPath into its Directory and Base components. We will // resolve the dir in the scope of the container then append the base. - dirPath, basePath := filepath.Split(absPath) + dirPath, basePath := container.BaseFS.Split(absPath) resolvedDirPath, err := container.GetResourcePath(dirPath) if err != nil { @@ -34,8 +36,7 @@ func (container *Container) ResolvePath(path string) (resolvedPath, absPath stri // resolvedDirPath will have been cleaned (no trailing path separators) so // we can manually join it with the base path element. - resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - + resolvedPath = resolvedDirPath + string(container.BaseFS.Separator()) + basePath return resolvedPath, absPath, nil } @@ -44,7 +45,9 @@ func (container *Container) ResolvePath(path string) (resolvedPath, absPath stri // resolved to a path on the host corresponding to the given absolute path // inside the container. func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { - lstat, err := os.Lstat(resolvedPath) + driver := container.BaseFS + + lstat, err := driver.Lstat(resolvedPath) if err != nil { return nil, err } @@ -57,17 +60,17 @@ func (container *Container) StatPath(resolvedPath, absPath string) (stat *types. return nil, err } - linkTarget, err = filepath.Rel(container.BaseFS, hostPath) + linkTarget, err = driver.Rel(driver.Path(), hostPath) if err != nil { return nil, err } // Make it an absolute path. - linkTarget = filepath.Join(string(filepath.Separator), linkTarget) + linkTarget = driver.Join(string(driver.Separator()), linkTarget) } return &types.ContainerPathStat{ - Name: filepath.Base(absPath), + Name: driver.Base(absPath), Size: lstat.Size(), Mode: lstat.Mode(), Mtime: lstat.ModTime(), diff --git a/components/engine/container/container.go b/components/engine/container/container.go index 188c017cf9..150910f795 100644 --- a/components/engine/container/container.go +++ b/components/engine/container/container.go @@ -28,6 +28,7 @@ import ( "github.com/docker/docker/layer" "github.com/docker/docker/libcontainerd" "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/signal" @@ -64,10 +65,10 @@ var ( type Container struct { StreamConfig *stream.Config // embed for Container to support states directly. - *State `json:"State"` // Needed for Engine API version <= 1.11 - Root string `json:"-"` // Path to the "home" of the container, including metadata. - BaseFS string `json:"-"` // Path to the graphdriver mountpoint - RWLayer layer.RWLayer `json:"-"` + *State `json:"State"` // Needed for Engine API version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. 
+ BaseFS containerfs.ContainerFS `json:"-"` // interface containing graphdriver mount + RWLayer layer.RWLayer `json:"-"` ID string Created time.Time Managed bool @@ -305,15 +306,13 @@ func (container *Container) SetupWorkingDirectory(rootIDs idtools.IDPair) error func (container *Container) GetResourcePath(path string) (string, error) { // IMPORTANT - These are paths on the OS where the daemon is running, hence // any filepath operations must be done in an OS agnostic way. - - cleanPath := cleanResourcePath(path) - r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) + r, e := container.BaseFS.ResolveScopedPath(path, false) // Log this here on the daemon side as there's otherwise no indication apart // from the error being propagated all the way back to the client. This makes // debugging significantly easier and clearly indicates the error comes from the daemon. if e != nil { - logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e) + logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS.Path(), path, e) } return r, e } @@ -435,6 +434,11 @@ func (container *Container) ShouldRestart() bool { // AddMountPointWithVolume adds a new mount point configured with a volume to the container. func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { + operatingSystem := container.Platform + if operatingSystem == "" { + operatingSystem = runtime.GOOS + } + volumeParser := volume.NewParser(operatingSystem) container.MountPoints[destination] = &volume.MountPoint{ Type: mounttypes.TypeVolume, Name: vol.Name(), @@ -442,7 +446,7 @@ func (container *Container) AddMountPointWithVolume(destination string, vol volu Destination: destination, RW: rw, Volume: vol, - CopyData: volume.DefaultCopyMode, + CopyData: volumeParser.DefaultCopyMode(), } } diff --git a/components/engine/container/container_unix.go b/components/engine/container/container_unix.go index 8212cb9d7c..6bb253fae3 100644 --- a/components/engine/container/container_unix.go +++ b/components/engine/container/container_unix.go @@ -5,7 +5,6 @@ package container import ( "io/ioutil" "os" - "path/filepath" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" @@ -13,7 +12,6 @@ import ( "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" "github.com/docker/docker/volume" "github.com/opencontainers/selinux/go-selinux/label" @@ -68,6 +66,7 @@ func (container *Container) BuildHostnameFile() error { func (container *Container) NetworkMounts() []Mount { var mounts []Mount shared := container.HostConfig.NetworkMode.IsContainer() + parser := volume.NewParser(container.Platform) if container.ResolvConfPath != "" { if _, err := os.Stat(container.ResolvConfPath); err != nil { logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) @@ -83,7 +82,7 @@ func (container *Container) NetworkMounts() []Mount { Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: writable, - Propagation: string(volume.DefaultPropagationMode), + Propagation: string(parser.DefaultPropagationMode()), }) } } @@ -102,7 +101,7 @@ func (container *Container) NetworkMounts() []Mount { Source: container.HostnamePath, Destination: 
"/etc/hostname", Writable: writable, - Propagation: string(volume.DefaultPropagationMode), + Propagation: string(parser.DefaultPropagationMode()), }) } } @@ -121,7 +120,7 @@ func (container *Container) NetworkMounts() []Mount { Source: container.HostsPath, Destination: "/etc/hosts", Writable: writable, - Propagation: string(volume.DefaultPropagationMode), + Propagation: string(parser.DefaultPropagationMode()), }) } } @@ -130,7 +129,7 @@ func (container *Container) NetworkMounts() []Mount { // CopyImagePathContent copies files in destination to the volume. func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { - rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) + rootfs, err := container.GetResourcePath(destination) if err != nil { return err } @@ -196,6 +195,7 @@ func (container *Container) UnmountIpcMount(unmount func(pth string) error) erro // IpcMounts returns the list of IPC mounts func (container *Container) IpcMounts() []Mount { var mounts []Mount + parser := volume.NewParser(container.Platform) if container.HasMountFor("/dev/shm") { return mounts @@ -209,7 +209,7 @@ func (container *Container) IpcMounts() []Mount { Source: container.ShmPath, Destination: "/dev/shm", Writable: true, - Propagation: string(volume.DefaultPropagationMode), + Propagation: string(parser.DefaultPropagationMode()), }) return mounts @@ -429,6 +429,7 @@ func copyOwnership(source, destination string) error { // TmpfsMounts returns the list of tmpfs mounts func (container *Container) TmpfsMounts() ([]Mount, error) { + parser := volume.NewParser(container.Platform) var mounts []Mount for dest, data := range container.HostConfig.Tmpfs { mounts = append(mounts, Mount{ @@ -439,7 +440,7 @@ func (container *Container) TmpfsMounts() ([]Mount, error) { } for dest, mnt := range container.MountPoints { if mnt.Type == mounttypes.TypeTmpfs { - data, err := volume.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly) + data, err := parser.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly) if err != nil { return nil, err } @@ -453,11 +454,6 @@ func (container *Container) TmpfsMounts() ([]Mount, error) { return mounts, nil } -// cleanResourcePath cleans a resource path and prepares to combine with mnt path -func cleanResourcePath(path string) string { - return filepath.Join(string(os.PathSeparator), path) -} - // EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { return false diff --git a/components/engine/container/container_windows.go b/components/engine/container/container_windows.go index 9c52d00a46..2dbea5905e 100644 --- a/components/engine/container/container_windows.go +++ b/components/engine/container/container_windows.go @@ -172,18 +172,6 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi return nil } -// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares -// to combine with a volume path -func cleanResourcePath(path string) string { - if len(path) >= 2 { - c := path[0] - if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { - path = path[2:] - } - } - return filepath.Join(string(os.PathSeparator), path) -} - // BuildHostnameFile writes the container's hostname file. 
func (container *Container) BuildHostnameFile() error { return nil diff --git a/components/engine/container/view.go b/components/engine/container/view.go index 8d885268c2..164827c550 100644 --- a/components/engine/container/view.go +++ b/components/engine/container/view.go @@ -203,10 +203,7 @@ func (db *memDB) ReserveName(name, containerID string) error { // Once released, a name can be reserved again func (db *memDB) ReleaseName(name string) error { return db.withTxn(func(txn *memdb.Txn) error { - if err := txn.Delete(memdbNamesTable, nameAssociation{name: name}); err != nil { - return err - } - return nil + return txn.Delete(memdbNamesTable, nameAssociation{name: name}) }) } diff --git a/components/engine/contrib/docker-device-tool/device_tool.go b/components/engine/contrib/docker-device-tool/device_tool.go index 8f6251582a..905b689581 100644 --- a/components/engine/contrib/docker-device-tool/device_tool.go +++ b/components/engine/contrib/docker-device-tool/device_tool.go @@ -90,14 +90,12 @@ func main() { fmt.Printf("Sector size: %d\n", status.SectorSize) fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) - break case "list": ids := devices.List() sort.Strings(ids) for _, id := range ids { fmt.Println(id) } - break case "device": if flag.NArg() < 2 { usage() @@ -113,7 +111,6 @@ func main() { fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors) fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector) - break case "resize": if flag.NArg() < 2 { usage() @@ -131,7 +128,6 @@ func main() { os.Exit(1) } - break case "snap": if flag.NArg() < 3 { usage() @@ -142,7 +138,6 @@ func main() { fmt.Println("Can't create snap device: ", err) os.Exit(1) } - break case "remove": if flag.NArg() < 2 { usage() @@ -153,7 +148,6 @@ func main() { fmt.Println("Can't remove device: ", err) os.Exit(1) } - break case "mount": if flag.NArg() < 3 { usage() @@ -164,13 +158,10 @@ func main() { fmt.Println("Can't mount device: ", err) os.Exit(1) } - break default: fmt.Printf("Unknown command %s\n", args[0]) usage() os.Exit(1) } - - return } diff --git a/components/engine/daemon/archive.go b/components/engine/daemon/archive.go index 4bcf8d0a0c..c52d3b8509 100644 --- a/components/engine/daemon/archive.go +++ b/components/engine/daemon/archive.go @@ -3,7 +3,6 @@ package daemon import ( "io" "os" - "path/filepath" "strings" "github.com/docker/docker/api/types" @@ -20,6 +19,31 @@ import ( // path does not refer to a directory. var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") +// The daemon will use the following interfaces if the container fs implements +// these for optimized copies to and from the container. 
+type extractor interface { + ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error +} + +type archiver interface { + ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) +} + +// helper functions to extract or archive +func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error { + if ea, ok := i.(extractor); ok { + return ea.ExtractArchive(src, dst, opts) + } + return chrootarchive.Untar(src, dst, opts) +} + +func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) { + if ap, ok := i.(archiver); ok { + return ap.ArchivePath(src, opts) + } + return archive.TarWithOptions(src, opts) +} + // ContainerCopy performs a deprecated operation of archiving the resource at // the specified path in the container identified by the given name. func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { @@ -138,6 +162,9 @@ func (daemon *Daemon) containerStatPath(container *container.Container, path str return nil, err } + // Normalize path before sending to rootfs + path = container.BaseFS.FromSlash(path) + resolvedPath, absPath, err := container.ResolvePath(path) if err != nil { return nil, err @@ -178,6 +205,9 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path return nil, nil, err } + // Normalize path before sending to rootfs + path = container.BaseFS.FromSlash(path) + resolvedPath, absPath, err := container.ResolvePath(path) if err != nil { return nil, nil, err @@ -196,7 +226,18 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path // also catches the case when the root directory of the container is // requested: we want the archive entries to start with "/" and not the // container ID. - data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) + driver := container.BaseFS + + // Get the source and the base paths of the container resolved path in order + // to get the proper tar options for the rebase tar. + resolvedPath = driver.Clean(resolvedPath) + if driver.Base(resolvedPath) == "." { + resolvedPath += string(driver.Separator()) + "." + } + sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath) + opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath)) + + data, err := archivePath(driver, sourceDir, opts) if err != nil { return nil, nil, err } @@ -235,8 +276,12 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path return err } + // Normalize path before sending to rootfs' + path = container.BaseFS.FromSlash(path) + driver := container.BaseFS + // Check if a drive letter supplied, it must be the system drive. No-op except on Windows - path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, driver) if err != nil { return err } @@ -248,7 +293,10 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path // that you can extract an archive to a symlink that points to a directory. // Consider the given path as an absolute path in the container. - absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) + absPath := archive.PreserveTrailingDotOrSeparator( + driver.Join(string(driver.Separator()), path), + path, + driver.Separator()) // This will evaluate the last path element if it is a symlink. 
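The extractArchive and archivePath helpers above use Go's optional-interface idiom: the container filesystem is type-asserted against a small capability interface and, when it matches, the optimized implementation is used; otherwise the generic chrootarchive/archive path runs. A stripped-down illustration of the same idiom (the names here are illustrative, not the daemon's):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// fastReader is an optional capability a reader may provide.
type fastReader interface {
	ReadAllFast() ([]byte, error)
}

// readAll prefers the optimized implementation when available and falls back
// to the generic one otherwise, mirroring extractArchive/archivePath.
func readAll(src io.Reader) ([]byte, error) {
	if fr, ok := src.(fastReader); ok {
		return fr.ReadAllFast()
	}
	return ioutil.ReadAll(src)
}

func main() {
	b, err := readAll(strings.NewReader("generic path taken"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}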
resolvedPath, err := container.GetResourcePath(absPath) @@ -256,7 +304,7 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path return err } - stat, err := os.Lstat(resolvedPath) + stat, err := driver.Lstat(resolvedPath) if err != nil { return err } @@ -279,21 +327,24 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path // a volume file path. var baseRel string if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { - if strings.HasPrefix(resolvedPath, container.BaseFS) { - baseRel = resolvedPath[len(container.BaseFS):] + if strings.HasPrefix(resolvedPath, driver.Path()) { + baseRel = resolvedPath[len(driver.Path()):] if baseRel[:1] == `\` { baseRel = baseRel[1:] } } } else { - baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) + baseRel, err = driver.Rel(driver.Path(), resolvedPath) } if err != nil { return err } // Make it an absolute path. - absPath = filepath.Join(string(filepath.Separator), baseRel) + absPath = driver.Join(string(driver.Separator()), baseRel) + // @ TODO: gupta-ak: Technically, this works since it no-ops + // on Windows and the file system is local anyway on linux. + // But eventually, it should be made driver aware. toVolume, err := checkIfPathIsInAVolume(container, absPath) if err != nil { return err @@ -315,7 +366,7 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path } } - if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { + if err := extractArchive(driver, content, resolvedPath, options); err != nil { return err } @@ -356,24 +407,28 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str return nil, err } + // Normalize path before sending to rootfs + resource = container.BaseFS.FromSlash(resource) + driver := container.BaseFS + basePath, err := container.GetResourcePath(resource) if err != nil { return nil, err } - stat, err := os.Stat(basePath) + stat, err := driver.Stat(basePath) if err != nil { return nil, err } var filter []string if !stat.IsDir() { - d, f := filepath.Split(basePath) + d, f := driver.Split(basePath) basePath = d filter = []string{f} } else { - filter = []string{filepath.Base(basePath)} - basePath = filepath.Dir(basePath) + filter = []string{driver.Base(basePath)} + basePath = driver.Dir(basePath) } - archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ + archive, err := archivePath(driver, basePath, &archive.TarOptions{ Compression: archive.Uncompressed, IncludeFiles: filter, }) diff --git a/components/engine/daemon/archive_unix.go b/components/engine/daemon/archive_unix.go index d5dfad78cb..e2d4c308d2 100644 --- a/components/engine/daemon/archive_unix.go +++ b/components/engine/daemon/archive_unix.go @@ -4,6 +4,7 @@ package daemon import ( "github.com/docker/docker/container" + "github.com/docker/docker/volume" ) // checkIfPathIsInAVolume checks if the path is in a volume. If it is, it @@ -11,8 +12,9 @@ import ( // cannot be configured with a read-only rootfs. 
func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { var toVolume bool + parser := volume.NewParser(container.Platform) for _, mnt := range container.MountPoints { - if toVolume = mnt.HasResource(absPath); toVolume { + if toVolume = parser.HasResource(mnt, absPath); toVolume { if mnt.RW { break } diff --git a/components/engine/daemon/attach.go b/components/engine/daemon/attach.go index 7b676ccaf0..651a964c05 100644 --- a/components/engine/daemon/attach.go +++ b/components/engine/daemon/attach.go @@ -168,7 +168,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.Attach // Wait for the container to stop before returning. waitChan := c.Wait(context.Background(), container.WaitConditionNotRunning) defer func() { - _ = <-waitChan // Ignore returned exit code. + <-waitChan // Ignore returned exit code. }() } diff --git a/components/engine/daemon/build.go b/components/engine/daemon/build.go index 39269ab5a2..be344062ff 100644 --- a/components/engine/daemon/build.go +++ b/components/engine/daemon/build.go @@ -10,6 +10,7 @@ import ( "github.com/docker/docker/builder" "github.com/docker/docker/image" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/registry" @@ -25,9 +26,9 @@ type releaseableLayer struct { rwLayer layer.RWLayer } -func (rl *releaseableLayer) Mount() (string, error) { +func (rl *releaseableLayer) Mount() (containerfs.ContainerFS, error) { var err error - var mountPath string + var mountPath containerfs.ContainerFS var chainID layer.ChainID if rl.roLayer != nil { chainID = rl.roLayer.ChainID() @@ -36,7 +37,7 @@ func (rl *releaseableLayer) Mount() (string, error) { mountID := stringid.GenerateRandomID() rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, chainID, nil) if err != nil { - return "", errors.Wrap(err, "failed to create rwlayer") + return nil, errors.Wrap(err, "failed to create rwlayer") } mountPath, err = rl.rwLayer.Mount("") @@ -48,7 +49,7 @@ func (rl *releaseableLayer) Mount() (string, error) { logrus.Errorf("Failed to release RWLayer: %s", err) } rl.rwLayer = nil - return "", err + return nil, err } return mountPath, nil diff --git a/components/engine/daemon/checkpoint.go b/components/engine/daemon/checkpoint.go index d3028f1e28..7bdcae5154 100644 --- a/components/engine/daemon/checkpoint.go +++ b/components/engine/daemon/checkpoint.go @@ -7,13 +7,13 @@ import ( "os" "path/filepath" - "github.com/docker/docker/api" "github.com/docker/docker/api/types" + "github.com/docker/docker/daemon/names" ) var ( - validCheckpointNameChars = api.RestrictedNameChars - validCheckpointNamePattern = api.RestrictedNamePattern + validCheckpointNameChars = names.RestrictedNameChars + validCheckpointNamePattern = names.RestrictedNamePattern ) // getCheckpointDir verifies checkpoint directory for create,remove, list options and checks if checkpoint already exists diff --git a/components/engine/daemon/cluster/executor/backend.go b/components/engine/daemon/cluster/executor/backend.go index 2b269793fb..89952c1452 100644 --- a/components/engine/daemon/cluster/executor/backend.go +++ b/components/engine/daemon/cluster/executor/backend.go @@ -15,6 +15,7 @@ import ( swarmtypes "github.com/docker/docker/api/types/swarm" containerpkg "github.com/docker/docker/container" clustertypes "github.com/docker/docker/daemon/cluster/provider" + networkSettings "github.com/docker/docker/daemon/network" 
"github.com/docker/docker/plugin" "github.com/docker/libnetwork" "github.com/docker/libnetwork/cluster" @@ -61,4 +62,5 @@ type Backend interface { LookupImage(name string) (*types.ImageInspect, error) PluginManager() *plugin.Manager PluginGetter() *plugin.Store + GetLBAttachmentStore() *networkSettings.LBAttachmentStore } diff --git a/components/engine/daemon/cluster/executor/container/adapter.go b/components/engine/daemon/cluster/executor/container/adapter.go index ded3533515..8077efdb05 100644 --- a/components/engine/daemon/cluster/executor/container/adapter.go +++ b/components/engine/daemon/cluster/executor/container/adapter.go @@ -41,8 +41,8 @@ type containerAdapter struct { dependencies exec.DependencyGetter } -func newContainerAdapter(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*containerAdapter, error) { - ctnr, err := newContainerConfig(task) +func newContainerAdapter(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) { + ctnr, err := newContainerConfig(task, node) if err != nil { return nil, err } diff --git a/components/engine/daemon/cluster/executor/container/attachment.go b/components/engine/daemon/cluster/executor/container/attachment.go index 54f95a1fbf..405aa2db6e 100644 --- a/components/engine/daemon/cluster/executor/container/attachment.go +++ b/components/engine/daemon/cluster/executor/container/attachment.go @@ -20,8 +20,8 @@ type networkAttacherController struct { closed chan struct{} } -func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*networkAttacherController, error) { - adapter, err := newContainerAdapter(b, task, dependencies) +func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*networkAttacherController, error) { + adapter, err := newContainerAdapter(b, task, node, dependencies) if err != nil { return nil, err } @@ -40,11 +40,7 @@ func (nc *networkAttacherController) Update(ctx context.Context, t *api.Task) er func (nc *networkAttacherController) Prepare(ctx context.Context) error { // Make sure all the networks that the task needs are created. - if err := nc.adapter.createNetworks(ctx); err != nil { - return err - } - - return nil + return nc.adapter.createNetworks(ctx) } func (nc *networkAttacherController) Start(ctx context.Context) error { @@ -69,11 +65,7 @@ func (nc *networkAttacherController) Terminate(ctx context.Context) error { func (nc *networkAttacherController) Remove(ctx context.Context) error { // Try removing the network referenced in this task in case this // task is the last one referencing it - if err := nc.adapter.removeNetworks(ctx); err != nil { - return err - } - - return nil + return nc.adapter.removeNetworks(ctx) } func (nc *networkAttacherController) Close() error { diff --git a/components/engine/daemon/cluster/executor/container/container.go b/components/engine/daemon/cluster/executor/container/container.go index 8ea4126d73..59ac9bf215 100644 --- a/components/engine/daemon/cluster/executor/container/container.go +++ b/components/engine/daemon/cluster/executor/container/container.go @@ -48,12 +48,12 @@ type containerConfig struct { // newContainerConfig returns a validated container config. No methods should // return an error if this function returns without error. 
-func newContainerConfig(t *api.Task) (*containerConfig, error) { +func newContainerConfig(t *api.Task, node *api.NodeDescription) (*containerConfig, error) { var c containerConfig - return &c, c.setTask(t) + return &c, c.setTask(t, node) } -func (c *containerConfig) setTask(t *api.Task) error { +func (c *containerConfig) setTask(t *api.Task, node *api.NodeDescription) error { if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil { return exec.ErrRuntimeUnsupported } @@ -78,7 +78,7 @@ func (c *containerConfig) setTask(t *api.Task) error { c.task = t if t.Spec.GetContainer() != nil { - preparedSpec, err := template.ExpandContainerSpec(nil, t) + preparedSpec, err := template.ExpandContainerSpec(node, t) if err != nil { return err } diff --git a/components/engine/daemon/cluster/executor/container/controller.go b/components/engine/daemon/cluster/executor/container/controller.go index 3ba4302d55..dda12591a9 100644 --- a/components/engine/daemon/cluster/executor/container/controller.go +++ b/components/engine/daemon/cluster/executor/container/controller.go @@ -40,8 +40,8 @@ type controller struct { var _ exec.Controller = &controller{} // NewController returns a docker exec runner for the provided task. -func newController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*controller, error) { - adapter, err := newContainerAdapter(b, task, dependencies) +func newController(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*controller, error) { + adapter, err := newContainerAdapter(b, task, node, dependencies) if err != nil { return nil, err } diff --git a/components/engine/daemon/cluster/executor/container/executor.go b/components/engine/daemon/cluster/executor/container/executor.go index fb339f14fc..a5bf2603d9 100644 --- a/components/engine/daemon/cluster/executor/container/executor.go +++ b/components/engine/daemon/cluster/executor/container/executor.go @@ -4,6 +4,7 @@ import ( "fmt" "sort" "strings" + "sync" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" @@ -26,6 +27,8 @@ type executor struct { backend executorpkg.Backend pluginBackend plugin.Backend dependencies exec.DependencyManager + mutex sync.Mutex // This mutex protects the following node field + node *api.NodeDescription } // NewExecutor returns an executor from the docker client. 
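// Concurrency note on the executor changes here: Describe() stores the freshly built
// NodeDescription under e.mutex, and Controller() reads it before constructing task
// controllers. A tiny read-side helper showing the pattern; the helper name is
// illustrative, the fields (mutex, node) are the ones added to the executor struct above.
func (e *executor) nodeDescription() *api.NodeDescription {
	e.mutex.Lock()
	defer e.mutex.Unlock()
	return e.node
}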
@@ -124,27 +127,41 @@ func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { }, } + // Save the node information in the executor field + e.mutex.Lock() + e.node = description + e.mutex.Unlock() + return description, nil } func (e *executor) Configure(ctx context.Context, node *api.Node) error { - na := node.Attachment - if na == nil { + var ingressNA *api.NetworkAttachment + lbAttachments := make(map[string]string) + + for _, na := range node.LbAttachments { + if na.Network.Spec.Ingress { + ingressNA = na + } + lbAttachments[na.Network.ID] = na.Addresses[0] + } + + if ingressNA == nil { e.backend.ReleaseIngress() - return nil + return e.backend.GetLBAttachmentStore().ResetLBAttachments(lbAttachments) } options := types.NetworkCreate{ - Driver: na.Network.DriverState.Name, + Driver: ingressNA.Network.DriverState.Name, IPAM: &network.IPAM{ - Driver: na.Network.IPAM.Driver.Name, + Driver: ingressNA.Network.IPAM.Driver.Name, }, - Options: na.Network.DriverState.Options, + Options: ingressNA.Network.DriverState.Options, Ingress: true, CheckDuplicate: true, } - for _, ic := range na.Network.IPAM.Configs { + for _, ic := range ingressNA.Network.IPAM.Configs { c := network.IPAMConfig{ Subnet: ic.Subnet, IPRange: ic.Range, @@ -154,22 +171,30 @@ func (e *executor) Configure(ctx context.Context, node *api.Node) error { } _, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{ - ID: na.Network.ID, + ID: ingressNA.Network.ID, NetworkCreateRequest: types.NetworkCreateRequest{ - Name: na.Network.Spec.Annotations.Name, + Name: ingressNA.Network.Spec.Annotations.Name, NetworkCreate: options, }, - }, na.Addresses[0]) + }, ingressNA.Addresses[0]) + if err != nil { + return err + } - return err + return e.backend.GetLBAttachmentStore().ResetLBAttachments(lbAttachments) } // Controller returns a docker container runner. 
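// Condensed view of what the reworked Configure above does with the node's load-balancer
// attachments (illustrative helper, not code from the tree): single out the ingress
// network and collect network ID -> LB address for the attachment store. On the daemon
// side this lands in Daemon.lbAttachmentStore (daemon.go below): Configure resets it via
// GetLBAttachmentStore().ResetLBAttachments, DaemonLeavesCluster clears it.
func splitLBAttachments(attachments []*api.NetworkAttachment) (ingress *api.NetworkAttachment, byNetwork map[string]string) {
	byNetwork = make(map[string]string)
	for _, na := range attachments {
		if na.Network.Spec.Ingress {
			ingress = na
		}
		byNetwork[na.Network.ID] = na.Addresses[0] // same assumption as the code above: at least one address per attachment
	}
	return ingress, byNetwork
}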
func (e *executor) Controller(t *api.Task) (exec.Controller, error) { dependencyGetter := agent.Restrict(e.dependencies, t) + // Get the node description from the executor field + e.mutex.Lock() + nodeDescription := e.node + e.mutex.Unlock() + if t.Spec.GetAttachment() != nil { - return newNetworkAttacherController(e.backend, t, dependencyGetter) + return newNetworkAttacherController(e.backend, t, nodeDescription, dependencyGetter) } var ctlr exec.Controller @@ -198,7 +223,7 @@ func (e *executor) Controller(t *api.Task) (exec.Controller, error) { return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind) } case *api.TaskSpec_Container: - c, err := newController(e.backend, t, dependencyGetter) + c, err := newController(e.backend, t, nodeDescription, dependencyGetter) if err != nil { return ctlr, err } diff --git a/components/engine/daemon/cluster/executor/container/health_test.go b/components/engine/daemon/cluster/executor/container/health_test.go index b6f188557f..450865edd3 100644 --- a/components/engine/daemon/cluster/executor/container/health_test.go +++ b/components/engine/daemon/cluster/executor/container/health_test.go @@ -52,7 +52,7 @@ func TestHealthStates(t *testing.T) { EventsService: e, } - controller, err := newController(daemon, task, nil) + controller, err := newController(daemon, task, nil, nil) if err != nil { t.Fatalf("create controller fail %v", err) } diff --git a/components/engine/daemon/cluster/executor/container/validate_test.go b/components/engine/daemon/cluster/executor/container/validate_test.go index 9d98e2c008..43e224a8fa 100644 --- a/components/engine/daemon/cluster/executor/container/validate_test.go +++ b/components/engine/daemon/cluster/executor/container/validate_test.go @@ -26,7 +26,8 @@ func newTestControllerWithMount(m api.Mount) (*controller, error) { }, }, }, - }, nil) + }, nil, + nil) } func TestControllerValidateMountBind(t *testing.T) { diff --git a/components/engine/daemon/config/config.go b/components/engine/daemon/config/config.go index d01d4a4fb4..501c07af76 100644 --- a/components/engine/daemon/config/config.go +++ b/components/engine/daemon/config/config.go @@ -103,7 +103,6 @@ type CommonConfig struct { Root string `json:"data-root,omitempty"` SocketGroup string `json:"group,omitempty"` CorsHeaders string `json:"api-cors-header,omitempty"` - EnableCors bool `json:"api-enable-cors,omitempty"` // TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests // when pushing to a registry which does not support schema 2. 
This field is marked as diff --git a/components/engine/daemon/config/config_unix_test.go b/components/engine/daemon/config/config_unix_test.go index 5987e77e00..fedf757eb4 100644 --- a/components/engine/daemon/config/config_unix_test.go +++ b/components/engine/daemon/config/config_unix_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/docker/docker/opts" - "github.com/docker/go-units" + units "github.com/docker/go-units" "github.com/gotestyourself/gotestyourself/fs" "github.com/spf13/pflag" "github.com/stretchr/testify/assert" @@ -14,7 +14,7 @@ import ( ) func TestGetConflictFreeConfiguration(t *testing.T) { - configFileData := string([]byte(` + configFileData := ` { "debug": true, "default-ulimits": { @@ -27,7 +27,7 @@ func TestGetConflictFreeConfiguration(t *testing.T) { "log-opts": { "tag": "test_tag" } - }`)) + }` file := fs.NewFile(t, "docker-config", fs.WithContent(configFileData)) defer file.Remove() @@ -55,7 +55,7 @@ func TestGetConflictFreeConfiguration(t *testing.T) { } func TestDaemonConfigurationMerge(t *testing.T) { - configFileData := string([]byte(` + configFileData := ` { "debug": true, "default-ulimits": { @@ -68,7 +68,7 @@ func TestDaemonConfigurationMerge(t *testing.T) { "log-opts": { "tag": "test_tag" } - }`)) + }` file := fs.NewFile(t, "docker-config", fs.WithContent(configFileData)) defer file.Remove() @@ -115,10 +115,7 @@ func TestDaemonConfigurationMerge(t *testing.T) { } func TestDaemonConfigurationMergeShmSize(t *testing.T) { - data := string([]byte(` - { - "default-shm-size": "1g" - }`)) + data := `{"default-shm-size": "1g"}` file := fs.NewFile(t, "docker-config", fs.WithContent(data)) defer file.Remove() @@ -133,7 +130,5 @@ func TestDaemonConfigurationMergeShmSize(t *testing.T) { require.NoError(t, err) expectedValue := 1 * 1024 * 1024 * 1024 - if cc.ShmSize.Value() != int64(expectedValue) { - t.Fatalf("expected default shm size %d, got %d", expectedValue, cc.ShmSize.Value()) - } + assert.Equal(t, int64(expectedValue), cc.ShmSize.Value()) } diff --git a/components/engine/daemon/container_operations_unix.go b/components/engine/daemon/container_operations_unix.go index 84b7eb352f..954c194ea8 100644 --- a/components/engine/daemon/container_operations_unix.go +++ b/components/engine/daemon/container_operations_unix.go @@ -307,6 +307,8 @@ func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil { return errors.Wrap(err, "error setting ownership for config") } + + label.Relabel(fPath, c.MountLabel, false) } return nil diff --git a/components/engine/daemon/create_windows.go b/components/engine/daemon/create_windows.go index c63bfff17d..059980b47f 100644 --- a/components/engine/daemon/create_windows.go +++ b/components/engine/daemon/create_windows.go @@ -26,10 +26,10 @@ func (daemon *Daemon) createContainerPlatformSpecificSettings(container *contain } hostConfig.Isolation = "hyperv" } - + parser := volume.NewParser(container.Platform) for spec := range config.Volumes { - mp, err := volume.ParseMountRaw(spec, hostConfig.VolumeDriver) + mp, err := parser.ParseMountRaw(spec, hostConfig.VolumeDriver) if err != nil { return fmt.Errorf("Unrecognised volume spec: %v", err) } diff --git a/components/engine/daemon/daemon.go b/components/engine/daemon/daemon.go index a11a1f8691..7208f3c5c4 100644 --- a/components/engine/daemon/daemon.go +++ b/components/engine/daemon/daemon.go @@ -19,7 +19,6 @@ import ( "time" containerd "github.com/containerd/containerd/api/grpc/types" - 
"github.com/docker/docker/api" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/swarm" @@ -29,6 +28,7 @@ import ( "github.com/docker/docker/daemon/events" "github.com/docker/docker/daemon/exec" "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/network" "github.com/sirupsen/logrus" // register graph drivers _ "github.com/docker/docker/daemon/graphdriver/register" @@ -41,6 +41,7 @@ import ( "github.com/docker/docker/layer" "github.com/docker/docker/libcontainerd" "github.com/docker/docker/migrate/v1" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/sysinfo" @@ -122,6 +123,8 @@ type Daemon struct { pruneRunning int32 hosts map[string]bool // hosts stores the addresses the daemon is listening on startupDone chan struct{} + + lbAttachmentStore network.LBAttachmentStore } // StoreHosts stores the addresses the daemon is listening on @@ -489,6 +492,8 @@ func (daemon *Daemon) DaemonLeavesCluster() { } else { logrus.Warnf("failed to initiate ingress network removal: %v", err) } + + daemon.lbAttachmentStore.ClearLBAttachments() } // setClusterProvider sets a component for querying the current cluster state. @@ -713,7 +718,7 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe return nil, err } - trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath) + trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath) if err != nil { return nil, err } @@ -860,7 +865,7 @@ func (daemon *Daemon) shutdownContainer(c *container.Container) error { // Wait without timeout for the container to exit. // Ignore the result. - _ = <-c.Wait(context.Background(), container.WaitConditionNotRunning) + <-c.Wait(context.Background(), container.WaitConditionNotRunning) return nil } @@ -967,11 +972,11 @@ func (daemon *Daemon) Mount(container *container.Container) error { } logrus.Debugf("container mounted via layerStore: %v", dir) - if container.BaseFS != dir { + if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() { // The mount path reported by the graph driver should always be trusted on Windows, since the // volume path for a given mounted layer may change over time. This should only be an error // on non-Windows operating systems. 
- if container.BaseFS != "" && runtime.GOOS != "windows" { + if runtime.GOOS != "windows" { daemon.Unmount(container) return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", daemon.GraphDriverName(container.Platform), container.ID, container.BaseFS, dir) @@ -1046,7 +1051,7 @@ func prepareTempDir(rootDir string, rootIDs idtools.IDPair) (string, error) { return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs) } -func (daemon *Daemon) setupInitLayer(initPath string) error { +func (daemon *Daemon) setupInitLayer(initPath containerfs.ContainerFS) error { rootIDs := daemon.idMappings.RootPair() return initlayer.Setup(initPath, rootIDs) } @@ -1243,3 +1248,8 @@ func fixMemorySwappiness(resources *containertypes.Resources) { resources.MemorySwappiness = nil } } + +// GetLBAttachmentStore returns current load balancer store associated with the daemon +func (daemon *Daemon) GetLBAttachmentStore() *network.LBAttachmentStore { + return &daemon.lbAttachmentStore +} diff --git a/components/engine/daemon/daemon_solaris.go b/components/engine/daemon/daemon_solaris.go index 156d11194a..9c7d67f0bd 100644 --- a/components/engine/daemon/daemon_solaris.go +++ b/components/engine/daemon/daemon_solaris.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/image" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers/kernel" @@ -97,7 +98,7 @@ func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPa return nil } -func (daemon *Daemon) getLayerInit() func(string) error { +func (daemon *Daemon) getLayerInit() func(containerfs.ContainerFS) error { return nil } diff --git a/components/engine/daemon/daemon_unix.go b/components/engine/daemon/daemon_unix.go index 0082706b6c..655483c89e 100644 --- a/components/engine/daemon/daemon_unix.go +++ b/components/engine/daemon/daemon_unix.go @@ -24,6 +24,7 @@ import ( "github.com/docker/docker/daemon/config" "github.com/docker/docker/image" "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/kernel" @@ -612,8 +613,9 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes. 
return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) } + parser := volume.NewParser(runtime.GOOS) for dest := range hostConfig.Tmpfs { - if err := volume.ValidateTmpfsMountDestination(dest); err != nil { + if err := parser.ValidateTmpfsMountDestination(dest); err != nil { return warnings, err } } @@ -987,7 +989,7 @@ func removeDefaultBridgeInterface() { } } -func (daemon *Daemon) getLayerInit() func(string) error { +func (daemon *Daemon) getLayerInit() func(containerfs.ContainerFS) error { return daemon.setupInitLayer } diff --git a/components/engine/daemon/daemon_unix_test.go b/components/engine/daemon/daemon_unix_test.go index f3a7ce4ae8..2bdbd23290 100644 --- a/components/engine/daemon/daemon_unix_test.go +++ b/components/engine/daemon/daemon_unix_test.go @@ -17,6 +17,7 @@ import ( "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/local" "github.com/docker/docker/volume/store" + "github.com/stretchr/testify/require" ) type fakeContainerGetter struct { @@ -289,13 +290,12 @@ func TestMigratePre17Volumes(t *testing.T) { containerRoot := filepath.Join(rootDir, "containers") cid := "1234" err = os.MkdirAll(filepath.Join(containerRoot, cid), 0755) + require.NoError(t, err) vid := "5678" vfsPath := filepath.Join(rootDir, "vfs", "dir", vid) err = os.MkdirAll(vfsPath, 0755) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) config := []byte(` { diff --git a/components/engine/daemon/daemon_windows.go b/components/engine/daemon/daemon_windows.go index f78f60a0af..c85a1483f2 100644 --- a/components/engine/daemon/daemon_windows.go +++ b/components/engine/daemon/daemon_windows.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/image" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" @@ -56,7 +57,7 @@ func parseSecurityOpt(container *container.Container, config *containertypes.Hos return nil } -func (daemon *Daemon) getLayerInit() func(string) error { +func (daemon *Daemon) getLayerInit() func(containerfs.ContainerFS) error { return nil } diff --git a/components/engine/daemon/discovery/discovery_test.go b/components/engine/daemon/discovery/discovery_test.go index f084a649a7..d5c9966a3c 100644 --- a/components/engine/daemon/discovery/discovery_test.go +++ b/components/engine/daemon/discovery/discovery_test.go @@ -4,92 +4,77 @@ import ( "fmt" "testing" "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +func TestDiscoveryOptsErrors(t *testing.T) { + var testcases = []struct { + doc string + opts map[string]string + }{ + { + doc: "discovery.ttl < discovery.heartbeat", + opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"}, + }, + { + doc: "discovery.ttl == discovery.heartbeat", + opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"}, + }, + { + doc: "negative discovery.heartbeat", + opts: map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"}, + }, + { + doc: "negative discovery.ttl", + opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"}, + }, + { + doc: "invalid discovery.heartbeat", + opts: map[string]string{"discovery.heartbeat": "invalid"}, + }, + { + doc: "invalid discovery.ttl", + opts: map[string]string{"discovery.ttl": "invalid"}, + }, + } + + for _, testcase := range testcases { + _, _, err := 
discoveryOpts(testcase.opts) + assert.Error(t, err, testcase.doc) + } +} + func TestDiscoveryOpts(t *testing.T) { - clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"} + clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} heartbeat, ttl, err := discoveryOpts(clusterOpts) - if err == nil { - t.Fatal("discovery.ttl < discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatal("discovery.ttl == discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatal("negative discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatal("negative discovery.ttl must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "invalid"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatal("invalid discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.ttl": "invalid"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatal("invalid discovery.ttl must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if heartbeat != 10*time.Second { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) - } - - if ttl != 20*time.Second { - t.Fatalf("TTL - Expected : %v, Actual : %v", 20*time.Second, ttl) - } + require.NoError(t, err) + assert.Equal(t, 10*time.Second, heartbeat) + assert.Equal(t, 20*time.Second, ttl) clusterOpts = map[string]string{"discovery.heartbeat": "10"} heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if heartbeat != 10*time.Second { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) - } - - expected := 10 * defaultDiscoveryTTLFactor * time.Second - if ttl != expected { - t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) - } + require.NoError(t, err) + assert.Equal(t, 10*time.Second, heartbeat) + assert.Equal(t, 10*defaultDiscoveryTTLFactor*time.Second, ttl) clusterOpts = map[string]string{"discovery.ttl": "30"} heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if ttl != 30*time.Second { t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl) } - expected = 30 * time.Second / defaultDiscoveryTTLFactor + expected := 30 * time.Second / defaultDiscoveryTTLFactor if heartbeat != expected { t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat) } discoveryTTL := fmt.Sprintf("%d", defaultDiscoveryTTLFactor-1) clusterOpts = map[string]string{"discovery.ttl": discoveryTTL} - heartbeat, ttl, err = discoveryOpts(clusterOpts) + heartbeat, _, err = discoveryOpts(clusterOpts) if err == nil && heartbeat == 0 { t.Fatal("discovery.heartbeat must be positive") } diff --git a/components/engine/daemon/disk_usage.go b/components/engine/daemon/disk_usage.go index a28463eba9..7142ef0781 100644 --- a/components/engine/daemon/disk_usage.go +++ b/components/engine/daemon/disk_usage.go @@ -99,7 +99,6 @@ func (daemon *Daemon) 
SystemDiskUsage(ctx context.Context) (*types.DiskUsage, er for platform := range daemon.stores { layerRefs := daemon.getLayerRefs(platform) allLayers := daemon.stores[platform].layerStore.Map() - var allLayersSize int64 for _, l := range allLayers { select { case <-ctx.Done(): diff --git a/components/engine/daemon/exec/exec.go b/components/engine/daemon/exec/exec.go index 2348ad8fd5..f4efb4d54e 100644 --- a/components/engine/daemon/exec/exec.go +++ b/components/engine/daemon/exec/exec.go @@ -75,7 +75,7 @@ type Store struct { // NewStore initializes a new exec store. func NewStore() *Store { - return &Store{commands: make(map[string]*Config, 0)} + return &Store{commands: make(map[string]*Config)} } // Commands returns the exec configurations in the store. diff --git a/components/engine/daemon/export.go b/components/engine/daemon/export.go index 081e1639b7..730387d76c 100644 --- a/components/engine/daemon/export.go +++ b/components/engine/daemon/export.go @@ -40,7 +40,7 @@ func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCl return nil, err } - archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ + archive, err := archivePath(container.BaseFS, container.BaseFS.Path(), &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: daemon.idMappings.UIDs(), GIDMaps: daemon.idMappings.GIDs(), diff --git a/components/engine/daemon/graphdriver/aufs/aufs.go b/components/engine/daemon/graphdriver/aufs/aufs.go index 05822a70c9..8313263ff1 100644 --- a/components/engine/daemon/graphdriver/aufs/aufs.go +++ b/components/engine/daemon/graphdriver/aufs/aufs.go @@ -38,6 +38,7 @@ import ( "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/locker" @@ -388,12 +389,12 @@ func atomicRemove(source string) error { // Get returns the rootfs path for the id. // This will mount the dir at its given path -func (a *Driver) Get(id, mountLabel string) (string, error) { +func (a *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { a.locker.Lock(id) defer a.locker.Unlock(id) parents, err := a.getParentLayerPaths(id) if err != nil && !os.IsNotExist(err) { - return "", err + return nil, err } a.pathCacheLock.Lock() @@ -407,21 +408,21 @@ func (a *Driver) Get(id, mountLabel string) (string, error) { } } if count := a.ctr.Increment(m); count > 1 { - return m, nil + return containerfs.NewLocalContainerFS(m), nil } // If a dir does not have a parent ( no layers )do not try to mount // just return the diff path to the data if len(parents) > 0 { if err := a.mount(id, m, mountLabel, parents); err != nil { - return "", err + return nil, err } } a.pathCacheLock.Lock() a.pathCache[id] = m a.pathCacheLock.Unlock() - return m, nil + return containerfs.NewLocalContainerFS(m), nil } // Put unmounts and updates list of active mounts. 
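// Every graphdriver in this diff gets the same mechanical treatment as aufs above:
// Get now returns a containerfs.ContainerFS instead of a string path, and local drivers
// simply wrap their mount path. A minimal illustrative driver method following the new
// contract (localDriver and d.mount are hypothetical, NewLocalContainerFS is the real
// constructor used throughout this diff):
func (d *localDriver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
	mnt, err := d.mount(id, mountLabel) // hypothetical helper returning the host path of the mounted layer
	if err != nil {
		return nil, err
	}
	return containerfs.NewLocalContainerFS(mnt), nil
}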
diff --git a/components/engine/daemon/graphdriver/aufs/aufs_test.go b/components/engine/daemon/graphdriver/aufs/aufs_test.go index 532074eee6..a58f18a0dc 100644 --- a/components/engine/daemon/graphdriver/aufs/aufs_test.go +++ b/components/engine/daemon/graphdriver/aufs/aufs_test.go @@ -9,15 +9,16 @@ import ( "io/ioutil" "os" "path" + "path/filepath" "sync" "testing" - "path/filepath" - "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/stringid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ( @@ -41,6 +42,14 @@ func testInit(dir string, t testing.TB) graphdriver.Driver { return d } +func driverGet(d *Driver, id string, mntLabel string) (string, error) { + mnt, err := d.Get(id, mntLabel) + if err != nil { + return "", err + } + return mnt.Path(), nil +} + func newDriver(t testing.TB) *Driver { if err := os.MkdirAll(tmp, 0755); err != nil { t.Fatal(err) @@ -170,7 +179,7 @@ func TestGetWithoutParent(t *testing.T) { t.Fatal(err) } expected := path.Join(tmp, "diff", "1") - if diffPath != expected { + if diffPath.Path() != expected { t.Fatalf("Expected path %s got %s", expected, diffPath) } } @@ -179,9 +188,8 @@ func TestCleanupWithNoDirs(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } + err := d.Cleanup() + assert.NoError(t, err) } func TestCleanupWithDir(t *testing.T) { @@ -201,18 +209,12 @@ func TestMountedFalseResponse(t *testing.T) { d := newDriver(t) defer os.RemoveAll(tmp) - if err := d.Create("1", "", nil); err != nil { - t.Fatal(err) - } + err := d.Create("1", "", nil) + require.NoError(t, err) response, err := d.mounted(d.getDiffPath("1")) - if err != nil { - t.Fatal(err) - } - - if response != false { - t.Fatal("Response if dir id 1 is mounted should be false") - } + require.NoError(t, err) + assert.False(t, response) } func TestMountedTrueResponse(t *testing.T) { @@ -220,26 +222,17 @@ func TestMountedTrueResponse(t *testing.T) { defer os.RemoveAll(tmp) defer d.Cleanup() - if err := d.Create("1", "", nil); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1", nil); err != nil { - t.Fatal(err) - } + err := d.Create("1", "", nil) + require.NoError(t, err) + err = d.Create("2", "1", nil) + require.NoError(t, err) - _, err := d.Get("2", "") - if err != nil { - t.Fatal(err) - } + _, err = d.Get("2", "") + require.NoError(t, err) response, err := d.mounted(d.pathCache["2"]) - if err != nil { - t.Fatal(err) - } - - if response != true { - t.Fatal("Response if dir id 2 is mounted should be true") - } + require.NoError(t, err) + assert.True(t, response) } func TestMountWithParent(t *testing.T) { @@ -263,13 +256,13 @@ func TestMountWithParent(t *testing.T) { if err != nil { t.Fatal(err) } - if mntPath == "" { - t.Fatal("mntPath should not be empty string") + if mntPath == nil { + t.Fatal("mntPath should not be nil") } expected := path.Join(tmp, "mnt", "2") - if mntPath != expected { - t.Fatalf("Expected %s got %s", expected, mntPath) + if mntPath.Path() != expected { + t.Fatalf("Expected %s got %s", expected, mntPath.Path()) } } @@ -294,8 +287,8 @@ func TestRemoveMountedDir(t *testing.T) { if err != nil { t.Fatal(err) } - if mntPath == "" { - t.Fatal("mntPath should not be empty string") + if mntPath == nil { + t.Fatal("mntPath should not be nil") } mounted, err := d.mounted(d.pathCache["2"]) @@ -329,7 +322,7 @@ func TestGetDiff(t *testing.T) { t.Fatal(err) } - diffPath, err := 
d.Get("1", "") + diffPath, err := driverGet(d, "1", "") if err != nil { t.Fatal(err) } @@ -373,7 +366,7 @@ func TestChanges(t *testing.T) { } }() - mntPoint, err := d.Get("2", "") + mntPoint, err := driverGet(d, "2", "") if err != nil { t.Fatal(err) } @@ -412,7 +405,7 @@ func TestChanges(t *testing.T) { if err := d.CreateReadWrite("3", "2", nil); err != nil { t.Fatal(err) } - mntPoint, err = d.Get("3", "") + mntPoint, err = driverGet(d, "3", "") if err != nil { t.Fatal(err) } @@ -458,7 +451,7 @@ func TestDiffSize(t *testing.T) { t.Fatal(err) } - diffPath, err := d.Get("1", "") + diffPath, err := driverGet(d, "1", "") if err != nil { t.Fatal(err) } @@ -500,7 +493,7 @@ func TestChildDiffSize(t *testing.T) { t.Fatal(err) } - diffPath, err := d.Get("1", "") + diffPath, err := driverGet(d, "1", "") if err != nil { t.Fatal(err) } @@ -574,9 +567,8 @@ func TestStatus(t *testing.T) { } status := d.Status() - if status == nil || len(status) == 0 { - t.Fatal("Status should not be nil or empty") - } + assert.Len(t, status, 4) + rootDir := status[0] dirs := status[2] if rootDir[0] != "Root Dir" { @@ -602,7 +594,7 @@ func TestApplyDiff(t *testing.T) { t.Fatal(err) } - diffPath, err := d.Get("1", "") + diffPath, err := driverGet(d, "1", "") if err != nil { t.Fatal(err) } @@ -637,7 +629,7 @@ func TestApplyDiff(t *testing.T) { // Ensure that the file is in the mount point for id 3 - mountPoint, err := d.Get("3", "") + mountPoint, err := driverGet(d, "3", "") if err != nil { t.Fatal(err) } @@ -677,48 +669,34 @@ func testMountMoreThan42Layers(t *testing.T, mountPath string) { } current = hash(current) - if err := d.CreateReadWrite(current, parent, nil); err != nil { - t.Logf("Current layer %d", i) - t.Error(err) - } - point, err := d.Get(current, "") - if err != nil { - t.Logf("Current layer %d", i) - t.Error(err) - } + err := d.CreateReadWrite(current, parent, nil) + require.NoError(t, err, "current layer %d", i) + + point, err := driverGet(d, current, "") + require.NoError(t, err, "current layer %d", i) + f, err := os.Create(path.Join(point, current)) - if err != nil { - t.Logf("Current layer %d", i) - t.Error(err) - } + require.NoError(t, err, "current layer %d", i) f.Close() if i%10 == 0 { - if err := os.Remove(path.Join(point, parent)); err != nil { - t.Logf("Current layer %d", i) - t.Error(err) - } + err := os.Remove(path.Join(point, parent)) + require.NoError(t, err, "current layer %d", i) expected-- } last = current } // Perform the actual mount for the top most image - point, err := d.Get(last, "") - if err != nil { - t.Error(err) - } + point, err := driverGet(d, last, "") + require.NoError(t, err) files, err := ioutil.ReadDir(point) - if err != nil { - t.Error(err) - } - if len(files) != expected { - t.Errorf("Expected %d got %d", expected, len(files)) - } + require.NoError(t, err) + assert.Len(t, files, expected) } func TestMountMoreThan42Layers(t *testing.T) { - os.RemoveAll(tmpOuter) + defer os.RemoveAll(tmpOuter) testMountMoreThan42Layers(t, tmp) } diff --git a/components/engine/daemon/graphdriver/btrfs/btrfs.go b/components/engine/daemon/graphdriver/btrfs/btrfs.go index dcdfc9a867..5a61217341 100644 --- a/components/engine/daemon/graphdriver/btrfs/btrfs.go +++ b/components/engine/daemon/graphdriver/btrfs/btrfs.go @@ -27,6 +27,7 @@ import ( "unsafe" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" @@ -631,29 +632,29 @@ func (d *Driver) 
Remove(id string) error { } // Get the requested filesystem id. -func (d *Driver) Get(id, mountLabel string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { dir := d.subvolumesDirID(id) st, err := os.Stat(dir) if err != nil { - return "", err + return nil, err } if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) + return nil, fmt.Errorf("%s: not a directory", dir) } if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil { if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace { if err := d.subvolEnableQuota(); err != nil { - return "", err + return nil, err } if err := subvolLimitQgroup(dir, size); err != nil { - return "", err + return nil, err } } } - return dir, nil + return containerfs.NewLocalContainerFS(dir), nil } // Put is not implemented for BTRFS as there is no cleanup required for the id. diff --git a/components/engine/daemon/graphdriver/btrfs/btrfs_test.go b/components/engine/daemon/graphdriver/btrfs/btrfs_test.go index 0038dbcdcd..056b99b94b 100644 --- a/components/engine/daemon/graphdriver/btrfs/btrfs_test.go +++ b/components/engine/daemon/graphdriver/btrfs/btrfs_test.go @@ -35,12 +35,14 @@ func TestBtrfsSubvolDelete(t *testing.T) { } defer graphtest.PutDriver(t) - dir, err := d.Get("test", "") + dirFS, err := d.Get("test", "") if err != nil { t.Fatal(err) } defer d.Put("test") + dir := dirFS.Path() + if err := subvolCreate(dir, "subvoltest"); err != nil { t.Fatal(err) } diff --git a/components/engine/daemon/graphdriver/devmapper/deviceset.go b/components/engine/daemon/graphdriver/devmapper/deviceset.go index 32b35c9acd..deb8c87d1f 100644 --- a/components/engine/daemon/graphdriver/devmapper/deviceset.go +++ b/components/engine/daemon/graphdriver/devmapper/deviceset.go @@ -21,10 +21,12 @@ import ( "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/dockerversion" "github.com/docker/docker/pkg/devicemapper" + "github.com/docker/docker/pkg/dmesg" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/loopback" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/parsers/kernel" units "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" @@ -533,11 +535,11 @@ func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bo return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) } -// Return true only if kernel supports xfs and mkfs.xfs is available -func xfsSupported() bool { +// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error +func xfsSupported() error { // Make sure mkfs.xfs is available if _, err := exec.LookPath("mkfs.xfs"); err != nil { - return false + return err // error text is descriptive enough } // Check if kernel supports xfs filesystem or not. 
@@ -545,41 +547,48 @@ func xfsSupported() bool { f, err := os.Open("/proc/filesystems") if err != nil { - logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) - return false + return errors.Wrapf(err, "error checking for xfs support") } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.HasSuffix(s.Text(), "\txfs") { - return true + return nil } } if err := s.Err(); err != nil { - logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) + return errors.Wrapf(err, "error checking for xfs support") } - return false + + return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`) } func determineDefaultFS() string { - if xfsSupported() { + err := xfsSupported() + if err == nil { return "xfs" } - logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem") + logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to ext4 filesystem", err) return "ext4" } +// mkfsOptions tries to figure out whether some additional mkfs options are required +func mkfsOptions(fs string) []string { + if fs == "xfs" && !kernel.CheckKernelVersion(3, 16, 0) { + // For kernels earlier than 3.16 (and newer xfsutils), + // some xfs features need to be explicitly disabled. + return []string{"-m", "crc=0,finobt=0"} + } + + return []string{} +} + func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { devname := info.DevName() - args := []string{} - args = append(args, devices.mkfsArgs...) - - args = append(args, devname) - if devices.filesystem == "" { devices.filesystem = determineDefaultFS() } @@ -587,7 +596,11 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { return err } - logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) + args := mkfsOptions(devices.filesystem) + args = append(args, devices.mkfsArgs...) 
+ args = append(args, devname) + + logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args) defer func() { if err != nil { logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) @@ -1188,7 +1201,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error { options = joinMountOptions(options, devices.mountOptions) if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { - return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) + return fmt.Errorf("Error mounting '%s' on '%s': %s\n%v", info.DevName(), fsMountPoint, err, string(dmesg.Dmesg(256))) } defer unix.Unmount(fsMountPoint, unix.MNT_DETACH) @@ -1254,14 +1267,13 @@ func (devices *DeviceSet) setupBaseImage() error { } func setCloseOnExec(name string) { - if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { - for _, i := range fileInfos { - link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) - if link == name { - fd, err := strconv.Atoi(i.Name()) - if err == nil { - unix.CloseOnExec(fd) - } + fileInfos, _ := ioutil.ReadDir("/proc/self/fd") + for _, i := range fileInfos { + link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) + if link == name { + fd, err := strconv.Atoi(i.Name()) + if err == nil { + unix.CloseOnExec(fd) } } } @@ -2380,7 +2392,7 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { - return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) + return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256))) } if fstype == "xfs" && devices.xfsNospaceRetries != "" { diff --git a/components/engine/daemon/graphdriver/devmapper/driver.go b/components/engine/daemon/graphdriver/devmapper/driver.go index 02ee0124f1..f41afa2ae7 100644 --- a/components/engine/daemon/graphdriver/devmapper/driver.go +++ b/components/engine/daemon/graphdriver/devmapper/driver.go @@ -12,6 +12,7 @@ import ( "github.com/sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/devicemapper" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/locker" @@ -69,18 +70,18 @@ func (d *Driver) Status() [][2]string { status := [][2]string{ {"Pool Name", s.PoolName}, - {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, - {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, + {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))}, + {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))}, {"Backing Filesystem", s.BaseDeviceFS}, {"Data file", s.DataFile}, {"Metadata file", s.MetadataFile}, - {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, - {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, - {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, - {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, - {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, - {"Metadata Space Available", fmt.Sprintf("%s", 
units.HumanSize(float64(s.Metadata.Available)))}, - {"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, + {"Data Space Used", units.HumanSize(float64(s.Data.Used))}, + {"Data Space Total", units.HumanSize(float64(s.Data.Total))}, + {"Data Space Available", units.HumanSize(float64(s.Data.Available))}, + {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))}, + {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))}, + {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))}, + {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))}, {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, @@ -159,51 +160,45 @@ func (d *Driver) Remove(id string) error { if err := d.DeviceSet.DeleteDevice(id, false); err != nil { return fmt.Errorf("failed to remove device %s: %v", id, err) } - - mp := path.Join(d.home, "mnt", id) - if err := system.EnsureRemoveAll(mp); err != nil { - return err - } - - return nil + return system.EnsureRemoveAll(path.Join(d.home, "mnt", id)) } // Get mounts a device with given id into the root filesystem -func (d *Driver) Get(id, mountLabel string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { d.locker.Lock(id) defer d.locker.Unlock(id) mp := path.Join(d.home, "mnt", id) rootFs := path.Join(mp, "rootfs") if count := d.ctr.Increment(mp); count > 1 { - return rootFs, nil + return containerfs.NewLocalContainerFS(rootFs), nil } uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { d.ctr.Decrement(mp) - return "", err + return nil, err } // Create the target directories if they don't exist if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { d.ctr.Decrement(mp) - return "", err + return nil, err } if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { d.ctr.Decrement(mp) - return "", err + return nil, err } // Mount the device if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { d.ctr.Decrement(mp) - return "", err + return nil, err } if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { d.ctr.Decrement(mp) d.DeviceSet.UnmountDevice(id, mp) - return "", err + return nil, err } idFile := path.Join(mp, "id") @@ -213,11 +208,11 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { d.ctr.Decrement(mp) d.DeviceSet.UnmountDevice(id, mp) - return "", err + return nil, err } } - return rootFs, nil + return containerfs.NewLocalContainerFS(rootFs), nil } // Put unmounts a device and removes it. diff --git a/components/engine/daemon/graphdriver/driver.go b/components/engine/daemon/graphdriver/driver.go index 94c52094c3..68f9022e1c 100644 --- a/components/engine/daemon/graphdriver/driver.go +++ b/components/engine/daemon/graphdriver/driver.go @@ -12,6 +12,7 @@ import ( "github.com/vbatts/tar-split/tar/storage" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" ) @@ -68,7 +69,7 @@ type ProtoDriver interface { // Get returns the mountpoint for the layered filesystem referred // to by this id. You can optionally specify a mountLabel or "". 
// Returns the absolute path to the mounted layered filesystem. - Get(id, mountLabel string) (dir string, err error) + Get(id, mountLabel string) (fs containerfs.ContainerFS, err error) // Put releases the system resources for the specified id, // e.g, unmounting layered filesystem. Put(id string) error diff --git a/components/engine/daemon/graphdriver/driver_linux.go b/components/engine/daemon/graphdriver/driver_linux.go index 50c8e6a6ef..a2be46b53a 100644 --- a/components/engine/daemon/graphdriver/driver_linux.go +++ b/components/engine/daemon/graphdriver/driver_linux.go @@ -67,6 +67,7 @@ var ( FsMagicAufs: "aufs", FsMagicBtrfs: "btrfs", FsMagicCramfs: "cramfs", + FsMagicEcryptfs: "ecryptfs", FsMagicExtfs: "extfs", FsMagicF2fs: "f2fs", FsMagicGPFS: "gpfs", diff --git a/components/engine/daemon/graphdriver/fsdiff.go b/components/engine/daemon/graphdriver/fsdiff.go index e8c7ff27a4..533c5a7699 100644 --- a/components/engine/daemon/graphdriver/fsdiff.go +++ b/components/engine/daemon/graphdriver/fsdiff.go @@ -18,9 +18,9 @@ var ( ) // NaiveDiffDriver takes a ProtoDriver and adds the -// capability of the Diffing methods which it may or may not -// support on its own. See the comment on the exported -// NewNaiveDiffDriver function below. +// capability of the Diffing methods on the local file system, +// which it may or may not support on its own. See the comment +// on the exported NewNaiveDiffDriver function below. // Notably, the AUFS driver doesn't need to be wrapped like this. type NaiveDiffDriver struct { ProtoDriver @@ -47,10 +47,11 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err startTime := time.Now() driver := gdw.ProtoDriver - layerFs, err := driver.Get(id, "") + layerRootFs, err := driver.Get(id, "") if err != nil { return nil, err } + layerFs := layerRootFs.Path() defer func() { if err != nil { @@ -70,12 +71,14 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err }), nil } - parentFs, err := driver.Get(parent, "") + parentRootFs, err := driver.Get(parent, "") if err != nil { return nil, err } defer driver.Put(parent) + parentFs := parentRootFs.Path() + changes, err := archive.ChangesDirs(layerFs, parentFs) if err != nil { return nil, err @@ -94,7 +97,7 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err // are extracted from tar's with full second precision on modified time. // We need this hack here to make sure calls within same second receive // correct result. - time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) + time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second))) return err }), nil } @@ -104,20 +107,22 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { driver := gdw.ProtoDriver - layerFs, err := driver.Get(id, "") + layerRootFs, err := driver.Get(id, "") if err != nil { return nil, err } defer driver.Put(id) + layerFs := layerRootFs.Path() parentFs := "" if parent != "" { - parentFs, err = driver.Get(parent, "") + parentRootFs, err := driver.Get(parent, "") if err != nil { return nil, err } defer driver.Put(parent) + parentFs = parentRootFs.Path() } return archive.ChangesDirs(layerFs, parentFs) @@ -130,12 +135,13 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size i driver := gdw.ProtoDriver // Mount the root filesystem so we can apply the diff/layer. 
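// The NaiveDiffDriver hunks around this point keep the diffing logic local: they call the
// new Get and immediately flatten the result with .Path(), because archive.ChangesDirs and
// the chroot-based apply only operate on a locally mounted filesystem. The aufs tests above
// add the same shim as driverGet; a sketch of that calling pattern (illustrative):
func localLayerPath(d graphdriver.Driver, id string) (string, error) {
	fs, err := d.Get(id, "")
	if err != nil {
		return "", err
	}
	return fs.Path(), nil
}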
- layerFs, err := driver.Get(id, "") + layerRootFs, err := driver.Get(id, "") if err != nil { return } defer driver.Put(id) + layerFs := layerRootFs.Path() options := &archive.TarOptions{UIDMaps: gdw.uidMaps, GIDMaps: gdw.gidMaps} start := time.Now().UTC() @@ -165,5 +171,5 @@ func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) } defer driver.Put(id) - return archive.ChangesSize(layerFs, changes), nil + return archive.ChangesSize(layerFs.Path(), changes), nil } diff --git a/components/engine/daemon/graphdriver/graphtest/graphbench_unix.go b/components/engine/daemon/graphdriver/graphtest/graphbench_unix.go index def822b9a1..c04394d519 100644 --- a/components/engine/daemon/graphdriver/graphtest/graphbench_unix.go +++ b/components/engine/daemon/graphdriver/graphtest/graphbench_unix.go @@ -3,13 +3,13 @@ package graphtest import ( - "bytes" "io" "io/ioutil" - "path/filepath" "testing" + contdriver "github.com/containerd/continuity/driver" "github.com/docker/docker/pkg/stringid" + "github.com/stretchr/testify/require" ) // DriverBenchExists benchmarks calls to exist @@ -245,15 +245,13 @@ func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, d for i := 0; i < b.N; i++ { // Read content - c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt")) + c, err := contdriver.ReadFile(root, root.Join(root.Path(), "testfile.txt")) if err != nil { b.Fatal(err) } b.StopTimer() - if bytes.Compare(c, content) != 0 { - b.Fatalf("Wrong content in file %v, expected %v", c, content) - } + require.Equal(b, content, c) b.StartTimer() } } diff --git a/components/engine/daemon/graphdriver/graphtest/graphtest_unix.go b/components/engine/daemon/graphdriver/graphtest/graphtest_unix.go index 2f8ae54777..11dff48896 100644 --- a/components/engine/daemon/graphdriver/graphtest/graphtest_unix.go +++ b/components/engine/daemon/graphdriver/graphtest/graphtest_unix.go @@ -97,10 +97,10 @@ func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...str dir, err := driver.Get("empty", "") require.NoError(t, err) - verifyFile(t, dir, 0755|os.ModeDir, 0, 0) + verifyFile(t, dir.Path(), 0755|os.ModeDir, 0, 0) // Verify that the directory is empty - fis, err := readDir(dir) + fis, err := readDir(dir, dir.Path()) require.NoError(t, err) assert.Len(t, fis, 0) @@ -328,9 +328,9 @@ func DriverTestSetQuota(t *testing.T, drivername string) { } quota := uint64(50 * units.MiB) - err = writeRandomFile(path.Join(mountPath, "file"), quota*2) + + err = writeRandomFile(path.Join(mountPath.Path(), "file"), quota*2) if pathError, ok := err.(*os.PathError); ok && pathError.Err != unix.EDQUOT { t.Fatalf("expect write() to fail with %v, got %v", unix.EDQUOT, err) } - } diff --git a/components/engine/daemon/graphdriver/graphtest/testutil.go b/components/engine/daemon/graphdriver/graphtest/testutil.go index 35bf6d17ba..946313296b 100644 --- a/components/engine/daemon/graphdriver/graphtest/testutil.go +++ b/components/engine/daemon/graphdriver/graphtest/testutil.go @@ -3,12 +3,11 @@ package graphtest import ( "bytes" "fmt" - "io/ioutil" "math/rand" "os" - "path" "sort" + "github.com/containerd/continuity/driver" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/stringid" @@ -36,17 +35,17 @@ func addFiles(drv graphdriver.Driver, layer string, seed int64) error { } defer drv.Put(layer) - if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil { + if err := driver.WriteFile(root, 
root.Join(root.Path(), "file-a"), randomContent(64, seed), 0755); err != nil { return err } - if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil { + if err := root.MkdirAll(root.Join(root.Path(), "dir-b"), 0755); err != nil { return err } - if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { + if err := driver.WriteFile(root, root.Join(root.Path(), "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { return err } - return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755) + return driver.WriteFile(root, root.Join(root.Path(), "file-c"), randomContent(128*128, seed+2), 0755) } func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error { @@ -56,12 +55,12 @@ func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) e } defer drv.Put(layer) - fileContent, err := ioutil.ReadFile(path.Join(root, filename)) + fileContent, err := driver.ReadFile(root, root.Join(root.Path(), filename)) if err != nil { return err } - if bytes.Compare(fileContent, content) != 0 { + if !bytes.Equal(fileContent, content) { return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) } @@ -75,7 +74,7 @@ func addFile(drv graphdriver.Driver, layer, filename string, content []byte) err } defer drv.Put(layer) - return ioutil.WriteFile(path.Join(root, filename), content, 0755) + return driver.WriteFile(root, root.Join(root.Path(), filename), content, 0755) } func addDirectory(drv graphdriver.Driver, layer, dir string) error { @@ -85,7 +84,7 @@ func addDirectory(drv graphdriver.Driver, layer, dir string) error { } defer drv.Put(layer) - return os.MkdirAll(path.Join(root, dir), 0755) + return root.MkdirAll(root.Join(root.Path(), dir), 0755) } func removeAll(drv graphdriver.Driver, layer string, names ...string) error { @@ -96,7 +95,7 @@ func removeAll(drv graphdriver.Driver, layer string, names ...string) error { defer drv.Put(layer) for _, filename := range names { - if err := os.RemoveAll(path.Join(root, filename)); err != nil { + if err := root.RemoveAll(root.Join(root.Path(), filename)); err != nil { return err } } @@ -110,8 +109,8 @@ func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error { } defer drv.Put(layer) - if _, err := os.Stat(path.Join(root, filename)); err == nil { - return fmt.Errorf("file still exists: %s", path.Join(root, filename)) + if _, err := root.Stat(root.Join(root.Path(), filename)); err == nil { + return fmt.Errorf("file still exists: %s", root.Join(root.Path(), filename)) } else if !os.IsNotExist(err) { return err } @@ -127,13 +126,13 @@ func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) e defer drv.Put(layer) for i := 0; i < count; i += 100 { - dir := path.Join(root, fmt.Sprintf("directory-%d", i)) - if err := os.MkdirAll(dir, 0755); err != nil { + dir := root.Join(root.Path(), fmt.Sprintf("directory-%d", i)) + if err := root.MkdirAll(dir, 0755); err != nil { return err } for j := 0; i+j < count && j < 100; j++ { - file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) - if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil { + file := root.Join(dir, fmt.Sprintf("file-%d", i+j)) + if err := driver.WriteFile(root, file, randomContent(64, seed+int64(i+j)), 0755); err != nil { return err } } @@ -152,7 +151,7 @@ func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64 changes := []archive.Change{} for i := 0; i < 
count; i += 100 { archiveRoot := fmt.Sprintf("/directory-%d", i) - if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil { + if err := root.MkdirAll(root.Join(root.Path(), archiveRoot), 0755); err != nil { return nil, err } for j := 0; i+j < count && j < 100; j++ { @@ -166,23 +165,23 @@ func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64 switch j % 3 { // Update file case 0: - change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) change.Kind = archive.ChangeModify - if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + if err := driver.WriteFile(root, root.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { return nil, err } // Add file case 1: - change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) + change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) change.Kind = archive.ChangeAdd - if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { + if err := driver.WriteFile(root, root.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { return nil, err } // Remove file case 2: - change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) + change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) change.Kind = archive.ChangeDelete - if err := os.Remove(path.Join(root, change.Path)); err != nil { + if err := root.Remove(root.Join(root.Path(), change.Path)); err != nil { return nil, err } } @@ -201,17 +200,17 @@ func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) defer drv.Put(layer) for i := 0; i < count; i += 100 { - dir := path.Join(root, fmt.Sprintf("directory-%d", i)) + dir := root.Join(root.Path(), fmt.Sprintf("directory-%d", i)) for j := 0; i+j < count && j < 100; j++ { - file := path.Join(dir, fmt.Sprintf("file-%d", i+j)) - fileContent, err := ioutil.ReadFile(file) + file := root.Join(dir, fmt.Sprintf("file-%d", i+j)) + fileContent, err := driver.ReadFile(root, file) if err != nil { return err } content := randomContent(64, seed+int64(i+j)) - if bytes.Compare(fileContent, content) != 0 { + if !bytes.Equal(fileContent, content) { return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) } } @@ -254,17 +253,17 @@ func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error { } defer drv.Put(layer) - if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil { + if err := driver.WriteFile(root, root.Join(root.Path(), "top-id"), []byte(layer), 0755); err != nil { return err } - layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) - if err := os.MkdirAll(layerDir, 0755); err != nil { + layerDir := root.Join(root.Path(), fmt.Sprintf("layer-%d", i)) + if err := root.MkdirAll(layerDir, 0755); err != nil { return err } - if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil { + if err := driver.WriteFile(root, root.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil { return err } - if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil { + if err := driver.WriteFile(root, root.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil { return err } @@ -295,26 +294,26 @@ func checkManyLayers(drv graphdriver.Driver, layer string, count int) 
error { } defer drv.Put(layer) - layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id")) + layerIDBytes, err := driver.ReadFile(root, root.Join(root.Path(), "top-id")) if err != nil { return err } - if bytes.Compare(layerIDBytes, []byte(layer)) != 0 { + if !bytes.Equal(layerIDBytes, []byte(layer)) { return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer)) } for i := count; i > 0; i-- { - layerDir := path.Join(root, fmt.Sprintf("layer-%d", i)) + layerDir := root.Join(root.Path(), fmt.Sprintf("layer-%d", i)) - thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id")) + thisLayerIDBytes, err := driver.ReadFile(root, root.Join(layerDir, "layer-id")) if err != nil { return err } - if bytes.Compare(thisLayerIDBytes, layerIDBytes) != 0 { + if !bytes.Equal(thisLayerIDBytes, layerIDBytes) { return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes) } - layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id")) + layerIDBytes, err = driver.ReadFile(root, root.Join(layerDir, "parent-id")) if err != nil { return err } @@ -322,11 +321,11 @@ func checkManyLayers(drv graphdriver.Driver, layer string, count int) error { return nil } -// readDir reads a directory just like ioutil.ReadDir() +// readDir reads a directory just like driver.ReadDir() // then hides specific files (currently "lost+found") // so the tests don't "see" it -func readDir(dir string) ([]os.FileInfo, error) { - a, err := ioutil.ReadDir(dir) +func readDir(r driver.Driver, dir string) ([]os.FileInfo, error) { + a, err := driver.ReadDir(r, dir) if err != nil { return nil, err } diff --git a/components/engine/daemon/graphdriver/graphtest/testutil_unix.go b/components/engine/daemon/graphdriver/graphtest/testutil_unix.go index 96474487aa..b5dec43fb3 100644 --- a/components/engine/daemon/graphdriver/graphtest/testutil_unix.go +++ b/components/engine/daemon/graphdriver/graphtest/testutil_unix.go @@ -3,12 +3,11 @@ package graphtest import ( - "io/ioutil" "os" - "path" "syscall" "testing" + contdriver "github.com/containerd/continuity/driver" "github.com/docker/docker/daemon/graphdriver" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -40,31 +39,31 @@ func createBase(t testing.TB, driver graphdriver.Driver, name string) { err := driver.CreateReadWrite(name, "", nil) require.NoError(t, err) - dir, err := driver.Get(name, "") + dirFS, err := driver.Get(name, "") require.NoError(t, err) defer driver.Put(name) - subdir := path.Join(dir, "a subdir") - require.NoError(t, os.Mkdir(subdir, 0705|os.ModeSticky)) - require.NoError(t, os.Chown(subdir, 1, 2)) + subdir := dirFS.Join(dirFS.Path(), "a subdir") + require.NoError(t, dirFS.Mkdir(subdir, 0705|os.ModeSticky)) + require.NoError(t, dirFS.Lchown(subdir, 1, 2)) - file := path.Join(dir, "a file") - err = ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid) + file := dirFS.Join(dirFS.Path(), "a file") + err = contdriver.WriteFile(dirFS, file, []byte("Some data"), 0222|os.ModeSetuid) require.NoError(t, err) } func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { - dir, err := driver.Get(name, "") + dirFS, err := driver.Get(name, "") require.NoError(t, err) defer driver.Put(name) - subdir := path.Join(dir, "a subdir") + subdir := dirFS.Join(dirFS.Path(), "a subdir") verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) - file := path.Join(dir, "a file") + file := dirFS.Join(dirFS.Path(), "a file") verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) 
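The test-utility changes above all follow one pattern: the string path previously returned by driver.Get() is now a containerfs.ContainerFS, so path.Join/ioutil/os calls become calls on the filesystem object (or on the containerd/continuity driver helpers) with paths built via root.Join(root.Path(), ...). A minimal sketch of that usage under stated assumptions — the writeMarker helper, its package name and the marker file are illustrative, not part of this change:

package graphtestexample

import (
	contdriver "github.com/containerd/continuity/driver"
	"github.com/docker/docker/daemon/graphdriver"
)

// writeMarker writes and reads back a small file in a mounted layer using only
// the ContainerFS returned by Get, never a raw host path, so the same code
// works for local graphdrivers and for remote ones such as lcow.
func writeMarker(drv graphdriver.Driver, layer string) error {
	root, err := drv.Get(layer, "") // a containerfs.ContainerFS, not a string
	if err != nil {
		return err
	}
	defer drv.Put(layer)

	name := root.Join(root.Path(), "marker.txt")
	if err := contdriver.WriteFile(root, name, []byte("hello"), 0644); err != nil {
		return err
	}
	_, err = contdriver.ReadFile(root, name)
	return err
}
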
- files, err := readDir(dir) + files, err := readDir(dirFS, dirFS.Path()) require.NoError(t, err) assert.Len(t, files, 2) } diff --git a/components/engine/daemon/graphdriver/lcow/lcow.go b/components/engine/daemon/graphdriver/lcow/lcow.go index 86beb3d5f0..9ae04b1beb 100644 --- a/components/engine/daemon/graphdriver/lcow/lcow.go +++ b/components/engine/daemon/graphdriver/lcow/lcow.go @@ -13,7 +13,7 @@ // operations. The downside of safe-mode is that operations are slower as // a new service utility VM has to be started and torn-down when needed. // -// Options (needs official documentation, but lets get full functionality first...) @jhowardmsft +// Options: // // The following options are read by the graphdriver itself: // @@ -23,33 +23,33 @@ // // * lcow.sandboxsize - Specifies a custom sandbox size in GB for starting a container // -- Possible values: >= default sandbox size (opengcs defined, currently 20) -// -- Default if ommitted: 20 +// -- Default if omitted: 20 // // The following options are read by opengcs: // // * lcow.kirdpath - Specifies a custom path to a kernel/initrd pair // -- Possible values: Any local path that is not a mapped drive -// -- Default if ommitted: %ProgramFiles%\Linux Containers +// -- Default if omitted: %ProgramFiles%\Linux Containers // // * lcow.kernel - Specifies a custom kernel file located in the `lcow.kirdpath` path // -- Possible values: Any valid filename -// -- Default if ommitted: bootx64.efi +// -- Default if omitted: bootx64.efi // // * lcow.initrd - Specifies a custom initrd file located in the `lcow.kirdpath` path // -- Possible values: Any valid filename -// -- Default if ommitted: initrd.img +// -- Default if omitted: initrd.img // // * lcow.bootparameters - Specifies additional boot parameters for booting in kernel+initrd mode // -- Possible values: Any valid linux kernel boot options -// -- Default if ommitted: +// -- Default if omitted: // // * lcow.vhdx - Specifies a custom vhdx file to boot (instead of a kernel+initrd) // -- Possible values: Any valid filename -// -- Default if ommitted: uvm.vhdx under `lcow.kirdpath` +// -- Default if omitted: uvm.vhdx under `lcow.kirdpath` // // * lcow.timeout - Specifies a timeout for utility VM operations in seconds // -- Possible values: >=0 -// -- Default if ommitted: 300 +// -- Default if omitted: 300 // TODO: Grab logs from SVM at terminate or errors @@ -65,12 +65,14 @@ import ( "strconv" "strings" "sync" + "syscall" "time" "github.com/Microsoft/hcsshim" "github.com/Microsoft/opengcs/client" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/system" @@ -106,72 +108,24 @@ const ( // scratchDirectory is the sub-folder under the driver's data-root used for scratch VHDs in service VMs scratchDirectory = "scratch" + + // errOperationPending is the HRESULT returned by the HCS when the VM termination operation is still pending. + errOperationPending syscall.Errno = 0xc0370103 ) -// cacheItem is our internal structure representing an item in our local cache -// of things that have been mounted. 
-type cacheItem struct { - sync.Mutex // Protects operations performed on this item - uvmPath string // Path in utility VM - hostPath string // Path on host - refCount int // How many times its been mounted - isSandbox bool // True if a sandbox - isMounted bool // True when mounted in a service VM -} - -// setIsMounted is a helper function for a cacheItem which does exactly what it says -func (ci *cacheItem) setIsMounted() { - logrus.Debugf("locking cache item for set isMounted") - ci.Lock() - defer ci.Unlock() - ci.isMounted = true - logrus.Debugf("set isMounted on cache item") -} - -// incrementRefCount is a helper function for a cacheItem which does exactly what it says -func (ci *cacheItem) incrementRefCount() { - logrus.Debugf("locking cache item for increment") - ci.Lock() - defer ci.Unlock() - ci.refCount++ - logrus.Debugf("incremented refcount on cache item %+v", ci) -} - -// decrementRefCount is a helper function for a cacheItem which does exactly what it says -func (ci *cacheItem) decrementRefCount() int { - logrus.Debugf("locking cache item for decrement") - ci.Lock() - defer ci.Unlock() - ci.refCount-- - logrus.Debugf("decremented refcount on cache item %+v", ci) - return ci.refCount -} - -// serviceVMItem is our internal structure representing an item in our -// map of service VMs we are maintaining. -type serviceVMItem struct { - sync.Mutex // Serialises operations being performed in this service VM. - scratchAttached bool // Has a scratch been attached? - config *client.Config // Represents the service VM item. -} - // Driver represents an LCOW graph driver. type Driver struct { - dataRoot string // Root path on the host where we are storing everything. - cachedSandboxFile string // Location of the local default-sized cached sandbox. - cachedSandboxMutex sync.Mutex // Protects race conditions from multiple threads creating the cached sandbox. - cachedScratchFile string // Location of the local cached empty scratch space. - cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch. - options []string // Graphdriver options we are initialised with. - serviceVmsMutex sync.Mutex // Protects add/updates/delete to the serviceVMs map. - serviceVms map[string]*serviceVMItem // Map of the configs representing the service VM(s) we are running. - globalMode bool // Indicates if running in an unsafe/global service VM mode. + dataRoot string // Root path on the host where we are storing everything. + cachedSandboxFile string // Location of the local default-sized cached sandbox. + cachedSandboxMutex sync.Mutex // Protects race conditions from multiple threads creating the cached sandbox. + cachedScratchFile string // Location of the local cached empty scratch space. + cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch. + options []string // Graphdriver options we are initialised with. + globalMode bool // Indicates if running in an unsafe/global service VM mode. // NOTE: It is OK to use a cache here because Windows does not support // restoring containers when the daemon dies. - - cacheMutex sync.Mutex // Protects add/update/deletes to cache. - cache map[string]*cacheItem // Map holding a cache of all the IDs we've mounted/unmounted. + serviceVms *serviceVMMap // Map of the configs representing the service VM(s) we are running. 
} // layerDetails is the structure returned by a helper function `getLayerDetails` @@ -204,9 +158,10 @@ func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphd options: options, cachedSandboxFile: filepath.Join(cd, sandboxFilename), cachedScratchFile: filepath.Join(cd, scratchFilename), - cache: make(map[string]*cacheItem), - serviceVms: make(map[string]*serviceVMItem), - globalMode: false, + serviceVms: &serviceVMMap{ + svms: make(map[string]*serviceVMMapItem), + }, + globalMode: false, } // Looks for relevant options @@ -248,53 +203,59 @@ func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphd return d, nil } +func (d *Driver) getVMID(id string) string { + if d.globalMode { + return svmGlobalID + } + return id +} + // startServiceVMIfNotRunning starts a service utility VM if it is not currently running. // It can optionally be started with a mapped virtual disk. Returns a opengcs config structure // representing the VM. -func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedVirtualDisk, context string) (*serviceVMItem, error) { +func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.MappedVirtualDisk, context string) (_ *serviceVM, err error) { // Use the global ID if in global mode - if d.globalMode { - id = svmGlobalID - } + id = d.getVMID(id) title := fmt.Sprintf("lcowdriver: startservicevmifnotrunning %s:", id) - // Make sure thread-safe when interrogating the map - logrus.Debugf("%s taking serviceVmsMutex", title) - d.serviceVmsMutex.Lock() + // Attempt to add ID to the service vm map + logrus.Debugf("%s: Adding entry to service vm map", title) + svm, exists, err := d.serviceVms.add(id) + if err != nil && err == errVMisTerminating { + // VM is in the process of terminating. Wait until it's done and and then try again + logrus.Debugf("%s: VM with current ID still in the process of terminating: %s", title, id) + if err := svm.getStopError(); err != nil { + logrus.Debugf("%s: VM %s did not stop succesfully: %s", title, id, err) + return nil, err + } + return d.startServiceVMIfNotRunning(id, mvdToAdd, context) + } else if err != nil { + logrus.Debugf("%s: failed to add service vm to map: %s", err) + return nil, fmt.Errorf("%s: failed to add to service vm map: %s", title, err) + } - // Nothing to do if it's already running except add the mapped drive if supplied. - if svm, ok := d.serviceVms[id]; ok { - logrus.Debugf("%s exists, releasing serviceVmsMutex", title) - d.serviceVmsMutex.Unlock() - - if mvdToAdd != nil { - logrus.Debugf("hot-adding %s to %s", mvdToAdd.HostPath, mvdToAdd.ContainerPath) - - // Ensure the item is locked while doing this - logrus.Debugf("%s locking serviceVmItem %s", title, svm.config.Name) - svm.Lock() - - if err := svm.config.HotAddVhd(mvdToAdd.HostPath, mvdToAdd.ContainerPath, false, true); err != nil { - logrus.Debugf("%s releasing serviceVmItem %s on hot-add failure %s", title, svm.config.Name, err) - svm.Unlock() - return nil, fmt.Errorf("%s hot add %s to %s failed: %s", title, mvdToAdd.HostPath, mvdToAdd.ContainerPath, err) - } - - logrus.Debugf("%s releasing serviceVmItem %s", title, svm.config.Name) - svm.Unlock() + if exists { + // Service VM is already up and running. In this case, just hot add the vhds. + logrus.Debugf("%s: service vm already exists. 
Just hot adding: %+v", title, mvdToAdd) + if err := svm.hotAddVHDs(mvdToAdd...); err != nil { + logrus.Debugf("%s: failed to hot add vhds on service vm creation: %s", title, err) + return nil, fmt.Errorf("%s: failed to hot add vhds on service vm: %s", title, err) } return svm, nil } - // Release the lock early - logrus.Debugf("%s releasing serviceVmsMutex", title) - d.serviceVmsMutex.Unlock() + // We are the first service for this id, so we need to start it + logrus.Debugf("%s: service vm doesn't exist. Now starting it up: %s", title, id) - // So we are starting one. First need an enpty structure. - svm := &serviceVMItem{ - config: &client.Config{}, - } + defer func() { + // Signal that start has finished, passing in the error if any. + svm.signalStartFinished(err) + if err != nil { + // We added a ref to the VM, since we failed, we should delete the ref. + d.terminateServiceVM(id, "error path on startServiceVMIfNotRunning", false) + } + }() // Generate a default configuration if err := svm.config.GenerateDefault(d.options); err != nil { @@ -335,12 +296,14 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedV svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd) svm.scratchAttached = true } + logrus.Debugf("%s releasing cachedScratchMutex", title) d.cachedScratchMutex.Unlock() // If requested to start it with a mapped virtual disk, add it now. - if mvdToAdd != nil { - svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, *mvdToAdd) + svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvdToAdd...) + for _, mvd := range svm.config.MappedVirtualDisks { + svm.attachedVHDs[mvd.HostPath] = 1 } // Start it. @@ -349,108 +312,80 @@ func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedV return nil, fmt.Errorf("failed to start service utility VM (%s): %s", context, err) } - // As it's now running, add it to the map, checking for a race where another - // thread has simultaneously tried to start it. - logrus.Debugf("%s locking serviceVmsMutex for insertion", title) - d.serviceVmsMutex.Lock() - if svm, ok := d.serviceVms[id]; ok { - logrus.Debugf("%s releasing serviceVmsMutex after insertion but exists", title) - d.serviceVmsMutex.Unlock() - return svm, nil - } - d.serviceVms[id] = svm - logrus.Debugf("%s releasing serviceVmsMutex after insertion", title) - d.serviceVmsMutex.Unlock() + // defer function to terminate the VM if the next steps fail + defer func() { + if err != nil { + waitTerminate(svm, fmt.Sprintf("startServiceVmIfNotRunning: %s (%s)", id, context)) + } + }() // Now we have a running service VM, we can create the cached scratch file if it doesn't exist. logrus.Debugf("%s locking cachedScratchMutex", title) d.cachedScratchMutex.Lock() if _, err := os.Stat(d.cachedScratchFile); err != nil { - logrus.Debugf("%s (%s): creating an SVM scratch - locking serviceVM", title, context) - svm.Lock() + logrus.Debugf("%s (%s): creating an SVM scratch", title, context) + + // Don't use svm.CreateExt4Vhdx since that only works when the service vm is setup, + // but we're still in that process right now. 
if err := svm.config.CreateExt4Vhdx(scratchTargetFile, client.DefaultVhdxSizeGB, d.cachedScratchFile); err != nil { - logrus.Debugf("%s (%s): releasing serviceVM on error path from CreateExt4Vhdx: %s", title, context, err) - svm.Unlock() logrus.Debugf("%s (%s): releasing cachedScratchMutex on error path", title, context) d.cachedScratchMutex.Unlock() - - // Do a force terminate and remove it from the map on failure, ignoring any errors - if err2 := d.terminateServiceVM(id, "error path from CreateExt4Vhdx", true); err2 != nil { - logrus.Warnf("failed to terminate service VM on error path from CreateExt4Vhdx: %s", err2) - } - + logrus.Debugf("%s: failed to create vm scratch %s: %s", title, scratchTargetFile, err) return nil, fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err) } - logrus.Debugf("%s (%s): releasing serviceVM after %s created and cached to %s", title, context, scratchTargetFile, d.cachedScratchFile) - svm.Unlock() } logrus.Debugf("%s (%s): releasing cachedScratchMutex", title, context) d.cachedScratchMutex.Unlock() // Hot-add the scratch-space if not already attached if !svm.scratchAttached { - logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s - locking serviceVM", context, scratchTargetFile) - svm.Lock() - if err := svm.config.HotAddVhd(scratchTargetFile, toolsScratchPath, false, true); err != nil { - logrus.Debugf("%s (%s): releasing serviceVM on error path of HotAddVhd: %s", title, context, err) - svm.Unlock() - - // Do a force terminate and remove it from the map on failure, ignoring any errors - if err2 := d.terminateServiceVM(id, "error path from HotAddVhd", true); err2 != nil { - logrus.Warnf("failed to terminate service VM on error path from HotAddVhd: %s", err2) - } - + logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s", context, scratchTargetFile) + if err := svm.hotAddVHDsAtStart(hcsshim.MappedVirtualDisk{ + HostPath: scratchTargetFile, + ContainerPath: toolsScratchPath, + CreateInUtilityVM: true, + }); err != nil { + logrus.Debugf("%s: failed to hot-add scratch %s: %s", title, scratchTargetFile, err) return nil, fmt.Errorf("failed to hot-add %s failed: %s", scratchTargetFile, err) } - logrus.Debugf("%s (%s): releasing serviceVM", title, context) - svm.Unlock() + svm.scratchAttached = true } logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) success", context) return svm, nil } -// getServiceVM returns the appropriate service utility VM instance, optionally -// deleting it from the map (but not the global one) -func (d *Driver) getServiceVM(id string, deleteFromMap bool) (*serviceVMItem, error) { - logrus.Debugf("lcowdriver: getservicevm:locking serviceVmsMutex") - d.serviceVmsMutex.Lock() - defer func() { - logrus.Debugf("lcowdriver: getservicevm:releasing serviceVmsMutex") - d.serviceVmsMutex.Unlock() - }() - if d.globalMode { - id = svmGlobalID - } - if _, ok := d.serviceVms[id]; !ok { - return nil, fmt.Errorf("getservicevm for %s failed as not found", id) - } - svm := d.serviceVms[id] - if deleteFromMap && id != svmGlobalID { - logrus.Debugf("lcowdriver: getservicevm: removing %s from map", id) - delete(d.serviceVms, id) - } - return svm, nil -} - -// terminateServiceVM terminates a service utility VM if its running, but does nothing -// when in global mode as it's lifetime is limited to that of the daemon. 
-func (d *Driver) terminateServiceVM(id, context string, force bool) error { - +// terminateServiceVM terminates a service utility VM if its running if it's, +// not being used by any goroutine, but does nothing when in global mode as it's +// lifetime is limited to that of the daemon. If the force flag is set, then +// the VM will be killed regardless of the ref count or if it's global. +func (d *Driver) terminateServiceVM(id, context string, force bool) (err error) { // We don't do anything in safe mode unless the force flag has been passed, which // is only the case for cleanup at driver termination. - if d.globalMode { - if !force { - logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context) - return nil - } - id = svmGlobalID + if d.globalMode && !force { + logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context) + return nil } - // Get the service VM and delete it from the map - svm, err := d.getServiceVM(id, true) - if err != nil { - return err + id = d.getVMID(id) + + var svm *serviceVM + var lastRef bool + if !force { + // In the not force case, we ref count + svm, lastRef, err = d.serviceVms.decrementRefCount(id) + } else { + // In the force case, we ignore the ref count and just set it to 0 + svm, err = d.serviceVms.setRefCountZero(id) + lastRef = true + } + + if err == errVMUnknown { + return nil + } else if err == errVMisTerminating { + return svm.getStopError() + } else if !lastRef { + return nil } // We run the deletion of the scratch as a deferred function to at least attempt @@ -459,29 +394,67 @@ func (d *Driver) terminateServiceVM(id, context string, force bool) error { if svm.scratchAttached { scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id)) logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - deleting scratch %s", id, context, scratchTargetFile) - if err := os.Remove(scratchTargetFile); err != nil { - logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, err) + if errRemove := os.Remove(scratchTargetFile); errRemove != nil { + logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, errRemove) + err = errRemove } } + + // This function shouldn't actually return error unless there is a bug + if errDelete := d.serviceVms.deleteID(id); errDelete != nil { + logrus.Warnf("failed to service vm from svm map %s (%s): %s", id, context, errDelete) + } + + // Signal that this VM has stopped + svm.signalStopFinished(err) }() - // Nothing to do if it's not running - if svm.config.Uvm != nil { - logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - calling terminate", id, context) - if err := svm.config.Uvm.Terminate(); err != nil { - return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err) - } + // Now it's possible that the serivce VM failed to start and now we are trying to termiante it. + // In this case, we will relay the error to the goroutines waiting for this vm to stop. 
+ if err := svm.getStartError(); err != nil { + logrus.Debugf("lcowdriver: terminateservicevm: %s had failed to start up: %s", id, err) + return err + } - logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - waiting for utility VM to terminate", id, context) - if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil { - return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err) - } + if err := waitTerminate(svm, fmt.Sprintf("terminateservicevm: %s (%s)", id, context)); err != nil { + return err } logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context) return nil } +func waitTerminate(svm *serviceVM, context string) error { + if svm.config == nil { + return fmt.Errorf("lcowdriver: waitTermiante: Nil utility VM. %s", context) + } + + logrus.Debugf("lcowdriver: waitTerminate: Calling terminate: %s", context) + if err := svm.config.Uvm.Terminate(); err != nil { + // We might get operation still pending from the HCS. In that case, we shouldn't return + // an error since we call wait right after. + underlyingError := err + if conterr, ok := err.(*hcsshim.ContainerError); ok { + underlyingError = conterr.Err + } + + if syscallErr, ok := underlyingError.(syscall.Errno); ok { + underlyingError = syscallErr + } + + if underlyingError != errOperationPending { + return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err) + } + logrus.Debugf("lcowdriver: waitTerminate: uvm.Terminate() returned operation pending (%s)", context) + } + + logrus.Debugf("lcowdriver: waitTerminate: (%s) - waiting for utility VM to terminate", context) + if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil { + return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err) + } + return nil +} + // String returns the string representation of a driver. This should match // the name the graph driver has been registered with. func (d *Driver) String() string { @@ -571,25 +544,18 @@ func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts }() } - // Synchronise the operation in the service VM. - logrus.Debugf("%s: locking svm for sandbox creation", title) - svm.Lock() - defer func() { - logrus.Debugf("%s: releasing svm for sandbox creation", title) - svm.Unlock() - }() - // Make sure we don't write to our local cached copy if this is for a non-default size request. targetCacheFile := d.cachedSandboxFile if sandboxSize != client.DefaultVhdxSizeGB { targetCacheFile = "" } - // Actually do the creation. 
- if err := svm.config.CreateExt4Vhdx(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil { + // Create the ext4 vhdx + logrus.Debugf("%s: creating sandbox ext4 vhdx", title) + if err := svm.createExt4VHDX(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil { + logrus.Debugf("%s: failed to create sandbox vhdx for %s: %s", title, id, err) return err } - return nil } @@ -638,6 +604,21 @@ func (d *Driver) Remove(id string) error { layerPath := d.dir(id) logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath) + + // Unmount all the layers + err := d.Put(id) + if err != nil { + logrus.Debugf("lcowdriver: remove id %s: failed to unmount: %s", id, err) + return err + } + + // for non-global case just kill the vm + if !d.globalMode { + if err := d.terminateServiceVM(id, fmt.Sprintf("Remove %s", id), true); err != nil { + return err + } + } + if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { return err } @@ -659,43 +640,24 @@ func (d *Driver) Remove(id string) error { // For optimisation, we don't actually mount the filesystem (which in our // case means [hot-]adding it to a service VM. But we track that and defer // the actual adding to the point we need to access it. -func (d *Driver) Get(id, mountLabel string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { title := fmt.Sprintf("lcowdriver: get: %s", id) logrus.Debugf(title) - // Work out what we are working on - ld, err := getLayerDetails(d.dir(id)) + // Generate the mounts needed for the defered operation. + disks, err := d.getAllMounts(id) if err != nil { - logrus.Debugf("%s failed to get layer details from %s: %s", title, d.dir(id), err) - return "", fmt.Errorf("%s failed to open layer or sandbox VHD to open in %s: %s", title, d.dir(id), err) + logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err) + return nil, fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err) } - logrus.Debugf("%s %s, size %d, isSandbox %t", title, ld.filename, ld.size, ld.isSandbox) - // Add item to cache, or update existing item, but ensure we have the - // lock while updating items. - logrus.Debugf("%s: locking cacheMutex", title) - d.cacheMutex.Lock() - var ci *cacheItem - if item, ok := d.cache[id]; !ok { - // The item is not currently in the cache. - ci = &cacheItem{ - refCount: 1, - isSandbox: ld.isSandbox, - hostPath: ld.filename, - uvmPath: fmt.Sprintf("/mnt/%s", id), - isMounted: false, // we defer this as an optimisation - } - d.cache[id] = ci - logrus.Debugf("%s: added cache item %+v", title, ci) - } else { - // Increment the reference counter in the cache. - item.incrementRefCount() - } - logrus.Debugf("%s: releasing cacheMutex", title) - d.cacheMutex.Unlock() - - logrus.Debugf("%s %s success. %s: %+v: size %d", title, id, d.dir(id), ci, ld.size) - return d.dir(id), nil + logrus.Debugf("%s: got layer mounts: %+v", title, disks) + return &lcowfs{ + root: unionMountName(disks), + d: d, + mappedDisks: disks, + vmID: d.getVMID(id), + }, nil } // Put does the reverse of get. 
If there are no more references to @@ -703,56 +665,45 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { func (d *Driver) Put(id string) error { title := fmt.Sprintf("lcowdriver: put: %s", id) - logrus.Debugf("%s: locking cacheMutex", title) - d.cacheMutex.Lock() - item, ok := d.cache[id] - if !ok { - logrus.Debugf("%s: releasing cacheMutex on error path", title) - d.cacheMutex.Unlock() - return fmt.Errorf("%s possible ref-count error, or invalid id was passed to the graphdriver. Cannot handle id %s as it's not in the cache", title, id) - } - - // Decrement the ref-count, and nothing more to do if still in use. - if item.decrementRefCount() > 0 { - logrus.Debugf("%s: releasing cacheMutex. Cache item is still in use", title) - d.cacheMutex.Unlock() + // Get the service VM that we need to remove from + svm, err := d.serviceVms.get(d.getVMID(id)) + if err == errVMUnknown { return nil + } else if err == errVMisTerminating { + return svm.getStopError() } - // Remove from the cache map. - delete(d.cache, id) - logrus.Debugf("%s: releasing cacheMutex. Ref count on cache item has dropped to zero, removed from cache", title) - d.cacheMutex.Unlock() + // Generate the mounts that Get() might have mounted + disks, err := d.getAllMounts(id) + if err != nil { + logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err) + return fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err) + } - // If we have done a mount and we are in global mode, then remove it. We don't - // need to remove in safe mode as the service VM is going to be torn down anyway. - if d.globalMode { - logrus.Debugf("%s: locking cache item at zero ref-count", title) - item.Lock() - defer func() { - logrus.Debugf("%s: releasing cache item at zero ref-count", title) - item.Unlock() - }() - if item.isMounted { - svm, err := d.getServiceVM(id, false) - if err != nil { - return err - } + // Now, we want to perform the unmounts, hot-remove and stop the service vm. + // We want to go though all the steps even if we have an error to clean up properly + err = svm.deleteUnionMount(unionMountName(disks), disks...) + if err != nil { + logrus.Debugf("%s failed to delete union mount %s: %s", title, id, err) + } - logrus.Debugf("%s: Hot-Removing %s. Locking svm", title, item.hostPath) - svm.Lock() - if err := svm.config.HotRemoveVhd(item.hostPath); err != nil { - logrus.Debugf("%s: releasing svm on error path", title) - svm.Unlock() - return fmt.Errorf("%s failed to hot-remove %s from global service utility VM: %s", title, item.hostPath, err) - } - logrus.Debugf("%s: releasing svm", title) - svm.Unlock() + err1 := svm.hotRemoveVHDs(disks...) + if err1 != nil { + logrus.Debugf("%s failed to hot remove vhds %s: %s", title, id, err) + if err == nil { + err = err1 } } - logrus.Debugf("%s %s: refCount 0. %s (%s) completed successfully", title, id, item.hostPath, item.uvmPath) - return nil + err1 = d.terminateServiceVM(id, fmt.Sprintf("Put %s", id), false) + if err1 != nil { + logrus.Debugf("%s failed to terminate service vm %s: %s", title, id, err1) + if err == nil { + err = err1 + } + } + logrus.Debugf("Put succeeded on id %s", id) + return err } // Cleanup ensures the information the driver stores is properly removed. 
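Taken together, the Get/Put changes give the LCOW driver lazy mounting: Get only computes the layer chain and returns an lcowfs, the first remote filesystem operation starts (or re-uses) the service VM, hot-adds the layer VHDs and creates the union mount, and Put unwinds all of that and drops the service-VM reference. A sketch of how a caller sees this, assuming the hypothetical readFromLayer helper and package name below:

package lcowexample

import (
	contdriver "github.com/containerd/continuity/driver"
	"github.com/docker/docker/daemon/graphdriver"
)

// readFromLayer reads one file out of an LCOW layer. Get returns immediately
// without touching the utility VM; the ReadFile call is what triggers
// startServiceVMIfNotRunning, the VHD hot-adds and the overlay union mount.
func readFromLayer(d graphdriver.Driver, id, name string) ([]byte, error) {
	fs, err := d.Get(id, "") // an lcowfs describing the layer chain
	if err != nil {
		return nil, err
	}
	// Put deletes the union mount, hot-removes the VHDs and decrements the
	// service VM's ref count (terminating it on the last reference).
	defer d.Put(id)

	return contdriver.ReadFile(fs, fs.Join(fs.Path(), name))
}
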
@@ -761,15 +712,6 @@ func (d *Driver) Put(id string) error { func (d *Driver) Cleanup() error { title := "lcowdriver: cleanup" - d.cacheMutex.Lock() - for k, v := range d.cache { - logrus.Debugf("%s cache item: %s: %+v", title, k, v) - if v.refCount > 0 { - logrus.Warnf("%s leaked %s: %+v", title, k, v) - } - } - d.cacheMutex.Unlock() - items, err := ioutil.ReadDir(d.dataRoot) if err != nil { if os.IsNotExist(err) { @@ -794,8 +736,8 @@ func (d *Driver) Cleanup() error { // Cleanup any service VMs we have running, along with their scratch spaces. // We don't take the lock for this as it's taken in terminateServiceVm. - for k, v := range d.serviceVms { - logrus.Debugf("%s svm: %s: %+v", title, k, v) + for k, v := range d.serviceVms.svms { + logrus.Debugf("%s svm entry: %s: %+v", title, k, v) d.terminateServiceVM(k, "cleanup", true) } @@ -812,65 +754,41 @@ func (d *Driver) Cleanup() error { func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { title := fmt.Sprintf("lcowdriver: diff: %s", id) - logrus.Debugf("%s: locking cacheMutex", title) - d.cacheMutex.Lock() - if _, ok := d.cache[id]; !ok { - logrus.Debugf("%s: releasing cacheMutex on error path", title) - d.cacheMutex.Unlock() - return nil, fmt.Errorf("%s fail as %s is not in the cache", title, id) - } - ci := d.cache[id] - logrus.Debugf("%s: releasing cacheMutex", title) - d.cacheMutex.Unlock() - - // Stat to get size - logrus.Debugf("%s: locking cacheItem", title) - ci.Lock() - fileInfo, err := os.Stat(ci.hostPath) + // Get VHDX info + ld, err := getLayerDetails(d.dir(id)) if err != nil { - logrus.Debugf("%s: releasing cacheItem on error path", title) - ci.Unlock() - return nil, fmt.Errorf("%s failed to stat %s: %s", title, ci.hostPath, err) + logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err) + return nil, err } - logrus.Debugf("%s: releasing cacheItem", title) - ci.Unlock() // Start the SVM with a mapped virtual disk. Note that if the SVM is - // already runing and we are in global mode, this will be + // already running and we are in global mode, this will be // hot-added. - mvd := &hcsshim.MappedVirtualDisk{ - HostPath: ci.hostPath, - ContainerPath: ci.uvmPath, + mvd := hcsshim.MappedVirtualDisk{ + HostPath: ld.filename, + ContainerPath: hostToGuest(ld.filename), CreateInUtilityVM: true, ReadOnly: true, } logrus.Debugf("%s: starting service VM", title) - svm, err := d.startServiceVMIfNotRunning(id, mvd, fmt.Sprintf("diff %s", id)) + svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diff %s", id)) if err != nil { return nil, err } - // Set `isMounted` for the cache item. Note that we re-scan the cache - // at this point as it's possible the cacheItem changed during the long- - // running operation above when we weren't holding the cacheMutex lock. 
- logrus.Debugf("%s: locking cacheMutex for updating isMounted", title) - d.cacheMutex.Lock() - if _, ok := d.cache[id]; !ok { - logrus.Debugf("%s: releasing cacheMutex on error path of isMounted", title) - d.cacheMutex.Unlock() + logrus.Debugf("lcowdriver: diff: waiting for svm to finish booting") + err = svm.getStartError() + if err != nil { d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false) - return nil, fmt.Errorf("%s fail as %s is not in the cache when updating isMounted", title, id) + return nil, fmt.Errorf("lcowdriver: diff: svm failed to boot: %s", err) } - ci = d.cache[id] - ci.setIsMounted() - logrus.Debugf("%s: releasing cacheMutex for updating isMounted", title) - d.cacheMutex.Unlock() // Obtain the tar stream for it - logrus.Debugf("%s %s, size %d, isSandbox %t", title, ci.hostPath, fileInfo.Size(), ci.isSandbox) - tarReadCloser, err := svm.config.VhdToTar(ci.hostPath, ci.uvmPath, ci.isSandbox, fileInfo.Size()) + logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, mvd.ContainerPath, ld.size, ld.isSandbox) + tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, mvd.ContainerPath, ld.isSandbox, ld.size) if err != nil { + svm.hotRemoveVHDs(mvd) d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false) return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err) } @@ -878,14 +796,12 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent) // In safe/non-global mode, we can't tear down the service VM until things have been read. - if !d.globalMode { - return ioutils.NewReadCloserWrapper(tarReadCloser, func() error { - tarReadCloser.Close() - d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false) - return nil - }), nil - } - return tarReadCloser, nil + return ioutils.NewReadCloserWrapper(tarReadCloser, func() error { + tarReadCloser.Close() + svm.hotRemoveVHDs(mvd) + d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false) + return nil + }), nil } // ApplyDiff extracts the changeset from the given diff into the @@ -902,6 +818,12 @@ func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { } defer d.terminateServiceVM(id, fmt.Sprintf("applydiff %s", id), false) + logrus.Debugf("lcowdriver: applydiff: waiting for svm to finish booting") + err = svm.getStartError() + if err != nil { + return 0, fmt.Errorf("lcowdriver: applydiff: svm failed to boot: %s", err) + } + // TODO @jhowardmsft - the retries are temporary to overcome platform reliablity issues. // Obviously this will be removed as platform bugs are fixed. retries := 0 @@ -944,6 +866,11 @@ func (d *Driver) GetMetadata(id string) (map[string]string, error) { return m, nil } +// GetLayerPath gets the layer path on host (path to VHD/VHDX) +func (d *Driver) GetLayerPath(id string) (string, error) { + return d.dir(id), nil +} + // dir returns the absolute path to the layer. func (d *Driver) dir(id string) string { return filepath.Join(d.dataRoot, filepath.Base(id)) @@ -1006,3 +933,34 @@ func getLayerDetails(folder string) (*layerDetails, error) { return ld, nil } + +func (d *Driver) getAllMounts(id string) ([]hcsshim.MappedVirtualDisk, error) { + layerChain, err := d.getLayerChain(id) + if err != nil { + return nil, err + } + layerChain = append([]string{d.dir(id)}, layerChain...) 
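The getAllMounts helper completed just below turns that layer chain into hcsshim mapped virtual disks using a fixed naming convention: each host VHD is exposed inside the utility VM under /tmp/<layer-directory-name>, and the union mount point is the first disk's guest path with a "-mount" suffix. A small standalone sketch of that convention (the package name, function names and example paths are illustrative only):

package lcowexample

import (
	"fmt"
	"path/filepath"
)

// guestPath mirrors hostToGuest: the layer's host directory name becomes a
// directory under /tmp inside the utility VM; on Windows, for example,
// C:\lcow\abc123\sandbox.vhdx maps to /tmp/abc123.
func guestPath(hostVHD string) string {
	return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostVHD)))
}

// mountName mirrors unionMountName: the overlay is mounted at the first
// disk's guest path plus "-mount", e.g. /tmp/abc123 -> /tmp/abc123-mount.
func mountName(firstGuest string) string {
	return fmt.Sprintf("%s-mount", firstGuest)
}
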
+ + logrus.Debugf("getting all layers: %v", layerChain) + disks := make([]hcsshim.MappedVirtualDisk, len(layerChain), len(layerChain)) + for i := range layerChain { + ld, err := getLayerDetails(layerChain[i]) + if err != nil { + logrus.Debugf("Failed to get LayerVhdDetails from %s: %s", layerChain[i], err) + return nil, err + } + disks[i].HostPath = ld.filename + disks[i].ContainerPath = hostToGuest(ld.filename) + disks[i].CreateInUtilityVM = true + disks[i].ReadOnly = !ld.isSandbox + } + return disks, nil +} + +func hostToGuest(hostpath string) string { + return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath))) +} + +func unionMountName(disks []hcsshim.MappedVirtualDisk) string { + return fmt.Sprintf("%s-mount", disks[0].ContainerPath) +} diff --git a/components/engine/daemon/graphdriver/lcow/lcow_svm.go b/components/engine/daemon/graphdriver/lcow/lcow_svm.go new file mode 100644 index 0000000000..26f6df4f03 --- /dev/null +++ b/components/engine/daemon/graphdriver/lcow/lcow_svm.go @@ -0,0 +1,373 @@ +// +build windows + +package lcow + +import ( + "errors" + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/opengcs/client" + "github.com/sirupsen/logrus" +) + +// Code for all the service VM management for the LCOW graphdriver + +var errVMisTerminating = errors.New("service VM is shutting down") +var errVMUnknown = errors.New("service vm id is unknown") +var errVMStillHasReference = errors.New("Attemping to delete a VM that is still being used") + +// serviceVMMap is the struct representing the id -> service VM mapping. +type serviceVMMap struct { + sync.Mutex + svms map[string]*serviceVMMapItem +} + +// serviceVMMapItem is our internal structure representing an item in our +// map of service VMs we are maintaining. +type serviceVMMapItem struct { + svm *serviceVM // actual service vm object + refCount int // refcount for VM +} + +type serviceVM struct { + sync.Mutex // Serialises operations being performed in this service VM. + scratchAttached bool // Has a scratch been attached? + config *client.Config // Represents the service VM item. + + // Indicates that the vm is started + startStatus chan interface{} + startError error + + // Indicates that the vm is stopped + stopStatus chan interface{} + stopError error + + attachedVHDs map[string]int // Map ref counting all the VHDS we've hot-added/hot-removed. + unionMounts map[string]int // Map ref counting all the union filesystems we mounted. +} + +// add will add an id to the service vm map. There are three cases: +// - entry doesn't exist: +// - add id to map and return a new vm that the caller can manually configure+start +// - entry does exist +// - return vm in map and increment ref count +// - entry does exist but the ref count is 0 +// - return the svm and errVMisTerminating. 
Caller can call svm.getStopError() to wait for stop +func (svmMap *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool, err error) { + svmMap.Lock() + defer svmMap.Unlock() + if svm, ok := svmMap.svms[id]; ok { + if svm.refCount == 0 { + return svm.svm, true, errVMisTerminating + } + svm.refCount++ + return svm.svm, true, nil + } + + // Doesn't exist, so create an empty svm to put into map and return + newSVM := &serviceVM{ + startStatus: make(chan interface{}), + stopStatus: make(chan interface{}), + attachedVHDs: make(map[string]int), + unionMounts: make(map[string]int), + config: &client.Config{}, + } + svmMap.svms[id] = &serviceVMMapItem{ + svm: newSVM, + refCount: 1, + } + return newSVM, false, nil +} + +// get will get the service vm from the map. There are three cases: +// - entry doesn't exist: +// - return errVMUnknown +// - entry does exist +// - return vm with no error +// - entry does exist but the ref count is 0 +// - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop +func (svmMap *serviceVMMap) get(id string) (*serviceVM, error) { + svmMap.Lock() + defer svmMap.Unlock() + svm, ok := svmMap.svms[id] + if !ok { + return nil, errVMUnknown + } + if svm.refCount == 0 { + return svm.svm, errVMisTerminating + } + return svm.svm, nil +} + +// decrementRefCount decrements the ref count of the given ID from the map. There are four cases: +// - entry doesn't exist: +// - return errVMUnknown +// - entry does exist but the ref count is 0 +// - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop +// - entry does exist but ref count is 1 +// - return vm and set lastRef to true. The caller can then stop the vm, delete the id from this map +// - and execute svm.signalStopFinished to signal the threads that the svm has been terminated. +// - entry does exist and ref count > 1 +// - just reduce ref count and return svm +func (svmMap *serviceVMMap) decrementRefCount(id string) (_ *serviceVM, lastRef bool, _ error) { + svmMap.Lock() + defer svmMap.Unlock() + + svm, ok := svmMap.svms[id] + if !ok { + return nil, false, errVMUnknown + } + if svm.refCount == 0 { + return svm.svm, false, errVMisTerminating + } + svm.refCount-- + return svm.svm, svm.refCount == 0, nil +} + +// setRefCountZero works the same way as decrementRefCount, but sets ref count to 0 instead of decrementing it. +func (svmMap *serviceVMMap) setRefCountZero(id string) (*serviceVM, error) { + svmMap.Lock() + defer svmMap.Unlock() + + svm, ok := svmMap.svms[id] + if !ok { + return nil, errVMUnknown + } + if svm.refCount == 0 { + return svm.svm, errVMisTerminating + } + svm.refCount = 0 + return svm.svm, nil +} + +// deleteID deletes the given ID from the map. If the refcount is not 0 or the +// VM does not exist, then this function returns an error. 
+func (svmMap *serviceVMMap) deleteID(id string) error { + svmMap.Lock() + defer svmMap.Unlock() + svm, ok := svmMap.svms[id] + if !ok { + return errVMUnknown + } + if svm.refCount != 0 { + return errVMStillHasReference + } + delete(svmMap.svms, id) + return nil +} + +func (svm *serviceVM) signalStartFinished(err error) { + svm.Lock() + svm.startError = err + svm.Unlock() + close(svm.startStatus) +} + +func (svm *serviceVM) getStartError() error { + <-svm.startStatus + svm.Lock() + defer svm.Unlock() + return svm.startError +} + +func (svm *serviceVM) signalStopFinished(err error) { + svm.Lock() + svm.stopError = err + svm.Unlock() + close(svm.stopStatus) +} + +func (svm *serviceVM) getStopError() error { + <-svm.stopStatus + svm.Lock() + defer svm.Unlock() + return svm.stopError +} + +// hotAddVHDs waits for the service vm to start and then attaches the vhds. +func (svm *serviceVM) hotAddVHDs(mvds ...hcsshim.MappedVirtualDisk) error { + if err := svm.getStartError(); err != nil { + return err + } + return svm.hotAddVHDsAtStart(mvds...) +} + +// hotAddVHDsAtStart works the same way as hotAddVHDs but does not wait for the VM to start. +func (svm *serviceVM) hotAddVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error { + svm.Lock() + defer svm.Unlock() + for i, mvd := range mvds { + if _, ok := svm.attachedVHDs[mvd.HostPath]; ok { + svm.attachedVHDs[mvd.HostPath]++ + continue + } + + if err := svm.config.HotAddVhd(mvd.HostPath, mvd.ContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil { + svm.hotRemoveVHDsAtStart(mvds[:i]...) + return err + } + svm.attachedVHDs[mvd.HostPath] = 1 + } + return nil +} + +// hotRemoveVHDs waits for the service vm to start and then removes the vhds. +func (svm *serviceVM) hotRemoveVHDs(mvds ...hcsshim.MappedVirtualDisk) error { + if err := svm.getStartError(); err != nil { + return err + } + return svm.hotRemoveVHDsAtStart(mvds...) +} + +// hotRemoveVHDsAtStart works the same way as hotRemoveVHDs but does not wait for the VM to start. +func (svm *serviceVM) hotRemoveVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error { + svm.Lock() + defer svm.Unlock() + var retErr error + for _, mvd := range mvds { + if _, ok := svm.attachedVHDs[mvd.HostPath]; !ok { + // We continue instead of returning an error if we try to hot remove a non-existent VHD. + // This is because one of the callers of the function is graphdriver.Put(). Since graphdriver.Get() + // defers the VM start to the first operation, it's possible that nothing have been hot-added + // when Put() is called. To avoid Put returning an error in that case, we simply continue if we + // don't find the vhd attached. 
+ continue + } + + if svm.attachedVHDs[mvd.HostPath] > 1 { + svm.attachedVHDs[mvd.HostPath]-- + continue + } + + // last VHD, so remove from VM and map + if err := svm.config.HotRemoveVhd(mvd.HostPath); err == nil { + delete(svm.attachedVHDs, mvd.HostPath) + } else { + // Take note of the error, but still continue to remove the other VHDs + logrus.Warnf("Failed to hot remove %s: %s", mvd.HostPath, err) + if retErr == nil { + retErr = err + } + } + } + return retErr +} + +func (svm *serviceVM) createExt4VHDX(destFile string, sizeGB uint32, cacheFile string) error { + if err := svm.getStartError(); err != nil { + return err + } + + svm.Lock() + defer svm.Unlock() + return svm.config.CreateExt4Vhdx(destFile, sizeGB, cacheFile) +} + +func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedVirtualDisk) (err error) { + if len(mvds) == 0 { + return fmt.Errorf("createUnionMount: error must have at least 1 layer") + } + + if err = svm.getStartError(); err != nil { + return err + } + + svm.Lock() + defer svm.Unlock() + if _, ok := svm.unionMounts[mountName]; ok { + svm.unionMounts[mountName]++ + return nil + } + + var lowerLayers []string + if mvds[0].ReadOnly { + lowerLayers = append(lowerLayers, mvds[0].ContainerPath) + } + + for i := 1; i < len(mvds); i++ { + lowerLayers = append(lowerLayers, mvds[i].ContainerPath) + } + + logrus.Debugf("Doing the overlay mount with union directory=%s", mountName) + if err = svm.runProcess(fmt.Sprintf("mkdir -p %s", mountName), nil, nil, nil); err != nil { + return err + } + + var cmd string + if mvds[0].ReadOnly { + // Readonly overlay + cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s %s", + strings.Join(lowerLayers, ","), + mountName) + } else { + upper := fmt.Sprintf("%s/upper", mvds[0].ContainerPath) + work := fmt.Sprintf("%s/work", mvds[0].ContainerPath) + + if err = svm.runProcess(fmt.Sprintf("mkdir -p %s %s", upper, work), nil, nil, nil); err != nil { + return err + } + + cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s,upperdir=%s,workdir=%s %s", + strings.Join(lowerLayers, ":"), + upper, + work, + mountName) + } + + logrus.Debugf("createUnionMount: Executing mount=%s", cmd) + if err = svm.runProcess(cmd, nil, nil, nil); err != nil { + return err + } + + svm.unionMounts[mountName] = 1 + return nil +} + +func (svm *serviceVM) deleteUnionMount(mountName string, disks ...hcsshim.MappedVirtualDisk) error { + if err := svm.getStartError(); err != nil { + return err + } + + svm.Lock() + defer svm.Unlock() + if _, ok := svm.unionMounts[mountName]; !ok { + return nil + } + + if svm.unionMounts[mountName] > 1 { + svm.unionMounts[mountName]-- + return nil + } + + logrus.Debugf("Removing union mount %s", mountName) + if err := svm.runProcess(fmt.Sprintf("umount %s", mountName), nil, nil, nil); err != nil { + return err + } + + delete(svm.unionMounts, mountName) + return nil +} + +func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { + process, err := svm.config.RunProcess(command, stdin, stdout, stderr) + if err != nil { + return err + } + defer process.Close() + + process.WaitTimeout(time.Duration(int(time.Second) * svm.config.UvmTimeoutSeconds)) + exitCode, err := process.ExitCode() + if err != nil { + return err + } + + if exitCode != 0 { + return fmt.Errorf("svm.runProcess: command %s failed with exit code %d", command, exitCode) + } + return nil +} diff --git a/components/engine/daemon/graphdriver/lcow/remotefs.go 
b/components/engine/daemon/graphdriver/lcow/remotefs.go new file mode 100644 index 0000000000..148e3c0a2d --- /dev/null +++ b/components/engine/daemon/graphdriver/lcow/remotefs.go @@ -0,0 +1,139 @@ +// +build windows + +package lcow + +import ( + "bytes" + "fmt" + "io" + "runtime" + "strings" + "sync" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/opengcs/service/gcsutils/remotefs" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/sirupsen/logrus" +) + +type lcowfs struct { + root string + d *Driver + mappedDisks []hcsshim.MappedVirtualDisk + vmID string + currentSVM *serviceVM + sync.Mutex +} + +var _ containerfs.ContainerFS = &lcowfs{} + +// ErrNotSupported is an error for unsupported operations in the remotefs +var ErrNotSupported = fmt.Errorf("not supported") + +// Functions to implement the ContainerFS interface +func (l *lcowfs) Path() string { + return l.root +} + +func (l *lcowfs) ResolveScopedPath(path string, rawPath bool) (string, error) { + logrus.Debugf("remotefs.resolvescopedpath inputs: %s %s ", path, l.root) + + arg1 := l.Join(l.root, path) + if !rawPath { + // The l.Join("/", path) will make path an absolute path and then clean it + // so if path = ../../X, it will become /X. + arg1 = l.Join(l.root, l.Join("/", path)) + } + arg2 := l.root + + output := &bytes.Buffer{} + if err := l.runRemoteFSProcess(nil, output, remotefs.ResolvePathCmd, arg1, arg2); err != nil { + return "", err + } + + logrus.Debugf("remotefs.resolvescopedpath success. Output: %s\n", output.String()) + return output.String(), nil +} + +func (l *lcowfs) OS() string { + return "linux" +} + +func (l *lcowfs) Architecture() string { + return runtime.GOARCH +} + +// Other functions that are used by docker like the daemon Archiver/Extractor +func (l *lcowfs) ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error { + logrus.Debugf("remotefs.ExtractArchve inputs: %s %+v", dst, opts) + + tarBuf := &bytes.Buffer{} + if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil { + return fmt.Errorf("failed to marshall tar opts: %s", err) + } + + input := io.MultiReader(tarBuf, src) + if err := l.runRemoteFSProcess(input, nil, remotefs.ExtractArchiveCmd, dst); err != nil { + return fmt.Errorf("failed to extract archive to %s: %s", dst, err) + } + return nil +} + +func (l *lcowfs) ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) { + logrus.Debugf("remotefs.ArchivePath: %s %+v", src, opts) + + tarBuf := &bytes.Buffer{} + if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil { + return nil, fmt.Errorf("failed to marshall tar opts: %s", err) + } + + r, w := io.Pipe() + go func() { + defer w.Close() + if err := l.runRemoteFSProcess(tarBuf, w, remotefs.ArchivePathCmd, src); err != nil { + logrus.Debugf("REMOTEFS: Failed to extract archive: %s %+v %s", src, opts, err) + } + }() + return r, nil +} + +// Helper functions +func (l *lcowfs) startVM() error { + l.Lock() + defer l.Unlock() + if l.currentSVM != nil { + return nil + } + + svm, err := l.d.startServiceVMIfNotRunning(l.vmID, l.mappedDisks, fmt.Sprintf("lcowfs.startVM")) + if err != nil { + return err + } + + if err = svm.createUnionMount(l.root, l.mappedDisks...); err != nil { + return err + } + l.currentSVM = svm + return nil +} + +func (l *lcowfs) runRemoteFSProcess(stdin io.Reader, stdout io.Writer, args ...string) error { + if err := l.startVM(); err != nil { + return err + } + + // Append remotefs prefix and setup as a command line string + cmd := 
fmt.Sprintf("%s %s", remotefs.RemotefsCmd, strings.Join(args, " ")) + stderr := &bytes.Buffer{} + if err := l.currentSVM.runProcess(cmd, stdin, stdout, stderr); err != nil { + return err + } + + eerr, err := remotefs.ReadError(stderr) + if eerr != nil { + // Process returned an error so return that. + return remotefs.ExportedToError(eerr) + } + return err +} diff --git a/components/engine/daemon/graphdriver/lcow/remotefs_file.go b/components/engine/daemon/graphdriver/lcow/remotefs_file.go new file mode 100644 index 0000000000..c13431973c --- /dev/null +++ b/components/engine/daemon/graphdriver/lcow/remotefs_file.go @@ -0,0 +1,211 @@ +// +build windows + +package lcow + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "os" + "strconv" + + "github.com/Microsoft/hcsshim" + "github.com/Microsoft/opengcs/service/gcsutils/remotefs" + "github.com/containerd/continuity/driver" +) + +type lcowfile struct { + process hcsshim.Process + stdin io.WriteCloser + stdout io.ReadCloser + stderr io.ReadCloser + fs *lcowfs + guestPath string +} + +func (l *lcowfs) Open(path string) (driver.File, error) { + return l.OpenFile(path, os.O_RDONLY, 0) +} + +func (l *lcowfs) OpenFile(path string, flag int, perm os.FileMode) (_ driver.File, err error) { + flagStr := strconv.FormatInt(int64(flag), 10) + permStr := strconv.FormatUint(uint64(perm), 8) + + commandLine := fmt.Sprintf("%s %s %s %s", remotefs.RemotefsCmd, remotefs.OpenFileCmd, flagStr, permStr) + env := make(map[string]string) + env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:" + processConfig := &hcsshim.ProcessConfig{ + EmulateConsole: false, + CreateStdInPipe: true, + CreateStdOutPipe: true, + CreateStdErrPipe: true, + CreateInUtilityVm: true, + WorkingDirectory: "/bin", + Environment: env, + CommandLine: commandLine, + } + + process, err := l.currentSVM.config.Uvm.CreateProcess(processConfig) + if err != nil { + return nil, fmt.Errorf("failed to open file %s: %s", path, err) + } + + stdin, stdout, stderr, err := process.Stdio() + if err != nil { + process.Kill() + process.Close() + return nil, fmt.Errorf("failed to open file pipes %s: %s", path, err) + } + + lf := &lcowfile{ + process: process, + stdin: stdin, + stdout: stdout, + stderr: stderr, + fs: l, + guestPath: path, + } + + if _, err := lf.getResponse(); err != nil { + return nil, fmt.Errorf("failed to open file %s: %s", path, err) + } + return lf, nil +} + +func (l *lcowfile) Read(b []byte) (int, error) { + hdr := &remotefs.FileHeader{ + Cmd: remotefs.Read, + Size: uint64(len(b)), + } + + if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil { + return 0, err + } + + buf, err := l.getResponse() + if err != nil { + return 0, nil + } + + n := copy(b, buf) + return n, nil +} + +func (l *lcowfile) Write(b []byte) (int, error) { + hdr := &remotefs.FileHeader{ + Cmd: remotefs.Write, + Size: uint64(len(b)), + } + + if err := remotefs.WriteFileHeader(l.stdin, hdr, b); err != nil { + return 0, err + } + + _, err := l.getResponse() + if err != nil { + return 0, nil + } + + return len(b), nil +} + +func (l *lcowfile) Seek(offset int64, whence int) (int64, error) { + seekHdr := &remotefs.SeekHeader{ + Offset: offset, + Whence: int32(whence), + } + + buf := &bytes.Buffer{} + if err := binary.Write(buf, binary.BigEndian, seekHdr); err != nil { + return 0, err + } + + hdr := &remotefs.FileHeader{ + Cmd: remotefs.Write, + Size: uint64(buf.Len()), + } + if err := remotefs.WriteFileHeader(l.stdin, hdr, buf.Bytes()); err != nil { + return 0, err + } 
+ + resBuf, err := l.getResponse() + if err != nil { + return 0, err + } + + var res int64 + if err := binary.Read(bytes.NewBuffer(resBuf), binary.BigEndian, &res); err != nil { + return 0, err + } + return res, nil +} + +func (l *lcowfile) Close() error { + hdr := &remotefs.FileHeader{ + Cmd: remotefs.Close, + Size: 0, + } + + if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil { + return err + } + + _, err := l.getResponse() + return err +} + +func (l *lcowfile) Readdir(n int) ([]os.FileInfo, error) { + nStr := strconv.FormatInt(int64(n), 10) + + // Unlike the other File functions, this one can just be run without maintaining state, + // so just do the normal runRemoteFSProcess way. + buf := &bytes.Buffer{} + if err := l.fs.runRemoteFSProcess(nil, buf, remotefs.ReadDirCmd, l.guestPath, nStr); err != nil { + return nil, err + } + + var info []remotefs.FileInfo + if err := json.Unmarshal(buf.Bytes(), &info); err != nil { + return nil, nil + } + + osInfo := make([]os.FileInfo, len(info)) + for i := range info { + osInfo[i] = &info[i] + } + return osInfo, nil +} + +func (l *lcowfile) getResponse() ([]byte, error) { + hdr, err := remotefs.ReadFileHeader(l.stdout) + if err != nil { + return nil, err + } + + if hdr.Cmd != remotefs.CmdOK { + // Something went wrong during the openfile in the server. + // Parse stderr and return that as an error + eerr, err := remotefs.ReadError(l.stderr) + if eerr != nil { + return nil, remotefs.ExportedToError(eerr) + } + + // Maybe the parsing went wrong? + if err != nil { + return nil, err + } + + // At this point, we know something went wrong in the remotefs program, but + // we we don't know why. + return nil, fmt.Errorf("unknown error") + } + + // Successful command, we might have some data to read (for Read + Seek) + buf := make([]byte, hdr.Size, hdr.Size) + if _, err := io.ReadFull(l.stdout, buf); err != nil { + return nil, err + } + return buf, nil +} diff --git a/components/engine/daemon/graphdriver/lcow/remotefs_filedriver.go b/components/engine/daemon/graphdriver/lcow/remotefs_filedriver.go new file mode 100644 index 0000000000..a3e0d9e9f5 --- /dev/null +++ b/components/engine/daemon/graphdriver/lcow/remotefs_filedriver.go @@ -0,0 +1,123 @@ +// +build windows + +package lcow + +import ( + "bytes" + "encoding/json" + "os" + "strconv" + + "github.com/Microsoft/opengcs/service/gcsutils/remotefs" + + "github.com/containerd/continuity/driver" + "github.com/sirupsen/logrus" +) + +var _ driver.Driver = &lcowfs{} + +func (l *lcowfs) Readlink(p string) (string, error) { + logrus.Debugf("removefs.readlink args: %s", p) + + result := &bytes.Buffer{} + if err := l.runRemoteFSProcess(nil, result, remotefs.ReadlinkCmd, p); err != nil { + return "", err + } + return result.String(), nil +} + +func (l *lcowfs) Mkdir(path string, mode os.FileMode) error { + return l.mkdir(path, mode, remotefs.MkdirCmd) +} + +func (l *lcowfs) MkdirAll(path string, mode os.FileMode) error { + return l.mkdir(path, mode, remotefs.MkdirAllCmd) +} + +func (l *lcowfs) mkdir(path string, mode os.FileMode, cmd string) error { + modeStr := strconv.FormatUint(uint64(mode), 8) + logrus.Debugf("remotefs.%s args: %s %s", cmd, path, modeStr) + return l.runRemoteFSProcess(nil, nil, cmd, path, modeStr) +} + +func (l *lcowfs) Remove(path string) error { + return l.remove(path, remotefs.RemoveCmd) +} + +func (l *lcowfs) RemoveAll(path string) error { + return l.remove(path, remotefs.RemoveAllCmd) +} + +func (l *lcowfs) remove(path string, cmd string) error { + logrus.Debugf("remotefs.%s 
args: %s", cmd, path) + return l.runRemoteFSProcess(nil, nil, cmd, path) +} + +func (l *lcowfs) Link(oldname, newname string) error { + return l.link(oldname, newname, remotefs.LinkCmd) +} + +func (l *lcowfs) Symlink(oldname, newname string) error { + return l.link(oldname, newname, remotefs.SymlinkCmd) +} + +func (l *lcowfs) link(oldname, newname, cmd string) error { + logrus.Debugf("remotefs.%s args: %s %s", cmd, oldname, newname) + return l.runRemoteFSProcess(nil, nil, cmd, oldname, newname) +} + +func (l *lcowfs) Lchown(name string, uid, gid int64) error { + uidStr := strconv.FormatInt(uid, 10) + gidStr := strconv.FormatInt(gid, 10) + + logrus.Debugf("remotefs.lchown args: %s %s %s", name, uidStr, gidStr) + return l.runRemoteFSProcess(nil, nil, remotefs.LchownCmd, name, uidStr, gidStr) +} + +// Lchmod changes the mode of an file not following symlinks. +func (l *lcowfs) Lchmod(path string, mode os.FileMode) error { + modeStr := strconv.FormatUint(uint64(mode), 8) + logrus.Debugf("remotefs.lchmod args: %s %s", path, modeStr) + return l.runRemoteFSProcess(nil, nil, remotefs.LchmodCmd, path, modeStr) +} + +func (l *lcowfs) Mknod(path string, mode os.FileMode, major, minor int) error { + modeStr := strconv.FormatUint(uint64(mode), 8) + majorStr := strconv.FormatUint(uint64(major), 10) + minorStr := strconv.FormatUint(uint64(minor), 10) + + logrus.Debugf("remotefs.mknod args: %s %s %s %s", path, modeStr, majorStr, minorStr) + return l.runRemoteFSProcess(nil, nil, remotefs.MknodCmd, path, modeStr, majorStr, minorStr) +} + +func (l *lcowfs) Mkfifo(path string, mode os.FileMode) error { + modeStr := strconv.FormatUint(uint64(mode), 8) + logrus.Debugf("remotefs.mkfifo args: %s %s", path, modeStr) + return l.runRemoteFSProcess(nil, nil, remotefs.MkfifoCmd, path, modeStr) +} + +func (l *lcowfs) Stat(p string) (os.FileInfo, error) { + return l.stat(p, remotefs.StatCmd) +} + +func (l *lcowfs) Lstat(p string) (os.FileInfo, error) { + return l.stat(p, remotefs.LstatCmd) +} + +func (l *lcowfs) stat(path string, cmd string) (os.FileInfo, error) { + logrus.Debugf("remotefs.stat inputs: %s %s", cmd, path) + + output := &bytes.Buffer{} + err := l.runRemoteFSProcess(nil, output, cmd, path) + if err != nil { + return nil, err + } + + var fi remotefs.FileInfo + if err := json.Unmarshal(output.Bytes(), &fi); err != nil { + return nil, err + } + + logrus.Debugf("remotefs.stat success. got: %v\n", fi) + return &fi, nil +} diff --git a/components/engine/daemon/graphdriver/lcow/remotefs_pathdriver.go b/components/engine/daemon/graphdriver/lcow/remotefs_pathdriver.go new file mode 100644 index 0000000000..95d3e715a2 --- /dev/null +++ b/components/engine/daemon/graphdriver/lcow/remotefs_pathdriver.go @@ -0,0 +1,212 @@ +// +build windows + +package lcow + +import ( + "errors" + "os" + pathpkg "path" + "path/filepath" + "sort" + "strings" + + "github.com/containerd/continuity/pathdriver" +) + +var _ pathdriver.PathDriver = &lcowfs{} + +// Continuity Path functions can be done locally +func (l *lcowfs) Join(path ...string) string { + return pathpkg.Join(path...) 
+} + +func (l *lcowfs) IsAbs(path string) bool { + return pathpkg.IsAbs(path) +} + +func sameWord(a, b string) bool { + return a == b +} + +// Implementation taken from the Go standard library +func (l *lcowfs) Rel(basepath, targpath string) (string, error) { + baseVol := "" + targVol := "" + base := l.Clean(basepath) + targ := l.Clean(targpath) + if sameWord(targ, base) { + return ".", nil + } + base = base[len(baseVol):] + targ = targ[len(targVol):] + if base == "." { + base = "" + } + // Can't use IsAbs - `\a` and `a` are both relative in Windows. + baseSlashed := len(base) > 0 && base[0] == l.Separator() + targSlashed := len(targ) > 0 && targ[0] == l.Separator() + if baseSlashed != targSlashed || !sameWord(baseVol, targVol) { + return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath) + } + // Position base[b0:bi] and targ[t0:ti] at the first differing elements. + bl := len(base) + tl := len(targ) + var b0, bi, t0, ti int + for { + for bi < bl && base[bi] != l.Separator() { + bi++ + } + for ti < tl && targ[ti] != l.Separator() { + ti++ + } + if !sameWord(targ[t0:ti], base[b0:bi]) { + break + } + if bi < bl { + bi++ + } + if ti < tl { + ti++ + } + b0 = bi + t0 = ti + } + if base[b0:bi] == ".." { + return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath) + } + if b0 != bl { + // Base elements left. Must go up before going down. + seps := strings.Count(base[b0:bl], string(l.Separator())) + size := 2 + seps*3 + if tl != t0 { + size += 1 + tl - t0 + } + buf := make([]byte, size) + n := copy(buf, "..") + for i := 0; i < seps; i++ { + buf[n] = l.Separator() + copy(buf[n+1:], "..") + n += 3 + } + if t0 != tl { + buf[n] = l.Separator() + copy(buf[n+1:], targ[t0:]) + } + return string(buf), nil + } + return targ[t0:], nil +} + +func (l *lcowfs) Base(path string) string { + return pathpkg.Base(path) +} + +func (l *lcowfs) Dir(path string) string { + return pathpkg.Dir(path) +} + +func (l *lcowfs) Clean(path string) string { + return pathpkg.Clean(path) +} + +func (l *lcowfs) Split(path string) (dir, file string) { + return pathpkg.Split(path) +} + +func (l *lcowfs) Separator() byte { + return '/' +} + +func (l *lcowfs) Abs(path string) (string, error) { + // Abs is supposed to add the current working directory, which is meaningless in lcow. + // So, return an error. + return "", ErrNotSupported +} + +// Implementation taken from the Go standard library +func (l *lcowfs) Walk(root string, walkFn filepath.WalkFunc) error { + info, err := l.Lstat(root) + if err != nil { + err = walkFn(root, nil, err) + } else { + err = l.walk(root, info, walkFn) + } + if err == filepath.SkipDir { + return nil + } + return err +} + +// walk recursively descends path, calling w. 
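Because Separator() is hard-wired to '/', the Rel implementation above behaves like path/filepath.Rel on Unix. A couple of worked cases under that assumption (illustrative only; fs stands for any *lcowfs value, and fmt is assumed imported):

    func relExamples(fs *lcowfs) {
        rel, _ := fs.Rel("/var/lib/docker", "/var/lib/docker/overlay2/abc")
        fmt.Println(rel) // overlay2/abc
        rel, _ = fs.Rel("/var/lib/docker/a", "/var/lib/docker/b")
        fmt.Println(rel) // ../b
        if _, err := fs.Rel("/var/lib/docker", "relative/path"); err != nil {
            fmt.Println("mixed rooted and relative inputs are rejected")
        }
    }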
+func (l *lcowfs) walk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := l.readDirNames(path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := l.Join(path, name) + fileInfo, err := l.Lstat(filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = l.walk(filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. +func (l *lcowfs) readDirNames(dirname string) ([]string, error) { + f, err := l.Open(dirname) + if err != nil { + return nil, err + } + files, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + + names := make([]string, len(files), len(files)) + for i := range files { + names[i] = files[i].Name() + } + + sort.Strings(names) + return names, nil +} + +// Note that Go's filepath.FromSlash/ToSlash convert between OS paths and '/'. Since the path separator +// for LCOW (and Unix) is '/', they are no-ops. +func (l *lcowfs) FromSlash(path string) string { + return path +} + +func (l *lcowfs) ToSlash(path string) string { + return path +} + +func (l *lcowfs) Match(pattern, name string) (matched bool, err error) { + return pathpkg.Match(pattern, name) +} diff --git a/components/engine/daemon/graphdriver/overlay/copy.go b/components/engine/daemon/graphdriver/overlay/copy.go index 8c35b91ddc..f7e35e2bd3 100644 --- a/components/engine/daemon/graphdriver/overlay/copy.go +++ b/components/engine/daemon/graphdriver/overlay/copy.go @@ -157,9 +157,10 @@ func copyDir(srcDir, dstDir string, flags copyFlags) error { } // system.Chtimes doesn't support a NOFOLLOW flag atm + // nolint: unconvert if !isSymlink { - aTime := time.Unix(stat.Atim.Sec, stat.Atim.Nsec) - mTime := time.Unix(stat.Mtim.Sec, stat.Mtim.Nsec) + aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) if err := system.Chtimes(dstPath, aTime, mTime); err != nil { return err } diff --git a/components/engine/daemon/graphdriver/overlay/overlay.go b/components/engine/daemon/graphdriver/overlay/overlay.go index 0d14b045f9..9012722c20 100644 --- a/components/engine/daemon/graphdriver/overlay/overlay.go +++ b/components/engine/daemon/graphdriver/overlay/overlay.go @@ -15,6 +15,7 @@ import ( "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/locker" @@ -269,10 +270,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr // Toplevel images are just a "root" dir if parent == "" { - if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil { - return err - } - return nil + return idtools.MkdirAndChown(path.Join(dir, "root"), 0755, idtools.IDPair{rootUID, rootGID}) } parentDir := d.dir(parent) @@ -344,21 +342,21 @@ func (d *Driver) Remove(id string) error { } // Get creates and mounts the required file system for the given 
id and returns the mount path. -func (d *Driver) Get(id string, mountLabel string) (s string, err error) { +func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, err error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { - return "", err + return nil, err } // If id has a root, just return it rootDir := path.Join(dir, "root") if _, err := os.Stat(rootDir); err == nil { - return rootDir, nil + return containerfs.NewLocalContainerFS(rootDir), nil } mergedDir := path.Join(dir, "merged") if count := d.ctr.Increment(mergedDir); count > 1 { - return mergedDir, nil + return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if err != nil { @@ -369,7 +367,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) { }() lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) if err != nil { - return "", err + return nil, err } var ( lowerDir = path.Join(d.dir(string(lowerID)), "root") @@ -378,18 +376,18 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) { opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) ) if err := unix.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { - return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { - return "", err + return nil, err } if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { - return "", err + return nil, err } - return mergedDir, nil + return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. diff --git a/components/engine/daemon/graphdriver/overlay2/overlay.go b/components/engine/daemon/graphdriver/overlay2/overlay.go index 6b0d4be869..9650975b3c 100644 --- a/components/engine/daemon/graphdriver/overlay2/overlay.go +++ b/components/engine/daemon/graphdriver/overlay2/overlay.go @@ -23,6 +23,7 @@ import ( "github.com/docker/docker/daemon/graphdriver/quota" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" @@ -514,12 +515,12 @@ func (d *Driver) Remove(id string) error { } // Get creates and mounts the required file system for the given id and returns the mount path. 
-func (d *Driver) Get(id string, mountLabel string) (s string, err error) { +func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { - return "", err + return nil, err } diffDir := path.Join(dir, "diff") @@ -527,14 +528,14 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) { if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { - return diffDir, nil + return containerfs.NewLocalContainerFS(diffDir), nil } - return "", err + return nil, err } mergedDir := path.Join(dir, "merged") if count := d.ctr.Increment(mergedDir); count > 1 { - return mergedDir, nil + return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if err != nil { @@ -574,7 +575,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) { opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize { - return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) + return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { @@ -584,21 +585,21 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) { } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { - return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) + return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { - return "", err + return nil, err } if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { - return "", err + return nil, err } - return mergedDir, nil + return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. 
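The overlay and overlay2 hunks above, and the proxy/vfs/windows/zfs hunks that follow, all make the same interface change: Driver.Get now hands back a containerfs.ContainerFS instead of a bare path string, and callers that still need a host path unwrap it with Path(), as the BaseFS.Path() and layerFs.Path() call sites elsewhere in this diff do. A hedged caller-side sketch, with placeholder names, under the assumption that graphdriver.Driver exposes Get/Put as before:

    // Sketch only: driver, layerID and mountLabel are placeholder names.
    func layerPath(driver graphdriver.Driver, layerID, mountLabel string) (string, error) {
        rootFS, err := driver.Get(layerID, mountLabel) // now a containerfs.ContainerFS, not a string
        if err != nil {
            return "", err
        }
        // Local drivers wrap an on-host directory, so a plain path is still
        // available via Path(); the caller remains responsible for the matching
        // driver.Put(layerID) once it is done with the mount.
        return rootFS.Path(), nil
    }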
diff --git a/components/engine/daemon/graphdriver/proxy.go b/components/engine/daemon/graphdriver/proxy.go index 120afad459..81ef872ad9 100644 --- a/components/engine/daemon/graphdriver/proxy.go +++ b/components/engine/daemon/graphdriver/proxy.go @@ -7,6 +7,7 @@ import ( "path/filepath" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/plugins" @@ -129,20 +130,20 @@ func (d *graphDriverProxy) Remove(id string) error { return nil } -func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { +func (d *graphDriverProxy) Get(id, mountLabel string) (containerfs.ContainerFS, error) { args := &graphDriverRequest{ ID: id, MountLabel: mountLabel, } var ret graphDriverResponse if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil { - return "", err + return nil, err } var err error if ret.Err != "" { err = errors.New(ret.Err) } - return filepath.Join(d.p.BasePath(), ret.Dir), err + return containerfs.NewLocalContainerFS(filepath.Join(d.p.BasePath(), ret.Dir)), err } func (d *graphDriverProxy) Put(id string) error { diff --git a/components/engine/daemon/graphdriver/register/register_devicemapper.go b/components/engine/daemon/graphdriver/register/register_devicemapper.go index bb2e9ef541..09dfb71eb7 100644 --- a/components/engine/daemon/graphdriver/register/register_devicemapper.go +++ b/components/engine/daemon/graphdriver/register/register_devicemapper.go @@ -1,4 +1,4 @@ -// +build !exclude_graphdriver_devicemapper,linux +// +build !exclude_graphdriver_devicemapper,!static_build,linux package register diff --git a/components/engine/daemon/graphdriver/vfs/driver.go b/components/engine/daemon/graphdriver/vfs/driver.go index 15a4de3606..0482dccb87 100644 --- a/components/engine/daemon/graphdriver/vfs/driver.go +++ b/components/engine/daemon/graphdriver/vfs/driver.go @@ -7,6 +7,7 @@ import ( "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/system" "github.com/opencontainers/selinux/go-selinux/label" @@ -94,7 +95,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { if err != nil { return fmt.Errorf("%s: %s", parent, err) } - return CopyWithTar(parentDir, dir) + return CopyWithTar(parentDir.Path(), dir) } func (d *Driver) dir(id string) string { @@ -103,21 +104,18 @@ func (d *Driver) dir(id string) string { // Remove deletes the content from the directory for a given id. func (d *Driver) Remove(id string) error { - if err := system.EnsureRemoveAll(d.dir(id)); err != nil { - return err - } - return nil + return system.EnsureRemoveAll(d.dir(id)) } // Get returns the directory for the given id. -func (d *Driver) Get(id, mountLabel string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { dir := d.dir(id) if st, err := os.Stat(dir); err != nil { - return "", err + return nil, err } else if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) + return nil, fmt.Errorf("%s: not a directory", dir) } - return dir, nil + return containerfs.NewLocalContainerFS(dir), nil } // Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. 
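One small change that is easy to miss above: register_devicemapper.go now also carries the !static_build constraint. In Go's build-tag syntax a comma means AND, so the line reads as annotated below; the go build invocation is illustrative only:

    // +build !exclude_graphdriver_devicemapper,!static_build,linux
    //
    // Compile this file only when the exclude_graphdriver_devicemapper tag is
    // NOT set, the static_build tag is NOT set, and GOOS is linux. A build run
    // with something like `go build -tags 'static_build' ...` (illustrative)
    // therefore no longer registers the devicemapper graphdriver.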
diff --git a/components/engine/daemon/graphdriver/windows/windows.go b/components/engine/daemon/graphdriver/windows/windows.go index 44114051bb..e7130d80f2 100644 --- a/components/engine/daemon/graphdriver/windows/windows.go +++ b/components/engine/daemon/graphdriver/windows/windows.go @@ -26,6 +26,7 @@ import ( "github.com/Microsoft/hcsshim" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/longpath" @@ -354,36 +355,36 @@ func (d *Driver) Remove(id string) error { } // Get returns the rootfs path for the id. This will mount the dir at its given path. -func (d *Driver) Get(id, mountLabel string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { panicIfUsedByLcow() logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) var dir string rID, err := d.resolveID(id) if err != nil { - return "", err + return nil, err } if count := d.ctr.Increment(rID); count > 1 { - return d.cache[rID], nil + return containerfs.NewLocalContainerFS(d.cache[rID]), nil } // Getting the layer paths must be done outside of the lock. layerChain, err := d.getLayerChain(rID) if err != nil { d.ctr.Decrement(rID) - return "", err + return nil, err } if err := hcsshim.ActivateLayer(d.info, rID); err != nil { d.ctr.Decrement(rID) - return "", err + return nil, err } if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { d.ctr.Decrement(rID) if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { logrus.Warnf("Failed to Deactivate %s: %s", id, err) } - return "", err + return nil, err } mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) @@ -395,7 +396,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { logrus.Warnf("Failed to Deactivate %s: %s", id, err) } - return "", err + return nil, err } d.cacheMu.Lock() d.cache[rID] = mountPath @@ -409,7 +410,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { dir = d.dir(id) } - return dir, nil + return containerfs.NewLocalContainerFS(dir), nil } // Put adds a new layer to the driver. @@ -618,7 +619,7 @@ func (d *Driver) DiffSize(id, parent string) (size int64, err error) { } defer d.Put(id) - return archive.ChangesSize(layerFs, changes), nil + return archive.ChangesSize(layerFs.Path(), changes), nil } // GetMetadata returns custom driver information. diff --git a/components/engine/daemon/graphdriver/zfs/zfs.go b/components/engine/daemon/graphdriver/zfs/zfs.go index 729099a6cd..4caedef0ee 100644 --- a/components/engine/daemon/graphdriver/zfs/zfs.go +++ b/components/engine/daemon/graphdriver/zfs/zfs.go @@ -13,6 +13,7 @@ import ( "time" "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/parsers" @@ -356,10 +357,10 @@ func (d *Driver) Remove(id string) error { } // Get returns the mountpoint for the given id after creating the target directories if necessary. 
-func (d *Driver) Get(id, mountLabel string) (string, error) { +func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { mountpoint := d.mountPath(id) if count := d.ctr.Increment(mountpoint); count > 1 { - return mountpoint, nil + return containerfs.NewLocalContainerFS(mountpoint), nil } filesystem := d.zfsPath(id) @@ -369,17 +370,17 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { d.ctr.Decrement(mountpoint) - return "", err + return nil, err } // Create the target directories if they don't exist if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { d.ctr.Decrement(mountpoint) - return "", err + return nil, err } if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { d.ctr.Decrement(mountpoint) - return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) + return nil, fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) } // this could be our first mount after creation of the filesystem, and the root dir may still have root @@ -387,10 +388,10 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { mount.Unmount(mountpoint) d.ctr.Decrement(mountpoint) - return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) + return nil, fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) } - return mountpoint, nil + return containerfs.NewLocalContainerFS(mountpoint), nil } // Put removes the existing mountpoint for the given id if it exists. @@ -416,5 +417,5 @@ func (d *Driver) Put(id string) error { func (d *Driver) Exists(id string) bool { d.Lock() defer d.Unlock() - return d.filesystemsCache[d.zfsPath(id)] == true + return d.filesystemsCache[d.zfsPath(id)] } diff --git a/components/engine/daemon/images.go b/components/engine/daemon/images.go index 9be6e0ec30..f4110ce789 100644 --- a/components/engine/daemon/images.go +++ b/components/engine/daemon/images.go @@ -301,12 +301,10 @@ func (daemon *Daemon) SquashImage(id, parent string) (string, error) { } defer daemon.stores[img.Platform()].layerStore.Release(newL) - var newImage image.Image - newImage = *img + newImage := *img newImage.RootFS = nil - var rootFS image.RootFS - rootFS = *parentImg.RootFS + rootFS := *parentImg.RootFS rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) newImage.RootFS = &rootFS diff --git a/components/engine/daemon/initlayer/setup_solaris.go b/components/engine/daemon/initlayer/setup_solaris.go index 66d53f0eef..57bc116b4b 100644 --- a/components/engine/daemon/initlayer/setup_solaris.go +++ b/components/engine/daemon/initlayer/setup_solaris.go @@ -2,12 +2,14 @@ package initlayer +import "github.com/docker/docker/pkg/containerfs" + // Setup populates a directory with mountpoints suitable // for bind-mounting dockerinit into the container. The mountpoint is simply an // empty file at /.dockerinit // // This extra layer is used by all containers as the top-most ro layer. It protects // the container from unwanted side-effects on the rw layer. 
-func Setup(initLayer string, rootUID, rootGID int) error { +func Setup(initLayer containerfs.ContainerFS, rootUID, rootGID int) error { return nil } diff --git a/components/engine/daemon/initlayer/setup_unix.go b/components/engine/daemon/initlayer/setup_unix.go index e26d3a05f1..a02cea6f37 100644 --- a/components/engine/daemon/initlayer/setup_unix.go +++ b/components/engine/daemon/initlayer/setup_unix.go @@ -7,6 +7,7 @@ import ( "path/filepath" "strings" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "golang.org/x/sys/unix" ) @@ -16,7 +17,10 @@ import ( // // This extra layer is used by all containers as the top-most ro layer. It protects // the container from unwanted side-effects on the rw layer. -func Setup(initLayer string, rootIDs idtools.IDPair) error { +func Setup(initLayerFs containerfs.ContainerFS, rootIDs idtools.IDPair) error { + // Since all paths are local to the container, we can just extract initLayerFs.Path() + initLayer := initLayerFs.Path() + for pth, typ := range map[string]string{ "/dev/pts": "dir", "/dev/shm": "dir", diff --git a/components/engine/daemon/initlayer/setup_windows.go b/components/engine/daemon/initlayer/setup_windows.go index 2b22f58b5e..b47563ebf6 100644 --- a/components/engine/daemon/initlayer/setup_windows.go +++ b/components/engine/daemon/initlayer/setup_windows.go @@ -3,6 +3,7 @@ package initlayer import ( + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" ) @@ -12,6 +13,6 @@ import ( // // This extra layer is used by all containers as the top-most ro layer. It protects // the container from unwanted side-effects on the rw layer. -func Setup(initLayer string, rootIDs idtools.IDPair) error { +func Setup(initLayer containerfs.ContainerFS, rootIDs idtools.IDPair) error { return nil } diff --git a/components/engine/daemon/kill.go b/components/engine/daemon/kill.go index 43981513db..bb3e87cae3 100644 --- a/components/engine/daemon/kill.go +++ b/components/engine/daemon/kill.go @@ -160,7 +160,7 @@ func (daemon *Daemon) Kill(container *containerpkg.Container) error { // Wait for exit with no timeout. // Ignore returned status. 
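Stepping back to the initlayer hunks just above: Setup now receives the init layer as a containerfs.ContainerFS, and the Unix implementation immediately unwraps it with initLayerFs.Path(). A hedged sketch of what a caller passes now, loosely modeled on the signatures shown rather than the literal daemon call site; initDir is a placeholder:

    // Sketch only: initDir is a placeholder host path for the init layer.
    func setupInitLayer(initDir string, rootIDs idtools.IDPair) error {
        return initlayer.Setup(containerfs.NewLocalContainerFS(initDir), rootIDs)
    }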
- _ = <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) + <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) return nil } diff --git a/components/engine/daemon/logger/factory.go b/components/engine/daemon/logger/factory.go index 32001590d9..2ef3bf8799 100644 --- a/components/engine/daemon/logger/factory.go +++ b/components/engine/daemon/logger/factory.go @@ -93,7 +93,7 @@ func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { lf.m.Lock() defer lf.m.Unlock() - c, _ := lf.optValidator[name] + c := lf.optValidator[name] return c } diff --git a/components/engine/daemon/logger/jsonfilelog/read.go b/components/engine/daemon/logger/jsonfilelog/read.go index 32425b5128..25fc99a984 100644 --- a/components/engine/daemon/logger/jsonfilelog/read.go +++ b/components/engine/daemon/logger/jsonfilelog/read.go @@ -137,8 +137,7 @@ func newSectionReader(f *os.File) (*io.SectionReader, error) { } func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { - var rdr io.Reader - rdr = f + rdr := io.Reader(f) if tail > 0 { ls, err := tailfile.TailFile(f, tail) if err != nil { diff --git a/components/engine/daemon/logger/ring.go b/components/engine/daemon/logger/ring.go index dea8f6b6a4..c89897cfde 100644 --- a/components/engine/daemon/logger/ring.go +++ b/components/engine/daemon/logger/ring.go @@ -202,7 +202,6 @@ func (r *messageRing) Close() { r.closed = true r.wait.Broadcast() r.mu.Unlock() - return } // Drain drains all messages from the queue. diff --git a/components/engine/daemon/logger/splunk/splunk.go b/components/engine/daemon/logger/splunk/splunk.go index 274904bec1..31a0487006 100644 --- a/components/engine/daemon/logger/splunk/splunk.go +++ b/components/engine/daemon/logger/splunk/splunk.go @@ -15,6 +15,7 @@ import ( "net/url" "os" "strconv" + "strings" "sync" "time" @@ -363,6 +364,11 @@ func (l *splunkLoggerJSON) Log(msg *logger.Message) error { } func (l *splunkLoggerRaw) Log(msg *logger.Message) error { + // empty or whitespace-only messages are not accepted by HEC + if strings.TrimSpace(string(msg.Line)) == "" { + return nil + } + message := l.createSplunkMessage(msg) message.Event = string(append(l.prefix, msg.Line...)) diff --git a/components/engine/daemon/logger/splunk/splunk_test.go b/components/engine/daemon/logger/splunk/splunk_test.go index cbe9a55bf9..ebf835c584 100644 --- a/components/engine/daemon/logger/splunk/splunk_test.go +++ b/components/engine/daemon/logger/splunk/splunk_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/docker/docker/daemon/logger" + "github.com/stretchr/testify/require" ) // Validate options @@ -125,7 +126,7 @@ func TestDefault(t *testing.T) { splunkLoggerDriver.nullMessage.Source != "" || splunkLoggerDriver.nullMessage.SourceType != "" || splunkLoggerDriver.nullMessage.Index != "" || - splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.gzipCompression || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || @@ -255,7 +256,7 @@ func TestInlineFormatWithNonDefaultOptions(t *testing.T) { splunkLoggerDriver.nullMessage.Source != "mysource" || splunkLoggerDriver.nullMessage.SourceType != "mysourcetype" || splunkLoggerDriver.nullMessage.Index != "myindex" || - splunkLoggerDriver.gzipCompression != true || + !splunkLoggerDriver.gzipCompression || splunkLoggerDriver.gzipCompressionLevel != 
gzip.DefaultCompression || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || @@ -355,7 +356,7 @@ func TestJsonFormat(t *testing.T) { splunkLoggerDriver.nullMessage.Source != "" || splunkLoggerDriver.nullMessage.SourceType != "" || splunkLoggerDriver.nullMessage.Index != "" || - splunkLoggerDriver.gzipCompression != true || + !splunkLoggerDriver.gzipCompression || splunkLoggerDriver.gzipCompressionLevel != gzip.BestSpeed || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || @@ -448,14 +449,10 @@ func TestRawFormat(t *testing.T) { } hostname, err := info.Hostname() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) loggerDriver, err := New(info) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if !hec.connectionVerified { t.Fatal("By default connection should be verified") @@ -472,7 +469,7 @@ func TestRawFormat(t *testing.T) { splunkLoggerDriver.nullMessage.Source != "" || splunkLoggerDriver.nullMessage.SourceType != "" || splunkLoggerDriver.nullMessage.Index != "" || - splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.gzipCompression || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || @@ -586,7 +583,7 @@ func TestRawFormatWithLabels(t *testing.T) { splunkLoggerDriver.nullMessage.Source != "" || splunkLoggerDriver.nullMessage.SourceType != "" || splunkLoggerDriver.nullMessage.Index != "" || - splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.gzipCompression || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || @@ -698,7 +695,7 @@ func TestRawFormatWithoutTag(t *testing.T) { splunkLoggerDriver.nullMessage.Source != "" || splunkLoggerDriver.nullMessage.SourceType != "" || splunkLoggerDriver.nullMessage.Index != "" || - splunkLoggerDriver.gzipCompression != false || + splunkLoggerDriver.gzipCompression || splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency || splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize || splunkLoggerDriver.bufferMaximum != defaultBufferMaximum || @@ -716,12 +713,19 @@ func TestRawFormatWithoutTag(t *testing.T) { if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil { t.Fatal(err) } + message3Time := time.Now() + if err := loggerDriver.Log(&logger.Message{Line: []byte(" "), Source: "stdout", Timestamp: message3Time}); err != nil { + t.Fatal(err) + } err = loggerDriver.Close() if err != nil { t.Fatal(err) } + // message3 would have an empty or whitespace only string in the "event" field + // both of which are not acceptable to HEC + // thus here we must expect 2 messages, not 3 if len(hec.messages) != 2 { t.Fatal("Expected two messages") } diff --git a/components/engine/daemon/metrics.go b/components/engine/daemon/metrics.go index 8cd363fb11..44228e7db4 100644 --- a/components/engine/daemon/metrics.go +++ b/components/engine/daemon/metrics.go @@ -169,5 +169,4 @@ func pluginStopMetricsCollection(p plugingetter.CompatPlugin) { logrus.WithError(err).WithField("name", 
p.Name()).WithField("socket", sockPath).Error("error unmounting metrics socket for plugin") } } - return } diff --git a/components/engine/daemon/names.go b/components/engine/daemon/names.go index e61e94008f..712df9fd0f 100644 --- a/components/engine/daemon/names.go +++ b/components/engine/daemon/names.go @@ -4,8 +4,8 @@ import ( "fmt" "strings" - "github.com/docker/docker/api" "github.com/docker/docker/container" + "github.com/docker/docker/daemon/names" "github.com/docker/docker/pkg/namesgenerator" "github.com/docker/docker/pkg/stringid" "github.com/pkg/errors" @@ -13,8 +13,8 @@ import ( ) var ( - validContainerNameChars = api.RestrictedNameChars - validContainerNamePattern = api.RestrictedNamePattern + validContainerNameChars = names.RestrictedNameChars + validContainerNamePattern = names.RestrictedNamePattern ) func (daemon *Daemon) registerName(container *container.Container) error { diff --git a/components/engine/api/names.go b/components/engine/daemon/names/names.go similarity index 96% rename from components/engine/api/names.go rename to components/engine/daemon/names/names.go index f147d1f4ce..26f6748a8f 100644 --- a/components/engine/api/names.go +++ b/components/engine/daemon/names/names.go @@ -1,4 +1,4 @@ -package api +package names import "regexp" diff --git a/components/engine/daemon/network.go b/components/engine/daemon/network.go index 62fe951cbb..92d49765c1 100644 --- a/components/engine/daemon/network.go +++ b/components/engine/daemon/network.go @@ -115,6 +115,7 @@ var ( func (daemon *Daemon) startIngressWorker() { ingressJobsChannel = make(chan *ingressJob, 100) go func() { + // nolint: gosimple for { select { case r := <-ingressJobsChannel: @@ -181,27 +182,8 @@ func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip logrus.Errorf("Failed getting ingress network by id after creating: %v", err) } - sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress()) - if err != nil { - if _, ok := err.(networktypes.ForbiddenError); !ok { - logrus.Errorf("Failed creating ingress sandbox: %v", err) - } - return - } - - ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil)) - if err != nil { - logrus.Errorf("Failed creating ingress endpoint: %v", err) - return - } - - if err := ep.Join(sb, nil); err != nil { - logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err) - return - } - - if err := sb.EnableService(); err != nil { - logrus.Errorf("Failed enabling service for ingress sandbox") + if err = daemon.createLoadBalancerSandbox("ingress", create.ID, ip, n, libnetwork.OptionIngress()); err != nil { + logrus.Errorf("Failed creating load balancer sandbox for ingress network: %v", err) } } @@ -232,7 +214,6 @@ func (daemon *Daemon) releaseIngress(id string) { logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err) return } - return } // SetNetworkBootstrapKeys sets the bootstrap keys. @@ -283,6 +264,34 @@ func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.N return resp, err } +func (daemon *Daemon) createLoadBalancerSandbox(prefix, id string, ip net.IP, n libnetwork.Network, options ...libnetwork.SandboxOption) error { + c := daemon.netController + sandboxName := prefix + "-sbox" + sb, err := c.NewSandbox(sandboxName, options...) 
+ if err != nil { + if _, ok := err.(networktypes.ForbiddenError); !ok { + return errors.Wrapf(err, "Failed creating %s sandbox", sandboxName) + } + return nil + } + + endpointName := prefix + "-endpoint" + ep, err := n.CreateEndpoint(endpointName, libnetwork.CreateOptionIpam(ip, nil, nil, nil), libnetwork.CreateOptionLoadBalancer()) + if err != nil { + return errors.Wrapf(err, "Failed creating %s in sandbox %s", endpointName, sandboxName) + } + + if err := ep.Join(sb, nil); err != nil { + return errors.Wrapf(err, "Failed joining %s to sandbox %s", endpointName, sandboxName) + } + + if err := sb.EnableService(); err != nil { + return errors.Wrapf(err, "Failed enabling service in %s sandbox", sandboxName) + } + + return nil +} + func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) { if runconfig.IsPreDefinedNetwork(create.Name) && !agent { err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name) @@ -360,6 +369,18 @@ func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string } daemon.LogNetworkEvent(n, "create") + if agent && !n.Info().Ingress() && n.Type() == "overlay" { + nodeIP, exists := daemon.GetLBAttachmentStore().GetLBIPForNetwork(id) + if !exists { + return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id) + } + + if err := daemon.createLoadBalancerSandbox(create.Name, id, nodeIP, n); err != nil { + return nil, err + } + + } + return &types.NetworkCreateResponse{ ID: n.ID(), Warning: warning, @@ -496,6 +517,31 @@ func (daemon *Daemon) DeleteNetwork(networkID string) error { return daemon.deleteNetwork(networkID, false) } +func (daemon *Daemon) deleteLoadBalancerSandbox(n libnetwork.Network) { + controller := daemon.netController + + //The only endpoint left should be the LB endpoint (nw.Name() + "-endpoint") + endpoints := n.Endpoints() + if len(endpoints) == 1 { + sandboxName := n.Name() + "-sbox" + + if err := endpoints[0].Info().Sandbox().DisableService(); err != nil { + logrus.Errorf("Failed to disable service on sandbox %s: %v", sandboxName, err) + //Ignore error and attempt to delete the load balancer endpoint + } + + if err := endpoints[0].Delete(true); err != nil { + logrus.Errorf("Failed to delete endpoint %s (%s) in %s: %v", endpoints[0].Name(), endpoints[0].ID(), sandboxName, err) + //Ignore error and attempt to delete the sandbox. + } + + if err := controller.SandboxDestroy(sandboxName); err != nil { + logrus.Errorf("Failed to delete %s sandbox: %v", sandboxName, err) + //Ignore error and attempt to delete the network. 
+ } + } +} + func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error { nw, err := daemon.FindNetwork(networkID) if err != nil { return err } @@ -517,6 +563,10 @@ func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error { return notAllowedError{err} } + if !nw.Info().Ingress() && nw.Type() == "overlay" { + daemon.deleteLoadBalancerSandbox(nw) + } + if err := nw.Delete(); err != nil { return err } diff --git a/components/engine/daemon/network/settings.go b/components/engine/daemon/network/settings.go index 8f6b7dd59e..0d7d5baf5a 100644 --- a/components/engine/daemon/network/settings.go +++ b/components/engine/daemon/network/settings.go @@ -1,9 +1,12 @@ package network import ( + "net" + networktypes "github.com/docker/docker/api/types/network" clustertypes "github.com/docker/docker/daemon/cluster/provider" "github.com/docker/go-connections/nat" + "github.com/pkg/errors" ) // Settings stores configuration details about the daemon network config @@ -31,3 +34,36 @@ type EndpointSettings struct { *networktypes.EndpointSettings IPAMOperational bool } + +// LBAttachmentStore stores the load balancer IP address for a network id. +type LBAttachmentStore struct { + //key: network id + //value: load balancer ip address + networkToNodeLBIP map[string]net.IP +} + +// ResetLBAttachments clears any existing load balancer IP to network mapping and +// sets the mapping to the given lbAttachments. +func (lbStore *LBAttachmentStore) ResetLBAttachments(lbAttachments map[string]string) error { + lbStore.ClearLBAttachments() + for nid, nodeIP := range lbAttachments { + ip, _, err := net.ParseCIDR(nodeIP) + if err != nil { + lbStore.networkToNodeLBIP = make(map[string]net.IP) + return errors.Wrapf(err, "Failed to parse load balancer address %s", nodeIP) + } + lbStore.networkToNodeLBIP[nid] = ip + } + return nil +} + +// ClearLBAttachments clears all the mappings of network to load balancer IP address. +func (lbStore *LBAttachmentStore) ClearLBAttachments() { + lbStore.networkToNodeLBIP = make(map[string]net.IP) +} + +// GetLBIPForNetwork returns the load balancer IP address for the given network.
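To make the new load-balancer bookkeeping concrete: the store maps a network ID to the node's load-balancer address, ResetLBAttachments ingests those addresses in CIDR form via net.ParseCIDR, and createNetwork earlier in this diff looks the parsed IP up with GetLBIPForNetwork before building the sandbox. A small illustrative sketch (the network ID and address are made up; fmt and the daemon/network package are assumed imported):

    func exampleLBStore() error {
        var store network.LBAttachmentStore
        if err := store.ResetLBAttachments(map[string]string{
            "nid123": "10.255.0.3/16", // invented network ID and CIDR
        }); err != nil {
            return err // a malformed address clears the map and surfaces the parse error
        }
        if ip, ok := store.GetLBIPForNetwork("nid123"); ok {
            fmt.Println(ip) // 10.255.0.3
        }
        return nil
    }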
+func (lbStore *LBAttachmentStore) GetLBIPForNetwork(networkID string) (net.IP, bool) { + ip, exists := lbStore.networkToNodeLBIP[networkID] + return ip, exists +} diff --git a/components/engine/daemon/oci_linux.go b/components/engine/daemon/oci_linux.go index 9cf6674dfe..0f8a392c26 100644 --- a/components/engine/daemon/oci_linux.go +++ b/components/engine/daemon/oci_linux.go @@ -19,7 +19,6 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/volume" "github.com/opencontainers/runc/libcontainer/apparmor" "github.com/opencontainers/runc/libcontainer/cgroups" @@ -29,6 +28,7 @@ import ( "github.com/sirupsen/logrus" ) +// nolint: gosimple var ( deviceCgroupRuleRegex = regexp.MustCompile("^([acb]) ([0-9]+|\\*):([0-9]+|\\*) ([rwm]{1,3})$") ) @@ -186,7 +186,7 @@ func setUser(s *specs.Spec, c *container.Container) error { } func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { - fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS) + fp, err := c.GetResourcePath(p) if err != nil { return nil, err } @@ -497,6 +497,7 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c // Filter out mounts from spec noIpc := c.HostConfig.IpcMode.IsNone() + // Filter out mounts that are overridden by user supplied mounts var defaultMounts []specs.Mount _, mountDev := userMounts["/dev"] for _, m := range s.Mounts { @@ -523,7 +524,8 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c if m.Source == "tmpfs" { data := m.Data - options := []string{"noexec", "nosuid", "nodev", string(volume.DefaultPropagationMode)} + parser := volume.NewParser("linux") + options := []string{"noexec", "nosuid", "nodev", string(parser.DefaultPropagationMode())} if data != "" { options = append(options, strings.Split(data, ",")...) 
} @@ -631,7 +633,7 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) return err } s.Root = &specs.Root{ - Path: c.BaseFS, + Path: c.BaseFS.Path(), Readonly: c.HostConfig.ReadonlyRootfs, } if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil { diff --git a/components/engine/daemon/oci_solaris.go b/components/engine/daemon/oci_solaris.go index 610efe10a1..45fa1e0ffe 100644 --- a/components/engine/daemon/oci_solaris.go +++ b/components/engine/daemon/oci_solaris.go @@ -2,7 +2,6 @@ package daemon import ( "fmt" - "path/filepath" "sort" "strconv" @@ -127,7 +126,7 @@ func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) return err } s.Root = specs.Root{ - Path: filepath.Dir(c.BaseFS), + Path: c.BaseFS.Dir(c.BaseFS.Path()), Readonly: c.HostConfig.ReadonlyRootfs, } if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil { diff --git a/components/engine/daemon/oci_windows.go b/components/engine/daemon/oci_windows.go index 0254351569..17b104049a 100644 --- a/components/engine/daemon/oci_windows.go +++ b/components/engine/daemon/oci_windows.go @@ -4,6 +4,7 @@ import ( "fmt" "io/ioutil" "path/filepath" + "runtime" "strings" containertypes "github.com/docker/docker/api/types/container" @@ -108,6 +109,11 @@ func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { if !mount.Writable { m.Options = append(m.Options, "ro") } + if img.OS != runtime.GOOS { + m.Type = "bind" + m.Options = append(m.Options, "rbind") + m.Options = append(m.Options, fmt.Sprintf("uvmpath=/tmp/gcs/%s/binds", c.ID)) + } s.Mounts = append(s.Mounts, m) } @@ -233,7 +239,7 @@ func (daemon *Daemon) createSpecWindowsFields(c *container.Container, s *specs.S s.Root.Readonly = false // Windows does not support a read-only root filesystem if !isHyperV { - s.Root.Path = c.BaseFS // This is not set for Hyper-V containers + s.Root.Path = c.BaseFS.Path() // This is not set for Hyper-V containers if !strings.HasSuffix(s.Root.Path, `\`) { s.Root.Path = s.Root.Path + `\` // Ensure a correctly formatted volume GUID path \\?\Volume{GUID}\ } diff --git a/components/engine/daemon/reload_test.go b/components/engine/daemon/reload_test.go index bf11b6bd56..3ff6b57735 100644 --- a/components/engine/daemon/reload_test.go +++ b/components/engine/daemon/reload_test.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/pkg/discovery" _ "github.com/docker/docker/pkg/discovery/memory" "github.com/docker/docker/registry" + "github.com/stretchr/testify/assert" ) func TestDaemonReloadLabels(t *testing.T) { @@ -46,8 +47,9 @@ func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) { configStore: &config.Config{}, } + var err error // Initialize daemon with some registries. - daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{ AllowNondistributableArtifacts: []string{ "127.0.0.0/8", "10.10.1.11:5000", @@ -56,6 +58,9 @@ func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) { "docker2.com", // This will be removed during reload. 
}, }) + if err != nil { + t.Fatal(err) + } registries := []string{ "127.0.0.0/8", @@ -85,20 +90,17 @@ func TestDaemonReloadAllowNondistributableArtifacts(t *testing.T) { for _, value := range serviceConfig.AllowNondistributableArtifactsCIDRs { actual = append(actual, value.String()) } - for _, value := range serviceConfig.AllowNondistributableArtifactsHostnames { - actual = append(actual, value) - } + actual = append(actual, serviceConfig.AllowNondistributableArtifactsHostnames...) sort.Strings(registries) sort.Strings(actual) - if !reflect.DeepEqual(registries, actual) { - t.Fatalf("expected %v, got %v\n", registries, actual) - } + assert.Equal(t, registries, actual) } func TestDaemonReloadMirrors(t *testing.T) { daemon := &Daemon{} - daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + var err error + daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{ InsecureRegistries: []string{}, Mirrors: []string{ "https://mirror.test1.com", @@ -106,6 +108,9 @@ func TestDaemonReloadMirrors(t *testing.T) { "https://mirror.test3.com", // this will be removed when reloading }, }) + if err != nil { + t.Fatal(err) + } daemon.configStore = &config.Config{} @@ -191,8 +196,9 @@ func TestDaemonReloadMirrors(t *testing.T) { func TestDaemonReloadInsecureRegistries(t *testing.T) { daemon := &Daemon{} + var err error // initialize daemon with existing insecure registries: "127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000" - daemon.RegistryService = registry.NewService(registry.ServiceOptions{ + daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{ InsecureRegistries: []string{ "127.0.0.0/8", "10.10.1.11:5000", @@ -201,6 +207,9 @@ func TestDaemonReloadInsecureRegistries(t *testing.T) { "docker2.com", // this will be removed when reloading }, }) + if err != nil { + t.Fatal(err) + } daemon.configStore = &config.Config{} diff --git a/components/engine/daemon/start.go b/components/engine/daemon/start.go index 55438cf2c4..de32a649d7 100644 --- a/components/engine/daemon/start.go +++ b/components/engine/daemon/start.go @@ -204,7 +204,7 @@ func (daemon *Daemon) Cleanup(container *container.Container) { daemon.unregisterExecCommand(container, eConfig) } - if container.BaseFS != "" { + if container.BaseFS != nil && container.BaseFS.Path() != "" { if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil { logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) } diff --git a/components/engine/daemon/stop.go b/components/engine/daemon/stop.go index c43fbfa73b..7eadba7e26 100644 --- a/components/engine/daemon/stop.go +++ b/components/engine/daemon/stop.go @@ -78,7 +78,7 @@ func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds i // 3. If it doesn't, then send SIGKILL if err := daemon.Kill(container); err != nil { // Wait without a timeout, ignore result. 
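On the reload tests above: they follow from registry.NewService now returning an error alongside the service, so call sites have to check it instead of assigning directly. A minimal sketch of the updated pattern in a test body like those above; the option value is illustrative:

    daemon := &Daemon{}
    var err error
    daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{
        InsecureRegistries: []string{"127.0.0.0/8"}, // illustrative value
    })
    if err != nil {
        t.Fatal(err) // non-test code would propagate the error instead
    }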
- _ = <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) + <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it } } diff --git a/components/engine/api/fixtures/keyfile b/components/engine/daemon/testdata/keyfile similarity index 100% rename from components/engine/api/fixtures/keyfile rename to components/engine/daemon/testdata/keyfile diff --git a/components/engine/daemon/top_unix.go b/components/engine/daemon/top_unix.go index ec2b1da8b7..22e88b702e 100644 --- a/components/engine/daemon/top_unix.go +++ b/components/engine/daemon/top_unix.go @@ -16,6 +16,7 @@ func validatePSArgs(psArgs string) error { // NOTE: \\s does not detect unicode whitespaces. // So we use fieldsASCII instead of strings.Fields in parsePSOutput. // See https://github.com/docker/docker/pull/24358 + // nolint: gosimple re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)") for _, group := range re.FindAllStringSubmatch(psArgs, -1) { if len(group) >= 3 { diff --git a/components/engine/daemon/trustkey.go b/components/engine/daemon/trustkey.go new file mode 100644 index 0000000000..cb33146f9b --- /dev/null +++ b/components/engine/daemon/trustkey.go @@ -0,0 +1,57 @@ +package daemon + +import ( + "encoding/json" + "encoding/pem" + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" + "github.com/docker/libtrust" +) + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +// TODO: this should use more of libtrust.LoadOrCreateTrustKey which may need +// a refactor or this function to be moved into libtrust +func loadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "") + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) + if err != nil { + return nil, fmt.Errorf("Error serializing key: %s", err) + } + if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} + +func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { + if ext == ".json" || ext == ".jwk" { + encoded, err = json.Marshal(key) + if err != nil { + return nil, fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + pemBlock, err := key.PEMBlock() + if err != nil { + return nil, fmt.Errorf("unable to encode private key PEM: %s", err) + } + encoded = pem.EncodeToMemory(pemBlock) + } + return +} diff --git a/components/engine/daemon/trustkey_test.go b/components/engine/daemon/trustkey_test.go new file mode 100644 index 0000000000..2ade2aa80d --- /dev/null +++ b/components/engine/daemon/trustkey_test.go @@ -0,0 +1,72 @@ +package daemon + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/docker/docker/internal/testutil" + "github.com/gotestyourself/gotestyourself/fs" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +// LoadOrCreateTrustKey +func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + require.NoError(t, err) + defer os.RemoveAll(tmpKeyFolderPath) + + tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") + require.NoError(t, err) + + _, err = loadOrCreateTrustKey(tmpKeyFile.Name()) + testutil.ErrorContains(t, err, "Error loading key file") +} + +func TestLoadOrCreateTrustKeyCreateKeyWhenFileDoesNotExist(t *testing.T) { + tmpKeyFolderPath := fs.NewDir(t, "api-trustkey-test") + defer tmpKeyFolderPath.Remove() + + // Without the need to create the folder hierarchy + tmpKeyFile := tmpKeyFolderPath.Join("keyfile") + + key, err := loadOrCreateTrustKey(tmpKeyFile) + require.NoError(t, err) + assert.NotNil(t, key) + + _, err = os.Stat(tmpKeyFile) + require.NoError(t, err, "key file doesn't exist") +} + +func TestLoadOrCreateTrustKeyCreateKeyWhenDirectoryDoesNotExist(t *testing.T) { + tmpKeyFolderPath := fs.NewDir(t, "api-trustkey-test") + defer tmpKeyFolderPath.Remove() + tmpKeyFile := tmpKeyFolderPath.Join("folder/hierarchy/keyfile") + + key, err := loadOrCreateTrustKey(tmpKeyFile) + require.NoError(t, err) + assert.NotNil(t, key) + + _, err = os.Stat(tmpKeyFile) + require.NoError(t, err, "key file doesn't exist") +} + +func TestLoadOrCreateTrustKeyCreateKeyNoPath(t *testing.T) { + defer os.Remove("keyfile") + key, err := loadOrCreateTrustKey("keyfile") + require.NoError(t, err) + assert.NotNil(t, key) + + _, err = os.Stat("keyfile") + require.NoError(t, err, "key file doesn't exist") +} + +func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { + tmpKeyFile := filepath.Join("testdata", "keyfile") + key, err := loadOrCreateTrustKey(tmpKeyFile) + require.NoError(t, err) + expected := "AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY" + assert.Contains(t, key.String(), expected) +} diff --git a/components/engine/daemon/volumes.go b/components/engine/daemon/volumes.go index b2f17dfab9..03bfda6f5d 100644 --- a/components/engine/daemon/volumes.go +++ b/components/engine/daemon/volumes.go @@ -75,6 +75,7 @@ func (m mounts) parts(i int) int { func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) { binds := map[string]bool{} mountPoints := map[string]*volume.MountPoint{} + parser := volume.NewParser(container.Platform) defer func() { // clean up the container mountpoints once return with error if retErr != nil { @@ -103,7 +104,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo // 2. Read volumes from other containers. for _, v := range hostConfig.VolumesFrom { - containerID, mode, err := volume.ParseVolumesFrom(v) + containerID, mode, err := parser.ParseVolumesFrom(v) if err != nil { return err } @@ -118,7 +119,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo Type: m.Type, Name: m.Name, Source: m.Source, - RW: m.RW && volume.ReadWrite(mode), + RW: m.RW && parser.ReadWrite(mode), Driver: m.Driver, Destination: m.Destination, Propagation: m.Propagation, @@ -140,7 +141,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo // 3. 
Read bind mounts for _, b := range hostConfig.Binds { - bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver) + bind, err := parser.ParseMountRaw(b, hostConfig.VolumeDriver) if err != nil { return err } @@ -172,7 +173,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo } for _, cfg := range hostConfig.Mounts { - mp, err := volume.ParseMountSpec(cfg) + mp, err := parser.ParseMountSpec(cfg) if err != nil { return validationError{err} } @@ -217,7 +218,7 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo // 4. Cleanup old volumes that are about to be reassigned. for _, m := range mountPoints { - if m.BackwardsCompatible() { + if parser.IsBackwardCompatible(m) { if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil { daemon.volumes.Dereference(mp.Volume, container.ID) } @@ -252,6 +253,8 @@ func (daemon *Daemon) backportMountSpec(container *container.Container) { container.Lock() defer container.Unlock() + parser := volume.NewParser(container.Platform) + maybeUpdate := make(map[string]bool) for _, mp := range container.MountPoints { if mp.Spec.Source != "" && mp.Type != "" { @@ -270,7 +273,7 @@ func (daemon *Daemon) backportMountSpec(container *container.Container) { binds := make(map[string]*volume.MountPoint, len(container.HostConfig.Binds)) for _, rawSpec := range container.HostConfig.Binds { - mp, err := volume.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver) + mp, err := parser.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver) if err != nil { logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport") continue @@ -280,7 +283,7 @@ func (daemon *Daemon) backportMountSpec(container *container.Container) { volumesFrom := make(map[string]volume.MountPoint) for _, fromSpec := range container.HostConfig.VolumesFrom { - from, _, err := volume.ParseVolumesFrom(fromSpec) + from, _, err := parser.ParseVolumesFrom(fromSpec) if err != nil { logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport") continue diff --git a/components/engine/daemon/volumes_unit_test.go b/components/engine/daemon/volumes_unit_test.go index 450d17f978..3f57f0ceee 100644 --- a/components/engine/daemon/volumes_unit_test.go +++ b/components/engine/daemon/volumes_unit_test.go @@ -1,6 +1,7 @@ package daemon import ( + "runtime" "testing" "github.com/docker/docker/volume" @@ -20,8 +21,10 @@ func TestParseVolumesFrom(t *testing.T) { {"foobar:baz", "", "", true}, } + parser := volume.NewParser(runtime.GOOS) + for _, c := range cases { - id, mode, err := volume.ParseVolumesFrom(c.spec) + id, mode, err := parser.ParseVolumesFrom(c.spec) if c.fail { if err == nil { t.Fatalf("Expected error, was nil, for spec %s\n", c.spec) diff --git a/components/engine/distribution/pull_v2.go b/components/engine/distribution/pull_v2.go index 08a24e5a5b..39bf782495 100644 --- a/components/engine/distribution/pull_v2.go +++ b/components/engine/distribution/pull_v2.go @@ -709,11 +709,16 @@ func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mf logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a os/arch match", ref, len(mfstList.Manifests)) var manifestDigest digest.Digest + // TODO @jhowardmsft LCOW Support: Need to remove the hard coding in LCOW mode. 
+ lookingForOS := runtime.GOOS + if system.LCOWSupported() { + lookingForOS = "linux" + } for _, manifestDescriptor := range mfstList.Manifests { // TODO(aaronl): The manifest list spec supports optional // "features" and "variant" fields. These are not yet used. // Once they are, their values should be interpreted here. - if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { + if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == lookingForOS { manifestDigest = manifestDescriptor.Digest logrus.Debugf("found match for %s/%s with media type %s, digest %s", runtime.GOOS, runtime.GOARCH, manifestDescriptor.MediaType, manifestDigest.String()) break diff --git a/components/engine/distribution/push_v2.go b/components/engine/distribution/push_v2.go index 5ceac8b7ea..1dfa881b48 100644 --- a/components/engine/distribution/push_v2.go +++ b/components/engine/distribution/push_v2.go @@ -395,12 +395,7 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress. defer layerUpload.Close() // upload the blob - desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload) - if err != nil { - return desc, err - } - - return desc, nil + return pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload) } func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { diff --git a/components/engine/distribution/registry_unit_test.go b/components/engine/distribution/registry_unit_test.go index d6b6ee8832..9653f29c03 100644 --- a/components/engine/distribution/registry_unit_test.go +++ b/components/engine/distribution/registry_unit_test.go @@ -1,21 +1,15 @@ package distribution import ( - "fmt" - "io/ioutil" "net/http" "net/http/httptest" "net/url" - "os" - "runtime" "strings" "testing" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/registry" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -42,12 +36,6 @@ func (h *tokenPassThruHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) } func testTokenPassThru(t *testing.T, ts *httptest.Server) { - tmp, err := testDirectory("") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - uri, err := url.Parse(ts.URL) if err != nil { t.Fatalf("could not parse url from test server: %v", err) @@ -137,36 +125,3 @@ func TestTokenPassThruDifferentHost(t *testing.T) { t.Fatal("Redirect should not forward Authorization header to another host") } } - -// testDirectory creates a new temporary directory and returns its path. -// The contents of directory at path `templateDir` is copied into the -// new directory. -func testDirectory(templateDir string) (dir string, err error) { - testID := stringid.GenerateNonCryptoID()[:4] - prefix := fmt.Sprintf("docker-test%s-%s-", testID, getCallerName(2)) - if prefix == "" { - prefix = "docker-test-" - } - dir, err = ioutil.TempDir("", prefix) - if err = os.Remove(dir); err != nil { - return - } - if templateDir != "" { - if err = archive.NewDefaultArchiver().CopyWithTar(templateDir, dir); err != nil { - return - } - } - return -} - -// getCallerName introspects the call stack and returns the name of the -// function `depth` levels down in the stack. -func getCallerName(depth int) string { - // Use the caller function name as a prefix. 
- // This helps trace temp directories back to their test. - pc, _, _, _ := runtime.Caller(depth + 1) - callerLongName := runtime.FuncForPC(pc).Name() - parts := strings.Split(callerLongName, ".") - callerShortName := parts[len(parts)-1] - return callerShortName -} diff --git a/components/engine/hack/dockerfile/binaries-commits b/components/engine/hack/dockerfile/binaries-commits index 3a1037c20e..4338a3b48b 100644 --- a/components/engine/hack/dockerfile/binaries-commits +++ b/components/engine/hack/dockerfile/binaries-commits @@ -3,11 +3,11 @@ TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a # When updating RUNC_COMMIT, also update runc in vendor.conf accordingly -RUNC_COMMIT=3f2f8b84a77f73d38244dd690525642a72156c64 +RUNC_COMMIT=1c81e2a794c6e26a4c650142ae8893c47f619764 CONTAINERD_COMMIT=06b9cb35161009dcb7123345749fef02f7cea8e0 TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574 LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e VNDR_COMMIT=9909bb2b8a0b7ea464527b376dc50389c90df587 # Linting -GOMETALINTER_COMMIT=f7b6e55301c9c67035003b7ba7f8a1cde532d338 +GOMETALINTER_COMMIT=bfcc1d6942136fd86eb6f1a6fb328de8398fbd80 diff --git a/components/engine/hack/integration-cli-on-swarm/agent/master/call.go b/components/engine/hack/integration-cli-on-swarm/agent/master/call.go index 858c2c0724..dab9c67077 100644 --- a/components/engine/hack/integration-cli-on-swarm/agent/master/call.go +++ b/components/engine/hack/integration-cli-on-swarm/agent/master/call.go @@ -73,14 +73,14 @@ func executeTests(funkerName string, testChunks [][]string) error { } log.Printf("Finished chunk %d [%d/%d] with %d test filters in %s, code=%d.", chunkID, passed+failed, len(testChunks), len(tests), - time.Now().Sub(chunkBegin), result.Code) + time.Since(chunkBegin), result.Code) } }(chunkID, tests) } wg.Wait() // TODO: print actual tests rather than chunks log.Printf("Executed %d chunks in %s. PASS: %d, FAIL: %d.", - len(testChunks), time.Now().Sub(begin), passed, failed) + len(testChunks), time.Since(begin), passed, failed) if failed > 0 { return fmt.Errorf("%d chunks failed", failed) } @@ -103,7 +103,7 @@ func executeTestChunk(funkerName string, args types.Args) (types.Result, error) func executeTestChunkWithRetry(funkerName string, args types.Args) (types.Result, error) { begin := time.Now() - for i := 0; time.Now().Sub(begin) < funkerRetryTimeout; i++ { + for i := 0; time.Since(begin) < funkerRetryTimeout; i++ { result, err := executeTestChunk(funkerName, args) if err == nil { log.Printf("executeTestChunk(%q, %d) returned code %d in trial %d", funkerName, args.ChunkID, result.Code, i) diff --git a/components/engine/hack/integration-cli-on-swarm/agent/worker/worker.go b/components/engine/hack/integration-cli-on-swarm/agent/worker/worker.go index 36ab3684d2..ea8bb3fe27 100644 --- a/components/engine/hack/integration-cli-on-swarm/agent/worker/worker.go +++ b/components/engine/hack/integration-cli-on-swarm/agent/worker/worker.go @@ -58,7 +58,7 @@ func handle(workerImageDigest string, executor testChunkExecutor) error { RawLog: rawLog, } } - elapsed := time.Now().Sub(begin) + elapsed := time.Since(begin) log.Printf("Finished chunk %d, code=%d, elapsed=%v", args.ChunkID, code, elapsed) return types.Result{ ChunkID: args.ChunkID, diff --git a/components/engine/hack/make.sh b/components/engine/hack/make.sh index 58e0d8cd62..c52e634f97 100755 --- a/components/engine/hack/make.sh +++ b/components/engine/hack/make.sh @@ -67,7 +67,7 @@ DEFAULT_BUNDLES=( tgz ) -VERSION=$(< ./VERSION) +VERSION=${VERSION:-$(< ./VERSION)} ! 
BUILDTIME=$(date -u -d "@${SOURCE_DATE_EPOCH:-$(date +%s)}" --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') if [ "$DOCKER_GITCOMMIT" ]; then GITCOMMIT="$DOCKER_GITCOMMIT" @@ -190,20 +190,18 @@ bundle() { } main() { - # We want this to fail if the bundles already exist and cannot be removed. - # This is to avoid mixing bundles from different versions of the code. - mkdir -p bundles - if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then - echo "bundles/$VERSION already exists. Removing." - rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1 + if [ -z "${KEEPBUNDLE-}" ]; then + echo "Removing bundles/" + rm -rf bundles/* echo fi + mkdir -p bundles + # Windows and symlinks don't get along well if [ "$(go env GOHOSTOS)" != 'windows' ]; then - # Windows and symlinks don't get along well - rm -f bundles/latest - ln -s "$VERSION" bundles/latest + # preserve latest symlink for backward compatibility + ln -sf . bundles/latest fi if [ $# -lt 1 ]; then @@ -212,7 +210,7 @@ main() { bundles=($@) fi for bundle in ${bundles[@]}; do - export DEST="bundles/$VERSION/$(basename "$bundle")" + export DEST="bundles/$(basename "$bundle")" # Cygdrive paths don't play well with go build -o. if [[ "$(uname -s)" == CYGWIN* ]]; then export DEST="$(cygpath -mw "$DEST")" diff --git a/components/engine/hack/make/.integration-test-helpers b/components/engine/hack/make/.integration-test-helpers index 2a5bd5f218..e3cb7d84a7 100644 --- a/components/engine/hack/make/.integration-test-helpers +++ b/components/engine/hack/make/.integration-test-helpers @@ -8,22 +8,32 @@ source "$SCRIPTDIR/make/.go-autogen" +# Set defaults : ${TEST_REPEAT:=1} +: ${TESTFLAGS:=} +: ${TESTDEBUG:=} -integration_api_dirs=("$( +integration_api_dirs=${TEST_INTEGRATION_DIR:-"$( find ./integration -type d | - grep -vE '^(./integration$|./integration/util)')") + grep -vE '^(./integration$|./integration/util)')"} run_test_integration() { + [[ "$TESTFLAGS" != *-check.f* ]] && run_test_integration_suites + run_test_integration_legacy_suites +} + +run_test_integration_suites() { local flags="-test.v -test.timeout=${TIMEOUT} $TESTFLAGS" for dir in $integration_api_dirs; do - ( + if !
( cd $dir echo "Running $PWD" test_env ./test.main $flags - ) + ); then exit 1; fi done +} +run_test_integration_legacy_suites() { ( flags="-check.v -check.timeout=${TIMEOUT} -test.timeout=360m $TESTFLAGS" cd integration-cli @@ -33,7 +43,7 @@ run_test_integration() { } build_test_suite_binaries() { - if [ $DOCKER_INTEGRATION_TESTS_VERIFIED ]; then + if [ ${DOCKER_INTEGRATION_TESTS_VERIFIED-} ]; then echo "Skipping building test binaries; as DOCKER_INTEGRATION_TESTS_VERIFIED is set" return fi diff --git a/components/engine/hack/make/test-integration b/components/engine/hack/make/test-integration index e419d66c6e..0100ac9cc7 100755 --- a/components/engine/hack/make/test-integration +++ b/components/engine/hack/make/test-integration @@ -1,5 +1,5 @@ #!/usr/bin/env bash -set -e +set -e -o pipefail source "${MAKEDIR}/.go-autogen" source hack/make/.integration-test-helpers diff --git a/components/engine/hack/test/e2e-run.sh b/components/engine/hack/test/e2e-run.sh new file mode 100755 index 0000000000..bfcc03f628 --- /dev/null +++ b/components/engine/hack/test/e2e-run.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +set -e + +TESTFLAGS=${TESTFLAGS:-""} +# Currently only DockerSuite and DockerNetworkSuite have been adapted for E2E testing +TESTFLAGS_LEGACY=${TESTFLAGS_LEGACY:-""} +TIMEOUT=${TIMEOUT:-60m} + +SCRIPTDIR="$(dirname ${BASH_SOURCE[0]})" + +export DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:-amd64} + +run_test_integration() { + run_test_integration_suites + run_test_integration_legacy_suites +} + +run_test_integration_suites() { + local flags="-test.v -test.timeout=${TIMEOUT} $TESTFLAGS" + for dir in /tests/integration/*; do + if ! ( + cd $dir + echo "Running $PWD" + ./test.main $flags + ); then exit 1; fi + done +} + +run_test_integration_legacy_suites() { + ( + flags="-check.v -check.timeout=${TIMEOUT} -test.timeout=360m $TESTFLAGS_LEGACY" + cd /tests/integration-cli + echo "Running $PWD" + ./test.main $flags + ) +} + +bash $SCRIPTDIR/ensure-emptyfs.sh + +echo "Run integration tests" +run_test_integration diff --git a/components/engine/hack/validate/gometalinter b/components/engine/hack/validate/gometalinter index 9830659d69..ae411e864a 100755 --- a/components/engine/hack/validate/gometalinter +++ b/components/engine/hack/validate/gometalinter @@ -3,4 +3,9 @@ set -e -o pipefail SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -gometalinter --config $SCRIPTDIR/gometalinter.json ./... +# CI platforms differ, so per-platform GOMETALINTER_OPTS can be set +# from a platform-specific Dockerfile, otherwise let's just set a +# (somewhat pessimistic) default of 10 minutes. +gometalinter \ + ${GOMETALINTER_OPTS:---deadline 10m} \ + --config $SCRIPTDIR/gometalinter.json ./...
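Note on the option handling in the gometalinter wrapper above: it relies on bash's ${VAR:-default} expansion, so a platform-specific Dockerfile can export GOMETALINTER_OPTS to override the linter options, while an unset or empty variable falls back to the default deadline (the expansion operator is ":-", so a default word that itself starts with "--" produces the visually odd ":---deadline" spelling). A minimal sketch of that behaviour, assuming nothing beyond plain bash; the 5m override below is an arbitrary example value, not taken from any Dockerfile in this patch:

#!/usr/bin/env bash
# Illustrative only: ${VAR:-word} expands to word when VAR is unset or empty,
# which is how hack/validate/gometalinter picks its fallback deadline.
unset GOMETALINTER_OPTS
echo "gometalinter ${GOMETALINTER_OPTS:---deadline 10m} --config gometalinter.json ./..."
# -> gometalinter --deadline 10m --config gometalinter.json ./...

export GOMETALINTER_OPTS="--deadline 5m"   # e.g. exported via ENV in a platform Dockerfile
echo "gometalinter ${GOMETALINTER_OPTS:---deadline 10m} --config gometalinter.json ./..."
# -> gometalinter --deadline 5m --config gometalinter.json ./...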
diff --git a/components/engine/hack/validate/gometalinter.json b/components/engine/hack/validate/gometalinter.json index 2e0a6c2f79..8e8da10a39 100644 --- a/components/engine/hack/validate/gometalinter.json +++ b/components/engine/hack/validate/gometalinter.json @@ -1,6 +1,6 @@ { "Vendor": true, - "Deadline": "2m", + "EnableGC": true, "Sort": ["linter", "severity", "path"], "Exclude": [ ".*\\.pb\\.go", @@ -8,15 +8,15 @@ "api/types/container/container_.*", "integration-cli/" ], - "Skip": [ - "integration-cli/" - ], + "Skip": ["integration-cli/"], "Enable": [ "deadcode", "gofmt", "goimports", "golint", + "gosimple", + "ineffassign", "interfacer", "unconvert", "vet" diff --git a/components/engine/image/fs_test.go b/components/engine/image/fs_test.go index 5464ab5171..2672524039 100644 --- a/components/engine/image/fs_test.go +++ b/components/engine/image/fs_test.go @@ -1,7 +1,6 @@ package image import ( - "bytes" "crypto/rand" "crypto/sha256" "encoding/hex" @@ -12,7 +11,7 @@ import ( "testing" "github.com/docker/docker/internal/testutil" - "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" "github.com/stretchr/testify/assert" ) @@ -112,9 +111,7 @@ func TestFSMetadataGetSet(t *testing.T) { actual, err := store.GetMetadata(tc.id, tc.key) assert.NoError(t, err) - if bytes.Compare(actual, tc.value) != 0 { - t.Fatalf("Metadata expected %q, got %q", tc.value, actual) - } + assert.Equal(t, tc.value, actual) } _, err = store.GetMetadata(id2, "tkey2") @@ -183,9 +180,7 @@ func TestFSGetSet(t *testing.T) { for _, tc := range tcases { data, err := store.Get(tc.expected) assert.NoError(t, err) - if bytes.Compare(data, tc.input) != 0 { - t.Fatalf("expected data %q, got %q", tc.input, data) - } + assert.Equal(t, tc.input, data) } } diff --git a/components/engine/image/store_test.go b/components/engine/image/store_test.go index e6c1746eff..23a60a98fa 100644 --- a/components/engine/image/store_test.go +++ b/components/engine/image/store_test.go @@ -41,10 +41,10 @@ func TestRestore(t *testing.T) { assert.Equal(t, "abc", img1.Comment) assert.Equal(t, "def", img2.Comment) - p, err := is.GetParent(ID(id1)) + _, err = is.GetParent(ID(id1)) testutil.ErrorContains(t, err, "failed to read metadata") - p, err = is.GetParent(ID(id2)) + p, err := is.GetParent(ID(id2)) assert.NoError(t, err) assert.Equal(t, ID(id1), p) diff --git a/components/engine/image/tarexport/load.go b/components/engine/image/tarexport/load.go index 480400cd64..8bb1ac166b 100644 --- a/components/engine/image/tarexport/load.go +++ b/components/engine/image/tarexport/load.go @@ -82,8 +82,7 @@ func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) if err := checkCompatibleOS(img.OS); err != nil { return err } - var rootFS image.RootFS - rootFS = *img.RootFS + rootFS := *img.RootFS rootFS.DiffIDs = nil if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { diff --git a/components/engine/integration-cli/check_test.go b/components/engine/integration-cli/check_test.go index 87517e7db7..2e57e00b34 100644 --- a/components/engine/integration-cli/check_test.go +++ b/components/engine/integration-cli/check_test.go @@ -67,7 +67,7 @@ func TestMain(m *testing.M) { func Test(t *testing.T) { cli.SetTestEnvironment(testEnv) fakestorage.SetTestEnvironment(&testEnv.Execution) - ienv.ProtectImages(t, &testEnv.Execution) + ienv.ProtectAll(t, &testEnv.Execution) check.TestingT(t) } @@ -79,6 +79,9 @@ type DockerSuite struct { } func (s *DockerSuite) OnTimeout(c *check.C) { + if 
!testEnv.IsLocalDaemon() { + return + } path := filepath.Join(os.Getenv("DEST"), "docker.pid") b, err := ioutil.ReadFile(path) if err != nil { @@ -91,7 +94,7 @@ func (s *DockerSuite) OnTimeout(c *check.C) { } daemonPid := int(rawPid) - if daemonPid > 0 && testEnv.IsLocalDaemon() { + if daemonPid > 0 { daemon.SignalDaemonDump(daemonPid) } } @@ -117,7 +120,7 @@ func (s *DockerRegistrySuite) OnTimeout(c *check.C) { } func (s *DockerRegistrySuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, registry.Hosting) + testRequires(c, DaemonIsLinux, registry.Hosting, SameHostDaemon) s.reg = setupRegistry(c, false, "", "") s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ Experimental: testEnv.ExperimentalDaemon(), @@ -151,7 +154,7 @@ func (s *DockerSchema1RegistrySuite) OnTimeout(c *check.C) { } func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, registry.Hosting, NotArm64) + testRequires(c, DaemonIsLinux, registry.Hosting, NotArm64, SameHostDaemon) s.reg = setupRegistry(c, true, "", "") s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ Experimental: testEnv.ExperimentalDaemon(), @@ -185,7 +188,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) OnTimeout(c *check.C) { } func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, registry.Hosting) + testRequires(c, DaemonIsLinux, registry.Hosting, SameHostDaemon) s.reg = setupRegistry(c, false, "htpasswd", "") s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ Experimental: testEnv.ExperimentalDaemon(), @@ -221,7 +224,7 @@ func (s *DockerRegistryAuthTokenSuite) OnTimeout(c *check.C) { } func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, registry.Hosting) + testRequires(c, DaemonIsLinux, registry.Hosting, SameHostDaemon) s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ Experimental: testEnv.ExperimentalDaemon(), }) @@ -316,7 +319,7 @@ func (s *DockerSwarmSuite) OnTimeout(c *check.C) { } func (s *DockerSwarmSuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SameHostDaemon) } func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemon.Swarm { @@ -468,7 +471,7 @@ func (ps *DockerPluginSuite) getPluginRepoWithTag() string { } func (ps *DockerPluginSuite) SetUpSuite(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, registry.Hosting) ps.registry = setupRegistry(c, false, "", "") ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) diff --git a/components/engine/integration-cli/cli/build/fakestorage/fixtures.go b/components/engine/integration-cli/cli/build/fakestorage/fixtures.go index 8a6bb137ad..b76a7d9207 100644 --- a/components/engine/integration-cli/cli/build/fakestorage/fixtures.go +++ b/components/engine/integration-cli/cli/build/fakestorage/fixtures.go @@ -39,27 +39,34 @@ func ensureHTTPServerImage(t testingT) { goarch = "amd64" } - goCmd, lookErr := exec.LookPath("go") - if lookErr != nil { - t.Fatalf("could not build http server: %v", lookErr) - } - - cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver") - cmd.Env = append(os.Environ(), []string{ - "CGO_ENABLED=0", - "GOOS=" + goos, - "GOARCH=" + goarch, - }...) 
- var out []byte - if out, err = cmd.CombinedOutput(); err != nil { - t.Fatalf("could not build http server: %s", string(out)) - } - cpCmd, lookErr := exec.LookPath("cp") if lookErr != nil { t.Fatalf("could not build http server: %v", lookErr) } - if out, err = exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil { + + if _, err = os.Stat("../contrib/httpserver/httpserver"); os.IsNotExist(err) { + goCmd, lookErr := exec.LookPath("go") + if lookErr != nil { + t.Fatalf("could not build http server: %v", lookErr) + } + + cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver") + cmd.Env = append(os.Environ(), []string{ + "CGO_ENABLED=0", + "GOOS=" + goos, + "GOARCH=" + goarch, + }...) + var out []byte + if out, err = cmd.CombinedOutput(); err != nil { + t.Fatalf("could not build http server: %s", string(out)) + } + } else { + if out, err := exec.Command(cpCmd, "../contrib/httpserver/httpserver", filepath.Join(tmp, "httpserver")).CombinedOutput(); err != nil { + t.Fatalf("could not copy http server: %v", string(out)) + } + } + + if out, err := exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil { t.Fatalf("could not build http server: %v", string(out)) } diff --git a/components/engine/integration-cli/docker_api_build_test.go b/components/engine/integration-cli/docker_api_build_test.go index 59b451024e..d428d78f0a 100644 --- a/components/engine/integration-cli/docker_api_build_test.go +++ b/components/engine/integration-cli/docker_api_build_test.go @@ -28,6 +28,7 @@ import ( func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *check.C) { testRequires(c, NotUserNamespace) + var testD string if testEnv.DaemonPlatform() == "windows" { testD = `FROM busybox diff --git a/components/engine/integration-cli/docker_api_containers_test.go b/components/engine/integration-cli/docker_api_containers_test.go index 554d68760a..173d5f80b0 100644 --- a/components/engine/integration-cli/docker_api_containers_test.go +++ b/components/engine/integration-cli/docker_api_containers_test.go @@ -30,6 +30,9 @@ import ( "github.com/docker/docker/volume" "github.com/docker/go-connections/nat" "github.com/go-check/check" + "github.com/gotestyourself/gotestyourself/poll" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/net/context" ) @@ -68,7 +71,6 @@ func (s *DockerSuite) TestContainerAPIGetJSONNoFieldsOmitted(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(containers, checker.HasLen, startCount+1) actual := fmt.Sprintf("%+v", containers[0]) - fmt.Println(actual) // empty Labels field triggered this bug, make sense to check for everything // cause even Ports for instance can trigger this bug @@ -1369,8 +1371,7 @@ func (s *DockerSuite) TestContainerAPICreateNoHostConfig118(c *check.C) { Image: "busybox", } - var httpClient *http.Client - cli, err := client.NewClient(daemonHost(), "v1.18", httpClient, map[string]string{}) + cli, err := request.NewEnvClientWithVersion("v1.18") _, err = cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, "") c.Assert(err, checker.IsNil) @@ -1406,16 +1407,6 @@ func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs( c.Assert(err.Error(), checker.Contains, "container rootfs is marked read-only") } -func (s *DockerSuite) TestContainerAPIGetContainersJSONEmpty(c 
*check.C) { - cli, err := client.NewEnvClient() - c.Assert(err, checker.IsNil) - defer cli.Close() - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true}) - c.Assert(err, checker.IsNil) - c.Assert(containers, checker.HasLen, 0) -} - func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *check.C) { // Not supported on Windows testRequires(c, DaemonIsLinux) @@ -1614,7 +1605,7 @@ func (s *DockerSuite) TestContainerAPIDeleteWithEmptyName(c *check.C) { defer cli.Close() err = cli.ContainerRemove(context.Background(), "", types.ContainerRemoveOptions{}) - c.Assert(err.Error(), checker.Contains, "Error response from daemon: page not found") + c.Assert(err.Error(), checker.Contains, "No such container") } func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) { @@ -1921,7 +1912,23 @@ func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) { {mounttypes.Mount{Type: "volume", Target: destPath + slash}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}}, {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test1"}, types.MountPoint{Type: "volume", Name: "test1", RW: true, Destination: destPath}}, {mounttypes.Mount{Type: "volume", Target: destPath, ReadOnly: true, Source: "test2"}, types.MountPoint{Type: "volume", Name: "test2", RW: false, Destination: destPath}}, - {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test3", VolumeOptions: &mounttypes.VolumeOptions{DriverConfig: &mounttypes.Driver{Name: volume.DefaultDriverName}}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", Name: "test3", RW: true, Destination: destPath}}, + { + mounttypes.Mount{ + Type: "volume", + Target: destPath, + Source: "test3", + VolumeOptions: &mounttypes.VolumeOptions{ + DriverConfig: &mounttypes.Driver{Name: volume.DefaultDriverName}, + }, + }, + types.MountPoint{ + Driver: volume.DefaultDriverName, + Type: "volume", + Name: "test3", + RW: true, + Destination: destPath, + }, + }, } if SameHostDaemon() { @@ -1930,7 +1937,19 @@ func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) { c.Assert(err, checker.IsNil) defer os.RemoveAll(tmpDir1) cases = append(cases, []testCase{ - {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir1}}, + { + mounttypes.Mount{ + Type: "bind", + Source: tmpDir1, + Target: destPath, + }, + types.MountPoint{ + Type: "bind", + RW: true, + Destination: destPath, + Source: tmpDir1, + }, + }, {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir1}}, }...) 
@@ -1968,58 +1987,83 @@ func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) { ID string `json:"Id"` } - cli, err := client.NewEnvClient() - c.Assert(err, checker.IsNil) - defer cli.Close() - + ctx := context.Background() + apiclient := testEnv.APIClient() for i, x := range cases { c.Logf("case %d - config: %v", i, x.cfg) - container, err := cli.ContainerCreate(context.Background(), &containertypes.Config{Image: testImg}, &containertypes.HostConfig{Mounts: []mounttypes.Mount{x.cfg}}, &networktypes.NetworkingConfig{}, "") - c.Assert(err, checker.IsNil) + container, err := apiclient.ContainerCreate( + ctx, + &containertypes.Config{Image: testImg}, + &containertypes.HostConfig{Mounts: []mounttypes.Mount{x.cfg}}, + &networktypes.NetworkingConfig{}, + "") + require.NoError(c, err) - id := container.ID + containerInspect, err := apiclient.ContainerInspect(ctx, container.ID) + require.NoError(c, err) + mps := containerInspect.Mounts + require.Len(c, mps, 1) + mountPoint := mps[0] - var mps []types.MountPoint - err = json.NewDecoder(strings.NewReader(inspectFieldJSON(c, id, "Mounts"))).Decode(&mps) - c.Assert(err, checker.IsNil) - c.Assert(mps, checker.HasLen, 1) - c.Assert(mps[0].Destination, checker.Equals, x.expected.Destination) + if x.expected.Source != "" { + assert.Equal(c, x.expected.Source, mountPoint.Source) + } + if x.expected.Name != "" { + assert.Equal(c, x.expected.Name, mountPoint.Name) + } + if x.expected.Driver != "" { + assert.Equal(c, x.expected.Driver, mountPoint.Driver) + } + if x.expected.Propagation != "" { + assert.Equal(c, x.expected.Propagation, mountPoint.Propagation) + } + assert.Equal(c, x.expected.RW, mountPoint.RW) + assert.Equal(c, x.expected.Type, mountPoint.Type) + assert.Equal(c, x.expected.Mode, mountPoint.Mode) + assert.Equal(c, x.expected.Destination, mountPoint.Destination) - if len(x.expected.Source) > 0 { - c.Assert(mps[0].Source, checker.Equals, x.expected.Source) - } - if len(x.expected.Name) > 0 { - c.Assert(mps[0].Name, checker.Equals, x.expected.Name) - } - if len(x.expected.Driver) > 0 { - c.Assert(mps[0].Driver, checker.Equals, x.expected.Driver) - } - c.Assert(mps[0].RW, checker.Equals, x.expected.RW) - c.Assert(mps[0].Type, checker.Equals, x.expected.Type) - c.Assert(mps[0].Mode, checker.Equals, x.expected.Mode) - if len(x.expected.Propagation) > 0 { - c.Assert(mps[0].Propagation, checker.Equals, x.expected.Propagation) - } + err = apiclient.ContainerStart(ctx, container.ID, types.ContainerStartOptions{}) + require.NoError(c, err) + poll.WaitOn(c, containerExit(apiclient, container.ID), poll.WithDelay(time.Second)) - out, _, err := dockerCmdWithError("start", "-a", id) - if (x.cfg.Type != "volume" || (x.cfg.VolumeOptions != nil && x.cfg.VolumeOptions.NoCopy)) && testEnv.DaemonPlatform() != "windows" { - c.Assert(err, checker.NotNil, check.Commentf("%s\n%v", out, mps[0])) - } else { - c.Assert(err, checker.IsNil, check.Commentf("%s\n%v", out, mps[0])) - } + err = apiclient.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{ + RemoveVolumes: true, + Force: true, + }) + require.NoError(c, err) - dockerCmd(c, "rm", "-fv", id) - if x.cfg.Type == "volume" && len(x.cfg.Source) > 0 { - // This should still exist even though we removed the container - dockerCmd(c, "volume", "inspect", mps[0].Name) - } else { - // This should be removed automatically when we removed the container - out, _, err := dockerCmdWithError("volume", "inspect", mps[0].Name) - c.Assert(err, checker.NotNil, check.Commentf(out)) + switch { + + // Named 
volumes still exist after the container is removed + case x.cfg.Type == "volume" && len(x.cfg.Source) > 0: + _, err := apiclient.VolumeInspect(ctx, mountPoint.Name) + require.NoError(c, err) + + // Bind mounts are never removed with the container + case x.cfg.Type == "bind": + + // anonymous volumes are removed + default: + _, err := apiclient.VolumeInspect(ctx, mountPoint.Name) + assert.True(c, client.IsErrNotFound(err)) } } } +func containerExit(apiclient client.APIClient, name string) func(poll.LogT) poll.Result { + return func(logT poll.LogT) poll.Result { + container, err := apiclient.ContainerInspect(context.Background(), name) + if err != nil { + return poll.Error(err) + } + switch container.State.Status { + case "created", "running": + return poll.Continue("container %s is %s, waiting for exit", name, container.State.Status) + } + return poll.Success() + } +} + func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) { testRequires(c, DaemonIsLinux) type testCase struct { diff --git a/components/engine/integration-cli/docker_api_images_test.go b/components/engine/integration-cli/docker_api_images_test.go index 8ad12fb77d..fba69dc682 100644 --- a/components/engine/integration-cli/docker_api_images_test.go +++ b/components/engine/integration-cli/docker_api_images_test.go @@ -119,7 +119,7 @@ func (s *DockerSuite) TestAPIImagesHistory(c *check.C) { } func (s *DockerSuite) TestAPIImagesImportBadSrc(c *check.C) { - testRequires(c, Network) + testRequires(c, Network, SameHostDaemon) server := httptest.NewServer(http.NewServeMux()) defer server.Close() @@ -179,8 +179,7 @@ func (s *DockerSuite) TestAPIImagesSizeCompatibility(c *check.C) { Labels map[string]string } - var httpClient *http.Client - cli, err = client.NewClient(daemonHost(), "v1.24", httpClient, nil) + cli, err = request.NewEnvClientWithVersion("v1.24") c.Assert(err, checker.IsNil) defer cli.Close() diff --git a/components/engine/integration-cli/docker_api_inspect_unix_test.go b/components/engine/integration-cli/docker_api_inspect_unix_test.go index 93c40947af..dae64be2a7 100644 --- a/components/engine/integration-cli/docker_api_inspect_unix_test.go +++ b/components/engine/integration-cli/docker_api_inspect_unix_test.go @@ -4,10 +4,9 @@ package main import ( "encoding/json" - "net/http" - "github.com/docker/docker/client" "github.com/docker/docker/integration-cli/checker" + "github.com/docker/docker/integration-cli/request" "github.com/go-check/check" "golang.org/x/net/context" ) @@ -19,8 +18,7 @@ func (s *DockerSuite) TestInspectAPICpusetInConfigPre120(c *check.C) { name := "cpusetinconfig-pre120" dockerCmd(c, "run", "--name", name, "--cpuset-cpus", "0", "busybox", "true") - var httpClient *http.Client - cli, err := client.NewClient(daemonHost(), "v1.19", httpClient, nil) + cli, err := request.NewEnvClientWithVersion("v1.19") c.Assert(err, checker.IsNil) defer cli.Close() _, body, err := cli.ContainerInspectWithRaw(context.Background(), name, false) diff --git a/components/engine/integration-cli/docker_api_network_test.go b/components/engine/integration-cli/docker_api_network_test.go index 129ec7ea69..a49fbae3d4 100644 --- a/components/engine/integration-cli/docker_api_network_test.go +++ b/components/engine/integration-cli/docker_api_network_test.go @@ -76,7 +76,7 @@ func (s *DockerSuite) TestAPINetworkFilter(c *check.C) { c.Assert(nr.Name, checker.Equals, "bridge") } -func (s *DockerSuite) TestAPINetworkInspect(c *check.C) { +func (s *DockerSuite) TestAPINetworkInspectBridge(c *check.C) { testRequires(c, 
DaemonIsLinux) // Inspect default bridge network nr := getNetworkResource(c, "bridge") @@ -94,13 +94,15 @@ func (s *DockerSuite) TestAPINetworkInspect(c *check.C) { c.Assert(nr.Internal, checker.Equals, false) c.Assert(nr.EnableIPv6, checker.Equals, false) c.Assert(nr.IPAM.Driver, checker.Equals, "default") - c.Assert(len(nr.Containers), checker.Equals, 1) c.Assert(nr.Containers[containerID], checker.NotNil) ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) c.Assert(err, checker.IsNil) c.Assert(ip.String(), checker.Equals, containerIP) +} +func (s *DockerSuite) TestAPINetworkInspectUserDefinedNetwork(c *check.C) { + testRequires(c, DaemonIsLinux) // IPAM configuration inspect ipam := &network.IPAM{ Driver: "default", @@ -117,7 +119,7 @@ func (s *DockerSuite) TestAPINetworkInspect(c *check.C) { id0 := createNetwork(c, config, true) c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, true) - nr = getNetworkResource(c, id0) + nr := getNetworkResource(c, id0) c.Assert(len(nr.IPAM.Config), checker.Equals, 1) c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") @@ -291,9 +293,16 @@ func getNetworkIDByName(c *check.C, name string) string { nJSON := []types.NetworkResource{} err = json.NewDecoder(body).Decode(&nJSON) c.Assert(err, checker.IsNil) - c.Assert(len(nJSON), checker.Equals, 1) + var res string + for _, n := range nJSON { + // Find exact match + if n.Name == name { + res = n.ID + } + } + c.Assert(res, checker.Not(checker.Equals), "") - return nJSON[0].ID + return res } func getNetworkResource(c *check.C, id string) *types.NetworkResource { diff --git a/components/engine/integration-cli/docker_api_swarm_config_test.go b/components/engine/integration-cli/docker_api_swarm_config_test.go index c06f3c45ea..c01d80ff7f 100644 --- a/components/engine/integration-cli/docker_api_swarm_config_test.go +++ b/components/engine/integration-cli/docker_api_swarm_config_test.go @@ -55,10 +55,8 @@ func (s *DockerSwarmSuite) TestAPISwarmConfigsDelete(c *check.C) { c.Assert(err, checker.IsNil) defer cli.Close() - expected := "no such config" - _, _, err = cli.ConfigInspectWithRaw(context.Background(), id) - c.Assert(err.Error(), checker.Contains, expected) + c.Assert(err.Error(), checker.Contains, "No such config") } func (s *DockerSwarmSuite) TestAPISwarmConfigsUpdate(c *check.C) { diff --git a/components/engine/integration-cli/docker_api_swarm_secret_test.go b/components/engine/integration-cli/docker_api_swarm_secret_test.go index db346dd283..c30e2237d8 100644 --- a/components/engine/integration-cli/docker_api_swarm_secret_test.go +++ b/components/engine/integration-cli/docker_api_swarm_secret_test.go @@ -64,14 +64,12 @@ func (s *DockerSwarmSuite) TestAPISwarmSecretsDelete(c *check.C) { c.Assert(err, checker.IsNil) defer cli.Close() - expected := "no such secret" _, _, err = cli.SecretInspectWithRaw(context.Background(), id) - c.Assert(err.Error(), checker.Contains, expected) + c.Assert(err.Error(), checker.Contains, "No such secret") id = "non-existing" - expected = "secret non-existing not found" err = cli.SecretRemove(context.Background(), id) - c.Assert(err.Error(), checker.Contains, expected) + c.Assert(err.Error(), checker.Contains, "No such secret: non-existing") } func (s *DockerSwarmSuite) TestAPISwarmSecretsUpdate(c *check.C) { diff --git a/components/engine/integration-cli/docker_api_version_test.go b/components/engine/integration-cli/docker_api_version_test.go deleted file mode 100644 index 
0b995f7a15..0000000000 --- a/components/engine/integration-cli/docker_api_version_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "github.com/docker/docker/client" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/integration-cli/checker" - "github.com/go-check/check" - "golang.org/x/net/context" -) - -func (s *DockerSuite) TestGetVersion(c *check.C) { - cli, err := client.NewEnvClient() - c.Assert(err, checker.IsNil) - defer cli.Close() - - v, err := cli.ServerVersion(context.Background()) - c.Assert(v.Version, checker.Equals, dockerversion.Version, check.Commentf("Version mismatch")) -} diff --git a/components/engine/integration-cli/docker_api_volumes_test.go b/components/engine/integration-cli/docker_api_volumes_test.go index 5e3d6a929a..65a9652092 100644 --- a/components/engine/integration-cli/docker_api_volumes_test.go +++ b/components/engine/integration-cli/docker_api_volumes_test.go @@ -16,16 +16,27 @@ import ( func (s *DockerSuite) TestVolumesAPIList(c *check.C) { prefix, _ := getPrefixAndSlashFromDaemonPlatform() - dockerCmd(c, "run", "-v", prefix+"/foo", "busybox") + cid, _ := dockerCmd(c, "run", "-d", "-v", prefix+"/foo", "busybox") cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) defer cli.Close() + container, err := cli.ContainerInspect(context.Background(), strings.TrimSpace(cid)) + c.Assert(err, checker.IsNil) + vname := container.Mounts[0].Name + volumes, err := cli.VolumeList(context.Background(), filters.Args{}) c.Assert(err, checker.IsNil) - c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) + found := false + for _, vol := range volumes.Volumes { + if vol.Name == vname { + found = true + break + } + } + c.Assert(found, checker.Equals, true) } func (s *DockerSuite) TestVolumesAPICreate(c *check.C) { @@ -45,21 +56,21 @@ func (s *DockerSuite) TestVolumesAPICreate(c *check.C) { func (s *DockerSuite) TestVolumesAPIRemove(c *check.C) { prefix, _ := getPrefixAndSlashFromDaemonPlatform() - dockerCmd(c, "run", "-v", prefix+"/foo", "--name=test", "busybox") + cid, _ := dockerCmd(c, "run", "-d", "-v", prefix+"/foo", "--name=test", "busybox") cli, err := client.NewEnvClient() c.Assert(err, checker.IsNil) defer cli.Close() - volumes, err := cli.VolumeList(context.Background(), filters.Args{}) + container, err := cli.ContainerInspect(context.Background(), strings.TrimSpace(cid)) c.Assert(err, checker.IsNil) + vname := container.Mounts[0].Name - v := volumes.Volumes[0] - err = cli.VolumeRemove(context.Background(), v.Name, false) + err = cli.VolumeRemove(context.Background(), vname, false) c.Assert(err.Error(), checker.Contains, "volume is in use") dockerCmd(c, "rm", "-f", "test") - err = cli.VolumeRemove(context.Background(), v.Name, false) + err = cli.VolumeRemove(context.Background(), vname, false) c.Assert(err, checker.IsNil) } @@ -78,10 +89,6 @@ func (s *DockerSuite) TestVolumesAPIInspect(c *check.C) { _, err = cli.VolumeCreate(context.Background(), config) c.Assert(err, check.IsNil) - volumes, err := cli.VolumeList(context.Background(), filters.Args{}) - c.Assert(err, checker.IsNil) - c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) - vol, err := cli.VolumeInspect(context.Background(), config.Name) c.Assert(err, checker.IsNil) c.Assert(vol.Name, checker.Equals, config.Name) diff --git a/components/engine/integration-cli/docker_cli_attach_unix_test.go b/components/engine/integration-cli/docker_cli_attach_unix_test.go index 78f55e043d..e40d7cf39c 
100644 --- a/components/engine/integration-cli/docker_cli_attach_unix_test.go +++ b/components/engine/integration-cli/docker_cli_attach_unix_test.go @@ -173,7 +173,7 @@ func (s *DockerSuite) TestAttachDetach(c *check.C) { c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) go func() { - dockerCmd(c, "kill", id) + dockerCmdWithResult("kill", id) }() select { @@ -225,7 +225,7 @@ func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) { c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) go func() { - dockerCmd(c, "kill", id) + dockerCmdWithResult("kill", id) }() select { diff --git a/components/engine/integration-cli/docker_cli_authz_plugin_v2_test.go b/components/engine/integration-cli/docker_cli_authz_plugin_v2_test.go index b8bbcc75e1..30026f7fcd 100644 --- a/components/engine/integration-cli/docker_cli_authz_plugin_v2_test.go +++ b/components/engine/integration-cli/docker_cli_authz_plugin_v2_test.go @@ -4,7 +4,6 @@ package main import ( "fmt" - "strings" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/daemon" @@ -31,7 +30,7 @@ type DockerAuthzV2Suite struct { } func (s *DockerAuthzV2Suite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, Network) + testRequires(c, DaemonIsLinux, Network, SameHostDaemon) s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ Experimental: testEnv.ExperimentalDaemon(), }) @@ -47,6 +46,7 @@ func (s *DockerAuthzV2Suite) TearDownTest(c *check.C) { func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) { testRequires(c, DaemonIsLinux, IsAmd64, Network) + // Install authz plugin _, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag) c.Assert(err, checker.IsNil) @@ -65,14 +65,8 @@ func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) { }() // Ensure docker run command and accompanying docker ps are successful - out, err := s.d.Cmd("run", "-d", "busybox", "top") + _, err = s.d.Cmd("run", "-d", "busybox", "top") c.Assert(err, check.IsNil) - - id := strings.TrimSpace(out) - - out, err = s.d.Cmd("ps") - c.Assert(err, check.IsNil) - c.Assert(assertContainerList(out, []string{id}), check.Equals, true) } func (s *DockerAuthzV2Suite) TestAuthZPluginDisable(c *check.C) { diff --git a/components/engine/integration-cli/docker_cli_authz_unix_test.go b/components/engine/integration-cli/docker_cli_authz_unix_test.go index f46ab806ca..8a1bd023ea 100644 --- a/components/engine/integration-cli/docker_cli_authz_unix_test.go +++ b/components/engine/integration-cli/docker_cli_authz_unix_test.go @@ -64,6 +64,7 @@ type authorizationController struct { } func (s *DockerAuthzSuite) SetUpTest(c *check.C) { + testRequires(c, SameHostDaemon) s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ Experimental: testEnv.ExperimentalDaemon(), }) @@ -218,12 +219,6 @@ func (s *DockerAuthzSuite) TestAuthZPluginAllowRequest(c *check.C) { id := strings.TrimSpace(out) assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", id)) - - out, err = s.d.Cmd("ps") - c.Assert(err, check.IsNil) - c.Assert(assertContainerList(out, []string{id}), check.Equals, true) - c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) - c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) } func (s *DockerAuthzSuite) TestAuthZPluginTls(c *check.C) { diff --git 
a/components/engine/integration-cli/docker_cli_build_unix_test.go b/components/engine/integration-cli/docker_cli_build_unix_test.go index dbcf00b5d4..91a329fae8 100644 --- a/components/engine/integration-cli/docker_cli_build_unix_test.go +++ b/components/engine/integration-cli/docker_cli_build_unix_test.go @@ -27,17 +27,18 @@ import ( func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { testRequires(c, cpuCfsQuota) name := "testbuildresourceconstraints" + buildLabel := "DockerSuite.TestBuildResourceConstraintsAreUsed" ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(` FROM hello-world:frozen RUN ["/hello"] `)) cli.Docker( - cli.Args("build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "-t", name, "."), + cli.Args("build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "--label="+buildLabel, "-t", name, "."), cli.InDir(ctx.Dir), ).Assert(c, icmd.Success) - out := cli.DockerCmd(c, "ps", "-lq").Combined() + out := cli.DockerCmd(c, "ps", "-lq", "--filter", "label="+buildLabel).Combined() cID := strings.TrimSpace(out) type hostConfig struct { diff --git a/components/engine/integration-cli/docker_cli_by_digest_test.go b/components/engine/integration-cli/docker_cli_by_digest_test.go index c7115c88c6..0c682719a4 100644 --- a/components/engine/integration-cli/docker_cli_by_digest_test.go +++ b/components/engine/integration-cli/docker_cli_by_digest_test.go @@ -407,6 +407,8 @@ func (s *DockerRegistrySuite) TestInspectImageWithDigests(c *check.C) { } func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c *check.C) { + existingContainers := ExistingContainerIDs(c) + digest, err := setupImage(c) c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) @@ -438,7 +440,7 @@ func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c // Valid imageReference out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageReference) - checkPsAncestorFilterOutput(c, out, imageReference, expectedIDs) + checkPsAncestorFilterOutput(c, RemoveOutputForExistingElements(out, existingContainers), imageReference, expectedIDs) } func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) { diff --git a/components/engine/integration-cli/docker_cli_config_ls_test.go b/components/engine/integration-cli/docker_cli_config_ls_test.go index 5c07012614..5f002bcab6 100644 --- a/components/engine/integration-cli/docker_cli_config_ls_test.go +++ b/components/engine/integration-cli/docker_cli_config_ls_test.go @@ -11,6 +11,7 @@ import ( ) func (s *DockerSwarmSuite) TestConfigList(c *check.C) { + testRequires(c, SameHostDaemon) d := s.AddDaemon(c, true, true) testName0 := "test0" diff --git a/components/engine/integration-cli/docker_cli_cp_utils_test.go b/components/engine/integration-cli/docker_cli_cp_utils_test.go index e517fc0f37..402a87ea90 100644 --- a/components/engine/integration-cli/docker_cli_cp_utils_test.go +++ b/components/engine/integration-cli/docker_cli_cp_utils_test.go @@ -193,9 +193,7 @@ func runDockerCp(c *check.C, src, dst string, params []string) (err error) { args := []string{"cp"} - for _, param := range params { - args = append(args, param) - } + args = append(args, params...) 
args = append(args, src, dst) diff --git a/components/engine/integration-cli/docker_cli_daemon_test.go b/components/engine/integration-cli/docker_cli_daemon_test.go index 7e954b9b55..ccf50543e9 100644 --- a/components/engine/integration-cli/docker_cli_daemon_test.go +++ b/components/engine/integration-cli/docker_cli_daemon_test.go @@ -1825,7 +1825,7 @@ func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *check.C) { defer s.d.Stop(c) // pull a repository large enough to fill the mount point - pullOut, err := s.d.Cmd("pull", "registry:2") + pullOut, err := s.d.Cmd("pull", "debian:stretch") c.Assert(err, checker.NotNil, check.Commentf(pullOut)) c.Assert(pullOut, checker.Contains, "no space left on device") } @@ -2159,9 +2159,9 @@ func (s *DockerDaemonSuite) TestRunLinksChanged(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonStartWithoutColors(c *check.C) { - testRequires(c, DaemonIsLinux, NotPpc64le) + testRequires(c, DaemonIsLinux) - infoLog := "\x1b[34mINFO\x1b" + infoLog := "\x1b[36mINFO\x1b" b := bytes.NewBuffer(nil) done := make(chan bool) @@ -2209,7 +2209,7 @@ func (s *DockerDaemonSuite) TestDaemonStartWithoutColors(c *check.C) { } func (s *DockerDaemonSuite) TestDaemonDebugLog(c *check.C) { - testRequires(c, DaemonIsLinux, NotPpc64le) + testRequires(c, DaemonIsLinux) debugLog := "\x1b[37mDEBU\x1b" diff --git a/components/engine/integration-cli/docker_cli_events_test.go b/components/engine/integration-cli/docker_cli_events_test.go index b36f0be14e..e179a0ebd3 100644 --- a/components/engine/integration-cli/docker_cli_events_test.go +++ b/components/engine/integration-cli/docker_cli_events_test.go @@ -36,7 +36,7 @@ func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) { // List of available time formats to --since unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) } rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) } - duration := func(t time.Time) string { return time.Now().Sub(t).String() } + duration := func(t time.Time) string { return time.Since(t).String() } // --since=$start must contain only the 'untag' event for _, f := range []func(time.Time) string{unixTs, rfc3339, duration} { diff --git a/components/engine/integration-cli/docker_cli_events_unix_test.go b/components/engine/integration-cli/docker_cli_events_unix_test.go index a2d67069de..afac998e02 100644 --- a/components/engine/integration-cli/docker_cli_events_unix_test.go +++ b/components/engine/integration-cli/docker_cli_events_unix_test.go @@ -97,7 +97,7 @@ func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) { }() c.Assert(waitRun("oomTrue"), checker.IsNil) - defer dockerCmd(c, "kill", "oomTrue") + defer dockerCmdWithResult("kill", "oomTrue") containerID := inspectField(c, "oomTrue", "Id") testActions := map[string]chan bool{ diff --git a/components/engine/integration-cli/docker_cli_experimental_test.go b/components/engine/integration-cli/docker_cli_experimental_test.go deleted file mode 100644 index 0a496fd26c..0000000000 --- a/components/engine/integration-cli/docker_cli_experimental_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "strings" - - "github.com/docker/docker/integration-cli/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestExperimentalVersionTrue(c *check.C) { - testExperimentalInVersion(c, ExperimentalDaemon, "*true") -} - -func (s *DockerSuite) TestExperimentalVersionFalse(c *check.C) { - testExperimentalInVersion(c, NotExperimentalDaemon, "*false") -} - -func testExperimentalInVersion(c *check.C, 
requirement func() bool, expectedValue string) { - testRequires(c, requirement) - out, _ := dockerCmd(c, "version") - for _, line := range strings.Split(out, "\n") { - if strings.HasPrefix(strings.TrimSpace(line), "Experimental:") { - c.Assert(line, checker.Matches, expectedValue) - return - } - } - - c.Fatal(`"Experimental" not found in version output`) -} diff --git a/components/engine/integration-cli/docker_cli_external_graphdriver_unix_test.go b/components/engine/integration-cli/docker_cli_external_graphdriver_unix_test.go index 16023c9a75..8e766bcc31 100644 --- a/components/engine/integration-cli/docker_cli_external_graphdriver_unix_test.go +++ b/components/engine/integration-cli/docker_cli_external_graphdriver_unix_test.go @@ -198,12 +198,13 @@ func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ex return } + // TODO @gupta-ak: Figure out what to do here. dir, err := driver.Get(req.ID, req.MountLabel) if err != nil { respond(w, err) return } - respond(w, &graphDriverResponse{Dir: dir}) + respond(w, &graphDriverResponse{Dir: dir.Path()}) }) mux.HandleFunc("/GraphDriver.Put", func(w http.ResponseWriter, r *http.Request) { diff --git a/components/engine/integration-cli/docker_cli_external_volume_driver_unix_test.go b/components/engine/integration-cli/docker_cli_external_volume_driver_unix_test.go index 5fe417c2c8..2e2de972d5 100644 --- a/components/engine/integration-cli/docker_cli_external_volume_driver_unix_test.go +++ b/components/engine/integration-cli/docker_cli_external_volume_driver_unix_test.go @@ -50,6 +50,7 @@ type DockerExternalVolumeSuite struct { } func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) { + testRequires(c, SameHostDaemon) s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ Experimental: testEnv.ExperimentalDaemon(), }) diff --git a/components/engine/integration-cli/docker_cli_health_test.go b/components/engine/integration-cli/docker_cli_health_test.go index 0f78a41d87..20b2bc255d 100644 --- a/components/engine/integration-cli/docker_cli_health_test.go +++ b/components/engine/integration-cli/docker_cli_health_test.go @@ -39,6 +39,8 @@ func getHealth(c *check.C, name string) *types.Health { func (s *DockerSuite) TestHealth(c *check.C) { testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows + existingContainers := ExistingContainerIDs(c) + imageName := "testhealth" buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox RUN echo OK > /status @@ -49,9 +51,10 @@ func (s *DockerSuite) TestHealth(c *check.C) { // No health status before starting name := "test_health" - dockerCmd(c, "create", "--name", name, imageName) - out, _ := dockerCmd(c, "ps", "-a", "--format={{.Status}}") - c.Check(out, checker.Equals, "Created\n") + cid, _ := dockerCmd(c, "create", "--name", name, imageName) + out, _ := dockerCmd(c, "ps", "-a", "--format={{.ID}} {{.Status}}") + out = RemoveOutputForExistingElements(out, existingContainers) + c.Check(out, checker.Equals, cid[:12]+" Created\n") // Inspect the options out, _ = dockerCmd(c, "inspect", diff --git a/components/engine/integration-cli/docker_cli_info_test.go b/components/engine/integration-cli/docker_cli_info_test.go index d75974dfc9..b6f867373b 100644 --- a/components/engine/integration-cli/docker_cli_info_test.go +++ b/components/engine/integration-cli/docker_cli_info_test.go @@ -135,42 +135,48 @@ func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) { func (s *DockerSuite) TestInfoDisplaysRunningContainers(c *check.C) { testRequires(c, 
DaemonIsLinux) + existing := existingContainerStates(c) + dockerCmd(c, "run", "-d", "busybox", "top") out, _ := dockerCmd(c, "info") - c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", existing["Containers"]+1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", existing["ContainersRunning"]+1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", existing["ContainersPaused"])) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", existing["ContainersStopped"])) } func (s *DockerSuite) TestInfoDisplaysPausedContainers(c *check.C) { testRequires(c, IsPausable) + existing := existingContainerStates(c) + out := runSleepingContainer(c, "-d") cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "pause", cleanedContainerID) out, _ = dockerCmd(c, "info") - c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", existing["Containers"]+1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", existing["ContainersRunning"])) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", existing["ContainersPaused"]+1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", existing["ContainersStopped"])) } func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) { testRequires(c, DaemonIsLinux) + existing := existingContainerStates(c) + out, _ := dockerCmd(c, "run", "-d", "busybox", "top") cleanedContainerID := strings.TrimSpace(out) dockerCmd(c, "stop", cleanedContainerID) out, _ = dockerCmd(c, "info") - c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 1)) + c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", existing["Containers"]+1)) + c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", existing["ContainersRunning"])) + c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", existing["ContainersPaused"])) + c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", existing["ContainersStopped"]+1)) } func (s *DockerSuite) TestInfoDebug(c *check.C) { @@ -237,3 +243,16 @@ func (s *DockerDaemonSuite) TestInfoLabels(c *check.C) { c.Assert(err, checker.IsNil) c.Assert(out, checker.Contains, "WARNING: labels with duplicate keys and conflicting values have been deprecated") } + +func existingContainerStates(c *check.C) map[string]int { + out, _ := dockerCmd(c, "info", "--format", "{{json .}}") + var m map[string]interface{} + err := json.Unmarshal([]byte(out), &m) + c.Assert(err, checker.IsNil) + res := map[string]int{} + res["Containers"] = int(m["Containers"].(float64)) + res["ContainersRunning"] = int(m["ContainersRunning"].(float64)) + res["ContainersPaused"] = int(m["ContainersPaused"].(float64)) + res["ContainersStopped"] = int(m["ContainersStopped"].(float64)) + return res +} diff --git 
a/components/engine/integration-cli/docker_cli_kill_test.go b/components/engine/integration-cli/docker_cli_kill_test.go index ea1c269812..0a5aac5f8c 100644 --- a/components/engine/integration-cli/docker_cli_kill_test.go +++ b/components/engine/integration-cli/docker_cli_kill_test.go @@ -1,13 +1,12 @@ package main import ( - "net/http" "strings" "time" - "github.com/docker/docker/client" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/request" "github.com/go-check/check" "github.com/gotestyourself/gotestyourself/icmd" "golang.org/x/net/context" @@ -131,8 +130,7 @@ func (s *DockerSuite) TestKillStoppedContainerAPIPre120(c *check.C) { testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later runSleepingContainer(c, "--name", "docker-kill-test-api", "-d") dockerCmd(c, "stop", "docker-kill-test-api") - var httpClient *http.Client - cli, err := client.NewClient(daemonHost(), "v1.19", httpClient, nil) + cli, err := request.NewEnvClientWithVersion("v1.19") c.Assert(err, check.IsNil) defer cli.Close() err = cli.ContainerKill(context.Background(), "docker-kill-test-api", "SIGKILL") diff --git a/components/engine/integration-cli/docker_cli_logs_test.go b/components/engine/integration-cli/docker_cli_logs_test.go index 4f14634b82..d9523bffcc 100644 --- a/components/engine/integration-cli/docker_cli_logs_test.go +++ b/components/engine/integration-cli/docker_cli_logs_test.go @@ -214,14 +214,15 @@ func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) { func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { // TODO Windows: Fix this test for TP5. testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 600000;yes X | head -c 200000`) + expected := 150000 + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", fmt.Sprintf("usleep 600000; yes X | head -c %d", expected)) id := strings.TrimSpace(out) stopSlowRead := make(chan bool) go func() { - exec.Command(dockerBinary, "wait", id).Run() + dockerCmd(c, "wait", id) stopSlowRead <- true }() @@ -238,8 +239,9 @@ func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { bytes2, err := ConsumeWithSpeed(stdout, 32*1024, 0, nil) c.Assert(err, checker.IsNil) + c.Assert(logCmd.Wait(), checker.IsNil) + actual := bytes1 + bytes2 - expected := 200000 c.Assert(actual, checker.Equals, expected) } @@ -288,6 +290,7 @@ func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) { c.Assert(<-chErr, checker.IsNil) c.Assert(cmd.Process.Kill(), checker.IsNil) r.Close() + cmd.Wait() // NGoroutines is not updated right away, so we need to wait before failing c.Assert(waitForGoroutines(nroutines), checker.IsNil) } @@ -303,6 +306,7 @@ func (s *DockerSuite) TestLogsFollowGoroutinesNoOutput(c *check.C) { c.Assert(cmd.Start(), checker.IsNil) time.Sleep(200 * time.Millisecond) c.Assert(cmd.Process.Kill(), checker.IsNil) + cmd.Wait() // NGoroutines is not updated right away, so we need to wait before failing c.Assert(waitForGoroutines(nroutines), checker.IsNil) diff --git a/components/engine/integration-cli/docker_cli_network_unix_test.go b/components/engine/integration-cli/docker_cli_network_unix_test.go index 2fc0724171..4762e3993c 100644 --- a/components/engine/integration-cli/docker_cli_network_unix_test.go +++ b/components/engine/integration-cli/docker_cli_network_unix_test.go @@ -10,7 +10,6 @@ import ( "net/http" "net/http/httptest" "os" - "path/filepath" "strings" "time" @@ 
-288,39 +287,6 @@ func (s *DockerNetworkSuite) TestDockerNetworkLsDefault(c *check.C) { } } -func (s *DockerSuite) TestNetworkLsFormat(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "network", "ls", "--format", "{{.Name}}") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - - expected := []string{"bridge", "host", "none"} - var names []string - names = append(names, lines...) - c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) -} - -func (s *DockerSuite) TestNetworkLsFormatDefaultFormat(c *check.C) { - testRequires(c, DaemonIsLinux) - - config := `{ - "networksFormat": "{{ .Name }} default" -}` - d, err := ioutil.TempDir("", "integration-cli-") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(d) - - err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) - c.Assert(err, checker.IsNil) - - out, _ := dockerCmd(c, "--config", d, "network", "ls") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - - expected := []string{"bridge default", "host default", "none default"} - var names []string - names = append(names, lines...) - c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) -} - func (s *DockerNetworkSuite) TestDockerNetworkCreatePredefined(c *check.C) { predefined := []string{"bridge", "host", "none", "default"} for _, net := range predefined { @@ -351,6 +317,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkRmPredefined(c *check.C) { } func (s *DockerNetworkSuite) TestDockerNetworkLsFilter(c *check.C) { + testRequires(c, OnlyDefaultNetworks) testNet := "testnet1" testLabel := "foo" testValue := "bar" @@ -624,6 +591,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnect(c *check.C) { } func (s *DockerNetworkSuite) TestDockerNetworkIPAMMultipleNetworks(c *check.C) { + testRequires(c, SameHostDaemon) // test0 bridge network dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test1") assertNwIsAvailable(c, "test1") @@ -664,6 +632,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIPAMMultipleNetworks(c *check.C) { } func (s *DockerNetworkSuite) TestDockerNetworkCustomIPAM(c *check.C) { + testRequires(c, SameHostDaemon) // Create a bridge network using custom ipam driver dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "br0") assertNwIsAvailable(c, "br0") @@ -679,6 +648,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkCustomIPAM(c *check.C) { } func (s *DockerNetworkSuite) TestDockerNetworkIPAMOptions(c *check.C) { + testRequires(c, SameHostDaemon) // Create a bridge network using custom ipam driver and options dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "--ipam-opt", "opt1=drv1", "--ipam-opt", "opt2=drv2", "br0") assertNwIsAvailable(c, "br0") @@ -691,6 +661,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIPAMOptions(c *check.C) { } func (s *DockerNetworkSuite) TestDockerNetworkNullIPAMDriver(c *check.C) { + testRequires(c, SameHostDaemon) // Create a network with null ipam driver _, _, err := dockerCmdWithError("network", "create", "-d", dummyNetworkDriver, "--ipam-driver", "null", "test000") c.Assert(err, check.IsNil) @@ -796,6 +767,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkIPAMInvalidCombinations(c *check.C } func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) { + testRequires(c, SameHostDaemon) dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, 
"-o", "opt1=drv1", "-o", "opt2=drv2", "testopt") assertNwIsAvailable(c, "testopt") gopts := remoteDriverNetworkRequest.Options[netlabel.GenericData] @@ -981,6 +953,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkLinkOnDefaultNetworkOnly(c *check. } func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) { + testRequires(c, SameHostDaemon) // Verify exposed ports are present in ps output when running a container on // a network managed by a driver which does not provide the default gateway // for the container @@ -1007,7 +980,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) { } func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) + testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon) dnd := "dnd" did := "did" @@ -1048,6 +1021,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C } func (s *DockerNetworkSuite) TestDockerNetworkMacInspect(c *check.C) { + testRequires(c, SameHostDaemon) // Verify endpoint MAC address is correctly populated in container's network settings nwn := "ov" ctn := "bb" @@ -1113,6 +1087,7 @@ func verifyContainerIsConnectedToNetworks(c *check.C, d *daemon.Daemon, cName st } func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRestart(c *check.C) { + testRequires(c, SameHostDaemon) cName := "bb" nwList := []string{"nw1", "nw2", "nw3"} @@ -1131,6 +1106,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRest } func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRestart(c *check.C) { + testRequires(c, SameHostDaemon) cName := "cc" nwList := []string{"nw1", "nw2", "nw3"} @@ -1157,7 +1133,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *check.C) { } func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) + testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon) s.d.StartWithBusybox(c) // Run a few containers on host network @@ -1283,6 +1259,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkRestartWithMultipleNetworks(c *che } func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContainer(c *check.C) { + testRequires(c, SameHostDaemon) dockerCmd(c, "network", "create", "test") dockerCmd(c, "create", "--name=foo", "busybox", "top") dockerCmd(c, "network", "connect", "test", "foo") @@ -1810,7 +1787,7 @@ func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromBridge(c *check.C) { // TestConntrackFlowsLeak covers the failure scenario of ticket: https://github.com/docker/docker/issues/8795 // Validates that conntrack is correctly cleaned once a container is destroyed func (s *DockerNetworkSuite) TestConntrackFlowsLeak(c *check.C) { - testRequires(c, IsAmd64, DaemonIsLinux, Network) + testRequires(c, IsAmd64, DaemonIsLinux, Network, SameHostDaemon) // Create a new network cli.DockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") diff --git a/components/engine/integration-cli/docker_cli_port_test.go b/components/engine/integration-cli/docker_cli_port_test.go index bcb87f5f33..84058cda10 100644 --- a/components/engine/integration-cli/docker_cli_port_test.go +++ b/components/engine/integration-cli/docker_cli_port_test.go @@ -5,6 +5,7 @@ import ( "net" "regexp" "sort" + "strconv" 
"strings" "github.com/docker/docker/integration-cli/checker" @@ -148,9 +149,8 @@ func (s *DockerSuite) TestPortList(c *check.C) { out, _ = dockerCmd(c, "port", ID) - err = assertPortList(c, out, []string{ - "80/tcp -> 0.0.0.0:8000", - "80/udp -> 0.0.0.0:8000"}) + // Running this test multiple times causes the TCP port to increment. + err = assertPortRange(c, out, []int{8000, 8080}, []int{8000, 8080}) // Port list is not correct c.Assert(err, checker.IsNil) dockerCmd(c, "rm", "-f", ID) @@ -173,6 +173,38 @@ func assertPortList(c *check.C, out string, expected []string) error { return nil } +func assertPortRange(c *check.C, out string, expectedTcp, expectedUdp []int) error { + lines := strings.Split(strings.Trim(out, "\n "), "\n") + + var validTcp, validUdp bool + for _, l := range lines { + // 80/tcp -> 0.0.0.0:8015 + port, err := strconv.Atoi(strings.Split(l, ":")[1]) + if err != nil { + return err + } + if strings.Contains(l, "tcp") && expectedTcp != nil { + if port < expectedTcp[0] || port > expectedTcp[1] { + return fmt.Errorf("tcp port (%d) not in range expected range %d-%d", port, expectedTcp[0], expectedTcp[1]) + } + validTcp = true + } + if strings.Contains(l, "udp") && expectedUdp != nil { + if port < expectedUdp[0] || port > expectedUdp[1] { + return fmt.Errorf("udp port (%d) not in range expected range %d-%d", port, expectedUdp[0], expectedUdp[1]) + } + validUdp = true + } + } + if !validTcp { + return fmt.Errorf("tcp port not found") + } + if !validUdp { + return fmt.Errorf("udp port not found") + } + return nil +} + func stopRemoveContainer(id string, c *check.C) { dockerCmd(c, "rm", "-f", id) } diff --git a/components/engine/integration-cli/docker_cli_prune_unix_test.go b/components/engine/integration-cli/docker_cli_prune_unix_test.go index bea4f4fbda..edd3eab621 100644 --- a/components/engine/integration-cli/docker_cli_prune_unix_test.go +++ b/components/engine/integration-cli/docker_cli_prune_unix_test.go @@ -19,13 +19,21 @@ import ( func pruneNetworkAndVerify(c *check.C, d *daemon.Swarm, kept, pruned []string) { _, err := d.Cmd("network", "prune", "--force") c.Assert(err, checker.IsNil) - out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") - c.Assert(err, checker.IsNil) + for _, s := range kept { - c.Assert(out, checker.Contains, s) + waitAndAssert(c, defaultReconciliationTimeout, func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + return out, nil + }, checker.Contains, s) } + for _, s := range pruned { - c.Assert(out, checker.Not(checker.Contains), s) + waitAndAssert(c, defaultReconciliationTimeout, func(*check.C) (interface{}, check.CommentInterface) { + out, err := d.Cmd("network", "ls", "--format", "{{.Name}}") + c.Assert(err, checker.IsNil) + return out, nil + }, checker.Not(checker.Contains), s) } } @@ -64,6 +72,7 @@ func (s *DockerSwarmSuite) TestPruneNetwork(c *check.C) { _, err = d.Cmd("service", "rm", serviceName) c.Assert(err, checker.IsNil) waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0) + pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"}) } diff --git a/components/engine/integration-cli/docker_cli_ps_test.go b/components/engine/integration-cli/docker_cli_ps_test.go index 736103e776..bea1261202 100644 --- a/components/engine/integration-cli/docker_cli_ps_test.go +++ b/components/engine/integration-cli/docker_cli_ps_test.go @@ -19,6 +19,8 @@ import ( ) func (s *DockerSuite) TestPsListContainersBase(c 
*check.C) { + existingContainers := ExistingContainerIDs(c) + out := runSleepingContainer(c, "-d") firstID := strings.TrimSpace(out) @@ -43,79 +45,79 @@ func (s *DockerSuite) TestPsListContainersBase(c *check.C) { // all out, _ = dockerCmd(c, "ps", "-a") - c.Assert(assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out)) // running out, _ = dockerCmd(c, "ps") - c.Assert(assertContainerList(out, []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out)) // limit out, _ = dockerCmd(c, "ps", "-n=2", "-a") expected := []string{fourthID, thirdID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-n=2") - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out)) // filter since out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-a") expected = []string{fourthID, thirdID, secondID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "since="+firstID) expected = []string{fourthID, secondID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "since="+thirdID) expected = []string{fourthID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) // filter before out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-a") expected = []string{thirdID, secondID, firstID} - c.Assert(assertContainerList(out, expected), 
checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID) expected = []string{secondID, firstID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "before="+thirdID) expected = []string{secondID, firstID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) // filter since & before out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-a") expected = []string{thirdID, secondID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID) expected = []string{secondID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out)) // filter since & limit out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2", "-a") expected = []string{fourthID, thirdID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2") - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out)) // filter before & limit out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1", "-a") expected = []string{thirdID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", 
out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1") - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) // filter since & filter before & limit out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1", "-a") expected = []string{thirdID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1") - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) + c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) } @@ -185,6 +187,8 @@ func (s *DockerSuite) TestPsListContainersSize(c *check.C) { } func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { + existingContainers := ExistingContainerIDs(c) + // start exited container out := cli.DockerCmd(c, "run", "-d", "busybox").Combined() firstID := strings.TrimSpace(out) @@ -199,11 +203,11 @@ func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { // filter containers by exited out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=exited").Combined() containerOut := strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, firstID) + c.Assert(RemoveOutputForExistingElements(containerOut, existingContainers), checker.Equals, firstID) out = cli.DockerCmd(c, "ps", "-a", "--no-trunc", "-q", "--filter=status=running").Combined() containerOut = strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, secondID) + c.Assert(RemoveOutputForExistingElements(containerOut, existingContainers), checker.Equals, secondID) result := cli.Docker(cli.Args("ps", "-a", "-q", "--filter=status=rubbish"), cli.WithTimeout(time.Second*60)) result.Assert(c, icmd.Expected{ @@ -222,11 +226,12 @@ func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=paused").Combined() containerOut = strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, pausedID) + c.Assert(RemoveOutputForExistingElements(containerOut, existingContainers), checker.Equals, pausedID) } } func (s *DockerSuite) TestPsListContainersFilterHealth(c *check.C) { + existingContainers := ExistingContainerIDs(c) // Test legacy no health check out := runSleepingContainer(c, "--name=none_legacy") containerID := strings.TrimSpace(out) @@ -264,7 +269,7 @@ func (s *DockerSuite) 
TestPsListContainersFilterHealth(c *check.C) { waitForHealthStatus(c, "passing_container", "starting", "healthy") out = cli.DockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=healthy").Combined() - containerOut = strings.TrimSpace(out) + containerOut = strings.TrimSpace(RemoveOutputForExistingElements(out, existingContainers)) c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for healthy filter, output: %q", containerID, containerOut, out)) } @@ -305,6 +310,8 @@ func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) { // - Run containers for each of those image (busybox, images_ps_filter_test1, images_ps_filter_test2) // - Filter them out :P func (s *DockerSuite) TestPsListContainersFilterAncestorImage(c *check.C) { + existingContainers := ExistingContainerIDs(c) + // Build images imageName1 := "images_ps_filter_test1" buildImageSuccessfully(c, imageName1, build.WithDockerfile(`FROM busybox @@ -367,12 +374,12 @@ func (s *DockerSuite) TestPsListContainersFilterAncestorImage(c *check.C) { var out string for _, filter := range filterTestSuite { out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+filter.filterName) - checkPsAncestorFilterOutput(c, out, filter.filterName, filter.expectedIDs) + checkPsAncestorFilterOutput(c, RemoveOutputForExistingElements(out, existingContainers), filter.filterName, filter.expectedIDs) } // Multiple ancestor filter out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageName2, "--filter=ancestor="+imageName1Tagged) - checkPsAncestorFilterOutput(c, out, imageName2+","+imageName1Tagged, []string{fourthID, fifthID}) + checkPsAncestorFilterOutput(c, RemoveOutputForExistingElements(out, existingContainers), imageName2+","+imageName1Tagged, []string{fourthID, fifthID}) } func checkPsAncestorFilterOutput(c *check.C, out string, filterName string, expectedIDs []string) { @@ -469,6 +476,9 @@ func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) { func (s *DockerSuite) TestPsRightTagName(c *check.C) { // TODO Investigate further why this fails on Windows to Windows CI testRequires(c, DaemonIsLinux) + + existingContainers := ExistingContainerNames(c) + tag := "asybox:shmatest" dockerCmd(c, "tag", "busybox", tag) @@ -490,6 +500,7 @@ func (s *DockerSuite) TestPsRightTagName(c *check.C) { out, _ = dockerCmd(c, "ps", "--no-trunc") lines := strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) // skip header lines = lines[1:] c.Assert(lines, checker.HasLen, 3, check.Commentf("There should be 3 running container, got %d", len(lines))) @@ -511,6 +522,7 @@ func (s *DockerSuite) TestPsRightTagName(c *check.C) { func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) { // Problematic on Windows as it doesn't support links as of Jan 2016 testRequires(c, DaemonIsLinux) + existingContainers := ExistingContainerIDs(c) runSleepingContainer(c, "--name=first") runSleepingContainer(c, "--name=second", "--link=first:first") @@ -518,6 +530,7 @@ func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) { lines := strings.Split(strings.TrimSpace(string(out)), "\n") // strip header lines = lines[1:] + lines = RemoveLinesForExistingElements(lines, existingContainers) expected := []string{"second", "first,second/first"} var names []string for _, l := range lines { @@ -581,12 +594,14 @@ func (s *DockerSuite) TestPsListContainersFilterCreated(c *check.C) { func (s *DockerSuite) TestPsFormatMultiNames(c 
*check.C) { // Problematic on Windows as it doesn't support link as of Jan 2016 testRequires(c, DaemonIsLinux) + existingContainers := ExistingContainerNames(c) //create 2 containers and link them dockerCmd(c, "run", "--name=child", "-d", "busybox", "top") dockerCmd(c, "run", "--name=parent", "--link=child:linkedone", "-d", "busybox", "top") //use the new format capabilities to only list the names and --no-trunc to get all names out, _ := dockerCmd(c, "ps", "--format", "{{.Names}}", "--no-trunc") + out = RemoveOutputForExistingElements(out, existingContainers) lines := strings.Split(strings.TrimSpace(string(out)), "\n") expected := []string{"parent", "child,parent/linkedone"} var names []string @@ -595,6 +610,7 @@ func (s *DockerSuite) TestPsFormatMultiNames(c *check.C) { //now list without turning off truncation and make sure we only get the non-link names out, _ = dockerCmd(c, "ps", "--format", "{{.Names}}") + out = RemoveOutputForExistingElements(out, existingContainers) lines = strings.Split(strings.TrimSpace(string(out)), "\n") expected = []string{"parent", "child"} var truncNames []string @@ -604,30 +620,22 @@ func (s *DockerSuite) TestPsFormatMultiNames(c *check.C) { // Test for GitHub issue #21772 func (s *DockerSuite) TestPsNamesMultipleTime(c *check.C) { + existingContainers := ExistingContainerNames(c) runSleepingContainer(c, "--name=test1") runSleepingContainer(c, "--name=test2") //use the new format capabilities to list the names twice out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Names}}") lines := strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) expected := []string{"test2 test2", "test1 test1"} var names []string names = append(names, lines...) c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with names displayed twice: %v, got: %v", expected, names)) } -func (s *DockerSuite) TestPsFormatHeaders(c *check.C) { - // make sure no-container "docker ps" still prints the header row - out, _ := dockerCmd(c, "ps", "--format", "table {{.ID}}") - c.Assert(out, checker.Equals, "CONTAINER ID\n", check.Commentf(`Expected 'CONTAINER ID\n', got %v`, out)) - - // verify that "docker ps" with a container still prints the header row also - runSleepingContainer(c, "--name=test") - out, _ = dockerCmd(c, "ps", "--format", "table {{.Names}}") - c.Assert(out, checker.Equals, "NAMES\ntest\n", check.Commentf(`Expected 'NAMES\ntest\n', got %v`, out)) -} - func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) { + existingContainers := ExistingContainerIDs(c) config := `{ "psFormat": "default {{ .ID }}" }` @@ -642,6 +650,7 @@ func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) { id := strings.TrimSpace(out) out, _ = dockerCmd(c, "--config", d, "ps", "-q") + out = RemoveOutputForExistingElements(out, existingContainers) c.Assert(id, checker.HasPrefix, strings.TrimSpace(out), check.Commentf("Expected to print only the container id, got %v\n", out)) } @@ -652,6 +661,8 @@ func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { originalImageName := "busybox:TestPsImageIDAfterUpdate-original" updatedImageName := "busybox:TestPsImageIDAfterUpdate-updated" + existingContainers := ExistingContainerIDs(c) + icmd.RunCommand(dockerBinary, "tag", "busybox:latest", originalImageName).Assert(c, icmd.Success) originalImageID := getIDByName(c, originalImageName) @@ -664,6 +675,7 @@ func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { result.Assert(c, icmd.Success) lines := 
strings.Split(strings.TrimSpace(string(result.Combined())), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) // skip header lines = lines[1:] c.Assert(len(lines), checker.Equals, 1) @@ -680,6 +692,7 @@ func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { result.Assert(c, icmd.Success) lines = strings.Split(strings.TrimSpace(string(result.Combined())), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) // skip header lines = lines[1:] c.Assert(len(lines), checker.Equals, 1) @@ -710,6 +723,8 @@ func (s *DockerSuite) TestPsNotShowPortsOfStoppedContainer(c *check.C) { } func (s *DockerSuite) TestPsShowMounts(c *check.C) { + existingContainers := ExistingContainerNames(c) + prefix, slash := getPrefixAndSlashFromDaemonPlatform() mp := prefix + slash + "test" @@ -736,6 +751,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}") lines := strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) c.Assert(lines, checker.HasLen, 3) fields := strings.Fields(lines[0]) @@ -755,6 +771,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=ps-volume-test") lines = strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) c.Assert(lines, checker.HasLen, 1) fields = strings.Fields(lines[0]) @@ -768,6 +785,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+mp) lines = strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) c.Assert(lines, checker.HasLen, 2) fields = strings.Fields(lines[0]) @@ -779,6 +797,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountSource) lines = strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) c.Assert(lines, checker.HasLen, 1) fields = strings.Fields(lines[0]) @@ -790,6 +809,7 @@ func (s *DockerSuite) TestPsShowMounts(c *check.C) { out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountDestination) lines = strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) c.Assert(lines, checker.HasLen, 1) fields = strings.Fields(lines[0]) @@ -820,6 +840,8 @@ func (s *DockerSuite) TestPsFormatSize(c *check.C) { } func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { + existing := ExistingContainerIDs(c) + // TODO default network on Windows is not called "bridge", and creating a // custom network fails on Windows fails with "Error response from daemon: plugin not found") testRequires(c, DaemonIsLinux) @@ -837,7 +859,7 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { lines = lines[1:] // ps output should have no containers - c.Assert(lines, checker.HasLen, 0) + c.Assert(RemoveLinesForExistingElements(lines, existing), checker.HasLen, 0) // Filter docker ps on network bridge out, _ = dockerCmd(c, "ps", "--filter", "network=bridge") @@ -849,7 +871,7 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { lines = lines[1:] // ps output should have only one container - 
c.Assert(lines, checker.HasLen, 1) + c.Assert(RemoveLinesForExistingElements(lines, existing), checker.HasLen, 1) // Making sure onbridgenetwork is on the output c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n")) @@ -864,7 +886,7 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { lines = lines[1:] //ps output should have both the containers - c.Assert(lines, checker.HasLen, 2) + c.Assert(RemoveLinesForExistingElements(lines, existing), checker.HasLen, 2) // Making sure onbridgenetwork and onnonenetwork is on the output c.Assert(containerOut, checker.Contains, "onnonenetwork", check.Commentf("Missing the container on none network\n")) @@ -885,11 +907,12 @@ func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { containerOut = strings.TrimSpace(string(out)) lines = strings.Split(containerOut, "\n") + // skip header lines = lines[1:] // ps output should have only one container - c.Assert(lines, checker.HasLen, 1) + c.Assert(RemoveLinesForExistingElements(lines, existing), checker.HasLen, 1) // Making sure onbridgenetwork is on the output c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n")) @@ -927,13 +950,16 @@ func (s *DockerSuite) TestPsFilterMissingArgErrorCode(c *check.C) { // Test case for 30291 func (s *DockerSuite) TestPsFormatTemplateWithArg(c *check.C) { + existingContainers := ExistingContainerNames(c) runSleepingContainer(c, "-d", "--name", "top", "--label", "some.label=label.foo-bar") out, _ := dockerCmd(c, "ps", "--format", `{{.Names}} {{.Label "some.label"}}`) + out = RemoveOutputForExistingElements(out, existingContainers) c.Assert(strings.TrimSpace(out), checker.Equals, "top label.foo-bar") } func (s *DockerSuite) TestPsListContainersFilterPorts(c *check.C) { testRequires(c, DaemonIsLinux) + existingContainers := ExistingContainerIDs(c) out, _ := dockerCmd(c, "run", "-d", "--publish=80", "busybox", "top") id1 := strings.TrimSpace(out) @@ -962,6 +988,7 @@ func (s *DockerSuite) TestPsListContainersFilterPorts(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2) out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "expose=8080/tcp") + out = RemoveOutputForExistingElements(out, existingContainers) c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id1) c.Assert(strings.TrimSpace(out), checker.Equals, id2) } @@ -969,11 +996,14 @@ func (s *DockerSuite) TestPsListContainersFilterPorts(c *check.C) { func (s *DockerSuite) TestPsNotShowLinknamesOfDeletedContainer(c *check.C) { testRequires(c, DaemonIsLinux) + existingContainers := ExistingContainerNames(c) + dockerCmd(c, "create", "--name=aaa", "busybox", "top") dockerCmd(c, "create", "--name=bbb", "--link=aaa", "busybox", "top") out, _ := dockerCmd(c, "ps", "--no-trunc", "-a", "--format", "{{.Names}}") lines := strings.Split(strings.TrimSpace(string(out)), "\n") + lines = RemoveLinesForExistingElements(lines, existingContainers) expected := []string{"bbb", "aaa,bbb/aaa"} var names []string names = append(names, lines...) 
@@ -982,5 +1012,6 @@ func (s *DockerSuite) TestPsNotShowLinknamesOfDeletedContainer(c *check.C) { dockerCmd(c, "rm", "bbb") out, _ = dockerCmd(c, "ps", "--no-trunc", "-a", "--format", "{{.Names}}") + out = RemoveOutputForExistingElements(out, existingContainers) c.Assert(strings.TrimSpace(out), checker.Equals, "aaa") } diff --git a/components/engine/integration-cli/docker_cli_pull_test.go b/components/engine/integration-cli/docker_cli_pull_test.go index fd91edb81e..56e518b179 100644 --- a/components/engine/integration-cli/docker_cli_pull_test.go +++ b/components/engine/integration-cli/docker_cli_pull_test.go @@ -193,25 +193,26 @@ func (s *DockerHubPullSuite) TestPullScratchNotAllowed(c *check.C) { // results in more images than a naked pull. func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) { testRequires(c, DaemonIsLinux) - s.Cmd(c, "pull", "busybox") - outImageCmd := s.Cmd(c, "images", "busybox") + s.Cmd(c, "pull", "dockercore/engine-pull-all-test-fixture") + outImageCmd := s.Cmd(c, "images", "dockercore/engine-pull-all-test-fixture") splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") c.Assert(splitOutImageCmd, checker.HasLen, 2) - s.Cmd(c, "pull", "--all-tags=true", "busybox") - outImageAllTagCmd := s.Cmd(c, "images", "busybox") + s.Cmd(c, "pull", "--all-tags=true", "dockercore/engine-pull-all-test-fixture") + outImageAllTagCmd := s.Cmd(c, "images", "dockercore/engine-pull-all-test-fixture") linesCount := strings.Count(outImageAllTagCmd, "\n") c.Assert(linesCount, checker.GreaterThan, 2, check.Commentf("pulling all tags should provide more than two images, got %s", outImageAllTagCmd)) - // Verify that the line for 'busybox:latest' is left unchanged. + // Verify that the line for 'dockercore/engine-pull-all-test-fixture:latest' is left unchanged. 
var latestLine string for _, line := range strings.Split(outImageAllTagCmd, "\n") { - if strings.HasPrefix(line, "busybox") && strings.Contains(line, "latest") { + if strings.HasPrefix(line, "dockercore/engine-pull-all-test-fixture") && strings.Contains(line, "latest") { latestLine = line break } } - c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for busybox:latest found after pulling all tags")) + c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for dockercore/engine-pull-all-test-fixture:latest found after pulling all tags")) + splitLatest := strings.Fields(latestLine) splitCurrent := strings.Fields(splitOutImageCmd[1]) @@ -227,7 +228,7 @@ func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) { splitCurrent[4] = "" splitCurrent[5] = "" - c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("busybox:latest was changed after pulling all tags")) + c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("dockercore/engine-pull-all-test-fixture:latest was changed after pulling all tags")) } // TestPullClientDisconnect kills the client during a pull operation and verifies that the operation @@ -273,7 +274,7 @@ func (s *DockerRegistryAuthHtpasswdSuite) TestPullNoCredentialsNotFound(c *check func (s *DockerSuite) TestPullLinuxImageFailsOnWindows(c *check.C) { testRequires(c, DaemonIsWindows, Network) _, _, err := dockerCmdWithError("pull", "ubuntu") - c.Assert(err.Error(), checker.Contains, "cannot be used on this platform") + c.Assert(err.Error(), checker.Contains, "no matching manifest") } // Regression test for https://github.com/docker/docker/issues/28892 diff --git a/components/engine/integration-cli/docker_cli_run_test.go b/components/engine/integration-cli/docker_cli_run_test.go index 340ad4b90f..1ad5ce7ebd 100644 --- a/components/engine/integration-cli/docker_cli_run_test.go +++ b/components/engine/integration-cli/docker_cli_run_test.go @@ -21,6 +21,7 @@ import ( "sync" "time" + "github.com/docker/docker/client" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" @@ -35,6 +36,7 @@ import ( "github.com/go-check/check" "github.com/gotestyourself/gotestyourself/icmd" libcontainerUser "github.com/opencontainers/runc/libcontainer/user" + "golang.org/x/net/context" ) // "test123" should be printed by docker run @@ -824,7 +826,7 @@ func (s *DockerSuite) TestRunEnvironment(c *check.C) { }) result.Assert(c, icmd.Success) - actualEnv := strings.Split(strings.TrimSpace(result.Combined()), "\n") + actualEnv := strings.Split(strings.TrimSuffix(result.Stdout(), "\n"), "\n") sort.Strings(actualEnv) goodEnv := []string{ @@ -2813,23 +2815,27 @@ func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) { // run container with --rm should remove container if exit code != 0 func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) { + existingContainers := ExistingContainerIDs(c) name := "flowers" cli.Docker(cli.Args("run", "--name", name, "--rm", "busybox", "ls", "/notexists")).Assert(c, icmd.Expected{ ExitCode: 1, }) out := cli.DockerCmd(c, "ps", "-q", "-a").Combined() + out = RemoveOutputForExistingElements(out, existingContainers) if out != "" { c.Fatal("Expected not to have containers", out) } } func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) { + existingContainers := ExistingContainerIDs(c) name := "sparkles" 
cli.Docker(cli.Args("run", "--name", name, "--rm", "busybox", "commandNotFound")).Assert(c, icmd.Expected{ ExitCode: 127, }) out := cli.DockerCmd(c, "ps", "-q", "-a").Combined() + out = RemoveOutputForExistingElements(out, existingContainers) if out != "" { c.Fatal("Expected not to have containers", out) } @@ -3963,29 +3969,44 @@ func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) { dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") dockerCmd(c, "volume", "inspect", "test") out, _ := dockerCmd(c, "volume", "ls", "-q") - c.Assert(strings.TrimSpace(out), checker.Equals, "test") + c.Assert(strings.TrimSpace(out), checker.Contains, "test") dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") dockerCmd(c, "rm", "-fv", "test") dockerCmd(c, "volume", "inspect", "test") out, _ = dockerCmd(c, "volume", "ls", "-q") - c.Assert(strings.TrimSpace(out), checker.Equals, "test") + c.Assert(strings.TrimSpace(out), checker.Contains, "test") } func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) { prefix, _ := getPrefixAndSlashFromDaemonPlatform() dockerCmd(c, "volume", "create", "test") - dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") + cid, _ := dockerCmd(c, "run", "-d", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true") + cli, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + defer cli.Close() + + container, err := cli.ContainerInspect(context.Background(), strings.TrimSpace(cid)) + c.Assert(err, checker.IsNil) + var vname string + for _, v := range container.Mounts { + if v.Name != "test" { + vname = v.Name + } + } + c.Assert(vname, checker.Not(checker.Equals), "") + // Remove the parent so there are not other references to the volumes dockerCmd(c, "rm", "-f", "parent") // now remove the child and ensure the named volume (and only the named volume) still exists dockerCmd(c, "rm", "-fv", "child") dockerCmd(c, "volume", "inspect", "test") out, _ := dockerCmd(c, "volume", "ls", "-q") - c.Assert(strings.TrimSpace(out), checker.Equals, "test") + c.Assert(strings.TrimSpace(out), checker.Contains, "test") + c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), vname) } func (s *DockerSuite) TestRunAttachFailedNoLeak(c *check.C) { @@ -4123,7 +4144,7 @@ func (s *DockerSuite) TestRunRm(c *check.C) { // Test that auto-remove is performed by the client on API versions that do not support daemon-side api-remove (API < 1.25) func (s *DockerSuite) TestRunRmPre125Api(c *check.C) { name := "miss-me-when-im-gone" - envs := appendBaseEnv(false, "DOCKER_API_VERSION=1.24") + envs := appendBaseEnv(os.Getenv("DOCKER_TLS_VERIFY") != "", "DOCKER_API_VERSION=1.24") cli.Docker(cli.Args("run", "--name="+name, "--rm", "busybox"), cli.WithEnvironmentVariables(envs...)).Assert(c, icmd.Success) cli.Docker(cli.Inspect(name), cli.Format(".name")).Assert(c, icmd.Expected{ diff --git a/components/engine/integration-cli/docker_cli_run_unix_test.go b/components/engine/integration-cli/docker_cli_run_unix_test.go index 582f929836..dd32a72d7c 100644 --- a/components/engine/integration-cli/docker_cli_run_unix_test.go +++ b/components/engine/integration-cli/docker_cli_run_unix_test.go @@ -4,6 +4,7 @@ package main import ( "bufio" + "context" "encoding/json" "fmt" "io/ioutil" @@ -16,6 +17,7 @@ import ( "syscall" "time" + 
"github.com/docker/docker/client" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build" @@ -675,7 +677,7 @@ func (s *DockerSuite) TestRunWithSwappinessInvalid(c *check.C) { } func (s *DockerSuite) TestRunWithMemoryReservation(c *check.C) { - testRequires(c, memoryReservationSupport) + testRequires(c, SameHostDaemon, memoryReservationSupport) file := "/sys/fs/cgroup/memory/memory.soft_limit_in_bytes" out, _ := dockerCmd(c, "run", "--memory-reservation", "200M", "--name", "test", "busybox", "cat", file) @@ -687,7 +689,7 @@ func (s *DockerSuite) TestRunWithMemoryReservation(c *check.C) { func (s *DockerSuite) TestRunWithMemoryReservationInvalid(c *check.C) { testRequires(c, memoryLimitSupport) - testRequires(c, memoryReservationSupport) + testRequires(c, SameHostDaemon, memoryReservationSupport) out, _, err := dockerCmdWithError("run", "-m", "500M", "--memory-reservation", "800M", "busybox", "true") c.Assert(err, check.NotNil) expected := "Minimum memory limit can not be less than memory reservation limit" @@ -1058,7 +1060,7 @@ func (s *DockerSuite) TestRunSeccompProfileAllow32Bit(c *check.C) { testRequires(c, SameHostDaemon, seccompEnabled, IsAmd64) ensureSyscallTest(c) - icmd.RunCommand(dockerBinary, "run", "syscall-test", "exit32-test", "id").Assert(c, icmd.Success) + icmd.RunCommand(dockerBinary, "run", "syscall-test", "exit32-test").Assert(c, icmd.Success) } // TestRunSeccompAllowSetrlimit checks that 'docker run debian:jessie ulimit -v 1048510' succeeds. @@ -1399,7 +1401,7 @@ func (s *DockerSuite) TestRunDeviceSymlink(c *check.C) { // TestRunPIDsLimit makes sure the pids cgroup is set with --pids-limit func (s *DockerSuite) TestRunPIDsLimit(c *check.C) { - testRequires(c, pidsLimit) + testRequires(c, SameHostDaemon, pidsLimit) file := "/sys/fs/cgroup/pids/pids.max" out, _ := dockerCmd(c, "run", "--name", "skittles", "--pids-limit", "4", "busybox", "cat", file) @@ -1563,14 +1565,18 @@ func (s *DockerSuite) TestRunWithNanoCPUs(c *check.C) { out, _ := dockerCmd(c, "run", "--cpus", "0.5", "--name", "test", "busybox", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) c.Assert(strings.TrimSpace(out), checker.Equals, "50000\n100000") - out = inspectField(c, "test", "HostConfig.NanoCpus") - c.Assert(out, checker.Equals, "5e+08", check.Commentf("setting the Nano CPUs failed")) + clt, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + inspect, err := clt.ContainerInspect(context.Background(), "test") + c.Assert(err, checker.IsNil) + c.Assert(inspect.HostConfig.NanoCPUs, checker.Equals, int64(500000000)) + out = inspectField(c, "test", "HostConfig.CpuQuota") c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) out = inspectField(c, "test", "HostConfig.CpuPeriod") c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) - out, _, err := dockerCmdWithError("run", "--cpus", "0.5", "--cpu-quota", "50000", "--cpu-period", "100000", "busybox", "sh") + out, _, err = dockerCmdWithError("run", "--cpus", "0.5", "--cpu-quota", "50000", "--cpu-period", "100000", "busybox", "sh") c.Assert(err, check.NotNil) c.Assert(out, checker.Contains, "Conflicting options: Nano CPUs and CPU Period cannot both be set") } diff --git a/components/engine/integration-cli/docker_cli_swarm_test.go b/components/engine/integration-cli/docker_cli_swarm_test.go index 5ecb010b29..c56cf1fe6c 100644 --- a/components/engine/integration-cli/docker_cli_swarm_test.go +++ 
b/components/engine/integration-cli/docker_cli_swarm_test.go @@ -169,8 +169,10 @@ func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) { func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) { d := s.AddDaemon(c, true, true) + hostname, err := d.Cmd("node", "inspect", "--format", "{{.Description.Hostname}}", "self") + c.Assert(err, checker.IsNil, check.Commentf(hostname)) - out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}", "busybox", "top") + out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}-{{.Node.Hostname}}", "busybox", "top") c.Assert(err, checker.IsNil, check.Commentf(out)) // make sure task has been deployed. @@ -179,7 +181,7 @@ func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) { containers := d.ActiveContainers() out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.Hostname}}", containers[0]) c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1", check.Commentf("hostname with templating invalid")) + c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1-"+strings.Split(hostname, "\n")[0], check.Commentf("hostname with templating invalid")) } // Test case for #24270 diff --git a/components/engine/integration-cli/docker_cli_update_unix_test.go b/components/engine/integration-cli/docker_cli_update_unix_test.go index be2274bb39..c3dfbcc9c5 100644 --- a/components/engine/integration-cli/docker_cli_update_unix_test.go +++ b/components/engine/integration-cli/docker_cli_update_unix_test.go @@ -3,6 +3,7 @@ package main import ( + "context" "encoding/json" "fmt" "os/exec" @@ -10,6 +11,7 @@ import ( "time" "github.com/docker/docker/api/types" + "github.com/docker/docker/client" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/request" "github.com/docker/docker/pkg/parsers/kernel" @@ -137,7 +139,7 @@ func (s *DockerSuite) TestUpdateKernelMemory(c *check.C) { func (s *DockerSuite) TestUpdateKernelMemoryUninitialized(c *check.C) { testRequires(c, DaemonIsLinux, kernelMemorySupport) - isNewKernel := kernel.CheckKernelVersion(4, 6, 0) + isNewKernel := CheckKernelVersion(4, 6, 0) name := "test-update-container" dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") _, _, err := dockerCmdWithError("update", "--kernel-memory", "100M", name) @@ -169,6 +171,18 @@ func (s *DockerSuite) TestUpdateKernelMemoryUninitialized(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") } +// GetKernelVersion gets the current kernel version. +func GetKernelVersion() *kernel.VersionInfo { + v, _ := kernel.ParseRelease(testEnv.DaemonInfo.KernelVersion) + return v +} + +// CheckKernelVersion checks if current kernel is newer than (or equal to) +// the given version. 
+func CheckKernelVersion(k, major, minor int) bool { + return kernel.CompareKernelVersion(*GetKernelVersion(), kernel.VersionInfo{Kernel: k, Major: major, Minor: minor}) > 0 +} + func (s *DockerSuite) TestUpdateSwapMemoryOnly(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, memoryLimitSupport) @@ -295,20 +309,26 @@ func (s *DockerSuite) TestUpdateWithNanoCPUs(c *check.C) { out, _ = dockerCmd(c, "exec", "top", "sh", "-c", fmt.Sprintf("cat %s && cat %s", file1, file2)) c.Assert(strings.TrimSpace(out), checker.Equals, "50000\n100000") - out = inspectField(c, "top", "HostConfig.NanoCpus") - c.Assert(out, checker.Equals, "5e+08", check.Commentf("setting the Nano CPUs failed")) + clt, err := client.NewEnvClient() + c.Assert(err, checker.IsNil) + inspect, err := clt.ContainerInspect(context.Background(), "top") + c.Assert(err, checker.IsNil) + c.Assert(inspect.HostConfig.NanoCPUs, checker.Equals, int64(500000000)) + out = inspectField(c, "top", "HostConfig.CpuQuota") c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) out = inspectField(c, "top", "HostConfig.CpuPeriod") c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS period should be 0")) - out, _, err := dockerCmdWithError("update", "--cpu-quota", "80000", "top") + out, _, err = dockerCmdWithError("update", "--cpu-quota", "80000", "top") c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, "Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set") out, _ = dockerCmd(c, "update", "--cpus", "0.8", "top") - out = inspectField(c, "top", "HostConfig.NanoCpus") - c.Assert(out, checker.Equals, "8e+08", check.Commentf("updating the Nano CPUs failed")) + inspect, err = clt.ContainerInspect(context.Background(), "top") + c.Assert(err, checker.IsNil) + c.Assert(inspect.HostConfig.NanoCPUs, checker.Equals, int64(800000000)) + out = inspectField(c, "top", "HostConfig.CpuQuota") c.Assert(out, checker.Equals, "0", check.Commentf("CPU CFS quota should be 0")) out = inspectField(c, "top", "HostConfig.CpuPeriod") diff --git a/components/engine/integration-cli/docker_cli_version_test.go b/components/engine/integration-cli/docker_cli_version_test.go deleted file mode 100644 index 074a7db475..0000000000 --- a/components/engine/integration-cli/docker_cli_version_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package main - -import ( - "strings" - - "github.com/docker/docker/integration-cli/checker" - "github.com/go-check/check" -) - -// ensure docker version works -func (s *DockerSuite) TestVersionEnsureSucceeds(c *check.C) { - out, _ := dockerCmd(c, "version") - stringsToCheck := map[string]int{ - "Client:": 1, - "Server:": 1, - " Version:": 2, - " API version:": 2, - " Go version:": 2, - " Git commit:": 2, - " OS/Arch:": 2, - " Built:": 2, - } - - for k, v := range stringsToCheck { - c.Assert(strings.Count(out, k), checker.Equals, v, check.Commentf("The count of %v in %s does not match excepted", k, out)) - } -} - -// ensure the Windows daemon return the correct platform string -func (s *DockerSuite) TestVersionPlatform_w(c *check.C) { - testRequires(c, DaemonIsWindows) - testVersionPlatform(c, "windows/amd64") -} - -// ensure the Linux daemon return the correct platform string -func (s *DockerSuite) TestVersionPlatform_l(c *check.C) { - testRequires(c, DaemonIsLinux) - testVersionPlatform(c, "linux") -} - -func testVersionPlatform(c *check.C, platform string) { - out, _ := dockerCmd(c, "version") - expected := "OS/Arch: " + platform - - split := strings.Split(out, "\n") - 
c.Assert(len(split) >= 14, checker.Equals, true, check.Commentf("got %d lines from version", len(split))) - - // Verify the second 'OS/Arch' matches the platform. Experimental has - // more lines of output than 'regular' - bFound := false - for i := 14; i < len(split); i++ { - if strings.Contains(split[i], expected) { - bFound = true - break - } - } - c.Assert(bFound, checker.Equals, true, check.Commentf("Could not find server '%s' in '%s'", expected, out)) -} diff --git a/components/engine/integration-cli/docker_cli_volume_test.go b/components/engine/integration-cli/docker_cli_volume_test.go index 3ca0834806..fc930d319c 100644 --- a/components/engine/integration-cli/docker_cli_volume_test.go +++ b/components/engine/integration-cli/docker_cli_volume_test.go @@ -64,9 +64,6 @@ func (s *DockerSuite) TestVolumeCLIInspectMulti(c *check.C) { }) out := result.Stdout() - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 3, check.Commentf("\n%s", out)) - c.Assert(out, checker.Contains, "test1") c.Assert(out, checker.Contains, "test2") c.Assert(out, checker.Contains, "test3") @@ -81,11 +78,8 @@ func (s *DockerSuite) TestVolumeCLILs(c *check.C) { dockerCmd(c, "volume", "create", "soo") dockerCmd(c, "run", "-v", "soo:"+prefix+"/foo", "busybox", "ls", "/") - out, _ := dockerCmd(c, "volume", "ls") - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) - - assertVolList(c, out, []string{"aaa", "soo", "test"}) + out, _ := dockerCmd(c, "volume", "ls", "-q") + assertVolumesInList(c, out, []string{"aaa", "soo", "test"}) } func (s *DockerSuite) TestVolumeLsFormat(c *check.C) { @@ -94,12 +88,7 @@ func (s *DockerSuite) TestVolumeLsFormat(c *check.C) { dockerCmd(c, "volume", "create", "soo") out, _ := dockerCmd(c, "volume", "ls", "--format", "{{.Name}}") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - - expected := []string{"aaa", "soo", "test"} - var names []string - names = append(names, lines...) - c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) + assertVolumesInList(c, out, []string{"aaa", "soo", "test"}) } func (s *DockerSuite) TestVolumeLsFormatDefaultFormat(c *check.C) { @@ -118,12 +107,7 @@ func (s *DockerSuite) TestVolumeLsFormatDefaultFormat(c *check.C) { c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "--config", d, "volume", "ls") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - - expected := []string{"aaa default", "soo default", "test default"} - var names []string - names = append(names, lines...) 
- c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names)) + assertVolumesInList(c, out, []string{"aaa default", "soo default", "test default"}) } // assertVolList checks volume retrieved with ls command @@ -142,6 +126,20 @@ func assertVolList(c *check.C, out string, expectVols []string) { c.Assert(volList, checker.DeepEquals, expectVols) } +func assertVolumesInList(c *check.C, out string, expected []string) { + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + for _, expect := range expected { + found := false + for _, v := range lines { + found = v == expect + if found { + break + } + } + c.Assert(found, checker.Equals, true, check.Commentf("Expected volume not found: %v, got: %v", expect, lines)) + } +} + func (s *DockerSuite) TestVolumeCLILsFilterDangling(c *check.C) { prefix, _ := getPrefixAndSlashFromDaemonPlatform() dockerCmd(c, "volume", "create", "testnotinuse1") @@ -213,10 +211,6 @@ func (s *DockerSuite) TestVolumeCLIRm(c *check.C) { dockerCmd(c, "volume", "rm", id) dockerCmd(c, "volume", "rm", "test") - out, _ = dockerCmd(c, "volume", "ls") - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) - volumeID := "testing" dockerCmd(c, "run", "-v", volumeID+":"+prefix+"/foo", "--name=test", "busybox", "sh", "-c", "echo hello > /foo/bar") @@ -407,10 +401,6 @@ func (s *DockerSuite) TestVolumeCLIRmForceUsage(c *check.C) { dockerCmd(c, "volume", "rm", "-f", id) dockerCmd(c, "volume", "rm", "--force", "nonexist") - - out, _ = dockerCmd(c, "volume", "ls") - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) } func (s *DockerSuite) TestVolumeCLIRmForce(c *check.C) { diff --git a/components/engine/integration-cli/docker_hub_pull_suite_test.go b/components/engine/integration-cli/docker_hub_pull_suite_test.go index 263372087c..286a3913ce 100644 --- a/components/engine/integration-cli/docker_hub_pull_suite_test.go +++ b/components/engine/integration-cli/docker_hub_pull_suite_test.go @@ -39,7 +39,7 @@ func newDockerHubPullSuite() *DockerHubPullSuite { // SetUpSuite starts the suite daemon. 
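The volume-CLI tests above stop asserting exact line counts from `docker volume ls` (which breaks as soon as the daemon already has volumes) and instead check that each expected name appears in the output. A small standalone sketch of that membership check, independent of the suite's assertVolumesInList helper and using only the standard library:

package main

import (
	"fmt"
	"strings"
)

// volumesInList reports whether every expected volume name appears as a
// whole line in `docker volume ls -q` style output.
func volumesInList(out string, expected []string) bool {
	lines := strings.Split(strings.TrimSpace(out), "\n")
	for _, want := range expected {
		found := false
		for _, line := range lines {
			if line == want {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

func main() {
	out := "aaa\nsoo\ntest\nsome-preexisting-volume\n"
	fmt.Println(volumesInList(out, []string{"aaa", "soo", "test"})) // true
}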
func (s *DockerHubPullSuite) SetUpSuite(c *check.C) { - testRequires(c, DaemonIsLinux) + testRequires(c, DaemonIsLinux, SameHostDaemon) s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{ Experimental: testEnv.ExperimentalDaemon(), }) diff --git a/components/engine/integration-cli/docker_utils_test.go b/components/engine/integration-cli/docker_utils_test.go index 95d2e93cfe..1bda2c7239 100644 --- a/components/engine/integration-cli/docker_utils_test.go +++ b/components/engine/integration-cli/docker_utils_test.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "io/ioutil" - "net/http" "os" "path" "path/filepath" @@ -373,8 +372,7 @@ func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg } func getInspectBody(c *check.C, version, id string) []byte { - var httpClient *http.Client - cli, err := client.NewClient(daemonHost(), version, httpClient, nil) + cli, err := request.NewEnvClientWithVersion(version) c.Assert(err, check.IsNil) defer cli.Close() _, body, err := cli.ContainerInspectWithRaw(context.Background(), id, false) diff --git a/components/engine/integration-cli/fixtures/plugin/plugin.go b/components/engine/integration-cli/fixtures/plugin/plugin.go index c8259be1a7..4ab15c23de 100644 --- a/components/engine/integration-cli/fixtures/plugin/plugin.go +++ b/components/engine/integration-cli/fixtures/plugin/plugin.go @@ -7,7 +7,7 @@ import ( "golang.org/x/net/context" ) -// CreateOpt is is passed used to change the defualt plugin config before +// CreateOpt is is passed used to change the default plugin config before // creating it type CreateOpt func(*Config) diff --git a/components/engine/integration-cli/fixtures/plugin/plugin_linux.go b/components/engine/integration-cli/fixtures/plugin/plugin_linux.go index 757694cd37..5da79fcb77 100644 --- a/components/engine/integration-cli/fixtures/plugin/plugin_linux.go +++ b/components/engine/integration-cli/fixtures/plugin/plugin_linux.go @@ -71,9 +71,14 @@ func CreateInRegistry(ctx context.Context, repo string, auth *types.AuthConfig, } defer tar.Close() + regService, err := registry.NewService(registry.ServiceOptions{V2Only: true}) + if err != nil { + return err + } + managerConfig := plugin.ManagerConfig{ Store: plugin.NewStore(), - RegistryService: registry.NewService(registry.ServiceOptions{V2Only: true}), + RegistryService: regService, Root: filepath.Join(tmpDir, "root"), ExecRoot: "/run/docker", // manager init fails if not set Executor: dummyExecutor{}, diff --git a/components/engine/integration-cli/fixtures_linux_daemon_test.go b/components/engine/integration-cli/fixtures_linux_daemon_test.go index 1508762060..6ac4511215 100644 --- a/components/engine/integration-cli/fixtures_linux_daemon_test.go +++ b/components/engine/integration-cli/fixtures_linux_daemon_test.go @@ -57,7 +57,7 @@ func ensureSyscallTest(c *check.C) { } if runtime.GOOS == "linux" && runtime.GOARCH == "amd64" { - out, err := exec.Command(gcc, "-s", "-m32", "-nostdlib", "../contrib/syscall-test/exit32.s", "-o", tmp+"/"+"exit32-test").CombinedOutput() + out, err := exec.Command(gcc, "-s", "-m32", "-nostdlib", "-static", "../contrib/syscall-test/exit32.s", "-o", tmp+"/"+"exit32-test").CombinedOutput() c.Assert(err, checker.IsNil, check.Commentf(string(out))) } diff --git a/components/engine/integration-cli/request/request.go b/components/engine/integration-cli/request/request.go index 72632f3f76..f22b08d768 100644 --- a/components/engine/integration-cli/request/request.go +++ b/components/engine/integration-cli/request/request.go @@ -129,7 +129,11 @@ 
func New(host, endpoint string, modifiers ...func(*http.Request) error) (*http.R return nil, fmt.Errorf("could not create new request: %v", err) } - req.URL.Scheme = "http" + if os.Getenv("DOCKER_TLS_VERIFY") != "" { + req.URL.Scheme = "https" + } else { + req.URL.Scheme = "http" + } req.URL.Host = addr for _, config := range modifiers { @@ -319,3 +323,35 @@ func DaemonHost() string { } return daemonURLStr } + +// NewEnvClientWithVersion returns a docker client with a specified version. +// See: github.com/docker/docker/client `NewEnvClient()` +func NewEnvClientWithVersion(version string) (*dclient.Client, error) { + if version == "" { + return nil, errors.New("version not specified") + } + + var httpClient *http.Client + if os.Getenv("DOCKER_CERT_PATH") != "" { + tlsConfig, err := getTLSConfig() + if err != nil { + return nil, err + } + httpClient = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + } + } + + host := os.Getenv("DOCKER_HOST") + if host == "" { + host = dclient.DefaultDockerHost + } + + cli, err := dclient.NewClient(host, version, httpClient, nil) + if err != nil { + return cli, err + } + return cli, nil +} diff --git a/components/engine/integration-cli/requirements_test.go b/components/engine/integration-cli/requirements_test.go index 0b10969996..411248195b 100644 --- a/components/engine/integration-cli/requirements_test.go +++ b/components/engine/integration-cli/requirements_test.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "io/ioutil" "net/http" @@ -10,6 +11,8 @@ import ( "strings" "time" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" "github.com/docker/docker/integration-cli/requirement" ) @@ -36,6 +39,18 @@ func DaemonIsLinux() bool { return testEnv.DaemonInfo.OSType == "linux" } +func OnlyDefaultNetworks() bool { + cli, err := client.NewEnvClient() + if err != nil { + return false + } + networks, err := cli.NetworkList(context.TODO(), types.NetworkListOptions{}) + if err != nil || len(networks) > 0 { + return false + } + return true +} + // Deprecated: use skip.IfCondition(t, !testEnv.DaemonInfo.ExperimentalBuild) func ExperimentalDaemon() bool { return testEnv.DaemonInfo.ExperimentalBuild diff --git a/components/engine/integration-cli/requirements_unix_test.go b/components/engine/integration-cli/requirements_unix_test.go index 6ef900fc18..7c594f7db4 100644 --- a/components/engine/integration-cli/requirements_unix_test.go +++ b/components/engine/integration-cli/requirements_unix_test.go @@ -18,19 +18,19 @@ var ( ) func cpuCfsPeriod() bool { - return SysInfo.CPUCfsPeriod + return testEnv.DaemonInfo.CPUCfsPeriod } func cpuCfsQuota() bool { - return SysInfo.CPUCfsQuota + return testEnv.DaemonInfo.CPUCfsQuota } func cpuShare() bool { - return SysInfo.CPUShares + return testEnv.DaemonInfo.CPUShares } func oomControl() bool { - return SysInfo.OomKillDisable + return testEnv.DaemonInfo.OomKillDisable } func pidsLimit() bool { @@ -38,11 +38,11 @@ func pidsLimit() bool { } func kernelMemorySupport() bool { - return SysInfo.KernelMemory + return testEnv.DaemonInfo.KernelMemory } func memoryLimitSupport() bool { - return SysInfo.MemoryLimit + return testEnv.DaemonInfo.MemoryLimit } func memoryReservationSupport() bool { @@ -50,19 +50,19 @@ func memoryReservationSupport() bool { } func swapMemorySupport() bool { - return SysInfo.SwapLimit + return testEnv.DaemonInfo.SwapLimit } func memorySwappinessSupport() bool { - return SysInfo.MemorySwappiness + return SameHostDaemon() && SysInfo.MemorySwappiness } func 
blkioWeight() bool { - return SysInfo.BlkioWeight + return SameHostDaemon() && SysInfo.BlkioWeight } func cgroupCpuset() bool { - return SysInfo.Cpuset + return testEnv.DaemonInfo.CPUSet } func seccompEnabled() bool { @@ -111,5 +111,7 @@ func overlay2Supported() bool { } func init() { - SysInfo = sysinfo.New(true) + if SameHostDaemon() { + SysInfo = sysinfo.New(true) + } } diff --git a/components/engine/integration-cli/utils_test.go b/components/engine/integration-cli/utils_test.go index e09fb80643..d176c7f062 100644 --- a/components/engine/integration-cli/utils_test.go +++ b/components/engine/integration-cli/utils_test.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" "github.com/gotestyourself/gotestyourself/icmd" "github.com/pkg/errors" ) @@ -112,3 +113,71 @@ func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, err error) out, err := cmds[len(cmds)-1].CombinedOutput() return string(out), err } + +type elementListOptions struct { + element, format string +} + +func existingElements(c *check.C, opts elementListOptions) []string { + args := []string{} + switch opts.element { + case "container": + args = append(args, "ps", "-a") + case "image": + args = append(args, "images", "-a") + case "network": + args = append(args, "network", "ls") + case "plugin": + args = append(args, "plugin", "ls") + case "volume": + args = append(args, "volume", "ls") + } + if opts.format != "" { + args = append(args, "--format", opts.format) + } + out, _ := dockerCmd(c, args...) + lines := []string{} + for _, l := range strings.Split(out, "\n") { + if l != "" { + lines = append(lines, l) + } + } + return lines +} + +// ExistingContainerIDs returns a list of currently existing container IDs. +func ExistingContainerIDs(c *check.C) []string { + return existingElements(c, elementListOptions{element: "container", format: "{{.ID}}"}) +} + +// ExistingContainerNames returns a list of existing container names. +func ExistingContainerNames(c *check.C) []string { + return existingElements(c, elementListOptions{element: "container", format: "{{.Names}}"}) +} + +// RemoveLinesForExistingElements removes existing elements from the output of a +// docker command. +// This function takes an output []string and returns a []string. +func RemoveLinesForExistingElements(output, existing []string) []string { + for _, e := range existing { + index := -1 + for i, line := range output { + if strings.Contains(line, e) { + index = i + break + } + } + if index != -1 { + output = append(output[:index], output[index+1:]...) + } + } + return output +} + +// RemoveOutputForExistingElements removes existing elements from the output of +// a docker command. +// This function takes an output string and returns a string. 
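These utils_test helpers let a test tolerate pre-existing state on the daemon: snapshot the element IDs before the test runs, then strip any output line that mentions them. A stripped-down sketch of the filtering step on plain data, mirroring the helper added here but outside the check/dockerCmd plumbing:

package main

import (
	"fmt"
	"strings"
)

// removeLinesForExistingElements drops the first output line that mentions
// each already-existing element, leaving only lines produced by the test.
func removeLinesForExistingElements(output, existing []string) []string {
	for _, e := range existing {
		for i, line := range output {
			if strings.Contains(line, e) {
				output = append(output[:i], output[i+1:]...)
				break
			}
		}
	}
	return output
}

func main() {
	psOutput := []string{"abc123  preexisting", "def456  created-by-test"}
	existing := []string{"abc123"} // snapshot taken before the test ran
	fmt.Println(removeLinesForExistingElements(psOutput, existing))
	// [def456  created-by-test]
}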
+func RemoveOutputForExistingElements(output string, existing []string) string { + res := RemoveLinesForExistingElements(strings.Split(output, "\n"), existing) + return strings.Join(res, "\n") +} diff --git a/components/engine/integration/container/main_test.go b/components/engine/integration/container/main_test.go index 1c4e078400..fbfed2ae40 100644 --- a/components/engine/integration/container/main_test.go +++ b/components/engine/integration/container/main_test.go @@ -23,6 +23,6 @@ func TestMain(m *testing.M) { } func setupTest(t *testing.T) func() { - environment.ProtectImages(t, testEnv) + environment.ProtectAll(t, testEnv) return func() { testEnv.Clean(t) } } diff --git a/components/engine/integration/service/create_test.go b/components/engine/integration/service/create_test.go new file mode 100644 index 0000000000..cb0823dfbd --- /dev/null +++ b/components/engine/integration/service/create_test.go @@ -0,0 +1,129 @@ +package service + +import ( + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/client" + "github.com/docker/docker/integration-cli/request" + "github.com/gotestyourself/gotestyourself/poll" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +func TestCreateWithLBSandbox(t *testing.T) { + defer setupTest(t)() + d := newSwarm(t) + defer d.Stop(t) + client, err := request.NewClientForHost(d.Sock()) + require.NoError(t, err) + + overlayName := "overlay1" + networkCreate := types.NetworkCreate{ + CheckDuplicate: true, + Driver: "overlay", + } + + netResp, err := client.NetworkCreate(context.Background(), overlayName, networkCreate) + require.NoError(t, err) + overlayID := netResp.ID + + var instances uint64 = 1 + serviceSpec := swarmServiceSpec("TestService", instances) + + serviceSpec.TaskTemplate.Networks = append(serviceSpec.TaskTemplate.Networks, swarm.NetworkAttachmentConfig{Target: overlayName}) + + serviceResp, err := client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{ + QueryRegistry: false, + }) + require.NoError(t, err) + + serviceID := serviceResp.ID + poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances)) + + _, _, err = client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{}) + require.NoError(t, err) + + network, err := client.NetworkInspect(context.Background(), overlayID, types.NetworkInspectOptions{}) + require.NoError(t, err) + assert.Contains(t, network.Containers, overlayName+"-sbox") + + err = client.ServiceRemove(context.Background(), serviceID) + require.NoError(t, err) + + poll.WaitOn(t, serviceIsRemoved(client, serviceID)) + err = client.NetworkRemove(context.Background(), overlayID) + require.NoError(t, err) + + poll.WaitOn(t, networkIsRemoved(client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second)) +} + +func swarmServiceSpec(name string, replicas uint64) swarm.ServiceSpec { + return swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: name, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: "busybox:latest", + Command: []string{"/bin/top"}, + }, + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &replicas, + }, + }, + } +} + +func serviceRunningTasksCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result { + return func(log 
poll.LogT) poll.Result { + filter := filters.NewArgs() + filter.Add("service", serviceID) + tasks, err := client.TaskList(context.Background(), types.TaskListOptions{ + Filters: filter, + }) + switch { + case err != nil: + return poll.Error(err) + case len(tasks) == int(instances): + for _, task := range tasks { + if task.Status.State != swarm.TaskStateRunning { + return poll.Continue("waiting for tasks to enter run state") + } + } + return poll.Success() + default: + return poll.Continue("task count at %d waiting for %d", len(tasks), instances) + } + } +} + +func serviceIsRemoved(client client.ServiceAPIClient, serviceID string) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + filter := filters.NewArgs() + filter.Add("service", serviceID) + _, err := client.TaskList(context.Background(), types.TaskListOptions{ + Filters: filter, + }) + if err == nil { + return poll.Continue("waiting for service %s to be deleted", serviceID) + } + return poll.Success() + } +} + +func networkIsRemoved(client client.NetworkAPIClient, networkID string) func(log poll.LogT) poll.Result { + return func(log poll.LogT) poll.Result { + _, err := client.NetworkInspect(context.Background(), networkID, types.NetworkInspectOptions{}) + if err == nil { + return poll.Continue("waiting for network %s to be removed", networkID) + } + return poll.Success() + } +} diff --git a/components/engine/integration/service/inspect_test.go b/components/engine/integration/service/inspect_test.go index e4459af437..61831b72f1 100644 --- a/components/engine/integration/service/inspect_test.go +++ b/components/engine/integration/service/inspect_test.go @@ -12,12 +12,15 @@ import ( "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/integration-cli/request" "github.com/gotestyourself/gotestyourself/poll" + "github.com/gotestyourself/gotestyourself/skip" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/net/context" ) func TestInspect(t *testing.T) { + skip.IfCondition(t, !testEnv.IsLocalDaemon()) + defer setupTest(t)() d := newSwarm(t) defer d.Stop(t) client, err := request.NewClientForHost(d.Sock()) diff --git a/components/engine/integration/service/main_test.go b/components/engine/integration/service/main_test.go index 4d6af81895..4cad6ed975 100644 --- a/components/engine/integration/service/main_test.go +++ b/components/engine/integration/service/main_test.go @@ -25,6 +25,6 @@ func TestMain(m *testing.M) { } func setupTest(t *testing.T) func() { - environment.ProtectImages(t, testEnv) + environment.ProtectAll(t, testEnv) return func() { testEnv.Clean(t) } } diff --git a/components/engine/integration/system/main_test.go b/components/engine/integration/system/main_test.go new file mode 100644 index 0000000000..ad0d203753 --- /dev/null +++ b/components/engine/integration/system/main_test.go @@ -0,0 +1,28 @@ +package system + +import ( + "fmt" + "os" + "testing" + + "github.com/docker/docker/internal/test/environment" +) + +var testEnv *environment.Execution + +func TestMain(m *testing.M) { + var err error + testEnv, err = environment.New() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + testEnv.Print() + os.Exit(m.Run()) +} + +func setupTest(t *testing.T) func() { + environment.ProtectImages(t, testEnv) + return func() { testEnv.Clean(t) } +} diff --git a/components/engine/integration/system/version_test.go b/components/engine/integration/system/version_test.go new file mode 100644 index 0000000000..ac47891e9b --- /dev/null +++ 
b/components/engine/integration/system/version_test.go @@ -0,0 +1,24 @@ +package system + +import ( + "testing" + + "github.com/docker/docker/integration-cli/request" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/context" +) + +func TestVersion(t *testing.T) { + client, err := request.NewClient() + require.NoError(t, err) + + version, err := client.ServerVersion(context.Background()) + require.NoError(t, err) + + assert.NotNil(t, version.APIVersion) + assert.NotNil(t, version.Version) + assert.NotNil(t, version.MinAPIVersion) + assert.Equal(t, testEnv.DaemonInfo.ExperimentalBuild, version.Experimental) + assert.Equal(t, testEnv.DaemonInfo.OSType, version.Os) +} diff --git a/components/engine/internal/test/environment/clean.go b/components/engine/internal/test/environment/clean.go index c6392dc1bc..702d10711b 100644 --- a/components/engine/internal/test/environment/clean.go +++ b/components/engine/internal/test/environment/clean.go @@ -32,12 +32,12 @@ func (e *Execution) Clean(t testingT) { if (platform != "windows") || (platform == "windows" && e.DaemonInfo.Isolation == "hyperv") { unpauseAllContainers(t, client) } - deleteAllContainers(t, client) + deleteAllContainers(t, client, e.protectedElements.containers) deleteAllImages(t, client, e.protectedElements.images) - deleteAllVolumes(t, client) - deleteAllNetworks(t, client, platform) + deleteAllVolumes(t, client, e.protectedElements.volumes) + deleteAllNetworks(t, client, platform, e.protectedElements.networks) if platform == "linux" { - deleteAllPlugins(t, client) + deleteAllPlugins(t, client, e.protectedElements.plugins) } } @@ -66,7 +66,7 @@ func getPausedContainers(ctx context.Context, t assert.TestingT, client client.C var alreadyExists = regexp.MustCompile(`Error response from daemon: removal of container (\w+) is already in progress`) -func deleteAllContainers(t assert.TestingT, apiclient client.ContainerAPIClient) { +func deleteAllContainers(t assert.TestingT, apiclient client.ContainerAPIClient, protectedContainers map[string]struct{}) { ctx := context.Background() containers := getAllContainers(ctx, t, apiclient) if len(containers) == 0 { @@ -74,6 +74,9 @@ func deleteAllContainers(t assert.TestingT, apiclient client.ContainerAPIClient) } for _, container := range containers { + if _, ok := protectedContainers[container.ID]; ok { + continue + } err := apiclient.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{ Force: true, RemoveVolumes: true, @@ -126,17 +129,20 @@ func removeImage(ctx context.Context, t assert.TestingT, apiclient client.ImageA assert.NoError(t, err, "failed to remove image %s", ref) } -func deleteAllVolumes(t assert.TestingT, c client.VolumeAPIClient) { +func deleteAllVolumes(t assert.TestingT, c client.VolumeAPIClient, protectedVolumes map[string]struct{}) { volumes, err := c.VolumeList(context.Background(), filters.Args{}) assert.NoError(t, err, "failed to list volumes") for _, v := range volumes.Volumes { + if _, ok := protectedVolumes[v.Name]; ok { + continue + } err := c.VolumeRemove(context.Background(), v.Name, true) assert.NoError(t, err, "failed to remove volume %s", v.Name) } } -func deleteAllNetworks(t assert.TestingT, c client.NetworkAPIClient, daemonPlatform string) { +func deleteAllNetworks(t assert.TestingT, c client.NetworkAPIClient, daemonPlatform string, protectedNetworks map[string]struct{}) { networks, err := c.NetworkList(context.Background(), types.NetworkListOptions{}) assert.NoError(t, err, "failed to list networks") @@ 
-144,6 +150,9 @@ func deleteAllNetworks(t assert.TestingT, c client.NetworkAPIClient, daemonPlatf if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { continue } + if _, ok := protectedNetworks[n.ID]; ok { + continue + } if daemonPlatform == "windows" && strings.ToLower(n.Name) == "nat" { // nat is a pre-defined network on Windows and cannot be removed continue @@ -153,11 +162,14 @@ func deleteAllNetworks(t assert.TestingT, c client.NetworkAPIClient, daemonPlatf } } -func deleteAllPlugins(t assert.TestingT, c client.PluginAPIClient) { +func deleteAllPlugins(t assert.TestingT, c client.PluginAPIClient, protectedPlugins map[string]struct{}) { plugins, err := c.PluginList(context.Background(), filters.Args{}) assert.NoError(t, err, "failed to list plugins") for _, p := range plugins { + if _, ok := protectedPlugins[p.Name]; ok { + continue + } err := c.PluginRemove(context.Background(), p.Name, types.PluginRemoveOptions{Force: true}) assert.NoError(t, err, "failed to remove plugin %s", p.ID) } diff --git a/components/engine/internal/test/environment/protect.go b/components/engine/internal/test/environment/protect.go index 5821863298..2e882c8470 100644 --- a/components/engine/internal/test/environment/protect.go +++ b/components/engine/internal/test/environment/protect.go @@ -10,7 +10,63 @@ import ( ) type protectedElements struct { - images map[string]struct{} + containers map[string]struct{} + images map[string]struct{} + networks map[string]struct{} + plugins map[string]struct{} + volumes map[string]struct{} +} + +func newProtectedElements() protectedElements { + return protectedElements{ + containers: map[string]struct{}{}, + images: map[string]struct{}{}, + networks: map[string]struct{}{}, + plugins: map[string]struct{}{}, + volumes: map[string]struct{}{}, + } +} + +// ProtectAll protects the existing environment (containers, images, networks, +// volumes, and, on Linux, plugins) from being cleaned up at the end of test +// runs +func ProtectAll(t testingT, testEnv *Execution) { + ProtectContainers(t, testEnv) + ProtectImages(t, testEnv) + ProtectNetworks(t, testEnv) + ProtectVolumes(t, testEnv) + if testEnv.DaemonInfo.OSType == "linux" { + ProtectPlugins(t, testEnv) + } +} + +// ProtectContainer adds the specified container(s) to be protected in case of +// clean +func (e *Execution) ProtectContainer(t testingT, containers ...string) { + for _, container := range containers { + e.protectedElements.containers[container] = struct{}{} + } +} + +// ProtectContainers protects existing containers from being cleaned up at the +// end of test runs +func ProtectContainers(t testingT, testEnv *Execution) { + containers := getExistingContainers(t, testEnv) + testEnv.ProtectContainer(t, containers...) 
+} + +func getExistingContainers(t require.TestingT, testEnv *Execution) []string { + client := testEnv.APIClient() + containerList, err := client.ContainerList(context.Background(), types.ContainerListOptions{ + All: true, + }) + require.NoError(t, err, "failed to list containers") + + containers := []string{} + for _, container := range containerList { + containers = append(containers, container.ID) + } + return containers } // ProtectImage adds the specified image(s) to be protected in case of clean @@ -20,12 +76,6 @@ func (e *Execution) ProtectImage(t testingT, images ...string) { } } -func newProtectedElements() protectedElements { - return protectedElements{ - images: map[string]struct{}{}, - } -} - // ProtectImages protects existing images and on linux frozen images from being // cleaned up at the end of test runs func ProtectImages(t testingT, testEnv *Execution) { @@ -42,6 +92,7 @@ func getExistingImages(t require.TestingT, testEnv *Execution) []string { filter := filters.NewArgs() filter.Add("dangling", "false") imageList, err := client.ImageList(context.Background(), types.ImageListOptions{ + All: true, Filters: filter, }) require.NoError(t, err, "failed to list images") @@ -76,3 +127,82 @@ func ensureFrozenImagesLinux(t testingT, testEnv *Execution) []string { } return images } + +// ProtectNetwork adds the specified network(s) to be protected in case of +// clean +func (e *Execution) ProtectNetwork(t testingT, networks ...string) { + for _, network := range networks { + e.protectedElements.networks[network] = struct{}{} + } +} + +// ProtectNetworks protects existing networks from being cleaned up at the end +// of test runs +func ProtectNetworks(t testingT, testEnv *Execution) { + networks := getExistingNetworks(t, testEnv) + testEnv.ProtectNetwork(t, networks...) +} + +func getExistingNetworks(t require.TestingT, testEnv *Execution) []string { + client := testEnv.APIClient() + networkList, err := client.NetworkList(context.Background(), types.NetworkListOptions{}) + require.NoError(t, err, "failed to list networks") + + networks := []string{} + for _, network := range networkList { + networks = append(networks, network.ID) + } + return networks +} + +// ProtectPlugin adds the specified plugin(s) to be protected in case of clean +func (e *Execution) ProtectPlugin(t testingT, plugins ...string) { + for _, plugin := range plugins { + e.protectedElements.plugins[plugin] = struct{}{} + } +} + +// ProtectPlugins protects existing plugins from being cleaned up at the end of +// test runs +func ProtectPlugins(t testingT, testEnv *Execution) { + plugins := getExistingPlugins(t, testEnv) + testEnv.ProtectPlugin(t, plugins...) +} + +func getExistingPlugins(t require.TestingT, testEnv *Execution) []string { + client := testEnv.APIClient() + pluginList, err := client.PluginList(context.Background(), filters.Args{}) + require.NoError(t, err, "failed to list plugins") + + plugins := []string{} + for _, plugin := range pluginList { + plugins = append(plugins, plugin.Name) + } + return plugins +} + +// ProtectVolume adds the specified volume(s) to be protected in case of clean +func (e *Execution) ProtectVolume(t testingT, volumes ...string) { + for _, volume := range volumes { + e.protectedElements.volumes[volume] = struct{}{} + } +} + +// ProtectVolumes protects existing volumes from being cleaned up at the end of +// test runs +func ProtectVolumes(t testingT, testEnv *Execution) { + volumes := getExistingVolumes(t, testEnv) + testEnv.ProtectVolume(t, volumes...) 
+} + +func getExistingVolumes(t require.TestingT, testEnv *Execution) []string { + client := testEnv.APIClient() + volumeList, err := client.VolumeList(context.Background(), filters.Args{}) + require.NoError(t, err, "failed to list volumes") + + volumes := []string{} + for _, volume := range volumeList.Volumes { + volumes = append(volumes, volume.Name) + } + return volumes +} diff --git a/components/engine/layer/empty_test.go b/components/engine/layer/empty_test.go index 5555dbd8aa..abafc23acc 100644 --- a/components/engine/layer/empty_test.go +++ b/components/engine/layer/empty_test.go @@ -28,6 +28,12 @@ func TestEmptyLayer(t *testing.T) { t.Fatal("expected zero diffsize for empty layer") } + meta, err := EmptyLayer.Metadata() + + if len(meta) != 0 || err != nil { + t.Fatal("expected zero length metadata for empty layer") + } + tarStream, err := EmptyLayer.TarStream() if err != nil { t.Fatalf("error streaming tar for empty layer: %v", err) diff --git a/components/engine/layer/layer.go b/components/engine/layer/layer.go index e269ef8a4a..4ff159e9f8 100644 --- a/components/engine/layer/layer.go +++ b/components/engine/layer/layer.go @@ -15,6 +15,7 @@ import ( "github.com/docker/distribution" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) @@ -137,7 +138,7 @@ type RWLayer interface { // Mount mounts the RWLayer and returns the filesystem path // the to the writable layer. - Mount(mountLabel string) (string, error) + Mount(mountLabel string) (containerfs.ContainerFS, error) // Unmount unmounts the RWLayer. This should be called // for every mount. If there are multiple mount calls @@ -178,7 +179,7 @@ type Metadata struct { // writable mount. Changes made here will // not be included in the Tar stream of the // RWLayer. 
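The RWLayer.Mount and MountInit signature changes in layer.go mean callers no longer receive a bare path string; they get a containerfs.ContainerFS and are expected to go through its path helpers and the continuity driver I/O wrappers, which is what lets the same code operate on layers that are not on the local filesystem. A rough sketch of a MountInit-style callback written against the new signature, using only the containerfs and driver calls that already appear in this diff:

package main

import (
	"fmt"

	"github.com/containerd/continuity/driver"
	"github.com/docker/docker/pkg/containerfs"
)

// writeHosts is a MountInit-style callback under the new signature: every
// path operation and file write goes through the ContainerFS instead of
// os/filepath, so it does not assume the layer lives on the local host.
func writeHosts(root containerfs.ContainerFS) error {
	hosts := root.Join(root.Path(), "etc", "hosts")
	if err := root.MkdirAll(root.Dir(hosts), 0755); err != nil {
		return err
	}
	return driver.WriteFile(root, hosts, []byte("127.0.0.1 localhost\n"), 0644)
}

func main() {
	// For a local layer the ContainerFS is just a wrapper around a host path;
	// the path here is illustrative only.
	root := containerfs.NewLocalContainerFS("/tmp/example-layer")
	fmt.Println(writeHosts(root))
}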
-type MountInit func(root string) error +type MountInit func(root containerfs.ContainerFS) error // CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer type CreateRWLayerOpts struct { diff --git a/components/engine/layer/layer_store.go b/components/engine/layer/layer_store.go index 7283014459..c3973cef70 100644 --- a/components/engine/layer/layer_store.go +++ b/components/engine/layer/layer_store.go @@ -749,5 +749,5 @@ func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, if err != nil { return nil, err } - return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil + return &fileGetPutter{storage.NewPathFileGetter(p.Path()), n.Driver, id}, nil } diff --git a/components/engine/layer/layer_test.go b/components/engine/layer/layer_test.go index 8ec5b4df54..5839ac3852 100644 --- a/components/engine/layer/layer_test.go +++ b/components/engine/layer/layer_test.go @@ -10,9 +10,11 @@ import ( "strings" "testing" + "github.com/containerd/continuity/driver" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/vfs" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/stringid" "github.com/opencontainers/go-digest" @@ -82,7 +84,7 @@ func newTestStore(t *testing.T) (Store, string, func()) { } } -type layerInit func(root string) error +type layerInit func(root containerfs.ContainerFS) error func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { containerID := stringid.GenerateRandomID() @@ -91,12 +93,12 @@ func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { return nil, err } - path, err := mount.Mount("") + pathFS, err := mount.Mount("") if err != nil { return nil, err } - if err := layerFunc(path); err != nil { + if err := layerFunc(pathFS); err != nil { return nil, err } @@ -123,7 +125,7 @@ func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { } type FileApplier interface { - ApplyFile(root string) error + ApplyFile(root containerfs.ContainerFS) error } type testFile struct { @@ -140,25 +142,25 @@ func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { } } -func (tf *testFile) ApplyFile(root string) error { - fullPath := filepath.Join(root, tf.name) - if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { +func (tf *testFile) ApplyFile(root containerfs.ContainerFS) error { + fullPath := root.Join(root.Path(), tf.name) + if err := root.MkdirAll(root.Dir(fullPath), 0755); err != nil { return err } // Check if already exists - if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { - if err := os.Chmod(fullPath, tf.permission); err != nil { + if stat, err := root.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { + if err := root.Lchmod(fullPath, tf.permission); err != nil { return err } } - if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil { + if err := driver.WriteFile(root, fullPath, tf.content, tf.permission); err != nil { return err } return nil } func initWithFiles(files ...FileApplier) layerInit { - return func(root string) error { + return func(root containerfs.ContainerFS) error { for _, f := range files { if err := f.ApplyFile(root); err != nil { return err @@ -288,7 +290,7 @@ func TestMountAndRegister(t *testing.T) { t.Fatal(err) } - b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt")) + b, err := 
driver.ReadFile(path2, path2.Join(path2.Path(), "testfile.txt")) if err != nil { t.Fatal(err) } @@ -391,12 +393,12 @@ func TestStoreRestore(t *testing.T) { t.Fatal(err) } - path, err := m.Mount("") + pathFS, err := m.Mount("") if err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil { + if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile.txt"), []byte("nothing here"), 0644); err != nil { t.Fatal(err) } @@ -430,20 +432,20 @@ func TestStoreRestore(t *testing.T) { if mountPath, err := m2.Mount(""); err != nil { t.Fatal(err) - } else if path != mountPath { - t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } else if pathFS.Path() != mountPath.Path() { + t.Fatalf("Unexpected path %s, expected %s", mountPath.Path(), pathFS.Path()) } if mountPath, err := m2.Mount(""); err != nil { t.Fatal(err) - } else if path != mountPath { - t.Fatalf("Unexpected path %s, expected %s", mountPath, path) + } else if pathFS.Path() != mountPath.Path() { + t.Fatalf("Unexpected path %s, expected %s", mountPath.Path(), pathFS.Path()) } if err := m2.Unmount(); err != nil { t.Fatal(err) } - b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt")) + b, err := driver.ReadFile(pathFS, pathFS.Join(pathFS.Path(), "testfile.txt")) if err != nil { t.Fatal(err) } @@ -618,7 +620,7 @@ func tarFromFiles(files ...FileApplier) ([]byte, error) { defer os.RemoveAll(td) for _, f := range files { - if err := f.ApplyFile(td); err != nil { + if err := f.ApplyFile(containerfs.NewLocalContainerFS(td)); err != nil { return nil, err } } diff --git a/components/engine/layer/layer_windows.go b/components/engine/layer/layer_windows.go index a1c195311e..d02d4d0dda 100644 --- a/components/engine/layer/layer_windows.go +++ b/components/engine/layer/layer_windows.go @@ -1,6 +1,15 @@ package layer -import "errors" +import ( + "errors" +) + +// Getter is an interface to get the path to a layer on the host. +type Getter interface { + // GetLayerPath gets the path for the layer. This is different from Get() + // since that returns an interface to account for umountable layers. 
+ GetLayerPath(id string) (string, error) +} // GetLayerPath returns the path to a layer func GetLayerPath(s Store, layer ChainID) (string, error) { @@ -16,6 +25,10 @@ func GetLayerPath(s Store, layer ChainID) (string, error) { return "", ErrLayerDoesNotExist } + if layerGetter, ok := ls.driver.(Getter); ok { + return layerGetter.GetLayerPath(rl.cacheID) + } + path, err := ls.driver.Get(rl.cacheID, "") if err != nil { return "", err @@ -25,7 +38,7 @@ func GetLayerPath(s Store, layer ChainID) (string, error) { return "", err } - return path, nil + return path.Path(), nil } func (ls *layerStore) mountID(name string) string { diff --git a/components/engine/layer/mount_test.go b/components/engine/layer/mount_test.go index f5799e7cd9..44d461f9b8 100644 --- a/components/engine/layer/mount_test.go +++ b/components/engine/layer/mount_test.go @@ -2,13 +2,13 @@ package layer import ( "io/ioutil" - "os" - "path/filepath" "runtime" "sort" "testing" + "github.com/containerd/continuity/driver" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" ) func TestMountInit(t *testing.T) { @@ -28,7 +28,7 @@ func TestMountInit(t *testing.T) { t.Fatal(err) } - mountInit := func(root string) error { + mountInit := func(root containerfs.ContainerFS) error { return initfile.ApplyFile(root) } @@ -40,22 +40,22 @@ func TestMountInit(t *testing.T) { t.Fatal(err) } - path, err := m.Mount("") + pathFS, err := m.Mount("") if err != nil { t.Fatal(err) } - f, err := os.Open(filepath.Join(path, "testfile.txt")) + fi, err := pathFS.Stat(pathFS.Join(pathFS.Path(), "testfile.txt")) + if err != nil { + t.Fatal(err) + } + + f, err := pathFS.Open(pathFS.Join(pathFS.Path(), "testfile.txt")) if err != nil { t.Fatal(err) } defer f.Close() - fi, err := f.Stat() - if err != nil { - t.Fatal(err) - } - b, err := ioutil.ReadAll(f) if err != nil { t.Fatal(err) @@ -88,7 +88,7 @@ func TestMountSize(t *testing.T) { t.Fatal(err) } - mountInit := func(root string) error { + mountInit := func(root containerfs.ContainerFS) error { return newTestFile("file-init", contentInit, 0777).ApplyFile(root) } rwLayerOpts := &CreateRWLayerOpts{ @@ -100,12 +100,12 @@ func TestMountSize(t *testing.T) { t.Fatal(err) } - path, err := m.Mount("") + pathFS, err := m.Mount("") if err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil { + if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "file2"), content2, 0755); err != nil { t.Fatal(err) } @@ -140,7 +140,7 @@ func TestMountChanges(t *testing.T) { t.Fatal(err) } - mountInit := func(root string) error { + mountInit := func(root containerfs.ContainerFS) error { return initfile.ApplyFile(root) } rwLayerOpts := &CreateRWLayerOpts{ @@ -152,28 +152,28 @@ func TestMountChanges(t *testing.T) { t.Fatal(err) } - path, err := m.Mount("") + pathFS, err := m.Mount("") if err != nil { t.Fatal(err) } - if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil { + if err := pathFS.Lchmod(pathFS.Join(pathFS.Path(), "testfile1.txt"), 0755); err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil { + if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile1.txt"), []byte("mount data!"), 0755); err != nil { t.Fatal(err) } - if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil { + if err := pathFS.Remove(pathFS.Join(pathFS.Path(), "testfile2.txt")); err != nil { t.Fatal(err) } - if err := 
os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil { + if err := pathFS.Lchmod(pathFS.Join(pathFS.Path(), "testfile3.txt"), 0755); err != nil { t.Fatal(err) } - if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil { + if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile4.txt"), []byte("mount data!"), 0644); err != nil { t.Fatal(err) } diff --git a/components/engine/layer/mounted_layer.go b/components/engine/layer/mounted_layer.go index a5cfcfa9bd..47ef966987 100644 --- a/components/engine/layer/mounted_layer.go +++ b/components/engine/layer/mounted_layer.go @@ -4,6 +4,7 @@ import ( "io" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" ) type mountedLayer struct { @@ -88,7 +89,7 @@ type referencedRWLayer struct { *mountedLayer } -func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { +func (rl *referencedRWLayer) Mount(mountLabel string) (containerfs.ContainerFS, error) { return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) } diff --git a/components/engine/libcontainerd/client_linux.go b/components/engine/libcontainerd/client_linux.go index 6c3460a8c9..12808fd0c1 100644 --- a/components/engine/libcontainerd/client_linux.go +++ b/components/engine/libcontainerd/client_linux.go @@ -296,10 +296,7 @@ func (clnt *client) UpdateResources(containerID string, resources Resources) err Pid: InitFriendlyName, Resources: (*containerd.UpdateResource)(&resources), }) - if err != nil { - return err - } - return nil + return err } func (clnt *client) getExitNotifier(containerID string) *exitNotifier { diff --git a/components/engine/libcontainerd/client_windows.go b/components/engine/libcontainerd/client_windows.go index b869f96ba7..8b13d44699 100644 --- a/components/engine/libcontainerd/client_windows.go +++ b/components/engine/libcontainerd/client_windows.go @@ -7,6 +7,7 @@ import ( "io" "io/ioutil" "os" + "path" "path/filepath" "regexp" "strings" @@ -343,7 +344,7 @@ func (clnt *client) createLinux(containerID string, checkpoint string, checkpoin configuration.HvRuntime = &hcsshim.HvRuntime{ ImagePath: lcowOpt.Config.Vhdx, BootSource: "Vhd", - WritableBootSource: true, + WritableBootSource: false, } } else { configuration.HvRuntime = &hcsshim.HvRuntime{ @@ -388,11 +389,101 @@ func (clnt *client) createLinux(containerID string, checkpoint string, checkpoin configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName } + // Add the mounts (volumes, bind mounts etc) to the structure. We have to do + // some translation for both the mapped directories passed into HCS and in + // the spec. + // + // For HCS, we only pass in the mounts from the spec which are type "bind". + // Further, the "ContainerPath" field (which is a little mis-leadingly + // named when it applies to the utility VM rather than the container in the + // utility VM) is moved to under /tmp/gcs//binds, where this is passed + // by the caller through a 'uvmpath' option. + // + // We do similar translation for the mounts in the spec by stripping out + // the uvmpath option, and translating the Source path to the location in the + // utility VM calculated above. + // + // From inside the utility VM, you would see a 9p mount such as in the following + // where a host folder has been mapped to /target. 
The line with /tmp/gcs//binds + // specifically: + // + // / # mount + // rootfs on / type rootfs (rw,size=463736k,nr_inodes=115934) + // proc on /proc type proc (rw,relatime) + // sysfs on /sys type sysfs (rw,relatime) + // udev on /dev type devtmpfs (rw,relatime,size=498100k,nr_inodes=124525,mode=755) + // tmpfs on /run type tmpfs (rw,relatime) + // cgroup on /sys/fs/cgroup type cgroup (rw,relatime,cpuset,cpu,cpuacct,blkio,memory,devices,freezer,net_cls,perf_event,net_prio,hugetlb,pids,rdma) + // mqueue on /dev/mqueue type mqueue (rw,relatime) + // devpts on /dev/pts type devpts (rw,relatime,mode=600,ptmxmode=000) + // /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target on /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target type 9p (rw,sync,dirsync,relatime,trans=fd,rfdno=6,wfdno=6) + // /dev/pmem0 on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl) + // /dev/sda on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl) + // overlay on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/rootfs type overlay (rw,relatime,lowerdir=/tmp/base/:/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0,upperdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/upper,workdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/work) + // + // /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l + // total 16 + // drwx------ 3 0 0 60 Sep 7 18:54 binds + // -rw-r--r-- 1 0 0 3345 Sep 7 18:54 config.json + // drwxr-xr-x 10 0 0 4096 Sep 6 17:26 layer0 + // drwxr-xr-x 1 0 0 4096 Sep 7 18:54 rootfs + // drwxr-xr-x 5 0 0 4096 Sep 7 18:54 scratch + // + // /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l binds + // total 0 + // drwxrwxrwt 2 0 0 4096 Sep 7 16:51 target + + mds := []hcsshim.MappedDir{} + specMounts := []specs.Mount{} + for _, mount := range spec.Mounts { + specMount := mount + if mount.Type == "bind" { + // Strip out the uvmpath from the options + updatedOptions := []string{} + uvmPath := "" + readonly := false + for _, opt := range mount.Options { + dropOption := false + elements := strings.SplitN(opt, "=", 2) + switch elements[0] { + case "uvmpath": + uvmPath = elements[1] + dropOption = true + case "rw": + case "ro": + readonly = true + case "rbind": + default: + return fmt.Errorf("unsupported option %q", opt) + } + if !dropOption { + updatedOptions = append(updatedOptions, opt) + } + } + mount.Options = updatedOptions + if uvmPath == "" { + return fmt.Errorf("no uvmpath for bind mount %+v", mount) + } + md := hcsshim.MappedDir{ + HostPath: mount.Source, + ContainerPath: path.Join(uvmPath, mount.Destination), + CreateInUtilityVM: true, + ReadOnly: readonly, + } + mds = append(mds, md) + specMount.Source = path.Join(uvmPath, mount.Destination) + } + specMounts = append(specMounts, specMount) + } + configuration.MappedDirectories = mds + hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) if err != nil { return err } + spec.Mounts = specMounts + // Construct a container object for calling start on it. 
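The loop above effectively splits each bind mount's options into what HCS needs (the uvmpath and the read-only flag) and what stays in the OCI spec. A stripped-down sketch of just that option parsing, outside the hcsshim plumbing; the sample uvmpath value is illustrative:

package main

import (
	"fmt"
	"strings"
)

// parseBindOptions extracts the uvmpath and read-only flag from a bind
// mount's options and returns the options that should remain in the spec
// (uvmpath itself is dropped, since HCS receives it via a MappedDir).
func parseBindOptions(options []string) (uvmPath string, readonly bool, kept []string, err error) {
	for _, opt := range options {
		elements := strings.SplitN(opt, "=", 2)
		switch elements[0] {
		case "uvmpath":
			if len(elements) == 2 {
				uvmPath = elements[1]
			}
			continue // dropped from the spec options
		case "rw", "rbind":
			// kept as-is
		case "ro":
			readonly = true
		default:
			return "", false, nil, fmt.Errorf("unsupported option %q", opt)
		}
		kept = append(kept, opt)
	}
	if uvmPath == "" {
		return "", false, nil, fmt.Errorf("no uvmpath in %v", options)
	}
	return uvmPath, readonly, kept, nil
}

func main() {
	uvm, ro, kept, err := parseBindOptions([]string{"rbind", "ro", "uvmpath=/tmp/gcs/c1/binds"})
	fmt.Println(uvm, ro, kept, err)
}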
container := &container{ containerCommon: containerCommon{ diff --git a/components/engine/pkg/archive/archive.go b/components/engine/pkg/archive/archive.go index 677c1e41c5..876e605680 100644 --- a/components/engine/pkg/archive/archive.go +++ b/components/engine/pkg/archive/archive.go @@ -55,18 +55,17 @@ type ( } ) -// Archiver allows the reuse of most utility functions of this package -// with a pluggable Untar function. Also, to facilitate the passing of -// specific id mappings for untar, an archiver can be created with maps -// which will then be passed to Untar operations +// Archiver implements the Archiver interface and allows the reuse of most utility functions of +// this package with a pluggable Untar function. Also, to facilitate the passing of specific id +// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMappings *idtools.IDMappings + Untar func(io.Reader, string, *TarOptions) error + IDMappingsVar *idtools.IDMappings } // NewDefaultArchiver returns a new Archiver without any IDMappings func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}} + return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}} } // breakoutError is used to differentiate errors related to breaking out @@ -1025,8 +1024,8 @@ func (archiver *Archiver) TarUntar(src, dst string) error { } defer archive.Close() options := &TarOptions{ - UIDMaps: archiver.IDMappings.UIDs(), - GIDMaps: archiver.IDMappings.GIDs(), + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), } return archiver.Untar(archive, dst, options) } @@ -1039,8 +1038,8 @@ func (archiver *Archiver) UntarPath(src, dst string) error { } defer archive.Close() options := &TarOptions{ - UIDMaps: archiver.IDMappings.UIDs(), - GIDMaps: archiver.IDMappings.GIDs(), + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), } return archiver.Untar(archive, dst, options) } @@ -1058,10 +1057,10 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { return archiver.CopyFileWithTar(src, dst) } - // if this archiver is set up with ID mapping we need to create + // if this Archiver is set up with ID mapping we need to create // the new destination directory with the remapped root UID/GID pair // as owner - rootIDs := archiver.IDMappings.RootPair() + rootIDs := archiver.IDMappingsVar.RootPair() // Create dst, copy src's content into it logrus.Debugf("Creating dest directory: %s", dst) if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { @@ -1112,7 +1111,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { hdr.Name = filepath.Base(dst) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - if err := remapIDs(archiver.IDMappings, hdr); err != nil { + if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil { return err } @@ -1139,6 +1138,11 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { return err } +// IDMappings returns the IDMappings of the archiver. 
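Renaming the Archiver field from IDMappings to IDMappingsVar frees the IDMappings name for the getter method added at the end of this file, which presumably lets *Archiver satisfy an interface defined elsewhere in the tree. A hedged sketch of what such a caller-side interface could look like; the interface name and shape here are illustrative, not taken from this diff:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/idtools"
)

// idMappedUntarrer is a hypothetical consumer-side interface: anything that
// can untar a path and report its ID mappings. After the rename,
// *archive.Archiver has both methods and satisfies it.
type idMappedUntarrer interface {
	IDMappings() *idtools.IDMappings
	UntarPath(src, dst string) error
}

func rootPairOf(u idMappedUntarrer) idtools.IDPair {
	return u.IDMappings().RootPair()
}

func main() {
	var u idMappedUntarrer = archive.NewDefaultArchiver()
	fmt.Printf("%+v\n", rootPairOf(u)) // {UID:0 GID:0} for the default (empty) mappings
}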
+func (archiver *Archiver) IDMappings() *idtools.IDMappings { + return archiver.IDMappingsVar +} + func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) hdr.Uid, hdr.Gid = ids.UID, ids.GID diff --git a/components/engine/pkg/archive/archive_test.go b/components/engine/pkg/archive/archive_test.go index d6be3507b4..20a07fb40d 100644 --- a/components/engine/pkg/archive/archive_test.go +++ b/components/engine/pkg/archive/archive_test.go @@ -1183,8 +1183,10 @@ func TestUntarInvalidSymlink(t *testing.T) { func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := ioutil.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") + require.NoError(t, err) buf := make([]byte, 10) n, err := tempArchive.Read(buf) + require.NoError(t, err) if n != 5 { t.Fatalf("Expected to read 5 bytes. Read %d instead", n) } diff --git a/components/engine/pkg/archive/archive_unix.go b/components/engine/pkg/archive/archive_unix.go index 73814d58d6..ac4a348d5a 100644 --- a/components/engine/pkg/archive/archive_unix.go +++ b/components/engine/pkg/archive/archive_unix.go @@ -50,8 +50,8 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) ( // Currently go does not fill in the major/minors if s.Mode&unix.S_IFBLK != 0 || s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(major(s.Rdev)) - hdr.Devminor = int64(minor(s.Rdev)) + hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert } } diff --git a/components/engine/pkg/archive/changes_test.go b/components/engine/pkg/archive/changes_test.go index 8c14a867ae..86f8ef175a 100644 --- a/components/engine/pkg/archive/changes_test.go +++ b/components/engine/pkg/archive/changes_test.go @@ -188,6 +188,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { t.Skip("symlinks on Windows") } baseLayer, err := ioutil.TempDir("", "docker-changes-test.") + require.NoError(t, err) defer os.RemoveAll(baseLayer) dir3 := path.Join(baseLayer, "dir1/dir2/dir3") @@ -197,6 +198,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { ioutil.WriteFile(file, []byte("hello"), 0666) layer, err := ioutil.TempDir("", "docker-changes-test2.") + require.NoError(t, err) defer os.RemoveAll(layer) // Test creating a new file @@ -219,6 +221,7 @@ func TestChangesWithChangesGH13590(t *testing.T) { // Now test changing a file layer, err = ioutil.TempDir("", "docker-changes-test3.") + require.NoError(t, err) defer os.RemoveAll(layer) if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { @@ -465,6 +468,7 @@ func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { func TestChangesSize(t *testing.T) { parentPath, err := ioutil.TempDir("", "docker-changes-test") + require.NoError(t, err) defer os.RemoveAll(parentPath) addition := path.Join(parentPath, "addition") err = ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744) diff --git a/components/engine/pkg/archive/copy.go b/components/engine/pkg/archive/copy.go index 3adf8a275c..298eb2ad68 100644 --- a/components/engine/pkg/archive/copy.go +++ b/components/engine/pkg/archive/copy.go @@ -27,23 +27,23 @@ var ( // path (from before being processed by utility functions from the path or // filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned // path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in a path separator, then another is not added. 
-func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { +// clean path already ends in the separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { // Ensure paths are in platform semantics - cleanedPath = normalizePath(cleanedPath) - originalPath = normalizePath(originalPath) + cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) + originalPath = strings.Replace(originalPath, "/", string(sep), -1) if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath) { + if !hasTrailingPathSeparator(cleanedPath, sep) { // Add a separator if it doesn't already end with one (a cleaned // path would only end in a separator if it is the root). - cleanedPath += string(filepath.Separator) + cleanedPath += string(sep) } cleanedPath += "." } - if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { - cleanedPath += string(filepath.Separator) + if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { + cleanedPath += string(sep) } return cleanedPath @@ -52,14 +52,14 @@ func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { // assertsDirectory returns whether the given path is // asserted to be a directory, i.e., the path ends with // a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string) bool { - return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) +func assertsDirectory(path string, sep byte) bool { + return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) } // hasTrailingPathSeparator returns whether the given // path ends with the system's path separator character. -func hasTrailingPathSeparator(path string) bool { - return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) +func hasTrailingPathSeparator(path string, sep byte) bool { + return len(path) > 0 && path[len(path)-1] == sep } // specifiesCurrentDir returns whether the given path specifies @@ -72,10 +72,10 @@ func specifiesCurrentDir(path string) bool { // basename by first cleaning the path but preserves a trailing "." if the // original path specified the current directory. func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(normalizePath(path)) + cleanedPath := filepath.Clean(filepath.FromSlash(path)) if specifiesCurrentDir(path) { - cleanedPath += string(filepath.Separator) + "." + cleanedPath += string(os.PathSeparator) + "." } return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) @@ -106,19 +106,24 @@ func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, er // Separate the source path between its directory and // the entry in that directory which we are archiving. 
sourceDir, sourceBase := SplitPathDirEntry(sourcePath) - - filter := []string{sourceBase} + opts := TarResourceRebaseOpts(sourceBase, rebaseName) logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + return TarWithOptions(sourceDir, opts) +} - return TarWithOptions(sourceDir, &TarOptions{ +// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase +// parameters to be sent to TarWithOptions (the TarOptions struct) +func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { + filter := []string{sourceBase} + return &TarOptions{ Compression: Uncompressed, IncludeFiles: filter, IncludeSourceDir: true, RebaseNames: map[string]string{ sourceBase: rebaseName, }, - }) + } } // CopyInfo holds basic info about the source @@ -281,7 +286,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir srcBase = srcInfo.RebaseName } return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path): + case assertsDirectory(dstInfo.Path, os.PathSeparator): // The destination does not exist and is asserted to be created as a // directory, but the source content is not a directory. This is an // error condition since you cannot create a directory from a file @@ -351,6 +356,9 @@ func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.Read return rebased } +// TODO @gupta-ak. These might have to be changed in the future to be +// continuity driver aware as well to support LCOW. + // CopyResource performs an archive copy from the given source path to the // given destination path. The source path MUST exist and the destination // path's parent directory must exist. @@ -365,8 +373,8 @@ func CopyResource(srcPath, dstPath string, followLink bool) error { dstPath = normalizePath(dstPath) // Clean the source and destination paths. - srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator) if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { return err @@ -429,7 +437,8 @@ func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseNa // resolvedDirPath will have been cleaned (no trailing path separators) so // we can manually join it with the base path element. resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { + if hasTrailingPathSeparator(path, os.PathSeparator) && + filepath.Base(path) != filepath.Base(resolvedPath) { rebaseName = filepath.Base(path) } } @@ -442,11 +451,13 @@ func GetRebaseName(path, resolvedPath string) (string, string) { // linkTarget will have been cleaned (no trailing path separators and dot) so // we can manually join it with them var rebaseName string - if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { + if specifiesCurrentDir(path) && + !specifiesCurrentDir(resolvedPath) { resolvedPath += string(filepath.Separator) + "." 
} - if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { + if hasTrailingPathSeparator(path, os.PathSeparator) && + !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { resolvedPath += string(filepath.Separator) } diff --git a/components/engine/pkg/authorization/authz.go b/components/engine/pkg/authorization/authz.go index 924908af9b..3bd2fc0bf6 100644 --- a/components/engine/pkg/authorization/authz.go +++ b/components/engine/pkg/authorization/authz.go @@ -158,7 +158,7 @@ func sendBody(url string, header http.Header) bool { // headers returns flatten version of the http headers excluding authorization func headers(header http.Header) map[string]string { - v := make(map[string]string, 0) + v := make(map[string]string) for k, values := range header { // Skip authorization headers if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") { diff --git a/components/engine/pkg/chrootarchive/archive.go b/components/engine/pkg/chrootarchive/archive.go index 7604418767..d6d07888e8 100644 --- a/components/engine/pkg/chrootarchive/archive.go +++ b/components/engine/pkg/chrootarchive/archive.go @@ -16,7 +16,10 @@ func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { if idMappings == nil { idMappings = &idtools.IDMappings{} } - return &archive.Archiver{Untar: Untar, IDMappings: idMappings} + return &archive.Archiver{ + Untar: Untar, + IDMappingsVar: idMappings, + } } // Untar reads a stream of bytes from `archive`, parses it as a tar archive, diff --git a/components/engine/pkg/containerfs/archiver.go b/components/engine/pkg/containerfs/archiver.go new file mode 100644 index 0000000000..7fffa00036 --- /dev/null +++ b/components/engine/pkg/containerfs/archiver.go @@ -0,0 +1,194 @@ +package containerfs + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/promise" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// TarFunc provides a function definition for a custom Tar function +type TarFunc func(string, *archive.TarOptions) (io.ReadCloser, error) + +// UntarFunc provides a function definition for a custom Untar function +type UntarFunc func(io.Reader, string, *archive.TarOptions) error + +// Archiver provides a similar implementation of the archive.Archiver package with the rootfs abstraction +type Archiver struct { + SrcDriver Driver + DstDriver Driver + Tar TarFunc + Untar UntarFunc + IDMappingsVar *idtools.IDMappings +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + tarArchive, err := archiver.Tar(src, &archive.TarOptions{Compression: archive.Uncompressed}) + if err != nil { + return err + } + defer tarArchive.Close() + options := &archive.TarOptions{ + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), + } + return archiver.Untar(tarArchive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. 
+func (archiver *Archiver) UntarPath(src, dst string) error { + tarArchive, err := archiver.SrcDriver.Open(src) + if err != nil { + return err + } + defer tarArchive.Close() + options := &archive.TarOptions{ + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), + } + return archiver.Untar(tarArchive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := archiver.SrcDriver.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.IDMappingsVar.RootPair() + // Create dst, copy src's content into it + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcDriver := archiver.SrcDriver + dstDriver := archiver.DstDriver + + srcSt, err := srcDriver.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == dstDriver.Separator() { + dst = dstDriver.Join(dst, srcDriver.Base(src)) + } + + // The original call was system.MkdirAll, which is just + // os.MkdirAll on not-Windows and changed for Windows. + if dstDriver.OS() == "windows" { + // Now we are WCOW + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { + return err + } + } else { + // We can just use the driver.MkdirAll function + if err := dstDriver.MkdirAll(dstDriver.Dir(dst), 0700); err != nil { + return err + } + } + + r, w := io.Pipe() + errC := promise.Go(func() error { + defer w.Close() + + srcF, err := srcDriver.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Name = dstDriver.Base(dst) + if dstDriver.OS() == "windows" { + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + } else { + hdr.Mode = int64(os.FileMode(hdr.Mode)) + } + + if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }) + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, dstDriver.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IDMappings returns the IDMappings of the archiver. 
+func (archiver *Archiver) IDMappings() *idtools.IDMappings { + return archiver.IDMappingsVar +} + +func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { + ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) + permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} diff --git a/components/engine/pkg/containerfs/containerfs.go b/components/engine/pkg/containerfs/containerfs.go new file mode 100644 index 0000000000..05842ac64c --- /dev/null +++ b/components/engine/pkg/containerfs/containerfs.go @@ -0,0 +1,87 @@ +package containerfs + +import ( + "path/filepath" + "runtime" + + "github.com/containerd/continuity/driver" + "github.com/containerd/continuity/pathdriver" + "github.com/docker/docker/pkg/symlink" +) + +// ContainerFS is that represents a root file system +type ContainerFS interface { + // Path returns the path to the root. Note that this may not exist + // on the local system, so the continuity operations must be used + Path() string + + // ResolveScopedPath evaluates the given path scoped to the root. + // For example, if root=/a, and path=/b/c, then this function would return /a/b/c. + // If rawPath is true, then the function will not preform any modifications + // before path resolution. Otherwise, the function will clean the given path + // by making it an absolute path. + ResolveScopedPath(path string, rawPath bool) (string, error) + + Driver +} + +// Driver combines both continuity's Driver and PathDriver interfaces with a Platform +// field to determine the OS. +type Driver interface { + // OS returns the OS where the rootfs is located. Essentially, + // runtime.GOOS for everything aside from LCOW, which is "linux" + OS() string + + // Architecture returns the hardware architecture where the + // container is located. + Architecture() string + + // Driver & PathDriver provide methods to manipulate files & paths + driver.Driver + pathdriver.PathDriver +} + +// NewLocalContainerFS is a helper function to implement daemon's Mount interface +// when the graphdriver mount point is a local path on the machine. +func NewLocalContainerFS(path string) ContainerFS { + return &local{ + path: path, + Driver: driver.LocalDriver, + PathDriver: pathdriver.LocalPathDriver, + } +} + +// NewLocalDriver provides file and path drivers for a local file system. They are +// essentially a wrapper around the `os` and `filepath` functions. 
+func NewLocalDriver() Driver { + return &local{ + Driver: driver.LocalDriver, + PathDriver: pathdriver.LocalPathDriver, + } +} + +type local struct { + path string + driver.Driver + pathdriver.PathDriver +} + +func (l *local) Path() string { + return l.path +} + +func (l *local) ResolveScopedPath(path string, rawPath bool) (string, error) { + cleanedPath := path + if !rawPath { + cleanedPath = cleanScopedPath(path) + } + return symlink.FollowSymlinkInScope(filepath.Join(l.path, cleanedPath), l.path) +} + +func (l *local) OS() string { + return runtime.GOOS +} + +func (l *local) Architecture() string { + return runtime.GOARCH +} diff --git a/components/engine/pkg/containerfs/containerfs_unix.go b/components/engine/pkg/containerfs/containerfs_unix.go new file mode 100644 index 0000000000..fbc418f012 --- /dev/null +++ b/components/engine/pkg/containerfs/containerfs_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package containerfs + +import "path/filepath" + +// cleanScopedPath preappends a to combine with a mnt path. +func cleanScopedPath(path string) string { + return filepath.Join(string(filepath.Separator), path) +} diff --git a/components/engine/pkg/containerfs/containerfs_windows.go b/components/engine/pkg/containerfs/containerfs_windows.go new file mode 100644 index 0000000000..56f5a7564f --- /dev/null +++ b/components/engine/pkg/containerfs/containerfs_windows.go @@ -0,0 +1,15 @@ +package containerfs + +import "path/filepath" + +// cleanScopedPath removes the C:\ syntax, and prepares to combine +// with a volume path +func cleanScopedPath(path string) string { + if len(path) >= 2 { + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + path = path[2:] + } + } + return filepath.Join(string(filepath.Separator), path) +} diff --git a/components/engine/pkg/devicemapper/devmapper.go b/components/engine/pkg/devicemapper/devmapper.go index 6a0ac24647..48618765fb 100644 --- a/components/engine/pkg/devicemapper/devmapper.go +++ b/components/engine/pkg/devicemapper/devmapper.go @@ -351,8 +351,7 @@ func RemoveDeviceDeferred(name string) error { // disable udev dm rules and delete the symlink under /dev/mapper by itself, // even if the removal is deferred by the kernel. cookie := new(uint) - var flags uint16 - flags = DmUdevDisableLibraryFallback + flags := uint16(DmUdevDisableLibraryFallback) if err := task.setCookie(cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can not set cookie: %s", err) } @@ -465,8 +464,7 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize } cookie := new(uint) - var flags uint16 - flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag + flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag) if err := task.setCookie(cookie, flags); err != nil { return fmt.Errorf("devicemapper: Can't set cookie %s", err) } diff --git a/components/engine/pkg/devicemapper/devmapper_log.go b/components/engine/pkg/devicemapper/devmapper_log.go index 65a202ad2a..f2ac7da87c 100644 --- a/components/engine/pkg/devicemapper/devmapper_log.go +++ b/components/engine/pkg/devicemapper/devmapper_log.go @@ -12,7 +12,7 @@ import ( ) // DevmapperLogger defines methods required to register as a callback for -// logging events recieved from devicemapper. Note that devicemapper will send +// logging events received from devicemapper. 
Note that devicemapper will send // *all* logs regardless to callbacks (including debug logs) so it's // recommended to not spam the console with the outputs. type DevmapperLogger interface { diff --git a/components/engine/pkg/devicemapper/devmapper_wrapper_static.go b/components/engine/pkg/devicemapper/devmapper_wrapper_static.go deleted file mode 100644 index cf7f26a4c6..0000000000 --- a/components/engine/pkg/devicemapper/devmapper_wrapper_static.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux,cgo,static_build - -package devicemapper - -// #cgo pkg-config: --static devmapper -import "C" diff --git a/components/engine/pkg/discovery/kv/kv_test.go b/components/engine/pkg/discovery/kv/kv_test.go index dab3939dd0..adc5c289a4 100644 --- a/components/engine/pkg/discovery/kv/kv_test.go +++ b/components/engine/pkg/discovery/kv/kv_test.go @@ -11,7 +11,6 @@ import ( "github.com/docker/docker/pkg/discovery" "github.com/docker/libkv" "github.com/docker/libkv/store" - "github.com/go-check/check" ) @@ -130,7 +129,6 @@ func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { // Close mock func (s *Mock) Close() { - return } func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) { diff --git a/components/engine/pkg/dmesg/dmesg_linux.go b/components/engine/pkg/dmesg/dmesg_linux.go new file mode 100644 index 0000000000..7df7f3d436 --- /dev/null +++ b/components/engine/pkg/dmesg/dmesg_linux.go @@ -0,0 +1,20 @@ +// +build linux + +package dmesg + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +// Dmesg returns last messages from the kernel log, up to size bytes +func Dmesg(size int) []byte { + t := uintptr(3) // SYSLOG_ACTION_READ_ALL + b := make([]byte, size) + amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))) + if err != 0 { + return []byte{} + } + return b[:amt] +} diff --git a/components/engine/pkg/dmesg/dmesg_linux_test.go b/components/engine/pkg/dmesg/dmesg_linux_test.go new file mode 100644 index 0000000000..c5028aac1d --- /dev/null +++ b/components/engine/pkg/dmesg/dmesg_linux_test.go @@ -0,0 +1,9 @@ +package dmesg + +import ( + "testing" +) + +func TestDmesg(t *testing.T) { + t.Logf("dmesg output follows:\n%v", string(Dmesg(512))) +} diff --git a/components/engine/pkg/locker/README.md b/components/engine/pkg/locker/README.md index c8dbddc57f..ce787aefb3 100644 --- a/components/engine/pkg/locker/README.md +++ b/components/engine/pkg/locker/README.md @@ -35,7 +35,7 @@ type important struct { func (i *important) Get(name string) interface{} { i.locks.Lock(name) defer i.locks.Unlock(name) - return data[name] + return i.data[name] } func (i *important) Create(name string, data interface{}) { @@ -44,9 +44,9 @@ func (i *important) Create(name string, data interface{}) { i.createImportant(data) - s.mu.Lock() + i.mu.Lock() i.data[name] = data - s.mu.Unlock() + i.mu.Unlock() } func (i *important) createImportant(data interface{}) { diff --git a/components/engine/pkg/loopback/ioctl.go b/components/engine/pkg/loopback/ioctl.go index fa744f0a69..84fcea669c 100644 --- a/components/engine/pkg/loopback/ioctl.go +++ b/components/engine/pkg/loopback/ioctl.go @@ -17,10 +17,7 @@ func ioctlLoopCtlGetFree(fd uintptr) (int, error) { } func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { - if err := unix.IoctlSetInt(int(loopFd), LoopSetFd, int(sparseFd)); err != nil { - return err - } - return nil + return unix.IoctlSetInt(int(loopFd), LoopSetFd, int(sparseFd)) } func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { @@ 
-47,8 +44,5 @@ func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { } func ioctlLoopSetCapacity(loopFd uintptr, value int) error { - if err := unix.IoctlSetInt(int(loopFd), LoopSetCapacity, value); err != nil { - return err - } - return nil + return unix.IoctlSetInt(int(loopFd), LoopSetCapacity, value) } diff --git a/components/engine/pkg/plugins/discovery_unix_test.go b/components/engine/pkg/plugins/discovery_unix_test.go index 66f50353c3..e4d156dbdc 100644 --- a/components/engine/pkg/plugins/discovery_unix_test.go +++ b/components/engine/pkg/plugins/discovery_unix_test.go @@ -10,6 +10,8 @@ import ( "path/filepath" "reflect" "testing" + + "github.com/stretchr/testify/require" ) func TestLocalSocket(t *testing.T) { @@ -89,6 +91,7 @@ func TestScan(t *testing.T) { r := newLocalRegistry() p, err := r.Plugin(name) + require.NoError(t, err) pluginNamesNotEmpty, err := Scan() if err != nil { diff --git a/components/engine/pkg/pools/pools_test.go b/components/engine/pkg/pools/pools_test.go index d71cb99ac7..544c499957 100644 --- a/components/engine/pkg/pools/pools_test.go +++ b/components/engine/pkg/pools/pools_test.go @@ -6,6 +6,9 @@ import ( "io" "strings" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { @@ -92,22 +95,16 @@ func TestBufioWriterPoolPutAndGet(t *testing.T) { buf := new(bytes.Buffer) bw := bufio.NewWriter(buf) writer := BufioWriter32KPool.Get(bw) - if writer == nil { - t.Fatalf("BufioReaderPool should not return a nil writer.") - } + require.NotNil(t, writer) + written, err := writer.Write([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - if written != 6 { - t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) - } + require.NoError(t, err) + assert.Equal(t, 6, written) + // Make sure we Flush all the way ? writer.Flush() bw.Flush() - if len(buf.Bytes()) != 6 { - t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes())) - } + assert.Len(t, buf.Bytes(), 6) // Reset the buffer buf.Reset() BufioWriter32KPool.Put(writer) diff --git a/components/engine/pkg/signal/signal_linux_test.go b/components/engine/pkg/signal/signal_linux_test.go index 32c056fe49..da0e010545 100644 --- a/components/engine/pkg/signal/signal_linux_test.go +++ b/components/engine/pkg/signal/signal_linux_test.go @@ -41,7 +41,7 @@ func TestCatchAll(t *testing.T) { } func TestStopCatch(t *testing.T) { - signal, _ := SignalMap["HUP"] + signal := SignalMap["HUP"] channel := make(chan os.Signal, 1) CatchAll(channel) go func() { diff --git a/components/engine/pkg/system/init_unix.go b/components/engine/pkg/system/init_unix.go new file mode 100644 index 0000000000..a219895e6d --- /dev/null +++ b/components/engine/pkg/system/init_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package system + +// InitLCOW does nothing since LCOW is a windows only feature +func InitLCOW(experimental bool) { +} diff --git a/components/engine/pkg/system/init_windows.go b/components/engine/pkg/system/init_windows.go index 019c66441c..e751837267 100644 --- a/components/engine/pkg/system/init_windows.go +++ b/components/engine/pkg/system/init_windows.go @@ -8,9 +8,10 @@ import "os" // on build number. 
@jhowardmsft var lcowSupported = false -func init() { +// InitLCOW sets whether LCOW is supported or not +func InitLCOW(experimental bool) { // LCOW initialization - if os.Getenv("LCOW_SUPPORTED") != "" { + if experimental && os.Getenv("LCOW_SUPPORTED") != "" { lcowSupported = true } diff --git a/components/engine/pkg/system/path.go b/components/engine/pkg/system/path.go index f634a6be67..4160616f43 100644 --- a/components/engine/pkg/system/path.go +++ b/components/engine/pkg/system/path.go @@ -1,6 +1,13 @@ package system -import "runtime" +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + + "github.com/containerd/continuity/pathdriver" +) const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" @@ -19,3 +26,35 @@ func DefaultPathEnv(platform string) string { return defaultUnixPathEnv } + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. +// On Linux: this is a no-op. +// On Windows: this does the following> +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be contatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) { + if runtime.GOOS != "windows" || LCOWSupported() { + return path, nil + } + + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !driver.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/components/engine/pkg/system/path_unix.go b/components/engine/pkg/system/path_unix.go deleted file mode 100644 index f3762e69d3..0000000000 --- a/components/engine/pkg/system/path_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !windows - -package system - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/components/engine/pkg/system/path_windows.go b/components/engine/pkg/system/path_windows.go deleted file mode 100644 index aab891522d..0000000000 --- a/components/engine/pkg/system/path_windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build windows - -package system - -import ( - "fmt" - "path/filepath" - "strings" -) - -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be concatenated with -// a Windows long-path which doesn't support drive-letters. 
Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) - } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/components/engine/pkg/system/path_windows_test.go b/components/engine/pkg/system/path_windows_test.go index eccb26aaea..0e6bcab522 100644 --- a/components/engine/pkg/system/path_windows_test.go +++ b/components/engine/pkg/system/path_windows_test.go @@ -2,18 +2,23 @@ package system -import "testing" +import ( + "testing" + + "github.com/containerd/continuity/pathdriver" +) // TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { // Fails if not C drive. - path, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`) + _, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`, pathdriver.LocalPathDriver) if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") { t.Fatalf("Expected error for d:") } // Single character is unchanged - if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil { + var path string + if path, err = CheckSystemDriveAndRemoveDriveLetter("z", pathdriver.LocalPathDriver); err != nil { t.Fatalf("Single character should pass") } if path != "z" { @@ -21,7 +26,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Two characters without colon is unchanged - if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter("AB", pathdriver.LocalPathDriver); err != nil { t.Fatalf("2 characters without colon should pass") } if path != "AB" { @@ -29,7 +34,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Abs path without drive letter - if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`, pathdriver.LocalPathDriver); err != nil { t.Fatalf("abs path no drive letter should pass") } if path != `\l` { @@ -37,7 +42,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Abs path without drive letter, linux style - if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`, pathdriver.LocalPathDriver); err != nil { t.Fatalf("abs path no drive letter linux style should pass") } if path != `\l` { @@ -45,7 +50,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Drive-colon should be stripped - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`, pathdriver.LocalPathDriver); err != nil { t.Fatalf("An absolute path should pass") } if path != `\` { @@ -53,7 +58,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Verify with a linux-style path - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`, pathdriver.LocalPathDriver); err != nil { t.Fatalf("An absolute path should pass") } if path != `\` { @@ -61,7 +66,7 @@ func 
TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Failure on c: - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`, pathdriver.LocalPathDriver); err == nil { t.Fatalf("c: should fail") } if err.Error() != `No relative path specified in "c:"` { @@ -69,7 +74,7 @@ func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) { } // Failure on d: - if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil { + if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`, pathdriver.LocalPathDriver); err == nil { t.Fatalf("c: should fail") } if err.Error() != `No relative path specified in "d:"` { diff --git a/components/engine/pkg/system/stat_unix_test.go b/components/engine/pkg/system/stat_unix_test.go index dee8d30a19..15c2e273b1 100644 --- a/components/engine/pkg/system/stat_unix_test.go +++ b/components/engine/pkg/system/stat_unix_test.go @@ -6,6 +6,8 @@ import ( "os" "syscall" "testing" + + "github.com/stretchr/testify/require" ) // TestFromStatT tests fromStatT for a tempfile @@ -15,11 +17,10 @@ func TestFromStatT(t *testing.T) { stat := &syscall.Stat_t{} err := syscall.Lstat(file, stat) + require.NoError(t, err) s, err := fromStatT(stat) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if stat.Mode != s.Mode() { t.Fatal("got invalid mode") diff --git a/components/engine/pkg/tarsum/tarsum_test.go b/components/engine/pkg/tarsum/tarsum_test.go index 86df0e2b89..05f00d36af 100644 --- a/components/engine/pkg/tarsum/tarsum_test.go +++ b/components/engine/pkg/tarsum/tarsum_test.go @@ -16,6 +16,9 @@ import ( "os" "strings" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type testLayer struct { @@ -222,17 +225,13 @@ func TestNewTarSumForLabel(t *testing.T) { func TestEmptyTar(t *testing.T) { // Test without gzip. ts, err := emptyTarSum(false) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) zeroBlock := make([]byte, 1024) buf := new(bytes.Buffer) n, err := io.Copy(buf, ts) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) @@ -247,19 +246,16 @@ func TestEmptyTar(t *testing.T) { // Test with gzip. 
ts, err = emptyTarSum(true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) buf.Reset() - n, err = io.Copy(buf, ts) - if err != nil { - t.Fatal(err) - } + _, err = io.Copy(buf, ts) + require.NoError(t, err) bufgz := new(bytes.Buffer) gz := gzip.NewWriter(bufgz) n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) + require.NoError(t, err) gz.Close() gzBytes := bufgz.Bytes() @@ -279,10 +275,7 @@ func TestEmptyTar(t *testing.T) { } resultSum = ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } + assert.Equal(t, expectedSum, resultSum) } var ( diff --git a/components/engine/pkg/term/ascii_test.go b/components/engine/pkg/term/ascii_test.go index 4a1e7f302c..5078cb7075 100644 --- a/components/engine/pkg/term/ascii_test.go +++ b/components/engine/pkg/term/ascii_test.go @@ -1,43 +1,25 @@ package term -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) func TestToBytes(t *testing.T) { codes, err := ToBytes("ctrl-a,a") - if err != nil { - t.Fatal(err) - } - if len(codes) != 2 { - t.Fatalf("Expected 2 codes, got %d", len(codes)) - } - if codes[0] != 1 || codes[1] != 97 { - t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1]) - } + require.NoError(t, err) + assert.Equal(t, []byte{1, 97}, codes) - codes, err = ToBytes("shift-z") - if err == nil { - t.Fatalf("Expected error, got none") - } + _, err = ToBytes("shift-z") + assert.Error(t, err) codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o") - if err != nil { - t.Fatal(err) - } - if len(codes) != 4 { - t.Fatalf("Expected 4 codes, got %d", len(codes)) - } - if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 { - t.Fatalf("Expected '0' '27' '126', '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3]) - } + require.NoError(t, err) + assert.Equal(t, []byte{0, 27, 126, 15}, codes) codes, err = ToBytes("DEL,+") - if err != nil { - t.Fatal(err) - } - if len(codes) != 2 { - t.Fatalf("Expected 2 codes, got %d", len(codes)) - } - if codes[0] != 127 || codes[1] != 43 { - t.Fatalf("Expected '127 '43'', got '%d' '%d'", codes[0], codes[1]) - } + require.NoError(t, err) + assert.Equal(t, []byte{127, 43}, codes) } diff --git a/components/engine/pkg/term/term_linux_test.go b/components/engine/pkg/term/term_linux_test.go index f907ff53a0..0bb6f1c95f 100644 --- a/components/engine/pkg/term/term_linux_test.go +++ b/components/engine/pkg/term/term_linux_test.go @@ -68,6 +68,7 @@ func TestGetFdInfo(t *testing.T) { require.Equal(t, inFd, tty.Fd()) require.Equal(t, isTerminal, true) tmpFile, err := newTempFile() + require.NoError(t, err) defer tmpFile.Close() inFd, isTerminal = GetFdInfo(tmpFile) require.Equal(t, inFd, tmpFile.Fd()) @@ -81,6 +82,7 @@ func TestIsTerminal(t *testing.T) { isTerminal := IsTerminal(tty.Fd()) require.Equal(t, isTerminal, true) tmpFile, err := newTempFile() + require.NoError(t, err) defer tmpFile.Close() isTerminal = IsTerminal(tmpFile.Fd()) require.Equal(t, isTerminal, false) @@ -94,6 +96,7 @@ func TestSaveState(t *testing.T) { require.NoError(t, err) require.NotNil(t, state) tty, err = newTtyForTest(t) + require.NoError(t, err) defer tty.Close() err = RestoreTerminal(tty.Fd(), state) require.NoError(t, err) diff --git a/components/engine/pkg/tlsconfig/tlsconfig_clone.go b/components/engine/pkg/tlsconfig/tlsconfig_clone.go deleted file mode 100644 index e4dec3a5d1..0000000000 --- a/components/engine/pkg/tlsconfig/tlsconfig_clone.go +++ /dev/null @@ -1,11 +0,0 
@@ -// +build go1.8 - -package tlsconfig - -import "crypto/tls" - -// Clone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. -func Clone(c *tls.Config) *tls.Config { - return c.Clone() -} diff --git a/components/engine/plugin/manager_linux.go b/components/engine/plugin/manager_linux.go index 84bf606346..7c832b55b2 100644 --- a/components/engine/plugin/manager_linux.go +++ b/components/engine/plugin/manager_linux.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/initlayer" "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/plugins" @@ -57,7 +58,8 @@ func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { } } - if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), idtools.IDPair{0, 0}); err != nil { + rootFS := containerfs.NewLocalContainerFS(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName)) + if err := initlayer.Setup(rootFS, idtools.IDPair{0, 0}); err != nil { return errors.WithStack(err) } diff --git a/components/engine/reference/store_test.go b/components/engine/reference/store_test.go index 8f0ff6304e..72a19d6b86 100644 --- a/components/engine/reference/store_test.go +++ b/components/engine/reference/store_test.go @@ -9,7 +9,9 @@ import ( "testing" "github.com/docker/distribution/reference" - "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ( @@ -62,10 +64,10 @@ func TestLoad(t *testing.T) { func TestSave(t *testing.T) { jsonFile, err := ioutil.TempFile("", "tag-store-test") - if err != nil { - t.Fatalf("error creating temp file: %v", err) - } + require.NoError(t, err) + _, err = jsonFile.Write([]byte(`{}`)) + require.NoError(t, err) jsonFile.Close() defer os.RemoveAll(jsonFile.Name()) @@ -304,19 +306,19 @@ func TestAddDeleteGet(t *testing.T) { } // Delete a few references - if deleted, err := store.Delete(ref1); err != nil || deleted != true { + if deleted, err := store.Delete(ref1); err != nil || !deleted { t.Fatal("Delete failed") } if _, err := store.Get(ref1); err != ErrDoesNotExist { t.Fatal("Expected ErrDoesNotExist from Get") } - if deleted, err := store.Delete(ref5); err != nil || deleted != true { + if deleted, err := store.Delete(ref5); err != nil || !deleted { t.Fatal("Delete failed") } if _, err := store.Get(ref5); err != ErrDoesNotExist { t.Fatal("Expected ErrDoesNotExist from Get") } - if deleted, err := store.Delete(nameOnly); err != nil || deleted != true { + if deleted, err := store.Delete(nameOnly); err != nil || !deleted { t.Fatal("Delete failed") } if _, err := store.Get(nameOnly); err != ErrDoesNotExist { @@ -326,32 +328,23 @@ func TestAddDeleteGet(t *testing.T) { func TestInvalidTags(t *testing.T) { tmpDir, err := ioutil.TempDir("", "tag-store-test") + require.NoError(t, err) defer os.RemoveAll(tmpDir) store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json")) - if err != nil { - t.Fatalf("error creating tag store: %v", err) - } + require.NoError(t, err) id := digest.Digest("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6") // sha256 as repo name ref, err := reference.ParseNormalizedNamed("sha256:abc") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = 
store.AddTag(ref, id, true) - if err == nil { - t.Fatalf("expected setting tag %q to fail", ref) - } + assert.Error(t, err) // setting digest as a tag ref, err = reference.ParseNormalizedNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6") - if err != nil { - t.Fatal(err) - } - err = store.AddTag(ref, id, true) - if err == nil { - t.Fatalf("expected setting tag %q to fail", ref) - } + require.NoError(t, err) + err = store.AddTag(ref, id, true) + assert.Error(t, err) } diff --git a/components/engine/registry/config.go b/components/engine/registry/config.go index 70efb4330f..5246faa4ee 100644 --- a/components/engine/registry/config.go +++ b/components/engine/registry/config.go @@ -60,7 +60,7 @@ var ( // not have the correct form ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") - emptyServiceConfig = newServiceConfig(ServiceOptions{}) + emptyServiceConfig, _ = newServiceConfig(ServiceOptions{}) ) var ( @@ -71,22 +71,27 @@ var ( var lookupIP = net.LookupIP // newServiceConfig returns a new instance of ServiceConfig -func newServiceConfig(options ServiceOptions) *serviceConfig { +func newServiceConfig(options ServiceOptions) (*serviceConfig, error) { config := &serviceConfig{ ServiceConfig: registrytypes.ServiceConfig{ InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), - IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), + IndexConfigs: make(map[string]*registrytypes.IndexInfo), // Hack: Bypass setting the mirrors to IndexConfigs since they are going away // and Mirrors are only for the official registry anyways. }, V2Only: options.V2Only, } + if err := config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil { + return nil, err + } + if err := config.LoadMirrors(options.Mirrors); err != nil { + return nil, err + } + if err := config.LoadInsecureRegistries(options.InsecureRegistries); err != nil { + return nil, err + } - config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts) - config.LoadMirrors(options.Mirrors) - config.LoadInsecureRegistries(options.InsecureRegistries) - - return config + return config, nil } // LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config. 
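For context on the hunk above: newServiceConfig (and the exported registry.NewService changed further below) now returns an error instead of silently ignoring invalid mirrors, insecure-registry CIDRs, or nondistributable-artifact registries, so callers are expected to handle it. The following is an illustrative sketch only, not part of the patch; the option values are made up.

    package main

    import (
        "log"

        "github.com/docker/docker/registry"
    )

    func main() {
        // registry.NewService now returns an error that must be handled
        // (previously invalid options were silently dropped).
        svc, err := registry.NewService(registry.ServiceOptions{
            Mirrors:            []string{"https://mirror.example.com"},
            InsecureRegistries: []string{"10.0.0.0/8"},
        })
        if err != nil {
            log.Fatalf("invalid registry configuration: %v", err)
        }
        _ = svc
    }
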
@@ -171,7 +176,7 @@ func (config *serviceConfig) LoadInsecureRegistries(registries []string) error { originalIndexInfos := config.ServiceConfig.IndexConfigs config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0) - config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo, 0) + config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo) skip: for _, r := range registries { diff --git a/components/engine/registry/config_test.go b/components/engine/registry/config_test.go index 8cb7e5a543..0899e17f67 100644 --- a/components/engine/registry/config_test.go +++ b/components/engine/registry/config_test.go @@ -5,6 +5,8 @@ import ( "sort" "strings" "testing" + + "github.com/stretchr/testify/assert" ) func TestLoadAllowNondistributableArtifacts(t *testing.T) { @@ -90,7 +92,7 @@ func TestLoadAllowNondistributableArtifacts(t *testing.T) { }, } for _, testCase := range testCases { - config := newServiceConfig(ServiceOptions{}) + config := emptyServiceConfig err := config.LoadAllowNondistributableArtifacts(testCase.registries) if testCase.err == "" { if err != nil { @@ -233,7 +235,7 @@ func TestLoadInsecureRegistries(t *testing.T) { }, } for _, testCase := range testCases { - config := newServiceConfig(ServiceOptions{}) + config := emptyServiceConfig err := config.LoadInsecureRegistries(testCase.registries) if testCase.err == "" { if err != nil { @@ -258,3 +260,60 @@ func TestLoadInsecureRegistries(t *testing.T) { } } } + +func TestNewServiceConfig(t *testing.T) { + testCases := []struct { + opts ServiceOptions + errStr string + }{ + { + ServiceOptions{}, + "", + }, + { + ServiceOptions{ + Mirrors: []string{"example.com:5000"}, + }, + `invalid mirror: unsupported scheme "example.com" in "example.com:5000"`, + }, + { + ServiceOptions{ + Mirrors: []string{"http://example.com:5000"}, + }, + "", + }, + { + ServiceOptions{ + InsecureRegistries: []string{"[fe80::]/64"}, + }, + `insecure registry [fe80::]/64 is not valid: invalid host "[fe80::]/64"`, + }, + { + ServiceOptions{ + InsecureRegistries: []string{"102.10.8.1/24"}, + }, + "", + }, + { + ServiceOptions{ + AllowNondistributableArtifacts: []string{"[fe80::]/64"}, + }, + `allow-nondistributable-artifacts registry [fe80::]/64 is not valid: invalid host "[fe80::]/64"`, + }, + { + ServiceOptions{ + AllowNondistributableArtifacts: []string{"102.10.8.1/24"}, + }, + "", + }, + } + + for _, testCase := range testCases { + _, err := newServiceConfig(testCase.opts) + if testCase.errStr != "" { + assert.EqualError(t, err, testCase.errStr) + } else { + assert.Nil(t, err) + } + } +} diff --git a/components/engine/registry/registry_mock_test.go b/components/engine/registry/registry_mock_test.go index 204a98344e..cf1cd19c1c 100644 --- a/components/engine/registry/registry_mock_test.go +++ b/components/engine/registry/registry_mock_test.go @@ -175,7 +175,7 @@ func makePublicIndex() *registrytypes.IndexInfo { return index } -func makeServiceConfig(mirrors []string, insecureRegistries []string) *serviceConfig { +func makeServiceConfig(mirrors []string, insecureRegistries []string) (*serviceConfig, error) { options := ServiceOptions{ Mirrors: mirrors, InsecureRegistries: insecureRegistries, diff --git a/components/engine/registry/registry_test.go b/components/engine/registry/registry_test.go index d89c46c2c0..e9c407d4b4 100644 --- a/components/engine/registry/registry_test.go +++ b/components/engine/registry/registry_test.go @@ -14,6 +14,7 @@ import ( "github.com/docker/distribution/registry/client/transport" 
"github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" + "github.com/stretchr/testify/assert" ) var ( @@ -539,7 +540,7 @@ func TestNewIndexInfo(t *testing.T) { } } - config := newServiceConfig(ServiceOptions{}) + config := emptyServiceConfig noMirrors := []string{} expectedIndexInfos := map[string]*registrytypes.IndexInfo{ IndexName: { @@ -570,7 +571,11 @@ func TestNewIndexInfo(t *testing.T) { testIndexInfo(config, expectedIndexInfos) publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} - config = makeServiceConfig(publicMirrors, []string{"example.com"}) + var err error + config, err = makeServiceConfig(publicMirrors, []string{"example.com"}) + if err != nil { + t.Fatal(err) + } expectedIndexInfos = map[string]*registrytypes.IndexInfo{ IndexName: { @@ -618,7 +623,10 @@ func TestNewIndexInfo(t *testing.T) { } testIndexInfo(config, expectedIndexInfos) - config = makeServiceConfig(nil, []string{"42.42.0.0/16"}) + config, err = makeServiceConfig(nil, []string{"42.42.0.0/16"}) + if err != nil { + t.Fatal(err) + } expectedIndexInfos = map[string]*registrytypes.IndexInfo{ "example.com": { Name: "example.com", @@ -663,7 +671,11 @@ func TestMirrorEndpointLookup(t *testing.T) { } return false } - s := DefaultService{config: makeServiceConfig([]string{"https://my.mirror"}, nil)} + cfg, err := makeServiceConfig([]string{"https://my.mirror"}, nil) + if err != nil { + t.Fatal(err) + } + s := DefaultService{config: cfg} imageName, err := reference.WithName(IndexName + "/test/image") if err != nil { @@ -747,16 +759,12 @@ func TestSearchRepositories(t *testing.T) { func TestTrustedLocation(t *testing.T) { for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { req, _ := http.NewRequest("GET", url, nil) - if trustedLocation(req) == true { - t.Fatalf("'%s' shouldn't be detected as a trusted location", url) - } + assert.False(t, trustedLocation(req)) } for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { req, _ := http.NewRequest("GET", url, nil) - if trustedLocation(req) == false { - t.Fatalf("'%s' should be detected as a trusted location", url) - } + assert.True(t, trustedLocation(req)) } } @@ -844,9 +852,12 @@ func TestAllowNondistributableArtifacts(t *testing.T) { {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, true}, } for _, tt := range tests { - config := newServiceConfig(ServiceOptions{ + config, err := newServiceConfig(ServiceOptions{ AllowNondistributableArtifacts: tt.registries, }) + if err != nil { + t.Error(err) + } if v := allowNondistributableArtifacts(config, tt.addr); v != tt.expected { t.Errorf("allowNondistributableArtifacts failed for %q %v, expected %v got %v", tt.addr, tt.registries, tt.expected, v) } @@ -886,7 +897,10 @@ func TestIsSecureIndex(t *testing.T) { {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, } for _, tt := range tests { - config := makeServiceConfig(nil, tt.insecureRegistries) + config, err := makeServiceConfig(nil, tt.insecureRegistries) + if err != nil { + t.Error(err) + } if sec := isSecureIndex(config, tt.addr); sec != tt.expected { t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) } diff --git a/components/engine/registry/service.go b/components/engine/registry/service.go index d36b11f0e1..a991a8fc39 100644 --- a/components/engine/registry/service.go +++ 
b/components/engine/registry/service.go @@ -45,10 +45,10 @@ type DefaultService struct { // NewService returns a new instance of DefaultService ready to be // installed into an engine. -func NewService(options ServiceOptions) *DefaultService { - return &DefaultService{ - config: newServiceConfig(options), - } +func NewService(options ServiceOptions) (*DefaultService, error) { + config, err := newServiceConfig(options) + + return &DefaultService{config: config}, err } // ServiceConfig returns the public registry service configuration. diff --git a/components/engine/registry/service_v1_test.go b/components/engine/registry/service_v1_test.go index bd15dfffb8..6ea846eed7 100644 --- a/components/engine/registry/service_v1_test.go +++ b/components/engine/registry/service_v1_test.go @@ -3,7 +3,10 @@ package registry import "testing" func TestLookupV1Endpoints(t *testing.T) { - s := NewService(ServiceOptions{}) + s, err := NewService(ServiceOptions{}) + if err != nil { + t.Fatal(err) + } cases := []struct { hostname string diff --git a/components/engine/runconfig/config.go b/components/engine/runconfig/config.go index 3d236deb53..56eb946cc8 100644 --- a/components/engine/runconfig/config.go +++ b/components/engine/runconfig/config.go @@ -7,8 +7,6 @@ import ( "github.com/docker/docker/api/types/container" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/volume" - "github.com/pkg/errors" ) // ContainerDecoder implements httputils.ContainerDecoder @@ -46,11 +44,6 @@ func decodeContainerConfig(src io.Reader) (*container.Config, *container.HostCon if w.Config.Volumes == nil { w.Config.Volumes = make(map[string]struct{}) } - - // Now validate all the volumes and binds - if err := validateMountSettings(w.Config, hc); err != nil { - return nil, nil, nil, err - } } // Certain parameters need daemon-side validation that cannot be done @@ -86,23 +79,3 @@ func decodeContainerConfig(src io.Reader) (*container.Config, *container.HostCon return w.Config, hc, w.NetworkingConfig, nil } - -// validateMountSettings validates each of the volumes and bind settings -// passed by the caller to ensure they are valid. -func validateMountSettings(c *container.Config, hc *container.HostConfig) error { - // it is ok to have len(hc.Mounts) > 0 && (len(hc.Binds) > 0 || len (c.Volumes) > 0 || len (hc.Tmpfs) > 0 ) - - // Ensure all volumes and binds are valid. 
- for spec := range c.Volumes { - if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { - return errors.Wrapf(err, "invalid volume spec %q", spec) - } - } - for _, spec := range hc.Binds { - if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil { - return errors.Wrapf(err, "invalid bind mount spec %q", spec) - } - } - - return nil -} diff --git a/components/engine/runconfig/config_test.go b/components/engine/runconfig/config_test.go index ebd74ea31c..0a4ee1fa2a 100644 --- a/components/engine/runconfig/config_test.go +++ b/components/engine/runconfig/config_test.go @@ -9,12 +9,9 @@ import ( "strings" "testing" - "os" - "github.com/docker/docker/api/types/container" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/strslice" - "github.com/gotestyourself/gotestyourself/skip" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -143,103 +140,6 @@ func callDecodeContainerConfigIsolation(isolation string) (*container.Config, *c return decodeContainerConfig(bytes.NewReader(b)) } -func TestDecodeContainerConfigWithVolumes(t *testing.T) { - var testcases = []decodeConfigTestcase{ - { - doc: "no paths volume", - wrapper: containerWrapperWithVolume(":"), - expectedErr: `invalid volume specification: ':'`, - }, - { - doc: "no paths bind", - wrapper: containerWrapperWithBind(":"), - expectedErr: `invalid volume specification: ':'`, - }, - { - doc: "no paths or mode volume", - wrapper: containerWrapperWithVolume("::"), - expectedErr: `invalid volume specification: '::'`, - }, - { - doc: "no paths or mode bind", - wrapper: containerWrapperWithBind("::"), - expectedErr: `invalid volume specification: '::'`, - }, - } - for _, testcase := range testcases { - t.Run(testcase.doc, runDecodeContainerConfigTestCase(testcase)) - } -} - -func TestDecodeContainerConfigWithVolumesUnix(t *testing.T) { - skip.IfCondition(t, runtime.GOOS == "windows") - - baseErr := `invalid mount config for type "volume": invalid specification: ` - var testcases = []decodeConfigTestcase{ - { - doc: "root to root volume", - wrapper: containerWrapperWithVolume("/:/"), - expectedErr: `invalid volume specification: '/:/'`, - }, - { - doc: "root to root bind", - wrapper: containerWrapperWithBind("/:/"), - expectedErr: `invalid volume specification: '/:/'`, - }, - { - doc: "no destination path volume", - wrapper: containerWrapperWithVolume(`/tmp:`), - expectedErr: ` invalid volume specification: '/tmp:'`, - }, - { - doc: "no destination path bind", - wrapper: containerWrapperWithBind(`/tmp:`), - expectedErr: ` invalid volume specification: '/tmp:'`, - }, - { - doc: "no destination path or mode volume", - wrapper: containerWrapperWithVolume(`/tmp::`), - expectedErr: `invalid mount config for type "bind": field Target must not be empty`, - }, - { - doc: "no destination path or mode bind", - wrapper: containerWrapperWithBind(`/tmp::`), - expectedErr: `invalid mount config for type "bind": field Target must not be empty`, - }, - { - doc: "too many sections volume", - wrapper: containerWrapperWithVolume(`/tmp:/tmp:/tmp:/tmp`), - expectedErr: `invalid volume specification: '/tmp:/tmp:/tmp:/tmp'`, - }, - { - doc: "too many sections bind", - wrapper: containerWrapperWithBind(`/tmp:/tmp:/tmp:/tmp`), - expectedErr: `invalid volume specification: '/tmp:/tmp:/tmp:/tmp'`, - }, - { - doc: "just root volume", - wrapper: containerWrapperWithVolume("/"), - expectedErr: baseErr + `destination can't be '/'`, - }, - { - doc: "just root bind", - wrapper: 
containerWrapperWithBind("/"), - expectedErr: baseErr + `destination can't be '/'`, - }, - { - doc: "bind mount passed as a volume", - wrapper: containerWrapperWithVolume(`/foo:/bar`), - expectedConfig: &container.Config{ - Volumes: map[string]struct{}{`/foo:/bar`: {}}, - }, - expectedHostConfig: &container.HostConfig{NetworkMode: "default"}, - }, - } - for _, testcase := range testcases { - t.Run(testcase.doc, runDecodeContainerConfigTestCase(testcase)) - } -} - type decodeConfigTestcase struct { doc string wrapper ContainerConfigWrapper @@ -266,89 +166,6 @@ func runDecodeContainerConfigTestCase(testcase decodeConfigTestcase) func(t *tes } } -func TestDecodeContainerConfigWithVolumesWindows(t *testing.T) { - skip.IfCondition(t, runtime.GOOS != "windows") - - tmpDir := os.Getenv("TEMP") - systemDrive := os.Getenv("SystemDrive") - var testcases = []decodeConfigTestcase{ - { - doc: "root to root volume", - wrapper: containerWrapperWithVolume(systemDrive + `\:c:\`), - expectedErr: `invalid volume specification: `, - }, - { - doc: "root to root bind", - wrapper: containerWrapperWithBind(systemDrive + `\:c:\`), - expectedErr: `invalid volume specification: `, - }, - { - doc: "no destination path volume", - wrapper: containerWrapperWithVolume(tmpDir + `\:`), - expectedErr: `invalid volume specification: `, - }, - { - doc: "no destination path bind", - wrapper: containerWrapperWithBind(tmpDir + `\:`), - expectedErr: `invalid volume specification: `, - }, - { - doc: "no destination path or mode volume", - wrapper: containerWrapperWithVolume(tmpDir + `\::`), - expectedErr: `invalid volume specification: `, - }, - { - doc: "no destination path or mode bind", - wrapper: containerWrapperWithBind(tmpDir + `\::`), - expectedErr: `invalid volume specification: `, - }, - { - doc: "too many sections volume", - wrapper: containerWrapperWithVolume(tmpDir + ":" + tmpDir + ":" + tmpDir + ":" + tmpDir), - expectedErr: `invalid volume specification: `, - }, - { - doc: "too many sections bind", - wrapper: containerWrapperWithBind(tmpDir + ":" + tmpDir + ":" + tmpDir + ":" + tmpDir), - expectedErr: `invalid volume specification: `, - }, - { - doc: "no drive letter volume", - wrapper: containerWrapperWithVolume(`\tmp`), - expectedErr: `invalid volume specification: `, - }, - { - doc: "no drive letter bind", - wrapper: containerWrapperWithBind(`\tmp`), - expectedErr: `invalid volume specification: `, - }, - { - doc: "root to c-drive volume", - wrapper: containerWrapperWithVolume(systemDrive + `\:c:`), - expectedErr: `invalid volume specification: `, - }, - { - doc: "root to c-drive bind", - wrapper: containerWrapperWithBind(systemDrive + `\:c:`), - expectedErr: `invalid volume specification: `, - }, - { - doc: "container path without driver letter volume", - wrapper: containerWrapperWithVolume(`c:\windows:\somewhere`), - expectedErr: `invalid volume specification: `, - }, - { - doc: "container path without driver letter bind", - wrapper: containerWrapperWithBind(`c:\windows:\somewhere`), - expectedErr: `invalid volume specification: `, - }, - } - - for _, testcase := range testcases { - t.Run(testcase.doc, runDecodeContainerConfigTestCase(testcase)) - } -} - func marshal(t *testing.T, w ContainerConfigWrapper, doc string) []byte { b, err := json.Marshal(w) require.NoError(t, err, "%s: failed to encode config wrapper", doc) diff --git a/components/engine/runconfig/hostconfig.go b/components/engine/runconfig/hostconfig.go index 3a90c84498..cfc5383f02 100644 --- a/components/engine/runconfig/hostconfig.go +++ 
b/components/engine/runconfig/hostconfig.go @@ -68,7 +68,7 @@ func validateNetContainerMode(c *container.Config, hc *container.HostConfig) err return ErrConflictContainerNetworkAndMac } - if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts) { return ErrConflictNetworkPublishPorts } diff --git a/components/engine/runconfig/hostconfig_test.go b/components/engine/runconfig/hostconfig_test.go index b461c16f0c..76c3fa2cee 100644 --- a/components/engine/runconfig/hostconfig_test.go +++ b/components/engine/runconfig/hostconfig_test.go @@ -195,9 +195,7 @@ func TestDecodeHostConfig(t *testing.T) { t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) } - if c.Privileged != false { - t.Fatalf("Expected privileged false, found %v\n", c.Privileged) - } + assert.False(t, c.Privileged) if l := len(c.Binds); l != 1 { t.Fatalf("Expected 1 bind, found %d\n", l) diff --git a/components/engine/vendor.conf b/components/engine/vendor.conf index 535adad387..d760b938c0 100644 --- a/components/engine/vendor.conf +++ b/components/engine/vendor.conf @@ -1,6 +1,6 @@ # the following lines are in sorted order, FYI github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e -github.com/Microsoft/hcsshim v0.6.3 +github.com/Microsoft/hcsshim v0.6.5 github.com/Microsoft/go-winio v0.4.5 github.com/moby/buildkit da2b9dc7dab99e824b2b1067ad7d0523e32dd2d9 https://github.com/dmcgowan/buildkit.git github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 @@ -8,10 +8,10 @@ github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git github.com/gorilla/context v1.1 github.com/gorilla/mux v1.1 -github.com/Microsoft/opengcs v0.3.2 +github.com/Microsoft/opengcs v0.3.3 github.com/kr/pty 5cf931ef8f github.com/mattn/go-shellwords v1.0.3 -github.com/sirupsen/logrus v1.0.1 +github.com/sirupsen/logrus v1.0.3 github.com/tchap/go-patricia v2.2.6 github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 @@ -27,8 +27,10 @@ github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 github.com/imdario/mergo 0.2.1 golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0 +github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8 + #get libnetwork packages -github.com/docker/libnetwork 5b28c0ec98236c489e39ae6a9e1aeb802e071681 +github.com/docker/libnetwork 60e002dd61885e1cd909582f00f7eb4da634518a github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec @@ -63,7 +65,7 @@ github.com/pborman/uuid v1.0 google.golang.org/grpc v1.3.0 # When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly -github.com/opencontainers/runc 3f2f8b84a77f73d38244dd690525642a72156c64 +github.com/opencontainers/runc 1c81e2a794c6e26a4c650142ae8893c47f619764 github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13 github.com/opencontainers/runtime-spec v1.0.0 @@ -108,11 +110,11 @@ github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb # cluster -github.com/docker/swarmkit ddb4539f883b18ea40af44ee6de63ac2adc8dc1e +github.com/docker/swarmkit 
bd7bafb8a61de1f5f23c8215ce7b9ecbcb30ff21 github.com/gogo/protobuf v0.4 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e -golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 +golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 diff --git a/components/engine/vendor/github.com/Microsoft/hcsshim/container.go b/components/engine/vendor/github.com/Microsoft/hcsshim/container.go index b924d39f46..3354f70efc 100644 --- a/components/engine/vendor/github.com/Microsoft/hcsshim/container.go +++ b/components/engine/vendor/github.com/Microsoft/hcsshim/container.go @@ -201,12 +201,18 @@ func createContainerWithJSON(id string, c *ContainerConfig, additionalJSON strin if createError == nil || IsPending(createError) { if err := container.registerCallback(); err != nil { + // Terminate the container if it still exists. We're okay to ignore a failure here. + container.Terminate() return nil, makeContainerError(container, operation, "", err) } } err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout) if err != nil { + if err == ErrTimeout { + // Terminate the container if it still exists. We're okay to ignore a failure here. + container.Terminate() + } return nil, makeContainerError(container, operation, configuration, err) } diff --git a/components/engine/vendor/github.com/Microsoft/hcsshim/interface.go b/components/engine/vendor/github.com/Microsoft/hcsshim/interface.go index 9fc7852e41..e21f30025a 100644 --- a/components/engine/vendor/github.com/Microsoft/hcsshim/interface.go +++ b/components/engine/vendor/github.com/Microsoft/hcsshim/interface.go @@ -30,11 +30,12 @@ type Layer struct { } type MappedDir struct { - HostPath string - ContainerPath string - ReadOnly bool - BandwidthMaximum uint64 - IOPSMaximum uint64 + HostPath string + ContainerPath string + ReadOnly bool + BandwidthMaximum uint64 + IOPSMaximum uint64 + CreateInUtilityVM bool } type MappedPipe struct { diff --git a/components/engine/vendor/github.com/Microsoft/opengcs/client/config.go b/components/engine/vendor/github.com/Microsoft/opengcs/client/config.go index 12119574a3..5ece88b91d 100644 --- a/components/engine/vendor/github.com/Microsoft/opengcs/client/config.go +++ b/components/engine/vendor/github.com/Microsoft/opengcs/client/config.go @@ -93,10 +93,10 @@ func ParseOptions(options []string) (Options, error) { case "lcow.timeout": var err error if rOpts.TimeoutSeconds, err = strconv.Atoi(opt[1]); err != nil { - return rOpts, fmt.Errorf("opengcstimeoutsecs option could not be interpreted as an integer") + return rOpts, fmt.Errorf("lcow.timeout option could not be interpreted as an integer") } if rOpts.TimeoutSeconds < 0 { - return rOpts, fmt.Errorf("opengcstimeoutsecs option cannot be negative") + return rOpts, fmt.Errorf("lcow.timeout option cannot be negative") } } } @@ -242,7 +242,7 @@ func (config *Config) StartUtilityVM() error { configuration.HvRuntime = &hcsshim.HvRuntime{ ImagePath: config.Vhdx, BootSource: "Vhd", - WritableBootSource: true, + WritableBootSource: false, } } else { configuration.HvRuntime = &hcsshim.HvRuntime{ diff --git a/components/engine/vendor/github.com/Microsoft/opengcs/service/gcsutils/README 
b/components/engine/vendor/github.com/Microsoft/opengcs/service/gcsutils/README
new file mode 100644
index 0000000000..85e54428a0
--- /dev/null
+++ b/components/engine/vendor/github.com/Microsoft/opengcs/service/gcsutils/README
@@ -0,0 +1,4 @@
+1. This program only runs on Linux, so first copy the files over to a Linux machine.
+2. Install Go and then run make get-deps && make. This sets the $GOPATH for you and builds the binaries.
+3. vhd_to_tar and tar_to_vhd are the standalone executables that read/write to stdin/out and do the tar <-> vhd conversion.
+   tar2vhd_server is the service VM server that takes client requests over hvsock.
diff --git a/components/engine/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/defs.go b/components/engine/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/defs.go
new file mode 100644
index 0000000000..f1f2c04a45
--- /dev/null
+++ b/components/engine/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/defs.go
@@ -0,0 +1,109 @@
+package remotefs
+
+import (
+	"errors"
+	"os"
+	"time"
+)
+
+// RemotefsCmd is the name of the remotefs meta command
+const RemotefsCmd = "remotefs"
+
+// Name of the commands when called from the cli context (remotefs ...)
+const (
+	StatCmd           = "stat"
+	LstatCmd          = "lstat"
+	ReadlinkCmd       = "readlink"
+	MkdirCmd          = "mkdir"
+	MkdirAllCmd       = "mkdirall"
+	RemoveCmd         = "remove"
+	RemoveAllCmd      = "removeall"
+	LinkCmd           = "link"
+	SymlinkCmd        = "symlink"
+	LchmodCmd         = "lchmod"
+	LchownCmd         = "lchown"
+	MknodCmd          = "mknod"
+	MkfifoCmd         = "mkfifo"
+	OpenFileCmd       = "openfile"
+	ReadFileCmd       = "readfile"
+	WriteFileCmd      = "writefile"
+	ReadDirCmd        = "readdir"
+	ResolvePathCmd    = "resolvepath"
+	ExtractArchiveCmd = "extractarchive"
+	ArchivePathCmd    = "archivepath"
+)
+
+// ErrInvalid is returned if the parameters are invalid
+var ErrInvalid = errors.New("invalid arguments")
+
+// ErrUnknown is returned for an unknown remotefs command
+var ErrUnknown = errors.New("unknown command")
+
+// ExportedError is the serialized version of a Go error.
+// It also provides a trivial implementation of the error interface.
+type ExportedError struct {
+	ErrString string
+	ErrNum    int `json:",omitempty"`
+}
+
+// Error returns an error string
+func (ee *ExportedError) Error() string {
+	return ee.ErrString
+}
+
+// FileInfo is the stat struct returned by the remotefs system. It
+// fulfills the os.FileInfo interface.
+type FileInfo struct {
+	NameVar    string
+	SizeVar    int64
+	ModeVar    os.FileMode
+	ModTimeVar int64 // Serialization of time.Time breaks in travis, so use an int
+	IsDirVar   bool
+}
+
+var _ os.FileInfo = &FileInfo{}
+
+// Name returns the filename from a FileInfo structure
+func (f *FileInfo) Name() string { return f.NameVar }
+
+// Size returns the size from a FileInfo structure
+func (f *FileInfo) Size() int64 { return f.SizeVar }
+
+// Mode returns the mode from a FileInfo structure
+func (f *FileInfo) Mode() os.FileMode { return f.ModeVar }
+
+// ModTime returns the modification time from a FileInfo structure
+func (f *FileInfo) ModTime() time.Time { return time.Unix(0, f.ModTimeVar) }
+
+// IsDir returns the is-directory indicator from a FileInfo structure
+func (f *FileInfo) IsDir() bool { return f.IsDirVar }
+
+// Sys provides an interface to a FileInfo structure
+func (f *FileInfo) Sys() interface{} { return nil }
+
+// FileHeader is a header for remote *os.File operations for remotefs.OpenFile
+type FileHeader struct {
+	Cmd  uint32
+	Size uint64
+}
+
+const (
+	// Read request command.
+ Read uint32 = iota + // Write request command. + Write + // Seek request command. + Seek + // Close request command. + Close + // CmdOK is a response meaning request succeeded. + CmdOK + // CmdFailed is a response meaning request failed. + CmdFailed +) + +// SeekHeader is header for the Seek operation for remotefs.OpenFile +type SeekHeader struct { + Offset int64 + Whence int32 +} diff --git a/components/engine/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/remotefs.go b/components/engine/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/remotefs.go new file mode 100644 index 0000000000..2d4a9f2bfe --- /dev/null +++ b/components/engine/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/remotefs.go @@ -0,0 +1,546 @@ +// +build !windows + +package remotefs + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "io" + "os" + "path/filepath" + "strconv" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/symlink" + "golang.org/x/sys/unix" +) + +// Func is the function definition for a generic remote fs function +// The input to the function is any serialized structs / data from in and the string slice +// from args. The output of the function will be serialized and written to out. +type Func func(stdin io.Reader, stdout io.Writer, args []string) error + +// Commands provide a string -> remotefs function mapping. +// This is useful for commandline programs that will receive a string +// as the function to execute. +var Commands = map[string]Func{ + StatCmd: Stat, + LstatCmd: Lstat, + ReadlinkCmd: Readlink, + MkdirCmd: Mkdir, + MkdirAllCmd: MkdirAll, + RemoveCmd: Remove, + RemoveAllCmd: RemoveAll, + LinkCmd: Link, + SymlinkCmd: Symlink, + LchmodCmd: Lchmod, + LchownCmd: Lchown, + MknodCmd: Mknod, + MkfifoCmd: Mkfifo, + OpenFileCmd: OpenFile, + ReadFileCmd: ReadFile, + WriteFileCmd: WriteFile, + ReadDirCmd: ReadDir, + ResolvePathCmd: ResolvePath, + ExtractArchiveCmd: ExtractArchive, + ArchivePathCmd: ArchivePath, +} + +// Stat functions like os.Stat. +// Args: +// - args[0] is the path +// Out: +// - out = FileInfo object +func Stat(in io.Reader, out io.Writer, args []string) error { + return stat(in, out, args, os.Stat) +} + +// Lstat functions like os.Lstat. 
+// Args: +// - args[0] is the path +// Out: +// - out = FileInfo object +func Lstat(in io.Reader, out io.Writer, args []string) error { + return stat(in, out, args, os.Lstat) +} + +func stat(in io.Reader, out io.Writer, args []string, statfunc func(string) (os.FileInfo, error)) error { + if len(args) < 1 { + return ErrInvalid + } + + fi, err := statfunc(args[0]) + if err != nil { + return err + } + + info := FileInfo{ + NameVar: fi.Name(), + SizeVar: fi.Size(), + ModeVar: fi.Mode(), + ModTimeVar: fi.ModTime().UnixNano(), + IsDirVar: fi.IsDir(), + } + + buf, err := json.Marshal(info) + if err != nil { + return err + } + + if _, err := out.Write(buf); err != nil { + return err + } + return nil +} + +// Readlink works like os.Readlink +// In: +// - args[0] is path +// Out: +// - Write link result to out +func Readlink(in io.Reader, out io.Writer, args []string) error { + if len(args) < 1 { + return ErrInvalid + } + + l, err := os.Readlink(args[0]) + if err != nil { + return err + } + + if _, err := out.Write([]byte(l)); err != nil { + return err + } + return nil +} + +// Mkdir works like os.Mkdir +// Args: +// - args[0] is the path +// - args[1] is the permissions in octal (like 0755) +func Mkdir(in io.Reader, out io.Writer, args []string) error { + return mkdir(in, out, args, os.Mkdir) +} + +// MkdirAll works like os.MkdirAll. +// Args: +// - args[0] is the path +// - args[1] is the permissions in octal (like 0755) +func MkdirAll(in io.Reader, out io.Writer, args []string) error { + return mkdir(in, out, args, os.MkdirAll) +} + +func mkdir(in io.Reader, out io.Writer, args []string, mkdirFunc func(string, os.FileMode) error) error { + if len(args) < 2 { + return ErrInvalid + } + + perm, err := strconv.ParseUint(args[1], 8, 32) + if err != nil { + return err + } + return mkdirFunc(args[0], os.FileMode(perm)) +} + +// Remove works like os.Remove +// Args: +// - args[0] is the path +func Remove(in io.Reader, out io.Writer, args []string) error { + return remove(in, out, args, os.Remove) +} + +// RemoveAll works like os.RemoveAll +// Args: +// - args[0] is the path +func RemoveAll(in io.Reader, out io.Writer, args []string) error { + return remove(in, out, args, os.RemoveAll) +} + +func remove(in io.Reader, out io.Writer, args []string, removefunc func(string) error) error { + if len(args) < 1 { + return ErrInvalid + } + return removefunc(args[0]) +} + +// Link works like os.Link +// Args: +// - args[0] = old path name (link source) +// - args[1] = new path name (link dest) +func Link(in io.Reader, out io.Writer, args []string) error { + return link(in, out, args, os.Link) +} + +// Symlink works like os.Symlink +// Args: +// - args[0] = old path name (link source) +// - args[1] = new path name (link dest) +func Symlink(in io.Reader, out io.Writer, args []string) error { + return link(in, out, args, os.Symlink) +} + +func link(in io.Reader, out io.Writer, args []string, linkfunc func(string, string) error) error { + if len(args) < 2 { + return ErrInvalid + } + return linkfunc(args[0], args[1]) +} + +// Lchmod changes permission of the given file without following symlinks +// Args: +// - args[0] = path +// - args[1] = permission mode in octal (like 0755) +func Lchmod(in io.Reader, out io.Writer, args []string) error { + if len(args) < 2 { + return ErrInvalid + } + + perm, err := strconv.ParseUint(args[1], 8, 32) + if err != nil { + return err + } + + path := args[0] + if !filepath.IsAbs(path) { + path, err = filepath.Abs(path) + if err != nil { + return err + } + } + return unix.Fchmodat(0, 
path, uint32(perm), unix.AT_SYMLINK_NOFOLLOW) +} + +// Lchown works like os.Lchown +// Args: +// - args[0] = path +// - args[1] = uid in base 10 +// - args[2] = gid in base 10 +func Lchown(in io.Reader, out io.Writer, args []string) error { + if len(args) < 3 { + return ErrInvalid + } + + uid, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + return err + } + + gid, err := strconv.ParseInt(args[2], 10, 64) + if err != nil { + return err + } + return os.Lchown(args[0], int(uid), int(gid)) +} + +// Mknod works like syscall.Mknod +// Args: +// - args[0] = path +// - args[1] = permission mode in octal (like 0755) +// - args[2] = major device number in base 10 +// - args[3] = minor device number in base 10 +func Mknod(in io.Reader, out io.Writer, args []string) error { + if len(args) < 4 { + return ErrInvalid + } + + perm, err := strconv.ParseUint(args[1], 8, 32) + if err != nil { + return err + } + + major, err := strconv.ParseInt(args[2], 10, 32) + if err != nil { + return err + } + + minor, err := strconv.ParseInt(args[3], 10, 32) + if err != nil { + return err + } + + dev := unix.Mkdev(uint32(major), uint32(minor)) + return unix.Mknod(args[0], uint32(perm), int(dev)) +} + +// Mkfifo creates a FIFO special file with the given path name and permissions +// Args: +// - args[0] = path +// - args[1] = permission mode in octal (like 0755) +func Mkfifo(in io.Reader, out io.Writer, args []string) error { + if len(args) < 2 { + return ErrInvalid + } + + perm, err := strconv.ParseUint(args[1], 8, 32) + if err != nil { + return err + } + return unix.Mkfifo(args[0], uint32(perm)) +} + +// OpenFile works like os.OpenFile. To manage the file pointer state, +// this function acts as a single file "file server" with Read/Write/Close +// being serialized control codes from in. 
+// Args: +// - args[0] = path +// - args[1] = flag in base 10 +// - args[2] = permission mode in octal (like 0755) +func OpenFile(in io.Reader, out io.Writer, args []string) (err error) { + defer func() { + if err != nil { + // error code will be serialized by the caller, so don't write it here + WriteFileHeader(out, &FileHeader{Cmd: CmdFailed}, nil) + } + }() + + if len(args) < 3 { + return ErrInvalid + } + + flag, err := strconv.ParseInt(args[1], 10, 32) + if err != nil { + return err + } + + perm, err := strconv.ParseUint(args[2], 8, 32) + if err != nil { + return err + } + + f, err := os.OpenFile(args[0], int(flag), os.FileMode(perm)) + if err != nil { + return err + } + + // Signal the client that OpenFile succeeded + if err := WriteFileHeader(out, &FileHeader{Cmd: CmdOK}, nil); err != nil { + return err + } + + for { + hdr, err := ReadFileHeader(in) + if err != nil { + return err + } + + var buf []byte + switch hdr.Cmd { + case Read: + buf = make([]byte, hdr.Size, hdr.Size) + n, err := f.Read(buf) + if err != nil { + return err + } + buf = buf[:n] + case Write: + if _, err := io.CopyN(f, in, int64(hdr.Size)); err != nil { + return err + } + case Seek: + seekHdr := &SeekHeader{} + if err := binary.Read(in, binary.BigEndian, seekHdr); err != nil { + return err + } + res, err := f.Seek(seekHdr.Offset, int(seekHdr.Whence)) + if err != nil { + return err + } + buffer := &bytes.Buffer{} + if err := binary.Write(buffer, binary.BigEndian, res); err != nil { + return err + } + buf = buffer.Bytes() + case Close: + if err := f.Close(); err != nil { + return err + } + default: + return ErrUnknown + } + + retHdr := &FileHeader{ + Cmd: CmdOK, + Size: uint64(len(buf)), + } + if err := WriteFileHeader(out, retHdr, buf); err != nil { + return err + } + + if hdr.Cmd == Close { + break + } + } + return nil +} + +// ReadFile works like ioutil.ReadFile but instead writes the file to a writer +// Args: +// - args[0] = path +// Out: +// - Write file contents to out +func ReadFile(in io.Reader, out io.Writer, args []string) error { + if len(args) < 1 { + return ErrInvalid + } + + f, err := os.Open(args[0]) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.Copy(out, f); err != nil { + return nil + } + return nil +} + +// WriteFile works like ioutil.WriteFile but instead reads the file from a reader +// Args: +// - args[0] = path +// - args[1] = permission mode in octal (like 0755) +// - input data stream from in +func WriteFile(in io.Reader, out io.Writer, args []string) error { + if len(args) < 2 { + return ErrInvalid + } + + perm, err := strconv.ParseUint(args[1], 8, 32) + if err != nil { + return err + } + + f, err := os.OpenFile(args[0], os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(perm)) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.Copy(f, in); err != nil { + return err + } + return nil +} + +// ReadDir works like *os.File.Readdir but instead writes the result to a writer +// Args: +// - args[0] = path +// - args[1] = number of directory entries to return. 
If <= 0, return all entries in directory +func ReadDir(in io.Reader, out io.Writer, args []string) error { + if len(args) < 2 { + return ErrInvalid + } + + n, err := strconv.ParseInt(args[1], 10, 32) + if err != nil { + return err + } + + f, err := os.Open(args[0]) + if err != nil { + return err + } + defer f.Close() + + infos, err := f.Readdir(int(n)) + if err != nil { + return err + } + + fileInfos := make([]FileInfo, len(infos)) + for i := range infos { + fileInfos[i] = FileInfo{ + NameVar: infos[i].Name(), + SizeVar: infos[i].Size(), + ModeVar: infos[i].Mode(), + ModTimeVar: infos[i].ModTime().UnixNano(), + IsDirVar: infos[i].IsDir(), + } + } + + buf, err := json.Marshal(fileInfos) + if err != nil { + return err + } + + if _, err := out.Write(buf); err != nil { + return err + } + return nil +} + +// ResolvePath works like docker's symlink.FollowSymlinkInScope. +// It takens in a `path` and a `root` and evaluates symlinks in `path` +// as if they were scoped in `root`. `path` must be a child path of `root`. +// In other words, `path` must have `root` as a prefix. +// Example: +// path=/foo/bar -> /baz +// root=/foo, +// Expected result = /foo/baz +// +// Args: +// - args[0] is `path` +// - args[1] is `root` +// Out: +// - Write resolved path to stdout +func ResolvePath(in io.Reader, out io.Writer, args []string) error { + if len(args) < 2 { + return ErrInvalid + } + res, err := symlink.FollowSymlinkInScope(args[0], args[1]) + if err != nil { + return err + } + if _, err = out.Write([]byte(res)); err != nil { + return err + } + return nil +} + +// ExtractArchive extracts the archive read from in. +// Args: +// - in = size of json | json of archive.TarOptions | input tar stream +// - args[0] = extract directory name +func ExtractArchive(in io.Reader, out io.Writer, args []string) error { + if len(args) < 1 { + return ErrInvalid + } + + opts, err := ReadTarOptions(in) + if err != nil { + return err + } + + if err := archive.Untar(in, args[0], opts); err != nil { + return err + } + return nil +} + +// ArchivePath archives the given directory and writes it to out. +// Args: +// - in = size of json | json of archive.TarOptions +// - args[0] = source directory name +// Out: +// - out = tar file of the archive +func ArchivePath(in io.Reader, out io.Writer, args []string) error { + if len(args) < 1 { + return ErrInvalid + } + + opts, err := ReadTarOptions(in) + if err != nil { + return err + } + + r, err := archive.TarWithOptions(args[0], opts) + if err != nil { + return err + } + + if _, err := io.Copy(out, r); err != nil { + return err + } + return nil +} diff --git a/components/engine/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/utils.go b/components/engine/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/utils.go new file mode 100644 index 0000000000..a12827c93f --- /dev/null +++ b/components/engine/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/utils.go @@ -0,0 +1,168 @@ +package remotefs + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "io" + "io/ioutil" + "os" + "syscall" + + "github.com/docker/docker/pkg/archive" +) + +// ReadError is an utility function that reads a serialized error from the given reader +// and deserializes it. 
+func ReadError(in io.Reader) (*ExportedError, error) { + b, err := ioutil.ReadAll(in) + if err != nil { + return nil, err + } + + // No error + if len(b) == 0 { + return nil, nil + } + + var exportedErr ExportedError + if err := json.Unmarshal(b, &exportedErr); err != nil { + return nil, err + } + + return &exportedErr, nil +} + +// ExportedToError will convert a ExportedError to an error. It will try to match +// the error to any existing known error like os.ErrNotExist. Otherwise, it will just +// return an implementation of the error interface. +func ExportedToError(ee *ExportedError) error { + if ee.Error() == os.ErrNotExist.Error() { + return os.ErrNotExist + } else if ee.Error() == os.ErrExist.Error() { + return os.ErrExist + } else if ee.Error() == os.ErrPermission.Error() { + return os.ErrPermission + } + return ee +} + +// WriteError is an utility function that serializes the error +// and writes it to the output writer. +func WriteError(err error, out io.Writer) error { + if err == nil { + return nil + } + err = fixOSError(err) + + var errno int + switch typedError := err.(type) { + case *os.PathError: + if se, ok := typedError.Err.(syscall.Errno); ok { + errno = int(se) + } + case *os.LinkError: + if se, ok := typedError.Err.(syscall.Errno); ok { + errno = int(se) + } + case *os.SyscallError: + if se, ok := typedError.Err.(syscall.Errno); ok { + errno = int(se) + } + } + + exportedError := &ExportedError{ + ErrString: err.Error(), + ErrNum: errno, + } + + b, err1 := json.Marshal(exportedError) + if err1 != nil { + return err1 + } + + _, err1 = out.Write(b) + if err1 != nil { + return err1 + } + return nil +} + +// fixOSError converts possible platform dependent error into the portable errors in the +// Go os package if possible. +func fixOSError(err error) error { + // The os.IsExist, os.IsNotExist, and os.IsPermissions functions are platform + // dependent, so sending the raw error might break those functions on a different OS. + // Go defines portable errors for these. + if os.IsExist(err) { + return os.ErrExist + } else if os.IsNotExist(err) { + return os.ErrNotExist + } else if os.IsPermission(err) { + return os.ErrPermission + } + return err +} + +// ReadTarOptions reads from the specified reader and deserializes an archive.TarOptions struct. +func ReadTarOptions(r io.Reader) (*archive.TarOptions, error) { + var size uint64 + if err := binary.Read(r, binary.BigEndian, &size); err != nil { + return nil, err + } + + rawJSON := make([]byte, size) + if _, err := io.ReadFull(r, rawJSON); err != nil { + return nil, err + } + + var opts archive.TarOptions + if err := json.Unmarshal(rawJSON, &opts); err != nil { + return nil, err + } + return &opts, nil +} + +// WriteTarOptions serializes a archive.TarOptions struct and writes it to the writer. 
+func WriteTarOptions(w io.Writer, opts *archive.TarOptions) error { + optsBuf, err := json.Marshal(opts) + if err != nil { + return err + } + + optsSize := uint64(len(optsBuf)) + optsSizeBuf := &bytes.Buffer{} + if err := binary.Write(optsSizeBuf, binary.BigEndian, optsSize); err != nil { + return err + } + + if _, err := optsSizeBuf.WriteTo(w); err != nil { + return err + } + + if _, err := w.Write(optsBuf); err != nil { + return err + } + + return nil +} + +// ReadFileHeader reads from r and returns a deserialized FileHeader +func ReadFileHeader(r io.Reader) (*FileHeader, error) { + hdr := &FileHeader{} + if err := binary.Read(r, binary.BigEndian, hdr); err != nil { + return nil, err + } + return hdr, nil +} + +// WriteFileHeader serializes a FileHeader and writes it to w, along with any extra data +func WriteFileHeader(w io.Writer, hdr *FileHeader, extraData []byte) error { + if err := binary.Write(w, binary.BigEndian, hdr); err != nil { + return err + } + if _, err := w.Write(extraData); err != nil { + return err + } + return nil +} diff --git a/components/engine/vendor/github.com/containerd/continuity/LICENSE b/components/engine/vendor/github.com/containerd/continuity/LICENSE new file mode 100644 index 0000000000..8f71f43fee --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/components/engine/vendor/github.com/containerd/continuity/README.md b/components/engine/vendor/github.com/containerd/continuity/README.md new file mode 100644 index 0000000000..0e91ce07b5 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/README.md @@ -0,0 +1,74 @@ +# continuity + +[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity) +[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity) + +A transport-agnostic, filesystem metadata manifest system + +This project is a staging area for experiments in providing transport agnostic +metadata storage. + +Please see https://github.com/opencontainers/specs/issues/11 for more details. + +## Manifest Format + +A continuity manifest encodes filesystem metadata in Protocol Buffers. +Please refer to [proto/manifest.proto](proto/manifest.proto). 
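Since the manifest is an ordinary protobuf blob, it can also be inspected programmatically rather than only through the CLI shown in the Usage section below. The sketch that follows is illustrative only: it assumes the Go bindings generated from proto/manifest.proto live under github.com/containerd/continuity/proto and expose a `Manifest` message; those generated names are not part of this patch, so treat them as assumptions.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"
	// Assumed location of the Go code generated from proto/manifest.proto.
	pb "github.com/containerd/continuity/proto"
)

func main() {
	// Read a manifest produced by `continuity build . > /tmp/a.pb`.
	data, err := ioutil.ReadFile("/tmp/a.pb")
	if err != nil {
		log.Fatal(err)
	}

	// Manifest is the assumed top-level message defined in manifest.proto.
	var m pb.Manifest
	if err := proto.Unmarshal(data, &m); err != nil {
		log.Fatal(err)
	}

	// Dump the decoded manifest in protobuf text format.
	fmt.Println(proto.MarshalTextString(&m))
}
```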
+ +## Usage + +Build: + +```console +$ make +``` + +Create a manifest (of this repo itself): + +```console +$ ./bin/continuity build . > /tmp/a.pb +``` + +Dump a manifest: + +```console +$ ./bin/continuity ls /tmp/a.pb +... +-rw-rw-r-- 270 B /.gitignore +-rw-rw-r-- 88 B /.mailmap +-rw-rw-r-- 187 B /.travis.yml +-rw-rw-r-- 359 B /AUTHORS +-rw-rw-r-- 11 kB /LICENSE +-rw-rw-r-- 1.5 kB /Makefile +... +-rw-rw-r-- 986 B /testutil_test.go +drwxrwxr-x 0 B /version +-rw-rw-r-- 478 B /version/version.go +``` + +Verify a manifest: + +```console +$ ./bin/continuity verify . /tmp/a.pb +``` + +Break the directory and restore using the manifest: +```console +$ chmod 777 Makefile +$ ./bin/continuity verify . /tmp/a.pb +2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r-- +$ ./bin/continuity apply . /tmp/a.pb +$ stat -c %a Makefile +664 +$ ./bin/continuity verify . /tmp/a.pb +``` + + +## Contribution Guide +### Building Proto Package + +If you change the proto file you will need to rebuild the generated Go with `go generate`. + +```console +$ go generate ./proto +``` diff --git a/components/engine/vendor/github.com/containerd/continuity/devices/devices.go b/components/engine/vendor/github.com/containerd/continuity/devices/devices.go new file mode 100644 index 0000000000..7086407047 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/devices/devices.go @@ -0,0 +1,5 @@ +package devices + +import "fmt" + +var ErrNotSupported = fmt.Errorf("not supported") diff --git a/components/engine/vendor/github.com/containerd/continuity/devices/devices_darwin.go b/components/engine/vendor/github.com/containerd/continuity/devices/devices_darwin.go new file mode 100644 index 0000000000..5041e66611 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/devices/devices_darwin.go @@ -0,0 +1,15 @@ +package devices + +// from /usr/include/sys/types.h + +func getmajor(dev int32) uint64 { + return (uint64(dev) >> 24) & 0xff +} + +func getminor(dev int32) uint64 { + return uint64(dev) & 0xffffff +} + +func makedev(major int, minor int) int { + return ((major << 24) | minor) +} diff --git a/components/engine/vendor/github.com/containerd/continuity/devices/devices_dummy.go b/components/engine/vendor/github.com/containerd/continuity/devices/devices_dummy.go new file mode 100644 index 0000000000..9a48330a56 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/devices/devices_dummy.go @@ -0,0 +1,23 @@ +// +build solaris,!cgo + +// +// Implementing the functions below requires cgo support. Non-cgo stubs +// versions are defined below to enable cross-compilation of source code +// that depends on these functions, but the resultant cross-compiled +// binaries cannot actually be used. If the stub function(s) below are +// actually invoked they will cause the calling process to exit. 
+// + +package devices + +func getmajor(dev uint64) uint64 { + panic("getmajor() support requires cgo.") +} + +func getminor(dev uint64) uint64 { + panic("getminor() support requires cgo.") +} + +func makedev(major int, minor int) int { + panic("makedev() support requires cgo.") +} diff --git a/components/engine/vendor/github.com/containerd/continuity/devices/devices_freebsd.go b/components/engine/vendor/github.com/containerd/continuity/devices/devices_freebsd.go new file mode 100644 index 0000000000..a5c7b93189 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/devices/devices_freebsd.go @@ -0,0 +1,15 @@ +package devices + +// from /usr/include/sys/types.h + +func getmajor(dev uint32) uint64 { + return (uint64(dev) >> 24) & 0xff +} + +func getminor(dev uint32) uint64 { + return uint64(dev) & 0xffffff +} + +func makedev(major int, minor int) int { + return ((major << 24) | minor) +} diff --git a/components/engine/vendor/github.com/containerd/continuity/devices/devices_linux.go b/components/engine/vendor/github.com/containerd/continuity/devices/devices_linux.go new file mode 100644 index 0000000000..454cf668f5 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/devices/devices_linux.go @@ -0,0 +1,15 @@ +package devices + +// from /usr/include/linux/kdev_t.h + +func getmajor(dev uint64) uint64 { + return dev >> 8 +} + +func getminor(dev uint64) uint64 { + return dev & 0xff +} + +func makedev(major int, minor int) int { + return ((major << 8) | minor) +} diff --git a/components/engine/vendor/github.com/containerd/continuity/devices/devices_solaris.go b/components/engine/vendor/github.com/containerd/continuity/devices/devices_solaris.go new file mode 100644 index 0000000000..8819ac82f5 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/devices/devices_solaris.go @@ -0,0 +1,18 @@ +// +build cgo + +package devices + +//#include +import "C" + +func getmajor(dev uint64) uint64 { + return uint64(C.major(C.dev_t(dev))) +} + +func getminor(dev uint64) uint64 { + return uint64(C.minor(C.dev_t(dev))) +} + +func makedev(major int, minor int) int { + return int(C.makedev(C.major_t(major), C.minor_t(minor))) +} diff --git a/components/engine/vendor/github.com/containerd/continuity/devices/devices_unix.go b/components/engine/vendor/github.com/containerd/continuity/devices/devices_unix.go new file mode 100644 index 0000000000..85e9a68c49 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/devices/devices_unix.go @@ -0,0 +1,55 @@ +// +build linux darwin freebsd solaris + +package devices + +import ( + "fmt" + "os" + "syscall" +) + +func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return 0, 0, fmt.Errorf("cannot extract device from os.FileInfo") + } + + return getmajor(sys.Rdev), getminor(sys.Rdev), nil +} + +// mknod provides a shortcut for syscall.Mknod +func Mknod(p string, mode os.FileMode, maj, min int) error { + var ( + m = syscallMode(mode.Perm()) + dev int + ) + + if mode&os.ModeDevice != 0 { + dev = makedev(maj, min) + + if mode&os.ModeCharDevice != 0 { + m |= syscall.S_IFCHR + } else { + m |= syscall.S_IFBLK + } + } else if mode&os.ModeNamedPipe != 0 { + m |= syscall.S_IFIFO + } + + return syscall.Mknod(p, m, dev) +} + +// syscallMode returns the syscall-specific mode bits from Go's portable mode bits. 
+func syscallMode(i os.FileMode) (o uint32) { + o |= uint32(i.Perm()) + if i&os.ModeSetuid != 0 { + o |= syscall.S_ISUID + } + if i&os.ModeSetgid != 0 { + o |= syscall.S_ISGID + } + if i&os.ModeSticky != 0 { + o |= syscall.S_ISVTX + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/devices/devices_windows.go b/components/engine/vendor/github.com/containerd/continuity/devices/devices_windows.go new file mode 100644 index 0000000000..6099d1d779 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/devices/devices_windows.go @@ -0,0 +1,11 @@ +package devices + +import ( + "os" + + "github.com/pkg/errors" +) + +func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) { + return 0, 0, errors.Wrap(ErrNotSupported, "cannot get device info on windows") +} diff --git a/components/engine/vendor/github.com/containerd/continuity/driver/driver.go b/components/engine/vendor/github.com/containerd/continuity/driver/driver.go new file mode 100644 index 0000000000..aa1dd7d297 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/driver/driver.go @@ -0,0 +1,162 @@ +package driver + +import ( + "fmt" + "io" + "os" +) + +var ErrNotSupported = fmt.Errorf("not supported") + +// Driver provides all of the system-level functions in a common interface. +// The context should call these with full paths and should never use the `os` +// package or any other package to access resources on the filesystem. This +// mechanism let's us carefully control access to the context and maintain +// path and resource integrity. It also gives us an interface to reason about +// direct resource access. +// +// Implementations don't need to do much other than meet the interface. For +// example, it is not required to wrap os.FileInfo to return correct paths for +// the call to Name(). +type Driver interface { + // Note that Open() returns a File interface instead of *os.File. This + // is because os.File is a struct, so if Open was to return *os.File, + // the only way to fulfill the interface would be to call os.Open() + Open(path string) (File, error) + OpenFile(path string, flag int, perm os.FileMode) (File, error) + + Stat(path string) (os.FileInfo, error) + Lstat(path string) (os.FileInfo, error) + Readlink(p string) (string, error) + Mkdir(path string, mode os.FileMode) error + Remove(path string) error + + Link(oldname, newname string) error + Lchmod(path string, mode os.FileMode) error + Lchown(path string, uid, gid int64) error + Symlink(oldname, newname string) error + + MkdirAll(path string, perm os.FileMode) error + RemoveAll(path string) error + + // TODO(aaronl): These methods might move outside the main Driver + // interface in the future as more platforms are added. + Mknod(path string, mode os.FileMode, major int, minor int) error + Mkfifo(path string, mode os.FileMode) error +} + +// File is the interface for interacting with files returned by continuity's Open +// This is needed since os.File is a struct, instead of an interface, so it can't +// be used. +type File interface { + io.ReadWriteCloser + io.Seeker + Readdir(n int) ([]os.FileInfo, error) +} + +func NewSystemDriver() (Driver, error) { + // TODO(stevvooe): Consider having this take a "hint" path argument, which + // would be the context root. The hint could be used to resolve required + // filesystem support when assembling the driver to use. 
+ return &driver{}, nil +} + +// XAttrDriver should be implemented on operation systems and filesystems that +// have xattr support for regular files and directories. +type XAttrDriver interface { + // Getxattr returns all of the extended attributes for the file at path. + // Typically, this takes a syscall call to Listxattr and Getxattr. + Getxattr(path string) (map[string][]byte, error) + + // Setxattr sets all of the extended attributes on file at path, following + // any symbolic links, if necessary. All attributes on the target are + // replaced by the values from attr. If the operation fails to set any + // attribute, those already applied will not be rolled back. + Setxattr(path string, attr map[string][]byte) error +} + +// LXAttrDriver should be implemented by drivers on operating systems and +// filesystems that support setting and getting extended attributes on +// symbolic links. If this is not implemented, extended attributes will be +// ignored on symbolic links. +type LXAttrDriver interface { + // LGetxattr returns all of the extended attributes for the file at path + // and does not follow symlinks. Typically, this takes a syscall call to + // Llistxattr and Lgetxattr. + LGetxattr(path string) (map[string][]byte, error) + + // LSetxattr sets all of the extended attributes on file at path, without + // following symbolic links. All attributes on the target are replaced by + // the values from attr. If the operation fails to set any attribute, + // those already applied will not be rolled back. + LSetxattr(path string, attr map[string][]byte) error +} + +type DeviceInfoDriver interface { + DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) +} + +// driver is a simple default implementation that sends calls out to the "os" +// package. Extend the "driver" type in system-specific files to add support, +// such as xattrs, which can add support at compile time. +type driver struct{} + +var _ File = &os.File{} + +// LocalDriver is the exported Driver struct for convenience. +var LocalDriver Driver = &driver{} + +func (d *driver) Open(p string) (File, error) { + return os.Open(p) +} + +func (d *driver) OpenFile(path string, flag int, perm os.FileMode) (File, error) { + return os.OpenFile(path, flag, perm) +} + +func (d *driver) Stat(p string) (os.FileInfo, error) { + return os.Stat(p) +} + +func (d *driver) Lstat(p string) (os.FileInfo, error) { + return os.Lstat(p) +} + +func (d *driver) Readlink(p string) (string, error) { + return os.Readlink(p) +} + +func (d *driver) Mkdir(p string, mode os.FileMode) error { + return os.Mkdir(p, mode) +} + +// Remove is used to unlink files and remove directories. +// This is following the golang os package api which +// combines the operations into a higher level Remove +// function. If explicit unlinking or directory removal +// to mirror system call is required, they should be +// split up at that time. +func (d *driver) Remove(path string) error { + return os.Remove(path) +} + +func (d *driver) Link(oldname, newname string) error { + return os.Link(oldname, newname) +} + +func (d *driver) Lchown(name string, uid, gid int64) error { + // TODO: error out if uid excesses int bit width? 
+ return os.Lchown(name, int(uid), int(gid)) +} + +func (d *driver) Symlink(oldname, newname string) error { + return os.Symlink(oldname, newname) +} + +func (d *driver) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (d *driver) RemoveAll(path string) error { + return os.RemoveAll(path) +} diff --git a/components/engine/vendor/github.com/containerd/continuity/driver/driver_unix.go b/components/engine/vendor/github.com/containerd/continuity/driver/driver_unix.go new file mode 100644 index 0000000000..d9ab1656c9 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/driver/driver_unix.go @@ -0,0 +1,122 @@ +// +build linux darwin freebsd solaris + +package driver + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "sort" + + "github.com/containerd/continuity/devices" + "github.com/containerd/continuity/sysx" +) + +func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error { + return devices.Mknod(path, mode, major, minor) +} + +func (d *driver) Mkfifo(path string, mode os.FileMode) error { + if mode&os.ModeNamedPipe == 0 { + return errors.New("mode passed to Mkfifo does not have the named pipe bit set") + } + // mknod with a mode that has ModeNamedPipe set creates a fifo, not a + // device. + return devices.Mknod(path, mode, 0, 0) +} + +// Lchmod changes the mode of an file not following symlinks. +func (d *driver) Lchmod(path string, mode os.FileMode) (err error) { + if !filepath.IsAbs(path) { + path, err = filepath.Abs(path) + if err != nil { + return + } + } + + return sysx.Fchmodat(0, path, uint32(mode), sysx.AtSymlinkNofollow) +} + +// Getxattr returns all of the extended attributes for the file at path p. +func (d *driver) Getxattr(p string) (map[string][]byte, error) { + xattrs, err := sysx.Listxattr(p) + if err != nil { + return nil, fmt.Errorf("listing %s xattrs: %v", p, err) + } + + sort.Strings(xattrs) + m := make(map[string][]byte, len(xattrs)) + + for _, attr := range xattrs { + value, err := sysx.Getxattr(p, attr) + if err != nil { + return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err) + } + + // NOTE(stevvooe): This append/copy tricky relies on unique + // xattrs. Break this out into an alloc/copy if xattrs are no + // longer unique. + m[attr] = append(m[attr], value...) + } + + return m, nil +} + +// Setxattr sets all of the extended attributes on file at path, following +// any symbolic links, if necessary. All attributes on the target are +// replaced by the values from attr. If the operation fails to set any +// attribute, those already applied will not be rolled back. +func (d *driver) Setxattr(path string, attrMap map[string][]byte) error { + for attr, value := range attrMap { + if err := sysx.Setxattr(path, attr, value, 0); err != nil { + return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err) + } + } + + return nil +} + +// LGetxattr returns all of the extended attributes for the file at path p +// not following symbolic links. +func (d *driver) LGetxattr(p string) (map[string][]byte, error) { + xattrs, err := sysx.LListxattr(p) + if err != nil { + return nil, fmt.Errorf("listing %s xattrs: %v", p, err) + } + + sort.Strings(xattrs) + m := make(map[string][]byte, len(xattrs)) + + for _, attr := range xattrs { + value, err := sysx.LGetxattr(p, attr) + if err != nil { + return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err) + } + + // NOTE(stevvooe): This append/copy tricky relies on unique + // xattrs. 
Break this out into an alloc/copy if xattrs are no + // longer unique. + m[attr] = append(m[attr], value...) + } + + return m, nil +} + +// LSetxattr sets all of the extended attributes on file at path, not +// following any symbolic links. All attributes on the target are +// replaced by the values from attr. If the operation fails to set any +// attribute, those already applied will not be rolled back. +func (d *driver) LSetxattr(path string, attrMap map[string][]byte) error { + for attr, value := range attrMap { + if err := sysx.LSetxattr(path, attr, value, 0); err != nil { + return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err) + } + } + + return nil +} + +func (d *driver) DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) { + return devices.DeviceInfo(fi) +} diff --git a/components/engine/vendor/github.com/containerd/continuity/driver/driver_windows.go b/components/engine/vendor/github.com/containerd/continuity/driver/driver_windows.go new file mode 100644 index 0000000000..e4cfa64fb7 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/driver/driver_windows.go @@ -0,0 +1,21 @@ +package driver + +import ( + "os" + + "github.com/pkg/errors" +) + +func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error { + return errors.Wrap(ErrNotSupported, "cannot create device node on Windows") +} + +func (d *driver) Mkfifo(path string, mode os.FileMode) error { + return errors.Wrap(ErrNotSupported, "cannot create fifo on Windows") +} + +// Lchmod changes the mode of an file not following symlinks. +func (d *driver) Lchmod(path string, mode os.FileMode) (err error) { + // TODO: Use Window's equivalent + return os.Chmod(path, mode) +} diff --git a/components/engine/vendor/github.com/containerd/continuity/driver/utils.go b/components/engine/vendor/github.com/containerd/continuity/driver/utils.go new file mode 100644 index 0000000000..9e0edd7bca --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/driver/utils.go @@ -0,0 +1,74 @@ +package driver + +import ( + "io" + "io/ioutil" + "os" + "sort" +) + +// ReadFile works the same as ioutil.ReadFile with the Driver abstraction +func ReadFile(r Driver, filename string) ([]byte, error) { + f, err := r.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return data, nil +} + +// WriteFile works the same as ioutil.WriteFile with the Driver abstraction +func WriteFile(r Driver, filename string, data []byte, perm os.FileMode) error { + f, err := r.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + defer f.Close() + + n, err := f.Write(data) + if err != nil { + return err + } else if n != len(data) { + return io.ErrShortWrite + } + + return nil +} + +// ReadDir works the same as ioutil.ReadDir with the Driver abstraction +func ReadDir(r Driver, dirname string) ([]os.FileInfo, error) { + f, err := r.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + dirs, err := f.Readdir(-1) + if err != nil { + return nil, err + } + + sort.Sort(fileInfos(dirs)) + return dirs, nil +} + +// Simple implementation of the sort.Interface for os.FileInfo +type fileInfos []os.FileInfo + +func (fis fileInfos) Len() int { + return len(fis) +} + +func (fis fileInfos) Less(i, j int) bool { + return fis[i].Name() < fis[j].Name() +} + +func (fis fileInfos) Swap(i, j int) { + fis[i], fis[j] = fis[j], fis[i] +} diff --git 
a/components/engine/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/components/engine/vendor/github.com/containerd/continuity/pathdriver/path_driver.go new file mode 100644 index 0000000000..b43d55fe95 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/pathdriver/path_driver.go @@ -0,0 +1,85 @@ +package pathdriver + +import ( + "path/filepath" +) + +// PathDriver provides all of the path manipulation functions in a common +// interface. The context should call these and never use the `filepath` +// package or any other package to manipulate paths. +type PathDriver interface { + Join(paths ...string) string + IsAbs(path string) bool + Rel(base, target string) (string, error) + Base(path string) string + Dir(path string) string + Clean(path string) string + Split(path string) (dir, file string) + Separator() byte + Abs(path string) (string, error) + Walk(string, filepath.WalkFunc) error + FromSlash(path string) string + ToSlash(path string) string + Match(pattern, name string) (matched bool, err error) +} + +// pathDriver is a simple default implementation calls the filepath package. +type pathDriver struct{} + +// LocalPathDriver is the exported pathDriver struct for convenience. +var LocalPathDriver PathDriver = &pathDriver{} + +func (*pathDriver) Join(paths ...string) string { + return filepath.Join(paths...) +} + +func (*pathDriver) IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +func (*pathDriver) Rel(base, target string) (string, error) { + return filepath.Rel(base, target) +} + +func (*pathDriver) Base(path string) string { + return filepath.Base(path) +} + +func (*pathDriver) Dir(path string) string { + return filepath.Dir(path) +} + +func (*pathDriver) Clean(path string) string { + return filepath.Clean(path) +} + +func (*pathDriver) Split(path string) (dir, file string) { + return filepath.Split(path) +} + +func (*pathDriver) Separator() byte { + return filepath.Separator +} + +func (*pathDriver) Abs(path string) (string, error) { + return filepath.Abs(path) +} + +// Note that filepath.Walk calls os.Stat, so if the context wants to +// to call Driver.Stat() for Walk, they need to create a new struct that +// overrides this method. +func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error { + return filepath.Walk(root, walkFn) +} + +func (*pathDriver) FromSlash(path string) string { + return filepath.FromSlash(path) +} + +func (*pathDriver) ToSlash(path string) string { + return filepath.ToSlash(path) +} + +func (*pathDriver) Match(pattern, name string) (bool, error) { + return filepath.Match(pattern, name) +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/asm.s b/components/engine/vendor/github.com/containerd/continuity/sysx/asm.s new file mode 100644 index 0000000000..8ed2fdb94b --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/asm.s @@ -0,0 +1,10 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
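Editorial note (not part of the diff): pathdriver.PathDriver above is a thin indirection over the standard filepath package, so LocalPathDriver behaves exactly like filepath while leaving room for alternative path semantics. A small usage sketch, assuming only what the vendored file defines:

package main

import (
	"fmt"

	"github.com/containerd/continuity/pathdriver"
)

func main() {
	// LocalPathDriver simply forwards every call to path/filepath.
	var p pathdriver.PathDriver = pathdriver.LocalPathDriver

	joined := p.Join("var", "lib", "docker")
	fmt.Println(joined, p.IsAbs(joined)) // "var/lib/docker" false on Unix

	rel, err := p.Rel("/var/lib", "/var/lib/docker/overlay2")
	fmt.Println(rel, err) // "docker/overlay2" <nil>
}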
+ +// +build !gccgo + +#include "textflag.h" + +TEXT ·use(SB),NOSPLIT,$0 + RET diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_darwin.go b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_darwin.go new file mode 100644 index 0000000000..e3ae2b7bbf --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_darwin.go @@ -0,0 +1,18 @@ +package sysx + +const ( + // AtSymlinkNoFollow defined from AT_SYMLINK_NOFOLLOW in + AtSymlinkNofollow = 0x20 +) + +const ( + + // SYS_FCHMODAT defined from golang.org/sys/unix + SYS_FCHMODAT = 467 +) + +// These functions will be generated by generate.sh +// $ GOOS=darwin GOARCH=386 ./generate.sh chmod +// $ GOOS=darwin GOARCH=amd64 ./generate.sh chmod + +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_darwin_386.go b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_darwin_386.go new file mode 100644 index 0000000000..5a8cf5b57d --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_darwin_386.go @@ -0,0 +1,25 @@ +// mksyscall.pl -l32 chmod_darwin.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_darwin_amd64.go b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_darwin_amd64.go new file mode 100644 index 0000000000..3287d1d579 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_darwin_amd64.go @@ -0,0 +1,25 @@ +// mksyscall.pl chmod_darwin.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_freebsd.go b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_freebsd.go new file mode 100644 index 0000000000..b64a708be1 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_freebsd.go @@ -0,0 +1,17 @@ +package sysx + +const ( + // AtSymlinkNoFollow defined from AT_SYMLINK_NOFOLLOW in + AtSymlinkNofollow = 0x200 +) + +const ( + + // SYS_FCHMODAT defined from golang.org/sys/unix + SYS_FCHMODAT = 490 +) + +// These functions will be generated by generate.sh +// $ GOOS=freebsd GOARCH=amd64 ./generate.sh chmod + +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) diff --git 
a/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_freebsd_amd64.go b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_freebsd_amd64.go new file mode 100644 index 0000000000..5a271abb1e --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_freebsd_amd64.go @@ -0,0 +1,25 @@ +// mksyscall.pl chmod_freebsd.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + use(unsafe.Pointer(_p0)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_linux.go b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_linux.go new file mode 100644 index 0000000000..89df6d38ef --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_linux.go @@ -0,0 +1,12 @@ +package sysx + +import "syscall" + +const ( + // AtSymlinkNoFollow defined from AT_SYMLINK_NOFOLLOW in /usr/include/linux/fcntl.h + AtSymlinkNofollow = 0x100 +) + +func Fchmodat(dirfd int, path string, mode uint32, flags int) error { + return syscall.Fchmodat(dirfd, path, mode, flags) +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_solaris.go b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_solaris.go new file mode 100644 index 0000000000..3ba6e5edc8 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/chmod_solaris.go @@ -0,0 +1,11 @@ +package sysx + +import "golang.org/x/sys/unix" + +const ( + AtSymlinkNofollow = unix.AT_SYMLINK_NOFOLLOW +) + +func Fchmodat(dirfd int, path string, mode uint32, flags int) error { + return unix.Fchmodat(dirfd, path, mode, flags) +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux.go b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux.go new file mode 100644 index 0000000000..4d8581284a --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux.go @@ -0,0 +1,11 @@ +package sysx + +// These functions will be generated by generate.sh +// $ GOOS=linux GOARCH=386 ./generate.sh copy +// $ GOOS=linux GOARCH=amd64 ./generate.sh copy +// $ GOOS=linux GOARCH=arm ./generate.sh copy +// $ GOOS=linux GOARCH=arm64 ./generate.sh copy +// $ GOOS=linux GOARCH=ppc64le ./generate.sh copy +// $ GOOS=linux GOARCH=s390x ./generate.sh copy + +//sys CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_386.go b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_386.go new file mode 100644 index 0000000000..c1368c5723 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_386.go @@ -0,0 +1,20 @@ +// mksyscall.pl -l32 copy_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(fdin uintptr, 
offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) { + r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_amd64.go b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_amd64.go new file mode 100644 index 0000000000..9941b01f09 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_amd64.go @@ -0,0 +1,20 @@ +// mksyscall.pl copy_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) { + r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_arm.go b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_arm.go new file mode 100644 index 0000000000..c1368c5723 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_arm.go @@ -0,0 +1,20 @@ +// mksyscall.pl -l32 copy_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) { + r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_arm64.go b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_arm64.go new file mode 100644 index 0000000000..9941b01f09 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_arm64.go @@ -0,0 +1,20 @@ +// mksyscall.pl copy_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) { + r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_ppc64le.go b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_ppc64le.go new file mode 100644 index 0000000000..9941b01f09 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_ppc64le.go @@ -0,0 +1,20 @@ +// mksyscall.pl copy_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + 
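Editorial note (not part of the diff): the per-architecture files above all expose the same CopyFileRange wrapper; the underlying syscall exists only on Linux 4.5+ and fails with ENOSYS elsewhere, so real callers keep a read/write fallback. A rough, Linux-only sketch of driving it in a loop; the file names are hypothetical and the fallback is omitted:

package main

import (
	"log"
	"os"

	"github.com/containerd/continuity/sysx"
)

// copyFileRange copies src into dst entirely in the kernel, letting the
// kernel advance both file offsets (offin/offout passed as nil).
func copyFileRange(dst, src string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	fi, err := in.Stat()
	if err != nil {
		return err
	}

	out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode())
	if err != nil {
		return err
	}
	defer out.Close()

	remaining := int(fi.Size())
	for remaining > 0 {
		n, err := sysx.CopyFileRange(in.Fd(), nil, out.Fd(), nil, remaining, 0)
		if err != nil {
			return err // e.g. ENOSYS on kernels older than 4.5
		}
		if n == 0 {
			break // nothing left to copy
		}
		remaining -= n
	}
	return nil
}

func main() {
	if err := copyFileRange("/tmp/dst", "/tmp/src"); err != nil {
		log.Fatal(err)
	}
}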
+package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) { + r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_s390x.go b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_s390x.go new file mode 100644 index 0000000000..9941b01f09 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/copy_linux_s390x.go @@ -0,0 +1,20 @@ +// mksyscall.pl copy_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) { + r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/nodata_linux.go b/components/engine/vendor/github.com/containerd/continuity/sysx/nodata_linux.go new file mode 100644 index 0000000000..fc47ddb8dc --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/nodata_linux.go @@ -0,0 +1,7 @@ +package sysx + +import ( + "syscall" +) + +const ENODATA = syscall.ENODATA diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go b/components/engine/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go new file mode 100644 index 0000000000..53cc8e068f --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go @@ -0,0 +1,8 @@ +package sysx + +import ( + "syscall" +) + +// This should actually be a set that contains ENOENT and EPERM +const ENODATA = syscall.ENOENT diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/nodata_unix.go b/components/engine/vendor/github.com/containerd/continuity/sysx/nodata_unix.go new file mode 100644 index 0000000000..7e6851209f --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/nodata_unix.go @@ -0,0 +1,9 @@ +// +build darwin freebsd + +package sysx + +import ( + "syscall" +) + +const ENODATA = syscall.ENOATTR diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/sys.go b/components/engine/vendor/github.com/containerd/continuity/sysx/sys.go new file mode 100644 index 0000000000..0bb1676283 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/sys.go @@ -0,0 +1,37 @@ +package sysx + +import ( + "syscall" + "unsafe" +) + +var _zero uintptr + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +//go:noescape +func use(p unsafe.Pointer) + +// Do the interface allocations only once for common +// Errno values. 
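Editorial note (not part of the diff): the nodata_*.go files above pin sysx.ENODATA to whichever errno the platform actually reports for a missing attribute (ENODATA on Linux, ENOATTR on Darwin/FreeBSD, ENOENT on Solaris), so callers can test "attribute not set" portably. A hypothetical helper built on that, assuming the vendored import path shown in this diff:

package main

import (
	"fmt"

	"github.com/containerd/continuity/sysx"
)

// getxattrOrNil treats a missing attribute as a nil value rather than an
// error, by comparing against the platform-normalized sysx.ENODATA.
func getxattrOrNil(path, attr string) ([]byte, error) {
	v, err := sysx.Getxattr(path, attr)
	if err == sysx.ENODATA {
		return nil, nil
	}
	return v, err
}

func main() {
	v, err := getxattrOrNil("/tmp", "user.example")
	fmt.Println(v, err)
}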
+var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case syscall.EAGAIN: + return errEAGAIN + case syscall.EINVAL: + return errEINVAL + case syscall.ENOENT: + return errENOENT + } + return e +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_386.go b/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_386.go new file mode 100644 index 0000000000..0063f8a913 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_386.go @@ -0,0 +1,7 @@ +package sysx + +const ( + // SYS_COPYFILERANGE defined in Kernel 4.5+ + // Number defined in /usr/include/asm/unistd_32.h + SYS_COPY_FILE_RANGE = 377 +) diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_amd64.go b/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_amd64.go new file mode 100644 index 0000000000..4170540c5d --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_amd64.go @@ -0,0 +1,7 @@ +package sysx + +const ( + // SYS_COPYFILERANGE defined in Kernel 4.5+ + // Number defined in /usr/include/asm/unistd_64.h + SYS_COPY_FILE_RANGE = 326 +) diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_arm.go b/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_arm.go new file mode 100644 index 0000000000..a05dcbb5ef --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_arm.go @@ -0,0 +1,7 @@ +package sysx + +const ( + // SYS_COPY_FILE_RANGE defined in Kernel 4.5+ + // Number defined in /usr/include/arm-linux-gnueabihf/asm/unistd.h + SYS_COPY_FILE_RANGE = 391 +) diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_arm64.go b/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_arm64.go new file mode 100644 index 0000000000..da31bbd908 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_arm64.go @@ -0,0 +1,7 @@ +package sysx + +const ( + // SYS_COPY_FILE_RANGE defined in Kernel 4.5+ + // Number defined in /usr/include/asm-generic/unistd.h + SYS_COPY_FILE_RANGE = 285 +) diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_ppc64le.go b/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_ppc64le.go new file mode 100644 index 0000000000..5dea25a3c4 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_ppc64le.go @@ -0,0 +1,7 @@ +package sysx + +const ( + // SYS_COPYFILERANGE defined in Kernel 4.5+ + // Number defined in /usr/include/asm/unistd_64.h + SYS_COPY_FILE_RANGE = 379 +) diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_s390x.go b/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_s390x.go new file mode 100644 index 0000000000..8a6f2a7ec0 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/sysnum_linux_s390x.go @@ -0,0 +1,7 @@ +package sysx + +const ( + // SYS_COPYFILERANGE defined in Kernel 4.5+ + // Number defined in /usr/include/asm/unistd_64.h + SYS_COPY_FILE_RANGE = 375 +) diff --git 
a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr.go new file mode 100644 index 0000000000..20937c2d4d --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr.go @@ -0,0 +1,67 @@ +package sysx + +import ( + "bytes" + "fmt" + "syscall" +) + +const defaultXattrBufferSize = 5 + +var ErrNotSupported = fmt.Errorf("not supported") + +type listxattrFunc func(path string, dest []byte) (int, error) + +func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) { + var p []byte // nil on first execution + + for { + n, err := listFunc(path, p) // first call gets buffer size. + if err != nil { + return nil, err + } + + if n > len(p) { + p = make([]byte, n) + continue + } + + p = p[:n] + + ps := bytes.Split(bytes.TrimSuffix(p, []byte{0}), []byte{0}) + var entries []string + for _, p := range ps { + s := string(p) + if s != "" { + entries = append(entries, s) + } + } + + return entries, nil + } +} + +type getxattrFunc func(string, string, []byte) (int, error) + +func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) { + p := make([]byte, defaultXattrBufferSize) + for { + n, err := getFunc(path, attr, p) + if err != nil { + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ERANGE { + p = make([]byte, len(p)*2) // this can't be ideal. + continue // try again! + } + + return nil, err + } + + // realloc to correct size and repeat + if n > len(p) { + p = make([]byte, n) + continue + } + + return p[:n], nil + } +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_darwin.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_darwin.go new file mode 100644 index 0000000000..1164a7d11c --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_darwin.go @@ -0,0 +1,71 @@ +package sysx + +// These functions will be generated by generate.sh +// $ GOOS=darwin GOARCH=386 ./generate.sh xattr +// $ GOOS=darwin GOARCH=amd64 ./generate.sh xattr + +//sys getxattr(path string, attr string, dest []byte, pos int, options int) (sz int, err error) +//sys setxattr(path string, attr string, data []byte, flags int) (err error) +//sys removexattr(path string, attr string, options int) (err error) +//sys listxattr(path string, dest []byte, options int) (sz int, err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) + +const ( + xattrNoFollow = 0x01 +) + +func listxattrFollow(path string, dest []byte) (sz int, err error) { + return listxattr(path, dest, 0) +} + +// Listxattr calls syscall getxattr +func Listxattr(path string) ([]string, error) { + return listxattrAll(path, listxattrFollow) +} + +// Removexattr calls syscall getxattr +func Removexattr(path string, attr string) (err error) { + return removexattr(path, attr, 0) +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return setxattr(path, attr, data, flags) +} + +func getxattrFollow(path, attr string, dest []byte) (sz int, err error) { + return getxattr(path, attr, dest, 0, 0) +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, getxattrFollow) +} + +func listxattrNoFollow(path string, dest []byte) (sz int, err error) { + return listxattr(path, dest, xattrNoFollow) +} + +// LListxattr calls syscall listxattr with XATTR_NOFOLLOW +func LListxattr(path 
string) ([]string, error) { + return listxattrAll(path, listxattrNoFollow) +} + +// LRemovexattr calls syscall removexattr with XATTR_NOFOLLOW +func LRemovexattr(path string, attr string) (err error) { + return removexattr(path, attr, xattrNoFollow) +} + +// Setxattr calls syscall setxattr with XATTR_NOFOLLOW +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return setxattr(path, attr, data, flags|xattrNoFollow) +} + +func getxattrNoFollow(path, attr string, dest []byte) (sz int, err error) { + return getxattr(path, attr, dest, 0, xattrNoFollow) +} + +// LGetxattr calls syscall getxattr with XATTR_NOFOLLOW +func LGetxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, getxattrNoFollow) +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_darwin_386.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_darwin_386.go new file mode 100644 index 0000000000..aa896b57fc --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_darwin_386.go @@ -0,0 +1,111 @@ +// mksyscall.pl -l32 xattr_darwin.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getxattr(path string, attr string, dest []byte, pos int, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), uintptr(pos), uintptr(options)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func removexattr(path string, attr string, options int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func listxattr(path string, dest []byte, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + 
return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_darwin_amd64.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_darwin_amd64.go new file mode 100644 index 0000000000..6ff27e2703 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_darwin_amd64.go @@ -0,0 +1,111 @@ +// mksyscall.pl xattr_darwin.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getxattr(path string, attr string, dest []byte, pos int, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), uintptr(pos), uintptr(options)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func removexattr(path string, attr string, options int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func listxattr(path string, dest []byte, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff 
--git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_freebsd.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_freebsd.go new file mode 100644 index 0000000000..e8017d317f --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_freebsd.go @@ -0,0 +1,12 @@ +package sysx + +import ( + "errors" +) + +// Initial stub version for FreeBSD. FreeBSD has a different +// syscall API from Darwin and Linux for extended attributes; +// it is also not widely used. It is not exposed at all by the +// Go syscall package, so we need to implement directly eventually. + +var unsupported = errors.New("extended attributes unsupported on FreeBSD") diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux.go new file mode 100644 index 0000000000..cd18136343 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux.go @@ -0,0 +1,61 @@ +package sysx + +import "syscall" + +// These functions will be generated by generate.sh +// $ GOOS=linux GOARCH=386 ./generate.sh xattr +// $ GOOS=linux GOARCH=amd64 ./generate.sh xattr +// $ GOOS=linux GOARCH=arm ./generate.sh xattr +// $ GOOS=linux GOARCH=arm64 ./generate.sh xattr +// $ GOOS=linux GOARCH=ppc64 ./generate.sh xattr +// $ GOOS=linux GOARCH=ppc64le ./generate.sh xattr +// $ GOOS=linux GOARCH=s390x ./generate.sh xattr + +// Listxattr calls syscall listxattr and reads all content +// and returns a string array +func Listxattr(path string) ([]string, error) { + return listxattrAll(path, syscall.Listxattr) +} + +// Removexattr calls syscall removexattr +func Removexattr(path string, attr string) (err error) { + return syscall.Removexattr(path, attr) +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return syscall.Setxattr(path, attr, data, flags) +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, syscall.Getxattr) +} + +//sys llistxattr(path string, dest []byte) (sz int, err error) + +// LListxattr lists xattrs, not following symlinks +func LListxattr(path string) ([]string, error) { + return listxattrAll(path, llistxattr) +} + +//sys lremovexattr(path string, attr string) (err error) + +// LRemovexattr removes an xattr, not following symlinks +func LRemovexattr(path string, attr string) (err error) { + return lremovexattr(path, attr) +} + +//sys lsetxattr(path string, attr string, data []byte, flags int) (err error) + +// LSetxattr sets an xattr, not following symlinks +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return lsetxattr(path, attr, data, flags) +} + +//sys lgetxattr(path string, attr string, dest []byte) (sz int, err error) + +// LGetxattr gets an xattr, not following symlinks +func LGetxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, lgetxattr) +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_386.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_386.go new file mode 100644 index 0000000000..c3e5c8e385 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_386.go @@ -0,0 +1,111 @@ +// mksyscall.pl -l32 xattr_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + 
"unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_amd64.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_amd64.go new file mode 100644 index 0000000000..dec46faaaf --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_amd64.go @@ -0,0 +1,111 @@ +// mksyscall.pl xattr_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + 
use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_arm.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_arm.go new file mode 100644 index 0000000000..c3e5c8e385 --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_arm.go @@ -0,0 +1,111 @@ +// mksyscall.pl -l32 xattr_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_arm64.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_arm64.go new file mode 100644 index 0000000000..dec46faaaf --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_arm64.go @@ -0,0 +1,111 @@ +// mksyscall.pl xattr_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 
unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64.go new file mode 100644 index 0000000000..dec46faaaf --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64.go @@ -0,0 +1,111 @@ +// mksyscall.pl xattr_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lgetxattr(path string, 
attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64le.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64le.go new file mode 100644 index 0000000000..dec46faaaf --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64le.go @@ -0,0 +1,111 @@ +// mksyscall.pl xattr_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 
uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_s390x.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_s390x.go new file mode 100644 index 0000000000..dec46faaaf --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_linux_s390x.go @@ -0,0 +1,111 @@ +// mksyscall.pl xattr_linux.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_solaris.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_solaris.go new file mode 100644 index 0000000000..fc523fcbbe --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_solaris.go @@ 
-0,0 +1,12 @@ +package sysx + +import ( + "errors" +) + +// Initial stub version for Solaris. Solaris has a different +// syscall API from Darwin and Linux for extended attributes; +// it is also not widely used. It is not exposed at all by the +// Go syscall package, so we need to implement directly eventually. + +var unsupported = errors.New("extended attributes unsupported on Solaris") diff --git a/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go new file mode 100644 index 0000000000..a8dd9f245f --- /dev/null +++ b/components/engine/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go @@ -0,0 +1,44 @@ +// +build freebsd solaris + +package sysx + +// Listxattr calls syscall listxattr and reads all content +// and returns a string array +func Listxattr(path string) ([]string, error) { + return []string{}, nil +} + +// Removexattr calls syscall removexattr +func Removexattr(path string, attr string) (err error) { + return unsupported +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return unsupported +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return []byte{}, unsupported +} + +// LListxattr lists xattrs, not following symlinks +func LListxattr(path string) ([]string, error) { + return []string{}, nil +} + +// LRemovexattr removes an xattr, not following symlinks +func LRemovexattr(path string, attr string) (err error) { + return unsupported +} + +// LSetxattr sets an xattr, not following symlinks +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return unsupported +} + +// LGetxattr gets an xattr, not following symlinks +func LGetxattr(path, attr string) ([]byte, error) { + return []byte{}, nil +} diff --git a/components/engine/vendor/github.com/docker/libnetwork/agent.go b/components/engine/vendor/github.com/docker/libnetwork/agent.go index 1328f0bade..b0d66476e6 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/agent.go +++ b/components/engine/vendor/github.com/docker/libnetwork/agent.go @@ -892,13 +892,13 @@ func (c *controller) handleEpTableEvent(ev events.Event) { if svcID != "" { // This is a remote task part of a service if err := c.addServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent"); err != nil { - logrus.Errorf("failed adding service binding for %s epRec:%v err:%s", eid, epRec, err) + logrus.Errorf("failed adding service binding for %s epRec:%v err:%v", eid, epRec, err) return } } else { // This is a remote container simply attached to an attachable network if err := c.addContainerNameResolution(nid, eid, containerName, taskAliases, ip, "handleEpTableEvent"); err != nil { - logrus.Errorf("failed adding service binding for %s epRec:%v err:%s", eid, epRec, err) + logrus.Errorf("failed adding container name resolution for %s epRec:%v err:%v", eid, epRec, err) } } } else { @@ -906,13 +906,13 @@ func (c *controller) handleEpTableEvent(ev events.Event) { if svcID != "" { // This is a remote task part of a service if err := c.rmServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent", true); err != nil { - logrus.Errorf("failed removing service binding for %s epRec:%v err:%s", eid, epRec, err) + logrus.Errorf("failed removing service 
binding for %s epRec:%v err:%v", eid, epRec, err) return } } else { // This is a remote container simply attached to an attachable network if err := c.delContainerNameResolution(nid, eid, containerName, taskAliases, ip, "handleEpTableEvent"); err != nil { - logrus.Errorf("failed adding service binding for %s epRec:%v err:%s", eid, epRec, err) + logrus.Errorf("failed removing container name resolution for %s epRec:%v err:%v", eid, epRec, err) } } } diff --git a/components/engine/vendor/github.com/docker/libnetwork/config/config.go b/components/engine/vendor/github.com/docker/libnetwork/config/config.go index 96a157a1fb..3e8473d255 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/config/config.go +++ b/components/engine/vendor/github.com/docker/libnetwork/config/config.go @@ -15,6 +15,11 @@ import ( "github.com/sirupsen/logrus" ) +const ( + warningThNetworkControlPlaneMTU = 1500 + minimumNetworkControlPlaneMTU = 500 +) + // Config encapsulates configurations of various Libnetwork components type Config struct { Daemon DaemonCfg @@ -226,9 +231,12 @@ func OptionExperimental(exp bool) Option { func OptionNetworkControlPlaneMTU(exp int) Option { return func(c *Config) { logrus.Debugf("Network Control Plane MTU: %d", exp) - if exp < 1500 { - // if exp == 0 the value won't be used - logrus.Warnf("Received a MTU of %d, this value is very low, the network control plane can misbehave", exp) + if exp < warningThNetworkControlPlaneMTU { + logrus.Warnf("Received a MTU of %d, this value is very low, the network control plane can misbehave,"+ + " defaulting to minimum value (%d)", exp, minimumNetworkControlPlaneMTU) + if exp < minimumNetworkControlPlaneMTU { + exp = minimumNetworkControlPlaneMTU + } } c.Daemon.NetworkControlPlaneMTU = exp } diff --git a/components/engine/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go b/components/engine/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go index 0c39ec9d27..28d6cca4e0 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go +++ b/components/engine/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go @@ -201,5 +201,5 @@ func delDummyLink(linkName string) error { // getDummyName returns the name of a dummy parent with truncated net ID and driver prefix func getDummyName(netID string) string { - return fmt.Sprintf("%s%s", dummyPrefix, netID) + return dummyPrefix + netID } diff --git a/components/engine/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go b/components/engine/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go index 843a2e73bf..98d4bd3832 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go +++ b/components/engine/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go @@ -205,5 +205,5 @@ func delDummyLink(linkName string) error { // getDummyName returns the name of a dummy parent with truncated net ID and driver prefix func getDummyName(netID string) string { - return fmt.Sprintf("%s%s", dummyPrefix, netID) + return dummyPrefix + netID } diff --git a/components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go b/components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go index 126093fefe..0e9ca77866 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go +++ b/components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go @@ -494,7 +494,7 
@@ func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) erro brIfaceOption := make([]osl.IfaceOption, 2) brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Address(s.gwIP)) brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Bridge(true)) - Ifaces[fmt.Sprintf("%s+%s", brName, "br")] = brIfaceOption + Ifaces[brName+"+br"] = brIfaceOption err := sbox.Restore(Ifaces, nil, nil, nil) if err != nil { @@ -504,7 +504,7 @@ func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) erro Ifaces = make(map[string][]osl.IfaceOption) vxlanIfaceOption := make([]osl.IfaceOption, 1) vxlanIfaceOption = append(vxlanIfaceOption, sbox.InterfaceOptions().Master(brName)) - Ifaces[fmt.Sprintf("%s+%s", vxlanName, "vxlan")] = vxlanIfaceOption + Ifaces[vxlanName+"+vxlan"] = vxlanIfaceOption err = sbox.Restore(Ifaces, nil, nil, nil) if err != nil { return err diff --git a/components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go b/components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go index e624aade2e..2bae0823e1 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go +++ b/components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go @@ -162,7 +162,7 @@ func (d *driver) restoreEndpoints() error { Ifaces := make(map[string][]osl.IfaceOption) vethIfaceOption := make([]osl.IfaceOption, 1) vethIfaceOption = append(vethIfaceOption, n.sbox.InterfaceOptions().Master(s.brName)) - Ifaces[fmt.Sprintf("%s+%s", "veth", "veth")] = vethIfaceOption + Ifaces["veth+veth"] = vethIfaceOption err := n.sbox.Restore(Ifaces, nil, nil, nil) if err != nil { @@ -270,7 +270,7 @@ func (d *driver) nodeJoin(advertiseAddress, bindAddress string, self bool) { // If there is no cluster store there is no need to start serf. 
if d.store != nil { if err := validateSelf(advertiseAddress); err != nil { - logrus.Warnf("%s", err.Error()) + logrus.Warn(err.Error()) } err := d.serfInit() if err != nil { diff --git a/components/engine/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go b/components/engine/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go index b545bc8903..4e3f9ae76f 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go +++ b/components/engine/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go @@ -331,7 +331,7 @@ func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) erro brIfaceOption := make([]osl.IfaceOption, 2) brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Address(s.gwIP)) brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Bridge(true)) - Ifaces[fmt.Sprintf("%s+%s", brName, "br")] = brIfaceOption + Ifaces[brName+"+br"] = brIfaceOption err := sbox.Restore(Ifaces, nil, nil, nil) if err != nil { @@ -341,7 +341,7 @@ func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) erro Ifaces = make(map[string][]osl.IfaceOption) vxlanIfaceOption := make([]osl.IfaceOption, 1) vxlanIfaceOption = append(vxlanIfaceOption, sbox.InterfaceOptions().Master(brName)) - Ifaces[fmt.Sprintf("%s+%s", vxlanName, "vxlan")] = vxlanIfaceOption + Ifaces[vxlanName+"+vxlan"] = vxlanIfaceOption err = sbox.Restore(Ifaces, nil, nil, nil) if err != nil { return err diff --git a/components/engine/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go b/components/engine/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go index 45b62b1835..92b0a4e3b8 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go +++ b/components/engine/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go @@ -141,7 +141,7 @@ func (d *driver) restoreEndpoints() error { Ifaces := make(map[string][]osl.IfaceOption) vethIfaceOption := make([]osl.IfaceOption, 1) vethIfaceOption = append(vethIfaceOption, n.sbox.InterfaceOptions().Master(s.brName)) - Ifaces[fmt.Sprintf("%s+%s", "veth", "veth")] = vethIfaceOption + Ifaces["veth+veth"] = vethIfaceOption err := n.sbox.Restore(Ifaces, nil, nil, nil) if err != nil { @@ -234,7 +234,7 @@ func (d *driver) nodeJoin(advertiseAddress, bindAddress string, self bool) { // If there is no cluster store there is no need to start serf. 
if d.store != nil { if err := validateSelf(advertiseAddress); err != nil { - logrus.Warnf("%s", err.Error()) + logrus.Warn(err.Error()) } err := d.serfInit() if err != nil { diff --git a/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go b/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go index cded48af64..83bee5ad93 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go +++ b/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go @@ -39,6 +39,11 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, if err := jinfo.AddTableEntry(ovPeerTable, eid, buf); err != nil { logrus.Errorf("overlay: Failed adding table entry to joininfo: %v", err) } + + if ep.disablegateway { + jinfo.DisableGatewayService() + } + return nil } diff --git a/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go b/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go index 47af64cb9a..b7bda4a6b2 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go +++ b/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go @@ -6,7 +6,11 @@ import ( "net" "github.com/Microsoft/hcsshim" + "github.com/docker/docker/pkg/system" "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/drivers/windows" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/types" "github.com/sirupsen/logrus" ) @@ -15,12 +19,14 @@ type endpointTable map[string]*endpoint const overlayEndpointPrefix = "overlay/endpoint" type endpoint struct { - id string - nid string - profileId string - remote bool - mac net.HardwareAddr - addr *net.IPNet + id string + nid string + profileID string + remote bool + mac net.HardwareAddr + addr *net.IPNet + disablegateway bool + portMapping []types.PortBinding // Operation port bindings } func validateID(nid, eid string) error { @@ -71,7 +77,7 @@ func (n *network) removeEndpointWithAddress(addr *net.IPNet) { if networkEndpoint != nil { logrus.Debugf("Removing stale endpoint from HNS") - _, err := hcsshim.HNSEndpointRequest("DELETE", networkEndpoint.profileId, "") + _, err := hcsshim.HNSEndpointRequest("DELETE", networkEndpoint.profileID, "") if err != nil { logrus.Debugf("Failed to delete stale overlay endpoint (%s) from hns", networkEndpoint.id[0:7]) @@ -96,7 +102,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, logrus.Debugf("Deleting stale endpoint %s", eid) n.deleteEndpoint(eid) - _, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileId, "") + _, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileID, "") if err != nil { return err } @@ -113,17 +119,19 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, return fmt.Errorf("create endpoint was not passed interface IP address") } - if s := n.getSubnetforIP(ep.addr); s == nil { - return fmt.Errorf("no matching subnet for IP %q in network %q\n", ep.addr, nid) + s := n.getSubnetforIP(ep.addr) + if s == nil { + return fmt.Errorf("no matching subnet for IP %q in network %q", ep.addr, nid) } // Todo: Add port bindings and qos policies here hnsEndpoint := &hcsshim.HNSEndpoint{ Name: eid, - VirtualNetwork: n.hnsId, + VirtualNetwork: n.hnsID, IPAddress: 
ep.addr.IP, EnableInternalDNS: true, + GatewayAddress: s.gwIP.String(), } if ep.mac != nil { @@ -141,6 +149,31 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, hnsEndpoint.Policies = append(hnsEndpoint.Policies, paPolicy) + if system.GetOSVersion().Build > 16236 { + natPolicy, err := json.Marshal(hcsshim.PaPolicy{ + Type: "OutBoundNAT", + }) + + if err != nil { + return err + } + + hnsEndpoint.Policies = append(hnsEndpoint.Policies, natPolicy) + + epConnectivity, err := windows.ParseEndpointConnectivity(epOptions) + if err != nil { + return err + } + + pbPolicy, err := windows.ConvertPortBindings(epConnectivity.PortBindings) + if err != nil { + return err + } + hnsEndpoint.Policies = append(hnsEndpoint.Policies, pbPolicy...) + + ep.disablegateway = true + } + configurationb, err := json.Marshal(hnsEndpoint) if err != nil { return err @@ -151,7 +184,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, return err } - ep.profileId = hnsresponse.Id + ep.profileID = hnsresponse.Id if ep.mac == nil { ep.mac, err = net.ParseMAC(hnsresponse.MacAddress) @@ -164,6 +197,12 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, } } + ep.portMapping, err = windows.ParsePortBindingPolicies(hnsresponse.Policies) + if err != nil { + hcsshim.HNSEndpointRequest("DELETE", hnsresponse.Id, "") + return err + } + n.addEndpoint(ep) return nil @@ -186,7 +225,7 @@ func (d *driver) DeleteEndpoint(nid, eid string) error { n.deleteEndpoint(eid) - _, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileId, "") + _, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileID, "") if err != nil { return err } @@ -210,7 +249,17 @@ func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, erro } data := make(map[string]interface{}, 1) - data["hnsid"] = ep.profileId + data["hnsid"] = ep.profileID data["AllowUnqualifiedDNSQuery"] = true + + if ep.portMapping != nil { + // Return a copy of the operational data + pmc := make([]types.PortBinding, 0, len(ep.portMapping)) + for _, pm := range ep.portMapping { + pmc = append(pmc, pm.GetCopy()) + } + data[netlabel.PortMap] = pmc + } + return data, nil } diff --git a/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go b/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go index 70c4f02eda..9cc46f8cfe 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go +++ b/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go @@ -37,7 +37,7 @@ type subnetJSON struct { type network struct { id string name string - hnsId string + hnsID string providerAddress string interfaceName string endpoints endpointTable @@ -108,7 +108,7 @@ func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo d case "com.docker.network.windowsshim.interface": interfaceName = value case "com.docker.network.windowsshim.hnsid": - n.hnsId = value + n.hnsID = value case netlabel.OverlayVxlanIDList: vniStrings := strings.Split(value, ",") for _, vniStr := range vniStrings { @@ -181,7 +181,7 @@ func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo d if err != nil { d.deleteNetwork(id) } else { - genData["com.docker.network.windowsshim.hnsid"] = n.hnsId + genData["com.docker.network.windowsshim.hnsid"] = n.hnsID } return err @@ -197,7 +197,7 @@ func (d *driver) 
DeleteNetwork(nid string) error { return types.ForbiddenErrorf("could not find network with id %s", nid) } - _, err := hcsshim.HNSNetworkRequest("DELETE", n.hnsId, "") + _, err := hcsshim.HNSNetworkRequest("DELETE", n.hnsID, "") if err != nil { return types.ForbiddenErrorf(err.Error()) } @@ -242,7 +242,7 @@ func (d *driver) network(nid string) *network { // } // for _, endpoint := range hnsresponse { -// if endpoint.VirtualNetwork != n.hnsId { +// if endpoint.VirtualNetwork != n.hnsID { // continue // } @@ -260,7 +260,7 @@ func (d *driver) network(nid string) *network { func (n *network) convertToOverlayEndpoint(v *hcsshim.HNSEndpoint) *endpoint { ep := &endpoint{ id: v.Name, - profileId: v.Id, + profileID: v.Id, nid: n.id, remote: v.IsRemoteEndpoint, } @@ -311,6 +311,7 @@ func (d *driver) createHnsNetwork(n *network) error { Type: d.Type(), Subnets: subnets, NetworkAdapterName: n.interfaceName, + AutomaticDNS: true, } configurationb, err := json.Marshal(network) @@ -326,7 +327,7 @@ func (d *driver) createHnsNetwork(n *network) error { return err } - n.hnsId = hnsresponse.Id + n.hnsID = hnsresponse.Id n.providerAddress = hnsresponse.ManagementIP return nil diff --git a/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go b/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go index d415bebcbd..65ad62ae0d 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go +++ b/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go @@ -104,7 +104,7 @@ func (d *driver) restoreHNSNetworks() error { func (d *driver) convertToOverlayNetwork(v *hcsshim.HNSNetwork) *network { n := &network{ id: v.Name, - hnsId: v.Id, + hnsID: v.Id, driver: d, endpoints: endpointTable{}, subnets: []*subnet{}, diff --git a/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go b/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go index 0abc232432..159bfd6ed1 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go +++ b/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go @@ -33,7 +33,7 @@ func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, hnsEndpoint := &hcsshim.HNSEndpoint{ Name: eid, - VirtualNetwork: n.hnsId, + VirtualNetwork: n.hnsID, MacAddress: peerMac.String(), IPAddress: peerIP, IsRemoteEndpoint: true, @@ -78,7 +78,7 @@ func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, nid: nid, addr: addr, mac: peerMac, - profileId: hnsresponse.Id, + profileID: hnsresponse.Id, remote: true, } @@ -108,7 +108,7 @@ func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMas } if updateDb { - _, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileId, "") + _, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileID, "") if err != nil { return err } diff --git a/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/windows.go b/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/windows.go index 45f835ee07..0b15b2aa49 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/windows.go +++ b/components/engine/vendor/github.com/docker/libnetwork/drivers/windows/windows.go @@ -55,7 +55,8 @@ type endpointOption struct { DisableICC bool } -type 
endpointConnectivity struct { +// EndpointConnectivity stores the port bindings and exposed ports that the user has specified in epOptions. +type EndpointConnectivity struct { PortBindings []types.PortBinding ExposedPorts []types.TransportPort } @@ -67,7 +68,7 @@ type hnsEndpoint struct { Type string macAddress net.HardwareAddr epOption *endpointOption // User specified parameters - epConnectivity *endpointConnectivity // User specified parameters + epConnectivity *EndpointConnectivity // User specified parameters portMapping []types.PortBinding // Operation port bindings addr *net.IPNet gateway net.IP @@ -95,7 +96,7 @@ const ( errNotFound = "HNS failed with error : The object identifier does not represent a valid object. " ) -// IsBuiltinWindowsDriver vaidates if network-type is a builtin local-scoped driver +// IsBuiltinLocalDriver validates if network-type is a builtin local-scoped driver func IsBuiltinLocalDriver(networkType string) bool { if "l2bridge" == networkType || "l2tunnel" == networkType || "nat" == networkType || "ics" == networkType || "transparent" == networkType { return true @@ -396,7 +397,8 @@ func convertQosPolicies(qosPolicies []types.QosPolicy) ([]json.RawMessage, error return qps, nil } -func convertPortBindings(portBindings []types.PortBinding) ([]json.RawMessage, error) { +// ConvertPortBindings converts PortBindings to JSON for HNS request +func ConvertPortBindings(portBindings []types.PortBinding) ([]json.RawMessage, error) { var pbs []json.RawMessage // Enumerate through the port bindings specified by the user and convert @@ -431,7 +433,8 @@ func convertPortBindings(portBindings []types.PortBinding) ([]json.RawMessage, e return pbs, nil } -func parsePortBindingPolicies(policies []json.RawMessage) ([]types.PortBinding, error) { +// ParsePortBindingPolicies parses HNS endpoint response message to PortBindings +func ParsePortBindingPolicies(policies []json.RawMessage) ([]types.PortBinding, error) { var bindings []types.PortBinding hcsPolicy := &hcsshim.NatPolicy{} @@ -505,12 +508,13 @@ func parseEndpointOptions(epOptions map[string]interface{}) (*endpointOption, er return ec, nil } -func parseEndpointConnectivity(epOptions map[string]interface{}) (*endpointConnectivity, error) { +// ParseEndpointConnectivity parses options passed to CreateEndpoint, specifically port bindings, and store in a endpointConnectivity object. 
+func ParseEndpointConnectivity(epOptions map[string]interface{}) (*EndpointConnectivity, error) { if epOptions == nil { return nil, nil } - ec := &endpointConnectivity{} + ec := &EndpointConnectivity{} if opt, ok := epOptions[netlabel.PortMap]; ok { if bs, ok := opt.([]types.PortBinding); ok { @@ -550,7 +554,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, if err != nil { return err } - epConnectivity, err := parseEndpointConnectivity(epOptions) + epConnectivity, err := ParseEndpointConnectivity(epOptions) if err != nil { return err } @@ -561,7 +565,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, endpointStruct.MacAddress = strings.Replace(macAddress.String(), ":", "-", -1) } - endpointStruct.Policies, err = convertPortBindings(epConnectivity.PortBindings) + endpointStruct.Policies, err = ConvertPortBindings(epConnectivity.PortBindings) if err != nil { return err } @@ -615,7 +619,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, endpoint.profileID = hnsresponse.Id endpoint.epConnectivity = epConnectivity endpoint.epOption = epOption - endpoint.portMapping, err = parsePortBindingPolicies(hnsresponse.Policies) + endpoint.portMapping, err = ParsePortBindingPolicies(hnsresponse.Policies) if err != nil { hcsshim.HNSEndpointRequest("DELETE", hnsresponse.Id, "") diff --git a/components/engine/vendor/github.com/docker/libnetwork/endpoint.go b/components/engine/vendor/github.com/docker/libnetwork/endpoint.go index 724d0e5315..a2d1dbc4c6 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/endpoint.go +++ b/components/engine/vendor/github.com/docker/libnetwork/endpoint.go @@ -75,6 +75,7 @@ type endpoint struct { dbIndex uint64 dbExists bool serviceEnabled bool + loadBalancer bool sync.Mutex } @@ -101,6 +102,7 @@ func (ep *endpoint) MarshalJSON() ([]byte, error) { epMap["virtualIP"] = ep.virtualIP.String() epMap["ingressPorts"] = ep.ingressPorts epMap["svcAliases"] = ep.svcAliases + epMap["loadBalancer"] = ep.loadBalancer return json.Marshal(epMap) } @@ -201,6 +203,10 @@ func (ep *endpoint) UnmarshalJSON(b []byte) (err error) { ep.virtualIP = net.ParseIP(vip.(string)) } + if v, ok := epMap["loadBalancer"]; ok { + ep.loadBalancer = v.(bool) + } + sal, _ := json.Marshal(epMap["svcAliases"]) var svcAliases []string json.Unmarshal(sal, &svcAliases) @@ -238,6 +244,7 @@ func (ep *endpoint) CopyTo(o datastore.KVObject) error { dstEp.svcName = ep.svcName dstEp.svcID = ep.svcID dstEp.virtualIP = ep.virtualIP + dstEp.loadBalancer = ep.loadBalancer dstEp.svcAliases = make([]string, len(ep.svcAliases)) copy(dstEp.svcAliases, ep.svcAliases) @@ -985,7 +992,7 @@ func CreateOptionDisableResolution() EndpointOption { } } -//CreateOptionAlias function returns an option setter for setting endpoint alias +// CreateOptionAlias function returns an option setter for setting endpoint alias func CreateOptionAlias(name string, alias string) EndpointOption { return func(ep *endpoint) { if ep.aliases == nil { @@ -1006,13 +1013,20 @@ func CreateOptionService(name, id string, vip net.IP, ingressPorts []*PortConfig } } -//CreateOptionMyAlias function returns an option setter for setting endpoint's self alias +// CreateOptionMyAlias function returns an option setter for setting endpoint's self alias func CreateOptionMyAlias(alias string) EndpointOption { return func(ep *endpoint) { ep.myAliases = append(ep.myAliases, alias) } } +// CreateOptionLoadBalancer function returns an option setter for denoting the 
endpoint is a load balancer for a network +func CreateOptionLoadBalancer() EndpointOption { + return func(ep *endpoint) { + ep.loadBalancer = true + } +} + // JoinOptionPriority function returns an option setter for priority option to // be passed to the endpoint.Join() method. func JoinOptionPriority(ep Endpoint, prio int) EndpointOption { diff --git a/components/engine/vendor/github.com/docker/libnetwork/endpoint_info.go b/components/engine/vendor/github.com/docker/libnetwork/endpoint_info.go index 47bff432cb..b5d3fabcb2 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/endpoint_info.go +++ b/components/engine/vendor/github.com/docker/libnetwork/endpoint_info.go @@ -31,6 +31,9 @@ type EndpointInfo interface { // Sandbox returns the attached sandbox if there, nil otherwise. Sandbox() Sandbox + + // LoadBalancer returns whether the endpoint is the load balancer endpoint for the network. + LoadBalancer() bool } // InterfaceInfo provides an interface to retrieve interface addresses bound to the endpoint. @@ -327,6 +330,12 @@ func (ep *endpoint) Sandbox() Sandbox { return cnt } +func (ep *endpoint) LoadBalancer() bool { + ep.Lock() + defer ep.Unlock() + return ep.loadBalancer +} + func (ep *endpoint) StaticRoutes() []*types.StaticRoute { ep.Lock() defer ep.Unlock() diff --git a/components/engine/vendor/github.com/docker/libnetwork/ipam/allocator.go b/components/engine/vendor/github.com/docker/libnetwork/ipam/allocator.go index 71c9f39531..c4ed9a0e32 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/ipam/allocator.go +++ b/components/engine/vendor/github.com/docker/libnetwork/ipam/allocator.go @@ -579,7 +579,7 @@ func (a *Allocator) DumpDatabase() string { s = fmt.Sprintf("\n\n%s Config", as) aSpace.Lock() for k, config := range aSpace.subnets { - s = fmt.Sprintf("%s%s", s, fmt.Sprintf("\n%v: %v", k, config)) + s += fmt.Sprintf("\n%v: %v", k, config) if config.Range == nil { a.retrieveBitmask(k, config.Pool) } @@ -589,7 +589,7 @@ func (a *Allocator) DumpDatabase() string { s = fmt.Sprintf("%s\n\nBitmasks", s) for k, bm := range a.addresses { - s = fmt.Sprintf("%s%s", s, fmt.Sprintf("\n%s: %s", k, bm)) + s += fmt.Sprintf("\n%s: %s", k, bm) } return s diff --git a/components/engine/vendor/github.com/docker/libnetwork/ipvs/ipvs.go b/components/engine/vendor/github.com/docker/libnetwork/ipvs/ipvs.go index a285e102e3..ebcdd808c3 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/ipvs/ipvs.go +++ b/components/engine/vendor/github.com/docker/libnetwork/ipvs/ipvs.go @@ -116,6 +116,13 @@ func (i *Handle) DelService(s *Service) error { return i.doCmd(s, nil, ipvsCmdDelService) } +// Flush deletes all existing services in the passed +// handle. +func (i *Handle) Flush() error { + _, err := i.doCmdWithoutAttr(ipvsCmdFlush) + return err +} + // NewDestination creates a new real server in the passed ipvs // service which should already be existing in the passed handle. 
func (i *Handle) NewDestination(s *Service, d *Destination) error { diff --git a/components/engine/vendor/github.com/docker/libnetwork/ipvs/netlink.go b/components/engine/vendor/github.com/docker/libnetwork/ipvs/netlink.go index b8d33dcdc4..2089283d14 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/ipvs/netlink.go +++ b/components/engine/vendor/github.com/docker/libnetwork/ipvs/netlink.go @@ -402,6 +402,13 @@ func (i *Handle) doGetServicesCmd(svc *Service) ([]*Service, error) { return res, nil } +// doCmdWithoutAttr a simple wrapper of netlink socket execute command +func (i *Handle) doCmdWithoutAttr(cmd uint8) ([][]byte, error) { + req := newIPVSRequest(cmd) + req.Seq = atomic.AddUint32(&i.seq, 1) + return execute(i.sock, req, 0) +} + func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) { var d Destination diff --git a/components/engine/vendor/github.com/docker/libnetwork/portallocator/portallocator_freebsd.go b/components/engine/vendor/github.com/docker/libnetwork/portallocator/portallocator_freebsd.go new file mode 100644 index 0000000000..97d7fbb49d --- /dev/null +++ b/components/engine/vendor/github.com/docker/libnetwork/portallocator/portallocator_freebsd.go @@ -0,0 +1,42 @@ +package portallocator + +import ( + "bytes" + "fmt" + "os/exec" +) + +func getDynamicPortRange() (start int, end int, err error) { + portRangeKernelSysctl := []string{"net.inet.ip.portrange.hifirst", "net.ip.portrange.hilast"} + portRangeFallback := fmt.Sprintf("using fallback port range %d-%d", DefaultPortRangeStart, DefaultPortRangeEnd) + portRangeLowCmd := exec.Command("/sbin/sysctl", portRangeKernelSysctl[0]) + var portRangeLowOut bytes.Buffer + portRangeLowCmd.Stdout = &portRangeLowOut + cmdErr := portRangeLowCmd.Run() + if cmdErr != nil { + return 0, 0, fmt.Errorf("port allocator - sysctl net.inet.ip.portrange.hifirst failed - %s: %v", portRangeFallback, err) + } + n, err := fmt.Sscanf(portRangeLowOut.String(), "%d", &start) + if n != 1 || err != nil { + if err == nil { + err = fmt.Errorf("unexpected count of parsed numbers (%d)", n) + } + return 0, 0, fmt.Errorf("port allocator - failed to parse system ephemeral port range start from %s - %s: %v", portRangeLowOut.String(), portRangeFallback, err) + } + + portRangeHighCmd := exec.Command("/sbin/sysctl", portRangeKernelSysctl[1]) + var portRangeHighOut bytes.Buffer + portRangeHighCmd.Stdout = &portRangeHighOut + cmdErr = portRangeHighCmd.Run() + if cmdErr != nil { + return 0, 0, fmt.Errorf("port allocator - sysctl net.inet.ip.portrange.hilast failed - %s: %v", portRangeFallback, err) + } + n, err = fmt.Sscanf(portRangeHighOut.String(), "%d", &end) + if n != 1 || err != nil { + if err == nil { + err = fmt.Errorf("unexpected count of parsed numbers (%d)", n) + } + return 0, 0, fmt.Errorf("port allocator - failed to parse system ephemeral port range end from %s - %s: %v", portRangeHighOut.String(), portRangeFallback, err) + } + return start, end, nil +} diff --git a/components/engine/vendor/github.com/docker/libnetwork/sandbox.go b/components/engine/vendor/github.com/docker/libnetwork/sandbox.go index 6f4c2508b2..315195ebb8 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/sandbox.go +++ b/components/engine/vendor/github.com/docker/libnetwork/sandbox.go @@ -916,6 +916,13 @@ func (sb *sandbox) clearNetworkResources(origEp *endpoint) error { break } } + + if index == -1 { + logrus.Warnf("Endpoint %s has already been deleted", ep.Name()) + sb.Unlock() + return nil + } + heap.Remove(&sb.endpoints, 
index) for _, e := range sb.endpoints { if len(e.Gateway()) > 0 { diff --git a/components/engine/vendor/github.com/docker/libnetwork/sandbox_dns_unix.go b/components/engine/vendor/github.com/docker/libnetwork/sandbox_dns_unix.go index f18f0b3ee6..d196330558 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/sandbox_dns_unix.go +++ b/components/engine/vendor/github.com/docker/libnetwork/sandbox_dns_unix.go @@ -197,14 +197,26 @@ func (sb *sandbox) setupDNS() error { // This is for the host mode networking if sb.config.originResolvConfPath != "" { if err := copyFile(sb.config.originResolvConfPath, sb.config.resolvConfPath); err != nil { - return fmt.Errorf("could not copy source resolv.conf file %s to %s: %v", sb.config.originResolvConfPath, sb.config.resolvConfPath, err) + if !os.IsNotExist(err) { + return fmt.Errorf("could not copy source resolv.conf file %s to %s: %v", sb.config.originResolvConfPath, sb.config.resolvConfPath, err) + } + logrus.Infof("%s does not exist, we create an empty resolv.conf for container", sb.config.originResolvConfPath) + if err := createFile(sb.config.resolvConfPath); err != nil { + return err + } } return nil } currRC, err := resolvconf.Get() if err != nil { - return err + if !os.IsNotExist(err) { + return err + } + // it's ok to continue if /etc/resolv.conf doesn't exist, default resolvers (Google's Public DNS) + // will be used + currRC = &resolvconf.File{} + logrus.Infof("/etc/resolv.conf does not exist") } if len(sb.config.dnsList) > 0 || len(sb.config.dnsSearchList) > 0 || len(sb.config.dnsOptionsList) > 0 { diff --git a/components/engine/vendor/github.com/docker/libnetwork/service.go b/components/engine/vendor/github.com/docker/libnetwork/service.go index 5a0d7e0057..63095f31b1 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/service.go +++ b/components/engine/vendor/github.com/docker/libnetwork/service.go @@ -89,4 +89,5 @@ type loadBalancer struct { // Back pointer to service to which the loadbalancer belongs. service *service + sync.Mutex } diff --git a/components/engine/vendor/github.com/docker/libnetwork/service_common.go b/components/engine/vendor/github.com/docker/libnetwork/service_common.go index 64d283e541..fe54ea30c5 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/service_common.go +++ b/components/engine/vendor/github.com/docker/libnetwork/service_common.go @@ -287,7 +287,7 @@ func (c *controller) addServiceBinding(svcName, svcID, nID, eID, containerName s // Add loadbalancer service and backend in all sandboxes in // the network only if vip is valid. if len(vip) != 0 { - n.(*network).addLBBackend(ip, vip, lb.fwMark, ingressPorts) + n.(*network).addLBBackend(ip, vip, lb, ingressPorts) } // Add the appropriate name resolutions @@ -355,7 +355,7 @@ func (c *controller) rmServiceBinding(svcName, svcID, nID, eID, containerName st // Remove loadbalancer service(if needed) and backend in all // sandboxes in the network only if the vip is valid. 
if len(vip) != 0 && entries == 0 { - n.(*network).rmLBBackend(ip, vip, lb.fwMark, ingressPorts, rmService) + n.(*network).rmLBBackend(ip, vip, lb, ingressPorts, rmService) } // Delete the name resolutions diff --git a/components/engine/vendor/github.com/docker/libnetwork/service_linux.go b/components/engine/vendor/github.com/docker/libnetwork/service_linux.go index 00665941ed..d8a95b2f50 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/service_linux.go +++ b/components/engine/vendor/github.com/docker/libnetwork/service_linux.go @@ -111,7 +111,7 @@ func (sb *sandbox) populateLoadbalancers(ep *endpoint) { // Add loadbalancer backend to all sandboxes which has a connection to // this network. If needed add the service as well. -func (n *network) addLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig) { +func (n *network) addLBBackend(ip, vip net.IP, lb *loadBalancer, ingressPorts []*PortConfig) { n.WalkEndpoints(func(e Endpoint) bool { ep := e.(*endpoint) if sb, ok := ep.getSandbox(); ok { @@ -124,7 +124,7 @@ func (n *network) addLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*Po gwIP = ep.Iface().Address().IP } - sb.addLBBackend(ip, vip, fwMark, ingressPorts, ep.Iface().Address(), gwIP, n.ingress) + sb.addLBBackend(ip, vip, lb.fwMark, ingressPorts, ep.Iface().Address(), gwIP, n.ingress) } return false @@ -134,7 +134,7 @@ func (n *network) addLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*Po // Remove loadbalancer backend from all sandboxes which has a // connection to this network. If needed remove the service entry as // well, as specified by the rmService bool. -func (n *network) rmLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, rmService bool) { +func (n *network) rmLBBackend(ip, vip net.IP, lb *loadBalancer, ingressPorts []*PortConfig, rmService bool) { n.WalkEndpoints(func(e Endpoint) bool { ep := e.(*endpoint) if sb, ok := ep.getSandbox(); ok { @@ -147,7 +147,7 @@ func (n *network) rmLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*Por gwIP = ep.Iface().Address().IP } - sb.rmLBBackend(ip, vip, fwMark, ingressPorts, ep.Iface().Address(), gwIP, rmService, n.ingress) + sb.rmLBBackend(ip, vip, lb.fwMark, ingressPorts, ep.Iface().Address(), gwIP, rmService, n.ingress) } return false diff --git a/components/engine/vendor/github.com/docker/libnetwork/service_windows.go b/components/engine/vendor/github.com/docker/libnetwork/service_windows.go index 6fe521ef99..9ed3e06047 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/service_windows.go +++ b/components/engine/vendor/github.com/docker/libnetwork/service_windows.go @@ -1,11 +1,146 @@ package libnetwork -import "net" +import ( + "net" -func (n *network) addLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig) { + "github.com/Microsoft/hcsshim" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +type policyLists struct { + ilb *hcsshim.PolicyList + elb *hcsshim.PolicyList } -func (n *network) rmLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, rmService bool) { +var lbPolicylistMap map[*loadBalancer]*policyLists + +func init() { + lbPolicylistMap = make(map[*loadBalancer]*policyLists) +} + +func (n *network) addLBBackend(ip, vip net.IP, lb *loadBalancer, ingressPorts []*PortConfig) { + + if system.GetOSVersion().Build > 16236 { + lb.Lock() + defer lb.Unlock() + //find the load balancer IP for the network. 
+ var sourceVIP string + for _, e := range n.Endpoints() { + epInfo := e.Info() + if epInfo == nil { + continue + } + if epInfo.LoadBalancer() { + sourceVIP = epInfo.Iface().Address().IP.String() + break + } + } + + if sourceVIP == "" { + logrus.Errorf("Failed to find load balancer IP for network %s", n.Name()) + return + } + + var endpoints []hcsshim.HNSEndpoint + + for eid := range lb.backEnds { + //Call HNS to get back ID (GUID) corresponding to the endpoint. + hnsEndpoint, err := hcsshim.GetHNSEndpointByName(eid) + if err != nil { + logrus.Errorf("Failed to find HNS ID for endpoint %v: %v", eid, err) + return + } + + endpoints = append(endpoints, *hnsEndpoint) + } + + if policies, ok := lbPolicylistMap[lb]; ok { + + if policies.ilb != nil { + policies.ilb.Delete() + policies.ilb = nil + } + + if policies.elb != nil { + policies.elb.Delete() + policies.elb = nil + } + delete(lbPolicylistMap, lb) + } + + ilbPolicy, err := hcsshim.AddLoadBalancer(endpoints, true, sourceVIP, vip.String(), 0, 0, 0) + if err != nil { + logrus.Errorf("Failed to add ILB policy for service %s (%s) with endpoints %v using load balancer IP %s on network %s: %v", + lb.service.name, vip.String(), endpoints, sourceVIP, n.Name(), err) + return + } + + lbPolicylistMap[lb] = &policyLists{ + ilb: ilbPolicy, + } + + publishedPorts := make(map[uint32]uint32) + + for i, port := range ingressPorts { + protocol := uint16(6) + + // Skip already published port + if publishedPorts[port.PublishedPort] == port.TargetPort { + continue + } + + if port.Protocol == ProtocolUDP { + protocol = 17 + } + + // check if already has udp matching to add wild card publishing + for j := i + 1; j < len(ingressPorts); j++ { + if ingressPorts[j].TargetPort == port.TargetPort && + ingressPorts[j].PublishedPort == port.PublishedPort { + protocol = 0 + } + } + + publishedPorts[port.PublishedPort] = port.TargetPort + + lbPolicylistMap[lb].elb, err = hcsshim.AddLoadBalancer(endpoints, false, sourceVIP, "", protocol, uint16(port.TargetPort), uint16(port.PublishedPort)) + if err != nil { + logrus.Errorf("Failed to add ELB policy for service %s (ip:%s target port:%v published port:%v) with endpoints %v using load balancer IP %s on network %s: %v", + lb.service.name, vip.String(), uint16(port.TargetPort), uint16(port.PublishedPort), endpoints, sourceVIP, n.Name(), err) + return + } + } + } +} + +func (n *network) rmLBBackend(ip, vip net.IP, lb *loadBalancer, ingressPorts []*PortConfig, rmService bool) { + if system.GetOSVersion().Build > 16236 { + if len(lb.backEnds) > 0 { + //Reprogram HNS (actually VFP) with the existing backends. + n.addLBBackend(ip, vip, lb, ingressPorts) + } else { + lb.Lock() + defer lb.Unlock() + logrus.Debugf("No more backends for service %s (ip:%s). 
Removing all policies", lb.service.name, lb.vip.String()) + + if policyLists, ok := lbPolicylistMap[lb]; ok { + if policyLists.ilb != nil { + policyLists.ilb.Delete() + policyLists.ilb = nil + } + + if policyLists.elb != nil { + policyLists.elb.Delete() + policyLists.elb = nil + } + delete(lbPolicylistMap, lb) + + } else { + logrus.Errorf("Failed to find policies for service %s (%s)", lb.service.name, lb.vip.String()) + } + } + } } func (sb *sandbox) populateLoadbalancers(ep *endpoint) { diff --git a/components/engine/vendor/github.com/docker/libnetwork/types/types.go b/components/engine/vendor/github.com/docker/libnetwork/types/types.go index da113e24cd..164b18096c 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/types/types.go +++ b/components/engine/vendor/github.com/docker/libnetwork/types/types.go @@ -129,11 +129,11 @@ func (p *PortBinding) GetCopy() PortBinding { func (p *PortBinding) String() string { ret := fmt.Sprintf("%s/", p.Proto) if p.IP != nil { - ret = fmt.Sprintf("%s%s", ret, p.IP.String()) + ret += p.IP.String() } ret = fmt.Sprintf("%s:%d/", ret, p.Port) if p.HostIP != nil { - ret = fmt.Sprintf("%s%s", ret, p.HostIP.String()) + ret += p.HostIP.String() } ret = fmt.Sprintf("%s:%d", ret, p.HostPort) return ret diff --git a/components/engine/vendor/github.com/docker/libnetwork/vendor.conf b/components/engine/vendor/github.com/docker/libnetwork/vendor.conf index 6751cba47e..c97f5517df 100644 --- a/components/engine/vendor/github.com/docker/libnetwork/vendor.conf +++ b/components/engine/vendor/github.com/docker/libnetwork/vendor.conf @@ -1,7 +1,7 @@ github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34 -github.com/Microsoft/hcsshim v0.6.1 +github.com/Microsoft/hcsshim v0.6.3 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 github.com/boltdb/bolt c6ba97b89e0454fec9aa92e1d33a4e2c5fc1f631 diff --git a/components/engine/vendor/github.com/docker/swarmkit/api/objects.pb.go b/components/engine/vendor/github.com/docker/swarmkit/api/objects.pb.go index d7a2f015ad..cc1a6480fc 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/api/objects.pb.go +++ b/components/engine/vendor/github.com/docker/swarmkit/api/objects.pb.go @@ -57,6 +57,7 @@ type Node struct { // ManagerStatus provides the current status of the node's manager // component, if the node is a manager. ManagerStatus *ManagerStatus `protobuf:"bytes,6,opt,name=manager_status,json=managerStatus" json:"manager_status,omitempty"` + // DEPRECATED: Use lb_attachments to find the ingress network // The node attachment to the ingress network. Attachment *NetworkAttachment `protobuf:"bytes,7,opt,name=attachment" json:"attachment,omitempty"` // Certificate is the TLS certificate issued for the node, if any. @@ -71,6 +72,10 @@ type Node struct { // shows the privilege level that the CA would currently grant when // issuing or renewing the node's certificate. Role NodeRole `protobuf:"varint,9,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"` + // Each node uses the network attachment to set up an endpoint on the + // node to be used for load balancing. Each overlay network, including + // ingress network, will have an NetworkAttachment. 
+ LbAttachments []*NetworkAttachment `protobuf:"bytes,10,rep,name=lb_attachments,json=lbAttachments" json:"lb_attachments,omitempty"` } func (m *Node) Reset() { *m = Node{} } @@ -403,6 +408,14 @@ func (m *Node) CopyFrom(src interface{}) { github_com_docker_swarmkit_api_deepcopy.Copy(m.Attachment, o.Attachment) } github_com_docker_swarmkit_api_deepcopy.Copy(&m.Certificate, &o.Certificate) + if o.LbAttachments != nil { + m.LbAttachments = make([]*NetworkAttachment, len(o.LbAttachments)) + for i := range m.LbAttachments { + m.LbAttachments[i] = &NetworkAttachment{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.LbAttachments[i], o.LbAttachments[i]) + } + } + } func (m *Service) Copy() *Service { @@ -849,6 +862,18 @@ func (m *Node) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintObjects(dAtA, i, uint64(m.Role)) } + if len(m.LbAttachments) > 0 { + for _, msg := range m.LbAttachments { + dAtA[i] = 0x52 + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -1670,6 +1695,12 @@ func (m *Node) Size() (n int) { if m.Role != 0 { n += 1 + sovObjects(uint64(m.Role)) } + if len(m.LbAttachments) > 0 { + for _, e := range m.LbAttachments { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } return n } @@ -4383,6 +4414,7 @@ func (this *Node) String() string { `Attachment:` + strings.Replace(fmt.Sprintf("%v", this.Attachment), "NetworkAttachment", "NetworkAttachment", 1) + `,`, `Certificate:` + strings.Replace(strings.Replace(this.Certificate.String(), "Certificate", "Certificate", 1), `&`, ``, 1) + `,`, `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `LbAttachments:` + strings.Replace(fmt.Sprintf("%v", this.LbAttachments), "NetworkAttachment", "NetworkAttachment", 1) + `,`, `}`, }, "") return s @@ -5017,6 +5049,37 @@ func (m *Node) Unmarshal(dAtA []byte) error { break } } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LbAttachments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LbAttachments = append(m.LbAttachments, &NetworkAttachment{}) + if err := m.LbAttachments[len(m.LbAttachments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipObjects(dAtA[iNdEx:]) @@ -7689,100 +7752,101 @@ var ( func init() { proto.RegisterFile("github.com/docker/swarmkit/api/objects.proto", fileDescriptorObjects) } var fileDescriptorObjects = []byte{ - // 1513 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4d, 0x6f, 0x1b, 0x4f, - 0x19, 0xef, 0xda, 0x1b, 0xbf, 0x3c, 0x4e, 0x4c, 0x98, 0x7f, 0x08, 0x5b, 0x13, 0xec, 0xe0, 0x0a, - 0x54, 0x55, 0x95, 0x53, 0x42, 0x81, 0x34, 0x50, 0x5a, 0x3b, 0x89, 0x5a, 0xab, 0x94, 0x46, 0xd3, - 0xd2, 0x72, 0x5b, 0x26, 0xbb, 0x53, 0x77, 0xf1, 0x7a, 0x67, 0xb5, 0x33, 0x76, 0xf1, 0x8d, 0x73, - 0xf8, 0x00, 0xb9, 0x71, 0xe8, 0x57, 0x80, 0x0b, 0x17, 0x0e, 0x9c, 0x7a, 0xe4, 0x84, 0x38, 0x45, - 0xd4, 0xdf, 0x02, 0x89, 0x03, 0x9a, 0xd9, 0x59, 0x7b, 0x13, 0xaf, 0x93, 0x14, 0x55, 0xd1, 0xff, - 0x94, 0x99, 0x9d, 0xdf, 0xef, 
0x79, 0x9b, 0xe7, 0x65, 0x62, 0xb8, 0xdb, 0xf3, 0xc4, 0xbb, 0xe1, - 0x51, 0xcb, 0x61, 0x83, 0x2d, 0x97, 0x39, 0x7d, 0x1a, 0x6d, 0xf1, 0xf7, 0x24, 0x1a, 0xf4, 0x3d, - 0xb1, 0x45, 0x42, 0x6f, 0x8b, 0x1d, 0xfd, 0x8e, 0x3a, 0x82, 0xb7, 0xc2, 0x88, 0x09, 0x86, 0x50, - 0x0c, 0x69, 0x25, 0x90, 0xd6, 0xe8, 0x87, 0xb5, 0x3b, 0x97, 0x48, 0x10, 0xe3, 0x90, 0x6a, 0xfe, - 0xa5, 0x58, 0x1e, 0x52, 0x27, 0xc1, 0x36, 0x7a, 0x8c, 0xf5, 0x7c, 0xba, 0xa5, 0x76, 0x47, 0xc3, - 0xb7, 0x5b, 0xc2, 0x1b, 0x50, 0x2e, 0xc8, 0x20, 0xd4, 0x80, 0xb5, 0x1e, 0xeb, 0x31, 0xb5, 0xdc, - 0x92, 0x2b, 0xfd, 0xf5, 0xe6, 0x79, 0x1a, 0x09, 0xc6, 0xfa, 0xe8, 0xa7, 0x17, 0x68, 0x9f, 0xc2, - 0x43, 0x7f, 0xd8, 0xf3, 0x02, 0xfd, 0x27, 0x26, 0x36, 0xff, 0x6a, 0x80, 0xf9, 0x9c, 0x0a, 0x82, - 0x7e, 0x06, 0xc5, 0x11, 0x8d, 0xb8, 0xc7, 0x02, 0xcb, 0xd8, 0x34, 0x6e, 0x57, 0xb6, 0xbf, 0xd3, - 0x9a, 0x8f, 0x48, 0xeb, 0x75, 0x0c, 0xe9, 0x98, 0x1f, 0x4f, 0x1b, 0x37, 0x70, 0xc2, 0x40, 0x0f, - 0x00, 0x9c, 0x88, 0x12, 0x41, 0x5d, 0x9b, 0x08, 0x2b, 0xa7, 0xf8, 0xb5, 0x56, 0x6c, 0x6e, 0x2b, - 0xd1, 0xdf, 0x7a, 0x95, 0x78, 0x89, 0xcb, 0x1a, 0xdd, 0x16, 0x92, 0x3a, 0x0c, 0xdd, 0x84, 0x9a, - 0xbf, 0x9c, 0xaa, 0xd1, 0x6d, 0xd1, 0xfc, 0xb3, 0x09, 0xe6, 0xaf, 0x98, 0x4b, 0xd1, 0x3a, 0xe4, - 0x3c, 0x57, 0x99, 0x5d, 0xee, 0x14, 0x26, 0xa7, 0x8d, 0x5c, 0x77, 0x1f, 0xe7, 0x3c, 0x17, 0x6d, - 0x83, 0x39, 0xa0, 0x82, 0x68, 0x83, 0xac, 0x2c, 0x87, 0xa4, 0xef, 0xda, 0x1b, 0x85, 0x45, 0x3f, - 0x01, 0x53, 0x5e, 0x95, 0xb6, 0x64, 0x23, 0x8b, 0x23, 0x75, 0xbe, 0x0c, 0xa9, 0x93, 0xf0, 0x24, - 0x1e, 0x1d, 0x40, 0xc5, 0xa5, 0xdc, 0x89, 0xbc, 0x50, 0xc8, 0x18, 0x9a, 0x8a, 0x7e, 0x6b, 0x11, - 0x7d, 0x7f, 0x06, 0xc5, 0x69, 0x1e, 0xfa, 0x39, 0x14, 0xb8, 0x20, 0x62, 0xc8, 0xad, 0x25, 0x25, - 0xa1, 0xbe, 0xd0, 0x00, 0x85, 0xd2, 0x26, 0x68, 0x0e, 0x7a, 0x0a, 0xd5, 0x01, 0x09, 0x48, 0x8f, - 0x46, 0xb6, 0x96, 0x52, 0x50, 0x52, 0xbe, 0x97, 0xe9, 0x7a, 0x8c, 0x8c, 0x05, 0xe1, 0x95, 0x41, - 0x7a, 0x8b, 0x0e, 0x00, 0x88, 0x10, 0xc4, 0x79, 0x37, 0xa0, 0x81, 0xb0, 0x8a, 0x4a, 0xca, 0xf7, - 0x33, 0x6d, 0xa1, 0xe2, 0x3d, 0x8b, 0xfa, 0xed, 0x29, 0x18, 0xa7, 0x88, 0xe8, 0x09, 0x54, 0x1c, - 0x1a, 0x09, 0xef, 0xad, 0xe7, 0x10, 0x41, 0xad, 0x92, 0x92, 0xd3, 0xc8, 0x92, 0xb3, 0x37, 0x83, - 0x69, 0xa7, 0xd2, 0x4c, 0x74, 0x0f, 0xcc, 0x88, 0xf9, 0xd4, 0x2a, 0x6f, 0x1a, 0xb7, 0xab, 0x8b, - 0xaf, 0x05, 0x33, 0x9f, 0x62, 0x85, 0xdc, 0x5d, 0x3f, 0x3e, 0x69, 0x22, 0x58, 0x2d, 0x19, 0xab, - 0x86, 0x4a, 0x0d, 0xe3, 0x9e, 0xf1, 0x1b, 0xe3, 0xb7, 0x46, 0xf3, 0xbf, 0x79, 0x28, 0xbe, 0xa4, - 0xd1, 0xc8, 0x73, 0xbe, 0x6c, 0xe2, 0x3c, 0x38, 0x93, 0x38, 0x99, 0x3e, 0x6a, 0xb5, 0x73, 0xb9, - 0xb3, 0x03, 0x25, 0x1a, 0xb8, 0x21, 0xf3, 0x02, 0xa1, 0x13, 0x27, 0xd3, 0xc1, 0x03, 0x8d, 0xc1, - 0x53, 0x34, 0x3a, 0x80, 0x95, 0xb8, 0x1e, 0xec, 0x33, 0x59, 0xb3, 0x99, 0x45, 0xff, 0xb5, 0x02, - 0xea, 0xeb, 0x5e, 0x1e, 0xa6, 0x76, 0x68, 0x1f, 0x56, 0xc2, 0x88, 0x8e, 0x3c, 0x36, 0xe4, 0xb6, - 0x72, 0xa2, 0x70, 0x25, 0x27, 0xf0, 0x72, 0xc2, 0x92, 0x3b, 0xf4, 0x0b, 0x58, 0x96, 0x64, 0x3b, - 0xe9, 0x23, 0x70, 0x69, 0x1f, 0xc1, 0x15, 0x49, 0xd0, 0x1b, 0xf4, 0x02, 0xbe, 0x75, 0xc6, 0x8a, - 0xa9, 0xa0, 0xca, 0xe5, 0x82, 0xbe, 0x4a, 0x5b, 0xa2, 0x3f, 0xee, 0xa2, 0xe3, 0x93, 0x66, 0x15, - 0x96, 0xd3, 0x29, 0xd0, 0xfc, 0x53, 0x0e, 0x4a, 0x49, 0x20, 0xd1, 0x7d, 0x7d, 0x67, 0xc6, 0xe2, - 0xa8, 0x25, 0x58, 0xe5, 0x6f, 0x7c, 0x5d, 0xf7, 0x61, 0x29, 0x64, 0x91, 0xe0, 0x56, 0x6e, 0x33, - 0xbf, 0xa8, 0x44, 0x0f, 0x59, 0x24, 0xf6, 0x58, 0xf0, 0xd6, 0xeb, 0xe1, 0x18, 0x8c, 0xde, 0x40, - 0x65, 0xe4, 0x45, 0x62, 0x48, 0x7c, 0xdb, 0x0b, 0xb9, 
0x95, 0x57, 0xdc, 0x1f, 0x5c, 0xa4, 0xb2, - 0xf5, 0x3a, 0xc6, 0x77, 0x0f, 0x3b, 0xd5, 0xc9, 0x69, 0x03, 0xa6, 0x5b, 0x8e, 0x41, 0x8b, 0xea, - 0x86, 0xbc, 0xf6, 0x1c, 0xca, 0xd3, 0x13, 0x74, 0x17, 0x20, 0x88, 0x2b, 0xd2, 0x9e, 0x66, 0xf6, - 0xca, 0xe4, 0xb4, 0x51, 0xd6, 0x75, 0xda, 0xdd, 0xc7, 0x65, 0x0d, 0xe8, 0xba, 0x08, 0x81, 0x49, - 0x5c, 0x37, 0x52, 0x79, 0x5e, 0xc6, 0x6a, 0xdd, 0xfc, 0x63, 0x11, 0xcc, 0x57, 0x84, 0xf7, 0xaf, - 0xbb, 0xab, 0x4a, 0x9d, 0x73, 0x95, 0x71, 0x17, 0x80, 0xc7, 0xf9, 0x26, 0xdd, 0x31, 0x67, 0xee, - 0xe8, 0x2c, 0x94, 0xee, 0x68, 0x40, 0xec, 0x0e, 0xf7, 0x99, 0x50, 0x45, 0x60, 0x62, 0xb5, 0x46, - 0xb7, 0xa0, 0x18, 0x30, 0x57, 0xd1, 0x0b, 0x8a, 0x0e, 0x93, 0xd3, 0x46, 0x41, 0xf6, 0x8a, 0xee, - 0x3e, 0x2e, 0xc8, 0xa3, 0xae, 0x2b, 0xdb, 0x14, 0x09, 0x02, 0x26, 0x88, 0xec, 0xc1, 0x5c, 0xb7, - 0xbb, 0xcc, 0xec, 0x6f, 0xcf, 0x60, 0x49, 0x9b, 0x4a, 0x31, 0xd1, 0x6b, 0xf8, 0x2a, 0xb1, 0x37, - 0x2d, 0xb0, 0xf4, 0x39, 0x02, 0x91, 0x96, 0x90, 0x3a, 0x49, 0x8d, 0x85, 0xf2, 0xe2, 0xb1, 0xa0, - 0x22, 0x98, 0x35, 0x16, 0x3a, 0xb0, 0xe2, 0x52, 0xee, 0x45, 0xd4, 0x55, 0x6d, 0x82, 0xaa, 0xca, - 0xac, 0x6e, 0x7f, 0xf7, 0x22, 0x21, 0x14, 0x2f, 0x6b, 0x8e, 0xda, 0xa1, 0x36, 0x94, 0x74, 0xde, - 0x70, 0xab, 0xa2, 0x72, 0xf7, 0x8a, 0xe3, 0x60, 0x4a, 0x3b, 0xd3, 0xe6, 0x96, 0x3f, 0xab, 0xcd, - 0x3d, 0x00, 0xf0, 0x59, 0xcf, 0x76, 0x23, 0x6f, 0x44, 0x23, 0x6b, 0x45, 0x3f, 0x12, 0x32, 0xb8, - 0xfb, 0x0a, 0x81, 0xcb, 0x3e, 0xeb, 0xc5, 0xcb, 0xb9, 0xa6, 0x54, 0xfd, 0xcc, 0xa6, 0x44, 0xa0, - 0x46, 0x38, 0xf7, 0x7a, 0x01, 0x75, 0xed, 0x1e, 0x0d, 0x68, 0xe4, 0x39, 0x76, 0x44, 0x39, 0x1b, - 0x46, 0x0e, 0xe5, 0xd6, 0x37, 0x54, 0x24, 0x32, 0xc7, 0xfc, 0x93, 0x18, 0x8c, 0x35, 0x16, 0x5b, - 0x89, 0x98, 0x73, 0x07, 0x7c, 0xb7, 0x76, 0x7c, 0xd2, 0x5c, 0x87, 0xb5, 0x74, 0x9b, 0xda, 0x31, - 0x1e, 0x1b, 0x4f, 0x8d, 0x43, 0xa3, 0xf9, 0xf7, 0x1c, 0x7c, 0x73, 0x2e, 0xa6, 0xe8, 0xc7, 0x50, - 0xd4, 0x51, 0xbd, 0xe8, 0xb1, 0xa6, 0x79, 0x38, 0xc1, 0xa2, 0x0d, 0x28, 0xcb, 0x12, 0xa7, 0x9c, - 0xd3, 0xb8, 0x79, 0x95, 0xf1, 0xec, 0x03, 0xb2, 0xa0, 0x48, 0x7c, 0x8f, 0xc8, 0xb3, 0xbc, 0x3a, - 0x4b, 0xb6, 0x68, 0x08, 0xeb, 0x71, 0xe8, 0xed, 0xd9, 0x68, 0xb7, 0x59, 0x28, 0xb8, 0x65, 0x2a, - 0xff, 0x1f, 0x5d, 0x29, 0x13, 0xf4, 0xe5, 0xcc, 0x3e, 0xbc, 0x08, 0x05, 0x3f, 0x08, 0x44, 0x34, - 0xc6, 0x6b, 0x6e, 0xc6, 0x51, 0xed, 0x09, 0xdc, 0x5c, 0x48, 0x41, 0xab, 0x90, 0xef, 0xd3, 0x71, - 0xdc, 0x9e, 0xb0, 0x5c, 0xa2, 0x35, 0x58, 0x1a, 0x11, 0x7f, 0x48, 0x75, 0x37, 0x8b, 0x37, 0xbb, - 0xb9, 0x1d, 0xa3, 0xf9, 0x21, 0x07, 0x45, 0x6d, 0xce, 0x75, 0x8f, 0x7c, 0xad, 0x76, 0xae, 0xb1, - 0x3d, 0x84, 0x65, 0x1d, 0xd2, 0xb8, 0x22, 0xcd, 0x4b, 0x73, 0xba, 0x12, 0xe3, 0xe3, 0x6a, 0x7c, - 0x08, 0xa6, 0x17, 0x92, 0x81, 0x1e, 0xf7, 0x99, 0x9a, 0xbb, 0x87, 0xed, 0xe7, 0x2f, 0xc2, 0xb8, - 0xb1, 0x94, 0x26, 0xa7, 0x0d, 0x53, 0x7e, 0xc0, 0x8a, 0x96, 0x39, 0x18, 0xff, 0xb2, 0x04, 0xc5, - 0x3d, 0x7f, 0xc8, 0x05, 0x8d, 0xae, 0x3b, 0x48, 0x5a, 0xed, 0x5c, 0x90, 0xf6, 0xa0, 0x18, 0x31, - 0x26, 0x6c, 0x87, 0x5c, 0x14, 0x1f, 0xcc, 0x98, 0xd8, 0x6b, 0x77, 0xaa, 0x92, 0x28, 0x7b, 0x7b, - 0xbc, 0xc7, 0x05, 0x49, 0xdd, 0x23, 0xe8, 0x0d, 0xac, 0x27, 0x13, 0xf1, 0x88, 0x31, 0xc1, 0x45, - 0x44, 0x42, 0xbb, 0x4f, 0xc7, 0xf2, 0xad, 0x94, 0x5f, 0xf4, 0x36, 0x3e, 0x08, 0x9c, 0x68, 0xac, - 0x82, 0xf7, 0x8c, 0x8e, 0xf1, 0x9a, 0x16, 0xd0, 0x49, 0xf8, 0xcf, 0xe8, 0x98, 0xa3, 0x47, 0xb0, - 0x41, 0xa7, 0x30, 0x29, 0xd1, 0xf6, 0xc9, 0x40, 0xce, 0x7a, 0xdb, 0xf1, 0x99, 0xd3, 0x57, 0xe3, - 0xc6, 0xc4, 0x37, 0x69, 0x5a, 0xd4, 0x2f, 0x63, 0xc4, 0x9e, 0x04, 0x20, 0x0e, 
0xd6, 0x91, 0x4f, - 0x9c, 0xbe, 0xef, 0x71, 0xf9, 0xef, 0x4f, 0xea, 0xb9, 0x2b, 0x27, 0x86, 0xb4, 0x6d, 0xe7, 0x82, - 0x68, 0xb5, 0x3a, 0x33, 0x6e, 0xea, 0xf1, 0xac, 0x2b, 0xea, 0xdb, 0x47, 0xd9, 0xa7, 0xa8, 0x03, - 0x95, 0x61, 0x20, 0xd5, 0xc7, 0x31, 0x28, 0x5f, 0x35, 0x06, 0x10, 0xb3, 0xa4, 0xe7, 0xb5, 0x11, - 0x6c, 0x5c, 0xa4, 0x3c, 0xa3, 0x36, 0x1f, 0xa7, 0x6b, 0xb3, 0xb2, 0x7d, 0x27, 0x4b, 0x5f, 0xb6, - 0xc8, 0x54, 0x1d, 0x67, 0xa6, 0xed, 0xdf, 0x0c, 0x28, 0xbc, 0xa4, 0x4e, 0x44, 0xc5, 0x17, 0xcd, - 0xda, 0x9d, 0x33, 0x59, 0x5b, 0xcf, 0x7e, 0x08, 0x4b, 0xad, 0x73, 0x49, 0x5b, 0x83, 0x92, 0x17, - 0x08, 0x1a, 0x05, 0xc4, 0x57, 0x59, 0x5b, 0xc2, 0xd3, 0x7d, 0xa6, 0x03, 0x1f, 0x0c, 0x28, 0xc4, - 0x2f, 0xc5, 0xeb, 0x76, 0x20, 0xd6, 0x7a, 0xde, 0x81, 0x4c, 0x23, 0xff, 0x63, 0x40, 0x29, 0x19, - 0x58, 0x5f, 0xd4, 0xcc, 0x73, 0x2f, 0xaf, 0xfc, 0xff, 0xfd, 0xf2, 0x42, 0x60, 0xf6, 0xbd, 0x40, - 0xbf, 0x11, 0xb1, 0x5a, 0xa3, 0x16, 0x14, 0x43, 0x32, 0xf6, 0x19, 0x71, 0x75, 0xa3, 0x5c, 0x9b, - 0xfb, 0x61, 0xa1, 0x1d, 0x8c, 0x71, 0x02, 0xda, 0x5d, 0x3b, 0x3e, 0x69, 0xae, 0x42, 0x35, 0xed, - 0xf9, 0x3b, 0xa3, 0xf9, 0x4f, 0x03, 0xca, 0x07, 0xbf, 0x17, 0x34, 0x50, 0xef, 0x81, 0xaf, 0xa5, - 0xf3, 0x9b, 0xf3, 0x3f, 0x3e, 0x94, 0xcf, 0xfc, 0xae, 0x90, 0x75, 0xa9, 0x1d, 0xeb, 0xe3, 0xa7, - 0xfa, 0x8d, 0x7f, 0x7d, 0xaa, 0xdf, 0xf8, 0xc3, 0xa4, 0x6e, 0x7c, 0x9c, 0xd4, 0x8d, 0x7f, 0x4c, - 0xea, 0xc6, 0xbf, 0x27, 0x75, 0xe3, 0xa8, 0xa0, 0xe2, 0xf3, 0xa3, 0xff, 0x05, 0x00, 0x00, 0xff, - 0xff, 0x34, 0x0b, 0x7d, 0x79, 0x43, 0x13, 0x00, 0x00, + // 1536 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x72, 0x1b, 0x4b, + 0x15, 0xce, 0x48, 0x63, 0xfd, 0x1c, 0xd9, 0xc2, 0xf4, 0x35, 0x66, 0x22, 0x8c, 0x64, 0x74, 0x0b, + 0xea, 0xd6, 0xad, 0x94, 0x1c, 0x4c, 0x00, 0xc7, 0x10, 0x12, 0xc9, 0x76, 0x25, 0xaa, 0x24, 0xc4, + 0xd5, 0x09, 0x09, 0xbb, 0xa1, 0x35, 0xd3, 0x51, 0x06, 0x8d, 0xa6, 0xa7, 0xa6, 0x5b, 0x0a, 0xda, + 0xb1, 0x61, 0x63, 0x1e, 0xc0, 0x3b, 0x16, 0x79, 0x06, 0x36, 0x6c, 0x58, 0xb0, 0xca, 0x92, 0x15, + 0xc5, 0xca, 0x45, 0xf4, 0x16, 0x54, 0xb1, 0xa0, 0xba, 0xa7, 0x47, 0x1a, 0x5b, 0xe3, 0x3f, 0x2a, + 0xe5, 0x62, 0xe5, 0xee, 0xe9, 0xef, 0x3b, 0x7f, 0x7d, 0xce, 0xe9, 0x63, 0xc1, 0x9d, 0xbe, 0x27, + 0xde, 0x8d, 0x7a, 0x2d, 0x87, 0x0d, 0xb7, 0x5c, 0xe6, 0x0c, 0x68, 0xb4, 0xc5, 0xdf, 0x93, 0x68, + 0x38, 0xf0, 0xc4, 0x16, 0x09, 0xbd, 0x2d, 0xd6, 0xfb, 0x2d, 0x75, 0x04, 0x6f, 0x85, 0x11, 0x13, + 0x0c, 0xa1, 0x18, 0xd2, 0x4a, 0x20, 0xad, 0xf1, 0x0f, 0x6b, 0x5f, 0x5f, 0x22, 0x41, 0x4c, 0x42, + 0xaa, 0xf9, 0x97, 0x62, 0x79, 0x48, 0x9d, 0x04, 0xdb, 0xe8, 0x33, 0xd6, 0xf7, 0xe9, 0x96, 0xda, + 0xf5, 0x46, 0x6f, 0xb7, 0x84, 0x37, 0xa4, 0x5c, 0x90, 0x61, 0xa8, 0x01, 0x6b, 0x7d, 0xd6, 0x67, + 0x6a, 0xb9, 0x25, 0x57, 0xfa, 0xeb, 0xed, 0xb3, 0x34, 0x12, 0x4c, 0xf4, 0xd1, 0x4f, 0x2f, 0xd0, + 0x3e, 0x83, 0x87, 0xfe, 0xa8, 0xef, 0x05, 0xfa, 0x4f, 0x4c, 0x6c, 0xfe, 0xc5, 0x00, 0xf3, 0x39, + 0x15, 0x04, 0xfd, 0x0c, 0x8a, 0x63, 0x1a, 0x71, 0x8f, 0x05, 0x96, 0xb1, 0x69, 0x7c, 0x55, 0xd9, + 0xfe, 0x4e, 0x6b, 0x31, 0x22, 0xad, 0xd7, 0x31, 0xa4, 0x63, 0x7e, 0x3c, 0x69, 0xdc, 0xc2, 0x09, + 0x03, 0xdd, 0x07, 0x70, 0x22, 0x4a, 0x04, 0x75, 0x6d, 0x22, 0xac, 0x9c, 0xe2, 0xd7, 0x5a, 0xb1, + 0xb9, 0xad, 0x44, 0x7f, 0xeb, 0x55, 0xe2, 0x25, 0x2e, 0x6b, 0x74, 0x5b, 0x48, 0xea, 0x28, 0x74, + 0x13, 0x6a, 0xfe, 0x72, 0xaa, 0x46, 0xb7, 0x45, 0xf3, 0x0f, 0x4b, 0x60, 0xfe, 0x92, 0xb9, 0x14, + 0xad, 0x43, 0xce, 0x73, 0x95, 0xd9, 0xe5, 0x4e, 0x61, 0x7a, 0xd2, 0xc8, 0x75, 0xf7, 0x71, 0xce, + 
0x73, 0xd1, 0x36, 0x98, 0x43, 0x2a, 0x88, 0x36, 0xc8, 0xca, 0x72, 0x48, 0xfa, 0xae, 0xbd, 0x51, + 0x58, 0xf4, 0x13, 0x30, 0xe5, 0x55, 0x69, 0x4b, 0x36, 0xb2, 0x38, 0x52, 0xe7, 0xcb, 0x90, 0x3a, + 0x09, 0x4f, 0xe2, 0xd1, 0x01, 0x54, 0x5c, 0xca, 0x9d, 0xc8, 0x0b, 0x85, 0x8c, 0xa1, 0xa9, 0xe8, + 0x5f, 0x9e, 0x47, 0xdf, 0x9f, 0x43, 0x71, 0x9a, 0x87, 0x7e, 0x0e, 0x05, 0x2e, 0x88, 0x18, 0x71, + 0x6b, 0x49, 0x49, 0xa8, 0x9f, 0x6b, 0x80, 0x42, 0x69, 0x13, 0x34, 0x07, 0x3d, 0x81, 0xea, 0x90, + 0x04, 0xa4, 0x4f, 0x23, 0x5b, 0x4b, 0x29, 0x28, 0x29, 0xdf, 0xcb, 0x74, 0x3d, 0x46, 0xc6, 0x82, + 0xf0, 0xca, 0x30, 0xbd, 0x45, 0x5d, 0x00, 0x22, 0x04, 0x71, 0xde, 0x0d, 0x69, 0x20, 0xac, 0xa2, + 0x92, 0xf2, 0xfd, 0x4c, 0x5b, 0xa8, 0x78, 0xcf, 0xa2, 0x41, 0x7b, 0x06, 0xee, 0xe4, 0x2c, 0x03, + 0xa7, 0xc8, 0xe8, 0x31, 0x54, 0x1c, 0x1a, 0x09, 0xef, 0xad, 0xe7, 0x10, 0x41, 0xad, 0x92, 0x92, + 0xd5, 0xc8, 0x92, 0xb5, 0x37, 0x87, 0x69, 0xc7, 0xd2, 0x4c, 0x74, 0x17, 0xcc, 0x88, 0xf9, 0xd4, + 0x2a, 0x6f, 0x1a, 0x5f, 0x55, 0xcf, 0xbf, 0x1a, 0xcc, 0x7c, 0x8a, 0x15, 0x12, 0x3d, 0x83, 0xaa, + 0xdf, 0xb3, 0xe7, 0xb6, 0x70, 0x0b, 0x36, 0xf3, 0x57, 0xf6, 0x04, 0xaf, 0xf8, 0xbd, 0xf9, 0x8e, + 0xef, 0xae, 0x1f, 0x1d, 0x37, 0x11, 0xac, 0x96, 0x8c, 0x55, 0x43, 0x25, 0x9b, 0x71, 0xd7, 0xf8, + 0xb5, 0xf1, 0x1b, 0xa3, 0xf9, 0x9f, 0x3c, 0x14, 0x5f, 0xd2, 0x68, 0xec, 0x39, 0x9f, 0x37, 0x15, + 0xef, 0x9f, 0x4a, 0xc5, 0xcc, 0x88, 0x69, 0xb5, 0x0b, 0xd9, 0xb8, 0x03, 0x25, 0x1a, 0xb8, 0x21, + 0xf3, 0x02, 0xa1, 0x53, 0x31, 0x33, 0x5c, 0x07, 0x1a, 0x83, 0x67, 0x68, 0x74, 0x00, 0x2b, 0x71, + 0x85, 0xd9, 0xa7, 0xf2, 0x70, 0x33, 0x8b, 0xfe, 0x2b, 0x05, 0xd4, 0x09, 0xb4, 0x3c, 0x4a, 0xed, + 0xd0, 0x3e, 0xac, 0x84, 0x11, 0x1d, 0x7b, 0x6c, 0xc4, 0x6d, 0xe5, 0x44, 0xe1, 0x4a, 0x4e, 0xe0, + 0xe5, 0x84, 0x25, 0x77, 0xe8, 0x17, 0xb0, 0x2c, 0xc9, 0x76, 0xd2, 0x99, 0xe0, 0xd2, 0xce, 0x84, + 0x2b, 0x92, 0xa0, 0x37, 0xe8, 0x05, 0x7c, 0xeb, 0x94, 0x15, 0x33, 0x41, 0x95, 0xcb, 0x05, 0x7d, + 0x91, 0xb6, 0x44, 0x7f, 0xdc, 0x45, 0x47, 0xc7, 0xcd, 0x2a, 0x2c, 0xa7, 0x53, 0xa0, 0xf9, 0xa7, + 0x1c, 0x94, 0x92, 0x40, 0xa2, 0x7b, 0xfa, 0xce, 0x8c, 0xf3, 0xa3, 0x96, 0x60, 0x95, 0xbf, 0xf1, + 0x75, 0xdd, 0x83, 0xa5, 0x90, 0x45, 0x82, 0x5b, 0x39, 0x95, 0x9e, 0x99, 0x45, 0x7f, 0xc8, 0x22, + 0xb1, 0xc7, 0x82, 0xb7, 0x5e, 0x1f, 0xc7, 0x60, 0xf4, 0x06, 0x2a, 0x63, 0x2f, 0x12, 0x23, 0xe2, + 0xdb, 0x5e, 0xc8, 0xad, 0xbc, 0xe2, 0xfe, 0xe0, 0x22, 0x95, 0xad, 0xd7, 0x31, 0xbe, 0x7b, 0xd8, + 0xa9, 0x4e, 0x4f, 0x1a, 0x30, 0xdb, 0x72, 0x0c, 0x5a, 0x54, 0x37, 0xe4, 0xb5, 0xe7, 0x50, 0x9e, + 0x9d, 0xa0, 0x3b, 0x00, 0x41, 0x5c, 0x19, 0xf6, 0x2c, 0xb3, 0x57, 0xa6, 0x27, 0x8d, 0xb2, 0xae, + 0x97, 0xee, 0x3e, 0x2e, 0x6b, 0x40, 0xd7, 0x45, 0x08, 0x4c, 0xe2, 0xba, 0x91, 0xca, 0xf3, 0x32, + 0x56, 0xeb, 0xe6, 0x1f, 0x8b, 0x60, 0xbe, 0x22, 0x7c, 0x70, 0xd3, 0x7d, 0x5a, 0xea, 0x5c, 0xa8, + 0x8c, 0x3b, 0x00, 0x3c, 0xce, 0x37, 0xe9, 0x8e, 0x39, 0x77, 0x47, 0x67, 0xa1, 0x74, 0x47, 0x03, + 0x62, 0x77, 0xb8, 0xcf, 0x84, 0x2a, 0x02, 0x13, 0xab, 0x35, 0xfa, 0x12, 0x8a, 0x01, 0x73, 0x15, + 0xbd, 0xa0, 0xe8, 0x30, 0x3d, 0x69, 0x14, 0x64, 0xe7, 0xe9, 0xee, 0xe3, 0x82, 0x3c, 0xea, 0xba, + 0xb2, 0xe9, 0x91, 0x20, 0x60, 0x82, 0xc8, 0xae, 0xce, 0x75, 0x03, 0xcd, 0xcc, 0xfe, 0xf6, 0x1c, + 0x96, 0x34, 0xbd, 0x14, 0x13, 0xbd, 0x86, 0x2f, 0x12, 0x7b, 0xd3, 0x02, 0x4b, 0xd7, 0x11, 0x88, + 0xb4, 0x84, 0xd4, 0x49, 0xea, 0xa1, 0x29, 0x9f, 0xff, 0xd0, 0xa8, 0x08, 0x66, 0x3d, 0x34, 0x1d, + 0x58, 0x71, 0x29, 0xf7, 0x22, 0xea, 0xaa, 0x36, 0x41, 0x55, 0x65, 0x56, 0xb7, 0xbf, 0x7b, 0x91, + 0x10, 0x8a, 0x97, 0x35, 
0x47, 0xed, 0x50, 0x1b, 0x4a, 0x3a, 0x6f, 0xb8, 0x55, 0xb9, 0x4e, 0x5b, + 0x9e, 0xd1, 0x4e, 0xb5, 0xb9, 0xe5, 0x6b, 0xb5, 0xb9, 0xfb, 0x00, 0x3e, 0xeb, 0xdb, 0x6e, 0xe4, + 0x8d, 0x69, 0x64, 0xad, 0xe8, 0xb1, 0x23, 0x83, 0xbb, 0xaf, 0x10, 0xb8, 0xec, 0xb3, 0x7e, 0xbc, + 0x5c, 0x68, 0x4a, 0xd5, 0x6b, 0x36, 0x25, 0x02, 0x35, 0xc2, 0xb9, 0xd7, 0x0f, 0xa8, 0x6b, 0xf7, + 0x69, 0x40, 0x23, 0xcf, 0xb1, 0x23, 0xca, 0xd9, 0x28, 0x72, 0x28, 0xb7, 0xbe, 0xa1, 0x22, 0x91, + 0x39, 0x38, 0x3c, 0x8e, 0xc1, 0x58, 0x63, 0xb1, 0x95, 0x88, 0x39, 0x73, 0xc0, 0x77, 0x6b, 0x47, + 0xc7, 0xcd, 0x75, 0x58, 0x4b, 0xb7, 0xa9, 0x1d, 0xe3, 0x91, 0xf1, 0xc4, 0x38, 0x34, 0x9a, 0x7f, + 0xcb, 0xc1, 0x37, 0x17, 0x62, 0x8a, 0x7e, 0x0c, 0x45, 0x1d, 0xd5, 0x8b, 0xc6, 0x3f, 0xcd, 0xc3, + 0x09, 0x16, 0x6d, 0x40, 0x59, 0x96, 0x38, 0xe5, 0x9c, 0xc6, 0xcd, 0xab, 0x8c, 0xe7, 0x1f, 0x90, + 0x05, 0x45, 0xe2, 0x7b, 0x44, 0x9e, 0xe5, 0xd5, 0x59, 0xb2, 0x45, 0x23, 0x58, 0x8f, 0x43, 0x9f, + 0x7a, 0x9c, 0x6d, 0x16, 0x0a, 0x6e, 0x99, 0xca, 0xff, 0x87, 0x57, 0xca, 0x04, 0x7d, 0x39, 0xf3, + 0x0f, 0x2f, 0x42, 0xc1, 0x0f, 0x02, 0x11, 0x4d, 0xf0, 0x9a, 0x9b, 0x71, 0x54, 0x7b, 0x0c, 0xb7, + 0xcf, 0xa5, 0xa0, 0x55, 0xc8, 0x0f, 0xe8, 0x24, 0x6e, 0x4f, 0x58, 0x2e, 0xd1, 0x1a, 0x2c, 0x8d, + 0x89, 0x3f, 0xa2, 0xba, 0x9b, 0xc5, 0x9b, 0xdd, 0xdc, 0x8e, 0xd1, 0xfc, 0x90, 0x83, 0xa2, 0x36, + 0xe7, 0xa6, 0x9f, 0x7c, 0xad, 0x76, 0xa1, 0xb1, 0x3d, 0x80, 0x65, 0x1d, 0xd2, 0xb8, 0x22, 0xcd, + 0x4b, 0x73, 0xba, 0x12, 0xe3, 0xe3, 0x6a, 0x7c, 0x00, 0xa6, 0x17, 0x92, 0xa1, 0x7e, 0xee, 0x33, + 0x35, 0x77, 0x0f, 0xdb, 0xcf, 0x5f, 0x84, 0x71, 0x63, 0x29, 0x4d, 0x4f, 0x1a, 0xa6, 0xfc, 0x80, + 0x15, 0x2d, 0xf3, 0x61, 0xfc, 0xf3, 0x12, 0x14, 0xf7, 0xfc, 0x11, 0x17, 0x34, 0xba, 0xe9, 0x20, + 0x69, 0xb5, 0x0b, 0x41, 0xda, 0x83, 0x62, 0xc4, 0x98, 0xb0, 0x1d, 0x72, 0x51, 0x7c, 0x30, 0x63, + 0x62, 0xaf, 0xdd, 0xa9, 0x4a, 0xa2, 0xec, 0xed, 0xf1, 0x1e, 0x17, 0x24, 0x75, 0x8f, 0xa0, 0x37, + 0xb0, 0x9e, 0xbc, 0x88, 0x3d, 0xc6, 0x04, 0x17, 0x11, 0x09, 0xed, 0x01, 0x9d, 0xc8, 0x59, 0x29, + 0x7f, 0xde, 0xb4, 0x7d, 0x10, 0x38, 0xd1, 0x44, 0x05, 0xef, 0x29, 0x9d, 0xe0, 0x35, 0x2d, 0xa0, + 0x93, 0xf0, 0x9f, 0xd2, 0x09, 0x47, 0x0f, 0x61, 0x83, 0xce, 0x60, 0x52, 0xa2, 0xed, 0x93, 0xa1, + 0x7c, 0xeb, 0x6d, 0xc7, 0x67, 0xce, 0x40, 0x3d, 0x37, 0x26, 0xbe, 0x4d, 0xd3, 0xa2, 0x9e, 0xc5, + 0x88, 0x3d, 0x09, 0x40, 0x1c, 0xac, 0x9e, 0x4f, 0x9c, 0x81, 0xef, 0x71, 0xf9, 0x0f, 0x55, 0x6a, + 0x78, 0x96, 0x2f, 0x86, 0xb4, 0x6d, 0xe7, 0x82, 0x68, 0xb5, 0x3a, 0x73, 0x6e, 0x6a, 0x14, 0xd7, + 0x15, 0xf5, 0xed, 0x5e, 0xf6, 0x29, 0xea, 0x40, 0x65, 0x14, 0x48, 0xf5, 0x71, 0x0c, 0xca, 0x57, + 0x8d, 0x01, 0xc4, 0x2c, 0xe9, 0x79, 0x6d, 0x0c, 0x1b, 0x17, 0x29, 0xcf, 0xa8, 0xcd, 0x47, 0xe9, + 0xda, 0xac, 0x6c, 0x7f, 0x9d, 0xa5, 0x2f, 0x5b, 0x64, 0xaa, 0x8e, 0x33, 0xd3, 0xf6, 0xaf, 0x06, + 0x14, 0x5e, 0x52, 0x27, 0xa2, 0xe2, 0xb3, 0x66, 0xed, 0xce, 0xa9, 0xac, 0xad, 0x67, 0x0f, 0xc2, + 0x52, 0xeb, 0x42, 0xd2, 0xd6, 0xa0, 0xe4, 0x05, 0x82, 0x46, 0x01, 0xf1, 0x55, 0xd6, 0x96, 0xf0, + 0x6c, 0x9f, 0xe9, 0xc0, 0x07, 0x03, 0x0a, 0xf1, 0xa4, 0x78, 0xd3, 0x0e, 0xc4, 0x5a, 0xcf, 0x3a, + 0x90, 0x69, 0xe4, 0xbf, 0x0d, 0x28, 0x25, 0x0f, 0xd6, 0x67, 0x35, 0xf3, 0xcc, 0xe4, 0x95, 0xff, + 0x9f, 0x27, 0x2f, 0x04, 0xe6, 0xc0, 0x0b, 0xf4, 0x8c, 0x88, 0xd5, 0x1a, 0xb5, 0xa0, 0x18, 0x92, + 0x89, 0xcf, 0x88, 0xab, 0x1b, 0xe5, 0xda, 0xc2, 0x4f, 0x15, 0xed, 0x60, 0x82, 0x13, 0xd0, 0xee, + 0xda, 0xd1, 0x71, 0x73, 0x15, 0xaa, 0x69, 0xcf, 0xdf, 0x19, 0xcd, 0x7f, 0x18, 0x50, 0x3e, 0xf8, + 0x9d, 0xa0, 0x81, 0x9a, 0x07, 0xfe, 0x2f, 0x9d, 
0xdf, 0x5c, 0xfc, 0x39, 0xa3, 0x7c, 0xea, 0x97, + 0x8a, 0xac, 0x4b, 0xed, 0x58, 0x1f, 0x3f, 0xd5, 0x6f, 0xfd, 0xf3, 0x53, 0xfd, 0xd6, 0xef, 0xa7, + 0x75, 0xe3, 0xe3, 0xb4, 0x6e, 0xfc, 0x7d, 0x5a, 0x37, 0xfe, 0x35, 0xad, 0x1b, 0xbd, 0x82, 0x8a, + 0xcf, 0x8f, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x5c, 0xf5, 0x06, 0x95, 0x13, 0x00, 0x00, } diff --git a/components/engine/vendor/github.com/docker/swarmkit/api/objects.proto b/components/engine/vendor/github.com/docker/swarmkit/api/objects.proto index fd5aff73af..43bffca381 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/api/objects.proto +++ b/components/engine/vendor/github.com/docker/swarmkit/api/objects.proto @@ -59,8 +59,9 @@ message Node { // component, if the node is a manager. ManagerStatus manager_status = 6; + // DEPRECATED: Use lb_attachments to find the ingress network // The node attachment to the ingress network. - NetworkAttachment attachment = 7; + NetworkAttachment attachment = 7 [deprecated=true]; // Certificate is the TLS certificate issued for the node, if any. Certificate certificate = 8 [(gogoproto.nullable) = false]; @@ -75,6 +76,11 @@ message Node { // shows the privilege level that the CA would currently grant when // issuing or renewing the node's certificate. NodeRole role = 9; + + // Each node uses the network attachment to set up an endpoint on the + // node to be used for load balancing. Each overlay network, including + // ingress network, will have an NetworkAttachment. + repeated NetworkAttachment lb_attachments = 10; } message Service { @@ -257,7 +263,7 @@ message NetworkAttachment { // List of aliases by which a task is resolved in a network repeated string aliases = 3; - + // Map of all the driver attachment options for this network map<string, string> driver_attachment_opts = 4; } diff --git a/components/engine/vendor/github.com/docker/swarmkit/api/types.pb.go b/components/engine/vendor/github.com/docker/swarmkit/api/types.pb.go index 07e1fe0067..9ce04eb0b1 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/api/types.pb.go +++ b/components/engine/vendor/github.com/docker/swarmkit/api/types.pb.go @@ -219,6 +219,36 @@ func (x Mount_MountType) String() string { } func (Mount_MountType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 0} } +// Consistency indicates the tolerable level of file system consistency +type Mount_MountConsistency int32 + +const ( + MountConsistencyDefault Mount_MountConsistency = 0 + MountConsistencyFull Mount_MountConsistency = 1 + MountConsistencyCached Mount_MountConsistency = 2 + MountConsistencyDelegated Mount_MountConsistency = 3 +) + +var Mount_MountConsistency_name = map[int32]string{ + 0: "DEFAULT", + 1: "CONSISTENT", + 2: "CACHED", + 3: "DELEGATED", +} +var Mount_MountConsistency_value = map[string]int32{ + "DEFAULT": 0, + "CONSISTENT": 1, + "CACHED": 2, + "DELEGATED": 3, +} + +func (x Mount_MountConsistency) String() string { + return proto.EnumName(Mount_MountConsistency_name, int32(x)) +} +func (Mount_MountConsistency) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{16, 1} +} + type Mount_BindOptions_MountPropagation int32 const ( @@ -871,7 +901,8 @@ type Mount struct { // Target path in container Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` // ReadOnly should be set to true if the mount should not be writable.
- ReadOnly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"` + ReadOnly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"` + Consistency Mount_MountConsistency `protobuf:"varint,8,opt,name=consistency,proto3,enum=docker.swarmkit.v1.Mount_MountConsistency" json:"consistency,omitempty"` // BindOptions configures properties of a bind mount type. // // For mounts of type bind, the source must be an absolute host path. @@ -2127,6 +2158,7 @@ func init() { proto.RegisterEnum("docker.swarmkit.v1.RaftMemberStatus_Reachability", RaftMemberStatus_Reachability_name, RaftMemberStatus_Reachability_value) proto.RegisterEnum("docker.swarmkit.v1.NodeStatus_State", NodeStatus_State_name, NodeStatus_State_value) proto.RegisterEnum("docker.swarmkit.v1.Mount_MountType", Mount_MountType_name, Mount_MountType_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_MountConsistency", Mount_MountConsistency_name, Mount_MountConsistency_value) proto.RegisterEnum("docker.swarmkit.v1.Mount_BindOptions_MountPropagation", Mount_BindOptions_MountPropagation_name, Mount_BindOptions_MountPropagation_value) proto.RegisterEnum("docker.swarmkit.v1.RestartPolicy_RestartCondition", RestartPolicy_RestartCondition_name, RestartPolicy_RestartCondition_value) proto.RegisterEnum("docker.swarmkit.v1.UpdateConfig_FailureAction", UpdateConfig_FailureAction_name, UpdateConfig_FailureAction_value) @@ -4160,6 +4192,11 @@ func (m *Mount) MarshalTo(dAtA []byte) (int, error) { } i += n12 } + if m.Consistency != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Consistency)) + } return i, nil } @@ -6398,6 +6435,9 @@ func (m *Mount) Size() (n int) { l = m.TmpfsOptions.Size() n += 1 + l + sovTypes(uint64(l)) } + if m.Consistency != 0 { + n += 1 + sovTypes(uint64(m.Consistency)) + } return n } @@ -7498,6 +7538,7 @@ func (this *Mount) String() string { `BindOptions:` + strings.Replace(fmt.Sprintf("%v", this.BindOptions), "Mount_BindOptions", "Mount_BindOptions", 1) + `,`, `VolumeOptions:` + strings.Replace(fmt.Sprintf("%v", this.VolumeOptions), "Mount_VolumeOptions", "Mount_VolumeOptions", 1) + `,`, `TmpfsOptions:` + strings.Replace(fmt.Sprintf("%v", this.TmpfsOptions), "Mount_TmpfsOptions", "Mount_TmpfsOptions", 1) + `,`, + `Consistency:` + fmt.Sprintf("%v", this.Consistency) + `,`, `}`, }, "") return s @@ -10464,6 +10505,25 @@ func (m *Mount) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Consistency", wireType) + } + m.Consistency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Consistency |= (Mount_MountConsistency(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -16964,312 +17024,319 @@ var ( func init() { proto.RegisterFile("github.com/docker/swarmkit/api/types.proto", fileDescriptorTypes) } var fileDescriptorTypes = []byte{ - // 4905 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x7a, 0x4d, 0x6c, 0x24, 0x49, - 0x56, 0xbf, 0xeb, 0xd3, 0x55, 0xaf, 0xca, 0x76, 0x3a, 0xda, 0xdb, 0xe3, 0xae, 0xed, 0xb1, 0x3d, - 0x39, 0xd3, 0x3b, 0x33, 0xbd, 0xf3, 0xaf, 0xfe, 0x9a, 0x19, 0xf5, 0xcc, 0xfc, 0xe7, 0xa3, 0xbe, - 0xdc, 0xae, 0x6d, 0xbb, 0xaa, 0x14, 0x55, 0xee, 0xde, 0x41, 0x82, 0x54, 0x3a, 0x33, 0x5c, 0xce, - 0x71, 
0x56, 0x46, 0x91, 0x99, 0x65, 0x77, 0xb1, 0x20, 0x5a, 0x1c, 0x00, 0xf9, 0x04, 0xb7, 0x45, - 0xc8, 0x5c, 0xe0, 0x84, 0x90, 0x38, 0x80, 0x84, 0xe0, 0x34, 0x48, 0x1c, 0xe6, 0xc6, 0x02, 0x12, - 0x5a, 0x81, 0x64, 0x18, 0x1f, 0xb8, 0x21, 0xb8, 0xac, 0xb8, 0x80, 0x84, 0xe2, 0x23, 0xb3, 0xb2, - 0xaa, 0xd3, 0x76, 0x0f, 0xb3, 0x17, 0x3b, 0xe3, 0xbd, 0xdf, 0x7b, 0xf1, 0xe2, 0x45, 0xc4, 0x8b, - 0xf7, 0x22, 0x0a, 0x6e, 0xf7, 0x2d, 0xff, 0x60, 0xb4, 0x57, 0x36, 0xe8, 0xe0, 0x8e, 0x49, 0x8d, - 0x43, 0xe2, 0xde, 0xf1, 0x8e, 0x75, 0x77, 0x70, 0x68, 0xf9, 0x77, 0xf4, 0xa1, 0x75, 0xc7, 0x1f, - 0x0f, 0x89, 0x57, 0x1e, 0xba, 0xd4, 0xa7, 0x08, 0x09, 0x40, 0x39, 0x00, 0x94, 0x8f, 0xee, 0x95, - 0xd6, 0xfb, 0x94, 0xf6, 0x6d, 0x72, 0x87, 0x23, 0xf6, 0x46, 0xfb, 0x77, 0x7c, 0x6b, 0x40, 0x3c, - 0x5f, 0x1f, 0x0c, 0x85, 0x50, 0x69, 0x6d, 0x16, 0x60, 0x8e, 0x5c, 0xdd, 0xb7, 0xa8, 0x23, 0xf9, - 0x2b, 0x7d, 0xda, 0xa7, 0xfc, 0xf3, 0x0e, 0xfb, 0x12, 0x54, 0x75, 0x1d, 0xe6, 0x9f, 0x10, 0xd7, - 0xb3, 0xa8, 0x83, 0x56, 0x20, 0x63, 0x39, 0x26, 0x79, 0xb6, 0x9a, 0xd8, 0x48, 0xbc, 0x95, 0xc6, - 0xa2, 0xa1, 0xde, 0x05, 0x68, 0xb2, 0x8f, 0x86, 0xe3, 0xbb, 0x63, 0xa4, 0x40, 0xea, 0x90, 0x8c, - 0x39, 0x22, 0x8f, 0xd9, 0x27, 0xa3, 0x1c, 0xe9, 0xf6, 0x6a, 0x52, 0x50, 0x8e, 0x74, 0x5b, 0xfd, - 0x3a, 0x01, 0x85, 0x8a, 0xe3, 0x50, 0x9f, 0xf7, 0xee, 0x21, 0x04, 0x69, 0x47, 0x1f, 0x10, 0x29, - 0xc4, 0xbf, 0x51, 0x0d, 0xb2, 0xb6, 0xbe, 0x47, 0x6c, 0x6f, 0x35, 0xb9, 0x91, 0x7a, 0xab, 0x70, - 0xff, 0xfb, 0xe5, 0x17, 0x87, 0x5c, 0x8e, 0x28, 0x29, 0x6f, 0x73, 0x34, 0x37, 0x02, 0x4b, 0x51, - 0xf4, 0x09, 0xcc, 0x5b, 0x8e, 0x69, 0x19, 0xc4, 0x5b, 0x4d, 0x73, 0x2d, 0x6b, 0x71, 0x5a, 0x26, - 0xd6, 0x57, 0xd3, 0x5f, 0x9d, 0xad, 0xcf, 0xe1, 0x40, 0xa8, 0xf4, 0x01, 0x14, 0x22, 0x6a, 0x63, - 0xc6, 0xb6, 0x02, 0x99, 0x23, 0xdd, 0x1e, 0x11, 0x39, 0x3a, 0xd1, 0xf8, 0x30, 0xf9, 0x30, 0xa1, - 0x7e, 0x06, 0x2b, 0x2d, 0x7d, 0x40, 0xcc, 0x47, 0xc4, 0x21, 0xae, 0x65, 0x60, 0xe2, 0xd1, 0x91, - 0x6b, 0x10, 0x36, 0xd6, 0x43, 0xcb, 0x31, 0x83, 0xb1, 0xb2, 0xef, 0x78, 0x2d, 0x6a, 0x0d, 0x5e, - 0xa9, 0x5b, 0x9e, 0xe1, 0x12, 0x9f, 0x7c, 0x63, 0x25, 0xa9, 0x40, 0xc9, 0x59, 0x02, 0x96, 0x66, - 0xa5, 0x7f, 0x01, 0xae, 0x31, 0x17, 0x9b, 0x9a, 0x2b, 0x29, 0x9a, 0x37, 0x24, 0x06, 0x57, 0x56, - 0xb8, 0xff, 0x56, 0x9c, 0x87, 0xe2, 0x46, 0xb2, 0x35, 0x87, 0x97, 0xb9, 0x9a, 0x80, 0xd0, 0x1d, - 0x12, 0x03, 0x19, 0x70, 0xdd, 0x94, 0x46, 0xcf, 0xa8, 0x4f, 0x72, 0xf5, 0xb1, 0xd3, 0x78, 0xc1, - 0x30, 0xb7, 0xe6, 0xf0, 0x4a, 0xa0, 0x2c, 0xda, 0x49, 0x15, 0x20, 0x17, 0xe8, 0x56, 0x7f, 0x9c, - 0x80, 0x7c, 0xc0, 0xf4, 0xd0, 0xdb, 0x90, 0x77, 0x74, 0x87, 0x6a, 0xc6, 0x70, 0xe4, 0xf1, 0x01, - 0xa5, 0xaa, 0xc5, 0xf3, 0xb3, 0xf5, 0x5c, 0x4b, 0x77, 0x68, 0xad, 0xb3, 0xeb, 0xe1, 0x1c, 0x63, - 0xd7, 0x86, 0x23, 0x0f, 0xbd, 0x06, 0xc5, 0x01, 0x19, 0x50, 0x77, 0xac, 0xed, 0x8d, 0x7d, 0xe2, - 0x49, 0xb7, 0x15, 0x04, 0xad, 0xca, 0x48, 0xe8, 0x63, 0x98, 0xef, 0x0b, 0x93, 0x56, 0x53, 0x7c, - 0xf9, 0xbc, 0x1e, 0x67, 0xfd, 0x8c, 0xd5, 0x38, 0x90, 0x51, 0x7f, 0x27, 0x01, 0x2b, 0x21, 0x95, - 0xfc, 0xf2, 0xc8, 0x72, 0xc9, 0x80, 0x38, 0xbe, 0x87, 0xde, 0x83, 0xac, 0x6d, 0x0d, 0x2c, 0xdf, - 0x93, 0x3e, 0x7f, 0x35, 0x4e, 0x6d, 0x38, 0x28, 0x2c, 0xc1, 0xa8, 0x02, 0x45, 0x97, 0x78, 0xc4, - 0x3d, 0x12, 0x2b, 0x5e, 0x7a, 0xf4, 0x0a, 0xe1, 0x29, 0x11, 0x75, 0x13, 0x72, 0x1d, 0x5b, 0xf7, - 0xf7, 0xa9, 0x3b, 0x40, 0x2a, 0x14, 0x75, 0xd7, 0x38, 0xb0, 0x7c, 0x62, 0xf8, 0x23, 0x37, 0xd8, - 0x7d, 0x53, 0x34, 0x74, 0x1d, 0x92, 0x54, 0x74, 0x94, 0xaf, 0x66, 0xcf, 0xcf, 0xd6, 0x93, 0xed, - 0x2e, 0x4e, 0x52, 0x4f, 0xfd, 
0x08, 0x96, 0x3b, 0xf6, 0xa8, 0x6f, 0x39, 0x75, 0xe2, 0x19, 0xae, - 0x35, 0x64, 0xda, 0xd9, 0xaa, 0x64, 0x31, 0x2a, 0x58, 0x95, 0xec, 0x3b, 0xdc, 0xda, 0xc9, 0xc9, - 0xd6, 0x56, 0x7f, 0x2b, 0x09, 0xcb, 0x0d, 0xa7, 0x6f, 0x39, 0x24, 0x2a, 0x7d, 0x0b, 0x16, 0x09, - 0x27, 0x6a, 0x47, 0x22, 0xdc, 0x48, 0x3d, 0x0b, 0x82, 0x1a, 0xc4, 0xa0, 0xe6, 0x4c, 0x5c, 0xb8, - 0x17, 0x37, 0xfc, 0x17, 0xb4, 0xc7, 0x46, 0x87, 0x06, 0xcc, 0x0f, 0xf9, 0x20, 0x3c, 0x39, 0xbd, - 0xb7, 0xe2, 0x74, 0xbd, 0x30, 0xce, 0x20, 0x48, 0x48, 0xd9, 0x6f, 0x13, 0x24, 0xfe, 0x24, 0x09, - 0x4b, 0x2d, 0x6a, 0x4e, 0xf9, 0xa1, 0x04, 0xb9, 0x03, 0xea, 0xf9, 0x91, 0x80, 0x18, 0xb6, 0xd1, - 0x43, 0xc8, 0x0d, 0xe5, 0xf4, 0xc9, 0xd9, 0xbf, 0x19, 0x6f, 0xb2, 0xc0, 0xe0, 0x10, 0x8d, 0x3e, - 0x82, 0x7c, 0xb0, 0x65, 0xd8, 0x68, 0x5f, 0x62, 0xe1, 0x4c, 0xf0, 0xe8, 0x63, 0xc8, 0x8a, 0x49, - 0x58, 0x4d, 0x73, 0xc9, 0x5b, 0x2f, 0xe5, 0x73, 0x2c, 0x85, 0xd0, 0x23, 0xc8, 0xf9, 0xb6, 0xa7, - 0x59, 0xce, 0x3e, 0x5d, 0xcd, 0x70, 0x05, 0xeb, 0xb1, 0x41, 0x86, 0x9a, 0xa4, 0xb7, 0xdd, 0x6d, - 0x3a, 0xfb, 0xb4, 0x5a, 0x38, 0x3f, 0x5b, 0x9f, 0x97, 0x0d, 0x3c, 0xef, 0xdb, 0x1e, 0xfb, 0x50, - 0x7f, 0x37, 0x01, 0x85, 0x08, 0x0a, 0xbd, 0x0a, 0xe0, 0xbb, 0x23, 0xcf, 0xd7, 0x5c, 0x4a, 0x7d, - 0xee, 0xac, 0x22, 0xce, 0x73, 0x0a, 0xa6, 0xd4, 0x47, 0x65, 0xb8, 0x66, 0x10, 0xd7, 0xd7, 0x2c, - 0xcf, 0x1b, 0x11, 0x57, 0xf3, 0x46, 0x7b, 0x5f, 0x10, 0xc3, 0xe7, 0x8e, 0x2b, 0xe2, 0x65, 0xc6, - 0x6a, 0x72, 0x4e, 0x57, 0x30, 0xd0, 0x03, 0xb8, 0x1e, 0xc5, 0x0f, 0x47, 0x7b, 0xb6, 0x65, 0x68, - 0x6c, 0x32, 0x53, 0x5c, 0xe4, 0xda, 0x44, 0xa4, 0xc3, 0x79, 0x8f, 0xc9, 0x58, 0xfd, 0x69, 0x02, - 0x14, 0xac, 0xef, 0xfb, 0x3b, 0x64, 0xb0, 0x47, 0xdc, 0xae, 0xaf, 0xfb, 0x23, 0x0f, 0x5d, 0x87, - 0xac, 0x4d, 0x74, 0x93, 0xb8, 0xdc, 0xa8, 0x1c, 0x96, 0x2d, 0xb4, 0xcb, 0x76, 0xb0, 0x6e, 0x1c, - 0xe8, 0x7b, 0x96, 0x6d, 0xf9, 0x63, 0x6e, 0xca, 0x62, 0xfc, 0x12, 0x9e, 0xd5, 0x59, 0xc6, 0x11, - 0x41, 0x3c, 0xa5, 0x06, 0xad, 0xc2, 0xfc, 0x80, 0x78, 0x9e, 0xde, 0x27, 0xdc, 0xd2, 0x3c, 0x0e, - 0x9a, 0xea, 0x47, 0x50, 0x8c, 0xca, 0xa1, 0x02, 0xcc, 0xef, 0xb6, 0x1e, 0xb7, 0xda, 0x4f, 0x5b, - 0xca, 0x1c, 0x5a, 0x82, 0xc2, 0x6e, 0x0b, 0x37, 0x2a, 0xb5, 0xad, 0x4a, 0x75, 0xbb, 0xa1, 0x24, - 0xd0, 0x02, 0xe4, 0x27, 0xcd, 0xa4, 0xfa, 0x67, 0x09, 0x00, 0xe6, 0x6e, 0x39, 0xa8, 0x0f, 0x21, - 0xe3, 0xf9, 0xba, 0x2f, 0x56, 0xe5, 0xe2, 0xfd, 0x37, 0x2e, 0x9a, 0x43, 0x69, 0x2f, 0xfb, 0x47, - 0xb0, 0x10, 0x89, 0x5a, 0x98, 0x9c, 0xb2, 0x90, 0x05, 0x08, 0xdd, 0x34, 0x5d, 0x69, 0x38, 0xff, - 0x56, 0x3f, 0x82, 0x0c, 0x97, 0x9e, 0x36, 0x37, 0x07, 0xe9, 0x3a, 0xfb, 0x4a, 0xa0, 0x3c, 0x64, - 0x70, 0xa3, 0x52, 0xff, 0x5c, 0x49, 0x22, 0x05, 0x8a, 0xf5, 0x66, 0xb7, 0xd6, 0x6e, 0xb5, 0x1a, - 0xb5, 0x5e, 0xa3, 0xae, 0xa4, 0xd4, 0x5b, 0x90, 0x69, 0x0e, 0x98, 0xe6, 0x9b, 0x6c, 0xc9, 0xef, - 0x13, 0x97, 0x38, 0x46, 0xb0, 0x93, 0x26, 0x04, 0xf5, 0x27, 0x79, 0xc8, 0xec, 0xd0, 0x91, 0xe3, - 0xa3, 0xfb, 0x91, 0xb0, 0xb5, 0x18, 0x9f, 0x21, 0x70, 0x60, 0xb9, 0x37, 0x1e, 0x12, 0x19, 0xd6, - 0xae, 0x43, 0x56, 0x6c, 0x0e, 0x39, 0x1c, 0xd9, 0x62, 0x74, 0x5f, 0x77, 0xfb, 0xc4, 0x97, 0xe3, - 0x91, 0x2d, 0xf4, 0x16, 0x3b, 0xb1, 0x74, 0x93, 0x3a, 0xf6, 0x98, 0xef, 0xa1, 0x9c, 0x38, 0x96, - 0x30, 0xd1, 0xcd, 0xb6, 0x63, 0x8f, 0x71, 0xc8, 0x45, 0x5b, 0x50, 0xdc, 0xb3, 0x1c, 0x53, 0xa3, - 0x43, 0x11, 0xe4, 0x33, 0x17, 0xef, 0x38, 0x61, 0x55, 0xd5, 0x72, 0xcc, 0xb6, 0x00, 0xe3, 0xc2, - 0xde, 0xa4, 0x81, 0x5a, 0xb0, 0x78, 0x44, 0xed, 0xd1, 0x80, 0x84, 0xba, 0xb2, 0x5c, 0xd7, 0x9b, - 0x17, 0xeb, 0x7a, 0xc2, 0xf1, 0x81, 0xb6, 0x85, 0xa3, 
0x68, 0x13, 0x3d, 0x86, 0x05, 0x7f, 0x30, - 0xdc, 0xf7, 0x42, 0x75, 0xf3, 0x5c, 0xdd, 0xf7, 0x2e, 0x71, 0x18, 0x83, 0x07, 0xda, 0x8a, 0x7e, - 0xa4, 0x55, 0xfa, 0x8d, 0x14, 0x14, 0x22, 0x96, 0xa3, 0x2e, 0x14, 0x86, 0x2e, 0x1d, 0xea, 0x7d, - 0x7e, 0x50, 0xc9, 0xb9, 0xb8, 0xf7, 0x52, 0xa3, 0x2e, 0x77, 0x26, 0x82, 0x38, 0xaa, 0x45, 0x3d, - 0x4d, 0x42, 0x21, 0xc2, 0x44, 0xb7, 0x21, 0x87, 0x3b, 0xb8, 0xf9, 0xa4, 0xd2, 0x6b, 0x28, 0x73, - 0xa5, 0x9b, 0x27, 0xa7, 0x1b, 0xab, 0x5c, 0x5b, 0x54, 0x41, 0xc7, 0xb5, 0x8e, 0xd8, 0xd2, 0x7b, - 0x0b, 0xe6, 0x03, 0x68, 0xa2, 0xf4, 0xdd, 0x93, 0xd3, 0x8d, 0x57, 0x66, 0xa1, 0x11, 0x24, 0xee, - 0x6e, 0x55, 0x70, 0xa3, 0xae, 0x24, 0xe3, 0x91, 0xb8, 0x7b, 0xa0, 0xbb, 0xc4, 0x44, 0xdf, 0x83, - 0xac, 0x04, 0xa6, 0x4a, 0xa5, 0x93, 0xd3, 0x8d, 0xeb, 0xb3, 0xc0, 0x09, 0x0e, 0x77, 0xb7, 0x2b, - 0x4f, 0x1a, 0x4a, 0x3a, 0x1e, 0x87, 0xbb, 0xb6, 0x7e, 0x44, 0xd0, 0x1b, 0x90, 0x11, 0xb0, 0x4c, - 0xe9, 0xc6, 0xc9, 0xe9, 0xc6, 0x77, 0x5e, 0x50, 0xc7, 0x50, 0xa5, 0xd5, 0xdf, 0xfe, 0xc3, 0xb5, - 0xb9, 0xbf, 0xfa, 0xa3, 0x35, 0x65, 0x96, 0x5d, 0xfa, 0xef, 0x04, 0x2c, 0x4c, 0x4d, 0x39, 0x52, - 0x21, 0xeb, 0x50, 0x83, 0x0e, 0xc5, 0xf9, 0x95, 0xab, 0xc2, 0xf9, 0xd9, 0x7a, 0xb6, 0x45, 0x6b, - 0x74, 0x38, 0xc6, 0x92, 0x83, 0x1e, 0xcf, 0x9c, 0xc0, 0x0f, 0x5e, 0x72, 0x3d, 0xc5, 0x9e, 0xc1, - 0x9f, 0xc2, 0x82, 0xe9, 0x5a, 0x47, 0xc4, 0xd5, 0x0c, 0xea, 0xec, 0x5b, 0x7d, 0x79, 0x36, 0x95, - 0x62, 0xd3, 0x44, 0x0e, 0xc4, 0x45, 0x21, 0x50, 0xe3, 0xf8, 0x6f, 0x71, 0xfa, 0x96, 0x9e, 0x40, - 0x31, 0xba, 0x42, 0xd9, 0x71, 0xe2, 0x59, 0xbf, 0x42, 0x64, 0x3e, 0xc8, 0xb3, 0x47, 0x9c, 0x67, - 0x14, 0x91, 0x0d, 0xbe, 0x09, 0xe9, 0x01, 0x35, 0x85, 0x9e, 0x85, 0xea, 0x35, 0x96, 0x04, 0xfc, - 0xd3, 0xd9, 0x7a, 0x81, 0x7a, 0xe5, 0x4d, 0xcb, 0x26, 0x3b, 0xd4, 0x24, 0x98, 0x03, 0xd4, 0x23, - 0x48, 0xb3, 0x50, 0x81, 0xbe, 0x0b, 0xe9, 0x6a, 0xb3, 0x55, 0x57, 0xe6, 0x4a, 0xcb, 0x27, 0xa7, - 0x1b, 0x0b, 0xdc, 0x25, 0x8c, 0xc1, 0xd6, 0x2e, 0x5a, 0x87, 0xec, 0x93, 0xf6, 0xf6, 0xee, 0x0e, - 0x5b, 0x5e, 0xd7, 0x4e, 0x4e, 0x37, 0x96, 0x42, 0xb6, 0x70, 0x1a, 0x7a, 0x15, 0x32, 0xbd, 0x9d, - 0xce, 0x66, 0x57, 0x49, 0x96, 0xd0, 0xc9, 0xe9, 0xc6, 0x62, 0xc8, 0xe7, 0x36, 0x97, 0x96, 0xe5, - 0xac, 0xe6, 0x43, 0xba, 0xfa, 0xb3, 0x24, 0x2c, 0x60, 0x56, 0xf1, 0xb9, 0x7e, 0x87, 0xda, 0x96, - 0x31, 0x46, 0x1d, 0xc8, 0x1b, 0xd4, 0x31, 0xad, 0xc8, 0x9e, 0xba, 0x7f, 0xc1, 0xa9, 0x3f, 0x91, - 0x0a, 0x5a, 0xb5, 0x40, 0x12, 0x4f, 0x94, 0xa0, 0x3b, 0x90, 0x31, 0x89, 0xad, 0x8f, 0x65, 0xfa, - 0x71, 0xa3, 0x2c, 0x6a, 0xca, 0x72, 0x50, 0x53, 0x96, 0xeb, 0xb2, 0xa6, 0xc4, 0x02, 0xc7, 0xd3, - 0x6c, 0xfd, 0x99, 0xa6, 0xfb, 0x3e, 0x19, 0x0c, 0x7d, 0x91, 0x7b, 0xa4, 0x71, 0x61, 0xa0, 0x3f, - 0xab, 0x48, 0x12, 0xba, 0x07, 0xd9, 0x63, 0xcb, 0x31, 0xe9, 0xb1, 0x4c, 0x2f, 0x2e, 0x51, 0x2a, - 0x81, 0xea, 0x09, 0x3b, 0x75, 0x67, 0xcc, 0x64, 0xfe, 0x6e, 0xb5, 0x5b, 0x8d, 0xc0, 0xdf, 0x92, - 0xdf, 0x76, 0x5a, 0xd4, 0x61, 0x7b, 0x05, 0xda, 0x2d, 0x6d, 0xb3, 0xd2, 0xdc, 0xde, 0xc5, 0xcc, - 0xe7, 0x2b, 0x27, 0xa7, 0x1b, 0x4a, 0x08, 0xd9, 0xd4, 0x2d, 0x9b, 0xe5, 0xbb, 0x37, 0x20, 0x55, - 0x69, 0x7d, 0xae, 0x24, 0x4b, 0xca, 0xc9, 0xe9, 0x46, 0x31, 0x64, 0x57, 0x9c, 0xf1, 0x64, 0x1b, - 0xcd, 0xf6, 0xab, 0xfe, 0x6d, 0x0a, 0x8a, 0xbb, 0x43, 0x53, 0xf7, 0x89, 0x58, 0x93, 0x68, 0x03, - 0x0a, 0x43, 0xdd, 0xd5, 0x6d, 0x9b, 0xd8, 0x96, 0x37, 0x90, 0xd5, 0x72, 0x94, 0x84, 0x3e, 0x78, - 0x59, 0x37, 0x56, 0x73, 0x6c, 0x9d, 0xfd, 0xf8, 0x5f, 0xd6, 0x13, 0x81, 0x43, 0x77, 0x61, 0x71, - 0x5f, 0x58, 0xab, 0xe9, 0x06, 0x9f, 0xd8, 0x14, 0x9f, 0xd8, 0x72, 0xdc, 0xc4, 
0x46, 0xcd, 0x2a, - 0xcb, 0x41, 0x56, 0xb8, 0x14, 0x5e, 0xd8, 0x8f, 0x36, 0xd1, 0x03, 0x98, 0x1f, 0x50, 0xc7, 0xf2, - 0xa9, 0x7b, 0xf5, 0x2c, 0x04, 0x48, 0x74, 0x1b, 0x96, 0xd9, 0xe4, 0x06, 0xf6, 0x70, 0x36, 0x3f, - 0xb1, 0x92, 0x78, 0x69, 0xa0, 0x3f, 0x93, 0x1d, 0x62, 0x46, 0x46, 0x55, 0xc8, 0x50, 0x97, 0xa5, - 0x44, 0x59, 0x6e, 0xee, 0x3b, 0x57, 0x9a, 0x2b, 0x1a, 0x6d, 0x26, 0x83, 0x85, 0xa8, 0xfa, 0x3e, - 0x2c, 0x4c, 0x0d, 0x82, 0x65, 0x02, 0x9d, 0xca, 0x6e, 0xb7, 0xa1, 0xcc, 0xa1, 0x22, 0xe4, 0x6a, - 0xed, 0x56, 0xaf, 0xd9, 0xda, 0x65, 0xa9, 0x4c, 0x11, 0x72, 0xb8, 0xbd, 0xbd, 0x5d, 0xad, 0xd4, - 0x1e, 0x2b, 0x49, 0xb5, 0x0c, 0x85, 0x88, 0x36, 0xb4, 0x08, 0xd0, 0xed, 0xb5, 0x3b, 0xda, 0x66, - 0x13, 0x77, 0x7b, 0x22, 0x11, 0xea, 0xf6, 0x2a, 0xb8, 0x27, 0x09, 0x09, 0xf5, 0x3f, 0x92, 0xc1, - 0x8c, 0xca, 0xdc, 0xa7, 0x3a, 0x9d, 0xfb, 0x5c, 0x62, 0xbc, 0xcc, 0x7e, 0x26, 0x8d, 0x30, 0x07, - 0xfa, 0x00, 0x80, 0x2f, 0x1c, 0x62, 0x6a, 0xba, 0x2f, 0x27, 0xbe, 0xf4, 0x82, 0x93, 0x7b, 0xc1, - 0xa5, 0x0d, 0xce, 0x4b, 0x74, 0xc5, 0x47, 0x1f, 0x43, 0xd1, 0xa0, 0x83, 0xa1, 0x4d, 0xa4, 0x70, - 0xea, 0x4a, 0xe1, 0x42, 0x88, 0xaf, 0xf8, 0xd1, 0xec, 0x2b, 0x3d, 0x9d, 0x1f, 0xfe, 0x66, 0x22, - 0xf0, 0x4c, 0x4c, 0xc2, 0x55, 0x84, 0xdc, 0x6e, 0xa7, 0x5e, 0xe9, 0x35, 0x5b, 0x8f, 0x94, 0x04, - 0x02, 0xc8, 0x72, 0x57, 0xd7, 0x95, 0x24, 0x4b, 0x14, 0x6b, 0xed, 0x9d, 0xce, 0x76, 0x83, 0xa7, - 0x5c, 0x68, 0x05, 0x94, 0xc0, 0xd9, 0x1a, 0x77, 0x64, 0xa3, 0xae, 0xa4, 0xd1, 0x35, 0x58, 0x0a, - 0xa9, 0x52, 0x32, 0x83, 0xae, 0x03, 0x0a, 0x89, 0x13, 0x15, 0x59, 0xf5, 0xd7, 0x60, 0xa9, 0x46, - 0x1d, 0x5f, 0xb7, 0x9c, 0x30, 0x89, 0xbe, 0xcf, 0x06, 0x2d, 0x49, 0x9a, 0x25, 0x2f, 0x3b, 0xaa, - 0x4b, 0xe7, 0x67, 0xeb, 0x85, 0x10, 0xda, 0xac, 0xb3, 0x91, 0x06, 0x0d, 0x93, 0xed, 0xdf, 0xa1, - 0x65, 0x72, 0xe7, 0x66, 0xaa, 0xf3, 0xe7, 0x67, 0xeb, 0xa9, 0x4e, 0xb3, 0x8e, 0x19, 0x0d, 0x7d, - 0x17, 0xf2, 0xe4, 0x99, 0xe5, 0x6b, 0x06, 0x8b, 0xe1, 0xcc, 0x81, 0x19, 0x9c, 0x63, 0x84, 0x1a, - 0x0b, 0xd9, 0x55, 0x80, 0x0e, 0x75, 0x7d, 0xd9, 0xf3, 0xbb, 0x90, 0x19, 0x52, 0x97, 0x97, 0xe7, - 0x17, 0x5e, 0x1a, 0x31, 0xb8, 0x58, 0xa8, 0x58, 0x80, 0xd5, 0xdf, 0x4b, 0x01, 0xf4, 0x74, 0xef, - 0x50, 0x2a, 0x79, 0x08, 0xf9, 0xf0, 0x02, 0x4e, 0xd6, 0xf9, 0x97, 0xce, 0x76, 0x08, 0x46, 0x0f, - 0x82, 0xc5, 0x26, 0xca, 0x83, 0xd8, 0x3a, 0x2d, 0xe8, 0x28, 0x2e, 0xc3, 0x9e, 0xae, 0x01, 0xd8, - 0x91, 0x48, 0x5c, 0x57, 0xce, 0x3c, 0xfb, 0x44, 0x35, 0x7e, 0x2c, 0x08, 0xa7, 0xc9, 0x04, 0x33, - 0xf6, 0x66, 0x63, 0x66, 0x46, 0xb6, 0xe6, 0xf0, 0x44, 0x0e, 0x7d, 0x0a, 0x05, 0x36, 0x6e, 0xcd, - 0xe3, 0x3c, 0x99, 0x5b, 0x5e, 0xe8, 0x2a, 0xa1, 0x01, 0xc3, 0x70, 0xe2, 0xe5, 0x57, 0x01, 0xf4, - 0xe1, 0xd0, 0xb6, 0x88, 0xa9, 0xed, 0x8d, 0x79, 0x32, 0x99, 0xc7, 0x79, 0x49, 0xa9, 0x8e, 0xd9, - 0x76, 0x09, 0xd8, 0xba, 0xbf, 0x9a, 0xbb, 0xda, 0x81, 0x12, 0x5d, 0xf1, 0xab, 0x0a, 0x2c, 0xba, - 0x23, 0x87, 0x39, 0x54, 0x5a, 0xa7, 0xfe, 0x69, 0x12, 0x5e, 0x69, 0x11, 0xff, 0x98, 0xba, 0x87, - 0x15, 0xdf, 0xd7, 0x8d, 0x83, 0x01, 0x71, 0xe4, 0xf4, 0x45, 0x72, 0xf6, 0xc4, 0x54, 0xce, 0xbe, - 0x0a, 0xf3, 0xba, 0x6d, 0xe9, 0x1e, 0x11, 0x89, 0x4e, 0x1e, 0x07, 0x4d, 0x56, 0x59, 0xb0, 0x3a, - 0x85, 0x78, 0x1e, 0x11, 0x57, 0x07, 0xcc, 0xf0, 0x80, 0x80, 0x7e, 0x04, 0xd7, 0x65, 0x4a, 0xa3, - 0x87, 0x5d, 0xb1, 0x9c, 0x39, 0xb8, 0x83, 0x6c, 0xc4, 0x16, 0x4e, 0xf1, 0xc6, 0xc9, 0x9c, 0x67, - 0x42, 0x6e, 0x0f, 0x7d, 0x99, 0x41, 0xad, 0x98, 0x31, 0xac, 0xd2, 0x23, 0xb8, 0x71, 0xa1, 0xc8, - 0x37, 0xba, 0x9a, 0xf8, 0x87, 0x24, 0x40, 0xb3, 0x53, 0xd9, 0x91, 0x4e, 0xaa, 0x43, 0x76, 0x5f, - 0x1f, 
0x58, 0xf6, 0xf8, 0xb2, 0x08, 0x38, 0xc1, 0x97, 0x2b, 0xc2, 0x1d, 0x9b, 0x5c, 0x06, 0x4b, - 0x59, 0x5e, 0x36, 0x8d, 0xf6, 0x1c, 0xe2, 0x87, 0x65, 0x13, 0x6f, 0x31, 0x33, 0x5c, 0xdd, 0x09, - 0x97, 0xae, 0x68, 0xb0, 0x09, 0xe8, 0xeb, 0x3e, 0x39, 0xd6, 0xc7, 0x41, 0xd8, 0x92, 0x4d, 0xb4, - 0xc5, 0x2f, 0x00, 0x89, 0x7b, 0x44, 0xcc, 0xd5, 0x0c, 0x77, 0xea, 0x55, 0xf6, 0x60, 0x09, 0x17, - 0xbe, 0x0b, 0xa5, 0x4b, 0x1f, 0xf1, 0x94, 0x69, 0xc2, 0xfa, 0x46, 0x3e, 0xba, 0x0b, 0x0b, 0x53, - 0xe3, 0x7c, 0xa1, 0x5e, 0x6d, 0x76, 0x9e, 0xbc, 0xab, 0xa4, 0xe5, 0xd7, 0xfb, 0x4a, 0x56, 0xfd, - 0xe3, 0x94, 0x08, 0x34, 0xd2, 0xab, 0xf1, 0x17, 0xdf, 0x39, 0xbe, 0xba, 0x0d, 0x6a, 0xcb, 0x00, - 0xf0, 0xe6, 0xe5, 0xf1, 0x87, 0xd5, 0x3f, 0x1c, 0x8e, 0x43, 0x41, 0xb4, 0x0e, 0x05, 0xb1, 0x8a, - 0x35, 0xb6, 0xe1, 0xb8, 0x5b, 0x17, 0x30, 0x08, 0x12, 0x93, 0x44, 0xb7, 0x60, 0x91, 0xdf, 0x6f, - 0x78, 0x07, 0xc4, 0x14, 0x98, 0x34, 0xc7, 0x2c, 0x84, 0x54, 0x0e, 0xdb, 0x81, 0xa2, 0x24, 0x68, - 0x3c, 0xf7, 0xcd, 0x70, 0x83, 0x6e, 0x5f, 0x65, 0x90, 0x10, 0xe1, 0x29, 0x71, 0x61, 0x38, 0x69, - 0xa8, 0x75, 0xc8, 0x05, 0xc6, 0xa2, 0x55, 0x48, 0xf5, 0x6a, 0x1d, 0x65, 0xae, 0xb4, 0x74, 0x72, - 0xba, 0x51, 0x08, 0xc8, 0xbd, 0x5a, 0x87, 0x71, 0x76, 0xeb, 0x1d, 0x25, 0x31, 0xcd, 0xd9, 0xad, - 0x77, 0x4a, 0x69, 0x96, 0x83, 0xa9, 0xfb, 0x50, 0x88, 0xf4, 0x80, 0x5e, 0x87, 0xf9, 0x66, 0xeb, - 0x11, 0x6e, 0x74, 0xbb, 0xca, 0x5c, 0xe9, 0xfa, 0xc9, 0xe9, 0x06, 0x8a, 0x70, 0x9b, 0x4e, 0x9f, - 0xcd, 0x0f, 0x7a, 0x15, 0xd2, 0x5b, 0x6d, 0x76, 0xb6, 0x8b, 0x64, 0x3b, 0x82, 0xd8, 0xa2, 0x9e, - 0x5f, 0xba, 0x26, 0x93, 0xbb, 0xa8, 0x62, 0xf5, 0xf7, 0x13, 0x90, 0x15, 0x9b, 0x29, 0x76, 0xa2, - 0x2a, 0x30, 0x1f, 0x54, 0xc2, 0xa2, 0x10, 0x7a, 0xf3, 0xe2, 0xa2, 0xa5, 0x2c, 0x6b, 0x0c, 0xb1, - 0xfc, 0x02, 0xb9, 0xd2, 0x87, 0x50, 0x8c, 0x32, 0xbe, 0xd1, 0xe2, 0xfb, 0x11, 0x14, 0xd8, 0xfa, - 0x0e, 0x8a, 0x97, 0xfb, 0x90, 0x15, 0x01, 0x21, 0x3c, 0x6b, 0x2e, 0xae, 0xa0, 0x24, 0x12, 0x3d, - 0x84, 0x79, 0x51, 0x75, 0x05, 0x17, 0xa0, 0x6b, 0x97, 0xef, 0x22, 0x1c, 0xc0, 0xd5, 0x4f, 0x21, - 0xdd, 0x21, 0xc4, 0x65, 0xbe, 0x77, 0xa8, 0x49, 0x26, 0xc7, 0xb3, 0x2c, 0x18, 0x4d, 0xd2, 0xac, - 0xb3, 0x82, 0xd1, 0x24, 0x4d, 0x33, 0xbc, 0xe2, 0x49, 0x46, 0xae, 0x78, 0x7a, 0x50, 0x7c, 0x4a, - 0xac, 0xfe, 0x81, 0x4f, 0x4c, 0xae, 0xe8, 0x1d, 0x48, 0x0f, 0x49, 0x68, 0xfc, 0x6a, 0xec, 0x02, - 0x23, 0xc4, 0xc5, 0x1c, 0xc5, 0xe2, 0xc8, 0x31, 0x97, 0x96, 0xb7, 0xf6, 0xb2, 0xa5, 0xfe, 0x7d, - 0x12, 0x16, 0x9b, 0x9e, 0x37, 0xd2, 0x1d, 0x23, 0xc8, 0xdc, 0x3e, 0x99, 0xce, 0xdc, 0x62, 0x9f, - 0x37, 0xa6, 0x45, 0xa6, 0x6f, 0xae, 0xe4, 0xe9, 0x99, 0x0c, 0x4f, 0x4f, 0xf5, 0xdf, 0x13, 0xc1, - 0xf5, 0xd4, 0xad, 0xc8, 0x76, 0x2f, 0xad, 0x9e, 0x9c, 0x6e, 0xac, 0x44, 0x35, 0x91, 0x5d, 0xe7, - 0xd0, 0xa1, 0xc7, 0x0e, 0x7a, 0x0d, 0x32, 0xb8, 0xd1, 0x6a, 0x3c, 0x55, 0x12, 0x62, 0x79, 0x4e, - 0x81, 0x30, 0x71, 0xc8, 0x31, 0xd3, 0xd4, 0x69, 0xb4, 0xea, 0x2c, 0xd3, 0x4a, 0xc6, 0x68, 0xea, - 0x10, 0xc7, 0xb4, 0x9c, 0x3e, 0x7a, 0x1d, 0xb2, 0xcd, 0x6e, 0x77, 0x97, 0x5f, 0x20, 0xbc, 0x72, - 0x72, 0xba, 0x71, 0x6d, 0x0a, 0xc5, 0xaf, 0x26, 0x4d, 0x06, 0x62, 0x65, 0x0e, 0xcb, 0xc1, 0x62, - 0x40, 0x2c, 0x7f, 0x16, 0x20, 0xdc, 0xee, 0x55, 0x7a, 0x0d, 0x25, 0x13, 0x03, 0xc2, 0x94, 0xfd, - 0x95, 0xdb, 0xed, 0x9f, 0x93, 0xa0, 0x54, 0x0c, 0x83, 0x0c, 0x7d, 0xc6, 0x97, 0x95, 0x65, 0x0f, - 0x72, 0x43, 0xf6, 0x65, 0x91, 0x20, 0x4b, 0x7a, 0x18, 0xfb, 0x40, 0x37, 0x23, 0x57, 0xc6, 0xd4, - 0x26, 0x15, 0x73, 0x60, 0x79, 0x9e, 0x45, 0x1d, 0x41, 0xc3, 0xa1, 0xa6, 0xd2, 0x7f, 0x26, 0xe0, - 0x5a, 0x0c, 0x02, 0xdd, 0x85, 
0xb4, 0x4b, 0xed, 0x60, 0x0e, 0x6f, 0x5e, 0x74, 0xf3, 0xc8, 0x44, - 0x31, 0x47, 0xa2, 0x35, 0x00, 0x7d, 0xe4, 0x53, 0x9d, 0xf7, 0xcf, 0x67, 0x2f, 0x87, 0x23, 0x14, - 0xf4, 0x14, 0xb2, 0x1e, 0x31, 0x5c, 0x12, 0xe4, 0xd2, 0x9f, 0xfe, 0x5f, 0xad, 0x2f, 0x77, 0xb9, - 0x1a, 0x2c, 0xd5, 0x95, 0xca, 0x90, 0x15, 0x14, 0xb6, 0xec, 0x4d, 0xdd, 0xd7, 0xe5, 0xbd, 0x34, - 0xff, 0x66, 0xab, 0x49, 0xb7, 0xfb, 0xc1, 0x6a, 0xd2, 0xed, 0xbe, 0xfa, 0x37, 0x49, 0x80, 0xc6, - 0x33, 0x9f, 0xb8, 0x8e, 0x6e, 0xd7, 0x2a, 0xa8, 0x11, 0x89, 0xfe, 0x62, 0xb4, 0x6f, 0xc7, 0x5e, - 0xb6, 0x87, 0x12, 0xe5, 0x5a, 0x25, 0x26, 0xfe, 0xdf, 0x80, 0xd4, 0xc8, 0x95, 0x6f, 0xae, 0x22, - 0x0f, 0xde, 0xc5, 0xdb, 0x98, 0xd1, 0x50, 0x63, 0x12, 0xb6, 0x52, 0x17, 0xbf, 0xac, 0x46, 0x3a, - 0x88, 0x0d, 0x5d, 0x6c, 0xe7, 0x1b, 0xba, 0x66, 0x10, 0x79, 0x72, 0x14, 0xc5, 0xce, 0xaf, 0x55, - 0x6a, 0xc4, 0xf5, 0x71, 0xd6, 0xd0, 0xd9, 0xff, 0x6f, 0x15, 0xdf, 0xde, 0x01, 0x98, 0x0c, 0x0d, - 0xad, 0x41, 0xa6, 0xb6, 0xd9, 0xed, 0x6e, 0x2b, 0x73, 0x22, 0x80, 0x4f, 0x58, 0x9c, 0xac, 0xfe, - 0x65, 0x12, 0x72, 0xb5, 0x8a, 0x3c, 0x56, 0x6b, 0xa0, 0xf0, 0xa8, 0xc4, 0x6f, 0xf3, 0xc9, 0xb3, - 0xa1, 0xe5, 0x8e, 0x65, 0x60, 0xb9, 0xa4, 0xa8, 0x5d, 0x64, 0x22, 0xcc, 0xea, 0x06, 0x17, 0x40, - 0x18, 0x8a, 0x44, 0x3a, 0x41, 0x33, 0xf4, 0x20, 0xc6, 0xaf, 0x5d, 0xee, 0x2c, 0x51, 0x9e, 0x4c, - 0xda, 0x1e, 0x2e, 0x04, 0x4a, 0x6a, 0xba, 0x87, 0x3e, 0x80, 0x25, 0xcf, 0xea, 0x3b, 0x96, 0xd3, - 0xd7, 0x02, 0xe7, 0xf1, 0xa7, 0x85, 0xea, 0xf2, 0xf9, 0xd9, 0xfa, 0x42, 0x57, 0xb0, 0xa4, 0x0f, - 0x17, 0x24, 0xb2, 0xc6, 0x5d, 0x89, 0xde, 0x87, 0xc5, 0x88, 0x28, 0xf3, 0xa2, 0x70, 0xbb, 0x72, - 0x7e, 0xb6, 0x5e, 0x0c, 0x25, 0x1f, 0x93, 0x31, 0x2e, 0x86, 0x82, 0x8f, 0x09, 0xbf, 0x7f, 0xd9, - 0xa7, 0xae, 0x41, 0x34, 0x97, 0xef, 0x69, 0x7e, 0x82, 0xa7, 0x71, 0x81, 0xd3, 0xc4, 0x36, 0x57, - 0x9f, 0xc0, 0xb5, 0xb6, 0x6b, 0x1c, 0x10, 0xcf, 0x17, 0xae, 0x90, 0x5e, 0xfc, 0x14, 0x6e, 0xfa, - 0xba, 0x77, 0xa8, 0x1d, 0x58, 0x9e, 0x4f, 0xdd, 0xb1, 0xe6, 0x12, 0x9f, 0x38, 0x8c, 0xaf, 0xf1, - 0xf7, 0x48, 0x79, 0x41, 0x76, 0x83, 0x61, 0xb6, 0x04, 0x04, 0x07, 0x88, 0x6d, 0x06, 0x50, 0x9b, - 0x50, 0x64, 0x65, 0x4a, 0x9d, 0xec, 0xeb, 0x23, 0xdb, 0x67, 0xa3, 0x07, 0x9b, 0xf6, 0xb5, 0x97, - 0x3e, 0xa6, 0xf2, 0x36, 0xed, 0x8b, 0x4f, 0xf5, 0x87, 0xa0, 0xd4, 0x2d, 0x6f, 0xa8, 0xfb, 0xc6, - 0x41, 0x70, 0xf3, 0x87, 0xea, 0xa0, 0x1c, 0x10, 0xdd, 0xf5, 0xf7, 0x88, 0xee, 0x6b, 0x43, 0xe2, - 0x5a, 0xd4, 0xbc, 0x7a, 0x96, 0x97, 0x42, 0x91, 0x0e, 0x97, 0x50, 0xff, 0x2b, 0x01, 0x80, 0xf5, - 0xfd, 0x20, 0x23, 0xfb, 0x3e, 0x2c, 0x7b, 0x8e, 0x3e, 0xf4, 0x0e, 0xa8, 0xaf, 0x59, 0x8e, 0x4f, - 0xdc, 0x23, 0xdd, 0x96, 0x17, 0x38, 0x4a, 0xc0, 0x68, 0x4a, 0x3a, 0x7a, 0x07, 0xd0, 0x21, 0x21, - 0x43, 0x8d, 0xda, 0xa6, 0x16, 0x30, 0xc5, 0x6b, 0x69, 0x1a, 0x2b, 0x8c, 0xd3, 0xb6, 0xcd, 0x6e, - 0x40, 0x47, 0x55, 0x58, 0x63, 0xc3, 0x27, 0x8e, 0xef, 0x5a, 0xc4, 0xd3, 0xf6, 0xa9, 0xab, 0x79, - 0x36, 0x3d, 0xd6, 0xf6, 0xa9, 0x6d, 0xd3, 0x63, 0xe2, 0x06, 0x77, 0x63, 0x25, 0x9b, 0xf6, 0x1b, - 0x02, 0xb4, 0x49, 0xdd, 0xae, 0x4d, 0x8f, 0x37, 0x03, 0x04, 0x4b, 0xdb, 0x26, 0x63, 0xf6, 0x2d, - 0xe3, 0x30, 0x48, 0xdb, 0x42, 0x6a, 0xcf, 0x32, 0x0e, 0xd1, 0xeb, 0xb0, 0x40, 0x6c, 0xc2, 0xaf, - 0x48, 0x04, 0x2a, 0xc3, 0x51, 0xc5, 0x80, 0xc8, 0x40, 0xea, 0x67, 0xa0, 0x34, 0x1c, 0xc3, 0x1d, - 0x0f, 0x23, 0x73, 0xfe, 0x0e, 0x20, 0x16, 0x24, 0x35, 0x9b, 0x1a, 0x87, 0xda, 0x40, 0x77, 0xf4, - 0x3e, 0xb3, 0x4b, 0x3c, 0x62, 0x29, 0x8c, 0xb3, 0x4d, 0x8d, 0xc3, 0x1d, 0x49, 0x57, 0x3f, 0x00, - 0xe8, 0x0e, 0x5d, 0xa2, 0x9b, 0x6d, 0x96, 0x4d, 0x30, 
0xd7, 0xf1, 0x96, 0x66, 0xca, 0x47, 0x40, - 0xea, 0xca, 0xad, 0xae, 0x08, 0x46, 0x3d, 0xa4, 0xab, 0xbf, 0x08, 0xd7, 0x3a, 0xb6, 0x6e, 0xf0, - 0x07, 0xf1, 0x4e, 0xf8, 0x2a, 0x83, 0x1e, 0x42, 0x56, 0x40, 0xe5, 0x4c, 0xc6, 0x6e, 0xb7, 0x49, - 0x9f, 0x5b, 0x73, 0x58, 0xe2, 0xab, 0x45, 0x80, 0x89, 0x1e, 0xf5, 0xcf, 0x13, 0x90, 0x0f, 0xf5, - 0xa3, 0x0d, 0x28, 0x18, 0xd4, 0x61, 0xcb, 0xdb, 0x72, 0x64, 0x55, 0x9f, 0xc7, 0x51, 0x12, 0x6a, - 0x42, 0x61, 0x18, 0x4a, 0x5f, 0x9a, 0xcf, 0xc5, 0x58, 0x8d, 0xa3, 0xb2, 0xe8, 0x43, 0xc8, 0x07, - 0xaf, 0xae, 0x41, 0x84, 0xbd, 0xfc, 0x91, 0x76, 0x02, 0x57, 0x3f, 0x01, 0xf8, 0x01, 0xb5, 0x9c, - 0x1e, 0x3d, 0x24, 0x0e, 0x7f, 0x45, 0x64, 0x35, 0x21, 0x09, 0xbc, 0x28, 0x5b, 0xbc, 0xd4, 0x17, - 0x53, 0x10, 0x3e, 0xa6, 0x89, 0xa6, 0xfa, 0xd7, 0x49, 0xc8, 0x62, 0x4a, 0xfd, 0x5a, 0x05, 0x6d, - 0x40, 0x56, 0xc6, 0x09, 0x7e, 0xfe, 0x54, 0xf3, 0xe7, 0x67, 0xeb, 0x19, 0x11, 0x20, 0x32, 0x06, - 0x8f, 0x0c, 0x91, 0x08, 0x9e, 0xbc, 0x28, 0x82, 0xa3, 0xbb, 0x50, 0x94, 0x20, 0xed, 0x40, 0xf7, - 0x0e, 0x44, 0x81, 0x56, 0x5d, 0x3c, 0x3f, 0x5b, 0x07, 0x81, 0xdc, 0xd2, 0xbd, 0x03, 0x0c, 0x02, - 0xcd, 0xbe, 0x51, 0x03, 0x0a, 0x5f, 0x50, 0xcb, 0xd1, 0x7c, 0x3e, 0x08, 0x79, 0x99, 0x18, 0x3b, - 0x8f, 0x93, 0xa1, 0xca, 0x27, 0x75, 0xf8, 0x62, 0x32, 0xf8, 0x06, 0x2c, 0xb8, 0x94, 0xfa, 0x22, - 0x6c, 0x59, 0xd4, 0x91, 0xf7, 0x14, 0x1b, 0xb1, 0xd7, 0xd7, 0x94, 0xfa, 0x58, 0xe2, 0x70, 0xd1, - 0x8d, 0xb4, 0xd0, 0x5d, 0x58, 0xb1, 0x75, 0xcf, 0xd7, 0x78, 0xbc, 0x33, 0x27, 0xda, 0xb2, 0x7c, - 0xab, 0x21, 0xc6, 0xdb, 0xe4, 0xac, 0x40, 0x42, 0xfd, 0xc7, 0x04, 0x14, 0xd8, 0x60, 0xac, 0x7d, - 0xcb, 0x60, 0x49, 0xde, 0x37, 0xcf, 0x3d, 0x6e, 0x40, 0xca, 0xf0, 0x5c, 0xe9, 0x54, 0x7e, 0xf8, - 0xd6, 0xba, 0x18, 0x33, 0x1a, 0xfa, 0x0c, 0xb2, 0xf2, 0xbe, 0x44, 0xa4, 0x1d, 0xea, 0xd5, 0xe9, - 0xa8, 0xf4, 0x8d, 0x94, 0xe3, 0x6b, 0x79, 0x62, 0x9d, 0x38, 0x04, 0x70, 0x94, 0x84, 0xae, 0x43, - 0xd2, 0x10, 0xee, 0x92, 0xbf, 0xd9, 0xa8, 0xb5, 0x70, 0xd2, 0x70, 0xd4, 0xbf, 0x4b, 0xc0, 0xc2, - 0x64, 0xc3, 0xb3, 0x15, 0x70, 0x13, 0xf2, 0xde, 0x68, 0xcf, 0x1b, 0x7b, 0x3e, 0x19, 0x04, 0x2f, - 0xa4, 0x21, 0x01, 0x35, 0x21, 0xaf, 0xdb, 0x7d, 0xea, 0x5a, 0xfe, 0xc1, 0x40, 0x56, 0xa2, 0xf1, - 0xa9, 0x42, 0x54, 0x67, 0xb9, 0x12, 0x88, 0xe0, 0x89, 0x74, 0x70, 0xee, 0x8b, 0x67, 0x74, 0x7e, - 0xee, 0xbf, 0x06, 0x45, 0x5b, 0x1f, 0xf0, 0x0b, 0x24, 0xdf, 0x1a, 0x88, 0x71, 0xa4, 0x71, 0x41, - 0xd2, 0x7a, 0xd6, 0x80, 0xa8, 0x2a, 0xe4, 0x43, 0x65, 0x68, 0x09, 0x0a, 0x95, 0x46, 0x57, 0xbb, - 0x77, 0xff, 0xa1, 0xf6, 0xa8, 0xb6, 0xa3, 0xcc, 0xc9, 0xdc, 0xf4, 0x2f, 0x12, 0xb0, 0x20, 0xc3, - 0x91, 0xcc, 0xf7, 0x5f, 0x87, 0x79, 0x57, 0xdf, 0xf7, 0x83, 0x8a, 0x24, 0x2d, 0x56, 0x35, 0x8b, - 0xf0, 0xac, 0x22, 0x61, 0xac, 0xf8, 0x8a, 0x24, 0xf2, 0x66, 0x9f, 0xba, 0xf4, 0xcd, 0x3e, 0xfd, - 0x73, 0x79, 0xb3, 0x57, 0x7f, 0x1d, 0x60, 0xd3, 0xb2, 0x49, 0x4f, 0xdc, 0x35, 0xc5, 0xd5, 0x97, - 0x2c, 0x87, 0x93, 0x77, 0x99, 0x41, 0x0e, 0xd7, 0xac, 0x63, 0x46, 0x63, 0xac, 0xbe, 0x65, 0xca, - 0xcd, 0xc8, 0x59, 0x8f, 0x18, 0xab, 0x6f, 0x99, 0xe1, 0x2b, 0x55, 0xfa, 0xaa, 0x57, 0xaa, 0xd3, - 0x04, 0x2c, 0xc9, 0xdc, 0x35, 0x0c, 0xbf, 0x6f, 0x43, 0x5e, 0xa4, 0xb1, 0x93, 0x82, 0x8e, 0xbf, - 0x53, 0x0b, 0x5c, 0xb3, 0x8e, 0x73, 0x82, 0xdd, 0x34, 0xd1, 0x3a, 0x14, 0x24, 0x34, 0xf2, 0xfb, - 0x1e, 0x10, 0xa4, 0x16, 0x33, 0xff, 0x5d, 0x48, 0xef, 0x5b, 0x36, 0x91, 0x0b, 0x3d, 0x36, 0x00, - 0x4c, 0x1c, 0xb0, 0x35, 0x87, 0x39, 0xba, 0x9a, 0x0b, 0x2e, 0xe3, 0xb8, 0x7d, 0xb2, 0xec, 0x8c, - 0xda, 0x27, 0x2a, 0xd0, 0x19, 0xfb, 0x04, 0x8e, 0xd9, 0x27, 0xd8, 0xc2, 0x3e, 
0x09, 0x8d, 0xda, - 0x27, 0x48, 0x3f, 0x17, 0xfb, 0xb6, 0xe1, 0x7a, 0xd5, 0xd6, 0x8d, 0x43, 0xdb, 0xf2, 0x7c, 0x62, - 0x46, 0x23, 0xc6, 0x7d, 0xc8, 0x4e, 0x25, 0x9d, 0x97, 0xdd, 0x5a, 0x4a, 0xa4, 0xfa, 0x6f, 0x09, - 0x28, 0x6e, 0x11, 0xdd, 0xf6, 0x0f, 0x26, 0x57, 0x43, 0x3e, 0xf1, 0x7c, 0x79, 0x58, 0xf1, 0x6f, - 0xf4, 0x1e, 0xe4, 0xc2, 0x9c, 0xe4, 0xca, 0xf7, 0xb7, 0x10, 0x8a, 0x1e, 0xc0, 0x3c, 0xdb, 0x63, - 0x74, 0x14, 0x14, 0x3b, 0x97, 0x3d, 0xed, 0x48, 0x24, 0x3b, 0x64, 0x5c, 0xc2, 0x93, 0x10, 0xbe, - 0x94, 0x32, 0x38, 0x68, 0xa2, 0xff, 0x0f, 0x45, 0xfe, 0x32, 0x11, 0xe4, 0x5c, 0x99, 0xab, 0x74, - 0x16, 0xc4, 0xe3, 0xa2, 0xc8, 0xb7, 0xfe, 0x27, 0x01, 0x2b, 0x3b, 0xfa, 0x78, 0x8f, 0xc8, 0xb0, - 0x41, 0x4c, 0x4c, 0x0c, 0xea, 0x9a, 0xa8, 0x13, 0x0d, 0x37, 0x97, 0xbc, 0x55, 0xc6, 0x09, 0xc7, - 0x47, 0x9d, 0xa0, 0x00, 0x4b, 0x46, 0x0a, 0xb0, 0x15, 0xc8, 0x38, 0xd4, 0x31, 0x88, 0x8c, 0x45, - 0xa2, 0xa1, 0x5a, 0xd1, 0x50, 0x53, 0x0a, 0x9f, 0x11, 0xf9, 0x23, 0x60, 0x8b, 0xfa, 0x61, 0x6f, - 0xe8, 0x33, 0x28, 0x75, 0x1b, 0x35, 0xdc, 0xe8, 0x55, 0xdb, 0x3f, 0xd4, 0xba, 0x95, 0xed, 0x6e, - 0xe5, 0xfe, 0x5d, 0xad, 0xd3, 0xde, 0xfe, 0xfc, 0xde, 0x83, 0xbb, 0xef, 0x29, 0x89, 0xd2, 0xc6, - 0xc9, 0xe9, 0xc6, 0xcd, 0x56, 0xa5, 0xb6, 0x2d, 0x76, 0xcc, 0x1e, 0x7d, 0xd6, 0xd5, 0x6d, 0x4f, - 0xbf, 0x7f, 0xb7, 0x43, 0xed, 0x31, 0xc3, 0xb0, 0x65, 0x5d, 0x8c, 0x9e, 0x57, 0xd1, 0x63, 0x38, - 0x71, 0xe1, 0x31, 0x3c, 0x39, 0xcd, 0x93, 0x17, 0x9c, 0xe6, 0x9b, 0xb0, 0x62, 0xb8, 0xd4, 0xf3, - 0x34, 0x96, 0xfd, 0x13, 0x73, 0xa6, 0xbe, 0xf8, 0xce, 0xf9, 0xd9, 0xfa, 0x72, 0x8d, 0xf1, 0xbb, - 0x9c, 0x2d, 0xd5, 0x2f, 0x1b, 0x11, 0x12, 0xef, 0x49, 0xfd, 0x83, 0x14, 0x4b, 0xa4, 0xac, 0x23, - 0xcb, 0x26, 0x7d, 0xe2, 0xa1, 0x27, 0xb0, 0x64, 0xb8, 0xc4, 0x64, 0x69, 0xbd, 0x6e, 0x47, 0x7f, - 0x27, 0xfa, 0xff, 0x62, 0x73, 0x9a, 0x50, 0xb0, 0x5c, 0x0b, 0xa5, 0xba, 0x43, 0x62, 0xe0, 0x45, - 0x63, 0xaa, 0x8d, 0xbe, 0x80, 0x25, 0x8f, 0xd8, 0x96, 0x33, 0x7a, 0xa6, 0x19, 0xd4, 0xf1, 0xc9, - 0xb3, 0xe0, 0x45, 0xec, 0x2a, 0xbd, 0xdd, 0xc6, 0x36, 0x93, 0xaa, 0x09, 0xa1, 0x2a, 0x3a, 0x3f, - 0x5b, 0x5f, 0x9c, 0xa6, 0xe1, 0x45, 0xa9, 0x59, 0xb6, 0x4b, 0x2d, 0x58, 0x9c, 0xb6, 0x06, 0xad, - 0xc8, 0xbd, 0xcf, 0x43, 0x48, 0xb0, 0xb7, 0xd1, 0x4d, 0xc8, 0xb9, 0xa4, 0x6f, 0x79, 0xbe, 0x2b, - 0xdc, 0xcc, 0x38, 0x21, 0x85, 0xed, 0x7c, 0xf1, 0x23, 0x9f, 0xd2, 0xaf, 0xc2, 0x4c, 0x8f, 0x6c, - 0xb3, 0x98, 0x96, 0xa7, 0xef, 0x49, 0x95, 0x39, 0x1c, 0x34, 0xd9, 0x1a, 0x1c, 0x79, 0x61, 0xa2, - 0xc6, 0xbf, 0x19, 0x8d, 0x67, 0x14, 0xf2, 0x27, 0x4f, 0x3c, 0x67, 0x08, 0x7e, 0x3b, 0x99, 0x8e, - 0xfc, 0x76, 0x72, 0x05, 0x32, 0x36, 0x39, 0x22, 0xb6, 0x38, 0xcb, 0xb1, 0x68, 0xdc, 0xbe, 0x0b, - 0xc5, 0xe0, 0x47, 0x7a, 0xfc, 0x57, 0x06, 0x39, 0x48, 0xf7, 0x2a, 0xdd, 0xc7, 0xca, 0x1c, 0x02, - 0xc8, 0x8a, 0xc5, 0x29, 0x5e, 0xeb, 0x6a, 0xed, 0xd6, 0x66, 0xf3, 0x91, 0x92, 0xbc, 0xfd, 0xb3, - 0x14, 0xe4, 0xc3, 0xf7, 0x22, 0x76, 0x76, 0xb4, 0x1a, 0x4f, 0x83, 0xd5, 0x1d, 0xd2, 0x5b, 0xe4, - 0x18, 0xbd, 0x36, 0xb9, 0x85, 0xfa, 0x4c, 0x3c, 0x90, 0x87, 0xec, 0xe0, 0x06, 0xea, 0x0d, 0xc8, - 0x55, 0xba, 0xdd, 0xe6, 0xa3, 0x56, 0xa3, 0xae, 0x7c, 0x99, 0x28, 0x7d, 0xe7, 0xe4, 0x74, 0x63, - 0x39, 0x04, 0x55, 0x3c, 0xb1, 0xf8, 0x38, 0xaa, 0x56, 0x6b, 0x74, 0x7a, 0x8d, 0xba, 0xf2, 0x3c, - 0x39, 0x8b, 0xe2, 0xb7, 0x2a, 0xfc, 0x67, 0x2e, 0xf9, 0x0e, 0x6e, 0x74, 0x2a, 0x98, 0x75, 0xf8, - 0x65, 0x52, 0x5c, 0x8e, 0x4d, 0x7a, 0x74, 0xc9, 0x50, 0x77, 0x59, 0x9f, 0x6b, 0xc1, 0xcf, 0xbd, - 0x9e, 0xa7, 0xc4, 0x4f, 0x21, 0x26, 0x8f, 0x5f, 0x44, 0x37, 0xc7, 0xac, 0x37, 0xfe, 0xea, 0xc8, - 0xd5, 
0xa4, 0x66, 0x7a, 0xeb, 0xb2, 0xd8, 0xc3, 0xb4, 0xa8, 0x30, 0x8f, 0x77, 0x5b, 0x2d, 0x06, - 0x7a, 0x9e, 0x9e, 0x19, 0x1d, 0x1e, 0x39, 0xac, 0x62, 0x46, 0xb7, 0x20, 0x17, 0x3c, 0x4a, 0x2a, - 0x5f, 0xa6, 0x67, 0x0c, 0xaa, 0x05, 0x2f, 0xaa, 0xbc, 0xc3, 0xad, 0xdd, 0x1e, 0xff, 0x35, 0xda, - 0xf3, 0xcc, 0x6c, 0x87, 0x07, 0x23, 0xdf, 0xa4, 0xc7, 0x0e, 0xdb, 0xb3, 0xf2, 0x1e, 0xee, 0xcb, - 0x8c, 0xb8, 0xb4, 0x08, 0x31, 0xf2, 0x12, 0xee, 0x0d, 0xc8, 0xe1, 0xc6, 0x0f, 0xc4, 0x0f, 0xd7, - 0x9e, 0x67, 0x67, 0xf4, 0x60, 0xf2, 0x05, 0x31, 0x64, 0x6f, 0x6d, 0xdc, 0xd9, 0xaa, 0x70, 0x97, - 0xcf, 0xa2, 0xda, 0xee, 0xf0, 0x40, 0x77, 0x88, 0x39, 0xf9, 0x3d, 0x48, 0xc8, 0xba, 0xfd, 0x4b, - 0x90, 0x0b, 0x32, 0x53, 0xb4, 0x06, 0xd9, 0xa7, 0x6d, 0xfc, 0xb8, 0x81, 0x95, 0x39, 0xe1, 0xc3, - 0x80, 0xf3, 0x54, 0xd4, 0x14, 0x1b, 0x30, 0xbf, 0x53, 0x69, 0x55, 0x1e, 0x35, 0x70, 0x70, 0x45, - 0x1e, 0x00, 0x64, 0x7a, 0x55, 0x52, 0x64, 0x07, 0xa1, 0xce, 0xea, 0xea, 0x57, 0x5f, 0xaf, 0xcd, - 0xfd, 0xf4, 0xeb, 0xb5, 0xb9, 0xe7, 0xe7, 0x6b, 0x89, 0xaf, 0xce, 0xd7, 0x12, 0x3f, 0x39, 0x5f, - 0x4b, 0xfc, 0xeb, 0xf9, 0x5a, 0x62, 0x2f, 0xcb, 0x0f, 0x81, 0x07, 0xff, 0x1b, 0x00, 0x00, 0xff, - 0xff, 0x3b, 0x34, 0xbd, 0xc6, 0xd3, 0x30, 0x00, 0x00, + // 5020 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0x4d, 0x6c, 0x24, 0x49, + 0x56, 0x76, 0xfd, 0xba, 0xea, 0x55, 0xd9, 0x4e, 0x47, 0x7b, 0x7b, 0xdc, 0xb5, 0xdd, 0x76, 0x4d, + 0xce, 0xf4, 0xce, 0x6c, 0x6f, 0x53, 0xfd, 0xb7, 0xbb, 0xea, 0x99, 0x61, 0x77, 0xa6, 0xfe, 0x6c, + 0xd7, 0xb6, 0x5d, 0x55, 0x8a, 0x2a, 0x77, 0xef, 0x22, 0x41, 0x2a, 0x9d, 0x19, 0x2e, 0xe7, 0x38, + 0x2b, 0xa3, 0xc8, 0xcc, 0xb2, 0xbb, 0x58, 0x10, 0x2d, 0x0e, 0x80, 0x7c, 0x82, 0xdb, 0x22, 0x64, + 0x2e, 0x70, 0x42, 0x48, 0x1c, 0x40, 0x42, 0x70, 0x1a, 0x24, 0x0e, 0x7b, 0x83, 0x05, 0x09, 0xad, + 0x40, 0x32, 0xac, 0x0f, 0xdc, 0x56, 0x70, 0x59, 0x71, 0x01, 0x09, 0xc5, 0x4f, 0x66, 0xa5, 0xab, + 0xd3, 0x76, 0x0f, 0xb3, 0x17, 0xbb, 0xe2, 0xbd, 0xef, 0xbd, 0x78, 0xf1, 0x22, 0xe2, 0xc5, 0x7b, + 0x11, 0x09, 0xf7, 0x06, 0x96, 0x7f, 0x30, 0xde, 0xab, 0x18, 0x74, 0xf8, 0xc0, 0xa4, 0xc6, 0x21, + 0x71, 0x1f, 0x78, 0xc7, 0xba, 0x3b, 0x3c, 0xb4, 0xfc, 0x07, 0xfa, 0xc8, 0x7a, 0xe0, 0x4f, 0x46, + 0xc4, 0xab, 0x8c, 0x5c, 0xea, 0x53, 0x84, 0x04, 0xa0, 0x12, 0x00, 0x2a, 0x47, 0x8f, 0x4a, 0xeb, + 0x03, 0x4a, 0x07, 0x36, 0x79, 0xc0, 0x11, 0x7b, 0xe3, 0xfd, 0x07, 0xbe, 0x35, 0x24, 0x9e, 0xaf, + 0x0f, 0x47, 0x42, 0xa8, 0xb4, 0x36, 0x0b, 0x30, 0xc7, 0xae, 0xee, 0x5b, 0xd4, 0x91, 0xfc, 0x95, + 0x01, 0x1d, 0x50, 0xfe, 0xf3, 0x01, 0xfb, 0x25, 0xa8, 0xea, 0x3a, 0xcc, 0x3f, 0x27, 0xae, 0x67, + 0x51, 0x07, 0xad, 0x40, 0xc6, 0x72, 0x4c, 0xf2, 0x72, 0x35, 0x51, 0x4e, 0xbc, 0x9f, 0xc6, 0xa2, + 0xa1, 0x3e, 0x04, 0x68, 0xb1, 0x1f, 0x4d, 0xc7, 0x77, 0x27, 0x48, 0x81, 0xd4, 0x21, 0x99, 0x70, + 0x44, 0x1e, 0xb3, 0x9f, 0x8c, 0x72, 0xa4, 0xdb, 0xab, 0x49, 0x41, 0x39, 0xd2, 0x6d, 0xf5, 0x27, + 0x09, 0x28, 0x54, 0x1d, 0x87, 0xfa, 0xbc, 0x77, 0x0f, 0x21, 0x48, 0x3b, 0xfa, 0x90, 0x48, 0x21, + 0xfe, 0x1b, 0xd5, 0x21, 0x6b, 0xeb, 0x7b, 0xc4, 0xf6, 0x56, 0x93, 0xe5, 0xd4, 0xfb, 0x85, 0xc7, + 0x5f, 0xab, 0xbc, 0x3e, 0xe4, 0x4a, 0x44, 0x49, 0x65, 0x9b, 0xa3, 0xb9, 0x11, 0x58, 0x8a, 0xa2, + 0x6f, 0xc3, 0xbc, 0xe5, 0x98, 0x96, 0x41, 0xbc, 0xd5, 0x34, 0xd7, 0xb2, 0x16, 0xa7, 0x65, 0x6a, + 0x7d, 0x2d, 0xfd, 0xc3, 0xb3, 0xf5, 0x39, 0x1c, 0x08, 0x95, 0x3e, 0x80, 0x42, 0x44, 0x6d, 0xcc, + 0xd8, 0x56, 0x20, 0x73, 0xa4, 0xdb, 0x63, 0x22, 0x47, 0x27, 0x1a, 0x1f, 0x26, 0x9f, 0x26, 0xd4, + 0x4f, 0x60, 0xa5, 0xad, 
0x0f, 0x89, 0xb9, 0x49, 0x1c, 0xe2, 0x5a, 0x06, 0x26, 0x1e, 0x1d, 0xbb, + 0x06, 0x61, 0x63, 0x3d, 0xb4, 0x1c, 0x33, 0x18, 0x2b, 0xfb, 0x1d, 0xaf, 0x45, 0xad, 0xc3, 0x5b, + 0x0d, 0xcb, 0x33, 0x5c, 0xe2, 0x93, 0xcf, 0xad, 0x24, 0x15, 0x28, 0x39, 0x4b, 0xc0, 0xd2, 0xac, + 0xf4, 0x2f, 0xc1, 0x0d, 0xe6, 0x62, 0x53, 0x73, 0x25, 0x45, 0xf3, 0x46, 0xc4, 0xe0, 0xca, 0x0a, + 0x8f, 0xdf, 0x8f, 0xf3, 0x50, 0xdc, 0x48, 0xb6, 0xe6, 0xf0, 0x32, 0x57, 0x13, 0x10, 0x7a, 0x23, + 0x62, 0x20, 0x03, 0x6e, 0x9a, 0xd2, 0xe8, 0x19, 0xf5, 0x49, 0xae, 0x3e, 0x76, 0x1a, 0x2f, 0x19, + 0xe6, 0xd6, 0x1c, 0x5e, 0x09, 0x94, 0x45, 0x3b, 0xa9, 0x01, 0xe4, 0x02, 0xdd, 0xea, 0x0f, 0x12, + 0x90, 0x0f, 0x98, 0x1e, 0xfa, 0x2a, 0xe4, 0x1d, 0xdd, 0xa1, 0x9a, 0x31, 0x1a, 0x7b, 0x7c, 0x40, + 0xa9, 0x5a, 0xf1, 0xfc, 0x6c, 0x3d, 0xd7, 0xd6, 0x1d, 0x5a, 0xef, 0xee, 0x7a, 0x38, 0xc7, 0xd8, + 0xf5, 0xd1, 0xd8, 0x43, 0x6f, 0x43, 0x71, 0x48, 0x86, 0xd4, 0x9d, 0x68, 0x7b, 0x13, 0x9f, 0x78, + 0xd2, 0x6d, 0x05, 0x41, 0xab, 0x31, 0x12, 0xfa, 0x16, 0xcc, 0x0f, 0x84, 0x49, 0xab, 0x29, 0xbe, + 0x7c, 0xde, 0x89, 0xb3, 0x7e, 0xc6, 0x6a, 0x1c, 0xc8, 0xa8, 0xbf, 0x97, 0x80, 0x95, 0x90, 0x4a, + 0x7e, 0x75, 0x6c, 0xb9, 0x64, 0x48, 0x1c, 0xdf, 0x43, 0xdf, 0x80, 0xac, 0x6d, 0x0d, 0x2d, 0xdf, + 0x93, 0x3e, 0xbf, 0x13, 0xa7, 0x36, 0x1c, 0x14, 0x96, 0x60, 0x54, 0x85, 0xa2, 0x4b, 0x3c, 0xe2, + 0x1e, 0x89, 0x15, 0x2f, 0x3d, 0x7a, 0x8d, 0xf0, 0x05, 0x11, 0x75, 0x03, 0x72, 0x5d, 0x5b, 0xf7, + 0xf7, 0xa9, 0x3b, 0x44, 0x2a, 0x14, 0x75, 0xd7, 0x38, 0xb0, 0x7c, 0x62, 0xf8, 0x63, 0x37, 0xd8, + 0x7d, 0x17, 0x68, 0xe8, 0x26, 0x24, 0xa9, 0xe8, 0x28, 0x5f, 0xcb, 0x9e, 0x9f, 0xad, 0x27, 0x3b, + 0x3d, 0x9c, 0xa4, 0x9e, 0xfa, 0x11, 0x2c, 0x77, 0xed, 0xf1, 0xc0, 0x72, 0x1a, 0xc4, 0x33, 0x5c, + 0x6b, 0xc4, 0xb4, 0xb3, 0x55, 0xc9, 0x62, 0x54, 0xb0, 0x2a, 0xd9, 0xef, 0x70, 0x6b, 0x27, 0xa7, + 0x5b, 0x5b, 0xfd, 0x9d, 0x24, 0x2c, 0x37, 0x9d, 0x81, 0xe5, 0x90, 0xa8, 0xf4, 0x5d, 0x58, 0x24, + 0x9c, 0xa8, 0x1d, 0x89, 0x70, 0x23, 0xf5, 0x2c, 0x08, 0x6a, 0x10, 0x83, 0x5a, 0x33, 0x71, 0xe1, + 0x51, 0xdc, 0xf0, 0x5f, 0xd3, 0x1e, 0x1b, 0x1d, 0x9a, 0x30, 0x3f, 0xe2, 0x83, 0xf0, 0xe4, 0xf4, + 0xde, 0x8d, 0xd3, 0xf5, 0xda, 0x38, 0x83, 0x20, 0x21, 0x65, 0xbf, 0x48, 0x90, 0xf8, 0xb3, 0x24, + 0x2c, 0xb5, 0xa9, 0x79, 0xc1, 0x0f, 0x25, 0xc8, 0x1d, 0x50, 0xcf, 0x8f, 0x04, 0xc4, 0xb0, 0x8d, + 0x9e, 0x42, 0x6e, 0x24, 0xa7, 0x4f, 0xce, 0xfe, 0xed, 0x78, 0x93, 0x05, 0x06, 0x87, 0x68, 0xf4, + 0x11, 0xe4, 0x83, 0x2d, 0xc3, 0x46, 0xfb, 0x06, 0x0b, 0x67, 0x8a, 0x47, 0xdf, 0x82, 0xac, 0x98, + 0x84, 0xd5, 0x34, 0x97, 0xbc, 0xfb, 0x46, 0x3e, 0xc7, 0x52, 0x08, 0x6d, 0x42, 0xce, 0xb7, 0x3d, + 0xcd, 0x72, 0xf6, 0xe9, 0x6a, 0x86, 0x2b, 0x58, 0x8f, 0x0d, 0x32, 0xd4, 0x24, 0xfd, 0xed, 0x5e, + 0xcb, 0xd9, 0xa7, 0xb5, 0xc2, 0xf9, 0xd9, 0xfa, 0xbc, 0x6c, 0xe0, 0x79, 0xdf, 0xf6, 0xd8, 0x0f, + 0xf5, 0xf7, 0x13, 0x50, 0x88, 0xa0, 0xd0, 0x1d, 0x00, 0xdf, 0x1d, 0x7b, 0xbe, 0xe6, 0x52, 0xea, + 0x73, 0x67, 0x15, 0x71, 0x9e, 0x53, 0x30, 0xa5, 0x3e, 0xaa, 0xc0, 0x0d, 0x83, 0xb8, 0xbe, 0x66, + 0x79, 0xde, 0x98, 0xb8, 0x9a, 0x37, 0xde, 0xfb, 0x94, 0x18, 0x3e, 0x77, 0x5c, 0x11, 0x2f, 0x33, + 0x56, 0x8b, 0x73, 0x7a, 0x82, 0x81, 0x9e, 0xc0, 0xcd, 0x28, 0x7e, 0x34, 0xde, 0xb3, 0x2d, 0x43, + 0x63, 0x93, 0x99, 0xe2, 0x22, 0x37, 0xa6, 0x22, 0x5d, 0xce, 0x7b, 0x46, 0x26, 0xea, 0x8f, 0x13, + 0xa0, 0x60, 0x7d, 0xdf, 0xdf, 0x21, 0xc3, 0x3d, 0xe2, 0xf6, 0x7c, 0xdd, 0x1f, 0x7b, 0xe8, 0x26, + 0x64, 0x6d, 0xa2, 0x9b, 0xc4, 0xe5, 0x46, 0xe5, 0xb0, 0x6c, 0xa1, 0x5d, 0xb6, 0x83, 0x75, 0xe3, + 0x40, 0xdf, 0xb3, 0x6c, 0xcb, 0x9f, 0x70, 0x53, 
0x16, 0xe3, 0x97, 0xf0, 0xac, 0xce, 0x0a, 0x8e, + 0x08, 0xe2, 0x0b, 0x6a, 0xd0, 0x2a, 0xcc, 0x0f, 0x89, 0xe7, 0xe9, 0x03, 0xc2, 0x2d, 0xcd, 0xe3, + 0xa0, 0xa9, 0x7e, 0x04, 0xc5, 0xa8, 0x1c, 0x2a, 0xc0, 0xfc, 0x6e, 0xfb, 0x59, 0xbb, 0xf3, 0xa2, + 0xad, 0xcc, 0xa1, 0x25, 0x28, 0xec, 0xb6, 0x71, 0xb3, 0x5a, 0xdf, 0xaa, 0xd6, 0xb6, 0x9b, 0x4a, + 0x02, 0x2d, 0x40, 0x7e, 0xda, 0x4c, 0xaa, 0x7f, 0x91, 0x00, 0x60, 0xee, 0x96, 0x83, 0xfa, 0x10, + 0x32, 0x9e, 0xaf, 0xfb, 0x62, 0x55, 0x2e, 0x3e, 0x7e, 0xf7, 0xb2, 0x39, 0x94, 0xf6, 0xb2, 0x7f, + 0x04, 0x0b, 0x91, 0xa8, 0x85, 0xc9, 0x0b, 0x16, 0xb2, 0x00, 0xa1, 0x9b, 0xa6, 0x2b, 0x0d, 0xe7, + 0xbf, 0xd5, 0x8f, 0x20, 0xc3, 0xa5, 0x2f, 0x9a, 0x9b, 0x83, 0x74, 0x83, 0xfd, 0x4a, 0xa0, 0x3c, + 0x64, 0x70, 0xb3, 0xda, 0xf8, 0x9e, 0x92, 0x44, 0x0a, 0x14, 0x1b, 0xad, 0x5e, 0xbd, 0xd3, 0x6e, + 0x37, 0xeb, 0xfd, 0x66, 0x43, 0x49, 0xa9, 0x77, 0x21, 0xd3, 0x1a, 0x32, 0xcd, 0xb7, 0xd9, 0x92, + 0xdf, 0x27, 0x2e, 0x71, 0x8c, 0x60, 0x27, 0x4d, 0x09, 0xea, 0x4f, 0x0b, 0x90, 0xd9, 0xa1, 0x63, + 0xc7, 0x47, 0x8f, 0x23, 0x61, 0x6b, 0x31, 0x3e, 0x43, 0xe0, 0xc0, 0x4a, 0x7f, 0x32, 0x22, 0x32, + 0xac, 0xdd, 0x84, 0xac, 0xd8, 0x1c, 0x72, 0x38, 0xb2, 0xc5, 0xe8, 0xbe, 0xee, 0x0e, 0x88, 0x2f, + 0xc7, 0x23, 0x5b, 0xe8, 0x7d, 0x76, 0x62, 0xe9, 0x26, 0x75, 0xec, 0x09, 0xdf, 0x43, 0x39, 0x71, + 0x2c, 0x61, 0xa2, 0x9b, 0x1d, 0xc7, 0x9e, 0xe0, 0x90, 0x8b, 0xb6, 0xa0, 0xb8, 0x67, 0x39, 0xa6, + 0x46, 0x47, 0x22, 0xc8, 0x67, 0x2e, 0xdf, 0x71, 0xc2, 0xaa, 0x9a, 0xe5, 0x98, 0x1d, 0x01, 0xc6, + 0x85, 0xbd, 0x69, 0x03, 0xb5, 0x61, 0xf1, 0x88, 0xda, 0xe3, 0x21, 0x09, 0x75, 0x65, 0xb9, 0xae, + 0xf7, 0x2e, 0xd7, 0xf5, 0x9c, 0xe3, 0x03, 0x6d, 0x0b, 0x47, 0xd1, 0x26, 0x7a, 0x06, 0x0b, 0xfe, + 0x70, 0xb4, 0xef, 0x85, 0xea, 0xe6, 0xb9, 0xba, 0xaf, 0x5c, 0xe1, 0x30, 0x06, 0x0f, 0xb4, 0x15, + 0xfd, 0x48, 0x0b, 0x6d, 0x42, 0xc1, 0xa0, 0x8e, 0x67, 0x79, 0x3e, 0x71, 0x8c, 0xc9, 0x6a, 0x8e, + 0xfb, 0xfe, 0x8a, 0x51, 0xd6, 0xa7, 0x60, 0x1c, 0x95, 0x2c, 0xfd, 0x56, 0x0a, 0x0a, 0x11, 0x17, + 0xa0, 0x1e, 0x14, 0x46, 0x2e, 0x1d, 0xe9, 0x03, 0x7e, 0xe2, 0xc9, 0x49, 0x7d, 0xf4, 0x46, 0xee, + 0xab, 0x74, 0xa7, 0x82, 0x38, 0xaa, 0x45, 0x3d, 0x4d, 0x42, 0x21, 0xc2, 0x44, 0xf7, 0x20, 0x87, + 0xbb, 0xb8, 0xf5, 0xbc, 0xda, 0x6f, 0x2a, 0x73, 0xa5, 0xdb, 0x27, 0xa7, 0xe5, 0x55, 0xae, 0x2d, + 0xaa, 0xa0, 0xeb, 0x5a, 0x47, 0x6c, 0x0d, 0xbf, 0x0f, 0xf3, 0x01, 0x34, 0x51, 0xfa, 0xf2, 0xc9, + 0x69, 0xf9, 0xad, 0x59, 0x68, 0x04, 0x89, 0x7b, 0x5b, 0x55, 0xdc, 0x6c, 0x28, 0xc9, 0x78, 0x24, + 0xee, 0x1d, 0xe8, 0x2e, 0x31, 0xd1, 0x57, 0x20, 0x2b, 0x81, 0xa9, 0x52, 0xe9, 0xe4, 0xb4, 0x7c, + 0x73, 0x16, 0x38, 0xc5, 0xe1, 0xde, 0x76, 0xf5, 0x79, 0x53, 0x49, 0xc7, 0xe3, 0x70, 0xcf, 0xd6, + 0x8f, 0x08, 0x7a, 0x17, 0x32, 0x02, 0x96, 0x29, 0xdd, 0x3a, 0x39, 0x2d, 0x7f, 0xe9, 0x35, 0x75, + 0x0c, 0x55, 0x5a, 0xfd, 0xdd, 0x3f, 0x5e, 0x9b, 0xfb, 0x9b, 0x3f, 0x59, 0x53, 0x66, 0xd9, 0xa5, + 0xff, 0x49, 0xc0, 0xc2, 0x85, 0xb5, 0x83, 0x54, 0xc8, 0x3a, 0xd4, 0xa0, 0x23, 0x71, 0x10, 0xe6, + 0x6a, 0x70, 0x7e, 0xb6, 0x9e, 0x6d, 0xd3, 0x3a, 0x1d, 0x4d, 0xb0, 0xe4, 0xa0, 0x67, 0x33, 0x47, + 0xf9, 0x93, 0x37, 0x5c, 0x98, 0xb1, 0x87, 0xf9, 0xc7, 0xb0, 0x60, 0xba, 0xd6, 0x11, 0x71, 0x35, + 0x83, 0x3a, 0xfb, 0xd6, 0x40, 0x1e, 0x72, 0xa5, 0xd8, 0x7c, 0x93, 0x03, 0x71, 0x51, 0x08, 0xd4, + 0x39, 0xfe, 0x0b, 0x1c, 0xe3, 0xa5, 0xe7, 0x50, 0x8c, 0x2e, 0x75, 0x76, 0x2e, 0x79, 0xd6, 0xaf, + 0x11, 0x99, 0x58, 0xf2, 0x34, 0x14, 0xe7, 0x19, 0x45, 0xa4, 0x95, 0xef, 0x41, 0x7a, 0x48, 0x4d, + 0xa1, 0x67, 0xa1, 0x76, 0x83, 0x65, 0x13, 0xff, 0x72, 0xb6, 0x5e, 0xa0, 
0x5e, 0x65, 0xc3, 0xb2, + 0xc9, 0x0e, 0x35, 0x09, 0xe6, 0x00, 0xf5, 0x08, 0xd2, 0x2c, 0xe6, 0xa0, 0x2f, 0x43, 0xba, 0xd6, + 0x6a, 0x37, 0x94, 0xb9, 0xd2, 0xf2, 0xc9, 0x69, 0x79, 0x81, 0xbb, 0x84, 0x31, 0xd8, 0xda, 0x45, + 0xeb, 0x90, 0x7d, 0xde, 0xd9, 0xde, 0xdd, 0x61, 0xcb, 0xeb, 0xc6, 0xc9, 0x69, 0x79, 0x29, 0x64, + 0x0b, 0xa7, 0xa1, 0x3b, 0x90, 0xe9, 0xef, 0x74, 0x37, 0x7a, 0x4a, 0xb2, 0x84, 0x4e, 0x4e, 0xcb, + 0x8b, 0x21, 0x9f, 0xdb, 0x5c, 0x5a, 0x96, 0xb3, 0x9a, 0x0f, 0xe9, 0xea, 0x8f, 0x12, 0x50, 0x88, + 0x6c, 0x38, 0xb6, 0x30, 0x1b, 0xcd, 0x8d, 0xea, 0xee, 0x76, 0x5f, 0x99, 0x8b, 0x2c, 0xcc, 0x08, + 0xa4, 0x41, 0xf6, 0xf5, 0xb1, 0xcd, 0xe2, 0x1c, 0xd4, 0x3b, 0xed, 0x5e, 0xab, 0xd7, 0x6f, 0xb6, + 0xfb, 0x4a, 0xa2, 0xb4, 0x7a, 0x72, 0x5a, 0x5e, 0x99, 0x05, 0x6f, 0x8c, 0x6d, 0x9b, 0x2d, 0xcd, + 0x7a, 0xb5, 0xbe, 0xc5, 0xd7, 0xfa, 0x74, 0x69, 0x46, 0x50, 0x75, 0xdd, 0x38, 0x20, 0x26, 0xba, + 0x0f, 0xf9, 0x46, 0x73, 0xbb, 0xb9, 0x59, 0xe5, 0xd1, 0xbd, 0x74, 0xe7, 0xe4, 0xb4, 0x7c, 0xeb, + 0xf5, 0xde, 0x6d, 0x32, 0xd0, 0x7d, 0x62, 0xce, 0x2c, 0xd1, 0x08, 0x44, 0xfd, 0x59, 0x12, 0x16, + 0x30, 0x2b, 0x87, 0x5d, 0xbf, 0x4b, 0x6d, 0xcb, 0x98, 0xa0, 0x2e, 0xe4, 0x0d, 0xea, 0x98, 0x56, + 0x24, 0x4e, 0x3c, 0xbe, 0x24, 0x25, 0x9a, 0x4a, 0x05, 0xad, 0x7a, 0x20, 0x89, 0xa7, 0x4a, 0xd0, + 0x03, 0xc8, 0x98, 0xc4, 0xd6, 0x27, 0x32, 0x37, 0xbb, 0x55, 0x11, 0x05, 0x77, 0x25, 0x28, 0xb8, + 0x2b, 0x0d, 0x59, 0x70, 0x63, 0x81, 0xe3, 0x35, 0x88, 0xfe, 0x52, 0xd3, 0x7d, 0x9f, 0x0c, 0x47, + 0xbe, 0x48, 0xcc, 0xd2, 0xb8, 0x30, 0xd4, 0x5f, 0x56, 0x25, 0x09, 0x3d, 0x82, 0xec, 0xb1, 0xe5, + 0x98, 0xf4, 0x58, 0xe6, 0x5e, 0x57, 0x28, 0x95, 0x40, 0xf5, 0x84, 0xa5, 0x24, 0x33, 0x66, 0xb2, + 0x35, 0xd4, 0xee, 0xb4, 0x9b, 0xc1, 0x1a, 0x92, 0xfc, 0x8e, 0xd3, 0xa6, 0x0e, 0xdb, 0xff, 0xd0, + 0x69, 0x6b, 0x1b, 0xd5, 0xd6, 0xf6, 0x2e, 0x66, 0xeb, 0x68, 0xe5, 0xe4, 0xb4, 0xac, 0x84, 0x90, + 0x0d, 0xdd, 0xb2, 0x59, 0x31, 0x70, 0x0b, 0x52, 0xd5, 0xf6, 0xf7, 0x94, 0x64, 0x49, 0x39, 0x39, + 0x2d, 0x17, 0x43, 0x76, 0xd5, 0x99, 0x4c, 0xfd, 0x3e, 0xdb, 0xaf, 0xfa, 0xf7, 0x29, 0x28, 0xee, + 0x8e, 0x4c, 0xdd, 0x27, 0x62, 0x9f, 0xa1, 0x32, 0x14, 0x46, 0xba, 0xab, 0xdb, 0x36, 0xb1, 0x2d, + 0x6f, 0x28, 0xaf, 0x12, 0xa2, 0x24, 0xf4, 0xc1, 0x9b, 0xba, 0xb1, 0x96, 0x63, 0x7b, 0xe7, 0x07, + 0xff, 0xb6, 0x9e, 0x08, 0x1c, 0xba, 0x0b, 0x8b, 0xfb, 0xc2, 0x5a, 0x4d, 0x37, 0xf8, 0xc4, 0xa6, + 0xf8, 0xc4, 0x56, 0xe2, 0x26, 0x36, 0x6a, 0x56, 0x45, 0x0e, 0xb2, 0xca, 0xa5, 0xf0, 0xc2, 0x7e, + 0xb4, 0x89, 0x9e, 0xc0, 0xfc, 0x90, 0x3a, 0x96, 0x4f, 0xdd, 0xeb, 0x67, 0x21, 0x40, 0xa2, 0x7b, + 0xb0, 0xcc, 0x26, 0x37, 0xb0, 0x87, 0xb3, 0xf9, 0x71, 0x9e, 0xc4, 0x4b, 0x43, 0xfd, 0xa5, 0xec, + 0x10, 0x33, 0x32, 0xaa, 0x41, 0x86, 0xba, 0x2c, 0x5f, 0xcc, 0x72, 0x73, 0xef, 0x5f, 0x6b, 0xae, + 0x68, 0x74, 0x98, 0x0c, 0x16, 0xa2, 0xea, 0x37, 0x61, 0xe1, 0xc2, 0x20, 0x58, 0x9a, 0xd4, 0xad, + 0xee, 0xf6, 0x9a, 0xca, 0x1c, 0x2a, 0x42, 0xae, 0xde, 0x69, 0xf7, 0x5b, 0xed, 0x5d, 0x96, 0xe7, + 0x15, 0x21, 0x87, 0x3b, 0xdb, 0xdb, 0xb5, 0x6a, 0xfd, 0x99, 0x92, 0x54, 0x2b, 0x50, 0x88, 0x68, + 0x43, 0x8b, 0x00, 0xbd, 0x7e, 0xa7, 0xab, 0x6d, 0xb4, 0x70, 0xaf, 0x2f, 0xb2, 0xc4, 0x5e, 0xbf, + 0x8a, 0xfb, 0x92, 0x90, 0x50, 0xff, 0x33, 0x19, 0xcc, 0xa8, 0x4c, 0x0c, 0x6b, 0x17, 0x13, 0xc3, + 0x2b, 0x8c, 0x97, 0xa9, 0xe1, 0xb4, 0x11, 0x26, 0x88, 0x1f, 0x00, 0xf0, 0x85, 0x43, 0x4c, 0x4d, + 0xf7, 0xe5, 0xc4, 0x97, 0x5e, 0x73, 0x72, 0x3f, 0xb8, 0xd1, 0xc2, 0x79, 0x89, 0xae, 0xfa, 0xe8, + 0x5b, 0x50, 0x34, 0xe8, 0x70, 0x64, 0x13, 0x29, 0x9c, 0xba, 0x56, 0xb8, 0x10, 0xe2, 0xab, 0x7e, + 
0x34, 0x35, 0x4d, 0x5f, 0x4c, 0x9e, 0x7f, 0x3b, 0x11, 0x78, 0x26, 0x26, 0x1b, 0x2d, 0x42, 0x6e, + 0xb7, 0xdb, 0xa8, 0xf6, 0x5b, 0xed, 0x4d, 0x25, 0x81, 0x00, 0xb2, 0xdc, 0xd5, 0x0d, 0x25, 0xc9, + 0xb2, 0xe8, 0x7a, 0x67, 0xa7, 0xbb, 0xdd, 0xe4, 0x11, 0x0b, 0xad, 0x80, 0x12, 0x38, 0x5b, 0xe3, + 0x8e, 0x6c, 0x36, 0x94, 0x34, 0xba, 0x01, 0x4b, 0x21, 0x55, 0x4a, 0x66, 0xd0, 0x4d, 0x40, 0x21, + 0x71, 0xaa, 0x22, 0xab, 0xfe, 0x06, 0x2c, 0xd5, 0xa9, 0xe3, 0xeb, 0x96, 0x13, 0x56, 0x18, 0x8f, + 0xd9, 0xa0, 0x25, 0x49, 0xb3, 0xe4, 0x4d, 0x50, 0x6d, 0xe9, 0xfc, 0x6c, 0xbd, 0x10, 0x42, 0x5b, + 0x0d, 0x9e, 0x2a, 0xc9, 0x86, 0xc9, 0xf6, 0xef, 0xc8, 0x32, 0xb9, 0x73, 0x33, 0xb5, 0xf9, 0xf3, + 0xb3, 0xf5, 0x54, 0xb7, 0xd5, 0xc0, 0x8c, 0x86, 0xbe, 0x0c, 0x79, 0xf2, 0xd2, 0xf2, 0x35, 0x83, + 0x9d, 0x4b, 0xcc, 0x81, 0x19, 0x9c, 0x63, 0x84, 0x3a, 0x3b, 0x86, 0x6a, 0x00, 0x5d, 0xea, 0xfa, + 0xb2, 0xe7, 0xaf, 0x43, 0x66, 0x44, 0x5d, 0x7e, 0x77, 0x71, 0xe9, 0x8d, 0x1a, 0x83, 0x8b, 0x85, + 0x8a, 0x05, 0x58, 0xfd, 0x83, 0x14, 0x40, 0x5f, 0xf7, 0x0e, 0xa5, 0x92, 0xa7, 0x90, 0x0f, 0x6f, + 0x27, 0xe5, 0x25, 0xc8, 0x95, 0xb3, 0x1d, 0x82, 0xd1, 0x93, 0x60, 0xb1, 0x89, 0xda, 0x29, 0xb6, + 0x88, 0x0d, 0x3a, 0x8a, 0x2b, 0x3f, 0x2e, 0x16, 0x48, 0xec, 0x98, 0x27, 0xae, 0x2b, 0x67, 0x9e, + 0xfd, 0x44, 0x75, 0x7e, 0x2c, 0x08, 0xa7, 0xc9, 0xec, 0x3b, 0xf6, 0xda, 0x67, 0x66, 0x46, 0xb6, + 0xe6, 0xf0, 0x54, 0x0e, 0x7d, 0x0c, 0x05, 0x36, 0x6e, 0xcd, 0xe3, 0x3c, 0x99, 0x78, 0x5f, 0xea, + 0x2a, 0xa1, 0x01, 0xc3, 0x68, 0xea, 0xe5, 0x3b, 0x00, 0xfa, 0x68, 0x64, 0x5b, 0xc4, 0xd4, 0xf6, + 0x26, 0x3c, 0xd3, 0xce, 0xe3, 0xbc, 0xa4, 0xd4, 0x26, 0x6c, 0xbb, 0x04, 0x6c, 0xdd, 0xe7, 0xd9, + 0xf3, 0x35, 0x0e, 0x94, 0xe8, 0xaa, 0x5f, 0x53, 0x60, 0xd1, 0x1d, 0x3b, 0xcc, 0xa1, 0xd2, 0x3a, + 0xf5, 0xcf, 0x93, 0xf0, 0x56, 0x9b, 0xf8, 0xc7, 0xd4, 0x3d, 0xac, 0xfa, 0xbe, 0x6e, 0x1c, 0x0c, + 0x89, 0x23, 0xa7, 0x2f, 0x52, 0xd0, 0x24, 0x2e, 0x14, 0x34, 0xab, 0x30, 0xaf, 0xdb, 0x96, 0xee, + 0x11, 0x91, 0xbc, 0xe5, 0x71, 0xd0, 0x64, 0x65, 0x17, 0x2b, 0xe2, 0x88, 0xe7, 0x11, 0x71, 0xaf, + 0xc2, 0x0c, 0x0f, 0x08, 0xe8, 0xfb, 0x70, 0x53, 0xa6, 0x69, 0x7a, 0xd8, 0x15, 0x2b, 0x28, 0x82, + 0x0b, 0xda, 0x66, 0x6c, 0x55, 0x19, 0x6f, 0x9c, 0xcc, 0xe3, 0xa6, 0xe4, 0xce, 0xc8, 0x97, 0x59, + 0xe1, 0x8a, 0x19, 0xc3, 0x2a, 0x6d, 0xc2, 0xad, 0x4b, 0x45, 0x3e, 0xd7, 0xbd, 0xcd, 0x3f, 0x25, + 0x01, 0x5a, 0xdd, 0xea, 0x8e, 0x74, 0x52, 0x03, 0xb2, 0xfb, 0xfa, 0xd0, 0xb2, 0x27, 0x57, 0x45, + 0xc0, 0x29, 0xbe, 0x52, 0x15, 0xee, 0xd8, 0xe0, 0x32, 0x58, 0xca, 0xf2, 0x9a, 0x72, 0xbc, 0xe7, + 0x10, 0x3f, 0xac, 0x29, 0x79, 0x8b, 0x99, 0xe1, 0xea, 0x4e, 0xb8, 0x74, 0x45, 0x83, 0x4d, 0x00, + 0x4b, 0x79, 0x8e, 0xf5, 0x49, 0x10, 0xb6, 0x64, 0x13, 0x6d, 0xf1, 0xdb, 0x51, 0xe2, 0x1e, 0x11, + 0x73, 0x35, 0xc3, 0x9d, 0x7a, 0x9d, 0x3d, 0x58, 0xc2, 0x85, 0xef, 0x42, 0xe9, 0xd2, 0x47, 0x3c, + 0x65, 0x9a, 0xb2, 0x3e, 0x97, 0x8f, 0x1e, 0xc2, 0xc2, 0x85, 0x71, 0xbe, 0x56, 0xcc, 0xb7, 0xba, + 0xcf, 0xbf, 0xae, 0xa4, 0xe5, 0xaf, 0x6f, 0x2a, 0x59, 0xf5, 0x4f, 0x53, 0x22, 0xd0, 0x48, 0xaf, + 0xc6, 0xbf, 0x0a, 0xe4, 0xf8, 0xea, 0x36, 0xa8, 0x2d, 0x03, 0xc0, 0x7b, 0x57, 0xc7, 0x1f, 0x56, + 0xd3, 0x71, 0x38, 0x0e, 0x05, 0xd1, 0x3a, 0x14, 0xc4, 0x2a, 0xd6, 0xd8, 0x86, 0xe3, 0x6e, 0x5d, + 0xc0, 0x20, 0x48, 0x4c, 0x12, 0xdd, 0x85, 0x45, 0x7e, 0xf9, 0xe3, 0x1d, 0x10, 0x53, 0x60, 0xd2, + 0x1c, 0xb3, 0x10, 0x52, 0x39, 0x6c, 0x07, 0x8a, 0x92, 0xa0, 0xf1, 0x7c, 0x3e, 0xc3, 0x0d, 0xba, + 0x77, 0x9d, 0x41, 0x42, 0x84, 0xa7, 0xf9, 0x85, 0xd1, 0xb4, 0xa1, 0x36, 0x20, 0x17, 0x18, 0x8b, + 0x56, 0x21, 0xd5, 0xaf, 
0x77, 0x95, 0xb9, 0xd2, 0xd2, 0xc9, 0x69, 0xb9, 0x10, 0x90, 0xfb, 0xf5, + 0x2e, 0xe3, 0xec, 0x36, 0xba, 0x4a, 0xe2, 0x22, 0x67, 0xb7, 0xd1, 0x2d, 0xa5, 0x59, 0x0e, 0xa6, + 0xee, 0x43, 0x21, 0xd2, 0x03, 0x7a, 0x07, 0xe6, 0x5b, 0xed, 0x4d, 0xdc, 0xec, 0xf5, 0x94, 0xb9, + 0xd2, 0xcd, 0x93, 0xd3, 0x32, 0x8a, 0x70, 0x5b, 0xce, 0x80, 0xcd, 0x0f, 0xba, 0x03, 0xe9, 0xad, + 0x0e, 0x3b, 0xdb, 0x45, 0x01, 0x11, 0x41, 0x6c, 0x51, 0xcf, 0x2f, 0xdd, 0x90, 0xc9, 0x5d, 0x54, + 0xb1, 0xfa, 0x87, 0x09, 0xc8, 0x8a, 0xcd, 0x14, 0x3b, 0x51, 0x55, 0x98, 0x0f, 0xae, 0x09, 0x44, + 0x71, 0xf7, 0xde, 0xe5, 0x85, 0x58, 0x45, 0xd6, 0x4d, 0x62, 0xf9, 0x05, 0x72, 0xa5, 0x0f, 0xa1, + 0x18, 0x65, 0x7c, 0xae, 0xc5, 0xf7, 0x7d, 0x28, 0xb0, 0xf5, 0x1d, 0x14, 0x64, 0x8f, 0x21, 0x2b, + 0x02, 0x42, 0x78, 0xd6, 0x5c, 0x5e, 0x15, 0x4a, 0x24, 0x7a, 0x0a, 0xf3, 0xa2, 0x92, 0x0c, 0x6e, + 0x87, 0xd7, 0xae, 0xde, 0x45, 0x38, 0x80, 0xab, 0x1f, 0x43, 0xba, 0x4b, 0x88, 0xcb, 0x7c, 0xef, + 0x50, 0x93, 0x4c, 0x8f, 0x67, 0x59, 0x04, 0x9b, 0xa4, 0xd5, 0x60, 0x45, 0xb0, 0x49, 0x5a, 0x66, + 0x78, 0xff, 0x95, 0x8c, 0xdc, 0x7f, 0xf5, 0xa1, 0xf8, 0x82, 0x58, 0x83, 0x03, 0x9f, 0x98, 0x5c, + 0xd1, 0x7d, 0x48, 0x8f, 0x48, 0x68, 0xfc, 0x6a, 0xec, 0x02, 0x23, 0xc4, 0xc5, 0x1c, 0xc5, 0xe2, + 0xc8, 0x31, 0x97, 0x96, 0x4f, 0x1a, 0xb2, 0xa5, 0xfe, 0x63, 0x12, 0x16, 0x5b, 0x9e, 0x37, 0xd6, + 0x1d, 0x23, 0xc8, 0xdc, 0xbe, 0x7d, 0x31, 0x73, 0x8b, 0x7d, 0xfb, 0xb9, 0x28, 0x72, 0xf1, 0x5a, + 0x4f, 0x9e, 0x9e, 0xc9, 0xf0, 0xf4, 0x54, 0x7f, 0x9a, 0x08, 0xee, 0xee, 0xee, 0x46, 0xb6, 0xbb, + 0xa8, 0x03, 0xa3, 0x9a, 0xc8, 0xae, 0x73, 0xe8, 0xd0, 0x63, 0x07, 0xbd, 0x0d, 0x19, 0xdc, 0x6c, + 0x37, 0x5f, 0x28, 0x09, 0xb1, 0x3c, 0x2f, 0x80, 0x30, 0x71, 0xc8, 0x31, 0xd3, 0xd4, 0x6d, 0xb6, + 0x1b, 0x2c, 0xd3, 0x4a, 0xc6, 0x68, 0xea, 0x12, 0xc7, 0xb4, 0x9c, 0x01, 0x7a, 0x07, 0xb2, 0xad, + 0x5e, 0x6f, 0x97, 0x97, 0x89, 0x6f, 0x9d, 0x9c, 0x96, 0x6f, 0x5c, 0x40, 0xf1, 0x7b, 0x5b, 0x93, + 0x81, 0x58, 0x99, 0xc3, 0x72, 0xb0, 0x18, 0x10, 0xcb, 0x9f, 0x05, 0x08, 0x77, 0xfa, 0xd5, 0x7e, + 0x53, 0xc9, 0xc4, 0x80, 0x30, 0x65, 0x7f, 0xe5, 0x76, 0xfb, 0xd7, 0x24, 0x28, 0x55, 0xc3, 0x20, + 0x23, 0x9f, 0xf1, 0x65, 0x65, 0xd9, 0x87, 0xdc, 0x88, 0xfd, 0xb2, 0x48, 0x90, 0x25, 0x3d, 0x8d, + 0x7d, 0xbd, 0x9c, 0x91, 0xab, 0x60, 0x6a, 0x93, 0xaa, 0x39, 0xb4, 0x3c, 0xcf, 0xa2, 0x8e, 0xa0, + 0xe1, 0x50, 0x53, 0xe9, 0xbf, 0x12, 0x70, 0x23, 0x06, 0x81, 0x1e, 0x42, 0xda, 0xa5, 0x76, 0x30, + 0x87, 0xb7, 0x2f, 0xbb, 0x96, 0x65, 0xa2, 0x98, 0x23, 0xd1, 0x1a, 0x80, 0x3e, 0xf6, 0xa9, 0xce, + 0xfb, 0xe7, 0xb3, 0x97, 0xc3, 0x11, 0x0a, 0x7a, 0x01, 0x59, 0x8f, 0x18, 0x2e, 0x09, 0x72, 0xe9, + 0x8f, 0xff, 0xbf, 0xd6, 0x57, 0x7a, 0x5c, 0x0d, 0x96, 0xea, 0x4a, 0x15, 0xc8, 0x0a, 0x0a, 0x5b, + 0xf6, 0xa6, 0xee, 0xeb, 0xf2, 0xd2, 0x9e, 0xff, 0x66, 0xab, 0x49, 0xb7, 0x07, 0xc1, 0x6a, 0xd2, + 0xed, 0x81, 0xfa, 0x77, 0x49, 0x80, 0xe6, 0x4b, 0x9f, 0xb8, 0x8e, 0x6e, 0xd7, 0xab, 0xa8, 0x19, + 0x89, 0xfe, 0x62, 0xb4, 0x5f, 0x8d, 0x7d, 0x89, 0x08, 0x25, 0x2a, 0xf5, 0x6a, 0x4c, 0xfc, 0xbf, + 0x05, 0xa9, 0xb1, 0x2b, 0x1f, 0xa4, 0x45, 0x1e, 0xbc, 0x8b, 0xb7, 0x31, 0xa3, 0xa1, 0xe6, 0x34, + 0x6c, 0xa5, 0x2e, 0x7f, 0x76, 0x8e, 0x74, 0x10, 0x1b, 0xba, 0xd8, 0xce, 0x37, 0x74, 0xcd, 0x20, + 0xf2, 0xe4, 0x28, 0x8a, 0x9d, 0x5f, 0xaf, 0xd6, 0x89, 0xeb, 0xe3, 0xac, 0xa1, 0xb3, 0xff, 0x5f, + 0x28, 0xbe, 0xdd, 0x07, 0x98, 0x0e, 0x0d, 0xad, 0x41, 0xa6, 0xbe, 0xd1, 0xeb, 0x6d, 0x2b, 0x73, + 0x22, 0x80, 0x4f, 0x59, 0x9c, 0xac, 0xfe, 0x75, 0x12, 0x72, 0xf5, 0xaa, 0x3c, 0x56, 0xeb, 0xa0, + 0xf0, 0xa8, 0xc4, 0x9f, 0x3a, 0xc8, 0xcb, 0x91, 
0xe5, 0x4e, 0x64, 0x60, 0xb9, 0xa2, 0xa8, 0x5d, + 0x64, 0x22, 0xcc, 0xea, 0x26, 0x17, 0x40, 0x18, 0x8a, 0x44, 0x3a, 0x41, 0x33, 0xf4, 0x20, 0xc6, + 0xaf, 0x5d, 0xed, 0x2c, 0x51, 0x9e, 0x4c, 0xdb, 0x1e, 0x2e, 0x04, 0x4a, 0xea, 0xba, 0x87, 0x3e, + 0x80, 0x25, 0xcf, 0x1a, 0x38, 0x96, 0x33, 0xd0, 0x02, 0xe7, 0xf1, 0x77, 0x97, 0xda, 0xf2, 0xf9, + 0xd9, 0xfa, 0x42, 0x4f, 0xb0, 0xa4, 0x0f, 0x17, 0x24, 0xb2, 0xce, 0x5d, 0x89, 0xbe, 0x09, 0x8b, + 0x11, 0x51, 0xe6, 0x45, 0xe1, 0x76, 0xe5, 0xfc, 0x6c, 0xbd, 0x18, 0x4a, 0x3e, 0x23, 0x13, 0x5c, + 0x0c, 0x05, 0x9f, 0x11, 0x7e, 0xff, 0xb2, 0x4f, 0x5d, 0x83, 0x68, 0x2e, 0xdf, 0xd3, 0xfc, 0x04, + 0x4f, 0xe3, 0x02, 0xa7, 0x89, 0x6d, 0xae, 0x3e, 0x87, 0x1b, 0x1d, 0xd7, 0x38, 0x20, 0x9e, 0x2f, + 0x5c, 0x21, 0xbd, 0xf8, 0x31, 0xdc, 0xf6, 0x75, 0xef, 0x50, 0x3b, 0xb0, 0x3c, 0x9f, 0xba, 0x13, + 0xcd, 0x25, 0x3e, 0x71, 0x18, 0x5f, 0xe3, 0x8f, 0xb5, 0xf2, 0xd2, 0xef, 0x16, 0xc3, 0x6c, 0x09, + 0x08, 0x0e, 0x10, 0xdb, 0x0c, 0xa0, 0xb6, 0xa0, 0xc8, 0xca, 0x14, 0x79, 0x71, 0xc6, 0x46, 0x0f, + 0x36, 0x1d, 0x68, 0x6f, 0x7c, 0x4c, 0xe5, 0x6d, 0x3a, 0x10, 0x3f, 0xd5, 0xef, 0x82, 0xd2, 0xb0, + 0xbc, 0x91, 0xee, 0x1b, 0x07, 0xc1, 0x6d, 0x26, 0x6a, 0x80, 0x72, 0x40, 0x74, 0xd7, 0xdf, 0x23, + 0xba, 0xaf, 0x8d, 0x88, 0x6b, 0x51, 0xf3, 0xfa, 0x59, 0x5e, 0x0a, 0x45, 0xba, 0x5c, 0x42, 0xfd, + 0xef, 0x04, 0x00, 0xd6, 0xf7, 0x83, 0x8c, 0xec, 0x6b, 0xb0, 0xec, 0x39, 0xfa, 0xc8, 0x3b, 0xa0, + 0xbe, 0x66, 0x39, 0x3e, 0x71, 0x8f, 0x74, 0x5b, 0x5e, 0xe0, 0x28, 0x01, 0xa3, 0x25, 0xe9, 0xe8, + 0x3e, 0xa0, 0x43, 0x42, 0x46, 0x1a, 0xb5, 0x4d, 0x2d, 0x60, 0x8a, 0xa7, 0xe4, 0x34, 0x56, 0x18, + 0xa7, 0x63, 0x9b, 0xbd, 0x80, 0x8e, 0x6a, 0xb0, 0xc6, 0x86, 0x4f, 0x1c, 0xdf, 0xb5, 0x88, 0xa7, + 0xed, 0x53, 0x57, 0xf3, 0x6c, 0x7a, 0xac, 0xed, 0x53, 0xdb, 0xa6, 0xc7, 0xc4, 0x0d, 0xee, 0xc6, + 0x4a, 0x36, 0x1d, 0x34, 0x05, 0x68, 0x83, 0xba, 0x3d, 0x9b, 0x1e, 0x6f, 0x04, 0x08, 0x96, 0xb6, + 0x4d, 0xc7, 0xec, 0x5b, 0xc6, 0x61, 0x90, 0xb6, 0x85, 0xd4, 0xbe, 0x65, 0x1c, 0xa2, 0x77, 0x60, + 0x81, 0xd8, 0x84, 0x5f, 0x91, 0x08, 0x54, 0x86, 0xa3, 0x8a, 0x01, 0x91, 0x81, 0xd4, 0x4f, 0x40, + 0x69, 0x3a, 0x86, 0x3b, 0x19, 0x45, 0xe6, 0xfc, 0x3e, 0x20, 0x16, 0x24, 0x35, 0x9b, 0x1a, 0x87, + 0xda, 0x50, 0x77, 0xf4, 0x01, 0xb3, 0x4b, 0xbc, 0xf0, 0x29, 0x8c, 0xb3, 0x4d, 0x8d, 0xc3, 0x1d, + 0x49, 0x57, 0x3f, 0x00, 0xe8, 0x8d, 0x5c, 0xa2, 0x9b, 0x1d, 0x96, 0x4d, 0x30, 0xd7, 0xf1, 0x96, + 0x66, 0xca, 0x17, 0x52, 0xea, 0xca, 0xad, 0xae, 0x08, 0x46, 0x23, 0xa4, 0xab, 0xbf, 0x0c, 0x37, + 0xba, 0xb6, 0x6e, 0xf0, 0xaf, 0x05, 0xba, 0xe1, 0x93, 0x15, 0x7a, 0x0a, 0x59, 0x01, 0x95, 0x33, + 0x19, 0xbb, 0xdd, 0xa6, 0x7d, 0x6e, 0xcd, 0x61, 0x89, 0xaf, 0x15, 0x01, 0xa6, 0x7a, 0xd4, 0xbf, + 0x4c, 0x40, 0x3e, 0xd4, 0x8f, 0xca, 0xe2, 0x25, 0xc6, 0x77, 0x75, 0xcb, 0x91, 0x55, 0x7d, 0x1e, + 0x47, 0x49, 0xa8, 0x05, 0x85, 0x51, 0x28, 0x7d, 0x65, 0x3e, 0x17, 0x63, 0x35, 0x8e, 0xca, 0xa2, + 0x0f, 0x21, 0x1f, 0x3c, 0x49, 0x07, 0x11, 0xf6, 0xea, 0x17, 0xec, 0x29, 0x5c, 0xfd, 0x36, 0xc0, + 0x77, 0xa8, 0xe5, 0xf4, 0xe9, 0x21, 0x71, 0xf8, 0x13, 0x2b, 0xab, 0x09, 0x49, 0xe0, 0x45, 0xd9, + 0xe2, 0xa5, 0xbe, 0x98, 0x82, 0xf0, 0xa5, 0x51, 0x34, 0xd5, 0xbf, 0x4d, 0x42, 0x16, 0x53, 0xea, + 0xd7, 0xab, 0xa8, 0x0c, 0x59, 0x19, 0x27, 0xf8, 0xf9, 0x53, 0xcb, 0x9f, 0x9f, 0xad, 0x67, 0x44, + 0x80, 0xc8, 0x18, 0x3c, 0x32, 0x44, 0x22, 0x78, 0xf2, 0xb2, 0x08, 0x8e, 0x1e, 0x42, 0x51, 0x82, + 0xb4, 0x03, 0xdd, 0x3b, 0x10, 0x05, 0x5a, 0x6d, 0xf1, 0xfc, 0x6c, 0x1d, 0x04, 0x72, 0x4b, 0xf7, + 0x0e, 0x30, 0x08, 0x34, 0xfb, 0x8d, 0x9a, 0x50, 0xf8, 0x94, 0x5a, 0x8e, 
0xe6, 0xf3, 0x41, 0xc8, + 0xcb, 0xc4, 0xd8, 0x79, 0x9c, 0x0e, 0x55, 0x7e, 0x6f, 0x00, 0x9f, 0x4e, 0x07, 0xdf, 0x84, 0x05, + 0x97, 0x52, 0x5f, 0x84, 0x2d, 0x8b, 0x3a, 0xf2, 0x9e, 0xa2, 0x1c, 0x7b, 0x7d, 0x4d, 0xa9, 0x8f, + 0x25, 0x0e, 0x17, 0xdd, 0x48, 0x0b, 0x3d, 0x84, 0x15, 0x5b, 0xf7, 0x7c, 0x8d, 0xc7, 0x3b, 0x73, + 0xaa, 0x2d, 0xcb, 0xb7, 0x1a, 0x62, 0xbc, 0x0d, 0xce, 0x0a, 0x24, 0xd4, 0x7f, 0x4e, 0x40, 0x81, + 0x0d, 0xc6, 0xda, 0xb7, 0x0c, 0x96, 0xe4, 0x7d, 0xfe, 0xdc, 0xe3, 0x16, 0xa4, 0x0c, 0xcf, 0x95, + 0x4e, 0xe5, 0x87, 0x6f, 0xbd, 0x87, 0x31, 0xa3, 0xa1, 0x4f, 0x20, 0x2b, 0xef, 0x4b, 0x44, 0xda, + 0xa1, 0x5e, 0x9f, 0x8e, 0x4a, 0xdf, 0x48, 0x39, 0xbe, 0x96, 0xa7, 0xd6, 0x89, 0x43, 0x00, 0x47, + 0x49, 0xe8, 0x26, 0x24, 0x0d, 0xe1, 0x2e, 0xf9, 0x41, 0x4b, 0xbd, 0x8d, 0x93, 0x86, 0xa3, 0xfe, + 0x28, 0x01, 0x0b, 0xd3, 0x0d, 0xcf, 0x56, 0xc0, 0x6d, 0xc8, 0x7b, 0xe3, 0x3d, 0x6f, 0xe2, 0xf9, + 0x64, 0x18, 0x3c, 0x1f, 0x87, 0x04, 0xd4, 0x82, 0xbc, 0x6e, 0x0f, 0xa8, 0x6b, 0xf9, 0x07, 0x43, + 0x59, 0x89, 0xc6, 0xa7, 0x0a, 0x51, 0x9d, 0x95, 0x6a, 0x20, 0x82, 0xa7, 0xd2, 0xc1, 0xb9, 0x2f, + 0xbe, 0x31, 0xe0, 0xe7, 0xfe, 0xdb, 0x50, 0xb4, 0xf5, 0x21, 0xbf, 0x40, 0xf2, 0xad, 0xa1, 0x18, + 0x47, 0x1a, 0x17, 0x24, 0xad, 0x6f, 0x0d, 0x89, 0xaa, 0x42, 0x3e, 0x54, 0x86, 0x96, 0xa0, 0x50, + 0x6d, 0xf6, 0xb4, 0x47, 0x8f, 0x9f, 0x6a, 0x9b, 0xf5, 0x1d, 0x65, 0x4e, 0xe6, 0xa6, 0x7f, 0x95, + 0x80, 0x05, 0x19, 0x8e, 0x64, 0xbe, 0xff, 0x0e, 0xcc, 0xbb, 0xfa, 0xbe, 0x1f, 0x54, 0x24, 0x69, + 0xb1, 0xaa, 0x59, 0x84, 0x67, 0x15, 0x09, 0x63, 0xc5, 0x57, 0x24, 0x91, 0x0f, 0x1a, 0x52, 0x57, + 0x7e, 0xd0, 0x90, 0xfe, 0xb9, 0x7c, 0xd0, 0xa0, 0xfe, 0x26, 0xc0, 0x86, 0x65, 0x93, 0xbe, 0xb8, + 0x6b, 0x8a, 0xab, 0x2f, 0x59, 0x0e, 0x27, 0xef, 0x32, 0x83, 0x1c, 0xae, 0xd5, 0xc0, 0x8c, 0xc6, + 0x58, 0x03, 0xcb, 0x94, 0x9b, 0x91, 0xb3, 0x36, 0x19, 0x6b, 0x60, 0x99, 0xe1, 0xcb, 0x5b, 0xfa, + 0xba, 0x97, 0xb7, 0xd3, 0x04, 0x2c, 0xc9, 0xdc, 0x35, 0x0c, 0xbf, 0x5f, 0x85, 0xbc, 0x48, 0x63, + 0xa7, 0x05, 0x1d, 0x7f, 0xc4, 0x17, 0xb8, 0x56, 0x03, 0xe7, 0x04, 0xbb, 0x65, 0xa2, 0x75, 0x28, + 0x48, 0x68, 0xe4, 0xe3, 0x27, 0x10, 0xa4, 0x36, 0x33, 0xff, 0xeb, 0x90, 0xde, 0xb7, 0x6c, 0x22, + 0x17, 0x7a, 0x6c, 0x00, 0x98, 0x3a, 0x60, 0x6b, 0x0e, 0x73, 0x74, 0x2d, 0x17, 0x5c, 0xc6, 0x71, + 0xfb, 0x64, 0xd9, 0x19, 0xb5, 0x4f, 0x54, 0xa0, 0x33, 0xf6, 0x09, 0x1c, 0xb3, 0x4f, 0xb0, 0x85, + 0x7d, 0x12, 0x1a, 0xb5, 0x4f, 0x90, 0x7e, 0x2e, 0xf6, 0x6d, 0xc3, 0xcd, 0x9a, 0xad, 0x1b, 0x87, + 0xb6, 0xe5, 0xf9, 0xc4, 0x8c, 0x46, 0x8c, 0xc7, 0x90, 0xbd, 0x90, 0x74, 0x5e, 0x75, 0x6b, 0x29, + 0x91, 0xea, 0x7f, 0x24, 0xa0, 0xb8, 0x45, 0x74, 0xdb, 0x3f, 0x98, 0x5e, 0x0d, 0xf9, 0xc4, 0xf3, + 0xe5, 0x61, 0xc5, 0x7f, 0xa3, 0x6f, 0x40, 0x2e, 0xcc, 0x49, 0xae, 0x7d, 0x7f, 0x0b, 0xa1, 0xe8, + 0x09, 0xcc, 0xb3, 0x3d, 0x46, 0xc7, 0x41, 0xb1, 0x73, 0xd5, 0xd3, 0x8e, 0x44, 0xb2, 0x43, 0xc6, + 0x25, 0x3c, 0x09, 0xe1, 0x4b, 0x29, 0x83, 0x83, 0x26, 0xfa, 0x45, 0x28, 0xf2, 0x97, 0x89, 0x20, + 0xe7, 0xca, 0x5c, 0xa7, 0xb3, 0x20, 0x1e, 0x17, 0x45, 0xbe, 0xf5, 0xbf, 0x09, 0x58, 0xd9, 0xd1, + 0x27, 0x7b, 0x44, 0x86, 0x0d, 0x62, 0x62, 0x62, 0x50, 0xd7, 0x44, 0xdd, 0x68, 0xb8, 0xb9, 0xe2, + 0xad, 0x32, 0x4e, 0x38, 0x3e, 0xea, 0x04, 0x05, 0x58, 0x32, 0x52, 0x80, 0xad, 0x40, 0xc6, 0xa1, + 0x8e, 0x41, 0x64, 0x2c, 0x12, 0x0d, 0xd5, 0x8a, 0x86, 0x9a, 0x52, 0xf8, 0x8c, 0xc8, 0x1f, 0x01, + 0xdb, 0xd4, 0x0f, 0x7b, 0x43, 0x9f, 0x40, 0xa9, 0xd7, 0xac, 0xe3, 0x66, 0xbf, 0xd6, 0xf9, 0xae, + 0xd6, 0xab, 0x6e, 0xf7, 0xaa, 0x8f, 0x1f, 0x6a, 0xdd, 0xce, 0xf6, 0xf7, 0x1e, 0x3d, 0x79, 0xf8, + 
0x0d, 0x25, 0x51, 0x2a, 0x9f, 0x9c, 0x96, 0x6f, 0xb7, 0xab, 0xf5, 0x6d, 0xb1, 0x63, 0xf6, 0xe8, + 0xcb, 0x9e, 0x6e, 0x7b, 0xfa, 0xe3, 0x87, 0x5d, 0x6a, 0x4f, 0x18, 0x86, 0x2d, 0xeb, 0x62, 0xf4, + 0xbc, 0x8a, 0x1e, 0xc3, 0x89, 0x4b, 0x8f, 0xe1, 0xe9, 0x69, 0x9e, 0xbc, 0xe4, 0x34, 0xdf, 0x80, + 0x15, 0xc3, 0xa5, 0x9e, 0xa7, 0xb1, 0xec, 0x9f, 0x98, 0x33, 0xf5, 0xc5, 0x97, 0xce, 0xcf, 0xd6, + 0x97, 0xeb, 0x8c, 0xdf, 0xe3, 0x6c, 0xa9, 0x7e, 0xd9, 0x88, 0x90, 0x78, 0x4f, 0xea, 0x1f, 0xa5, + 0x58, 0x22, 0x65, 0x1d, 0x59, 0x36, 0x19, 0x10, 0x0f, 0x3d, 0x87, 0x25, 0xc3, 0x25, 0x26, 0x4b, + 0xeb, 0x75, 0x3b, 0xfa, 0x11, 0xed, 0x2f, 0xc4, 0xe6, 0x34, 0xa1, 0x60, 0xa5, 0x1e, 0x4a, 0xf5, + 0x46, 0xc4, 0xc0, 0x8b, 0xc6, 0x85, 0x36, 0xfa, 0x14, 0x96, 0x3c, 0x62, 0x5b, 0xce, 0xf8, 0xa5, + 0x66, 0x50, 0xc7, 0x27, 0x2f, 0x83, 0x17, 0xb1, 0xeb, 0xf4, 0xf6, 0x9a, 0xdb, 0x4c, 0xaa, 0x2e, + 0x84, 0x6a, 0xe8, 0xfc, 0x6c, 0x7d, 0xf1, 0x22, 0x0d, 0x2f, 0x4a, 0xcd, 0xb2, 0x5d, 0x6a, 0xc3, + 0xe2, 0x45, 0x6b, 0xd0, 0x8a, 0xdc, 0xfb, 0x3c, 0x84, 0x04, 0x7b, 0x1b, 0xdd, 0x86, 0x9c, 0x4b, + 0x06, 0x96, 0xe7, 0xbb, 0xc2, 0xcd, 0x8c, 0x13, 0x52, 0xd8, 0xce, 0x17, 0x5f, 0x40, 0x95, 0x7e, + 0x1d, 0x66, 0x7a, 0x64, 0x9b, 0xc5, 0xb4, 0x3c, 0x7d, 0x4f, 0xaa, 0xcc, 0xe1, 0xa0, 0xc9, 0xd6, + 0xe0, 0xd8, 0x0b, 0x13, 0x35, 0xfe, 0x9b, 0xd1, 0x78, 0x46, 0x21, 0xbf, 0x07, 0xe3, 0x39, 0x43, + 0xf0, 0x61, 0x69, 0x3a, 0xf2, 0x61, 0xe9, 0x0a, 0x64, 0x6c, 0x72, 0x44, 0x6c, 0x71, 0x96, 0x63, + 0xd1, 0xb8, 0xf7, 0x10, 0x8a, 0xc1, 0x17, 0x8c, 0xfc, 0xcb, 0x89, 0x1c, 0xa4, 0xfb, 0xd5, 0xde, + 0x33, 0x65, 0x0e, 0x01, 0x64, 0xc5, 0xe2, 0x14, 0xaf, 0x75, 0xf5, 0x4e, 0x7b, 0xa3, 0xb5, 0xa9, + 0x24, 0xef, 0xfd, 0x2c, 0x05, 0xf9, 0xf0, 0xbd, 0x88, 0x9d, 0x1d, 0xed, 0xe6, 0x8b, 0x60, 0x75, + 0x87, 0xf4, 0x36, 0x39, 0x46, 0x6f, 0x4f, 0x6f, 0xa1, 0x3e, 0x11, 0x0f, 0xe4, 0x21, 0x3b, 0xb8, + 0x81, 0x7a, 0x17, 0x72, 0xd5, 0x5e, 0xaf, 0xb5, 0xd9, 0x6e, 0x36, 0x94, 0xcf, 0x12, 0xa5, 0x2f, + 0x9d, 0x9c, 0x96, 0x97, 0x43, 0x50, 0xd5, 0x13, 0x8b, 0x8f, 0xa3, 0xea, 0xf5, 0x66, 0xb7, 0xdf, + 0x6c, 0x28, 0xaf, 0x92, 0xb3, 0x28, 0x7e, 0xab, 0xc2, 0x3f, 0xdd, 0xc9, 0x77, 0x71, 0xb3, 0x5b, + 0xc5, 0xac, 0xc3, 0xcf, 0x92, 0xe2, 0x72, 0x6c, 0xda, 0xa3, 0x4b, 0x46, 0xba, 0xcb, 0xfa, 0x5c, + 0x0b, 0xbe, 0x85, 0x7b, 0x95, 0x12, 0x9f, 0x77, 0x4c, 0x1f, 0xbf, 0x88, 0x6e, 0x4e, 0x58, 0x6f, + 0xfc, 0xd5, 0x91, 0xab, 0x49, 0xcd, 0xf4, 0xd6, 0x63, 0xb1, 0x87, 0x69, 0x51, 0x61, 0x1e, 0xef, + 0xb6, 0xdb, 0x0c, 0xf4, 0x2a, 0x3d, 0x33, 0x3a, 0x3c, 0x76, 0x58, 0xc5, 0x8c, 0xee, 0x42, 0x2e, + 0x78, 0x94, 0x54, 0x3e, 0x4b, 0xcf, 0x18, 0x54, 0x0f, 0x5e, 0x54, 0x79, 0x87, 0x5b, 0xbb, 0x7d, + 0xfe, 0xa9, 0xde, 0xab, 0xcc, 0x6c, 0x87, 0x07, 0x63, 0xdf, 0xa4, 0xc7, 0x0e, 0xdb, 0xb3, 0xf2, + 0x1e, 0xee, 0xb3, 0x8c, 0xb8, 0xb4, 0x08, 0x31, 0xf2, 0x12, 0xee, 0x5d, 0xc8, 0xe1, 0xe6, 0x77, + 0xc4, 0x57, 0x7d, 0xaf, 0xb2, 0x33, 0x7a, 0x30, 0xf9, 0x94, 0x18, 0xb2, 0xb7, 0x0e, 0xee, 0x6e, + 0x55, 0xb9, 0xcb, 0x67, 0x51, 0x1d, 0x77, 0x74, 0xa0, 0x3b, 0xc4, 0x9c, 0x7e, 0xe3, 0x12, 0xb2, + 0xee, 0xfd, 0x0a, 0xe4, 0x82, 0xcc, 0x14, 0xad, 0x41, 0xf6, 0x45, 0x07, 0x3f, 0x6b, 0x62, 0x65, + 0x4e, 0xf8, 0x30, 0xe0, 0xbc, 0x10, 0x35, 0x45, 0x19, 0xe6, 0x77, 0xaa, 0xed, 0xea, 0x66, 0x13, + 0x07, 0x57, 0xe4, 0x01, 0x40, 0xa6, 0x57, 0x25, 0x45, 0x76, 0x10, 0xea, 0xac, 0xad, 0xfe, 0xf0, + 0x27, 0x6b, 0x73, 0x3f, 0xfe, 0xc9, 0xda, 0xdc, 0xab, 0xf3, 0xb5, 0xc4, 0x0f, 0xcf, 0xd7, 0x12, + 0xff, 0x70, 0xbe, 0x96, 0xf8, 0xf7, 0xf3, 0xb5, 0xc4, 0x5e, 0x96, 0x1f, 0x02, 0x4f, 0xfe, 0x2f, + 0x00, 0x00, 0xff, 0xff, 
0x4b, 0xdb, 0xdc, 0xec, 0xf0, 0x31, 0x00, 0x00, } diff --git a/components/engine/vendor/github.com/docker/swarmkit/api/types.proto b/components/engine/vendor/github.com/docker/swarmkit/api/types.proto index eaf037e77d..890b3cfc3f 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/api/types.proto +++ b/components/engine/vendor/github.com/docker/swarmkit/api/types.proto @@ -214,6 +214,18 @@ message Mount { // ReadOnly should be set to true if the mount should not be writable. bool readonly = 4 [(gogoproto.customname) = "ReadOnly"]; + // Consistency indicates the tolerable level of file system consistency + enum Consistency { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "MountConsistency"; + + DEFAULT = 0 [(gogoproto.enumvalue_customname) = "MountConsistencyDefault"]; + CONSISTENT = 1 [(gogoproto.enumvalue_customname) = "MountConsistencyFull"]; + CACHED = 2 [(gogoproto.enumvalue_customname) = "MountConsistencyCached"]; + DELEGATED = 3 [(gogoproto.enumvalue_customname) = "MountConsistencyDelegated"]; + } + Consistency consistency = 8; + // BindOptions specifies options that are specific to a bind mount. message BindOptions { enum Propagation { diff --git a/components/engine/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go b/components/engine/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go index 2cfc95130d..3b31d07817 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go +++ b/components/engine/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go @@ -48,8 +48,10 @@ type cnmNetworkAllocator struct { tasks map[string]struct{} // Allocator state to indicate if allocation has been - // successfully completed for this node. - nodes map[string]struct{} + // successfully completed for this node on this network. + // outer map key: node id + // inner map key: network id + nodes map[string]map[string]struct{} } // Local in-memory state related to network that need to be tracked by cnmNetworkAllocator @@ -89,7 +91,7 @@ func New(pg plugingetter.PluginGetter) (networkallocator.NetworkAllocator, error networks: make(map[string]*network), services: make(map[string]struct{}), tasks: make(map[string]struct{}), - nodes: make(map[string]struct{}), + nodes: make(map[string]map[string]struct{}), } // There are no driver configurations and notification @@ -430,56 +432,6 @@ func (na *cnmNetworkAllocator) IsServiceAllocated(s *api.Service, flags ...func( return true } -// IsNodeAllocated returns if the passed node has its network resources allocated or not. -func (na *cnmNetworkAllocator) IsNodeAllocated(node *api.Node) bool { - // If the node is not found in the allocated set, then it is - // not allocated. - if _, ok := na.nodes[node.ID]; !ok { - return false - } - - // If no attachment, not allocated. - if node.Attachment == nil { - return false - } - - // If the network is not allocated, the node cannot be allocated. - localNet, ok := na.networks[node.Attachment.Network.ID] - if !ok { - return false - } - - // Addresses empty, not allocated. - if len(node.Attachment.Addresses) == 0 { - return false - } - - // The allocated IP address not found in local endpoint state. Not allocated. - if _, ok := localNet.endpoints[node.Attachment.Addresses[0]]; !ok { - return false - } - - return true -} - -// AllocateNode allocates the IP addresses for the network to which -// the node is attached. 
-func (na *cnmNetworkAllocator) AllocateNode(node *api.Node) error { - if err := na.allocateNetworkIPs(node.Attachment); err != nil { - return err - } - - na.nodes[node.ID] = struct{}{} - return nil -} - -// DeallocateNode deallocates the IP addresses for the network to -// which the node is attached. -func (na *cnmNetworkAllocator) DeallocateNode(node *api.Node) error { - delete(na.nodes, node.ID) - return na.releaseEndpoints([]*api.NetworkAttachment{node.Attachment}) -} - // AllocateTask allocates all the endpoint resources for all the // networks that a task is attached to. func (na *cnmNetworkAllocator) AllocateTask(t *api.Task) error { @@ -489,7 +441,7 @@ func (na *cnmNetworkAllocator) AllocateTask(t *api.Task) error { } if err := na.allocateNetworkIPs(nAttach); err != nil { if err := na.releaseEndpoints(t.Networks[:i]); err != nil { - log.G(context.TODO()).WithError(err).Errorf("Failed to release IP addresses while rolling back allocation for task %s network %s", t.ID, nAttach.Network.ID) + log.G(context.TODO()).WithError(err).Errorf("failed to release IP addresses while rolling back allocation for task %s network %s", t.ID, nAttach.Network.ID) } return errors.Wrapf(err, "failed to allocate network IP for task %s network %s", t.ID, nAttach.Network.ID) } @@ -507,6 +459,75 @@ func (na *cnmNetworkAllocator) DeallocateTask(t *api.Task) error { return na.releaseEndpoints(t.Networks) } +// IsLBAttachmentAllocated returns if the passed node and network has resources allocated or not. +func (na *cnmNetworkAllocator) IsLBAttachmentAllocated(node *api.Node, networkAttachment *api.NetworkAttachment) bool { + if node == nil { + return false + } + + if networkAttachment == nil { + return false + } + + // If the node is not found in the allocated set, then it is + // not allocated. + if _, ok := na.nodes[node.ID]; !ok { + return false + } + + // If the nework is not found in the allocated set, then it is + // not allocated. + if _, ok := na.nodes[node.ID][networkAttachment.Network.ID]; !ok { + return false + } + + // If the network is not allocated, the node cannot be allocated. + localNet, ok := na.networks[networkAttachment.Network.ID] + if !ok { + return false + } + + // Addresses empty, not allocated. + if len(networkAttachment.Addresses) == 0 { + return false + } + + // The allocated IP address not found in local endpoint state. Not allocated. + if _, ok := localNet.endpoints[networkAttachment.Addresses[0]]; !ok { + return false + } + + return true +} + +// AllocateLBAttachment allocates the IP addresses for a LB in a network +// on a given node +func (na *cnmNetworkAllocator) AllocateLBAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error { + + if err := na.allocateNetworkIPs(networkAttachment); err != nil { + return err + } + + if na.nodes[node.ID] == nil { + na.nodes[node.ID] = make(map[string]struct{}) + } + na.nodes[node.ID][networkAttachment.Network.ID] = struct{}{} + + return nil +} + +// DeallocateLBAttachment deallocates the IP addresses for a LB in a network to +// which the node is attached. 
+func (na *cnmNetworkAllocator) DeallocateLBAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error { + + delete(na.nodes[node.ID], networkAttachment.Network.ID) + if len(na.nodes[node.ID]) == 0 { + delete(na.nodes, node.ID) + } + + return na.releaseEndpoints([]*api.NetworkAttachment{networkAttachment}) +} + func (na *cnmNetworkAllocator) releaseEndpoints(networks []*api.NetworkAttachment) error { for _, nAttach := range networks { localNet := na.getNetwork(nAttach.Network.ID) diff --git a/components/engine/vendor/github.com/docker/swarmkit/manager/allocator/network.go b/components/engine/vendor/github.com/docker/swarmkit/manager/allocator/network.go index c760ad53dc..794e616c10 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/manager/allocator/network.go +++ b/components/engine/vendor/github.com/docker/swarmkit/manager/allocator/network.go @@ -154,11 +154,10 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) { // First, allocate objects that already have addresses associated with // them, to reserve these IP addresses in internal state. - if nc.ingressNetwork != nil { - if err := a.allocateNodes(ctx, true); err != nil { - return err - } + if err := a.allocateNodes(ctx, true); err != nil { + return err } + if err := a.allocateServices(ctx, true); err != nil { return err } @@ -166,12 +165,10 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) { return err } - // Now allocate objects that don't have addresses yet. - if nc.ingressNetwork != nil { - if err := a.allocateNodes(ctx, false); err != nil { - return err - } + if err := a.allocateNodes(ctx, false); err != nil { + return err } + if err := a.allocateServices(ctx, false); err != nil { return err } @@ -208,22 +205,22 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) { }); err != nil { log.G(ctx).WithError(err).Errorf("Failed to commit allocation for network %s", n.ID) } - if IsIngressNetwork(n) { nc.ingressNetwork = n - err := a.allocateNodes(ctx, false) - if err != nil { - log.G(ctx).WithError(err).Error(err) - } + } + err := a.allocateNodes(ctx, false) + if err != nil { + log.G(ctx).WithError(err).Error(err) } case api.EventDeleteNetwork: n := v.Network.Copy() if IsIngressNetwork(n) && nc.ingressNetwork != nil && nc.ingressNetwork.ID == n.ID { nc.ingressNetwork = nil - if err := a.deallocateNodes(ctx); err != nil { - log.G(ctx).WithError(err).Error(err) - } + } + + if err := a.deallocateNodeAttachments(ctx, n.ID); err != nil { + log.G(ctx).WithError(err).Error(err) } // The assumption here is that all dependent objects @@ -361,33 +358,67 @@ func (a *Allocator) doNodeAlloc(ctx context.Context, ev events.Event) { nc := a.netCtx if isDelete { - if nc.nwkAllocator.IsNodeAllocated(node) { - if err := nc.nwkAllocator.DeallocateNode(node); err != nil { - log.G(ctx).WithError(err).Errorf("Failed freeing network resources for node %s", node.ID) - } else { - nc.somethingWasDeallocated = true + if err := a.deallocateNode(node); err != nil { + log.G(ctx).WithError(err).Errorf("Failed freeing network resources for node %s", node.ID) + } else { + nc.somethingWasDeallocated = true + } + } else { + allocatedNetworks, err := a.getAllocatedNetworks() + if err != nil { + log.G(ctx).WithError(err).Errorf("Error listing allocated networks in network %s", node.ID) + } + + isAllocated := a.allocateNode(ctx, node, false, allocatedNetworks) + + if isAllocated { + if err := a.store.Batch(func(batch *store.Batch) error { + return a.commitAllocatedNode(ctx, batch, 
node) + }); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to commit allocation of network resources for node %s", node.ID) } } - return + } +} + +func isOverlayNetwork(n *api.Network) bool { + if n.DriverState != nil && n.DriverState.Name == "overlay" { + return true } - if !nc.nwkAllocator.IsNodeAllocated(node) && nc.ingressNetwork != nil { - if node.Attachment == nil { - node.Attachment = &api.NetworkAttachment{} - } + if n.Spec.DriverConfig != nil && n.Spec.DriverConfig.Name == "overlay" { + return true + } - node.Attachment.Network = nc.ingressNetwork.Copy() - if err := a.allocateNode(ctx, node); err != nil { - log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s", node.ID) - return - } + return false +} - if err := a.store.Batch(func(batch *store.Batch) error { - return a.commitAllocatedNode(ctx, batch, node) - }); err != nil { - log.G(ctx).WithError(err).Errorf("Failed to commit allocation of network resources for node %s", node.ID) +func (a *Allocator) getAllocatedNetworks() ([]*api.Network, error) { + var ( + err error + nc = a.netCtx + na = nc.nwkAllocator + allocatedNetworks []*api.Network + ) + + // Find allocated networks + var networks []*api.Network + a.store.View(func(tx store.ReadTx) { + networks, err = store.FindNetworks(tx, store.All) + }) + + if err != nil { + return nil, errors.Wrap(err, "error listing all networks in store while trying to allocate during init") + } + + for _, n := range networks { + + if isOverlayNetwork(n) && na.IsAllocated(n) { + allocatedNetworks = append(allocatedNetworks, n) } } + + return allocatedNetworks, nil } func (a *Allocator) allocateNodes(ctx context.Context, existingAddressesOnly bool) error { @@ -396,7 +427,6 @@ func (a *Allocator) allocateNodes(ctx context.Context, existingAddressesOnly boo allocatedNodes []*api.Node nodes []*api.Node err error - nc = a.netCtx ) a.store.View(func(tx store.ReadTx) { @@ -406,26 +436,16 @@ func (a *Allocator) allocateNodes(ctx context.Context, existingAddressesOnly boo return errors.Wrap(err, "error listing all nodes in store while trying to allocate network resources") } + allocatedNetworks, err := a.getAllocatedNetworks() + if err != nil { + return errors.Wrap(err, "error listing all nodes in store while trying to allocate network resources") + } + for _, node := range nodes { - if nc.nwkAllocator.IsNodeAllocated(node) { - continue + isAllocated := a.allocateNode(ctx, node, existingAddressesOnly, allocatedNetworks) + if isAllocated { + allocatedNodes = append(allocatedNodes, node) } - - if node.Attachment == nil { - node.Attachment = &api.NetworkAttachment{} - } - - if existingAddressesOnly && len(node.Attachment.Addresses) == 0 { - continue - } - - node.Attachment.Network = nc.ingressNetwork.Copy() - if err := a.allocateNode(ctx, node); err != nil { - log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s", node.ID) - continue - } - - allocatedNodes = append(allocatedNodes, node) } if err := a.store.Batch(func(batch *store.Batch) error { @@ -457,21 +477,90 @@ func (a *Allocator) deallocateNodes(ctx context.Context) error { } for _, node := range nodes { - if nc.nwkAllocator.IsNodeAllocated(node) { - if err := nc.nwkAllocator.DeallocateNode(node); err != nil { - log.G(ctx).WithError(err).Errorf("Failed freeing network resources for node %s", node.ID) - } else { - nc.somethingWasDeallocated = true + if err := a.deallocateNode(node); err != nil { + log.G(ctx).WithError(err).Errorf("Failed freeing network resources for node %s", node.ID) + 
} else { + nc.somethingWasDeallocated = true + } + if err := a.store.Batch(func(batch *store.Batch) error { + return a.commitAllocatedNode(ctx, batch, node) + }); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to commit deallocation of network resources for node %s", node.ID) + } + } + + return nil +} + +func (a *Allocator) deallocateNodeAttachments(ctx context.Context, nid string) error { + var ( + nodes []*api.Node + nc = a.netCtx + err error + ) + + a.store.View(func(tx store.ReadTx) { + nodes, err = store.FindNodes(tx, store.All) + }) + if err != nil { + return fmt.Errorf("error listing all nodes in store while trying to free network resources") + } + + for _, node := range nodes { + + var networkAttachment *api.NetworkAttachment + var naIndex int + for index, na := range node.LbAttachments { + if na.Network.ID == nid { + networkAttachment = na + naIndex = index + break } - node.Attachment = nil - if err := a.store.Batch(func(batch *store.Batch) error { - return a.commitAllocatedNode(ctx, batch, node) - }); err != nil { + } + + if networkAttachment == nil { + log.G(ctx).Errorf("Failed to find network %s on node %s", nid, node.ID) + continue + } + + if nc.nwkAllocator.IsLBAttachmentAllocated(node, networkAttachment) { + if err := nc.nwkAllocator.DeallocateLBAttachment(node, networkAttachment); err != nil { log.G(ctx).WithError(err).Errorf("Failed to commit deallocation of network resources for node %s", node.ID) + } else { + + // Delete the lbattachment + node.LbAttachments[naIndex] = node.LbAttachments[len(node.LbAttachments)-1] + node.LbAttachments[len(node.LbAttachments)-1] = nil + node.LbAttachments = node.LbAttachments[:len(node.LbAttachments)-1] + + if err := a.store.Batch(func(batch *store.Batch) error { + return a.commitAllocatedNode(ctx, batch, node) + }); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to commit deallocation of network resources for node %s", node.ID) + } + + } + } + + } + return nil +} + +func (a *Allocator) deallocateNode(node *api.Node) error { + var ( + nc = a.netCtx + ) + + for _, na := range node.LbAttachments { + if nc.nwkAllocator.IsLBAttachmentAllocated(node, na) { + if err := nc.nwkAllocator.DeallocateLBAttachment(node, na); err != nil { + return err } } } + node.LbAttachments = nil + return nil } @@ -758,8 +847,48 @@ func (a *Allocator) doTaskAlloc(ctx context.Context, ev events.Event) { nc.pendingTasks[t.ID] = t } -func (a *Allocator) allocateNode(ctx context.Context, node *api.Node) error { - return a.netCtx.nwkAllocator.AllocateNode(node) +func (a *Allocator) allocateNode(ctx context.Context, node *api.Node, existingAddressesOnly bool, networks []*api.Network) bool { + var allocated bool + + nc := a.netCtx + + for _, network := range networks { + + var lbAttachment *api.NetworkAttachment + for _, na := range node.LbAttachments { + if na.Network != nil && na.Network.ID == network.ID { + lbAttachment = na + break + } + } + + if lbAttachment != nil { + if nc.nwkAllocator.IsLBAttachmentAllocated(node, lbAttachment) { + continue + } + } + + if lbAttachment == nil { + lbAttachment = &api.NetworkAttachment{} + node.LbAttachments = append(node.LbAttachments, lbAttachment) + } + + if existingAddressesOnly && len(lbAttachment.Addresses) == 0 { + continue + } + + lbAttachment.Network = network.Copy() + if err := a.netCtx.nwkAllocator.AllocateLBAttachment(node, lbAttachment); err != nil { + log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s", node.ID) + // TODO: Should we add a unallocatedNode and retry 
allocating resources like we do for network, tasks, services? + // right now, we will only retry allocating network resources for the node when the node is updated. + continue + } + + allocated = true + } + return allocated + } func (a *Allocator) commitAllocatedNode(ctx context.Context, batch *store.Batch, node *api.Node) error { @@ -768,13 +897,13 @@ func (a *Allocator) commitAllocatedNode(ctx context.Context, batch *store.Batch, if err == store.ErrSequenceConflict { storeNode := store.GetNode(tx, node.ID) - storeNode.Attachment = node.Attachment.Copy() + storeNode.LbAttachments = node.LbAttachments err = store.UpdateNode(tx, storeNode) } return errors.Wrapf(err, "failed updating state in store transaction for node %s", node.ID) }); err != nil { - if err := a.netCtx.nwkAllocator.DeallocateNode(node); err != nil { + if err := a.deallocateNode(node); err != nil { log.G(ctx).WithError(err).Errorf("failed rolling back allocation of node %s", node.ID) } diff --git a/components/engine/vendor/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go b/components/engine/vendor/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go index 04dd168126..5fc5801d49 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go +++ b/components/engine/vendor/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go @@ -65,22 +65,6 @@ type NetworkAllocator interface { // allocations for its published ports in host (non ingress) mode HostPublishPortsNeedUpdate(s *api.Service) bool - // - // Node Allocation - // - - // IsNodeAllocated returns if the passed node has its network - // resources allocated or not. - IsNodeAllocated(node *api.Node) bool - - // AllocateNode allocates the IP addresses for the network to which - // the node is attached. - AllocateNode(node *api.Node) error - - // DeallocateNode deallocates the IP addresses for the network to - // which the node is attached. - DeallocateNode(node *api.Node) error - // // Task Allocation // @@ -96,6 +80,15 @@ type NetworkAllocator interface { // DeallocateTask releases all the endpoint resources for all the // networks that a task is attached to. DeallocateTask(t *api.Task) error + + // AllocateLBAttachment Allocates a load balancer endpoint for the node + AllocateLBAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error + + // DeallocateLBAttachment Deallocates a load balancer endpoint for the node + DeallocateLBAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error + + //IsLBAttachmentAllocated If lb endpoint is allocated on the node + IsLBAttachmentAllocated(node *api.Node, networkAttachment *api.NetworkAttachment) bool } // IsIngressNetwork check if the network is an ingress network diff --git a/components/engine/vendor/github.com/sirupsen/logrus/README.md b/components/engine/vendor/github.com/sirupsen/logrus/README.md index 82aeb4eef3..4f5ce576dc 100644 --- a/components/engine/vendor/github.com/sirupsen/logrus/README.md +++ b/components/engine/vendor/github.com/sirupsen/logrus/README.md @@ -1,7 +1,7 @@ # Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. 
+the standard library logger. **Seeing weird case-sensitive problems?** It's in the past been possible to import Logrus as both upper- and lower-case. Due to the Go package environment, @@ -372,6 +372,7 @@ The built-in logging formatters are: Third party logging formatters: +* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can by parsed by Kubernetes and Google Container Engine. * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. diff --git a/components/engine/vendor/github.com/sirupsen/logrus/entry.go b/components/engine/vendor/github.com/sirupsen/logrus/entry.go index 320e5d5b8b..5bf582ef27 100644 --- a/components/engine/vendor/github.com/sirupsen/logrus/entry.go +++ b/components/engine/vendor/github.com/sirupsen/logrus/entry.go @@ -35,6 +35,7 @@ type Entry struct { Time time.Time // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. Level Level // Message passed to Debug, Info, Warn, Error, Fatal or Panic diff --git a/components/engine/vendor/github.com/sirupsen/logrus/exported.go b/components/engine/vendor/github.com/sirupsen/logrus/exported.go index 1aeaa90ba2..013183edab 100644 --- a/components/engine/vendor/github.com/sirupsen/logrus/exported.go +++ b/components/engine/vendor/github.com/sirupsen/logrus/exported.go @@ -31,7 +31,7 @@ func SetFormatter(formatter Formatter) { func SetLevel(level Level) { std.mu.Lock() defer std.mu.Unlock() - std.setLevel(level) + std.SetLevel(level) } // GetLevel returns the standard logger level. diff --git a/components/engine/vendor/github.com/sirupsen/logrus/formatter.go b/components/engine/vendor/github.com/sirupsen/logrus/formatter.go index b5fbe934d1..b183ff5b1d 100644 --- a/components/engine/vendor/github.com/sirupsen/logrus/formatter.go +++ b/components/engine/vendor/github.com/sirupsen/logrus/formatter.go @@ -2,7 +2,7 @@ package logrus import "time" -const DefaultTimestampFormat = time.RFC3339 +const defaultTimestampFormat = time.RFC3339 // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. It exposes all the fields, including the default ones: diff --git a/components/engine/vendor/github.com/sirupsen/logrus/json_formatter.go b/components/engine/vendor/github.com/sirupsen/logrus/json_formatter.go index e787ea1750..fb01c1b104 100644 --- a/components/engine/vendor/github.com/sirupsen/logrus/json_formatter.go +++ b/components/engine/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -6,8 +6,11 @@ import ( ) type fieldKey string + +// FieldMap allows customization of the key names for default fields. type FieldMap map[fieldKey]string +// Default key names for the default fields const ( FieldKeyMsg = "msg" FieldKeyLevel = "level" @@ -22,6 +25,7 @@ func (f FieldMap) resolve(key fieldKey) string { return string(key) } +// JSONFormatter formats logs into parsable json type JSONFormatter struct { // TimestampFormat sets the format used for marshaling timestamps. 
TimestampFormat string @@ -29,7 +33,7 @@ type JSONFormatter struct { // DisableTimestamp allows disabling automatic timestamps in output DisableTimestamp bool - // FieldMap allows users to customize the names of keys for various fields. + // FieldMap allows users to customize the names of keys for default fields. // As an example: // formatter := &JSONFormatter{ // FieldMap: FieldMap{ @@ -41,6 +45,7 @@ type JSONFormatter struct { FieldMap FieldMap } +// Format renders a single log entry func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data := make(Fields, len(entry.Data)+3) for k, v := range entry.Data { @@ -57,7 +62,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { timestampFormat := f.TimestampFormat if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat + timestampFormat = defaultTimestampFormat } if !f.DisableTimestamp { diff --git a/components/engine/vendor/github.com/sirupsen/logrus/logger.go b/components/engine/vendor/github.com/sirupsen/logrus/logger.go index 370fff5d1b..2acab05098 100644 --- a/components/engine/vendor/github.com/sirupsen/logrus/logger.go +++ b/components/engine/vendor/github.com/sirupsen/logrus/logger.go @@ -25,7 +25,7 @@ type Logger struct { Formatter Formatter // The logging level the logger should log at. This is typically (and defaults // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in + // logged. Level Level // Used to sync writing to the log. Locking is enabled by Default mu MutexWrap @@ -312,6 +312,6 @@ func (logger *Logger) level() Level { return Level(atomic.LoadUint32((*uint32)(&logger.Level))) } -func (logger *Logger) setLevel(level Level) { +func (logger *Logger) SetLevel(level Level) { atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) } diff --git a/components/engine/vendor/github.com/sirupsen/logrus/terminal_appengine.go b/components/engine/vendor/github.com/sirupsen/logrus/terminal_appengine.go deleted file mode 100644 index e011a86945..0000000000 --- a/components/engine/vendor/github.com/sirupsen/logrus/terminal_appengine.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build appengine - -package logrus - -import "io" - -// IsTerminal returns true if stderr's file descriptor is a terminal. 
-func IsTerminal(f io.Writer) bool { - return true -} diff --git a/components/engine/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/components/engine/vendor/github.com/sirupsen/logrus/terminal_bsd.go index 5f6be4d3c0..d7b3893f3f 100644 --- a/components/engine/vendor/github.com/sirupsen/logrus/terminal_bsd.go +++ b/components/engine/vendor/github.com/sirupsen/logrus/terminal_bsd.go @@ -3,8 +3,8 @@ package logrus -import "syscall" +import "golang.org/x/sys/unix" -const ioctlReadTermios = syscall.TIOCGETA +const ioctlReadTermios = unix.TIOCGETA -type Termios syscall.Termios +type Termios unix.Termios diff --git a/components/engine/vendor/github.com/sirupsen/logrus/terminal_linux.go b/components/engine/vendor/github.com/sirupsen/logrus/terminal_linux.go index 308160ca80..88d7298e24 100644 --- a/components/engine/vendor/github.com/sirupsen/logrus/terminal_linux.go +++ b/components/engine/vendor/github.com/sirupsen/logrus/terminal_linux.go @@ -7,8 +7,8 @@ package logrus -import "syscall" +import "golang.org/x/sys/unix" -const ioctlReadTermios = syscall.TCGETS +const ioctlReadTermios = unix.TCGETS -type Termios syscall.Termios +type Termios unix.Termios diff --git a/components/engine/vendor/github.com/sirupsen/logrus/terminal_notwindows.go b/components/engine/vendor/github.com/sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index 190297abf3..0000000000 --- a/components/engine/vendor/github.com/sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,28 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd netbsd dragonfly -// +build !appengine - -package logrus - -import ( - "io" - "os" - "syscall" - "unsafe" -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal(f io.Writer) bool { - var termios Termios - switch v := f.(type) { - case *os.File: - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 - default: - return false - } -} diff --git a/components/engine/vendor/github.com/sirupsen/logrus/terminal_solaris.go b/components/engine/vendor/github.com/sirupsen/logrus/terminal_solaris.go deleted file mode 100644 index 3c86b1abee..0000000000 --- a/components/engine/vendor/github.com/sirupsen/logrus/terminal_solaris.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build solaris,!appengine - -package logrus - -import ( - "io" - "os" - - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(f io.Writer) bool { - switch v := f.(type) { - case *os.File: - _, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA) - return err == nil - default: - return false - } -} diff --git a/components/engine/vendor/github.com/sirupsen/logrus/terminal_windows.go b/components/engine/vendor/github.com/sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 7a336307e5..0000000000 --- a/components/engine/vendor/github.com/sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,82 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build windows,!appengine - -package logrus - -import ( - "bytes" - "errors" - "io" - "os" - "os/exec" - "strconv" - "strings" - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") - procSetConsoleMode = kernel32.NewProc("SetConsoleMode") -) - -const ( - enableProcessedOutput = 0x0001 - enableWrapAtEolOutput = 0x0002 - enableVirtualTerminalProcessing = 0x0004 -) - -func getVersion() (float64, error) { - stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} - cmd := exec.Command("cmd", "ver") - cmd.Stdout = stdout - cmd.Stderr = stderr - err := cmd.Run() - if err != nil { - return -1, err - } - - // The output should be like "Microsoft Windows [Version XX.X.XXXXXX]" - version := strings.Replace(stdout.String(), "\n", "", -1) - version = strings.Replace(version, "\r\n", "", -1) - - x1 := strings.Index(version, "[Version") - - if x1 == -1 || strings.Index(version, "]") == -1 { - return -1, errors.New("Can't determine Windows version") - } - - return strconv.ParseFloat(version[x1+9:x1+13], 64) -} - -func init() { - ver, err := getVersion() - if err != nil { - return - } - - // Activate Virtual Processing for Windows CMD - // Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - if ver >= 10 { - handle := syscall.Handle(os.Stderr.Fd()) - procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing) - } -} - -// IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal(f io.Writer) bool { - switch v := f.(type) { - case *os.File: - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 - default: - return false - } -} diff --git a/components/engine/vendor/github.com/sirupsen/logrus/text_formatter.go b/components/engine/vendor/github.com/sirupsen/logrus/text_formatter.go index 26dcc15538..be412aa948 100644 --- a/components/engine/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/components/engine/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -3,10 +3,14 @@ package logrus import ( "bytes" "fmt" + "io" + "os" "sort" "strings" "sync" "time" + + "golang.org/x/crypto/ssh/terminal" ) const ( @@ -14,7 +18,7 @@ const ( red = 31 green = 32 yellow = 33 - blue = 34 + blue = 36 gray = 37 ) @@ -26,6 +30,7 @@ func init() { baseTimestamp = time.Now() } +// TextFormatter formats logs into text type TextFormatter struct { // Set to true to bypass checking for a TTY before outputting colors. ForceColors bool @@ -52,10 +57,6 @@ type TextFormatter struct { // QuoteEmptyFields will wrap empty fields in quotes if true QuoteEmptyFields bool - // QuoteCharacter can be set to the override the default quoting character " - // with something else. For example: ', or `. 
- QuoteCharacter string - // Whether the logger's out is to a terminal isTerminal bool @@ -63,14 +64,21 @@ type TextFormatter struct { } func (f *TextFormatter) init(entry *Entry) { - if len(f.QuoteCharacter) == 0 { - f.QuoteCharacter = "\"" - } if entry.Logger != nil { - f.isTerminal = IsTerminal(entry.Logger.Out) + f.isTerminal = f.checkIfTerminal(entry.Logger.Out) } } +func (f *TextFormatter) checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return terminal.IsTerminal(int(v.Fd())) + default: + return false + } +} + +// Format renders a single log entry func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { var b *bytes.Buffer keys := make([]string, 0, len(entry.Data)) @@ -95,7 +103,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { timestampFormat := f.TimestampFormat if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat + timestampFormat = defaultTimestampFormat } if isColored { f.printColored(b, entry, keys, timestampFormat) @@ -153,7 +161,7 @@ func (f *TextFormatter) needsQuoting(text string) bool { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { + ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { return true } } @@ -161,36 +169,23 @@ func (f *TextFormatter) needsQuoting(text string) bool { } func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - + if b.Len() > 0 { + b.WriteByte(' ') + } b.WriteString(key) b.WriteByte('=') f.appendValue(b, value) - b.WriteByte(' ') } func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { - switch value := value.(type) { - case string: - if !f.needsQuoting(value) { - b.WriteString(value) - } else { - b.WriteString(f.quoteString(value)) - } - case error: - errmsg := value.Error() - if !f.needsQuoting(errmsg) { - b.WriteString(errmsg) - } else { - b.WriteString(f.quoteString(errmsg)) - } - default: - fmt.Fprint(b, value) + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) } } - -func (f *TextFormatter) quoteString(v string) string { - escapedQuote := fmt.Sprintf("\\%s", f.QuoteCharacter) - escapedValue := strings.Replace(v, f.QuoteCharacter, escapedQuote, -1) - - return fmt.Sprintf("%s%v%s", f.QuoteCharacter, escapedValue, f.QuoteCharacter) -} diff --git a/components/engine/vendor/golang.org/x/crypto/README b/components/engine/vendor/golang.org/x/crypto/README deleted file mode 100644 index f1e0cbf94e..0000000000 --- a/components/engine/vendor/golang.org/x/crypto/README +++ /dev/null @@ -1,3 +0,0 @@ -This repository holds supplementary Go cryptography libraries. - -To submit changes to this repository, see http://golang.org/doc/contribute.html. diff --git a/components/engine/vendor/golang.org/x/crypto/README.md b/components/engine/vendor/golang.org/x/crypto/README.md new file mode 100644 index 0000000000..c9d6fecd1e --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/README.md @@ -0,0 +1,21 @@ +# Go Cryptography + +This repository holds supplementary Go cryptography libraries. + +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/crypto/...`. You +can also manually git clone the repository to `$GOPATH/src/golang.org/x/crypto`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. 
To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the crypto repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/crypto:" in the +subject line, so it is easy to find. + +Note that contributions to the cryptography package receive additional scrutiny +due to their sensitive nature. Patches may take longer than normal to receive +feedback. diff --git a/components/engine/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/components/engine/vendor/golang.org/x/crypto/curve25519/const_amd64.h new file mode 100644 index 0000000000..b3f74162f6 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/curve25519/const_amd64.h @@ -0,0 +1,8 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/components/engine/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/components/engine/vendor/golang.org/x/crypto/curve25519/const_amd64.s new file mode 100644 index 0000000000..ee7b4bd5f8 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/curve25519/const_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// These constants cannot be encoded in non-MOVQ immediates. +// We access them directly from memory instead. + +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 diff --git a/components/engine/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/components/engine/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s new file mode 100644 index 0000000000..cd793a5b5f --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s @@ -0,0 +1,65 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build amd64,!gccgo,!appengine + +// func cswap(inout *[4][5]uint64, v uint64) +TEXT ·cswap(SB),7,$0 + MOVQ inout+0(FP),DI + MOVQ v+8(FP),SI + + SUBQ $1, SI + NOTQ SI + MOVQ SI, X15 + PSHUFD $0x44, X15, X15 + + MOVOU 0(DI), X0 + MOVOU 16(DI), X2 + MOVOU 32(DI), X4 + MOVOU 48(DI), X6 + MOVOU 64(DI), X8 + MOVOU 80(DI), X1 + MOVOU 96(DI), X3 + MOVOU 112(DI), X5 + MOVOU 128(DI), X7 + MOVOU 144(DI), X9 + + MOVO X1, X10 + MOVO X3, X11 + MOVO X5, X12 + MOVO X7, X13 + MOVO X9, X14 + + PXOR X0, X10 + PXOR X2, X11 + PXOR X4, X12 + PXOR X6, X13 + PXOR X8, X14 + PAND X15, X10 + PAND X15, X11 + PAND X15, X12 + PAND X15, X13 + PAND X15, X14 + PXOR X10, X0 + PXOR X10, X1 + PXOR X11, X2 + PXOR X11, X3 + PXOR X12, X4 + PXOR X12, X5 + PXOR X13, X6 + PXOR X13, X7 + PXOR X14, X8 + PXOR X14, X9 + + MOVOU X0, 0(DI) + MOVOU X2, 16(DI) + MOVOU X4, 32(DI) + MOVOU X6, 48(DI) + MOVOU X8, 64(DI) + MOVOU X1, 80(DI) + MOVOU X3, 96(DI) + MOVOU X5, 112(DI) + MOVOU X7, 128(DI) + MOVOU X9, 144(DI) + RET diff --git a/components/engine/vendor/golang.org/x/crypto/curve25519/curve25519.go b/components/engine/vendor/golang.org/x/crypto/curve25519/curve25519.go new file mode 100644 index 0000000000..2d14c2a78a --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -0,0 +1,834 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// We have a implementation in amd64 assembly so this code is only run on +// non-amd64 platforms. The amd64 assembly does not support gccgo. +// +build !amd64 gccgo appengine + +package curve25519 + +import ( + "encoding/binary" +) + +// This code is a port of the public domain, "ref10" implementation of +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. + +// fieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type fieldElement [10]int32 + +func feZero(fe *fieldElement) { + for i := range fe { + fe[i] = 0 + } +} + +func feOne(fe *fieldElement) { + feZero(fe) + fe[0] = 1 +} + +func feAdd(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] + b[i] + } +} + +func feSub(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] - b[i] + } +} + +func feCopy(dst, src *fieldElement) { + for i := range dst { + dst[i] = src[i] + } +} + +// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func feCSwap(f, g *fieldElement, b int32) { + b = -b + for i := range f { + t := b & (f[i] ^ g[i]) + f[i] ^= t + g[i] ^= t + } +} + +// load3 reads a 24-bit, little-endian value from in. +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +// load4 reads a 32-bit, little-endian value from in. 
+func load4(in []byte) int64 {
+	return int64(binary.LittleEndian.Uint32(in))
+}
+
+func feFromBytes(dst *fieldElement, src *[32]byte) {
+	h0 := load4(src[:])
+	h1 := load3(src[4:]) << 6
+	h2 := load3(src[7:]) << 5
+	h3 := load3(src[10:]) << 3
+	h4 := load3(src[13:]) << 2
+	h5 := load4(src[16:])
+	h6 := load3(src[20:]) << 7
+	h7 := load3(src[23:]) << 5
+	h8 := load3(src[26:]) << 4
+	h9 := load3(src[29:]) << 2
+
+	var carry [10]int64
+	carry[9] = (h9 + 1<<24) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+	carry[1] = (h1 + 1<<24) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[3] = (h3 + 1<<24) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[5] = (h5 + 1<<24) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+	carry[7] = (h7 + 1<<24) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+
+	carry[0] = (h0 + 1<<25) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[2] = (h2 + 1<<25) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[4] = (h4 + 1<<25) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[6] = (h6 + 1<<25) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+	carry[8] = (h8 + 1<<25) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+
+	dst[0] = int32(h0)
+	dst[1] = int32(h1)
+	dst[2] = int32(h2)
+	dst[3] = int32(h3)
+	dst[4] = int32(h4)
+	dst[5] = int32(h5)
+	dst[6] = int32(h6)
+	dst[7] = int32(h7)
+	dst[8] = int32(h8)
+	dst[9] = int32(h9)
+}
+
+// feToBytes marshals h to s.
+// Preconditions:
+//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+//   Then 0<y<1.
+//
+//   Write r=h-pq.
+//   Have 0<=r<=p-1=2^255-20.
+//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+//   Write x=r+19(2^-255)r+y.
+//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func feToBytes(s *[32]byte, h *fieldElement) {
+	carry := make([]int32, 10)
+
+	q := (19*h[9] + (1 << 24)) >> 25
+	q = (h[0] + q) >> 26
+	q = (h[1] + q) >> 25
+	q = (h[2] + q) >> 26
+	q = (h[3] + q) >> 25
+	q = (h[4] + q) >> 26
+	q = (h[5] + q) >> 25
+	q = (h[6] + q) >> 26
+	q = (h[7] + q) >> 25
+	q = (h[8] + q) >> 26
+	q = (h[9] + q) >> 25
+
+	// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+	h[0] += 19 * q
+	// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+
+	carry[0] = h[0] >> 26
+	h[1] += carry[0]
+	h[0] -= carry[0] << 26
+	carry[1] = h[1] >> 25
+	h[2] += carry[1]
+	h[1] -= carry[1] << 25
+	carry[2] = h[2] >> 26
+	h[3] += carry[2]
+	h[2] -= carry[2] << 26
+	carry[3] = h[3] >> 25
+	h[4] += carry[3]
+	h[3] -= carry[3] << 25
+	carry[4] = h[4] >> 26
+	h[5] += carry[4]
+	h[4] -= carry[4] << 26
+	carry[5] = h[5] >> 25
+	h[6] += carry[5]
+	h[5] -= carry[5] << 25
+	carry[6] = h[6] >> 26
+	h[7] += carry[6]
+	h[6] -= carry[6] << 26
+	carry[7] = h[7] >> 25
+	h[8] += carry[7]
+	h[7] -= carry[7] << 25
+	carry[8] = h[8] >> 26
+	h[9] += carry[8]
+	h[8] -= carry[8] << 26
+	carry[9] = h[9] >> 25
+	h[9] -= carry[9] << 25
+	// h10 = carry9
+
+	// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+	// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
+	// evidently 2^255 h10-2^255 q = 0.
+	// Goal: Output h[0]+...+2^230 h[9].
+ + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
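Before the feMul body that follows, a standalone sketch of what a fieldElement denotes (fieldElement is redeclared here and feToBig is a hypothetical helper, neither is part of the vendored package): limb i carries weight 2^0, 2^26, 2^51, ..., 2^230, and anything at or above 2^255 folds back in multiplied by 19 because 2^255 ≡ 19 (mod p) — which is where the *19 precomputations in feMul come from.

package main

import (
	"fmt"
	"math/big"
)

// fieldElement mirrors the vendored type: ten limbs in alternating 26-/25-bit radix.
type fieldElement [10]int32

// feToBig evaluates t[0] + 2^26 t[1] + 2^51 t[2] + ... + 2^230 t[9], reduced mod p = 2^255-19.
func feToBig(fe *fieldElement) *big.Int {
	offsets := []uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	acc := new(big.Int)
	for i, off := range offsets {
		term := new(big.Int).Mul(big.NewInt(int64(fe[i])), new(big.Int).Lsh(big.NewInt(1), off))
		acc.Add(acc, term)
	}
	return acc.Mod(acc, p)
}

func main() {
	// 2^25 in limb 9 denotes 2^230 * 2^25 = 2^255, which is 19 mod p.
	var fe fieldElement
	fe[9] = 1 << 25
	fmt.Println(feToBig(&fe)) // 19
}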
+func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + 
f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 
1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] 
= (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. 
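The feInvert implementation that follows raises z to p-2 = 2^255-21 with a fixed chain of squarings and multiplications; by Fermat's little theorem that is z^-1 for nonzero z. A minimal big.Int check of the underlying identity (illustrative only, not part of the package):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	z := big.NewInt(123456789)

	// z^(p-2) mod p is the modular inverse of z.
	zInv := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)
	fmt.Println(new(big.Int).Mod(new(big.Int).Mul(z, zInv), p)) // 1
}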
+func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/components/engine/vendor/golang.org/x/crypto/curve25519/doc.go b/components/engine/vendor/golang.org/x/crypto/curve25519/doc.go new file mode 100644 index 0000000000..da9b10d9c1 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/curve25519/doc.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of scalar multiplication on +// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html +package curve25519 // import "golang.org/x/crypto/curve25519" + +// basePoint is the x coordinate of the generator of the curve. +var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +// ScalarMult sets dst to the product in*base where dst and base are the x +// coordinates of group points and all values are in little-endian form. +func ScalarMult(dst, in, base *[32]byte) { + scalarMult(dst, in, base) +} + +// ScalarBaseMult sets dst to the product in*base where dst and base are the x +// coordinates of group points, base is the standard generator and all values +// are in little-endian form. 
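Typical Diffie-Hellman usage of the package's two exported functions, with the one-line ScalarBaseMult wrapper following this sketch; the variable names below are illustrative, and scalar clamping is handled inside scalarMult, so raw random bytes work as private keys:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var alicePriv, bobPriv, alicePub, bobPub, shared1, shared2 [32]byte
	if _, err := rand.Read(alicePriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bobPriv[:]); err != nil {
		panic(err)
	}

	// Public keys: scalar multiplication by the fixed base point (x = 9).
	curve25519.ScalarBaseMult(&alicePub, &alicePriv)
	curve25519.ScalarBaseMult(&bobPub, &bobPriv)

	// Each side combines its own scalar with the peer's public key.
	curve25519.ScalarMult(&shared1, &alicePriv, &bobPub)
	curve25519.ScalarMult(&shared2, &bobPriv, &alicePub)

	fmt.Println(shared1 == shared2) // true: both sides derive the same secret
}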
+func ScalarBaseMult(dst, in *[32]byte) { + ScalarMult(dst, in, &basePoint) +} diff --git a/components/engine/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/components/engine/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s new file mode 100644 index 0000000000..390816106e --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$0-8 + MOVQ inout+0(FP), DI + + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ $REDMASK51,AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + RET diff --git a/components/engine/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/components/engine/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s new file mode 100644 index 0000000000..9e9040b250 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s @@ -0,0 +1,1377 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func ladderstep(inout *[5][5]uint64) +TEXT ·ladderstep(SB),0,$296-8 + MOVQ inout+0(FP),DI + + MOVQ 40(DI),SI + MOVQ 48(DI),DX + MOVQ 56(DI),CX + MOVQ 64(DI),R8 + MOVQ 72(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 80(DI),SI + ADDQ 88(DI),DX + ADDQ 96(DI),CX + ADDQ 104(DI),R8 + ADDQ 112(DI),R9 + SUBQ 80(DI),AX + SUBQ 88(DI),R10 + SUBQ 96(DI),R11 + SUBQ 104(DI),R12 + SUBQ 112(DI),R13 + MOVQ SI,0(SP) + MOVQ DX,8(SP) + MOVQ CX,16(SP) + MOVQ R8,24(SP) + MOVQ R9,32(SP) + MOVQ AX,40(SP) + MOVQ R10,48(SP) + MOVQ R11,56(SP) + MOVQ R12,64(SP) + MOVQ R13,72(SP) + MOVQ 40(SP),AX + MULQ 40(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 48(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 48(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 72(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(SP) + MOVQ R8,88(SP) + MOVQ R9,96(SP) + MOVQ AX,104(SP) + MOVQ R10,112(SP) + MOVQ 0(SP),AX + MULQ 0(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 8(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 
24(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 32(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(SP) + MOVQ R8,128(SP) + MOVQ R9,136(SP) + MOVQ AX,144(SP) + MOVQ R10,152(SP) + MOVQ SI,SI + MOVQ R8,DX + MOVQ R9,CX + MOVQ AX,R8 + MOVQ R10,R9 + ADDQ ·_2P0(SB),SI + ADDQ ·_2P1234(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R8 + ADDQ ·_2P1234(SB),R9 + SUBQ 80(SP),SI + SUBQ 88(SP),DX + SUBQ 96(SP),CX + SUBQ 104(SP),R8 + SUBQ 112(SP),R9 + MOVQ SI,160(SP) + MOVQ DX,168(SP) + MOVQ CX,176(SP) + MOVQ R8,184(SP) + MOVQ R9,192(SP) + MOVQ 120(DI),SI + MOVQ 128(DI),DX + MOVQ 136(DI),CX + MOVQ 144(DI),R8 + MOVQ 152(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 160(DI),SI + ADDQ 168(DI),DX + ADDQ 176(DI),CX + ADDQ 184(DI),R8 + ADDQ 192(DI),R9 + SUBQ 160(DI),AX + SUBQ 168(DI),R10 + SUBQ 176(DI),R11 + SUBQ 184(DI),R12 + SUBQ 192(DI),R13 + MOVQ SI,200(SP) + MOVQ DX,208(SP) + MOVQ CX,216(SP) + MOVQ R8,224(SP) + MOVQ R9,232(SP) + MOVQ AX,240(SP) + MOVQ R10,248(SP) + MOVQ R11,256(SP) + MOVQ R12,264(SP) + MOVQ R13,272(SP) + MOVQ 224(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,280(SP) + MULQ 56(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 232(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,288(SP) + MULQ 48(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 40(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 200(SP),AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 200(SP),AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 200(SP),AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 208(SP),AX + MULQ 40(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 208(SP),AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),AX + MULQ 40(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 216(SP),AX + MULQ 48(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 216(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 224(SP),AX + MULQ 40(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 224(SP),AX + MULQ 48(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 280(SP),AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 280(SP),AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 232(SP),AX + MULQ 40(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 288(SP),AX + MULQ 56(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 288(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 288(SP),AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + 
SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(SP) + MOVQ R8,48(SP) + MOVQ R9,56(SP) + MOVQ AX,64(SP) + MOVQ R10,72(SP) + MOVQ 264(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,200(SP) + MULQ 16(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,208(SP) + MULQ 8(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 0(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 240(SP),AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 240(SP),AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 240(SP),AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 248(SP),AX + MULQ 0(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 248(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 248(SP),AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 248(SP),AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 248(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 0(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 256(SP),AX + MULQ 8(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 256(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 0(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 264(SP),AX + MULQ 8(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 200(SP),AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 200(SP),AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 272(SP),AX + MULQ 0(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),AX + MULQ 16(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 24(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,DX + MOVQ R8,CX + MOVQ R9,R11 + MOVQ AX,R12 + MOVQ R10,R13 + ADDQ ·_2P0(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 40(SP),SI + ADDQ 48(SP),R8 + ADDQ 56(SP),R9 + ADDQ 64(SP),AX + ADDQ 72(SP),R10 + SUBQ 40(SP),DX + SUBQ 48(SP),CX + SUBQ 56(SP),R11 + SUBQ 64(SP),R12 + SUBQ 72(SP),R13 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ DX,160(DI) + MOVQ CX,168(DI) + MOVQ R11,176(DI) + MOVQ R12,184(DI) + MOVQ R13,192(DI) + MOVQ 120(DI),AX + MULQ 120(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 128(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 136(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 144(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 152(DI) + MOVQ AX,R14 + MOVQ 
DX,R15 + MOVQ 128(DI),AX + MULQ 128(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 136(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 144(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),AX + MULQ 136(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 144(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $19,DX,AX + MULQ 144(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(DI),DX + IMUL3Q $19,DX,AX + MULQ 152(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ 160(DI),AX + MULQ 160(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 168(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 176(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 184(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 192(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 168(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 176(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 184(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 176(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 184(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 184(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 16(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 0(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 8(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + MULQ 16(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + MULQ 24(DI) + MOVQ AX,R12 + 
MOVQ DX,R13 + MOVQ 160(DI),AX + MULQ 32(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 0(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 168(DI),AX + MULQ 8(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + MULQ 16(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + MULQ 24(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 0(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 176(DI),AX + MULQ 8(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 176(DI),AX + MULQ 16(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 24(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),AX + MULQ 0(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(DI),AX + MULQ 8(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 24(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 32(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),AX + MULQ 0(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 16(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 24(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 32(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 144(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 96(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 152(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 88(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(SP),AX + MULQ 96(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(SP),AX + MULQ 104(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(SP),AX + MULQ 112(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(SP),AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 128(SP),AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(SP),AX + MULQ 96(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(SP),AX + MULQ 104(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),AX + MULQ 80(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 136(SP),AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 136(SP),AX + MULQ 96(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 104(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(SP),AX + MULQ 80(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 144(SP),AX + MULQ 88(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 104(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 112(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(SP),AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 96(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + 
ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(DI) + MOVQ R8,48(DI) + MOVQ R9,56(DI) + MOVQ AX,64(DI) + MOVQ R10,72(DI) + MOVQ 160(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + MOVQ AX,SI + MOVQ DX,CX + MOVQ 168(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,CX + MOVQ DX,R8 + MOVQ 176(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R8 + MOVQ DX,R9 + MOVQ 184(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R9 + MOVQ DX,R10 + MOVQ 192(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R10 + IMUL3Q $19,DX,DX + ADDQ DX,SI + ADDQ 80(SP),SI + ADDQ 88(SP),CX + ADDQ 96(SP),R8 + ADDQ 104(SP),R9 + ADDQ 112(SP),R10 + MOVQ SI,80(DI) + MOVQ CX,88(DI) + MOVQ R8,96(DI) + MOVQ R9,104(DI) + MOVQ R10,112(DI) + MOVQ 104(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 176(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 112(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 168(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 160(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 168(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 80(DI),AX + MULQ 176(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 80(DI),AX + MULQ 184(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 80(DI),AX + MULQ 192(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 88(DI),AX + MULQ 160(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 88(DI),AX + MULQ 168(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(DI),AX + MULQ 176(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 88(DI),AX + MULQ 184(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 88(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),AX + MULQ 160(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 96(DI),AX + MULQ 168(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 96(DI),AX + MULQ 176(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 104(DI),AX + MULQ 160(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(DI),AX + MULQ 168(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 184(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 192(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 112(DI),AX + MULQ 160(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 176(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 184(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 192(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(DI) + MOVQ R8,88(DI) + MOVQ R9,96(DI) + MOVQ AX,104(DI) + MOVQ R10,112(DI) + RET diff --git a/components/engine/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go 
b/components/engine/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go new file mode 100644 index 0000000000..5822bd5338 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go @@ -0,0 +1,240 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package curve25519 + +// These functions are implemented in the .s files. The names of the functions +// in the rest of the file are also taken from the SUPERCOP sources to help +// people following along. + +//go:noescape + +func cswap(inout *[5]uint64, v uint64) + +//go:noescape + +func ladderstep(inout *[5][5]uint64) + +//go:noescape + +func freeze(inout *[5]uint64) + +//go:noescape + +func mul(dest, a, b *[5]uint64) + +//go:noescape + +func square(out, in *[5]uint64) + +// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. +func mladder(xr, zr *[5]uint64, s *[32]byte) { + var work [5][5]uint64 + + work[0] = *xr + setint(&work[1], 1) + setint(&work[2], 0) + work[3] = *xr + setint(&work[4], 1) + + j := uint(6) + var prevbit byte + + for i := 31; i >= 0; i-- { + for j < 8 { + bit := ((*s)[i] >> j) & 1 + swap := bit ^ prevbit + prevbit = bit + cswap(&work[1], uint64(swap)) + ladderstep(&work) + j-- + } + j = 7 + } + + *xr = work[1] + *zr = work[2] +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + copy(e[:], (*in)[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var t, z [5]uint64 + unpack(&t, base) + mladder(&t, &z, &e) + invert(&z, &z) + mul(&t, &t, &z) + pack(out, &t) +} + +func setint(r *[5]uint64, v uint64) { + r[0] = v + r[1] = 0 + r[2] = 0 + r[3] = 0 + r[4] = 0 +} + +// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian +// order. +func unpack(r *[5]uint64, x *[32]byte) { + r[0] = uint64(x[0]) | + uint64(x[1])<<8 | + uint64(x[2])<<16 | + uint64(x[3])<<24 | + uint64(x[4])<<32 | + uint64(x[5])<<40 | + uint64(x[6]&7)<<48 + + r[1] = uint64(x[6])>>3 | + uint64(x[7])<<5 | + uint64(x[8])<<13 | + uint64(x[9])<<21 | + uint64(x[10])<<29 | + uint64(x[11])<<37 | + uint64(x[12]&63)<<45 + + r[2] = uint64(x[12])>>6 | + uint64(x[13])<<2 | + uint64(x[14])<<10 | + uint64(x[15])<<18 | + uint64(x[16])<<26 | + uint64(x[17])<<34 | + uint64(x[18])<<42 | + uint64(x[19]&1)<<50 + + r[3] = uint64(x[19])>>1 | + uint64(x[20])<<7 | + uint64(x[21])<<15 | + uint64(x[22])<<23 | + uint64(x[23])<<31 | + uint64(x[24])<<39 | + uint64(x[25]&15)<<47 + + r[4] = uint64(x[25])>>4 | + uint64(x[26])<<4 | + uint64(x[27])<<12 | + uint64(x[28])<<20 | + uint64(x[29])<<28 | + uint64(x[30])<<36 | + uint64(x[31]&127)<<44 +} + +// pack sets out = x where out is the usual, little-endian form of the 5, +// 51-bit limbs in x. 
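For reference, a hypothetical helper (limbsToBig is not in the vendored package) showing the value that the 5-limb, radix-2^51 representation produced by unpack, and serialized back to 32 bytes by pack below, stands for:

package main

import (
	"fmt"
	"math/big"
)

// limbsToBig evaluates r[0] + 2^51 r[1] + 2^102 r[2] + 2^153 r[3] + 2^204 r[4].
func limbsToBig(r *[5]uint64) *big.Int {
	acc := new(big.Int)
	for i := 4; i >= 0; i-- {
		acc.Lsh(acc, 51)
		acc.Add(acc, new(big.Int).SetUint64(r[i]))
	}
	return acc
}

func main() {
	r := [5]uint64{9, 0, 0, 0, 0} // the base point's x coordinate
	fmt.Println(limbsToBig(&r))   // 9
}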
+func pack(out *[32]byte, x *[5]uint64) { + t := *x + freeze(&t) + + out[0] = byte(t[0]) + out[1] = byte(t[0] >> 8) + out[2] = byte(t[0] >> 16) + out[3] = byte(t[0] >> 24) + out[4] = byte(t[0] >> 32) + out[5] = byte(t[0] >> 40) + out[6] = byte(t[0] >> 48) + + out[6] ^= byte(t[1]<<3) & 0xf8 + out[7] = byte(t[1] >> 5) + out[8] = byte(t[1] >> 13) + out[9] = byte(t[1] >> 21) + out[10] = byte(t[1] >> 29) + out[11] = byte(t[1] >> 37) + out[12] = byte(t[1] >> 45) + + out[12] ^= byte(t[2]<<6) & 0xc0 + out[13] = byte(t[2] >> 2) + out[14] = byte(t[2] >> 10) + out[15] = byte(t[2] >> 18) + out[16] = byte(t[2] >> 26) + out[17] = byte(t[2] >> 34) + out[18] = byte(t[2] >> 42) + out[19] = byte(t[2] >> 50) + + out[19] ^= byte(t[3]<<1) & 0xfe + out[20] = byte(t[3] >> 7) + out[21] = byte(t[3] >> 15) + out[22] = byte(t[3] >> 23) + out[23] = byte(t[3] >> 31) + out[24] = byte(t[3] >> 39) + out[25] = byte(t[3] >> 47) + + out[25] ^= byte(t[4]<<4) & 0xf0 + out[26] = byte(t[4] >> 4) + out[27] = byte(t[4] >> 12) + out[28] = byte(t[4] >> 20) + out[29] = byte(t[4] >> 28) + out[30] = byte(t[4] >> 36) + out[31] = byte(t[4] >> 44) +} + +// invert calculates r = x^-1 mod p using Fermat's little theorem. +func invert(r *[5]uint64, x *[5]uint64) { + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 + + square(&z2, x) /* 2 */ + square(&t, &z2) /* 4 */ + square(&t, &t) /* 8 */ + mul(&z9, &t, x) /* 9 */ + mul(&z11, &z9, &z2) /* 11 */ + square(&t, &z11) /* 22 */ + mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ + + square(&t, &z2_5_0) /* 2^6 - 2^1 */ + for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ + + square(&t, &z2_10_0) /* 2^11 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ + + square(&t, &z2_20_0) /* 2^21 - 2^1 */ + for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ + square(&t, &t) + } + mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ + + square(&t, &t) /* 2^41 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ + square(&t, &t) + } + mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ + + square(&t, &z2_50_0) /* 2^51 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ + square(&t, &t) + } + mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ + + square(&t, &z2_100_0) /* 2^101 - 2^1 */ + for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ + square(&t, &t) + } + mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ + + square(&t, &t) /* 2^201 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ + square(&t, &t) + } + mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ + + square(&t, &t) /* 2^251 - 2^1 */ + square(&t, &t) /* 2^252 - 2^2 */ + square(&t, &t) /* 2^253 - 2^3 */ + + square(&t, &t) /* 2^254 - 2^4 */ + + square(&t, &t) /* 2^255 - 2^5 */ + mul(r, &t, &z11) /* 2^255 - 21 */ +} diff --git a/components/engine/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/components/engine/vendor/golang.org/x/crypto/curve25519/mul_amd64.s new file mode 100644 index 0000000000..5ce80a2e56 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/curve25519/mul_amd64.s @@ -0,0 +1,169 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$16-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,0(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 0(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 0(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ $REDMASK51,SI + SHLQ $13,R9:R8 + ANDQ SI,R8 + SHLQ $13,R11:R10 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ SI,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BP:BX + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/components/engine/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/components/engine/vendor/golang.org/x/crypto/curve25519/square_amd64.s new file mode 100644 index 0000000000..12f73734ff --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/curve25519/square_amd64.s @@ -0,0 +1,132 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$0-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ $REDMASK51,SI + SHLQ $13,R8:CX + ANDQ SI,CX + SHLQ $13,R10:R9 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R12:R11 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R14:R13 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,BX:R15 + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/components/engine/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/components/engine/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go index dbf31bbf40..1e1dff5061 100644 --- a/components/engine/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go +++ b/components/engine/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go @@ -13,7 +13,7 @@ example, by using nonce 1 for the first message, nonce 2 for the second message, etc. Nonces are long enough that randomly generated nonces have negligible risk of collision. -This package is interoperable with NaCl: http://nacl.cr.yp.to/secretbox.html. +This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. 
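A minimal usage sketch of the secretbox package documented above, assuming a randomly generated 24-byte nonce as the package comment permits (illustrative only):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte
	var nonce [24]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}

	// Seal appends the encrypted, authenticated message to its first argument.
	box := secretbox.Seal(nil, []byte("hello"), &nonce, &key)

	// Open verifies and decrypts; ok is false if the box was tampered with.
	plain, ok := secretbox.Open(nil, box, &nonce, &key)
	fmt.Println(ok, string(plain)) // true hello
}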
*/ package secretbox // import "golang.org/x/crypto/nacl/secretbox" diff --git a/components/engine/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/components/engine/vendor/golang.org/x/crypto/pkcs12/bmp-string.go index 284d2a68f1..233b8b62cc 100644 --- a/components/engine/vendor/golang.org/x/crypto/pkcs12/bmp-string.go +++ b/components/engine/vendor/golang.org/x/crypto/pkcs12/bmp-string.go @@ -13,7 +13,7 @@ import ( func bmpString(s string) ([]byte, error) { // References: // https://tools.ietf.org/html/rfc7292#appendix-B.1 - // http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane + // https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes // EncodeRune returns 0xfffd if the rune does not need special encoding // - the above RFC provides the info that BMPStrings are NULL terminated. diff --git a/components/engine/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/components/engine/vendor/golang.org/x/crypto/pkcs12/pkcs12.go index ad6341e60f..eff9ad3a98 100644 --- a/components/engine/vendor/golang.org/x/crypto/pkcs12/pkcs12.go +++ b/components/engine/vendor/golang.org/x/crypto/pkcs12/pkcs12.go @@ -109,6 +109,10 @@ func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + if err != nil { + return nil, err + } + blocks := make([]*pem.Block, 0, len(bags)) for _, bag := range bags { block, err := convertBag(&bag, encodedPassword) diff --git a/components/engine/vendor/golang.org/x/crypto/poly1305/const_amd64.s b/components/engine/vendor/golang.org/x/crypto/poly1305/const_amd64.s deleted file mode 100644 index 8e861f337c..0000000000 --- a/components/engine/vendor/golang.org/x/crypto/poly1305/const_amd64.s +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -DATA ·SCALE(SB)/8, $0x37F4000000000000 -GLOBL ·SCALE(SB), 8, $8 -DATA ·TWO32(SB)/8, $0x41F0000000000000 -GLOBL ·TWO32(SB), 8, $8 -DATA ·TWO64(SB)/8, $0x43F0000000000000 -GLOBL ·TWO64(SB), 8, $8 -DATA ·TWO96(SB)/8, $0x45F0000000000000 -GLOBL ·TWO96(SB), 8, $8 -DATA ·ALPHA32(SB)/8, $0x45E8000000000000 -GLOBL ·ALPHA32(SB), 8, $8 -DATA ·ALPHA64(SB)/8, $0x47E8000000000000 -GLOBL ·ALPHA64(SB), 8, $8 -DATA ·ALPHA96(SB)/8, $0x49E8000000000000 -GLOBL ·ALPHA96(SB), 8, $8 -DATA ·ALPHA130(SB)/8, $0x4C08000000000000 -GLOBL ·ALPHA130(SB), 8, $8 -DATA ·DOFFSET0(SB)/8, $0x4330000000000000 -GLOBL ·DOFFSET0(SB), 8, $8 -DATA ·DOFFSET1(SB)/8, $0x4530000000000000 -GLOBL ·DOFFSET1(SB), 8, $8 -DATA ·DOFFSET2(SB)/8, $0x4730000000000000 -GLOBL ·DOFFSET2(SB), 8, $8 -DATA ·DOFFSET3(SB)/8, $0x4930000000000000 -GLOBL ·DOFFSET3(SB), 8, $8 -DATA ·DOFFSET3MINUSTWO128(SB)/8, $0x492FFFFE00000000 -GLOBL ·DOFFSET3MINUSTWO128(SB), 8, $8 -DATA ·HOFFSET0(SB)/8, $0x43300001FFFFFFFB -GLOBL ·HOFFSET0(SB), 8, $8 -DATA ·HOFFSET1(SB)/8, $0x45300001FFFFFFFE -GLOBL ·HOFFSET1(SB), 8, $8 -DATA ·HOFFSET2(SB)/8, $0x47300001FFFFFFFE -GLOBL ·HOFFSET2(SB), 8, $8 -DATA ·HOFFSET3(SB)/8, $0x49300003FFFFFFFE -GLOBL ·HOFFSET3(SB), 8, $8 -DATA ·ROUNDING(SB)/2, $0x137f -GLOBL ·ROUNDING(SB), 8, $2 diff --git a/components/engine/vendor/golang.org/x/crypto/poly1305/poly1305.go b/components/engine/vendor/golang.org/x/crypto/poly1305/poly1305.go index 4a5f826f7a..f562fa5712 100644 --- a/components/engine/vendor/golang.org/x/crypto/poly1305/poly1305.go +++ b/components/engine/vendor/golang.org/x/crypto/poly1305/poly1305.go @@ -3,7 +3,8 @@ // license that can be found in the LICENSE file. /* -Package poly1305 implements Poly1305 one-time message authentication code as specified in http://cr.yp.to/mac/poly1305-20050329.pdf. +Package poly1305 implements Poly1305 one-time message authentication code as +specified in https://cr.yp.to/mac/poly1305-20050329.pdf. Poly1305 is a fast, one-time authentication function. It is infeasible for an attacker to generate an authenticator for a message without the key. However, a diff --git a/components/engine/vendor/golang.org/x/crypto/poly1305/poly1305_amd64.s b/components/engine/vendor/golang.org/x/crypto/poly1305/poly1305_amd64.s deleted file mode 100644 index f8d4ee9289..0000000000 --- a/components/engine/vendor/golang.org/x/crypto/poly1305/poly1305_amd64.s +++ /dev/null @@ -1,497 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]key) -TEXT ·poly1305(SB),0,$224-32 - MOVQ out+0(FP),DI - MOVQ m+8(FP),SI - MOVQ mlen+16(FP),DX - MOVQ key+24(FP),CX - - MOVQ SP,R11 - MOVQ $31,R9 - NOTQ R9 - ANDQ R9,SP - ADDQ $32,SP - - MOVQ R11,32(SP) - MOVQ R12,40(SP) - MOVQ R13,48(SP) - MOVQ R14,56(SP) - MOVQ R15,64(SP) - MOVQ BX,72(SP) - MOVQ BP,80(SP) - FLDCW ·ROUNDING(SB) - MOVL 0(CX),R8 - MOVL 4(CX),R9 - MOVL 8(CX),AX - MOVL 12(CX),R10 - MOVQ DI,88(SP) - MOVQ CX,96(SP) - MOVL $0X43300000,108(SP) - MOVL $0X45300000,116(SP) - MOVL $0X47300000,124(SP) - MOVL $0X49300000,132(SP) - ANDL $0X0FFFFFFF,R8 - ANDL $0X0FFFFFFC,R9 - ANDL $0X0FFFFFFC,AX - ANDL $0X0FFFFFFC,R10 - MOVL R8,104(SP) - MOVL R9,112(SP) - MOVL AX,120(SP) - MOVL R10,128(SP) - FMOVD 104(SP), F0 - FSUBD ·DOFFSET0(SB), F0 - FMOVD 112(SP), F0 - FSUBD ·DOFFSET1(SB), F0 - FMOVD 120(SP), F0 - FSUBD ·DOFFSET2(SB), F0 - FMOVD 128(SP), F0 - FSUBD ·DOFFSET3(SB), F0 - FXCHD F0, F3 - FMOVDP F0, 136(SP) - FXCHD F0, F1 - FMOVD F0, 144(SP) - FMULD ·SCALE(SB), F0 - FMOVDP F0, 152(SP) - FMOVD F0, 160(SP) - FMULD ·SCALE(SB), F0 - FMOVDP F0, 168(SP) - FMOVD F0, 176(SP) - FMULD ·SCALE(SB), F0 - FMOVDP F0, 184(SP) - FLDZ - FLDZ - FLDZ - FLDZ - CMPQ DX,$16 - JB ADDATMOST15BYTES - INITIALATLEAST16BYTES: - MOVL 12(SI),DI - MOVL 8(SI),CX - MOVL 4(SI),R8 - MOVL 0(SI),R9 - MOVL DI,128(SP) - MOVL CX,120(SP) - MOVL R8,112(SP) - MOVL R9,104(SP) - ADDQ $16,SI - SUBQ $16,DX - FXCHD F0, F3 - FADDD 128(SP), F0 - FSUBD ·DOFFSET3MINUSTWO128(SB), F0 - FXCHD F0, F1 - FADDD 112(SP), F0 - FSUBD ·DOFFSET1(SB), F0 - FXCHD F0, F2 - FADDD 120(SP), F0 - FSUBD ·DOFFSET2(SB), F0 - FXCHD F0, F3 - FADDD 104(SP), F0 - FSUBD ·DOFFSET0(SB), F0 - CMPQ DX,$16 - JB MULTIPLYADDATMOST15BYTES - MULTIPLYADDATLEAST16BYTES: - MOVL 12(SI),DI - MOVL 8(SI),CX - MOVL 4(SI),R8 - MOVL 0(SI),R9 - MOVL DI,128(SP) - MOVL CX,120(SP) - MOVL R8,112(SP) - MOVL R9,104(SP) - ADDQ $16,SI - SUBQ $16,DX - FMOVD ·ALPHA130(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA130(SB), F0 - FSUBD F0,F2 - FMULD ·SCALE(SB), F0 - FMOVD ·ALPHA32(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA32(SB), F0 - FSUBD F0,F2 - FXCHD F0, F2 - FADDDP F0,F1 - FMOVD ·ALPHA64(SB), F0 - FADDD F4,F0 - FSUBD ·ALPHA64(SB), F0 - FSUBD F0,F4 - FMOVD ·ALPHA96(SB), F0 - FADDD F6,F0 - FSUBD ·ALPHA96(SB), F0 - FSUBD F0,F6 - FXCHD F0, F6 - FADDDP F0,F1 - FXCHD F0, F3 - FADDDP F0,F5 - FXCHD F0, F3 - FADDDP F0,F1 - FMOVD 176(SP), F0 - FMULD F3,F0 - FMOVD 160(SP), F0 - FMULD F4,F0 - FMOVD 144(SP), F0 - FMULD F5,F0 - FMOVD 136(SP), F0 - FMULDP F0,F6 - FMOVD 160(SP), F0 - FMULD F4,F0 - FADDDP F0,F3 - FMOVD 144(SP), F0 - FMULD F4,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F4,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULDP F0,F4 - FXCHD F0, F3 - FADDDP F0,F5 - FMOVD 144(SP), F0 - FMULD F4,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F4,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F4,F0 - FADDDP F0,F3 - FMOVD 168(SP), F0 - FMULDP F0,F4 - FXCHD F0, F3 - FADDDP F0,F4 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FXCHD F0, F3 - FMOVD 184(SP), F0 - FMULD F5,F0 - FADDDP F0,F3 - FXCHD F0, F1 - FMOVD 168(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 152(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F1 - CMPQ DX,$16 - FXCHD F0, F2 - FMOVD 128(SP), F0 - FSUBD ·DOFFSET3MINUSTWO128(SB), F0 - FADDDP F0,F1 - FXCHD F0, F1 - FMOVD 120(SP), F0 - FSUBD ·DOFFSET2(SB), F0 - FADDDP 
F0,F1 - FXCHD F0, F3 - FMOVD 112(SP), F0 - FSUBD ·DOFFSET1(SB), F0 - FADDDP F0,F1 - FXCHD F0, F2 - FMOVD 104(SP), F0 - FSUBD ·DOFFSET0(SB), F0 - FADDDP F0,F1 - JAE MULTIPLYADDATLEAST16BYTES - MULTIPLYADDATMOST15BYTES: - FMOVD ·ALPHA130(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA130(SB), F0 - FSUBD F0,F2 - FMULD ·SCALE(SB), F0 - FMOVD ·ALPHA32(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA32(SB), F0 - FSUBD F0,F2 - FMOVD ·ALPHA64(SB), F0 - FADDD F5,F0 - FSUBD ·ALPHA64(SB), F0 - FSUBD F0,F5 - FMOVD ·ALPHA96(SB), F0 - FADDD F7,F0 - FSUBD ·ALPHA96(SB), F0 - FSUBD F0,F7 - FXCHD F0, F7 - FADDDP F0,F1 - FXCHD F0, F5 - FADDDP F0,F1 - FXCHD F0, F3 - FADDDP F0,F5 - FADDDP F0,F1 - FMOVD 176(SP), F0 - FMULD F1,F0 - FMOVD 160(SP), F0 - FMULD F2,F0 - FMOVD 144(SP), F0 - FMULD F3,F0 - FMOVD 136(SP), F0 - FMULDP F0,F4 - FMOVD 160(SP), F0 - FMULD F5,F0 - FADDDP F0,F3 - FMOVD 144(SP), F0 - FMULD F5,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F3 - FMOVD 144(SP), F0 - FMULD F5,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F5,F0 - FADDDP F0,F4 - FMOVD 168(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F5,F0 - FADDDP F0,F4 - FMOVD 168(SP), F0 - FMULD F5,F0 - FADDDP F0,F3 - FMOVD 152(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F1 - ADDATMOST15BYTES: - CMPQ DX,$0 - JE NOMOREBYTES - MOVL $0,0(SP) - MOVL $0, 4 (SP) - MOVL $0, 8 (SP) - MOVL $0, 12 (SP) - LEAQ 0(SP),DI - MOVQ DX,CX - REP; MOVSB - MOVB $1,0(DI) - MOVL 12 (SP),DI - MOVL 8 (SP),SI - MOVL 4 (SP),DX - MOVL 0(SP),CX - MOVL DI,128(SP) - MOVL SI,120(SP) - MOVL DX,112(SP) - MOVL CX,104(SP) - FXCHD F0, F3 - FADDD 128(SP), F0 - FSUBD ·DOFFSET3(SB), F0 - FXCHD F0, F2 - FADDD 120(SP), F0 - FSUBD ·DOFFSET2(SB), F0 - FXCHD F0, F1 - FADDD 112(SP), F0 - FSUBD ·DOFFSET1(SB), F0 - FXCHD F0, F3 - FADDD 104(SP), F0 - FSUBD ·DOFFSET0(SB), F0 - FMOVD ·ALPHA130(SB), F0 - FADDD F3,F0 - FSUBD ·ALPHA130(SB), F0 - FSUBD F0,F3 - FMULD ·SCALE(SB), F0 - FMOVD ·ALPHA32(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA32(SB), F0 - FSUBD F0,F2 - FMOVD ·ALPHA64(SB), F0 - FADDD F6,F0 - FSUBD ·ALPHA64(SB), F0 - FSUBD F0,F6 - FMOVD ·ALPHA96(SB), F0 - FADDD F5,F0 - FSUBD ·ALPHA96(SB), F0 - FSUBD F0,F5 - FXCHD F0, F4 - FADDDP F0,F3 - FXCHD F0, F6 - FADDDP F0,F1 - FXCHD F0, F3 - FADDDP F0,F5 - FXCHD F0, F3 - FADDDP F0,F1 - FMOVD 176(SP), F0 - FMULD F3,F0 - FMOVD 160(SP), F0 - FMULD F4,F0 - FMOVD 144(SP), F0 - FMULD F5,F0 - FMOVD 136(SP), F0 - FMULDP F0,F6 - FMOVD 160(SP), F0 - FMULD F5,F0 - FADDDP F0,F3 - FMOVD 144(SP), F0 - FMULD F5,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F5,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULDP F0,F5 - FXCHD F0, F4 - FADDDP F0,F5 - FMOVD 144(SP), F0 - FMULD F6,F0 - FADDDP F0,F2 - FMOVD 136(SP), F0 - FMULD F6,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F6,F0 - FADDDP F0,F4 - FMOVD 168(SP), F0 - FMULDP F0,F6 - FXCHD F0, F5 - FADDDP F0,F4 - FMOVD 136(SP), F0 - FMULD F2,F0 - FADDDP F0,F1 - FMOVD 184(SP), F0 - FMULD F2,F0 - FADDDP F0,F5 - FMOVD 168(SP), F0 - FMULD F2,F0 - FADDDP F0,F3 - FMOVD 152(SP), F0 - FMULDP F0,F2 - FXCHD F0, F1 - FADDDP F0,F3 - FXCHD F0, F3 - FXCHD F0, F2 - NOMOREBYTES: - MOVL $0,R10 - FMOVD ·ALPHA130(SB), F0 - FADDD F4,F0 - FSUBD ·ALPHA130(SB), F0 - FSUBD F0,F4 - FMULD ·SCALE(SB), F0 - FMOVD ·ALPHA32(SB), F0 - FADDD F2,F0 - FSUBD ·ALPHA32(SB), F0 - FSUBD F0,F2 - FMOVD ·ALPHA64(SB), F0 - FADDD F4,F0 - FSUBD ·ALPHA64(SB), F0 - FSUBD F0,F4 - FMOVD 
·ALPHA96(SB), F0 - FADDD F6,F0 - FSUBD ·ALPHA96(SB), F0 - FXCHD F0, F6 - FSUBD F6,F0 - FXCHD F0, F4 - FADDDP F0,F3 - FXCHD F0, F4 - FADDDP F0,F1 - FXCHD F0, F2 - FADDDP F0,F3 - FXCHD F0, F4 - FADDDP F0,F3 - FXCHD F0, F3 - FADDD ·HOFFSET0(SB), F0 - FXCHD F0, F3 - FADDD ·HOFFSET1(SB), F0 - FXCHD F0, F1 - FADDD ·HOFFSET2(SB), F0 - FXCHD F0, F2 - FADDD ·HOFFSET3(SB), F0 - FXCHD F0, F3 - FMOVDP F0, 104(SP) - FMOVDP F0, 112(SP) - FMOVDP F0, 120(SP) - FMOVDP F0, 128(SP) - MOVL 108(SP),DI - ANDL $63,DI - MOVL 116(SP),SI - ANDL $63,SI - MOVL 124(SP),DX - ANDL $63,DX - MOVL 132(SP),CX - ANDL $63,CX - MOVL 112(SP),R8 - ADDL DI,R8 - MOVQ R8,112(SP) - MOVL 120(SP),DI - ADCL SI,DI - MOVQ DI,120(SP) - MOVL 128(SP),DI - ADCL DX,DI - MOVQ DI,128(SP) - MOVL R10,DI - ADCL CX,DI - MOVQ DI,136(SP) - MOVQ $5,DI - MOVL 104(SP),SI - ADDL SI,DI - MOVQ DI,104(SP) - MOVL R10,DI - MOVQ 112(SP),DX - ADCL DX,DI - MOVQ DI,112(SP) - MOVL R10,DI - MOVQ 120(SP),CX - ADCL CX,DI - MOVQ DI,120(SP) - MOVL R10,DI - MOVQ 128(SP),R8 - ADCL R8,DI - MOVQ DI,128(SP) - MOVQ $0XFFFFFFFC,DI - MOVQ 136(SP),R9 - ADCL R9,DI - SARL $16,DI - MOVQ DI,R9 - XORL $0XFFFFFFFF,R9 - ANDQ DI,SI - MOVQ 104(SP),AX - ANDQ R9,AX - ORQ AX,SI - ANDQ DI,DX - MOVQ 112(SP),AX - ANDQ R9,AX - ORQ AX,DX - ANDQ DI,CX - MOVQ 120(SP),AX - ANDQ R9,AX - ORQ AX,CX - ANDQ DI,R8 - MOVQ 128(SP),DI - ANDQ R9,DI - ORQ DI,R8 - MOVQ 88(SP),DI - MOVQ 96(SP),R9 - ADDL 16(R9),SI - ADCL 20(R9),DX - ADCL 24(R9),CX - ADCL 28(R9),R8 - MOVL SI,0(DI) - MOVL DX,4(DI) - MOVL CX,8(DI) - MOVL R8,12(DI) - MOVQ 32(SP),R11 - MOVQ 40(SP),R12 - MOVQ 48(SP),R13 - MOVQ 56(SP),R14 - MOVQ 64(SP),R15 - MOVQ 72(SP),BX - MOVQ 80(SP),BP - MOVQ R11,SP - RET diff --git a/components/engine/vendor/golang.org/x/crypto/poly1305/poly1305_arm.s b/components/engine/vendor/golang.org/x/crypto/poly1305/poly1305_arm.s deleted file mode 100644 index c15386744d..0000000000 --- a/components/engine/vendor/golang.org/x/crypto/poly1305/poly1305_arm.s +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 5a from the public -// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305. - -// +build arm,!gccgo,!appengine - -DATA poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff -DATA poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03 -DATA poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff -DATA poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff -DATA poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff -GLOBL poly1305_init_constants_armv6<>(SB), 8, $20 - -// Warning: the linker may use R11 to synthesize certain instructions. Please -// take care and verify that no synthetic instructions use it. 
- -TEXT poly1305_init_ext_armv6<>(SB),4,$-4 - MOVM.DB.W [R4-R11], (R13) - MOVM.IA.W (R1), [R2-R5] - MOVW $poly1305_init_constants_armv6<>(SB), R7 - MOVW R2, R8 - MOVW R2>>26, R9 - MOVW R3>>20, g - MOVW R4>>14, R11 - MOVW R5>>8, R12 - ORR R3<<6, R9, R9 - ORR R4<<12, g, g - ORR R5<<18, R11, R11 - MOVM.IA (R7), [R2-R6] - AND R8, R2, R2 - AND R9, R3, R3 - AND g, R4, R4 - AND R11, R5, R5 - AND R12, R6, R6 - MOVM.IA.W [R2-R6], (R0) - EOR R2, R2, R2 - EOR R3, R3, R3 - EOR R4, R4, R4 - EOR R5, R5, R5 - EOR R6, R6, R6 - MOVM.IA.W [R2-R6], (R0) - MOVM.IA.W (R1), [R2-R5] - MOVM.IA [R2-R6], (R0) - MOVM.IA.W (R13), [R4-R11] - RET - -#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \ - MOVBU (offset+0)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+0)(Rdst); \ - MOVBU (offset+1)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+1)(Rdst); \ - MOVBU (offset+2)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+2)(Rdst); \ - MOVBU (offset+3)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+3)(Rdst) - -TEXT poly1305_blocks_armv6<>(SB),4,$-4 - MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13) - SUB $128, R13 - MOVW R0, 36(R13) - MOVW R1, 40(R13) - MOVW R2, 44(R13) - MOVW R1, R14 - MOVW R2, R12 - MOVW 56(R0), R8 - WORD $0xe1180008 // TST R8, R8 not working see issue 5921 - EOR R6, R6, R6 - MOVW.EQ $(1<<24), R6 - MOVW R6, 32(R13) - ADD $64, R13, g - MOVM.IA (R0), [R0-R9] - MOVM.IA [R0-R4], (g) - CMP $16, R12 - BLO poly1305_blocks_armv6_done -poly1305_blocks_armv6_mainloop: - WORD $0xe31e0003 // TST R14, #3 not working see issue 5921 - BEQ poly1305_blocks_armv6_mainloop_aligned - ADD $48, R13, g - MOVW_UNALIGNED(R14, g, R0, 0) - MOVW_UNALIGNED(R14, g, R0, 4) - MOVW_UNALIGNED(R14, g, R0, 8) - MOVW_UNALIGNED(R14, g, R0, 12) - MOVM.IA (g), [R0-R3] - ADD $16, R14 - B poly1305_blocks_armv6_mainloop_loaded -poly1305_blocks_armv6_mainloop_aligned: - MOVM.IA.W (R14), [R0-R3] -poly1305_blocks_armv6_mainloop_loaded: - MOVW R0>>26, g - MOVW R1>>20, R11 - MOVW R2>>14, R12 - MOVW R14, 40(R13) - MOVW R3>>8, R4 - ORR R1<<6, g, g - ORR R2<<12, R11, R11 - ORR R3<<18, R12, R12 - BIC $0xfc000000, R0, R0 - BIC $0xfc000000, g, g - MOVW 32(R13), R3 - BIC $0xfc000000, R11, R11 - BIC $0xfc000000, R12, R12 - ADD R0, R5, R5 - ADD g, R6, R6 - ORR R3, R4, R4 - ADD R11, R7, R7 - ADD $64, R13, R14 - ADD R12, R8, R8 - ADD R4, R9, R9 - MOVM.IA (R14), [R0-R4] - MULLU R4, R5, (R11, g) - MULLU R3, R5, (R14, R12) - MULALU R3, R6, (R11, g) - MULALU R2, R6, (R14, R12) - MULALU R2, R7, (R11, g) - MULALU R1, R7, (R14, R12) - ADD R4<<2, R4, R4 - ADD R3<<2, R3, R3 - MULALU R1, R8, (R11, g) - MULALU R0, R8, (R14, R12) - MULALU R0, R9, (R11, g) - MULALU R4, R9, (R14, R12) - MOVW g, 24(R13) - MOVW R11, 28(R13) - MOVW R12, 16(R13) - MOVW R14, 20(R13) - MULLU R2, R5, (R11, g) - MULLU R1, R5, (R14, R12) - MULALU R1, R6, (R11, g) - MULALU R0, R6, (R14, R12) - MULALU R0, R7, (R11, g) - MULALU R4, R7, (R14, R12) - ADD R2<<2, R2, R2 - ADD R1<<2, R1, R1 - MULALU R4, R8, (R11, g) - MULALU R3, R8, (R14, R12) - MULALU R3, R9, (R11, g) - MULALU R2, R9, (R14, R12) - MOVW g, 8(R13) - MOVW R11, 12(R13) - MOVW R12, 0(R13) - MOVW R14, w+4(SP) - MULLU R0, R5, (R11, g) - MULALU R4, R6, (R11, g) - MULALU R3, R7, (R11, g) - MULALU R2, R8, (R11, g) - MULALU R1, R9, (R11, g) - MOVM.IA (R13), [R0-R7] - MOVW g>>26, R12 - MOVW R4>>26, R14 - ORR R11<<6, R12, R12 - ORR R5<<6, R14, R14 - BIC $0xfc000000, g, g - BIC $0xfc000000, R4, R4 - ADD.S R12, R0, R0 - ADC $0, R1, R1 - ADD.S R14, R6, R6 - ADC $0, R7, R7 - MOVW R0>>26, R12 - MOVW R6>>26, R14 - ORR R1<<6, R12, R12 - ORR R7<<6, R14, R14 - BIC $0xfc000000, R0, R0 - BIC 
$0xfc000000, R6, R6 - ADD R14<<2, R14, R14 - ADD.S R12, R2, R2 - ADC $0, R3, R3 - ADD R14, g, g - MOVW R2>>26, R12 - MOVW g>>26, R14 - ORR R3<<6, R12, R12 - BIC $0xfc000000, g, R5 - BIC $0xfc000000, R2, R7 - ADD R12, R4, R4 - ADD R14, R0, R0 - MOVW R4>>26, R12 - BIC $0xfc000000, R4, R8 - ADD R12, R6, R9 - MOVW w+44(SP), R12 - MOVW w+40(SP), R14 - MOVW R0, R6 - CMP $32, R12 - SUB $16, R12, R12 - MOVW R12, 44(R13) - BHS poly1305_blocks_armv6_mainloop -poly1305_blocks_armv6_done: - MOVW 36(R13), R12 - MOVW R5, 20(R12) - MOVW R6, 24(R12) - MOVW R7, 28(R12) - MOVW R8, 32(R12) - MOVW R9, 36(R12) - ADD $128, R13, R13 - MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14] - RET - -#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \ - MOVBU.P 1(Rsrc), Rtmp; \ - MOVBU.P Rtmp, 1(Rdst); \ - MOVBU.P 1(Rsrc), Rtmp; \ - MOVBU.P Rtmp, 1(Rdst) - -#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \ - MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \ - MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) - -TEXT poly1305_finish_ext_armv6<>(SB),4,$-4 - MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13) - SUB $16, R13, R13 - MOVW R0, R5 - MOVW R1, R6 - MOVW R2, R7 - MOVW R3, R8 - AND.S R2, R2, R2 - BEQ poly1305_finish_ext_armv6_noremaining - EOR R0, R0 - MOVW R13, R9 - MOVW R0, 0(R13) - MOVW R0, 4(R13) - MOVW R0, 8(R13) - MOVW R0, 12(R13) - WORD $0xe3110003 // TST R1, #3 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_aligned - WORD $0xe3120008 // TST R2, #8 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip8 - MOVWP_UNALIGNED(R1, R9, g) - MOVWP_UNALIGNED(R1, R9, g) -poly1305_finish_ext_armv6_skip8: - WORD $0xe3120004 // TST $4, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip4 - MOVWP_UNALIGNED(R1, R9, g) -poly1305_finish_ext_armv6_skip4: - WORD $0xe3120002 // TST $2, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip2 - MOVHUP_UNALIGNED(R1, R9, g) - B poly1305_finish_ext_armv6_skip2 -poly1305_finish_ext_armv6_aligned: - WORD $0xe3120008 // TST R2, #8 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip8_aligned - MOVM.IA.W (R1), [g-R11] - MOVM.IA.W [g-R11], (R9) -poly1305_finish_ext_armv6_skip8_aligned: - WORD $0xe3120004 // TST $4, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip4_aligned - MOVW.P 4(R1), g - MOVW.P g, 4(R9) -poly1305_finish_ext_armv6_skip4_aligned: - WORD $0xe3120002 // TST $2, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip2 - MOVHU.P 2(R1), g - MOVH.P g, 2(R9) -poly1305_finish_ext_armv6_skip2: - WORD $0xe3120001 // TST $1, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip1 - MOVBU.P 1(R1), g - MOVBU.P g, 1(R9) -poly1305_finish_ext_armv6_skip1: - MOVW $1, R11 - MOVBU R11, 0(R9) - MOVW R11, 56(R5) - MOVW R5, R0 - MOVW R13, R1 - MOVW $16, R2 - BL poly1305_blocks_armv6<>(SB) -poly1305_finish_ext_armv6_noremaining: - MOVW 20(R5), R0 - MOVW 24(R5), R1 - MOVW 28(R5), R2 - MOVW 32(R5), R3 - MOVW 36(R5), R4 - MOVW R4>>26, R12 - BIC $0xfc000000, R4, R4 - ADD R12<<2, R12, R12 - ADD R12, R0, R0 - MOVW R0>>26, R12 - BIC $0xfc000000, R0, R0 - ADD R12, R1, R1 - MOVW R1>>26, R12 - BIC $0xfc000000, R1, R1 - ADD R12, R2, R2 - MOVW R2>>26, R12 - BIC $0xfc000000, R2, R2 - ADD R12, R3, R3 - MOVW R3>>26, R12 - BIC $0xfc000000, R3, R3 - ADD R12, R4, R4 - ADD $5, R0, R6 - MOVW R6>>26, R12 - BIC $0xfc000000, R6, R6 - ADD R12, R1, R7 - MOVW R7>>26, R12 - BIC $0xfc000000, R7, R7 - ADD R12, R2, g - MOVW g>>26, R12 - BIC $0xfc000000, g, g - ADD R12, R3, R11 - MOVW $-(1<<26), R12 - ADD R11>>26, R12, R12 - BIC $0xfc000000, R11, R11 - 
ADD R12, R4, R14 - MOVW R14>>31, R12 - SUB $1, R12 - AND R12, R6, R6 - AND R12, R7, R7 - AND R12, g, g - AND R12, R11, R11 - AND R12, R14, R14 - MVN R12, R12 - AND R12, R0, R0 - AND R12, R1, R1 - AND R12, R2, R2 - AND R12, R3, R3 - AND R12, R4, R4 - ORR R6, R0, R0 - ORR R7, R1, R1 - ORR g, R2, R2 - ORR R11, R3, R3 - ORR R14, R4, R4 - ORR R1<<26, R0, R0 - MOVW R1>>6, R1 - ORR R2<<20, R1, R1 - MOVW R2>>12, R2 - ORR R3<<14, R2, R2 - MOVW R3>>18, R3 - ORR R4<<8, R3, R3 - MOVW 40(R5), R6 - MOVW 44(R5), R7 - MOVW 48(R5), g - MOVW 52(R5), R11 - ADD.S R6, R0, R0 - ADC.S R7, R1, R1 - ADC.S g, R2, R2 - ADC.S R11, R3, R3 - MOVM.IA [R0-R3], (R8) - MOVW R5, R12 - EOR R0, R0, R0 - EOR R1, R1, R1 - EOR R2, R2, R2 - EOR R3, R3, R3 - EOR R4, R4, R4 - EOR R5, R5, R5 - EOR R6, R6, R6 - EOR R7, R7, R7 - MOVM.IA.W [R0-R7], (R12) - MOVM.IA [R0-R7], (R12) - ADD $16, R13, R13 - MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14] - RET - -// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]key) -TEXT ·poly1305_auth_armv6(SB),0,$280-16 - MOVW out+0(FP), R4 - MOVW m+4(FP), R5 - MOVW mlen+8(FP), R6 - MOVW key+12(FP), R7 - - MOVW R13, R8 - BIC $63, R13 - SUB $64, R13, R13 - MOVW R13, R0 - MOVW R7, R1 - BL poly1305_init_ext_armv6<>(SB) - BIC.S $15, R6, R2 - BEQ poly1305_auth_armv6_noblocks - MOVW R13, R0 - MOVW R5, R1 - ADD R2, R5, R5 - SUB R2, R6, R6 - BL poly1305_blocks_armv6<>(SB) -poly1305_auth_armv6_noblocks: - MOVW R13, R0 - MOVW R5, R1 - MOVW R6, R2 - MOVW R4, R3 - BL poly1305_finish_ext_armv6<>(SB) - MOVW R8, R13 - RET diff --git a/components/engine/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/components/engine/vendor/golang.org/x/crypto/poly1305/sum_amd64.go index 6775c703f6..4dd72fe799 100644 --- a/components/engine/vendor/golang.org/x/crypto/poly1305/sum_amd64.go +++ b/components/engine/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -6,10 +6,8 @@ package poly1305 -// This function is implemented in poly1305_amd64.s - +// This function is implemented in sum_amd64.s //go:noescape - func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte) // Sum generates an authenticator for m using a one-time key and puts the diff --git a/components/engine/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/components/engine/vendor/golang.org/x/crypto/poly1305/sum_amd64.s new file mode 100644 index 0000000000..2edae63828 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -0,0 +1,125 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +#define POLY1305_ADD(msg, h0, h1, h2) \ + ADDQ 0(msg), h0; \ + ADCQ 8(msg), h1; \ + ADCQ $1, h2; \ + LEAQ 16(msg), msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ + MOVQ r0, AX; \ + MULQ h0; \ + MOVQ AX, t0; \ + MOVQ DX, t1; \ + MOVQ r0, AX; \ + MULQ h1; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ r0, t2; \ + IMULQ h2, t2; \ + ADDQ DX, t2; \ + \ + MOVQ r1, AX; \ + MULQ h0; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ DX, h0; \ + MOVQ r1, t3; \ + IMULQ h2, t3; \ + MOVQ r1, AX; \ + MULQ h1; \ + ADDQ AX, t2; \ + ADCQ DX, t3; \ + ADDQ h0, t2; \ + ADCQ $0, t3; \ + \ + MOVQ t0, h0; \ + MOVQ t1, h1; \ + MOVQ t2, h2; \ + ANDQ $3, h2; \ + MOVQ t2, t0; \ + ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ + ADDQ t0, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2; \ + SHRQ $2, t3, t2; \ + SHRQ $2, t3; \ + ADDQ t2, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]key) +TEXT ·poly1305(SB), $0-32 + MOVQ out+0(FP), DI + MOVQ m+8(FP), SI + MOVQ mlen+16(FP), R15 + MOVQ key+24(FP), AX + + MOVQ 0(AX), R11 + MOVQ 8(AX), R12 + ANDQ ·poly1305Mask<>(SB), R11 // r0 + ANDQ ·poly1305Mask<>+8(SB), R12 // r1 + XORQ R8, R8 // h0 + XORQ R9, R9 // h1 + XORQ R10, R10 // h2 + + CMPQ R15, $16 + JB bytes_between_0_and_15 + +loop: + POLY1305_ADD(SI, R8, R9, R10) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) + SUBQ $16, R15 + CMPQ R15, $16 + JAE loop + +bytes_between_0_and_15: + TESTQ R15, R15 + JZ done + MOVQ $1, BX + XORQ CX, CX + XORQ R13, R13 + ADDQ R15, SI + +flush_buffer: + SHLQ $8, BX, CX + SHLQ $8, BX + MOVB -1(SI), R13 + XORQ R13, BX + DECQ SI + DECQ R15 + JNZ flush_buffer + + ADDQ BX, R8 + ADCQ CX, R9 + ADCQ $0, R10 + MOVQ $16, R15 + JMP multiply + +done: + MOVQ R8, AX + MOVQ R9, BX + SUBQ $0xFFFFFFFFFFFFFFFB, AX + SBBQ $0xFFFFFFFFFFFFFFFF, BX + SBBQ $3, R10 + CMOVQCS R8, AX + CMOVQCS R9, BX + MOVQ key+24(FP), R8 + ADDQ 16(R8), AX + ADCQ 24(R8), BX + + MOVQ AX, 0(DI) + MOVQ BX, 8(DI) + RET diff --git a/components/engine/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/components/engine/vendor/golang.org/x/crypto/poly1305/sum_arm.go index 50b979c24c..5dc321c2f3 100644 --- a/components/engine/vendor/golang.org/x/crypto/poly1305/sum_arm.go +++ b/components/engine/vendor/golang.org/x/crypto/poly1305/sum_arm.go @@ -2,14 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build arm,!gccgo,!appengine +// +build arm,!gccgo,!appengine,!nacl package poly1305 -// This function is implemented in poly1305_arm.s - +// This function is implemented in sum_arm.s //go:noescape - func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte) // Sum generates an authenticator for m using a one-time key and puts the diff --git a/components/engine/vendor/golang.org/x/crypto/poly1305/sum_arm.s b/components/engine/vendor/golang.org/x/crypto/poly1305/sum_arm.s new file mode 100644 index 0000000000..f70b4ac484 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/poly1305/sum_arm.s @@ -0,0 +1,427 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build arm,!gccgo,!appengine,!nacl + +#include "textflag.h" + +// This code was translated into a form compatible with 5a from the public +// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305. + +DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff +DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03 +DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff +DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff +DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff +GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20 + +// Warning: the linker may use R11 to synthesize certain instructions. Please +// take care and verify that no synthetic instructions use it. + +TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0 + // Needs 16 bytes of stack and 64 bytes of space pointed to by R0. (It + // might look like it's only 60 bytes of space but the final four bytes + // will be written by another function.) We need to skip over four + // bytes of stack because that's saving the value of 'g'. + ADD $4, R13, R8 + MOVM.IB [R4-R7], (R8) + MOVM.IA.W (R1), [R2-R5] + MOVW $·poly1305_init_constants_armv6<>(SB), R7 + MOVW R2, R8 + MOVW R2>>26, R9 + MOVW R3>>20, g + MOVW R4>>14, R11 + MOVW R5>>8, R12 + ORR R3<<6, R9, R9 + ORR R4<<12, g, g + ORR R5<<18, R11, R11 + MOVM.IA (R7), [R2-R6] + AND R8, R2, R2 + AND R9, R3, R3 + AND g, R4, R4 + AND R11, R5, R5 + AND R12, R6, R6 + MOVM.IA.W [R2-R6], (R0) + EOR R2, R2, R2 + EOR R3, R3, R3 + EOR R4, R4, R4 + EOR R5, R5, R5 + EOR R6, R6, R6 + MOVM.IA.W [R2-R6], (R0) + MOVM.IA.W (R1), [R2-R5] + MOVM.IA [R2-R6], (R0) + ADD $20, R13, R0 + MOVM.DA (R0), [R4-R7] + RET + +#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \ + MOVBU (offset+0)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+0)(Rdst); \ + MOVBU (offset+1)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+1)(Rdst); \ + MOVBU (offset+2)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+2)(Rdst); \ + MOVBU (offset+3)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+3)(Rdst) + +TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0 + // Needs 24 bytes of stack for saved registers and then 88 bytes of + // scratch space after that. We assume that 24 bytes at (R13) have + // already been used: four bytes for the link register saved in the + // prelude of poly1305_auth_armv6, four bytes for saving the value of g + // in that function and 16 bytes of scratch space used around + // poly1305_finish_ext_armv6_skip1. 
+ ADD $24, R13, R12 + MOVM.IB [R4-R8, R14], (R12) + MOVW R0, 88(R13) + MOVW R1, 92(R13) + MOVW R2, 96(R13) + MOVW R1, R14 + MOVW R2, R12 + MOVW 56(R0), R8 + WORD $0xe1180008 // TST R8, R8 not working see issue 5921 + EOR R6, R6, R6 + MOVW.EQ $(1<<24), R6 + MOVW R6, 84(R13) + ADD $116, R13, g + MOVM.IA (R0), [R0-R9] + MOVM.IA [R0-R4], (g) + CMP $16, R12 + BLO poly1305_blocks_armv6_done + +poly1305_blocks_armv6_mainloop: + WORD $0xe31e0003 // TST R14, #3 not working see issue 5921 + BEQ poly1305_blocks_armv6_mainloop_aligned + ADD $100, R13, g + MOVW_UNALIGNED(R14, g, R0, 0) + MOVW_UNALIGNED(R14, g, R0, 4) + MOVW_UNALIGNED(R14, g, R0, 8) + MOVW_UNALIGNED(R14, g, R0, 12) + MOVM.IA (g), [R0-R3] + ADD $16, R14 + B poly1305_blocks_armv6_mainloop_loaded + +poly1305_blocks_armv6_mainloop_aligned: + MOVM.IA.W (R14), [R0-R3] + +poly1305_blocks_armv6_mainloop_loaded: + MOVW R0>>26, g + MOVW R1>>20, R11 + MOVW R2>>14, R12 + MOVW R14, 92(R13) + MOVW R3>>8, R4 + ORR R1<<6, g, g + ORR R2<<12, R11, R11 + ORR R3<<18, R12, R12 + BIC $0xfc000000, R0, R0 + BIC $0xfc000000, g, g + MOVW 84(R13), R3 + BIC $0xfc000000, R11, R11 + BIC $0xfc000000, R12, R12 + ADD R0, R5, R5 + ADD g, R6, R6 + ORR R3, R4, R4 + ADD R11, R7, R7 + ADD $116, R13, R14 + ADD R12, R8, R8 + ADD R4, R9, R9 + MOVM.IA (R14), [R0-R4] + MULLU R4, R5, (R11, g) + MULLU R3, R5, (R14, R12) + MULALU R3, R6, (R11, g) + MULALU R2, R6, (R14, R12) + MULALU R2, R7, (R11, g) + MULALU R1, R7, (R14, R12) + ADD R4<<2, R4, R4 + ADD R3<<2, R3, R3 + MULALU R1, R8, (R11, g) + MULALU R0, R8, (R14, R12) + MULALU R0, R9, (R11, g) + MULALU R4, R9, (R14, R12) + MOVW g, 76(R13) + MOVW R11, 80(R13) + MOVW R12, 68(R13) + MOVW R14, 72(R13) + MULLU R2, R5, (R11, g) + MULLU R1, R5, (R14, R12) + MULALU R1, R6, (R11, g) + MULALU R0, R6, (R14, R12) + MULALU R0, R7, (R11, g) + MULALU R4, R7, (R14, R12) + ADD R2<<2, R2, R2 + ADD R1<<2, R1, R1 + MULALU R4, R8, (R11, g) + MULALU R3, R8, (R14, R12) + MULALU R3, R9, (R11, g) + MULALU R2, R9, (R14, R12) + MOVW g, 60(R13) + MOVW R11, 64(R13) + MOVW R12, 52(R13) + MOVW R14, 56(R13) + MULLU R0, R5, (R11, g) + MULALU R4, R6, (R11, g) + MULALU R3, R7, (R11, g) + MULALU R2, R8, (R11, g) + MULALU R1, R9, (R11, g) + ADD $52, R13, R0 + MOVM.IA (R0), [R0-R7] + MOVW g>>26, R12 + MOVW R4>>26, R14 + ORR R11<<6, R12, R12 + ORR R5<<6, R14, R14 + BIC $0xfc000000, g, g + BIC $0xfc000000, R4, R4 + ADD.S R12, R0, R0 + ADC $0, R1, R1 + ADD.S R14, R6, R6 + ADC $0, R7, R7 + MOVW R0>>26, R12 + MOVW R6>>26, R14 + ORR R1<<6, R12, R12 + ORR R7<<6, R14, R14 + BIC $0xfc000000, R0, R0 + BIC $0xfc000000, R6, R6 + ADD R14<<2, R14, R14 + ADD.S R12, R2, R2 + ADC $0, R3, R3 + ADD R14, g, g + MOVW R2>>26, R12 + MOVW g>>26, R14 + ORR R3<<6, R12, R12 + BIC $0xfc000000, g, R5 + BIC $0xfc000000, R2, R7 + ADD R12, R4, R4 + ADD R14, R0, R0 + MOVW R4>>26, R12 + BIC $0xfc000000, R4, R8 + ADD R12, R6, R9 + MOVW 96(R13), R12 + MOVW 92(R13), R14 + MOVW R0, R6 + CMP $32, R12 + SUB $16, R12, R12 + MOVW R12, 96(R13) + BHS poly1305_blocks_armv6_mainloop + +poly1305_blocks_armv6_done: + MOVW 88(R13), R12 + MOVW R5, 20(R12) + MOVW R6, 24(R12) + MOVW R7, 28(R12) + MOVW R8, 32(R12) + MOVW R9, 36(R12) + ADD $48, R13, R0 + MOVM.DA (R0), [R4-R8, R14] + RET + +#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \ + MOVBU.P 1(Rsrc), Rtmp; \ + MOVBU.P Rtmp, 1(Rdst); \ + MOVBU.P 1(Rsrc), Rtmp; \ + MOVBU.P Rtmp, 1(Rdst) + +#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \ + MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \ + MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) + +// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen 
uint32, key *[32]key) +TEXT ·poly1305_auth_armv6(SB), $196-16 + // The value 196, just above, is the sum of 64 (the size of the context + // structure) and 132 (the amount of stack needed). + // + // At this point, the stack pointer (R13) has been moved down. It + // points to the saved link register and there's 196 bytes of free + // space above it. + // + // The stack for this function looks like: + // + // +--------------------- + // | + // | 64 bytes of context structure + // | + // +--------------------- + // | + // | 112 bytes for poly1305_blocks_armv6 + // | + // +--------------------- + // | 16 bytes of final block, constructed at + // | poly1305_finish_ext_armv6_skip8 + // +--------------------- + // | four bytes of saved 'g' + // +--------------------- + // | lr, saved by prelude <- R13 points here + // +--------------------- + MOVW g, 4(R13) + + MOVW out+0(FP), R4 + MOVW m+4(FP), R5 + MOVW mlen+8(FP), R6 + MOVW key+12(FP), R7 + + ADD $136, R13, R0 // 136 = 4 + 4 + 16 + 112 + MOVW R7, R1 + + // poly1305_init_ext_armv6 will write to the stack from R13+4, but + // that's ok because none of the other values have been written yet. + BL poly1305_init_ext_armv6<>(SB) + BIC.S $15, R6, R2 + BEQ poly1305_auth_armv6_noblocks + ADD $136, R13, R0 + MOVW R5, R1 + ADD R2, R5, R5 + SUB R2, R6, R6 + BL poly1305_blocks_armv6<>(SB) + +poly1305_auth_armv6_noblocks: + ADD $136, R13, R0 + MOVW R5, R1 + MOVW R6, R2 + MOVW R4, R3 + + MOVW R0, R5 + MOVW R1, R6 + MOVW R2, R7 + MOVW R3, R8 + AND.S R2, R2, R2 + BEQ poly1305_finish_ext_armv6_noremaining + EOR R0, R0 + ADD $8, R13, R9 // 8 = offset to 16 byte scratch space + MOVW R0, (R9) + MOVW R0, 4(R9) + MOVW R0, 8(R9) + MOVW R0, 12(R9) + WORD $0xe3110003 // TST R1, #3 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_aligned + WORD $0xe3120008 // TST R2, #8 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip8 + MOVWP_UNALIGNED(R1, R9, g) + MOVWP_UNALIGNED(R1, R9, g) + +poly1305_finish_ext_armv6_skip8: + WORD $0xe3120004 // TST $4, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip4 + MOVWP_UNALIGNED(R1, R9, g) + +poly1305_finish_ext_armv6_skip4: + WORD $0xe3120002 // TST $2, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip2 + MOVHUP_UNALIGNED(R1, R9, g) + B poly1305_finish_ext_armv6_skip2 + +poly1305_finish_ext_armv6_aligned: + WORD $0xe3120008 // TST R2, #8 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip8_aligned + MOVM.IA.W (R1), [g-R11] + MOVM.IA.W [g-R11], (R9) + +poly1305_finish_ext_armv6_skip8_aligned: + WORD $0xe3120004 // TST $4, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip4_aligned + MOVW.P 4(R1), g + MOVW.P g, 4(R9) + +poly1305_finish_ext_armv6_skip4_aligned: + WORD $0xe3120002 // TST $2, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip2 + MOVHU.P 2(R1), g + MOVH.P g, 2(R9) + +poly1305_finish_ext_armv6_skip2: + WORD $0xe3120001 // TST $1, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip1 + MOVBU.P 1(R1), g + MOVBU.P g, 1(R9) + +poly1305_finish_ext_armv6_skip1: + MOVW $1, R11 + MOVBU R11, 0(R9) + MOVW R11, 56(R5) + MOVW R5, R0 + ADD $8, R13, R1 + MOVW $16, R2 + BL poly1305_blocks_armv6<>(SB) + +poly1305_finish_ext_armv6_noremaining: + MOVW 20(R5), R0 + MOVW 24(R5), R1 + MOVW 28(R5), R2 + MOVW 32(R5), R3 + MOVW 36(R5), R4 + MOVW R4>>26, R12 + BIC $0xfc000000, R4, R4 + ADD R12<<2, R12, R12 + ADD R12, R0, R0 + MOVW R0>>26, R12 + BIC $0xfc000000, R0, R0 + ADD R12, R1, R1 + MOVW R1>>26, R12 + BIC $0xfc000000, R1, R1 + ADD 
R12, R2, R2 + MOVW R2>>26, R12 + BIC $0xfc000000, R2, R2 + ADD R12, R3, R3 + MOVW R3>>26, R12 + BIC $0xfc000000, R3, R3 + ADD R12, R4, R4 + ADD $5, R0, R6 + MOVW R6>>26, R12 + BIC $0xfc000000, R6, R6 + ADD R12, R1, R7 + MOVW R7>>26, R12 + BIC $0xfc000000, R7, R7 + ADD R12, R2, g + MOVW g>>26, R12 + BIC $0xfc000000, g, g + ADD R12, R3, R11 + MOVW $-(1<<26), R12 + ADD R11>>26, R12, R12 + BIC $0xfc000000, R11, R11 + ADD R12, R4, R9 + MOVW R9>>31, R12 + SUB $1, R12 + AND R12, R6, R6 + AND R12, R7, R7 + AND R12, g, g + AND R12, R11, R11 + AND R12, R9, R9 + MVN R12, R12 + AND R12, R0, R0 + AND R12, R1, R1 + AND R12, R2, R2 + AND R12, R3, R3 + AND R12, R4, R4 + ORR R6, R0, R0 + ORR R7, R1, R1 + ORR g, R2, R2 + ORR R11, R3, R3 + ORR R9, R4, R4 + ORR R1<<26, R0, R0 + MOVW R1>>6, R1 + ORR R2<<20, R1, R1 + MOVW R2>>12, R2 + ORR R3<<14, R2, R2 + MOVW R3>>18, R3 + ORR R4<<8, R3, R3 + MOVW 40(R5), R6 + MOVW 44(R5), R7 + MOVW 48(R5), g + MOVW 52(R5), R11 + ADD.S R6, R0, R0 + ADC.S R7, R1, R1 + ADC.S g, R2, R2 + ADC.S R11, R3, R3 + MOVM.IA [R0-R3], (R8) + MOVW R5, R12 + EOR R0, R0, R0 + EOR R1, R1, R1 + EOR R2, R2, R2 + EOR R3, R3, R3 + EOR R4, R4, R4 + EOR R5, R5, R5 + EOR R6, R6, R6 + EOR R7, R7, R7 + MOVM.IA.W [R0-R7], (R12) + MOVM.IA [R0-R7], (R12) + MOVW 4(R13), g + RET diff --git a/components/engine/vendor/golang.org/x/crypto/poly1305/sum_ref.go b/components/engine/vendor/golang.org/x/crypto/poly1305/sum_ref.go index 0b24fc78b9..b2805a5ca1 100644 --- a/components/engine/vendor/golang.org/x/crypto/poly1305/sum_ref.go +++ b/components/engine/vendor/golang.org/x/crypto/poly1305/sum_ref.go @@ -2,1530 +2,140 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64,!arm gccgo appengine +// +build !amd64,!arm gccgo appengine nacl package poly1305 -// Based on original, public domain implementation from NaCl by D. J. -// Bernstein. +import "encoding/binary" -import "math" - -const ( - alpham80 = 0.00000000558793544769287109375 - alpham48 = 24.0 - alpham16 = 103079215104.0 - alpha0 = 6755399441055744.0 - alpha18 = 1770887431076116955136.0 - alpha32 = 29014219670751100192948224.0 - alpha50 = 7605903601369376408980219232256.0 - alpha64 = 124615124604835863084731911901282304.0 - alpha82 = 32667107224410092492483962313449748299776.0 - alpha96 = 535217884764734955396857238543560676143529984.0 - alpha112 = 35076039295941670036888435985190792471742381031424.0 - alpha130 = 9194973245195333150150082162901855101712434733101613056.0 - scale = 0.0000000000000000000000000000000000000036734198463196484624023016788195177431833298649127735047148490821200539357960224151611328125 - offset0 = 6755408030990331.0 - offset1 = 29014256564239239022116864.0 - offset2 = 124615283061160854719918951570079744.0 - offset3 = 535219245894202480694386063513315216128475136.0 -) - -// Sum generates an authenticator for m using a one-time key and puts the +// Sum generates an authenticator for msg using a one-time key and puts the // 16-byte result into out. Authenticating two different messages with the same // key allows an attacker to forge messages at will. 
-func Sum(out *[16]byte, m []byte, key *[32]byte) { - r := key - s := key[16:] +func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) { var ( - y7 float64 - y6 float64 - y1 float64 - y0 float64 - y5 float64 - y4 float64 - x7 float64 - x6 float64 - x1 float64 - x0 float64 - y3 float64 - y2 float64 - x5 float64 - r3lowx0 float64 - x4 float64 - r0lowx6 float64 - x3 float64 - r3highx0 float64 - x2 float64 - r0highx6 float64 - r0lowx0 float64 - sr1lowx6 float64 - r0highx0 float64 - sr1highx6 float64 - sr3low float64 - r1lowx0 float64 - sr2lowx6 float64 - r1highx0 float64 - sr2highx6 float64 - r2lowx0 float64 - sr3lowx6 float64 - r2highx0 float64 - sr3highx6 float64 - r1highx4 float64 - r1lowx4 float64 - r0highx4 float64 - r0lowx4 float64 - sr3highx4 float64 - sr3lowx4 float64 - sr2highx4 float64 - sr2lowx4 float64 - r0lowx2 float64 - r0highx2 float64 - r1lowx2 float64 - r1highx2 float64 - r2lowx2 float64 - r2highx2 float64 - sr3lowx2 float64 - sr3highx2 float64 - z0 float64 - z1 float64 - z2 float64 - z3 float64 - m0 int64 - m1 int64 - m2 int64 - m3 int64 - m00 uint32 - m01 uint32 - m02 uint32 - m03 uint32 - m10 uint32 - m11 uint32 - m12 uint32 - m13 uint32 - m20 uint32 - m21 uint32 - m22 uint32 - m23 uint32 - m30 uint32 - m31 uint32 - m32 uint32 - m33 uint64 - lbelow2 int32 - lbelow3 int32 - lbelow4 int32 - lbelow5 int32 - lbelow6 int32 - lbelow7 int32 - lbelow8 int32 - lbelow9 int32 - lbelow10 int32 - lbelow11 int32 - lbelow12 int32 - lbelow13 int32 - lbelow14 int32 - lbelow15 int32 - s00 uint32 - s01 uint32 - s02 uint32 - s03 uint32 - s10 uint32 - s11 uint32 - s12 uint32 - s13 uint32 - s20 uint32 - s21 uint32 - s22 uint32 - s23 uint32 - s30 uint32 - s31 uint32 - s32 uint32 - s33 uint32 - bits32 uint64 - f uint64 - f0 uint64 - f1 uint64 - f2 uint64 - f3 uint64 - f4 uint64 - g uint64 - g0 uint64 - g1 uint64 - g2 uint64 - g3 uint64 - g4 uint64 + h0, h1, h2, h3, h4 uint32 // the hash accumulators + r0, r1, r2, r3, r4 uint64 // the r part of the key ) - var p int32 + r0 = uint64(binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff) + r1 = uint64((binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03) + r2 = uint64((binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff) + r3 = uint64((binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff) + r4 = uint64((binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff) - l := int32(len(m)) + R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5 - r00 := uint32(r[0]) + for len(msg) >= TagSize { + // h += msg + h0 += binary.LittleEndian.Uint32(msg[0:]) & 0x3ffffff + h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff + h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff + h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff + h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | (1 << 24) - r01 := uint32(r[1]) + // h *= r + d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1) + d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2) + d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3) + d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4) + d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0) - r02 := uint32(r[2]) - r0 := int64(2151) + // h %= p + h0 = uint32(d0) & 0x3ffffff + h1 = uint32(d1) & 0x3ffffff + h2 = uint32(d2) & 0x3ffffff + h3 = uint32(d3) 
& 0x3ffffff + h4 = uint32(d4) & 0x3ffffff - r03 := uint32(r[3]) - r03 &= 15 - r0 <<= 51 + h0 += uint32(d4>>26) * 5 + h1 += h0 >> 26 + h0 = h0 & 0x3ffffff - r10 := uint32(r[4]) - r10 &= 252 - r01 <<= 8 - r0 += int64(r00) - - r11 := uint32(r[5]) - r02 <<= 16 - r0 += int64(r01) - - r12 := uint32(r[6]) - r03 <<= 24 - r0 += int64(r02) - - r13 := uint32(r[7]) - r13 &= 15 - r1 := int64(2215) - r0 += int64(r03) - - d0 := r0 - r1 <<= 51 - r2 := int64(2279) - - r20 := uint32(r[8]) - r20 &= 252 - r11 <<= 8 - r1 += int64(r10) - - r21 := uint32(r[9]) - r12 <<= 16 - r1 += int64(r11) - - r22 := uint32(r[10]) - r13 <<= 24 - r1 += int64(r12) - - r23 := uint32(r[11]) - r23 &= 15 - r2 <<= 51 - r1 += int64(r13) - - d1 := r1 - r21 <<= 8 - r2 += int64(r20) - - r30 := uint32(r[12]) - r30 &= 252 - r22 <<= 16 - r2 += int64(r21) - - r31 := uint32(r[13]) - r23 <<= 24 - r2 += int64(r22) - - r32 := uint32(r[14]) - r2 += int64(r23) - r3 := int64(2343) - - d2 := r2 - r3 <<= 51 - - r33 := uint32(r[15]) - r33 &= 15 - r31 <<= 8 - r3 += int64(r30) - - r32 <<= 16 - r3 += int64(r31) - - r33 <<= 24 - r3 += int64(r32) - - r3 += int64(r33) - h0 := alpha32 - alpha32 - - d3 := r3 - h1 := alpha32 - alpha32 - - h2 := alpha32 - alpha32 - - h3 := alpha32 - alpha32 - - h4 := alpha32 - alpha32 - - r0low := math.Float64frombits(uint64(d0)) - h5 := alpha32 - alpha32 - - r1low := math.Float64frombits(uint64(d1)) - h6 := alpha32 - alpha32 - - r2low := math.Float64frombits(uint64(d2)) - h7 := alpha32 - alpha32 - - r0low -= alpha0 - - r1low -= alpha32 - - r2low -= alpha64 - - r0high := r0low + alpha18 - - r3low := math.Float64frombits(uint64(d3)) - - r1high := r1low + alpha50 - sr1low := scale * r1low - - r2high := r2low + alpha82 - sr2low := scale * r2low - - r0high -= alpha18 - r0high_stack := r0high - - r3low -= alpha96 - - r1high -= alpha50 - r1high_stack := r1high - - sr1high := sr1low + alpham80 - - r0low -= r0high - - r2high -= alpha82 - sr3low = scale * r3low - - sr2high := sr2low + alpham48 - - r1low -= r1high - r1low_stack := r1low - - sr1high -= alpham80 - sr1high_stack := sr1high - - r2low -= r2high - r2low_stack := r2low - - sr2high -= alpham48 - sr2high_stack := sr2high - - r3high := r3low + alpha112 - r0low_stack := r0low - - sr1low -= sr1high - sr1low_stack := sr1low - - sr3high := sr3low + alpham16 - r2high_stack := r2high - - sr2low -= sr2high - sr2low_stack := sr2low - - r3high -= alpha112 - r3high_stack := r3high - - sr3high -= alpham16 - sr3high_stack := sr3high - - r3low -= r3high - r3low_stack := r3low - - sr3low -= sr3high - sr3low_stack := sr3low - - if l < 16 { - goto addatmost15bytes + msg = msg[TagSize:] } - m00 = uint32(m[p+0]) - m0 = 2151 + if len(msg) > 0 { + var block [TagSize]byte + off := copy(block[:], msg) + block[off] = 0x01 - m0 <<= 51 - m1 = 2215 - m01 = uint32(m[p+1]) + // h += msg + h0 += binary.LittleEndian.Uint32(block[0:]) & 0x3ffffff + h1 += (binary.LittleEndian.Uint32(block[3:]) >> 2) & 0x3ffffff + h2 += (binary.LittleEndian.Uint32(block[6:]) >> 4) & 0x3ffffff + h3 += (binary.LittleEndian.Uint32(block[9:]) >> 6) & 0x3ffffff + h4 += (binary.LittleEndian.Uint32(block[12:]) >> 8) - m1 <<= 51 - m2 = 2279 - m02 = uint32(m[p+2]) + // h *= r + d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1) + d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2) + d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3) + d3 := (d2 >> 26) + 
(uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4) + d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0) - m2 <<= 51 - m3 = 2343 - m03 = uint32(m[p+3]) + // h %= p + h0 = uint32(d0) & 0x3ffffff + h1 = uint32(d1) & 0x3ffffff + h2 = uint32(d2) & 0x3ffffff + h3 = uint32(d3) & 0x3ffffff + h4 = uint32(d4) & 0x3ffffff - m10 = uint32(m[p+4]) - m01 <<= 8 - m0 += int64(m00) - - m11 = uint32(m[p+5]) - m02 <<= 16 - m0 += int64(m01) - - m12 = uint32(m[p+6]) - m03 <<= 24 - m0 += int64(m02) - - m13 = uint32(m[p+7]) - m3 <<= 51 - m0 += int64(m03) - - m20 = uint32(m[p+8]) - m11 <<= 8 - m1 += int64(m10) - - m21 = uint32(m[p+9]) - m12 <<= 16 - m1 += int64(m11) - - m22 = uint32(m[p+10]) - m13 <<= 24 - m1 += int64(m12) - - m23 = uint32(m[p+11]) - m1 += int64(m13) - - m30 = uint32(m[p+12]) - m21 <<= 8 - m2 += int64(m20) - - m31 = uint32(m[p+13]) - m22 <<= 16 - m2 += int64(m21) - - m32 = uint32(m[p+14]) - m23 <<= 24 - m2 += int64(m22) - - m33 = uint64(m[p+15]) - m2 += int64(m23) - - d0 = m0 - m31 <<= 8 - m3 += int64(m30) - - d1 = m1 - m32 <<= 16 - m3 += int64(m31) - - d2 = m2 - m33 += 256 - - m33 <<= 24 - m3 += int64(m32) - - m3 += int64(m33) - d3 = m3 - - p += 16 - l -= 16 - - z0 = math.Float64frombits(uint64(d0)) - - z1 = math.Float64frombits(uint64(d1)) - - z2 = math.Float64frombits(uint64(d2)) - - z3 = math.Float64frombits(uint64(d3)) - - z0 -= alpha0 - - z1 -= alpha32 - - z2 -= alpha64 - - z3 -= alpha96 - - h0 += z0 - - h1 += z1 - - h3 += z2 - - h5 += z3 - - if l < 16 { - goto multiplyaddatmost15bytes + h0 += uint32(d4>>26) * 5 + h1 += h0 >> 26 + h0 = h0 & 0x3ffffff } -multiplyaddatleast16bytes: - - m2 = 2279 - m20 = uint32(m[p+8]) - y7 = h7 + alpha130 - - m2 <<= 51 - m3 = 2343 - m21 = uint32(m[p+9]) - y6 = h6 + alpha130 - - m3 <<= 51 - m0 = 2151 - m22 = uint32(m[p+10]) - y1 = h1 + alpha32 - - m0 <<= 51 - m1 = 2215 - m23 = uint32(m[p+11]) - y0 = h0 + alpha32 - - m1 <<= 51 - m30 = uint32(m[p+12]) - y7 -= alpha130 - - m21 <<= 8 - m2 += int64(m20) - m31 = uint32(m[p+13]) - y6 -= alpha130 - - m22 <<= 16 - m2 += int64(m21) - m32 = uint32(m[p+14]) - y1 -= alpha32 - - m23 <<= 24 - m2 += int64(m22) - m33 = uint64(m[p+15]) - y0 -= alpha32 - - m2 += int64(m23) - m00 = uint32(m[p+0]) - y5 = h5 + alpha96 - - m31 <<= 8 - m3 += int64(m30) - m01 = uint32(m[p+1]) - y4 = h4 + alpha96 - - m32 <<= 16 - m02 = uint32(m[p+2]) - x7 = h7 - y7 - y7 *= scale - - m33 += 256 - m03 = uint32(m[p+3]) - x6 = h6 - y6 - y6 *= scale - - m33 <<= 24 - m3 += int64(m31) - m10 = uint32(m[p+4]) - x1 = h1 - y1 - - m01 <<= 8 - m3 += int64(m32) - m11 = uint32(m[p+5]) - x0 = h0 - y0 - - m3 += int64(m33) - m0 += int64(m00) - m12 = uint32(m[p+6]) - y5 -= alpha96 - - m02 <<= 16 - m0 += int64(m01) - m13 = uint32(m[p+7]) - y4 -= alpha96 - - m03 <<= 24 - m0 += int64(m02) - d2 = m2 - x1 += y7 - - m0 += int64(m03) - d3 = m3 - x0 += y6 - - m11 <<= 8 - m1 += int64(m10) - d0 = m0 - x7 += y5 - - m12 <<= 16 - m1 += int64(m11) - x6 += y4 - - m13 <<= 24 - m1 += int64(m12) - y3 = h3 + alpha64 - - m1 += int64(m13) - d1 = m1 - y2 = h2 + alpha64 - - x0 += x1 - - x6 += x7 - - y3 -= alpha64 - r3low = r3low_stack - - y2 -= alpha64 - r0low = r0low_stack - - x5 = h5 - y5 - r3lowx0 = r3low * x0 - r3high = r3high_stack - - x4 = h4 - y4 - r0lowx6 = r0low * x6 - r0high = r0high_stack - - x3 = h3 - y3 - r3highx0 = r3high * x0 - sr1low = sr1low_stack - - x2 = h2 - y2 - r0highx6 = r0high * x6 - sr1high = sr1high_stack - - x5 += y3 - r0lowx0 = r0low * x0 - r1low = 
r1low_stack - - h6 = r3lowx0 + r0lowx6 - sr1lowx6 = sr1low * x6 - r1high = r1high_stack - - x4 += y2 - r0highx0 = r0high * x0 - sr2low = sr2low_stack - - h7 = r3highx0 + r0highx6 - sr1highx6 = sr1high * x6 - sr2high = sr2high_stack - - x3 += y1 - r1lowx0 = r1low * x0 - r2low = r2low_stack - - h0 = r0lowx0 + sr1lowx6 - sr2lowx6 = sr2low * x6 - r2high = r2high_stack - - x2 += y0 - r1highx0 = r1high * x0 - sr3low = sr3low_stack - - h1 = r0highx0 + sr1highx6 - sr2highx6 = sr2high * x6 - sr3high = sr3high_stack - - x4 += x5 - r2lowx0 = r2low * x0 - z2 = math.Float64frombits(uint64(d2)) - - h2 = r1lowx0 + sr2lowx6 - sr3lowx6 = sr3low * x6 - - x2 += x3 - r2highx0 = r2high * x0 - z3 = math.Float64frombits(uint64(d3)) - - h3 = r1highx0 + sr2highx6 - sr3highx6 = sr3high * x6 - - r1highx4 = r1high * x4 - z2 -= alpha64 - - h4 = r2lowx0 + sr3lowx6 - r1lowx4 = r1low * x4 - - r0highx4 = r0high * x4 - z3 -= alpha96 - - h5 = r2highx0 + sr3highx6 - r0lowx4 = r0low * x4 - - h7 += r1highx4 - sr3highx4 = sr3high * x4 - - h6 += r1lowx4 - sr3lowx4 = sr3low * x4 - - h5 += r0highx4 - sr2highx4 = sr2high * x4 - - h4 += r0lowx4 - sr2lowx4 = sr2low * x4 - - h3 += sr3highx4 - r0lowx2 = r0low * x2 - - h2 += sr3lowx4 - r0highx2 = r0high * x2 - - h1 += sr2highx4 - r1lowx2 = r1low * x2 - - h0 += sr2lowx4 - r1highx2 = r1high * x2 - - h2 += r0lowx2 - r2lowx2 = r2low * x2 - - h3 += r0highx2 - r2highx2 = r2high * x2 - - h4 += r1lowx2 - sr3lowx2 = sr3low * x2 - - h5 += r1highx2 - sr3highx2 = sr3high * x2 - - p += 16 - l -= 16 - h6 += r2lowx2 - - h7 += r2highx2 - - z1 = math.Float64frombits(uint64(d1)) - h0 += sr3lowx2 - - z0 = math.Float64frombits(uint64(d0)) - h1 += sr3highx2 - - z1 -= alpha32 - - z0 -= alpha0 - - h5 += z3 - - h3 += z2 - - h1 += z1 - - h0 += z0 - - if l >= 16 { - goto multiplyaddatleast16bytes - } - -multiplyaddatmost15bytes: - - y7 = h7 + alpha130 - - y6 = h6 + alpha130 - - y1 = h1 + alpha32 - - y0 = h0 + alpha32 - - y7 -= alpha130 - - y6 -= alpha130 - - y1 -= alpha32 - - y0 -= alpha32 - - y5 = h5 + alpha96 - - y4 = h4 + alpha96 - - x7 = h7 - y7 - y7 *= scale - - x6 = h6 - y6 - y6 *= scale - - x1 = h1 - y1 - - x0 = h0 - y0 - - y5 -= alpha96 - - y4 -= alpha96 - - x1 += y7 - - x0 += y6 - - x7 += y5 - - x6 += y4 - - y3 = h3 + alpha64 - - y2 = h2 + alpha64 - - x0 += x1 - - x6 += x7 - - y3 -= alpha64 - r3low = r3low_stack - - y2 -= alpha64 - r0low = r0low_stack - - x5 = h5 - y5 - r3lowx0 = r3low * x0 - r3high = r3high_stack - - x4 = h4 - y4 - r0lowx6 = r0low * x6 - r0high = r0high_stack - - x3 = h3 - y3 - r3highx0 = r3high * x0 - sr1low = sr1low_stack - - x2 = h2 - y2 - r0highx6 = r0high * x6 - sr1high = sr1high_stack - - x5 += y3 - r0lowx0 = r0low * x0 - r1low = r1low_stack - - h6 = r3lowx0 + r0lowx6 - sr1lowx6 = sr1low * x6 - r1high = r1high_stack - - x4 += y2 - r0highx0 = r0high * x0 - sr2low = sr2low_stack - - h7 = r3highx0 + r0highx6 - sr1highx6 = sr1high * x6 - sr2high = sr2high_stack - - x3 += y1 - r1lowx0 = r1low * x0 - r2low = r2low_stack - - h0 = r0lowx0 + sr1lowx6 - sr2lowx6 = sr2low * x6 - r2high = r2high_stack - - x2 += y0 - r1highx0 = r1high * x0 - sr3low = sr3low_stack - - h1 = r0highx0 + sr1highx6 - sr2highx6 = sr2high * x6 - sr3high = sr3high_stack - - x4 += x5 - r2lowx0 = r2low * x0 - - h2 = r1lowx0 + sr2lowx6 - sr3lowx6 = sr3low * x6 - - x2 += x3 - r2highx0 = r2high * x0 - - h3 = r1highx0 + sr2highx6 - sr3highx6 = sr3high * x6 - - r1highx4 = r1high * x4 - - h4 = r2lowx0 + sr3lowx6 - r1lowx4 = r1low * x4 - - r0highx4 = r0high * x4 - - h5 = r2highx0 + sr3highx6 - r0lowx4 = r0low * x4 - - h7 += 
r1highx4 - sr3highx4 = sr3high * x4 - - h6 += r1lowx4 - sr3lowx4 = sr3low * x4 - - h5 += r0highx4 - sr2highx4 = sr2high * x4 - - h4 += r0lowx4 - sr2lowx4 = sr2low * x4 - - h3 += sr3highx4 - r0lowx2 = r0low * x2 - - h2 += sr3lowx4 - r0highx2 = r0high * x2 - - h1 += sr2highx4 - r1lowx2 = r1low * x2 - - h0 += sr2lowx4 - r1highx2 = r1high * x2 - - h2 += r0lowx2 - r2lowx2 = r2low * x2 - - h3 += r0highx2 - r2highx2 = r2high * x2 - - h4 += r1lowx2 - sr3lowx2 = sr3low * x2 - - h5 += r1highx2 - sr3highx2 = sr3high * x2 - - h6 += r2lowx2 - - h7 += r2highx2 - - h0 += sr3lowx2 - - h1 += sr3highx2 - -addatmost15bytes: - - if l == 0 { - goto nomorebytes - } - - lbelow2 = l - 2 - - lbelow3 = l - 3 - - lbelow2 >>= 31 - lbelow4 = l - 4 - - m00 = uint32(m[p+0]) - lbelow3 >>= 31 - p += lbelow2 - - m01 = uint32(m[p+1]) - lbelow4 >>= 31 - p += lbelow3 - - m02 = uint32(m[p+2]) - p += lbelow4 - m0 = 2151 - - m03 = uint32(m[p+3]) - m0 <<= 51 - m1 = 2215 - - m0 += int64(m00) - m01 &^= uint32(lbelow2) - - m02 &^= uint32(lbelow3) - m01 -= uint32(lbelow2) - - m01 <<= 8 - m03 &^= uint32(lbelow4) - - m0 += int64(m01) - lbelow2 -= lbelow3 - - m02 += uint32(lbelow2) - lbelow3 -= lbelow4 - - m02 <<= 16 - m03 += uint32(lbelow3) - - m03 <<= 24 - m0 += int64(m02) - - m0 += int64(m03) - lbelow5 = l - 5 - - lbelow6 = l - 6 - lbelow7 = l - 7 - - lbelow5 >>= 31 - lbelow8 = l - 8 - - lbelow6 >>= 31 - p += lbelow5 - - m10 = uint32(m[p+4]) - lbelow7 >>= 31 - p += lbelow6 - - m11 = uint32(m[p+5]) - lbelow8 >>= 31 - p += lbelow7 - - m12 = uint32(m[p+6]) - m1 <<= 51 - p += lbelow8 - - m13 = uint32(m[p+7]) - m10 &^= uint32(lbelow5) - lbelow4 -= lbelow5 - - m10 += uint32(lbelow4) - lbelow5 -= lbelow6 - - m11 &^= uint32(lbelow6) - m11 += uint32(lbelow5) - - m11 <<= 8 - m1 += int64(m10) - - m1 += int64(m11) - m12 &^= uint32(lbelow7) - - lbelow6 -= lbelow7 - m13 &^= uint32(lbelow8) - - m12 += uint32(lbelow6) - lbelow7 -= lbelow8 - - m12 <<= 16 - m13 += uint32(lbelow7) - - m13 <<= 24 - m1 += int64(m12) - - m1 += int64(m13) - m2 = 2279 - - lbelow9 = l - 9 - m3 = 2343 - - lbelow10 = l - 10 - lbelow11 = l - 11 - - lbelow9 >>= 31 - lbelow12 = l - 12 - - lbelow10 >>= 31 - p += lbelow9 - - m20 = uint32(m[p+8]) - lbelow11 >>= 31 - p += lbelow10 - - m21 = uint32(m[p+9]) - lbelow12 >>= 31 - p += lbelow11 - - m22 = uint32(m[p+10]) - m2 <<= 51 - p += lbelow12 - - m23 = uint32(m[p+11]) - m20 &^= uint32(lbelow9) - lbelow8 -= lbelow9 - - m20 += uint32(lbelow8) - lbelow9 -= lbelow10 - - m21 &^= uint32(lbelow10) - m21 += uint32(lbelow9) - - m21 <<= 8 - m2 += int64(m20) - - m2 += int64(m21) - m22 &^= uint32(lbelow11) - - lbelow10 -= lbelow11 - m23 &^= uint32(lbelow12) - - m22 += uint32(lbelow10) - lbelow11 -= lbelow12 - - m22 <<= 16 - m23 += uint32(lbelow11) - - m23 <<= 24 - m2 += int64(m22) - - m3 <<= 51 - lbelow13 = l - 13 - - lbelow13 >>= 31 - lbelow14 = l - 14 - - lbelow14 >>= 31 - p += lbelow13 - lbelow15 = l - 15 - - m30 = uint32(m[p+12]) - lbelow15 >>= 31 - p += lbelow14 - - m31 = uint32(m[p+13]) - p += lbelow15 - m2 += int64(m23) - - m32 = uint32(m[p+14]) - m30 &^= uint32(lbelow13) - lbelow12 -= lbelow13 - - m30 += uint32(lbelow12) - lbelow13 -= lbelow14 - - m3 += int64(m30) - m31 &^= uint32(lbelow14) - - m31 += uint32(lbelow13) - m32 &^= uint32(lbelow15) - - m31 <<= 8 - lbelow14 -= lbelow15 - - m3 += int64(m31) - m32 += uint32(lbelow14) - d0 = m0 - - m32 <<= 16 - m33 = uint64(lbelow15 + 1) - d1 = m1 - - m33 <<= 24 - m3 += int64(m32) - d2 = m2 - - m3 += int64(m33) - d3 = m3 - - z3 = math.Float64frombits(uint64(d3)) - - z2 = 
math.Float64frombits(uint64(d2)) - - z1 = math.Float64frombits(uint64(d1)) - - z0 = math.Float64frombits(uint64(d0)) - - z3 -= alpha96 - - z2 -= alpha64 - - z1 -= alpha32 - - z0 -= alpha0 - - h5 += z3 - - h3 += z2 - - h1 += z1 - - h0 += z0 - - y7 = h7 + alpha130 - - y6 = h6 + alpha130 - - y1 = h1 + alpha32 - - y0 = h0 + alpha32 - - y7 -= alpha130 - - y6 -= alpha130 - - y1 -= alpha32 - - y0 -= alpha32 - - y5 = h5 + alpha96 - - y4 = h4 + alpha96 - - x7 = h7 - y7 - y7 *= scale - - x6 = h6 - y6 - y6 *= scale - - x1 = h1 - y1 - - x0 = h0 - y0 - - y5 -= alpha96 - - y4 -= alpha96 - - x1 += y7 - - x0 += y6 - - x7 += y5 - - x6 += y4 - - y3 = h3 + alpha64 - - y2 = h2 + alpha64 - - x0 += x1 - - x6 += x7 - - y3 -= alpha64 - r3low = r3low_stack - - y2 -= alpha64 - r0low = r0low_stack - - x5 = h5 - y5 - r3lowx0 = r3low * x0 - r3high = r3high_stack - - x4 = h4 - y4 - r0lowx6 = r0low * x6 - r0high = r0high_stack - - x3 = h3 - y3 - r3highx0 = r3high * x0 - sr1low = sr1low_stack - - x2 = h2 - y2 - r0highx6 = r0high * x6 - sr1high = sr1high_stack - - x5 += y3 - r0lowx0 = r0low * x0 - r1low = r1low_stack - - h6 = r3lowx0 + r0lowx6 - sr1lowx6 = sr1low * x6 - r1high = r1high_stack - - x4 += y2 - r0highx0 = r0high * x0 - sr2low = sr2low_stack - - h7 = r3highx0 + r0highx6 - sr1highx6 = sr1high * x6 - sr2high = sr2high_stack - - x3 += y1 - r1lowx0 = r1low * x0 - r2low = r2low_stack - - h0 = r0lowx0 + sr1lowx6 - sr2lowx6 = sr2low * x6 - r2high = r2high_stack - - x2 += y0 - r1highx0 = r1high * x0 - sr3low = sr3low_stack - - h1 = r0highx0 + sr1highx6 - sr2highx6 = sr2high * x6 - sr3high = sr3high_stack - - x4 += x5 - r2lowx0 = r2low * x0 - - h2 = r1lowx0 + sr2lowx6 - sr3lowx6 = sr3low * x6 - - x2 += x3 - r2highx0 = r2high * x0 - - h3 = r1highx0 + sr2highx6 - sr3highx6 = sr3high * x6 - - r1highx4 = r1high * x4 - - h4 = r2lowx0 + sr3lowx6 - r1lowx4 = r1low * x4 - - r0highx4 = r0high * x4 - - h5 = r2highx0 + sr3highx6 - r0lowx4 = r0low * x4 - - h7 += r1highx4 - sr3highx4 = sr3high * x4 - - h6 += r1lowx4 - sr3lowx4 = sr3low * x4 - - h5 += r0highx4 - sr2highx4 = sr2high * x4 - - h4 += r0lowx4 - sr2lowx4 = sr2low * x4 - - h3 += sr3highx4 - r0lowx2 = r0low * x2 - - h2 += sr3lowx4 - r0highx2 = r0high * x2 - - h1 += sr2highx4 - r1lowx2 = r1low * x2 - - h0 += sr2lowx4 - r1highx2 = r1high * x2 - - h2 += r0lowx2 - r2lowx2 = r2low * x2 - - h3 += r0highx2 - r2highx2 = r2high * x2 - - h4 += r1lowx2 - sr3lowx2 = sr3low * x2 - - h5 += r1highx2 - sr3highx2 = sr3high * x2 - - h6 += r2lowx2 - - h7 += r2highx2 - - h0 += sr3lowx2 - - h1 += sr3highx2 - -nomorebytes: - - y7 = h7 + alpha130 - - y0 = h0 + alpha32 - - y1 = h1 + alpha32 - - y2 = h2 + alpha64 - - y7 -= alpha130 - - y3 = h3 + alpha64 - - y4 = h4 + alpha96 - - y5 = h5 + alpha96 - - x7 = h7 - y7 - y7 *= scale - - y0 -= alpha32 - - y1 -= alpha32 - - y2 -= alpha64 - - h6 += x7 - - y3 -= alpha64 - - y4 -= alpha96 - - y5 -= alpha96 - - y6 = h6 + alpha130 - - x0 = h0 - y0 - - x1 = h1 - y1 - - x2 = h2 - y2 - - y6 -= alpha130 - - x0 += y7 - - x3 = h3 - y3 - - x4 = h4 - y4 - - x5 = h5 - y5 - - x6 = h6 - y6 - - y6 *= scale - - x2 += y0 - - x3 += y1 - - x4 += y2 - - x0 += y6 - - x5 += y3 - - x6 += y4 - - x2 += x3 - - x0 += x1 - - x4 += x5 - - x6 += y5 - - x2 += offset1 - d1 = int64(math.Float64bits(x2)) - - x0 += offset0 - d0 = int64(math.Float64bits(x0)) - - x4 += offset2 - d2 = int64(math.Float64bits(x4)) - - x6 += offset3 - d3 = int64(math.Float64bits(x6)) - - f0 = uint64(d0) - - f1 = uint64(d1) - bits32 = math.MaxUint64 - - f2 = uint64(d2) - bits32 >>= 32 - - f3 = uint64(d3) - f = f0 >> 
32 - - f0 &= bits32 - f &= 255 - - f1 += f - g0 = f0 + 5 - - g = g0 >> 32 - g0 &= bits32 - - f = f1 >> 32 - f1 &= bits32 - - f &= 255 - g1 = f1 + g - - g = g1 >> 32 - f2 += f - - f = f2 >> 32 - g1 &= bits32 - - f2 &= bits32 - f &= 255 - - f3 += f - g2 = f2 + g - - g = g2 >> 32 - g2 &= bits32 - - f4 = f3 >> 32 - f3 &= bits32 - - f4 &= 255 - g3 = f3 + g - - g = g3 >> 32 - g3 &= bits32 - - g4 = f4 + g - - g4 = g4 - 4 - s00 = uint32(s[0]) - - f = uint64(int64(g4) >> 63) - s01 = uint32(s[1]) - - f0 &= f - g0 &^= f - s02 = uint32(s[2]) - - f1 &= f - f0 |= g0 - s03 = uint32(s[3]) - - g1 &^= f - f2 &= f - s10 = uint32(s[4]) - - f3 &= f - g2 &^= f - s11 = uint32(s[5]) - - g3 &^= f - f1 |= g1 - s12 = uint32(s[6]) - - f2 |= g2 - f3 |= g3 - s13 = uint32(s[7]) - - s01 <<= 8 - f0 += uint64(s00) - s20 = uint32(s[8]) - - s02 <<= 16 - f0 += uint64(s01) - s21 = uint32(s[9]) - - s03 <<= 24 - f0 += uint64(s02) - s22 = uint32(s[10]) - - s11 <<= 8 - f1 += uint64(s10) - s23 = uint32(s[11]) - - s12 <<= 16 - f1 += uint64(s11) - s30 = uint32(s[12]) - - s13 <<= 24 - f1 += uint64(s12) - s31 = uint32(s[13]) - - f0 += uint64(s03) - f1 += uint64(s13) - s32 = uint32(s[14]) - - s21 <<= 8 - f2 += uint64(s20) - s33 = uint32(s[15]) - - s22 <<= 16 - f2 += uint64(s21) - - s23 <<= 24 - f2 += uint64(s22) - - s31 <<= 8 - f3 += uint64(s30) - - s32 <<= 16 - f3 += uint64(s31) - - s33 <<= 24 - f3 += uint64(s32) - - f2 += uint64(s23) - f3 += uint64(s33) - - out[0] = byte(f0) - f0 >>= 8 - out[1] = byte(f0) - f0 >>= 8 - out[2] = byte(f0) - f0 >>= 8 - out[3] = byte(f0) - f0 >>= 8 - f1 += f0 - - out[4] = byte(f1) - f1 >>= 8 - out[5] = byte(f1) - f1 >>= 8 - out[6] = byte(f1) - f1 >>= 8 - out[7] = byte(f1) - f1 >>= 8 - f2 += f1 - - out[8] = byte(f2) - f2 >>= 8 - out[9] = byte(f2) - f2 >>= 8 - out[10] = byte(f2) - f2 >>= 8 - out[11] = byte(f2) - f2 >>= 8 - f3 += f2 - - out[12] = byte(f3) - f3 >>= 8 - out[13] = byte(f3) - f3 >>= 8 - out[14] = byte(f3) - f3 >>= 8 - out[15] = byte(f3) + // h %= p reduction + h2 += h1 >> 26 + h1 &= 0x3ffffff + h3 += h2 >> 26 + h2 &= 0x3ffffff + h4 += h3 >> 26 + h3 &= 0x3ffffff + h0 += 5 * (h4 >> 26) + h4 &= 0x3ffffff + h1 += h0 >> 26 + h0 &= 0x3ffffff + + // h - p + t0 := h0 + 5 + t1 := h1 + (t0 >> 26) + t2 := h2 + (t1 >> 26) + t3 := h3 + (t2 >> 26) + t4 := h4 + (t3 >> 26) - (1 << 26) + t0 &= 0x3ffffff + t1 &= 0x3ffffff + t2 &= 0x3ffffff + t3 &= 0x3ffffff + + // select h if h < p else h - p + t_mask := (t4 >> 31) - 1 + h_mask := ^t_mask + h0 = (h0 & h_mask) | (t0 & t_mask) + h1 = (h1 & h_mask) | (t1 & t_mask) + h2 = (h2 & h_mask) | (t2 & t_mask) + h3 = (h3 & h_mask) | (t3 & t_mask) + h4 = (h4 & h_mask) | (t4 & t_mask) + + // h %= 2^128 + h0 |= h1 << 26 + h1 = ((h1 >> 6) | (h2 << 20)) + h2 = ((h2 >> 12) | (h3 << 14)) + h3 = ((h3 >> 18) | (h4 << 8)) + + // s: the s part of the key + // tag = (h + s) % (2^128) + t := uint64(h0) + uint64(binary.LittleEndian.Uint32(key[16:])) + h0 = uint32(t) + t = uint64(h1) + uint64(binary.LittleEndian.Uint32(key[20:])) + (t >> 32) + h1 = uint32(t) + t = uint64(h2) + uint64(binary.LittleEndian.Uint32(key[24:])) + (t >> 32) + h2 = uint32(t) + t = uint64(h3) + uint64(binary.LittleEndian.Uint32(key[28:])) + (t >> 32) + h3 = uint32(t) + + binary.LittleEndian.PutUint32(out[0:], h0) + binary.LittleEndian.PutUint32(out[4:], h1) + binary.LittleEndian.PutUint32(out[8:], h2) + binary.LittleEndian.PutUint32(out[12:], h3) } diff --git a/components/engine/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s b/components/engine/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s 
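The replacement poly1305 tail above reduces the five 26-bit limbs h0..h4 modulo p = 2^130 - 5, computes t = h - p, and then chooses h or t with a mask derived from the top limb, so the final selection is branch-free. A minimal, standalone sketch of that mask-select idiom follows; it is illustrative only, and the helper name ctSelect32 is not part of the vendored file:

package main

import "fmt"

// ctSelect32 returns a when choose is 1 and b when choose is 0, with no
// data-dependent branch: -choose is either all-zero or all-one bits.
func ctSelect32(choose, a, b uint32) uint32 {
    mask := -choose
    return (a & mask) | (b &^ mask)
}

func main() {
    fmt.Println(ctSelect32(1, 7, 9)) // 7: borrow set (h < p), keep h
    fmt.Println(ctSelect32(0, 7, 9)) // 9: no borrow, take h - p
}

In the vendored code the borrow appears as bit 31 of t4: (t4 >> 31) - 1 is all-ones exactly when h >= p, so the masked OR lands on h - p in that case and on h otherwise.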
index 6e1df96391..22afbdcadc 100644 --- a/components/engine/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s +++ b/components/engine/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s @@ -5,29 +5,23 @@ // +build amd64,!appengine,!gccgo // This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html // func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) -TEXT ·salsa2020XORKeyStream(SB),0,$512-40 +// This needs up to 64 bytes at 360(SP); hence the non-obvious frame size. +TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment MOVQ out+0(FP),DI MOVQ in+8(FP),SI MOVQ n+16(FP),DX MOVQ nonce+24(FP),CX MOVQ key+32(FP),R8 - MOVQ SP,R11 - MOVQ $31,R9 - NOTQ R9 - ANDQ R9,SP - ADDQ $32,SP + MOVQ SP,R12 + MOVQ SP,R9 + ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP - MOVQ R11,352(SP) - MOVQ R12,360(SP) - MOVQ R13,368(SP) - MOVQ R14,376(SP) - MOVQ R15,384(SP) - MOVQ BX,392(SP) - MOVQ BP,400(SP) MOVQ DX,R9 MOVQ CX,DX MOVQ R8,R10 @@ -133,7 +127,7 @@ TEXT ·salsa2020XORKeyStream(SB),0,$512-40 SHRQ $32,CX MOVL DX,16(SP) MOVL CX, 36 (SP) - MOVQ R9,408(SP) + MOVQ R9,352(SP) MOVQ $20,DX MOVOA 64(SP),X0 MOVOA 80(SP),X1 @@ -650,7 +644,7 @@ TEXT ·salsa2020XORKeyStream(SB),0,$512-40 MOVL CX,244(DI) MOVL R8,248(DI) MOVL R9,252(DI) - MOVQ 408(SP),R9 + MOVQ 352(SP),R9 SUBQ $256,R9 ADDQ $256,SI ADDQ $256,DI @@ -662,13 +656,13 @@ TEXT ·salsa2020XORKeyStream(SB),0,$512-40 CMPQ R9,$64 JAE NOCOPY MOVQ DI,DX - LEAQ 416(SP),DI + LEAQ 360(SP),DI MOVQ R9,CX REP; MOVSB - LEAQ 416(SP),DI - LEAQ 416(SP),SI + LEAQ 360(SP),DI + LEAQ 360(SP),SI NOCOPY: - MOVQ R9,408(SP) + MOVQ R9,352(SP) MOVOA 48(SP),X0 MOVOA 0(SP),X1 MOVOA 16(SP),X2 @@ -867,7 +861,7 @@ TEXT ·salsa2020XORKeyStream(SB),0,$512-40 MOVL R8,44(DI) MOVL R9,28(DI) MOVL AX,12(DI) - MOVQ 408(SP),R9 + MOVQ 352(SP),R9 MOVL 16(SP),CX MOVL 36 (SP),R8 ADDQ $1,CX @@ -886,14 +880,7 @@ TEXT ·salsa2020XORKeyStream(SB),0,$512-40 REP; MOVSB BYTESATLEAST64: DONE: - MOVQ 352(SP),R11 - MOVQ 360(SP),R12 - MOVQ 368(SP),R13 - MOVQ 376(SP),R14 - MOVQ 384(SP),R15 - MOVQ 392(SP),BX - MOVQ 400(SP),BP - MOVQ R11,SP + MOVQ R12,SP RET BYTESATLEAST65: SUBQ $64,R9 diff --git a/components/engine/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/terminal.go new file mode 100644 index 0000000000..18379a935b --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -0,0 +1,951 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import ( + "bytes" + "io" + "sync" + "unicode/utf8" +) + +// EscapeCodes contains escape sequences that can be written to the terminal in +// order to achieve different styles of text. 
+type EscapeCodes struct { + // Foreground colors + Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte + + // Reset all attributes + Reset []byte +} + +var vt100EscapeCodes = EscapeCodes{ + Black: []byte{keyEscape, '[', '3', '0', 'm'}, + Red: []byte{keyEscape, '[', '3', '1', 'm'}, + Green: []byte{keyEscape, '[', '3', '2', 'm'}, + Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, + Blue: []byte{keyEscape, '[', '3', '4', 'm'}, + Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, + Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, + White: []byte{keyEscape, '[', '3', '7', 'm'}, + + Reset: []byte{keyEscape, '[', '0', 'm'}, +} + +// Terminal contains the state for running a VT100 terminal that is capable of +// reading lines of input. +type Terminal struct { + // AutoCompleteCallback, if non-null, is called for each keypress with + // the full input line and the current position of the cursor (in + // bytes, as an index into |line|). If it returns ok=false, the key + // press is processed normally. Otherwise it returns a replacement line + // and the new cursor position. + AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) + + // Escape contains a pointer to the escape codes for this terminal. + // It's always a valid pointer, although the escape codes themselves + // may be empty if the terminal doesn't support them. + Escape *EscapeCodes + + // lock protects the terminal and the state in this object from + // concurrent processing of a key press and a Write() call. + lock sync.Mutex + + c io.ReadWriter + prompt []rune + + // line is the current line being entered. + line []rune + // pos is the logical position of the cursor in line + pos int + // echo is true if local echo is enabled + echo bool + // pasteActive is true iff there is a bracketed paste operation in + // progress. + pasteActive bool + + // cursorX contains the current X value of the cursor where the left + // edge is 0. cursorY contains the row number where the first row of + // the current line is 0. + cursorX, cursorY int + // maxLine is the greatest value of cursorY so far. + maxLine int + + termWidth, termHeight int + + // outBuf contains the terminal data to be sent. + outBuf []byte + // remainder contains the remainder of any partial key sequences after + // a read. It aliases into inBuf. + remainder []byte + inBuf [256]byte + + // history contains previously entered commands so that they can be + // accessed with the up and down keys. + history stRingBuffer + // historyIndex stores the currently accessed history entry, where zero + // means the immediately previous entry. + historyIndex int + // When navigating up and down the history it's possible to return to + // the incomplete, initial line. That value is stored in + // historyPending. + historyPending string +} + +// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is +// a local terminal, that terminal must first have been put into raw mode. +// prompt is a string that is written at the start of each input line (i.e. +// "> "). 
+func NewTerminal(c io.ReadWriter, prompt string) *Terminal { + return &Terminal{ + Escape: &vt100EscapeCodes, + c: c, + prompt: []rune(prompt), + termWidth: 80, + termHeight: 24, + echo: true, + historyIndex: -1, + } +} + +const ( + keyCtrlD = 4 + keyCtrlU = 21 + keyEnter = '\r' + keyEscape = 27 + keyBackspace = 127 + keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota + keyUp + keyDown + keyLeft + keyRight + keyAltLeft + keyAltRight + keyHome + keyEnd + keyDeleteWord + keyDeleteLine + keyClearScreen + keyPasteStart + keyPasteEnd +) + +var ( + crlf = []byte{'\r', '\n'} + pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} + pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} +) + +// bytesToKey tries to parse a key sequence from b. If successful, it returns +// the key and the remainder of the input. Otherwise it returns utf8.RuneError. +func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { + if len(b) == 0 { + return utf8.RuneError, nil + } + + if !pasteActive { + switch b[0] { + case 1: // ^A + return keyHome, b[1:] + case 5: // ^E + return keyEnd, b[1:] + case 8: // ^H + return keyBackspace, b[1:] + case 11: // ^K + return keyDeleteLine, b[1:] + case 12: // ^L + return keyClearScreen, b[1:] + case 23: // ^W + return keyDeleteWord, b[1:] + } + } + + if b[0] != keyEscape { + if !utf8.FullRune(b) { + return utf8.RuneError, b + } + r, l := utf8.DecodeRune(b) + return r, b[l:] + } + + if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { + switch b[2] { + case 'A': + return keyUp, b[3:] + case 'B': + return keyDown, b[3:] + case 'C': + return keyRight, b[3:] + case 'D': + return keyLeft, b[3:] + case 'H': + return keyHome, b[3:] + case 'F': + return keyEnd, b[3:] + } + } + + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { + switch b[5] { + case 'C': + return keyAltRight, b[6:] + case 'D': + return keyAltLeft, b[6:] + } + } + + if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { + return keyPasteStart, b[6:] + } + + if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { + return keyPasteEnd, b[6:] + } + + // If we get here then we have a key that we don't recognise, or a + // partial sequence. It's not clear how one should find the end of a + // sequence without knowing them all, but it seems that [a-zA-Z~] only + // appears at the end of a sequence. + for i, c := range b[0:] { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { + return keyUnknown, b[i+1:] + } + } + + return utf8.RuneError, b +} + +// queue appends data to the end of t.outBuf +func (t *Terminal) queue(data []rune) { + t.outBuf = append(t.outBuf, []byte(string(data))...) +} + +var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} +var space = []rune{' '} + +func isPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// moveCursorToPos appends data to t.outBuf which will move the cursor to the +// given, logical position in the text. 
+func (t *Terminal) moveCursorToPos(pos int) { + if !t.echo { + return + } + + x := visualLength(t.prompt) + pos + y := x / t.termWidth + x = x % t.termWidth + + up := 0 + if y < t.cursorY { + up = t.cursorY - y + } + + down := 0 + if y > t.cursorY { + down = y - t.cursorY + } + + left := 0 + if x < t.cursorX { + left = t.cursorX - x + } + + right := 0 + if x > t.cursorX { + right = x - t.cursorX + } + + t.cursorX = x + t.cursorY = y + t.move(up, down, left, right) +} + +func (t *Terminal) move(up, down, left, right int) { + movement := make([]rune, 3*(up+down+left+right)) + m := movement + for i := 0; i < up; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'A' + m = m[3:] + } + for i := 0; i < down; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'B' + m = m[3:] + } + for i := 0; i < left; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'D' + m = m[3:] + } + for i := 0; i < right; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'C' + m = m[3:] + } + + t.queue(movement) +} + +func (t *Terminal) clearLineToRight() { + op := []rune{keyEscape, '[', 'K'} + t.queue(op) +} + +const maxLineLength = 4096 + +func (t *Terminal) setLine(newLine []rune, newPos int) { + if t.echo { + t.moveCursorToPos(0) + t.writeLine(newLine) + for i := len(newLine); i < len(t.line); i++ { + t.writeLine(space) + } + t.moveCursorToPos(newPos) + } + t.line = newLine + t.pos = newPos +} + +func (t *Terminal) advanceCursor(places int) { + t.cursorX += places + t.cursorY += t.cursorX / t.termWidth + if t.cursorY > t.maxLine { + t.maxLine = t.cursorY + } + t.cursorX = t.cursorX % t.termWidth + + if places > 0 && t.cursorX == 0 { + // Normally terminals will advance the current position + // when writing a character. But that doesn't happen + // for the last character in a line. However, when + // writing a character (except a new line) that causes + // a line wrap, the position will be advanced two + // places. + // + // So, if we are stopping at the end of a line, we + // need to write a newline so that our cursor can be + // advanced to the next line. + t.outBuf = append(t.outBuf, '\r', '\n') + } +} + +func (t *Terminal) eraseNPreviousChars(n int) { + if n == 0 { + return + } + + if t.pos < n { + n = t.pos + } + t.pos -= n + t.moveCursorToPos(t.pos) + + copy(t.line[t.pos:], t.line[n+t.pos:]) + t.line = t.line[:len(t.line)-n] + if t.echo { + t.writeLine(t.line[t.pos:]) + for i := 0; i < n; i++ { + t.queue(space) + } + t.advanceCursor(n) + t.moveCursorToPos(t.pos) + } +} + +// countToLeftWord returns then number of characters from the cursor to the +// start of the previous word. +func (t *Terminal) countToLeftWord() int { + if t.pos == 0 { + return 0 + } + + pos := t.pos - 1 + for pos > 0 { + if t.line[pos] != ' ' { + break + } + pos-- + } + for pos > 0 { + if t.line[pos] == ' ' { + pos++ + break + } + pos-- + } + + return t.pos - pos +} + +// countToRightWord returns then number of characters from the cursor to the +// start of the next word. +func (t *Terminal) countToRightWord() int { + pos := t.pos + for pos < len(t.line) { + if t.line[pos] == ' ' { + break + } + pos++ + } + for pos < len(t.line) { + if t.line[pos] != ' ' { + break + } + pos++ + } + return pos - t.pos +} + +// visualLength returns the number of visible glyphs in s. 
+func visualLength(runes []rune) int { + inEscapeSeq := false + length := 0 + + for _, r := range runes { + switch { + case inEscapeSeq: + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { + inEscapeSeq = false + } + case r == '\x1b': + inEscapeSeq = true + default: + length++ + } + } + + return length +} + +// handleKey processes the given key and, optionally, returns a line of text +// that the user has entered. +func (t *Terminal) handleKey(key rune) (line string, ok bool) { + if t.pasteActive && key != keyEnter { + t.addKeyToLine(key) + return + } + + switch key { + case keyBackspace: + if t.pos == 0 { + return + } + t.eraseNPreviousChars(1) + case keyAltLeft: + // move left by a word. + t.pos -= t.countToLeftWord() + t.moveCursorToPos(t.pos) + case keyAltRight: + // move right by a word. + t.pos += t.countToRightWord() + t.moveCursorToPos(t.pos) + case keyLeft: + if t.pos == 0 { + return + } + t.pos-- + t.moveCursorToPos(t.pos) + case keyRight: + if t.pos == len(t.line) { + return + } + t.pos++ + t.moveCursorToPos(t.pos) + case keyHome: + if t.pos == 0 { + return + } + t.pos = 0 + t.moveCursorToPos(t.pos) + case keyEnd: + if t.pos == len(t.line) { + return + } + t.pos = len(t.line) + t.moveCursorToPos(t.pos) + case keyUp: + entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + if !ok { + return "", false + } + if t.historyIndex == -1 { + t.historyPending = string(t.line) + } + t.historyIndex++ + runes := []rune(entry) + t.setLine(runes, len(runes)) + case keyDown: + switch t.historyIndex { + case -1: + return + case 0: + runes := []rune(t.historyPending) + t.setLine(runes, len(runes)) + t.historyIndex-- + default: + entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + if ok { + t.historyIndex-- + runes := []rune(entry) + t.setLine(runes, len(runes)) + } + } + case keyEnter: + t.moveCursorToPos(len(t.line)) + t.queue([]rune("\r\n")) + line = string(t.line) + ok = true + t.line = t.line[:0] + t.pos = 0 + t.cursorX = 0 + t.cursorY = 0 + t.maxLine = 0 + case keyDeleteWord: + // Delete zero or more spaces and then one or more characters. + t.eraseNPreviousChars(t.countToLeftWord()) + case keyDeleteLine: + // Delete everything from the current cursor position to the + // end of line. + for i := t.pos; i < len(t.line); i++ { + t.queue(space) + t.advanceCursor(1) + } + t.line = t.line[:t.pos] + t.moveCursorToPos(t.pos) + case keyCtrlD: + // Erase the character under the current position. + // The EOF case when the line is empty is handled in + // readLine(). + if t.pos < len(t.line) { + t.pos++ + t.eraseNPreviousChars(1) + } + case keyCtrlU: + t.eraseNPreviousChars(t.pos) + case keyClearScreen: + // Erases the screen and moves the cursor to the home position. + t.queue([]rune("\x1b[2J\x1b[H")) + t.queue(t.prompt) + t.cursorX, t.cursorY = 0, 0 + t.advanceCursor(visualLength(t.prompt)) + t.setLine(t.line, t.pos) + default: + if t.AutoCompleteCallback != nil { + prefix := string(t.line[:t.pos]) + suffix := string(t.line[t.pos:]) + + t.lock.Unlock() + newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) + t.lock.Lock() + + if completeOk { + t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) + return + } + } + if !isPrintable(key) { + return + } + if len(t.line) == maxLineLength { + return + } + t.addKeyToLine(key) + } + return +} + +// addKeyToLine inserts the given key at the current position in the current +// line. 
+func (t *Terminal) addKeyToLine(key rune) { + if len(t.line) == cap(t.line) { + newLine := make([]rune, len(t.line), 2*(1+len(t.line))) + copy(newLine, t.line) + t.line = newLine + } + t.line = t.line[:len(t.line)+1] + copy(t.line[t.pos+1:], t.line[t.pos:]) + t.line[t.pos] = key + if t.echo { + t.writeLine(t.line[t.pos:]) + } + t.pos++ + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) writeLine(line []rune) { + for len(line) != 0 { + remainingOnLine := t.termWidth - t.cursorX + todo := len(line) + if todo > remainingOnLine { + todo = remainingOnLine + } + t.queue(line[:todo]) + t.advanceCursor(visualLength(line[:todo])) + line = line[todo:] + } +} + +// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n. +func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) { + for len(buf) > 0 { + i := bytes.IndexByte(buf, '\n') + todo := len(buf) + if i >= 0 { + todo = i + } + + var nn int + nn, err = w.Write(buf[:todo]) + n += nn + if err != nil { + return n, err + } + buf = buf[todo:] + + if i >= 0 { + if _, err = w.Write(crlf); err != nil { + return n, err + } + n += 1 + buf = buf[1:] + } + } + + return n, nil +} + +func (t *Terminal) Write(buf []byte) (n int, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + if t.cursorX == 0 && t.cursorY == 0 { + // This is the easy case: there's nothing on the screen that we + // have to move out of the way. + return writeWithCRLF(t.c, buf) + } + + // We have a prompt and possibly user input on the screen. We + // have to clear it first. + t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) + t.cursorX = 0 + t.clearLineToRight() + + for t.cursorY > 0 { + t.move(1 /* up */, 0, 0, 0) + t.cursorY-- + t.clearLineToRight() + } + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + + if n, err = writeWithCRLF(t.c, buf); err != nil { + return + } + + t.writeLine(t.prompt) + if t.echo { + t.writeLine(t.line) + } + + t.moveCursorToPos(t.pos) + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + return +} + +// ReadPassword temporarily changes the prompt and reads a password, without +// echo, from the terminal. +func (t *Terminal) ReadPassword(prompt string) (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + oldPrompt := t.prompt + t.prompt = []rune(prompt) + t.echo = false + + line, err = t.readLine() + + t.prompt = oldPrompt + t.echo = true + + return +} + +// ReadLine returns a line of input from the terminal. 
+func (t *Terminal) ReadLine() (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + return t.readLine() +} + +func (t *Terminal) readLine() (line string, err error) { + // t.lock must be held at this point + + if t.cursorX == 0 && t.cursorY == 0 { + t.writeLine(t.prompt) + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + } + + lineIsPasted := t.pasteActive + + for { + rest := t.remainder + lineOk := false + for !lineOk { + var key rune + key, rest = bytesToKey(rest, t.pasteActive) + if key == utf8.RuneError { + break + } + if !t.pasteActive { + if key == keyCtrlD { + if len(t.line) == 0 { + return "", io.EOF + } + } + if key == keyPasteStart { + t.pasteActive = true + if len(t.line) == 0 { + lineIsPasted = true + } + continue + } + } else if key == keyPasteEnd { + t.pasteActive = false + continue + } + if !t.pasteActive { + lineIsPasted = false + } + line, lineOk = t.handleKey(key) + } + if len(rest) > 0 { + n := copy(t.inBuf[:], rest) + t.remainder = t.inBuf[:n] + } else { + t.remainder = nil + } + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + if lineOk { + if t.echo { + t.historyIndex = -1 + t.history.Add(line) + } + if lineIsPasted { + err = ErrPasteIndicator + } + return + } + + // t.remainder is a slice at the beginning of t.inBuf + // containing a partial key sequence + readBuf := t.inBuf[len(t.remainder):] + var n int + + t.lock.Unlock() + n, err = t.c.Read(readBuf) + t.lock.Lock() + + if err != nil { + return + } + + t.remainder = t.inBuf[:n+len(t.remainder)] + } +} + +// SetPrompt sets the prompt to be used when reading subsequent lines. +func (t *Terminal) SetPrompt(prompt string) { + t.lock.Lock() + defer t.lock.Unlock() + + t.prompt = []rune(prompt) +} + +func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { + // Move cursor to column zero at the start of the line. + t.move(t.cursorY, 0, t.cursorX, 0) + t.cursorX, t.cursorY = 0, 0 + t.clearLineToRight() + for t.cursorY < numPrevLines { + // Move down a line + t.move(0, 1, 0, 0) + t.cursorY++ + t.clearLineToRight() + } + // Move back to beginning. + t.move(t.cursorY, 0, 0, 0) + t.cursorX, t.cursorY = 0, 0 + + t.queue(t.prompt) + t.advanceCursor(visualLength(t.prompt)) + t.writeLine(t.line) + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) SetSize(width, height int) error { + t.lock.Lock() + defer t.lock.Unlock() + + if width == 0 { + width = 1 + } + + oldWidth := t.termWidth + t.termWidth, t.termHeight = width, height + + switch { + case width == oldWidth: + // If the width didn't change then nothing else needs to be + // done. + return nil + case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: + // If there is nothing on current line and no prompt printed, + // just do nothing + return nil + case width < oldWidth: + // Some terminals (e.g. xterm) will truncate lines that were + // too long when shinking. Others, (e.g. gnome-terminal) will + // attempt to wrap them. For the former, repainting t.maxLine + // works great, but that behaviour goes badly wrong in the case + // of the latter because they have doubled every full line. + + // We assume that we are working on a terminal that wraps lines + // and adjust the cursor position based on every previous line + // wrapping and turning into two. This causes the prompt on + // xterms to move upwards, which isn't great, but it avoids a + // huge mess with gnome-terminal. 
+ if t.cursorX >= t.termWidth { + t.cursorX = t.termWidth - 1 + } + t.cursorY *= 2 + t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) + case width > oldWidth: + // If the terminal expands then our position calculations will + // be wrong in the future because we think the cursor is + // |t.pos| chars into the string, but there will be a gap at + // the end of any wrapped line. + // + // But the position will actually be correct until we move, so + // we can move back to the beginning and repaint everything. + t.clearAndRepaintLinePlusNPrevious(t.maxLine) + } + + _, err := t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + return err +} + +type pasteIndicatorError struct{} + +func (pasteIndicatorError) Error() string { + return "terminal: ErrPasteIndicator not correctly handled" +} + +// ErrPasteIndicator may be returned from ReadLine as the error, in addition +// to valid line data. It indicates that bracketed paste mode is enabled and +// that the returned line consists only of pasted data. Programs may wish to +// interpret pasted data more literally than typed data. +var ErrPasteIndicator = pasteIndicatorError{} + +// SetBracketedPasteMode requests that the terminal bracket paste operations +// with markers. Not all terminals support this but, if it is supported, then +// enabling this mode will stop any autocomplete callback from running due to +// pastes. Additionally, any lines that are completely pasted will be returned +// from ReadLine with the error set to ErrPasteIndicator. +func (t *Terminal) SetBracketedPasteMode(on bool) { + if on { + io.WriteString(t.c, "\x1b[?2004h") + } else { + io.WriteString(t.c, "\x1b[?2004l") + } +} + +// stRingBuffer is a ring buffer of strings. +type stRingBuffer struct { + // entries contains max elements. + entries []string + max int + // head contains the index of the element most recently added to the ring. + head int + // size contains the number of elements in the ring. + size int +} + +func (s *stRingBuffer) Add(a string) { + if s.entries == nil { + const defaultNumEntries = 100 + s.entries = make([]string, defaultNumEntries) + s.max = defaultNumEntries + } + + s.head = (s.head + 1) % s.max + s.entries[s.head] = a + if s.size < s.max { + s.size++ + } +} + +// NthPreviousEntry returns the value passed to the nth previous call to Add. +// If n is zero then the immediately prior value is returned, if one, then the +// next most recent, and so on. If such an element doesn't exist then ok is +// false. +func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { + if n >= s.size { + return "", false + } + index := s.head - n + if index < 0 { + index += s.max + } + return s.entries[index], true +} + +// readPasswordLine reads from reader until it finds \n or io.EOF. +// The slice returned does not include the \n. +// readPasswordLine also ignores any \r it finds. 
+func readPasswordLine(reader io.Reader) ([]byte, error) { + var buf [1]byte + var ret []byte + + for { + n, err := reader.Read(buf[:]) + if n > 0 { + switch buf[0] { + case '\n': + return ret, nil + case '\r': + // remove \r from passwords on Windows + default: + ret = append(ret, buf[0]) + } + continue + } + if err != nil { + if err == io.EOF && len(ret) > 0 { + return ret, nil + } + return ret, err + } + } +} diff --git a/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util.go b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util.go new file mode 100644 index 0000000000..d019196147 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -0,0 +1,119 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "syscall" + "unsafe" +) + +// State contains the state of a terminal. +type State struct { + termios syscall.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + return nil, err + } + + newState := oldState.termios + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN + newState.Cflag &^= syscall.CSIZE | syscall.PARENB + newState.Cflag |= syscall.CS8 + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. 
+func Restore(fd int, state *State) error { + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0); err != 0 { + return err + } + return nil +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var dimensions [4]uint16 + + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 { + return -1, -1, err + } + return int(dimensions[1]), int(dimensions[0]), nil +} + +// passwordReader is an io.Reader that reads from a specific file descriptor. +type passwordReader int + +func (r passwordReader) Read(buf []byte) (int, error) { + return syscall.Read(int(r), buf) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var oldState syscall.Termios + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 { + return nil, err + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return nil, err + } + + defer func() { + syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0) + }() + + return readPasswordLine(passwordReader(fd)) +} diff --git a/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go new file mode 100644 index 0000000000..cb23a59049 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA +const ioctlWriteTermios = unix.TIOCSETA diff --git a/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go new file mode 100644 index 0000000000..5fadfe8a1d --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go @@ -0,0 +1,10 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go new file mode 100644 index 0000000000..799f049f04 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go @@ -0,0 +1,58 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
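For orientation, a rough usage sketch of the terminal package vendored above, assuming a Unix process whose stdin/stdout are a local terminal; the stdio wrapper type is illustrative and not part of the package:

package main

import (
    "fmt"
    "io"
    "os"

    "golang.org/x/crypto/ssh/terminal"
)

// stdio glues stdin and stdout into the io.ReadWriter that NewTerminal expects.
type stdio struct {
    io.Reader
    io.Writer
}

func main() {
    fd := int(os.Stdin.Fd())

    // The package requires a local terminal to be put into raw mode first.
    oldState, err := terminal.MakeRaw(fd)
    if err != nil {
        panic(err)
    }
    defer terminal.Restore(fd, oldState)

    t := terminal.NewTerminal(stdio{os.Stdin, os.Stdout}, "> ")
    line, err := t.ReadLine()
    if err != nil {
        return // io.EOF when ^D is pressed on an empty line
    }
    fmt.Fprintf(os.Stdout, "read %q\r\n", line)
}

For the no-echo case, ReadPassword(fd) can be called directly without constructing a Terminal.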
+ +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "fmt" + "runtime" +) + +type State struct{} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + return false +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go new file mode 100644 index 0000000000..a2e1b57dc1 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -0,0 +1,128 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + state *unix.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c + val, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + oldState := *val + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState) + if err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState) + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +// see http://cr.illumos.org/~webrev/andy_js/1060/ +func MakeRaw(fd int) (*State, error) { + oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + oldTermios := *oldTermiosPtr + + newTermios := oldTermios + newTermios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON + newTermios.Oflag &^= syscall.OPOST + newTermios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN + newTermios.Cflag &^= syscall.CSIZE | syscall.PARENB + newTermios.Cflag |= syscall.CS8 + newTermios.Cc[unix.VMIN] = 1 + newTermios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(fd, unix.TCSETS, &newTermios); err != nil { + return nil, err + } + + return &State{ + state: oldTermiosPtr, + }, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, oldState *State) error { + return unix.IoctlSetTermios(fd, unix.TCSETS, oldState.state) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + return &State{ + state: oldTermiosPtr, + }, nil +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return 0, 0, err + } + return int(ws.Col), int(ws.Row), nil +} diff --git a/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go new file mode 100644 index 0000000000..e0a1f36ce5 --- /dev/null +++ b/components/engine/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. 
+// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "syscall" + "unsafe" +) + +const ( + enableLineInput = 2 + enableEchoInput = 4 + enableProcessedInput = 1 + enableWindowInput = 8 + enableMouseInput = 16 + enableInsertMode = 32 + enableQuickEditMode = 64 + enableExtendedFlags = 128 + enableAutoPosition = 256 + enableProcessedOutput = 1 + enableWrapAtEolOutput = 2 +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") +) + +type ( + short int16 + word uint16 + + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, 0, error(e) + } + return int(info.size.x), int(info.size.y), nil +} + +// passwordReader is an io.Reader that reads from a specific Windows HANDLE. +type passwordReader int + +func (r passwordReader) Read(buf []byte) (int, error) { + return syscall.Read(syscall.Handle(r), buf) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + old := st + + st &^= (enableEchoInput) + st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + if e != 0 { + return nil, error(e) + } + + defer func() { + syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) + }() + + return readPasswordLine(passwordReader(fd)) +} diff --git a/components/engine/volume/lcow_parser.go b/components/engine/volume/lcow_parser.go new file mode 100644 index 0000000000..aeb81a4202 --- /dev/null +++ b/components/engine/volume/lcow_parser.go @@ -0,0 +1,35 @@ +package volume + +import ( + "errors" + "fmt" + "path" + + "github.com/docker/docker/api/types/mount" +) + +var lcowSpecificValidators mountValidator = func(m *mount.Mount) error { + if path.Clean(m.Target) == "/" { + return fmt.Errorf("invalid specification: destination can't be '/'") + } + if m.Type == mount.TypeNamedPipe { + return errors.New("Linux containers on Windows do not support named pipe mounts") + } + return nil +} + +type lcowParser struct { + windowsParser +} + +func (p *lcowParser) validateMountConfig(mnt *mount.Mount) error { + return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators) +} + +func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators) +} + +func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators) +} diff --git a/components/engine/volume/linux_parser.go b/components/engine/volume/linux_parser.go new file mode 100644 index 0000000000..1b3493ac75 --- /dev/null +++ b/components/engine/volume/linux_parser.go @@ -0,0 +1,416 @@ +package volume + +import ( + "errors" + "fmt" + "path" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/stringid" +) + +type linuxParser struct { +} + +func linuxSplitRawSpec(raw string) ([]string, error) { + if strings.Count(raw, ":") > 2 { + return nil, errInvalidSpec(raw) + } + + arr := strings.SplitN(raw, ":", 3) + if arr[0] == "" { + return nil, errInvalidSpec(raw) + } + return arr, nil +} + +func linuxValidateNotRoot(p string) error { + p = path.Clean(strings.Replace(p, `\`, `/`, -1)) + if p == "/" { + return fmt.Errorf("invalid specification: destination can't be '/'") + } + return nil +} +func linuxValidateAbsolute(p string) error { + p = strings.Replace(p, `\`, `/`, -1) + if path.IsAbs(p) { + return nil + } + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) +} +func (p *linuxParser) validateMountConfig(mnt *mount.Mount) error { + // there was something looking like a bug in existing codebase: + // - validateMountConfig on linux was called with options skipping bind source existance when calling ParseMountRaw + // - but not when calling ParseMountSpec directly... 
nor when the unit test called it directly + return p.validateMountConfigImpl(mnt, true) +} +func (p *linuxParser) validateMountConfigImpl(mnt *mount.Mount, validateBindSourceExists bool) error { + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := linuxValidateNotRoot(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + if err := linuxValidateAbsolute(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 && len(linuxPropagationModes) > 0 { + if _, ok := linuxPropagationModes[opts.Propagation]; !ok { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := linuxValidateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + if validateBindSourceExists { + exists, _, _ := currentFileInfoProvider.fileInfo(mnt.Source) + if !exists { + return &errMountConfig{mnt, errBindNotExist} + } + } + + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + case mount.TypeTmpfs: + if len(mnt.Source) != 0 { + return &errMountConfig{mnt, errExtraField("Source")} + } + if _, err := p.ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil { + return &errMountConfig{mnt, err} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "ro": true, +} + +// label modes +var linuxLabelModes = map[string]bool{ + "Z": true, + "z": true, +} + +// consistency modes +var linuxConsistencyModes = map[mount.Consistency]bool{ + mount.ConsistencyFull: true, + mount.ConsistencyCached: true, + mount.ConsistencyDelegated: true, +} +var linuxPropagationModes = map[mount.Propagation]bool{ + mount.PropagationPrivate: true, + mount.PropagationRPrivate: true, + mount.PropagationSlave: true, + mount.PropagationRSlave: true, + mount.PropagationShared: true, + mount.PropagationRShared: true, +} + +const linuxDefaultPropagationMode = mount.PropagationRPrivate + +func linuxGetPropagation(mode string) mount.Propagation { + for _, o := range strings.Split(mode, ",") { + prop := mount.Propagation(o) + if linuxPropagationModes[prop] { + return prop + } + } + return linuxDefaultPropagationMode +} + +func linuxHasPropagation(mode string) bool { + for _, o := range strings.Split(mode, ",") { + if linuxPropagationModes[mount.Propagation(o)] { + return true + } + } + return false +} + +func linuxValidMountMode(mode string) bool { + if mode == "" { + return true + } + + rwModeCount := 0 + labelModeCount := 0 + propagationModeCount := 0 + copyModeCount := 0 + consistencyModeCount := 0 + + for _, o := range strings.Split(mode, ",") { + switch { + case rwModes[o]: + rwModeCount++ + case linuxLabelModes[o]: + labelModeCount++ + case linuxPropagationModes[mount.Propagation(o)]: + propagationModeCount++ + case copyModeExists(o): + copyModeCount++ + case 
linuxConsistencyModes[mount.Consistency(o)]: + consistencyModeCount++ + default: + return false + } + } + + // Only one string for each mode is allowed. + if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 || consistencyModeCount > 1 { + return false + } + return true +} + +func (p *linuxParser) ReadWrite(mode string) bool { + if !linuxValidMountMode(mode) { + return false + } + + for _, o := range strings.Split(mode, ",") { + if o == "ro" { + return false + } + } + return true +} + +func (p *linuxParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + arr, err := linuxSplitRawSpec(raw) + if err != nil { + return nil, err + } + + var spec mount.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if linuxValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. e.g. /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = arr[0] + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = arr[0] + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + + if !linuxValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + if path.IsAbs(spec.Source) { + spec.Type = mount.TypeBind + } else { + spec.Type = mount.TypeVolume + } + + spec.ReadOnly = !p.ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mount.TypeVolume { + spec.VolumeOptions = &mount.VolumeOptions{ + DriverConfig: &mount.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mount.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + if linuxHasPropagation(mode) { + spec.BindOptions = &mount.BindOptions{ + Propagation: linuxGetPropagation(mode), + } + } + + mp, err := p.parseMountSpec(spec, false) + if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} +func (p *linuxParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, true) +} +func (p *linuxParser) parseMountSpec(cfg mount.Mount, validateBindSourceExists bool) (*MountPoint, error) { + if err := p.validateMountConfigImpl(&cfg, validateBindSourceExists); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: path.Clean(filepath.ToSlash(cfg.Target)), + Type: cfg.Type, + Spec: cfg, + } + + switch cfg.Type { + case mount.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateNonCryptoID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = p.DefaultCopyMode() + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mount.TypeBind: + mp.Source = path.Clean(filepath.ToSlash(cfg.Source)) + if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 { + mp.Propagation = cfg.BindOptions.Propagation + } else { + // If user did not specify a propagation mode, get + // default propagation mode. 
+ mp.Propagation = linuxDefaultPropagationMode + } + case mount.TypeTmpfs: + // NOP + } + return mp, nil +} + +func (p *linuxParser) ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !linuxValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + // For now don't allow propagation properties while importing + // volumes from data container. These volumes will inherit + // the same propagation property as of the original volume + // in data container. This probably can be relaxed in future. + if linuxHasPropagation(mode) { + return "", "", errInvalidMode(mode) + } + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +func (p *linuxParser) DefaultPropagationMode() mount.Propagation { + return linuxDefaultPropagationMode +} + +func (p *linuxParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { + var rawOpts []string + if readOnly { + rawOpts = append(rawOpts, "ro") + } + + if opt != nil && opt.Mode != 0 { + rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode)) + } + + if opt != nil && opt.SizeBytes != 0 { + // calculate suffix here, making this linux specific, but that is + // okay, since API is that way anyways. + + // we do this by finding the suffix that divides evenly into the + // value, returning the value itself, with no suffix, if it fails. + // + // For the most part, we don't enforce any semantic to this values. + // The operating system will usually align this and enforce minimum + // and maximums. + var ( + size = opt.SizeBytes + suffix string + ) + for _, r := range []struct { + suffix string + divisor int64 + }{ + {"g", 1 << 30}, + {"m", 1 << 20}, + {"k", 1 << 10}, + } { + if size%r.divisor == 0 { + size = size / r.divisor + suffix = r.suffix + break + } + } + + rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix)) + } + return strings.Join(rawOpts, ","), nil +} + +func (p *linuxParser) DefaultCopyMode() bool { + return true +} +func (p *linuxParser) ValidateVolumeName(name string) error { + return nil +} + +func (p *linuxParser) IsBackwardCompatible(m *MountPoint) bool { + return len(m.Source) > 0 || m.Driver == DefaultDriverName +} + +func (p *linuxParser) ValidateTmpfsMountDestination(dest string) error { + if err := linuxValidateNotRoot(dest); err != nil { + return err + } + return linuxValidateAbsolute(dest) +} diff --git a/components/engine/volume/local/local.go b/components/engine/volume/local/local.go index eb78d875a5..c85122d63a 100644 --- a/components/engine/volume/local/local.go +++ b/components/engine/volume/local/local.go @@ -13,7 +13,7 @@ import ( "strings" "sync" - "github.com/docker/docker/api" + "github.com/docker/docker/daemon/names" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/volume" @@ -35,7 +35,7 @@ var ( // volumeNameRegex ensures the name assigned for the volume is valid. // This name is used to create the bind directory, so we need to avoid characters that // would make the path to escape the root directory. 
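Taken together, the linuxParser methods above replace the old package-level helpers with methods scoped to one platform. The following in-package sketch, written in the same style as the package's own tests, illustrates the behaviour they encode for a few representative inputs (illustrative only; the expected values follow from the validation rules in this hunk, and the snippet would live alongside the package tests rather than ship in the daemon):

package volume

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

// sketchLinuxParser is a compile-only illustration of the linuxParser entry points.
func sketchLinuxParser() {
	p := &linuxParser{}

	// A raw bind spec with a read-only flag and a propagation mode.
	mp, err := p.ParseMountRaw("/src:/dst:ro,rslave", "local")
	if err == nil {
		fmt.Println(mp.Type, mp.RW, mp.Propagation) // bind false rslave
	}

	// volumes-from keeps ro/rw but rejects propagation and copy modes.
	if id, mode, err := p.ParseVolumesFrom("data:ro"); err == nil {
		fmt.Println(id, mode) // data ro
	}
	if _, _, err := p.ParseVolumesFrom("data:rslave"); err != nil {
		fmt.Println(err) // invalid mode: rslave
	}

	// Tmpfs options are rendered as a mount(2) option string.
	opts, _ := p.ConvertTmpfsOptions(&mount.TmpfsOptions{SizeBytes: 1 << 20, Mode: 0700}, false)
	fmt.Println(opts) // mode=700,size=1m
}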
- volumeNameRegex = api.RestrictedNamePattern + volumeNameRegex = names.RestrictedNamePattern ) type activeMount struct { @@ -298,7 +298,7 @@ func (r *Root) validateName(name string) error { return validationError("volume name is too short, names should be at least two alphanumeric characters") } if !volumeNameRegex.MatchString(name) { - return validationError(fmt.Sprintf("%q includes invalid characters for a local volume name, only %q are allowed. If you intended to pass a host directory, use absolute path", name, api.RestrictedNameChars)) + return validationError(fmt.Sprintf("%q includes invalid characters for a local volume name, only %q are allowed. If you intended to pass a host directory, use absolute path", name, names.RestrictedNameChars)) } return nil } diff --git a/components/engine/volume/parser.go b/components/engine/volume/parser.go new file mode 100644 index 0000000000..1f48b60e27 --- /dev/null +++ b/components/engine/volume/parser.go @@ -0,0 +1,43 @@ +package volume + +import ( + "runtime" + + "github.com/docker/docker/api/types/mount" +) + +const ( + // OSLinux is the same as runtime.GOOS on linux + OSLinux = "linux" + // OSWindows is the same as runtime.GOOS on windows + OSWindows = "windows" +) + +// Parser represents a platform specific parser for mount expressions +type Parser interface { + ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) + ParseMountSpec(cfg mount.Mount) (*MountPoint, error) + ParseVolumesFrom(spec string) (string, string, error) + DefaultPropagationMode() mount.Propagation + ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) + DefaultCopyMode() bool + ValidateVolumeName(name string) error + ReadWrite(mode string) bool + IsBackwardCompatible(m *MountPoint) bool + HasResource(m *MountPoint, absPath string) bool + ValidateTmpfsMountDestination(dest string) error + + validateMountConfig(mt *mount.Mount) error +} + +// NewParser creates a parser for a given container OS, depending on the current host OS (linux on a windows host will resolve to an lcowParser) +func NewParser(containerOS string) Parser { + switch containerOS { + case OSWindows: + return &windowsParser{} + } + if runtime.GOOS == OSWindows { + return &lcowParser{} + } + return &linuxParser{} +} diff --git a/components/engine/volume/store/errors.go b/components/engine/volume/store/errors.go index 13c7765070..75e24e619c 100644 --- a/components/engine/volume/store/errors.go +++ b/components/engine/volume/store/errors.go @@ -9,8 +9,6 @@ const ( errVolumeInUse conflictError = "volume is in use" // errNoSuchVolume is a typed error returned if the requested volume doesn't exist in the volume store errNoSuchVolume notFoundError = "no such volume" - // errInvalidName is a typed error returned when creating a volume with a name that is not valid on the platform - errInvalidName invalidName = "volume name is not valid on this platform" // errNameConflict is a typed error returned on create when a volume exists with the given name, but for a different driver errNameConflict conflictError = "volume name must be unique" ) @@ -30,13 +28,6 @@ func (e notFoundError) Error() string { func (notFoundError) NotFound() {} -type invalidName string - -func (e invalidName) Error() string { - return string(e) -} -func (invalidName) InvalidParameter() {} - // OpErr is the error type returned by functions in the store package. It describes // the operation, volume name, and error. 
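The new parser.go shown above is the public seam for all of this: callers ask for a Parser by container OS, and a Linux container on a Windows host resolves to the lcowParser. A minimal external-caller sketch, assuming a Linux daemon host (so NewParser returns the linuxParser and volume-name validation is a no-op):

package main

import (
	"fmt"
	"runtime"

	"github.com/docker/docker/volume"
)

func main() {
	// Pick the validation rules for the current host, as store.go now does.
	p := volume.NewParser(runtime.GOOS)
	if err := p.ValidateVolumeName("my_data"); err != nil {
		fmt.Println("invalid volume name:", err)
		return
	}

	// Parse a raw -v style spec into a structured MountPoint.
	mp, err := p.ParseMountRaw("/var/log:/logs:ro", "local")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(mp.Type, mp.Source, mp.Destination, mp.RW) // bind /var/log /logs false
}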
type OpErr struct { diff --git a/components/engine/volume/store/store.go b/components/engine/volume/store/store.go index bc447612f9..e47ec0e7da 100644 --- a/components/engine/volume/store/store.go +++ b/components/engine/volume/store/store.go @@ -4,6 +4,7 @@ import ( "net" "os" "path/filepath" + "runtime" "sync" "time" @@ -369,13 +370,14 @@ func volumeExists(v volume.Volume) (bool, error) { // It is expected that callers of this function hold any necessary locks. func (s *VolumeStore) create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) { // Validate the name in a platform-specific manner - valid, err := volume.IsVolumeNameValid(name) + + // volume name validation is specific to the host os and not on container image + // windows/lcow should have an equivalent volumename validation logic so we create a parser for current host OS + parser := volume.NewParser(runtime.GOOS) + err := parser.ValidateVolumeName(name) if err != nil { return nil, err } - if !valid { - return nil, &OpErr{Err: errInvalidName, Name: name, Op: "create"} - } v, err := s.checkConflict(name, driverName) if err != nil { diff --git a/components/engine/volume/validate.go b/components/engine/volume/validate.go index 04883f35c5..b3f6409487 100644 --- a/components/engine/volume/validate.go +++ b/components/engine/volume/validate.go @@ -2,8 +2,6 @@ package volume import ( "fmt" - "os" - "runtime" "github.com/docker/docker/api/types/mount" "github.com/pkg/errors" @@ -11,120 +9,6 @@ import ( var errBindNotExist = errors.New("bind source path does not exist") -type validateOpts struct { - skipBindSourceCheck bool -} - -func validateMountConfig(mnt *mount.Mount, options ...func(*validateOpts)) error { - opts := validateOpts{} - for _, o := range options { - o(&opts) - } - - if len(mnt.Target) == 0 { - return &errMountConfig{mnt, errMissingField("Target")} - } - - if err := validateNotRoot(mnt.Target); err != nil { - return &errMountConfig{mnt, err} - } - - if err := validateAbsolute(mnt.Target); err != nil { - return &errMountConfig{mnt, err} - } - - switch mnt.Type { - case mount.TypeBind: - if len(mnt.Source) == 0 { - return &errMountConfig{mnt, errMissingField("Source")} - } - // Don't error out just because the propagation mode is not supported on the platform - if opts := mnt.BindOptions; opts != nil { - if len(opts.Propagation) > 0 && len(propagationModes) > 0 { - if _, ok := propagationModes[opts.Propagation]; !ok { - return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} - } - } - } - if mnt.VolumeOptions != nil { - return &errMountConfig{mnt, errExtraField("VolumeOptions")} - } - - if err := validateAbsolute(mnt.Source); err != nil { - return &errMountConfig{mnt, err} - } - - // Do not allow binding to non-existent path - if !opts.skipBindSourceCheck { - fi, err := os.Stat(mnt.Source) - if err != nil { - if !os.IsNotExist(err) { - return &errMountConfig{mnt, err} - } - return &errMountConfig{mnt, errBindNotExist} - } - if err := validateStat(fi); err != nil { - return &errMountConfig{mnt, err} - } - } - case mount.TypeVolume: - if mnt.BindOptions != nil { - return &errMountConfig{mnt, errExtraField("BindOptions")} - } - - if len(mnt.Source) == 0 && mnt.ReadOnly { - return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} - } - - if len(mnt.Source) != 0 { - if valid, err := IsVolumeNameValid(mnt.Source); !valid { - if err == nil { - err = errors.New("invalid volume name") - } - return &errMountConfig{mnt, err} - } 
- } - case mount.TypeTmpfs: - if len(mnt.Source) != 0 { - return &errMountConfig{mnt, errExtraField("Source")} - } - if err := ValidateTmpfsMountDestination(mnt.Target); err != nil { - return &errMountConfig{mnt, err} - } - if _, err := ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil { - return &errMountConfig{mnt, err} - } - case mount.TypeNamedPipe: - if runtime.GOOS != "windows" { - return &errMountConfig{mnt, errors.New("named pipe bind mounts are not supported on this OS")} - } - - if len(mnt.Source) == 0 { - return &errMountConfig{mnt, errMissingField("Source")} - } - - if mnt.BindOptions != nil { - return &errMountConfig{mnt, errExtraField("BindOptions")} - } - - if mnt.ReadOnly { - return &errMountConfig{mnt, errExtraField("ReadOnly")} - } - - if detectMountType(mnt.Source) != mount.TypeNamedPipe { - return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Source)} - } - - if detectMountType(mnt.Target) != mount.TypeNamedPipe { - return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Target)} - } - - default: - return &errMountConfig{mnt, errors.New("mount type unknown")} - } - return nil -} - type errMountConfig struct { mount *mount.Mount err error @@ -140,37 +24,3 @@ func errExtraField(name string) error { func errMissingField(name string) error { return errors.Errorf("field %s must not be empty", name) } - -func validateAbsolute(p string) error { - p = convertSlash(p) - if isAbsPath(p) { - return nil - } - return errors.Errorf("invalid mount path: '%s' mount path must be absolute", p) -} - -// ValidateTmpfsMountDestination validates the destination of tmpfs mount. -// Currently, we have only two obvious rule for validation: -// - path must not be "/" -// - path must be absolute -// We should add more rules carefully (#30166) -func ValidateTmpfsMountDestination(dest string) error { - if err := validateNotRoot(dest); err != nil { - return err - } - return validateAbsolute(dest) -} - -type validationError struct { - err error -} - -func (e validationError) Error() string { - return e.err.Error() -} - -func (e validationError) InvalidParameter() {} - -func (e validationError) Cause() error { - return e.err -} diff --git a/components/engine/volume/validate_test.go b/components/engine/volume/validate_test.go index 8732500fc0..6a8e28682b 100644 --- a/components/engine/volume/validate_test.go +++ b/components/engine/volume/validate_test.go @@ -4,6 +4,7 @@ import ( "errors" "io/ioutil" "os" + "runtime" "strings" "testing" @@ -27,17 +28,50 @@ func TestValidateMount(t *testing.T) { {mount.Mount{Type: mount.TypeBind}, errMissingField("Target")}, {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath}, errMissingField("Source")}, {mount.Mount{Type: mount.TypeBind, Target: testDestinationPath, Source: testSourcePath, VolumeOptions: &mount.VolumeOptions{}}, errExtraField("VolumeOptions")}, - {mount.Mount{Type: mount.TypeBind, Source: testSourcePath, Target: testDestinationPath}, errBindNotExist}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, nil}, {mount.Mount{Type: "invalid", Target: testDestinationPath}, errors.New("mount type unknown")}, } + if runtime.GOOS == "windows" { + cases = append(cases, struct { + input mount.Mount + expected error + }{mount.Mount{Type: mount.TypeBind, Source: testSourcePath, Target: testDestinationPath}, errBindNotExist}) // bind source existance is not checked on linux + } + lcowCases := []struct { + input mount.Mount + expected error + }{ + {mount.Mount{Type: 
mount.TypeVolume}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeVolume, Target: "/foo", Source: "hello"}, nil}, + {mount.Mount{Type: mount.TypeVolume, Target: "/foo"}, nil}, + {mount.Mount{Type: mount.TypeBind}, errMissingField("Target")}, + {mount.Mount{Type: mount.TypeBind, Target: "/foo"}, errMissingField("Source")}, + {mount.Mount{Type: mount.TypeBind, Target: "/foo", Source: "c:\\foo", VolumeOptions: &mount.VolumeOptions{}}, errExtraField("VolumeOptions")}, + {mount.Mount{Type: mount.TypeBind, Source: "c:\\foo", Target: "/foo"}, errBindNotExist}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: "/foo"}, nil}, + {mount.Mount{Type: "invalid", Target: "/foo"}, errors.New("mount type unknown")}, + } + parser := NewParser(runtime.GOOS) for i, x := range cases { - err := validateMountConfig(&x.input) + err := parser.validateMountConfig(&x.input) if err == nil && x.expected == nil { continue } if (err == nil && x.expected != nil) || (x.expected == nil && err != nil) || !strings.Contains(err.Error(), x.expected.Error()) { - t.Fatalf("expected %q, got %q, case: %d", x.expected, err, i) + t.Errorf("expected %q, got %q, case: %d", x.expected, err, i) + } + } + if runtime.GOOS == "windows" { + parser = &lcowParser{} + for i, x := range lcowCases { + err := parser.validateMountConfig(&x.input) + if err == nil && x.expected == nil { + continue + } + if (err == nil && x.expected != nil) || (x.expected == nil && err != nil) || !strings.Contains(err.Error(), x.expected.Error()) { + t.Errorf("expected %q, got %q, case: %d", x.expected, err, i) + } } } } diff --git a/components/engine/volume/volume.go b/components/engine/volume/volume.go index 7f962a981e..4aa4de513d 100644 --- a/components/engine/volume/volume.go +++ b/components/engine/volume/volume.go @@ -3,7 +3,6 @@ package volume import ( "fmt" "os" - "strings" "syscall" "time" @@ -216,153 +215,10 @@ func (m *MountPoint) Path() string { return m.Source } -// ParseVolumesFrom ensures that the supplied volumes-from is valid. -func ParseVolumesFrom(spec string) (string, string, error) { - if len(spec) == 0 { - return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") - } - - specParts := strings.SplitN(spec, ":", 2) - id := specParts[0] - mode := "rw" - - if len(specParts) == 2 { - mode = specParts[1] - if !ValidMountMode(mode) { - return "", "", errInvalidMode(mode) - } - // For now don't allow propagation properties while importing - // volumes from data container. These volumes will inherit - // the same propagation property as of the original volume - // in data container. This probably can be relaxed in future. - if HasPropagation(mode) { - return "", "", errInvalidMode(mode) - } - // Do not allow copy modes on volumes-from - if _, isSet := getCopyMode(mode); isSet { - return "", "", errInvalidMode(mode) - } - } - return id, mode, nil -} - -// ParseMountRaw parses a raw volume spec (e.g. `-v /foo:/bar:shared`) into a -// structured spec. Once the raw spec is parsed it relies on `ParseMountSpec` to -// validate the spec and create a MountPoint -func ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { - arr, err := splitRawSpec(convertSlash(raw)) - if err != nil { - return nil, err - } - - var spec mounttypes.Mount - var mode string - switch len(arr) { - case 1: - // Just a destination path in the container - spec.Target = arr[0] - case 2: - if ValidMountMode(arr[1]) { - // Destination + Mode is not a valid volume - volumes - // cannot include a mode. e.g. 
/foo:rw - return nil, errInvalidSpec(raw) - } - // Host Source Path or Name + Destination - spec.Source = arr[0] - spec.Target = arr[1] - case 3: - // HostSourcePath+DestinationPath+Mode - spec.Source = arr[0] - spec.Target = arr[1] - mode = arr[2] - default: - return nil, errInvalidSpec(raw) - } - - if !ValidMountMode(mode) { - return nil, errInvalidMode(mode) - } - - spec.Type = detectMountType(spec.Source) - spec.ReadOnly = !ReadWrite(mode) - - // cannot assume that if a volume driver is passed in that we should set it - if volumeDriver != "" && spec.Type == mounttypes.TypeVolume { - spec.VolumeOptions = &mounttypes.VolumeOptions{ - DriverConfig: &mounttypes.Driver{Name: volumeDriver}, - } - } - - if copyData, isSet := getCopyMode(mode); isSet { - if spec.VolumeOptions == nil { - spec.VolumeOptions = &mounttypes.VolumeOptions{} - } - spec.VolumeOptions.NoCopy = !copyData - } - if HasPropagation(mode) { - spec.BindOptions = &mounttypes.BindOptions{ - Propagation: GetPropagation(mode), - } - } - - mp, err := ParseMountSpec(spec, platformRawValidationOpts...) - if mp != nil { - mp.Mode = mode - } - if err != nil { - err = errors.Wrap(err, errInvalidSpec(raw).Error()) - } - return mp, err -} - -// ParseMountSpec reads a mount config, validates it, and configures a mountpoint from it. -func ParseMountSpec(cfg mounttypes.Mount, options ...func(*validateOpts)) (*MountPoint, error) { - if err := validateMountConfig(&cfg, options...); err != nil { - return nil, validationError{err} - } - mp := &MountPoint{ - RW: !cfg.ReadOnly, - Destination: clean(convertSlash(cfg.Target)), - Type: cfg.Type, - Spec: cfg, - } - - switch cfg.Type { - case mounttypes.TypeVolume: - if cfg.Source == "" { - mp.Name = stringid.GenerateNonCryptoID() - } else { - mp.Name = cfg.Source - } - mp.CopyData = DefaultCopyMode - - if cfg.VolumeOptions != nil { - if cfg.VolumeOptions.DriverConfig != nil { - mp.Driver = cfg.VolumeOptions.DriverConfig.Name - } - if cfg.VolumeOptions.NoCopy { - mp.CopyData = false - } - } - case mounttypes.TypeBind, mounttypes.TypeNamedPipe: - mp.Source = clean(convertSlash(cfg.Source)) - if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 { - mp.Propagation = cfg.BindOptions.Propagation - } else { - // If user did not specify a propagation mode, get - // default propagation mode. 
- mp.Propagation = DefaultPropagationMode - } - case mounttypes.TypeTmpfs: - // NOP - } - return mp, nil -} - func errInvalidMode(mode string) error { - return validationError{errors.Errorf("invalid mode: %v", mode)} + return errors.Errorf("invalid mode: %v", mode) } func errInvalidSpec(spec string) error { - return validationError{errors.Errorf("invalid volume specification: '%s'", spec)} + return errors.Errorf("invalid volume specification: '%s'", spec) } diff --git a/components/engine/volume/volume_copy.go b/components/engine/volume/volume_copy.go index 77f06a0d1f..37c7fa74a6 100644 --- a/components/engine/volume/volume_copy.go +++ b/components/engine/volume/volume_copy.go @@ -13,11 +13,11 @@ func copyModeExists(mode string) bool { } // GetCopyMode gets the copy mode from the mode string for mounts -func getCopyMode(mode string) (bool, bool) { +func getCopyMode(mode string, def bool) (bool, bool) { for _, o := range strings.Split(mode, ",") { if isEnabled, exists := copyModes[o]; exists { return isEnabled, true } } - return DefaultCopyMode, false + return def, false } diff --git a/components/engine/volume/volume_copy_unix.go b/components/engine/volume/volume_copy_unix.go deleted file mode 100644 index ad66e17637..0000000000 --- a/components/engine/volume/volume_copy_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package volume - -const ( - // DefaultCopyMode is the copy mode used by default for normal/named volumes - DefaultCopyMode = true -) diff --git a/components/engine/volume/volume_copy_windows.go b/components/engine/volume/volume_copy_windows.go deleted file mode 100644 index 798638c878..0000000000 --- a/components/engine/volume/volume_copy_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package volume - -const ( - // DefaultCopyMode is the copy mode used by default for normal/named volumes - DefaultCopyMode = false -) diff --git a/components/engine/volume/volume_linux.go b/components/engine/volume/volume_linux.go deleted file mode 100644 index fdf7b63e4b..0000000000 --- a/components/engine/volume/volume_linux.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build linux - -package volume - -import ( - "fmt" - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" -) - -// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string -// for mount(2). -func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) { - var rawOpts []string - if readOnly { - rawOpts = append(rawOpts, "ro") - } - - if opt != nil && opt.Mode != 0 { - rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode)) - } - - if opt != nil && opt.SizeBytes != 0 { - // calculate suffix here, making this linux specific, but that is - // okay, since API is that way anyways. - - // we do this by finding the suffix that divides evenly into the - // value, returning the value itself, with no suffix, if it fails. - // - // For the most part, we don't enforce any semantic to this values. - // The operating system will usually align this and enforce minimum - // and maximums. 
- var ( - size = opt.SizeBytes - suffix string - ) - for _, r := range []struct { - suffix string - divisor int64 - }{ - {"g", 1 << 30}, - {"m", 1 << 20}, - {"k", 1 << 10}, - } { - if size%r.divisor == 0 { - size = size / r.divisor - suffix = r.suffix - break - } - } - - rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix)) - } - return strings.Join(rawOpts, ","), nil -} diff --git a/components/engine/volume/volume_linux_test.go b/components/engine/volume/volume_linux_test.go deleted file mode 100644 index 40ce5525a3..0000000000 --- a/components/engine/volume/volume_linux_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build linux - -package volume - -import ( - "strings" - "testing" - - mounttypes "github.com/docker/docker/api/types/mount" -) - -func TestConvertTmpfsOptions(t *testing.T) { - type testCase struct { - opt mounttypes.TmpfsOptions - readOnly bool - expectedSubstrings []string - unexpectedSubstrings []string - } - cases := []testCase{ - { - opt: mounttypes.TmpfsOptions{SizeBytes: 1024 * 1024, Mode: 0700}, - readOnly: false, - expectedSubstrings: []string{"size=1m", "mode=700"}, - unexpectedSubstrings: []string{"ro"}, - }, - { - opt: mounttypes.TmpfsOptions{}, - readOnly: true, - expectedSubstrings: []string{"ro"}, - unexpectedSubstrings: []string{}, - }, - } - for _, c := range cases { - data, err := ConvertTmpfsOptions(&c.opt, c.readOnly) - if err != nil { - t.Fatalf("could not convert %+v (readOnly: %v) to string: %v", - c.opt, c.readOnly, err) - } - t.Logf("data=%q", data) - for _, s := range c.expectedSubstrings { - if !strings.Contains(data, s) { - t.Fatalf("expected substring: %s, got %v (case=%+v)", s, data, c) - } - } - for _, s := range c.unexpectedSubstrings { - if strings.Contains(data, s) { - t.Fatalf("unexpected substring: %s, got %v (case=%+v)", s, data, c) - } - } - } -} diff --git a/components/engine/volume/volume_propagation_linux.go b/components/engine/volume/volume_propagation_linux.go deleted file mode 100644 index 1de57ab52b..0000000000 --- a/components/engine/volume/volume_propagation_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build linux - -package volume - -import ( - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" -) - -// DefaultPropagationMode defines what propagation mode should be used by -// default if user has not specified one explicitly. -// propagation modes -const DefaultPropagationMode = mounttypes.PropagationRPrivate - -var propagationModes = map[mounttypes.Propagation]bool{ - mounttypes.PropagationPrivate: true, - mounttypes.PropagationRPrivate: true, - mounttypes.PropagationSlave: true, - mounttypes.PropagationRSlave: true, - mounttypes.PropagationShared: true, - mounttypes.PropagationRShared: true, -} - -// GetPropagation extracts and returns the mount propagation mode. If there -// are no specifications, then by default it is "private". -func GetPropagation(mode string) mounttypes.Propagation { - for _, o := range strings.Split(mode, ",") { - prop := mounttypes.Propagation(o) - if propagationModes[prop] { - return prop - } - } - return DefaultPropagationMode -} - -// HasPropagation checks if there is a valid propagation mode present in -// passed string. Returns true if a valid propagation mode specifier is -// present, false otherwise. 
-func HasPropagation(mode string) bool { - for _, o := range strings.Split(mode, ",") { - if propagationModes[mounttypes.Propagation(o)] { - return true - } - } - return false -} diff --git a/components/engine/volume/volume_propagation_linux_test.go b/components/engine/volume/volume_propagation_linux_test.go deleted file mode 100644 index 46d0265062..0000000000 --- a/components/engine/volume/volume_propagation_linux_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build linux - -package volume - -import ( - "strings" - "testing" -) - -func TestParseMountRawPropagation(t *testing.T) { - var ( - valid []string - invalid map[string]string - ) - - valid = []string{ - "/hostPath:/containerPath:shared", - "/hostPath:/containerPath:rshared", - "/hostPath:/containerPath:slave", - "/hostPath:/containerPath:rslave", - "/hostPath:/containerPath:private", - "/hostPath:/containerPath:rprivate", - "/hostPath:/containerPath:ro,shared", - "/hostPath:/containerPath:ro,slave", - "/hostPath:/containerPath:ro,private", - "/hostPath:/containerPath:ro,z,shared", - "/hostPath:/containerPath:ro,Z,slave", - "/hostPath:/containerPath:Z,ro,slave", - "/hostPath:/containerPath:slave,Z,ro", - "/hostPath:/containerPath:Z,slave,ro", - "/hostPath:/containerPath:slave,ro,Z", - "/hostPath:/containerPath:rslave,ro,Z", - "/hostPath:/containerPath:ro,rshared,Z", - "/hostPath:/containerPath:ro,Z,rprivate", - } - invalid = map[string]string{ - "/path:/path:ro,rshared,rslave": `invalid mode`, - "/path:/path:ro,z,rshared,rslave": `invalid mode`, - "/path:shared": "invalid volume specification", - "/path:slave": "invalid volume specification", - "/path:private": "invalid volume specification", - "name:/absolute-path:shared": "invalid volume specification", - "name:/absolute-path:rshared": "invalid volume specification", - "name:/absolute-path:slave": "invalid volume specification", - "name:/absolute-path:rslave": "invalid volume specification", - "name:/absolute-path:private": "invalid volume specification", - "name:/absolute-path:rprivate": "invalid volume specification", - } - - for _, path := range valid { - if _, err := ParseMountRaw(path, "local"); err != nil { - t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if _, err := ParseMountRaw(path, "local"); err == nil { - t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err %v", path, err) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) - } - } - } -} diff --git a/components/engine/volume/volume_propagation_unsupported.go b/components/engine/volume/volume_propagation_unsupported.go deleted file mode 100644 index 7311ffc2e0..0000000000 --- a/components/engine/volume/volume_propagation_unsupported.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !linux - -package volume - -import mounttypes "github.com/docker/docker/api/types/mount" - -// DefaultPropagationMode is used only in linux. In other cases it returns -// empty string. -const DefaultPropagationMode mounttypes.Propagation = "" - -// propagation modes not supported on this platform. -var propagationModes = map[mounttypes.Propagation]bool{} - -// GetPropagation is not supported. Return empty string. -func GetPropagation(mode string) mounttypes.Propagation { - return DefaultPropagationMode -} - -// HasPropagation checks if there is a valid propagation mode present in -// passed string. 
Returns true if a valid propagation mode specifier is -// present, false otherwise. -func HasPropagation(mode string) bool { - return false -} diff --git a/components/engine/volume/volume_test.go b/components/engine/volume/volume_test.go index 395f374ff0..9f2020d58d 100644 --- a/components/engine/volume/volume_test.go +++ b/components/engine/volume/volume_test.go @@ -10,14 +10,84 @@ import ( "github.com/docker/docker/api/types/mount" ) -func TestParseMountRaw(t *testing.T) { - var ( - valid []string - invalid map[string]string - ) +type parseMountRawTestSet struct { + valid []string + invalid map[string]string +} - if runtime.GOOS == "windows" { - valid = []string{ +func TestConvertTmpfsOptions(t *testing.T) { + type testCase struct { + opt mount.TmpfsOptions + readOnly bool + expectedSubstrings []string + unexpectedSubstrings []string + } + cases := []testCase{ + { + opt: mount.TmpfsOptions{SizeBytes: 1024 * 1024, Mode: 0700}, + readOnly: false, + expectedSubstrings: []string{"size=1m", "mode=700"}, + unexpectedSubstrings: []string{"ro"}, + }, + { + opt: mount.TmpfsOptions{}, + readOnly: true, + expectedSubstrings: []string{"ro"}, + unexpectedSubstrings: []string{}, + }, + } + p := &linuxParser{} + for _, c := range cases { + data, err := p.ConvertTmpfsOptions(&c.opt, c.readOnly) + if err != nil { + t.Fatalf("could not convert %+v (readOnly: %v) to string: %v", + c.opt, c.readOnly, err) + } + t.Logf("data=%q", data) + for _, s := range c.expectedSubstrings { + if !strings.Contains(data, s) { + t.Fatalf("expected substring: %s, got %v (case=%+v)", s, data, c) + } + } + for _, s := range c.unexpectedSubstrings { + if strings.Contains(data, s) { + t.Fatalf("unexpected substring: %s, got %v (case=%+v)", s, data, c) + } + } + } +} + +type mockFiProvider struct{} + +func (mockFiProvider) fileInfo(path string) (exists, isDir bool, err error) { + dirs := map[string]struct{}{ + `c:\`: {}, + `c:\windows\`: {}, + `c:\windows`: {}, + `c:\program files`: {}, + `c:\Windows`: {}, + `c:\Program Files (x86)`: {}, + `\\?\c:\windows\`: {}, + } + files := map[string]struct{}{ + `c:\windows\system32\ntdll.dll`: {}, + } + if _, ok := dirs[path]; ok { + return true, true, nil + } + if _, ok := files[path]; ok { + return true, false, nil + } + return false, false, nil +} + +func TestParseMountRaw(t *testing.T) { + + previousProvider := currentFileInfoProvider + defer func() { currentFileInfoProvider = previousProvider }() + currentFileInfoProvider = mockFiProvider{} + windowsSet := parseMountRawTestSet{ + valid: []string{ `d:\`, `d:`, `d:\path`, @@ -35,10 +105,14 @@ func TestParseMountRaw(t *testing.T) { `name:D::RO`, `c:/:d:/forward/slashes/are/good/too`, `c:/:d:/including with/spaces:ro`, - `c:\Windows`, // With capital - `c:\Program Files (x86)`, // With capitals and brackets - } - invalid = map[string]string{ + `c:\Windows`, // With capital + `c:\Program Files (x86)`, // With capitals and brackets + `\\?\c:\windows\:d:`, // Long path handling (source) + `c:\windows\:\\?\d:\`, // Long path handling (target) + `\\.\pipe\foo:\\.\pipe\foo`, // named pipe + `//./pipe/foo://./pipe/foo`, // named pipe forward slashes + }, + invalid: map[string]string{ ``: "invalid volume specification: ", `.`: "invalid volume specification: ", `..\`: "invalid volume specification: ", @@ -82,10 +156,79 @@ func TestParseMountRaw(t *testing.T) { `lpt8:d:`: `cannot be a reserved word for Windows filenames`, `lpt9:d:`: `cannot be a reserved word for Windows filenames`, `c:\windows\system32\ntdll.dll`: `Only directories can be mapped on 
this platform`, - } - - } else { - valid = []string{ + `\\.\pipe\foo:c:\pipe`: `'c:\pipe' is not a valid pipe path`, + }, + } + lcowSet := parseMountRawTestSet{ + valid: []string{ + `/foo`, + `/foo/`, + `/foo bar`, + `c:\:/foo`, + `c:\windows\:/foo`, + `c:\windows:/s p a c e`, + `c:\windows:/s p a c e:RW`, + `c:\program files:/s p a c e i n h o s t d i r`, + `0123456789name:/foo`, + `MiXeDcAsEnAmE:/foo`, + `name:/foo`, + `name:/foo:rW`, + `name:/foo:RW`, + `name:/foo:RO`, + `c:/:/forward/slashes/are/good/too`, + `c:/:/including with/spaces:ro`, + `/Program Files (x86)`, // With capitals and brackets + }, + invalid: map[string]string{ + ``: "invalid volume specification: ", + `.`: "invalid volume specification: ", + `c:`: "invalid volume specification: ", + `c:\`: "invalid volume specification: ", + `../`: "invalid volume specification: ", + `c:\:../`: "invalid volume specification: ", + `c:\:/foo:xyzzy`: "invalid volume specification: ", + `/`: "destination can't be '/'", + `/..`: "destination can't be '/'", + `c:\notexist:/foo`: `source path does not exist`, + `c:\windows\system32\ntdll.dll:/foo`: `source path must be a directory`, + `name<:/foo`: `invalid volume specification`, + `name>:/foo`: `invalid volume specification`, + `name::/foo`: `invalid volume specification`, + `name":/foo`: `invalid volume specification`, + `name\:/foo`: `invalid volume specification`, + `name*:/foo`: `invalid volume specification`, + `name|:/foo`: `invalid volume specification`, + `name?:/foo`: `invalid volume specification`, + `name/:/foo`: `invalid volume specification`, + `/foo:rw`: `invalid volume specification`, + `/foo:ro`: `invalid volume specification`, + `con:/foo`: `cannot be a reserved word for Windows filenames`, + `PRN:/foo`: `cannot be a reserved word for Windows filenames`, + `aUx:/foo`: `cannot be a reserved word for Windows filenames`, + `nul:/foo`: `cannot be a reserved word for Windows filenames`, + `com1:/foo`: `cannot be a reserved word for Windows filenames`, + `com2:/foo`: `cannot be a reserved word for Windows filenames`, + `com3:/foo`: `cannot be a reserved word for Windows filenames`, + `com4:/foo`: `cannot be a reserved word for Windows filenames`, + `com5:/foo`: `cannot be a reserved word for Windows filenames`, + `com6:/foo`: `cannot be a reserved word for Windows filenames`, + `com7:/foo`: `cannot be a reserved word for Windows filenames`, + `com8:/foo`: `cannot be a reserved word for Windows filenames`, + `com9:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt1:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt2:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt3:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt4:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt5:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt6:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt7:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt8:/foo`: `cannot be a reserved word for Windows filenames`, + `lpt9:/foo`: `cannot be a reserved word for Windows filenames`, + `\\.\pipe\foo:/foo`: `Linux containers on Windows do not support named pipe mounts`, + }, + } + linuxSet := parseMountRawTestSet{ + valid: []string{ "/home", "/home:/home", "/home:/something/else", @@ -95,47 +238,87 @@ func TestParseMountRaw(t *testing.T) { "hostPath:/containerPath:ro", "/hostPath:/containerPath:rw", "/rw:/ro", - } - invalid = map[string]string{ - "": "invalid volume specification", - "./": "mount path must be 
absolute", - "../": "mount path must be absolute", - "/:../": "mount path must be absolute", - "/:path": "mount path must be absolute", - ":": "invalid volume specification", - "/tmp:": "invalid volume specification", - ":test": "invalid volume specification", - ":/test": "invalid volume specification", - "tmp:": "invalid volume specification", - ":test:": "invalid volume specification", - "::": "invalid volume specification", - ":::": "invalid volume specification", - "/tmp:::": "invalid volume specification", - ":/tmp::": "invalid volume specification", - "/path:rw": "invalid volume specification", - "/path:ro": "invalid volume specification", - "/rw:rw": "invalid volume specification", - "path:ro": "invalid volume specification", - "/path:/path:sw": `invalid mode`, - "/path:/path:rwz": `invalid mode`, - } + "/hostPath:/containerPath:shared", + "/hostPath:/containerPath:rshared", + "/hostPath:/containerPath:slave", + "/hostPath:/containerPath:rslave", + "/hostPath:/containerPath:private", + "/hostPath:/containerPath:rprivate", + "/hostPath:/containerPath:ro,shared", + "/hostPath:/containerPath:ro,slave", + "/hostPath:/containerPath:ro,private", + "/hostPath:/containerPath:ro,z,shared", + "/hostPath:/containerPath:ro,Z,slave", + "/hostPath:/containerPath:Z,ro,slave", + "/hostPath:/containerPath:slave,Z,ro", + "/hostPath:/containerPath:Z,slave,ro", + "/hostPath:/containerPath:slave,ro,Z", + "/hostPath:/containerPath:rslave,ro,Z", + "/hostPath:/containerPath:ro,rshared,Z", + "/hostPath:/containerPath:ro,Z,rprivate", + }, + invalid: map[string]string{ + "": "invalid volume specification", + "./": "mount path must be absolute", + "../": "mount path must be absolute", + "/:../": "mount path must be absolute", + "/:path": "mount path must be absolute", + ":": "invalid volume specification", + "/tmp:": "invalid volume specification", + ":test": "invalid volume specification", + ":/test": "invalid volume specification", + "tmp:": "invalid volume specification", + ":test:": "invalid volume specification", + "::": "invalid volume specification", + ":::": "invalid volume specification", + "/tmp:::": "invalid volume specification", + ":/tmp::": "invalid volume specification", + "/path:rw": "invalid volume specification", + "/path:ro": "invalid volume specification", + "/rw:rw": "invalid volume specification", + "path:ro": "invalid volume specification", + "/path:/path:sw": `invalid mode`, + "/path:/path:rwz": `invalid mode`, + "/path:/path:ro,rshared,rslave": `invalid mode`, + "/path:/path:ro,z,rshared,rslave": `invalid mode`, + "/path:shared": "invalid volume specification", + "/path:slave": "invalid volume specification", + "/path:private": "invalid volume specification", + "name:/absolute-path:shared": "invalid volume specification", + "name:/absolute-path:rshared": "invalid volume specification", + "name:/absolute-path:slave": "invalid volume specification", + "name:/absolute-path:rslave": "invalid volume specification", + "name:/absolute-path:private": "invalid volume specification", + "name:/absolute-path:rprivate": "invalid volume specification", + }, } - for _, path := range valid { - if _, err := ParseMountRaw(path, "local"); err != nil { - t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err) - } - } + linParser := &linuxParser{} + winParser := &windowsParser{} + lcowParser := &lcowParser{} + tester := func(parser Parser, set parseMountRawTestSet) { - for path, expectedError := range invalid { - if mp, err := ParseMountRaw(path, "local"); err == nil { - 
t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err '%v' - MP: %v", path, err, mp) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) + for _, path := range set.valid { + + if _, err := parser.ParseMountRaw(path, "local"); err != nil { + t.Errorf("ParseMountRaw(`%q`) should succeed: error %q", path, err) + } + } + + for path, expectedError := range set.invalid { + if mp, err := parser.ParseMountRaw(path, "local"); err == nil { + t.Errorf("ParseMountRaw(`%q`) should have failed validation. Err '%v' - MP: %v", path, err, mp) + } else { + if !strings.Contains(err.Error(), expectedError) { + t.Errorf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error()) + } } } } + tester(linParser, linuxSet) + tester(winParser, windowsSet) + tester(lcowParser, lcowSet) + } // testParseMountRaw is a structure used by TestParseMountRawSplit for @@ -153,76 +336,96 @@ type testParseMountRaw struct { } func TestParseMountRawSplit(t *testing.T) { - var cases []testParseMountRaw - if runtime.GOOS == "windows" { - cases = []testParseMountRaw{ - {`c:\:d:`, "local", mount.TypeBind, `d:`, `c:\`, ``, "", true, false}, - {`c:\:d:\`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", true, false}, - {`c:\:d:\:ro`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", false, false}, - {`c:\:d:\:rw`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", true, false}, - {`c:\:d:\:foo`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", false, true}, - {`\\.\pipe\foo:\\.\pipe\bar`, "local", mount.TypeNamedPipe, `\\.\pipe\bar`, `\\.\pipe\foo`, "", "", true, false}, - {`\\.\pipe\foo:c:\foo\bar`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true}, - {`c:\foo\bar:\\.\pipe\foo`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true}, - {`name:d::rw`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", true, false}, - {`name:d:`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", true, false}, - {`name:d::ro`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", false, false}, - {`name:c:`, "", mount.TypeVolume, ``, ``, ``, "", true, true}, - {`driver/name:c:`, "", mount.TypeVolume, ``, ``, ``, "", true, true}, - } - } else { - cases = []testParseMountRaw{ - {"/tmp:/tmp1", "", mount.TypeBind, "/tmp1", "/tmp", "", "", true, false}, - {"/tmp:/tmp2:ro", "", mount.TypeBind, "/tmp2", "/tmp", "", "", false, false}, - {"/tmp:/tmp3:rw", "", mount.TypeBind, "/tmp3", "/tmp", "", "", true, false}, - {"/tmp:/tmp4:foo", "", mount.TypeBind, "", "", "", "", false, true}, - {"name:/named1", "", mount.TypeVolume, "/named1", "", "name", "", true, false}, - {"name:/named2", "external", mount.TypeVolume, "/named2", "", "name", "external", true, false}, - {"name:/named3:ro", "local", mount.TypeVolume, "/named3", "", "name", "local", false, false}, - {"local/name:/tmp:rw", "", mount.TypeVolume, "/tmp", "", "local/name", "", true, false}, - {"/tmp:tmp", "", mount.TypeBind, "", "", "", "", true, true}, - } + previousProvider := currentFileInfoProvider + defer func() { currentFileInfoProvider = previousProvider }() + currentFileInfoProvider = mockFiProvider{} + windowsCases := []testParseMountRaw{ + {`c:\:d:`, "local", mount.TypeBind, `d:`, `c:\`, ``, "", true, false}, + {`c:\:d:\`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", true, false}, + {`c:\:d:\:ro`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", false, false}, + {`c:\:d:\:rw`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", true, false}, + 
{`c:\:d:\:foo`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", false, true}, + {`name:d::rw`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", true, false}, + {`name:d:`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", true, false}, + {`name:d::ro`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", false, false}, + {`name:c:`, "", mount.TypeVolume, ``, ``, ``, "", true, true}, + {`driver/name:c:`, "", mount.TypeVolume, ``, ``, ``, "", true, true}, + {`\\.\pipe\foo:\\.\pipe\bar`, "local", mount.TypeNamedPipe, `\\.\pipe\bar`, `\\.\pipe\foo`, "", "", true, false}, + {`\\.\pipe\foo:c:\foo\bar`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true}, + {`c:\foo\bar:\\.\pipe\foo`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true}, } - - for i, c := range cases { - t.Logf("case %d", i) - m, err := ParseMountRaw(c.bind, c.driver) - if c.fail { - if err == nil { - t.Fatalf("Expected error, was nil, for spec %s\n", c.bind) + lcowCases := []testParseMountRaw{ + {`c:\:/foo`, "local", mount.TypeBind, `/foo`, `c:\`, ``, "", true, false}, + {`c:\:/foo:ro`, "local", mount.TypeBind, `/foo`, `c:\`, ``, "", false, false}, + {`c:\:/foo:rw`, "local", mount.TypeBind, `/foo`, `c:\`, ``, "", true, false}, + {`c:\:/foo:foo`, "local", mount.TypeBind, `/foo`, `c:\`, ``, "", false, true}, + {`name:/foo:rw`, "local", mount.TypeVolume, `/foo`, ``, `name`, "local", true, false}, + {`name:/foo`, "local", mount.TypeVolume, `/foo`, ``, `name`, "local", true, false}, + {`name:/foo:ro`, "local", mount.TypeVolume, `/foo`, ``, `name`, "local", false, false}, + {`name:/`, "", mount.TypeVolume, ``, ``, ``, "", true, true}, + {`driver/name:/`, "", mount.TypeVolume, ``, ``, ``, "", true, true}, + {`\\.\pipe\foo:\\.\pipe\bar`, "local", mount.TypeNamedPipe, `\\.\pipe\bar`, `\\.\pipe\foo`, "", "", true, true}, + {`\\.\pipe\foo:/data`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true}, + {`c:\foo\bar:\\.\pipe\foo`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true}, + } + linuxCases := []testParseMountRaw{ + {"/tmp:/tmp1", "", mount.TypeBind, "/tmp1", "/tmp", "", "", true, false}, + {"/tmp:/tmp2:ro", "", mount.TypeBind, "/tmp2", "/tmp", "", "", false, false}, + {"/tmp:/tmp3:rw", "", mount.TypeBind, "/tmp3", "/tmp", "", "", true, false}, + {"/tmp:/tmp4:foo", "", mount.TypeBind, "", "", "", "", false, true}, + {"name:/named1", "", mount.TypeVolume, "/named1", "", "name", "", true, false}, + {"name:/named2", "external", mount.TypeVolume, "/named2", "", "name", "external", true, false}, + {"name:/named3:ro", "local", mount.TypeVolume, "/named3", "", "name", "local", false, false}, + {"local/name:/tmp:rw", "", mount.TypeVolume, "/tmp", "", "local/name", "", true, false}, + {"/tmp:tmp", "", mount.TypeBind, "", "", "", "", true, true}, + } + linParser := &linuxParser{} + winParser := &windowsParser{} + lcowParser := &lcowParser{} + tester := func(parser Parser, cases []testParseMountRaw) { + for i, c := range cases { + t.Logf("case %d", i) + m, err := parser.ParseMountRaw(c.bind, c.driver) + if c.fail { + if err == nil { + t.Errorf("Expected error, was nil, for spec %s\n", c.bind) + } + continue } - continue - } - if m == nil || err != nil { - t.Fatalf("ParseMountRaw failed for spec '%s', driver '%s', error '%v'", c.bind, c.driver, err.Error()) - continue - } + if m == nil || err != nil { + t.Errorf("ParseMountRaw failed for spec '%s', driver '%s', error '%v'", c.bind, c.driver, err.Error()) + continue + } - if m.Type != c.expType { - t.Fatalf("Expected type '%s', was '%s', for spec '%s'", c.expType, 
m.Type, c.bind) - } + if m.Destination != c.expDest { + t.Errorf("Expected destination '%s, was %s', for spec '%s'", c.expDest, m.Destination, c.bind) + } - if m.Destination != c.expDest { - t.Fatalf("Expected destination '%s', was '%s', for spec '%s'", c.expDest, m.Destination, c.bind) - } + if m.Source != c.expSource { + t.Errorf("Expected source '%s', was '%s', for spec '%s'", c.expSource, m.Source, c.bind) + } - if m.Source != c.expSource { - t.Fatalf("Expected source '%s', was '%s', for spec '%s'", c.expSource, m.Source, c.bind) - } + if m.Name != c.expName { + t.Errorf("Expected name '%s', was '%s' for spec '%s'", c.expName, m.Name, c.bind) + } - if m.Name != c.expName { - t.Fatalf("Expected name '%s', was '%s' for spec '%s'", c.expName, m.Name, c.bind) - } + if m.Driver != c.expDriver { + t.Errorf("Expected driver '%s', was '%s', for spec '%s'", c.expDriver, m.Driver, c.bind) + } - if m.Driver != c.expDriver { - t.Fatalf("Expected driver '%s', was '%s', for spec '%s'", c.expDriver, m.Driver, c.bind) - } - - if m.RW != c.expRW { - t.Fatalf("Expected RW '%v', was '%v' for spec '%s'", c.expRW, m.RW, c.bind) + if m.RW != c.expRW { + t.Errorf("Expected RW '%v', was '%v' for spec '%s'", c.expRW, m.RW, c.bind) + } + if m.Type != c.expType { + t.Fatalf("Expected type '%s', was '%s', for spec '%s'", c.expType, m.Type, c.bind) + } } } + + tester(linParser, linuxCases) + tester(winParser, windowsCases) + tester(lcowParser, lcowCases) } func TestParseMountSpec(t *testing.T) { @@ -235,43 +438,43 @@ func TestParseMountSpec(t *testing.T) { t.Fatal(err) } defer os.RemoveAll(testDir) - + parser := NewParser(runtime.GOOS) cases := []c{ - {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}}, - {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, RW: true, Propagation: DefaultPropagationMode}}, - {mount.Mount{Type: mount.TypeBind, Source: testDir + string(os.PathSeparator), Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}}, - {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath + string(os.PathSeparator), ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}}, - {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, - {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath + string(os.PathSeparator)}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: parser.DefaultPropagationMode()}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, RW: true, Propagation: parser.DefaultPropagationMode()}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir + string(os.PathSeparator), Target: testDestinationPath, ReadOnly: 
true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: parser.DefaultPropagationMode()}}, + {mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath + string(os.PathSeparator), ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: parser.DefaultPropagationMode()}}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: parser.DefaultCopyMode()}}, + {mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath + string(os.PathSeparator)}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: parser.DefaultCopyMode()}}, } for i, c := range cases { t.Logf("case %d", i) - mp, err := ParseMountSpec(c.input) + mp, err := parser.ParseMountSpec(c.input) if err != nil { - t.Fatal(err) + t.Error(err) } if c.expected.Type != mp.Type { - t.Fatalf("Expected mount types to match. Expected: '%s', Actual: '%s'", c.expected.Type, mp.Type) + t.Errorf("Expected mount types to match. Expected: '%s', Actual: '%s'", c.expected.Type, mp.Type) } if c.expected.Destination != mp.Destination { - t.Fatalf("Expected mount destination to match. Expected: '%s', Actual: '%s'", c.expected.Destination, mp.Destination) + t.Errorf("Expected mount destination to match. Expected: '%s', Actual: '%s'", c.expected.Destination, mp.Destination) } if c.expected.Source != mp.Source { - t.Fatalf("Expected mount source to match. Expected: '%s', Actual: '%s'", c.expected.Source, mp.Source) + t.Errorf("Expected mount source to match. Expected: '%s', Actual: '%s'", c.expected.Source, mp.Source) } if c.expected.RW != mp.RW { - t.Fatalf("Expected mount writable to match. Expected: '%v', Actual: '%v'", c.expected.RW, mp.RW) + t.Errorf("Expected mount writable to match. Expected: '%v', Actual: '%v'", c.expected.RW, mp.RW) } if c.expected.Propagation != mp.Propagation { - t.Fatalf("Expected mount propagation to match. Expected: '%v', Actual: '%s'", c.expected.Propagation, mp.Propagation) + t.Errorf("Expected mount propagation to match. Expected: '%v', Actual: '%s'", c.expected.Propagation, mp.Propagation) } if c.expected.Driver != mp.Driver { - t.Fatalf("Expected mount driver to match. Expected: '%v', Actual: '%s'", c.expected.Driver, mp.Driver) + t.Errorf("Expected mount driver to match. Expected: '%v', Actual: '%s'", c.expected.Driver, mp.Driver) } if c.expected.CopyData != mp.CopyData { - t.Fatalf("Expected mount copy data to match. Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData) + t.Errorf("Expected mount copy data to match. Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData) } } } diff --git a/components/engine/volume/volume_unix.go b/components/engine/volume/volume_unix.go index 31555a691a..0968fe37e1 100644 --- a/components/engine/volume/volume_unix.go +++ b/components/engine/volume/volume_unix.go @@ -4,153 +4,15 @@ package volume import ( "fmt" - "os" "path/filepath" "strings" - - mounttypes "github.com/docker/docker/api/types/mount" ) -var platformRawValidationOpts = []func(o *validateOpts){ - // need to make sure to not error out if the bind source does not exist on unix - // this is supported for historical reasons, the path will be automatically - // created later. 
- func(o *validateOpts) { o.skipBindSourceCheck = true }, -} - -// read-write modes -var rwModes = map[string]bool{ - "rw": true, - "ro": true, -} - -// label modes -var labelModes = map[string]bool{ - "Z": true, - "z": true, -} - -// consistency modes -var consistencyModes = map[mounttypes.Consistency]bool{ - mounttypes.ConsistencyFull: true, - mounttypes.ConsistencyCached: true, - mounttypes.ConsistencyDelegated: true, -} - -// BackwardsCompatible decides whether this mount point can be -// used in old versions of Docker or not. -// Only bind mounts and local volumes can be used in old versions of Docker. -func (m *MountPoint) BackwardsCompatible() bool { - return len(m.Source) > 0 || m.Driver == DefaultDriverName -} - -// HasResource checks whether the given absolute path for a container is in -// this mount point. If the relative path starts with `../` then the resource -// is outside of this mount point, but we can't simply check for this prefix -// because it misses `..` which is also outside of the mount, so check both. -func (m *MountPoint) HasResource(absolutePath string) bool { +func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool { relPath, err := filepath.Rel(m.Destination, absolutePath) return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) } -// IsVolumeNameValid checks a volume name in a platform specific manner. -func IsVolumeNameValid(name string) (bool, error) { - return true, nil -} - -// ValidMountMode will make sure the mount mode is valid. -// returns if it's a valid mount mode or not. -func ValidMountMode(mode string) bool { - if mode == "" { - return true - } - - rwModeCount := 0 - labelModeCount := 0 - propagationModeCount := 0 - copyModeCount := 0 - consistencyModeCount := 0 - - for _, o := range strings.Split(mode, ",") { - switch { - case rwModes[o]: - rwModeCount++ - case labelModes[o]: - labelModeCount++ - case propagationModes[mounttypes.Propagation(o)]: - propagationModeCount++ - case copyModeExists(o): - copyModeCount++ - case consistencyModes[mounttypes.Consistency(o)]: - consistencyModeCount++ - default: - return false - } - } - - // Only one string for each mode is allowed. - if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 || consistencyModeCount > 1 { - return false - } - return true -} - -// ReadWrite tells you if a mode string is a valid read-write mode or not. -// If there are no specifications w.r.t read write mode, then by default -// it returns true. -func ReadWrite(mode string) bool { - if !ValidMountMode(mode) { - return false - } - - for _, o := range strings.Split(mode, ",") { - if o == "ro" { - return false - } - } - return true -} - -func validateNotRoot(p string) error { - p = filepath.Clean(convertSlash(p)) - if p == "/" { - return fmt.Errorf("invalid specification: destination can't be '/'") - } - return nil -} - -func convertSlash(p string) string { - return p -} - -// isAbsPath reports whether the path is absolute. 
-func isAbsPath(p string) bool { - return filepath.IsAbs(p) -} - -func splitRawSpec(raw string) ([]string, error) { - if strings.Count(raw, ":") > 2 { - return nil, errInvalidSpec(raw) - } - - arr := strings.SplitN(raw, ":", 3) - if arr[0] == "" { - return nil, errInvalidSpec(raw) - } - return arr, nil -} - -func detectMountType(p string) mounttypes.Type { - if filepath.IsAbs(p) { - return mounttypes.TypeBind - } - return mounttypes.TypeVolume -} - -func clean(p string) string { - return filepath.Clean(p) -} - -func validateStat(fi os.FileInfo) error { - return nil +func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool { + return false } diff --git a/components/engine/volume/volume_unsupported.go b/components/engine/volume/volume_unsupported.go deleted file mode 100644 index ff9d6afa27..0000000000 --- a/components/engine/volume/volume_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package volume - -import ( - "fmt" - "runtime" - - mounttypes "github.com/docker/docker/api/types/mount" -) - -// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string -// for mount(2). -func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) { - return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) -} diff --git a/components/engine/volume/volume_windows.go b/components/engine/volume/volume_windows.go index 5bee223702..8ec1d6c801 100644 --- a/components/engine/volume/volume_windows.go +++ b/components/engine/volume/volume_windows.go @@ -1,213 +1,8 @@ package volume -import ( - "fmt" - "os" - "path/filepath" - "regexp" - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" -) - -// read-write modes -var rwModes = map[string]bool{ - "rw": true, -} - -// read-only modes -var roModes = map[string]bool{ - "ro": true, -} - -var platformRawValidationOpts = []func(*validateOpts){} - -const ( - // Spec should be in the format [source:]destination[:mode] - // - // Examples: c:\foo bar:d:rw - // c:\foo:d:\bar - // myname:d: - // d:\ - // - // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See - // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to - // test is https://regex-golang.appspot.com/assets/html/index.html - // - // Useful link for referencing named capturing groups: - // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex - // - // There are three match groups: source, destination and mode. - // - - // RXHostDir is the first option of a source - RXHostDir = `[a-z]:\\(?:[^\\/:*?"<>|\r\n]+\\?)*` - // RXName is the second option of a source - RXName = `[^\\/:*?"<>|\r\n]+` - // RXPipe is a named path pipe (starts with `\\.\pipe\`, possibly with / instead of \) - RXPipe = `[/\\]{2}.[/\\]pipe[/\\][^:*?"<>|\r\n]+` - // RXReservedNames are reserved names not possible on Windows - RXReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` - - // RXSource is the combined possibilities for a source - RXSource = `((?P<source>((` + RXHostDir + `)|(` + RXName + `)|(` + RXPipe + `))):)?` - - // Source. Can be either a host directory, a name, or omitted: - // HostDir: - // - Essentially using the folder solution from - // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html - // but adding case insensitivity.
- // - Must be an absolute path such as c:\path - // - Can include spaces such as `c:\program files` - // - And then followed by a colon which is not in the capture group - // - And can be optional - // Name: - // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) - // - And then followed by a colon which is not in the capture group - // - And can be optional - - // RXDestinationDir is the file path option for the mount destination - RXDestinationDir = `([a-z]):((?:\\[^\\/:*?"<>\r\n]+)*\\?)` - // RXDestination is the regex expression for the mount destination - RXDestination = `(?P<destination>(` + RXDestinationDir + `)|(` + RXPipe + `))` - // Destination (aka container path): - // - Variation on hostdir but can be a drive followed by colon as well - // - If a path, must be absolute. Can include spaces - // - Drive cannot be c: (explicitly checked in code, not RegEx) - - // RXMode is the regex expression for the mode of the mount - // Mode (optional): - // - Hopefully self explanatory in comparison to above regex's. - // - Colon is not in the capture group - RXMode = `(:(?P<mode>(?i)ro|rw))?` -) - -// BackwardsCompatible decides whether this mount point can be -// used in old versions of Docker or not. -// Windows volumes are never backwards compatible. -func (m *MountPoint) BackwardsCompatible() bool { +func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool { return false } - -func splitRawSpec(raw string) ([]string, error) { - specExp := regexp.MustCompile(`^` + RXSource + RXDestination + RXMode + `$`) - match := specExp.FindStringSubmatch(strings.ToLower(raw)) - - // Must have something back - if len(match) == 0 { - return nil, errInvalidSpec(raw) - } - - var split []string - matchgroups := make(map[string]string) - // Pull out the sub expressions from the named capture groups - for i, name := range specExp.SubexpNames() { - matchgroups[name] = strings.ToLower(match[i]) - } - if source, exists := matchgroups["source"]; exists { - if source != "" { - split = append(split, source) - } - } - if destination, exists := matchgroups["destination"]; exists { - if destination != "" { - split = append(split, destination) - } - } - if mode, exists := matchgroups["mode"]; exists { - if mode != "" { - split = append(split, mode) - } - } - // Fix #26329. If the destination appears to be a file, and the source is null, - // it may be because we've fallen through the possible naming regex and hit a - // situation where the user intention was to map a file into a container through - // a local volume, but this is not supported by the platform. - if matchgroups["source"] == "" && matchgroups["destination"] != "" { - validName, err := IsVolumeNameValid(matchgroups["destination"]) - if err != nil { - return nil, err - } - if !validName { - if fi, err := os.Stat(matchgroups["destination"]); err == nil { - if !fi.IsDir() { - return nil, fmt.Errorf("file '%s' cannot be mapped. Only directories can be mapped on this platform", matchgroups["destination"]) - } - } - } - } - return split, nil -} - -func detectMountType(p string) mounttypes.Type { - if strings.HasPrefix(filepath.FromSlash(p), `\\.\pipe\`) { - return mounttypes.TypeNamedPipe - } else if filepath.IsAbs(p) { - return mounttypes.TypeBind - } - return mounttypes.TypeVolume -} - -// IsVolumeNameValid checks a volume name in a platform specific manner.
-func IsVolumeNameValid(name string) (bool, error) { - nameExp := regexp.MustCompile(`^` + RXName + `$`) - if !nameExp.MatchString(name) { - return false, nil - } - nameExp = regexp.MustCompile(`^` + RXReservedNames + `$`) - if nameExp.MatchString(name) { - return false, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) - } - return true, nil -} - -// ValidMountMode will make sure the mount mode is valid. -// returns if it's a valid mount mode or not. -func ValidMountMode(mode string) bool { - if mode == "" { - return true - } - return roModes[strings.ToLower(mode)] || rwModes[strings.ToLower(mode)] -} - -// ReadWrite tells you if a mode string is a valid read-write mode or not. -func ReadWrite(mode string) bool { - return rwModes[strings.ToLower(mode)] || mode == "" -} - -func validateNotRoot(p string) error { - p = strings.ToLower(convertSlash(p)) - if p == "c:" || p == `c:\` { - return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) - } - return nil -} - -func convertSlash(p string) string { - return filepath.FromSlash(p) -} - -// isAbsPath returns whether a path is absolute for the purposes of mounting into a container -// (absolute paths, drive letter paths such as X:, and paths starting with `\\.\` to support named pipes). -func isAbsPath(p string) bool { - return filepath.IsAbs(p) || - strings.HasPrefix(p, `\\.\`) || - (len(p) == 2 && p[1] == ':' && ((p[0] >= 'a' && p[0] <= 'z') || (p[0] >= 'A' && p[0] <= 'Z'))) -} - -// Do not clean plain drive letters or paths starting with `\\.\`. -var cleanRegexp = regexp.MustCompile(`^([a-z]:|[/\\]{2}\.[/\\].*)$`) - -func clean(p string) string { - if match := cleanRegexp.MatchString(p); match { - return p - } - return filepath.Clean(p) -} - -func validateStat(fi os.FileInfo) error { - if !fi.IsDir() { - return fmt.Errorf("source path must be a directory") - } - return nil +func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool { + return false } diff --git a/components/engine/volume/windows_parser.go b/components/engine/volume/windows_parser.go new file mode 100644 index 0000000000..172610dbdd --- /dev/null +++ b/components/engine/volume/windows_parser.go @@ -0,0 +1,456 @@ +package volume + +import ( + "errors" + "fmt" + "os" + "regexp" + "runtime" + "strings" + + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/stringid" +) + +type windowsParser struct { +} + +const ( + // Spec should be in the format [source:]destination[:mode] + // + // Examples: c:\foo bar:d:rw + // c:\foo:d:\bar + // myname:d: + // d:\ + // + // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See + // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to + // test is https://regex-golang.appspot.com/assets/html/index.html + // + // Useful link for referencing named capturing groups: + // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex + // + // There are three match groups: source, destination and mode. 
+ // + + // rxHostDir is the first option of a source + rxHostDir = `(?:\\\\\?\\)?[a-z]:[\\/](?:[^\\/:*?"<>|\r\n]+[\\/]?)*` + // rxName is the second option of a source + rxName = `[^\\/:*?"<>|\r\n]+` + + // RXReservedNames are reserved names not possible on Windows + rxReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` + + // rxPipe is a named path pipe (starts with `\\.\pipe\`, possibly with / instead of \) + rxPipe = `[/\\]{2}.[/\\]pipe[/\\][^:*?"<>|\r\n]+` + // rxSource is the combined possibilities for a source + rxSource = `((?P<source>((` + rxHostDir + `)|(` + rxName + `)|(` + rxPipe + `))):)?` + + // Source. Can be either a host directory, a name, or omitted: + // HostDir: + // - Essentially using the folder solution from + // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html + // but adding case insensitivity. + // - Must be an absolute path such as c:\path + // - Can include spaces such as `c:\program files` + // - And then followed by a colon which is not in the capture group + // - And can be optional + // Name: + // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) + // - And then followed by a colon which is not in the capture group + // - And can be optional + + // rxDestination is the regex expression for the mount destination + rxDestination = `(?P<destination>((?:\\\\\?\\)?([a-z]):((?:[\\/][^\\/:*?"<>\r\n]+)*[\\/]?))|(` + rxPipe + `))` + + rxLCOWDestination = `(?P<destination>/(?:[^\\/:*?"<>\r\n]+[/]?)*)` + // Destination (aka container path): + // - Variation on hostdir but can be a drive followed by colon as well + // - If a path, must be absolute. Can include spaces + // - Drive cannot be c: (explicitly checked in code, not RegEx) + + // rxMode is the regex expression for the mode of the mount + // Mode (optional): + // - Hopefully self explanatory in comparison to above regex's. + // - Colon is not in the capture group + rxMode = `(:(?P<mode>(?i)ro|rw))?` +) + +type mountValidator func(mnt *mount.Mount) error + +func windowsSplitRawSpec(raw, destRegex string) ([]string, error) { + specExp := regexp.MustCompile(`^` + rxSource + destRegex + rxMode + `$`) + match := specExp.FindStringSubmatch(strings.ToLower(raw)) + + // Must have something back + if len(match) == 0 { + return nil, errInvalidSpec(raw) + } + + var split []string + matchgroups := make(map[string]string) + // Pull out the sub expressions from the named capture groups + for i, name := range specExp.SubexpNames() { + matchgroups[name] = strings.ToLower(match[i]) + } + if source, exists := matchgroups["source"]; exists { + if source != "" { + split = append(split, source) + } + } + if destination, exists := matchgroups["destination"]; exists { + if destination != "" { + split = append(split, destination) + } + } + if mode, exists := matchgroups["mode"]; exists { + if mode != "" { + split = append(split, mode) + } + } + // Fix #26329. If the destination appears to be a file, and the source is null, + // it may be because we've fallen through the possible naming regex and hit a + // situation where the user intention was to map a file into a container through + // a local volume, but this is not supported by the platform.
+ if matchgroups["source"] == "" && matchgroups["destination"] != "" { + volExp := regexp.MustCompile(`^` + rxName + `$`) + reservedNameExp := regexp.MustCompile(`^` + rxReservedNames + `$`) + + if volExp.MatchString(matchgroups["destination"]) { + if reservedNameExp.MatchString(matchgroups["destination"]) { + return nil, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", matchgroups["destination"]) + } + } else { + + exists, isDir, _ := currentFileInfoProvider.fileInfo(matchgroups["destination"]) + if exists && !isDir { + return nil, fmt.Errorf("file '%s' cannot be mapped. Only directories can be mapped on this platform", matchgroups["destination"]) + + } + } + } + return split, nil +} + +func windowsValidMountMode(mode string) bool { + if mode == "" { + return true + } + return rwModes[strings.ToLower(mode)] +} +func windowsValidateNotRoot(p string) error { + p = strings.ToLower(strings.Replace(p, `/`, `\`, -1)) + if p == "c:" || p == `c:\` { + return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) + } + return nil +} + +var windowsSpecificValidators mountValidator = func(mnt *mount.Mount) error { + return windowsValidateNotRoot(mnt.Target) +} + +func windowsValidateRegex(p, r string) error { + if regexp.MustCompile(`^` + r + `$`).MatchString(strings.ToLower(p)) { + return nil + } + return fmt.Errorf("invalid mount path: '%s'", p) +} +func windowsValidateAbsolute(p string) error { + if err := windowsValidateRegex(p, rxDestination); err != nil { + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) + } + return nil +} + +func windowsDetectMountType(p string) mount.Type { + if strings.HasPrefix(p, `\\.\pipe\`) { + return mount.TypeNamedPipe + } else if regexp.MustCompile(`^` + rxHostDir + `$`).MatchString(p) { + return mount.TypeBind + } else { + return mount.TypeVolume + } +} + +func (p *windowsParser) ReadWrite(mode string) bool { + return strings.ToLower(mode) != "ro" +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. 
+func (p *windowsParser) ValidateVolumeName(name string) error { + nameExp := regexp.MustCompile(`^` + rxName + `$`) + if !nameExp.MatchString(name) { + return errors.New("invalid volume name") + } + nameExp = regexp.MustCompile(`^` + rxReservedNames + `$`) + if nameExp.MatchString(name) { + return fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) + } + return nil +} +func (p *windowsParser) validateMountConfig(mnt *mount.Mount) error { + return p.validateMountConfigReg(mnt, rxDestination, windowsSpecificValidators) +} + +type fileInfoProvider interface { + fileInfo(path string) (exist, isDir bool, err error) +} + +type defaultFileInfoProvider struct { +} + +func (defaultFileInfoProvider) fileInfo(path string) (exist, isDir bool, err error) { + fi, err := os.Stat(path) + if err != nil { + if !os.IsNotExist(err) { + return false, false, err + } + return false, false, nil + } + return true, fi.IsDir(), nil +} + +var currentFileInfoProvider fileInfoProvider = defaultFileInfoProvider{} + +func (p *windowsParser) validateMountConfigReg(mnt *mount.Mount, destRegex string, additionalValidators ...mountValidator) error { + + for _, v := range additionalValidators { + if err := v(mnt); err != nil { + return &errMountConfig{mnt, err} + } + } + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := windowsValidateRegex(mnt.Target, destRegex); err != nil { + return &errMountConfig{mnt, err} + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := windowsValidateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + exists, isdir, err := currentFileInfoProvider.fileInfo(mnt.Source) + if err != nil { + return &errMountConfig{mnt, err} + } + if !exists { + return &errMountConfig{mnt, errBindNotExist} + } + if !isdir { + return &errMountConfig{mnt, fmt.Errorf("source path must be a directory")} + } + + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + + if len(mnt.Source) != 0 { + if err := p.ValidateVolumeName(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + } + case mount.TypeNamedPipe: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if mnt.ReadOnly { + return &errMountConfig{mnt, errExtraField("ReadOnly")} + } + + if windowsDetectMountType(mnt.Source) != mount.TypeNamedPipe { + return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Source)} + } + + if windowsDetectMountType(mnt.Target) != mount.TypeNamedPipe { + return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Target)} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} +func (p *windowsParser) ParseMountRaw(raw, volumeDriver string) 
(*MountPoint, error) { + return p.parseMountRaw(raw, volumeDriver, rxDestination, true, windowsSpecificValidators) +} + +func (p *windowsParser) parseMountRaw(raw, volumeDriver, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { + arr, err := windowsSplitRawSpec(raw, destRegex) + if err != nil { + return nil, err + } + + var spec mount.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if windowsValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. e.g. /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = strings.Replace(arr[0], `/`, `\`, -1) + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = strings.Replace(arr[0], `/`, `\`, -1) + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + if convertTargetToBackslash { + spec.Target = strings.Replace(spec.Target, `/`, `\`, -1) + } + + if !windowsValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + spec.Type = windowsDetectMountType(spec.Source) + spec.ReadOnly = !p.ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mount.TypeVolume { + spec.VolumeOptions = &mount.VolumeOptions{ + DriverConfig: &mount.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mount.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + + mp, err := p.parseMountSpec(spec, destRegex, convertTargetToBackslash, additionalValidators...) 
+ if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} + +func (p *windowsParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, rxDestination, true, windowsSpecificValidators) +} +func (p *windowsParser) parseMountSpec(cfg mount.Mount, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { + if err := p.validateMountConfigReg(&cfg, destRegex, additionalValidators...); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: cfg.Target, + Type: cfg.Type, + Spec: cfg, + } + if convertTargetToBackslash { + mp.Destination = strings.Replace(cfg.Target, `/`, `\`, -1) + } + + switch cfg.Type { + case mount.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateNonCryptoID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = p.DefaultCopyMode() + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mount.TypeBind: + mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) + case mount.TypeNamedPipe: + mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) + } + // cleanup trailing `\` except for paths like `c:\` + if len(mp.Source) > 3 && mp.Source[len(mp.Source)-1] == '\\' { + mp.Source = mp.Source[:len(mp.Source)-1] + } + if len(mp.Destination) > 3 && mp.Destination[len(mp.Destination)-1] == '\\' { + mp.Destination = mp.Destination[:len(mp.Destination)-1] + } + return mp, nil +} + +func (p *windowsParser) ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !windowsValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +func (p *windowsParser) DefaultPropagationMode() mount.Propagation { + return mount.Propagation("") +} + +func (p *windowsParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { + return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) +} +func (p *windowsParser) DefaultCopyMode() bool { + return false +} +func (p *windowsParser) IsBackwardCompatible(m *MountPoint) bool { + return false +} + +func (p *windowsParser) ValidateTmpfsMountDestination(dest string) error { + return errors.New("Platform does not support tmpfs") +}
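
Note: the following is a minimal, illustrative sketch (not part of this patch) of how the new windowsParser entry points above might be exercised from a test inside the volume package; the parser type and currentFileInfoProvider are unexported, so it only compiles alongside the package. The fakeFileInfoProvider stub and the test name are assumptions made for the example. Swapping the package-level currentFileInfoProvider is what lets bind-source existence checks pass on any host, which also allows the Windows parsing rules to be exercised off-Windows, since windows_parser.go appears to carry no build constraint in this patch.

package volume

import (
	"testing"

	"github.com/docker/docker/api/types/mount"
)

// fakeFileInfoProvider is an illustrative stub satisfying fileInfoProvider so
// bind-source existence checks succeed regardless of the host filesystem.
type fakeFileInfoProvider struct{}

func (fakeFileInfoProvider) fileInfo(path string) (exist, isDir bool, err error) {
	return true, true, nil
}

func TestWindowsParserSketch(t *testing.T) {
	previous := currentFileInfoProvider
	currentFileInfoProvider = fakeFileInfoProvider{}
	defer func() { currentFileInfoProvider = previous }()

	p := &windowsParser{}

	// Bind mount: host directory c:\foo mounted read-write at d: in the container.
	mp, err := p.ParseMountRaw(`c:\foo:d:rw`, "")
	if err != nil {
		t.Fatal(err)
	}
	if mp.Type != mount.TypeBind || mp.Source != `c:\foo` || mp.Destination != `d:` || !mp.RW {
		t.Errorf("unexpected bind mount point: %+v", mp)
	}

	// Named volume: "data" mounted at d:; no mode given, so it defaults to read-write.
	mp, err = p.ParseMountRaw(`data:d:`, "")
	if err != nil {
		t.Fatal(err)
	}
	if mp.Type != mount.TypeVolume || mp.Name != "data" || mp.Destination != `d:` {
		t.Errorf("unexpected volume mount point: %+v", mp)
	}
}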