Merge component 'engine' from git@github.com:moby/moby master

This commit is contained in:
Andrew Hsu
2017-07-14 21:15:01 +00:00
480 changed files with 44631 additions and 34903 deletions

View File

@ -190,7 +190,7 @@ be found.
* Update runc to 54296cf40ad8143b62dbcaa1d90e520a2136ddfe [#31666](https://github.com/docker/docker/pull/31666)
* Ignore cgroup2 mountpoints [opencontainers/runc#1266](https://github.com/opencontainers/runc/pull/1266)
* Update containerd to 4ab9917febca54791c5f071a9d1f404867857fcc [#31662](https://github.com/docker/docker/pull/31662) [#31852](https://github.com/docker/docker/pull/31852)
* Register healtcheck service before calling restore() [docker/containerd#609](https://github.com/docker/containerd/pull/609)
* Register healthcheck service before calling restore() [docker/containerd#609](https://github.com/docker/containerd/pull/609)
* Fix `docker exec` not working after unattended upgrades that reload apparmor profiles [#31773](https://github.com/docker/docker/pull/31773)
* Fix unmounting layer without merge dir with Overlay2 [#31069](https://github.com/docker/docker/pull/31069)
* Do not ignore "volume in use" errors when force-delete [#31450](https://github.com/docker/docker/pull/31450)
@ -1087,12 +1087,12 @@ installing docker, please make sure to update them accordingly.
+ Add security options to `docker info` output [#21172](https://github.com/docker/docker/pull/21172) [#23520](https://github.com/docker/docker/pull/23520)
+ Add insecure registries to `docker info` output [#20410](https://github.com/docker/docker/pull/20410)
+ Extend Docker authorization with TLS user information [#21556](https://github.com/docker/docker/pull/21556)
+ devicemapper: expose Mininum Thin Pool Free Space through `docker info` [#21945](https://github.com/docker/docker/pull/21945)
+ devicemapper: expose Minimum Thin Pool Free Space through `docker info` [#21945](https://github.com/docker/docker/pull/21945)
* API now returns a JSON object when an error occurs making it more consistent [#22880](https://github.com/docker/docker/pull/22880)
- Prevent `docker run -i --restart` from hanging on exit [#22777](https://github.com/docker/docker/pull/22777)
- Fix API/CLI discrepancy on hostname validation [#21641](https://github.com/docker/docker/pull/21641)
- Fix discrepancy in the format of sizes in `stats` from HumanSize to BytesSize [#21773](https://github.com/docker/docker/pull/21773)
- authz: when request is denied return forbbiden exit code (403) [#22448](https://github.com/docker/docker/pull/22448)
- authz: when request is denied return forbidden exit code (403) [#22448](https://github.com/docker/docker/pull/22448)
- Windows: fix tty-related displaying issues [#23878](https://github.com/docker/docker/pull/23878)
### Runtime
@ -1887,7 +1887,7 @@ by another client (#15489)
#### Remote API
- Fix unmarshalling of Command and Entrypoint
- Fix unmarshaling of Command and Entrypoint
- Set limit for minimum client version supported
- Validate port specification
- Return proper errors when attach/reattach fail
@ -2572,7 +2572,7 @@ With the ongoing changes to the networking and execution subsystems of docker te
- Fix ADD caching issue with . prefixed path
- Fix docker build on devicemapper by reverting sparse file tar option
- Fix issue with file caching and prevent wrong cache hit
* Use same error handling while unmarshalling CMD and ENTRYPOINT
* Use same error handling while unmarshaling CMD and ENTRYPOINT
#### Documentation

View File

@ -217,7 +217,8 @@ COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
ENV PATH=/usr/local/cli:$PATH
# Activate bash completion if mounted with DOCKER_BASH_COMPLETION_PATH
# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
# Wrap all commands in the "docker-in-docker" script to allow nested containers

View File

@ -93,7 +93,7 @@ RUN set -x \
&& rm -rf "$SECCOMP_PATH"
# Install Go
# We don't have official binary golang 1.7.5 tarballs for ARM64, eigher for Go or
# We don't have official binary golang 1.7.5 tarballs for ARM64, either for Go or
# bootstrap, so we use golang-go (1.6) as bootstrap to build Go from source code.
# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because
# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8.

View File

@ -51,7 +51,7 @@ Moby is NOT recommended for:
- Application developers looking for an easy way to run their applications in containers. We recommend Docker CE instead.
- Enterprise IT and development teams looking for a ready-to-use, commercially supported container platform. We recommend Docker EE instead.
- Anyone curious about containers and looking for an easy way to learn. We recommend the docker.com website instead.
- Anyone curious about containers and looking for an easy way to learn. We recommend the [docker.com](https://www.docker.com/) website instead.
# Transitioning to Moby

View File

@ -70,7 +70,7 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
// PruneCache removes all cached build sources
func (b *Backend) PruneCache(ctx context.Context) (*types.BuildCachePruneReport, error) {
size, err := b.fsCache.Prune()
size, err := b.fsCache.Prune(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to prune build cache")
}

View File

@ -41,7 +41,7 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
var postForm map[string]interface{}
if err := json.Unmarshal(b, &postForm); err == nil {
maskSecretKeys(postForm)
maskSecretKeys(postForm, r.RequestURI)
formStr, errMarshal := json.Marshal(postForm)
if errMarshal == nil {
logrus.Debugf("form data: %s", string(formStr))
@ -54,13 +54,22 @@ func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWri
}
}
func maskSecretKeys(inp interface{}) {
func maskSecretKeys(inp interface{}, path string) {
// Remove any query string from the path
idx := strings.Index(path, "?")
if idx != -1 {
path = path[:idx]
}
// Remove trailing / characters
path = strings.TrimRight(path, "/")
if arr, ok := inp.([]interface{}); ok {
for _, f := range arr {
maskSecretKeys(f)
maskSecretKeys(f, path)
}
return
}
if form, ok := inp.(map[string]interface{}); ok {
loop0:
for k, v := range form {
@ -70,7 +79,16 @@ func maskSecretKeys(inp interface{}) {
continue loop0
}
}
maskSecretKeys(v)
maskSecretKeys(v, path)
}
// Route-specific redactions
if strings.HasSuffix(path, "/secrets/create") {
for k := range form {
if k == "Data" {
form[k] = "*****"
}
}
}
}
}

View File

@ -0,0 +1,58 @@
package middleware
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestMaskSecretKeys verifies that maskSecretKeys redacts sensitive values in
// a decoded request form: the "Data" field when the request path is the
// secrets-create endpoint (tolerating a version prefix, trailing slashes, and
// a query string), and the well-known secret-bearing keys (password, secret,
// jointoken, unlockkey, signingcakey) on any other path, including inside
// nested maps. The input map is mutated in place.
func TestMaskSecretKeys(t *testing.T) {
	tests := []struct {
		path     string   // request path passed to maskSecretKeys
		input    map[string]interface{} // decoded form before masking (mutated)
		expected map[string]interface{} // form after masking
	}{
		{
			path:     "/v1.30/secrets/create",
			input:    map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
			expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
		},
		{
			// Trailing slashes must not defeat the path match.
			path:     "/v1.30/secrets/create//",
			input:    map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
			expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
		},
		{
			// A query string must be stripped before matching the path.
			path:     "/secrets/create?key=val",
			input:    map[string]interface{}{"Data": "foo", "Name": "name", "Labels": map[string]interface{}{}},
			expected: map[string]interface{}{"Data": "*****", "Name": "name", "Labels": map[string]interface{}{}},
		},
		{
			// On non-secrets paths, only the known key names are masked,
			// recursively through nested maps.
			path: "/v1.30/some/other/path",
			input: map[string]interface{}{
				"password": "pass",
				"other": map[string]interface{}{
					"secret":       "secret",
					"jointoken":    "jointoken",
					"unlockkey":    "unlockkey",
					"signingcakey": "signingcakey",
				},
			},
			expected: map[string]interface{}{
				"password": "*****",
				"other": map[string]interface{}{
					"secret":       "*****",
					"jointoken":    "*****",
					"unlockkey":    "*****",
					"signingcakey": "*****",
				},
			},
		},
	}

	for _, testcase := range tests {
		maskSecretKeys(testcase.input, testcase.path)
		assert.Equal(t, testcase.expected, testcase.input)
	}
}

View File

@ -102,7 +102,7 @@ func (s *containerRouter) getContainersLogs(ctx context.Context, w http.Response
}
// doesn't matter what version the client is on, we're using this internally only
// also do we need size? i'm thinkin no we don't
// also do we need size? i'm thinking no we don't
raw, err := s.backend.ContainerInspect(containerName, false, api.DefaultVersion)
if err != nil {
return err

View File

@ -7,6 +7,7 @@ import (
"github.com/docker/distribution/reference"
enginetypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/plugin"
"golang.org/x/net/context"
)
@ -19,7 +20,7 @@ type Backend interface {
Remove(name string, config *enginetypes.PluginRmConfig) error
Set(name string, args []string) error
Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error)
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error
Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error

View File

@ -44,7 +44,7 @@ func (sr *swarmRouter) swarmLogs(ctx context.Context, w http.ResponseWriter, r *
// maybe should return some context with this error?
return err
}
tty = s.Spec.TaskTemplate.ContainerSpec.TTY || tty
tty = (s.Spec.TaskTemplate.ContainerSpec != nil && s.Spec.TaskTemplate.ContainerSpec.TTY) || tty
}
for _, task := range selector.Tasks {
t, err := sr.backend.GetTask(task)

View File

@ -1637,7 +1637,7 @@ definitions:
may not be applied if the version number has changed from the last read. In other words,
if two update requests specify the same base version, only one of the requests can succeed.
As a result, two separate update requests that happen at the same time will not
unintentially overwrite each other.
unintentionally overwrite each other.
type: "object"
properties:
Index:
@ -1975,11 +1975,39 @@ definitions:
description: "User modifiable task configuration."
type: "object"
properties:
PluginSpec:
type: "object"
description: "Invalid when specified with `ContainerSpec`."
properties:
Name:
description: "The name or 'alias' to use for the plugin."
type: "string"
Remote:
description: "The plugin image reference to use."
type: "string"
Disabled:
description: "Disable the plugin once scheduled."
type: "boolean"
PluginPrivilege:
type: "array"
items:
description: "Describes a permission accepted by the user upon installing the plugin."
type: "object"
properties:
Name:
type: "string"
Description:
type: "string"
Value:
type: "array"
items:
type: "string"
ContainerSpec:
type: "object"
description: "Invalid when specified with `PluginSpec`."
properties:
Image:
description: "The image name to use for the container."
description: "The image name to use for the container"
type: "string"
Labels:
description: "User-defined key/value data."
@ -5634,16 +5662,24 @@ paths:
Various objects within Docker report events when something happens to them.
Containers report these events: `attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, health_status, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update`
Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update`
Images report these events: `delete, import, load, pull, push, save, tag, untag`
Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, and `untag`
Volumes report these events: `create, mount, unmount, destroy`
Volumes report these events: `create`, `mount`, `unmount`, and `destroy`
Networks report these events: `create, connect, disconnect, destroy`
Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, and `remove`
The Docker daemon reports these events: `reload`
Services report these events: `create`, `update`, and `remove`
Nodes report these events: `create`, `update`, and `remove`
Secrets report these events: `create`, `update`, and `remove`
Configs report these events: `create`, `update`, and `remove`
operationId: "SystemEvents"
produces:
- "application/json"
@ -5717,7 +5753,8 @@ paths:
- `label=<string>` image or container label
- `network=<string>` network name or ID
- `plugin`=<string> plugin name or ID
- `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, or `daemon`
- `scope`<string> local or swarm
- `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service` or `secret`
- `volume=<string>` volume name or ID
type: "string"
tags: ["System"]
@ -7394,6 +7431,16 @@ paths:
AdvertiseAddr:
description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible."
type: "string"
DataPathAddr:
description: |
Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`,
or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`
is used.
The `DataPathAddr` specifies the address that global scope network drivers will publish towards other
nodes in order to reach the containers running on this node. Using this parameter it is possible to
separate the container data traffic from the management traffic of the cluster.
type: "string"
ForceNewCluster:
description: "Force creation of a new swarm."
type: "boolean"
@ -7442,6 +7489,17 @@ paths:
type: "string"
AdvertiseAddr:
description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible."
type: "string"
DataPathAddr:
description: |
Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`,
or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`
is used.
The `DataPathAddr` specifies the address that global scope network drivers will publish towards other
nodes in order to reach the containers running on this node. Using this parameter it is possible to
separate the container data traffic from the management traffic of the cluster.
type: "string"
RemoteAddrs:
description: "Addresses of manager nodes already participating in the swarm."

View File

@ -19,6 +19,8 @@ const (
NodeEventType = "node"
// SecretEventType is the event type that secrets generate
SecretEventType = "secret"
// ConfigEventType is the event type that configs generate
ConfigEventType = "config"
)
// Actor describes something that generates events,

View File

@ -90,15 +90,15 @@ func TestFromParam(t *testing.T) {
`{"key": "value"}`,
}
valid := map[*Args][]string{
&Args{fields: map[string]map[string]bool{"key": {"value": true}}}: {
{fields: map[string]map[string]bool{"key": {"value": true}}}: {
`{"key": ["value"]}`,
`{"key": {"value": true}}`,
},
&Args{fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: {
{fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: {
`{"key": ["value1", "value2"]}`,
`{"key": {"value1": true, "value2": true}}`,
},
&Args{fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: {
{fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: {
`{"key1": ["value1"], "key2": ["value2"]}`,
`{"key1": {"value1": true}, "key2": {"value2": true}}`,
},
@ -172,14 +172,14 @@ func TestArgsMatchKVList(t *testing.T) {
}
matches := map[*Args]string{
&Args{}: "field",
&Args{map[string]map[string]bool{
"created": map[string]bool{"today": true},
"labels": map[string]bool{"key1": true}},
{}: "field",
{map[string]map[string]bool{
"created": {"today": true},
"labels": {"key1": true}},
}: "labels",
&Args{map[string]map[string]bool{
"created": map[string]bool{"today": true},
"labels": map[string]bool{"key1=value1": true}},
{map[string]map[string]bool{
"created": {"today": true},
"labels": {"key1=value1": true}},
}: "labels",
}
@ -190,16 +190,16 @@ func TestArgsMatchKVList(t *testing.T) {
}
differs := map[*Args]string{
&Args{map[string]map[string]bool{
"created": map[string]bool{"today": true}},
{map[string]map[string]bool{
"created": {"today": true}},
}: "created",
&Args{map[string]map[string]bool{
"created": map[string]bool{"today": true},
"labels": map[string]bool{"key4": true}},
{map[string]map[string]bool{
"created": {"today": true},
"labels": {"key4": true}},
}: "labels",
&Args{map[string]map[string]bool{
"created": map[string]bool{"today": true},
"labels": map[string]bool{"key1=value3": true}},
{map[string]map[string]bool{
"created": {"today": true},
"labels": {"key1=value3": true}},
}: "labels",
}
@ -214,21 +214,21 @@ func TestArgsMatch(t *testing.T) {
source := "today"
matches := map[*Args]string{
&Args{}: "field",
&Args{map[string]map[string]bool{
"created": map[string]bool{"today": true}},
{}: "field",
{map[string]map[string]bool{
"created": {"today": true}},
}: "today",
&Args{map[string]map[string]bool{
"created": map[string]bool{"to*": true}},
{map[string]map[string]bool{
"created": {"to*": true}},
}: "created",
&Args{map[string]map[string]bool{
"created": map[string]bool{"to(.*)": true}},
{map[string]map[string]bool{
"created": {"to(.*)": true}},
}: "created",
&Args{map[string]map[string]bool{
"created": map[string]bool{"tod": true}},
{map[string]map[string]bool{
"created": {"tod": true}},
}: "created",
&Args{map[string]map[string]bool{
"created": map[string]bool{"anyting": true, "to*": true}},
{map[string]map[string]bool{
"created": {"anything": true, "to*": true}},
}: "created",
}
@ -239,21 +239,21 @@ func TestArgsMatch(t *testing.T) {
}
differs := map[*Args]string{
&Args{map[string]map[string]bool{
"created": map[string]bool{"tomorrow": true}},
{map[string]map[string]bool{
"created": {"tomorrow": true}},
}: "created",
&Args{map[string]map[string]bool{
"created": map[string]bool{"to(day": true}},
{map[string]map[string]bool{
"created": {"to(day": true}},
}: "created",
&Args{map[string]map[string]bool{
"created": map[string]bool{"tom(.*)": true}},
{map[string]map[string]bool{
"created": {"tom(.*)": true}},
}: "created",
&Args{map[string]map[string]bool{
"created": map[string]bool{"tom": true}},
{map[string]map[string]bool{
"created": {"tom": true}},
}: "created",
&Args{map[string]map[string]bool{
"created": map[string]bool{"today1": true},
"labels": map[string]bool{"today": true}},
{map[string]map[string]bool{
"created": {"today1": true},
"labels": {"today": true}},
}: "created",
}

View File

@ -0,0 +1,3 @@
//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto
package runtime

View File

@ -0,0 +1,712 @@
// Code generated by protoc-gen-gogo.
// source: plugin.proto
// DO NOT EDIT!
/*
Package runtime is a generated protocol buffer package.
It is generated from these files:
plugin.proto
It has these top-level messages:
PluginSpec
PluginPrivilege
*/
package runtime
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// PluginSpec defines the base payload which clients can specify for creating
// a service with the plugin runtime.
type PluginSpec struct {
	// Name is the name or "alias" to use for the plugin.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Remote is the plugin image reference to pull/run.
	Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"`
	// Privileges lists the permissions the user accepted when installing the plugin.
	Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"`
	// Disabled leaves the plugin disabled once scheduled.
	Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
}
// Reset restores m to its zero value (part of the proto.Message interface).
func (m *PluginSpec) Reset() { *m = PluginSpec{} }

// String renders m in the compact protobuf text format.
func (m *PluginSpec) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags PluginSpec as a protobuf message type.
func (*PluginSpec) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto for plugin.proto and
// this message's index path within it.
func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} }
// GetName returns the plugin name; safe to call on a nil receiver
// (nil-safe getters are the generated gogo/protobuf convention).
func (m *PluginSpec) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

// GetRemote returns the plugin image reference; nil-safe.
func (m *PluginSpec) GetRemote() string {
	if m != nil {
		return m.Remote
	}
	return ""
}

// GetPrivileges returns the accepted plugin privileges; nil-safe.
func (m *PluginSpec) GetPrivileges() []*PluginPrivilege {
	if m != nil {
		return m.Privileges
	}
	return nil
}

// GetDisabled reports whether the plugin should stay disabled; nil-safe.
func (m *PluginSpec) GetDisabled() bool {
	if m != nil {
		return m.Disabled
	}
	return false
}
// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
	// Name identifies the privilege (e.g. a capability or mount).
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Description is the human-readable explanation shown to the user.
	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
	// Value holds the concrete values granted under this privilege.
	Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"`
}
// Reset restores m to its zero value (part of the proto.Message interface).
func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} }

// String renders m in the compact protobuf text format.
func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags PluginPrivilege as a protobuf message type.
func (*PluginPrivilege) ProtoMessage() {}

// Descriptor returns the gzipped FileDescriptorProto for plugin.proto and
// this message's index path within it.
func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} }
// GetName returns the privilege name; safe to call on a nil receiver.
func (m *PluginPrivilege) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

// GetDescription returns the human-readable description; nil-safe.
func (m *PluginPrivilege) GetDescription() string {
	if m != nil {
		return m.Description
	}
	return ""
}

// GetValue returns the granted values; nil-safe.
func (m *PluginPrivilege) GetValue() []string {
	if m != nil {
		return m.Value
	}
	return nil
}
// init registers both message types with the gogo/protobuf runtime so they
// can be resolved by their proto names.
func init() {
	proto.RegisterType((*PluginSpec)(nil), "PluginSpec")
	proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege")
}
// Marshal serializes m into a freshly allocated protobuf wire-format buffer,
// sized exactly by Size().
func (m *PluginSpec) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m in protobuf wire format into dAtA, which the caller must
// have sized to at least m.Size() bytes, and returns the number of bytes
// written. Zero-valued fields are omitted (proto3 semantics).
func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Name) > 0 {
		// key 0x0a = field 1, wire type 2 (length-delimited).
		dAtA[i] = 0xa
		i++
		i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
		i += copy(dAtA[i:], m.Name)
	}
	if len(m.Remote) > 0 {
		// key 0x12 = field 2, wire type 2.
		dAtA[i] = 0x12
		i++
		i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote)))
		i += copy(dAtA[i:], m.Remote)
	}
	if len(m.Privileges) > 0 {
		for _, msg := range m.Privileges {
			// key 0x1a = field 3, wire type 2; each element is a nested message.
			dAtA[i] = 0x1a
			i++
			i = encodeVarintPlugin(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	if m.Disabled {
		// key 0x20 = field 4, wire type 0 (varint); bool encoded as 0/1.
		dAtA[i] = 0x20
		i++
		if m.Disabled {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	return i, nil
}
// Marshal serializes m into a freshly allocated protobuf wire-format buffer,
// sized exactly by Size().
func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo writes m in protobuf wire format into dAtA (caller-sized to at
// least m.Size() bytes) and returns the number of bytes written.
func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Name) > 0 {
		// key 0x0a = field 1, wire type 2 (length-delimited).
		dAtA[i] = 0xa
		i++
		i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
		i += copy(dAtA[i:], m.Name)
	}
	if len(m.Description) > 0 {
		// key 0x12 = field 2, wire type 2.
		dAtA[i] = 0x12
		i++
		i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description)))
		i += copy(dAtA[i:], m.Description)
	}
	if len(m.Value) > 0 {
		for _, s := range m.Value {
			// key 0x1a = field 3, wire type 2; the length is varint-encoded
			// inline here rather than via encodeVarintPlugin.
			dAtA[i] = 0x1a
			i++
			l = len(s)
			for l >= 1<<7 {
				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
				l >>= 7
				i++
			}
			dAtA[i] = uint8(l)
			i++
			i += copy(dAtA[i:], s)
		}
	}
	return i, nil
}
// encodeFixed64Plugin writes v into dAtA at offset as 8 little-endian bytes
// (protobuf fixed64 wire encoding) and returns the offset just past them.
// The caller guarantees dAtA has room for 8 bytes at offset.
func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int {
	for b := 0; b < 8; b++ {
		dAtA[offset+b] = uint8(v >> (8 * uint(b)))
	}
	return offset + 8
}
// encodeFixed32Plugin writes v into dAtA at offset as 4 little-endian bytes
// (protobuf fixed32 wire encoding) and returns the offset just past them.
// The caller guarantees dAtA has room for 4 bytes at offset.
func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int {
	for b := 0; b < 4; b++ {
		dAtA[offset+b] = uint8(v >> (8 * uint(b)))
	}
	return offset + 4
}
// encodeVarintPlugin encodes v as a protobuf base-128 varint into dAtA
// starting at offset and returns the offset just past the last byte written.
// The caller guarantees dAtA has room (at most 10 bytes for a uint64).
func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int {
	for {
		low := uint8(v & 0x7f)
		v >>= 7
		if v == 0 {
			// Final byte: continuation bit clear.
			dAtA[offset] = low
			return offset + 1
		}
		dAtA[offset] = low | 0x80
		offset++
	}
}
// Size returns the exact number of wire-format bytes Marshal will produce
// for m. Each present field costs 1 key byte + length-varint + payload;
// the Disabled bool costs a fixed 2 bytes (key + value) when true.
func (m *PluginSpec) Size() (n int) {
	var l int
	_ = l
	l = len(m.Name)
	if l > 0 {
		n += 1 + l + sovPlugin(uint64(l))
	}
	l = len(m.Remote)
	if l > 0 {
		n += 1 + l + sovPlugin(uint64(l))
	}
	if len(m.Privileges) > 0 {
		for _, e := range m.Privileges {
			l = e.Size()
			n += 1 + l + sovPlugin(uint64(l))
		}
	}
	if m.Disabled {
		n += 2
	}
	return n
}
// Size returns the exact number of wire-format bytes Marshal will produce
// for m. Each present field costs 1 key byte + length-varint + payload.
func (m *PluginPrivilege) Size() (n int) {
	var l int
	_ = l
	l = len(m.Name)
	if l > 0 {
		n += 1 + l + sovPlugin(uint64(l))
	}
	l = len(m.Description)
	if l > 0 {
		n += 1 + l + sovPlugin(uint64(l))
	}
	if len(m.Value) > 0 {
		for _, s := range m.Value {
			l = len(s)
			n += 1 + l + sovPlugin(uint64(l))
		}
	}
	return n
}
// sovPlugin returns the number of bytes needed to encode x as a
// base-128 varint (1 byte per 7 bits, minimum 1).
func sovPlugin(x uint64) (n int) {
	n = 1
	for x >>= 7; x != 0; x >>= 7 {
		n++
	}
	return n
}
// sozPlugin returns the varint-encoded size of x after zig-zag mapping
// (protobuf sint64): small-magnitude negatives encode as small values.
func sozPlugin(x uint64) (n int) {
	zz := (x << 1) ^ uint64(int64(x)>>63)
	n = 1
	for zz >>= 7; zz != 0; zz >>= 7 {
		n++
	}
	return n
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// appending to (not resetting) repeated fields. Generated reflection-free
// decoder: each iteration reads one key varint, dispatches on field number,
// and either decodes a known field or skips an unknown one.
func (m *PluginSpec) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPlugin
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Name (string, field 1, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPlugin
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPlugin
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Remote (string, field 2, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPlugin
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPlugin
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Remote = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Privileges (repeated PluginPrivilege message, field 3):
			// append a fresh element and recurse into its Unmarshal.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPlugin
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthPlugin
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Privileges = append(m.Privileges, &PluginPrivilege{})
			if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 4:
			// Disabled (bool, field 4, varint): any nonzero value is true.
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPlugin
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Disabled = bool(v != 0)
		default:
			// Unknown field: rewind to the key byte and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipPlugin(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPlugin
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m,
// appending to (not resetting) the repeated Value field. Generated
// reflection-free decoder mirroring PluginSpec.Unmarshal.
func (m *PluginPrivilege) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// Read the key varint: (field number << 3) | wire type.
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowPlugin
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Name (string, field 1, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPlugin
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPlugin
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Description (string, field 2, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPlugin
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPlugin
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Description = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 3:
			// Value (repeated string, field 3): each occurrence appends one element.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowPlugin
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthPlugin
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Value = append(m.Value, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the key byte and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipPlugin(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthPlugin
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipPlugin consumes exactly one encoded protobuf field (key plus payload)
// from dAtA and returns the number of bytes it spanned. Generated by
// protoc-gen-gogo; do not edit by hand.
//
// Errors: ErrIntOverflowPlugin if a varint does not terminate within 64 bits,
// io.ErrUnexpectedEOF if the buffer ends mid-field, ErrInvalidLengthPlugin if
// a length-delimited field carries a negative length, and a formatted error
// for an illegal wire type.
func skipPlugin(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		// Decode the field key (tag<<3 | wireType) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowPlugin
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Wire type 0 (varint): skip bytes until the continuation bit clears.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowPlugin
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
			return iNdEx, nil
		case 1:
			// Wire type 1: fixed 64-bit value.
			iNdEx += 8
			return iNdEx, nil
		case 2:
			// Wire type 2: length-delimited; decode the length varint, then
			// skip that many payload bytes.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowPlugin
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			iNdEx += length
			// The negative-length check comes after the add, but that is safe:
			// the error path returns before iNdEx is used.
			if length < 0 {
				return 0, ErrInvalidLengthPlugin
			}
			return iNdEx, nil
		case 3:
			// Wire type 3: start-group. Recursively skip nested fields until
			// the matching end-group marker (wire type 4) is found.
			for {
				var innerWire uint64
				var start int = iNdEx
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return 0, ErrIntOverflowPlugin
					}
					if iNdEx >= l {
						return 0, io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					innerWire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				innerWireType := int(innerWire & 0x7)
				if innerWireType == 4 {
					break
				}
				// Not an end-group: re-scan from the inner key and skip the
				// whole nested field.
				next, err := skipPlugin(dAtA[start:])
				if err != nil {
					return 0, err
				}
				iNdEx = start + next
			}
			return iNdEx, nil
		case 4:
			// Bare end-group marker.
			return iNdEx, nil
		case 5:
			// Wire type 5: fixed 32-bit value.
			iNdEx += 4
			return iNdEx, nil
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
	}
	// Reached only when dAtA is empty: every switch case returns, so the loop
	// body never falls through.
	panic("unreachable")
}
var (
	// ErrInvalidLengthPlugin is returned when a decoded length prefix is
	// negative, i.e. the input is corrupt.
	ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling")
	// ErrIntOverflowPlugin is returned when a varint does not terminate
	// within 64 bits.
	ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow")
)
// init registers the gzipped file descriptor for plugin.proto with the proto
// registry so it can be looked up by path at runtime.
func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }

// fileDescriptorPlugin is the gzip-compressed serialized FileDescriptorProto
// for plugin.proto, emitted by the protobuf code generator. Do not edit.
var fileDescriptorPlugin = []byte{
	// 196 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
	0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b,
	0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30,
	0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12,
	0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35,
	0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c,
	0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a,
	0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab,
	0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0,
	0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33,
	0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
	0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79,
	0x0c, 0x01, 0x00, 0x00,
}

View File

@ -0,0 +1,18 @@
syntax = "proto3";

// PluginSpec defines the base payload which clients can specify for creating
// a service with the plugin runtime.
message PluginSpec {
	// name of the plugin service.
	string name = 1;
	// remote is the registry reference the plugin is pulled from
	// (tagged and digest-pinned by the service client).
	string remote = 2;
	// privileges the user has accepted for this plugin.
	repeated PluginPrivilege privileges = 3;
	// disabled indicates the plugin should not be enabled — TODO confirm
	// exact lifecycle semantics against the plugin backend.
	bool disabled = 4;
}

// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
message PluginPrivilege {
	// name of the privilege.
	string name = 1;
	// description is human-readable text explaining the privilege.
	string description = 2;
	// value holds the privilege's values (e.g. the concrete resources
	// requested) — presumably one entry per granted item; verify with callers.
	repeated string value = 3;
}

View File

@ -2,7 +2,7 @@ package swarm
import "time"
// ClusterInfo represents info about the cluster for outputing in "info"
// ClusterInfo represents info about the cluster for outputting in "info"
// it contains the same information as "Swarm", but without the JoinTokens
type ClusterInfo struct {
ID string

View File

@ -1,6 +1,10 @@
package swarm
import "time"
import (
"time"
"github.com/docker/docker/api/types/swarm/runtime"
)
// TaskState represents the state of a task.
type TaskState string
@ -51,7 +55,11 @@ type Task struct {
// TaskSpec represents the spec of a task.
type TaskSpec struct {
ContainerSpec ContainerSpec `json:",omitempty"`
// ContainerSpec and PluginSpec are mutually exclusive.
// PluginSpec will only be used when the `Runtime` field is set to `plugin`
ContainerSpec *ContainerSpec `json:",omitempty"`
PluginSpec *runtime.PluginSpec `json:",omitempty"`
Resources *ResourceRequirements `json:",omitempty"`
RestartPolicy *RestartPolicy `json:",omitempty"`
Placement *Placement `json:",omitempty"`

View File

@ -20,7 +20,7 @@ func TestGetAllAllowed(t *testing.T) {
})
buildArgs.AddMetaArg("ArgFromMeta", strPtr("frommeta1"))
buildArgs.AddMetaArg("ArgFromMetaOverriden", strPtr("frommeta2"))
buildArgs.AddMetaArg("ArgFromMetaOverridden", strPtr("frommeta2"))
buildArgs.AddMetaArg("ArgFromMetaNotUsed", strPtr("frommeta3"))
buildArgs.AddArg("ArgOverriddenByOptions", strPtr("fromdockerfile2"))
@ -28,7 +28,7 @@ func TestGetAllAllowed(t *testing.T) {
buildArgs.AddArg("ArgNoDefaultInDockerfile", nil)
buildArgs.AddArg("ArgNoDefaultInDockerfileFromOptions", nil)
buildArgs.AddArg("ArgFromMeta", nil)
buildArgs.AddArg("ArgFromMetaOverriden", strPtr("fromdockerfile3"))
buildArgs.AddArg("ArgFromMetaOverridden", strPtr("fromdockerfile3"))
all := buildArgs.GetAllAllowed()
expected := map[string]string{
@ -37,7 +37,7 @@ func TestGetAllAllowed(t *testing.T) {
"ArgWithDefaultInDockerfile": "fromdockerfile1",
"ArgNoDefaultInDockerfileFromOptions": "fromopt3",
"ArgFromMeta": "frommeta1",
"ArgFromMetaOverriden": "fromdockerfile3",
"ArgFromMetaOverridden": "fromdockerfile3",
}
assert.Equal(t, expected, all)
}

View File

@ -140,8 +140,7 @@ func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func
}()
if options.RemoteContext == remotecontext.ClientSessionRemote {
st := time.Now()
csi, err := NewClientSessionSourceIdentifier(ctx, bm.sg,
options.SessionID, []string{"/"})
csi, err := NewClientSessionSourceIdentifier(ctx, bm.sg, options.SessionID)
if err != nil {
return nil, err
}

View File

@ -30,26 +30,25 @@ func (cst *ClientSessionTransport) Copy(ctx context.Context, id fscache.RemoteId
}
return filesync.FSSync(ctx, csi.caller, filesync.FSSendRequestOpt{
SrcPaths: csi.srcPaths,
DestDir: dest,
CacheUpdater: cu,
IncludePatterns: csi.includePatterns,
DestDir: dest,
CacheUpdater: cu,
})
}
// ClientSessionSourceIdentifier is an identifier that can be used for requesting
// files from remote client
type ClientSessionSourceIdentifier struct {
srcPaths []string
caller session.Caller
sharedKey string
uuid string
includePatterns []string
caller session.Caller
sharedKey string
uuid string
}
// NewClientSessionSourceIdentifier returns new ClientSessionSourceIdentifier instance
func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string, sources []string) (*ClientSessionSourceIdentifier, error) {
func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string) (*ClientSessionSourceIdentifier, error) {
csi := &ClientSessionSourceIdentifier{
uuid: uuid,
srcPaths: sources,
uuid: uuid,
}
caller, err := sg.Get(ctx, uuid)
if err != nil {

View File

@ -154,8 +154,8 @@ func (fsc *FSCache) DiskUsage() (int64, error) {
}
// Prune allows manually cleaning up the cache
func (fsc *FSCache) Prune() (uint64, error) {
return fsc.store.Prune()
func (fsc *FSCache) Prune(ctx context.Context) (uint64, error) {
return fsc.store.Prune(ctx)
}
// Close stops the gc and closes the persistent db
@ -396,12 +396,19 @@ func (s *fsCacheStore) DiskUsage() (int64, error) {
}
// Prune allows manually cleaning up the cache
func (s *fsCacheStore) Prune() (uint64, error) {
func (s *fsCacheStore) Prune(ctx context.Context) (uint64, error) {
s.mu.Lock()
defer s.mu.Unlock()
var size uint64
for id, snap := range s.sources {
select {
case <-ctx.Done():
logrus.Debugf("Cache prune operation cancelled, pruned size: %d", size)
// when the context is cancelled, only return current size and nil
return size, nil
default:
}
if len(snap.refs) == 0 {
ss, err := snap.getSize()
if err != nil {

View File

@ -97,7 +97,7 @@ func TestFSCache(t *testing.T) {
assert.Equal(t, s, int64(8))
// prune deletes everything
released, err := fscache.Prune()
released, err := fscache.Prune(context.TODO())
assert.Nil(t, err)
assert.Equal(t, released, uint64(8))

View File

@ -91,7 +91,7 @@ type Client struct {
// CheckRedirect specifies the policy for dealing with redirect responses:
// If the request is non-GET return `ErrRedirect`. Otherwise use the last response.
//
// Go 1.8 changes behavior for HTTP redirects (specificlaly 301, 307, and 308) in the client .
// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client .
// The Docker client (and by extension docker API client) can be made to send a request
// like POST /containers//start where what would normally be in the name section of the URL is empty.
// This triggers an HTTP 301 from the daemon.

View File

@ -11,6 +11,26 @@ import (
// It returns a types.HijackedResponse with the hijacked connection
// and a reader to get output. It's up to the caller to close
// the hijacked connection by calling types.HijackedResponse.Close.
//
// The stream format on the response will be in one of two formats:
//
// If the container is using a TTY, there is only a single stream (stdout), and
// data is copied directly from the container output stream, no extra
// multiplexing or headers.
//
// If the container is *not* using a TTY, streams for stdout and stderr are
// multiplexed.
// The format of the multiplexed stream is as follows:
//
// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
//
// STREAM_TYPE can be 1 for stdout and 2 for stderr
//
// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
// This is the size of OUTPUT.
//
// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
// stream.
func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
query := url.Values{}
if options.Stream {

View File

@ -13,6 +13,26 @@ import (
// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
// It's up to the caller to close the stream.
//
// The stream format on the response will be in one of two formats:
//
// If the container is using a TTY, there is only a single stream (stdout), and
// data is copied directly from the container output stream, no extra
// multiplexing or headers.
//
// If the container is *not* using a TTY, streams for stdout and stderr are
// multiplexed.
// The format of the multiplexed stream is as follows:
//
// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
//
// STREAM_TYPE can be 1 for stdout and 2 for stderr
//
// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
// This is the size of OUTPUT.
//
// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
// stream.
func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
query := url.Values{}
if options.ShowStdout {

View File

@ -14,7 +14,7 @@ import (
// indicated by the given condition, either "not-running" (default),
// "next-exit", or "removed".
//
// If this client's API version is beforer 1.30, condition is ignored and
// If this client's API version is before 1.30, condition is ignored and
// ContainerWait will return immediately with the two channels, as the server
// will wait as if the condition were "not-running".
//
@ -23,7 +23,7 @@ import (
// then returns two channels on which the caller can wait for the exit status
// of the container or an error if there was a problem either beginning the
// wait request or in getting the response. This allows the caller to
// sychronize ContainerWait with other calls, such as specifying a
// synchronize ContainerWait with other calls, such as specifying a
// "next-exit" condition before issuing a ContainerStart request.
func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) {
if versions.LessThan(cli.ClientVersion(), "1.30") {

View File

@ -228,7 +228,7 @@ func IsErrPluginPermissionDenied(err error) bool {
// NewVersionError returns an error if the APIVersion required
// if less than the current supported version
func (cli *Client) NewVersionError(APIrequired, feature string) error {
if versions.LessThan(cli.version, APIrequired) {
if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
}
return nil

View File

@ -6,9 +6,9 @@ import (
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
@ -24,24 +24,51 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec,
headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
}
// ensure that the image is tagged
if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
service.TaskTemplate.ContainerSpec.Image = taggedImg
// Make sure containerSpec is not nil when no runtime is set or the runtime is set to container
if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) {
service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
}
// Contact the registry to retrieve digest and platform information
if options.QueryRegistry {
distributionInspect, err := cli.DistributionInspect(ctx, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
distErr = err
if err == nil {
// now pin by digest if the image doesn't already contain a digest
if img := imageWithDigestString(service.TaskTemplate.ContainerSpec.Image, distributionInspect.Descriptor.Digest); img != "" {
if err := validateServiceSpec(service); err != nil {
return types.ServiceCreateResponse{}, err
}
// ensure that the image is tagged
var imgPlatforms []swarm.Platform
if service.TaskTemplate.ContainerSpec != nil {
if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
service.TaskTemplate.ContainerSpec.Image = taggedImg
}
if options.QueryRegistry {
var img string
img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
if img != "" {
service.TaskTemplate.ContainerSpec.Image = img
}
// add platforms that are compatible with the service
service.TaskTemplate.Placement = setServicePlatforms(service.TaskTemplate.Placement, distributionInspect)
}
}
// ensure that the image is tagged
if service.TaskTemplate.PluginSpec != nil {
if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
service.TaskTemplate.PluginSpec.Remote = taggedImg
}
if options.QueryRegistry {
var img string
img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
if img != "" {
service.TaskTemplate.PluginSpec.Remote = img
}
}
}
if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
service.TaskTemplate.Placement = &swarm.Placement{}
}
if len(imgPlatforms) > 0 {
service.TaskTemplate.Placement.Platforms = imgPlatforms
}
var response types.ServiceCreateResponse
resp, err := cli.post(ctx, "/services/create", nil, service, headers)
if err != nil {
@ -58,6 +85,28 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec,
return response, err
}
func imageDigestAndPlatforms(ctx context.Context, cli *Client, image, encodedAuth string) (string, []swarm.Platform, error) {
distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
imageWithDigest := image
var platforms []swarm.Platform
if err != nil {
return "", nil, err
}
imageWithDigest = imageWithDigestString(image, distributionInspect.Descriptor.Digest)
if len(distributionInspect.Platforms) > 0 {
platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
for _, p := range distributionInspect.Platforms {
platforms = append(platforms, swarm.Platform{
Architecture: p.Architecture,
OS: p.OS,
})
}
}
return imageWithDigest, platforms, err
}
// imageWithDigestString takes an image string and a digest, and updates
// the image string if it didn't originally contain a digest. It returns
// an empty string if there are no updates.
@ -86,27 +135,22 @@ func imageWithTagString(image string) string {
return ""
}
// setServicePlatforms sets Platforms in swarm.Placement to list all
// compatible platforms for the service, as found in distributionInspect
// and returns a pointer to the new or updated swarm.Placement struct.
func setServicePlatforms(placement *swarm.Placement, distributionInspect registrytypes.DistributionInspect) *swarm.Placement {
if placement == nil {
placement = &swarm.Placement{}
}
// reset any existing listed platforms
placement.Platforms = []swarm.Platform{}
for _, p := range distributionInspect.Platforms {
placement.Platforms = append(placement.Platforms, swarm.Platform{
Architecture: p.Architecture,
OS: p.OS,
})
}
return placement
}
// digestWarning constructs a formatted warning string using the
// image name that could not be pinned by digest. The formatting
// is hardcoded, but could be made smarter in the future
func digestWarning(image string) string {
return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
}
func validateServiceSpec(s swarm.ServiceSpec) error {
if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
return errors.New("must not specify both a container spec and a plugin spec in the task template")
}
if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
return errors.New("mismatched runtime with plugin spec")
}
if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
return errors.New("mismatched runtime with container spec")
}
return nil
}

View File

@ -112,7 +112,7 @@ func TestServiceCreateCompatiblePlatforms(t *testing.T) {
}),
}
spec := swarm.ServiceSpec{TaskTemplate: swarm.TaskSpec{ContainerSpec: swarm.ContainerSpec{Image: "foobar:1.0"}}}
spec := swarm.ServiceSpec{TaskTemplate: swarm.TaskSpec{ContainerSpec: &swarm.ContainerSpec{Image: "foobar:1.0"}}}
r, err := client.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{QueryRegistry: true})
assert.NoError(t, err)
@ -189,7 +189,7 @@ func TestServiceCreateDigestPinning(t *testing.T) {
for _, p := range pinByDigestTests {
r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{
TaskTemplate: swarm.TaskSpec{
ContainerSpec: swarm.ContainerSpec{
ContainerSpec: &swarm.ContainerSpec{
Image: p.img,
},
},

View File

@ -35,26 +35,46 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version
query.Set("version", strconv.FormatUint(version.Index, 10))
// ensure that the image is tagged
if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
service.TaskTemplate.ContainerSpec.Image = taggedImg
if err := validateServiceSpec(service); err != nil {
return types.ServiceUpdateResponse{}, err
}
// Contact the registry to retrieve digest and platform information
// This happens only when the image has changed
if options.QueryRegistry {
distributionInspect, err := cli.DistributionInspect(ctx, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
distErr = err
if err == nil {
// now pin by digest if the image doesn't already contain a digest
if img := imageWithDigestString(service.TaskTemplate.ContainerSpec.Image, distributionInspect.Descriptor.Digest); img != "" {
var imgPlatforms []swarm.Platform
// ensure that the image is tagged
if service.TaskTemplate.ContainerSpec != nil {
if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
service.TaskTemplate.ContainerSpec.Image = taggedImg
}
if options.QueryRegistry {
var img string
img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
if img != "" {
service.TaskTemplate.ContainerSpec.Image = img
}
// add platforms that are compatible with the service
service.TaskTemplate.Placement = setServicePlatforms(service.TaskTemplate.Placement, distributionInspect)
}
}
// ensure that the image is tagged
if service.TaskTemplate.PluginSpec != nil {
if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
service.TaskTemplate.PluginSpec.Remote = taggedImg
}
if options.QueryRegistry {
var img string
img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
if img != "" {
service.TaskTemplate.PluginSpec.Remote = img
}
}
}
if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
service.TaskTemplate.Placement = &swarm.Placement{}
}
if len(imgPlatforms) > 0 {
service.TaskTemplate.Placement.Platforms = imgPlatforms
}
var response types.ServiceUpdateResponse
resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers)
if err != nil {

View File

@ -9,9 +9,10 @@ import (
"github.com/tonistiigi/fsutil"
)
func sendDiffCopy(stream grpc.Stream, dir string, excludes []string, progress progressCb) error {
func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error {
return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{
ExcludePatterns: excludes,
IncludePaths: includes, // TODO: rename IncludePatterns
}, progress)
}

View File

@ -12,6 +12,11 @@ import (
"google.golang.org/grpc/metadata"
)
const (
keyOverrideExcludes = "override-excludes"
keyIncludePatterns = "include-patterns"
)
type fsSyncProvider struct {
root string
excludes []string
@ -54,9 +59,10 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) error
opts, _ := metadata.FromContext(stream.Context()) // if no metadata continue with empty object
var excludes []string
if len(opts["Override-Excludes"]) == 0 || opts["Override-Excludes"][0] != "true" {
if len(opts[keyOverrideExcludes]) == 0 || opts[keyOverrideExcludes][0] != "true" {
excludes = sp.excludes
}
includes := opts[keyIncludePatterns]
var progress progressCb
if sp.p != nil {
@ -69,7 +75,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) error
doneCh = sp.doneCh
sp.doneCh = nil
}
err := pr.sendFn(stream, sp.root, excludes, progress)
err := pr.sendFn(stream, sp.root, includes, excludes, progress)
if doneCh != nil {
if err != nil {
doneCh <- err
@ -88,7 +94,7 @@ type progressCb func(int, bool)
type protocol struct {
name string
sendFn func(stream grpc.Stream, srcDir string, excludes []string, progress progressCb) error
sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb) error
recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater) error
}
@ -115,7 +121,7 @@ var supportedProtocols = []protocol{
// FSSendRequestOpt defines options for FSSend request
type FSSendRequestOpt struct {
SrcPaths []string
IncludePatterns []string
OverrideExcludes bool
DestDir string
CacheUpdater CacheUpdater
@ -142,7 +148,11 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
opts := make(map[string][]string)
if opt.OverrideExcludes {
opts["Override-Excludes"] = []string{"true"}
opts[keyOverrideExcludes] = []string{"true"}
}
if opt.IncludePatterns != nil {
opts[keyIncludePatterns] = opt.IncludePatterns
}
ctx, cancel := context.WithCancel(ctx)

View File

@ -0,0 +1,71 @@
package filesync
import (
"context"
"io/ioutil"
"path/filepath"
"testing"
"github.com/docker/docker/client/session"
"github.com/docker/docker/client/session/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
// TestFileSyncIncludePatterns verifies that FSSync honors IncludePatterns:
// with pattern "ba*", only "bar" is copied to the destination and "foo" is
// not transferred.
func TestFileSyncIncludePatterns(t *testing.T) {
	// NOTE(review): tmpDir and destDir are never removed; consider
	// defer os.RemoveAll for both — confirm against this package's test
	// conventions.
	tmpDir, err := ioutil.TempDir("", "fsynctest")
	require.NoError(t, err)
	destDir, err := ioutil.TempDir("", "fsynctest")
	require.NoError(t, err)
	// Source tree: "foo" must be excluded, "bar" must be synced.
	err = ioutil.WriteFile(filepath.Join(tmpDir, "foo"), []byte("content1"), 0600)
	require.NoError(t, err)
	err = ioutil.WriteFile(filepath.Join(tmpDir, "bar"), []byte("content2"), 0600)
	require.NoError(t, err)
	// Wire a session serving tmpDir to a manager over an in-memory stream.
	s, err := session.NewSession("foo", "bar")
	require.NoError(t, err)
	m, err := session.NewManager()
	require.NoError(t, err)
	fs := NewFSSyncProvider(tmpDir, nil)
	s.Allow(fs)
	dialer := session.Dialer(testutil.TestStream(testutil.Handler(m.HandleConn)))
	g, ctx := errgroup.WithContext(context.Background())
	// Run the session server side.
	g.Go(func() error {
		return s.Run(ctx, dialer)
	})
	// Client side: sync with the include pattern, then assert the outcome.
	g.Go(func() (reterr error) {
		c, err := m.Get(ctx, s.UUID())
		if err != nil {
			return err
		}
		if err := FSSync(ctx, c, FSSendRequestOpt{
			DestDir:         destDir,
			IncludePatterns: []string{"ba*"},
		}); err != nil {
			return err
		}
		// "foo" does not match "ba*" and must be absent.
		_, err = ioutil.ReadFile(filepath.Join(destDir, "foo"))
		assert.Error(t, err)
		// "bar" matches and must arrive with its content intact.
		dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar"))
		if err != nil {
			return err
		}
		assert.Equal(t, "content2", string(dt))
		// Closing the session unblocks s.Run above.
		return s.Close()
	})
	err = g.Wait()
	require.NoError(t, err)
}

View File

@ -10,7 +10,7 @@ import (
"google.golang.org/grpc"
)
func sendTarStream(stream grpc.Stream, dir string, excludes []string, progress progressCb) error {
func sendTarStream(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error {
a, err := archive.TarWithOptions(dir, &archive.TarOptions{
ExcludePatterns: excludes,
})

View File

@ -1,6 +1,7 @@
package session
import (
"net"
"net/http"
"strings"
"sync"
@ -49,8 +50,6 @@ func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter,
}
uuid := r.Header.Get(headerSessionUUID)
name := r.Header.Get(headerSessionName)
sharedKey := r.Header.Get(headerSessionSharedKey)
proto := r.Header.Get("Upgrade")
@ -89,9 +88,25 @@ func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter,
conn.Write([]byte{})
resp.Write(conn)
return sm.handleConn(ctx, conn, r.Header)
}
// HandleConn handles an incoming raw connection
func (sm *Manager) HandleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error {
sm.mu.Lock()
return sm.handleConn(ctx, conn, opts)
}
// caller needs to take lock, this function will release it
func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
h := http.Header(opts)
uuid := h.Get(headerSessionUUID)
name := h.Get(headerSessionName)
sharedKey := h.Get(headerSessionSharedKey)
ctx, cc, err := grpcClientConn(ctx, conn)
if err != nil {
sm.mu.Unlock()
@ -111,7 +126,7 @@ func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter,
supported: make(map[string]struct{}),
}
for _, m := range r.Header[headerSessionMethod] {
for _, m := range opts[headerSessionMethod] {
c.supported[strings.ToLower(m)] = struct{}{}
}
sm.sessions[uuid] = c

View File

@ -0,0 +1,70 @@
package testutil
import (
"io"
"net"
"time"
"github.com/Sirupsen/logrus"
"golang.org/x/net/context"
)
// Handler is the function called to handle an incoming connection, together
// with the metadata that accompanied it.
type Handler func(ctx context.Context, conn net.Conn, meta map[string][]string) error

// Dialer is a function for dialing an outgoing connection for the given
// protocol, passing along the supplied metadata.
type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)
// TestStream creates an in-memory session dialer for a handler function:
// each dial hands one end of a socket pair to handler (run on its own
// goroutine with the caller's metadata) and returns the other end.
//
// Note the socket pair is created once, outside the returned closure, so the
// Dialer is effectively single-use. Handler errors are logged, not
// propagated; the handler's end is closed when it returns.
func TestStream(handler Handler) Dialer {
	s1, s2 := sockPair()
	return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
		go func() {
			// The handler receives context.TODO(), not the dial ctx, so
			// cancelling the dial context does not cancel the handler —
			// presumably acceptable for tests; confirm if reused elsewhere.
			err := handler(context.TODO(), s1, meta)
			if err != nil {
				logrus.Error(err)
			}
			s1.Close()
		}()
		return s2, nil
	}
}
// sockPair builds two connected in-memory sockets: bytes written on one end
// are readable on the other. Each end's Close shuts down its own write side,
// which surfaces as EOF to the peer's reader.
func sockPair() (*sock, *sock) {
	bRead, aWrite := io.Pipe()
	aRead, bWrite := io.Pipe()
	a := &sock{Writer: aWrite, Reader: aRead, Closer: aWrite}
	b := &sock{Writer: bWrite, Reader: bRead, Closer: bWrite}
	return a, b
}
// sock is an in-memory net.Conn built from pipe ends: writes go to the
// embedded Writer, reads come from the embedded Reader, and Close closes the
// embedded Closer.
type sock struct {
	io.Writer
	io.Reader
	io.Closer
}

// LocalAddr returns a placeholder address; sock has no real endpoint.
func (s *sock) LocalAddr() net.Addr {
	return dummyAddr{}
}

// RemoteAddr returns a placeholder address; sock has no real endpoint.
func (s *sock) RemoteAddr() net.Addr {
	return dummyAddr{}
}

// SetDeadline is a no-op; deadlines are not supported.
func (s *sock) SetDeadline(t time.Time) error {
	return nil
}

// SetReadDeadline is a no-op; deadlines are not supported.
func (s *sock) SetReadDeadline(t time.Time) error {
	return nil
}

// SetWriteDeadline is a no-op; deadlines are not supported.
func (s *sock) SetWriteDeadline(t time.Time) error {
	return nil
}
// dummyAddr is a placeholder net.Addr for sock, which has no real network
// endpoint.
type dummyAddr struct{}

// Network reports a fixed "tcp" placeholder network name.
func (dummyAddr) Network() string { return "tcp" }

// String reports a fixed "localhost" placeholder address.
func (dummyAddr) String() string { return "localhost" }

View File

@ -253,6 +253,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
Root: cli.Config.Root,
Name: name,
Backend: d,
PluginBackend: d.PluginManager(),
NetworkSubnetsProvider: d,
DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr,
RuntimeRoot: cli.getSwarmRunRoot(),

View File

@ -7,10 +7,10 @@ import (
"net"
"os"
"path/filepath"
"syscall"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
const defaultDaemonConfigFile = ""
@ -30,8 +30,8 @@ func currentUserIsOwner(f string) bool {
// caused by custom umask
func setDefaultUmask() error {
desiredUmask := 0022
syscall.Umask(desiredUmask)
if umask := syscall.Umask(desiredUmask); umask != desiredUmask {
unix.Umask(desiredUmask)
if umask := unix.Umask(desiredUmask); umask != desiredUmask {
return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask)
}

View File

@ -9,12 +9,12 @@ import (
"os/signal"
"path/filepath"
"strconv"
"syscall"
"github.com/docker/docker/cmd/dockerd/hack"
"github.com/docker/docker/daemon"
"github.com/docker/docker/libcontainerd"
"github.com/docker/libnetwork/portallocator"
"golang.org/x/sys/unix"
)
const defaultDaemonConfigFile = "/etc/docker/daemon.json"
@ -23,8 +23,8 @@ const defaultDaemonConfigFile = "/etc/docker/daemon.json"
// caused by custom umask
func setDefaultUmask() error {
desiredUmask := 0022
syscall.Umask(desiredUmask)
if umask := syscall.Umask(desiredUmask); umask != desiredUmask {
unix.Umask(desiredUmask)
if umask := unix.Umask(desiredUmask); umask != desiredUmask {
return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask)
}
@ -38,7 +38,7 @@ func getDaemonConfDir(_ string) string {
// setupConfigReloadTrap configures the USR2 signal to reload the configuration.
func (cli *DaemonCli) setupConfigReloadTrap() {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP)
signal.Notify(c, unix.SIGHUP)
go func() {
for range c {
cli.reloadConfig()

View File

@ -5,11 +5,11 @@ import (
"net"
"os"
"path/filepath"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/windows"
)
var defaultDaemonConfigFile = ""
@ -58,14 +58,14 @@ func notifyShutdown(err error) {
// setupConfigReloadTrap configures a Win32 event to reload the configuration.
func (cli *DaemonCli) setupConfigReloadTrap() {
go func() {
sa := syscall.SecurityAttributes{
sa := windows.SecurityAttributes{
Length: 0,
}
ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid())
if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 {
logrus.Debugf("Config reload - waiting signal at %s", ev)
for {
syscall.WaitForSingleObject(h, syscall.INFINITE)
windows.WaitForSingleObject(h, windows.INFINITE)
cli.reloadConfig()
}
}

View File

@ -9,7 +9,6 @@ import (
"os"
"os/exec"
"path/filepath"
"syscall"
"time"
"unsafe"
@ -30,7 +29,7 @@ var (
flRunService *bool
setStdHandle = windows.NewLazySystemDLL("kernel32.dll").NewProc("SetStdHandle")
oldStderr syscall.Handle
oldStderr windows.Handle
panicFile *os.File
service *handler
@ -131,14 +130,14 @@ func (h *etwHook) Fire(e *logrus.Entry) error {
err error
)
ss[0], err = syscall.UTF16PtrFromString(e.Message)
ss[0], err = windows.UTF16PtrFromString(e.Message)
if err != nil {
return err
}
count := uint16(1)
if exts != "" {
ss[1], err = syscall.UTF16PtrFromString(exts)
ss[1], err = windows.UTF16PtrFromString(exts)
if err != nil {
return err
}
@ -397,8 +396,8 @@ func initPanicFile(path string) error {
// Update STD_ERROR_HANDLE to point to the panic file so that Go writes to
// it when it panics. Remember the old stderr to restore it before removing
// the panic file.
sh := syscall.STD_ERROR_HANDLE
h, err := syscall.GetStdHandle(sh)
sh := windows.STD_ERROR_HANDLE
h, err := windows.GetStdHandle(uint32(sh))
if err != nil {
return err
}
@ -422,7 +421,7 @@ func initPanicFile(path string) error {
func removePanicFile() {
if st, err := panicFile.Stat(); err == nil {
if st.Size() == 0 {
sh := syscall.STD_ERROR_HANDLE
sh := windows.STD_ERROR_HANDLE
setStdHandle.Call(uintptr(sh), uintptr(oldStderr))
panicFile.Close()
os.Remove(panicFile.Name())

View File

@ -107,7 +107,8 @@ type Container struct {
NoNewPrivileges bool
// Fields here are specific to Windows
NetworkSharedContainerID string
NetworkSharedContainerID string `json:"-"`
SharedEndpointList []string `json:"-"`
}
// NewBaseContainer creates a new container with its

View File

@ -269,7 +269,7 @@ func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfi
cResources := &container.HostConfig.Resources
// validate NanoCPUs, CPUPeriod, and CPUQuota
// Becuase NanoCPU effectively updates CPUPeriod/CPUQuota,
// Because NanoCPU effectively updates CPUPeriod/CPUQuota,
// once NanoCPU is already set, updating CPUPeriod/CPUQuota will be blocked, and vice versa.
// In the following we make sure the intended update (resources) does not conflict with the existing (cResource).
if resources.NanoCPUs > 0 && cResources.CPUPeriod > 0 {

View File

@ -185,7 +185,7 @@ const (
// timeouts, and avoiding goroutine leaks. Wait must be called without holding
// the state lock. Returns a channel from which the caller will receive the
// result. If the container exited on its own, the result's Err() method will
// be nil and its ExitCode() method will return the conatiners exit code,
// be nil and its ExitCode() method will return the container's exit code,
// otherwise, the results Err() method will return an error indicating why the
// wait operation failed.
func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateStatus {
@ -278,6 +278,7 @@ func (s *State) SetRunning(pid int, initial bool) {
s.ErrorMsg = ""
s.Running = true
s.Restarting = false
s.Paused = false
s.ExitCodeValue = 0
s.Pid = pid
if initial {
@ -304,6 +305,7 @@ func (s *State) SetRestarting(exitStatus *ExitStatus) {
// all the checks in docker around rm/stop/etc
s.Running = true
s.Restarting = true
s.Paused = false
s.Pid = 0
s.FinishedAt = time.Now().UTC()
s.setFromExitStatus(exitStatus)

View File

@ -49,6 +49,7 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types/network"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/controllers/plugin"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
"github.com/docker/docker/pkg/signal"
lncluster "github.com/docker/libnetwork/cluster"
@ -97,6 +98,7 @@ type Config struct {
Root string
Name string
Backend executorpkg.Backend
PluginBackend plugin.Backend
NetworkSubnetsProvider NetworkSubnetsProvider
// DefaultAdvertiseAddr is the default host/IP or network interface to use

View File

@ -1,79 +1,261 @@
package plugin
import (
"io"
"io/ioutil"
"net/http"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/reference"
enginetypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm/runtime"
"github.com/docker/docker/plugin"
"github.com/docker/docker/plugin/v2"
"github.com/docker/swarmkit/api"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// Controller is the controller for the plugin backend
type Controller struct{}
// Controller is the controller for the plugin backend.
// Plugins are managed as a singleton object with a desired state (different from containers).
// With the plugin controller, instead of having a strict create->start->stop->remove
// task lifecycle like containers, we manage the desired state of the plugin and let
// the plugin manager do what it already does and monitor the plugin.
// We'll also end up with many tasks all pointing to the same plugin ID.
//
// TODO(@cpuguy83): registry auth is intentionally not supported until we work out
// the right way to pass registry credentials via secrets.
type Controller struct {
backend Backend
spec runtime.PluginSpec
logger *logrus.Entry
pluginID string
serviceID string
taskID string
// hook used to signal tests that `Wait()` is actually ready and waiting
signalWaitReady func()
}
// Backend is the interface for interacting with the plugin manager
// Controller actions are passed to the configured backend to do the real work.
type Backend interface {
Disable(name string, config *enginetypes.PluginDisableConfig) error
Enable(name string, config *enginetypes.PluginEnableConfig) error
Remove(name string, config *enginetypes.PluginRmConfig) error
Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error
Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error
Get(name string) (*v2.Plugin, error)
SubscribeEvents(buffer int, events ...plugin.Event) (eventCh <-chan interface{}, cancel func())
}
// NewController returns a new cluster plugin controller
func NewController() (*Controller, error) {
return &Controller{}, nil
func NewController(backend Backend, t *api.Task) (*Controller, error) {
spec, err := readSpec(t)
if err != nil {
return nil, err
}
return &Controller{
backend: backend,
spec: spec,
serviceID: t.ServiceID,
logger: logrus.WithFields(logrus.Fields{
"controller": "plugin",
"task": t.ID,
"plugin": spec.Name,
})}, nil
}
func readSpec(t *api.Task) (runtime.PluginSpec, error) {
var cfg runtime.PluginSpec
generic := t.Spec.GetGeneric()
if err := proto.Unmarshal(generic.Payload.Value, &cfg); err != nil {
return cfg, errors.Wrap(err, "error reading plugin spec")
}
return cfg, nil
}
// Update is the update phase from swarmkit
func (p *Controller) Update(ctx context.Context, t *api.Task) error {
logrus.WithFields(logrus.Fields{
"controller": "plugin",
}).Debug("Update")
p.logger.Debug("Update")
return nil
}
// Prepare is the prepare phase from swarmkit
func (p *Controller) Prepare(ctx context.Context) error {
logrus.WithFields(logrus.Fields{
"controller": "plugin",
}).Debug("Prepare")
func (p *Controller) Prepare(ctx context.Context) (err error) {
p.logger.Debug("Prepare")
remote, err := reference.ParseNormalizedNamed(p.spec.Remote)
if err != nil {
return errors.Wrapf(err, "error parsing remote reference %q", p.spec.Remote)
}
if p.spec.Name == "" {
p.spec.Name = remote.String()
}
var authConfig enginetypes.AuthConfig
privs := convertPrivileges(p.spec.Privileges)
pl, err := p.backend.Get(p.spec.Name)
defer func() {
if pl != nil && err == nil {
pl.Acquire()
}
}()
if err == nil && pl != nil {
if pl.SwarmServiceID != p.serviceID {
return errors.Errorf("plugin already exists: %s", p.spec.Name)
}
if pl.IsEnabled() {
if err := p.backend.Disable(pl.GetID(), &enginetypes.PluginDisableConfig{ForceDisable: true}); err != nil {
p.logger.WithError(err).Debug("could not disable plugin before running upgrade")
}
}
p.pluginID = pl.GetID()
return p.backend.Upgrade(ctx, remote, p.spec.Name, nil, &authConfig, privs, ioutil.Discard)
}
if err := p.backend.Pull(ctx, remote, p.spec.Name, nil, &authConfig, privs, ioutil.Discard, plugin.WithSwarmService(p.serviceID)); err != nil {
return err
}
pl, err = p.backend.Get(p.spec.Name)
if err != nil {
return err
}
p.pluginID = pl.GetID()
return nil
}
// Start is the start phase from swarmkit
func (p *Controller) Start(ctx context.Context) error {
logrus.WithFields(logrus.Fields{
"controller": "plugin",
}).Debug("Start")
p.logger.Debug("Start")
pl, err := p.backend.Get(p.pluginID)
if err != nil {
return err
}
if p.spec.Disabled {
if pl.IsEnabled() {
return p.backend.Disable(p.pluginID, &enginetypes.PluginDisableConfig{ForceDisable: false})
}
return nil
}
if !pl.IsEnabled() {
return p.backend.Enable(p.pluginID, &enginetypes.PluginEnableConfig{Timeout: 30})
}
return nil
}
// Wait blocks until the plugin's actual state no longer matches its desired state,
// or until the context is canceled.
func (p *Controller) Wait(ctx context.Context) error {
logrus.WithFields(logrus.Fields{
"controller": "plugin",
}).Debug("Wait")
return nil
p.logger.Debug("Wait")
pl, err := p.backend.Get(p.pluginID)
if err != nil {
return err
}
events, cancel := p.backend.SubscribeEvents(1, plugin.EventDisable{Plugin: pl.PluginObj}, plugin.EventRemove{Plugin: pl.PluginObj}, plugin.EventEnable{Plugin: pl.PluginObj})
defer cancel()
if p.signalWaitReady != nil {
p.signalWaitReady()
}
if !p.spec.Disabled != pl.IsEnabled() {
return errors.New("mismatched plugin state")
}
for {
select {
case <-ctx.Done():
return ctx.Err()
case e := <-events:
p.logger.Debugf("got event %#T", e)
switch e.(type) {
case plugin.EventEnable:
if p.spec.Disabled {
return errors.New("plugin enabled")
}
case plugin.EventRemove:
return errors.New("plugin removed")
case plugin.EventDisable:
if !p.spec.Disabled {
return errors.New("plugin disabled")
}
}
}
}
}
func isNotFound(err error) bool {
_, ok := errors.Cause(err).(plugin.ErrNotFound)
return ok
}
// Shutdown is the shutdown phase from swarmkit
func (p *Controller) Shutdown(ctx context.Context) error {
logrus.WithFields(logrus.Fields{
"controller": "plugin",
}).Debug("Shutdown")
p.logger.Debug("Shutdown")
return nil
}
// Terminate is the terminate phase from swarmkit
func (p *Controller) Terminate(ctx context.Context) error {
logrus.WithFields(logrus.Fields{
"controller": "plugin",
}).Debug("Terminate")
p.logger.Debug("Terminate")
return nil
}
// Remove is the remove phase from swarmkit
func (p *Controller) Remove(ctx context.Context) error {
logrus.WithFields(logrus.Fields{
"controller": "plugin",
}).Debug("Remove")
return nil
p.logger.Debug("Remove")
pl, err := p.backend.Get(p.pluginID)
if err != nil {
if isNotFound(err) {
return nil
}
return err
}
pl.Release()
if pl.GetRefCount() > 0 {
p.logger.Debug("skipping remove due to ref count")
return nil
}
// This may error because we have exactly 1 plugin, but potentially multiple
// tasks which are calling remove.
err = p.backend.Remove(p.pluginID, &enginetypes.PluginRmConfig{ForceRemove: true})
if isNotFound(err) {
return nil
}
return err
}
// Close is the close phase from swarmkit
func (p *Controller) Close() error {
logrus.WithFields(logrus.Fields{
"controller": "plugin",
}).Debug("Close")
p.logger.Debug("Close")
return nil
}
func convertPrivileges(ls []*runtime.PluginPrivilege) enginetypes.PluginPrivileges {
var out enginetypes.PluginPrivileges
for _, p := range ls {
pp := enginetypes.PluginPrivilege{
Name: p.Name,
Description: p.Description,
Value: p.Value,
}
out = append(out, pp)
}
return out
}

View File

@ -0,0 +1,390 @@
package plugin
import (
"errors"
"io"
"io/ioutil"
"net/http"
"strings"
"testing"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/reference"
enginetypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm/runtime"
"github.com/docker/docker/pkg/pubsub"
"github.com/docker/docker/plugin"
"github.com/docker/docker/plugin/v2"
"golang.org/x/net/context"
)
const (
pluginTestName = "test"
pluginTestRemote = "testremote"
pluginTestRemoteUpgrade = "testremote2"
)
func TestPrepare(t *testing.T) {
b := newMockBackend()
c := newTestController(b, false)
ctx := context.Background()
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if b.p == nil {
t.Fatal("pull not performed")
}
c = newTestController(b, false)
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if b.p == nil {
t.Fatal("unexpected nil")
}
if b.p.PluginObj.PluginReference != pluginTestRemoteUpgrade {
t.Fatal("upgrade not performed")
}
c = newTestController(b, false)
c.serviceID = "1"
if err := c.Prepare(ctx); err == nil {
t.Fatal("expected error on prepare")
}
}
func TestStart(t *testing.T) {
b := newMockBackend()
c := newTestController(b, false)
ctx := context.Background()
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
if !b.p.IsEnabled() {
t.Fatal("expected plugin to be enabled")
}
c = newTestController(b, true)
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
if b.p.IsEnabled() {
t.Fatal("expected plugin to be disabled")
}
c = newTestController(b, false)
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
if !b.p.IsEnabled() {
t.Fatal("expected plugin to be enabled")
}
}
func TestWaitCancel(t *testing.T) {
b := newMockBackend()
c := newTestController(b, true)
ctx := context.Background()
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
ctxCancel, cancel := context.WithCancel(ctx)
chErr := make(chan error)
go func() {
chErr <- c.Wait(ctxCancel)
}()
cancel()
select {
case err := <-chErr:
if err != context.Canceled {
t.Fatal(err)
}
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for cancelation")
}
}
func TestWaitDisabled(t *testing.T) {
b := newMockBackend()
c := newTestController(b, true)
ctx := context.Background()
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
chErr := make(chan error)
go func() {
chErr <- c.Wait(ctx)
}()
if err := b.Enable("test", nil); err != nil {
t.Fatal(err)
}
select {
case err := <-chErr:
if err == nil {
t.Fatal("expected error")
}
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for event")
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
ctxWaitReady, cancelCtxWaitReady := context.WithTimeout(ctx, 30*time.Second)
c.signalWaitReady = cancelCtxWaitReady
defer cancelCtxWaitReady()
go func() {
chErr <- c.Wait(ctx)
}()
chEvent, cancel := b.SubscribeEvents(1)
defer cancel()
if err := b.Disable("test", nil); err != nil {
t.Fatal(err)
}
select {
case <-chEvent:
<-ctxWaitReady.Done()
if err := ctxWaitReady.Err(); err == context.DeadlineExceeded {
t.Fatal(err)
}
select {
case <-chErr:
t.Fatal("wait returned unexpectedly")
default:
// all good
}
case <-chErr:
t.Fatal("wait returned unexpectedly")
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for event")
}
if err := b.Remove("test", nil); err != nil {
t.Fatal(err)
}
select {
case err := <-chErr:
if err == nil {
t.Fatal("expected error")
}
if !strings.Contains(err.Error(), "removed") {
t.Fatal(err)
}
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for event")
}
}
func TestWaitEnabled(t *testing.T) {
b := newMockBackend()
c := newTestController(b, false)
ctx := context.Background()
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
chErr := make(chan error)
go func() {
chErr <- c.Wait(ctx)
}()
if err := b.Disable("test", nil); err != nil {
t.Fatal(err)
}
select {
case err := <-chErr:
if err == nil {
t.Fatal("expected error")
}
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for event")
}
if err := c.Start(ctx); err != nil {
t.Fatal(err)
}
ctxWaitReady, ctxWaitCancel := context.WithCancel(ctx)
c.signalWaitReady = ctxWaitCancel
defer ctxWaitCancel()
go func() {
chErr <- c.Wait(ctx)
}()
chEvent, cancel := b.SubscribeEvents(1)
defer cancel()
if err := b.Enable("test", nil); err != nil {
t.Fatal(err)
}
select {
case <-chEvent:
<-ctxWaitReady.Done()
if err := ctxWaitReady.Err(); err == context.DeadlineExceeded {
t.Fatal(err)
}
select {
case <-chErr:
t.Fatal("wait returned unexpectedly")
default:
// all good
}
case <-chErr:
t.Fatal("wait returned unexpectedly")
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for event")
}
if err := b.Remove("test", nil); err != nil {
t.Fatal(err)
}
select {
case err := <-chErr:
if err == nil {
t.Fatal("expected error")
}
if !strings.Contains(err.Error(), "removed") {
t.Fatal(err)
}
case <-time.After(10 * time.Second):
t.Fatal("timeout waiting for event")
}
}
func TestRemove(t *testing.T) {
b := newMockBackend()
c := newTestController(b, false)
ctx := context.Background()
if err := c.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Shutdown(ctx); err != nil {
t.Fatal(err)
}
c2 := newTestController(b, false)
if err := c2.Prepare(ctx); err != nil {
t.Fatal(err)
}
if err := c.Remove(ctx); err != nil {
t.Fatal(err)
}
if b.p == nil {
t.Fatal("plugin removed unexpectedly")
}
if err := c2.Shutdown(ctx); err != nil {
t.Fatal(err)
}
if err := c2.Remove(ctx); err != nil {
t.Fatal(err)
}
if b.p != nil {
t.Fatal("expected plugin to be removed")
}
}
func newTestController(b Backend, disabled bool) *Controller {
return &Controller{
logger: &logrus.Entry{Logger: &logrus.Logger{Out: ioutil.Discard}},
backend: b,
spec: runtime.PluginSpec{
Name: pluginTestName,
Remote: pluginTestRemote,
Disabled: disabled,
},
}
}
func newMockBackend() *mockBackend {
return &mockBackend{
pub: pubsub.NewPublisher(0, 0),
}
}
type mockBackend struct {
p *v2.Plugin
pub *pubsub.Publisher
}
func (m *mockBackend) Disable(name string, config *enginetypes.PluginDisableConfig) error {
m.p.PluginObj.Enabled = false
m.pub.Publish(plugin.EventDisable{})
return nil
}
func (m *mockBackend) Enable(name string, config *enginetypes.PluginEnableConfig) error {
m.p.PluginObj.Enabled = true
m.pub.Publish(plugin.EventEnable{})
return nil
}
func (m *mockBackend) Remove(name string, config *enginetypes.PluginRmConfig) error {
m.p = nil
m.pub.Publish(plugin.EventRemove{})
return nil
}
func (m *mockBackend) Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error {
m.p = &v2.Plugin{
PluginObj: enginetypes.Plugin{
ID: "1234",
Name: name,
PluginReference: ref.String(),
},
}
return nil
}
func (m *mockBackend) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error {
m.p.PluginObj.PluginReference = pluginTestRemoteUpgrade
return nil
}
func (m *mockBackend) Get(name string) (*v2.Plugin, error) {
if m.p == nil {
return nil, errors.New("not found")
}
return m.p, nil
}
func (m *mockBackend) SubscribeEvents(buffer int, events ...plugin.Event) (eventCh <-chan interface{}, cancel func()) {
ch := m.pub.SubscribeTopicWithBuffer(nil, buffer)
cancel = func() { m.pub.Evict(ch) }
return ch, cancel
}

View File

@ -13,8 +13,11 @@ import (
gogotypes "github.com/gogo/protobuf/types"
)
func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec {
containerSpec := types.ContainerSpec{
func containerSpecFromGRPC(c *swarmapi.ContainerSpec) *types.ContainerSpec {
if c == nil {
return nil
}
containerSpec := &types.ContainerSpec{
Image: c.Image,
Labels: c.Labels,
Command: c.Command,
@ -211,7 +214,7 @@ func configReferencesFromGRPC(sr []*swarmapi.ConfigReference) []*types.ConfigRef
return refs
}
func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
func containerToGRPC(c *types.ContainerSpec) (*swarmapi.ContainerSpec, error) {
containerSpec := &swarmapi.ContainerSpec{
Image: c.Image,
Labels: c.Labels,

View File

@ -1,14 +1,16 @@
package convert
import (
"errors"
"fmt"
"strings"
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/swarm/runtime"
"github.com/docker/docker/pkg/namesgenerator"
swarmapi "github.com/docker/swarmkit/api"
"github.com/gogo/protobuf/proto"
gogotypes "github.com/gogo/protobuf/types"
"github.com/pkg/errors"
)
var (
@ -85,7 +87,10 @@ func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) (*types.ServiceSpec, error)
}
taskTemplate := taskSpecFromGRPC(spec.Task)
taskTemplate, err := taskSpecFromGRPC(spec.Task)
if err != nil {
return nil, err
}
switch t := spec.Task.GetRuntime().(type) {
case *swarmapi.TaskSpec_Container:
@ -164,19 +169,34 @@ func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) {
switch s.TaskTemplate.Runtime {
case types.RuntimeContainer, "": // if empty runtime default to container
containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec)
if err != nil {
return swarmapi.ServiceSpec{}, err
if s.TaskTemplate.ContainerSpec != nil {
containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec)
if err != nil {
return swarmapi.ServiceSpec{}, err
}
spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec}
}
spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec}
case types.RuntimePlugin:
spec.Task.Runtime = &swarmapi.TaskSpec_Generic{
Generic: &swarmapi.GenericRuntimeSpec{
Kind: string(types.RuntimePlugin),
Payload: &gogotypes.Any{
TypeUrl: string(types.RuntimeURLPlugin),
if s.Mode.Replicated != nil {
return swarmapi.ServiceSpec{}, errors.New("plugins must not use replicated mode")
}
s.Mode.Global = &types.GlobalService{} // must always be global
if s.TaskTemplate.PluginSpec != nil {
pluginSpec, err := proto.Marshal(s.TaskTemplate.PluginSpec)
if err != nil {
return swarmapi.ServiceSpec{}, err
}
spec.Task.Runtime = &swarmapi.TaskSpec_Generic{
Generic: &swarmapi.GenericRuntimeSpec{
Kind: string(types.RuntimePlugin),
Payload: &gogotypes.Any{
TypeUrl: string(types.RuntimeURLPlugin),
Value: pluginSpec,
},
},
},
}
}
default:
return swarmapi.ServiceSpec{}, ErrUnsupportedRuntime
@ -507,21 +527,14 @@ func updateConfigToGRPC(updateConfig *types.UpdateConfig) (*swarmapi.UpdateConfi
return converted, nil
}
func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) types.TaskSpec {
func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) (types.TaskSpec, error) {
taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(taskSpec.Networks))
for _, n := range taskSpec.Networks {
netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts}
taskNetworks = append(taskNetworks, netConfig)
}
c := taskSpec.GetContainer()
cSpec := types.ContainerSpec{}
if c != nil {
cSpec = containerSpecFromGRPC(c)
}
return types.TaskSpec{
ContainerSpec: cSpec,
t := types.TaskSpec{
Resources: resourcesFromGRPC(taskSpec.Resources),
RestartPolicy: restartPolicyFromGRPC(taskSpec.Restart),
Placement: placementFromGRPC(taskSpec.Placement),
@ -529,4 +542,26 @@ func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) types.TaskSpec {
Networks: taskNetworks,
ForceUpdate: taskSpec.ForceUpdate,
}
switch taskSpec.GetRuntime().(type) {
case *swarmapi.TaskSpec_Container, nil:
c := taskSpec.GetContainer()
if c != nil {
t.ContainerSpec = containerSpecFromGRPC(c)
}
case *swarmapi.TaskSpec_Generic:
g := taskSpec.GetGeneric()
if g != nil {
switch g.Kind {
case string(types.RuntimePlugin):
var p runtime.PluginSpec
if err := proto.Unmarshal(g.Payload.Value, &p); err != nil {
return t, errors.Wrap(err, "error unmarshalling plugin spec")
}
t.PluginSpec = &p
}
}
}
return t, nil
}

View File

@ -4,6 +4,7 @@ import (
"testing"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/swarm/runtime"
swarmapi "github.com/docker/swarmkit/api"
google_protobuf3 "github.com/gogo/protobuf/types"
)
@ -82,7 +83,8 @@ func TestServiceConvertFromGRPCGenericRuntimePlugin(t *testing.T) {
func TestServiceConvertToGRPCGenericRuntimePlugin(t *testing.T) {
s := swarmtypes.ServiceSpec{
TaskTemplate: swarmtypes.TaskSpec{
Runtime: swarmtypes.RuntimePlugin,
Runtime: swarmtypes.RuntimePlugin,
PluginSpec: &runtime.PluginSpec{},
},
Mode: swarmtypes.ServiceMode{
Global: &swarmtypes.GlobalService{},
@ -108,7 +110,7 @@ func TestServiceConvertToGRPCContainerRuntime(t *testing.T) {
image := "alpine:latest"
s := swarmtypes.ServiceSpec{
TaskTemplate: swarmtypes.TaskSpec{
ContainerSpec: swarmtypes.ContainerSpec{
ContainerSpec: &swarmtypes.ContainerSpec{
Image: image,
},
},

View File

@ -9,19 +9,22 @@ import (
)
// TaskFromGRPC converts a grpc Task to a Task.
func TaskFromGRPC(t swarmapi.Task) types.Task {
func TaskFromGRPC(t swarmapi.Task) (types.Task, error) {
if t.Spec.GetAttachment() != nil {
return types.Task{}
return types.Task{}, nil
}
containerStatus := t.Status.GetContainer()
taskSpec, err := taskSpecFromGRPC(t.Spec)
if err != nil {
return types.Task{}, err
}
task := types.Task{
ID: t.ID,
Annotations: annotationsFromGRPC(t.Annotations),
ServiceID: t.ServiceID,
Slot: int(t.Slot),
NodeID: t.NodeID,
Spec: taskSpecFromGRPC(t.Spec),
Spec: taskSpec,
Status: types.TaskStatus{
State: types.TaskState(strings.ToLower(t.Status.State.String())),
Message: t.Status.Message,
@ -49,7 +52,7 @@ func TaskFromGRPC(t swarmapi.Task) types.Task {
}
if t.Status.PortStatus == nil {
return task
return task, nil
}
for _, p := range t.Status.PortStatus.Ports {
@ -62,5 +65,5 @@ func TaskFromGRPC(t swarmapi.Task) types.Task {
})
}
return task
return task, nil
}

View File

@ -199,7 +199,7 @@ func (c *containerAdapter) networkAttach(ctx context.Context) error {
}
}
return c.backend.UpdateAttachment(networkName, networkID, c.container.id(), config)
return c.backend.UpdateAttachment(networkName, networkID, c.container.networkAttachmentContainerID(), config)
}
func (c *containerAdapter) waitForDetach(ctx context.Context) error {
@ -218,7 +218,7 @@ func (c *containerAdapter) waitForDetach(ctx context.Context) error {
}
}
return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.id())
return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.networkAttachmentContainerID())
}
func (c *containerAdapter) create(ctx context.Context) error {

View File

@ -77,7 +77,7 @@ func (c *containerConfig) setTask(t *api.Task) error {
c.task = t
if t.Spec.GetContainer() != nil {
preparedSpec, err := template.ExpandContainerSpec(t)
preparedSpec, err := template.ExpandContainerSpec(nil, t)
if err != nil {
return err
}
@ -89,7 +89,7 @@ func (c *containerConfig) setTask(t *api.Task) error {
return nil
}
func (c *containerConfig) id() string {
func (c *containerConfig) networkAttachmentContainerID() string {
attachment := c.task.Spec.GetAttachment()
if attachment == nil {
return ""
@ -115,7 +115,7 @@ func (c *containerConfig) nameOrID() string {
return c.name()
}
return c.id()
return c.networkAttachmentContainerID()
}
func (c *containerConfig) name() string {

View File

@ -28,11 +28,10 @@ const defaultGossipConvergeDelay = 2 * time.Second
// Most operations against docker's API are done through the container name,
// which is unique to the task.
type controller struct {
task *api.Task
adapter *containerAdapter
closed chan struct{}
err error
task *api.Task
adapter *containerAdapter
closed chan struct{}
err error
pulled chan struct{} // closed after pull
cancelPull func() // cancels pull context if not nil
pullErr error // pull error, only read after pulled closed
@ -146,7 +145,6 @@ func (r *controller) Prepare(ctx context.Context) error {
}
}
}
if err := r.adapter.create(ctx); err != nil {
if isContainerCreateNameConflict(err) {
if _, err := r.adapter.inspect(ctx); err != nil {
@ -343,7 +341,7 @@ func (r *controller) Shutdown(ctx context.Context) error {
}
// add a delay for gossip converge
// TODO(dongluochen): this delay shoud be configurable to fit different cluster size and network delay.
// TODO(dongluochen): this delay should be configurable to fit different cluster size and network delay.
time.Sleep(defaultGossipConvergeDelay)
}
@ -564,15 +562,8 @@ func (r *controller) matchevent(event events.Message) bool {
if event.Type != events.ContainerEventType {
return false
}
// TODO(stevvooe): Filter based on ID matching, in addition to name.
// Make sure the events are for this container.
if event.Actor.Attributes["name"] != r.adapter.container.name() {
return false
}
return true
// we can't filter using id since it will have huge chances to introduce a deadlock. see #33377.
return event.Actor.Attributes["name"] == r.adapter.container.name()
}
func (r *controller) checkClosed() error {

View File

@ -22,15 +22,17 @@ import (
)
type executor struct {
backend executorpkg.Backend
dependencies exec.DependencyManager
backend executorpkg.Backend
pluginBackend plugin.Backend
dependencies exec.DependencyManager
}
// NewExecutor returns an executor from the docker client.
func NewExecutor(b executorpkg.Backend) exec.Executor {
func NewExecutor(b executorpkg.Backend, p plugin.Backend) exec.Executor {
return &executor{
backend: b,
dependencies: agent.NewDependencyManager(),
backend: b,
pluginBackend: p,
dependencies: agent.NewDependencyManager(),
}
}
@ -181,7 +183,7 @@ func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
}
switch runtimeKind {
case string(swarmtypes.RuntimePlugin):
c, err := plugin.NewController()
c, err := plugin.NewController(e.pluginBackend, t)
if err != nil {
return ctlr, err
}

View File

@ -57,6 +57,7 @@ func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) e
// internal use in checking create/update progress. Therefore,
// we prefix it with a '_'.
"_up-to-date": true,
"runtime": true,
}
if err := filter.Validate(accepted); err != nil {
return nil, err
@ -73,6 +74,7 @@ func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) e
ServiceIDs: filter.Get("service"),
NodeIDs: filter.Get("node"),
UpToDate: len(filter.Get("_up-to-date")) != 0,
Runtimes: filter.Get("runtime"),
}
for _, s := range filter.Get("desired-state") {

View File

@ -118,7 +118,7 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
JoinAddr: joinAddr,
StateDir: n.cluster.root,
JoinToken: conf.joinToken,
Executor: container.NewExecutor(n.cluster.config.Backend),
Executor: container.NewExecutor(n.cluster.config.Backend, n.cluster.config.PluginBackend),
HeartbeatTick: 1,
ElectionTick: 3,
UnlockKey: conf.lockKey,
@ -202,6 +202,10 @@ func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientCo
Kind: "secret",
Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
},
{
Kind: "config",
Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove,
},
},
IncludeOldObject: true,
})

View File

@ -50,14 +50,16 @@ func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Serv
return nil, err
}
if len(options.Filters.Get("runtime")) == 0 {
// Default to using the container runtime filter
options.Filters.Add("runtime", string(types.RuntimeContainer))
}
filters := &swarmapi.ListServicesRequest_Filters{
NamePrefixes: options.Filters.Get("name"),
IDPrefixes: options.Filters.Get("id"),
Labels: runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")),
// (ehazlett): hardcode runtime for now. eventually we will
// be able to filter for the desired runtimes once more
// are supported.
Runtimes: []string{string(types.RuntimeContainer)},
Runtimes: options.Filters.Get("runtime"),
}
ctx, cancel := c.getRequestContext()
@ -134,6 +136,20 @@ func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRe
switch serviceSpec.Task.Runtime.(type) {
// handle other runtimes here
case *swarmapi.TaskSpec_Generic:
switch serviceSpec.Task.GetGeneric().Kind {
case string(types.RuntimePlugin):
if s.TaskTemplate.PluginSpec == nil {
return errors.New("plugin spec must be set")
}
}
r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
if err != nil {
return err
}
resp.ID = r.Service.ID
case *swarmapi.TaskSpec_Container:
ctnr := serviceSpec.Task.GetContainer()
if ctnr == nil {
@ -146,7 +162,9 @@ func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRe
// retrieve auth config from encoded auth
authConfig := &apitypes.AuthConfig{}
if encodedAuth != "" {
if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
authReader := strings.NewReader(encodedAuth)
dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader))
if err := dec.Decode(authConfig); err != nil {
logrus.Warnf("invalid authconfig: %v", err)
}
}
@ -216,75 +234,85 @@ func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec typ
return err
}
newCtnr := serviceSpec.Task.GetContainer()
if newCtnr == nil {
return errors.New("service does not use container tasks")
}
encodedAuth := flags.EncodedRegistryAuth
if encodedAuth != "" {
newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
} else {
// this is needed because if the encodedAuth isn't being updated then we
// shouldn't lose it, and continue to use the one that was already present
var ctnr *swarmapi.ContainerSpec
switch flags.RegistryAuthFrom {
case apitypes.RegistryAuthFromSpec, "":
ctnr = currentService.Spec.Task.GetContainer()
case apitypes.RegistryAuthFromPreviousSpec:
if currentService.PreviousSpec == nil {
return errors.New("service does not have a previous spec")
}
ctnr = currentService.PreviousSpec.Task.GetContainer()
default:
return errors.New("unsupported registryAuthFrom value")
}
if ctnr == nil {
return errors.New("service does not use container tasks")
}
newCtnr.PullOptions = ctnr.PullOptions
// update encodedAuth so it can be used to pin image by digest
if ctnr.PullOptions != nil {
encodedAuth = ctnr.PullOptions.RegistryAuth
}
}
// retrieve auth config from encoded auth
authConfig := &apitypes.AuthConfig{}
if encodedAuth != "" {
if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
logrus.Warnf("invalid authconfig: %v", err)
}
}
resp = &apitypes.ServiceUpdateResponse{}
// pin image by digest for API versions < 1.30
// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
// should be removed in the future. Since integration tests only use the
// latest API version, so this is no longer required.
if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
if err != nil {
logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
// warning in the client response should be concise
resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image))
} else if newCtnr.Image != digestImage {
logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
newCtnr.Image = digestImage
} else {
logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
switch serviceSpec.Task.Runtime.(type) {
case *swarmapi.TaskSpec_Generic:
switch serviceSpec.Task.GetGeneric().Kind {
case string(types.RuntimePlugin):
if spec.TaskTemplate.PluginSpec == nil {
return errors.New("plugin spec must be set")
}
}
case *swarmapi.TaskSpec_Container:
newCtnr := serviceSpec.Task.GetContainer()
if newCtnr == nil {
return errors.New("service does not use container tasks")
}
// Replace the context with a fresh one.
// If we timed out while communicating with the
// registry, then "ctx" will already be expired, which
// would cause UpdateService below to fail. Reusing
// "ctx" could make it impossible to update a service
// if the registry is slow or unresponsive.
var cancel func()
ctx, cancel = c.getRequestContext()
defer cancel()
encodedAuth := flags.EncodedRegistryAuth
if encodedAuth != "" {
newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
} else {
// this is needed because if the encodedAuth isn't being updated then we
// shouldn't lose it, and continue to use the one that was already present
var ctnr *swarmapi.ContainerSpec
switch flags.RegistryAuthFrom {
case apitypes.RegistryAuthFromSpec, "":
ctnr = currentService.Spec.Task.GetContainer()
case apitypes.RegistryAuthFromPreviousSpec:
if currentService.PreviousSpec == nil {
return errors.New("service does not have a previous spec")
}
ctnr = currentService.PreviousSpec.Task.GetContainer()
default:
return errors.New("unsupported registryAuthFrom value")
}
if ctnr == nil {
return errors.New("service does not use container tasks")
}
newCtnr.PullOptions = ctnr.PullOptions
// update encodedAuth so it can be used to pin image by digest
if ctnr.PullOptions != nil {
encodedAuth = ctnr.PullOptions.RegistryAuth
}
}
// retrieve auth config from encoded auth
authConfig := &apitypes.AuthConfig{}
if encodedAuth != "" {
if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
logrus.Warnf("invalid authconfig: %v", err)
}
}
// pin image by digest for API versions < 1.30
// TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE"
// should be removed in the future. Since integration tests only use the
// latest API version, so this is no longer required.
if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry {
digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig)
if err != nil {
logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error())
// warning in the client response should be concise
resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image))
} else if newCtnr.Image != digestImage {
logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage)
newCtnr.Image = digestImage
} else {
logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image)
}
// Replace the context with a fresh one.
// If we timed out while communicating with the
// registry, then "ctx" will already be expired, which
// would cause UpdateService below to fail. Reusing
// "ctx" could make it impossible to update a service
// if the registry is slow or unresponsive.
var cancel func()
ctx, cancel = c.getRequestContext()
defer cancel()
}
}
var rollback swarmapi.UpdateServiceRequest_Rollback

View File

@ -19,7 +19,7 @@ func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, erro
return nil, c.errNoManager(state)
}
byName := func(filter filters.Args) error {
filterTransform := func(filter filters.Args) error {
if filter.Include("service") {
serviceFilters := filter.Get("service")
for _, serviceFilter := range serviceFilters {
@ -42,10 +42,15 @@ func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, erro
filter.Add("node", node.ID)
}
}
if !filter.Include("runtime") {
// default to only showing container tasks
filter.Add("runtime", "container")
filter.Add("runtime", "")
}
return nil
}
filters, err := newListTasksFilters(options.Filters, byName)
filters, err := newListTasksFilters(options.Filters, filterTransform)
if err != nil {
return nil, err
}
@ -61,11 +66,12 @@ func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, erro
}
tasks := make([]types.Task, 0, len(r.Tasks))
for _, task := range r.Tasks {
if task.Spec.GetContainer() != nil {
tasks = append(tasks, convert.TaskFromGRPC(*task))
t, err := convert.TaskFromGRPC(*task)
if err != nil {
return nil, err
}
tasks = append(tasks, t)
}
return tasks, nil
}
@ -83,5 +89,5 @@ func (c *Cluster) GetTask(input string) (types.Task, error) {
}); err != nil {
return types.Task{}, err
}
return convert.TaskFromGRPC(*task), nil
return convert.TaskFromGRPC(*task)
}

View File

@ -886,7 +886,12 @@ func (daemon *Daemon) initializeNetworking(container *container.Container) error
if err != nil {
return err
}
initializeNetworkingPaths(container, nc)
err = daemon.initializeNetworkingPaths(container, nc)
if err != nil {
return err
}
container.Config.Hostname = nc.Config.Hostname
container.Config.Domainname = nc.Config.Domainname
return nil

View File

@ -42,5 +42,6 @@ func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]
return nil
}
func initializeNetworkingPaths(container *container.Container, nc *container.Container) {
func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error {
return nil
}

View File

@ -9,7 +9,6 @@ import (
"os"
"path/filepath"
"strconv"
"syscall"
"time"
"github.com/Sirupsen/logrus"
@ -22,6 +21,7 @@ import (
"github.com/docker/libnetwork"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) {
@ -125,7 +125,7 @@ func (daemon *Daemon) setupIpcDirs(c *container.Container) error {
shmSize = c.HostConfig.ShmSize
}
shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10)
if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil {
if err := unix.Mount("shm", shmPath, "tmpfs", uintptr(unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil {
return fmt.Errorf("mounting shm tmpfs: %s", err)
}
if err := os.Chown(shmPath, rootIDs.UID, rootIDs.GID); err != nil {
@ -301,8 +301,8 @@ func killProcessDirectly(cntr *container.Container) error {
// Ensure that we don't kill ourselves
if pid := cntr.GetPID(); pid != 0 {
logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(cntr.ID))
if err := syscall.Kill(pid, 9); err != nil {
if err != syscall.ESRCH {
if err := unix.Kill(pid, 9); err != nil {
if err != unix.ESRCH {
return err
}
e := errNoSuchProcess{pid, 9}
@ -315,7 +315,7 @@ func killProcessDirectly(cntr *container.Container) error {
}
func detachMounted(path string) error {
return syscall.Unmount(path, syscall.MNT_DETACH)
return unix.Unmount(path, unix.MNT_DETACH)
}
func isLinkable(child *container.Container) bool {
@ -349,8 +349,9 @@ func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]
return nil
}
func initializeNetworkingPaths(container *container.Container, nc *container.Container) {
func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error {
container.HostnamePath = nc.HostnamePath
container.HostsPath = nc.HostsPath
container.ResolvConfPath = nc.ResolvConfPath
return nil
}

View File

@ -160,6 +160,43 @@ func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]
return nil
}
func initializeNetworkingPaths(container *container.Container, nc *container.Container) {
func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error {
if nc.HostConfig.Isolation.IsHyperV() {
return fmt.Errorf("sharing of hyperv containers network is not supported")
}
container.NetworkSharedContainerID = nc.ID
if nc.NetworkSettings != nil {
for n := range nc.NetworkSettings.Networks {
sn, err := daemon.FindNetwork(n)
if err != nil {
continue
}
ep, err := nc.GetEndpointInNetwork(sn)
if err != nil {
continue
}
data, err := ep.DriverInfo()
if err != nil {
continue
}
if data["GW_INFO"] != nil {
gwInfo := data["GW_INFO"].(map[string]interface{})
if gwInfo["hnsid"] != nil {
container.SharedEndpointList = append(container.SharedEndpointList, gwInfo["hnsid"].(string))
}
}
if data["hnsid"] != nil {
container.SharedEndpointList = append(container.SharedEndpointList, data["hnsid"].(string))
}
}
}
return nil
}

View File

@ -43,7 +43,6 @@ import (
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/registrar"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/truncindex"
@ -838,42 +837,7 @@ func (daemon *Daemon) waitForStartupDone() {
func (daemon *Daemon) shutdownContainer(c *container.Container) error {
stopTimeout := c.StopTimeout()
// TODO(windows): Handle docker restart with paused containers
if c.IsPaused() {
// To terminate a process in freezer cgroup, we should send
// SIGTERM to this process then unfreeze it, and the process will
// force to terminate immediately.
logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
sig, ok := signal.SignalMap["TERM"]
if !ok {
return errors.New("System does not support SIGTERM")
}
if err := daemon.kill(c, int(sig)); err != nil {
return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
}
if err := daemon.containerUnpause(c); err != nil {
return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(stopTimeout)*time.Second)
defer cancel()
// Wait with timeout for container to exit.
if status := <-c.Wait(ctx, container.WaitConditionNotRunning); status.Err() != nil {
logrus.Debugf("container %s failed to exit in %d second of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout)
sig, ok := signal.SignalMap["KILL"]
if !ok {
return errors.New("System does not support SIGKILL")
}
if err := daemon.kill(c, int(sig)); err != nil {
logrus.Errorf("Failed to SIGKILL container %s", c.ID)
}
// Wait for exit again without a timeout.
// Explicitly ignore the result.
_ = <-c.Wait(context.Background(), container.WaitConditionNotRunning)
return status.Err()
}
}
// If container failed to exit in stopTimeout seconds of SIGTERM, then using the force
if err := daemon.containerStop(c, stopTimeout); err != nil {
return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
@ -1243,3 +1207,11 @@ func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
}
return nil
}
// fixMemorySwappiness clears MemorySwappiness when it is set to -1: the CLI
// sends -1 to mean "unset", so the server side drops the value rather than
// storing the sentinel.
func fixMemorySwappiness(resources *containertypes.Resources) {
	swappiness := resources.MemorySwappiness
	if swappiness == nil {
		return
	}
	if *swappiness == -1 {
		resources.MemorySwappiness = nil
	}
}

View File

@ -143,6 +143,7 @@ func UsingSystemd(config *Config) bool {
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
fixMemorySwappiness(resources)
warnings := []string{}
sysInfo := sysinfo.New(true)
// NOTE: We do not enforce a minimum value for swap limits for zones on Solaris and
@ -163,7 +164,7 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.
}
// Solaris NOTE: We allow and encourage setting the swap without setting the memory limit.
if hostConfig.MemorySwappiness != nil && *hostConfig.MemorySwappiness != -1 && !sysInfo.MemorySwappiness {
if hostConfig.MemorySwappiness != nil && !sysInfo.MemorySwappiness {
warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
hostConfig.MemorySwappiness = nil

View File

@ -282,10 +282,6 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf
return err
}
hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...)
if hostConfig.MemorySwappiness == nil {
defaultSwappiness := int64(-1)
hostConfig.MemorySwappiness = &defaultSwappiness
}
if hostConfig.OomKillDisable == nil {
defaultOomKillDisable := false
hostConfig.OomKillDisable = &defaultOomKillDisable
@ -296,6 +292,7 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf
func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) {
warnings := []string{}
fixMemorySwappiness(resources)
// memory subsystem checks and adjustments
if resources.Memory != 0 && resources.Memory < linuxMinMemory {
@ -318,14 +315,14 @@ func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysi
if resources.Memory == 0 && resources.MemorySwap > 0 && !update {
return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage")
}
if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness {
if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness {
warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.")
logrus.Warn("Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded.")
resources.MemorySwappiness = nil
}
if resources.MemorySwappiness != nil {
swappiness := *resources.MemorySwappiness
if swappiness < -1 || swappiness > 100 {
if swappiness < 0 || swappiness > 100 {
return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness)
}
}
@ -1168,6 +1165,9 @@ func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
}
stats, err := daemon.containerd.Stats(c.ID)
if err != nil {
if strings.Contains(err.Error(), "container not found") {
return nil, errNotFound{c.ID}
}
return nil, err
}
s := &types.StatsJSON{}

View File

@ -100,7 +100,7 @@ func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConf
func verifyContainerResources(resources *containertypes.Resources, isHyperv bool) ([]string, error) {
warnings := []string{}
fixMemorySwappiness(resources)
if !isHyperv {
// The processor resource controls are mutually exclusive on
// Windows Server Containers, the order of precedence is
@ -197,7 +197,7 @@ func verifyContainerResources(resources *containertypes.Resources, isHyperv bool
if resources.MemorySwap != 0 {
return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap")
}
if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 {
if resources.MemorySwappiness != nil {
return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness")
}
if resources.OomKillDisable != nil && *resources.OomKillDisable {
@ -525,6 +525,9 @@ func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
// Obtain the stats from HCS via libcontainerd
stats, err := daemon.containerd.Stats(c.ID)
if err != nil {
if strings.Contains(err.Error(), "container not found") {
return nil, errNotFound{c.ID}
}
return nil, err
}

View File

@ -5,15 +5,15 @@ package daemon
import (
"os"
"os/signal"
"syscall"
"github.com/Sirupsen/logrus"
stackdump "github.com/docker/docker/pkg/signal"
"golang.org/x/sys/unix"
)
func (d *Daemon) setupDumpStackTrap(root string) {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGUSR1)
signal.Notify(c, unix.SIGUSR1)
go func() {
for range c {
path, err := stackdump.DumpStacks(root)

View File

@ -3,13 +3,13 @@ package daemon
import (
"fmt"
"os"
"syscall"
"unsafe"
winio "github.com/Microsoft/go-winio"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/windows"
)
func (d *Daemon) setupDumpStackTrap(root string) {
@ -22,7 +22,7 @@ func (d *Daemon) setupDumpStackTrap(root string) {
logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", ev, err.Error())
return
}
var sa syscall.SecurityAttributes
var sa windows.SecurityAttributes
sa.Length = uint32(unsafe.Sizeof(sa))
sa.InheritHandle = 1
sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
@ -34,7 +34,7 @@ func (d *Daemon) setupDumpStackTrap(root string) {
go func() {
logrus.Debugf("Stackdump - waiting signal at %s", ev)
for {
syscall.WaitForSingleObject(h, syscall.INFINITE)
windows.WaitForSingleObject(h, windows.INFINITE)
path, err := signal.DumpStacks(root)
if err != nil {
logrus.WithError(err).Error("failed to write goroutines dump")

View File

@ -87,8 +87,8 @@ func TestDiscoveryOpts(t *testing.T) {
t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat)
}
discaveryTTL := fmt.Sprintf("%d", defaultDiscoveryTTLFactor-1)
clusterOpts = map[string]string{"discovery.ttl": discaveryTTL}
discoveryTTL := fmt.Sprintf("%d", defaultDiscoveryTTLFactor-1)
clusterOpts = map[string]string{"discovery.ttl": discoveryTTL}
heartbeat, ttl, err = discoveryOpts(clusterOpts)
if err == nil && heartbeat == 0 {
t.Fatal("discovery.heartbeat must be positive")

View File

@ -39,3 +39,15 @@ func errExecPaused(id string) error {
err := fmt.Errorf("Container %s is paused, unpause the container before exec", id)
return errors.NewRequestConflictError(err)
}
// errNotFound indicates that no container with the given ID exists.
type errNotFound struct {
	containerID string
}

// ContainerNotFound marks this error as a container-not-found condition
// (presumably checked via type assertion when mapping errors to API
// responses — confirm against the HTTP error-code mapping).
func (err errNotFound) ContainerNotFound() bool {
	return true
}

// Error implements the error interface.
func (err errNotFound) Error() string {
	msg := fmt.Sprintf("Container %s is not found", err.containerID)
	return msg
}

View File

@ -175,6 +175,8 @@ func (daemon *Daemon) generateClusterEvent(msg *swarmapi.WatchMessage) {
daemon.logNetworkEvent(event.Action, v.Network, event.OldObject.GetNetwork())
case *swarmapi.Object_Secret:
daemon.logSecretEvent(event.Action, v.Secret, event.OldObject.GetSecret())
case *swarmapi.Object_Config:
daemon.logConfigEvent(event.Action, v.Config, event.OldObject.GetConfig())
default:
logrus.Warnf("unrecognized event: %v", event)
}
@ -197,6 +199,14 @@ func (daemon *Daemon) logSecretEvent(action swarmapi.WatchActionKind, secret *sw
daemon.logClusterEvent(action, secret.ID, "secret", attributes, eventTime)
}
// logConfigEvent emits a cluster event for a swarm config change. The
// oldConfig argument is currently unused; it is kept so the signature
// matches the sibling log*Event helpers.
func (daemon *Daemon) logConfigEvent(action swarmapi.WatchActionKind, config *swarmapi.Config, oldConfig *swarmapi.Config) {
	attrs := map[string]string{
		"name": config.Spec.Annotations.Name,
	}
	ts := eventTimestamp(config.Meta, action)
	daemon.logClusterEvent(action, config.ID, "config", attrs, ts)
}
func (daemon *Daemon) logNodeEvent(action swarmapi.WatchActionKind, node *swarmapi.Node, oldNode *swarmapi.Node) {
name := node.Spec.Annotations.Name
if name == "" && node.Description != nil {

View File

@ -247,7 +247,7 @@ func TestLoadBufferedEventsOnlyFromPast(t *testing.T) {
}
// #13753
func TestIngoreBufferedWhenNoTimes(t *testing.T) {
func TestIgnoreBufferedWhenNoTimes(t *testing.T) {
m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)")
if err != nil {
t.Fatal(err)

View File

@ -94,6 +94,10 @@ func (ef *Filter) matchSecret(ev events.Message) bool {
return ef.fuzzyMatchName(ev, events.SecretEventType)
}
// matchConfig reports whether ev fuzzy-matches the config event-type filter,
// comparing the filter against the event's actor ID and actor name.
func (ef *Filter) matchConfig(ev events.Message) bool {
	kind := events.ConfigEventType
	return ef.fuzzyMatchName(ev, kind)
}
func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool {
return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) ||
ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"])

View File

@ -33,12 +33,9 @@ import (
"path/filepath"
"strings"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
@ -47,9 +44,10 @@ import (
"github.com/docker/docker/pkg/locker"
mountpk "github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/vbatts/tar-split/tar/storage"
"golang.org/x/sys/unix"
)
var (
@ -295,7 +293,7 @@ func (a *Driver) Remove(id string) error {
}
if err := a.unmount(mountpoint); err != nil {
if err != syscall.EBUSY {
if err != unix.EBUSY {
return fmt.Errorf("aufs: unmount error: %s: %v", mountpoint, err)
}
if retries >= 5 {
@ -315,7 +313,7 @@ func (a *Driver) Remove(id string) error {
// the whole tree.
tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
if err == syscall.EBUSY {
if err == unix.EBUSY {
logrus.Warn("os.Rename err due to EBUSY")
}
return err
@ -575,7 +573,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
if useDirperm() {
offset += len(",dirperm1")
}
b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
bp := copy(b, fmt.Sprintf("br:%s=rw", rw))
index := 0
@ -599,7 +597,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
for ; index < len(ro); index++ {
layer := fmt.Sprintf(":%s=ro+wh", ro[index])
data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil {
if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
return
}
}

View File

@ -4,9 +4,9 @@ package aufs
import (
"os/exec"
"syscall"
"github.com/Sirupsen/logrus"
"golang.org/x/sys/unix"
)
// Unmount the target specified.
@ -14,7 +14,7 @@ func Unmount(target string) error {
if err := exec.Command("auplink", target, "flush").Run(); err != nil {
logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err)
}
if err := syscall.Unmount(target, 0); err != nil {
if err := unix.Unmount(target, 0); err != nil {
return err
}
return nil

View File

@ -1,7 +1,7 @@
package aufs
import "syscall"
import "golang.org/x/sys/unix"
func mount(source string, target string, fstype string, flags uintptr, data string) error {
return syscall.Mount(source, target, fstype, flags, data)
return unix.Mount(source, target, fstype, flags, data)
}

View File

@ -24,7 +24,6 @@ import (
"strconv"
"strings"
"sync"
"syscall"
"unsafe"
"github.com/Sirupsen/logrus"
@ -35,6 +34,7 @@ import (
"github.com/docker/docker/pkg/system"
"github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label"
"golang.org/x/sys/unix"
)
func init() {
@ -197,7 +197,7 @@ func subvolCreate(path, name string) error {
args.name[i] = C.char(c)
}
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE,
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error())
@ -225,7 +225,7 @@ func subvolSnapshot(src, dest, name string) error {
C.set_name_btrfs_ioctl_vol_args_v2(&args, cs)
C.free(unsafe.Pointer(cs))
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error())
@ -234,8 +234,8 @@ func subvolSnapshot(src, dest, name string) error {
}
func isSubvolume(p string) (bool, error) {
var bufStat syscall.Stat_t
if err := syscall.Lstat(p, &bufStat); err != nil {
var bufStat unix.Stat_t
if err := unix.Lstat(p, &bufStat); err != nil {
return false, err
}
@ -287,7 +287,7 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error {
var args C.struct_btrfs_ioctl_qgroup_create_args
args.qgroupid = C.__u64(qgroupid)
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE,
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error())
@ -302,7 +302,7 @@ func subvolDelete(dirpath, name string, quotaEnabled bool) error {
for i, c := range []byte(name) {
args.name[i] = C.char(c)
}
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error())
@ -338,7 +338,7 @@ func (d *Driver) subvolEnableQuota() error {
var args C.struct_btrfs_ioctl_quota_ctl_args
args.cmd = C.BTRFS_QUOTA_CTL_ENABLE
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error())
@ -364,7 +364,7 @@ func (d *Driver) subvolDisableQuota() error {
var args C.struct_btrfs_ioctl_quota_ctl_args
args.cmd = C.BTRFS_QUOTA_CTL_DISABLE
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error())
@ -389,7 +389,7 @@ func (d *Driver) subvolRescanQuota() error {
defer closeDir(dir)
var args C.struct_btrfs_ioctl_quota_rescan_args
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error())
@ -408,7 +408,7 @@ func subvolLimitQgroup(path string, size uint64) error {
var args C.struct_btrfs_ioctl_qgroup_limit_args
args.lim.max_referenced = C.__u64(size)
args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error())
@ -437,7 +437,7 @@ func subvolQgroupStatus(path string) error {
args.key.max_transid = C.__u64(math.MaxUint64)
args.key.nr_items = 4096
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH,
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error())
@ -459,7 +459,7 @@ func subvolLookupQgroup(path string) (uint64, error) {
var args C.struct_btrfs_ioctl_ino_lookup_args
args.objectid = C.BTRFS_FIRST_FREE_OBJECTID
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP,
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP,
uintptr(unsafe.Pointer(&args)))
if errno != 0 {
return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error())

View File

@ -174,27 +174,27 @@ func writeLVMConfig(root string, cfg directLVMConfig) error {
func setupDirectLVM(cfg directLVMConfig) error {
pvCreate, err := exec.LookPath("pvcreate")
if err != nil {
return errors.Wrap(err, "error lookuping up command `pvcreate` while setting up direct lvm")
return errors.Wrap(err, "error looking up command `pvcreate` while setting up direct lvm")
}
vgCreate, err := exec.LookPath("vgcreate")
if err != nil {
return errors.Wrap(err, "error lookuping up command `vgcreate` while setting up direct lvm")
return errors.Wrap(err, "error looking up command `vgcreate` while setting up direct lvm")
}
lvCreate, err := exec.LookPath("lvcreate")
if err != nil {
return errors.Wrap(err, "error lookuping up command `lvcreate` while setting up direct lvm")
return errors.Wrap(err, "error looking up command `lvcreate` while setting up direct lvm")
}
lvConvert, err := exec.LookPath("lvconvert")
if err != nil {
return errors.Wrap(err, "error lookuping up command `lvconvert` while setting up direct lvm")
return errors.Wrap(err, "error looking up command `lvconvert` while setting up direct lvm")
}
lvChange, err := exec.LookPath("lvchange")
if err != nil {
return errors.Wrap(err, "error lookuping up command `lvchange` while setting up direct lvm")
return errors.Wrap(err, "error looking up command `lvchange` while setting up direct lvm")
}
if cfg.AutoExtendPercent == 0 {

View File

@ -20,7 +20,6 @@ import (
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/pkg/devicemapper"
@ -29,23 +28,19 @@ import (
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/parsers"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
var (
defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024
defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024
defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024
defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors
defaultUdevSyncOverride = false
maxDeviceID = 0xffffff // 24 bit, pool limit
deviceIDMapSz = (maxDeviceID + 1) / 8
// We retry device removal so many a times that even error messages
// will fill up console during normal operation. So only log Fatal
// messages by default.
logLevel = devicemapper.LogLevelFatal
defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024
defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024
defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024
defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors
defaultUdevSyncOverride = false
maxDeviceID = 0xffffff // 24 bit, pool limit
deviceIDMapSz = (maxDeviceID + 1) / 8
driverDeferredRemovalSupport = false
enableDeferredRemoval = false
enableDeferredDeletion = false
@ -1197,7 +1192,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err)
}
defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH)
defer unix.Unmount(fsMountPoint, unix.MNT_DETACH)
switch devices.BaseDeviceFilesystem {
case "ext4":
@ -1266,33 +1261,13 @@ func setCloseOnExec(name string) {
if link == name {
fd, err := strconv.Atoi(i.Name())
if err == nil {
syscall.CloseOnExec(fd)
unix.CloseOnExec(fd)
}
}
}
}
}
// DMLog implements logging using DevMapperLogger interface.
func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) {
// By default libdm sends us all the messages including debug ones.
// We need to filter out messages here and figure out which one
// should be printed.
if level > logLevel {
return
}
// FIXME(vbatts) push this back into ./pkg/devicemapper/
if level <= devicemapper.LogLevelErr {
logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
} else if level <= devicemapper.LogLevelInfo {
logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
} else {
// FIXME(vbatts) push this back into ./pkg/devicemapper/
logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
}
}
func major(device uint64) uint64 {
return (device >> 8) & 0xfff
}
@ -1690,9 +1665,6 @@ func (devices *DeviceSet) enableDeferredRemovalDeletion() error {
}
func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) {
// give ourselves to libdm as a log handler
devicemapper.LogInit(devices)
if err := devices.enableDeferredRemovalDeletion(); err != nil {
return err
}
@ -2088,7 +2060,16 @@ func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
}
// Try to deactivate device in case it is active.
if err := devices.deactivateDevice(info); err != nil {
// If deferred removal is enabled and deferred deletion is disabled
// then make sure device is removed synchronously. There have been
// some cases of device being busy for short duration and we would
// rather busy wait for device removal to take care of these cases.
deferredRemove := devices.deferredRemove
if !devices.deferredDelete {
deferredRemove = false
}
if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil {
logrus.Debugf("devmapper: Error deactivating device: %s", err)
return err
}
@ -2145,6 +2126,11 @@ func (devices *DeviceSet) deactivatePool() error {
}
func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
return devices.deactivateDeviceMode(info, devices.deferredRemove)
}
func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error {
var err error
logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash)
defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
@ -2157,14 +2143,17 @@ func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
return nil
}
if devices.deferredRemove {
if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil {
return err
}
if deferredRemove {
err = devicemapper.RemoveDeviceDeferred(info.Name())
} else {
if err := devices.removeDevice(info.Name()); err != nil {
return err
}
err = devices.removeDevice(info.Name())
}
// This function's semantics is such that it does not return an
// error if device does not exist. So if device went away by
// the time we actually tried to remove it, do not return error.
if err != devicemapper.ErrEnxio {
return err
}
return nil
}
@ -2287,7 +2276,7 @@ func (devices *DeviceSet) Shutdown(home string) error {
// We use MNT_DETACH here in case it is still busy in some running
// container. This means it'll go away from the global scope directly,
// and the device will be released when that container dies.
if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil {
if err := unix.Unmount(p, unix.MNT_DETACH); err != nil {
logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
}
}
@ -2400,7 +2389,7 @@ func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error {
if fstype == "xfs" && devices.xfsNospaceRetries != "" {
if err := devices.xfsSetNospaceRetries(info); err != nil {
syscall.Unmount(path, syscall.MNT_DETACH)
unix.Unmount(path, unix.MNT_DETACH)
devices.deactivateDevice(info)
return err
}
@ -2426,7 +2415,7 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
defer devices.Unlock()
logrus.Debugf("devmapper: Unmount(%s)", mountPath)
if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil {
return err
}
logrus.Debug("devmapper: Unmount done")
@ -2523,8 +2512,8 @@ func (devices *DeviceSet) MetadataDevicePath() string {
}
func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) {
buf := new(syscall.Statfs_t)
if err := syscall.Statfs(loopFile, buf); err != nil {
buf := new(unix.Statfs_t)
if err := unix.Statfs(loopFile, buf); err != nil {
logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err)
return 0, err
}
@ -2788,6 +2777,18 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100")
}
lvmSetupConfig.AutoExtendThreshold = per
case "dm.libdm_log_level":
level, err := strconv.ParseInt(val, 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val)
}
if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug {
return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug)
}
// Register a new logging callback with the specified level.
devicemapper.LogInit(devicemapper.DefaultLogger{
Level: int(level),
})
default:
return nil, fmt.Errorf("devmapper: Unknown option %s\n", key)
}

View File

@ -1,6 +1,6 @@
package graphdriver
import "syscall"
import "golang.org/x/sys/unix"
var (
// Slice of drivers that should be used in an order
@ -11,7 +11,7 @@ var (
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
var buf unix.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
return false, err
}

View File

@ -4,9 +4,9 @@ package graphdriver
import (
"path/filepath"
"syscall"
"github.com/docker/docker/pkg/mount"
"golang.org/x/sys/unix"
)
const (
@ -88,14 +88,14 @@ var (
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil {
var buf unix.Statfs_t
if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
return 0, err
}
return FsMagic(buf.Type), nil
}
// NewFsChecker returns a checker configured for the provied FsMagic
// NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker {
return &fsChecker{
t: t,
@ -127,8 +127,8 @@ func (c *defaultChecker) IsMounted(path string) bool {
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t
if err := syscall.Statfs(mountPath, &buf); err != nil {
var buf unix.Statfs_t
if err := unix.Statfs(mountPath, &buf); err != nil {
return false, err
}
return FsMagic(buf.Type) == fsType, nil

View File

@ -54,7 +54,7 @@ func (c *fsChecker) IsMounted(path string) bool {
return m
}
// NewFsChecker returns a checker configured for the provied FsMagic
// NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker {
return &fsChecker{
t: t,

View File

@ -9,7 +9,6 @@ import (
"os"
"path"
"reflect"
"syscall"
"testing"
"unsafe"
@ -18,6 +17,7 @@ import (
"github.com/docker/go-units"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/sys/unix"
)
var (
@ -329,8 +329,8 @@ func DriverTestSetQuota(t *testing.T, drivername string) {
quota := uint64(50 * units.MiB)
err = writeRandomFile(path.Join(mountPath, "file"), quota*2)
if pathError, ok := err.(*os.PathError); ok && pathError.Err != syscall.EDQUOT {
t.Fatalf("expect write() to fail with %v, got %v", syscall.EDQUOT, err)
if pathError, ok := err.(*os.PathError); ok && pathError.Err != unix.EDQUOT {
t.Fatalf("expect write() to fail with %v, got %v", unix.EDQUOT, err)
}
}

View File

@ -12,6 +12,7 @@ import (
"github.com/docker/docker/daemon/graphdriver"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/sys/unix"
)
func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) {
@ -33,8 +34,8 @@ func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) {
func createBase(t testing.TB, driver graphdriver.Driver, name string) {
// We need to be able to set any perms
oldmask := syscall.Umask(0)
defer syscall.Umask(oldmask)
oldmask := unix.Umask(0)
defer unix.Umask(oldmask)
err := driver.CreateReadWrite(name, "", nil)
require.NoError(t, err)

File diff suppressed because it is too large Load Diff

View File

@ -12,6 +12,7 @@ import (
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"golang.org/x/sys/unix"
)
type copyFlags int
@ -110,12 +111,12 @@ func copyDir(srcDir, dstDir string, flags copyFlags) error {
// cannot create a device if running in user namespace
return nil
}
if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil {
if err := unix.Mkfifo(dstPath, stat.Mode); err != nil {
return err
}
case os.ModeDevice:
if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
return err
}

View File

@ -11,7 +11,6 @@ import (
"os/exec"
"path"
"strconv"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/graphdriver"
@ -23,6 +22,7 @@ import (
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/system"
"github.com/opencontainers/selinux/go-selinux/label"
"golang.org/x/sys/unix"
)
// This is a small wrapper over the NaiveDiffWriter that lets us have a custom
@ -363,7 +363,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
defer func() {
if err != nil {
if c := d.ctr.Decrement(mergedDir); c <= 0 {
syscall.Unmount(mergedDir, 0)
unix.Unmount(mergedDir, 0)
}
}
}()
@ -377,7 +377,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
workDir = path.Join(dir, "work")
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
)
if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
if err := unix.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
}
// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
@ -404,7 +404,7 @@ func (d *Driver) Put(id string) error {
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
if err := syscall.Unmount(mountpoint, syscall.MNT_DETACH); err != nil {
if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
}
return nil

View File

@ -8,11 +8,11 @@ import (
"os"
"path"
"path/filepath"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
// hasOpaqueCopyUpBug checks whether the filesystem has a bug
@ -52,11 +52,11 @@ func hasOpaqueCopyUpBug(d string) error {
}
opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work"))
if err := syscall.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
return errors.Wrap(err, "failed to mount overlay")
}
defer func() {
if err := syscall.Unmount(filepath.Join(td, "merged"), 0); err != nil {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
}
}()

View File

@ -9,9 +9,9 @@ import (
"fmt"
"os"
"runtime"
"syscall"
"github.com/docker/docker/pkg/reexec"
"golang.org/x/sys/unix"
)
func init() {
@ -80,7 +80,7 @@ func mountFromMain() {
fatal(err)
}
if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
fatal(err)
}

View File

@ -15,7 +15,6 @@ import (
"strconv"
"strings"
"sync"
"syscall"
"github.com/Sirupsen/logrus"
@ -35,6 +34,7 @@ import (
units "github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label"
"golang.org/x/sys/unix"
)
var (
@ -539,7 +539,7 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
defer func() {
if err != nil {
if c := d.ctr.Decrement(mergedDir); c <= 0 {
syscall.Unmount(mergedDir, 0)
unix.Unmount(mergedDir, 0)
}
}
}()
@ -552,10 +552,10 @@ func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
}
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), path.Join(dir, "diff"), path.Join(dir, "work"))
mountData := label.FormatMountLabel(opts, mountLabel)
mount := syscall.Mount
mount := unix.Mount
mountTarget := mergedDir
pageSize := syscall.Getpagesize()
pageSize := unix.Getpagesize()
// Go can return a larger page size than supported by the system
// as of go 1.7. This will be fixed in 1.8 and this block can be
@ -619,7 +619,7 @@ func (d *Driver) Put(id string) error {
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
if err := syscall.Unmount(mountpoint, syscall.MNT_DETACH); err != nil {
if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
}
return nil

View File

@ -5,13 +5,13 @@ package overlay2
import (
"io/ioutil"
"os"
"syscall"
"testing"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/daemon/graphdriver/graphtest"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
"golang.org/x/sys/unix"
)
func init() {
@ -31,7 +31,7 @@ func cdMountFrom(dir, device, target, mType, label string) error {
os.Chdir(dir)
defer os.Chdir(wd)
return syscall.Mount(device, target, mType, 0, label)
return unix.Mount(device, target, mType, 0, label)
}
func skipIfNaive(t *testing.T) {

View File

@ -12,6 +12,7 @@ import (
"time"
"github.com/Sirupsen/logrus"
"golang.org/x/sys/unix"
)
// generateID creates a new random string identifier with the given length
@ -69,7 +70,7 @@ func retryOnError(err error) bool {
case *os.PathError:
return retryOnError(err.Err) // unpack the target error
case syscall.Errno:
if err == syscall.EPERM {
if err == unix.EPERM {
// EPERM represents an entropy pool exhaustion, a condition under
// which we backoff and retry.
return true

View File

@ -59,6 +59,7 @@ import (
"unsafe"
"github.com/Sirupsen/logrus"
"golang.org/x/sys/unix"
)
// Quota limit params - currently we only control blocks hard limit
@ -184,7 +185,7 @@ func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) er
var cs = C.CString(backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
_, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XSETPQLIM,
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
@ -211,7 +212,7 @@ func (q *Control) GetQuota(targetPath string, quota *Quota) error {
var cs = C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))
_, _, errno := syscall.Syscall6(syscall.SYS_QUOTACTL, C.Q_XGETPQUOTA,
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
@ -232,7 +233,7 @@ func getProjectID(targetPath string) (uint32, error) {
defer closeDir(dir)
var fsx C.struct_fsxattr
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
@ -250,14 +251,14 @@ func setProjectID(targetPath string, projectID uint32) error {
defer closeDir(dir)
var fsx C.struct_fsxattr
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error())
}
fsx.fsx_projid = C.__u32(projectID)
fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
_, _, errno = syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error())
@ -328,10 +329,10 @@ func makeBackingFsDev(home string) (string, error) {
}
backingFsBlockDev := path.Join(home, "backingFsBlockDev")
// Re-create just in case comeone copied the home directory over to a new device
syscall.Unlink(backingFsBlockDev)
// Re-create just in case someone copied the home directory over to a new device
unix.Unlink(backingFsBlockDev)
stat := fileinfo.Sys().(*syscall.Stat_t)
if err := syscall.Mknod(backingFsBlockDev, syscall.S_IFBLK|0600, int(stat.Dev)); err != nil {
if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil {
return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err)
}

View File

@ -124,7 +124,7 @@ func getFileSystemType(drive string) (fsType string, hr error) {
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW")
buf = make([]uint16, 255)
size = syscall.MAX_PATH + 1
size = windows.MAX_PATH + 1
)
if len(drive) != 1 {
hr = errors.New("getFileSystemType must be called with a drive letter")
@ -132,11 +132,11 @@ func getFileSystemType(drive string) (fsType string, hr error) {
}
drive += `:\`
n := uintptr(unsafe.Pointer(nil))
r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0)
r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0)
if int32(r0) < 0 {
hr = syscall.Errno(win32FromHresult(r0))
}
fsType = syscall.UTF16ToString(buf)
fsType = windows.UTF16ToString(buf)
return
}
@ -300,7 +300,7 @@ func (d *Driver) Remove(id string) error {
//
// TODO @jhowardmsft - For RS3, we can remove the retries. Also consider
// using platform APIs (if available) to get this more succinctly. Also
// consider enlighting the Remove() interface to have context of why
// consider enhancing the Remove() interface to have context of why
// the remove is being called - that could improve efficiency by not
// enumerating compute systems during a remove of a container as it's
// not required.
@ -904,12 +904,12 @@ func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser
// standby list - Microsoft VSO Bug Tracker #9900466
err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
path := longpath.AddPrefix(filepath.Join(fg.path, filename))
p, err := syscall.UTF16FromString(path)
p, err := windows.UTF16FromString(path)
if err != nil {
return err
}
const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0)
if err != nil {
return &os.PathError{Op: "open", Path: path, Err: err}
}

View File

@ -10,7 +10,6 @@ import (
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
@ -20,6 +19,7 @@ import (
"github.com/docker/docker/pkg/parsers"
zfs "github.com/mistifyio/go-zfs"
"github.com/opencontainers/selinux/go-selinux/label"
"golang.org/x/sys/unix"
)
type zfsOptions struct {
@ -141,8 +141,8 @@ func parseOptions(opt []string) (zfsOptions, error) {
}
func lookupZfsDataset(rootdir string) (string, error) {
var stat syscall.Stat_t
if err := syscall.Stat(rootdir, &stat); err != nil {
var stat unix.Stat_t
if err := unix.Stat(rootdir, &stat); err != nil {
return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err)
}
wantedDev := stat.Dev
@ -152,7 +152,7 @@ func lookupZfsDataset(rootdir string) (string, error) {
return "", err
}
for _, m := range mounts {
if err := syscall.Stat(m.Mountpoint, &stat); err != nil {
if err := unix.Stat(m.Mountpoint, &stat); err != nil {
logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
continue // may fail on fuse file systems
}

View File

@ -3,15 +3,15 @@ package zfs
import (
"fmt"
"strings"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/graphdriver"
"golang.org/x/sys/unix"
)
func checkRootdirFs(rootdir string) error {
var buf syscall.Statfs_t
if err := syscall.Statfs(rootdir, &buf); err != nil {
var buf unix.Statfs_t
if err := unix.Statfs(rootdir, &buf); err != nil {
return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
}

View File

@ -2,15 +2,15 @@ package zfs
import (
"fmt"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/graphdriver"
"golang.org/x/sys/unix"
)
func checkRootdirFs(rootdir string) error {
var buf syscall.Statfs_t
if err := syscall.Statfs(rootdir, &buf); err != nil {
var buf unix.Statfs_t
if err := unix.Statfs(rootdir, &buf); err != nil {
return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
}

View File

@ -6,9 +6,9 @@ import (
"os"
"path/filepath"
"strings"
"syscall"
"github.com/docker/docker/pkg/idtools"
"golang.org/x/sys/unix"
)
// Setup populates a directory with mountpoints suitable
@ -33,7 +33,7 @@ func Setup(initLayer string, rootIDs idtools.IDPair) error {
prev := "/"
for _, p := range parts[1:] {
prev = filepath.Join(prev, p)
syscall.Unlink(filepath.Join(initLayer, prev))
unix.Unlink(filepath.Join(initLayer, prev))
}
if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil {

Some files were not shown because too many files have changed in this diff Show More