Compare commits
83 Commits
v17.09.0-ce
...
v17.09.1-ce
| SHA1 |
|---|
| 2d63290f4b | |||
| bef07a1ce4 | |||
| 5f5660fc44 | |||
| d69695aaf1 | |||
| 3f7bf4e845 | |||
| 0ca354c767 | |||
| b3f86e3d0f | |||
| c24589548f | |||
| 6b4f347ab9 | |||
| c3be82eb0a | |||
| bea7b88e4b | |||
| 7da03c8e7c | |||
| 93c8cd4a54 | |||
| bb351b1f19 | |||
| de00d5a407 | |||
| d102cb2dd5 | |||
| e68f903cfc | |||
| ff664c00db | |||
| c4124d0875 | |||
| e5b5086d0b | |||
| 61ee5a0623 | |||
| 87f5f1d49c | |||
| 79757e0860 | |||
| 56e2353034 | |||
| 9cef9a54e6 | |||
| 3fbf1fbd82 | |||
| ceb3b42a7f | |||
| 9083769a6e | |||
| 21e53b4643 | |||
| 3406769872 | |||
| 15e5af3a07 | |||
| f1bf9f2c5b | |||
| e02f35c1fc | |||
| b91a1041f6 | |||
| b01ad4a8b6 | |||
| e70eb02632 | |||
| 66cbbe22db | |||
| 338e914b20 | |||
| 6c6a1827cf | |||
| 030334d397 | |||
| b02a5e5084 | |||
| 8916204beb | |||
| a5694003b0 | |||
| f9cf09436b | |||
| 3fdbcd1680 | |||
| c480a40b85 | |||
| 40bd542277 | |||
| 341aec8bb0 | |||
| 51ab500b6c | |||
| afdb6d44a8 | |||
| 3b0f381088 | |||
| 09d58a6cc0 | |||
| 8056485bad | |||
| 81cc2b36fa | |||
| 2357fb28b5 | |||
| 9661f00ed4 | |||
| db97c3db91 | |||
| fe19ba678a | |||
| d86b81fcce | |||
| 0a7586971f | |||
| 5fd57722d1 | |||
| 4856a9ec15 | |||
| 6a80ba3b8a | |||
| 508a2630a3 | |||
| 1ae6ae93c4 | |||
| ccdf90d524 | |||
| 79ca5680b8 | |||
| 1f4bf6c347 | |||
| 0f16d9b90a | |||
| a825d7bfdf | |||
| af4a1fc87e | |||
| a7f4500a2c | |||
| 5fb8e37ec7 | |||
| da6e49e1f7 | |||
| fc12f83f79 | |||
| 3d9fc736e9 | |||
| 390c0782bc | |||
| f238e40c68 | |||
| 2a4728f860 | |||
| 1ad05ae326 | |||
| 393d90ba3d | |||
| 2a3140fcb7 | |||
| 66a5055240 |
67 CHANGELOG.md
@@ -5,7 +5,51 @@ information on the list of deprecated flags and APIs please have a look at
https://docs.docker.com/engine/deprecated/ where target removal dates can also
be found.

## 17.09.0-ce (2017-09-DD)
## 17.09.1-ce (2017-11-DD)

### Builder

- Fix config leakage on shared parent stage [moby/moby#33753](https://github.com/moby/moby/issues/33753)
- Warn on empty continuation lines only, not on comment-only lines [moby/moby#35004](https://github.com/moby/moby/pull/35004)

### Client

- Set API version on Client even when Ping fails [docker/cli#546](https://github.com/docker/cli/pull/546)

### Networking

- Overlay fix for transient IP reuse [docker/libnetwork#2016](https://github.com/docker/libnetwork/pull/2016)
- Fix reapTime logic in NetworkDB and handle DNS cleanup for attachable container [docker/libnetwork#2017](https://github.com/docker/libnetwork/pull/2017)
- Disable hostname lookup on chain exists check [docker/libnetwork#2019](https://github.com/docker/libnetwork/pull/2019)
- Fix lint issues [docker/libnetwork#2020](https://github.com/docker/libnetwork/pull/2020)
- Restore error type in FindNetwork [moby/moby#35634](https://github.com/moby/moby/pull/35634)

### Runtime

- Protect `health monitor` Go channel [moby/moby#35482](https://github.com/moby/moby/pull/35482)
- Fix test failure on stopped container [moby/moby#34730](https://github.com/moby/moby/pull/34730)
- Fix leaking container/exec state [moby/moby#35484](https://github.com/moby/moby/pull/35484)
- Add /proc/scsi to masked paths (patch to work around [CVE-2017-16539](http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-16539)) [moby/moby/#35399](https://github.com/moby/moby/pull/35399)
- Vendor tar-split: fix to prevent memory exhaustion issue that could crash Docker daemon [moby/moby/#35424](https://github.com/moby/moby/pull/35424) Fixes [CVE-2017-14992](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-14992)
- Fix P/Z HubPullSuite tests [moby/moby#34837](https://github.com/moby/moby/pull/34837)
+ Windows: Add support for version filtering on pull [moby/moby#35090](https://github.com/moby/moby/pull/35090)
- Windows: Stop filtering Windows manifest lists by version [moby/moby#35117](https://github.com/moby/moby/pull/35117)
- Use rslave instead of rprivate in chroot archive [moby/moby/#35217](https://github.com/moby/moby/pull/35217)
- Remove container rootfs mountPath after unmount [moby/moby#34573](https://github.com/moby/moby/pull/34573)
- Fix honoring tmpfs size of user /dev/shm mount [moby/moby#35316](https://github.com/moby/moby/pull/35316)
- Don't abort when setting may_detach_mounts (log the error instead) [moby/moby#35172](https://github.com/moby/moby/pull/35172)
- Fix version comparison when negotiating the API version [moby/moby#35008](https://github.com/moby/moby/pull/35008)

### Swarm mode

* Increase gRPC request timeout when sending snapshots [docker/swarmkit#2404](https://github.com/docker/swarmkit/pull/2404)
- Fix node filtering when there is no log driver [docker/swarmkit#2442](https://github.com/docker/swarmkit/pull/2442)
- Add an error on attempt to change cluster name [docker/swarmkit/#2454](https://github.com/docker/swarmkit/pull/2454)
- Delete node attachments when node is removed [docker/swarmkit/#2456](https://github.com/docker/swarmkit/pull/2456)
- Provide custom gRPC dialer to override default proxy dialer [docker/swarmkit/#2457](https://github.com/docker/swarmkit/pull/2457)
- Avoids recursive readlock on swarm info [moby/moby#35388](https://github.com/moby/moby/pull/35388)

## 17.09.0-ce (2017-09-26)

### Builder

@@ -23,8 +67,15 @@ be found.
+ Add ulimits to unsupported compose fields [docker/cli#482](https://github.com/docker/cli/pull/482)
+ Add `--format` to `docker-search` [docker/cli#440](https://github.com/docker/cli/pull/440)
* Show images digests when `{{.Digest}}` is in format [docker/cli#439](https://github.com/docker/cli/pull/439)
* Print timestamp when `--human=true` [docker/cli#438](https://github.com/docker/cli/pull/438)
* Print output of `docker stack rm` on `stdout` instead of `stderr` [docker/cli#491](https://github.com/docker/cli/pull/491)
- Fix `docker history --format '{{json .}}'` printing human-readable timestamps instead of ISO8601 when `--human=true` [docker/cli#438](https://github.com/docker/cli/pull/438)
- Fix idempotence of `docker stack deploy` when secrets or configs are used [docker/cli#509](https://github.com/docker/cli/pull/509)
- Fix presentation of random host ports [docker/cli#404](https://github.com/docker/cli/pull/404)
- Fix redundant service restarts when service created with multiple secrets [moby/moby#34746](https://github.com/moby/moby/issues/34746)

### Logging

- Fix Splunk logger not transmitting log data when tag is empty and raw-mode is used [moby/moby#34520](https://github.com/moby/moby/pull/34520)

### Networking

@@ -42,12 +93,22 @@ be found.
* LCOW: Remove hard-coding [moby/moby#34398](https://github.com/moby/moby/pull/34398)
* LCOW: WORKDIR correct handling [moby/moby#34405](https://github.com/moby/moby/pull/34405)
* Windows: named pipe mounts [moby/moby#33852](https://github.com/moby/moby/pull/33852)
- Fix "permission denied" errors when accessing volume with SELinux enforcing mode [moby/moby#34684](https://github.com/moby/moby/pull/34684)
- Fix layers size reported as `0` in `docker system df` [moby/moby#34826](https://github.com/moby/moby/pull/34826)
- Fix some "device or resource busy" errors when removing containers on RHEL 7.4 based kernels [moby/moby#34886](https://github.com/moby/moby/pull/34886)

### Swarm Mode
### Swarm mode

* Include whether the managers in the swarm are autolocked as part of `docker info` [docker/cli#471](https://github.com/docker/cli/pull/471)
+ Add 'docker service rollback' subcommand [docker/cli#205](https://github.com/docker/cli/pull/205)
- Fix managers failing to join if the gRPC snapshot is larger than 4MB [docker/swarmkit#2375](https://github.com/docker/swarmkit/pull/2375)
- Fix "permission denied" errors for configuration file in SELinux-enabled containers [moby/moby#34732](https://github.com/moby/moby/pull/34732)
- Fix services failing to deploy on ARM nodes [moby/moby#34021](https://github.com/moby/moby/pull/34021)

### Packaging

+ Build scripts for ppc64el on Ubuntu [docker/docker-ce-packaging#43](https://github.com/docker/docker-ce-packaging/pull/43)

### Deprecation

+ Remove deprecated `--enable-api-cors` daemon flag [moby/moby#34821](https://github.com/moby/moby/pull/34821)
@@ -1 +1 @@
17.09.0-ce-rc2
17.09.1-ce-rc1
@@ -17,6 +17,7 @@ import (
    "github.com/docker/docker/client"
    "github.com/docker/go-connections/sockets"
    "github.com/docker/go-connections/tlsconfig"
    "github.com/docker/notary"
    "github.com/docker/notary/passphrase"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
@@ -111,44 +112,53 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error {
    var err error
    cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile)
    if tlsconfig.IsErrEncryptedKey(err) {
        var (
            passwd string
            giveup bool
        )
        passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil)

        for attempts := 0; tlsconfig.IsErrEncryptedKey(err); attempts++ {
            // some code and comments borrowed from notary/trustmanager/keystore.go
            passwd, giveup, err = passRetriever("private", "encrypted TLS private", false, attempts)
            // Check if the passphrase retriever got an error or if it is telling us to give up
            if giveup || err != nil {
                return errors.Wrap(err, "private key is encrypted, but could not get passphrase")
            }

            opts.Common.TLSOptions.Passphrase = passwd
            cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile)
        newClient := func(password string) (client.APIClient, error) {
            opts.Common.TLSOptions.Passphrase = password
            return NewAPIClientFromFlags(opts.Common, cli.configFile)
        }
        cli.client, err = getClientWithPassword(passRetriever, newClient)
    }

    if err != nil {
        return err
    }
    cli.initializeFromClient()
    return nil
}

func (cli *DockerCli) initializeFromClient() {
    cli.defaultVersion = cli.client.ClientVersion()

    if ping, err := cli.client.Ping(context.Background()); err == nil {
        cli.server = ServerInfo{
            HasExperimental: ping.Experimental,
            OSType:          ping.OSType,
        }

        cli.client.NegotiateAPIVersionPing(ping)
    } else {
    ping, err := cli.client.Ping(context.Background())
    if err != nil {
        // Default to true if we fail to connect to daemon
        cli.server = ServerInfo{HasExperimental: true}

        if ping.APIVersion != "" {
            cli.client.NegotiateAPIVersionPing(ping)
        }
        return
    }

    return nil
    cli.server = ServerInfo{
        HasExperimental: ping.Experimental,
        OSType:          ping.OSType,
    }
    cli.client.NegotiateAPIVersionPing(ping)
}

func getClientWithPassword(passRetriever notary.PassRetriever, newClient func(password string) (client.APIClient, error)) (client.APIClient, error) {
    for attempts := 0; ; attempts++ {
        passwd, giveup, err := passRetriever("private", "encrypted TLS private", false, attempts)
        if giveup || err != nil {
            return nil, errors.Wrap(err, "private key is encrypted, but could not get passphrase")
        }

        apiclient, err := newClient(passwd)
        if !tlsconfig.IsErrEncryptedKey(err) {
            return apiclient, err
        }
    }
}

// ServerInfo stores details about the supported features and platform of the
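The extracted `getClientWithPassword` above separates passphrase retrieval from client construction, which is exactly what the new table-driven test below exercises. A minimal sketch of the contract, as if called inside `Initialize` (the retriever body and the `promptUser` helper are hypothetical, not part of the diff):

```go
// Sketch: getClientWithPassword keeps asking the retriever until newClient
// stops failing with an encrypted-key error, or the retriever gives up.
// The retriever matches the notary.PassRetriever signature.
retriever := func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
    if attempts > 3 {
        return "", true, errors.New("too many attempts") // giveup == true aborts the loop
    }
    return promptUser(), false, nil // promptUser is a hypothetical helper
}
newClient := func(password string) (client.APIClient, error) {
    opts.Common.TLSOptions.Passphrase = password
    return NewAPIClientFromFlags(opts.Common, cli.configFile)
}
apiClient, err := getClientWithPassword(retriever, newClient)
```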
@@ -4,12 +4,18 @@ import (
    "os"
    "testing"

    "crypto/x509"

    "github.com/docker/cli/cli/config/configfile"
    "github.com/docker/cli/cli/flags"
    "github.com/docker/cli/internal/test/testutil"
    "github.com/docker/docker/api"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
    "github.com/pkg/errors"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "golang.org/x/net/context"
)

func TestNewAPIClientFromFlags(t *testing.T) {
@@ -43,7 +49,7 @@ func TestNewAPIClientFromFlagsWithAPIVersionFromEnv(t *testing.T) {
    assert.Equal(t, customVersion, apiclient.ClientVersion())
}

// TODO: move to gotestyourself
// TODO: use gotestyourself/env.Patch
func patchEnvVariable(t *testing.T, key, value string) func() {
    oldValue, ok := os.LookupEnv(key)
    require.NoError(t, os.Setenv(key, value))
@@ -55,3 +61,138 @@ func patchEnvVariable(t *testing.T, key, value string) func() {
        require.NoError(t, os.Setenv(key, oldValue))
    }
}

type fakeClient struct {
    client.Client
    pingFunc   func() (types.Ping, error)
    version    string
    negotiated bool
}

func (c *fakeClient) Ping(_ context.Context) (types.Ping, error) {
    return c.pingFunc()
}

func (c *fakeClient) ClientVersion() string {
    return c.version
}

func (c *fakeClient) NegotiateAPIVersionPing(types.Ping) {
    c.negotiated = true
}

func TestInitializeFromClient(t *testing.T) {
    defaultVersion := "v1.55"

    var testcases = []struct {
        doc            string
        pingFunc       func() (types.Ping, error)
        expectedServer ServerInfo
        negotiated     bool
    }{
        {
            doc: "successful ping",
            pingFunc: func() (types.Ping, error) {
                return types.Ping{Experimental: true, OSType: "linux", APIVersion: "v1.30"}, nil
            },
            expectedServer: ServerInfo{HasExperimental: true, OSType: "linux"},
            negotiated:     true,
        },
        {
            doc: "failed ping, no API version",
            pingFunc: func() (types.Ping, error) {
                return types.Ping{}, errors.New("failed")
            },
            expectedServer: ServerInfo{HasExperimental: true},
        },
        {
            doc: "failed ping, with API version",
            pingFunc: func() (types.Ping, error) {
                return types.Ping{APIVersion: "v1.33"}, errors.New("failed")
            },
            expectedServer: ServerInfo{HasExperimental: true},
            negotiated:     true,
        },
    }

    for _, testcase := range testcases {
        t.Run(testcase.doc, func(t *testing.T) {
            apiclient := &fakeClient{
                pingFunc: testcase.pingFunc,
                version:  defaultVersion,
            }

            cli := &DockerCli{client: apiclient}
            cli.initializeFromClient()
            assert.Equal(t, defaultVersion, cli.defaultVersion)
            assert.Equal(t, testcase.expectedServer, cli.server)
            assert.Equal(t, testcase.negotiated, apiclient.negotiated)
        })
    }
}

func TestGetClientWithPassword(t *testing.T) {
    expected := "password"

    var testcases = []struct {
        doc             string
        password        string
        retrieverErr    error
        retrieverGiveup bool
        newClientErr    error
        expectedErr     string
    }{
        {
            doc:      "successful connect",
            password: expected,
        },
        {
            doc:             "password retriever exhausted",
            retrieverGiveup: true,
            retrieverErr:    errors.New("failed"),
            expectedErr:     "private key is encrypted, but could not get passphrase",
        },
        {
            doc:          "password retriever error",
            retrieverErr: errors.New("failed"),
            expectedErr:  "failed",
        },
        {
            doc:          "newClient error",
            newClientErr: errors.New("failed to connect"),
            expectedErr:  "failed to connect",
        },
    }

    for _, testcase := range testcases {
        t.Run(testcase.doc, func(t *testing.T) {
            passRetriever := func(_, _ string, _ bool, attempts int) (passphrase string, giveup bool, err error) {
                // Always return an invalid pass first to test iteration
                switch attempts {
                case 0:
                    return "something else", false, nil
                default:
                    return testcase.password, testcase.retrieverGiveup, testcase.retrieverErr
                }
            }

            newClient := func(currentPassword string) (client.APIClient, error) {
                if testcase.newClientErr != nil {
                    return nil, testcase.newClientErr
                }
                if currentPassword == expected {
                    return &client.Client{}, nil
                }
                return &client.Client{}, x509.IncorrectPasswordError
            }

            _, err := getClientWithPassword(passRetriever, newClient)
            if testcase.expectedErr != "" {
                testutil.ErrorContains(t, err, testcase.expectedErr)
                return
            }

            assert.NoError(t, err)
        })
    }
}
@@ -295,7 +295,13 @@ func convertServiceSecrets(
        })
    }

    return servicecli.ParseSecrets(client, refs)
    secrs, err := servicecli.ParseSecrets(client, refs)
    if err != nil {
        return nil, err
    }
    // sort to ensure idempotence (don't restart services just because the entries are in different order)
    sort.SliceStable(secrs, func(i, j int) bool { return secrs[i].SecretName < secrs[j].SecretName })
    return secrs, err
}

// TODO: fix configs API so that ConfigsAPIClient is not required here
@@ -346,7 +352,13 @@ func convertServiceConfigObjs(
        })
    }

    return servicecli.ParseConfigs(client, refs)
    confs, err := servicecli.ParseConfigs(client, refs)
    if err != nil {
        return nil, err
    }
    // sort to ensure idempotence (don't restart services just because the entries are in different order)
    sort.SliceStable(confs, func(i, j int) bool { return confs[i].ConfigName < confs[j].ConfigName })
    return confs, err
}

func uint32Ptr(value uint32) *uint32 {
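The fix is purely about deterministic ordering: swarm diffs the submitted spec against the current one, so listing the same secrets or configs in a different order must not register as a change. A self-contained sketch of the same `sort.SliceStable` idiom:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Two deploys that list identical secrets in different order should
	// produce identical specs; stable-sorting by name guarantees it.
	a := []string{"db-pass", "api-key", "tls-cert"}
	b := []string{"tls-cert", "db-pass", "api-key"}
	for _, s := range [][]string{a, b} {
		sort.SliceStable(s, func(i, j int) bool { return s[i] < s[j] })
	}
	fmt.Println(a, b) // [api-key db-pass tls-cert] [api-key db-pass tls-cert]
}
```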
@@ -315,7 +315,7 @@ Since 1.9, Docker Content Trust Offline key has been renamed to Root key and the

**Deprecated In Release: [v1.6.0](https://github.com/docker/docker/releases/tag/v1.6.0)**

**Target For Removal In Release: v17.09**
**Removed In Release: [v17.09](https://github.com/docker/docker-ce/releases/tag/v17.09.0-ce)**

The flag `--api-enable-cors` is deprecated since v1.6.0. Use the flag
`--api-cors-header` instead.
@@ -14,10 +14,14 @@ import (
)

type cmdOption struct {
    Option       string
    Shorthand    string `yaml:",omitempty"`
    DefaultValue string `yaml:"default_value,omitempty"`
    Description  string `yaml:",omitempty"`
    Option        string
    Shorthand     string `yaml:",omitempty"`
    ValueType     string `yaml:"value_type,omitempty"`
    DefaultValue  string `yaml:"default_value,omitempty"`
    Description   string `yaml:",omitempty"`
    Deprecated    bool
    MinAPIVersion string `yaml:"min_api_version,omitempty"`
    Experimental  bool
}

type cmdDoc struct {
@@ -35,6 +39,9 @@ type cmdDoc struct {
    Options          []cmdOption `yaml:",omitempty"`
    InheritedOptions []cmdOption `yaml:"inherited_options,omitempty"`
    Example          string      `yaml:"examples,omitempty"`
    Deprecated       bool
    MinAPIVersion    string `yaml:"min_api_version,omitempty"`
    Experimental     bool
}

// GenYamlTree creates yaml structured ref files
@@ -73,12 +80,11 @@ func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandle
}

// GenYamlCustom creates custom yaml output
// nolint: gocyclo
func GenYamlCustom(cmd *cobra.Command, w io.Writer) error {
    cliDoc := cmdDoc{}
    cliDoc.Name = cmd.CommandPath()

    // Check experimental: ok := cmd.Tags["experimental"]

    cliDoc.Aliases = strings.Join(cmd.Aliases, ", ")
    cliDoc.Short = cmd.Short
    cliDoc.Long = cmd.Long
@@ -93,6 +99,18 @@ func GenYamlCustom(cmd *cobra.Command, w io.Writer) error {
    if len(cmd.Example) > 0 {
        cliDoc.Example = cmd.Example
    }
    if len(cmd.Deprecated) > 0 {
        cliDoc.Deprecated = true
    }
    // Check recursively so that, e.g., `docker stack ls` returns the same output as `docker stack`
    for curr := cmd; curr != nil; curr = curr.Parent() {
        if v, ok := curr.Tags["version"]; ok && cliDoc.MinAPIVersion == "" {
            cliDoc.MinAPIVersion = v
        }
        if _, ok := curr.Tags["experimental"]; ok && !cliDoc.Experimental {
            cliDoc.Experimental = true
        }
    }

    flags := cmd.NonInheritedFlags()
    if flags.HasFlags() {
@@ -142,28 +160,34 @@ func GenYamlCustom(cmd *cobra.Command, w io.Writer) error {
}

func genFlagResult(flags *pflag.FlagSet) []cmdOption {
    var result []cmdOption
    var (
        result []cmdOption
        opt    cmdOption
    )

    flags.VisitAll(func(flag *pflag.Flag) {
        opt = cmdOption{
            Option:       flag.Name,
            ValueType:    flag.Value.Type(),
            DefaultValue: forceMultiLine(flag.DefValue),
            Description:  forceMultiLine(flag.Usage),
            Deprecated:   len(flag.Deprecated) > 0,
        }

        // Todo, when we mark a shorthand is deprecated, but specify an empty message.
        // The flag.ShorthandDeprecated is empty as the shorthand is deprecated.
        // Using len(flag.ShorthandDeprecated) > 0 can't handle this, others are ok.
        if !(len(flag.ShorthandDeprecated) > 0) && len(flag.Shorthand) > 0 {
            opt := cmdOption{
                Option:       flag.Name,
                Shorthand:    flag.Shorthand,
                DefaultValue: flag.DefValue,
                Description:  forceMultiLine(flag.Usage),
            }
            result = append(result, opt)
        } else {
            opt := cmdOption{
                Option:       flag.Name,
                DefaultValue: forceMultiLine(flag.DefValue),
                Description:  forceMultiLine(flag.Usage),
            }
            result = append(result, opt)
            opt.Shorthand = flag.Shorthand
        }
        if _, ok := flag.Annotations["experimental"]; ok {
            opt.Experimental = true
        }
        if v, ok := flag.Annotations["version"]; ok {
            opt.MinAPIVersion = v[0]
        }

        result = append(result, opt)
    })

    return result
@@ -4,7 +4,9 @@ github.com/coreos/etcd 824277cb3a577a0e8c829ca9ec557b973fe06d20
github.com/cpuguy83/go-md2man a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
github.com/docker/docker 84144a8c66c1bb2af8fa997288f51ef2719971b4

# github.com/docker/docker has https://github.com/moby/moby/pull/35008/commits/4b6ec10b07c14e7fff1cc51156b6d954147f826f applied
#github.com/docker/docker 84144a8c66c1bb2af8fa997288f51ef2719971b4
github.com/docker/docker-credential-helpers v0.5.1

# the docker/go package contains a customized version of canonical/json
4 components/cli/vendor/github.com/docker/docker/client/client.go (generated, vendored)
@@ -248,8 +248,8 @@ func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
        cli.version = api.DefaultVersion
    }

    // if server version is lower than the maximum version supported by the Client, downgrade
    if versions.LessThan(p.APIVersion, api.DefaultVersion) {
    // if server version is lower than the client version, downgrade
    if versions.LessThan(p.APIVersion, cli.version) {
        cli.version = p.APIVersion
    }
}
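The one-line change is subtle: comparing against `api.DefaultVersion` could silently re-raise a version the client had already pinned lower (for example via `DOCKER_API_VERSION`). A minimal model of the corrected rule, assuming the `versions` helpers from `github.com/docker/docker/api/types/versions`:

```go
// negotiate mirrors the fixed logic: downgrade to the server's reported
// API version only when it is lower than the client's *current* version;
// never upgrade past what the client already negotiated or pinned.
func negotiate(clientVersion, serverVersion string) string {
	if serverVersion != "" && versions.LessThan(serverVersion, clientVersion) {
		return serverVersion
	}
	return clientVersion
}
```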
@@ -1 +1 @@
17.09.0-ce-rc2
17.09.1-ce-rc1
@@ -23,7 +23,6 @@ const versionMatcher = "/v{version:[0-9.]+}"
// Config provides the configuration for the API server
type Config struct {
    Logging     bool
    EnableCors  bool
    CorsHeaders string
    Version     string
    SocketGroup string
@@ -220,7 +220,7 @@ func (s *dispatchState) beginStage(stageName string, image builder.Image) {
    s.imageID = image.ImageID()

    if image.RunConfig() != nil {
        s.runConfig = image.RunConfig()
        s.runConfig = copyRunConfig(image.RunConfig())
    } else {
        s.runConfig = &container.Config{}
    }
@@ -290,6 +290,10 @@ func Parse(rwc io.Reader) (*Result, error) {
        }
        currentLine++

        if isComment(scanner.Bytes()) {
            // original line was a comment (processLine strips comments)
            continue
        }
        if isEmptyContinuationLine(bytesRead) {
            hasEmptyContinuationLine = true
            continue
@@ -331,8 +335,12 @@ func trimWhitespace(src []byte) []byte {
    return bytes.TrimLeftFunc(src, unicode.IsSpace)
}

func isComment(line []byte) bool {
    return tokenComment.Match(trimWhitespace(line))
}

func isEmptyContinuationLine(line []byte) bool {
    return len(trimComments(trimWhitespace(line))) == 0
    return len(trimWhitespace(line)) == 0
}

var utf8bom = []byte{0xEF, 0xBB, 0xBF}
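To make the new split concrete, here is how the two predicates behave after the change (illustrative calls, as if run inside the parser package):

```go
fmt.Println(isComment([]byte("  # a comment")))               // true:  skipped before the emptiness check
fmt.Println(isEmptyContinuationLine([]byte("  # a comment"))) // false: comment-only lines no longer count as empty
fmt.Println(isEmptyContinuationLine([]byte("    ")))          // true:  truly empty continuations still warn
```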
@@ -141,6 +141,13 @@ RUN something \
RUN another \

    thing
RUN non-indented \
# this is a comment
    after-comment

RUN indented \
    # this is an indented comment
    comment
`)

result, err := Parse(dockerfile)
@@ -266,8 +266,8 @@ func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
        cli.version = api.DefaultVersion
    }

    // if server version is lower than the maximum version supported by the Client, downgrade
    if versions.LessThan(p.APIVersion, api.DefaultVersion) {
    // if server version is lower than the client version, downgrade
    if versions.LessThan(p.APIVersion, cli.version) {
        cli.version = p.APIVersion
    }
}
@@ -245,6 +245,14 @@ func TestNegotiateAPIVersion(t *testing.T) {
    // test downgrade
    client.NegotiateAPIVersionPing(ping)
    assert.Equal(t, expected, client.version)

    // set the client version to something older, and verify that we keep the
    // original setting.
    expected = "1.20"
    client.version = expected
    client.NegotiateAPIVersionPing(ping)
    assert.Equal(t, expected, client.version)

}

// TestNegotiateAPIVersionOverride asserts that we honor
@@ -33,8 +33,6 @@ func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) {
    flags.StringVar(&conf.BridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs")
    flags.BoolVar(&conf.BridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic")
    flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary")
    flags.BoolVar(&conf.EnableCors, "api-enable-cors", false, "Enable CORS headers in the Engine API, this is deprecated by --api-cors-header")
    flags.MarkDeprecated("api-enable-cors", "Please use --api-cors-header")
    flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers")
    flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces")
    flags.StringVar(&conf.ContainerdAddr, "containerd", "", "Path to containerd socket")
@@ -132,7 +132,6 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
    Logging:     true,
    SocketGroup: cli.Config.SocketGroup,
    Version:     dockerversion.Version,
    EnableCors:  cli.Config.EnableCors,
    CorsHeaders: cli.Config.CorsHeaders,
}
@@ -548,7 +547,7 @@ func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config
    vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion)
    s.UseMiddleware(vm)

    if cfg.EnableCors || cfg.CorsHeaders != "" {
    if cfg.CorsHeaders != "" {
        c := middleware.NewCORSMiddleware(cfg.CorsHeaders)
        s.UseMiddleware(c)
    }
@@ -1,6 +1,8 @@
package container

import (
    "sync"

    "github.com/docker/docker/api/types"
    "github.com/sirupsen/logrus"
)
@@ -9,6 +11,7 @@ import (
type Health struct {
    types.Health
    stop chan struct{} // Write struct{} to stop the monitor
    mu   sync.Mutex
}

// String returns a human-readable description of the health-check state
@@ -26,9 +29,12 @@ func (s *Health) String() string {
    }
}

// OpenMonitorChannel creates and returns a new monitor channel. If there already is one,
// it returns nil.
// OpenMonitorChannel creates and returns a new monitor channel. If there
// already is one, it returns nil.
func (s *Health) OpenMonitorChannel() chan struct{} {
    s.mu.Lock()
    defer s.mu.Unlock()

    if s.stop == nil {
        logrus.Debug("OpenMonitorChannel")
        s.stop = make(chan struct{})
@@ -39,6 +45,9 @@ func (s *Health) OpenMonitorChannel() chan struct{} {

// CloseMonitorChannel closes any existing monitor channel.
func (s *Health) CloseMonitorChannel() {
    s.mu.Lock()
    defer s.mu.Unlock()

    if s.stop != nil {
        logrus.Debug("CloseMonitorChannel: waiting for probe to stop")
        close(s.stop)
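For context on the race being closed: the probe goroutine and container shutdown may call these methods concurrently, and closing or reassigning a Go channel without synchronization can panic. A reduced sketch of the guarded usage, assuming the `Health` type from this diff:

```go
h := &Health{}
if stop := h.OpenMonitorChannel(); stop != nil {
	go func() {
		<-stop // probe loop: the close() in CloseMonitorChannel unblocks this receive
	}()
}
h.CloseMonitorChannel() // safe to race with another Open/Close: both paths take s.mu first
```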
@@ -183,7 +183,7 @@ func (r *controller) Start(ctx context.Context) error {

    for {
        if err := r.adapter.start(ctx); err != nil {
            if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok {
            if _, ok := errors.Cause(err).(libnetwork.ErrNoSuchNetwork); ok {
                // Retry network creation again if we
                // failed because some of the networks
                // were not found.
@@ -194,9 +194,9 @@ func (c *Cluster) Join(req types.JoinRequest) error {

// Inspect retrieves the configuration properties of a managed swarm cluster.
func (c *Cluster) Inspect() (types.Swarm, error) {
    var swarm *swarmapi.Cluster
    var swarm types.Swarm
    if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
        s, err := getSwarm(ctx, state.controlClient)
        s, err := c.inspect(ctx, state)
        if err != nil {
            return err
        }
@@ -205,7 +205,15 @@ func (c *Cluster) Inspect() (types.Swarm, error) {
    }); err != nil {
        return types.Swarm{}, err
    }
    return convert.SwarmFromGRPC(*swarm), nil
    return swarm, nil
}

func (c *Cluster) inspect(ctx context.Context, state nodeState) (types.Swarm, error) {
    s, err := getSwarm(ctx, state.controlClient)
    if err != nil {
        return types.Swarm{}, err
    }
    return convert.SwarmFromGRPC(*s), nil
}

// Update updates configuration of a managed swarm cluster.
@@ -409,7 +417,7 @@ func (c *Cluster) Info() types.Info {

    if state.IsActiveManager() {
        info.ControlAvailable = true
        swarm, err := c.Inspect()
        swarm, err := c.inspect(ctx, state)
        if err != nil {
            info.Error = err.Error()
        }
@@ -103,7 +103,6 @@ type CommonConfig struct {
    Root        string `json:"data-root,omitempty"`
    SocketGroup string `json:"group,omitempty"`
    CorsHeaders string `json:"api-cors-header,omitempty"`
    EnableCors  bool   `json:"api-enable-cors,omitempty"`

    // TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
    // when pushing to a registry which does not support schema 2. This field is marked as
@@ -307,6 +307,8 @@ func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) {
        if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil {
            return errors.Wrap(err, "error setting ownership for config")
        }

        label.Relabel(fPath, c.MountLabel, false)
    }

    return nil
@@ -5,6 +5,13 @@ package daemon
import (
    "strings"
    "testing"

    containertypes "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/container"
    "github.com/docker/docker/oci"
    "github.com/docker/docker/pkg/idtools"

    "github.com/stretchr/testify/assert"
)

const mountsFixture = `142 78 0:38 / / rw,relatime - aufs none rw,si=573b861da0b3a05b,dio
@@ -102,3 +109,51 @@ func TestNotCleanupMounts(t *testing.T) {
        t.Fatal("Expected not to clean up /dev/shm")
    }
}

// TestTmpfsDevShmSizeOverride checks that user-specified /dev/tmpfs mount
// size is not overridden by the default shmsize (that should only be used
// for default /dev/shm (as in "shareable" and "private" ipc modes)).
// https://github.com/moby/moby/issues/35271
func TestTmpfsDevShmSizeOverride(t *testing.T) {
    size := "777m"
    mnt := "/dev/shm"

    d := Daemon{
        idMappings: &idtools.IDMappings{},
    }
    c := &container.Container{
        HostConfig: &containertypes.HostConfig{
            ShmSize: 48 * 1024, // size we should NOT end up with
        },
    }
    ms := []container.Mount{
        {
            Source:      "tmpfs",
            Destination: mnt,
            Data:        "size=" + size,
        },
    }

    // convert ms to spec
    spec := oci.DefaultSpec()
    err := setMounts(&d, &spec, c, ms)
    assert.NoError(t, err)

    // Check the resulting spec for the correct size
    found := false
    for _, m := range spec.Mounts {
        if m.Destination == mnt {
            for _, o := range m.Options {
                if !strings.HasPrefix(o, "size=") {
                    continue
                }
                t.Logf("%+v\n", m.Options)
                assert.Equal(t, "size="+size, o)
                found = true
            }
        }
    }
    if !found {
        t.Fatal("/dev/shm not found in spec, or size option missing")
    }
}
@@ -8,6 +8,7 @@ import (
    "path/filepath"
    "testing"

    "github.com/docker/docker/api/errdefs"
    containertypes "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/container"
    _ "github.com/docker/docker/pkg/discovery/memory"
@@ -18,6 +19,9 @@ import (
    "github.com/docker/docker/volume/local"
    "github.com/docker/docker/volume/store"
    "github.com/docker/go-connections/nat"
    "github.com/docker/libnetwork"
    "github.com/pkg/errors"
    "github.com/stretchr/testify/assert"
)

//
@@ -304,3 +308,12 @@ func TestMerge(t *testing.T) {
        }
    }
}

func TestFindNetworkErrorType(t *testing.T) {
    d := Daemon{}
    _, err := d.FindNetwork("fakeNet")
    _, ok := errors.Cause(err).(libnetwork.ErrNoSuchNetwork)
    if !errdefs.IsNotFound(err) || !ok {
        assert.Fail(t, "The FindNetwork method MUST always return an error that implements the NotFound interface and is ErrNoSuchNetwork")
    }
}
@@ -1295,7 +1295,42 @@ func rootFSToAPIType(rootfs *image.RootFS) types.RootFS {
// setupDaemonProcess sets various settings for the daemon's process
func setupDaemonProcess(config *config.Config) error {
    // setup the daemons oom_score_adj
    return setupOOMScoreAdj(config.OOMScoreAdjust)
    if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil {
        return err
    }
    if err := setMayDetachMounts(); err != nil {
        logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter")
    }
    return nil
}

// This is used to allow removal of mountpoints that may be mounted in other
// namespaces on RHEL based kernels starting from RHEL 7.4.
// Without this setting, removals on these RHEL based kernels may fail with
// "device or resource busy".
// This setting is not available in upstream kernels as it is not configurable,
// but has been in the upstream kernels since 3.15.
func setMayDetachMounts() error {
    f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0)
    if err != nil {
        if os.IsNotExist(err) {
            return nil
        }
        return errors.Wrap(err, "error opening may_detach_mounts kernel config file")
    }
    defer f.Close()

    _, err = f.WriteString("1")
    if os.IsPermission(err) {
        // Setting may_detach_mounts does not work in an
        // unprivileged container. Ignore the error, but log
        // it if we appear not to be in that situation.
        if !rsystem.RunningInUserNS() {
            logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1")
        }
        return nil
    }
    return err
}

func setupOOMScoreAdj(score int) error {
@@ -99,7 +99,6 @@ func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, er
    for platform := range daemon.stores {
        layerRefs := daemon.getLayerRefs(platform)
        allLayers := daemon.stores[platform].layerStore.Map()
        var allLayersSize int64
        for _, l := range allLayers {
            select {
            case <-ctx.Done():
@@ -21,10 +21,6 @@ func volumeNotFound(id string) error {
    return objNotFoundError{"volume", id}
}

func networkNotFound(id string) error {
    return objNotFoundError{"network", id}
}

type objNotFoundError struct {
    object string
    id     string
@@ -214,3 +210,20 @@ func translateContainerdStartErr(cmd string, setExitCode func(int), err error) e
    // TODO: it would be nice to get some better errors from containerd so we can return better errors here
    return retErr
}

// TODO: cpuguy83 take care of it once the new library is ready
type errNotFound struct{ error }

func (errNotFound) NotFound() {}

func (e errNotFound) Cause() error {
    return e.error
}

// notFound is a helper to create an error of the class with the same name from any error type
func notFound(err error) error {
    if err == nil {
        return nil
    }
    return errNotFound{err}
}
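The wrapper restores both properties the changelog entry mentions: `errors.Cause` still unwraps to the concrete `libnetwork.ErrNoSuchNetwork` that swarm's controller type-asserts on, while the daemon's NotFound classification keeps working. A short sketch, assuming the `errdefs` and `libnetwork` packages used elsewhere in this diff:

```go
err := notFound(libnetwork.ErrNoSuchNetwork("overlay-net"))

// errNotFound implements Cause(), so pkg/errors unwraps to the typed error,
// and its NotFound() method satisfies errdefs.IsNotFound.
_, isNoSuchNetwork := errors.Cause(err).(libnetwork.ErrNoSuchNetwork)
fmt.Println(errdefs.IsNotFound(err), isNoSuchNetwork) // true true
```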
@@ -2416,6 +2416,18 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
    }
    logrus.Debug("devmapper: Unmount done")

    // Remove the mountpoint here. Removing the mountpoint (in newer kernels)
    // will cause all other instances of this mount in other mount namespaces
    // to be killed (this is an anti-DoS measure that is necessary for things
    // like devicemapper). This is necessary to avoid cases where a libdm mount
    // that is present in another namespace will cause subsequent RemoveDevice
    // operations to fail. We ignore any errors here because this may fail on
    // older kernels which don't have
    // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied.
    if err := os.Remove(mountPath); err != nil {
        logrus.Debugf("devmapper: error doing a remove on unmounted device %s: %v", mountPath, err)
    }

    return devices.deactivateDevice(info)
}
@@ -5,12 +5,15 @@ package devmapper
import (
    "fmt"
    "os"
    "os/exec"
    "syscall"
    "testing"
    "time"

    "github.com/docker/docker/daemon/graphdriver"
    "github.com/docker/docker/daemon/graphdriver/graphtest"
    "github.com/docker/docker/pkg/parsers/kernel"
    "golang.org/x/sys/unix"
)

func init() {
@@ -150,3 +153,53 @@ func TestDevmapperLockReleasedDeviceDeletion(t *testing.T) {
    case <-doneChan:
    }
}

// Ensure that mounts aren't leaked. It's non-trivial for us to test the full
// reproducer of #34573 in a unit test, but we can at least make sure that a
// simple command run in a new namespace doesn't break things horribly.
func TestDevmapperMountLeaks(t *testing.T) {
    if !kernel.CheckKernelVersion(3, 18, 0) {
        t.Skipf("kernel version <3.18.0 and so is missing torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe.")
    }

    driver := graphtest.GetDriver(t, "devicemapper", "dm.use_deferred_removal=false", "dm.use_deferred_deletion=false").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver)
    defer graphtest.PutDriver(t)

    // We need to create a new (dummy) device.
    if err := driver.Create("some-layer", "", nil); err != nil {
        t.Fatalf("setting up some-layer: %v", err)
    }

    // Mount the device.
    _, err := driver.Get("some-layer", "")
    if err != nil {
        t.Fatalf("mounting some-layer: %v", err)
    }

    // Create a new subprocess which will inherit our mountpoint, then
    // intentionally leak it and stick around. We can't do this entirely within
    // Go because forking and namespaces in Go are really not handled well at
    // all.
    cmd := exec.Cmd{
        Path: "/bin/sh",
        Args: []string{
            "/bin/sh", "-c",
            "mount --make-rprivate / && sleep 1000s",
        },
        SysProcAttr: &syscall.SysProcAttr{
            Unshareflags: syscall.CLONE_NEWNS,
        },
    }
    if err := cmd.Start(); err != nil {
        t.Fatalf("starting sub-command: %v", err)
    }
    defer func() {
        unix.Kill(cmd.Process.Pid, unix.SIGKILL)
        cmd.Wait()
    }()

    // Now try to "drop" the device.
    if err := driver.Put("some-layer"); err != nil {
        t.Fatalf("unmounting some-layer: %v", err)
    }
}
@@ -228,10 +228,12 @@ func (d *Driver) Put(id string) error {
    if count := d.ctr.Decrement(mp); count > 0 {
        return nil
    }

    err := d.DeviceSet.UnmountDevice(id, mp)
    if err != nil {
        logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err)
        logrus.Errorf("devmapper: Error unmounting device %s: %v", id, err)
    }

    return err
}
@@ -49,18 +49,19 @@ func mountFrom(dir, device, target, mType string, flags uintptr, label string) e
    output := bytes.NewBuffer(nil)
    cmd.Stdout = output
    cmd.Stderr = output

    if err := cmd.Start(); err != nil {
        w.Close()
        return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
    }
    //write the options to the pipe for the untar exec to read
    if err := json.NewEncoder(w).Encode(options); err != nil {
        w.Close()
        return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
    }
    w.Close()

    if err := cmd.Wait(); err != nil {
        return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output)
        return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output)
    }
    return nil
}
@@ -15,6 +15,7 @@ import (
    "net/url"
    "os"
    "strconv"
    "strings"
    "sync"
    "time"

@@ -363,6 +364,11 @@ func (l *splunkLoggerJSON) Log(msg *logger.Message) error {
}

func (l *splunkLoggerRaw) Log(msg *logger.Message) error {
    // empty or whitespace-only messages are not accepted by HEC
    if strings.TrimSpace(string(msg.Line)) == "" {
        return nil
    }

    message := l.createSplunkMessage(msg)

    message.Event = string(append(l.prefix, msg.Line...))
@@ -716,12 +716,19 @@ func TestRawFormatWithoutTag(t *testing.T) {
    if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil {
        t.Fatal(err)
    }
    message3Time := time.Now()
    if err := loggerDriver.Log(&logger.Message{Line: []byte(" "), Source: "stdout", Timestamp: message3Time}); err != nil {
        t.Fatal(err)
    }

    err = loggerDriver.Close()
    if err != nil {
        t.Fatal(err)
    }

    // message3 would have an empty or whitespace only string in the "event" field
    // both of which are not acceptable to HEC
    // thus here we must expect 2 messages, not 3
    if len(hec.messages) != 2 {
        t.Fatal("Expected two messages")
    }
@@ -56,7 +56,9 @@ func (daemon *Daemon) GetNetworkByID(partialID string) (libnetwork.Network, erro
    list := daemon.GetNetworksByID(partialID)

    if len(list) == 0 {
        return nil, errors.WithStack(networkNotFound(partialID))
        // Be very careful to change the error type here, the libnetwork.ErrNoSuchNetwork error is used by the controller
        // to retry the creation of the network as managed through the swarm manager
        return nil, errors.WithStack(notFound(libnetwork.ErrNoSuchNetwork(partialID)))
    }
    if len(list) > 1 {
        return nil, errors.WithStack(invalidIdentifier(partialID))
@@ -495,22 +495,35 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c
        userMounts[m.Destination] = struct{}{}
    }

    // Filter out mounts from spec
    noIpc := c.HostConfig.IpcMode.IsNone()
    // Copy all mounts from spec to defaultMounts, except for
    // - mounts overridden by a user supplied mount;
    // - all mounts under /dev if a user supplied /dev is present;
    // - /dev/shm, in case IpcMode is none.
    // While at it, also
    // - set size for /dev/shm from shmsize.
    var defaultMounts []specs.Mount
    _, mountDev := userMounts["/dev"]
    for _, m := range s.Mounts {
        // filter out /dev/shm mount if case IpcMode is none
        if noIpc && m.Destination == "/dev/shm" {
        if _, ok := userMounts[m.Destination]; ok {
            // filter out mount overridden by a user supplied mount
            continue
        }
        // filter out mount overridden by a user supplied mount
        if _, ok := userMounts[m.Destination]; !ok {
            if mountDev && strings.HasPrefix(m.Destination, "/dev/") {
        if mountDev && strings.HasPrefix(m.Destination, "/dev/") {
            // filter out everything under /dev if /dev is user-mounted
            continue
        }

        if m.Destination == "/dev/shm" {
            if c.HostConfig.IpcMode.IsNone() {
                // filter out /dev/shm for "none" IpcMode
                continue
            }
            defaultMounts = append(defaultMounts, m)
            // set size for /dev/shm mount from spec
            sizeOpt := "size=" + strconv.FormatInt(c.HostConfig.ShmSize, 10)
            m.Options = append(m.Options, sizeOpt)
        }

        defaultMounts = append(defaultMounts, m)
    }

    s.Mounts = defaultMounts
@@ -604,14 +617,6 @@ func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []c
        s.Linux.MaskedPaths = nil
    }

    // Set size for /dev/shm mount that comes from spec (IpcMode: private only)
    for i, m := range s.Mounts {
        if m.Destination == "/dev/shm" {
            sizeOpt := "size=" + strconv.FormatInt(c.HostConfig.ShmSize, 10)
            s.Mounts[i].Options = append(s.Mounts[i].Options, sizeOpt)
        }
    }

    // TODO: until a kernel/mount solution exists for handling remount in a user namespace,
    // we must clear the readonly flag for the cgroups mount (@mrunalp concurs)
    if uidMap := daemon.idMappings.UIDs(); uidMap != nil || c.HostConfig.Privileged {
@@ -206,6 +206,9 @@ func (daemon *Daemon) registerMountPoints(container *container.Container, hostCo
        }); ok {
            mp.Source = cv.CachedPath()
        }
        if mp.Driver == volume.DefaultDriverName {
            setBindModeIfNull(mp)
        }
    }

    binds[mp.Destination] = true
@@ -708,29 +708,20 @@ func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mf
    }

    logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a os/arch match", ref, len(mfstList.Manifests))
    var manifestDigest digest.Digest
    // TODO @jhowardmsft LCOW Support: Need to remove the hard coding in LCOW mode.
    lookingForOS := runtime.GOOS
    if system.LCOWSupported() {
        lookingForOS = "linux"
    }
    for _, manifestDescriptor := range mfstList.Manifests {
        // TODO(aaronl): The manifest list spec supports optional
        // "features" and "variant" fields. These are not yet used.
        // Once they are, their values should be interpreted here.
        if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == lookingForOS {
            manifestDigest = manifestDescriptor.Digest
            logrus.Debugf("found match for %s/%s with media type %s, digest %s", runtime.GOOS, runtime.GOARCH, manifestDescriptor.MediaType, manifestDigest.String())
            break
        }
    }

    if manifestDigest == "" {
    manifestMatches := filterManifests(mfstList.Manifests)

    if len(manifestMatches) == 0 {
        errMsg := fmt.Sprintf("no matching manifest for %s/%s in the manifest list entries", runtime.GOOS, runtime.GOARCH)
        logrus.Debugf(errMsg)
        return "", "", errors.New(errMsg)
    }

    if len(manifestMatches) > 1 {
        logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String())
    }
    manifestDigest := manifestMatches[0].Digest

    manSvc, err := p.repo.Manifests(ctx)
    if err != nil {
        return "", "", err
@@ -3,11 +3,27 @@
package distribution

import (
    "runtime"

    "github.com/docker/distribution"
    "github.com/docker/distribution/context"
    "github.com/docker/distribution/manifest/manifestlist"
    "github.com/sirupsen/logrus"
)

func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) {
    blobs := ld.repo.Blobs(ctx)
    return blobs.Open(ctx, ld.digest)
}

func filterManifests(manifests []manifestlist.ManifestDescriptor) []manifestlist.ManifestDescriptor {
    var matches []manifestlist.ManifestDescriptor
    for _, manifestDescriptor := range manifests {
        if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
            matches = append(matches, manifestDescriptor)

            logrus.Debugf("found match for %s/%s with media type %s, digest %s", runtime.GOOS, runtime.GOARCH, manifestDescriptor.MediaType, manifestDescriptor.Digest.String())
        }
    }
    return matches
}
@@ -3,13 +3,19 @@
package distribution

import (
    "fmt"
    "net/http"
    "os"
    "runtime"
    "sort"
    "strings"

    "github.com/docker/distribution"
    "github.com/docker/distribution/context"
    "github.com/docker/distribution/manifest/manifestlist"
    "github.com/docker/distribution/manifest/schema2"
    "github.com/docker/distribution/registry/client/transport"
    "github.com/docker/docker/pkg/system"
    "github.com/sirupsen/logrus"
)

@@ -55,3 +61,54 @@ func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekClo
    }
    return rsc, err
}

func filterManifests(manifests []manifestlist.ManifestDescriptor) []manifestlist.ManifestDescriptor {
    version := system.GetOSVersion()

    // TODO @jhowardmsft LCOW Support: Need to remove the hard coding in LCOW mode.
    lookingForOS := runtime.GOOS
    osVersion := fmt.Sprintf("%d.%d.%d", version.MajorVersion, version.MinorVersion, version.Build)
    if system.LCOWSupported() {
        lookingForOS = "linux"
        osVersion = ""
    }

    var matches []manifestlist.ManifestDescriptor
    for _, manifestDescriptor := range manifests {
        // TODO: Consider filtering out greater versions, including only greater UBR
        if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == lookingForOS {
            matches = append(matches, manifestDescriptor)

            logrus.Debugf("found match for %s/%s with media type %s, digest %s", runtime.GOOS, runtime.GOARCH, manifestDescriptor.MediaType, manifestDescriptor.Digest.String())
        }
    }
    if lookingForOS == "windows" {
        sort.Stable(manifestsByVersion{osVersion, matches})
    }
    return matches
}

func versionMatch(actual, expected string) bool {
    // Check whether the version matches up to the build, ignoring UBR
    return strings.HasPrefix(actual, expected+".")
}

type manifestsByVersion struct {
    version string
    list    []manifestlist.ManifestDescriptor
}

func (mbv manifestsByVersion) Less(i, j int) bool {
    // TODO: Split version by parts and compare
    // TODO: Prefer versions which have a greater version number
    // Move compatible versions to the top, with no other ordering changes
    return versionMatch(mbv.list[i].Platform.OSVersion, mbv.version) && !versionMatch(mbv.list[j].Platform.OSVersion, mbv.version)
}

func (mbv manifestsByVersion) Len() int {
    return len(mbv.list)
}

func (mbv manifestsByVersion) Swap(i, j int) {
    mbv.list[i], mbv.list[j] = mbv.list[j], mbv.list[i]
}
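The comparator only floats entries whose `OSVersion` matches the host up to the build number; `sort.Stable` leaves the relative order of everything else untouched. A standalone sketch of the same rule with made-up version strings:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	host := "10.0.16299" // host version up to the build; the UBR suffix is ignored
	list := []string{"10.0.14393.1770", "10.0.16299.64", "10.0.15063.674"}
	match := func(v string) bool { return strings.HasPrefix(v, host+".") }
	// Compatible versions move to the front; everything else keeps its order.
	sort.SliceStable(list, func(i, j int) bool { return match(list[i]) && !match(list[j]) })
	fmt.Println(list) // [10.0.16299.64 10.0.14393.1770 10.0.15063.674]
}
```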
@ -11,6 +11,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@ -30,6 +31,9 @@ import (
|
||||
"github.com/docker/docker/volume"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/go-check/check"
|
||||
"github.com/gotestyourself/gotestyourself/poll"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@@ -1910,18 +1914,38 @@ func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) {
    }

    type testCase struct {
        cfg      mounttypes.Mount
        spec     mounttypes.Mount
        expected types.MountPoint
    }

    var selinuxSharedLabel string
    if runtime.GOOS == "linux" {
        selinuxSharedLabel = "z"
    }

    cases := []testCase{
        // use literal strings here for `Type` instead of the defined constants in the volume package to keep this honest
        // Validation of the actual `Mount` struct is done in another test and is not needed here
        {mounttypes.Mount{Type: "volume", Target: destPath}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}},
        {mounttypes.Mount{Type: "volume", Target: destPath + slash}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}},
        {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test1"}, types.MountPoint{Type: "volume", Name: "test1", RW: true, Destination: destPath}},
        {mounttypes.Mount{Type: "volume", Target: destPath, ReadOnly: true, Source: "test2"}, types.MountPoint{Type: "volume", Name: "test2", RW: false, Destination: destPath}},
        {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test3", VolumeOptions: &mounttypes.VolumeOptions{DriverConfig: &mounttypes.Driver{Name: volume.DefaultDriverName}}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", Name: "test3", RW: true, Destination: destPath}},
        {
            spec:     mounttypes.Mount{Type: "volume", Target: destPath},
            expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
        },
        {
            spec:     mounttypes.Mount{Type: "volume", Target: destPath + slash},
            expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
        },
        {
            spec:     mounttypes.Mount{Type: "volume", Target: destPath, Source: "test1"},
            expected: types.MountPoint{Type: "volume", Name: "test1", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
        },
        {
            spec:     mounttypes.Mount{Type: "volume", Target: destPath, ReadOnly: true, Source: "test2"},
            expected: types.MountPoint{Type: "volume", Name: "test2", RW: false, Destination: destPath, Mode: selinuxSharedLabel},
        },
        {
            spec:     mounttypes.Mount{Type: "volume", Target: destPath, Source: "test3", VolumeOptions: &mounttypes.VolumeOptions{DriverConfig: &mounttypes.Driver{Name: volume.DefaultDriverName}}},
            expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", Name: "test3", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
        },
    }

    if SameHostDaemon() {
@@ -1930,8 +1954,23 @@ func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) {
        c.Assert(err, checker.IsNil)
        defer os.RemoveAll(tmpDir1)
        cases = append(cases, []testCase{
            {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir1}},
            {mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir1}},
            {
                spec: mounttypes.Mount{
                    Type:   "bind",
                    Source: tmpDir1,
                    Target: destPath,
                },
                expected: types.MountPoint{
                    Type:        "bind",
                    RW:          true,
                    Destination: destPath,
                    Source:      tmpDir1,
                },
            },
            {
                spec:     mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath, ReadOnly: true},
                expected: types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir1},
            },
        }...)

        // for modes only supported on Linux
@@ -1944,19 +1983,40 @@ func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) {
        c.Assert(mount.ForceMount("", tmpDir3, "none", "shared"), checker.IsNil)

        cases = append(cases, []testCase{
            {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir3}},
            {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3}},
            {mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true, BindOptions: &mounttypes.BindOptions{Propagation: "shared"}}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3, Propagation: "shared"}},
            {
                spec:     mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath},
                expected: types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir3},
            },
            {
                spec:     mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true},
                expected: types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3},
            },
            {
                spec:     mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true, BindOptions: &mounttypes.BindOptions{Propagation: "shared"}},
                expected: types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3, Propagation: "shared"},
            },
        }...)
    }
}

if testEnv.DaemonPlatform() != "windows" { // Windows does not support volume populate
    cases = append(cases, []testCase{
        {mounttypes.Mount{Type: "volume", Target: destPath, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}},
        {mounttypes.Mount{Type: "volume", Target: destPath + slash, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}},
        {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test4", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test4", RW: true, Destination: destPath}},
        {mounttypes.Mount{Type: "volume", Target: destPath, Source: "test5", ReadOnly: true, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test5", RW: false, Destination: destPath}},
        {
            spec:     mounttypes.Mount{Type: "volume", Target: destPath, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}},
            expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
        },
        {
            spec:     mounttypes.Mount{Type: "volume", Target: destPath + slash, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}},
            expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
        },
        {
            spec:     mounttypes.Mount{Type: "volume", Target: destPath, Source: "test4", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}},
            expected: types.MountPoint{Type: "volume", Name: "test4", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
        },
        {
            spec:     mounttypes.Mount{Type: "volume", Target: destPath, Source: "test5", ReadOnly: true, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}},
            expected: types.MountPoint{Type: "volume", Name: "test5", RW: false, Destination: destPath, Mode: selinuxSharedLabel},
        },
    }...)
}
@@ -1968,58 +2028,83 @@ func (s *DockerSuite) TestContainersAPICreateMountsCreate(c *check.C) {
        ID string `json:"Id"`
    }

    cli, err := client.NewEnvClient()
    c.Assert(err, checker.IsNil)
    defer cli.Close()

    ctx := context.Background()
    apiclient := testEnv.APIClient()
    for i, x := range cases {
        c.Logf("case %d - config: %v", i, x.cfg)
        container, err := cli.ContainerCreate(context.Background(), &containertypes.Config{Image: testImg}, &containertypes.HostConfig{Mounts: []mounttypes.Mount{x.cfg}}, &networktypes.NetworkingConfig{}, "")
        c.Assert(err, checker.IsNil)
        c.Logf("case %d - config: %v", i, x.spec)
        container, err := apiclient.ContainerCreate(
            ctx,
            &containertypes.Config{Image: testImg},
            &containertypes.HostConfig{Mounts: []mounttypes.Mount{x.spec}},
            &networktypes.NetworkingConfig{},
            "")
        require.NoError(c, err)

        id := container.ID
        containerInspect, err := apiclient.ContainerInspect(ctx, container.ID)
        require.NoError(c, err)
        mps := containerInspect.Mounts
        require.Len(c, mps, 1)
        mountPoint := mps[0]

        var mps []types.MountPoint
        err = json.NewDecoder(strings.NewReader(inspectFieldJSON(c, id, "Mounts"))).Decode(&mps)
        c.Assert(err, checker.IsNil)
        c.Assert(mps, checker.HasLen, 1)
        c.Assert(mps[0].Destination, checker.Equals, x.expected.Destination)
        if x.expected.Source != "" {
            assert.Equal(c, x.expected.Source, mountPoint.Source)
        }
        if x.expected.Name != "" {
            assert.Equal(c, x.expected.Name, mountPoint.Name)
        }
        if x.expected.Driver != "" {
            assert.Equal(c, x.expected.Driver, mountPoint.Driver)
        }
        if x.expected.Propagation != "" {
            assert.Equal(c, x.expected.Propagation, mountPoint.Propagation)
        }
        assert.Equal(c, x.expected.RW, mountPoint.RW)
        assert.Equal(c, x.expected.Type, mountPoint.Type)
        assert.Equal(c, x.expected.Mode, mountPoint.Mode)
        assert.Equal(c, x.expected.Destination, mountPoint.Destination)

        if len(x.expected.Source) > 0 {
            c.Assert(mps[0].Source, checker.Equals, x.expected.Source)
        }
        if len(x.expected.Name) > 0 {
            c.Assert(mps[0].Name, checker.Equals, x.expected.Name)
        }
        if len(x.expected.Driver) > 0 {
            c.Assert(mps[0].Driver, checker.Equals, x.expected.Driver)
        }
        c.Assert(mps[0].RW, checker.Equals, x.expected.RW)
        c.Assert(mps[0].Type, checker.Equals, x.expected.Type)
        c.Assert(mps[0].Mode, checker.Equals, x.expected.Mode)
        if len(x.expected.Propagation) > 0 {
            c.Assert(mps[0].Propagation, checker.Equals, x.expected.Propagation)
        }
        err = apiclient.ContainerStart(ctx, container.ID, types.ContainerStartOptions{})
        require.NoError(c, err)
        poll.WaitOn(c, containerExit(apiclient, container.ID), poll.WithDelay(time.Second))

        out, _, err := dockerCmdWithError("start", "-a", id)
        if (x.cfg.Type != "volume" || (x.cfg.VolumeOptions != nil && x.cfg.VolumeOptions.NoCopy)) && testEnv.DaemonPlatform() != "windows" {
            c.Assert(err, checker.NotNil, check.Commentf("%s\n%v", out, mps[0]))
        } else {
            c.Assert(err, checker.IsNil, check.Commentf("%s\n%v", out, mps[0]))
        }
        err = apiclient.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{
            RemoveVolumes: true,
            Force:         true,
        })
        require.NoError(c, err)

        dockerCmd(c, "rm", "-fv", id)
        if x.cfg.Type == "volume" && len(x.cfg.Source) > 0 {
            // This should still exist even though we removed the container
            dockerCmd(c, "volume", "inspect", mps[0].Name)
        } else {
            // This should be removed automatically when we removed the container
            out, _, err := dockerCmdWithError("volume", "inspect", mps[0].Name)
            c.Assert(err, checker.NotNil, check.Commentf(out))
        switch {

        // Named volumes still exist after the container is removed
        case x.spec.Type == "volume" && len(x.spec.Source) > 0:
            _, err := apiclient.VolumeInspect(ctx, mountPoint.Name)
            require.NoError(c, err)

        // Bind mounts are never removed with the container
        case x.spec.Type == "bind":

        // anonymous volumes are removed
        default:
            _, err := apiclient.VolumeInspect(ctx, mountPoint.Name)
            assert.True(c, client.IsErrNotFound(err))
        }
    }
}
func containerExit(apiclient client.APIClient, name string) func(poll.LogT) poll.Result {
    return func(logT poll.LogT) poll.Result {
        container, err := apiclient.ContainerInspect(context.Background(), name)
        if err != nil {
            return poll.Error(err)
        }
        switch container.State.Status {
        case "created", "running":
            return poll.Continue("container %s is %s, waiting for exit", name, container.State.Status)
        }
        return poll.Success()
    }
}
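For reference, this is how such a poll.Check is typically driven in these tests (a minimal sketch; the 30-second timeout is an illustrative value, not taken from this diff):

// wait for the container to exit, checking once per second,
// failing the test if it is still running after 30 seconds
poll.WaitOn(c, containerExit(apiclient, container.ID),
    poll.WithDelay(time.Second), poll.WithTimeout(30*time.Second))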
func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) {
    testRequires(c, DaemonIsLinux)
    type testCase struct {
@@ -8,6 +8,8 @@ import (
    "fmt"
    "io/ioutil"
    "net/http"
    "os"
    "strings"
    "time"

    "github.com/docker/docker/api/types"
@@ -198,6 +200,45 @@ func (s *DockerSuite) TestExecAPIStartInvalidCommand(c *check.C) {
    c.Assert(inspectJSON.ExecIDs, checker.IsNil)
}
func (s *DockerSuite) TestExecStateCleanup(c *check.C) {
    testRequires(c, DaemonIsLinux, SameHostDaemon)

    // This test checks accidental regressions. Not part of stable API.

    name := "exec_cleanup"
    cid, _ := dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh")
    cid = strings.TrimSpace(cid)

    stateDir := "/var/run/docker/libcontainerd/" + cid

    checkReadDir := func(c *check.C) (interface{}, check.CommentInterface) {
        fi, err := ioutil.ReadDir(stateDir)
        c.Assert(err, checker.IsNil)
        return len(fi), nil
    }

    fi, err := ioutil.ReadDir(stateDir)
    c.Assert(err, checker.IsNil)
    c.Assert(len(fi), checker.GreaterThan, 1)

    id := createExecCmd(c, name, "ls")
    startExec(c, id, http.StatusOK)
    waitForExec(c, id)

    waitAndAssert(c, 5*time.Second, checkReadDir, checker.Equals, len(fi))

    id = createExecCmd(c, name, "invalid")
    startExec(c, id, http.StatusBadRequest)
    waitForExec(c, id)

    waitAndAssert(c, 5*time.Second, checkReadDir, checker.Equals, len(fi))

    dockerCmd(c, "stop", name)
    _, err = os.Stat(stateDir)
    c.Assert(err, checker.NotNil)
    c.Assert(os.IsNotExist(err), checker.True)
}

func createExec(c *check.C, name string) string {
    return createExecCmd(c, name, "true")
}
@@ -6234,6 +6234,28 @@ func (s *DockerSuite) TestBuildCopyFromResetScratch(c *check.C) {
    c.Assert(strings.TrimSpace(res), checker.Equals, "")
}

func (s *DockerSuite) TestBuildMultiStageParentConfig(c *check.C) {
    testRequires(c, DaemonIsLinux)

    dockerfile := `
        FROM busybox AS stage0
        WORKDIR /foo
        FROM stage0
        WORKDIR sub1
        FROM stage0
        WORKDIR sub2
        `
    ctx := fakecontext.New(c, "",
        fakecontext.WithDockerfile(dockerfile),
    )
    defer ctx.Close()

    cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))

    res := cli.InspectCmd(c, "build1", cli.Format(".Config.WorkingDir")).Combined()
    c.Assert(strings.TrimSpace(res), checker.Equals, "/foo/sub2")
}
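The same WorkingDir assertion can also be made through the API client instead of cli.InspectCmd; a hedged sketch, reusing the apiclient/ctx pattern from the mounts test earlier in this diff (not code from this commit):

// inspect the built image and read the config inherited from the last stage
inspect, _, err := apiclient.ImageInspectWithRaw(ctx, "build1")
require.NoError(c, err)
assert.Equal(c, "/foo/sub2", inspect.Config.WorkingDir)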
func (s *DockerSuite) TestBuildIntermediateTarget(c *check.C) {
    dockerfile := `
        FROM busybox AS build-env
@@ -1825,7 +1825,7 @@ func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *check.C) {
    defer s.d.Stop(c)

    // pull a repository large enough to fill the mount point
    pullOut, err := s.d.Cmd("pull", "registry:2")
    pullOut, err := s.d.Cmd("pull", "debian:stretch")
    c.Assert(err, checker.NotNil, check.Commentf(pullOut))
    c.Assert(pullOut, checker.Contains, "no space left on device")
}
@@ -97,7 +97,7 @@ func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) {
    }()

    c.Assert(waitRun("oomTrue"), checker.IsNil)
    defer dockerCmd(c, "kill", "oomTrue")
    defer dockerCmdWithResult("kill", "oomTrue")
    containerID := inspectField(c, "oomTrue", "Id")

    testActions := map[string]chan bool{
@@ -49,7 +49,7 @@ func (s *DockerSuite) TestNetHostname(c *check.C) {
    c.Assert(out, checker.Contains, "Invalid network mode: invalid container format container:<name|id>")

    out, _ = dockerCmdWithFail(c, "run", "--net=weird", "busybox", "ps")
    c.Assert(strings.ToLower(out), checker.Contains, "no such network")
    c.Assert(strings.ToLower(out), checker.Contains, "not found")
}

func (s *DockerSuite) TestConflictContainerNetworkAndLinks(c *check.C) {
@@ -3,7 +3,6 @@ package main
import (
    "fmt"
    "regexp"
    "runtime"
    "strings"
    "sync"
    "time"
@@ -194,30 +193,26 @@ func (s *DockerHubPullSuite) TestPullScratchNotAllowed(c *check.C) {
// results in more images than a naked pull.
func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) {
    testRequires(c, DaemonIsLinux)
    s.Cmd(c, "pull", "busybox")
    outImageCmd := s.Cmd(c, "images", "busybox")
    s.Cmd(c, "pull", "dockercore/engine-pull-all-test-fixture")
    outImageCmd := s.Cmd(c, "images", "dockercore/engine-pull-all-test-fixture")
    splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n")
    c.Assert(splitOutImageCmd, checker.HasLen, 2)

    s.Cmd(c, "pull", "--all-tags=true", "busybox")
    outImageAllTagCmd := s.Cmd(c, "images", "busybox")
    s.Cmd(c, "pull", "--all-tags=true", "dockercore/engine-pull-all-test-fixture")
    outImageAllTagCmd := s.Cmd(c, "images", "dockercore/engine-pull-all-test-fixture")
    linesCount := strings.Count(outImageAllTagCmd, "\n")
    c.Assert(linesCount, checker.GreaterThan, 2, check.Commentf("pulling all tags should provide more than two images, got %s", outImageAllTagCmd))

    // Verify that the line for 'busybox:latest' is left unchanged.
    // Verify that the line for 'dockercore/engine-pull-all-test-fixture:latest' is left unchanged.
    var latestLine string
    for _, line := range strings.Split(outImageAllTagCmd, "\n") {
        if strings.HasPrefix(line, "busybox") && strings.Contains(line, "latest") {
        if strings.HasPrefix(line, "dockercore/engine-pull-all-test-fixture") && strings.Contains(line, "latest") {
            latestLine = line
            break
        }
    }
    c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for dockercore/engine-pull-all-test-fixture:latest found after pulling all tags"))

    if runtime.GOARCH == "amd64" {
        c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for busybox:latest found after pulling all tags"))
    } else {
        c.Assert(latestLine, checker.Not(checker.Contains), "", check.Commentf("no matching manifest"))
    }
    splitLatest := strings.Fields(latestLine)
    splitCurrent := strings.Fields(splitOutImageCmd[1])

@@ -233,7 +228,7 @@ func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) {
    splitCurrent[4] = ""
    splitCurrent[5] = ""

    c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("busybox:latest was changed after pulling all tags"))
    c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("dockercore/engine-pull-all-test-fixture:latest was changed after pulling all tags"))
}

// TestPullClientDisconnect kills the client during a pull operation and verifies that the operation
36  components/engine/integration/image/import_test.go  (new file)
@@ -0,0 +1,36 @@
package image

import (
    "archive/tar"
    "bytes"
    "context"
    "io"
    "testing"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/integration/util/request"
    "github.com/docker/docker/internal/testutil"
)

// Ensure we don't regress on CVE-2017-14992.
func TestImportExtremelyLargeImageWorks(t *testing.T) {
    client := request.NewAPIClient(t)

    // Construct an empty tar archive with about 8GB of junk padding at the
    // end. This should not cause any crashes (the padding should be mostly
    // ignored).
    var tarBuffer bytes.Buffer
    tw := tar.NewWriter(&tarBuffer)
    if err := tw.Close(); err != nil {
        t.Fatal(err)
    }
    imageRdr := io.MultiReader(&tarBuffer, io.LimitReader(testutil.DevZero, 8*1024*1024*1024))

    _, err := client.ImageImport(context.Background(),
        types.ImageImportSource{Source: imageRdr, SourceName: "-"},
        "test1234:v42",
        types.ImageImportOptions{})
    if err != nil {
        t.Fatal(err)
    }
}
@@ -1,6 +1,8 @@
package testutil

import (
    "io"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)
@@ -11,3 +13,15 @@ func ErrorContains(t require.TestingT, err error, expectedError string, msgAndAr
    require.Error(t, err, msgAndArgs...)
    assert.Contains(t, err.Error(), expectedError, msgAndArgs...)
}

// DevZero acts like /dev/zero but in an OS-independent fashion.
var DevZero io.Reader = devZero{}

type devZero struct{}

func (d devZero) Read(p []byte) (n int, err error) {
    for i := 0; i < len(p); i++ {
        p[i] = '\x00'
    }
    return len(p), nil
}
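A quick standalone check of the helper's contract, assuming only the code above: every Read fills the whole buffer with zeros and never returns an error.

buf := make([]byte, 4)
n, err := testutil.DevZero.Read(buf)
fmt.Println(n, err, buf) // 4 <nil> [0 0 0 0]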
@@ -117,6 +117,13 @@ func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendly
    return -1, err
}

// clean up fifos if failed to add process
defer func() {
    if err != nil {
        p.cleanFifos(processFriendlyName)
    }
}()

resp, err := clnt.remote.apiClient.AddProcess(ctx, r)
if err != nil {
    p.closeFifos(iopipe)
@@ -69,11 +69,7 @@ func (ctr *container) clean() error {

// Caller needs to lock container ID before calling this method.
func (ctr *container) cleanProcess(id string) {
    if p, ok := ctr.processes[id]; ok {
        for _, i := range []int{unix.Stdin, unix.Stdout, unix.Stderr} {
            if err := os.Remove(p.fifo(i)); err != nil && !os.IsNotExist(err) {
                logrus.Warnf("libcontainerd: failed to remove %v for process %v: %v", p.fifo(i), id, err)
            }
        }
        p.cleanFifos(id)
    }
    delete(ctr.processes, id)
}
@@ -9,8 +9,10 @@ import (
    "path/filepath"
    goruntime "runtime"
    "strings"
    "syscall"

    containerd "github.com/containerd/containerd/api/grpc/types"
    "github.com/sirupsen/logrus"
    "github.com/tonistiigi/fifo"
    "golang.org/x/net/context"
    "golang.org/x/sys/unix"
@@ -105,3 +107,12 @@ func (r emptyReader) Read(b []byte) (int, error) {
func (p *process) fifo(index int) string {
    return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index])
}

func (p *process) cleanFifos(id string) {
    for _, i := range []int{syscall.Stdin, syscall.Stdout, syscall.Stderr} {
        if err := os.Remove(p.fifo(i)); err != nil && !os.IsNotExist(err) {
            logrus.Warnf("failed to remove %v for process %v: %v", p.fifo(i), id, err)
        }
    }
}
@@ -132,6 +132,7 @@ func DefaultLinuxSpec() specs.Spec {
        "/proc/timer_list",
        "/proc/timer_stats",
        "/proc/sched_debug",
        "/proc/scsi",
    },
    ReadonlyPaths: []string{
        "/proc/asound",
@@ -26,8 +26,13 @@ func chroot(path string) (err error) {
    return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
}

// make everything in new ns private
if err := mount.MakeRPrivate("/"); err != nil {
// Make everything in the new ns slave.
// Don't use `private` here, as that could race: the mount ns gets a
// reference to a mount, and an unmount from the host does not propagate,
// which could potentially cause transient errors for other operations.
// Even though the window is relatively small, `slave` should not cause
// any problems here.
if err := mount.MakeRSlave("/"); err != nil {
    return err
}
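A minimal sketch of the namespace-plus-propagation pattern used above (assuming golang.org/x/sys/unix and the docker/docker/pkg/mount helpers; error handling reduced to the essentials):

// enter a fresh mount namespace, then mark every mount rslave so that
// unmounts on the host still propagate into the new namespace
if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
    return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
}
if err := mount.MakeRSlave("/"); err != nil {
    return err
}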
@@ -1,6 +1,6 @@
# the following lines are in sorted order, FYI
github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
github.com/Microsoft/hcsshim v0.6.3
github.com/Microsoft/hcsshim v0.6.5
github.com/Microsoft/go-winio v0.4.5
github.com/moby/buildkit da2b9dc7dab99e824b2b1067ad7d0523e32dd2d9 https://github.com/dmcgowan/buildkit.git
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
@@ -28,7 +28,7 @@ github.com/imdario/mergo 0.2.1
golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0

#get libnetwork packages
github.com/docker/libnetwork 5b28c0ec98236c489e39ae6a9e1aeb802e071681
github.com/docker/libnetwork 690b4c05d4a30f627f8e128d6e5fcd99431cc892
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -53,7 +53,7 @@ github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7

# get graph and distribution packages
github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
github.com/vbatts/tar-split v0.10.1
github.com/vbatts/tar-split v0.10.2
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb

# get go-zfs packages
@@ -108,7 +108,7 @@ github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d
github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb

# cluster
github.com/docker/swarmkit ddb4539f883b18ea40af44ee6de63ac2adc8dc1e
github.com/docker/swarmkit b40ffde2b85d5165de3c809c6566b05234aa384a
github.com/gogo/protobuf v0.4
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
6  components/engine/vendor/github.com/Microsoft/hcsshim/container.go  (generated, vendored)
@@ -201,12 +201,18 @@ func createContainerWithJSON(id string, c *ContainerConfig, additionalJSON strin

    if createError == nil || IsPending(createError) {
        if err := container.registerCallback(); err != nil {
            // Terminate the container if it still exists. We're okay to ignore a failure here.
            container.Terminate()
            return nil, makeContainerError(container, operation, "", err)
        }
    }

    err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout)
    if err != nil {
        if err == ErrTimeout {
            // Terminate the container if it still exists. We're okay to ignore a failure here.
            container.Terminate()
        }
        return nil, makeContainerError(container, operation, configuration, err)
    }
11  components/engine/vendor/github.com/Microsoft/hcsshim/interface.go  (generated, vendored)
@@ -30,11 +30,12 @@ type Layer struct {
}

type MappedDir struct {
    HostPath         string
    ContainerPath    string
    ReadOnly         bool
    BandwidthMaximum uint64
    IOPSMaximum      uint64
    HostPath          string
    ContainerPath     string
    ReadOnly          bool
    BandwidthMaximum  uint64
    IOPSMaximum       uint64
    CreateInUtilityVM bool
}

type MappedPipe struct {
1  components/engine/vendor/github.com/docker/libnetwork/controller.go  (generated, vendored)
@@ -341,6 +341,7 @@ func (c *controller) clusterAgentInit() {
    // should still be present when cleaning up
    // service bindings
    c.agentClose()
    c.cleanupServiceDiscovery("")
    c.cleanupServiceBindings("")

    c.agentStopComplete()
6  components/engine/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go  (generated, vendored)
@@ -763,11 +763,7 @@ func (d *driver) createNetwork(config *networkConfiguration) error {

    // Apply the prepared list of steps, and abort at the first error.
    bridgeSetup.queueStep(setupDeviceUp)
    if err = bridgeSetup.apply(); err != nil {
        return err
    }

    return nil
    return bridgeSetup.apply()
}

func (d *driver) DeleteNetwork(nid string) error {
11  components/engine/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go  (generated, vendored)
@@ -169,11 +169,7 @@ func setupIPTablesInternal(bridgeIface string, addr net.Addr, icc, ipmasq, hairp
    }

    // Set Accept on all non-intercontainer outgoing packets.
    if err := programChainRule(outRule, "ACCEPT NON_ICC OUTGOING", enable); err != nil {
        return err
    }

    return nil
    return programChainRule(outRule, "ACCEPT NON_ICC OUTGOING", enable)
}

func programChainRule(rule iptRule, ruleDescr string, insert bool) error {
@@ -304,10 +300,7 @@ func setupInternalNetworkRules(bridgeIface string, addr net.Addr, icc, insert bo
        return err
    }
    // Set Inter Container Communication.
    if err := setIcc(bridgeIface, icc, insert); err != nil {
        return err
    }
    return nil
    return setIcc(bridgeIface, icc, insert)
}

func clearEndpointConnections(nlh *netlink.Handle, ep *bridgeEndpoint) {
2  components/engine/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go  (generated, vendored)
@@ -201,5 +201,5 @@ func delDummyLink(linkName string) error {

// getDummyName returns the name of a dummy parent with truncated net ID and driver prefix
func getDummyName(netID string) string {
    return fmt.Sprintf("%s%s", dummyPrefix, netID)
    return dummyPrefix + netID
}

@@ -205,5 +205,5 @@ func delDummyLink(linkName string) error {

// getDummyName returns the name of a dummy parent with truncated net ID and driver prefix
func getDummyName(netID string) string {
    return fmt.Sprintf("%s%s", dummyPrefix, netID)
    return dummyPrefix + netID
}
1  components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/encryption.go  (generated, vendored)
@@ -21,7 +21,6 @@ import (

const (
    r = 0xD0C4E3
    timeout = 30
    pktExpansion = 26 // SPI(4) + SeqN(4) + IV(8) + PadLength(1) + NextHeader(1) + ICV(8)
)
18  components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/joinleave.go  (generated, vendored)
@@ -68,7 +68,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,

    ep.ifName = containerIfName

    if err := d.writeEndpointToStore(ep); err != nil {
    if err = d.writeEndpointToStore(ep); err != nil {
        return fmt.Errorf("failed to update overlay endpoint %s to local data store: %v", ep.id[0:7], err)
    }

@@ -86,7 +86,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
        return err
    }

    if err := sbox.AddInterface(overlayIfName, "veth",
    if err = sbox.AddInterface(overlayIfName, "veth",
        sbox.InterfaceOptions().Master(s.brName)); err != nil {
        return fmt.Errorf("could not add veth pair inside the network sandbox: %v", err)
    }
@@ -100,7 +100,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
        return err
    }

    if err := nlh.LinkSetHardwareAddr(veth, ep.mac); err != nil {
    if err = nlh.LinkSetHardwareAddr(veth, ep.mac); err != nil {
        return fmt.Errorf("could not set mac address (%v) to the container interface: %v", ep.mac, err)
    }

@@ -108,7 +108,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,
        if sub == s {
            continue
        }
        if err := jinfo.AddStaticRoute(sub.subnetIP, types.NEXTHOP, s.gwIP.IP); err != nil {
        if err = jinfo.AddStaticRoute(sub.subnetIP, types.NEXTHOP, s.gwIP.IP); err != nil {
            logrus.Errorf("Adding subnet %s static route in network %q failed\n", s.subnetIP, n.id)
        }
    }
@@ -122,7 +122,7 @@ func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo,

    d.peerAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), false, false, true)

    if err := d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil {
    if err = d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil {
        logrus.Warn(err)
    }

@@ -200,7 +200,7 @@ func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key stri
    }

    if etype == driverapi.Delete {
        d.peerDelete(nid, eid, addr.IP, addr.Mask, mac, vtep)
        d.peerDelete(nid, eid, addr.IP, addr.Mask, mac, vtep, false)
        return
    }

@@ -232,11 +232,9 @@ func (d *driver) Leave(nid, eid string) error {
        }
    }

    n.leaveSandbox()
    d.peerDelete(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), true)

    if err := d.checkEncryption(nid, nil, 0, true, false); err != nil {
        logrus.Warn(err)
    }
    n.leaveSandbox()

    return nil
}
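The `:=` to `=` changes in this file avoid re-declaring err inside the if statements; a tiny illustration of the shadowing bug that pattern guards against when an outer err is meant to carry the result (step() is a hypothetical helper):

func do() (err error) {
    if err := step(); err != nil { // ':=' declares a new err, shadowing the named return
        logrus.Warn(err)           // handled locally, outer err never set
    }
    return err // always nil here, even when step() failed
}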
11  components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go  (generated, vendored)
@@ -144,11 +144,7 @@ func (d *driver) deleteEndpointFromStore(e *endpoint) error {
        return fmt.Errorf("overlay local store not initialized, ep not deleted")
    }

    if err := d.localStore.DeleteObjectAtomic(e); err != nil {
        return err
    }

    return nil
    return d.localStore.DeleteObjectAtomic(e)
}

func (d *driver) writeEndpointToStore(e *endpoint) error {
@@ -156,10 +152,7 @@ func (d *driver) writeEndpointToStore(e *endpoint) error {
        return fmt.Errorf("overlay local store not initialized, ep not added")
    }

    if err := d.localStore.PutObjectAtomic(e); err != nil {
        return err
    }
    return nil
    return d.localStore.PutObjectAtomic(e)
}

func (ep *endpoint) DataScope() string {
70  components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go  (generated, vendored)
@@ -251,8 +251,9 @@ func (d *driver) DeleteNetwork(nid string) error {
        if err := d.deleteEndpointFromStore(ep); err != nil {
            logrus.Warnf("Failed to delete overlay endpoint %s from local store: %v", ep.id[0:7], err)
        }
    }
    // flush the peerDB entries
    d.peerFlush(nid)
    d.deleteNetwork(nid)

    vnis, err := n.releaseVxlanID()
@@ -494,7 +495,7 @@ func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) erro
    brIfaceOption := make([]osl.IfaceOption, 2)
    brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Address(s.gwIP))
    brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Bridge(true))
    Ifaces[fmt.Sprintf("%s+%s", brName, "br")] = brIfaceOption
    Ifaces[brName+"+br"] = brIfaceOption

    err := sbox.Restore(Ifaces, nil, nil, nil)
    if err != nil {
@@ -504,12 +505,8 @@ func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) erro
    Ifaces = make(map[string][]osl.IfaceOption)
    vxlanIfaceOption := make([]osl.IfaceOption, 1)
    vxlanIfaceOption = append(vxlanIfaceOption, sbox.InterfaceOptions().Master(brName))
    Ifaces[fmt.Sprintf("%s+%s", vxlanName, "vxlan")] = vxlanIfaceOption
    err = sbox.Restore(Ifaces, nil, nil, nil)
    if err != nil {
        return err
    }
    return nil
    Ifaces[vxlanName+"+vxlan"] = vxlanIfaceOption
    return sbox.Restore(Ifaces, nil, nil, nil)
}

func (n *network) setupSubnetSandbox(s *subnet, brName, vxlanName string) error {
@@ -760,58 +757,38 @@ func (n *network) watchMiss(nlSock *nl.NetlinkSocket) {
            continue
        }

        logrus.Debugf("miss notification: dest IP %v, dest MAC %v", ip, mac)

        if neigh.State&(netlink.NUD_STALE|netlink.NUD_INCOMPLETE) == 0 {
            continue
        }

        if n.driver.isSerfAlive() {
            logrus.Debugf("miss notification: dest IP %v, dest MAC %v", ip, mac)
            mac, IPmask, vtep, err := n.driver.resolvePeer(n.id, ip)
            if err != nil {
                logrus.Errorf("could not resolve peer %q: %v", ip, err)
                continue
            }
            n.driver.peerAdd(n.id, "dummy", ip, IPmask, mac, vtep, l2Miss, l3Miss, false)
        } else {
            // If the gc_thresh values are low, the kernel might knock off the neighbor entries.
            // When we get an L3 miss, check if it's a valid peer and reprogram the neighbor
            // entry again. Rate limit it to one attempt every 500ms, just in case a faulty
            // container sends a flood of packets to invalid peers
            if !l3Miss {
                continue
            }
            if time.Since(t) > 500*time.Millisecond {
        } else if l3Miss && time.Since(t) > time.Second {
            // All the local peers will trigger a miss notification, but that one is expected
            // and the local container will reply autonomously to the ARP request.
            // In case the gc_thresh3 value is low, the kernel might reject new entries during
            // peerAdd. This will trigger the following extra logs that inform of the possible issue.
            // Entries created would not be deleted; see the documentation
            // http://man7.org/linux/man-pages/man7/arp.7.html:
            // Entries which are marked as permanent are never deleted by the garbage-collector.
            // The time limit here is to guarantee that the dbSearch is not
            // done too frequently, causing a stall of the peerDB operations.
            pKey, pEntry, err := n.driver.peerDbSearch(n.id, ip)
            if err == nil && !pEntry.isLocal {
                t = time.Now()
                n.programNeighbor(ip)
                logrus.Warnf("miss notification for peer:%+v l3Miss:%t l2Miss:%t, if the problem persist check the gc_thresh on the host pKey:%+v pEntry:%+v err:%v",
                    neigh, l3Miss, l2Miss, *pKey, *pEntry, err)
            }
        }
    }
}

func (n *network) programNeighbor(ip net.IP) {
    peerMac, _, _, err := n.driver.peerDbSearch(n.id, ip)
    if err != nil {
        logrus.Errorf("Reprogramming on L3 miss failed for %s, no peer entry", ip)
        return
    }
    s := n.getSubnetforIPAddr(ip)
    if s == nil {
        logrus.Errorf("Reprogramming on L3 miss failed for %s, not a valid subnet", ip)
        return
    }
    sbox := n.sandbox()
    if sbox == nil {
        logrus.Errorf("Reprogramming on L3 miss failed for %s, overlay sandbox missing", ip)
        return
    }
    if err := sbox.AddNeighbor(ip, peerMac, true, sbox.NeighborOptions().LinkName(s.vxlanName)); err != nil {
        logrus.Errorf("Reprogramming on L3 miss failed for %s: %v", ip, err)
        return
    }
}
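The rewritten miss handler rate-limits the expensive peerDB lookup with a simple timestamp guard; the pattern in isolation (a self-contained sketch, with the loop standing in for the netlink receive loop):

var last time.Time
for i := 0; i < 10; i++ {
    if time.Since(last) > time.Second {
        last = time.Now()
        fmt.Println("expensive lookup", i) // runs at most once per second
    }
    time.Sleep(300 * time.Millisecond)
}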
func (d *driver) addNetwork(n *network) {
    d.Lock()
    d.networks[n.id] = n
@@ -1090,15 +1067,6 @@ func (n *network) contains(ip net.IP) bool {
    return false
}

func (n *network) getSubnetforIPAddr(ip net.IP) *subnet {
    for _, s := range n.subnets {
        if s.subnetIP.Contains(ip) {
            return s
        }
    }
    return nil
}

// getSubnetforIP returns the subnet to which the given IP belongs
func (n *network) getSubnetforIP(ip *net.IPNet) *subnet {
    for _, s := range n.subnets {
8  components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/ov_serf.go  (generated, vendored)
@@ -122,7 +122,7 @@ func (d *driver) processEvent(u serf.UserEvent) {
    case "join":
        d.peerAdd(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac, net.ParseIP(vtepStr), false, false, false)
    case "leave":
        d.peerDelete(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac, net.ParseIP(vtepStr))
        d.peerDelete(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac, net.ParseIP(vtepStr), false)
    }
}

@@ -135,13 +135,13 @@ func (d *driver) processQuery(q *serf.Query) {
        fmt.Printf("Failed to scan query payload string: %v\n", err)
    }

    peerMac, peerIPMask, vtep, err := d.peerDbSearch(nid, net.ParseIP(ipStr))
    pKey, pEntry, err := d.peerDbSearch(nid, net.ParseIP(ipStr))
    if err != nil {
        return
    }

    logrus.Debugf("Sending peer query resp mac %s, mask %s, vtep %s", peerMac, net.IP(peerIPMask), vtep)
    q.Respond([]byte(fmt.Sprintf("%s %s %s", peerMac.String(), net.IP(peerIPMask).String(), vtep.String())))
    logrus.Debugf("Sending peer query resp mac %v, mask %s, vtep %s", pKey.peerMac, net.IP(pEntry.peerIPMask).String(), pEntry.vtep)
    q.Respond([]byte(fmt.Sprintf("%s %s %s", pKey.peerMac.String(), net.IP(pEntry.peerIPMask).String(), pEntry.vtep.String())))
}

func (d *driver) resolvePeer(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) {
6  components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go  (generated, vendored)
@@ -162,7 +162,7 @@ func (d *driver) restoreEndpoints() error {
    Ifaces := make(map[string][]osl.IfaceOption)
    vethIfaceOption := make([]osl.IfaceOption, 1)
    vethIfaceOption = append(vethIfaceOption, n.sbox.InterfaceOptions().Master(s.brName))
    Ifaces[fmt.Sprintf("%s+%s", "veth", "veth")] = vethIfaceOption
    Ifaces["veth+veth"] = vethIfaceOption

    err := n.sbox.Restore(Ifaces, nil, nil, nil)
    if err != nil {
@@ -262,7 +262,7 @@ func (d *driver) nodeJoin(advertiseAddress, bindAddress string, self bool) {
    d.Unlock()

    // If containers are already running on this network update the
    // advertiseaddress in the peerDB
    // advertise address in the peerDB
    d.localJoinOnce.Do(func() {
        d.peerDBUpdateSelf()
    })
@@ -270,7 +270,7 @@ func (d *driver) nodeJoin(advertiseAddress, bindAddress string, self bool) {
    // If there is no cluster store there is no need to start serf.
    if d.store != nil {
        if err := validateSelf(advertiseAddress); err != nil {
            logrus.Warnf("%s", err.Error())
            logrus.Warn(err.Error())
        }
        err := d.serfInit()
        if err != nil {
2  components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.proto  (generated, vendored)
@@ -24,4 +24,4 @@ message PeerRecord {
    // which this container is running and can be reached by
    // building a tunnel to that host IP.
    string tunnel_endpoint_ip = 3 [(gogoproto.customname) = "TunnelEndpointIP"];
}
}
246  components/engine/vendor/github.com/docker/libnetwork/drivers/overlay/peerdb.go  (generated, vendored)
@@ -8,6 +8,7 @@ import (
    "syscall"

    "github.com/docker/libnetwork/common"
    "github.com/docker/libnetwork/osl"
    "github.com/sirupsen/logrus"
)

@@ -22,16 +23,48 @@ type peerEntry struct {
    eid        string
    vtep       net.IP
    peerIPMask net.IPMask
    inSandbox  bool
    isLocal    bool
}

func (p *peerEntry) MarshalDB() peerEntryDB {
    ones, bits := p.peerIPMask.Size()
    return peerEntryDB{
        eid:            p.eid,
        vtep:           p.vtep.String(),
        peerIPMaskOnes: ones,
        peerIPMaskBits: bits,
        isLocal:        p.isLocal,
    }
}

// This is the structure saved into the set (SetMatrix); due to its implementation,
// the values inserted in the set have to be hashable, so the []byte fields had to
// be converted into strings
type peerEntryDB struct {
    eid            string
    vtep           string
    peerIPMaskOnes int
    peerIPMaskBits int
    isLocal        bool
}

func (p *peerEntryDB) UnMarshalDB() peerEntry {
    return peerEntry{
        eid:        p.eid,
        vtep:       net.ParseIP(p.vtep),
        peerIPMask: net.CIDRMask(p.peerIPMaskOnes, p.peerIPMaskBits),
        isLocal:    p.isLocal,
    }
}
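The (ones, bits) encoding works because net.IPMask round-trips losslessly through Size and CIDRMask for canonical masks; a quick check:

mask := net.CIDRMask(24, 32) // 255.255.255.0
ones, bits := mask.Size()
fmt.Println(ones, bits)                                         // 24 32
fmt.Println(net.CIDRMask(ones, bits).String() == mask.String()) // true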
type peerMap struct {
    mp map[string]peerEntry
    // set of peerEntry; note they have to be objects and not pointers to maintain the proper equality checks
    mp common.SetMatrix
    sync.Mutex
}

type peerNetworkMap struct {
    // map with key peerKey
    mp map[string]*peerMap
    sync.Mutex
}
@@ -54,11 +87,7 @@ func (pKey *peerKey) Scan(state fmt.ScanState, verb rune) error {
    }

    pKey.peerMac, err = net.ParseMAC(string(macB))
    if err != nil {
        return err
    }

    return nil
    return err
}

func (d *driver) peerDbWalk(f func(string, *peerKey, *peerEntry) bool) error {
@@ -87,10 +116,13 @@ func (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool
    }

    mp := map[string]peerEntry{}

    pMap.Lock()
    for pKeyStr, pEntry := range pMap.mp {
        mp[pKeyStr] = pEntry
    for _, pKeyStr := range pMap.mp.Keys() {
        entryDBList, ok := pMap.mp.Get(pKeyStr)
        if ok {
            peerEntryDB := entryDBList[0].(peerEntryDB)
            mp[pKeyStr] = peerEntryDB.UnMarshalDB()
        }
    }
    pMap.Unlock()
@@ -107,45 +139,38 @@ func (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool
    return nil
}

func (d *driver) peerDbSearch(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) {
    var (
        peerMac    net.HardwareAddr
        vtep       net.IP
        peerIPMask net.IPMask
        found      bool
    )

func (d *driver) peerDbSearch(nid string, peerIP net.IP) (*peerKey, *peerEntry, error) {
    var pKeyMatched *peerKey
    var pEntryMatched *peerEntry
    err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
        if pKey.peerIP.Equal(peerIP) {
            peerMac = pKey.peerMac
            peerIPMask = pEntry.peerIPMask
            vtep = pEntry.vtep
            found = true
            return found
            pKeyMatched = pKey
            pEntryMatched = pEntry
            return true
        }

        return found
        return false
    })

    if err != nil {
        return nil, nil, nil, fmt.Errorf("peerdb search for peer ip %q failed: %v", peerIP, err)
        return nil, nil, fmt.Errorf("peerdb search for peer ip %q failed: %v", peerIP, err)
    }

    if !found {
        return nil, nil, nil, fmt.Errorf("peer ip %q not found in peerdb", peerIP)
    if pKeyMatched == nil || pEntryMatched == nil {
        return nil, nil, fmt.Errorf("peer ip %q not found in peerdb", peerIP)
    }

    return peerMac, peerIPMask, vtep, nil
    return pKeyMatched, pEntryMatched, nil
}

func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
    peerMac net.HardwareAddr, vtep net.IP, isLocal bool) {
    peerMac net.HardwareAddr, vtep net.IP, isLocal bool) (bool, int) {

    d.peerDb.Lock()
    pMap, ok := d.peerDb.mp[nid]
    if !ok {
        d.peerDb.mp[nid] = &peerMap{
            mp: make(map[string]peerEntry),
            mp: common.NewSetMatrix(),
        }

        pMap = d.peerDb.mp[nid]
@@ -165,18 +190,24 @@ func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask
    }

    pMap.Lock()
    pMap.mp[pKey.String()] = pEntry
    pMap.Unlock()
    defer pMap.Unlock()
    b, i := pMap.mp.Insert(pKey.String(), pEntry.MarshalDB())
    if i != 1 {
        // Transient case: there is more than one endpoint that is using the same IP,MAC pair
        s, _ := pMap.mp.String(pKey.String())
        logrus.Warnf("peerDbAdd transient condition - Key:%s cardinality:%d db state:%s", pKey.String(), i, s)
    }
    return b, i
}
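A sketch of the SetMatrix semantics the new code relies on, using only the methods this diff exercises (Insert, Remove, Get, Keys, String); entryA and entryB stand for hashable values such as peerEntryDB, and a cardinality greater than one is the transient case being logged:

sm := common.NewSetMatrix()
ok, n := sm.Insert("10.0.0.2|02:42:0a:00:00:02", entryA) // ok=true,  n=1
ok, n = sm.Insert("10.0.0.2|02:42:0a:00:00:02", entryA)  // ok=false, n=1 (duplicate, not re-added)
ok, n = sm.Insert("10.0.0.2|02:42:0a:00:00:02", entryB)  // ok=true,  n=2 (transient condition)
ok, n = sm.Remove("10.0.0.2|02:42:0a:00:00:02", entryB)  // ok=true,  n=1 again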
func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
    peerMac net.HardwareAddr, vtep net.IP) peerEntry {
    peerMac net.HardwareAddr, vtep net.IP, isLocal bool) (bool, int) {

    d.peerDb.Lock()
    pMap, ok := d.peerDb.mp[nid]
    if !ok {
        d.peerDb.Unlock()
        return peerEntry{}
        return false, 0
    }
    d.peerDb.Unlock()

@@ -185,22 +216,22 @@ func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPM
        peerMac: peerMac,
    }

    pMap.Lock()

    pEntry, ok := pMap.mp[pKey.String()]
    if ok {
        // Mismatched endpoint ID (possibly outdated). Do not
        // delete peerdb
        if pEntry.eid != eid {
            pMap.Unlock()
            return pEntry
        }
    pEntry := peerEntry{
        eid:        eid,
        vtep:       vtep,
        peerIPMask: peerIPMask,
        isLocal:    isLocal,
    }

    delete(pMap.mp, pKey.String())
    pMap.Unlock()

    return pEntry
    pMap.Lock()
    defer pMap.Unlock()
    b, i := pMap.mp.Remove(pKey.String(), pEntry.MarshalDB())
    if i != 0 {
        // Transient case: there is more than one endpoint that is using the same IP,MAC pair
        s, _ := pMap.mp.String(pKey.String())
        logrus.Warnf("peerDbDelete transient condition - Key:%s cardinality:%d db state:%s", pKey.String(), i, s)
    }
    return b, i
}

// The overlay uses a lazy initialization approach, this means that when a network is created
@@ -224,6 +255,7 @@ const (
    peerOperationINIT peerOperationType = iota
    peerOperationADD
    peerOperationDELETE
    peerOperationFLUSH
)
type peerOperation struct {
@@ -253,7 +285,9 @@ func (d *driver) peerOpRoutine(ctx context.Context, ch chan *peerOperation) {
    case peerOperationADD:
        err = d.peerAddOp(op.networkID, op.endpointID, op.peerIP, op.peerIPMask, op.peerMac, op.vtepIP, op.l2Miss, op.l3Miss, true, op.localPeer)
    case peerOperationDELETE:
        err = d.peerDeleteOp(op.networkID, op.endpointID, op.peerIP, op.peerIPMask, op.peerMac, op.vtepIP)
        err = d.peerDeleteOp(op.networkID, op.endpointID, op.peerIP, op.peerIPMask, op.peerMac, op.vtepIP, op.localPeer)
    case peerOperationFLUSH:
        err = d.peerFlushOp(op.networkID)
    }
    if err != nil {
        logrus.Warnf("Peer operation failed:%s op:%v", err, op)
@@ -286,7 +320,6 @@ func (d *driver) peerInitOp(nid string) error {

func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
    peerMac net.HardwareAddr, vtep net.IP, l2Miss, l3Miss, localPeer bool) {
    callerName := common.CallerName(1)
    d.peerOpCh <- &peerOperation{
        opType:    peerOperationADD,
        networkID: nid,
@@ -298,24 +331,32 @@ func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
        l2Miss:     l2Miss,
        l3Miss:     l3Miss,
        localPeer:  localPeer,
        callerName: callerName,
        callerName: common.CallerName(1),
    }
}

func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
    peerMac net.HardwareAddr, vtep net.IP, l2Miss, l3Miss, updateDB, updateOnlyDB bool) error {
    peerMac net.HardwareAddr, vtep net.IP, l2Miss, l3Miss, updateDB, localPeer bool) error {

    if err := validateID(nid, eid); err != nil {
        return err
    }

    var dbEntries int
    var inserted bool
    if updateDB {
        d.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, false)
        if updateOnlyDB {
            return nil
        inserted, dbEntries = d.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, localPeer)
        if !inserted {
            logrus.Warnf("Entry already present in db: nid:%s eid:%s peerIP:%v peerMac:%v isLocal:%t vtep:%v",
                nid, eid, peerIP, peerMac, localPeer, vtep)
        }
    }

    // Local peers do not need any further configuration
    if localPeer {
        return nil
    }
    n := d.network(nid)
    if n == nil {
        return nil
@@ -353,21 +394,26 @@ func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask

    // Add neighbor entry for the peer IP
    if err := sbox.AddNeighbor(peerIP, peerMac, l3Miss, sbox.NeighborOptions().LinkName(s.vxlanName)); err != nil {
        return fmt.Errorf("could not add neighbor entry into the sandbox: %v", err)
        if _, ok := err.(osl.NeighborSearchError); ok && dbEntries > 1 {
            // We are in the transient case, so only the first configuration is programmed into the kernel.
            // Upon deletion, if the active configuration is deleted, the next one from the database will be restored.
            // Note we are skipping also the next configuration
            return nil
        }
        return fmt.Errorf("could not add neighbor entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
    }

    // Add fdb entry to the bridge for the peer mac
    if err := sbox.AddNeighbor(vtep, peerMac, l2Miss, sbox.NeighborOptions().LinkName(s.vxlanName),
        sbox.NeighborOptions().Family(syscall.AF_BRIDGE)); err != nil {
        return fmt.Errorf("could not add fdb entry into the sandbox: %v", err)
        return fmt.Errorf("could not add fdb entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
    }

    return nil
}
func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
    peerMac net.HardwareAddr, vtep net.IP) {
    callerName := common.CallerName(1)
    peerMac net.HardwareAddr, vtep net.IP, localPeer bool) {
    d.peerOpCh <- &peerOperation{
        opType:    peerOperationDELETE,
        networkID: nid,
@@ -376,18 +422,23 @@ func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMas
        peerIPMask: peerIPMask,
        peerMac:    peerMac,
        vtepIP:     vtep,
        callerName: callerName,
        callerName: common.CallerName(1),
        localPeer:  localPeer,
    }
}

func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
    peerMac net.HardwareAddr, vtep net.IP) error {
    peerMac net.HardwareAddr, vtep net.IP, localPeer bool) error {

    if err := validateID(nid, eid); err != nil {
        return err
    }

    pEntry := d.peerDbDelete(nid, eid, peerIP, peerIPMask, peerMac, vtep)
    deleted, dbEntries := d.peerDbDelete(nid, eid, peerIP, peerIPMask, peerMac, vtep, localPeer)
    if !deleted {
        logrus.Warnf("Entry was not in db: nid:%s eid:%s peerIP:%v peerMac:%v isLocal:%t vtep:%v",
            nid, eid, peerIP, peerMac, localPeer, vtep)
    }

    n := d.network(nid)
    if n == nil {
@@ -399,30 +450,59 @@ func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPM
        return nil
    }

    // Delete fdb entry to the bridge for the peer mac only if the
    // entry existed in local peerdb. If it is a stale delete
    // request, still call DeleteNeighbor but only to cleanup any
    // leftover sandbox neighbor cache and not actually delete the
    // kernel state.
    if (eid == pEntry.eid && vtep.Equal(pEntry.vtep)) ||
        (eid != pEntry.eid && !vtep.Equal(pEntry.vtep)) {
        if err := sbox.DeleteNeighbor(vtep, peerMac,
            eid == pEntry.eid && vtep.Equal(pEntry.vtep)); err != nil {
            return fmt.Errorf("could not delete fdb entry into the sandbox: %v", err)
        }
    }

    // Delete neighbor entry for the peer IP
    if eid == pEntry.eid {
        if err := sbox.DeleteNeighbor(peerIP, peerMac, true); err != nil {
            return fmt.Errorf("could not delete neighbor entry into the sandbox: %v", err)
        }
    }

    if err := d.checkEncryption(nid, vtep, 0, false, false); err != nil {
    if err := d.checkEncryption(nid, vtep, 0, localPeer, false); err != nil {
        logrus.Warn(err)
    }

    // Local peers do not have any local configuration to delete
    if !localPeer {
        // Remove fdb entry to the bridge for the peer mac
        if err := sbox.DeleteNeighbor(vtep, peerMac, true); err != nil {
            if _, ok := err.(osl.NeighborSearchError); ok && dbEntries > 0 {
                // We fall in here if there is a transient state and the neighbor being deleted
                // was never configured into the kernel (we allow only 1 configuration at a time per <ip,mac> mapping)
                return nil
            }
            return fmt.Errorf("could not delete fdb entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
        }

        // Delete neighbor entry for the peer IP
        if err := sbox.DeleteNeighbor(peerIP, peerMac, true); err != nil {
            return fmt.Errorf("could not delete neighbor entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
        }
    }

    if dbEntries == 0 {
        return nil
    }

    // If there is still an entry in the database and the deletion went through without errors,
    // it means that there is now no configuration active in the kernel.
    // Restore one configuration for the <ip,mac> directly from the database; note that it is
    // guaranteed that there is one
    peerKey, peerEntry, err := d.peerDbSearch(nid, peerIP)
    if err != nil {
        logrus.Errorf("peerDeleteOp unable to restore a configuration for nid:%s ip:%v mac:%v err:%s", nid, peerIP, peerMac, err)
        return err
    }
    return d.peerAddOp(nid, peerEntry.eid, peerIP, peerEntry.peerIPMask, peerKey.peerMac, peerEntry.vtep, false, false, false, peerEntry.isLocal)
}
|
||||
func (d *driver) peerFlush(nid string) {
|
||||
d.peerOpCh <- &peerOperation{
|
||||
opType: peerOperationFLUSH,
|
||||
networkID: nid,
|
||||
callerName: common.CallerName(1),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *driver) peerFlushOp(nid string) error {
|
||||
d.peerDb.Lock()
|
||||
defer d.peerDb.Unlock()
|
||||
_, ok := d.peerDb.mp[nid]
|
||||
if !ok {
|
||||
return fmt.Errorf("Unable to find the peerDB for nid:%s", nid)
|
||||
}
|
||||
delete(d.peerDb.mp, nid)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
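The transient-case handling above is easier to see in isolation: only one configuration per <ip,mac> pair is ever programmed into the kernel, and deleting the active one promotes the next database entry. Below is a minimal, self-contained sketch of that bookkeeping; the `peerSim` type, its maps, and the string keys are illustrative stand-ins, not the driver's actual peerDb structures.

```go
package main

import "fmt"

// config is one <vtep> configuration learned for an <ip,mac> pair.
type config struct{ vtep string }

// peerSim is a toy model of the overlay driver's rule: for every <ip,mac>
// key, many configurations may live in the database, but only one of them
// is ever programmed into the "kernel" at a time.
type peerSim struct {
	db     map[string][]config // all known configurations, in arrival order
	kernel map[string]config   // the single active configuration per key
}

func newPeerSim() *peerSim {
	return &peerSim{db: map[string][]config{}, kernel: map[string]config{}}
}

// add stores the configuration and programs the kernel only for the first one.
func (p *peerSim) add(key string, c config) {
	p.db[key] = append(p.db[key], c)
	if len(p.db[key]) > 1 {
		// transient duplicate: keep it in the database only
		return
	}
	p.kernel[key] = c
}

// del removes one configuration; if the active one went away and the
// database still holds another entry, the next entry is restored,
// mirroring the peerDeleteOp -> peerAddOp restore path.
func (p *peerSim) del(key string, c config) {
	entries := p.db[key]
	for i, e := range entries {
		if e == c {
			p.db[key] = append(entries[:i:i], entries[i+1:]...)
			break
		}
	}
	if active, ok := p.kernel[key]; !ok || active != c {
		return // the deleted entry was never in the kernel
	}
	delete(p.kernel, key)
	if rest := p.db[key]; len(rest) > 0 {
		p.kernel[key] = rest[0] // restore from the database
	}
}

func main() {
	s := newPeerSim()
	key := "10.0.0.2/02:42:0a:00:00:02"
	s.add(key, config{vtep: "192.168.0.10"})
	s.add(key, config{vtep: "192.168.0.11"}) // transient reuse of the same IP
	s.del(key, config{vtep: "192.168.0.10"})
	fmt.Println(s.kernel[key].vtep) // 192.168.0.11 was restored
}
```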
```diff
@@ -134,11 +134,7 @@ func (d *driver) deleteEndpointFromStore(e *endpoint) error {
 		return fmt.Errorf("overlay local store not initialized, ep not deleted")
 	}
 
-	if err := d.localStore.DeleteObjectAtomic(e); err != nil {
-		return err
-	}
-
-	return nil
+	return d.localStore.DeleteObjectAtomic(e)
 }
 
 func (d *driver) writeEndpointToStore(e *endpoint) error {
@@ -146,10 +142,7 @@ func (d *driver) writeEndpointToStore(e *endpoint) error {
 		return fmt.Errorf("overlay local store not initialized, ep not added")
 	}
 
-	if err := d.localStore.PutObjectAtomic(e); err != nil {
-		return err
-	}
-	return nil
+	return d.localStore.PutObjectAtomic(e)
 }
 
 func (ep *endpoint) DataScope() string {
 
@@ -331,7 +331,7 @@ func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) erro
 	brIfaceOption := make([]osl.IfaceOption, 2)
 	brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Address(s.gwIP))
 	brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Bridge(true))
-	Ifaces[fmt.Sprintf("%s+%s", brName, "br")] = brIfaceOption
+	Ifaces[brName+"+br"] = brIfaceOption
 
 	err := sbox.Restore(Ifaces, nil, nil, nil)
 	if err != nil {
@@ -341,7 +341,7 @@ func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) erro
 	Ifaces = make(map[string][]osl.IfaceOption)
 	vxlanIfaceOption := make([]osl.IfaceOption, 1)
 	vxlanIfaceOption = append(vxlanIfaceOption, sbox.InterfaceOptions().Master(brName))
-	Ifaces[fmt.Sprintf("%s+%s", vxlanName, "vxlan")] = vxlanIfaceOption
+	Ifaces[vxlanName+"+vxlan"] = vxlanIfaceOption
 	err = sbox.Restore(Ifaces, nil, nil, nil)
 	if err != nil {
 		return err
 
@@ -141,7 +141,7 @@ func (d *driver) restoreEndpoints() error {
 	Ifaces := make(map[string][]osl.IfaceOption)
 	vethIfaceOption := make([]osl.IfaceOption, 1)
 	vethIfaceOption = append(vethIfaceOption, n.sbox.InterfaceOptions().Master(s.brName))
-	Ifaces[fmt.Sprintf("%s+%s", "veth", "veth")] = vethIfaceOption
+	Ifaces["veth+veth"] = vethIfaceOption
 
 	err := n.sbox.Restore(Ifaces, nil, nil, nil)
 	if err != nil {
@@ -234,7 +234,7 @@ func (d *driver) nodeJoin(advertiseAddress, bindAddress string, self bool) {
 	// If there is no cluster store there is no need to start serf.
 	if d.store != nil {
 		if err := validateSelf(advertiseAddress); err != nil {
-			logrus.Warnf("%s", err.Error())
+			logrus.Warn(err.Error())
 		}
 		err := d.serfInit()
 		if err != nil {
```
components/engine/vendor/github.com/docker/libnetwork/endpoint_info.go | 6 (generated, vendored)

```diff
@@ -199,11 +199,7 @@ func (ep *endpoint) Info() EndpointInfo {
 		return ep
 	}
 
-	if epi := sb.getEndpoint(ep.ID()); epi != nil {
-		return epi
-	}
-
-	return nil
+	return sb.getEndpoint(ep.ID())
 }
 
 func (ep *endpoint) Iface() InterfaceInfo {
```
components/engine/vendor/github.com/docker/libnetwork/ipam/allocator.go | 4 (generated, vendored)

```diff
@@ -579,7 +579,7 @@ func (a *Allocator) DumpDatabase() string {
 		s = fmt.Sprintf("\n\n%s Config", as)
 		aSpace.Lock()
 		for k, config := range aSpace.subnets {
-			s = fmt.Sprintf("%s%s", s, fmt.Sprintf("\n%v: %v", k, config))
+			s += fmt.Sprintf("\n%v: %v", k, config)
 			if config.Range == nil {
 				a.retrieveBitmask(k, config.Pool)
 			}
@@ -589,7 +589,7 @@ func (a *Allocator) DumpDatabase() string {
 
 	s = fmt.Sprintf("%s\n\nBitmasks", s)
 	for k, bm := range a.addresses {
-		s = fmt.Sprintf("%s%s", s, fmt.Sprintf("\n%s: %s", k, bm))
+		s += fmt.Sprintf("\n%s: %s", k, bm)
 	}
 
 	return s
```
components/engine/vendor/github.com/docker/libnetwork/iptables/iptables.go | 13 (generated, vendored)

```diff
@@ -276,11 +276,7 @@ func (c *ChainInfo) Forward(action Action, ip net.IP, port int, proto, destAddr
 		"--dport", strconv.Itoa(destPort),
 		"-j", "MASQUERADE",
 	}
-	if err := ProgramRule(Nat, "POSTROUTING", action, args); err != nil {
-		return err
-	}
-
-	return nil
+	return ProgramRule(Nat, "POSTROUTING", action, args)
 }
 
 // Link adds reciprocal ACCEPT rule for two supplied IP addresses.
@@ -301,10 +297,7 @@ func (c *ChainInfo) Link(action Action, ip1, ip2 net.IP, port int, proto string,
 	// reverse
 	args[7], args[9] = args[9], args[7]
 	args[10] = "--sport"
-	if err := ProgramRule(Filter, c.Name, action, args); err != nil {
-		return err
-	}
-	return nil
+	return ProgramRule(Filter, c.Name, action, args)
 }
 
 // ProgramRule adds the rule specified by args only if the
@@ -463,7 +456,7 @@ func RawCombinedOutputNative(args ...string) error {
 
 // ExistChain checks if a chain exists
 func ExistChain(chain string, table Table) bool {
-	if _, err := Raw("-t", string(table), "-L", chain); err == nil {
+	if _, err := Raw("-t", string(table), "-nL", chain); err == nil {
 		return true
 	}
 	return false
```
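The only functional change in ExistChain is the added `-n` flag: `iptables -L` resolves addresses to hostnames by default, so a chain-existence probe could block on slow reverse DNS, while `-nL` prints numerically and the exit status alone answers the question (this is the "Disable hostname lookup on chain exists check" item from the changelog). A hedged sketch of the same check as a standalone helper; the `chainExists` name is mine, and it assumes an `iptables` binary on PATH and sufficient privileges.

```go
package main

import (
	"fmt"
	"os/exec"
)

// chainExists reports whether an iptables chain exists in the given table.
// The -n flag makes iptables print addresses and ports numerically instead
// of resolving them, so a slow or broken DNS setup cannot stall the check;
// only the exit status matters here.
func chainExists(table, chain string) bool {
	// iptables -t <table> -nL <chain> exits non-zero when the chain is missing.
	return exec.Command("iptables", "-t", table, "-nL", chain).Run() == nil
}

func main() {
	fmt.Println(chainExists("nat", "DOCKER"))
}
```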
components/engine/vendor/github.com/docker/libnetwork/network.go | 4 (generated, vendored)

```diff
@@ -995,6 +995,10 @@ func (n *network) delete(force bool) error {
 		logrus.Errorf("Failed leaving network %s from the agent cluster: %v", n.Name(), err)
 	}
 
+	// Cleanup the service discovery for this network
+	c.cleanupServiceDiscovery(n.ID())
+
+	// Cleanup the load balancer
 	c.cleanupServiceBindings(n.ID())
 
 removeFromStore:
```
components/engine/vendor/github.com/docker/libnetwork/networkdb/broadcast.go | 2 (generated, vendored)

```diff
@@ -134,6 +134,8 @@ func (nDB *NetworkDB) sendTableEvent(event TableEvent_Type, nid string, tname st
 		TableName: tname,
 		Key:       key,
 		Value:     entry.value,
+		// The duration in second is a float that below would be truncated
+		ResidualReapTime: int32(entry.reapTime.Seconds()),
 	}
 
 	raw, err := encodeMessage(MessageTypeTableEvent, &tEvent)
```
components/engine/vendor/github.com/docker/libnetwork/networkdb/cluster.go | 94 (generated, vendored)

```diff
@@ -17,11 +17,15 @@ import (
 )
 
 const (
-	reapInterval     = 30 * time.Minute
-	reapPeriod       = 5 * time.Second
-	retryInterval    = 1 * time.Second
-	nodeReapInterval = 24 * time.Hour
-	nodeReapPeriod   = 2 * time.Hour
+	// The garbage collection logic for entries leverage the presence of the network.
+	// For this reason the expiration time of the network is put slightly higher than the entry expiration so that
+	// there is at least 5 extra cycle to make sure that all the entries are properly deleted before deleting the network.
+	reapEntryInterval   = 30 * time.Minute
+	reapNetworkInterval = reapEntryInterval + 5*reapPeriod
+	reapPeriod          = 5 * time.Second
+	retryInterval       = 1 * time.Second
+	nodeReapInterval    = 24 * time.Hour
+	nodeReapPeriod      = 2 * time.Hour
 )
 
 type logWriter struct{}
@@ -300,8 +304,9 @@ func (nDB *NetworkDB) reconnectNode() {
 // the reaper runs. NOTE nDB.reapTableEntries updates the reapTime with a readlock. This
 // is safe as long as no other concurrent path touches the reapTime field.
 func (nDB *NetworkDB) reapState() {
-	nDB.reapNetworks()
+	// The reapTableEntries leverage the presence of the network so garbage collect entries first
 	nDB.reapTableEntries()
+	nDB.reapNetworks()
 }
 
 func (nDB *NetworkDB) reapNetworks() {
@@ -321,43 +326,51 @@ func (nDB *NetworkDB) reapNetworks() {
 }
 
 func (nDB *NetworkDB) reapTableEntries() {
-	var paths []string
-
+	var nodeNetworks []string
+	// This is best effort, if the list of network changes will be picked up in the next cycle
 	nDB.RLock()
-	nDB.indexes[byTable].Walk(func(path string, v interface{}) bool {
-		entry, ok := v.(*entry)
-		if !ok {
-			return false
-		}
-
-		if !entry.deleting {
-			return false
-		}
-		if entry.reapTime > 0 {
-			entry.reapTime -= reapPeriod
-			return false
-		}
-		paths = append(paths, path)
-		return false
-	})
+	for nid := range nDB.networks[nDB.config.NodeName] {
+		nodeNetworks = append(nodeNetworks, nid)
+	}
 	nDB.RUnlock()
 
-	nDB.Lock()
-	for _, path := range paths {
-		params := strings.Split(path[1:], "/")
-		tname := params[0]
-		nid := params[1]
-		key := params[2]
+	cycleStart := time.Now()
+	// In order to avoid blocking the database for a long time, apply the garbage collection logic by network
+	// The lock is taken at the beginning of the cycle and the deletion is inline
+	for _, nid := range nodeNetworks {
+		nDB.Lock()
+		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
+			// timeCompensation compensate in case the lock took some time to be released
+			timeCompensation := time.Since(cycleStart)
+			entry, ok := v.(*entry)
+			if !ok || !entry.deleting {
+				return false
+			}
 
-		if _, ok := nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key)); !ok {
-			logrus.Errorf("Could not delete entry in table %s with network id %s and key %s as it does not exist", tname, nid, key)
-		}
+			// In this check we are adding an extra 1 second to guarantee that when the number is truncated to int32 to fit the packet
+			// for the tableEvent the number is always strictly > 1 and never 0
+			if entry.reapTime > reapPeriod+timeCompensation+time.Second {
+				entry.reapTime -= reapPeriod + timeCompensation
+				return false
+			}
 
-		if _, ok := nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key)); !ok {
-			logrus.Errorf("Could not delete entry in network %s with table name %s and key %s as it does not exist", nid, tname, key)
-		}
+			params := strings.Split(path[1:], "/")
+			nid := params[0]
+			tname := params[1]
+			key := params[2]
+
+			okTable, okNetwork := nDB.deleteEntry(nid, tname, key)
+			if !okTable {
+				logrus.Errorf("Table tree delete failed, entry with key:%s does not exists in the table:%s network:%s", key, tname, nid)
+			}
+			if !okNetwork {
+				logrus.Errorf("Network tree delete failed, entry with key:%s does not exists in the network:%s table:%s", key, nid, tname)
+			}
+
+			return false
+		})
+		nDB.Unlock()
 	}
-	nDB.Unlock()
 }
 
 func (nDB *NetworkDB) gossip() {
@@ -406,8 +419,9 @@ func (nDB *NetworkDB) gossip() {
 		// Collect stats and print the queue info, note this code is here also to have a view of the queues empty
 		network.qMessagesSent += len(msgs)
 		if printStats {
-			logrus.Infof("NetworkDB stats - Queue net:%s qLen:%d netPeers:%d netMsg/s:%d",
-				nid, broadcastQ.NumQueued(), broadcastQ.NumNodes(), network.qMessagesSent/int((nDB.config.StatsPrintPeriod/time.Second)))
+			logrus.Infof("NetworkDB stats - netID:%s leaving:%t netPeers:%d entries:%d Queue qLen:%d netMsg/s:%d",
+				nid, network.leaving, broadcastQ.NumNodes(), network.entriesNumber, broadcastQ.NumQueued(),
+				network.qMessagesSent/int((nDB.config.StatsPrintPeriod/time.Second)))
 			network.qMessagesSent = 0
 		}
 
@@ -572,6 +586,8 @@ func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited b
 			TableName: params[1],
 			Key:       params[2],
 			Value:     entry.value,
+			// The duration in second is a float that below would be truncated
+			ResidualReapTime: int32(entry.reapTime.Seconds()),
 		}
 
 		msg, err := encodeMessage(MessageTypeTableEvent, &tEvent)
```
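Two details above interact: the residual reap time is shipped as an int32 count of seconds (truncated from a float), and the reaper only decrements an entry while more than `reapPeriod + timeCompensation + 1s` remains, so the truncated value put on the wire can never reach 0 for a live deletion. A small sketch of that arithmetic, with made-up durations:

```go
package main

import (
	"fmt"
	"time"
)

const reapPeriod = 5 * time.Second

// residualSeconds mimics how the table event encodes the remaining reap
// time: a float64 number of seconds truncated into an int32 field.
func residualSeconds(reapTime time.Duration) int32 {
	return int32(reapTime.Seconds())
}

func main() {
	entryReap := 7*time.Second + 900*time.Millisecond
	timeCompensation := 300 * time.Millisecond

	// The reaper only decrements the entry while more than one full period
	// (plus compensation, plus one spare second) remains. The spare second
	// guarantees the truncated value sent on the wire is always >= 1, so a
	// receiver can never observe a "deleting" entry with reapTime 0.
	if entryReap > reapPeriod+timeCompensation+time.Second {
		entryReap -= reapPeriod + timeCompensation
	}
	fmt.Println(entryReap, residualSeconds(entryReap)) // 2.6s 2
}
```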
components/engine/vendor/github.com/docker/libnetwork/networkdb/delegate.go | 39 (generated, vendored)

```diff
@@ -1,9 +1,9 @@
 package networkdb
 
 import (
-	"fmt"
 	"net"
 	"strings"
+	"time"
 
 	"github.com/gogo/protobuf/proto"
 	"github.com/sirupsen/logrus"
@@ -165,7 +165,7 @@ func (nDB *NetworkDB) handleNetworkEvent(nEvent *NetworkEvent) bool {
 	n.ltime = nEvent.LTime
 	n.leaving = nEvent.Type == NetworkEventTypeLeave
 	if n.leaving {
-		n.reapTime = reapInterval
+		n.reapTime = reapNetworkInterval
 
 		// The remote node is leaving the network, but not the gossip cluster.
 		// Mark all its entries in deleted state, this will guarantee that
@@ -198,8 +198,7 @@ func (nDB *NetworkDB) handleNetworkEvent(nEvent *NetworkEvent) bool {
 }
 
 func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool {
-	// Update our local clock if the received messages has newer
-	// time.
+	// Update our local clock if the received messages has newer time.
 	nDB.tableClock.Witness(tEvent.LTime)
 
 	// Ignore the table events for networks that are in the process of going away
@@ -235,20 +234,27 @@ func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool {
 		node:     tEvent.NodeName,
 		value:    tEvent.Value,
 		deleting: tEvent.Type == TableEventTypeDelete,
+		reapTime: time.Duration(tEvent.ResidualReapTime) * time.Second,
 	}
 
-	if e.deleting {
-		e.reapTime = reapInterval
+	// All the entries marked for deletion should have a reapTime set greater than 0
+	// This case can happen if the cluster is running different versions of the engine where the old version does not have the
+	// field. If that is not the case, this can be a BUG
+	if e.deleting && e.reapTime == 0 {
+		logrus.Warnf("handleTableEvent object %+v has a 0 reapTime, is the cluster running the same docker engine version?", tEvent)
+		e.reapTime = reapEntryInterval
 	}
 
 	nDB.Lock()
-	nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tEvent.TableName, tEvent.NetworkID, tEvent.Key), e)
-	nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", tEvent.NetworkID, tEvent.TableName, tEvent.Key), e)
+	nDB.createOrUpdateEntry(tEvent.NetworkID, tEvent.TableName, tEvent.Key, e)
 	nDB.Unlock()
 
 	if err != nil && tEvent.Type == TableEventTypeDelete {
-		// If it is a delete event and we didn't have the entry here don't repropagate
-		return true
+		// If it is a delete event and we did not have a state for it, don't propagate to the application
+		// If the residual reapTime is lower or equal to 1/6 of the total reapTime don't bother broadcasting it around
+		// most likely the cluster is already aware of it, if not who will sync with this node will catch the state too.
+		// This also avoids that deletion of entries close to their garbage collection ends up circuling around forever
+		return e.reapTime > reapEntryInterval/6
 	}
 
 	var op opType
@@ -303,22 +309,17 @@ func (nDB *NetworkDB) handleTableMessage(buf []byte, isBulkSync bool) {
 		n, ok := nDB.networks[nDB.config.NodeName][tEvent.NetworkID]
 		nDB.RUnlock()
 
-		if !ok {
+		// if the network is not there anymore, OR we are leaving the network OR the broadcast queue is not present
+		if !ok || n.leaving || n.tableBroadcasts == nil {
 			return
 		}
 
-		broadcastQ := n.tableBroadcasts
-
-		if broadcastQ == nil {
-			return
-		}
-
-		broadcastQ.QueueBroadcast(&tableEventMessage{
+		n.tableBroadcasts.QueueBroadcast(&tableEventMessage{
 			msg:   buf,
 			id:    tEvent.NetworkID,
 			tname: tEvent.TableName,
 			key:   tEvent.Key,
-			node:  nDB.config.NodeName,
+			node:  tEvent.NodeName,
 		})
 	}
 }
```
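The `e.reapTime > reapEntryInterval/6` return value doubles as the rebroadcast decision for delete events the node had no prior state for. A sketch of just that cutoff rule, outside of any memberlist plumbing; the function name is mine:

```go
package main

import (
	"fmt"
	"time"
)

const reapEntryInterval = 30 * time.Minute

// shouldRebroadcast mirrors the delegate's rule: keep gossiping an unknown
// delete event only while a meaningful part of its reap window is left.
// Below 1/6 of the window the cluster has almost certainly seen the event,
// and dropping it prevents deletions close to garbage collection from
// circulating forever.
func shouldRebroadcast(residual time.Duration) bool {
	return residual > reapEntryInterval/6
}

func main() {
	fmt.Println(shouldRebroadcast(20 * time.Minute)) // true
	fmt.Println(shouldRebroadcast(4 * time.Minute))  // false: 4m <= 5m cutoff
}
```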
components/engine/vendor/github.com/docker/libnetwork/networkdb/networkdb.go | 78 (generated, vendored)

```diff
@@ -141,6 +141,11 @@ type network struct {
 
 	// Number of gossip messages sent related to this network during the last stats collection period
 	qMessagesSent int
+
+	// Number of entries on the network. This value is the sum of all the entries of all the tables of a specific network.
+	// Its use is for statistics purposes. It keep tracks of database size and is printed per network every StatsPrintPeriod
+	// interval
+	entriesNumber int
 }
 
 // Config represents the configuration of the networdb instance and
@@ -338,8 +343,7 @@ func (nDB *NetworkDB) CreateEntry(tname, nid, key string, value []byte) error {
 	}
 
 	nDB.Lock()
-	nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
-	nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
+	nDB.createOrUpdateEntry(nid, tname, key, entry)
 	nDB.Unlock()
 
 	return nil
@@ -365,8 +369,7 @@ func (nDB *NetworkDB) UpdateEntry(tname, nid, key string, value []byte) error {
 	}
 
 	nDB.Lock()
-	nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
-	nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
+	nDB.createOrUpdateEntry(nid, tname, key, entry)
 	nDB.Unlock()
 
 	return nil
@@ -402,7 +405,7 @@ func (nDB *NetworkDB) DeleteEntry(tname, nid, key string) error {
 		node:     nDB.config.NodeName,
 		value:    value,
 		deleting: true,
-		reapTime: reapInterval,
+		reapTime: reapEntryInterval,
 	}
 
 	if err := nDB.sendTableEvent(TableEventTypeDelete, nid, tname, key, entry); err != nil {
@@ -410,8 +413,7 @@ func (nDB *NetworkDB) DeleteEntry(tname, nid, key string) error {
 	}
 
 	nDB.Lock()
-	nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
-	nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
+	nDB.createOrUpdateEntry(nid, tname, key, entry)
 	nDB.Unlock()
 
 	return nil
@@ -473,10 +475,10 @@ func (nDB *NetworkDB) deleteNodeNetworkEntries(nid, node string) {
 
 		entry := &entry{
 			ltime:    oldEntry.ltime,
-			node:     node,
+			node:     oldEntry.node,
 			value:    oldEntry.value,
 			deleting: true,
-			reapTime: reapInterval,
+			reapTime: reapEntryInterval,
 		}
 
 		// we arrived at this point in 2 cases:
@@ -488,15 +490,19 @@ func (nDB *NetworkDB) deleteNodeNetworkEntries(nid, node string) {
 				// without doing a delete of all the objects
 				entry.ltime++
 			}
-			nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
-			nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
+
+			if !oldEntry.deleting {
+				nDB.createOrUpdateEntry(nid, tname, key, entry)
+			}
 		} else {
 			// the local node is leaving the network, all the entries of remote nodes can be safely removed
-			nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key))
-			nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key))
+			nDB.deleteEntry(nid, tname, key)
 		}
 
-		nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, entry.value))
+		// Notify to the upper layer only entries not already marked for deletion
+		if !oldEntry.deleting {
+			nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, entry.value))
+		}
 		return false
 	})
 }
@@ -513,8 +519,7 @@ func (nDB *NetworkDB) deleteNodeTableEntries(node string) {
 		nid := params[1]
 		key := params[2]
 
-		nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key))
-		nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key))
+		nDB.deleteEntry(nid, tname, key)
 
 		nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, oldEntry.value))
 		return false
@@ -558,7 +563,12 @@ func (nDB *NetworkDB) JoinNetwork(nid string) error {
 		nodeNetworks = make(map[string]*network)
 		nDB.networks[nDB.config.NodeName] = nodeNetworks
 	}
-	nodeNetworks[nid] = &network{id: nid, ltime: ltime}
+	n, ok := nodeNetworks[nid]
+	var entries int
+	if ok {
+		entries = n.entriesNumber
+	}
+	nodeNetworks[nid] = &network{id: nid, ltime: ltime, entriesNumber: entries}
 	nodeNetworks[nid].tableBroadcasts = &memberlist.TransmitLimitedQueue{
 		NumNodes: func() int {
 			nDB.RLock()
@@ -567,6 +577,7 @@ func (nDB *NetworkDB) JoinNetwork(nid string) error {
 		},
 		RetransmitMult: 4,
 	}
+
 	nDB.addNetworkNode(nid, nDB.config.NodeName)
 	networkNodes := nDB.networkNodes[nid]
 	nDB.Unlock()
@@ -614,8 +625,9 @@ func (nDB *NetworkDB) LeaveNetwork(nid string) error {
 		return fmt.Errorf("could not find network %s while trying to leave", nid)
 	}
 
+	logrus.Debugf("%s: leaving network %s", nDB.config.NodeName, nid)
 	n.ltime = ltime
-	n.reapTime = reapInterval
+	n.reapTime = reapNetworkInterval
 	n.leaving = true
 	return nil
 }
@@ -679,3 +691,33 @@ func (nDB *NetworkDB) updateLocalNetworkTime() {
 		n.ltime = ltime
 	}
 }
+
+// createOrUpdateEntry this function handles the creation or update of entries into the local
+// tree store. It is also used to keep in sync the entries number of the network (all tables are aggregated)
+func (nDB *NetworkDB) createOrUpdateEntry(nid, tname, key string, entry interface{}) (bool, bool) {
+	_, okTable := nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
+	_, okNetwork := nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
+	if !okNetwork {
+		// Add only if it is an insert not an update
+		n, ok := nDB.networks[nDB.config.NodeName][nid]
+		if ok {
+			n.entriesNumber++
+		}
+	}
+	return okTable, okNetwork
+}
+
+// deleteEntry this function handles the deletion of entries into the local tree store.
+// It is also used to keep in sync the entries number of the network (all tables are aggregated)
+func (nDB *NetworkDB) deleteEntry(nid, tname, key string) (bool, bool) {
+	_, okTable := nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key))
+	_, okNetwork := nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key))
+	if okNetwork {
+		// Remove only if the delete is successful
+		n, ok := nDB.networks[nDB.config.NodeName][nid]
+		if ok {
+			n.entriesNumber--
+		}
+	}
+	return okTable, okNetwork
+}
```
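createOrUpdateEntry and deleteEntry keep the per-network entriesNumber counter consistent across the two indexes by counting only genuine inserts and successful deletes. The sketch below models the same invariant with plain maps standing in for the radix trees; `store`, `upsert`, and `delete` are illustrative names, not NetworkDB's API.

```go
package main

import "fmt"

// store is a toy stand-in for NetworkDB's two indexes: the same entry is
// keyed once per table ("/table/net/key") and once per network
// ("/net/table/key"). A per-network counter is bumped only on real inserts
// and real deletes, so it always equals the number of live keys.
type store struct {
	byTable   map[string]string
	byNetwork map[string]string
	entries   map[string]int // network id -> live entry count
}

func newStore() *store {
	return &store{byTable: map[string]string{}, byNetwork: map[string]string{}, entries: map[string]int{}}
}

// upsert returns true when the key already existed (an update, not an insert).
func (s *store) upsert(nid, tname, key, val string) bool {
	_, existed := s.byNetwork["/"+nid+"/"+tname+"/"+key]
	s.byTable["/"+tname+"/"+nid+"/"+key] = val
	s.byNetwork["/"+nid+"/"+tname+"/"+key] = val
	if !existed {
		s.entries[nid]++ // count only genuine inserts
	}
	return existed
}

// delete returns false when there was nothing to remove.
func (s *store) delete(nid, tname, key string) bool {
	k := "/" + nid + "/" + tname + "/" + key
	if _, ok := s.byNetwork[k]; !ok {
		return false
	}
	delete(s.byNetwork, k)
	delete(s.byTable, "/"+tname+"/"+nid+"/"+key)
	s.entries[nid]-- // count only successful deletes
	return true
}

func main() {
	s := newStore()
	s.upsert("net1", "endpoint_table", "ep1", "v1")
	s.upsert("net1", "endpoint_table", "ep1", "v2") // update, counter unchanged
	s.upsert("net1", "service_table", "svc1", "v1")
	s.delete("net1", "endpoint_table", "ep1")
	fmt.Println(s.entries["net1"]) // 1
}
```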
components/engine/vendor/github.com/docker/libnetwork/networkdb/networkdb.pb.go | 811 (generated, vendored): file diff suppressed because it is too large
components/engine/vendor/github.com/docker/libnetwork/networkdb/networkdb.proto | 8 (generated, vendored)

```diff
@@ -109,7 +109,7 @@ message NetworkEntry {
 	// network event was recorded.
 	uint64 l_time = 2 [(gogoproto.customtype) = "github.com/hashicorp/serf/serf.LamportTime", (gogoproto.nullable) = false];
 	// Source node name where this network attachment happened.
-	string node_name = 3;
+	string node_name = 3 [(gogoproto.customname) = "NodeName"];
 	// Indicates if a leave from this network is in progress.
 	bool leaving = 4;
 }
@@ -119,6 +119,8 @@ message NetworkPushPull {
 	// Lamport time when this push pull was initiated.
 	uint64 l_time = 1 [(gogoproto.customtype) = "github.com/hashicorp/serf/serf.LamportTime", (gogoproto.nullable) = false];
 	repeated NetworkEntry networks = 2;
+	// Name of the node sending this push pull payload.
+	string node_name = 3 [(gogoproto.customname) = "NodeName"];
 }
 
 // TableEvent message payload definition.
@@ -152,6 +154,8 @@ message TableEvent {
 	string key = 6;
 	// Entry value.
 	bytes value = 7;
+	// Residual reap time for the entry before getting deleted in seconds
+	int32 residual_reap_time = 8 [(gogoproto.customname) = "ResidualReapTime"];;
 }
 
 // BulkSync message payload definition.
@@ -180,4 +184,4 @@ message CompoundMessage {
 
 	// A list of simple messages.
 	repeated SimpleMessage messages = 1;
-}
+}
```
components/engine/vendor/github.com/docker/libnetwork/osl/neigh_linux.go | 37 (generated, vendored)

```diff
@@ -9,6 +9,17 @@ import (
 	"github.com/vishvananda/netlink"
 )
 
+// NeighborSearchError indicates that the neighbor is already present
+type NeighborSearchError struct {
+	ip      net.IP
+	mac     net.HardwareAddr
+	present bool
+}
+
+func (n NeighborSearchError) Error() string {
+	return fmt.Sprintf("Search neighbor failed for IP %v, mac %v, present in db:%t", n.ip, n.mac, n.present)
+}
+
 // NeighOption is a function option type to set interface options
 type NeighOption func(nh *neigh)
 
@@ -41,7 +52,7 @@ func (n *networkNamespace) DeleteNeighbor(dstIP net.IP, dstMac net.HardwareAddr,
 
 	nh := n.findNeighbor(dstIP, dstMac)
 	if nh == nil {
-		return fmt.Errorf("could not find the neighbor entry to delete")
+		return NeighborSearchError{dstIP, dstMac, false}
 	}
 
 	if osDelete {
@@ -103,26 +114,27 @@ func (n *networkNamespace) DeleteNeighbor(dstIP net.IP, dstMac net.HardwareAddr,
 		}
 	}
 	n.Unlock()
-	logrus.Debugf("Neighbor entry deleted for IP %v, mac %v", dstIP, dstMac)
+	logrus.Debugf("Neighbor entry deleted for IP %v, mac %v osDelete:%t", dstIP, dstMac, osDelete)
 
 	return nil
 }
 
 func (n *networkNamespace) AddNeighbor(dstIP net.IP, dstMac net.HardwareAddr, force bool, options ...NeighOption) error {
 	var (
-		iface netlink.Link
-		err   error
+		iface                  netlink.Link
+		err                    error
+		neighborAlreadyPresent bool
 	)
 
+	// If the namespace already has the neighbor entry but the AddNeighbor is called
+	// because of a miss notification (force flag) program the kernel anyway.
 	nh := n.findNeighbor(dstIP, dstMac)
 	if nh != nil {
+		neighborAlreadyPresent = true
+		logrus.Warnf("Neighbor entry already present for IP %v, mac %v neighbor:%+v forceUpdate:%t", dstIP, dstMac, nh, force)
 		if !force {
-			logrus.Warnf("Neighbor entry already present for IP %v, mac %v", dstIP, dstMac)
-			return nil
+			return NeighborSearchError{dstIP, dstMac, true}
 		}
-		logrus.Warnf("Force kernel update, Neighbor entry already present for IP %v, mac %v", dstIP, dstMac)
 	}
 
 	nh = &neigh{
@@ -146,8 +158,7 @@ func (n *networkNamespace) AddNeighbor(dstIP net.IP, dstMac net.HardwareAddr, fo
 	if nh.linkDst != "" {
 		iface, err = nlh.LinkByName(nh.linkDst)
 		if err != nil {
-			return fmt.Errorf("could not find interface with destination name %s: %v",
-				nh.linkDst, err)
+			return fmt.Errorf("could not find interface with destination name %s: %v", nh.linkDst, err)
 		}
 	}
 
@@ -167,13 +178,17 @@ func (n *networkNamespace) AddNeighbor(dstIP net.IP, dstMac net.HardwareAddr, fo
 	}
 
 	if err := nlh.NeighSet(nlnh); err != nil {
-		return fmt.Errorf("could not add neighbor entry: %v", err)
+		return fmt.Errorf("could not add neighbor entry:%+v error:%v", nlnh, err)
 	}
 
+	if neighborAlreadyPresent {
+		return nil
+	}
+
 	n.Lock()
 	n.neighbors = append(n.neighbors, nh)
 	n.Unlock()
-	logrus.Debugf("Neighbor entry added for IP %v, mac %v", dstIP, dstMac)
+	logrus.Debugf("Neighbor entry added for IP:%v, mac:%v on ifc:%s", dstIP, dstMac, nh.linkName)
 
 	return nil
 }
```
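Returning the typed osl.NeighborSearchError instead of a fmt.Errorf string is what lets the overlay driver's peerAddOp and peerDeleteOp branch on `err.(osl.NeighborSearchError)` above. A self-contained sketch of the pattern, with a hypothetical `addNeighbor` standing in for the real netlink-backed method:

```go
package main

import (
	"fmt"
	"net"
)

// NeighborSearchError mirrors the osl error type: a concrete error type lets
// callers distinguish "entry already present / not found" from a hard
// netlink failure with a plain type assertion instead of string matching.
type NeighborSearchError struct {
	ip      net.IP
	mac     net.HardwareAddr
	present bool
}

func (e NeighborSearchError) Error() string {
	return fmt.Sprintf("search neighbor failed for IP %v, mac %v, present in db:%t", e.ip, e.mac, e.present)
}

// addNeighbor is a hypothetical stand-in for AddNeighbor.
func addNeighbor(alreadyPresent bool) error {
	if alreadyPresent {
		return NeighborSearchError{net.ParseIP("10.0.0.2"), net.HardwareAddr{2, 66, 10, 0, 0, 2}, true}
	}
	return nil
}

func main() {
	err := addNeighbor(true)
	// Same pattern peerAddOp uses: tolerate the search error in the
	// transient case, bubble up anything else.
	if _, ok := err.(NeighborSearchError); ok {
		fmt.Println("duplicate neighbor, skipping:", err)
		return
	}
	if err != nil {
		fmt.Println("fatal:", err)
	}
}
```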
components/engine/vendor/github.com/docker/libnetwork/sandbox_dns_unix.go | 6 (generated, vendored)

```diff
@@ -67,11 +67,7 @@ func (sb *sandbox) setupResolutionFiles() error {
 		return err
 	}
 
-	if err := sb.setupDNS(); err != nil {
-		return err
-	}
-
-	return nil
+	return sb.setupDNS()
 }
 
 func (sb *sandbox) buildHostsFile() error {
```
components/engine/vendor/github.com/docker/libnetwork/service_common.go | 22 (generated, vendored)

```diff
@@ -161,6 +161,19 @@ func (c *controller) getLBIndex(sid, nid string, ingressPorts []*PortConfig) int
 	return int(lb.fwMark)
 }
 
+// cleanupServiceDiscovery when the network is being deleted, erase all the associated service discovery records
+func (c *controller) cleanupServiceDiscovery(cleanupNID string) {
+	c.Lock()
+	defer c.Unlock()
+	if cleanupNID == "" {
+		logrus.Debugf("cleanupServiceDiscovery for all networks")
+		c.svcRecords = make(map[string]svcInfo)
+		return
+	}
+	logrus.Debugf("cleanupServiceDiscovery for network:%s", cleanupNID)
+	delete(c.svcRecords, cleanupNID)
+}
+
 func (c *controller) cleanupServiceBindings(cleanupNID string) {
 	var cleanupFuncs []func()
 
@@ -184,15 +197,6 @@ func (c *controller) cleanupServiceBindings(cleanupNID string) {
 			continue
 		}
 
-		// The network is being deleted, erase all the associated service discovery records
-		// TODO(fcrisciani) separate the Load Balancer from the Service discovery, this operation
-		// can be done safely here, but the rmServiceBinding is still keeping consistency in the
-		// data structures that are tracking the endpoint to IP mapping.
-		c.Lock()
-		logrus.Debugf("cleanupServiceBindings erasing the svcRecords for %s", nid)
-		delete(c.svcRecords, nid)
-		c.Unlock()
-
 		for eid, ip := range lb.backEnds {
 			epID := eid
 			epIP := ip
```
components/engine/vendor/github.com/docker/libnetwork/types/types.go | 4 (generated, vendored)

```diff
@@ -129,11 +129,11 @@ func (p *PortBinding) GetCopy() PortBinding {
 func (p *PortBinding) String() string {
 	ret := fmt.Sprintf("%s/", p.Proto)
 	if p.IP != nil {
-		ret = fmt.Sprintf("%s%s", ret, p.IP.String())
+		ret += p.IP.String()
 	}
 	ret = fmt.Sprintf("%s:%d/", ret, p.Port)
 	if p.HostIP != nil {
-		ret = fmt.Sprintf("%s%s", ret, p.HostIP.String())
+		ret += p.HostIP.String()
 	}
 	ret = fmt.Sprintf("%s:%d", ret, p.HostPort)
 	return ret
```
components/engine/vendor/github.com/docker/swarmkit/ca/keyreadwriter.go | 5 (generated, vendored)

```diff
@@ -187,10 +187,7 @@ func (k *KeyReadWriter) ViewAndRotateKEK(cb func(KEKData, PEMKeyHeaders) (KEKDat
 		return err
 	}
 
-	if err := k.writeKey(keyBlock, updatedKEK, updatedHeaderObj); err != nil {
-		return err
-	}
-	return nil
+	return k.writeKey(keyBlock, updatedKEK, updatedHeaderObj)
 }
 
 // ViewAndUpdateHeaders updates the header manager, and updates any headers on the existing key
```
components/engine/vendor/github.com/docker/swarmkit/connectionbroker/broker.go | 9 (generated, vendored)

```diff
@@ -4,7 +4,9 @@
 package connectionbroker
 
 import (
+	"net"
 	"sync"
+	"time"
 
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/remotes"
@@ -60,9 +62,14 @@ func (b *Broker) SelectRemote(dialOpts ...grpc.DialOption) (*Conn, error) {
 		return nil, err
 	}
 
+	// gRPC dialer connects to proxy first. Provide a custom dialer here avoid that.
+	// TODO(anshul) Add an option to configure this.
 	dialOpts = append(dialOpts,
 		grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
-		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor))
+		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
+		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+			return net.DialTimeout("tcp", addr, timeout)
+		}))
 
 	cc, err := grpc.Dial(peer.Addr, dialOpts...)
 	if err != nil {
```
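The added grpc.WithDialer forces a direct TCP connection so the connection broker is not routed through an HTTP proxy picked up from the environment. A sketch of the same idea against current grpc-go, which spells the option grpc.WithContextDialer; the `dialDirect` helper and the address are illustrative, not swarmkit code.

```go
package main

import (
	"context"
	"net"
	"time"

	"google.golang.org/grpc"
)

// dialDirect returns a DialOption that makes gRPC open a plain TCP
// connection itself instead of going through any proxy configured in the
// environment. The vendored swarmkit code above uses the older
// grpc.WithDialer; newer grpc-go expresses the same thing with a
// context-aware dialer.
func dialDirect() grpc.DialOption {
	return grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
		d := net.Dialer{Timeout: 5 * time.Second}
		return d.DialContext(ctx, "tcp", addr)
	})
}

func main() {
	// grpc.WithInsecure matches the era of this diff; newer code passes
	// explicit transport credentials instead.
	conn, err := grpc.Dial("manager.example.invalid:2377", dialDirect(), grpc.WithInsecure())
	if err == nil {
		defer conn.Close()
	}
}
```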
components/engine/vendor/github.com/docker/swarmkit/manager/allocator/network.go | 6 (generated, vendored)

```diff
@@ -175,11 +175,7 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) {
 	if err := a.allocateServices(ctx, false); err != nil {
 		return err
 	}
-	if err := a.allocateTasks(ctx, false); err != nil {
-		return err
-	}
-
-	return nil
+	return a.allocateTasks(ctx, false)
 }
 
 func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
```
components/engine/vendor/github.com/docker/swarmkit/manager/controlapi/cluster.go | 4 (generated, vendored)

```diff
@@ -58,6 +58,10 @@ func validateClusterSpec(spec *api.ClusterSpec) error {
 		}
 	}
 
+	if spec.Annotations.Name != store.DefaultClusterName {
+		return grpc.Errorf(codes.InvalidArgument, "modification of cluster name is not allowed")
+	}
+
 	return nil
 }
```
components/engine/vendor/github.com/docker/swarmkit/manager/controlapi/network.go | 6 (generated, vendored)

```diff
@@ -96,11 +96,7 @@ func validateNetworkSpec(spec *api.NetworkSpec, pg plugingetter.PluginGetter) er
 		return err
 	}
 
-	if err := validateIPAM(spec.IPAM, pg); err != nil {
-		return err
-	}
-
-	return nil
+	return validateIPAM(spec.IPAM, pg)
 }
 
 // CreateNetwork creates and returns a Network based on the provided NetworkSpec.
```
components/engine/vendor/github.com/docker/swarmkit/manager/controlapi/node.go | 29 (generated, vendored)

```diff
@@ -248,6 +248,29 @@ func (s *Server) UpdateNode(ctx context.Context, request *api.UpdateNodeRequest)
 	}, nil
 }
 
+func removeNodeAttachments(tx store.Tx, nodeID string) error {
+	// orphan the node's attached containers. if we don't do this, the
+	// network these attachments are connected to will never be removeable
+	tasks, err := store.FindTasks(tx, store.ByNodeID(nodeID))
+	if err != nil {
+		return err
+	}
+	for _, task := range tasks {
+		// if the task is an attachment, then we just delete it. the allocator
+		// will do the heavy lifting. basically, GetAttachment will return the
+		// attachment if that's the kind of runtime, or nil if it's not.
+		if task.Spec.GetAttachment() != nil {
+			// don't delete the task. instead, update it to `ORPHANED` so that
+			// the taskreaper will clean it up.
+			task.Status.State = api.TaskStateOrphaned
+			if err := store.UpdateTask(tx, task); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
 // RemoveNode removes a Node referenced by NodeID with the given NodeSpec.
 // - Returns NotFound if the Node is not found.
 // - Returns FailedPrecondition if the Node has manager role (and is part of the memberlist) or is not shut down.
@@ -276,7 +299,7 @@ func (s *Server) RemoveNode(ctx context.Context, request *api.RemoveNodeRequest)
 		}
 
 		// lookup the cluster
-		clusters, err := store.FindClusters(tx, store.ByName("default"))
+		clusters, err := store.FindClusters(tx, store.ByName(store.DefaultClusterName))
 		if err != nil {
 			return err
 		}
@@ -313,6 +336,10 @@ func (s *Server) RemoveNode(ctx context.Context, request *api.RemoveNodeRequest)
 			return err
 		}
 
+		if err := removeNodeAttachments(tx, request.NodeID); err != nil {
+			return err
+		}
+
 		return store.DeleteNode(tx, request.NodeID)
 	})
 	if err != nil {
```
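removeNodeAttachments deliberately does not delete attachment tasks; it moves them to ORPHANED so the task reaper and allocator can release their network resources through the normal lifecycle, which is what keeps the attached network removable. A toy model of that decision; the `task` struct here is a stand-in for swarmkit's api types, not the real ones.

```go
package main

import "fmt"

type taskState int

const (
	stateRunning taskState = iota
	stateOrphaned
)

type task struct {
	id           string
	isAttachment bool // true when the runtime is a network attachment
	state        taskState
}

// orphanAttachments mirrors removeNodeAttachments: attachment tasks on the
// removed node are flipped to ORPHANED rather than deleted outright, so
// cleanup flows through the normal state machine instead of leaving the
// network unremovable.
func orphanAttachments(tasks []*task) {
	for _, t := range tasks {
		if t.isAttachment {
			t.state = stateOrphaned
		}
	}
}

func main() {
	tasks := []*task{
		{id: "t1", isAttachment: true},
		{id: "t2", isAttachment: false},
	}
	orphanAttachments(tasks)
	for _, t := range tasks {
		fmt.Println(t.id, t.state == stateOrphaned)
	}
}
```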
components/engine/vendor/github.com/docker/swarmkit/manager/controlapi/service.go | 17 (generated, vendored)

```diff
@@ -56,10 +56,7 @@ func validateResourceRequirements(r *api.ResourceRequirements) error {
 	if err := validateResources(r.Limits); err != nil {
 		return err
 	}
-	if err := validateResources(r.Reservations); err != nil {
-		return err
-	}
-	return nil
+	return validateResources(r.Reservations)
 }
 
 func validateRestartPolicy(rp *api.RestartPolicy) error {
@@ -161,11 +158,7 @@ func validateContainerSpec(taskSpec api.TaskSpec) error {
 		return err
 	}
 
-	if err := validateHealthCheck(container.Healthcheck); err != nil {
-		return err
-	}
-
-	return nil
+	return validateHealthCheck(container.Healthcheck)
 }
 
 // validateImage validates image name in containerSpec
@@ -481,11 +474,7 @@ func validateServiceSpec(spec *api.ServiceSpec) error {
 	if err := validateEndpointSpec(spec.Endpoint); err != nil {
 		return err
 	}
-	if err := validateMode(spec); err != nil {
-		return err
-	}
-
-	return nil
+	return validateMode(spec)
 }
 
 // checkPortConflicts does a best effort to find if the passed in spec has port
```
components/engine/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go | 5 (generated, vendored)

```diff
@@ -854,10 +854,7 @@ func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatche
 		appliesTo = msg.ResultsIn
 		msg.Type = assignmentType
 
-		if err := stream.Send(&msg); err != nil {
-			return err
-		}
-		return nil
+		return stream.Send(&msg)
 	}
 
 	// TODO(aaronl): Also send node secrets that should be exposed to
```
components/engine/vendor/github.com/docker/swarmkit/manager/manager.go | 6 (generated, vendored)

```diff
@@ -54,6 +54,9 @@ import (
 const (
 	// defaultTaskHistoryRetentionLimit is the number of tasks to keep.
 	defaultTaskHistoryRetentionLimit = 5
+
+	// Default value for grpc max message size.
+	grpcMaxMessageSize = 128 << 20
 )
 
 // RemoteAddrs provides a listening address and an optional advertise address
@@ -231,6 +234,7 @@ func New(config *Config) (*Manager, error) {
 		grpc.Creds(config.SecurityConfig.ServerTLSCreds),
 		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
 		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+		grpc.MaxMsgSize(grpcMaxMessageSize),
 	}
 
 	m := &Manager{
@@ -400,7 +404,7 @@ func (m *Manager) Run(parent context.Context) error {
 	)
 
 	m.raftNode.MemoryStore().View(func(readTx store.ReadTx) {
-		clusters, err = store.FindClusters(readTx, store.ByName("default"))
+		clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
 	})
```
```diff
@@ -73,7 +73,7 @@ func (g *Orchestrator) Run(ctx context.Context) error {
 	var err error
 	g.store.View(func(readTx store.ReadTx) {
 		var clusters []*api.Cluster
-		clusters, err = store.FindClusters(readTx, store.ByName("default"))
+		clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
 
 		if len(clusters) != 1 {
 			return // just pick up the cluster when it is created.
 
@@ -17,7 +17,7 @@ import (
 // responds to changes in individual tasks (or nodes which run them).
 
 func (r *Orchestrator) initCluster(readTx store.ReadTx) error {
-	clusters, err := store.FindClusters(readTx, store.ByName("default"))
+	clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
 	if err != nil {
 		return err
 	}
 
@@ -384,10 +384,7 @@ func (u *Updater) updateTask(ctx context.Context, slot orchestrator.Slot, update
 			return errors.New("service was deleted")
 		}
 
-		if err := store.CreateTask(tx, updated); err != nil {
-			return err
-		}
-		return nil
+		return store.CreateTask(tx, updated)
 	})
 	if err != nil {
 		return err
```
Some files were not shown because too many files have changed in this diff.