Merge component 'engine' from git@github.com:moby/moby master

GordonTheTurtle 2018-03-22 17:07:05 +00:00
11 changed files with 140 additions and 64 deletions

View File

@@ -192,7 +192,7 @@ func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, e
     // object that implements CloseWrite iff the underlying connection
     // implements it.
     if _, ok := c.(types.CloseWriter); ok {
-        c = &hijackedConnCloseWriter{c, br}
+        c = &hijackedConnCloseWriter{&hijackedConn{c, br}}
     } else {
         c = &hijackedConn{c, br}
     }
@@ -220,7 +220,9 @@ func (c *hijackedConn) Read(b []byte) (int, error) {
 // CloseWrite(). It is returned by setupHijackConn in the case that a) there
 // was already buffered data in the http layer when Hijack() was called, and b)
 // the underlying net.Conn *does* implement CloseWrite().
-type hijackedConnCloseWriter hijackedConn
+type hijackedConnCloseWriter struct {
+    *hijackedConn
+}

 var _ types.CloseWriter = &hijackedConnCloseWriter{}
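Worth noting why the struct-embedding form is needed: in Go, a defined type such as `type B A` does not inherit A's method set, so the old hijackedConnCloseWriter had none of hijackedConn's methods, while embedding *hijackedConn promotes them. A minimal sketch of that rule, using hypothetical stand-in types rather than the client's own:

package main

import "fmt"

type reader interface{ Read() string }

type conn struct{}

func (conn) Read() string { return "payload" }

// Defined type: shares conn's underlying type, but has an EMPTY method set.
type definedConn conn

// Embedding: conn's methods are promoted, so embeddedConn implements reader.
type embeddedConn struct{ conn }

func main() {
    var _ reader = embeddedConn{} // compiles thanks to method promotion
    // var _ reader = definedConn{} // would fail: definedConn has no Read method
    fmt.Println(embeddedConn{}.Read())
}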

View File

@@ -104,10 +104,6 @@ func allocateDaemonPort(addr string) error {
     return nil
 }

-// notifyShutdown is called after the daemon shuts down but before the process exits.
-func notifyShutdown(err error) {
-}
-
 func wrapListeners(proto string, ls []net.Listener) []net.Listener {
     switch proto {
     case "unix":

View File

@@ -3,7 +3,6 @@ package main
 import (
     "fmt"
     "os"
-    "path/filepath"
     "runtime"

     "github.com/docker/docker/cli"
@@ -25,6 +24,10 @@ func newDaemonCommand() *cobra.Command {
         SilenceErrors: true,
         Args:          cli.NoArgs,
         RunE: func(cmd *cobra.Command, args []string) error {
+            if opts.version {
+                showVersion()
+                return nil
+            }
             opts.flags = cmd.Flags()
             return runDaemon(opts)
         },
@@ -41,45 +44,6 @@ func newDaemonCommand() *cobra.Command {
     return cmd
 }

-func runDaemon(opts *daemonOptions) error {
-    if opts.version {
-        showVersion()
-        return nil
-    }
-
-    daemonCli := NewDaemonCli()
-
-    // Windows specific settings as these are not defaulted.
-    if runtime.GOOS == "windows" {
-        if opts.daemonConfig.Pidfile == "" {
-            opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "docker.pid")
-        }
-        if opts.configFile == "" {
-            opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`)
-        }
-    }
-
-    // On Windows, this may be launching as a service or with an option to
-    // register the service.
-    stop, runAsService, err := initService(daemonCli)
-    if err != nil {
-        logrus.Fatal(err)
-    }
-
-    if stop {
-        return nil
-    }
-
-    // If Windows SCM manages the service - no need for PID files
-    if runAsService {
-        opts.daemonConfig.Pidfile = ""
-    }
-
-    err = daemonCli.start(opts)
-    notifyShutdown(err)
-    return err
-}
-
 func showVersion() {
     fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit)
 }

View File

@@ -0,0 +1,8 @@
+// +build !windows
+
+package main
+
+func runDaemon(opts *daemonOptions) error {
+    daemonCli := NewDaemonCli()
+    return daemonCli.start(opts)
+}

View File

@@ -1,5 +1,38 @@
 package main

 import (
+    "path/filepath"
+
     _ "github.com/docker/docker/autogen/winresources/dockerd"
+    "github.com/sirupsen/logrus"
 )
+
+func runDaemon(opts *daemonOptions) error {
+    daemonCli := NewDaemonCli()
+
+    // On Windows, this may be launching as a service or with an option to
+    // register the service.
+    stop, runAsService, err := initService(daemonCli)
+    if err != nil {
+        logrus.Fatal(err)
+    }
+
+    if stop {
+        return nil
+    }
+
+    // Windows specific settings as these are not defaulted.
+    if opts.configFile == "" {
+        opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`)
+    }
+
+    if runAsService {
+        // If Windows SCM manages the service - no need for PID files
+        opts.daemonConfig.Pidfile = ""
+    } else if opts.daemonConfig.Pidfile == "" {
+        opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "docker.pid")
+    }
+
+    err = daemonCli.start(opts)
+    notifyShutdown(err)
+    return err
+}

View File

@@ -6,9 +6,5 @@ import (
     "github.com/spf13/pflag"
 )

-func initService(daemonCli *DaemonCli) (bool, bool, error) {
-    return false, false, nil
-}
-
 func installServiceFlags(flags *pflag.FlagSet) {
 }

View File

@@ -255,7 +255,7 @@ func setCapabilities(s *specs.Spec, c *container.Container) error {
     if c.HostConfig.Privileged {
         caplist = caps.GetAllCapabilities()
     } else {
-        caplist, err = caps.TweakCapabilities(s.Process.Capabilities.Effective, c.HostConfig.CapAdd, c.HostConfig.CapDrop)
+        caplist, err = caps.TweakCapabilities(s.Process.Capabilities.Bounding, c.HostConfig.CapAdd, c.HostConfig.CapDrop)
         if err != nil {
             return err
         }
@@ -264,6 +264,12 @@ func setCapabilities(s *specs.Spec, c *container.Container) error {
     s.Process.Capabilities.Bounding = caplist
     s.Process.Capabilities.Permitted = caplist
     s.Process.Capabilities.Inheritable = caplist
+    // setUser has already been executed here
+    // if non root drop capabilities in the way execve does
+    if s.Process.User.UID != 0 {
+        s.Process.Capabilities.Effective = []string{}
+        s.Process.Capabilities.Permitted = []string{}
+    }

     return nil
 }
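The intent of the new block mirrors what execve does for a non-root process: the bounding and inheritable sets survive, while the effective and permitted sets start out empty until the binary gains capabilities some other way (e.g. file capabilities). A self-contained sketch of that shape, using the runtime-spec types the diff operates on; the helper name is made up for illustration, the real logic lives inline in setCapabilities:

package main

import (
    "fmt"

    specs "github.com/opencontainers/runtime-spec/specs-go"
)

// dropCapsForNonRoot applies the execve-style rule from the diff above:
// keep bounding/inheritable, clear effective/permitted when UID != 0.
func dropCapsForNonRoot(p *specs.Process, caplist []string) {
    p.Capabilities.Bounding = caplist
    p.Capabilities.Permitted = caplist
    p.Capabilities.Inheritable = caplist
    if p.User.UID != 0 {
        p.Capabilities.Effective = []string{}
        p.Capabilities.Permitted = []string{}
    }
}

func main() {
    p := &specs.Process{
        User:         specs.User{UID: 1000},
        Capabilities: &specs.LinuxCapabilities{},
    }
    dropCapsForNonRoot(p, []string{"CAP_CHOWN", "CAP_NET_BIND_SERVICE"})
    fmt.Printf("bounding=%v effective=%v\n", p.Capabilities.Bounding, p.Capabilities.Effective)
}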

View File

@@ -62,14 +62,14 @@ func TestNetworkLoopbackNat(t *testing.T) {
     defer setupTest(t)()

     msg := "it works"
-    startServerContainer(t, msg, 8080)
+    serverContainerID := startServerContainer(t, msg, 8080)
     endpoint := getExternalAddress(t)

     client := request.NewAPIClient(t)
     ctx := context.Background()

-    cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())), container.WithTty(true), container.WithNetworkMode("container:server"))
+    cID := container.Run(t, ctx, client, container.WithCmd("sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())), container.WithTty(true), container.WithNetworkMode("container:"+serverContainerID))

     poll.WaitOn(t, container.IsStopped(ctx, client, cID), poll.WithDelay(100*time.Millisecond))
@@ -90,7 +90,7 @@ func startServerContainer(t *testing.T, msg string, port int) string {
     client := request.NewAPIClient(t)
     ctx := context.Background()
-    cID := container.Run(t, ctx, client, container.WithName("server"), container.WithCmd("sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port)), container.WithExposedPorts(fmt.Sprintf("%d/tcp", port)), func(c *container.TestContainerConfig) {
+    cID := container.Run(t, ctx, client, container.WithName("server-"+t.Name()), container.WithCmd("sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port)), container.WithExposedPorts(fmt.Sprintf("%d/tcp", port)), func(c *container.TestContainerConfig) {
         c.HostConfig.PortBindings = nat.PortMap{
             nat.Port(fmt.Sprintf("%d/tcp", port)): []nat.PortBinding{
                 {
View File

@@ -6,7 +6,6 @@ import (
     "time"

     "github.com/docker/docker/api/types"
-    "github.com/docker/docker/api/types/filters"
     swarmtypes "github.com/docker/docker/api/types/swarm"
     "github.com/docker/docker/client"
     "github.com/docker/docker/integration/internal/swarm"
@@ -51,10 +50,6 @@ func TestServiceWithPredefinedNetwork(t *testing.T) {
     err = client.ServiceRemove(context.Background(), serviceID)
     assert.NilError(t, err)

     poll.WaitOn(t, serviceIsRemoved(client, serviceID), pollSettings)
-    poll.WaitOn(t, noTasks(client), pollSettings)
 }

 const ingressNet = "ingress"
@@ -108,7 +103,7 @@ func TestServiceWithIngressNetwork(t *testing.T) {
     assert.NilError(t, err)

     poll.WaitOn(t, serviceIsRemoved(client, serviceID), pollSettings)
-    poll.WaitOn(t, noTasks(client), pollSettings)
+    poll.WaitOn(t, noServices(client), pollSettings)

     // Ensure that "ingress" is not removed or corrupted
     time.Sleep(10 * time.Second)
@@ -125,8 +120,6 @@ func TestServiceWithIngressNetwork(t *testing.T) {
 func serviceRunningCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result {
     return func(log poll.LogT) poll.Result {
-        filter := filters.NewArgs()
-        filter.Add("service", serviceID)
         services, err := client.ServiceList(context.Background(), types.ServiceListOptions{})
         if err != nil {
             return poll.Error(err)
@@ -160,3 +153,17 @@ func swarmIngressReady(client client.NetworkAPIClient) func(log poll.LogT) poll.
         return poll.Success()
     }
 }
+
+func noServices(client client.ServiceAPIClient) func(log poll.LogT) poll.Result {
+    return func(log poll.LogT) poll.Result {
+        services, err := client.ServiceList(context.Background(), types.ServiceListOptions{})
+        switch {
+        case err != nil:
+            return poll.Error(err)
+        case len(services) == 0:
+            return poll.Success()
+        default:
+            return poll.Continue("Service count at %d waiting for 0", len(services))
+        }
+    }
+}
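The new noServices poller follows the same contract as the other check functions in this file. A minimal sketch of how it might be driven, assuming the poll and request helpers already imported by these tests (timeout values are made up for illustration):

func TestAllServicesGone(t *testing.T) {
    client := request.NewAPIClient(t)
    poll.WaitOn(t, noServices(client),
        poll.WithDelay(100*time.Millisecond),
        poll.WithTimeout(30*time.Second))
}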

View File

@@ -0,0 +1,64 @@
+package system
+
+import (
+    "context"
+    "os"
+    "testing"
+
+    "github.com/docker/docker/api/types"
+    "github.com/docker/docker/api/types/container"
+    "github.com/docker/docker/integration-cli/daemon"
+    "github.com/gotestyourself/gotestyourself/assert"
+)
+
+// hasSystemd checks whether the host was booted with systemd as its init
+// system. Stolen from
+// https://github.com/coreos/go-systemd/blob/176f85496f4e/util/util.go#L68
+func hasSystemd() bool {
+    fi, err := os.Lstat("/run/systemd/system")
+    if err != nil {
+        return false
+    }
+    return fi.IsDir()
+}
+
+// TestCgroupDriverSystemdMemoryLimit checks that container
+// memory limit can be set when using systemd cgroupdriver.
+// https://github.com/moby/moby/issues/35123
+func TestCgroupDriverSystemdMemoryLimit(t *testing.T) {
+    t.Parallel()
+
+    if !hasSystemd() {
+        t.Skip("systemd not available")
+    }
+
+    d := daemon.New(t, "docker", "dockerd", daemon.Config{})
+    client, err := d.NewClient()
+    assert.NilError(t, err)
+    d.StartWithBusybox(t, "--exec-opt", "native.cgroupdriver=systemd", "--iptables=false")
+    defer d.Stop(t)
+
+    const mem = 64 * 1024 * 1024 // 64 MB
+
+    cfg := container.Config{
+        Image: "busybox",
+        Cmd:   []string{"top"},
+    }
+    hostcfg := container.HostConfig{
+        Resources: container.Resources{
+            Memory: mem,
+        },
+    }
+
+    ctx := context.Background()
+    ctr, err := client.ContainerCreate(ctx, &cfg, &hostcfg, nil, "")
+    assert.NilError(t, err)
+    defer client.ContainerRemove(ctx, ctr.ID, types.ContainerRemoveOptions{Force: true})
+
+    err = client.ContainerStart(ctx, ctr.ID, types.ContainerStartOptions{})
+    assert.NilError(t, err)
+
+    s, err := client.ContainerInspect(ctx, ctr.ID)
+    assert.NilError(t, err)
+    assert.Equal(t, s.HostConfig.Memory, mem)
+}

View File

@@ -105,7 +105,7 @@ Loop:
         }
         if len(call.Excludes.Caps) > 0 {
             for _, c := range call.Excludes.Caps {
-                if inSlice(rs.Process.Capabilities.Effective, c) {
+                if inSlice(rs.Process.Capabilities.Bounding, c) {
                     continue Loop
                 }
             }
@@ -117,7 +117,7 @@ Loop:
         }
         if len(call.Includes.Caps) > 0 {
             for _, c := range call.Includes.Caps {
-                if !inSlice(rs.Process.Capabilities.Effective, c) {
+                if !inSlice(rs.Process.Capabilities.Bounding, c) {
                     continue Loop
                 }
             }