Compare commits
16 Commits
v17.06.1-ce ... v17.06.2-ce
| Author | SHA1 | Date |
|---|---|---|
|  | cec0b72a99 |  |
|  | ee7dfb4373 |  |
|  | eedf92a47a |  |
|  | a3cf1fb301 |  |
|  | 34d73cd0d8 |  |
|  | e31c8d8a5c |  |
|  | 6e39a2cb71 |  |
|  | 638a87493e |  |
|  | 5f8026ab88 |  |
|  | 6988f4f23f |  |
|  | 896a9ffc6b |  |
|  | 6c44271690 |  |
|  | 7294ad7f3c |  |
|  | 4661389bd3 |  |
|  | d25b1a3913 |  |
|  | 08552809a0 |  |
CHANGELOG.md (15 changed lines)
@@ -5,6 +5,21 @@ information on the list of deprecated flags and APIs please have a look at
https://docs.docker.com/engine/deprecated/ where target removal dates can also
be found.

## 17.06.2-ce (2017-09-05)

### Client

- Enable TCP keepalive in the client to prevent loss of connection [docker/cli#415](https://github.com/docker/cli/pull/415)

### Runtime

- Devmapper: ensure UdevWait is called after calls to setCookie [moby/moby#33732](https://github.com/moby/moby/pull/33732)
- Aufs: ensure diff layers are correctly removed to prevent leftover files from using up storage [moby/moby#34587](https://github.com/moby/moby/pull/34587)

### Swarm mode

- Ignore PullOptions for running tasks [docker/swarmkit#2351](https://github.com/docker/swarmkit/pull/2351)

## 17.06.1-ce (2017-08-17)

### Builder
@@ -1 +1 @@
17.06.1-ce
17.06.2-ce
@@ -3,9 +3,11 @@ package command
import (
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
	"runtime"
	"time"

	"github.com/docker/cli/cli"
	cliconfig "github.com/docker/cli/cli/config"
@@ -285,6 +287,10 @@ func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, er
	}
	tr := &http.Transport{
		TLSClientConfig: config,
		DialContext: (&net.Dialer{
			KeepAlive: 30 * time.Second,
			Timeout:   30 * time.Second,
		}).DialContext,
	}
	proto, addr, _, err := client.ParseHost(host)
	if err != nil {
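The hunk above is the whole of the keepalive fix: a `net.Dialer` with `KeepAlive` set is wired into the transport used by the CLI's HTTP client, so idle API connections send TCP keepalive probes instead of silently going half-open. A minimal, self-contained sketch of the same pattern (the target URL and variable names are illustrative, not taken from the CLI source):

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

func main() {
	// Dialer with TCP keepalive enabled: the kernel sends keepalive probes
	// on otherwise idle connections, so dead peers are detected instead of
	// the connection hanging indefinitely.
	dialer := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: dialer.DialContext,
		},
	}

	resp, err := client.Get("http://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```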
@@ -1 +1 @@
17.06.1-ce
17.06.2-ce
@@ -143,6 +143,23 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
		}
	}

	for _, path := range []string{"mnt", "diff"} {
		p := filepath.Join(root, path)
		dirs, err := ioutil.ReadDir(p)
		if err != nil {
			logrus.WithError(err).WithField("dir", p).Error("error reading dir entries")
			continue
		}
		for _, dir := range dirs {
			if strings.HasSuffix(dir.Name(), "-removing") {
				logrus.WithField("dir", dir.Name()).Debug("Cleaning up stale layer dir")
				if err := system.EnsureRemoveAll(filepath.Join(p, dir.Name())); err != nil {
					logrus.WithField("dir", dir.Name()).WithError(err).Error("Error removing stale layer dir")
				}
			}
		}
	}

	a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps)
	return a, nil
}
@@ -318,26 +335,6 @@ func (a *Driver) Remove(id string) error {
		retries++
		logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries)
		time.Sleep(100 * time.Millisecond)
		continue
	}

	// Atomically remove each directory in turn by first moving it out of the
	// way (so that docker doesn't find it anymore) before doing removal of
	// the whole tree.
	tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id))
	if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) {
		if err == syscall.EBUSY {
			logger.WithField("dir", mountpoint).WithError(err).Warn("os.Rename err due to EBUSY")
		}
		return errors.Wrapf(err, "error preparing atomic delete of aufs mountpoint for id: %s", id)
	}
	if err := system.EnsureRemoveAll(tmpMntPath); err != nil {
		return errors.Wrapf(err, "error removing aufs layer %s", id)
	}

	tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id))
	if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) {
		return errors.Wrapf(err, "error preparing atomic delete of aufs diff dir for id: %s", id)
	}

	// Remove the layers file for the id
@@ -345,12 +342,44 @@ func (a *Driver) Remove(id string) error {
		return errors.Wrapf(err, "error removing layers dir for %s", id)
	}

	if err := atomicRemove(a.getDiffPath(id)); err != nil {
		return errors.Wrapf(err, "could not remove diff path for id %s", id)
	}

	// Atomically remove each directory in turn by first moving it out of the
	// way (so that docker doesn't find it anymore) before doing removal of
	// the whole tree.
	if err := atomicRemove(mountpoint); err != nil {
		if errors.Cause(err) == syscall.EBUSY {
			logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY")
		}
		return errors.Wrapf(err, "could not remove mountpoint for id %s", id)
	}

	a.pathCacheLock.Lock()
	delete(a.pathCache, id)
	a.pathCacheLock.Unlock()
	return nil
}

func atomicRemove(source string) error {
	target := source + "-removing"

	err := os.Rename(source, target)
	switch {
	case err == nil, os.IsNotExist(err):
	case os.IsExist(err):
		// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
		if _, e := os.Stat(source); !os.IsNotExist(e) {
			return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up")
		}
	default:
		return errors.Wrapf(err, "error preparing atomic delete")
	}

	return system.EnsureRemoveAll(target)
}

// Get returns the rootfs path for the id.
// This will mount the dir at its given path
func (a *Driver) Get(id, mountLabel string) (string, error) {
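The aufs change above replaces in-place deletion with a rename-then-remove scheme: `Remove` first renames the layer's `mnt` and `diff` directories to `<id>-removing` so the daemon stops seeing them, then deletes the renamed trees, while `Init` sweeps up any `-removing` leftovers from an earlier crash. A rough sketch of the same idea using only the standard library (`os.RemoveAll` stands in for `system.EnsureRemoveAll`; the helper names and paths are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// atomicRemove hides a directory from readers by renaming it with a
// "-removing" suffix before deleting the whole tree.
func atomicRemove(source string) error {
	target := source + "-removing"
	if err := os.Rename(source, target); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("preparing atomic delete of %s: %w", source, err)
	}
	return os.RemoveAll(target)
}

// cleanupStale removes any "-removing" directories left behind by a crash.
func cleanupStale(root string) error {
	entries, err := os.ReadDir(root)
	if err != nil {
		return err
	}
	for _, e := range entries {
		if strings.HasSuffix(e.Name(), "-removing") {
			if err := os.RemoveAll(filepath.Join(root, e.Name())); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	root, err := os.MkdirTemp("", "aufs-demo-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(root)

	layer := filepath.Join(root, "layer1")
	if err := os.Mkdir(layer, 0o755); err != nil {
		panic(err)
	}

	if err := atomicRemove(layer); err != nil {
		fmt.Println("remove failed:", err)
	}
	if err := cleanupStale(root); err != nil {
		fmt.Println("cleanup failed:", err)
	}
	fmt.Println("done")
}
```

The point of the rename step is that a crash between rename and delete leaves a directory the daemon no longer treats as a live layer, which the startup sweep can safely discard.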
@@ -12,6 +12,8 @@ import (
	"sync"
	"testing"

	"path/filepath"

	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/reexec"
@@ -147,7 +149,10 @@ func TestRemoveImage(t *testing.T) {

	for _, p := range paths {
		if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil {
			t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p)
			t.Fatalf("Error should not be nil because dirs with id 1 should be deleted: %s", p)
		}
		if _, err := os.Stat(path.Join(tmp, p, "1-removing")); err == nil {
			t.Fatalf("Error should not be nil because dirs with id 1-removing should be deleted: %s", p)
		}
	}
}
@@ -800,3 +805,23 @@ func BenchmarkConcurrentAccess(b *testing.B) {
		}
	}
}

func TestInitStaleCleanup(t *testing.T) {
	if err := os.MkdirAll(tmp, 0755); err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)

	for _, d := range []string{"diff", "mnt"} {
		if err := os.MkdirAll(filepath.Join(tmp, d, "123-removing"), 0755); err != nil {
			t.Fatal(err)
		}
	}

	testInit(tmp, t)
	for _, d := range []string{"diff", "mnt"} {
		if _, err := os.Stat(filepath.Join(tmp, d, "123-removing")); err == nil {
			t.Fatal("cleanup failed")
		}
	}
}
@@ -333,6 +333,7 @@ func RemoveDevice(name string) error {
	if err := task.setCookie(cookie, 0); err != nil {
		return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
	}
	defer UdevWait(cookie)

	dmSawBusy = false // reset before the task is run
	if err = task.run(); err != nil {
@@ -342,7 +343,7 @@ func RemoveDevice(name string) error {
		return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err)
	}

	return UdevWait(cookie)
	return nil
}

// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred.
@@ -368,10 +369,6 @@ func RemoveDeviceDeferred(name string) error {
		return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
	}

	if err = task.run(); err != nil {
		return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
	}

	// libdevmapper and udev relies on System V semaphore for synchronization,
	// semaphores created in `task.setCookie` will be cleaned up in `UdevWait`.
	// So these two function call must come in pairs, otherwise semaphores will
@@ -381,8 +378,13 @@ func RemoveDeviceDeferred(name string) error {
	// this call will not wait for the deferred removal's final executing, since no
	// udev event will be generated, and the semaphore's value will not be incremented
	// by udev, what UdevWait is just cleaning up the semaphore.
	defer UdevWait(cookie)

	return UdevWait(cookie)
	if err = task.run(); err != nil {
		return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
	}

	return nil
}

// CancelDeferredRemove cancels a deferred remove for a device.
@@ -476,12 +478,13 @@ func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize
	if err := task.setCookie(cookie, flags); err != nil {
		return fmt.Errorf("devicemapper: Can't set cookie %s", err)
	}
	defer UdevWait(cookie)

	if err := task.run(); err != nil {
		return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
	}

	return UdevWait(cookie)
	return nil
}

// ReloadPool is the programmatic example of "dmsetup reload".
@@ -661,12 +664,13 @@ func ResumeDevice(name string) error {
	if err := task.setCookie(cookie, 0); err != nil {
		return fmt.Errorf("devicemapper: Can't set cookie %s", err)
	}
	defer UdevWait(cookie)

	if err := task.run(); err != nil {
		return fmt.Errorf("devicemapper: Error running deviceResume %s", err)
	}

	return UdevWait(cookie)
	return nil
}

// CreateDevice creates a device with the specified poolName with the specified device id.
@@ -759,11 +763,13 @@ func activateDevice(poolName string, name string, deviceID int, size uint64, ext
		return fmt.Errorf("devicemapper: Can't set cookie %s", err)
	}

	defer UdevWait(cookie)

	if err := task.run(); err != nil {
		return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err)
	}

	return UdevWait(cookie)
	return nil
}

// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active.
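The devicemapper hunks all make the same change: `UdevWait(cookie)` moves from the success-path `return` into a `defer` placed immediately after the matching `setCookie` call, so the System V semaphore created by `setCookie` is released even when `task.run()` fails. A generic sketch of that pairing idiom (the `acquireCookie`, `waitAndRelease`, and `runTask` names are illustrative stand-ins, not the real libdevmapper bindings):

```go
package main

import (
	"errors"
	"fmt"
)

// acquireCookie stands in for task.setCookie: it allocates a synchronization
// resource that must later be released exactly once.
func acquireCookie() (uint, error) { return 42, nil }

// waitAndRelease stands in for UdevWait: it waits for the event tied to the
// cookie and releases the underlying semaphore.
func waitAndRelease(cookie uint) { fmt.Println("released cookie", cookie) }

// runTask stands in for task.run and may fail.
func runTask() error { return errors.New("ioctl failed") }

func removeDevice() error {
	cookie, err := acquireCookie()
	if err != nil {
		return fmt.Errorf("can not set cookie: %w", err)
	}
	// Deferring the release right after acquisition guarantees the cookie is
	// cleaned up on every return path, including the error returns below.
	defer waitAndRelease(cookie)

	if err := runTask(); err != nil {
		return fmt.Errorf("error running RemoveDevice: %w", err)
	}
	return nil
}

func main() {
	if err := removeDevice(); err != nil {
		fmt.Println(err)
	}
}
```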
@@ -108,7 +108,7 @@ github.com/containerd/containerd 6e23458c129b551d5c9871e5174f6b1b7f6d1170 https:
github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4

# cluster
github.com/docker/swarmkit a0a7f6f663c35c92ddcd73e2c1b97b0f4ed8caf3
github.com/docker/swarmkit bf3d9a21fa618289839963138923b1534103486a
github.com/gogo/protobuf v0.4
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
components/engine/vendor/github.com/docker/swarmkit/manager/orchestrator/task.go (24 changed lines, generated, vendored)
@@ -67,7 +67,29 @@ func IsTaskDirty(s *api.Service, t *api.Task) bool {
		return false
	}

	return !reflect.DeepEqual(s.Spec.Task, t.Spec) ||
	// Make a deep copy of the service and task spec for the comparison.
	serviceTaskSpec := *s.Spec.Task.Copy()

	// For non-failed tasks with a container spec runtime that have already
	// pulled the required image (i.e., current state is between READY and
	// RUNNING inclusively), ignore the value of the `PullOptions` field by
	// setting the copied service to have the same PullOptions value as the
	// task. A difference in only the `PullOptions` field should not cause
	// a running (or ready to run) task to be considered 'dirty' when we
	// handle updates.
	// See https://github.com/docker/swarmkit/issues/971
	currentState := t.Status.State
	// Ignore PullOpts if the task is desired to be in a "runnable" state
	// and its last known current state is between READY and RUNNING in
	// which case we know that the task either successfully pulled its
	// container image or didn't need to.
	ignorePullOpts := t.DesiredState <= api.TaskStateRunning && currentState >= api.TaskStateReady && currentState <= api.TaskStateRunning
	if ignorePullOpts && serviceTaskSpec.GetContainer() != nil && t.Spec.GetContainer() != nil {
		// Modify the service's container spec.
		serviceTaskSpec.GetContainer().PullOptions = t.Spec.GetContainer().PullOptions
	}

	return !reflect.DeepEqual(serviceTaskSpec, t.Spec) ||
		(t.Endpoint != nil && !reflect.DeepEqual(s.Spec.Endpoint, t.Endpoint.Spec))
}
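The new `IsTaskDirty` logic deep-copies the service's task spec and, for tasks already at or past READY, overwrites the copy's `PullOptions` with the task's own value before the `reflect.DeepEqual` comparison, so a PullOptions-only difference no longer marks a running task as dirty. A toy illustration of that normalize-then-compare pattern with made-up spec types (not the real swarmkit API):

```go
package main

import (
	"fmt"
	"reflect"
)

// Toy stand-ins for the real swarmkit spec types.
type PullOptions struct{ RegistryAuth string }

type TaskSpec struct {
	Image       string
	PullOptions PullOptions
}

// isDirty reports whether the desired spec differs from the task's current
// spec, optionally ignoring PullOptions for tasks that are already running.
func isDirty(desired, current TaskSpec, running bool) bool {
	if running {
		// Normalize the field we want to ignore before comparing.
		desired.PullOptions = current.PullOptions
	}
	return !reflect.DeepEqual(desired, current)
}

func main() {
	desired := TaskSpec{Image: "nginx:1.13", PullOptions: PullOptions{RegistryAuth: "new-token"}}
	current := TaskSpec{Image: "nginx:1.13", PullOptions: PullOptions{RegistryAuth: "old-token"}}

	fmt.Println(isDirty(desired, current, true))  // false: PullOptions-only change is ignored
	fmt.Println(isDirty(desired, current, false)) // true: a not-yet-running task would still be replaced
}
```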