diff --git a/components/engine/container/container.go b/components/engine/container/container.go
index 4b42924301..2fd007e0a5 100644
--- a/components/engine/container/container.go
+++ b/components/engine/container/container.go
@@ -159,7 +159,7 @@ func (container *Container) FromDisk() error {
 }
 
 // ToDisk saves the container configuration on disk.
-func (container *Container) ToDisk() error {
+func (container *Container) toDisk() error {
 	pth, err := container.ConfigPath()
 	if err != nil {
 		return err
@@ -181,27 +181,13 @@ func (container *Container) ToDisk() error {
 	return container.WriteHostConfig()
 }
 
-// ToDiskLocking saves the container configuration on disk in a thread safe way.
-func (container *Container) ToDiskLocking() error {
-	container.Lock()
-	err := container.ToDisk()
-	container.Unlock()
-	return err
-}
-
-// CheckpointTo makes the Container's current state visible to queries.
+// CheckpointTo makes the Container's current state visible to queries, and persists state.
 // Callers must hold a Container lock.
 func (container *Container) CheckpointTo(store ViewDB) error {
-	return store.Save(container.snapshot())
-}
-
-// CheckpointAndSaveToDisk is equivalent to calling CheckpointTo and ToDisk.
-// Callers must hold a Container lock.
-func (container *Container) CheckpointAndSaveToDisk(store ViewDB) error {
-	if err := container.CheckpointTo(store); err != nil {
+	if err := container.toDisk(); err != nil {
 		return err
 	}
-	return container.ToDisk()
+	return store.Save(container)
}
 
 // readHostConfig reads the host configuration from disk for the container.
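With `ToDiskLocking` and `CheckpointAndSaveToDisk` removed, `CheckpointTo` becomes the single write path: it persists to disk and then refreshes the in-memory replica, and callers are expected to hold the container lock themselves. A minimal sketch of the resulting calling pattern (`saveContainer` is a hypothetical name; the daemon's `checkpointAndSave` helper later in this diff does exactly this):

```go
package example

import "github.com/docker/docker/container"

// saveContainer mirrors daemon.checkpointAndSave (updated later in this
// diff): take the container lock, then checkpoint. CheckpointTo now does
// both the disk write (toDisk) and the replica update (store.Save).
func saveContainer(c *container.Container, replica container.ViewDB) error {
	c.Lock()
	defer c.Unlock()
	return c.CheckpointTo(replica)
}
```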
diff --git a/components/engine/container/snapshot.go b/components/engine/container/snapshot.go
deleted file mode 100644
index 861b7ae65e..0000000000
--- a/components/engine/container/snapshot.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package container
-
-import (
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/network"
-	"github.com/docker/go-connections/nat"
-)
-
-// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a
-// versioned ACID in-memory store. Pointers are avoided here to make sure all values are copied into the store.
-type Snapshot struct {
-	ID           string `json:"Id"`
-	Name         string
-	Pid          int
-	Managed      bool
-	Image        string
-	ImageID      string
-	Command      string
-	Ports        []types.Port
-	ExposedPorts nat.PortSet
-	PublishPorts nat.PortSet
-	Labels       map[string]string
-	State        string
-	Status       string
-	Health       string
-	HostConfig   struct {
-		NetworkMode string
-		Isolation   string
-	}
-	NetworkSettings types.SummaryNetworkSettings
-	Mounts          []types.MountPoint
-	Created         time.Time
-	StartedAt       time.Time
-	Running         bool
-	Paused          bool
-	ExitCode        int
-}
-
-// Snapshot provides a read only view of a Container. Callers must hold a Lock on the container object.
-func (container *Container) snapshot() *Snapshot {
-	snapshot := &Snapshot{
-		ID:           container.ID,
-		Name:         container.Name,
-		Pid:          container.Pid,
-		Managed:      container.Managed,
-		ImageID:      container.ImageID.String(),
-		Ports:        []types.Port{},
-		ExposedPorts: make(nat.PortSet),
-		PublishPorts: make(nat.PortSet),
-		State:        container.State.StateString(),
-		Status:       container.State.String(),
-		Health:       container.State.HealthString(),
-		Mounts:       container.GetMountPoints(),
-		Created:      container.Created,
-		StartedAt:    container.StartedAt,
-		Running:      container.Running,
-		Paused:       container.Paused,
-		ExitCode:     container.ExitCode(),
-	}
-
-	if container.HostConfig != nil {
-		snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation)
-		snapshot.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
-		for publish := range container.HostConfig.PortBindings {
-			snapshot.PublishPorts[publish] = struct{}{}
-		}
-	}
-
-	if container.Config != nil {
-		snapshot.Image = container.Config.Image
-		snapshot.Labels = container.Config.Labels
-		for exposed := range container.Config.ExposedPorts {
-			snapshot.ExposedPorts[exposed] = struct{}{}
-		}
-	}
-
-	if len(container.Args) > 0 {
-		args := []string{}
-		for _, arg := range container.Args {
-			if strings.Contains(arg, " ") {
-				args = append(args, fmt.Sprintf("'%s'", arg))
-			} else {
-				args = append(args, arg)
-			}
-		}
-		argsAsString := strings.Join(args, " ")
-		snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
-	} else {
-		snapshot.Command = container.Path
-	}
-
-	if container.NetworkSettings != nil {
-		networks := make(map[string]*network.EndpointSettings)
-		for name, netw := range container.NetworkSettings.Networks {
-			if netw == nil || netw.EndpointSettings == nil {
-				continue
-			}
-			networks[name] = &network.EndpointSettings{
-				EndpointID:          netw.EndpointID,
-				Gateway:             netw.Gateway,
-				IPAddress:           netw.IPAddress,
-				IPPrefixLen:         netw.IPPrefixLen,
-				IPv6Gateway:         netw.IPv6Gateway,
-				GlobalIPv6Address:   netw.GlobalIPv6Address,
-				GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen,
-				MacAddress:          netw.MacAddress,
-				NetworkID:           netw.NetworkID,
-			}
-			if netw.IPAMConfig != nil {
-				networks[name].IPAMConfig = &network.EndpointIPAMConfig{
-					IPv4Address: netw.IPAMConfig.IPv4Address,
-					IPv6Address: netw.IPAMConfig.IPv6Address,
-				}
-			}
-		}
-		snapshot.NetworkSettings = types.SummaryNetworkSettings{Networks: networks}
-		for port, bindings := range container.NetworkSettings.Ports {
-			p, err := nat.ParsePort(port.Port())
-			if err != nil {
-				logrus.Warnf("invalid port map %+v", err)
-				continue
-			}
-			if len(bindings) == 0 {
-				snapshot.Ports = append(snapshot.Ports, types.Port{
-					PrivatePort: uint16(p),
-					Type:        port.Proto(),
-				})
-				continue
-			}
-			for _, binding := range bindings {
-				h, err := nat.ParsePort(binding.HostPort)
-				if err != nil {
-					logrus.Warnf("invalid host port map %+v", err)
-					continue
-				}
-				snapshot.Ports = append(snapshot.Ports, types.Port{
-					PrivatePort: uint16(p),
-					PublicPort:  uint16(h),
-					Type:        port.Proto(),
-					IP:          binding.HostIP,
-				})
-			}
-		}
-
-	}
-
-	return snapshot
-}
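The deleted Snapshot type reappears in view.go below, now embedding `types.Container` so that the fields the API already returns are promoted instead of duplicated. An illustrative (not from the patch) sketch of how callers read the new shape:

```go
package example

import (
	"fmt"

	"github.com/docker/docker/container"
)

// printSnapshot shows field promotion on the new Snapshot (view.go below):
// ID and Image come from the embedded types.Container, while list-only
// extras such as Pid and CreatedAt live alongside it.
func printSnapshot(s container.Snapshot) {
	fmt.Println(s.ID, s.Image)                 // promoted from types.Container
	fmt.Println(s.Pid, s.CreatedAt.UnixNano()) // Snapshot-only fields
}
```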
"github.com/docker/go-connections/nat" + "github.com/hashicorp/go-memdb" +) const ( memdbTable = "containers" - memdbIDField = "ID" memdbIDIndex = "id" ) +// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a +// versioned ACID in-memory store. +type Snapshot struct { + types.Container + + // additional info queries need to filter on + // preserve nanosec resolution for queries + CreatedAt time.Time + StartedAt time.Time + Name string + Pid int + ExitCode int + Running bool + Paused bool + Managed bool + ExposedPorts nat.PortSet + PortBindings nat.PortSet + Health string + HostConfig struct { + Isolation string + } +} + // ViewDB provides an in-memory transactional (ACID) container Store type ViewDB interface { - Snapshot() View - Save(snapshot *Snapshot) error - Delete(id string) error + Snapshot(nameIndex *registrar.Registrar) View + Save(*Container) error + Delete(*Container) error } // View can be used by readers to avoid locking @@ -29,7 +62,7 @@ var schema = &memdb.DBSchema{ memdbIDIndex: { Name: memdbIDIndex, Unique: true, - Indexer: &memdb.StringFieldIndex{Field: memdbIDField}, + Indexer: &containerByIDIndexer{}, }, }, }, @@ -50,29 +83,38 @@ func NewViewDB() (ViewDB, error) { } // Snapshot provides a consistent read-only View of the database -func (db *memDB) Snapshot() View { - return &memdbView{db.store.Txn(false)} +func (db *memDB) Snapshot(index *registrar.Registrar) View { + return &memdbView{ + txn: db.store.Txn(false), + nameIndex: index.GetAll(), + } } -// Save atomically updates the in-memory store -func (db *memDB) Save(snapshot *Snapshot) error { +// Save atomically updates the in-memory store from the current on-disk state of a Container. +func (db *memDB) Save(c *Container) error { txn := db.store.Txn(true) defer txn.Commit() - return txn.Insert(memdbTable, snapshot) + deepCopy := NewBaseContainer(c.ID, c.Root) + err := deepCopy.FromDisk() // TODO: deal with reserveLabel + if err != nil { + return err + } + return txn.Insert(memdbTable, deepCopy) } // Delete removes an item by ID -func (db *memDB) Delete(id string) error { +func (db *memDB) Delete(c *Container) error { txn := db.store.Txn(true) defer txn.Commit() - return txn.Delete(memdbTable, &Snapshot{ID: id}) + return txn.Delete(memdbTable, NewBaseContainer(c.ID, c.Root)) } type memdbView struct { - txn *memdb.Txn + txn *memdb.Txn + nameIndex map[string][]string } -// All returns a all items in this snapshot +// All returns a all items in this snapshot. Returned objects must never be modified. func (v *memdbView) All() ([]Snapshot, error) { var all []Snapshot iter, err := v.txn.Get(memdbTable, memdbIDIndex) @@ -84,18 +126,167 @@ func (v *memdbView) All() ([]Snapshot, error) { if item == nil { break } - snapshot := *(item.(*Snapshot)) // force a copy - all = append(all, snapshot) + snapshot := v.transform(item.(*Container)) + all = append(all, *snapshot) } return all, nil } -//Get returns an item by id +// Get returns an item by id. Returned objects must never be modified. func (v *memdbView) Get(id string) (*Snapshot, error) { s, err := v.txn.First(memdbTable, memdbIDIndex, id) if err != nil { return nil, err } - snapshot := *(s.(*Snapshot)) // force a copy - return &snapshot, nil + return v.transform(s.(*Container)), nil +} + +// transform maps a (deep) copied Container object to what queries need. +// A lock on the Container is not held because these are immutable deep copies. 
+func (v *memdbView) transform(container *Container) *Snapshot {
+	snapshot := &Snapshot{
+		Container: types.Container{
+			ID:      container.ID,
+			Names:   v.nameIndex[container.ID],
+			ImageID: container.ImageID.String(),
+			Ports:   []types.Port{},
+			Mounts:  container.GetMountPoints(),
+			State:   container.State.StateString(),
+			Status:  container.State.String(),
+			Created: container.Created.Unix(),
+		},
+		CreatedAt:    container.Created,
+		StartedAt:    container.StartedAt,
+		Name:         container.Name,
+		Pid:          container.Pid,
+		Managed:      container.Managed,
+		ExposedPorts: make(nat.PortSet),
+		PortBindings: make(nat.PortSet),
+		Health:       container.HealthString(),
+		Running:      container.Running,
+		Paused:       container.Paused,
+		ExitCode:     container.ExitCode(),
+	}
+
+	if snapshot.Names == nil {
+		// Dead containers will often have no name, so make sure the response isn't null
+		snapshot.Names = []string{}
+	}
+
+	if container.HostConfig != nil {
+		snapshot.Container.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
+		snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation)
+		for binding := range container.HostConfig.PortBindings {
+			snapshot.PortBindings[binding] = struct{}{}
+		}
+	}
+
+	if container.Config != nil {
+		snapshot.Image = container.Config.Image
+		snapshot.Labels = container.Config.Labels
+		for exposed := range container.Config.ExposedPorts {
+			snapshot.ExposedPorts[exposed] = struct{}{}
+		}
+	}
+
+	if len(container.Args) > 0 {
+		args := []string{}
+		for _, arg := range container.Args {
+			if strings.Contains(arg, " ") {
+				args = append(args, fmt.Sprintf("'%s'", arg))
+			} else {
+				args = append(args, arg)
+			}
+		}
+		argsAsString := strings.Join(args, " ")
+		snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString)
+	} else {
+		snapshot.Command = container.Path
+	}
+
+	snapshot.Ports = []types.Port{}
+	networks := make(map[string]*network.EndpointSettings)
+	if container.NetworkSettings != nil {
+		for name, netw := range container.NetworkSettings.Networks {
+			if netw == nil || netw.EndpointSettings == nil {
+				continue
+			}
+			networks[name] = &network.EndpointSettings{
+				EndpointID:          netw.EndpointID,
+				Gateway:             netw.Gateway,
+				IPAddress:           netw.IPAddress,
+				IPPrefixLen:         netw.IPPrefixLen,
+				IPv6Gateway:         netw.IPv6Gateway,
+				GlobalIPv6Address:   netw.GlobalIPv6Address,
+				GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen,
+				MacAddress:          netw.MacAddress,
+				NetworkID:           netw.NetworkID,
+			}
+			if netw.IPAMConfig != nil {
+				networks[name].IPAMConfig = &network.EndpointIPAMConfig{
+					IPv4Address: netw.IPAMConfig.IPv4Address,
+					IPv6Address: netw.IPAMConfig.IPv6Address,
+				}
+			}
+		}
+		for port, bindings := range container.NetworkSettings.Ports {
+			p, err := nat.ParsePort(port.Port())
+			if err != nil {
+				logrus.Warnf("invalid port map %+v", err)
+				continue
+			}
+			if len(bindings) == 0 {
+				snapshot.Ports = append(snapshot.Ports, types.Port{
+					PrivatePort: uint16(p),
+					Type:        port.Proto(),
+				})
+				continue
+			}
+			for _, binding := range bindings {
+				h, err := nat.ParsePort(binding.HostPort)
+				if err != nil {
+					logrus.Warnf("invalid host port map %+v", err)
+					continue
+				}
+				snapshot.Ports = append(snapshot.Ports, types.Port{
+					PrivatePort: uint16(p),
+					PublicPort:  uint16(h),
+					Type:        port.Proto(),
+					IP:          binding.HostIP,
+				})
+			}
+		}
+	}
+	snapshot.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks}
+
+	return snapshot
+}
+
+// containerByIDIndexer is used to extract the ID field from Container types.
+// memdb.StringFieldIndex cannot be used since ID is a field from an embedded struct.
+type containerByIDIndexer struct{}
+
+// FromObject implements the memdb.SingleIndexer interface for Container objects
+func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
+	c, ok := obj.(*Container)
+	if !ok {
+		return false, nil, fmt.Errorf("%T is not a Container", obj)
+	}
+	// Add the null character as a terminator
+	v := c.ID + "\x00"
+	return true, []byte(v), nil
+}
+
+// FromArgs implements the memdb.Indexer interface
+func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+	arg, ok := args[0].(string)
+	if !ok {
+		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+	}
+	// Add the null character as a terminator
+	arg += "\x00"
+	return []byte(arg), nil
+}
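A custom indexer is needed because go-memdb's `StringFieldIndex` cannot reach a field promoted from an embedded struct, and exact-match string indexes in memdb must be null-terminated so a lookup for "abc" does not also match keys prefixed with "abc". A stripped-down, runnable sketch of the same pattern against a toy struct (names here are illustrative, not from the patch):

```go
package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

type item struct{ ID string }

// byIDIndexer follows the containerByIDIndexer pattern above: it derives
// the index key from the object (FromObject) and from query arguments
// (FromArgs), appending "\x00" as the exact-match terminator.
type byIDIndexer struct{}

func (byIDIndexer) FromObject(obj interface{}) (bool, []byte, error) {
	i, ok := obj.(*item)
	if !ok {
		return false, nil, fmt.Errorf("%T is not an item", obj)
	}
	return true, []byte(i.ID + "\x00"), nil
}

func (byIDIndexer) FromArgs(args ...interface{}) ([]byte, error) {
	if len(args) != 1 {
		return nil, fmt.Errorf("must provide only a single argument")
	}
	s, ok := args[0].(string)
	if !ok {
		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
	}
	return []byte(s + "\x00"), nil
}

func main() {
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"items": {
				Name: "items",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {Name: "id", Unique: true, Indexer: byIDIndexer{}},
				},
			},
		},
	}
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}
	txn := db.Txn(true)
	if err := txn.Insert("items", &item{ID: "abc"}); err != nil {
		panic(err)
	}
	txn.Commit()

	got, _ := db.Txn(false).First("items", "id", "abc")
	fmt.Println(got.(*item).ID) // abc
}
```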
diff --git a/components/engine/container/view_test.go b/components/engine/container/view_test.go
index 20424ffd98..9b872998bd 100644
--- a/components/engine/container/view_test.go
+++ b/components/engine/container/view_test.go
@@ -1,29 +1,73 @@
 package container
 
-import "testing"
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	containertypes "github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/pkg/registrar"
+	"github.com/pborman/uuid"
+)
 
-func TestViewSave(t *testing.T) {
+var root string
+
+func TestMain(m *testing.M) {
+	var err error
+	root, err = ioutil.TempDir("", "docker-container-test-")
+	if err != nil {
+		panic(err)
+	}
+	code := m.Run()
+	os.RemoveAll(root) // not deferred: os.Exit would skip deferred calls
+	os.Exit(code)
+}
+
+func newContainer(t *testing.T) *Container {
+	var (
+		id    = uuid.New()
+		cRoot = filepath.Join(root, id)
+	)
+	if err := os.MkdirAll(cRoot, 0755); err != nil {
+		t.Fatal(err)
+	}
+	c := NewBaseContainer(id, cRoot)
+	c.HostConfig = &containertypes.HostConfig{}
+	return c
+}
+
+func TestViewSaveDelete(t *testing.T) {
 	db, err := NewViewDB()
 	if err != nil {
 		t.Fatal(err)
 	}
-	c := NewBaseContainer("id", "root")
+	c := newContainer(t)
 	if err := c.CheckpointTo(db); err != nil {
 		t.Fatal(err)
 	}
+	if err := db.Delete(c); err != nil {
+		t.Fatal(err)
+	}
 }
 
 func TestViewAll(t *testing.T) {
 	var (
 		db, _ = NewViewDB()
-		one   = NewBaseContainer("id1", "root1")
-		two   = NewBaseContainer("id2", "root2")
+		names = registrar.NewRegistrar()
+		one   = newContainer(t)
+		two   = newContainer(t)
 	)
 	one.Pid = 10
+	if err := one.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
 	two.Pid = 20
-	one.CheckpointTo(db)
-	two.CheckpointTo(db)
-	all, err := db.Snapshot().All()
+	if err := two.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+
+	all, err := db.Snapshot(names).All()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -34,24 +78,29 @@
 	for i := range all {
 		byID[all[i].ID] = all[i]
 	}
-	if s, ok := byID["id1"]; !ok || s.Pid != 10 {
-		t.Fatalf("expected something different with for id1: %v", s)
+	if s, ok := byID[one.ID]; !ok || s.Pid != 10 {
+		t.Fatalf("expected something different for id=%s: %v", one.ID, s)
 	}
-	if s, ok := byID["id2"]; !ok || s.Pid != 20 {
-		t.Fatalf("expected something different with for id1: %v", s)
+	if s, ok := byID[two.ID]; !ok || s.Pid != 20 {
+		t.Fatalf("expected something different for id=%s: %v", two.ID, s)
 	}
 }
 
 func TestViewGet(t *testing.T) {
-	db, _ := NewViewDB()
-	one := NewBaseContainer("id", "root")
+	var (
+		db, _ = NewViewDB()
+		names = registrar.NewRegistrar()
+		one   = newContainer(t)
+	)
 	one.ImageID = "some-image-123"
-	one.CheckpointTo(db)
-	s, err := db.Snapshot().Get("id")
+	if err := one.CheckpointTo(db); err != nil {
+		t.Fatal(err)
+	}
+	s, err := db.Snapshot(names).Get(one.ID)
 	if err != nil {
 		t.Fatal(err)
 	}
 	if s == nil || s.ImageID != "some-image-123" {
-		t.Fatalf("expected something different. Got: %v", s)
+		t.Fatalf("expected ImageID=some-image-123. Got: %v", s)
 	}
 }
diff --git a/components/engine/daemon/container.go b/components/engine/daemon/container.go
index 72689fbbaa..6582da82c8 100644
--- a/components/engine/daemon/container.go
+++ b/components/engine/daemon/container.go
@@ -218,7 +218,7 @@ func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error {
 
 	runconfig.SetDefaultNetModeIfBlank(hostConfig)
 	container.HostConfig = hostConfig
-	return container.CheckpointAndSaveToDisk(daemon.containersReplica)
+	return container.CheckpointTo(daemon.containersReplica)
 }
 
 // verifyContainerSettings performs validation of the hostconfig and config
diff --git a/components/engine/daemon/container_operations.go b/components/engine/daemon/container_operations.go
index 9e5a02c3f8..8a066bc899 100644
--- a/components/engine/daemon/container_operations.go
+++ b/components/engine/daemon/container_operations.go
@@ -45,10 +45,11 @@ func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []string {
 	return nil
 }
 
+// checkpointAndSave grabs a container lock to safely call container.CheckpointTo
func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
 	container.Lock()
 	defer container.Unlock()
-	if err := container.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
 		return fmt.Errorf("Error saving container state: %v", err)
 	}
 	return nil
diff --git a/components/engine/daemon/create.go b/components/engine/daemon/create.go
index a39dde5114..d47de31fa8 100644
--- a/components/engine/daemon/create.go
+++ b/components/engine/daemon/create.go
@@ -167,11 +167,6 @@ func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) {
 	runconfig.SetDefaultNetModeIfBlank(container.HostConfig)
 
 	daemon.updateContainerNetworkSettings(container, endpointsConfigs)
-
-	if err := container.ToDisk(); err != nil {
-		logrus.Errorf("Error saving new container to disk: %v", err)
-		return nil, err
-	}
 	if err := daemon.Register(container); err != nil {
 		return nil, err
 	}
diff --git a/components/engine/daemon/daemon.go b/components/engine/daemon/daemon.go
index ab46ad3341..6f4ac26ac2 100644
--- a/components/engine/daemon/daemon.go
+++ b/components/engine/daemon/daemon.go
@@ -217,7 +217,7 @@ func (daemon *Daemon) restore() error {
 			go func(c *container.Container) {
 				defer wg.Done()
 				daemon.backportMountSpec(c)
-				if err := c.ToDiskLocking(); err != nil {
+				if err := daemon.checkpointAndSave(c); err != nil {
 					logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk")
 				}
 
@@ -289,7 +289,9 @@ func (daemon *Daemon) restore() error {
 				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
 				c.RemovalInProgress = false
 				c.Dead = true
-				c.ToDisk()
+				if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+					logrus.Errorf("Failed to update container %s state: %v", c.ID, err)
+				}
 			}
 			c.Unlock()
 		}(c)
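The restore() changes route the startup-time writes through `checkpointAndSave`, one goroutine per container, logging failures instead of aborting daemon startup. A runnable sketch reduced to the concurrency skeleton (`saveAll`, `save`, and `ids` are illustrative stand-ins, not names from the patch):

```go
package main

import (
	"fmt"
	"sync"
)

// saveAll checkpoints every container on its own goroutine and logs
// failures instead of returning them, mirroring the restore() shape above.
// save stands in for daemon.checkpointAndSave.
func saveAll(ids []string, save func(id string) error) {
	var wg sync.WaitGroup
	for _, id := range ids {
		wg.Add(1)
		go func(id string) {
			defer wg.Done()
			if err := save(id); err != nil {
				fmt.Printf("container %s: error saving state: %v\n", id, err)
			}
		}(id)
	}
	wg.Wait()
}

func main() {
	saveAll([]string{"c1", "c2"}, func(id string) error { return nil })
}
```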
diff --git a/components/engine/daemon/daemon_unix_test.go b/components/engine/daemon/daemon_unix_test.go
index a2847bfbee..eb19376845 100644
--- a/components/engine/daemon/daemon_unix_test.go
+++ b/components/engine/daemon/daemon_unix_test.go
@@ -274,6 +274,10 @@ func TestMigratePre17Volumes(t *testing.T) {
 	}
 	`)
 
+	viewDB, err := container.NewViewDB()
+	if err != nil {
+		t.Fatal(err)
+	}
 	volStore, err := store.New(volumeRoot)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -284,7 +288,12 @@ func TestMigratePre17Volumes(t *testing.T) {
 	}
 	volumedrivers.Register(drv, volume.DefaultDriverName)
 
-	daemon := &Daemon{root: rootDir, repository: containerRoot, volumes: volStore}
+	daemon := &Daemon{
+		root:              rootDir,
+		repository:        containerRoot,
+		containersReplica: viewDB,
+		volumes:           volStore,
+	}
 	err = ioutil.WriteFile(filepath.Join(containerRoot, cid, "config.v2.json"), config, 600)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/components/engine/daemon/delete.go b/components/engine/daemon/delete.go
index 3609a88e9f..2d3cd0f90f 100644
--- a/components/engine/daemon/delete.go
+++ b/components/engine/daemon/delete.go
@@ -105,15 +105,11 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) {
 	// Mark container dead. We don't want anybody to be restarting it.
 	container.Lock()
 	container.Dead = true
-	if err = container.CheckpointTo(daemon.containersReplica); err != nil {
-		container.Unlock()
-		return err
-	}
 
 	// Save container state to disk. So that if error happens before
 	// container meta file got removed from disk, then a restart of
 	// docker should not make a dead container alive.
-	if err := container.ToDisk(); err != nil && !os.IsNotExist(err) {
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) {
 		logrus.Errorf("Error saving dying container to disk: %v", err)
 	}
 	container.Unlock()
@@ -137,7 +133,7 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) {
 	selinuxFreeLxcContexts(container.ProcessLabel)
 	daemon.idIndex.Delete(container.ID)
 	daemon.containers.Delete(container.ID)
-	daemon.containersReplica.Delete(container.ID)
+	daemon.containersReplica.Delete(container)
 	if e := daemon.removeMountPoints(container, removeVolume); e != nil {
 		logrus.Error(e)
 	}
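One subtlety in the delete.go hunk: `containersReplica.Delete` now takes the container itself, and the ViewDB implementation (view.go above) builds a throwaway `NewBaseContainer(c.ID, c.Root)` as the deletion key. A sketch of why that is enough, assuming the go-memdb behavior described earlier (`removeFromReplica` is a hypothetical name):

```go
package example

import "github.com/docker/docker/container"

// removeFromReplica mirrors the daemon cleanup path above. Passing the
// object is safe because memdb deletes by index lookup: the key is derived
// via containerByIDIndexer, which reads nothing but c.ID, so a minimal
// container with the right ID identifies the record.
func removeFromReplica(db container.ViewDB, c *container.Container) error {
	return db.Delete(c)
}
```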
diff --git a/components/engine/daemon/list.go b/components/engine/daemon/list.go
index 2d10e3d6de..870a7bda2d 100644
--- a/components/engine/daemon/list.go
+++ b/components/engine/daemon/list.go
@@ -100,18 +100,18 @@ type listContext struct {
 	*types.ContainerListOptions
 }
 
-// byContainerCreated is a temporary type used to sort a list of containers by creation time.
-type byContainerCreated []container.Snapshot
+// byCreatedDescending is a temporary type used to sort a list of containers by creation time in descending order.
+type byCreatedDescending []container.Snapshot
 
-func (r byContainerCreated) Len() int      { return len(r) }
-func (r byContainerCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
-func (r byContainerCreated) Less(i, j int) bool {
-	return r[i].Created.UnixNano() < r[j].Created.UnixNano()
+func (r byCreatedDescending) Len() int      { return len(r) }
+func (r byCreatedDescending) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
+func (r byCreatedDescending) Less(i, j int) bool {
+	return r[j].CreatedAt.UnixNano() < r[i].CreatedAt.UnixNano()
 }
 
 // Containers returns the list of containers to show given the user's filtering.
 func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) {
-	return daemon.reduceContainers(config, daemon.transformContainer)
+	return daemon.reduceContainers(config, daemon.refreshImage)
 }
 
 func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) {
@@ -123,7 +123,7 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) {
 		// standard behavior of walking the entire container
 		// list from the daemon's in-memory store
 		all, err := view.All()
-		sort.Sort(sort.Reverse(byContainerCreated(all)))
+		sort.Sort(byCreatedDescending(all))
 		return all, err
 	}
@@ -172,7 +172,7 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) {
 	// Restore sort-order after filtering
 	// Created gives us nanosec resolution for sorting
-	sort.Sort(sort.Reverse(byContainerCreated(cntrs)))
+	sort.Sort(byCreatedDescending(cntrs))
 
 	return cntrs, nil
 }
@@ -180,7 +180,7 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) {
 // reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer.
 func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) {
 	var (
-		view       = daemon.containersReplica.Snapshot()
+		view       = daemon.containersReplica.Snapshot(daemon.nameIndex)
 		containers = []*types.Container{}
 	)
@@ -503,9 +503,15 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) iterationAction {
 		}
 	}
 
-	networkExist := fmt.Errorf("container part of network")
+	var (
+		networkExist = errors.New("container part of network")
+		noNetworks   = errors.New("container is not part of any networks")
+	)
 	if ctx.filters.Include("network") {
 		err := ctx.filters.WalkValues("network", func(value string) error {
+			if container.NetworkSettings == nil {
+				return noNetworks
+			}
 			if _, ok := container.NetworkSettings.Networks[value]; ok {
 				return networkExist
 			}
@@ -527,7 +533,7 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) iterationAction {
 	if len(ctx.publish) > 0 {
 		shouldSkip := true
 		for port := range ctx.publish {
-			if _, ok := container.PublishPorts[port]; ok {
+			if _, ok := container.PortBindings[port]; ok {
 				shouldSkip = false
 				break
 			}
@@ -553,40 +559,22 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) iterationAction {
 	return includeContainer
 }
 
-// transformContainer generates the container type expected by the docker ps command.
-func (daemon *Daemon) transformContainer(container *container.Snapshot, ctx *listContext) (*types.Container, error) {
-	newC := &types.Container{
-		ID:              container.ID,
-		Names:           ctx.names[container.ID],
-		ImageID:         container.ImageID,
-		Command:         container.Command,
-		Created:         container.Created.Unix(),
-		State:           container.State,
-		Status:          container.Status,
-		NetworkSettings: &container.NetworkSettings,
-		Ports:           container.Ports,
-		Labels:          container.Labels,
-		Mounts:          container.Mounts,
-	}
-	if newC.Names == nil {
-		// Dead containers will often have no name, so make sure the response isn't null
-		newC.Names = []string{}
-	}
-	newC.HostConfig.NetworkMode = container.HostConfig.NetworkMode
-
-	image := container.Image // if possible keep the original ref
-	if image != container.ImageID {
+// refreshImage checks if the Image ref still points to the correct ID, and updates the ref to the actual ID when it doesn't
+func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*types.Container, error) {
+	c := s.Container
+	image := s.Image // keep the original ref if still valid (hasn't changed)
+	if image != s.ImageID {
 		id, _, err := daemon.GetImageIDAndPlatform(image)
 		if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE {
 			return nil, err
 		}
-		if err != nil || id.String() != container.ImageID {
-			image = container.ImageID
+		if err != nil || id.String() != s.ImageID {
+			// ref is stale or unresolvable; fall back to the image ID itself
+			image = s.ImageID
 		}
 	}
-	newC.Image = image
-
-	return newC, nil
+	c.Image = image
+	return &c, nil
}
 
 // Volumes lists known volumes, using the filter to restrict the range
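The `byCreatedDescending` change folds the old `sort.Reverse` wrapper into the comparator itself: `Less(i, j)` returns true when element j is older than element i. A standalone sketch of that identity, using a toy `desc` type (not from the patch):

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// desc sorts newest-first by comparing j before i in Less, so
// sort.Sort(desc(x)) is equivalent to sort.Sort(sort.Reverse(asc(x))).
type desc []time.Time

func (r desc) Len() int           { return len(r) }
func (r desc) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
func (r desc) Less(i, j int) bool { return r[j].UnixNano() < r[i].UnixNano() }

func main() {
	now := time.Now()
	ts := desc{now.Add(-time.Hour), now, now.Add(-time.Minute)}
	sort.Sort(ts)
	fmt.Println(ts[0].Equal(now)) // true: newest first
}
```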
diff --git a/components/engine/daemon/monitor.go b/components/engine/daemon/monitor.go
index be8a500020..4f9cc37a04 100644
--- a/components/engine/daemon/monitor.go
+++ b/components/engine/daemon/monitor.go
@@ -90,7 +90,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		daemon.setStateCounter(c)
 
 		defer c.Unlock()
-		if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 			return err
 		}
 		return daemon.postRunProcessing(c, e)
@@ -119,7 +119,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		c.HasBeenStartedBefore = true
 		daemon.setStateCounter(c)
 
-		if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 			c.Reset(false)
 			return err
 		}
@@ -130,7 +130,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		// Container is already locked in this case
 		c.Paused = true
 		daemon.setStateCounter(c)
-		if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 			return err
 		}
 		daemon.updateHealthMonitor(c)
@@ -139,7 +139,7 @@ func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
 		// Container is already locked in this case
 		c.Paused = false
 		daemon.setStateCounter(c)
-		if err := c.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 			return err
 		}
 		daemon.updateHealthMonitor(c)
diff --git a/components/engine/daemon/rename.go b/components/engine/daemon/rename.go
index 1a5e985cfc..ad21593c19 100644
--- a/components/engine/daemon/rename.go
+++ b/components/engine/daemon/rename.go
@@ -82,7 +82,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
 			daemon.nameIndex.Release(oldName + k)
 		}
 		daemon.releaseName(oldName)
-		if err = container.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+		if err = container.CheckpointTo(daemon.containersReplica); err != nil {
 			return err
 		}
@@ -99,7 +99,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error {
 		if err != nil {
 			container.Name = oldName
 			container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint
-			if e := container.CheckpointAndSaveToDisk(daemon.containersReplica); e != nil {
+			if e := container.CheckpointTo(daemon.containersReplica); e != nil {
 				logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e)
 			}
 		}
diff --git a/components/engine/daemon/restart.go b/components/engine/daemon/restart.go
index 79292f3752..9f2ef569af 100644
--- a/components/engine/daemon/restart.go
+++ b/components/engine/daemon/restart.go
@@ -52,7 +52,7 @@ func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error {
 			container.HostConfig.AutoRemove = autoRemove
 			// containerStop will write HostConfig to disk, we shall restore AutoRemove
 			// in disk too
-			if toDiskErr := container.ToDiskLocking(); toDiskErr != nil {
+			if toDiskErr := daemon.checkpointAndSave(container); toDiskErr != nil {
 				logrus.Errorf("Write container to disk error: %v", toDiskErr)
 			}
diff --git a/components/engine/daemon/start.go b/components/engine/daemon/start.go
index c2e9b7069f..a00cb901b5 100644
--- a/components/engine/daemon/start.go
+++ b/components/engine/daemon/start.go
@@ -58,7 +58,7 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error {
 			// if user has change the network mode on starting, clean up the
 			// old networks. It is a deprecated feature and has been removed in Docker 1.12
 			container.NetworkSettings.Networks = nil
-			if err := container.ToDisk(); err != nil {
+			if err := container.CheckpointTo(daemon.containersReplica); err != nil {
 				return err
 			}
 		}
@@ -117,7 +117,7 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (err error) {
 		if container.ExitCode() == 0 {
 			container.SetExitCode(128)
 		}
-		if err := container.CheckpointAndSaveToDisk(daemon.containersReplica); err != nil {
+		if err := container.CheckpointTo(daemon.containersReplica); err != nil {
 			logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err)
 		}
 		container.Reset(false)
diff --git a/components/engine/daemon/start_unix.go b/components/engine/daemon/start_unix.go
index 103cc73b86..12ecdab2db 100644
--- a/components/engine/daemon/start_unix.go
+++ b/components/engine/daemon/start_unix.go
@@ -9,13 +9,14 @@ import (
 	"github.com/docker/docker/libcontainerd"
 )
 
+// callers of getLibcontainerdCreateOptions must hold a lock on the container
func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) {
 	createOptions := []libcontainerd.CreateOption{}
 
 	// Ensure a runtime has been assigned to this container
 	if container.HostConfig.Runtime == "" {
 		container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
-		container.ToDisk()
+		container.CheckpointTo(daemon.containersReplica)
 	}
 
 	rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime)
diff --git a/components/engine/daemon/update.go b/components/engine/daemon/update.go
index af9bf7b991..a65cbd51b1 100644
--- a/components/engine/daemon/update.go
+++ b/components/engine/daemon/update.go
@@ -38,7 +38,7 @@ func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error {
 		if restoreConfig {
 			container.Lock()
 			container.HostConfig = &backupHostConfig
-			container.CheckpointAndSaveToDisk(daemon.containersReplica)
+			container.CheckpointTo(daemon.containersReplica)
 			container.Unlock()
 		}
 	}()
diff --git a/components/engine/daemon/volumes_unix.go b/components/engine/daemon/volumes_unix.go
index c01c573e93..d6b48d381b 100644
--- a/components/engine/daemon/volumes_unix.go
+++ b/components/engine/daemon/volumes_unix.go
@@ -180,7 +180,7 @@ func (daemon *Daemon) verifyVolumesInfo(container *container.Container) error {
 			container.MountPoints[destination] = &m
 			}
 		}
-		return container.ToDisk()
+		return container.CheckpointTo(daemon.containersReplica)
 	}
 	return nil
 }
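Taken together, the diff leaves a single write path and a read-only query path. An end-to-end sketch of the reworked flow, under the assumption that the container has a config on disk for Save's `FromDisk` round trip (`listPids` is a hypothetical name):

```go
package example

import (
	"fmt"

	"github.com/docker/docker/container"
	"github.com/docker/docker/pkg/registrar"
)

// listPids checkpoints a container (disk write + replica update) and then
// serves reads from an immutable Snapshot view, as the daemon now does.
func listPids(c *container.Container) error {
	db, err := container.NewViewDB()
	if err != nil {
		return err
	}
	c.Lock()
	err = c.CheckpointTo(db) // toDisk, then store.Save
	c.Unlock()
	if err != nil {
		return err
	}
	names := registrar.NewRegistrar()
	all, err := db.Snapshot(names).All()
	if err != nil {
		return err
	}
	for _, s := range all {
		fmt.Println(s.ID, s.Pid)
	}
	return nil
}
```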