From 23cc76ac7e984fe7690426bc74161634c9b064c4 Mon Sep 17 00:00:00 2001 From: Fengtu Wang Date: Wed, 12 Jul 2017 16:24:28 +0800 Subject: [PATCH 01/21] Keep pause state when restoring container's status Do not change pause state when restoring container's status, or status in docker will be different with status in runc. Signed-off-by: Fengtu Wang Upstream-commit: 977c4046fd2147d7c04f4b513a94138013ca0dd6 Component: engine --- components/engine/container/state.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/components/engine/container/state.go b/components/engine/container/state.go index 01f7ab4584..32f3f5b7a5 100644 --- a/components/engine/container/state.go +++ b/components/engine/container/state.go @@ -278,7 +278,9 @@ func (s *State) SetRunning(pid int, initial bool) { s.ErrorMsg = "" s.Running = true s.Restarting = false - s.Paused = false + if initial { + s.Paused = false + } s.ExitCodeValue = 0 s.Pid = pid if initial { From 107190981d22c60fea7d1fc691d33e399e67bb36 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Thu, 29 Jun 2017 18:56:22 -0700 Subject: [PATCH 02/21] Store container names in memdb Currently, names are maintained by a separate system called "registrar". This means there is no way to atomically snapshot the state of containers and the names associated with them. We can add this atomicity and simplify the code by storing name associations in the memdb. This removes the need for pkg/registrar, and makes snapshots a lot less expensive because they no longer need to copy all the names. This change also avoids some problematic behavior from pkg/registrar where it returns slices which may be modified later on. Note that while this change makes the *snapshotting* atomic, it doesn't yet do anything to make sure containers are named at the same time that they are added to the database. We can do that by adding a transactional interface, either as a followup, or as part of this PR. 
Signed-off-by: Aaron Lehmann Upstream-commit: 1128fc1add66a849c12d2045aed39605e673abc6 Component: engine --- components/engine/container/view.go | 223 ++++++++++++++++-- components/engine/container/view_test.go | 57 ++++- components/engine/daemon/container.go | 2 +- components/engine/daemon/daemon.go | 7 +- components/engine/daemon/daemon_test.go | 12 +- components/engine/daemon/delete.go | 3 +- components/engine/daemon/list.go | 4 +- components/engine/daemon/names.go | 17 +- components/engine/daemon/rename.go | 8 +- components/engine/pkg/registrar/registrar.go | 130 ---------- .../engine/pkg/registrar/registrar_test.go | 119 ---------- 11 files changed, 285 insertions(+), 297 deletions(-) delete mode 100644 components/engine/pkg/registrar/registrar.go delete mode 100644 components/engine/pkg/registrar/registrar_test.go diff --git a/components/engine/container/view.go b/components/engine/container/view.go index f605b4f483..449cade149 100644 --- a/components/engine/container/view.go +++ b/components/engine/container/view.go @@ -1,6 +1,7 @@ package container import ( + "errors" "fmt" "strings" "time" @@ -8,14 +9,23 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/network" - "github.com/docker/docker/pkg/registrar" "github.com/docker/go-connections/nat" "github.com/hashicorp/go-memdb" ) const ( - memdbTable = "containers" - memdbIDIndex = "id" + memdbContainersTable = "containers" + memdbNamesTable = "names" + + memdbIDIndex = "id" + memdbContainerIDIndex = "containerid" +) + +var ( + // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved + ErrNameReserved = errors.New("name is reserved") + // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved + ErrNameNotReserved = errors.New("name is not reserved") ) // Snapshot is a read only view for Containers. 
It holds all information necessary to serve container queries in a @@ -41,23 +51,37 @@ type Snapshot struct { } } +// nameAssociation associates a container id with a name. +type nameAssociation struct { + // name is the name to associate. Note that name is the primary key + // ("id" in memdb). + name string + containerID string +} + // ViewDB provides an in-memory transactional (ACID) container Store type ViewDB interface { - Snapshot(nameIndex *registrar.Registrar) View + Snapshot() View Save(*Container) error Delete(*Container) error + + ReserveName(name, containerID string) error + ReleaseName(name string) } // View can be used by readers to avoid locking type View interface { All() ([]Snapshot, error) Get(id string) (*Snapshot, error) + + GetID(name string) (string, error) + GetAllNames() map[string][]string } var schema = &memdb.DBSchema{ Tables: map[string]*memdb.TableSchema{ - memdbTable: { - Name: memdbTable, + memdbContainersTable: { + Name: memdbContainersTable, Indexes: map[string]*memdb.IndexSchema{ memdbIDIndex: { Name: memdbIDIndex, @@ -66,6 +90,21 @@ var schema = &memdb.DBSchema{ }, }, }, + memdbNamesTable: { + Name: memdbNamesTable, + Indexes: map[string]*memdb.IndexSchema{ + // Used for names, because "id" is the primary key in memdb. 
+ memdbIDIndex: { + Name: memdbIDIndex, + Unique: true, + Indexer: &namesByNameIndexer{}, + }, + memdbContainerIDIndex: { + Name: memdbContainerIDIndex, + Indexer: &namesByContainerIDIndexer{}, + }, + }, + }, }, } @@ -94,10 +133,9 @@ func NewViewDB() (ViewDB, error) { } // Snapshot provides a consistent read-only View of the database -func (db *memDB) Snapshot(index *registrar.Registrar) View { +func (db *memDB) Snapshot() View { return &memdbView{ - txn: db.store.Txn(false), - nameIndex: index.GetAll(), + txn: db.store.Txn(false), } } @@ -106,25 +144,75 @@ func (db *memDB) Snapshot(index *registrar.Registrar) View { func (db *memDB) Save(c *Container) error { txn := db.store.Txn(true) defer txn.Commit() - return txn.Insert(memdbTable, c) + return txn.Insert(memdbContainersTable, c) } // Delete removes an item by ID func (db *memDB) Delete(c *Container) error { txn := db.store.Txn(true) defer txn.Commit() - return txn.Delete(memdbTable, NewBaseContainer(c.ID, c.Root)) + + // Delete any names referencing this container's ID. 
+ iter, err := txn.Get(memdbNamesTable, memdbContainerIDIndex, c.ID) + if err != nil { + return err + } + + var names []string + for { + item := iter.Next() + if item == nil { + break + } + names = append(names, item.(nameAssociation).name) + } + + for _, name := range names { + txn.Delete(memdbNamesTable, nameAssociation{name: name}) + } + + return txn.Delete(memdbContainersTable, NewBaseContainer(c.ID, c.Root)) +} + +// ReserveName registers a container ID to a name +// ReserveName is idempotent +// Attempting to reserve a container ID to a name that already exists results in an `ErrNameReserved` +// A name reservation is globally unique +func (db *memDB) ReserveName(name, containerID string) error { + txn := db.store.Txn(true) + defer txn.Commit() + + s, err := txn.First(memdbNamesTable, memdbIDIndex, name) + if err != nil { + return err + } + if s != nil { + if s.(nameAssociation).containerID != containerID { + return ErrNameReserved + } + return nil + } + + txn.Insert(memdbNamesTable, nameAssociation{name: name, containerID: containerID}) + return nil +} + +// ReleaseName releases the reserved name +// Once released, a name can be reserved again +func (db *memDB) ReleaseName(name string) { + txn := db.store.Txn(true) + txn.Delete(memdbNamesTable, nameAssociation{name: name}) + txn.Commit() } type memdbView struct { - txn *memdb.Txn - nameIndex map[string][]string + txn *memdb.Txn } // All returns a all items in this snapshot. Returned objects must never be modified. func (v *memdbView) All() ([]Snapshot, error) { var all []Snapshot - iter, err := v.txn.Get(memdbTable, memdbIDIndex) + iter, err := v.txn.Get(memdbContainersTable, memdbIDIndex) if err != nil { return nil, err } @@ -141,7 +229,7 @@ func (v *memdbView) All() ([]Snapshot, error) { // Get returns an item by id. Returned objects must never be modified. 
func (v *memdbView) Get(id string) (*Snapshot, error) { - s, err := v.txn.First(memdbTable, memdbIDIndex, id) + s, err := v.txn.First(memdbContainersTable, memdbIDIndex, id) if err != nil { return nil, err } @@ -151,13 +239,64 @@ func (v *memdbView) Get(id string) (*Snapshot, error) { return v.transform(s.(*Container)), nil } +// getNames lists all the reserved names for the given container ID. +func (v *memdbView) getNames(containerID string) []string { + iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex, containerID) + if err != nil { + return nil + } + + var names []string + for { + item := iter.Next() + if item == nil { + break + } + names = append(names, item.(nameAssociation).name) + } + + return names +} + +// GetID returns the container ID that the passed in name is reserved to. +func (v *memdbView) GetID(name string) (string, error) { + s, err := v.txn.First(memdbNamesTable, memdbIDIndex, name) + if err != nil { + return "", err + } + if s == nil { + return "", ErrNameNotReserved + } + return s.(nameAssociation).containerID, nil +} + +// GetAllNames returns all registered names. +func (v *memdbView) GetAllNames() map[string][]string { + iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex) + if err != nil { + return nil + } + + out := make(map[string][]string) + for { + item := iter.Next() + if item == nil { + break + } + assoc := item.(nameAssociation) + out[assoc.containerID] = append(out[assoc.containerID], assoc.name) + } + + return out +} + // transform maps a (deep) copied Container object to what queries need. // A lock on the Container is not held because these are immutable deep copies. 
func (v *memdbView) transform(container *Container) *Snapshot { snapshot := &Snapshot{ Container: types.Container{ ID: container.ID, - Names: v.nameIndex[container.ID], + Names: v.getNames(container.ID), ImageID: container.ImageID.String(), Ports: []types.Port{}, Mounts: container.GetMountPoints(), @@ -300,3 +439,55 @@ func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) { arg += "\x00" return []byte(arg), nil } + +// namesByNameIndexer is used to index container name associations by name. +type namesByNameIndexer struct{} + +func (e *namesByNameIndexer) FromObject(obj interface{}) (bool, []byte, error) { + n, ok := obj.(nameAssociation) + if !ok { + return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj) + } + + // Add the null character as a terminator + return true, []byte(n.name + "\x00"), nil +} + +func (e *namesByNameIndexer) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +// namesByContainerIDIndexer is used to index container names by container ID. 
+type namesByContainerIDIndexer struct{} + +func (e *namesByContainerIDIndexer) FromObject(obj interface{}) (bool, []byte, error) { + n, ok := obj.(nameAssociation) + if !ok { + return false, nil, fmt.Errorf(`%T does not have type "nameAssocation"`, obj) + } + + // Add the null character as a terminator + return true, []byte(n.containerID + "\x00"), nil +} + +func (e *namesByContainerIDIndexer) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} diff --git a/components/engine/container/view_test.go b/components/engine/container/view_test.go index 9b872998bd..2e81998ca4 100644 --- a/components/engine/container/view_test.go +++ b/components/engine/container/view_test.go @@ -7,8 +7,8 @@ import ( "testing" containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/pkg/registrar" "github.com/pborman/uuid" + "github.com/stretchr/testify/assert" ) var root string @@ -54,7 +54,6 @@ func TestViewSaveDelete(t *testing.T) { func TestViewAll(t *testing.T) { var ( db, _ = NewViewDB() - names = registrar.NewRegistrar() one = newContainer(t) two = newContainer(t) ) @@ -67,7 +66,7 @@ func TestViewAll(t *testing.T) { t.Fatal(err) } - all, err := db.Snapshot(names).All() + all, err := db.Snapshot().All() if err != nil { t.Fatal(err) } @@ -89,14 +88,13 @@ func TestViewAll(t *testing.T) { func TestViewGet(t *testing.T) { var ( db, _ = NewViewDB() - names = registrar.NewRegistrar() one = newContainer(t) ) one.ImageID = "some-image-123" if err := one.CheckpointTo(db); err != nil { t.Fatal(err) } - s, err := db.Snapshot(names).Get(one.ID) + s, err := db.Snapshot().Get(one.ID) if err != nil { t.Fatal(err) } @@ -104,3 +102,52 @@ func TestViewGet(t *testing.T) { 
t.Fatalf("expected ImageID=some-image-123. Got: %v", s) } } + +func TestNames(t *testing.T) { + db, err := NewViewDB() + if err != nil { + t.Fatal(err) + } + assert.NoError(t, db.ReserveName("name1", "containerid1")) + assert.NoError(t, db.ReserveName("name1", "containerid1")) // idempotent + assert.NoError(t, db.ReserveName("name2", "containerid2")) + assert.EqualError(t, db.ReserveName("name2", "containerid3"), ErrNameReserved.Error()) + + // Releasing a name allows the name to point to something else later. + db.ReleaseName("name2") + assert.NoError(t, db.ReserveName("name2", "containerid3")) + + view := db.Snapshot() + + id, err := view.GetID("name1") + assert.NoError(t, err) + assert.Equal(t, "containerid1", id) + + id, err = view.GetID("name2") + assert.NoError(t, err) + assert.Equal(t, "containerid3", id) + + _, err = view.GetID("notreserved") + assert.EqualError(t, err, ErrNameNotReserved.Error()) + + // Releasing and re-reserving a name doesn't affect the snapshot. + db.ReleaseName("name2") + assert.NoError(t, db.ReserveName("name2", "containerid4")) + + id, err = view.GetID("name1") + assert.NoError(t, err) + assert.Equal(t, "containerid1", id) + + id, err = view.GetID("name2") + assert.NoError(t, err) + assert.Equal(t, "containerid3", id) + + // GetAllNames + assert.Equal(t, map[string][]string{"containerid1": {"name1"}, "containerid3": {"name2"}}, view.GetAllNames()) + + assert.NoError(t, db.ReserveName("name3", "containerid1")) + assert.NoError(t, db.ReserveName("name4", "containerid1")) + + view = db.Snapshot() + assert.Equal(t, map[string][]string{"containerid1": {"name1", "name3", "name4"}, "containerid4": {"name2"}}, view.GetAllNames()) +} diff --git a/components/engine/daemon/container.go b/components/engine/daemon/container.go index 149df0dec6..4c015b70de 100644 --- a/components/engine/daemon/container.go +++ b/components/engine/daemon/container.go @@ -168,7 +168,7 @@ func (daemon *Daemon) GetByName(name string) (*container.Container, error) { if 
name[0] != '/' { fullName = "/" + name } - id, err := daemon.nameIndex.Get(fullName) + id, err := daemon.containersReplica.Snapshot().GetID(fullName) if err != nil { return nil, fmt.Errorf("Could not find entity for %s", name) } diff --git a/components/engine/daemon/daemon.go b/components/engine/daemon/daemon.go index 8359ef31ca..93d871d6df 100644 --- a/components/engine/daemon/daemon.go +++ b/components/engine/daemon/daemon.go @@ -42,7 +42,6 @@ import ( "github.com/docker/docker/migrate/v1" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/registrar" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/truncindex" @@ -104,7 +103,6 @@ type Daemon struct { stores map[string]daemonStore // By container target platform PluginStore *plugin.Store // todo: remove pluginManager *plugin.Manager - nameIndex *registrar.Registrar linkIndex *linkIndex containerd libcontainerd.Client containerdRemote libcontainerd.Remote @@ -448,8 +446,8 @@ func (daemon *Daemon) parents(c *container.Container) map[string]*container.Cont func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { fullName := path.Join(parent.Name, alias) - if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil { - if err == registrar.ErrNameReserved { + if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil { + if err == container.ErrNameReserved { logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) return nil } @@ -780,7 +778,6 @@ func NewDaemon(config *config.Config, registryService registry.Service, containe d.seccompEnabled = sysInfo.Seccomp d.apparmorEnabled = sysInfo.AppArmor - d.nameIndex = registrar.NewRegistrar() d.linkIndex = newLinkIndex() d.containerdRemote = containerdRemote diff --git a/components/engine/daemon/daemon_test.go 
b/components/engine/daemon/daemon_test.go index 6f07d0d1ee..13d1059c1c 100644 --- a/components/engine/daemon/daemon_test.go +++ b/components/engine/daemon/daemon_test.go @@ -12,7 +12,6 @@ import ( "github.com/docker/docker/container" _ "github.com/docker/docker/pkg/discovery/memory" "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/registrar" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/volume" volumedrivers "github.com/docker/docker/volume/drivers" @@ -65,10 +64,15 @@ func TestGetContainer(t *testing.T) { index.Add(c4.ID) index.Add(c5.ID) + containersReplica, err := container.NewViewDB() + if err != nil { + t.Fatalf("could not create ViewDB: %v", err) + } + daemon := &Daemon{ - containers: store, - idIndex: index, - nameIndex: registrar.NewRegistrar(), + containers: store, + containersReplica: containersReplica, + idIndex: index, } daemon.reserveName(c1.ID, c1.Name) diff --git a/components/engine/daemon/delete.go b/components/engine/daemon/delete.go index 2d3cd0f90f..c57a89654b 100644 --- a/components/engine/daemon/delete.go +++ b/components/engine/daemon/delete.go @@ -60,7 +60,7 @@ func (daemon *Daemon) rmLink(container *container.Container, name string) error } parent = strings.TrimSuffix(parent, "/") - pe, err := daemon.nameIndex.Get(parent) + pe, err := daemon.containersReplica.Snapshot().GetID(parent) if err != nil { return fmt.Errorf("Cannot get parent %s for name %s", parent, name) } @@ -128,7 +128,6 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemo return errors.Wrapf(err, "unable to remove filesystem for %s", container.ID) } - daemon.nameIndex.Delete(container.ID) daemon.linkIndex.delete(container) selinuxFreeLxcContexts(container.ProcessLabel) daemon.idIndex.Delete(container.ID) diff --git a/components/engine/daemon/list.go b/components/engine/daemon/list.go index b854be7549..6889c55889 100644 --- a/components/engine/daemon/list.go +++ b/components/engine/daemon/list.go @@ 
-182,7 +182,7 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContex // reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { var ( - view = daemon.containersReplica.Snapshot(daemon.nameIndex) + view = daemon.containersReplica.Snapshot() containers = []*types.Container{} ) @@ -361,7 +361,7 @@ func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerLis publish: publishFilter, expose: exposeFilter, ContainerListOptions: config, - names: daemon.nameIndex.GetAll(), + names: view.GetAllNames(), }, nil } func portOp(key string, filter map[nat.Port]bool) func(value string) error { diff --git a/components/engine/daemon/names.go b/components/engine/daemon/names.go index ec6ac2924f..7cdabeba9f 100644 --- a/components/engine/daemon/names.go +++ b/components/engine/daemon/names.go @@ -8,7 +8,6 @@ import ( "github.com/docker/docker/api" "github.com/docker/docker/container" "github.com/docker/docker/pkg/namesgenerator" - "github.com/docker/docker/pkg/registrar" "github.com/docker/docker/pkg/stringid" ) @@ -31,7 +30,7 @@ func (daemon *Daemon) registerName(container *container.Container) error { } container.Name = name } - return daemon.nameIndex.Reserve(container.Name, container.ID) + return daemon.containersReplica.ReserveName(container.Name, container.ID) } func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { @@ -62,9 +61,9 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) { name = "/" + name } - if err := daemon.nameIndex.Reserve(name, id); err != nil { - if err == registrar.ErrNameReserved { - id, err := daemon.nameIndex.Get(name) + if err := daemon.containersReplica.ReserveName(name, id); err != nil { + if err == container.ErrNameReserved { + id, err := 
daemon.containersReplica.Snapshot().GetID(name) if err != nil { logrus.Errorf("got unexpected error while looking up reserved name: %v", err) return "", err @@ -77,7 +76,7 @@ func (daemon *Daemon) reserveName(id, name string) (string, error) { } func (daemon *Daemon) releaseName(name string) { - daemon.nameIndex.Release(name) + daemon.containersReplica.ReleaseName(name) } func (daemon *Daemon) generateNewName(id string) (string, error) { @@ -88,8 +87,8 @@ func (daemon *Daemon) generateNewName(id string) (string, error) { name = "/" + name } - if err := daemon.nameIndex.Reserve(name, id); err != nil { - if err == registrar.ErrNameReserved { + if err := daemon.containersReplica.ReserveName(name, id); err != nil { + if err == container.ErrNameReserved { continue } return "", err @@ -98,7 +97,7 @@ func (daemon *Daemon) generateNewName(id string) (string, error) { } name = "/" + stringid.TruncateID(id) - if err := daemon.nameIndex.Reserve(name, id); err != nil { + if err := daemon.containersReplica.ReserveName(name, id); err != nil { return "", err } return name, nil diff --git a/components/engine/daemon/rename.go b/components/engine/daemon/rename.go index 2a8d0b22c7..686fbd3b99 100644 --- a/components/engine/daemon/rename.go +++ b/components/engine/daemon/rename.go @@ -55,7 +55,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { } for k, v := range links { - daemon.nameIndex.Reserve(newName+k, v.ID) + daemon.containersReplica.ReserveName(newName+k, v.ID) daemon.linkIndex.link(container, v, newName+k) } @@ -68,10 +68,10 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint daemon.reserveName(container.ID, oldName) for k, v := range links { - daemon.nameIndex.Reserve(oldName+k, v.ID) + daemon.containersReplica.ReserveName(oldName+k, v.ID) daemon.linkIndex.link(container, v, oldName+k) daemon.linkIndex.unlink(newName+k, v, container) - 
daemon.nameIndex.Release(newName + k) + daemon.containersReplica.ReleaseName(newName + k) } daemon.releaseName(newName) } @@ -79,7 +79,7 @@ func (daemon *Daemon) ContainerRename(oldName, newName string) error { for k, v := range links { daemon.linkIndex.unlink(oldName+k, v, container) - daemon.nameIndex.Release(oldName + k) + daemon.containersReplica.ReleaseName(oldName + k) } daemon.releaseName(oldName) if err = container.CheckpointTo(daemon.containersReplica); err != nil { diff --git a/components/engine/pkg/registrar/registrar.go b/components/engine/pkg/registrar/registrar.go deleted file mode 100644 index df12db7eeb..0000000000 --- a/components/engine/pkg/registrar/registrar.go +++ /dev/null @@ -1,130 +0,0 @@ -// Package registrar provides name registration. It reserves a name to a given key. -package registrar - -import ( - "errors" - "sync" -) - -var ( - // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved - ErrNameReserved = errors.New("name is reserved") - // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved - ErrNameNotReserved = errors.New("name is not reserved") - // ErrNoSuchKey is returned when trying to find the names for a key which is not known - ErrNoSuchKey = errors.New("provided key does not exist") -) - -// Registrar stores indexes a list of keys and their registered names as well as indexes names and the key that they are registered to -// Names must be unique. -// Registrar is safe for concurrent access. 
-type Registrar struct { - idx map[string][]string - names map[string]string - mu sync.Mutex -} - -// NewRegistrar creates a new Registrar with the an empty index -func NewRegistrar() *Registrar { - return &Registrar{ - idx: make(map[string][]string), - names: make(map[string]string), - } -} - -// Reserve registers a key to a name -// Reserve is idempotent -// Attempting to reserve a key to a name that already exists results in an `ErrNameReserved` -// A name reservation is globally unique -func (r *Registrar) Reserve(name, key string) error { - r.mu.Lock() - defer r.mu.Unlock() - - if k, exists := r.names[name]; exists { - if k != key { - return ErrNameReserved - } - return nil - } - - r.idx[key] = append(r.idx[key], name) - r.names[name] = key - return nil -} - -// Release releases the reserved name -// Once released, a name can be reserved again -func (r *Registrar) Release(name string) { - r.mu.Lock() - defer r.mu.Unlock() - - key, exists := r.names[name] - if !exists { - return - } - - for i, n := range r.idx[key] { - if n != name { - continue - } - r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...) - break - } - - delete(r.names, name) - - if len(r.idx[key]) == 0 { - delete(r.idx, key) - } -} - -// Delete removes all reservations for the passed in key. -// All names reserved to this key are released. -func (r *Registrar) Delete(key string) { - r.mu.Lock() - for _, name := range r.idx[key] { - delete(r.names, name) - } - delete(r.idx, key) - r.mu.Unlock() -} - -// GetNames lists all the reserved names for the given key -func (r *Registrar) GetNames(key string) ([]string, error) { - r.mu.Lock() - defer r.mu.Unlock() - - names, exists := r.idx[key] - if !exists { - return nil, ErrNoSuchKey - } - - ls := make([]string, 0, len(names)) - ls = append(ls, names...) 
- return ls, nil -} - -// Get returns the key that the passed in name is reserved to -func (r *Registrar) Get(name string) (string, error) { - r.mu.Lock() - key, exists := r.names[name] - r.mu.Unlock() - - if !exists { - return "", ErrNameNotReserved - } - return key, nil -} - -// GetAll returns all registered names -func (r *Registrar) GetAll() map[string][]string { - out := make(map[string][]string) - - r.mu.Lock() - // copy index into out - for id, names := range r.idx { - out[id] = names - } - r.mu.Unlock() - return out -} diff --git a/components/engine/pkg/registrar/registrar_test.go b/components/engine/pkg/registrar/registrar_test.go deleted file mode 100644 index 70f8084b30..0000000000 --- a/components/engine/pkg/registrar/registrar_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package registrar - -import ( - "reflect" - "testing" -) - -func TestReserve(t *testing.T) { - r := NewRegistrar() - - obj := "test1" - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } - - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } - - obj2 := "test2" - err := r.Reserve("test", obj2) - if err == nil { - t.Fatalf("expected error when reserving an already reserved name to another object") - } - if err != ErrNameReserved { - t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name") - } -} - -func TestRelease(t *testing.T) { - r := NewRegistrar() - obj := "testing" - - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } - r.Release("test") - r.Release("test") // Ensure there is no panic here - - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } -} - -func TestGetNames(t *testing.T) { - r := NewRegistrar() - obj := "testing" - names := []string{"test1", "test2"} - - for _, name := range names { - if err := r.Reserve(name, obj); err != nil { - t.Fatal(err) - } - } - r.Reserve("test3", "other") - - names2, err := r.GetNames(obj) - if err != nil { - t.Fatal(err) - } - - if 
!reflect.DeepEqual(names, names2) { - t.Fatalf("Expected: %v, Got: %v", names, names2) - } -} - -func TestDelete(t *testing.T) { - r := NewRegistrar() - obj := "testing" - names := []string{"test1", "test2"} - for _, name := range names { - if err := r.Reserve(name, obj); err != nil { - t.Fatal(err) - } - } - - r.Reserve("test3", "other") - r.Delete(obj) - - _, err := r.GetNames(obj) - if err == nil { - t.Fatal("expected error getting names for deleted key") - } - - if err != ErrNoSuchKey { - t.Fatal("expected `ErrNoSuchKey`") - } -} - -func TestGet(t *testing.T) { - r := NewRegistrar() - obj := "testing" - name := "test" - - _, err := r.Get(name) - if err == nil { - t.Fatal("expected error when key does not exist") - } - if err != ErrNameNotReserved { - t.Fatal(err) - } - - if err := r.Reserve(name, obj); err != nil { - t.Fatal(err) - } - - if _, err = r.Get(name); err != nil { - t.Fatal(err) - } - - r.Delete(obj) - _, err = r.Get(name) - if err == nil { - t.Fatal("expected error when key does not exist") - } - if err != ErrNameNotReserved { - t.Fatal(err) - } -} From 975e5b07233b02fbf41990f05f44d76c4584fd51 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 10 Jul 2017 10:05:14 -0700 Subject: [PATCH 03/21] container: Abort transactions when memdb calls fail Signed-off-by: Aaron Lehmann Upstream-commit: bc3209bc156fc5a5bc6e76e5f79a64c60e9a5f7b Component: engine --- components/engine/container/view.go | 43 ++++++++++++------------ components/engine/container/view_test.go | 4 +-- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/components/engine/container/view.go b/components/engine/container/view.go index 449cade149..e6055cc932 100644 --- a/components/engine/container/view.go +++ b/components/engine/container/view.go @@ -66,7 +66,7 @@ type ViewDB interface { Delete(*Container) error ReserveName(name, containerID string) error - ReleaseName(name string) + ReleaseName(name string) error } // View can be used by readers to avoid locking @@ -150,28 
+150,20 @@ func (db *memDB) Save(c *Container) error { // Delete removes an item by ID func (db *memDB) Delete(c *Container) error { txn := db.store.Txn(true) - defer txn.Commit() - // Delete any names referencing this container's ID. - iter, err := txn.Get(memdbNamesTable, memdbContainerIDIndex, c.ID) - if err != nil { - return err - } - - var names []string - for { - item := iter.Next() - if item == nil { - break - } - names = append(names, item.(nameAssociation).name) - } + view := &memdbView{txn: txn} + names := view.getNames(c.ID) for _, name := range names { txn.Delete(memdbNamesTable, nameAssociation{name: name}) } - return txn.Delete(memdbContainersTable, NewBaseContainer(c.ID, c.Root)) + if err := txn.Delete(memdbContainersTable, NewBaseContainer(c.ID, c.Root)); err != nil { + txn.Abort() + return err + } + txn.Commit() + return nil } // ReserveName registers a container ID to a name @@ -180,29 +172,38 @@ func (db *memDB) Delete(c *Container) error { // A name reservation is globally unique func (db *memDB) ReserveName(name, containerID string) error { txn := db.store.Txn(true) - defer txn.Commit() s, err := txn.First(memdbNamesTable, memdbIDIndex, name) if err != nil { + txn.Abort() return err } if s != nil { + txn.Abort() if s.(nameAssociation).containerID != containerID { return ErrNameReserved } return nil } - txn.Insert(memdbNamesTable, nameAssociation{name: name, containerID: containerID}) + if err := txn.Insert(memdbNamesTable, nameAssociation{name: name, containerID: containerID}); err != nil { + txn.Abort() + return err + } + txn.Commit() return nil } // ReleaseName releases the reserved name // Once released, a name can be reserved again -func (db *memDB) ReleaseName(name string) { +func (db *memDB) ReleaseName(name string) error { txn := db.store.Txn(true) - txn.Delete(memdbNamesTable, nameAssociation{name: name}) + if err := txn.Delete(memdbNamesTable, nameAssociation{name: name}); err != nil { + txn.Abort() + return err + } txn.Commit() + 
return nil } type memdbView struct { diff --git a/components/engine/container/view_test.go b/components/engine/container/view_test.go index 2e81998ca4..09ba343830 100644 --- a/components/engine/container/view_test.go +++ b/components/engine/container/view_test.go @@ -114,7 +114,7 @@ func TestNames(t *testing.T) { assert.EqualError(t, db.ReserveName("name2", "containerid3"), ErrNameReserved.Error()) // Releasing a name allows the name to point to something else later. - db.ReleaseName("name2") + assert.NoError(t, db.ReleaseName("name2")) assert.NoError(t, db.ReserveName("name2", "containerid3")) view := db.Snapshot() @@ -131,7 +131,7 @@ func TestNames(t *testing.T) { assert.EqualError(t, err, ErrNameNotReserved.Error()) // Releasing and re-reserving a name doesn't affect the snapshot. - db.ReleaseName("name2") + assert.NoError(t, db.ReleaseName("name2")) assert.NoError(t, db.ReserveName("name2", "containerid4")) id, err = view.GetID("name1") From 52cbacdf1868469b5a66806fa11b819672d5395e Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Mon, 10 Jul 2017 13:36:36 -0700 Subject: [PATCH 04/21] container: Use wrapper to ensure commit/abort happens Signed-off-by: Aaron Lehmann Upstream-commit: 0e57eb95c5989d0f4e93b7d12efe735a6287781b Component: engine --- components/engine/container/view.go | 92 +++++++++++++++-------------- 1 file changed, 48 insertions(+), 44 deletions(-) diff --git a/components/engine/container/view.go b/components/engine/container/view.go index e6055cc932..e865e4d5df 100644 --- a/components/engine/container/view.go +++ b/components/engine/container/view.go @@ -139,26 +139,10 @@ func (db *memDB) Snapshot() View { } } -// Save atomically updates the in-memory store state for a Container. -// Only read only (deep) copies of containers may be passed in. 
-func (db *memDB) Save(c *Container) error { +func (db *memDB) withTxn(cb func(*memdb.Txn) error) error { txn := db.store.Txn(true) - defer txn.Commit() - return txn.Insert(memdbContainersTable, c) -} - -// Delete removes an item by ID -func (db *memDB) Delete(c *Container) error { - txn := db.store.Txn(true) - - view := &memdbView{txn: txn} - names := view.getNames(c.ID) - - for _, name := range names { - txn.Delete(memdbNamesTable, nameAssociation{name: name}) - } - - if err := txn.Delete(memdbContainersTable, NewBaseContainer(c.ID, c.Root)); err != nil { + err := cb(txn) + if err != nil { txn.Abort() return err } @@ -166,44 +150,64 @@ func (db *memDB) Delete(c *Container) error { return nil } +// Save atomically updates the in-memory store state for a Container. +// Only read only (deep) copies of containers may be passed in. +func (db *memDB) Save(c *Container) error { + return db.withTxn(func(txn *memdb.Txn) error { + return txn.Insert(memdbContainersTable, c) + }) +} + +// Delete removes an item by ID +func (db *memDB) Delete(c *Container) error { + return db.withTxn(func(txn *memdb.Txn) error { + view := &memdbView{txn: txn} + names := view.getNames(c.ID) + + for _, name := range names { + txn.Delete(memdbNamesTable, nameAssociation{name: name}) + } + + if err := txn.Delete(memdbContainersTable, NewBaseContainer(c.ID, c.Root)); err != nil { + return err + } + return nil + }) +} + // ReserveName registers a container ID to a name // ReserveName is idempotent // Attempting to reserve a container ID to a name that already exists results in an `ErrNameReserved` // A name reservation is globally unique func (db *memDB) ReserveName(name, containerID string) error { - txn := db.store.Txn(true) + return db.withTxn(func(txn *memdb.Txn) error { + s, err := txn.First(memdbNamesTable, memdbIDIndex, name) + if err != nil { + return err + } + if s != nil { + if s.(nameAssociation).containerID != containerID { + return ErrNameReserved + } + return nil + } - s, err := 
txn.First(memdbNamesTable, memdbIDIndex, name) - if err != nil { - txn.Abort() - return err - } - if s != nil { - txn.Abort() - if s.(nameAssociation).containerID != containerID { - return ErrNameReserved + if err := txn.Insert(memdbNamesTable, nameAssociation{name: name, containerID: containerID}); err != nil { + return err } return nil - } - - if err := txn.Insert(memdbNamesTable, nameAssociation{name: name, containerID: containerID}); err != nil { - txn.Abort() - return err - } - txn.Commit() - return nil + }) } // ReleaseName releases the reserved name // Once released, a name can be reserved again func (db *memDB) ReleaseName(name string) error { - txn := db.store.Txn(true) - if err := txn.Delete(memdbNamesTable, nameAssociation{name: name}); err != nil { - txn.Abort() - return err - } - txn.Commit() - return nil + return db.withTxn(func(txn *memdb.Txn) error { + if err := txn.Delete(memdbNamesTable, nameAssociation{name: name}); err != nil { + return err + } + return nil + }) } type memdbView struct { From f708c1ef17a5e4a1ec5b06bab40711f966cdb5a6 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Thu, 13 Jul 2017 18:17:16 -0700 Subject: [PATCH 05/21] vendor: add archive/tar Signed-off-by: Tonis Tiigi Upstream-commit: 72df48d1ad417401a5ce0a7ee82a3c8ba33e091c Component: engine --- components/engine/Dockerfile | 1 + components/engine/vendor.conf | 8 + .../engine/vendor/archive/tar/common.go | 286 +++++++ .../engine/vendor/archive/tar/format.go | 197 +++++ .../engine/vendor/archive/tar/reader.go | 800 ++++++++++++++++++ .../engine/vendor/archive/tar/stat_atim.go | 20 + .../vendor/archive/tar/stat_atimespec.go | 20 + .../engine/vendor/archive/tar/stat_unix.go | 32 + .../engine/vendor/archive/tar/strconv.go | 252 ++++++ .../engine/vendor/archive/tar/writer.go | 364 ++++++++ 10 files changed, 1980 insertions(+) create mode 100644 components/engine/vendor/archive/tar/common.go create mode 100644 components/engine/vendor/archive/tar/format.go create mode 100644 
components/engine/vendor/archive/tar/reader.go create mode 100644 components/engine/vendor/archive/tar/stat_atim.go create mode 100644 components/engine/vendor/archive/tar/stat_atimespec.go create mode 100644 components/engine/vendor/archive/tar/stat_unix.go create mode 100644 components/engine/vendor/archive/tar/strconv.go create mode 100644 components/engine/vendor/archive/tar/writer.go diff --git a/components/engine/Dockerfile b/components/engine/Dockerfile index a5b35e0ec7..33e88dce26 100644 --- a/components/engine/Dockerfile +++ b/components/engine/Dockerfile @@ -107,6 +107,7 @@ RUN set -x \ # IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines # will need updating, to avoid errors. Ping #docker-maintainers on IRC # with a heads-up. +# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ENV GO_VERSION 1.8.3 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ | tar -xzC /usr/local diff --git a/components/engine/vendor.conf b/components/engine/vendor.conf index eed5fe3f74..7be4a60e70 100644 --- a/components/engine/vendor.conf +++ b/components/engine/vendor.conf @@ -136,3 +136,11 @@ github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github. github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 github.com/opencontainers/selinux v1.0.0-rc1 + +# archive/tar +# mkdir -p ./vendor/archive +# git clone git://github.com/tonistiigi/go-1.git ./go +# git --git-dir ./go/.git --work-tree ./go checkout revert-prefix-ignore +# cp -a go/src/archive/tar ./vendor/archive/tar +# rm -rf ./go +# vndr \ No newline at end of file diff --git a/components/engine/vendor/archive/tar/common.go b/components/engine/vendor/archive/tar/common.go new file mode 100644 index 0000000000..d2ae66d554 --- /dev/null +++ b/components/engine/vendor/archive/tar/common.go @@ -0,0 +1,286 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tar implements access to tar archives. +// It aims to cover most of the variations, including those produced +// by GNU and BSD tars. +// +// References: +// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 +// http://www.gnu.org/software/tar/manual/html_node/Standard.html +// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html +package tar + +import ( + "errors" + "fmt" + "os" + "path" + "time" +) + +// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit +// architectures. If a large value is encountered when decoding, the result +// stored in Header will be the truncated version. + +// Header type flags. +const ( + TypeReg = '0' // regular file + TypeRegA = '\x00' // regular file + TypeLink = '1' // hard link + TypeSymlink = '2' // symbolic link + TypeChar = '3' // character device node + TypeBlock = '4' // block device node + TypeDir = '5' // directory + TypeFifo = '6' // fifo node + TypeCont = '7' // reserved + TypeXHeader = 'x' // extended header + TypeXGlobalHeader = 'g' // global extended header + TypeGNULongName = 'L' // Next file has a long name + TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name + TypeGNUSparse = 'S' // sparse file +) + +// A Header represents a single header in a tar archive. +// Some fields may not be populated. 
+type Header struct { + Name string // name of header file entry + Mode int64 // permission and mode bits + Uid int // user id of owner + Gid int // group id of owner + Size int64 // length in bytes + ModTime time.Time // modified time + Typeflag byte // type of header entry + Linkname string // target name of link + Uname string // user name of owner + Gname string // group name of owner + Devmajor int64 // major number of character or block device + Devminor int64 // minor number of character or block device + AccessTime time.Time // access time + ChangeTime time.Time // status change time + Xattrs map[string]string +} + +// FileInfo returns an os.FileInfo for the Header. +func (h *Header) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. +type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. + if fi.h.Mode&c_ISUID != 0 { + // setuid + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + // setgid + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + // sticky + mode |= os.ModeSticky + } + + // Set file mode bits. + // clear perm, setuid, setgid and sticky bits. 
+ m := os.FileMode(fi.h.Mode) &^ 07777 + if m == c_ISDIR { + // directory + mode |= os.ModeDir + } + if m == c_ISFIFO { + // named pipe (FIFO) + mode |= os.ModeNamedPipe + } + if m == c_ISLNK { + // symbolic link + mode |= os.ModeSymlink + } + if m == c_ISBLK { + // device file + mode |= os.ModeDevice + } + if m == c_ISCHR { + // Unix character device + mode |= os.ModeDevice + mode |= os.ModeCharDevice + } + if m == c_ISSOCK { + // Unix domain socket + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeSymlink: + // symbolic link + mode |= os.ModeSymlink + case TypeChar: + // character device node + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + // block device node + mode |= os.ModeDevice + case TypeDir: + // directory + mode |= os.ModeDir + case TypeFifo: + // fifo node + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, h *Header) error + +// Mode constants from the tar spec. +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// Keywords for the PAX Extended Header +const ( + paxAtime = "atime" + paxCharset = "charset" + paxComment = "comment" + paxCtime = "ctime" // please note that ctime is not a valid pax header. + paxGid = "gid" + paxGname = "gname" + paxLinkpath = "linkpath" + paxMtime = "mtime" + paxPath = "path" + paxSize = "size" + paxUid = "uid" + paxUname = "uname" + paxXattr = "SCHILY.xattr." + paxNone = "" +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. 
+// If fi describes a directory, a slash is appended to the name. +// Because os.FileInfo's Name method returns only the base name of +// the file it describes, it may be necessary to modify the Name field +// of the returned header to provide the full path name of the file. +func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Mode |= c_ISREG + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Mode |= c_ISDIR + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Mode |= c_ISLNK + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Mode |= c_ISCHR + h.Typeflag = TypeChar + } else { + h.Mode |= c_ISBLK + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + h.Mode |= c_ISFIFO + case fm&os.ModeSocket != 0: + h.Mode |= c_ISSOCK + default: + return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + // If possible, populate additional fields from OS-specific + // FileInfo fields. + if sys, ok := fi.Sys().(*Header); ok { + // This FileInfo came from a Header (not the OS). Use the + // original Header to populate all remaining fields. 
+ h.Uid = sys.Uid + h.Gid = sys.Gid + h.Uname = sys.Uname + h.Gname = sys.Gname + h.AccessTime = sys.AccessTime + h.ChangeTime = sys.ChangeTime + if sys.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range sys.Xattrs { + h.Xattrs[k] = v + } + } + if sys.Typeflag == TypeLink { + // hard link + h.Typeflag = TypeLink + h.Size = 0 + h.Linkname = sys.Linkname + } + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +// isHeaderOnlyType checks if the given type flag is of the type that has no +// data section even if a size is specified. +func isHeaderOnlyType(flag byte) bool { + switch flag { + case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: + return true + default: + return false + } +} diff --git a/components/engine/vendor/archive/tar/format.go b/components/engine/vendor/archive/tar/format.go new file mode 100644 index 0000000000..c2c9910d00 --- /dev/null +++ b/components/engine/vendor/archive/tar/format.go @@ -0,0 +1,197 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// Constants to identify various tar formats. +const ( + // The format is unknown. + formatUnknown = (1 << iota) / 2 // Sequence of 0, 1, 2, 4, 8, etc... + + // The format of the original Unix V7 tar tool prior to standardization. + formatV7 + + // The old and new GNU formats, which are incompatible with USTAR. + // This does cover the old GNU sparse extension. + // This does not cover the GNU sparse extensions using PAX headers, + // versions 0.0, 0.1, and 1.0; these fall under the PAX format. + formatGNU + + // Schily's tar format, which is incompatible with USTAR. + // This does not cover STAR extensions to the PAX format; these fall under + // the PAX format. + formatSTAR + + // USTAR is the former standardization of tar defined in POSIX.1-1988. + // This is incompatible with the GNU and STAR formats. 
+ formatUSTAR + + // PAX is the latest standardization of tar defined in POSIX.1-2001. + // This is an extension of USTAR and is "backwards compatible" with it. + // + // Some newer formats add their own extensions to PAX, such as GNU sparse + // files and SCHILY extended attributes. Since they are backwards compatible + // with PAX, they will be labelled as "PAX". + formatPAX +) + +// Magics used to identify various formats. +const ( + magicGNU, versionGNU = "ustar ", " \x00" + magicUSTAR, versionUSTAR = "ustar\x00", "00" + trailerSTAR = "tar\x00" +) + +// Size constants from various tar specifications. +const ( + blockSize = 512 // Size of each block in a tar stream + nameSize = 100 // Max length of the name field in USTAR format + prefixSize = 155 // Max length of the prefix field in USTAR format +) + +var zeroBlock block + +type block [blockSize]byte + +// Convert block to any number of formats. +func (b *block) V7() *headerV7 { return (*headerV7)(b) } +func (b *block) GNU() *headerGNU { return (*headerGNU)(b) } +func (b *block) STAR() *headerSTAR { return (*headerSTAR)(b) } +func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) } +func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) } + +// GetFormat checks that the block is a valid tar header based on the checksum. +// It then attempts to guess the specific format based on magic values. +// If the checksum fails, then formatUnknown is returned. +func (b *block) GetFormat() (format int) { + // Verify checksum. + var p parser + value := p.parseOctal(b.V7().Chksum()) + chksum1, chksum2 := b.ComputeChecksum() + if p.err != nil || (value != chksum1 && value != chksum2) { + return formatUnknown + } + + // Guess the magic values. 
+ magic := string(b.USTAR().Magic()) + version := string(b.USTAR().Version()) + trailer := string(b.STAR().Trailer()) + switch { + case magic == magicUSTAR && trailer == trailerSTAR: + return formatSTAR + case magic == magicUSTAR: + return formatUSTAR + case magic == magicGNU && version == versionGNU: + return formatGNU + default: + return formatV7 + } +} + +// SetFormat writes the magic values necessary for specified format +// and then updates the checksum accordingly. +func (b *block) SetFormat(format int) { + // Set the magic values. + switch format { + case formatV7: + // Do nothing. + case formatGNU: + copy(b.GNU().Magic(), magicGNU) + copy(b.GNU().Version(), versionGNU) + case formatSTAR: + copy(b.STAR().Magic(), magicUSTAR) + copy(b.STAR().Version(), versionUSTAR) + copy(b.STAR().Trailer(), trailerSTAR) + case formatUSTAR, formatPAX: + copy(b.USTAR().Magic(), magicUSTAR) + copy(b.USTAR().Version(), versionUSTAR) + default: + panic("invalid format") + } + + // Update checksum. + // This field is special in that it is terminated by a NULL then space. + var f formatter + field := b.V7().Chksum() + chksum, _ := b.ComputeChecksum() // Possible values are 256..128776 + f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143 + field[7] = ' ' +} + +// ComputeChecksum computes the checksum for the header block. +// POSIX specifies a sum of the unsigned byte values, but the Sun tar used +// signed byte values. +// We compute and return both. +func (b *block) ComputeChecksum() (unsigned, signed int64) { + for i, c := range b { + if 148 <= i && i < 156 { + c = ' ' // Treat the checksum field itself as all spaces. 
+ } + unsigned += int64(uint8(c)) + signed += int64(int8(c)) + } + return unsigned, signed +} + +type headerV7 [blockSize]byte + +func (h *headerV7) Name() []byte { return h[000:][:100] } +func (h *headerV7) Mode() []byte { return h[100:][:8] } +func (h *headerV7) UID() []byte { return h[108:][:8] } +func (h *headerV7) GID() []byte { return h[116:][:8] } +func (h *headerV7) Size() []byte { return h[124:][:12] } +func (h *headerV7) ModTime() []byte { return h[136:][:12] } +func (h *headerV7) Chksum() []byte { return h[148:][:8] } +func (h *headerV7) TypeFlag() []byte { return h[156:][:1] } +func (h *headerV7) LinkName() []byte { return h[157:][:100] } + +type headerGNU [blockSize]byte + +func (h *headerGNU) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerGNU) Magic() []byte { return h[257:][:6] } +func (h *headerGNU) Version() []byte { return h[263:][:2] } +func (h *headerGNU) UserName() []byte { return h[265:][:32] } +func (h *headerGNU) GroupName() []byte { return h[297:][:32] } +func (h *headerGNU) DevMajor() []byte { return h[329:][:8] } +func (h *headerGNU) DevMinor() []byte { return h[337:][:8] } +func (h *headerGNU) AccessTime() []byte { return h[345:][:12] } +func (h *headerGNU) ChangeTime() []byte { return h[357:][:12] } +func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) } +func (h *headerGNU) RealSize() []byte { return h[483:][:12] } + +type headerSTAR [blockSize]byte + +func (h *headerSTAR) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerSTAR) Magic() []byte { return h[257:][:6] } +func (h *headerSTAR) Version() []byte { return h[263:][:2] } +func (h *headerSTAR) UserName() []byte { return h[265:][:32] } +func (h *headerSTAR) GroupName() []byte { return h[297:][:32] } +func (h *headerSTAR) DevMajor() []byte { return h[329:][:8] } +func (h *headerSTAR) DevMinor() []byte { return h[337:][:8] } +func (h *headerSTAR) Prefix() []byte { return h[345:][:131] } +func (h *headerSTAR) AccessTime() []byte { return 
h[476:][:12] } +func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] } +func (h *headerSTAR) Trailer() []byte { return h[508:][:4] } + +type headerUSTAR [blockSize]byte + +func (h *headerUSTAR) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerUSTAR) Magic() []byte { return h[257:][:6] } +func (h *headerUSTAR) Version() []byte { return h[263:][:2] } +func (h *headerUSTAR) UserName() []byte { return h[265:][:32] } +func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] } +func (h *headerUSTAR) DevMajor() []byte { return h[329:][:8] } +func (h *headerUSTAR) DevMinor() []byte { return h[337:][:8] } +func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] } + +type sparseArray []byte + +func (s sparseArray) Entry(i int) sparseNode { return (sparseNode)(s[i*24:]) } +func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] } +func (s sparseArray) MaxEntries() int { return len(s) / 24 } + +type sparseNode []byte + +func (s sparseNode) Offset() []byte { return s[00:][:12] } +func (s sparseNode) NumBytes() []byte { return s[12:][:12] } diff --git a/components/engine/vendor/archive/tar/reader.go b/components/engine/vendor/archive/tar/reader.go new file mode 100644 index 0000000000..a6142c6b86 --- /dev/null +++ b/components/engine/vendor/archive/tar/reader.go @@ -0,0 +1,800 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - pax extensions + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "math" + "strconv" + "strings" + "time" +) + +var ( + ErrHeader = errors.New("archive/tar: invalid tar header") +) + +// A Reader provides sequential access to the contents of a tar archive. +// A tar archive consists of a sequence of files. 
+// The Next method advances to the next file in the archive (including the first), +// and then it can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader + pad int64 // amount of padding (ignored) after current file entry + curr numBytesReader // reader for current file entry + blk block // buffer to use as temporary local storage + + // err is a persistent error. + // It is only the responsibility of every exported method of Reader to + // ensure that this error is sticky. + err error +} + +// A numBytesReader is an io.Reader with a numBytes method, returning the number +// of bytes remaining in the underlying encoded data. +type numBytesReader interface { + io.Reader + numBytes() int64 +} + +// A regFileReader is a numBytesReader for reading file data from a tar archive. +type regFileReader struct { + r io.Reader // underlying reader + nb int64 // number of unread bytes for current file entry +} + +// A sparseFileReader is a numBytesReader for reading sparse file data from a +// tar archive. +type sparseFileReader struct { + rfr numBytesReader // Reads the sparse-encoded file data + sp []sparseEntry // The sparse map for the file + pos int64 // Keeps track of file position + total int64 // Total size of the file +} + +// A sparseEntry holds a single entry in a sparse file's sparse map. +// +// Sparse files are represented using a series of sparseEntrys. +// Despite the name, a sparseEntry represents an actual data fragment that +// references data found in the underlying archive stream. All regions not +// covered by a sparseEntry are logically filled with zeros. 
+// +// For example, if the underlying raw file contains the 10-byte data: +// var compactData = "abcdefgh" +// +// And the sparse map has the following entries: +// var sp = []sparseEntry{ +// {offset: 2, numBytes: 5} // Data fragment for [2..7] +// {offset: 18, numBytes: 3} // Data fragment for [18..21] +// } +// +// Then the content of the resulting sparse file with a "real" size of 25 is: +// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 +type sparseEntry struct { + offset int64 // Starting position of the fragment + numBytes int64 // Length of the fragment +} + +// Keywords for GNU sparse files in a PAX extended header +const ( + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { return &Reader{r: r} } + +// Next advances to the next entry in the tar archive. +// +// io.EOF is returned at the end of the input. +func (tr *Reader) Next() (*Header, error) { + if tr.err != nil { + return nil, tr.err + } + hdr, err := tr.next() + tr.err = err + return hdr, err +} + +func (tr *Reader) next() (*Header, error) { + var extHdrs map[string]string + + // Externally, Next iterates through the tar archive as if it is a series of + // files. Internally, the tar format often uses fake "files" to add meta + // data that describes the next file. These meta data "files" should not + // normally be visible to the outside. As such, this loop iterates through + // one or more "header files" until it finds a "normal file". 
+loop: + for { + if err := tr.skipUnread(); err != nil { + return nil, err + } + hdr, rawHdr, err := tr.readHeader() + if err != nil { + return nil, err + } + if err := tr.handleRegularFile(hdr); err != nil { + return nil, err + } + + // Check for PAX/GNU special headers and files. + switch hdr.Typeflag { + case TypeXHeader: + extHdrs, err = parsePAX(tr) + if err != nil { + return nil, err + } + continue loop // This is a meta header affecting the next header + case TypeGNULongName, TypeGNULongLink: + realname, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + + // Convert GNU extensions to use PAX headers. + if extHdrs == nil { + extHdrs = make(map[string]string) + } + var p parser + switch hdr.Typeflag { + case TypeGNULongName: + extHdrs[paxPath] = p.parseString(realname) + case TypeGNULongLink: + extHdrs[paxLinkpath] = p.parseString(realname) + } + if p.err != nil { + return nil, p.err + } + continue loop // This is a meta header affecting the next header + default: + // The old GNU sparse format is handled here since it is technically + // just a regular file with additional attributes. + + if err := mergePAX(hdr, extHdrs); err != nil { + return nil, err + } + + // The extended headers may have updated the size. + // Thus, setup the regFileReader again after merging PAX headers. + if err := tr.handleRegularFile(hdr); err != nil { + return nil, err + } + + // Sparse formats rely on being able to read from the logical data + // section; there must be a preceding call to handleRegularFile. + if err := tr.handleSparseFile(hdr, rawHdr, extHdrs); err != nil { + return nil, err + } + return hdr, nil // This is a file, so stop + } + } +} + +// handleRegularFile sets up the current file reader and padding such that it +// can only read the following logical data section. It will properly handle +// special headers that contain no data section. 
+func (tr *Reader) handleRegularFile(hdr *Header) error { + nb := hdr.Size + if isHeaderOnlyType(hdr.Typeflag) { + nb = 0 + } + if nb < 0 { + return ErrHeader + } + + tr.pad = -nb & (blockSize - 1) // blockSize is a power of two + tr.curr = ®FileReader{r: tr.r, nb: nb} + return nil +} + +// handleSparseFile checks if the current file is a sparse format of any type +// and sets the curr reader appropriately. +func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block, extHdrs map[string]string) error { + var sp []sparseEntry + var err error + if hdr.Typeflag == TypeGNUSparse { + sp, err = tr.readOldGNUSparseMap(hdr, rawHdr) + if err != nil { + return err + } + } else { + sp, err = tr.checkForGNUSparsePAXHeaders(hdr, extHdrs) + if err != nil { + return err + } + } + + // If sp is non-nil, then this is a sparse file. + // Note that it is possible for len(sp) to be zero. + if sp != nil { + tr.curr, err = newSparseFileReader(tr.curr, sp, hdr.Size) + } + return err +} + +// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then +// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to +// be treated as a regular file. +func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { + var sparseFormat string + + // Check for sparse format indicators + major, majorOk := headers[paxGNUSparseMajor] + minor, minorOk := headers[paxGNUSparseMinor] + sparseName, sparseNameOk := headers[paxGNUSparseName] + _, sparseMapOk := headers[paxGNUSparseMap] + sparseSize, sparseSizeOk := headers[paxGNUSparseSize] + sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] + + // Identify which, if any, sparse format applies from which PAX headers are set + if majorOk && minorOk { + sparseFormat = major + "." 
+ minor + } else if sparseNameOk && sparseMapOk { + sparseFormat = "0.1" + } else if sparseSizeOk { + sparseFormat = "0.0" + } else { + // Not a PAX format GNU sparse file. + return nil, nil + } + + // Check for unknown sparse format + if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { + return nil, nil + } + + // Update hdr from GNU sparse PAX headers + if sparseNameOk { + hdr.Name = sparseName + } + if sparseSizeOk { + realSize, err := strconv.ParseInt(sparseSize, 10, 64) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } else if sparseRealSizeOk { + realSize, err := strconv.ParseInt(sparseRealSize, 10, 64) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } + + // Set up the sparse map, according to the particular sparse format in use + var sp []sparseEntry + var err error + switch sparseFormat { + case "0.0", "0.1": + sp, err = readGNUSparseMap0x1(headers) + case "1.0": + sp, err = readGNUSparseMap1x0(tr.curr) + } + return sp, err +} + +// mergePAX merges well known headers according to PAX standard. +// In general headers with the same name as those found +// in the header struct overwrite those found in the header +// struct with higher precision or longer values. Esp. useful +// for name and linkname fields. 
+func mergePAX(hdr *Header, headers map[string]string) (err error) { + var id64 int64 + for k, v := range headers { + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxUname: + hdr.Uname = v + case paxGname: + hdr.Gname = v + case paxUid: + id64, err = strconv.ParseInt(v, 10, 64) + hdr.Uid = int(id64) // Integer overflow possible + case paxGid: + id64, err = strconv.ParseInt(v, 10, 64) + hdr.Gid = int(id64) // Integer overflow possible + case paxAtime: + hdr.AccessTime, err = parsePAXTime(v) + case paxMtime: + hdr.ModTime, err = parsePAXTime(v) + case paxCtime: + hdr.ChangeTime, err = parsePAXTime(v) + case paxSize: + hdr.Size, err = strconv.ParseInt(v, 10, 64) + default: + if strings.HasPrefix(k, paxXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxXattr):]] = v + } + } + if err != nil { + return ErrHeader + } + } + return nil +} + +// parsePAX parses PAX headers. +// If an extended header (type 'x') is invalid, ErrHeader is returned +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + sbuf := string(buf) + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into format 0.1 + // headers since 0.0 headers were not PAX compliant. + var sparseMap []string + + extHdrs := make(map[string]string) + for len(sbuf) > 0 { + key, value, residual, err := parsePAXRecord(sbuf) + if err != nil { + return nil, ErrHeader + } + sbuf = residual + + switch key { + case paxGNUSparseOffset, paxGNUSparseNumBytes: + // Validate sparse header order and value. + if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) || + (len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) || + strings.Contains(value, ",") { + return nil, ErrHeader + } + sparseMap = append(sparseMap, value) + default: + // According to PAX specification, a value is stored only if it is + // non-empty. 
Otherwise, the key is deleted. + if len(value) > 0 { + extHdrs[key] = value + } else { + delete(extHdrs, key) + } + } + } + if len(sparseMap) > 0 { + extHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",") + } + return extHdrs, nil +} + +// skipUnread skips any unread bytes in the existing file entry, as well as any +// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is +// encountered in the data portion; it is okay to hit io.EOF in the padding. +// +// Note that this function still works properly even when sparse files are being +// used since numBytes returns the bytes remaining in the underlying io.Reader. +func (tr *Reader) skipUnread() error { + dataSkip := tr.numBytes() // Number of data bytes to skip + totalSkip := dataSkip + tr.pad // Total number of bytes to skip + tr.curr, tr.pad = nil, 0 + + // If possible, Seek to the last byte before the end of the data section. + // Do this because Seek is often lazy about reporting errors; this will mask + // the fact that the tar stream may be truncated. We can rely on the + // io.CopyN done shortly afterwards to trigger any IO errors. + var seekSkipped int64 // Number of bytes skipped via Seek + if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 { + // Not all io.Seeker can actually Seek. For example, os.Stdin implements + // io.Seeker, but calling Seek always returns an error and performs + // no action. Thus, we try an innocent seek to the current position + // to see if Seek is really supported. + pos1, err := sr.Seek(0, io.SeekCurrent) + if err == nil { + // Seek seems supported, so perform the real Seek. 
+ pos2, err := sr.Seek(dataSkip-1, io.SeekCurrent) + if err != nil { + return err + } + seekSkipped = pos2 - pos1 + } + } + + copySkipped, err := io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped) + if err == io.EOF && seekSkipped+copySkipped < dataSkip { + err = io.ErrUnexpectedEOF + } + return err +} + +// readHeader reads the next block header and assumes that the underlying reader +// is already aligned to a block boundary. It returns the raw block of the +// header in case further processing is required. +// +// The err will be set to io.EOF only when one of the following occurs: +// * Exactly 0 bytes are read and EOF is hit. +// * Exactly 1 block of zeros is read and EOF is hit. +// * At least 2 blocks of zeros are read. +func (tr *Reader) readHeader() (*Header, *block, error) { + // Two blocks of zero bytes marks the end of the archive. + if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil { + return nil, nil, err // EOF is okay here; exactly 0 bytes read + } + if bytes.Equal(tr.blk[:], zeroBlock[:]) { + if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil { + return nil, nil, err // EOF is okay here; exactly 1 block of zeros read + } + if bytes.Equal(tr.blk[:], zeroBlock[:]) { + return nil, nil, io.EOF // normal EOF; exactly 2 block of zeros read + } + return nil, nil, ErrHeader // Zero block and then non-zero block + } + + // Verify the header matches a known format. + format := tr.blk.GetFormat() + if format == formatUnknown { + return nil, nil, ErrHeader + } + + var p parser + hdr := new(Header) + + // Unpack the V7 header. + v7 := tr.blk.V7() + hdr.Name = p.parseString(v7.Name()) + hdr.Mode = p.parseNumeric(v7.Mode()) + hdr.Uid = int(p.parseNumeric(v7.UID())) + hdr.Gid = int(p.parseNumeric(v7.GID())) + hdr.Size = p.parseNumeric(v7.Size()) + hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0) + hdr.Typeflag = v7.TypeFlag()[0] + hdr.Linkname = p.parseString(v7.LinkName()) + + // Unpack format specific fields. 
+ if format > formatV7 { + ustar := tr.blk.USTAR() + hdr.Uname = p.parseString(ustar.UserName()) + hdr.Gname = p.parseString(ustar.GroupName()) + if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { + hdr.Devmajor = p.parseNumeric(ustar.DevMajor()) + hdr.Devminor = p.parseNumeric(ustar.DevMinor()) + } + + var prefix string + switch format { + case formatUSTAR, formatGNU: + // TODO(dsnet): Do not use the prefix field for the GNU format! + // See golang.org/issues/12594 + ustar := tr.blk.USTAR() + prefix = p.parseString(ustar.Prefix()) + case formatSTAR: + star := tr.blk.STAR() + prefix = p.parseString(star.Prefix()) + hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0) + hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0) + } + if len(prefix) > 0 { + hdr.Name = prefix + "/" + hdr.Name + } + } + return hdr, &tr.blk, p.err +} + +// readOldGNUSparseMap reads the sparse map from the old GNU sparse format. +// The sparse map is stored in the tar header if it's small enough. +// If it's larger than four entries, then one or more extension headers are used +// to store the rest of the sparse map. +// +// The Header.Size does not reflect the size of any extended headers used. +// Thus, this function will read from the raw io.Reader to fetch extra headers. +// This method mutates blk in the process. +func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, error) { + // Make sure that the input format is GNU. + // Unfortunately, the STAR format also has a sparse header format that uses + // the same type flag but has a completely different layout. + if blk.GetFormat() != formatGNU { + return nil, ErrHeader + } + + var p parser + hdr.Size = p.parseNumeric(blk.GNU().RealSize()) + if p.err != nil { + return nil, p.err + } + var s sparseArray = blk.GNU().Sparse() + var sp = make([]sparseEntry, 0, s.MaxEntries()) + for { + for i := 0; i < s.MaxEntries(); i++ { + // This termination condition is identical to GNU and BSD tar. 
+ if s.Entry(i).Offset()[0] == 0x00 { + break // Don't return, need to process extended headers (even if empty) + } + offset := p.parseNumeric(s.Entry(i).Offset()) + numBytes := p.parseNumeric(s.Entry(i).NumBytes()) + if p.err != nil { + return nil, p.err + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + if s.IsExtended()[0] > 0 { + // There are more entries. Read an extension header and parse its entries. + if _, err := io.ReadFull(tr.r, blk[:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + s = blk.Sparse() + continue + } + return sp, nil // Done + } +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format +// version 1.0. The format of the sparse map consists of a series of +// newline-terminated numeric fields. The first field is the number of entries +// and is always present. Following this are the entries, consisting of two +// fields (offset, numBytes). This function must stop reading at the end +// boundary of the block containing the last newline. +// +// Note that the GNU manual says that numeric values should be encoded in octal +// format. However, the GNU tar utility itself outputs these values in decimal. +// As such, this library treats values as being encoded in decimal. +func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { + var cntNewline int64 + var buf bytes.Buffer + var blk = make([]byte, blockSize) + + // feedTokens copies data in numBlock chunks from r into buf until there are + // at least cnt newlines in buf. It will not read more blocks than needed. + var feedTokens = func(cnt int64) error { + for cntNewline < cnt { + if _, err := io.ReadFull(r, blk); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + buf.Write(blk) + for _, c := range blk { + if c == '\n' { + cntNewline++ + } + } + } + return nil + } + + // nextToken gets the next token delimited by a newline. 
This assumes that + // at least one newline exists in the buffer. + var nextToken = func() string { + cntNewline-- + tok, _ := buf.ReadString('\n') + return tok[:len(tok)-1] // Cut off newline + } + + // Parse for the number of entries. + // Use integer overflow resistant math to check this. + if err := feedTokens(1); err != nil { + return nil, err + } + numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // Parse for all member entries. + // numEntries is trusted after this since a potential attacker must have + // committed resources proportional to what this library used. + if err := feedTokens(2 * numEntries); err != nil { + return nil, err + } + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(nextToken(), 10, 64) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(nextToken(), 10, 64) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + return sp, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format +// version 0.1. The sparse map is stored in the PAX headers. +func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) { + // Get number of entries. + // Use integer overflow resistant math to check this. + numEntriesStr := extHdrs[paxGNUSparseNumBlocks] + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // There should be two numbers in sparseMap for each entry. + sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",") + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map. 
+ // numEntries is trusted now. + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + return sp, nil +} + +// numBytes returns the number of bytes left to read in the current file's entry +// in the tar archive, or 0 if there is no current file. +func (tr *Reader) numBytes() int64 { + if tr.curr == nil { + // No current file, so no bytes + return 0 + } + return tr.curr.numBytes() +} + +// Read reads from the current entry in the tar archive. +// It returns 0, io.EOF when it reaches the end of that entry, +// until Next is called to advance to the next entry. +// +// Calling Read on special types like TypeLink, TypeSymLink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what +// the Header.Size claims. +func (tr *Reader) Read(b []byte) (int, error) { + if tr.err != nil { + return 0, tr.err + } + if tr.curr == nil { + return 0, io.EOF + } + + n, err := tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return n, err +} + +func (rfr *regFileReader) Read(b []byte) (n int, err error) { + if rfr.nb == 0 { + // file consumed + return 0, io.EOF + } + if int64(len(b)) > rfr.nb { + b = b[0:rfr.nb] + } + n, err = rfr.r.Read(b) + rfr.nb -= int64(n) + + if err == io.EOF && rfr.nb > 0 { + err = io.ErrUnexpectedEOF + } + return +} + +// numBytes returns the number of bytes left to read in the file's data in the tar archive. +func (rfr *regFileReader) numBytes() int64 { + return rfr.nb +} + +// newSparseFileReader creates a new sparseFileReader, but validates all of the +// sparse entries before doing so. 
+func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) { + if total < 0 { + return nil, ErrHeader // Total size cannot be negative + } + + // Validate all sparse entries. These are the same checks as performed by + // the BSD tar utility. + for i, s := range sp { + switch { + case s.offset < 0 || s.numBytes < 0: + return nil, ErrHeader // Negative values are never okay + case s.offset > math.MaxInt64-s.numBytes: + return nil, ErrHeader // Integer overflow with large length + case s.offset+s.numBytes > total: + return nil, ErrHeader // Region extends beyond the "real" size + case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset: + return nil, ErrHeader // Regions can't overlap and must be in order + } + } + return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil +} + +// readHole reads a sparse hole ending at endOffset. +func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int { + n64 := endOffset - sfr.pos + if n64 > int64(len(b)) { + n64 = int64(len(b)) + } + n := int(n64) + for i := 0; i < n; i++ { + b[i] = 0 + } + sfr.pos += n64 + return n +} + +// Read reads the sparse file data in expanded form. +func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { + // Skip past all empty fragments. + for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 { + sfr.sp = sfr.sp[1:] + } + + // If there are no more fragments, then it is possible that there + // is one last sparse hole. + if len(sfr.sp) == 0 { + // This behavior matches the BSD tar utility. + // However, GNU tar stops returning data even if sfr.total is unmet. + if sfr.pos < sfr.total { + return sfr.readHole(b, sfr.total), nil + } + return 0, io.EOF + } + + // In front of a data fragment, so read a hole. + if sfr.pos < sfr.sp[0].offset { + return sfr.readHole(b, sfr.sp[0].offset), nil + } + + // In a data fragment, so read from it. 
+ // This math is overflow free since we verify that offset and numBytes can + // be safely added when creating the sparseFileReader. + endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment + bytesLeft := endPos - sfr.pos // Bytes left in fragment + if int64(len(b)) > bytesLeft { + b = b[:bytesLeft] + } + + n, err = sfr.rfr.Read(b) + sfr.pos += int64(n) + if err == io.EOF { + if sfr.pos < endPos { + err = io.ErrUnexpectedEOF // There was supposed to be more data + } else if sfr.pos < sfr.total { + err = nil // There is still an implicit sparse hole at the end + } + } + + if sfr.pos == endPos { + sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it + } + return n, err +} + +// numBytes returns the number of bytes left to read in the sparse file's +// sparse-encoded data in the tar archive. +func (sfr *sparseFileReader) numBytes() int64 { + return sfr.rfr.numBytes() +} diff --git a/components/engine/vendor/archive/tar/stat_atim.go b/components/engine/vendor/archive/tar/stat_atim.go new file mode 100644 index 0000000000..cf9cc79c59 --- /dev/null +++ b/components/engine/vendor/archive/tar/stat_atim.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/components/engine/vendor/archive/tar/stat_atimespec.go b/components/engine/vendor/archive/tar/stat_atimespec.go new file mode 100644 index 0000000000..6f17dbe307 --- /dev/null +++ b/components/engine/vendor/archive/tar/stat_atimespec.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/components/engine/vendor/archive/tar/stat_unix.go b/components/engine/vendor/archive/tar/stat_unix.go new file mode 100644 index 0000000000..cb843db4cf --- /dev/null +++ b/components/engine/vendor/archive/tar/stat_unix.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "syscall" +) + +func init() { + sysStat = statUnix +} + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + // TODO(bradfitz): populate username & group. os/user + // doesn't cache LookupId lookups, and lacks group + // lookup functions. + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + // TODO(bradfitz): major/minor device numbers? + return nil +} diff --git a/components/engine/vendor/archive/tar/strconv.go b/components/engine/vendor/archive/tar/strconv.go new file mode 100644 index 0000000000..bb5b51c02d --- /dev/null +++ b/components/engine/vendor/archive/tar/strconv.go @@ -0,0 +1,252 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tar + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "time" +) + +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 { + return false + } + } + return true +} + +func toASCII(s string) string { + if isASCII(s) { + return s + } + var buf bytes.Buffer + for _, c := range s { + if c < 0x80 { + buf.WriteByte(byte(c)) + } + } + return buf.String() +} + +type parser struct { + err error // Last error seen +} + +type formatter struct { + err error // Last error seen +} + +// parseString parses bytes as a NUL-terminated C-style string. +// If a NUL byte is not found then the whole slice is returned as a string. +func (*parser) parseString(b []byte) string { + n := 0 + for n < len(b) && b[n] != 0 { + n++ + } + return string(b[0:n]) +} + +// Write s into b, terminating it with a NUL if there is room. +func (f *formatter) formatString(b []byte, s string) { + if len(s) > len(b) { + f.err = ErrFieldTooLong + return + } + ascii := toASCII(s) + copy(b, ascii) + if len(ascii) < len(b) { + b[len(ascii)] = 0 + } +} + +// fitsInBase256 reports whether x can be encoded into n bytes using base-256 +// encoding. Unlike octal encoding, base-256 encoding does not require that the +// string ends with a NUL character. Thus, all n bytes are available for output. +// +// If operating in binary mode, this assumes strict GNU binary mode; which means +// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is +// equivalent to the sign bit in two's complement form. +func fitsInBase256(n int, x int64) bool { + var binBits = uint(n-1) * 8 + return n >= 9 || (x >= -1<<binBits && x < 1<<binBits) +} + +// parseNumeric parses the input as being encoded in either base-256 or octal. +// This function may return negative numbers. +// If parsing fails or an integer overflow occurs, err will be set. +func (p *parser) parseNumeric(b []byte) int64 { + // Check for base-256 (binary) format first. + // If the first bit is set, then all following bits constitute a two's + // complement encoded number in big-endian byte order. + if len(b) > 0 && b[0]&0x80 != 0 { + // Handling negative numbers relies on the following identity: + // -a-1 == ^a + // + // If the number is negative, we use an inversion mask to invert the + // data bytes and treat the value as an unsigned number. 
+ var inv byte // 0x00 if positive or zero, 0xff if negative + if b[0]&0x40 != 0 { + inv = 0xff + } + + var x uint64 + for i, c := range b { + c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing + if i == 0 { + c &= 0x7f // Ignore signal bit in first byte + } + if (x >> 56) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + x = x<<8 | uint64(c) + } + if (x >> 63) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + if inv == 0xff { + return ^int64(x) + } + return int64(x) + } + + // Normal case is base-8 (octal) format. + return p.parseOctal(b) +} + +// Write x into b, as binary (GNUtar/star extension). +func (f *formatter) formatNumeric(b []byte, x int64) { + if fitsInBase256(len(b), x) { + for i := len(b) - 1; i >= 0; i-- { + b[i] = byte(x) + x >>= 8 + } + b[0] |= 0x80 // Highest bit indicates binary format + return + } + + f.formatOctal(b, 0) // Last resort, just write zero + f.err = ErrFieldTooLong +} + +func (p *parser) parseOctal(b []byte) int64 { + // Because unused fields are filled with NULs, we need + // to skip leading NULs. Fields may also be padded with + // spaces or NULs. + // So we remove leading and trailing NULs and spaces to + // be sure. + b = bytes.Trim(b, " \x00") + + if len(b) == 0 { + return 0 + } + x, perr := strconv.ParseUint(p.parseString(b), 8, 64) + if perr != nil { + p.err = ErrHeader + } + return int64(x) +} + +func (f *formatter) formatOctal(b []byte, x int64) { + s := strconv.FormatInt(x, 8) + // Add leading zeros, but leave room for a NUL. + if n := len(b) - len(s) - 1; n > 0 { + s = strings.Repeat("0", n) + s + } + f.formatString(b, s) +} + +// parsePAXTime takes a string of the form %d.%d as described in the PAX +// specification. Note that this implementation allows for negative timestamps, +// which is allowed for by the PAX specification, but not always portable. 
+func parsePAXTime(s string) (time.Time, error) { + const maxNanoSecondDigits = 9 + + // Split string into seconds and sub-seconds parts. + ss, sn := s, "" + if pos := strings.IndexByte(s, '.'); pos >= 0 { + ss, sn = s[:pos], s[pos+1:] + } + + // Parse the seconds. + secs, err := strconv.ParseInt(ss, 10, 64) + if err != nil { + return time.Time{}, ErrHeader + } + if len(sn) == 0 { + return time.Unix(secs, 0), nil // No sub-second values + } + + // Parse the nanoseconds. + if strings.Trim(sn, "0123456789") != "" { + return time.Time{}, ErrHeader + } + if len(sn) < maxNanoSecondDigits { + sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad + } else { + sn = sn[:maxNanoSecondDigits] // Right truncate + } + nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed + if len(ss) > 0 && ss[0] == '-' { + return time.Unix(secs, -1*int64(nsecs)), nil // Negative correction + } + return time.Unix(secs, int64(nsecs)), nil +} + +// TODO(dsnet): Implement formatPAXTime. + +// parsePAXRecord parses the input PAX record string into a key-value pair. +// If parsing is successful, it will slice off the currently read record and +// return the remainder as r. +// +// A PAX record is of the following form: +// "%d %s=%s\n" % (size, key, value) +func parsePAXRecord(s string) (k, v, r string, err error) { + // The size field ends at the first space. + sp := strings.IndexByte(s, ' ') + if sp == -1 { + return "", "", s, ErrHeader + } + + // Parse the first token as a decimal integer. + n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int + if perr != nil || n < 5 || int64(len(s)) < n { + return "", "", s, ErrHeader + } + + // Extract everything between the space and the final newline. + rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] + if nl != "\n" { + return "", "", s, ErrHeader + } + + // The first equals separates the key from the value. 
+ eq := strings.IndexByte(rec, '=') + if eq == -1 { + return "", "", s, ErrHeader + } + return rec[:eq], rec[eq+1:], rem, nil +} + +// formatPAXRecord formats a single PAX record, prefixing it with the +// appropriate length. +func formatPAXRecord(k, v string) string { + const padding = 3 // Extra padding for ' ', '=', and '\n' + size := len(k) + len(v) + padding + size += len(strconv.Itoa(size)) + record := fmt.Sprintf("%d %s=%s\n", size, k, v) + + // Final adjustment if adding size field increased the record size. + if len(record) != size { + size = len(record) + record = fmt.Sprintf("%d %s=%s\n", size, k, v) + } + return record +} diff --git a/components/engine/vendor/archive/tar/writer.go b/components/engine/vendor/archive/tar/writer.go new file mode 100644 index 0000000000..596fb8b9e1 --- /dev/null +++ b/components/engine/vendor/archive/tar/writer.go @@ -0,0 +1,364 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - catch more errors (no first header, etc.) + +import ( + "bytes" + "errors" + "fmt" + "io" + "path" + "sort" + "strconv" + "strings" + "time" +) + +var ( + ErrWriteTooLong = errors.New("archive/tar: write too long") + ErrFieldTooLong = errors.New("archive/tar: header field too long") + ErrWriteAfterClose = errors.New("archive/tar: write after close") + errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") +) + +// A Writer provides sequential writing of a tar archive in POSIX.1 format. +// A tar archive consists of a sequence of files. +// Call WriteHeader to begin a new file, and then call Write to supply that file's data, +// writing at most hdr.Size bytes in total. 
+type Writer struct { + w io.Writer + err error + nb int64 // number of unwritten bytes for current file entry + pad int64 // amount of padding to write after current file entry + closed bool + usedBinary bool // whether the binary numeric field extension was used + preferPax bool // use PAX header instead of binary numeric header + hdrBuff block // buffer to use in writeHeader when writing a regular header + paxHdrBuff block // buffer to use in writeHeader when writing a PAX header +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { return &Writer{w: w} } + +// Flush finishes writing the current file (optional). +func (tw *Writer) Flush() error { + if tw.nb > 0 { + tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb) + return tw.err + } + + n := tw.nb + tw.pad + for n > 0 && tw.err == nil { + nr := n + if nr > blockSize { + nr = blockSize + } + var nw int + nw, tw.err = tw.w.Write(zeroBlock[0:nr]) + n -= int64(nw) + } + tw.nb = 0 + tw.pad = 0 + return tw.err +} + +var ( + minTime = time.Unix(0, 0) + // There is room for 11 octal digits (33 bits) of mtime. + maxTime = minTime.Add((1<<33 - 1) * time.Second) +) + +// WriteHeader writes hdr and prepares to accept the file's contents. +// WriteHeader calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. +func (tw *Writer) WriteHeader(hdr *Header) error { + return tw.writeHeader(hdr, true) +} + +// WriteHeader writes hdr and prepares to accept the file's contents. +// WriteHeader calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. +// As this method is called internally by writePax header to allow it to +// suppress writing the pax header. 
+func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { + if tw.closed { + return ErrWriteAfterClose + } + if tw.err == nil { + tw.Flush() + } + if tw.err != nil { + return tw.err + } + + // a map to hold pax header records, if any are needed + paxHeaders := make(map[string]string) + + // TODO(dsnet): we might want to use PAX headers for + // subsecond time resolution, but for now let's just capture + // too long fields or non ascii characters + + // We need to select which scratch buffer to use carefully, + // since this method is called recursively to write PAX headers. + // If allowPax is true, this is the non-recursive call, and we will use hdrBuff. + // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is + // already being used by the non-recursive call, so we must use paxHdrBuff. + header := &tw.hdrBuff + if !allowPax { + header = &tw.paxHdrBuff + } + copy(header[:], zeroBlock[:]) + + // Wrappers around formatter that automatically sets paxHeaders if the + // argument extends beyond the capacity of the input byte slice. + var f formatter + var formatString = func(b []byte, s string, paxKeyword string) { + needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s) + if needsPaxHeader { + paxHeaders[paxKeyword] = s + return + } + f.formatString(b, s) + } + var formatNumeric = func(b []byte, x int64, paxKeyword string) { + // Try octal first. + s := strconv.FormatInt(x, 8) + if len(s) < len(b) { + f.formatOctal(b, x) + return + } + + // If it is too long for octal, and PAX is preferred, use a PAX header. + if paxKeyword != paxNone && tw.preferPax { + f.formatOctal(b, 0) + s := strconv.FormatInt(x, 10) + paxHeaders[paxKeyword] = s + return + } + + tw.usedBinary = true + f.formatNumeric(b, x) + } + + // Handle out of range ModTime carefully. 
+ var modTime int64 + if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) { + modTime = hdr.ModTime.Unix() + } + + v7 := header.V7() + formatString(v7.Name(), hdr.Name, paxPath) + // TODO(dsnet): The GNU format permits the mode field to be encoded in + // base-256 format. Thus, we can use formatNumeric instead of formatOctal. + f.formatOctal(v7.Mode(), hdr.Mode) + formatNumeric(v7.UID(), int64(hdr.Uid), paxUid) + formatNumeric(v7.GID(), int64(hdr.Gid), paxGid) + formatNumeric(v7.Size(), hdr.Size, paxSize) + // TODO(dsnet): Consider using PAX for finer time granularity. + formatNumeric(v7.ModTime(), modTime, paxNone) + v7.TypeFlag()[0] = hdr.Typeflag + formatString(v7.LinkName(), hdr.Linkname, paxLinkpath) + + ustar := header.USTAR() + formatString(ustar.UserName(), hdr.Uname, paxUname) + formatString(ustar.GroupName(), hdr.Gname, paxGname) + formatNumeric(ustar.DevMajor(), hdr.Devmajor, paxNone) + formatNumeric(ustar.DevMinor(), hdr.Devminor, paxNone) + + // TODO(dsnet): The logic surrounding the prefix field is broken when trying + // to encode the header as GNU format. The challenge with the current logic + // is that we are unsure what format we are using at any given moment until + // we have processed *all* of the fields. The problem is that by the time + // all fields have been processed, some work has already been done to handle + // each field under the assumption that it is for one given format or + // another. In some situations, this causes the Writer to be confused and + // encode a prefix field when the format being used is GNU. Thus, producing + // an invalid tar file. + // + // As a short-term fix, we disable the logic to use the prefix field, which + // will force the badly generated GNU files to become encoded as being + // the PAX format. + // + // As an alternative fix, we could hard-code preferPax to be true. However, + // this is problematic for the following reasons: + // * The preferPax functionality is not tested at all. 
+ // * This can result in headers that try to use both the GNU and PAX + // features at the same time, which is also wrong. + // + // The proper fix for this is to use a two-pass method: + // * The first pass simply determines what set of formats can possibly + // encode the given header. + // * The second pass actually encodes the header as that given format + // without worrying about violating the format. + // + // See the following: + // https://golang.org/issue/12594 + // https://golang.org/issue/17630 + // https://golang.org/issue/9683 + const usePrefix = false + + // try to use a ustar header when only the name is too long + _, paxPathUsed := paxHeaders[paxPath] + if usePrefix && !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { + prefix, suffix, ok := splitUSTARPath(hdr.Name) + if ok { + // Since we can encode in USTAR format, disable PAX header. + delete(paxHeaders, paxPath) + + // Update the path fields + formatString(v7.Name(), suffix, paxNone) + formatString(ustar.Prefix(), prefix, paxNone) + } + } + + if tw.usedBinary { + header.SetFormat(formatGNU) + } else { + header.SetFormat(formatUSTAR) + } + + // Check if there were any formatting errors. + if f.err != nil { + tw.err = f.err + return tw.err + } + + if allowPax { + for k, v := range hdr.Xattrs { + paxHeaders[paxXattr+k] = v + } + } + + if len(paxHeaders) > 0 { + if !allowPax { + return errInvalidHeader + } + if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { + return err + } + } + tw.nb = hdr.Size + tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize + + _, tw.err = tw.w.Write(header[:]) + return tw.err +} + +// splitUSTARPath splits a path according to USTAR prefix and suffix rules. +// If the path is not splittable, then it will return ("", "", false). 
+func splitUSTARPath(name string) (prefix, suffix string, ok bool) { + length := len(name) + if length <= nameSize || !isASCII(name) { + return "", "", false + } else if length > prefixSize+1 { + length = prefixSize + 1 + } else if name[length-1] == '/' { + length-- + } + + i := strings.LastIndex(name[:length], "/") + nlen := len(name) - i - 1 // nlen is length of suffix + plen := i // plen is length of prefix + if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize { + return "", "", false + } + return name[:i], name[i+1:], true +} + +// writePaxHeader writes an extended pax header to the +// archive. +func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { + // Prepare extended header + ext := new(Header) + ext.Typeflag = TypeXHeader + // Setting ModTime is required for reader parsing to + // succeed, and seems harmless enough. + ext.ModTime = hdr.ModTime + // The spec asks that we namespace our pseudo files + // with the current pid. However, this results in differing outputs + // for identical inputs. As such, the constant 0 is now used instead. + // golang.org/issue/12358 + dir, file := path.Split(hdr.Name) + fullName := path.Join(dir, "PaxHeaders.0", file) + + ascii := toASCII(fullName) + if len(ascii) > nameSize { + ascii = ascii[:nameSize] + } + ext.Name = ascii + // Construct the body + var buf bytes.Buffer + + // Keys are sorted before writing to body to allow deterministic output. + keys := make([]string, 0, len(paxHeaders)) + for k := range paxHeaders { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k])) + } + + ext.Size = int64(len(buf.Bytes())) + if err := tw.writeHeader(ext, false); err != nil { + return err + } + if _, err := tw.Write(buf.Bytes()); err != nil { + return err + } + if err := tw.Flush(); err != nil { + return err + } + return nil +} + +// Write writes to the current entry in the tar archive. 
+// Write returns the error ErrWriteTooLong if more than +// hdr.Size bytes are written after WriteHeader. +func (tw *Writer) Write(b []byte) (n int, err error) { + if tw.closed { + err = ErrWriteAfterClose + return + } + overwrite := false + if int64(len(b)) > tw.nb { + b = b[0:tw.nb] + overwrite = true + } + n, err = tw.w.Write(b) + tw.nb -= int64(n) + if err == nil && overwrite { + err = ErrWriteTooLong + return + } + tw.err = err + return +} + +// Close closes the tar archive, flushing any unwritten +// data to the underlying writer. +func (tw *Writer) Close() error { + if tw.err != nil || tw.closed { + return tw.err + } + tw.Flush() + tw.closed = true + if tw.err != nil { + return tw.err + } + + // trailer: two zero blocks + for i := 0; i < 2; i++ { + _, tw.err = tw.w.Write(zeroBlock[:]) + if tw.err != nil { + break + } + } + return tw.err +} From 8061bcd2a73c0753ef09d8e30c52e9b5ec3d9c25 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Thu, 13 Jul 2017 18:56:00 -0700 Subject: [PATCH 06/21] archive: add test for prefix header With docker-17.06.0 some images pulled do not extract properly. Some files don't appear in correct directories. This may or may not cause the pull to fail. These images can't be pushed or saved. 17.06 is the first version of Docker built with go1.8. Cause There are multiple updates to the tar package in go1.8. https://go-review.googlesource.com/c/32234/ disables using "prefix" field when new tar archives are being written. Prefix field was previously set when a record in the archive used a path longer than 100 bytes. Another change https://go-review.googlesource.com/c/31444/ makes the reader ignore the "prefix" field value if the record is in GNU format. GNU format defines that same area should be used for access and modified times. If the "prefix" field is not read, a file will only be extracted by the basename. 
The problem is that with a previous version of the golang archive package headers could be written, that use the prefix field while at the same time setting the header format to GNU. This happens when numeric fields are big enough that they can not be written as octal strings and need to be written in binary. Usually, this shouldn't happen: uid, gid, devmajor, devminor can use up to 7 bytes, size and timestamp can use 11. If one of the records does overflow it switches the whole writer to GNU mode and all next files will be saved in GNU format. Signed-off-by: Tonis Tiigi Upstream-commit: 4a3cfda45e37b81211fbfbf0c45dbe64860a3ad0 Component: engine --- components/engine/Dockerfile.aarch64 | 1 + components/engine/Dockerfile.armhf | 1 + components/engine/Dockerfile.ppc64le | 1 + components/engine/Dockerfile.s390x | 1 + components/engine/Dockerfile.simple | 1 + components/engine/pkg/archive/archive_test.go | 19 +++++++++++++++++++ 6 files changed, 24 insertions(+) diff --git a/components/engine/Dockerfile.aarch64 b/components/engine/Dockerfile.aarch64 index 7a8f5f793c..cabcda28bd 100644 --- a/components/engine/Dockerfile.aarch64 +++ b/components/engine/Dockerfile.aarch64 @@ -97,6 +97,7 @@ RUN set -x \ # bootstrap, so we use golang-go (1.6) as bootstrap to build Go from source code. # We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because # not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8. 
+# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ENV GO_VERSION 1.8.3 RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ && cd /usr/src/go/src \ diff --git a/components/engine/Dockerfile.armhf b/components/engine/Dockerfile.armhf index 6103c5a3ad..dd1f536191 100644 --- a/components/engine/Dockerfile.armhf +++ b/components/engine/Dockerfile.armhf @@ -70,6 +70,7 @@ RUN cd /usr/local/lvm2 \ # See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Install Go +# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ENV GO_VERSION 1.8.3 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ | tar -xzC /usr/local diff --git a/components/engine/Dockerfile.ppc64le b/components/engine/Dockerfile.ppc64le index e64153800c..43b84e4501 100644 --- a/components/engine/Dockerfile.ppc64le +++ b/components/engine/Dockerfile.ppc64le @@ -94,6 +94,7 @@ RUN set -x \ # Install Go # NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4 +# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ENV GO_VERSION 1.8.3 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ | tar -xzC /usr/local diff --git a/components/engine/Dockerfile.s390x b/components/engine/Dockerfile.s390x index c69da3c969..35ec683739 100644 --- a/components/engine/Dockerfile.s390x +++ b/components/engine/Dockerfile.s390x @@ -87,6 +87,7 @@ RUN cd /usr/local/lvm2 \ && make install_device-mapper # See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL +# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ENV GO_VERSION 1.8.3 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ | tar -xzC /usr/local diff --git a/components/engine/Dockerfile.simple b/components/engine/Dockerfile.simple index 4edc08f9ed..b4682d4cbc 100644 
--- a/components/engine/Dockerfile.simple +++ b/components/engine/Dockerfile.simple @@ -53,6 +53,7 @@ RUN set -x \ # IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines # will need updating, to avoid errors. Ping #docker-maintainers on IRC # with a heads-up. +# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ENV GO_VERSION 1.8.3 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ | tar -xzC /usr/local diff --git a/components/engine/pkg/archive/archive_test.go b/components/engine/pkg/archive/archive_test.go index e85878fd0f..1371b8ab12 100644 --- a/components/engine/pkg/archive/archive_test.go +++ b/components/engine/pkg/archive/archive_test.go @@ -1203,6 +1203,25 @@ func TestReplaceFileTarWrapper(t *testing.T) { } } +// TestPrefixHeaderReadable tests that files that could be created with the +// version of this package that was built with <=go17 are still readable. +func TestPrefixHeaderReadable(t *testing.T) { + // https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go + var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00") + + tmpDir, err := ioutil.TempDir("", "prefix-test") + require.NoError(t, err) + defer os.RemoveAll(tmpDir) + err = Untar(bytes.NewReader(testFile), tmpDir, nil) + require.NoError(t, err) + + baseName := "foo" + pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName + + _, err = os.Lstat(filepath.Join(tmpDir, pth)) + require.NoError(t, err) +} + func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := ioutil.TempDir("", "docker-test-srcDir") require.NoError(t, err) From 5d957152ba74766ec7d0328d789be9ccf99a4698 
Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Wed, 12 Jul 2017 16:31:13 -0400 Subject: [PATCH 07/21] Testing: Use local plugins, not from hub Use the (new) plugin fixtures for plugin tests rather than pulling plugins from hub. This removes the restriction for platforms/archs since plugin binaries get built in the test environment. Future work would be to add test plugins for the various subsystems so tests that are actually using plugins (e.g. volumes, networks) can be ported to use the fixtures as well. Signed-off-by: Brian Goff Upstream-commit: 15a538a627e1d0898862c9e6ca7472cd7fb517ce Component: engine --- .../engine/integration-cli/check_test.go | 52 ++++++ .../docker_cli_plugins_test.go | 147 +++++++++------- .../integration-cli/fixtures/plugin/plugin.go | 149 ----------------- .../fixtures/plugin/plugin_linux.go | 157 ++++++++++++++++++ .../fixtures/plugin/plugin_unsuported.go | 19 +++ .../integration-cli/trust_server_test.go | 21 ++- 6 files changed, 338 insertions(+), 207 deletions(-) create mode 100644 components/engine/integration-cli/fixtures/plugin/plugin_linux.go create mode 100644 components/engine/integration-cli/fixtures/plugin/plugin_unsuported.go diff --git a/components/engine/integration-cli/check_test.go b/components/engine/integration-cli/check_test.go index cc3b80c94f..f05b6504e9 100644 --- a/components/engine/integration-cli/check_test.go +++ b/components/engine/integration-cli/check_test.go @@ -5,21 +5,26 @@ import ( "net/http/httptest" "os" "os/exec" + "path" "path/filepath" "strings" "sync" "syscall" "testing" + "time" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/cli/config" + "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" "github.com/docker/docker/integration-cli/cli/build/fakestorage" "github.com/docker/docker/integration-cli/daemon" "github.com/docker/docker/integration-cli/environment" + "github.com/docker/docker/integration-cli/fixtures/plugin" 
"github.com/docker/docker/integration-cli/registry" "github.com/docker/docker/pkg/reexec" "github.com/go-check/check" + "golang.org/x/net/context" ) const ( @@ -442,3 +447,50 @@ func (s *DockerTrustedSwarmSuite) TearDownTest(c *check.C) { func (s *DockerTrustedSwarmSuite) OnTimeout(c *check.C) { s.swarmSuite.OnTimeout(c) } + +func init() { + check.Suite(&DockerPluginSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerPluginSuite struct { + ds *DockerSuite + registry *registry.V2 +} + +func (ps *DockerPluginSuite) registryHost() string { + return privateRegistryURL +} + +func (ps *DockerPluginSuite) getPluginRepo() string { + return path.Join(ps.registryHost(), "plugin", "basic") +} +func (ps *DockerPluginSuite) getPluginRepoWithTag() string { + return ps.getPluginRepo() + ":" + "latest" +} + +func (ps *DockerPluginSuite) SetUpSuite(c *check.C) { + testRequires(c, DaemonIsLinux) + ps.registry = setupRegistry(c, false, "", "") + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + err := plugin.CreateInRegistry(ctx, ps.getPluginRepo(), nil) + c.Assert(err, checker.IsNil, check.Commentf("failed to create plugin")) +} + +func (ps *DockerPluginSuite) TearDownSuite(c *check.C) { + if ps.registry != nil { + ps.registry.Close() + } +} + +func (ps *DockerPluginSuite) TearDownTest(c *check.C) { + ps.ds.TearDownTest(c) +} + +func (ps *DockerPluginSuite) OnTimeout(c *check.C) { + ps.ds.OnTimeout(c) +} diff --git a/components/engine/integration-cli/docker_cli_plugins_test.go b/components/engine/integration-cli/docker_cli_plugins_test.go index e1fcaf2c3e..38b4af8f1e 100644 --- a/components/engine/integration-cli/docker_cli_plugins_test.go +++ b/components/engine/integration-cli/docker_cli_plugins_test.go @@ -5,14 +5,20 @@ import ( "io/ioutil" "net/http" "os" + "path" "path/filepath" "strings" + "time" + "github.com/docker/docker/api/types" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" 
"github.com/docker/docker/integration-cli/daemon" + "github.com/docker/docker/integration-cli/fixtures/plugin" + "github.com/docker/docker/integration-cli/request" icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/go-check/check" + "golang.org/x/net/context" ) var ( @@ -24,31 +30,30 @@ var ( npNameWithTag = npName + ":" + pTag ) -func (s *DockerSuite) TestPluginBasicOps(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) - _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) +func (ps *DockerPluginSuite) TestPluginBasicOps(c *check.C) { + plugin := ps.getPluginRepoWithTag() + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", plugin) c.Assert(err, checker.IsNil) out, _, err := dockerCmdWithError("plugin", "ls") c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, pName) - c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, plugin) c.Assert(out, checker.Contains, "true") - id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", plugin) id = strings.TrimSpace(id) c.Assert(err, checker.IsNil) - out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + out, _, err = dockerCmdWithError("plugin", "remove", plugin) c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, "is enabled") - _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) + _, _, err = dockerCmdWithError("plugin", "disable", plugin) c.Assert(err, checker.IsNil) - out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) + out, _, err = dockerCmdWithError("plugin", "remove", plugin) c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, pNameWithTag) + c.Assert(out, checker.Contains, plugin) _, err = os.Stat(filepath.Join(testEnv.DockerBasePath(), "plugins", id)) if !os.IsNotExist(err) { @@ -56,8 +61,9 @@ func (s *DockerSuite) 
TestPluginBasicOps(c *check.C) { } } -func (s *DockerSuite) TestPluginForceRemove(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) +func (ps *DockerPluginSuite) TestPluginForceRemove(c *check.C) { + pNameWithTag := ps.getPluginRepoWithTag() + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) c.Assert(err, checker.IsNil) @@ -71,6 +77,7 @@ func (s *DockerSuite) TestPluginForceRemove(c *check.C) { func (s *DockerSuite) TestPluginActive(c *check.C) { testRequires(c, DaemonIsLinux, IsAmd64, Network) + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) c.Assert(err, checker.IsNil) @@ -118,8 +125,9 @@ func (s *DockerSuite) TestPluginActiveNetwork(c *check.C) { c.Assert(out, checker.Contains, npNameWithTag) } -func (s *DockerSuite) TestPluginInstallDisable(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) +func (ps *DockerPluginSuite) TestPluginInstallDisable(c *check.C) { + pName := ps.getPluginRepoWithTag() + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", pName) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Contains, pName) @@ -150,22 +158,39 @@ func (s *DockerSuite) TestPluginInstallDisableVolumeLs(c *check.C) { dockerCmd(c, "volume", "ls") } -func (s *DockerSuite) TestPluginSet(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) - out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName) - c.Assert(strings.TrimSpace(out), checker.Contains, pName) +func (ps *DockerPluginSuite) TestPluginSet(c *check.C) { + // Create a new plugin with extra settings + client, err := request.NewClient() + c.Assert(err, checker.IsNil, check.Commentf("failed to create test client")) - env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + name := "test" + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + 
defer cancel() + + initialValue := "0" + err = plugin.Create(ctx, client, name, func(cfg *plugin.Config) { + cfg.Env = []types.PluginEnv{{Name: "DEBUG", Value: &initialValue, Settable: []string{"value"}}} + }) + c.Assert(err, checker.IsNil, check.Commentf("failed to create test plugin")) + + env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", name) c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=0]") - dockerCmd(c, "plugin", "set", pName, "DEBUG=1") + dockerCmd(c, "plugin", "set", name, "DEBUG=1") - env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", pName) + env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", name) c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") } -func (s *DockerSuite) TestPluginInstallArgs(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) +func (ps *DockerPluginSuite) TestPluginInstallArgs(c *check.C) { + pName := path.Join(ps.registryHost(), "plugin", "testplugininstallwithargs") + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + plugin.CreateInRegistry(ctx, pName, nil, func(cfg *plugin.Config) { + cfg.Env = []types.PluginEnv{{Name: "DEBUG", Settable: []string{"value"}}} + }) + out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", "--disable", pName, "DEBUG=1") c.Assert(strings.TrimSpace(out), checker.Contains, pName) @@ -173,8 +198,8 @@ func (s *DockerSuite) TestPluginInstallArgs(c *check.C) { c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]") } -func (s *DockerRegistrySuite) TestPluginInstallImage(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64) +func (ps *DockerPluginSuite) TestPluginInstallImage(c *check.C) { + testRequires(c, IsAmd64) repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) // tag the image to upload it to the private registry @@ -187,8 +212,9 @@ func (s *DockerRegistrySuite) TestPluginInstallImage(c *check.C) { c.Assert(out, 
checker.Contains, `Encountered remote "application/vnd.docker.container.image.v1+json"(image) when fetching`) } -func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) +func (ps *DockerPluginSuite) TestPluginEnableDisableNegative(c *check.C) { + pName := ps.getPluginRepoWithTag() + out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pName) c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Contains, pName) @@ -208,9 +234,7 @@ func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) { c.Assert(err, checker.IsNil) } -func (s *DockerSuite) TestPluginCreate(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) - +func (ps *DockerPluginSuite) TestPluginCreate(c *check.C) { name := "foo/bar-driver" temp, err := ioutil.TempDir("", "foo") c.Assert(err, checker.IsNil) @@ -242,15 +266,15 @@ func (s *DockerSuite) TestPluginCreate(c *check.C) { c.Assert(len(strings.Split(strings.TrimSpace(out), "\n")), checker.Equals, 2) } -func (s *DockerSuite) TestPluginInspect(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) +func (ps *DockerPluginSuite) TestPluginInspect(c *check.C) { + pNameWithTag := ps.getPluginRepoWithTag() + _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) c.Assert(err, checker.IsNil) out, _, err := dockerCmdWithError("plugin", "ls") c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, pName) - c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, pNameWithTag) c.Assert(out, checker.Contains, "true") // Find the ID first @@ -275,7 +299,7 @@ func (s *DockerSuite) TestPluginInspect(c *check.C) { c.Assert(strings.TrimSpace(out), checker.Equals, id) // Name without tag form - out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pName) + out, _, err = dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", ps.getPluginRepo()) 
c.Assert(err, checker.IsNil) c.Assert(strings.TrimSpace(out), checker.Equals, id) @@ -347,21 +371,29 @@ func (s *DockerTrustSuite) TestPluginUntrustedInstall(c *check.C) { }) } -func (s *DockerSuite) TestPluginIDPrefix(c *check.C) { - testRequires(c, DaemonIsLinux, IsAmd64, Network) - _, _, err := dockerCmdWithError("plugin", "install", "--disable", "--grant-all-permissions", pNameWithTag) - c.Assert(err, checker.IsNil) +func (ps *DockerPluginSuite) TestPluginIDPrefix(c *check.C) { + name := "test" + client, err := request.NewClient() + c.Assert(err, checker.IsNil, check.Commentf("error creating test client")) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + initialValue := "0" + err = plugin.Create(ctx, client, name, func(cfg *plugin.Config) { + cfg.Env = []types.PluginEnv{{Name: "DEBUG", Value: &initialValue, Settable: []string{"value"}}} + }) + cancel() + + c.Assert(err, checker.IsNil, check.Commentf("failed to create test plugin")) // Find ID first - id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", pNameWithTag) + id, _, err := dockerCmdWithError("plugin", "inspect", "-f", "{{.Id}}", name) id = strings.TrimSpace(id) c.Assert(err, checker.IsNil) // List current state out, _, err := dockerCmdWithError("plugin", "ls") c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, pName) - c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, name) c.Assert(out, checker.Contains, "false") env, _ := dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", id[:5]) @@ -377,8 +409,7 @@ func (s *DockerSuite) TestPluginIDPrefix(c *check.C) { c.Assert(err, checker.IsNil) out, _, err = dockerCmdWithError("plugin", "ls") c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, pName) - c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, name) c.Assert(out, checker.Contains, "true") // Disable @@ -386,8 +417,7 @@ func (s *DockerSuite) TestPluginIDPrefix(c *check.C) { 
c.Assert(err, checker.IsNil) out, _, err = dockerCmdWithError("plugin", "ls") c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, pName) - c.Assert(out, checker.Contains, pTag) + c.Assert(out, checker.Contains, name) c.Assert(out, checker.Contains, "false") // Remove @@ -396,13 +426,10 @@ func (s *DockerSuite) TestPluginIDPrefix(c *check.C) { // List returns none out, _, err = dockerCmdWithError("plugin", "ls") c.Assert(err, checker.IsNil) - c.Assert(out, checker.Not(checker.Contains), pName) - c.Assert(out, checker.Not(checker.Contains), pTag) + c.Assert(out, checker.Not(checker.Contains), name) } -func (s *DockerSuite) TestPluginListDefaultFormat(c *check.C) { - testRequires(c, DaemonIsLinux, Network, IsAmd64) - +func (ps *DockerPluginSuite) TestPluginListDefaultFormat(c *check.C) { config, err := ioutil.TempDir("", "config-file-") c.Assert(err, check.IsNil) defer os.RemoveAll(config) @@ -410,17 +437,25 @@ func (s *DockerSuite) TestPluginListDefaultFormat(c *check.C) { err = ioutil.WriteFile(filepath.Join(config, "config.json"), []byte(`{"pluginsFormat": "raw"}`), 0644) c.Assert(err, check.IsNil) - out, _ := dockerCmd(c, "plugin", "install", "--grant-all-permissions", pName) - c.Assert(strings.TrimSpace(out), checker.Contains, pName) + name := "test:latest" + client, err := request.NewClient() + c.Assert(err, checker.IsNil, check.Commentf("error creating test client")) - out, _ = dockerCmd(c, "plugin", "inspect", "--format", "{{.ID}}", pNameWithTag) + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + err = plugin.Create(ctx, client, name, func(cfg *plugin.Config) { + cfg.Description = "test plugin" + }) + c.Assert(err, checker.IsNil, check.Commentf("failed to create test plugin")) + + out, _ := dockerCmd(c, "plugin", "inspect", "--format", "{{.ID}}", name) id := strings.TrimSpace(out) // We expect the format to be in `raw + --no-trunc` expectedOutput := fmt.Sprintf(`plugin_id: %s name: %s -description: A sample 
volume plugin for Docker -enabled: true`, id, pNameWithTag) +description: test plugin +enabled: false`, id, name) out, _ = dockerCmd(c, "--config", config, "plugin", "ls", "--no-trunc") c.Assert(strings.TrimSpace(out), checker.Contains, expectedOutput) diff --git a/components/engine/integration-cli/fixtures/plugin/plugin.go b/components/engine/integration-cli/fixtures/plugin/plugin.go index 1be6169735..c8259be1a7 100644 --- a/components/engine/integration-cli/fixtures/plugin/plugin.go +++ b/components/engine/integration-cli/fixtures/plugin/plugin.go @@ -1,20 +1,9 @@ package plugin import ( - "encoding/json" "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/plugin" - "github.com/docker/docker/registry" - "github.com/pkg/errors" "golang.org/x/net/context" ) @@ -43,141 +32,3 @@ func WithBinary(bin string) CreateOpt { type CreateClient interface { PluginCreate(context.Context, io.Reader, types.PluginCreateOptions) error } - -// Create creates a new plugin with the specified name -func Create(ctx context.Context, c CreateClient, name string, opts ...CreateOpt) error { - tmpDir, err := ioutil.TempDir("", "create-test-plugin") - if err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - tar, err := makePluginBundle(tmpDir, opts...) - if err != nil { - return err - } - defer tar.Close() - - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - return c.PluginCreate(ctx, tar, types.PluginCreateOptions{RepoName: name}) -} - -// TODO(@cpuguy83): we really shouldn't have to do this... -// The manager panics on init when `Executor` is not set. 
-type dummyExecutor struct{} - -func (dummyExecutor) Client(libcontainerd.Backend) (libcontainerd.Client, error) { return nil, nil } -func (dummyExecutor) Cleanup() {} -func (dummyExecutor) UpdateOptions(...libcontainerd.RemoteOption) error { return nil } - -// CreateInRegistry makes a plugin (locally) and pushes it to a registry. -// This does not use a dockerd instance to create or push the plugin. -// If you just want to create a plugin in some daemon, use `Create`. -// -// This can be useful when testing plugins on swarm where you don't really want -// the plugin to exist on any of the daemons (immediately) and there needs to be -// some way to distribute the plugin. -func CreateInRegistry(ctx context.Context, repo string, auth *types.AuthConfig, opts ...CreateOpt) error { - tmpDir, err := ioutil.TempDir("", "create-test-plugin-local") - if err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - inPath := filepath.Join(tmpDir, "plugin") - if err := os.MkdirAll(inPath, 0755); err != nil { - return errors.Wrap(err, "error creating plugin root") - } - - tar, err := makePluginBundle(inPath, opts...) 
- if err != nil { - return err - } - defer tar.Close() - - managerConfig := plugin.ManagerConfig{ - Store: plugin.NewStore(), - RegistryService: registry.NewService(registry.ServiceOptions{V2Only: true}), - Root: filepath.Join(tmpDir, "root"), - ExecRoot: "/run/docker", // manager init fails if not set - Executor: dummyExecutor{}, - LogPluginEvent: func(id, name, action string) {}, // panics when not set - } - manager, err := plugin.NewManager(managerConfig) - if err != nil { - return errors.Wrap(err, "error creating plugin manager") - } - - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - if err := manager.CreateFromContext(ctx, tar, &types.PluginCreateOptions{RepoName: repo}); err != nil { - return err - } - - if auth == nil { - auth = &types.AuthConfig{} - } - err = manager.Push(ctx, repo, nil, auth, ioutil.Discard) - return errors.Wrap(err, "error pushing plugin") -} - -func makePluginBundle(inPath string, opts ...CreateOpt) (io.ReadCloser, error) { - p := &types.PluginConfig{ - Interface: types.PluginConfigInterface{ - Socket: "basic.sock", - Types: []types.PluginInterfaceType{{Capability: "docker.dummy/1.0"}}, - }, - Entrypoint: []string{"/basic"}, - } - cfg := &Config{ - PluginConfig: p, - } - for _, o := range opts { - o(cfg) - } - if cfg.binPath == "" { - binPath, err := ensureBasicPluginBin() - if err != nil { - return nil, err - } - cfg.binPath = binPath - } - - configJSON, err := json.Marshal(p) - if err != nil { - return nil, err - } - if err := ioutil.WriteFile(filepath.Join(inPath, "config.json"), configJSON, 0644); err != nil { - return nil, err - } - if err := os.MkdirAll(filepath.Join(inPath, "rootfs", filepath.Dir(p.Entrypoint[0])), 0755); err != nil { - return nil, errors.Wrap(err, "error creating plugin rootfs dir") - } - if err := archive.NewDefaultArchiver().CopyFileWithTar(cfg.binPath, filepath.Join(inPath, "rootfs", p.Entrypoint[0])); err != nil { - return nil, errors.Wrap(err, "error copying plugin binary to 
rootfs path") - } - tar, err := archive.Tar(inPath, archive.Uncompressed) - return tar, errors.Wrap(err, "error making plugin archive") -} - -func ensureBasicPluginBin() (string, error) { - name := "docker-basic-plugin" - p, err := exec.LookPath(name) - if err == nil { - return p, nil - } - - goBin, err := exec.LookPath("go") - if err != nil { - return "", err - } - installPath := filepath.Join(os.Getenv("GOPATH"), "bin", name) - cmd := exec.Command(goBin, "build", "-o", installPath, "./"+filepath.Join("fixtures", "plugin", "basic")) - cmd.Env = append(cmd.Env, "CGO_ENABLED=0") - if out, err := cmd.CombinedOutput(); err != nil { - return "", errors.Wrapf(err, "error building basic plugin bin: %s", string(out)) - } - return installPath, nil -} diff --git a/components/engine/integration-cli/fixtures/plugin/plugin_linux.go b/components/engine/integration-cli/fixtures/plugin/plugin_linux.go new file mode 100644 index 0000000000..757694cd37 --- /dev/null +++ b/components/engine/integration-cli/fixtures/plugin/plugin_linux.go @@ -0,0 +1,157 @@ +package plugin + +import ( + "encoding/json" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/libcontainerd" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/plugin" + "github.com/docker/docker/registry" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// Create creates a new plugin with the specified name +func Create(ctx context.Context, c CreateClient, name string, opts ...CreateOpt) error { + tmpDir, err := ioutil.TempDir("", "create-test-plugin") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + tar, err := makePluginBundle(tmpDir, opts...) 
+ if err != nil { + return err + } + defer tar.Close() + + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + return c.PluginCreate(ctx, tar, types.PluginCreateOptions{RepoName: name}) +} + +// TODO(@cpuguy83): we really shouldn't have to do this... +// The manager panics on init when `Executor` is not set. +type dummyExecutor struct{} + +func (dummyExecutor) Client(libcontainerd.Backend) (libcontainerd.Client, error) { return nil, nil } +func (dummyExecutor) Cleanup() {} +func (dummyExecutor) UpdateOptions(...libcontainerd.RemoteOption) error { return nil } + +// CreateInRegistry makes a plugin (locally) and pushes it to a registry. +// This does not use a dockerd instance to create or push the plugin. +// If you just want to create a plugin in some daemon, use `Create`. +// +// This can be useful when testing plugins on swarm where you don't really want +// the plugin to exist on any of the daemons (immediately) and there needs to be +// some way to distribute the plugin. +func CreateInRegistry(ctx context.Context, repo string, auth *types.AuthConfig, opts ...CreateOpt) error { + tmpDir, err := ioutil.TempDir("", "create-test-plugin-local") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + inPath := filepath.Join(tmpDir, "plugin") + if err := os.MkdirAll(inPath, 0755); err != nil { + return errors.Wrap(err, "error creating plugin root") + } + + tar, err := makePluginBundle(inPath, opts...) 
+ if err != nil { + return err + } + defer tar.Close() + + managerConfig := plugin.ManagerConfig{ + Store: plugin.NewStore(), + RegistryService: registry.NewService(registry.ServiceOptions{V2Only: true}), + Root: filepath.Join(tmpDir, "root"), + ExecRoot: "/run/docker", // manager init fails if not set + Executor: dummyExecutor{}, + LogPluginEvent: func(id, name, action string) {}, // panics when not set + } + manager, err := plugin.NewManager(managerConfig) + if err != nil { + return errors.Wrap(err, "error creating plugin manager") + } + + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + if err := manager.CreateFromContext(ctx, tar, &types.PluginCreateOptions{RepoName: repo}); err != nil { + return err + } + + if auth == nil { + auth = &types.AuthConfig{} + } + err = manager.Push(ctx, repo, nil, auth, ioutil.Discard) + return errors.Wrap(err, "error pushing plugin") +} + +func makePluginBundle(inPath string, opts ...CreateOpt) (io.ReadCloser, error) { + p := &types.PluginConfig{ + Interface: types.PluginConfigInterface{ + Socket: "basic.sock", + Types: []types.PluginInterfaceType{{Capability: "docker.dummy/1.0"}}, + }, + Entrypoint: []string{"/basic"}, + } + cfg := &Config{ + PluginConfig: p, + } + for _, o := range opts { + o(cfg) + } + if cfg.binPath == "" { + binPath, err := ensureBasicPluginBin() + if err != nil { + return nil, err + } + cfg.binPath = binPath + } + + configJSON, err := json.Marshal(p) + if err != nil { + return nil, err + } + if err := ioutil.WriteFile(filepath.Join(inPath, "config.json"), configJSON, 0644); err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Join(inPath, "rootfs", filepath.Dir(p.Entrypoint[0])), 0755); err != nil { + return nil, errors.Wrap(err, "error creating plugin rootfs dir") + } + if err := archive.NewDefaultArchiver().CopyFileWithTar(cfg.binPath, filepath.Join(inPath, "rootfs", p.Entrypoint[0])); err != nil { + return nil, errors.Wrap(err, "error copying plugin binary to 
rootfs path") + } + tar, err := archive.Tar(inPath, archive.Uncompressed) + return tar, errors.Wrap(err, "error making plugin archive") +} + +func ensureBasicPluginBin() (string, error) { + name := "docker-basic-plugin" + p, err := exec.LookPath(name) + if err == nil { + return p, nil + } + + goBin, err := exec.LookPath("go") + if err != nil { + return "", err + } + installPath := filepath.Join(os.Getenv("GOPATH"), "bin", name) + cmd := exec.Command(goBin, "build", "-o", installPath, "./"+filepath.Join("fixtures", "plugin", "basic")) + cmd.Env = append(cmd.Env, "CGO_ENABLED=0") + if out, err := cmd.CombinedOutput(); err != nil { + return "", errors.Wrapf(err, "error building basic plugin bin: %s", string(out)) + } + return installPath, nil +} diff --git a/components/engine/integration-cli/fixtures/plugin/plugin_unsuported.go b/components/engine/integration-cli/fixtures/plugin/plugin_unsuported.go new file mode 100644 index 0000000000..7c272a317f --- /dev/null +++ b/components/engine/integration-cli/fixtures/plugin/plugin_unsuported.go @@ -0,0 +1,19 @@ +// +build !linux + +package plugin + +import ( + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// Create is not supported on this platform +func Create(ctx context.Context, c CreateClient, name string, opts ...CreateOpt) error { + return errors.New("not supported on this platform") +} + +// CreateInRegistry is not supported on this platform +func CreateInRegistry(ctx context.Context, repo string, auth *types.AuthConfig, opts ...CreateOpt) error { + return errors.New("not supported on this platform") +} diff --git a/components/engine/integration-cli/trust_server_test.go b/components/engine/integration-cli/trust_server_test.go index e3f0674cf3..9a999323f8 100644 --- a/components/engine/integration-cli/trust_server_test.go +++ b/components/engine/integration-cli/trust_server_test.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "io/ioutil" "net" @@ -11,9 
+12,12 @@ import ( "strings" "time" + "github.com/docker/docker/api/types" cliconfig "github.com/docker/docker/cli/config" "github.com/docker/docker/integration-cli/checker" "github.com/docker/docker/integration-cli/cli" + "github.com/docker/docker/integration-cli/fixtures/plugin" + "github.com/docker/docker/integration-cli/request" icmd "github.com/docker/docker/pkg/testutil/cmd" "github.com/docker/go-connections/tlsconfig" "github.com/go-check/check" @@ -225,10 +229,23 @@ func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string { func (s *DockerTrustSuite) setupTrustedplugin(c *check.C, source, name string) string { repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) + + client, err := request.NewClient() + c.Assert(err, checker.IsNil, check.Commentf("could not create test client")) + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + err = plugin.Create(ctx, client, repoName) + cancel() + c.Assert(err, checker.IsNil, check.Commentf("could not create test plugin")) + // tag the image and upload it to the private registry - cli.DockerCmd(c, "plugin", "install", "--grant-all-permissions", "--alias", repoName, source) + // TODO: shouldn't need to use the CLI to do trust cli.Docker(cli.Args("plugin", "push", repoName), trustedCmd).Assert(c, SuccessSigningAndPushing) - cli.DockerCmd(c, "plugin", "rm", "-f", repoName) + + ctx, cancel = context.WithTimeout(context.Background(), 60*time.Second) + err = client.PluginRemove(ctx, repoName, types.PluginRemoveOptions{Force: true}) + cancel() + c.Assert(err, checker.IsNil, check.Commentf("failed to cleanup test plugin for trust suite")) return repoName } From 5b6e1a1aa33a834454cbfe8e0e090fd04d44787e Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Thu, 6 Jul 2017 18:59:29 -0700 Subject: [PATCH 08/21] Service privileges: API docs This documents the Service privileges API changes, that were added in: 091b5e68ea735bf4e8ece708bbc8c413a32eab73 
Signed-off-by: Sebastiaan van Stijn Upstream-commit: d0a8e73e7b60f61db0c3799643aaccbbf33f3601 Component: engine --- components/engine/api/swagger.yaml | 51 +++++++++++++++++++ components/engine/docs/api/version-history.md | 2 + 2 files changed, 53 insertions(+) diff --git a/components/engine/api/swagger.yaml b/components/engine/api/swagger.yaml index 7e451167e8..253884db2e 100644 --- a/components/engine/api/swagger.yaml +++ b/components/engine/api/swagger.yaml @@ -2043,6 +2043,57 @@ definitions: description: "A list of additional groups that the container process will run as." items: type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + File: + type: "string" + description: | + Load credential spec from this file. The file is read by the daemon, and must be present in the + `CredentialSpecs` subdirectory in the docker data directory, which defaults to + `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +


+ + > **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows registry. The specified registry value must be + located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +


+ + + > **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" TTY: description: "Whether a pseudo-TTY should be allocated." type: "boolean" diff --git a/components/engine/docs/api/version-history.md b/components/engine/docs/api/version-history.md index 0f33ffecbb..b65931d5d0 100644 --- a/components/engine/docs/api/version-history.md +++ b/components/engine/docs/api/version-history.md @@ -62,6 +62,8 @@ keywords: "API, Docker, rcli, REST, documentation" * `POST /containers/create`, `POST /service/create` and `POST /services/(id or name)/update` now takes the field `StartPeriod` as a part of the `HealthConfig` allowing for specification of a period during which the container should not be considered unhealthy even if health checks do not pass. * `GET /services/(id)` now accepts an `insertDefaults` query-parameter to merge default values into the service inspect output. * `POST /containers/prune`, `POST /images/prune`, `POST /volumes/prune`, and `POST /networks/prune` now support a `label` filter to filter containers, images, volumes, or networks based on the label. The format of the label filter could be `label=`/`label==` to remove those with the specified labels, or `label!=`/`label!==` to remove those without the specified labels. +* `POST /services/create` now accepts `Privileges` as part of `ContainerSpec`. Privileges currently include + `CredentialSpec` and `SELinuxContext`. 
## v1.28 API changes From ed052d78fda5f3668e9e66b82f03456fca015d19 Mon Sep 17 00:00:00 2001 From: Liron Levin Date: Sat, 15 Jul 2017 16:03:17 +0300 Subject: [PATCH 09/21] pluggable secret backend This commit extends SwarmKit secret management with pluggable secret backends support. Updating the work in [swarmkit](docker/swarmkit@eebac27434d34708fac993f9f5181d106c5c2fae) for pluggable secret backend and adding the driver parameter to `SecretSpec`. Remaining work: - [ ] CLI support (docker/cli) - [ ] api in [plugin helpers](docker/go-plugins-helpers)) - [ ] Reference plugin - [ ] Documenation (after cli work) Signed-off-by: Liron Levin Upstream-commit: 7d45cafd5746e847e58078aa2fbdde57b5f49fa4 Component: engine --- components/engine/api/types/swarm/secret.go | 3 +- .../engine/daemon/cluster/convert/secret.go | 4 +- components/engine/vendor.conf | 2 +- .../docker/swarmkit/api/specs.pb.go | 295 +++++++++++------- .../docker/swarmkit/api/specs.proto | 3 + .../docker/swarmkit/api/validation/secrets.go | 14 + .../swarmkit/manager/controlapi/secret.go | 18 +- .../swarmkit/manager/controlapi/server.go | 1 - .../manager/dispatcher/assignments.go | 44 ++- .../swarmkit/manager/dispatcher/dispatcher.go | 7 +- .../swarmkit/manager/drivers/provider.go | 34 ++ .../swarmkit/manager/drivers/secrets.go | 55 ++++ .../docker/swarmkit/manager/manager.go | 3 +- .../manager/scheduler/decision_tree.go | 5 +- 14 files changed, 344 insertions(+), 144 deletions(-) create mode 100644 components/engine/vendor/github.com/docker/swarmkit/api/validation/secrets.go create mode 100644 components/engine/vendor/github.com/docker/swarmkit/manager/drivers/provider.go create mode 100644 components/engine/vendor/github.com/docker/swarmkit/manager/drivers/secrets.go diff --git a/components/engine/api/types/swarm/secret.go b/components/engine/api/types/swarm/secret.go index fdb2388888..91f3578428 100644 --- a/components/engine/api/types/swarm/secret.go +++ b/components/engine/api/types/swarm/secret.go @@ 
-12,7 +12,8 @@ type Secret struct { // SecretSpec represents a secret specification from a secret in swarm type SecretSpec struct { Annotations - Data []byte `json:",omitempty"` + Data []byte `json:",omitempty"` + Driver *Driver `json:"omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store } // SecretReferenceFileTarget is a file target in a secret reference diff --git a/components/engine/daemon/cluster/convert/secret.go b/components/engine/daemon/cluster/convert/secret.go index 91d67736ad..edbf8e5932 100644 --- a/components/engine/daemon/cluster/convert/secret.go +++ b/components/engine/daemon/cluster/convert/secret.go @@ -13,6 +13,7 @@ func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret { Spec: swarmtypes.SecretSpec{ Annotations: annotationsFromGRPC(s.Spec.Annotations), Data: s.Spec.Data, + Driver: driverFromGRPC(s.Spec.Driver), }, } @@ -31,7 +32,8 @@ func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec { Name: s.Name, Labels: s.Labels, }, - Data: s.Data, + Data: s.Data, + Driver: driverToGRPC(s.Driver), } } diff --git a/components/engine/vendor.conf b/components/engine/vendor.conf index eed5fe3f74..6353a46429 100644 --- a/components/engine/vendor.conf +++ b/components/engine/vendor.conf @@ -106,7 +106,7 @@ github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb # cluster -github.com/docker/swarmkit a3d96fe13e30e46c3d4cfc3f316ebdd8446a079d +github.com/docker/swarmkit 3e2dd3c0a76149b1620b42d28dd6ff48270404e5 github.com/gogo/protobuf v0.4 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e diff --git a/components/engine/vendor/github.com/docker/swarmkit/api/specs.pb.go b/components/engine/vendor/github.com/docker/swarmkit/api/specs.pb.go index 8578cf3849..bb9b0db918 100644 --- 
a/components/engine/vendor/github.com/docker/swarmkit/api/specs.pb.go +++ b/components/engine/vendor/github.com/docker/swarmkit/api/specs.pb.go @@ -766,6 +766,8 @@ type SecretSpec struct { // The currently recognized values are: // - golang: Go templating Templating *Driver `protobuf:"bytes,3,opt,name=templating" json:"templating,omitempty"` + // Driver is the the secret driver that is used to store the specified secret + Driver *Driver `protobuf:"bytes,4,opt,name=driver" json:"driver,omitempty"` } func (m *SecretSpec) Reset() { *m = SecretSpec{} } @@ -1240,6 +1242,10 @@ func (m *SecretSpec) CopyFrom(src interface{}) { m.Templating = &Driver{} github_com_docker_swarmkit_api_deepcopy.Copy(m.Templating, o.Templating) } + if o.Driver != nil { + m.Driver = &Driver{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Driver, o.Driver) + } } func (m *ConfigSpec) Copy() *ConfigSpec { @@ -2257,6 +2263,16 @@ func (m *SecretSpec) MarshalTo(dAtA []byte) (int, error) { } i += n37 } + if m.Driver != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Driver.Size())) + n38, err := m.Driver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } return i, nil } @@ -2278,11 +2294,11 @@ func (m *ConfigSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) - n38, err := m.Annotations.MarshalTo(dAtA[i:]) + n39, err := m.Annotations.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n39 if len(m.Data) > 0 { dAtA[i] = 0x12 i++ @@ -2293,11 +2309,11 @@ func (m *ConfigSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintSpecs(dAtA, i, uint64(m.Templating.Size())) - n39, err := m.Templating.MarshalTo(dAtA[i:]) + n40, err := m.Templating.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n40 } return i, nil } @@ -2729,6 +2745,10 @@ func (m *SecretSpec) Size() (n int) { l = m.Templating.Size() n += 1 + l + sovSpecs(uint64(l)) } 
+ if m.Driver != nil { + l = m.Driver.Size() + n += 1 + l + sovSpecs(uint64(l)) + } return n } @@ -3022,6 +3042,7 @@ func (this *SecretSpec) String() string { `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, `Data:` + fmt.Sprintf("%v", this.Data) + `,`, `Templating:` + strings.Replace(fmt.Sprintf("%v", this.Templating), "Driver", "Driver", 1) + `,`, + `Driver:` + strings.Replace(fmt.Sprintf("%v", this.Driver), "Driver", "Driver", 1) + `,`, `}`, }, "") return s @@ -5883,6 +5904,39 @@ func (m *SecretSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Driver == nil { + m.Driver = &Driver{} + } + if err := m.Driver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipSpecs(dAtA[iNdEx:]) @@ -6156,122 +6210,123 @@ var ( func init() { proto.RegisterFile("specs.proto", fileDescriptorSpecs) } var fileDescriptorSpecs = []byte{ - // 1867 bytes of a gzipped FileDescriptorProto + // 1880 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xcf, 0x73, 0x1b, 0x49, - 0x15, 0xb6, 0x6c, 0x59, 0x3f, 0xde, 0xc8, 0x89, 0xd2, 0x24, 0x61, 0xa2, 0xb0, 0xb2, 0xa2, 0x0d, + 0x15, 0xb6, 0x6c, 0x59, 0x3f, 0xde, 0xc8, 0x89, 0xdc, 0x24, 0x61, 0xac, 0xb0, 0xb2, 0xa2, 0x0d, 0xc1, 0xcb, 0x16, 0x72, 0x61, 0xa8, 0x25, 0xbb, 0x61, 0x01, 0xc9, 0x12, 0x8e, 0x31, 0x76, 0x54, 
- 0x6d, 0x6f, 0x20, 0x27, 0x55, 0x7b, 0xa6, 0x3d, 0x9a, 0xf2, 0xa8, 0x7b, 0xe8, 0xe9, 0xd1, 0x96, - 0x6e, 0x1c, 0xb7, 0x72, 0xe5, 0xec, 0xe2, 0x40, 0xf1, 0xbf, 0xe4, 0x48, 0x71, 0xe2, 0xe4, 0x62, - 0xfd, 0x2f, 0x70, 0xe3, 0x02, 0xd5, 0x3d, 0x3d, 0xd2, 0x28, 0x19, 0x27, 0xa9, 0x22, 0x07, 0x6e, - 0xdd, 0xaf, 0xbf, 0xef, 0xcd, 0xeb, 0xd7, 0x5f, 0xf7, 0x7b, 0x03, 0x56, 0x14, 0x52, 0x27, 0xea, - 0x84, 0x82, 0x4b, 0x8e, 0x90, 0xcb, 0x9d, 0x73, 0x2a, 0x3a, 0xd1, 0xd7, 0x44, 0x4c, 0xce, 0x7d, - 0xd9, 0x99, 0xfe, 0xb8, 0x61, 0xc9, 0x59, 0x48, 0x0d, 0xa0, 0x71, 0xdb, 0xe3, 0x1e, 0xd7, 0xc3, - 0x6d, 0x35, 0x32, 0xd6, 0xa6, 0xc7, 0xb9, 0x17, 0xd0, 0x6d, 0x3d, 0x3b, 0x8d, 0xcf, 0xb6, 0xdd, - 0x58, 0x10, 0xe9, 0x73, 0x66, 0xd6, 0xef, 0xbd, 0xbe, 0x4e, 0xd8, 0x2c, 0x59, 0x6a, 0x5f, 0x14, - 0xa1, 0x72, 0xc4, 0x5d, 0x7a, 0x1c, 0x52, 0x07, 0xed, 0x81, 0x45, 0x18, 0xe3, 0x52, 0x73, 0x23, - 0xbb, 0xd0, 0x2a, 0x6c, 0x59, 0x3b, 0x9b, 0x9d, 0x37, 0x83, 0xea, 0x74, 0x17, 0xb0, 0x5e, 0xf1, - 0xd5, 0xe5, 0xe6, 0x0a, 0xce, 0x32, 0xd1, 0x2f, 0xa1, 0xe6, 0xd2, 0xc8, 0x17, 0xd4, 0x1d, 0x09, - 0x1e, 0x50, 0x7b, 0xb5, 0x55, 0xd8, 0xba, 0xb1, 0xf3, 0xbd, 0x3c, 0x4f, 0xea, 0xe3, 0x98, 0x07, - 0x14, 0x5b, 0x86, 0xa1, 0x26, 0x68, 0x0f, 0x60, 0x42, 0x27, 0xa7, 0x54, 0x44, 0x63, 0x3f, 0xb4, - 0xd7, 0x34, 0xfd, 0x07, 0xd7, 0xd1, 0x55, 0xec, 0x9d, 0xc3, 0x39, 0x1c, 0x67, 0xa8, 0xe8, 0x10, - 0x6a, 0x64, 0x4a, 0xfc, 0x80, 0x9c, 0xfa, 0x81, 0x2f, 0x67, 0x76, 0x51, 0xbb, 0xfa, 0xe4, 0xad, - 0xae, 0xba, 0x19, 0x02, 0x5e, 0xa2, 0xb7, 0x5d, 0x80, 0xc5, 0x87, 0xd0, 0x23, 0x28, 0x0f, 0x07, - 0x47, 0xfd, 0xfd, 0xa3, 0xbd, 0xfa, 0x4a, 0xe3, 0xde, 0xcb, 0x8b, 0xd6, 0x1d, 0xe5, 0x63, 0x01, - 0x18, 0x52, 0xe6, 0xfa, 0xcc, 0x43, 0x5b, 0x50, 0xe9, 0xee, 0xee, 0x0e, 0x86, 0x27, 0x83, 0x7e, - 0xbd, 0xd0, 0x68, 0xbc, 0xbc, 0x68, 0xdd, 0x5d, 0x06, 0x76, 0x1d, 0x87, 0x86, 0x92, 0xba, 0x8d, - 0xe2, 0x37, 0x7f, 0x69, 0xae, 0xb4, 0xbf, 0x29, 0x40, 0x2d, 0x1b, 0x04, 0x7a, 0x04, 0xa5, 0xee, - 0xee, 0xc9, 0xfe, 0xf3, 0x41, 0x7d, 
0x65, 0x41, 0xcf, 0x22, 0xba, 0x8e, 0xf4, 0xa7, 0x14, 0x3d, - 0x84, 0xf5, 0x61, 0xf7, 0xab, 0xe3, 0x41, 0xbd, 0xb0, 0x08, 0x27, 0x0b, 0x1b, 0x92, 0x38, 0xd2, - 0xa8, 0x3e, 0xee, 0xee, 0x1f, 0xd5, 0x57, 0xf3, 0x51, 0x7d, 0x41, 0x7c, 0x66, 0x42, 0xf9, 0x73, - 0x11, 0xac, 0x63, 0x2a, 0xa6, 0xbe, 0xf3, 0x81, 0x25, 0xf2, 0x19, 0x14, 0x25, 0x89, 0xce, 0xb5, - 0x34, 0xac, 0x7c, 0x69, 0x9c, 0x90, 0xe8, 0x5c, 0x7d, 0xd4, 0xd0, 0x35, 0x5e, 0x29, 0x43, 0xd0, - 0x30, 0xf0, 0x1d, 0x22, 0xa9, 0xab, 0x95, 0x61, 0xed, 0x7c, 0x3f, 0x8f, 0x8d, 0xe7, 0x28, 0x13, - 0xff, 0xd3, 0x15, 0x9c, 0xa1, 0xa2, 0x27, 0x50, 0xf2, 0x02, 0x7e, 0x4a, 0x02, 0xad, 0x09, 0x6b, - 0xe7, 0x41, 0x9e, 0x93, 0x3d, 0x8d, 0x58, 0x38, 0x30, 0x14, 0xf4, 0x18, 0x4a, 0x71, 0xe8, 0x12, - 0x49, 0xed, 0x92, 0x26, 0xb7, 0xf2, 0xc8, 0x5f, 0x69, 0xc4, 0x2e, 0x67, 0x67, 0xbe, 0x87, 0x0d, - 0x1e, 0x1d, 0x40, 0x85, 0x51, 0xf9, 0x35, 0x17, 0xe7, 0x91, 0x5d, 0x6e, 0xad, 0x6d, 0x59, 0x3b, - 0x9f, 0xe6, 0x8a, 0x31, 0xc1, 0x74, 0xa5, 0x24, 0xce, 0x78, 0x42, 0x99, 0x4c, 0xdc, 0xf4, 0x56, - 0xed, 0x02, 0x9e, 0x3b, 0x40, 0x3f, 0x87, 0x0a, 0x65, 0x6e, 0xc8, 0x7d, 0x26, 0xed, 0xca, 0xf5, - 0x81, 0x0c, 0x0c, 0x46, 0x25, 0x13, 0xcf, 0x19, 0x8a, 0x2d, 0x78, 0x10, 0x9c, 0x12, 0xe7, 0xdc, - 0xae, 0xbe, 0xe7, 0x36, 0xe6, 0x8c, 0x5e, 0x09, 0x8a, 0x13, 0xee, 0xd2, 0xf6, 0x36, 0xdc, 0x7a, - 0x23, 0xd5, 0xa8, 0x01, 0x15, 0x93, 0xea, 0x44, 0x23, 0x45, 0x3c, 0x9f, 0xb7, 0x6f, 0xc2, 0xc6, - 0x52, 0x5a, 0xdb, 0x7f, 0x2f, 0x42, 0x25, 0x3d, 0x6b, 0xd4, 0x85, 0xaa, 0xc3, 0x99, 0x24, 0x3e, - 0xa3, 0xc2, 0xc8, 0x2b, 0xf7, 0x64, 0x76, 0x53, 0x90, 0x62, 0x3d, 0x5d, 0xc1, 0x0b, 0x16, 0xfa, - 0x35, 0x54, 0x05, 0x8d, 0x78, 0x2c, 0x1c, 0x1a, 0x19, 0x7d, 0x6d, 0xe5, 0x2b, 0x24, 0x01, 0x61, - 0xfa, 0x87, 0xd8, 0x17, 0x54, 0x65, 0x39, 0xc2, 0x0b, 0x2a, 0x7a, 0x02, 0x65, 0x41, 0x23, 0x49, - 0x84, 0x7c, 0x9b, 0x44, 0x70, 0x02, 0x19, 0xf2, 0xc0, 0x77, 0x66, 0x38, 0x65, 0xa0, 0x27, 0x50, - 0x0d, 0x03, 0xe2, 0x68, 0xaf, 0xf6, 0xba, 0xa6, 0x7f, 0x94, 0x47, 0x1f, 
0xa6, 0x20, 0xbc, 0xc0, - 0xa3, 0xcf, 0x01, 0x02, 0xee, 0x8d, 0x5c, 0xe1, 0x4f, 0xa9, 0x30, 0x12, 0x6b, 0xe4, 0xb1, 0xfb, - 0x1a, 0x81, 0xab, 0x01, 0xf7, 0x92, 0x21, 0xda, 0xfb, 0x9f, 0xf4, 0x95, 0xd1, 0xd6, 0x01, 0x00, - 0x99, 0xaf, 0x1a, 0x75, 0x7d, 0xf2, 0x5e, 0xae, 0xcc, 0x89, 0x64, 0xe8, 0xe8, 0x01, 0xd4, 0xce, - 0xb8, 0x70, 0xe8, 0xc8, 0xdc, 0x9a, 0xaa, 0xd6, 0x84, 0xa5, 0x6d, 0x89, 0xbe, 0x50, 0x0f, 0xca, - 0x1e, 0x65, 0x54, 0xf8, 0x8e, 0x0d, 0xfa, 0x63, 0x8f, 0x72, 0x2f, 0x64, 0x02, 0xc1, 0x31, 0x93, - 0xfe, 0x84, 0x9a, 0x2f, 0xa5, 0xc4, 0x5e, 0x15, 0xca, 0x22, 0x59, 0x69, 0xff, 0x1e, 0xd0, 0x9b, - 0x58, 0x84, 0xa0, 0x78, 0xee, 0x33, 0x57, 0x0b, 0xab, 0x8a, 0xf5, 0x18, 0x75, 0xa0, 0x1c, 0x92, - 0x59, 0xc0, 0x89, 0x6b, 0xc4, 0x72, 0xbb, 0x93, 0xd4, 0xcb, 0x4e, 0x5a, 0x2f, 0x3b, 0x5d, 0x36, - 0xc3, 0x29, 0xa8, 0x7d, 0x00, 0x77, 0x72, 0xb7, 0x8c, 0x76, 0xa0, 0x36, 0x17, 0xe1, 0xc8, 0x37, - 0x1f, 0xe9, 0xdd, 0xbc, 0xba, 0xdc, 0xb4, 0xe6, 0x6a, 0xdd, 0xef, 0x63, 0x6b, 0x0e, 0xda, 0x77, - 0xdb, 0x7f, 0xaa, 0xc2, 0xc6, 0x92, 0x94, 0xd1, 0x6d, 0x58, 0xf7, 0x27, 0xc4, 0xa3, 0x26, 0xc6, - 0x64, 0x82, 0x06, 0x50, 0x0a, 0xc8, 0x29, 0x0d, 0x94, 0xa0, 0xd5, 0xa1, 0xfe, 0xe8, 0x9d, 0x77, - 0xa2, 0xf3, 0x5b, 0x8d, 0x1f, 0x30, 0x29, 0x66, 0xd8, 0x90, 0x91, 0x0d, 0x65, 0x87, 0x4f, 0x26, - 0x84, 0xa9, 0xa7, 0x73, 0x6d, 0xab, 0x8a, 0xd3, 0xa9, 0xca, 0x0c, 0x11, 0x5e, 0x64, 0x17, 0xb5, - 0x59, 0x8f, 0x51, 0x1d, 0xd6, 0x28, 0x9b, 0xda, 0xeb, 0xda, 0xa4, 0x86, 0xca, 0xe2, 0xfa, 0x89, - 0x22, 0xab, 0x58, 0x0d, 0x15, 0x2f, 0x8e, 0xa8, 0xb0, 0xcb, 0x49, 0x46, 0xd5, 0x18, 0xfd, 0x0c, - 0x4a, 0x13, 0x1e, 0x33, 0x19, 0xd9, 0x15, 0x1d, 0xec, 0xbd, 0xbc, 0x60, 0x0f, 0x15, 0xc2, 0x3c, - 0xed, 0x06, 0x8e, 0x06, 0x70, 0x2b, 0x92, 0x3c, 0x1c, 0x79, 0x82, 0x38, 0x74, 0x14, 0x52, 0xe1, - 0x73, 0xd7, 0x3c, 0x4d, 0xf7, 0xde, 0x38, 0x94, 0xbe, 0x69, 0x72, 0xf0, 0x4d, 0xc5, 0xd9, 0x53, - 0x94, 0xa1, 0x66, 0xa0, 0x21, 0xd4, 0xc2, 0x38, 0x08, 0x46, 0x3c, 0x4c, 0xaa, 0x54, 0xa2, 0xa7, - 0xf7, 0x48, 
0xd9, 0x30, 0x0e, 0x82, 0x67, 0x09, 0x09, 0x5b, 0xe1, 0x62, 0x82, 0xee, 0x42, 0xc9, - 0x13, 0x3c, 0x0e, 0x23, 0xdb, 0xd2, 0xc9, 0x30, 0x33, 0xf4, 0x25, 0x94, 0x23, 0xea, 0x08, 0x2a, - 0x23, 0xbb, 0xa6, 0xb7, 0xfa, 0x71, 0xde, 0x47, 0x8e, 0x35, 0x04, 0xd3, 0x33, 0x2a, 0x28, 0x73, - 0x28, 0x4e, 0x39, 0xe8, 0x1e, 0xac, 0x49, 0x39, 0xb3, 0x37, 0x5a, 0x85, 0xad, 0x4a, 0xaf, 0x7c, - 0x75, 0xb9, 0xb9, 0x76, 0x72, 0xf2, 0x02, 0x2b, 0x9b, 0x7a, 0x41, 0xc7, 0x3c, 0x92, 0x8c, 0x4c, - 0xa8, 0x7d, 0x43, 0xe7, 0x76, 0x3e, 0x47, 0x2f, 0x00, 0x5c, 0x16, 0x8d, 0x1c, 0x7d, 0x65, 0xed, - 0x9b, 0x7a, 0x77, 0x9f, 0xbe, 0x7b, 0x77, 0xfd, 0xa3, 0x63, 0x53, 0x45, 0x36, 0xae, 0x2e, 0x37, - 0xab, 0xf3, 0x29, 0xae, 0xba, 0x2c, 0x4a, 0x86, 0xa8, 0x07, 0xd6, 0x98, 0x92, 0x40, 0x8e, 0x9d, - 0x31, 0x75, 0xce, 0xed, 0xfa, 0xf5, 0x65, 0xe1, 0xa9, 0x86, 0x19, 0x0f, 0x59, 0x92, 0x52, 0xb0, - 0x0a, 0x35, 0xb2, 0x6f, 0xe9, 0x5c, 0x25, 0x13, 0xf4, 0x11, 0x00, 0x0f, 0x29, 0x1b, 0x45, 0xd2, - 0xf5, 0x99, 0x8d, 0xd4, 0x96, 0x71, 0x55, 0x59, 0x8e, 0x95, 0x01, 0xdd, 0x57, 0x8f, 0x36, 0x71, - 0x47, 0x9c, 0x05, 0x33, 0xfb, 0x3b, 0x7a, 0xb5, 0xa2, 0x0c, 0xcf, 0x58, 0x30, 0x43, 0x9b, 0x60, - 0x69, 0x5d, 0x44, 0xbe, 0xc7, 0x48, 0x60, 0xdf, 0xd6, 0xf9, 0x00, 0x65, 0x3a, 0xd6, 0x16, 0x75, - 0x0e, 0x49, 0x36, 0x22, 0xfb, 0xce, 0xf5, 0xe7, 0x60, 0x82, 0x5d, 0x9c, 0x83, 0xe1, 0xa0, 0x5f, - 0x00, 0x84, 0xc2, 0x9f, 0xfa, 0x01, 0xf5, 0x68, 0x64, 0xdf, 0xd5, 0x9b, 0x6e, 0xe6, 0xbe, 0xd6, - 0x73, 0x14, 0xce, 0x30, 0x1a, 0x9f, 0x83, 0x95, 0xb9, 0x6d, 0xea, 0x96, 0x9c, 0xd3, 0x99, 0xb9, - 0xc0, 0x6a, 0xa8, 0x52, 0x32, 0x25, 0x41, 0x9c, 0x74, 0xc2, 0x55, 0x9c, 0x4c, 0xbe, 0x58, 0x7d, - 0x5c, 0x68, 0xec, 0x80, 0x95, 0x51, 0x1d, 0xfa, 0x18, 0x36, 0x04, 0xf5, 0xfc, 0x48, 0x8a, 0xd9, - 0x88, 0xc4, 0x72, 0x6c, 0xff, 0x4a, 0x13, 0x6a, 0xa9, 0xb1, 0x1b, 0xcb, 0x71, 0x63, 0x04, 0x8b, - 0xc3, 0x43, 0x2d, 0xb0, 0x94, 0x28, 0x22, 0x2a, 0xa6, 0x54, 0xa8, 0x6a, 0xab, 0x72, 0x9e, 0x35, - 0x29, 0xf1, 0x46, 0x94, 0x08, 0x67, 0xac, 0xdf, 
0x8e, 0x2a, 0x36, 0x33, 0xf5, 0x18, 0xa4, 0x37, - 0xc4, 0x3c, 0x06, 0x66, 0xda, 0xfe, 0x57, 0x01, 0x6a, 0xd9, 0xa6, 0x01, 0xed, 0x26, 0xc5, 0x5e, - 0x6f, 0xe9, 0xc6, 0xce, 0xf6, 0xbb, 0x9a, 0x0c, 0x5d, 0x5a, 0x83, 0x58, 0x39, 0x3b, 0x54, 0xfd, - 0xbd, 0x26, 0xa3, 0x9f, 0xc2, 0x7a, 0xc8, 0x85, 0x4c, 0x9f, 0xb0, 0xfc, 0x04, 0x73, 0x91, 0x96, - 0xa2, 0x04, 0xdc, 0x1e, 0xc3, 0x8d, 0x65, 0x6f, 0xe8, 0x21, 0xac, 0x3d, 0xdf, 0x1f, 0xd6, 0x57, - 0x1a, 0xf7, 0x5f, 0x5e, 0xb4, 0xbe, 0xbb, 0xbc, 0xf8, 0xdc, 0x17, 0x32, 0x26, 0xc1, 0xfe, 0x10, - 0xfd, 0x10, 0xd6, 0xfb, 0x47, 0xc7, 0x18, 0xd7, 0x0b, 0x8d, 0xcd, 0x97, 0x17, 0xad, 0xfb, 0xcb, - 0x38, 0xb5, 0xc4, 0x63, 0xe6, 0x62, 0x7e, 0x3a, 0xef, 0x75, 0xff, 0xbd, 0x0a, 0x96, 0x79, 0xd9, - 0x3f, 0xf4, 0xef, 0xd0, 0x46, 0x52, 0xca, 0xd3, 0x2b, 0xbb, 0xfa, 0xce, 0x8a, 0x5e, 0x4b, 0x08, - 0xe6, 0x8c, 0x1f, 0x40, 0xcd, 0x0f, 0xa7, 0x9f, 0x8d, 0x28, 0x23, 0xa7, 0x81, 0x69, 0x7b, 0x2b, - 0xd8, 0x52, 0xb6, 0x41, 0x62, 0x52, 0xef, 0x85, 0xcf, 0x24, 0x15, 0xcc, 0x34, 0xb4, 0x15, 0x3c, - 0x9f, 0xa3, 0x2f, 0xa1, 0xe8, 0x87, 0x64, 0x62, 0xda, 0x90, 0xdc, 0x1d, 0xec, 0x0f, 0xbb, 0x87, - 0x46, 0x83, 0xbd, 0xca, 0xd5, 0xe5, 0x66, 0x51, 0x19, 0xb0, 0xa6, 0xa1, 0x66, 0xda, 0x09, 0xa8, - 0x2f, 0xe9, 0xb7, 0xbf, 0x82, 0x33, 0x16, 0xa5, 0x23, 0x9f, 0x79, 0x82, 0x46, 0x91, 0xae, 0x02, - 0x15, 0x9c, 0x4e, 0x51, 0x03, 0xca, 0xa6, 0x9f, 0xd0, 0x0d, 0x44, 0x55, 0xd5, 0x6a, 0x63, 0xe8, - 0x6d, 0x80, 0x95, 0x64, 0x63, 0x74, 0x26, 0xf8, 0xa4, 0xfd, 0x9f, 0x22, 0x58, 0xbb, 0x41, 0x1c, - 0x49, 0x53, 0x06, 0x3f, 0x58, 0xf2, 0x5f, 0xc0, 0x2d, 0xa2, 0x7f, 0xaf, 0x08, 0x53, 0x35, 0x45, - 0xb7, 0x69, 0xe6, 0x00, 0x1e, 0xe6, 0xba, 0x9b, 0x83, 0x93, 0x96, 0xae, 0x57, 0x52, 0x3e, 0xed, - 0x02, 0xae, 0x93, 0xd7, 0x56, 0xd0, 0x31, 0x6c, 0x70, 0xe1, 0x8c, 0x69, 0x24, 0x93, 0x4a, 0x64, - 0x7e, 0x47, 0x72, 0x7f, 0x54, 0x9f, 0x65, 0x81, 0xe6, 0x19, 0x4e, 0xa2, 0x5d, 0xf6, 0x81, 0x1e, - 0x43, 0x51, 0x90, 0xb3, 0xb4, 0xe5, 0xcc, 0xbd, 0x24, 0x98, 0x9c, 0xc9, 0x25, 0x17, 
0x9a, 0x81, - 0x7e, 0x03, 0xe0, 0xfa, 0x51, 0x48, 0xa4, 0x33, 0xa6, 0xc2, 0x1c, 0x76, 0xee, 0x16, 0xfb, 0x73, - 0xd4, 0x92, 0x97, 0x0c, 0x1b, 0x1d, 0x40, 0xd5, 0x21, 0xa9, 0x5c, 0x4b, 0xd7, 0xff, 0xa3, 0xed, - 0x76, 0x8d, 0x8b, 0xba, 0x72, 0x71, 0x75, 0xb9, 0x59, 0x49, 0x2d, 0xb8, 0xe2, 0x10, 0x23, 0xdf, - 0x03, 0xd8, 0x50, 0xff, 0x6e, 0x23, 0x97, 0x9e, 0x91, 0x38, 0x90, 0x89, 0x4c, 0xae, 0x29, 0x2b, - 0xea, 0x47, 0xa0, 0x6f, 0x70, 0x26, 0xae, 0x9a, 0xcc, 0xd8, 0xd0, 0xef, 0xe0, 0x16, 0x65, 0x8e, - 0x98, 0x69, 0xb1, 0xa6, 0x11, 0x56, 0xae, 0xdf, 0xec, 0x60, 0x0e, 0x5e, 0xda, 0x6c, 0x9d, 0xbe, - 0x66, 0x6f, 0xff, 0xb5, 0x00, 0x90, 0x54, 0xea, 0x0f, 0x2b, 0x40, 0x04, 0x45, 0x97, 0x48, 0xa2, - 0x35, 0x57, 0xc3, 0x7a, 0x8c, 0xbe, 0x00, 0x90, 0x74, 0x12, 0x06, 0x44, 0xfa, 0xcc, 0x33, 0xb2, - 0x79, 0xdb, 0x73, 0x90, 0x41, 0xeb, 0x38, 0x93, 0x90, 0xff, 0xaf, 0xe3, 0xec, 0xd9, 0xaf, 0xbe, - 0x6d, 0xae, 0xfc, 0xe3, 0xdb, 0xe6, 0xca, 0x1f, 0xaf, 0x9a, 0x85, 0x57, 0x57, 0xcd, 0xc2, 0xdf, - 0xae, 0x9a, 0x85, 0x7f, 0x5e, 0x35, 0x0b, 0xa7, 0x25, 0xdd, 0xc3, 0xfd, 0xe4, 0xbf, 0x01, 0x00, - 0x00, 0xff, 0xff, 0x06, 0x93, 0x6e, 0xba, 0xfc, 0x12, 0x00, 0x00, + 0x6d, 0x6f, 0x20, 0x27, 0x55, 0x7b, 0xa6, 0x2d, 0x4d, 0x79, 0xd4, 0x3d, 0xf4, 0xf4, 0x68, 0x4b, + 0x37, 0x8e, 0x5b, 0xb9, 0x72, 0x76, 0x71, 0xa0, 0xf8, 0x5f, 0x72, 0xa4, 0x38, 0xc1, 0xc5, 0x45, + 0xfc, 0x2f, 0x70, 0xe3, 0x02, 0xd5, 0x3d, 0x3d, 0xd2, 0x28, 0x19, 0xc7, 0xa9, 0x22, 0x87, 0xbd, + 0x75, 0xbf, 0xfe, 0xbe, 0xd7, 0xdd, 0xaf, 0xbf, 0xee, 0xf7, 0x1a, 0xac, 0x30, 0xa0, 0x4e, 0xd8, + 0x0a, 0x04, 0x97, 0x1c, 0x21, 0x97, 0x3b, 0xe7, 0x54, 0xb4, 0xc2, 0xaf, 0x89, 0x18, 0x9f, 0x7b, + 0xb2, 0x35, 0xf9, 0x71, 0xcd, 0x92, 0xd3, 0x80, 0x1a, 0x40, 0xed, 0xce, 0x90, 0x0f, 0xb9, 0x6e, + 0x6e, 0xab, 0x96, 0xb1, 0xd6, 0x87, 0x9c, 0x0f, 0x7d, 0xba, 0xad, 0x7b, 0xa7, 0xd1, 0xd9, 0xb6, + 0x1b, 0x09, 0x22, 0x3d, 0xce, 0xcc, 0xf8, 0xc6, 0x9b, 0xe3, 0x84, 0x4d, 0xe3, 0xa1, 0xe6, 0x45, + 0x1e, 0x4a, 0x47, 0xdc, 0xa5, 0xc7, 0x01, 0x75, 0xd0, 
0x1e, 0x58, 0x84, 0x31, 0x2e, 0x35, 0x37, + 0xb4, 0x73, 0x8d, 0xdc, 0x96, 0xb5, 0xb3, 0xd9, 0x7a, 0x7b, 0x51, 0xad, 0xf6, 0x1c, 0xd6, 0xc9, + 0xbf, 0xba, 0xdc, 0x5c, 0xc2, 0x69, 0x26, 0xfa, 0x25, 0x54, 0x5c, 0x1a, 0x7a, 0x82, 0xba, 0x03, + 0xc1, 0x7d, 0x6a, 0x2f, 0x37, 0x72, 0x5b, 0xb7, 0x76, 0xbe, 0x97, 0xe5, 0x49, 0x4d, 0x8e, 0xb9, + 0x4f, 0xb1, 0x65, 0x18, 0xaa, 0x83, 0xf6, 0x00, 0xc6, 0x74, 0x7c, 0x4a, 0x45, 0x38, 0xf2, 0x02, + 0x7b, 0x45, 0xd3, 0x7f, 0x70, 0x1d, 0x5d, 0xad, 0xbd, 0x75, 0x38, 0x83, 0xe3, 0x14, 0x15, 0x1d, + 0x42, 0x85, 0x4c, 0x88, 0xe7, 0x93, 0x53, 0xcf, 0xf7, 0xe4, 0xd4, 0xce, 0x6b, 0x57, 0x9f, 0xbc, + 0xd3, 0x55, 0x3b, 0x45, 0xc0, 0x0b, 0xf4, 0xa6, 0x0b, 0x30, 0x9f, 0x08, 0x3d, 0x82, 0x62, 0xbf, + 0x77, 0xd4, 0xdd, 0x3f, 0xda, 0xab, 0x2e, 0xd5, 0x36, 0x5e, 0x5e, 0x34, 0xee, 0x2a, 0x1f, 0x73, + 0x40, 0x9f, 0x32, 0xd7, 0x63, 0x43, 0xb4, 0x05, 0xa5, 0xf6, 0xee, 0x6e, 0xaf, 0x7f, 0xd2, 0xeb, + 0x56, 0x73, 0xb5, 0xda, 0xcb, 0x8b, 0xc6, 0xbd, 0x45, 0x60, 0xdb, 0x71, 0x68, 0x20, 0xa9, 0x5b, + 0xcb, 0x7f, 0xf3, 0x97, 0xfa, 0x52, 0xf3, 0x9b, 0x1c, 0x54, 0xd2, 0x8b, 0x40, 0x8f, 0xa0, 0xd0, + 0xde, 0x3d, 0xd9, 0x7f, 0xde, 0xab, 0x2e, 0xcd, 0xe9, 0x69, 0x44, 0xdb, 0x91, 0xde, 0x84, 0xa2, + 0x87, 0xb0, 0xda, 0x6f, 0x7f, 0x75, 0xdc, 0xab, 0xe6, 0xe6, 0xcb, 0x49, 0xc3, 0xfa, 0x24, 0x0a, + 0x35, 0xaa, 0x8b, 0xdb, 0xfb, 0x47, 0xd5, 0xe5, 0x6c, 0x54, 0x57, 0x10, 0x8f, 0x99, 0xa5, 0xfc, + 0x39, 0x0f, 0xd6, 0x31, 0x15, 0x13, 0xcf, 0xf9, 0xc0, 0x12, 0xf9, 0x0c, 0xf2, 0x92, 0x84, 0xe7, + 0x5a, 0x1a, 0x56, 0xb6, 0x34, 0x4e, 0x48, 0x78, 0xae, 0x26, 0x35, 0x74, 0x8d, 0x57, 0xca, 0x10, + 0x34, 0xf0, 0x3d, 0x87, 0x48, 0xea, 0x6a, 0x65, 0x58, 0x3b, 0xdf, 0xcf, 0x62, 0xe3, 0x19, 0xca, + 0xac, 0xff, 0xe9, 0x12, 0x4e, 0x51, 0xd1, 0x13, 0x28, 0x0c, 0x7d, 0x7e, 0x4a, 0x7c, 0xad, 0x09, + 0x6b, 0xe7, 0x41, 0x96, 0x93, 0x3d, 0x8d, 0x98, 0x3b, 0x30, 0x14, 0xf4, 0x18, 0x0a, 0x51, 0xe0, + 0x12, 0x49, 0xed, 0x82, 0x26, 0x37, 0xb2, 0xc8, 0x5f, 0x69, 0xc4, 0x2e, 0x67, 0x67, 0xde, 
0x10, + 0x1b, 0x3c, 0x3a, 0x80, 0x12, 0xa3, 0xf2, 0x6b, 0x2e, 0xce, 0x43, 0xbb, 0xd8, 0x58, 0xd9, 0xb2, + 0x76, 0x3e, 0xcd, 0x14, 0x63, 0x8c, 0x69, 0x4b, 0x49, 0x9c, 0xd1, 0x98, 0x32, 0x19, 0xbb, 0xe9, + 0x2c, 0xdb, 0x39, 0x3c, 0x73, 0x80, 0x7e, 0x0e, 0x25, 0xca, 0xdc, 0x80, 0x7b, 0x4c, 0xda, 0xa5, + 0xeb, 0x17, 0xd2, 0x33, 0x18, 0x15, 0x4c, 0x3c, 0x63, 0x28, 0xb6, 0xe0, 0xbe, 0x7f, 0x4a, 0x9c, + 0x73, 0xbb, 0xfc, 0x9e, 0xdb, 0x98, 0x31, 0x3a, 0x05, 0xc8, 0x8f, 0xb9, 0x4b, 0x9b, 0xdb, 0xb0, + 0xfe, 0x56, 0xa8, 0x51, 0x0d, 0x4a, 0x26, 0xd4, 0xb1, 0x46, 0xf2, 0x78, 0xd6, 0x6f, 0xde, 0x86, + 0xb5, 0x85, 0xb0, 0x36, 0xff, 0x9e, 0x87, 0x52, 0x72, 0xd6, 0xa8, 0x0d, 0x65, 0x87, 0x33, 0x49, + 0x3c, 0x46, 0x85, 0x91, 0x57, 0xe6, 0xc9, 0xec, 0x26, 0x20, 0xc5, 0x7a, 0xba, 0x84, 0xe7, 0x2c, + 0xf4, 0x6b, 0x28, 0x0b, 0x1a, 0xf2, 0x48, 0x38, 0x34, 0x34, 0xfa, 0xda, 0xca, 0x56, 0x48, 0x0c, + 0xc2, 0xf4, 0x0f, 0x91, 0x27, 0xa8, 0x8a, 0x72, 0x88, 0xe7, 0x54, 0xf4, 0x04, 0x8a, 0x82, 0x86, + 0x92, 0x08, 0xf9, 0x2e, 0x89, 0xe0, 0x18, 0xd2, 0xe7, 0xbe, 0xe7, 0x4c, 0x71, 0xc2, 0x40, 0x4f, + 0xa0, 0x1c, 0xf8, 0xc4, 0xd1, 0x5e, 0xed, 0x55, 0x4d, 0xff, 0x28, 0x8b, 0xde, 0x4f, 0x40, 0x78, + 0x8e, 0x47, 0x9f, 0x03, 0xf8, 0x7c, 0x38, 0x70, 0x85, 0x37, 0xa1, 0xc2, 0x48, 0xac, 0x96, 0xc5, + 0xee, 0x6a, 0x04, 0x2e, 0xfb, 0x7c, 0x18, 0x37, 0xd1, 0xde, 0xff, 0xa5, 0xaf, 0x94, 0xb6, 0x0e, + 0x00, 0xc8, 0x6c, 0xd4, 0xa8, 0xeb, 0x93, 0xf7, 0x72, 0x65, 0x4e, 0x24, 0x45, 0x47, 0x0f, 0xa0, + 0x72, 0xc6, 0x85, 0x43, 0x07, 0xe6, 0xd6, 0x94, 0xb5, 0x26, 0x2c, 0x6d, 0x8b, 0xf5, 0x85, 0x3a, + 0x50, 0x1c, 0x52, 0x46, 0x85, 0xe7, 0xd8, 0xa0, 0x27, 0x7b, 0x94, 0x79, 0x21, 0x63, 0x08, 0x8e, + 0x98, 0xf4, 0xc6, 0xd4, 0xcc, 0x94, 0x10, 0x3b, 0x65, 0x28, 0x8a, 0x78, 0xa4, 0xf9, 0x7b, 0x40, + 0x6f, 0x63, 0x11, 0x82, 0xfc, 0xb9, 0xc7, 0x5c, 0x2d, 0xac, 0x32, 0xd6, 0x6d, 0xd4, 0x82, 0x62, + 0x40, 0xa6, 0x3e, 0x27, 0xae, 0x11, 0xcb, 0x9d, 0x56, 0x9c, 0x2f, 0x5b, 0x49, 0xbe, 0x6c, 0xb5, + 0xd9, 0x14, 0x27, 0xa0, 0xe6, 
0x01, 0xdc, 0xcd, 0xdc, 0x32, 0xda, 0x81, 0xca, 0x4c, 0x84, 0x03, + 0xcf, 0x4c, 0xd2, 0xb9, 0x7d, 0x75, 0xb9, 0x69, 0xcd, 0xd4, 0xba, 0xdf, 0xc5, 0xd6, 0x0c, 0xb4, + 0xef, 0x36, 0xff, 0x54, 0x86, 0xb5, 0x05, 0x29, 0xa3, 0x3b, 0xb0, 0xea, 0x8d, 0xc9, 0x90, 0x9a, + 0x35, 0xc6, 0x1d, 0xd4, 0x83, 0x82, 0x4f, 0x4e, 0xa9, 0xaf, 0x04, 0xad, 0x0e, 0xf5, 0x47, 0x37, + 0xde, 0x89, 0xd6, 0x6f, 0x35, 0xbe, 0xc7, 0xa4, 0x98, 0x62, 0x43, 0x46, 0x36, 0x14, 0x1d, 0x3e, + 0x1e, 0x13, 0xa6, 0x9e, 0xce, 0x95, 0xad, 0x32, 0x4e, 0xba, 0x2a, 0x32, 0x44, 0x0c, 0x43, 0x3b, + 0xaf, 0xcd, 0xba, 0x8d, 0xaa, 0xb0, 0x42, 0xd9, 0xc4, 0x5e, 0xd5, 0x26, 0xd5, 0x54, 0x16, 0xd7, + 0x8b, 0x15, 0x59, 0xc6, 0xaa, 0xa9, 0x78, 0x51, 0x48, 0x85, 0x5d, 0x8c, 0x23, 0xaa, 0xda, 0xe8, + 0x67, 0x50, 0x18, 0xf3, 0x88, 0xc9, 0xd0, 0x2e, 0xe9, 0xc5, 0x6e, 0x64, 0x2d, 0xf6, 0x50, 0x21, + 0xcc, 0xd3, 0x6e, 0xe0, 0xa8, 0x07, 0xeb, 0xa1, 0xe4, 0xc1, 0x60, 0x28, 0x88, 0x43, 0x07, 0x01, + 0x15, 0x1e, 0x77, 0xcd, 0xd3, 0xb4, 0xf1, 0xd6, 0xa1, 0x74, 0x4d, 0x91, 0x83, 0x6f, 0x2b, 0xce, + 0x9e, 0xa2, 0xf4, 0x35, 0x03, 0xf5, 0xa1, 0x12, 0x44, 0xbe, 0x3f, 0xe0, 0x41, 0x9c, 0xa5, 0x62, + 0x3d, 0xbd, 0x47, 0xc8, 0xfa, 0x91, 0xef, 0x3f, 0x8b, 0x49, 0xd8, 0x0a, 0xe6, 0x1d, 0x74, 0x0f, + 0x0a, 0x43, 0xc1, 0xa3, 0x20, 0xb4, 0x2d, 0x1d, 0x0c, 0xd3, 0x43, 0x5f, 0x42, 0x31, 0xa4, 0x8e, + 0xa0, 0x32, 0xb4, 0x2b, 0x7a, 0xab, 0x1f, 0x67, 0x4d, 0x72, 0xac, 0x21, 0x98, 0x9e, 0x51, 0x41, + 0x99, 0x43, 0x71, 0xc2, 0x41, 0x1b, 0xb0, 0x22, 0xe5, 0xd4, 0x5e, 0x6b, 0xe4, 0xb6, 0x4a, 0x9d, + 0xe2, 0xd5, 0xe5, 0xe6, 0xca, 0xc9, 0xc9, 0x0b, 0xac, 0x6c, 0xea, 0x05, 0x1d, 0xf1, 0x50, 0x32, + 0x32, 0xa6, 0xf6, 0x2d, 0x1d, 0xdb, 0x59, 0x1f, 0xbd, 0x00, 0x70, 0x59, 0x38, 0x70, 0xf4, 0x95, + 0xb5, 0x6f, 0xeb, 0xdd, 0x7d, 0x7a, 0xf3, 0xee, 0xba, 0x47, 0xc7, 0x26, 0x8b, 0xac, 0x5d, 0x5d, + 0x6e, 0x96, 0x67, 0x5d, 0x5c, 0x76, 0x59, 0x18, 0x37, 0x51, 0x07, 0xac, 0x11, 0x25, 0xbe, 0x1c, + 0x39, 0x23, 0xea, 0x9c, 0xdb, 0xd5, 0xeb, 0xd3, 0xc2, 0x53, 0x0d, 
0x33, 0x1e, 0xd2, 0x24, 0xa5, + 0x60, 0xb5, 0xd4, 0xd0, 0x5e, 0xd7, 0xb1, 0x8a, 0x3b, 0xe8, 0x23, 0x00, 0x1e, 0x50, 0x36, 0x08, + 0xa5, 0xeb, 0x31, 0x1b, 0xa9, 0x2d, 0xe3, 0xb2, 0xb2, 0x1c, 0x2b, 0x03, 0xba, 0xaf, 0x1e, 0x6d, + 0xe2, 0x0e, 0x38, 0xf3, 0xa7, 0xf6, 0x77, 0xf4, 0x68, 0x49, 0x19, 0x9e, 0x31, 0x7f, 0x8a, 0x36, + 0xc1, 0xd2, 0xba, 0x08, 0xbd, 0x21, 0x23, 0xbe, 0x7d, 0x47, 0xc7, 0x03, 0x94, 0xe9, 0x58, 0x5b, + 0xd4, 0x39, 0xc4, 0xd1, 0x08, 0xed, 0xbb, 0xd7, 0x9f, 0x83, 0x59, 0xec, 0xfc, 0x1c, 0x0c, 0x07, + 0xfd, 0x02, 0x20, 0x10, 0xde, 0xc4, 0xf3, 0xe9, 0x90, 0x86, 0xf6, 0x3d, 0xbd, 0xe9, 0x7a, 0xe6, + 0x6b, 0x3d, 0x43, 0xe1, 0x14, 0xa3, 0xf6, 0x39, 0x58, 0xa9, 0xdb, 0xa6, 0x6e, 0xc9, 0x39, 0x9d, + 0x9a, 0x0b, 0xac, 0x9a, 0x2a, 0x24, 0x13, 0xe2, 0x47, 0x71, 0x25, 0x5c, 0xc6, 0x71, 0xe7, 0x8b, + 0xe5, 0xc7, 0xb9, 0xda, 0x0e, 0x58, 0x29, 0xd5, 0xa1, 0x8f, 0x61, 0x4d, 0xd0, 0xa1, 0x17, 0x4a, + 0x31, 0x1d, 0x90, 0x48, 0x8e, 0xec, 0x5f, 0x69, 0x42, 0x25, 0x31, 0xb6, 0x23, 0x39, 0xaa, 0x0d, + 0x60, 0x7e, 0x78, 0xa8, 0x01, 0x96, 0x12, 0x45, 0x48, 0xc5, 0x84, 0x0a, 0x95, 0x6d, 0x55, 0xcc, + 0xd3, 0x26, 0x25, 0xde, 0x90, 0x12, 0xe1, 0x8c, 0xf4, 0xdb, 0x51, 0xc6, 0xa6, 0xa7, 0x1e, 0x83, + 0xe4, 0x86, 0x98, 0xc7, 0xc0, 0x74, 0x9b, 0xff, 0xce, 0x41, 0x25, 0x5d, 0x34, 0xa0, 0xdd, 0x38, + 0xd9, 0xeb, 0x2d, 0xdd, 0xda, 0xd9, 0xbe, 0xa9, 0xc8, 0xd0, 0xa9, 0xd5, 0x8f, 0x94, 0xb3, 0x43, + 0x55, 0xdf, 0x6b, 0x32, 0xfa, 0x29, 0xac, 0x06, 0x5c, 0xc8, 0xe4, 0x09, 0xcb, 0x0e, 0x30, 0x17, + 0x49, 0x2a, 0x8a, 0xc1, 0xcd, 0x11, 0xdc, 0x5a, 0xf4, 0x86, 0x1e, 0xc2, 0xca, 0xf3, 0xfd, 0x7e, + 0x75, 0xa9, 0x76, 0xff, 0xe5, 0x45, 0xe3, 0xbb, 0x8b, 0x83, 0xcf, 0x3d, 0x21, 0x23, 0xe2, 0xef, + 0xf7, 0xd1, 0x0f, 0x61, 0xb5, 0x7b, 0x74, 0x8c, 0x71, 0x35, 0x57, 0xdb, 0x7c, 0x79, 0xd1, 0xb8, + 0xbf, 0x88, 0x53, 0x43, 0x3c, 0x62, 0x2e, 0xe6, 0xa7, 0xb3, 0x5a, 0xf7, 0x3f, 0xcb, 0x60, 0x99, + 0x97, 0xfd, 0x43, 0x7f, 0x87, 0xd6, 0xe2, 0x54, 0x9e, 0x5c, 0xd9, 0xe5, 0x1b, 0x33, 0x7a, 0x25, + 0x26, 
0x98, 0x33, 0x7e, 0x00, 0x15, 0x2f, 0x98, 0x7c, 0x36, 0xa0, 0x8c, 0x9c, 0xfa, 0xa6, 0xec, + 0x2d, 0x61, 0x4b, 0xd9, 0x7a, 0xb1, 0x49, 0xbd, 0x17, 0x1e, 0x93, 0x54, 0x30, 0x53, 0xd0, 0x96, + 0xf0, 0xac, 0x8f, 0xbe, 0x84, 0xbc, 0x17, 0x90, 0xb1, 0x29, 0x43, 0x32, 0x77, 0xb0, 0xdf, 0x6f, + 0x1f, 0x1a, 0x0d, 0x76, 0x4a, 0x57, 0x97, 0x9b, 0x79, 0x65, 0xc0, 0x9a, 0x86, 0xea, 0x49, 0x25, + 0xa0, 0x66, 0xd2, 0x6f, 0x7f, 0x09, 0xa7, 0x2c, 0x4a, 0x47, 0x1e, 0x1b, 0x0a, 0x1a, 0x86, 0x3a, + 0x0b, 0x94, 0x70, 0xd2, 0x45, 0x35, 0x28, 0x9a, 0x7a, 0x42, 0x17, 0x10, 0x65, 0x95, 0xab, 0x8d, + 0xa1, 0xb3, 0x06, 0x56, 0x1c, 0x8d, 0xc1, 0x99, 0xe0, 0xe3, 0xe6, 0x7f, 0xf3, 0x60, 0xed, 0xfa, + 0x51, 0x28, 0x4d, 0x1a, 0xfc, 0x60, 0xc1, 0x7f, 0x01, 0xeb, 0x44, 0x7f, 0xaf, 0x08, 0x53, 0x39, + 0x45, 0x97, 0x69, 0xe6, 0x00, 0x1e, 0x66, 0xba, 0x9b, 0x81, 0xe3, 0x92, 0xae, 0x53, 0x50, 0x3e, + 0xed, 0x1c, 0xae, 0x92, 0x37, 0x46, 0xd0, 0x31, 0xac, 0x71, 0xe1, 0x8c, 0x68, 0x28, 0xe3, 0x4c, + 0x64, 0xbe, 0x23, 0x99, 0x1f, 0xd5, 0x67, 0x69, 0xa0, 0x79, 0x86, 0xe3, 0xd5, 0x2e, 0xfa, 0x40, + 0x8f, 0x21, 0x2f, 0xc8, 0x59, 0x52, 0x72, 0x66, 0x5e, 0x12, 0x4c, 0xce, 0xe4, 0x82, 0x0b, 0xcd, + 0x40, 0xbf, 0x01, 0x70, 0xbd, 0x30, 0x20, 0xd2, 0x19, 0x51, 0x61, 0x0e, 0x3b, 0x73, 0x8b, 0xdd, + 0x19, 0x6a, 0xc1, 0x4b, 0x8a, 0x8d, 0x0e, 0xa0, 0xec, 0x90, 0x44, 0xae, 0x85, 0xeb, 0xff, 0x68, + 0xbb, 0x6d, 0xe3, 0xa2, 0xaa, 0x5c, 0x5c, 0x5d, 0x6e, 0x96, 0x12, 0x0b, 0x2e, 0x39, 0xc4, 0xc8, + 0xf7, 0x00, 0xd6, 0xd4, 0xdf, 0x6d, 0xe0, 0xd2, 0x33, 0x12, 0xf9, 0x32, 0x96, 0xc9, 0x35, 0x69, + 0x45, 0x7d, 0x04, 0xba, 0x06, 0x67, 0xd6, 0x55, 0x91, 0x29, 0x1b, 0xfa, 0x1d, 0xac, 0x53, 0xe6, + 0x88, 0xa9, 0x16, 0x6b, 0xb2, 0xc2, 0xd2, 0xf5, 0x9b, 0xed, 0xcd, 0xc0, 0x0b, 0x9b, 0xad, 0xd2, + 0x37, 0xec, 0xcd, 0x7f, 0xe6, 0x00, 0xe2, 0x4c, 0xfd, 0x61, 0x05, 0x88, 0x20, 0xef, 0x12, 0x49, + 0xb4, 0xe6, 0x2a, 0x58, 0xb7, 0xd1, 0x17, 0x00, 0x92, 0x8e, 0x03, 0x9f, 0x48, 0x8f, 0x0d, 0x8d, + 0x6c, 0xde, 0xf5, 0x1c, 0xa4, 0xd0, 0x68, 
0x07, 0x0a, 0xe6, 0x63, 0x90, 0xbf, 0x91, 0x67, 0x90, + 0xcd, 0xbf, 0xe6, 0x00, 0xe2, 0x6d, 0x7e, 0xab, 0xf7, 0xd6, 0xb1, 0x5f, 0xbd, 0xae, 0x2f, 0xfd, + 0xe3, 0x75, 0x7d, 0xe9, 0x8f, 0x57, 0xf5, 0xdc, 0xab, 0xab, 0x7a, 0xee, 0x6f, 0x57, 0xf5, 0xdc, + 0xbf, 0xae, 0xea, 0xb9, 0xd3, 0x82, 0xae, 0xfb, 0x7e, 0xf2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0xc1, 0xd8, 0x19, 0x9e, 0x30, 0x13, 0x00, 0x00, } diff --git a/components/engine/vendor/github.com/docker/swarmkit/api/specs.proto b/components/engine/vendor/github.com/docker/swarmkit/api/specs.proto index 13b52d4105..c14ebeb6d3 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/api/specs.proto +++ b/components/engine/vendor/github.com/docker/swarmkit/api/specs.proto @@ -393,6 +393,9 @@ message SecretSpec { // The currently recognized values are: // - golang: Go templating Driver templating = 3; + + // Driver is the the secret driver that is used to store the specified secret + Driver driver = 4; } // ConfigSpec specifies user-provided configuration files. diff --git a/components/engine/vendor/github.com/docker/swarmkit/api/validation/secrets.go b/components/engine/vendor/github.com/docker/swarmkit/api/validation/secrets.go new file mode 100644 index 0000000000..e907b6b411 --- /dev/null +++ b/components/engine/vendor/github.com/docker/swarmkit/api/validation/secrets.go @@ -0,0 +1,14 @@ +package validation + +import "fmt" + +// MaxSecretSize is the maximum byte length of the `Secret.Spec.Data` field. 
+const MaxSecretSize = 500 * 1024 // 500KB + +// ValidateSecretPayload validates the secret payload size +func ValidateSecretPayload(data []byte) error { + if len(data) >= MaxSecretSize || len(data) < 1 { + return fmt.Errorf("secret data must be larger than 0 and less than %d bytes", MaxSecretSize) + } + return nil +} diff --git a/components/engine/vendor/github.com/docker/swarmkit/manager/controlapi/secret.go b/components/engine/vendor/github.com/docker/swarmkit/manager/controlapi/secret.go index 5f125e9c60..23708146d6 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/manager/controlapi/secret.go +++ b/components/engine/vendor/github.com/docker/swarmkit/manager/controlapi/secret.go @@ -6,6 +6,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/validation" "github.com/docker/swarmkit/identity" "github.com/docker/swarmkit/log" "github.com/docker/swarmkit/manager/state/store" @@ -14,9 +15,6 @@ import ( "google.golang.org/grpc/codes" ) -// MaxSecretSize is the maximum byte length of the `Secret.Spec.Data` field. 
-const MaxSecretSize = 500 * 1024 // 500KB - // assumes spec is not nil func secretFromSecretSpec(spec *api.SecretSpec) *api.Secret { return &api.Secret{ @@ -56,7 +54,6 @@ func (s *Server) UpdateSecret(ctx context.Context, request *api.UpdateSecretRequ if request.SecretID == "" || request.SecretVersion == nil { return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) } - var secret *api.Secret err := s.store.Update(func(tx store.Tx) error { secret = store.GetSecret(tx, request.SecretID) @@ -245,9 +242,16 @@ func validateSecretSpec(spec *api.SecretSpec) error { if err := validateConfigOrSecretAnnotations(spec.Annotations); err != nil { return err } - - if len(spec.Data) >= MaxSecretSize || len(spec.Data) < 1 { - return grpc.Errorf(codes.InvalidArgument, "secret data must be larger than 0 and less than %d bytes", MaxSecretSize) + // Check if secret driver is defined + if spec.Driver != nil { + // Ensure secret driver has a name + if spec.Driver.Name == "" { + return grpc.Errorf(codes.InvalidArgument, "secret driver must have a name") + } + return nil + } + if err := validation.ValidateSecretPayload(spec.Data); err != nil { + return grpc.Errorf(codes.InvalidArgument, "%s", err.Error()) } return nil } diff --git a/components/engine/vendor/github.com/docker/swarmkit/manager/controlapi/server.go b/components/engine/vendor/github.com/docker/swarmkit/manager/controlapi/server.go index 3d49ef9430..c2490ba002 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/manager/controlapi/server.go +++ b/components/engine/vendor/github.com/docker/swarmkit/manager/controlapi/server.go @@ -10,7 +10,6 @@ import ( ) var ( - errNotImplemented = errors.New("not implemented") errInvalidArgument = errors.New("invalid argument") ) diff --git a/components/engine/vendor/github.com/docker/swarmkit/manager/dispatcher/assignments.go b/components/engine/vendor/github.com/docker/swarmkit/manager/dispatcher/assignments.go index 871519efa2..3f17a6d20f 100644 --- 
a/components/engine/vendor/github.com/docker/swarmkit/manager/dispatcher/assignments.go +++ b/components/engine/vendor/github.com/docker/swarmkit/manager/dispatcher/assignments.go @@ -1,9 +1,13 @@ package dispatcher import ( + "fmt" + "github.com/Sirupsen/logrus" "github.com/docker/swarmkit/api" "github.com/docker/swarmkit/api/equality" + "github.com/docker/swarmkit/api/validation" + "github.com/docker/swarmkit/manager/drivers" "github.com/docker/swarmkit/manager/state/store" ) @@ -24,15 +28,16 @@ type typeAndID struct { } type assignmentSet struct { + dp *drivers.DriverProvider tasksMap map[string]*api.Task tasksUsingDependency map[typeAndID]map[string]struct{} changes map[typeAndID]*api.AssignmentChange - - log *logrus.Entry + log *logrus.Entry } -func newAssignmentSet(log *logrus.Entry) *assignmentSet { +func newAssignmentSet(log *logrus.Entry, dp *drivers.DriverProvider) *assignmentSet { return &assignmentSet{ + dp: dp, changes: make(map[typeAndID]*api.AssignmentChange), tasksMap: make(map[string]*api.Task), tasksUsingDependency: make(map[typeAndID]map[string]struct{}), @@ -53,12 +58,13 @@ func (a *assignmentSet) addTaskDependencies(readTx store.ReadTx, t *api.Task) { if len(a.tasksUsingDependency[mapKey]) == 0 { a.tasksUsingDependency[mapKey] = make(map[string]struct{}) - secret := store.GetSecret(readTx, secretID) - if secret == nil { + secret, err := a.secret(readTx, secretID) + if err != nil { a.log.WithFields(logrus.Fields{ "secret.id": secretID, "secret.name": secretRef.SecretName, - }).Debug("secret not found") + "error": err, + }).Error("failed to fetch secret") continue } @@ -245,3 +251,29 @@ func (a *assignmentSet) message() api.AssignmentsMessage { return message } + +// secret populates the secret value from raft store. For external secrets, the value is populated +// from the secret driver. 
+func (a *assignmentSet) secret(readTx store.ReadTx, secretID string) (*api.Secret, error) { + secret := store.GetSecret(readTx, secretID) + if secret == nil { + return nil, fmt.Errorf("secret not found") + } + if secret.Spec.Driver == nil { + return secret, nil + } + d, err := a.dp.NewSecretDriver(secret.Spec.Driver) + if err != nil { + return nil, err + } + value, err := d.Get(&secret.Spec) + if err != nil { + return nil, err + } + if err := validation.ValidateSecretPayload(value); err != nil { + return nil, err + } + // Assign the secret + secret.Spec.Data = value + return secret, nil +} diff --git a/components/engine/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go b/components/engine/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go index 47899cc48a..8ec3ae27df 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go +++ b/components/engine/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go @@ -17,6 +17,7 @@ import ( "github.com/docker/swarmkit/api/equality" "github.com/docker/swarmkit/ca" "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/drivers" "github.com/docker/swarmkit/manager/state/store" "github.com/docker/swarmkit/remotes" "github.com/docker/swarmkit/watch" @@ -125,6 +126,7 @@ type Dispatcher struct { ctx context.Context cancel context.CancelFunc clusterUpdateQueue *watch.Queue + dp *drivers.DriverProvider taskUpdates map[string]*api.TaskStatus // indexed by task ID taskUpdatesLock sync.Mutex @@ -142,8 +144,9 @@ type Dispatcher struct { } // New returns Dispatcher with cluster interface(usually raft.Node). 
-func New(cluster Cluster, c *Config) *Dispatcher { +func New(cluster Cluster, c *Config, dp *drivers.DriverProvider) *Dispatcher { d := &Dispatcher{ + dp: dp, nodes: newNodeStore(c.HeartbeatPeriod, c.HeartbeatEpsilon, c.GracePeriodMultiplier, c.RateLimitPeriod), downNodes: newNodeStore(defaultNodeDownPeriod, 0, 1, 0), store: cluster.MemoryStore(), @@ -836,7 +839,7 @@ func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatche var ( sequence int64 appliesTo string - assignments = newAssignmentSet(log) + assignments = newAssignmentSet(log, d.dp) ) sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error { diff --git a/components/engine/vendor/github.com/docker/swarmkit/manager/drivers/provider.go b/components/engine/vendor/github.com/docker/swarmkit/manager/drivers/provider.go new file mode 100644 index 0000000000..0d9be6119d --- /dev/null +++ b/components/engine/vendor/github.com/docker/swarmkit/manager/drivers/provider.go @@ -0,0 +1,34 @@ +package drivers + +import ( + "fmt" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/swarmkit/api" +) + +// DriverProvider provides external drivers +type DriverProvider struct { + pluginGetter plugingetter.PluginGetter +} + +// New returns a new driver provider +func New(pluginGetter plugingetter.PluginGetter) *DriverProvider { + return &DriverProvider{pluginGetter: pluginGetter} +} + +// NewSecretDriver creates a new driver for fetching secrets +func (m *DriverProvider) NewSecretDriver(driver *api.Driver) (*SecretDriver, error) { + if m.pluginGetter == nil { + return nil, fmt.Errorf("plugin getter is nil") + } + if driver == nil && driver.Name == "" { + return nil, fmt.Errorf("driver specification is nil") + } + // Search for the specified plugin + plugin, err := m.pluginGetter.Get(driver.Name, SecretsProviderCapability, plugingetter.Lookup) + if err != nil { + return nil, err + } + return NewSecretDriver(plugin), nil +} diff --git 
a/components/engine/vendor/github.com/docker/swarmkit/manager/drivers/secrets.go b/components/engine/vendor/github.com/docker/swarmkit/manager/drivers/secrets.go new file mode 100644 index 0000000000..9c8ccc7d01 --- /dev/null +++ b/components/engine/vendor/github.com/docker/swarmkit/manager/drivers/secrets.go @@ -0,0 +1,55 @@ +package drivers + +import ( + "fmt" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/swarmkit/api" +) + +const ( + // SecretsProviderAPI is the endpoint for fetching secrets from plugins + SecretsProviderAPI = "/SecretProvider.GetSecret" + + // SecretsProviderCapability is the secrets provider plugin capability identification + SecretsProviderCapability = "secretprovider" +) + +// SecretDriver provides secrets from different stores +type SecretDriver struct { + plugin plugingetter.CompatPlugin +} + +// NewSecretDriver creates a new driver that provides third party secrets +func NewSecretDriver(plugin plugingetter.CompatPlugin) *SecretDriver { + return &SecretDriver{plugin: plugin} +} + +// Get gets a secret from the secret provider +func (d *SecretDriver) Get(spec *api.SecretSpec) ([]byte, error) { + if spec == nil { + return nil, fmt.Errorf("spec is nil") + } + var secretResp SecretsProviderResponse + secretReq := &SecretsProviderRequest{Name: spec.Annotations.Name} + err := d.plugin.Client().Call(SecretsProviderAPI, secretReq, &secretResp) + if err != nil { + return nil, err + } + if secretResp.Err != "" { + return nil, fmt.Errorf(secretResp.Err) + } + // Assign the secret value + return []byte(secretResp.Value), nil +} + +// SecretsProviderRequest is the secrets provider request. +type SecretsProviderRequest struct { + Name string `json:"name"` // Name is the name of the secret plugin +} + +// SecretsProviderResponse is the secrets provider response. 
+type SecretsProviderResponse struct { + Value string `json:"value"` // Value is the value of the secret + Err string `json:"err"` // Err is the error response of the plugin +} diff --git a/components/engine/vendor/github.com/docker/swarmkit/manager/manager.go b/components/engine/vendor/github.com/docker/swarmkit/manager/manager.go index d17e8ec231..4771c9dcbe 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/manager/manager.go +++ b/components/engine/vendor/github.com/docker/swarmkit/manager/manager.go @@ -26,6 +26,7 @@ import ( "github.com/docker/swarmkit/manager/allocator/networkallocator" "github.com/docker/swarmkit/manager/controlapi" "github.com/docker/swarmkit/manager/dispatcher" + "github.com/docker/swarmkit/manager/drivers" "github.com/docker/swarmkit/manager/health" "github.com/docker/swarmkit/manager/keymanager" "github.com/docker/swarmkit/manager/logbroker" @@ -218,7 +219,7 @@ func New(config *Config) (*Manager, error) { m := &Manager{ config: *config, caserver: ca.NewServer(raftNode.MemoryStore(), config.SecurityConfig, config.RootCAPaths), - dispatcher: dispatcher.New(raftNode, dispatcher.DefaultConfig()), + dispatcher: dispatcher.New(raftNode, dispatcher.DefaultConfig(), drivers.New(config.PluginGetter)), logbroker: logbroker.New(raftNode.MemoryStore()), server: grpc.NewServer(opts...), localserver: grpc.NewServer(opts...), diff --git a/components/engine/vendor/github.com/docker/swarmkit/manager/scheduler/decision_tree.go b/components/engine/vendor/github.com/docker/swarmkit/manager/scheduler/decision_tree.go index 4567753704..34e52ae3d0 100644 --- a/components/engine/vendor/github.com/docker/swarmkit/manager/scheduler/decision_tree.go +++ b/components/engine/vendor/github.com/docker/swarmkit/manager/scheduler/decision_tree.go @@ -20,10 +20,7 @@ type decisionTree struct { // (lowest) first according to the sorting function. Must be called on a leaf // of the decision tree. // -// The caller may modify the nodes in the returned slice. 
This has the effect -// of changing the nodes in the decision tree entry. The next node to -// findBestNodes on this decisionTree entry will take into account the changes -// that were made to the nodes. +// The caller may modify the nodes in the returned slice. func (dt *decisionTree) orderedNodes(meetsConstraints func(*NodeInfo) bool, nodeLess func(*NodeInfo, *NodeInfo) bool) []NodeInfo { if dt.nodeHeap.length != len(dt.nodeHeap.nodes) { // We already collapsed the heap into a sorted slice, so From 812c72c3da5b22ae700d261212a389f903abde8f Mon Sep 17 00:00:00 2001 From: tim Date: Sun, 16 Jul 2017 22:56:52 -0700 Subject: [PATCH 10/21] Replaces fluentd-address string by constant Signed-off-by: tim Upstream-commit: cb972b1515428fd7a8ef573fa65af08a6fef669c Component: engine --- components/engine/daemon/logger/fluentd/fluentd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/components/engine/daemon/logger/fluentd/fluentd.go b/components/engine/daemon/logger/fluentd/fluentd.go index 23465b3f44..c8977ec0da 100644 --- a/components/engine/daemon/logger/fluentd/fluentd.go +++ b/components/engine/daemon/logger/fluentd/fluentd.go @@ -189,7 +189,7 @@ func ValidateLogOpt(cfg map[string]string) error { } } - _, err := parseAddress(cfg["fluentd-address"]) + _, err := parseAddress(cfg[addressKey]) return err } From 4437cd08dbe42fd0fabb8f3aed4cfe7947ead03c Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Mon, 17 Jul 2017 10:36:46 +0200 Subject: [PATCH 11/21] sysinfo: use Prctl() from x/sys/unix Use unix.Prctl() instead of manually reimplementing it using unix.RawSyscall. Also use unix.SECCOMP_MODE_FILTER instead of locally defining it. 
Signed-off-by: Tobias Klauser Upstream-commit: 6c9d715a8c64a7c782b8c7b57925e1dc19b29517 Component: engine --- components/engine/pkg/sysinfo/sysinfo_linux.go | 9 ++------- components/engine/pkg/sysinfo/sysinfo_linux_test.go | 6 +++--- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/components/engine/pkg/sysinfo/sysinfo_linux.go b/components/engine/pkg/sysinfo/sysinfo_linux.go index 2d33b4dbc3..50ae265bb6 100644 --- a/components/engine/pkg/sysinfo/sysinfo_linux.go +++ b/components/engine/pkg/sysinfo/sysinfo_linux.go @@ -12,11 +12,6 @@ import ( "golang.org/x/sys/unix" ) -const ( - // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER. - SeccompModeFilter = uintptr(2) -) - func findCgroupMountpoints() (map[string]string, error) { cgMounts, err := cgroups.GetCgroupMounts(false) if err != nil { @@ -60,9 +55,9 @@ func New(quiet bool) *SysInfo { } // Check if Seccomp is supported, via CONFIG_SECCOMP. - if _, _, err := unix.RawSyscall(unix.SYS_PRCTL, unix.PR_GET_SECCOMP, 0, 0); err != unix.EINVAL { + if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { // Make sure the kernel has CONFIG_SECCOMP_FILTER. 
- if _, _, err := unix.RawSyscall(unix.SYS_PRCTL, unix.PR_SET_SECCOMP, SeccompModeFilter, 0); err != unix.EINVAL { + if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { sysInfo.Seccomp = true } } diff --git a/components/engine/pkg/sysinfo/sysinfo_linux_test.go b/components/engine/pkg/sysinfo/sysinfo_linux_test.go index 77c54f27c9..860784f2ae 100644 --- a/components/engine/pkg/sysinfo/sysinfo_linux_test.go +++ b/components/engine/pkg/sysinfo/sysinfo_linux_test.go @@ -5,10 +5,10 @@ import ( "os" "path" "path/filepath" - "syscall" "testing" "github.com/stretchr/testify/require" + "golang.org/x/sys/unix" ) func TestReadProcBool(t *testing.T) { @@ -66,9 +66,9 @@ func TestNew(t *testing.T) { func checkSysInfo(t *testing.T, sysInfo *SysInfo) { // Check if Seccomp is supported, via CONFIG_SECCOMP.then sysInfo.Seccomp must be TRUE , else FALSE - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL { + if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { // Make sure the kernel has CONFIG_SECCOMP_FILTER. - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL { + if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { require.True(t, sysInfo.Seccomp) } } else { From 62a7e56d340762d632046d1964fc9f758c73bafb Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Mon, 17 Jul 2017 10:36:52 +0200 Subject: [PATCH 12/21] [pkg/term] use IoctlGetTermios/IoctlSetTermios from x/sys/unix Use IoctlGetTermios/IoctlSetTermios from golang.org/x/sys/unix instead of manually reimplementing them. 
Signed-off-by: Tobias Klauser Upstream-commit: 6476504695284fcdc32b5f7621cffca22746e67d Component: engine --- components/engine/pkg/term/termios_linux.go | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/components/engine/pkg/term/termios_linux.go b/components/engine/pkg/term/termios_linux.go index 31bfa8419e..3e25eb7a41 100644 --- a/components/engine/pkg/term/termios_linux.go +++ b/components/engine/pkg/term/termios_linux.go @@ -1,8 +1,6 @@ package term import ( - "unsafe" - "golang.org/x/sys/unix" ) @@ -18,20 +16,21 @@ type Termios unix.Termios // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + termios, err := unix.IoctlGetTermios(int(fd), getTermios) + if err != nil { return nil, err } - newState := oldState.termios + var oldState State + oldState.termios = Termios(*termios) - newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) - newState.Oflag &^= unix.OPOST - newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - newState.Cflag &^= (unix.CSIZE | unix.PARENB) - newState.Cflag |= unix.CS8 + termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + termios.Oflag &^= unix.OPOST + termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + termios.Cflag &^= (unix.CSIZE | unix.PARENB) + termios.Cflag |= unix.CS8 - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil { return nil, err } return &oldState, nil From 149027868312351d79da6a219c3ece021580439a Mon Sep 17 00:00:00 2001 From: 
Tobias Klauser Date: Mon, 17 Jul 2017 10:36:56 +0200 Subject: [PATCH 13/21] loopback: use IoctlGetInt/IoctlSetInt from x/sys/unix Use IoctlGetInt/IoctlSetInt from golang.org/x/sys/unix (where applicable) instead of manually reimplementing them. Signed-off-by: Tobias Klauser Upstream-commit: bedf09363cb7f2f59bf2b72fea0704351b9f5c8d Component: engine --- components/engine/pkg/loopback/ioctl.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/components/engine/pkg/loopback/ioctl.go b/components/engine/pkg/loopback/ioctl.go index 534907a023..fa744f0a69 100644 --- a/components/engine/pkg/loopback/ioctl.go +++ b/components/engine/pkg/loopback/ioctl.go @@ -9,15 +9,15 @@ import ( ) func ioctlLoopCtlGetFree(fd uintptr) (int, error) { - index, _, err := unix.Syscall(unix.SYS_IOCTL, fd, LoopCtlGetFree, 0) - if err != 0 { + index, err := unix.IoctlGetInt(int(fd), LoopCtlGetFree) + if err != nil { return 0, err } - return int(index), nil + return index, nil } func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { - if _, _, err := unix.Syscall(unix.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { + if err := unix.IoctlSetInt(int(loopFd), LoopSetFd, int(sparseFd)); err != nil { return err } return nil @@ -47,7 +47,7 @@ func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { } func ioctlLoopSetCapacity(loopFd uintptr, value int) error { - if _, _, err := unix.Syscall(unix.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { + if err := unix.IoctlSetInt(int(loopFd), LoopSetCapacity, value); err != nil { return err } return nil From 1549d1d8d99a709df77dd772d6690f1714ae1e5c Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Sat, 8 Apr 2017 14:43:42 -0400 Subject: [PATCH 14/21] Enable pprof/debug endpoints by default Makes sure that debug endpoints are always available, which will aid in debugging demon issues. Wraps debug endpoints in the middleware chain so the can be blocked by authz. 
Signed-off-by: Brian Goff Upstream-commit: 408c7ade7008ca9b2181e12e51a01250a7a94413 Component: engine --- components/engine/api/server/profiler.go | 46 ---------------- .../engine/api/server/router/debug/debug.go | 53 +++++++++++++++++++ .../api/server/router/debug/debug_routes.go | 13 +++++ components/engine/api/server/server.go | 25 ++++----- components/engine/cmd/dockerd/daemon.go | 4 +- 5 files changed, 76 insertions(+), 65 deletions(-) delete mode 100644 components/engine/api/server/profiler.go create mode 100644 components/engine/api/server/router/debug/debug.go create mode 100644 components/engine/api/server/router/debug/debug_routes.go diff --git a/components/engine/api/server/profiler.go b/components/engine/api/server/profiler.go deleted file mode 100644 index d49be338c8..0000000000 --- a/components/engine/api/server/profiler.go +++ /dev/null @@ -1,46 +0,0 @@ -package server - -import ( - "expvar" - "fmt" - "net/http" - "net/http/pprof" - - "github.com/gorilla/mux" -) - -const debugPathPrefix = "/debug/" - -func profilerSetup(mainRouter *mux.Router) { - var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter() - r.HandleFunc("/vars", expVars) - r.HandleFunc("/pprof/", pprof.Index) - r.HandleFunc("/pprof/cmdline", pprof.Cmdline) - r.HandleFunc("/pprof/profile", pprof.Profile) - r.HandleFunc("/pprof/symbol", pprof.Symbol) - r.HandleFunc("/pprof/trace", pprof.Trace) - r.HandleFunc("/pprof/{name}", handlePprof) -} - -func handlePprof(w http.ResponseWriter, r *http.Request) { - var name string - if vars := mux.Vars(r); vars != nil { - name = vars["name"] - } - pprof.Handler(name).ServeHTTP(w, r) -} - -// Replicated from expvar.go as not public. 
-func expVars(w http.ResponseWriter, r *http.Request) { - first := true - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintln(w, "{") - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintln(w, ",") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintln(w, "\n}") -} diff --git a/components/engine/api/server/router/debug/debug.go b/components/engine/api/server/router/debug/debug.go new file mode 100644 index 0000000000..b66ff3cf3a --- /dev/null +++ b/components/engine/api/server/router/debug/debug.go @@ -0,0 +1,53 @@ +package debug + +import ( + "expvar" + "net/http" + "net/http/pprof" + + "github.com/docker/docker/api/server/httputils" + "github.com/docker/docker/api/server/router" + "golang.org/x/net/context" +) + +// NewRouter creates a new debug router +// The debug router holds endpoints for debug the daemon, such as those for pprof. +func NewRouter() router.Router { + r := &debugRouter{} + r.initRoutes() + return r +} + +type debugRouter struct { + routes []router.Route +} + +func (r *debugRouter) initRoutes() { + r.routes = []router.Route{ + router.NewGetRoute("/vars", frameworkAdaptHandler(expvar.Handler())), + router.NewGetRoute("/pprof/", frameworkAdaptHandlerFunc(pprof.Index)), + router.NewGetRoute("/pprof/cmdline", frameworkAdaptHandlerFunc(pprof.Cmdline)), + router.NewGetRoute("/pprof/profile", frameworkAdaptHandlerFunc(pprof.Profile)), + router.NewGetRoute("/pprof/symbol", frameworkAdaptHandlerFunc(pprof.Symbol)), + router.NewGetRoute("/pprof/trace", frameworkAdaptHandlerFunc(pprof.Trace)), + router.NewGetRoute("/pprof/{name}", handlePprof), + } +} + +func (r *debugRouter) Routes() []router.Route { + return r.routes +} + +func frameworkAdaptHandler(handler http.Handler) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + handler.ServeHTTP(w, r) + return nil + } +} + +func frameworkAdaptHandlerFunc(handler 
http.HandlerFunc) httputils.APIFunc { + return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + handler(w, r) + return nil + } +} diff --git a/components/engine/api/server/router/debug/debug_routes.go b/components/engine/api/server/router/debug/debug_routes.go new file mode 100644 index 0000000000..f2a72615a0 --- /dev/null +++ b/components/engine/api/server/router/debug/debug_routes.go @@ -0,0 +1,13 @@ +package debug + +import ( + "net/http" + "net/http/pprof" + + "golang.org/x/net/context" +) + +func handlePprof(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + pprof.Handler(vars["name"]).ServeHTTP(w, r) + return nil +} diff --git a/components/engine/api/server/server.go b/components/engine/api/server/server.go index d402019113..e0f2d89d9a 100644 --- a/components/engine/api/server/server.go +++ b/components/engine/api/server/server.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/middleware" "github.com/docker/docker/api/server/router" + "github.com/docker/docker/api/server/router/debug" "github.com/docker/docker/dockerversion" "github.com/gorilla/mux" "golang.org/x/net/context" @@ -148,13 +149,10 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { // InitRouter initializes the list of routers for the server. // This method also enables the Go profiler if enableProfiler is true. -func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) { +func (s *Server) InitRouter(routers ...router.Router) { s.routers = append(s.routers, routers...) 
m := s.createMux() - if enableProfiler { - profilerSetup(m) - } s.routerSwapper = &routerSwapper{ router: m, } @@ -175,6 +173,13 @@ func (s *Server) createMux() *mux.Router { } } + debugRouter := debug.NewRouter() + s.routers = append(s.routers, debugRouter) + for _, r := range debugRouter.Routes() { + f := s.makeHTTPHandler(r.Handler()) + m.Path("/debug" + r.Path()).Handler(f) + } + err := errors.NewRequestNotFoundError(fmt.Errorf("page not found")) notFoundHandler := httputils.MakeErrorHandler(err) m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler) @@ -194,15 +199,3 @@ func (s *Server) Wait(waitChan chan error) { } waitChan <- nil } - -// DisableProfiler reloads the server mux without adding the profiler routes. -func (s *Server) DisableProfiler() { - s.routerSwapper.Swap(s.createMux()) -} - -// EnableProfiler reloads the server mux adding the profiler routes. -func (s *Server) EnableProfiler() { - m := s.createMux() - profilerSetup(m) - s.routerSwapper.Swap(m) -} diff --git a/components/engine/cmd/dockerd/daemon.go b/components/engine/cmd/dockerd/daemon.go index ad937ede7c..215f9c2d59 100644 --- a/components/engine/cmd/dockerd/daemon.go +++ b/components/engine/cmd/dockerd/daemon.go @@ -383,10 +383,8 @@ func (cli *DaemonCli) reloadConfig() { switch { case debugEnabled && !config.Debug: // disable debug debug.Disable() - cli.api.DisableProfiler() case config.Debug && !debugEnabled: // enable debug debug.Enable() - cli.api.EnableProfiler() } } @@ -536,7 +534,7 @@ func initRouter(opts routerOptions) { } } - opts.api.InitRouter(debug.IsEnabled(), routers...) + opts.api.InitRouter(routers...) 
} // TODO: remove this from cli and return the authzMiddleware From 59f36f3793b05fee2a874d1ab64f49938ae20b42 Mon Sep 17 00:00:00 2001 From: Kir Kolyshkin Date: Mon, 17 Jul 2017 17:40:29 -0700 Subject: [PATCH 15/21] devmapper_wrapper.go: fix gcc warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I am getting the following warning from gcc when compiling the daemon: > # github.com/docker/docker/pkg/devicemapper > pkg/devicemapper/devmapper_wrapper.go: In function ‘log_cb’: > pkg/devicemapper/devmapper_wrapper.go:20:2: warning: ignoring return > value of ‘vasprintf’, declared with attribute warn_unused_result > [-Wunused-result] > vasprintf(&buffer, f, ap); > ^ vasprintf(3) man page says if the function returns -1, the buffer is undefined, so we should not use it. In practice, I assume, this never happens so we just return. Introduced by https://github.com/moby/moby/pull/33845 that resulted in commit 63328c6 ("devicemapper: remove 256 character limit of libdm logs") Cc: Aleksa Sarai Signed-off-by: Kir Kolyshkin Upstream-commit: 7da12bcfa9db4d84b2c547bee93dafeaead15b16 Component: engine --- components/engine/pkg/devicemapper/devmapper_wrapper.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/components/engine/pkg/devicemapper/devmapper_wrapper.go b/components/engine/pkg/devicemapper/devmapper_wrapper.go index 0fb70fe5b3..da3b43f796 100644 --- a/components/engine/pkg/devicemapper/devmapper_wrapper.go +++ b/components/engine/pkg/devicemapper/devmapper_wrapper.go @@ -15,10 +15,15 @@ static void log_cb(int level, const char *file, int line, int dm_errno_or_class, { char *buffer = NULL; va_list ap; + int ret; va_start(ap, f); - vasprintf(&buffer, f, ap); + ret = vasprintf(&buffer, f, ap); va_end(ap); + if (ret < 0) { + // memory allocation failed -- should never happen? 
+ return; + } DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); free(buffer); From 59247b34926e5146458e069b6689cdae33e82358 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sat, 15 Jul 2017 15:50:49 -0700 Subject: [PATCH 16/21] Update authors and mailmap Signed-off-by: Sebastiaan van Stijn Upstream-commit: 6be41f3858522ac54f97b54a5011cda42828db39 Component: engine --- components/engine/.mailmap | 28 +++++++++++++++++- components/engine/AUTHORS | 59 +++++++++++++++++++++++++++++--------- 2 files changed, 72 insertions(+), 15 deletions(-) diff --git a/components/engine/.mailmap b/components/engine/.mailmap index 49275048f0..c48f290412 100644 --- a/components/engine/.mailmap +++ b/components/engine/.mailmap @@ -151,7 +151,8 @@ Jessica Frazelle - +Sebastiaan van Stijn +Sebastiaan van Stijn Thomas LEVEIL Thomas LÉVEIL @@ -178,6 +179,7 @@ John Howard (VM) Kevin Feyrer Liao Qingwei Luke Marsden +Madhan Raj Mookkandy Madhu Venugopal Mageee <21521230.zju.edu.cn> Mansi Nahar @@ -301,17 +303,26 @@ Bhumika Bayani Bingshen Wang Chen Chuanliang Chen Mingjie +Chen Qiu +Chen Qiu <21321229@zju.edu.cn> +Chris Dias +Chris McKinnel CUI Wei cuiwei13 Daniel Grunwell Daniel J Walsh Dattatraya Kumbhar David Sheets Diego Siqueira +Elan Ruusamäe +Elan Ruusamäe Eric G. Noriega Evelyn Xu Felix Ruess Gabriel Nicolas Avellaneda Gang Qiao <1373319223@qq.com> +George Kontridze +Gopikannan Venugopalsamy +Gou Rao Gustav Sinder Harshal Patil Helen Xie @@ -319,8 +330,10 @@ Hyzhou Zhy <1187766782@qq.com> Hyzhou Zhy Jacob Tomlinson Jiuyue Ma +John Stephens Jose Diaz-Gonzalez Josh Eveleth +Josh Soref Josh Wilson Jim Galasyn Kevin Kern @@ -335,13 +348,23 @@ Michael Hudson-Doyle Mike Casas Milind Chawre Ma Müller +Moorthy RS +Neil Horman +Pavel Tikhomirov +Peter Choi +Peter Dave Hello Philipp Gillé +Robert Terhaar Roberto Muñoz Fernández +Roman Dudin +Sandeep Bansal Sean Lee Shukui Yang +Srinivasan Srivatsan Stefan S. 
Steve Desmond Sun Gengze <690388648@qq.com> +Tim Bart Tim Zju <21651152@zju.edu.cn> Tõnis Tiigi Wayne Song @@ -350,6 +373,9 @@ Wang Ping Wang Yuexiao Wewang Xiaorenfine Wei Wu cizixs +Xiaoyu Zhang +Yamasaki Masahide +Yassine Tijani Ying Li Yong Tang Yu Chengxia diff --git a/components/engine/AUTHORS b/components/engine/AUTHORS index 4f908078a1..e091ed7dc1 100644 --- a/components/engine/AUTHORS +++ b/components/engine/AUTHORS @@ -11,8 +11,10 @@ Aaron Welch Aaron.L.Xu Abel Muiño Abhijeet Kasurde +Abhinandan Prativadi Abhinav Ajgaonkar Abhishek Chanda +Abhishek Sharma Abin Shahab Adam Avilla Adam Eijdenberg @@ -65,6 +67,7 @@ Alex Warhawk Alexander Artemenko Alexander Boyd Alexander Larsson +Alexander Midlash Alexander Morozov Alexander Shopov Alexandre Beslic @@ -221,6 +224,7 @@ Brian Flad Brian Goff Brian McCallister Brian Olsen +Brian Schwind Brian Shumate Brian Torres-Gil Brian Trump @@ -273,19 +277,22 @@ ChaYoung You Chen Chao Chen Chuanliang Chen Hanxiao +Chen Min Chen Mingjie -cheney90 +Chen Qiu Chewey Chia-liang Kao chli Cholerae Hu Chris Alfonso Chris Armstrong +Chris Dias Chris Dituri Chris Fordham Chris Gavin Chris Gibson Chris Khoo +Chris McKinnel Chris McKinnel Chris Seto Chris Snow @@ -294,7 +301,6 @@ Chris Stivers Chris Swan Chris Wahl Chris Weyl -chrismckinnel Christian Berendt Christian Böhme Christian Persson @@ -390,6 +396,7 @@ David Davis David Dooling David Gageot David Gebler +David Glasser David Lawrence David Lechner David M. 
Karr @@ -410,7 +417,7 @@ Davide Ceretti Dawn Chen dbdd dcylabs -decadent +Deborah Gertrude Digges deed02392 Deng Guangxing Deni Bertovic @@ -428,6 +435,7 @@ Deshi Xiao devmeyster Devvyn Murphy Dharmit Shah +Dhawal Yogesh Bhanushali Diego Romero Diego Siqueira Dieter Reuter @@ -476,7 +484,7 @@ Eiichi Tsukata Eike Herzbach Eivin Giske Skaaren Eivind Uggedal -Elan Ruusamäe +Elan Ruusamäe Elena Morozova Elias Faxö Elias Probst @@ -533,6 +541,7 @@ Ezra Silvera Fabian Lauer Fabiano Rosas Fabio Falci +Fabio Kung Fabio Rapposelli Fabio Rehm Fabrizio Regini @@ -603,6 +612,7 @@ Gaël PORTAY Genki Takiuchi GennadySpb Geoffrey Bachelet +George Kontridze George MacRorie George Xie Georgi Hristozov @@ -620,8 +630,9 @@ Gleb M Borisov Glyn Normington GoBella Goffert van Gool +Gopikannan Venugopalsamy Gosuke Miyashita -Gou Rao +Gou Rao Govinda Fichtner Grant Reaber Graydon Hoare @@ -706,6 +717,7 @@ Jack Danger Canty Jacob Atzen Jacob Edelman Jacob Tomlinson +Jacob Wen Jake Champlin Jake Moshenko Jake Sanders @@ -872,6 +884,7 @@ Josh Eveleth Josh Hawn Josh Horwitz Josh Poimboeuf +Josh Soref Josh Wilson Josiah Kiehl José Tomás Albornoz @@ -892,6 +905,7 @@ Jussi Nummelin Justas Brazauskas Justin Cormack Justin Force +Justin Menga Justin Plock Justin Simonelis Justin Terry @@ -1183,6 +1197,7 @@ Mike Naberezny Mike Snitzer mikelinjie <294893458@qq.com> Mikhail Sobolev +Miklos Szegedi Milind Chawre Miloslav Trmač mingqing @@ -1193,6 +1208,7 @@ mlarcher Mohammad Banikazemi Mohammed Aaqib Ansari Mohit Soni +Moorthy RS Morgan Bauer Morgante Pell Morgy93 @@ -1224,7 +1240,9 @@ Nathan Kleyn Nathan LeClaire Nathan McCauley Nathan Williams +Naveed Jamil Neal McBurnett +Neil Horman Neil Peterson Nelson Chen Neyazul Haque @@ -1309,19 +1327,21 @@ Paulo Ribeiro Pavel Lobashov Pavel Pospisil Pavel Sutyrin -Pavel Tikhomirov +Pavel Tikhomirov Pavlos Ratis Pavol Vargovcik Peeyush Gupta Peggy Li Pei Su +Peng Tao Penghan Wang Per Weijnitz perhapszzy@sina.com Peter Bourgon Peter Braden -Peter Choi -Peter 
Dave Hello +Peter Bücker +Peter Choi +Peter Dave Hello Peter Edge Peter Ericson Peter Esbensen @@ -1370,6 +1390,7 @@ Rafal Jeczalik Rafe Colton Raghavendra K T Raghuram Devarakonda +Raja Sami Rajat Pandit Rajdeep Dua Ralf Sippl @@ -1411,8 +1432,9 @@ Rob Vesse Robert Bachmann Robert Bittle Robert Obryk +Robert Schneider Robert Stern -Robert Terhaar +Robert Terhaar Robert Wallis Roberto G. Hashioka Roberto Muñoz Fernández @@ -1431,6 +1453,7 @@ Roland Huß Roland Kammerer Roland Moriz Roma Sokolov +Roman Dudin Roman Strashkin Ron Smits Ron Williams @@ -1443,8 +1466,8 @@ Rory Hunter Rory McCune Ross Boucher Rovanion Luckey +Royce Remer Rozhnov Alexandr -rsmoorthy Rudolph Gottesheim Rui Lopes Runshen Zhu @@ -1486,7 +1509,7 @@ Samuel Andaya Samuel Dion-Girardeau Samuel Karp Samuel PHAN -Sandeep Bansal +Sandeep Bansal Sankar சங்கர் Sanket Saurav Santhosh Manohar @@ -1547,6 +1570,7 @@ Simei He Simon Eskildsen Simon Ferquel Simon Leinen +Simon Menke Simon Taranto Sindhu S Sjoerd Langkemper @@ -1561,7 +1585,8 @@ Spencer Smith Sridatta Thatipamala Sridhar Ratnakumar Srini Brahmaroutu -srinsriv +Srinivasan Srivatsan +Stanislav Bondarenko Steeve Morin Stefan Berger Stefan J. 
Wernli @@ -1636,10 +1661,12 @@ Tianyi Wang Tibor Vass Tiffany Jernigan Tiffany Low +Tim Bart Tim Bosse Tim Dettrick Tim Düsterhus Tim Hockin +Tim Potter Tim Ruffles Tim Smith Tim Terhorst @@ -1700,11 +1727,11 @@ Tyler Brock Tzu-Jung Lee uhayate Ulysse Carion -unknown +Utz Bacher vagrant Vaidas Jablonskis +vanderliang Veres Lajos -vgeta Victor Algaze Victor Coisne Victor Costan @@ -1786,6 +1813,7 @@ Xianglin Gao Xianlu Bird XiaoBing Jiang Xiaoxu Chen +Xiaoyu Zhang xiekeyang Xinbo Weng Xinzi Zhou @@ -1794,10 +1822,13 @@ xlgao-zju xuzhaokui Yahya YAMADA Tsuyoshi +Yamasaki Masahide Yan Feng Yang Bai +Yang Pengfei Yanqiang Miao Yao Zaiyong +Yassine Tijani Yasunori Mahata Yestin Sun Yi EungJun From 988a5f28f91431f1621ea2298a15a296cd6c6197 Mon Sep 17 00:00:00 2001 From: Liron Levin Date: Tue, 18 Jul 2017 12:45:30 +0300 Subject: [PATCH 17/21] pluggable secret backend Fixing secret driver serialization issue from 08f7cf05268782a0dd8e4c41a4cc65fdf78d09f2 Signed-off-by: Liron Levin Upstream-commit: e3f920d2f147025634e12abd5af3a84f436ddad1 Component: engine --- components/engine/api/types/swarm/secret.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/components/engine/api/types/swarm/secret.go b/components/engine/api/types/swarm/secret.go index 91f3578428..f9b1e92669 100644 --- a/components/engine/api/types/swarm/secret.go +++ b/components/engine/api/types/swarm/secret.go @@ -13,7 +13,7 @@ type Secret struct { type SecretSpec struct { Annotations Data []byte `json:",omitempty"` - Driver *Driver `json:"omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store } // SecretReferenceFileTarget is a file target in a secret reference From 416ca0615920ed86d1598b7afd5f1f7e07a9144b Mon Sep 17 00:00:00 2001 From: John Howard Date: Tue, 18 Jul 2017 14:23:23 -0700 Subject: [PATCH 18/21] Windows: 
Disable TestAttachTTYWithoutStdin Signed-off-by: John Howard Upstream-commit: e4ec9195fed2f3653ec6d0a2b1c9ca6b0e2b9c37 Component: engine --- .../engine/integration-cli/docker_cli_attach_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/components/engine/integration-cli/docker_cli_attach_test.go b/components/engine/integration-cli/docker_cli_attach_test.go index 33ecb44b61..ff319c0d8c 100644 --- a/components/engine/integration-cli/docker_cli_attach_test.go +++ b/components/engine/integration-cli/docker_cli_attach_test.go @@ -88,6 +88,14 @@ func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { } func (s *DockerSuite) TestAttachTTYWithoutStdin(c *check.C) { + // TODO @jhowardmsft. Figure out how to get this running again reliable on Windows. + // It works by accident at the moment. Sometimes. I've gone back to v1.13.0 and see the same. + // On Windows, docker run -d -ti busybox causes the container to exit immediately. + // Obviously a year back when I updated the test, that was not the case. However, + // with this, and the test racing with the tear-down which panic's, sometimes CI + // will just fail and `MISS` all the other tests. For now, disabling it. Will + // open an issue to track re-enabling this and root-causing the problem. + testRequires(c, DaemonIsLinux) out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") id := strings.TrimSpace(out) From 21d120121d2a8316e14b165ea279967e9d36f284 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Wed, 19 Jul 2017 00:22:05 +0200 Subject: [PATCH 19/21] Fix API docs for GET /secrets/{id}, GET /secrets The swagger.yml defined these endpoints to return a "ServiceSpec" instead of a "SecretSpec". 
Signed-off-by: Sebastiaan van Stijn Upstream-commit: f6954bea9f28c62c50b88c895968045cf801aa81 Component: engine --- components/engine/api/swagger.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/components/engine/api/swagger.yaml b/components/engine/api/swagger.yaml index 253884db2e..802facfe32 100644 --- a/components/engine/api/swagger.yaml +++ b/components/engine/api/swagger.yaml @@ -2779,7 +2779,7 @@ definitions: type: "string" format: "dateTime" Spec: - $ref: "#/definitions/ServiceSpec" + $ref: "#/definitions/SecretSpec" paths: /containers/json: get: From 0112a044fc67a752ee615d242ac02c1b31081a7d Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Wed, 19 Jul 2017 15:30:18 +0200 Subject: [PATCH 20/21] Update API history and example response for volume CreatedAt This adds the new `CreatedAt` field to the API version history and updates some examples to show this information. The `CreatedAt` field was implemented in a46f757c4043031379362c5d6b3bad7562ab9fed Signed-off-by: Sebastiaan van Stijn Upstream-commit: 48a83a3a18185e0ad48737d448524670f8fac4bf Component: engine --- components/engine/api/swagger.yaml | 5 +++-- components/engine/api/types/volume.go | 2 +- components/engine/docs/api/version-history.md | 7 +++++++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/components/engine/api/swagger.yaml b/components/engine/api/swagger.yaml index 253884db2e..81c031732d 100644 --- a/components/engine/api/swagger.yaml +++ b/components/engine/api/swagger.yaml @@ -1058,7 +1058,7 @@ definitions: CreatedAt: type: "string" format: "dateTime" - description: "Time volume was created." + description: "Date/Time the volume was created." 
Status: type: "object" description: | @@ -6257,7 +6257,8 @@ paths: examples: application/json: Volumes: - - Name: "tardis" + - CreatedAt: "2017-07-19T12:00:26Z" + Name: "tardis" Driver: "local" Mountpoint: "/var/lib/docker/volumes/tardis" Labels: diff --git a/components/engine/api/types/volume.go b/components/engine/api/types/volume.go index a69b0cfb17..8fab786759 100644 --- a/components/engine/api/types/volume.go +++ b/components/engine/api/types/volume.go @@ -7,7 +7,7 @@ package types // swagger:model Volume type Volume struct { - // Time volume was created. + // Date/Time the volume was created. CreatedAt string `json:"CreatedAt,omitempty"` // Name of the volume driver used by the volume. diff --git a/components/engine/docs/api/version-history.md b/components/engine/docs/api/version-history.md index b65931d5d0..e5d9d8d4ca 100644 --- a/components/engine/docs/api/version-history.md +++ b/components/engine/docs/api/version-history.md @@ -28,6 +28,13 @@ keywords: "API, Docker, rcli, REST, documentation" * `GET /images/(name)/get` now includes an `ImageMetadata` field which contains image metadata that is local to the engine and not part of the image config. * `POST /services/create` now accepts a `PluginSpec` when `TaskTemplate.Runtime` is set to `plugin` * `GET /events` now supports config events `create`, `update` and `remove` that are emitted when users create, update or remove a config +* `GET /volumes/` and `GET /volumes/{name}` now return a `CreatedAt` field, + containing the date/time the volume was created. This field is omitted if the + creation date/time for the volume is unknown. For volumes with scope "global", + this field represents the creation date/time of the local _instance_ of the + volume, which may differ from instances of the same volume on different nodes. +* `GET /system/df` now returns a `CreatedAt` field for `Volumes`. Refer to the + `/volumes/` endpoint for a description of this field. 
## v1.30 API changes From 8830d4f1ff008fdb76eb2ba769727fe43967ea92 Mon Sep 17 00:00:00 2001 From: Aaron Lehmann Date: Tue, 18 Jul 2017 16:51:51 -0700 Subject: [PATCH 21/21] api: Update swagger.yaml for configs Also fix bad reference to ServiceSpec. Signed-off-by: Aaron Lehmann Upstream-commit: ea1d14a189d62df34427b037a6d043ae3028760b Component: engine --- components/engine/api/swagger.yaml | 267 ++++++++++++++++++++++++++++- 1 file changed, 261 insertions(+), 6 deletions(-) diff --git a/components/engine/api/swagger.yaml b/components/engine/api/swagger.yaml index 802facfe32..ac2d55eb16 100644 --- a/components/engine/api/swagger.yaml +++ b/components/engine/api/swagger.yaml @@ -712,7 +712,7 @@ definitions: - "process" - "hyperv" - Config: + ContainerConfig: description: "Configuration for a container that is portable between hosts" type: "object" properties: @@ -909,7 +909,7 @@ definitions: type: "string" x-nullable: false ContainerConfig: - $ref: "#/definitions/Config" + $ref: "#/definitions/ContainerConfig" DockerVersion: type: "string" x-nullable: false @@ -917,7 +917,7 @@ definitions: type: "string" x-nullable: false Config: - $ref: "#/definitions/Config" + $ref: "#/definitions/ContainerConfig" Architecture: type: "string" x-nullable: false @@ -2176,6 +2176,37 @@ definitions: SecretName is the name of the secret that this references, but this is just provided for lookup/display purposes. The secret in the reference will be identified by its ID. type: "string" + Configs: + description: "Configs contains references to zero or more configs that will be exposed to the service." + type: "array" + items: + type: "object" + properties: + File: + description: "File represents a specific target that is backed by a file." + type: "object" + properties: + Name: + description: "Name represents the final filename in the filesystem." + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." 
+ type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + ConfigID: + description: "ConfigID represents the ID of the specific config that we're referencing." + type: "string" + ConfigName: + description: | + ConfigName is the name of the config that this references, but this is just provided for + lookup/display purposes. The config in the reference will be identified by its ID. + type: "string" Resources: description: "Resource requirements which apply to each individual container created as part of the service." @@ -2780,6 +2811,38 @@ definitions: format: "dateTime" Spec: $ref: "#/definitions/SecretSpec" + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: "Base64-url-safe-encoded config data" + type: "array" + items: + type: "string" + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + paths: /containers/json: get: @@ -2989,7 +3052,7 @@ paths: description: "Container to create" schema: allOf: - - $ref: "#/definitions/Config" + - $ref: "#/definitions/ContainerConfig" - type: "object" properties: HostConfig: @@ -3287,7 +3350,7 @@ paths: items: $ref: "#/definitions/MountPoint" Config: - $ref: "#/definitions/Config" + $ref: "#/definitions/ContainerConfig" NetworkSettings: $ref: "#/definitions/NetworkConfig" examples: @@ -5674,7 +5737,7 @@ paths: in: "body" description: "The container configuration" schema: - $ref: "#/definitions/Config" + $ref: "#/definitions/ContainerConfig" - name: "container" in: "query" description: "The ID or name of the container to commit" @@ -8530,6 +8593,198 @@ paths: 
format: "int64" required: true tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created config." 
+ type: "string" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + 
/configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: "The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values." + - name: "version" + in: "query" + description: "The version number of the config object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: ["Config"] /distribution/{name}/json: get: summary: "Get image information from the registry"