vendor/github.com/theupdateframework/notary/client/changelist/change.go (generated, vendored, new file, 100 lines)
@@ -0,0 +1,100 @@
package changelist

import (
    "github.com/theupdateframework/notary/tuf/data"
)

// Scopes for TUFChanges are simply the TUF roles.
// Unfortunately because of targets delegations, we can only
// cover the base roles.
const (
    ScopeRoot    = "root"
    ScopeTargets = "targets"
)

// Types for TUFChanges are namespaced by the Role they
// are relevant for. The Root and Targets roles are the
// only ones for which user action can cause a change, as
// all changes in Snapshot and Timestamp are programmatically
// generated based on Root and Targets changes.
const (
    TypeBaseRole          = "role"
    TypeTargetsTarget     = "target"
    TypeTargetsDelegation = "delegation"
    TypeWitness           = "witness"
)

// TUFChange represents a change to a TUF repo
type TUFChange struct {
    // Abbreviated because Go doesn't permit a field and method of the same name
    Actn       string        `json:"action"`
    Role       data.RoleName `json:"role"`
    ChangeType string        `json:"type"`
    ChangePath string        `json:"path"`
    Data       []byte        `json:"data"`
}

// TUFRootData represents a modification of the keys associated
// with a role that appears in the root.json
type TUFRootData struct {
    Keys     data.KeyList  `json:"keys"`
    RoleName data.RoleName `json:"role"`
}

// NewTUFChange initializes a TUFChange object
func NewTUFChange(action string, role data.RoleName, changeType, changePath string, content []byte) *TUFChange {
    return &TUFChange{
        Actn:       action,
        Role:       role,
        ChangeType: changeType,
        ChangePath: changePath,
        Data:       content,
    }
}

// Action returns c.Actn
func (c TUFChange) Action() string {
    return c.Actn
}

// Scope returns c.Role
func (c TUFChange) Scope() data.RoleName {
    return c.Role
}

// Type returns c.ChangeType
func (c TUFChange) Type() string {
    return c.ChangeType
}

// Path returns c.ChangePath
func (c TUFChange) Path() string {
    return c.ChangePath
}

// Content returns c.Data
func (c TUFChange) Content() []byte {
    return c.Data
}

// TUFDelegation represents a modification to a target delegation,
// including the creation of new delegations. This format is used to avoid
// unexpected race conditions between humans modifying the same delegation.
type TUFDelegation struct {
    NewName       data.RoleName `json:"new_name,omitempty"`
    NewThreshold  int           `json:"threshold,omitempty"`
    AddKeys       data.KeyList  `json:"add_keys,omitempty"`
    RemoveKeys    []string      `json:"remove_keys,omitempty"`
    AddPaths      []string      `json:"add_paths,omitempty"`
    RemovePaths   []string      `json:"remove_paths,omitempty"`
    ClearAllPaths bool          `json:"clear_paths,omitempty"`
}

// ToNewRole creates a fresh role object from the TUFDelegation data
func (td TUFDelegation) ToNewRole(scope data.RoleName) (*data.Role, error) {
    name := scope
    if td.NewName != "" {
        name = td.NewName
    }
    return data.NewRole(name, td.NewThreshold, td.AddKeys.IDs(), td.AddPaths)
}
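Taken together, change.go defines the unit of work for the rest of the package: a TUFChange pairs an action with a scope (role), a type, a path, and an opaque JSON payload. The sketch below is illustrative only, not part of the vendored file; the target name and FileMeta blob are made-up placeholders, and real callers marshal a data.FileMeta the way client.AddTarget does.

package main

import (
    "fmt"

    "github.com/theupdateframework/notary/client/changelist"
)

func main() {
    // Hypothetical serialized data.FileMeta payload (placeholder content).
    content := []byte(`{"length":1024,"hashes":{"sha256":"..."}}`)

    // Stage "create the target 'v1.0' under the targets role".
    c := changelist.NewTUFChange(
        changelist.ActionCreate,      // action
        changelist.ScopeTargets,      // scope: the TUF role affected
        changelist.TypeTargetsTarget, // type: a target entry
        "v1.0",                       // path: the target name
        content,
    )

    // Read the change back through the Change interface accessors.
    fmt.Println(c.Action(), c.Scope(), c.Type(), c.Path(), len(c.Content()))
}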
vendor/github.com/theupdateframework/notary/client/changelist/changelist.go (generated, vendored, new file, 82 lines)
@@ -0,0 +1,82 @@
package changelist

// memChangelist implements a simple in-memory change list.
type memChangelist struct {
    changes []Change
}

// NewMemChangelist instantiates a new in-memory changelist
func NewMemChangelist() Changelist {
    return &memChangelist{}
}

// List returns a list of Changes
func (cl memChangelist) List() []Change {
    return cl.changes
}

// Add adds a change to the in-memory change list
func (cl *memChangelist) Add(c Change) error {
    cl.changes = append(cl.changes, c)
    return nil
}

// Location returns the string "memory"
func (cl memChangelist) Location() string {
    return "memory"
}

// Remove deletes the changes found at the given indices
func (cl *memChangelist) Remove(idxs []int) error {
    remove := make(map[int]struct{})
    for _, i := range idxs {
        remove[i] = struct{}{}
    }
    var keep []Change

    for i, c := range cl.changes {
        if _, ok := remove[i]; ok {
            continue
        }
        keep = append(keep, c)
    }
    cl.changes = keep
    return nil
}

// Clear empties the changelist.
func (cl *memChangelist) Clear(archive string) error {
    // appending to a nil list initializes it.
    cl.changes = nil
    return nil
}

// Close is a no-op in this in-memory change-list
func (cl *memChangelist) Close() error {
    return nil
}

// NewIterator returns an iterator over the changes currently in the list
func (cl *memChangelist) NewIterator() (ChangeIterator, error) {
    return &MemChangeListIterator{index: 0, collection: cl.changes}, nil
}

// MemChangeListIterator is a concrete instance of ChangeIterator
type MemChangeListIterator struct {
    index      int
    collection []Change // Same type as memChangelist.changes
}

// Next returns the next Change
func (m *MemChangeListIterator) Next() (item Change, err error) {
    if m.index >= len(m.collection) {
        return nil, IteratorBoundsError(m.index)
    }
    item = m.collection[m.index]
    m.index++
    return item, err
}

// HasNext indicates whether the iterator is exhausted
func (m *MemChangeListIterator) HasNext() bool {
    return m.index < len(m.collection)
}
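For reference, a hedged usage sketch of the in-memory implementation above (again, not part of the vendored code): it stages one change, walks the list with the HasNext/Next protocol, then removes and clears entries. The archive argument to Clear is ignored by this backend.

package main

import (
    "fmt"

    "github.com/theupdateframework/notary/client/changelist"
)

func main() {
    cl := changelist.NewMemChangelist()
    defer cl.Close()

    _ = cl.Add(changelist.NewTUFChange(
        changelist.ActionCreate, changelist.ScopeTargets,
        changelist.TypeTargetsTarget, "v1.0", []byte(`{}`)))

    // Iterate with HasNext/Next; overrunning the end would yield an
    // IteratorBoundsError.
    it, _ := cl.NewIterator()
    for it.HasNext() {
        c, err := it.Next()
        if err != nil {
            break
        }
        fmt.Println(c.Action(), c.Path())
    }

    // Remove by index, then drop everything that remains.
    _ = cl.Remove([]int{0})
    _ = cl.Clear("")
}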
vendor/github.com/theupdateframework/notary/client/changelist/file_changelist.go (generated, vendored, new file, 208 lines)
@@ -0,0 +1,208 @@
package changelist

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "sort"
    "time"

    "github.com/docker/distribution/uuid"
    "github.com/sirupsen/logrus"
)

// FileChangelist stores all the changes as files
type FileChangelist struct {
    dir string
}

// NewFileChangelist is a convenience method for returning FileChangelists
func NewFileChangelist(dir string) (*FileChangelist, error) {
    logrus.Debug("Making dir path: ", dir)
    err := os.MkdirAll(dir, 0700)
    if err != nil {
        return nil, err
    }
    return &FileChangelist{dir: dir}, nil
}

// getFileNames reads directory, filtering out child directories
func getFileNames(dirName string) ([]os.FileInfo, error) {
    var dirListing, fileInfos []os.FileInfo
    dir, err := os.Open(dirName)
    if err != nil {
        return fileInfos, err
    }
    defer func() {
        _ = dir.Close()
    }()

    dirListing, err = dir.Readdir(0)
    if err != nil {
        return fileInfos, err
    }
    for _, f := range dirListing {
        if f.IsDir() {
            continue
        }
        fileInfos = append(fileInfos, f)
    }
    sort.Sort(fileChanges(fileInfos))
    return fileInfos, nil
}

// unmarshalFile reads a JSON-formatted file from disk and converts it to a TUFChange struct
func unmarshalFile(dirname string, f os.FileInfo) (*TUFChange, error) {
    c := &TUFChange{}
    raw, err := ioutil.ReadFile(filepath.Join(dirname, f.Name()))
    if err != nil {
        return c, err
    }
    err = json.Unmarshal(raw, c)
    if err != nil {
        return c, err
    }
    return c, nil
}

// List returns a list of sorted changes
func (cl FileChangelist) List() []Change {
    var changes []Change
    fileInfos, err := getFileNames(cl.dir)
    if err != nil {
        return changes
    }
    for _, f := range fileInfos {
        c, err := unmarshalFile(cl.dir, f)
        if err != nil {
            logrus.Warn(err.Error())
            continue
        }
        changes = append(changes, c)
    }
    return changes
}

// Add adds a change to the file change list
func (cl FileChangelist) Add(c Change) error {
    cJSON, err := json.Marshal(c)
    if err != nil {
        return err
    }
    filename := fmt.Sprintf("%020d_%s.change", time.Now().UnixNano(), uuid.Generate())
    return ioutil.WriteFile(filepath.Join(cl.dir, filename), cJSON, 0600)
}

// Remove deletes the changes found at the given indices
func (cl FileChangelist) Remove(idxs []int) error {
    fileInfos, err := getFileNames(cl.dir)
    if err != nil {
        return err
    }
    remove := make(map[int]struct{})
    for _, i := range idxs {
        remove[i] = struct{}{}
    }
    for i, c := range fileInfos {
        if _, ok := remove[i]; ok {
            file := filepath.Join(cl.dir, c.Name())
            if err := os.Remove(file); err != nil {
                logrus.Errorf("could not remove change %d: %s", i, err.Error())
            }
        }
    }
    return nil
}

// Clear clears the change list
// N.B. archiving not currently implemented
func (cl FileChangelist) Clear(archive string) error {
    dir, err := os.Open(cl.dir)
    if err != nil {
        return err
    }
    defer func() {
        _ = dir.Close()
    }()

    files, err := dir.Readdir(0)
    if err != nil {
        return err
    }
    for _, f := range files {
        os.Remove(filepath.Join(cl.dir, f.Name()))
    }
    return nil
}

// Close is a no-op
func (cl FileChangelist) Close() error {
    // Nothing to do here
    return nil
}

// Location returns the file path to the changelist
func (cl FileChangelist) Location() string {
    return cl.dir
}

// NewIterator creates an iterator from FileChangelist
func (cl FileChangelist) NewIterator() (ChangeIterator, error) {
    fileInfos, err := getFileNames(cl.dir)
    if err != nil {
        return &FileChangeListIterator{}, err
    }
    return &FileChangeListIterator{dirname: cl.dir, collection: fileInfos}, nil
}

// IteratorBoundsError is an Error type used by Next()
type IteratorBoundsError int

// Error implements the Error interface
func (e IteratorBoundsError) Error() string {
    return fmt.Sprintf("Iterator index (%d) out of bounds", e)
}

// FileChangeListIterator is a concrete instance of ChangeIterator
type FileChangeListIterator struct {
    index      int
    dirname    string
    collection []os.FileInfo
}

// Next returns the next Change in the FileChangeList
func (m *FileChangeListIterator) Next() (item Change, err error) {
    if m.index >= len(m.collection) {
        return nil, IteratorBoundsError(m.index)
    }
    f := m.collection[m.index]
    m.index++
    item, err = unmarshalFile(m.dirname, f)
    return
}

// HasNext indicates whether iterator is exhausted
func (m *FileChangeListIterator) HasNext() bool {
    return m.index < len(m.collection)
}

type fileChanges []os.FileInfo

// Len returns the length of a file change list
func (cs fileChanges) Len() int {
    return len(cs)
}

// Less compares the names of two different file changes
func (cs fileChanges) Less(i, j int) bool {
    return cs[i].Name() < cs[j].Name()
}

// Swap swaps the position of two file changes
func (cs fileChanges) Swap(i, j int) {
    tmp := cs[i]
    cs[i] = cs[j]
    cs[j] = tmp
}
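A similar sketch for the file-backed implementation, assuming only the APIs above: each Add writes a `<zero-padded-nanos>_<uuid>.change` JSON file, so lexicographic sorting of filenames (via fileChanges) yields chronological order for List and iteration. The temp directory is a placeholder; this is not part of the vendored code.

package main

import (
    "fmt"
    "io/ioutil"

    "github.com/theupdateframework/notary/client/changelist"
)

func main() {
    dir, err := ioutil.TempDir("", "changelist-demo")
    if err != nil {
        panic(err)
    }

    cl, err := changelist.NewFileChangelist(dir)
    if err != nil {
        panic(err)
    }
    defer cl.Close()

    // Persist one change as a timestamped .change file on disk.
    if err := cl.Add(changelist.NewTUFChange(
        changelist.ActionCreate, changelist.ScopeTargets,
        changelist.TypeTargetsTarget, "v1.0", []byte(`{}`))); err != nil {
        panic(err)
    }

    // List re-reads and unmarshals the files in sorted (chronological) order.
    for _, c := range cl.List() {
        fmt.Println(c.Action(), c.Scope(), c.Path())
    }
}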
vendor/github.com/theupdateframework/notary/client/changelist/interface.go (generated, vendored, new file, 78 lines)
@@ -0,0 +1,78 @@
package changelist

import "github.com/theupdateframework/notary/tuf/data"

// Changelist is the interface for all TUF change lists
type Changelist interface {
    // List returns the ordered list of changes
    // currently stored
    List() []Change

    // Add appends the provided change to
    // the list of changes
    Add(Change) error

    // Clear empties the current change list.
    // Archive may be provided as a directory path
    // to save a copy of the changelist in that location
    Clear(archive string) error

    // Remove deletes the changes corresponding with the indices given
    Remove(idxs []int) error

    // Close synchronizes any pending writes to the underlying
    // storage and closes the file/connection
    Close() error

    // NewIterator returns an iterator for walking through the list
    // of changes currently stored
    NewIterator() (ChangeIterator, error)

    // Location returns the place the changelist is stored
    Location() string
}

const (
    // ActionCreate represents a Create action
    ActionCreate = "create"
    // ActionUpdate represents an Update action
    ActionUpdate = "update"
    // ActionDelete represents a Delete action
    ActionDelete = "delete"
)

// Change is the interface for a TUF Change
type Change interface {
    // "create","update", or "delete"
    Action() string

    // Where the change should be made.
    // For TUF this will be the role
    Scope() data.RoleName

    // The content type being affected.
    // For TUF this will be "target", or "delegation".
    // If the type is "delegation", the Scope will be
    // used to determine if a root role is being updated
    // or a target delegation.
    Type() string

    // Path indicates the entry within a role to be affected by the
    // change. For targets, this is simply the target's path,
    // for delegations it's the delegated role name.
    Path() string

    // Serialized content that the interpreter of a changelist
    // can use to apply the change.
    // For TUF this will be the serialized JSON that needs
    // to be inserted or merged. In the case of a "delete"
    // action, it will be nil.
    Content() []byte
}

// ChangeIterator is the interface for iterating across collections of
// TUF Change items
type ChangeIterator interface {
    Next() (Change, error)
    HasNext() bool
}
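Because both the in-memory and file-backed implementations satisfy these interfaces, callers can stay store-agnostic. A small illustrative helper (not from the vendored code) that tallies pending changes per scope:

package main

import (
    "fmt"

    "github.com/theupdateframework/notary/client/changelist"
    "github.com/theupdateframework/notary/tuf/data"
)

// countByScope tallies pending changes per TUF role without caring
// whether the backing store is memory or files on disk.
func countByScope(cl changelist.Changelist) (map[data.RoleName]int, error) {
    counts := make(map[data.RoleName]int)
    it, err := cl.NewIterator()
    if err != nil {
        return nil, err
    }
    for it.HasNext() {
        c, err := it.Next()
        if err != nil {
            return nil, err
        }
        counts[c.Scope()]++
    }
    return counts, nil
}

func main() {
    cl := changelist.NewMemChangelist()
    _ = cl.Add(changelist.NewTUFChange(changelist.ActionCreate,
        changelist.ScopeTargets, changelist.TypeTargetsTarget, "v1.0", nil))
    counts, _ := countByScope(cl)
    fmt.Println(counts)
}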
vendor/github.com/theupdateframework/notary/client/client.go (generated, vendored, new file, 998 lines)
@@ -0,0 +1,998 @@
// Package client implements everything required for interacting with a Notary repository.
package client

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/http"
    "os"
    "path/filepath"
    "time"

    canonicaljson "github.com/docker/go/canonical/json"
    "github.com/sirupsen/logrus"
    "github.com/theupdateframework/notary"
    "github.com/theupdateframework/notary/client/changelist"
    "github.com/theupdateframework/notary/cryptoservice"
    store "github.com/theupdateframework/notary/storage"
    "github.com/theupdateframework/notary/trustpinning"
    "github.com/theupdateframework/notary/tuf"
    "github.com/theupdateframework/notary/tuf/data"
    "github.com/theupdateframework/notary/tuf/signed"
    "github.com/theupdateframework/notary/tuf/utils"
)

const (
    tufDir = "tuf"

    // SignWithAllOldVersions is a sentinel constant for LegacyVersions flag
    SignWithAllOldVersions = -1
)

func init() {
    data.SetDefaultExpiryTimes(data.NotaryDefaultExpiries)
}

// repository stores all the information needed to operate on a notary repository.
type repository struct {
    gun            data.GUN
    baseURL        string
    changelist     changelist.Changelist
    cache          store.MetadataStore
    remoteStore    store.RemoteStore
    cryptoService  signed.CryptoService
    tufRepo        *tuf.Repo
    invalid        *tuf.Repo // known data that was parsable but deemed invalid
    roundTrip      http.RoundTripper
    trustPinning   trustpinning.TrustPinConfig
    LegacyVersions int // number of versions back to fetch roots to sign with
}

// NewFileCachedRepository is a wrapper for NewRepository that initializes
// a file cache from the provided repository, local config information and a crypto service.
// It also retrieves the remote store associated to the base directory under where all the
// trust files will be stored (this normally defaults to "~/.notary" or "~/.docker/trust"
// when enabling Docker content trust) and the specified GUN.
//
// In case of a nil RoundTripper, a default offline store is used instead.
func NewFileCachedRepository(baseDir string, gun data.GUN, baseURL string, rt http.RoundTripper,
    retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) (Repository, error) {

    cache, err := store.NewFileStore(
        filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()), "metadata"),
        "json",
    )
    if err != nil {
        return nil, err
    }

    keyStores, err := getKeyStores(baseDir, retriever)
    if err != nil {
        return nil, err
    }

    cryptoService := cryptoservice.NewCryptoService(keyStores...)

    remoteStore, err := getRemoteStore(baseURL, gun, rt)
    if err != nil {
        // baseURL is syntactically invalid
        return nil, err
    }

    cl, err := changelist.NewFileChangelist(filepath.Join(
        filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()), "changelist"),
    ))
    if err != nil {
        return nil, err
    }

    return NewRepository(gun, baseURL, remoteStore, cache, trustPinning, cryptoService, cl)
}

// NewRepository is the base method that returns a new notary repository.
// It expects an initialized cache. In case of a nil remote store, a default
// offline store is used.
func NewRepository(gun data.GUN, baseURL string, remoteStore store.RemoteStore, cache store.MetadataStore,
    trustPinning trustpinning.TrustPinConfig, cryptoService signed.CryptoService, cl changelist.Changelist) (Repository, error) {

    // Repo's remote store is either a valid remote store or an OfflineStore
    if remoteStore == nil {
        remoteStore = store.OfflineStore{}
    }

    if cache == nil {
        return nil, fmt.Errorf("got an invalid cache (nil metadata store)")
    }

    nRepo := &repository{
        gun:            gun,
        baseURL:        baseURL,
        changelist:     cl,
        cache:          cache,
        remoteStore:    remoteStore,
        cryptoService:  cryptoService,
        trustPinning:   trustPinning,
        LegacyVersions: 0, // By default, don't sign with legacy roles
    }

    return nRepo, nil
}

// GetGUN is a getter for the GUN object from a Repository
func (r *repository) GetGUN() data.GUN {
    return r.gun
}

func (r *repository) updateTUF(forWrite bool) error {
    repo, invalid, err := LoadTUFRepo(TUFLoadOptions{
        GUN:                    r.gun,
        TrustPinning:           r.trustPinning,
        CryptoService:          r.cryptoService,
        Cache:                  r.cache,
        RemoteStore:            r.remoteStore,
        AlwaysCheckInitialized: forWrite,
    })
    if err != nil {
        return err
    }
    r.tufRepo = repo
    r.invalid = invalid
    return nil
}

// ListTargets calls update first before listing targets
func (r *repository) ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) {
    if err := r.updateTUF(false); err != nil {
        return nil, err
    }
    return NewReadOnly(r.tufRepo).ListTargets(roles...)
}

// GetTargetByName calls update first before getting target by name
func (r *repository) GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) {
    if err := r.updateTUF(false); err != nil {
        return nil, err
    }
    return NewReadOnly(r.tufRepo).GetTargetByName(name, roles...)
}

// GetAllTargetMetadataByName calls update first before getting targets by name
func (r *repository) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) {
    if err := r.updateTUF(false); err != nil {
        return nil, err
    }
    return NewReadOnly(r.tufRepo).GetAllTargetMetadataByName(name)
}

// ListRoles calls update first before getting roles
func (r *repository) ListRoles() ([]RoleWithSignatures, error) {
    if err := r.updateTUF(false); err != nil {
        return nil, err
    }
    return NewReadOnly(r.tufRepo).ListRoles()
}

// GetDelegationRoles calls update first before getting all delegation roles
func (r *repository) GetDelegationRoles() ([]data.Role, error) {
    if err := r.updateTUF(false); err != nil {
        return nil, err
    }
    return NewReadOnly(r.tufRepo).GetDelegationRoles()
}

// NewTarget is a helper method that returns a Target
func NewTarget(targetName, targetPath string, targetCustom *canonicaljson.RawMessage) (*Target, error) {
    b, err := ioutil.ReadFile(targetPath)
    if err != nil {
        return nil, err
    }

    meta, err := data.NewFileMeta(bytes.NewBuffer(b), data.NotaryDefaultHashes...)
    if err != nil {
        return nil, err
    }

    return &Target{Name: targetName, Hashes: meta.Hashes, Length: meta.Length, Custom: targetCustom}, nil
}

// rootCertKey generates the corresponding certificate for the private key given the privKey and repo's GUN
func rootCertKey(gun data.GUN, privKey data.PrivateKey) (data.PublicKey, error) {
    // Hard-coded policy: the generated certificate expires in 10 years.
    startTime := time.Now()
    cert, err := cryptoservice.GenerateCertificate(
        privKey, gun, startTime, startTime.Add(notary.Year*10))
    if err != nil {
        return nil, err
    }

    x509PublicKey := utils.CertToKey(cert)
    if x509PublicKey == nil {
        return nil, fmt.Errorf("cannot generate public key from private key with id: %v and algorithm: %v", privKey.ID(), privKey.Algorithm())
    }

    return x509PublicKey, nil
}

// GetCryptoService is the getter for the repository's CryptoService
func (r *repository) GetCryptoService() signed.CryptoService {
    return r.cryptoService
}

// initialize initializes the notary repository with a set of root keys, root certificates and roles.
func (r *repository) initialize(rootKeyIDs []string, rootCerts []data.PublicKey, serverManagedRoles ...data.RoleName) error {

    // currently we only support server managing timestamps and snapshots, and
    // nothing else - timestamps are always managed by the server, and implicit
    // (do not have to be passed in as part of `serverManagedRoles`, so that
    // the API of Initialize doesn't change).
    var serverManagesSnapshot bool
    locallyManagedKeys := []data.RoleName{
        data.CanonicalTargetsRole,
        data.CanonicalSnapshotRole,
        // root is also locally managed, but that should have been created
        // already
    }
    remotelyManagedKeys := []data.RoleName{data.CanonicalTimestampRole}
    for _, role := range serverManagedRoles {
        switch role {
        case data.CanonicalTimestampRole:
            continue // timestamp is already in the right place
        case data.CanonicalSnapshotRole:
            // because we put Snapshot last
            locallyManagedKeys = []data.RoleName{data.CanonicalTargetsRole}
            remotelyManagedKeys = append(
                remotelyManagedKeys, data.CanonicalSnapshotRole)
            serverManagesSnapshot = true
        default:
            return ErrInvalidRemoteRole{Role: role}
        }
    }

    // gets valid public keys corresponding to the rootKeyIDs or generate if necessary
    var publicKeys []data.PublicKey
    var err error
    if len(rootCerts) == 0 {
        publicKeys, err = r.createNewPublicKeyFromKeyIDs(rootKeyIDs)
    } else {
        publicKeys, err = r.publicKeysOfKeyIDs(rootKeyIDs, rootCerts)
    }
    if err != nil {
        return err
    }

    // initialize repo with public keys
    rootRole, targetsRole, snapshotRole, timestampRole, err := r.initializeRoles(
        publicKeys,
        locallyManagedKeys,
        remotelyManagedKeys,
    )
    if err != nil {
        return err
    }

    r.tufRepo = tuf.NewRepo(r.GetCryptoService())

    if err := r.tufRepo.InitRoot(
        rootRole,
        timestampRole,
        snapshotRole,
        targetsRole,
        false,
    ); err != nil {
        logrus.Debug("Error on InitRoot: ", err.Error())
        return err
    }
    if _, err := r.tufRepo.InitTargets(data.CanonicalTargetsRole); err != nil {
        logrus.Debug("Error on InitTargets: ", err.Error())
        return err
    }
    if err := r.tufRepo.InitSnapshot(); err != nil {
        logrus.Debug("Error on InitSnapshot: ", err.Error())
        return err
    }

    return r.saveMetadata(serverManagesSnapshot)
}

// createNewPublicKeyFromKeyIDs generates a set of public keys corresponding to the given list of
// key IDs existing in the repository's CryptoService.
// The public keys returned are ordered to correspond to the keyIDs.
func (r *repository) createNewPublicKeyFromKeyIDs(keyIDs []string) ([]data.PublicKey, error) {
    publicKeys := []data.PublicKey{}

    privKeys, err := getAllPrivKeys(keyIDs, r.GetCryptoService())
    if err != nil {
        return nil, err
    }

    for _, privKey := range privKeys {
        rootKey, err := rootCertKey(r.gun, privKey)
        if err != nil {
            return nil, err
        }
        publicKeys = append(publicKeys, rootKey)
    }
    return publicKeys, nil
}

// publicKeysOfKeyIDs confirms that the public keys and private keys (by key IDs) form valid,
// strictly ordered key pairs (e.g. keyIDs[0] must match pubKeys[0], keyIDs[1] must match
// pubKeys[1], and so on), and returns an error if they mismatch.
func (r *repository) publicKeysOfKeyIDs(keyIDs []string, pubKeys []data.PublicKey) ([]data.PublicKey, error) {
    if len(keyIDs) != len(pubKeys) {
        err := fmt.Errorf("require matching number of keyIDs and public keys but got %d IDs and %d public keys", len(keyIDs), len(pubKeys))
        return nil, err
    }

    if err := matchKeyIdsWithPubKeys(r, keyIDs, pubKeys); err != nil {
        return nil, fmt.Errorf("could not obtain public key from IDs: %v", err)
    }
    return pubKeys, nil
}

// matchKeyIdsWithPubKeys validates that the private keys (represented by their IDs) and the public keys
// form matching key pairs
func matchKeyIdsWithPubKeys(r *repository, ids []string, pubKeys []data.PublicKey) error {
    for i := 0; i < len(ids); i++ {
        privKey, _, err := r.GetCryptoService().GetPrivateKey(ids[i])
        if err != nil {
            return fmt.Errorf("could not get the private key matching id %v: %v", ids[i], err)
        }

        pubKey := pubKeys[i]
        err = signed.VerifyPublicKeyMatchesPrivateKey(privKey, pubKey)
        if err != nil {
            return err
        }
    }
    return nil
}

// Initialize creates a new repository by using rootKey as the root Key for the
// TUF repository. The server must be reachable (and is asked to generate a
// timestamp key and possibly other serverManagedRoles), but the created repository
// result is only stored on local disk, not published to the server. To do that,
// use r.Publish() eventually.
func (r *repository) Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error {
    return r.initialize(rootKeyIDs, nil, serverManagedRoles...)
}

type errKeyNotFound struct{}

func (errKeyNotFound) Error() string {
    return fmt.Sprintf("cannot find matching private key id")
}

// keyExistsInList returns nil if the canonical ID of the given public key is
// present in ids, and errKeyNotFound otherwise
func keyExistsInList(cert data.PublicKey, ids map[string]bool) error {
    pubKeyID, err := utils.CanonicalKeyID(cert)
    if err != nil {
        return fmt.Errorf("failed to obtain the public key id from the given certificate: %v", err)
    }
    if _, ok := ids[pubKeyID]; ok {
        return nil
    }
    return errKeyNotFound{}
}

// InitializeWithCertificate initializes the repository with root keys and their corresponding certificates
func (r *repository) InitializeWithCertificate(rootKeyIDs []string, rootCerts []data.PublicKey,
    serverManagedRoles ...data.RoleName) error {

    // If we explicitly pass in certificate(s) but not key, then look keys up using certificate
    if len(rootKeyIDs) == 0 && len(rootCerts) != 0 {
        rootKeyIDs = []string{}
        availableRootKeyIDs := make(map[string]bool)
        for _, k := range r.GetCryptoService().ListKeys(data.CanonicalRootRole) {
            availableRootKeyIDs[k] = true
        }

        for _, cert := range rootCerts {
            if err := keyExistsInList(cert, availableRootKeyIDs); err != nil {
                return fmt.Errorf("error initializing repository with certificate: %v", err)
            }
            keyID, _ := utils.CanonicalKeyID(cert)
            rootKeyIDs = append(rootKeyIDs, keyID)
        }
    }
    return r.initialize(rootKeyIDs, rootCerts, serverManagedRoles...)
}

func (r *repository) initializeRoles(rootKeys []data.PublicKey, localRoles, remoteRoles []data.RoleName) (
    root, targets, snapshot, timestamp data.BaseRole, err error) {
    root = data.NewBaseRole(
        data.CanonicalRootRole,
        notary.MinThreshold,
        rootKeys...,
    )

    // we want to create all the local keys first so we don't have to
    // make unnecessary network calls
    for _, role := range localRoles {
        // This is currently hardcoding the keys to ECDSA.
        var key data.PublicKey
        key, err = r.GetCryptoService().Create(role, r.gun, data.ECDSAKey)
        if err != nil {
            return
        }
        switch role {
        case data.CanonicalSnapshotRole:
            snapshot = data.NewBaseRole(
                role,
                notary.MinThreshold,
                key,
            )
        case data.CanonicalTargetsRole:
            targets = data.NewBaseRole(
                role,
                notary.MinThreshold,
                key,
            )
        }
    }

    remote := r.getRemoteStore()

    for _, role := range remoteRoles {
        // This key is generated by the remote server.
        var key data.PublicKey
        key, err = getRemoteKey(role, remote)
        if err != nil {
            return
        }
        logrus.Debugf("got remote %s %s key with keyID: %s",
            role, key.Algorithm(), key.ID())
        switch role {
        case data.CanonicalSnapshotRole:
            snapshot = data.NewBaseRole(
                role,
                notary.MinThreshold,
                key,
            )
        case data.CanonicalTimestampRole:
            timestamp = data.NewBaseRole(
                role,
                notary.MinThreshold,
                key,
            )
        }
    }
    return root, targets, snapshot, timestamp, nil
}

// addChange adds a TUF Change template to the given roles
func addChange(cl changelist.Changelist, c changelist.Change, roles ...data.RoleName) error {
    if len(roles) == 0 {
        roles = []data.RoleName{data.CanonicalTargetsRole}
    }

    var changes []changelist.Change
    for _, role := range roles {
        // Ensure we can only add targets to the CanonicalTargetsRole,
        // or a Delegation role (which is <CanonicalTargetsRole>/something else)
        if role != data.CanonicalTargetsRole && !data.IsDelegation(role) && !data.IsWildDelegation(role) {
            return data.ErrInvalidRole{
                Role:   role,
                Reason: "cannot add targets to this role",
            }
        }

        changes = append(changes, changelist.NewTUFChange(
            c.Action(),
            role,
            c.Type(),
            c.Path(),
            c.Content(),
        ))
    }

    for _, c := range changes {
        if err := cl.Add(c); err != nil {
            return err
        }
    }
    return nil
}

// AddTarget creates new changelist entries to add a target to the given roles
// in the repository when the changelist gets applied at publish time.
// If roles are unspecified, the default role is "targets".
func (r *repository) AddTarget(target *Target, roles ...data.RoleName) error {
    if len(target.Hashes) == 0 {
        return fmt.Errorf("no hashes specified for target \"%s\"", target.Name)
    }
    logrus.Debugf("Adding target \"%s\" with sha256 \"%x\" and size %d bytes.\n", target.Name, target.Hashes["sha256"], target.Length)

    meta := data.FileMeta{Length: target.Length, Hashes: target.Hashes, Custom: target.Custom}
    metaJSON, err := json.Marshal(meta)
    if err != nil {
        return err
    }

    template := changelist.NewTUFChange(
        changelist.ActionCreate, "", changelist.TypeTargetsTarget,
        target.Name, metaJSON)
    return addChange(r.changelist, template, roles...)
}

// RemoveTarget creates new changelist entries to remove a target from the given
// roles in the repository when the changelist gets applied at publish time.
// If roles are unspecified, the default role is "targets".
func (r *repository) RemoveTarget(targetName string, roles ...data.RoleName) error {
    logrus.Debugf("Removing target \"%s\"", targetName)
    template := changelist.NewTUFChange(changelist.ActionDelete, "",
        changelist.TypeTargetsTarget, targetName, nil)
    return addChange(r.changelist, template, roles...)
}

// GetChangelist returns the list of the repository's unpublished changes
func (r *repository) GetChangelist() (changelist.Changelist, error) {
    return r.changelist, nil
}

// getRemoteStore returns the remoteStore of a repository if valid,
// or an OfflineStore otherwise
func (r *repository) getRemoteStore() store.RemoteStore {
    if r.remoteStore != nil {
        return r.remoteStore
    }

    r.remoteStore = &store.OfflineStore{}

    return r.remoteStore
}

// Publish pushes the local changes in signed material to the remote notary-server.
// Conceptually it performs an operation similar to a `git rebase`.
func (r *repository) Publish() error {
    if err := r.publish(r.changelist); err != nil {
        return err
    }
    if err := r.changelist.Clear(""); err != nil {
        // This is not a critical problem when only a single host is pushing,
        // but will cause weird behaviour if changelist cleanup is failing
        // and there are multiple hosts writing to the repo.
        logrus.Warn("Unable to clear changelist. You may want to manually delete the folder ", r.changelist.Location())
    }
    return nil
}

// publish pushes the changes in the given changelist to the remote notary-server.
// Conceptually it performs an operation similar to a `git rebase`.
func (r *repository) publish(cl changelist.Changelist) error {
    var initialPublish bool
    // update first before publishing
    if err := r.updateTUF(true); err != nil {
        // If the remote is not aware of the repo, then this is being published
        // for the first time. Try to initialize the repository before publishing.
        if _, ok := err.(ErrRepositoryNotExist); ok {
            err := r.bootstrapRepo()
            if _, ok := err.(store.ErrMetaNotFound); ok {
                logrus.Infof("No TUF data found locally or remotely - initializing repository %s for the first time", r.gun.String())
                err = r.Initialize(nil)
            }

            if err != nil {
                logrus.WithError(err).Debugf("Unable to load or initialize repository during first publish: %s", err.Error())
                return err
            }

            // Ensure we will push the initial root and targets file. Either or
            // both of the root and targets may not be marked as Dirty, since
            // there may not be any changes that update them, so use a
            // different boolean.
            initialPublish = true
        } else {
            // We could not update, so we cannot publish.
            logrus.Error("Could not publish Repository since we could not update: ", err.Error())
            return err
        }
    }
    // apply the changelist to the repo
    if err := applyChangelist(r.tufRepo, r.invalid, cl); err != nil {
        logrus.Debug("Error applying changelist")
        return err
    }

    // these are the TUF files we will need to update, serialized as JSON before
    // we send anything to remote
    updatedFiles := make(map[data.RoleName][]byte)

    // Fetch old keys to support old clients
    legacyKeys, err := r.oldKeysForLegacyClientSupport(r.LegacyVersions, initialPublish)
    if err != nil {
        return err
    }

    // check if our root file is nearing expiry or dirty. Resign if it is. If
    // root is not dirty but we are publishing for the first time, then just
    // publish the existing root we have.
    if err := signRootIfNecessary(updatedFiles, r.tufRepo, legacyKeys, initialPublish); err != nil {
        return err
    }

    if err := signTargets(updatedFiles, r.tufRepo, initialPublish); err != nil {
        return err
    }

    // if we initialized the repo while designating the server as the snapshot
    // signer, then there won't be a snapshots file. However, we might now
    // have a local key (if there was a rotation), so initialize one.
    if r.tufRepo.Snapshot == nil {
        if err := r.tufRepo.InitSnapshot(); err != nil {
            return err
        }
    }

    if snapshotJSON, err := serializeCanonicalRole(
        r.tufRepo, data.CanonicalSnapshotRole, nil); err == nil {
        // Only update the snapshot if we've successfully signed it.
        updatedFiles[data.CanonicalSnapshotRole] = snapshotJSON
    } else if signErr, ok := err.(signed.ErrInsufficientSignatures); ok && signErr.FoundKeys == 0 {
        // If signing fails due to us not having the snapshot key, then
        // assume the server is going to sign, and do not include any snapshot
        // data.
        logrus.Debugf("Client does not have the key to sign snapshot. " +
            "Assuming that server should sign the snapshot.")
    } else {
        logrus.Debugf("Client was unable to sign the snapshot: %s", err.Error())
        return err
    }

    remote := r.getRemoteStore()

    return remote.SetMulti(data.MetadataRoleMapToStringMap(updatedFiles))
}

func signRootIfNecessary(updates map[data.RoleName][]byte, repo *tuf.Repo, extraSigningKeys data.KeyList, initialPublish bool) error {
    if len(extraSigningKeys) > 0 {
        repo.Root.Dirty = true
    }
    if nearExpiry(repo.Root.Signed.SignedCommon) || repo.Root.Dirty {
        rootJSON, err := serializeCanonicalRole(repo, data.CanonicalRootRole, extraSigningKeys)
        if err != nil {
            return err
        }
        updates[data.CanonicalRootRole] = rootJSON
    } else if initialPublish {
        rootJSON, err := repo.Root.MarshalJSON()
        if err != nil {
            return err
        }
        updates[data.CanonicalRootRole] = rootJSON
    }
    return nil
}

// oldKeysForLegacyClientSupport fetches back `legacyVersions` versions of the root file
// and collects their root public keys. This includes old `root` roles as well as legacy
// versioned root roles, e.g. `1.root`.
func (r *repository) oldKeysForLegacyClientSupport(legacyVersions int, initialPublish bool) (data.KeyList, error) {
    if initialPublish {
        return nil, nil
    }

    var oldestVersion int
    prevVersion := r.tufRepo.Root.Signed.Version

    if legacyVersions == SignWithAllOldVersions {
        oldestVersion = 1
    } else {
        oldestVersion = r.tufRepo.Root.Signed.Version - legacyVersions
    }

    if oldestVersion < 1 {
        oldestVersion = 1
    }

    if prevVersion <= 1 || oldestVersion == prevVersion {
        return nil, nil
    }
    oldKeys := make(map[string]data.PublicKey)

    c, err := bootstrapClient(TUFLoadOptions{
        GUN:                    r.gun,
        TrustPinning:           r.trustPinning,
        CryptoService:          r.cryptoService,
        Cache:                  r.cache,
        RemoteStore:            r.remoteStore,
        AlwaysCheckInitialized: true,
    })
    // require a server connection to fetch old roots
    if err != nil {
        return nil, err
    }

    for v := prevVersion; v >= oldestVersion; v-- {
        logrus.Debugf("fetching old keys from version %d", v)
        // fetch old root version
        versionedRole := fmt.Sprintf("%d.%s", v, data.CanonicalRootRole.String())

        raw, err := c.remote.GetSized(versionedRole, -1)
        if err != nil {
            logrus.Debugf("error downloading %s: %s", versionedRole, err)
            continue
        }

        signedOldRoot := &data.Signed{}
        if err := json.Unmarshal(raw, signedOldRoot); err != nil {
            return nil, err
        }
        oldRootVersion, err := data.RootFromSigned(signedOldRoot)
        if err != nil {
            return nil, err
        }

        // extract legacy versioned root keys
        oldRootVersionKeys := getOldRootPublicKeys(oldRootVersion)
        for _, oldKey := range oldRootVersionKeys {
            oldKeys[oldKey.ID()] = oldKey
        }
    }
    oldKeyList := make(data.KeyList, 0, len(oldKeys))
    for _, key := range oldKeys {
        oldKeyList = append(oldKeyList, key)
    }
    return oldKeyList, nil
}

// getOldRootPublicKeys gets all the saved previous roles' keys < the current root version
func getOldRootPublicKeys(root *data.SignedRoot) data.KeyList {
    rootRole, err := root.BuildBaseRole(data.CanonicalRootRole)
    if err != nil {
        return nil
    }
    return rootRole.ListKeys()
}

func signTargets(updates map[data.RoleName][]byte, repo *tuf.Repo, initialPublish bool) error {
    // iterate through all the targets files - if they are dirty, sign and update
    for roleName, roleObj := range repo.Targets {
        if roleObj.Dirty || (roleName == data.CanonicalTargetsRole && initialPublish) {
            targetsJSON, err := serializeCanonicalRole(repo, roleName, nil)
            if err != nil {
                return err
            }
            updates[roleName] = targetsJSON
        }
    }
    return nil
}

// bootstrapRepo loads the repository from the local file system (i.e.
// a not yet published repo or a possibly obsolete local copy) into
// r.tufRepo. This attempts to load metadata for all roles. Since server
// snapshots are supported, if the snapshot metadata fails to load, that's ok.
// This assumes that bootstrapRepo is only used by Publish() or RotateKey()
func (r *repository) bootstrapRepo() error {
    b := tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), r.trustPinning)

    logrus.Debugf("Loading trusted collection.")

    for _, role := range data.BaseRoles {
        jsonBytes, err := r.cache.GetSized(role.String(), store.NoSizeLimit)
        if err != nil {
            if _, ok := err.(store.ErrMetaNotFound); ok &&
                // server snapshots are supported, and server timestamp management
                // is required, so if either of these fail to load that's ok - especially
                // if the repo is new
                role == data.CanonicalSnapshotRole || role == data.CanonicalTimestampRole {
                continue
            }
            return err
        }
        if err := b.Load(role, jsonBytes, 1, true); err != nil {
            return err
        }
    }

    tufRepo, _, err := b.Finish()
    if err == nil {
        r.tufRepo = tufRepo
    }
    return nil
}

// saveMetadata saves contents of r.tufRepo onto the local disk, creating
// signatures as necessary, possibly prompting for passphrases.
func (r *repository) saveMetadata(ignoreSnapshot bool) error {
    logrus.Debugf("Saving changes to Trusted Collection.")

    rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole, nil)
    if err != nil {
        return err
    }
    err = r.cache.Set(data.CanonicalRootRole.String(), rootJSON)
    if err != nil {
        return err
    }

    targetsToSave := make(map[data.RoleName][]byte)
    for t := range r.tufRepo.Targets {
        signedTargets, err := r.tufRepo.SignTargets(t, data.DefaultExpires(data.CanonicalTargetsRole))
        if err != nil {
            return err
        }
        targetsJSON, err := json.Marshal(signedTargets)
        if err != nil {
            return err
        }
        targetsToSave[t] = targetsJSON
    }

    for role, blob := range targetsToSave {
        // If the parent directory does not exist, the cache.Set will create it
        r.cache.Set(role.String(), blob)
    }

    if ignoreSnapshot {
        return nil
    }

    snapshotJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalSnapshotRole, nil)
    if err != nil {
        return err
    }

    return r.cache.Set(data.CanonicalSnapshotRole.String(), snapshotJSON)
}

// RotateKey removes all existing keys associated with the role. If no keys are
// specified in keyList, then this creates and adds one new key or delegates
// managing the key to the server. If key(s) are specified by keyList, then they are
// used for signing the role.
// These changes are staged in a changelist until publish is called.
func (r *repository) RotateKey(role data.RoleName, serverManagesKey bool, keyList []string) error {
    if err := checkRotationInput(role, serverManagesKey); err != nil {
        return err
    }

    pubKeyList, err := r.pubKeyListForRotation(role, serverManagesKey, keyList)
    if err != nil {
        return err
    }

    cl := changelist.NewMemChangelist()
    if err := r.rootFileKeyChange(cl, role, changelist.ActionCreate, pubKeyList); err != nil {
        return err
    }
    return r.publish(cl)
}

// pubKeyListForRotation returns the list of current keys to use, given a set of new keys
// to rotate to and a set of keys to drop
func (r *repository) pubKeyListForRotation(role data.RoleName, serverManaged bool, newKeys []string) (pubKeyList data.KeyList, err error) {
    var pubKey data.PublicKey

    // If server manages the key being rotated, request a rotation and return the new key
    if serverManaged {
        remote := r.getRemoteStore()
        pubKey, err = rotateRemoteKey(role, remote)
        pubKeyList = make(data.KeyList, 0, 1)
        pubKeyList = append(pubKeyList, pubKey)
        if err != nil {
            return nil, fmt.Errorf("unable to rotate remote key: %s", err)
        }
        return pubKeyList, nil
    }

    // If no new keys are passed in, we generate one
    if len(newKeys) == 0 {
        pubKeyList = make(data.KeyList, 0, 1)
        pubKey, err = r.GetCryptoService().Create(role, r.gun, data.ECDSAKey)
        pubKeyList = append(pubKeyList, pubKey)
    }
    if err != nil {
        return nil, fmt.Errorf("unable to generate key: %s", err)
    }

    // If a list of keys to rotate to are provided, we add those
    if len(newKeys) > 0 {
        pubKeyList = make(data.KeyList, 0, len(newKeys))
        for _, keyID := range newKeys {
            pubKey = r.GetCryptoService().GetKey(keyID)
            if pubKey == nil {
                return nil, fmt.Errorf("unable to find key: %s", keyID)
            }
            pubKeyList = append(pubKeyList, pubKey)
        }
    }

    // Convert to certs (for root keys)
    if pubKeyList, err = r.pubKeysToCerts(role, pubKeyList); err != nil {
        return nil, err
    }

    return pubKeyList, nil
}

func (r *repository) pubKeysToCerts(role data.RoleName, pubKeyList data.KeyList) (data.KeyList, error) {
    // only generate certs for root keys
    if role != data.CanonicalRootRole {
        return pubKeyList, nil
    }

    for i, pubKey := range pubKeyList {
        privKey, loadedRole, err := r.GetCryptoService().GetPrivateKey(pubKey.ID())
        if err != nil {
            return nil, err
        }
        if loadedRole != role {
            return nil, fmt.Errorf("attempted to load root key but given %s key instead", loadedRole)
        }
        pubKey, err = rootCertKey(r.gun, privKey)
        if err != nil {
            return nil, err
        }
        pubKeyList[i] = pubKey
    }
    return pubKeyList, nil
}

func checkRotationInput(role data.RoleName, serverManaged bool) error {
    // We currently support remotely managing timestamp and snapshot keys
    canBeRemoteKey := role == data.CanonicalTimestampRole || role == data.CanonicalSnapshotRole
    // And locally managing root, targets, and snapshot keys
    canBeLocalKey := role == data.CanonicalSnapshotRole || role == data.CanonicalTargetsRole ||
        role == data.CanonicalRootRole

    switch {
    case !data.ValidRole(role) || data.IsDelegation(role):
        return fmt.Errorf("notary does not currently permit rotating the %s key", role)
    case serverManaged && !canBeRemoteKey:
        return ErrInvalidRemoteRole{Role: role}
    case !serverManaged && !canBeLocalKey:
        return ErrInvalidLocalRole{Role: role}
    }
    return nil
}

func (r *repository) rootFileKeyChange(cl changelist.Changelist, role data.RoleName, action string, keyList []data.PublicKey) error {
    meta := changelist.TUFRootData{
        RoleName: role,
        Keys:     keyList,
    }
    metaJSON, err := json.Marshal(meta)
    if err != nil {
        return err
    }

    c := changelist.NewTUFChange(
        action,
        changelist.ScopeRoot,
        changelist.TypeBaseRole,
        role.String(),
        metaJSON,
    )
    return cl.Add(c)
}

// DeleteTrustData removes the trust data stored for this repo in the TUF cache on the client side.
// Note that we will not delete any private key material from local storage.
func DeleteTrustData(baseDir string, gun data.GUN, URL string, rt http.RoundTripper, deleteRemote bool) error {
    localRepo := filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()))
    // Remove the tufRepoPath directory, which includes local TUF metadata files and changelist information
    if err := os.RemoveAll(localRepo); err != nil {
        return fmt.Errorf("error clearing TUF repo data: %v", err)
    }
    // Note that this will require admin permission for the gun in the roundtripper
    if deleteRemote {
        remote, err := getRemoteStore(URL, gun, rt)
        if err != nil {
            logrus.Errorf("unable to instantiate a remote store: %v", err)
            return err
        }
        if err := remote.RemoveAll(); err != nil {
            return err
        }
    }
    return nil
}

// SetLegacyVersions allows the number of legacy versions of the root
// to be inspected for old signing keys to be configured.
func (r *repository) SetLegacyVersions(n int) {
    r.LegacyVersions = n
}
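End to end, the flow this file implements is: build a repository handle, stage changes into the changelist, and let Publish update, apply, sign, and push. The sketch below is illustrative only; the base directory, GUN, server URL, and passphrase are placeholders, and the notary-server must actually be reachable for Publish to succeed.

package main

import (
    "net/http"

    "github.com/theupdateframework/notary/client"
    "github.com/theupdateframework/notary/trustpinning"
    "github.com/theupdateframework/notary/tuf/data"
)

func main() {
    // Passphrase retriever for the local key store; a fixed passphrase
    // is used here purely for illustration.
    retriever := func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
        return "testpassphrase", false, nil
    }

    repo, err := client.NewFileCachedRepository(
        "/tmp/notary-demo",            // baseDir: where tuf/ metadata and changelists live (placeholder)
        data.GUN("example.com/demo"),  // hypothetical GUN
        "https://notary.example.com",  // hypothetical notary-server URL
        http.DefaultTransport,         // RoundTripper; nil would force an offline store
        retriever,
        trustpinning.TrustPinConfig{}, // no trust pinning
    )
    if err != nil {
        panic(err)
    }

    // Hash a local file into a Target and stage it in the changelist;
    // nothing reaches the server until Publish.
    target, err := client.NewTarget("v1.0", "/tmp/notary-demo/artifact.bin", nil)
    if err != nil {
        panic(err)
    }
    if err := repo.AddTarget(target, data.CanonicalTargetsRole); err != nil {
        panic(err)
    }

    // Publish updates from the server, applies the changelist, signs the
    // dirty roles, and pushes the new metadata (initializing on first use).
    if err := repo.Publish(); err != nil {
        panic(err)
    }
}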
vendor/github.com/theupdateframework/notary/client/delegations.go (generated, vendored, new file, 226 lines)
@@ -0,0 +1,226 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/theupdateframework/notary"
|
||||
"github.com/theupdateframework/notary/client/changelist"
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
"github.com/theupdateframework/notary/tuf/utils"
|
||||
)
|
||||
|
||||
// AddDelegation creates changelist entries to add provided delegation public keys and paths.
|
||||
// This method composes AddDelegationRoleAndKeys and AddDelegationPaths (each creates one changelist if called).
|
||||
func (r *repository) AddDelegation(name data.RoleName, delegationKeys []data.PublicKey, paths []string) error {
|
||||
if len(delegationKeys) > 0 {
|
||||
err := r.AddDelegationRoleAndKeys(name, delegationKeys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(paths) > 0 {
|
||||
err := r.AddDelegationPaths(name, paths)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}

// AddDelegationRoleAndKeys creates a changelist entry to add provided delegation public keys.
// This method is the simplest way to create a new delegation, because a delegation must have at
// least one key upon creation to be valid; the changelist would otherwise be rejected when the
// threshold is validated.
func (r *repository) AddDelegationRoleAndKeys(name data.RoleName, delegationKeys []data.PublicKey) error {
	if !data.IsDelegation(name) {
		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
	}

	logrus.Debugf(`Adding delegation "%s" with threshold %d, and %d keys`,
		name, notary.MinThreshold, len(delegationKeys))

	// Defaulting to threshold of 1, since we don't allow for larger thresholds at the moment.
	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
		NewThreshold: notary.MinThreshold,
		AddKeys:      data.KeyList(delegationKeys),
	})
	if err != nil {
		return err
	}

	template := newCreateDelegationChange(name, tdJSON)
	return addChange(r.changelist, template, name)
}

// AddDelegationPaths creates a changelist entry to add provided paths to an existing delegation.
// This method cannot create a new delegation itself because the role must meet the key threshold upon creation.
func (r *repository) AddDelegationPaths(name data.RoleName, paths []string) error {
	if !data.IsDelegation(name) {
		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
	}

	logrus.Debugf("Adding %s paths to delegation %s", paths, name)

	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
		AddPaths: paths,
	})
	if err != nil {
		return err
	}

	template := newCreateDelegationChange(name, tdJSON)
	return addChange(r.changelist, template, name)
}

// RemoveDelegationKeysAndPaths creates changelist entries to remove provided delegation key IDs and paths.
// This method composes RemoveDelegationPaths and RemoveDelegationKeys (each creates one changelist entry if called).
func (r *repository) RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error {
	if len(paths) > 0 {
		err := r.RemoveDelegationPaths(name, paths)
		if err != nil {
			return err
		}
	}
	if len(keyIDs) > 0 {
		err := r.RemoveDelegationKeys(name, keyIDs)
		if err != nil {
			return err
		}
	}
	return nil
}

// RemoveDelegationRole creates a changelist entry to remove all paths and keys from a role,
// and to delete the role in its entirety.
func (r *repository) RemoveDelegationRole(name data.RoleName) error {
	if !data.IsDelegation(name) {
		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
	}

	logrus.Debugf(`Removing delegation "%s"`, name)

	template := newDeleteDelegationChange(name, nil)
	return addChange(r.changelist, template, name)
}

// RemoveDelegationPaths creates a changelist entry to remove provided paths from an existing delegation.
func (r *repository) RemoveDelegationPaths(name data.RoleName, paths []string) error {
	if !data.IsDelegation(name) {
		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
	}

	logrus.Debugf(`Removing %s paths from delegation "%s"`, paths, name)

	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
		RemovePaths: paths,
	})
	if err != nil {
		return err
	}

	template := newUpdateDelegationChange(name, tdJSON)
	return addChange(r.changelist, template, name)
}

// RemoveDelegationKeys creates a changelist entry to remove provided keys from an existing delegation.
// When this changelist entry is applied, if the specified keys are the only keys left in the role,
// the role itself will be deleted in its entirety.
// It can also delete a key from all delegations under a parent using a name
// with a wildcard at the end.
func (r *repository) RemoveDelegationKeys(name data.RoleName, keyIDs []string) error {
	if !data.IsDelegation(name) && !data.IsWildDelegation(name) {
		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
	}

	logrus.Debugf(`Removing %s keys from delegation "%s"`, keyIDs, name)

	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
		RemoveKeys: keyIDs,
	})
	if err != nil {
		return err
	}

	template := newUpdateDelegationChange(name, tdJSON)
	return addChange(r.changelist, template, name)
}

// ClearDelegationPaths creates a changelist entry to remove all paths from an existing delegation.
func (r *repository) ClearDelegationPaths(name data.RoleName) error {
	if !data.IsDelegation(name) {
		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
	}

	logrus.Debugf(`Removing all paths from delegation "%s"`, name)

	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
		ClearAllPaths: true,
	})
	if err != nil {
		return err
	}

	template := newUpdateDelegationChange(name, tdJSON)
	return addChange(r.changelist, template, name)
}

func newUpdateDelegationChange(name data.RoleName, content []byte) *changelist.TUFChange {
	return changelist.NewTUFChange(
		changelist.ActionUpdate,
		name,
		changelist.TypeTargetsDelegation,
		"", // no path for delegations
		content,
	)
}

func newCreateDelegationChange(name data.RoleName, content []byte) *changelist.TUFChange {
	return changelist.NewTUFChange(
		changelist.ActionCreate,
		name,
		changelist.TypeTargetsDelegation,
		"", // no path for delegations
		content,
	)
}

func newDeleteDelegationChange(name data.RoleName, content []byte) *changelist.TUFChange {
	return changelist.NewTUFChange(
		changelist.ActionDelete,
		name,
		changelist.TypeTargetsDelegation,
		"", // no path for delegations
		content,
	)
}

func translateDelegationsToCanonicalIDs(delegationInfo data.Delegations) ([]data.Role, error) {
	canonicalDelegations := make([]data.Role, len(delegationInfo.Roles))
	// Do a copy by value to ensure local delegation metadata is untouched
	for idx, origRole := range delegationInfo.Roles {
		canonicalDelegations[idx] = *origRole
	}
	delegationKeys := delegationInfo.Keys
	for i, delegation := range canonicalDelegations {
		canonicalKeyIDs := []string{}
		for _, keyID := range delegation.KeyIDs {
			pubKey, ok := delegationKeys[keyID]
			if !ok {
				return []data.Role{}, fmt.Errorf("could not translate canonical key IDs for %s", delegation.Name)
			}
			canonicalKeyID, err := utils.CanonicalKeyID(pubKey)
			if err != nil {
				return []data.Role{}, fmt.Errorf("could not translate canonical key IDs for %s: %v", delegation.Name, err)
			}
			canonicalKeyIDs = append(canonicalKeyIDs, canonicalKeyID)
		}
		canonicalDelegations[i].KeyIDs = canonicalKeyIDs
	}
	return canonicalDelegations, nil
}
48
vendor/github.com/theupdateframework/notary/client/errors.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
package client

import (
	"fmt"

	"github.com/theupdateframework/notary/tuf/data"
)

// ErrRepoNotInitialized is returned when trying to publish an uninitialized
// notary repository
type ErrRepoNotInitialized struct{}

func (err ErrRepoNotInitialized) Error() string {
	return "repository has not been initialized"
}

// ErrInvalidRemoteRole is returned when the server is requested to manage
// a key type that is not permitted
type ErrInvalidRemoteRole struct {
	Role data.RoleName
}

func (err ErrInvalidRemoteRole) Error() string {
	return fmt.Sprintf(
		"notary does not permit the server managing the %s key", err.Role.String())
}

// ErrInvalidLocalRole is returned when the client wants to manage
// a key type that is not permitted
type ErrInvalidLocalRole struct {
	Role data.RoleName
}

func (err ErrInvalidLocalRole) Error() string {
	return fmt.Sprintf(
		"notary does not permit the client managing the %s key", err.Role)
}

// ErrRepositoryNotExist is returned when an action is taken on a remote
// repository that doesn't exist
type ErrRepositoryNotExist struct {
	remote string
	gun    data.GUN
}

func (err ErrRepositoryNotExist) Error() string {
	return fmt.Sprintf("%s does not have trust data for %s", err.remote, err.gun.String())
}
306
vendor/github.com/theupdateframework/notary/client/helpers.go
generated
vendored
Normal file
@ -0,0 +1,306 @@
package client

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/theupdateframework/notary/client/changelist"
	store "github.com/theupdateframework/notary/storage"
	"github.com/theupdateframework/notary/tuf"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/signed"
	"github.com/theupdateframework/notary/tuf/utils"
)

// Use this to initialize remote HTTPStores from the config settings
func getRemoteStore(baseURL string, gun data.GUN, rt http.RoundTripper) (store.RemoteStore, error) {
	s, err := store.NewHTTPStore(
		baseURL+"/v2/"+gun.String()+"/_trust/tuf/",
		"",
		"json",
		"key",
		rt,
	)
	if err != nil {
		return store.OfflineStore{}, err
	}
	return s, nil
}
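
// Illustrative example with assumed values: given baseURL "https://notary.example.com"
// and gun "docker.io/library/alpine", the HTTPStore above resolves metadata under
//
//	https://notary.example.com/v2/docker.io/library/alpine/_trust/tuf/
//
// so, for instance, root metadata would be fetched from .../_trust/tuf/root.json.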

func applyChangelist(repo *tuf.Repo, invalid *tuf.Repo, cl changelist.Changelist) error {
	it, err := cl.NewIterator()
	if err != nil {
		return err
	}
	index := 0
	for it.HasNext() {
		c, err := it.Next()
		if err != nil {
			return err
		}
		isDel := data.IsDelegation(c.Scope()) || data.IsWildDelegation(c.Scope())
		switch {
		case c.Scope() == changelist.ScopeTargets || isDel:
			err = applyTargetsChange(repo, invalid, c)
		case c.Scope() == changelist.ScopeRoot:
			err = applyRootChange(repo, c)
		default:
			return fmt.Errorf("scope not supported: %s", c.Scope().String())
		}
		if err != nil {
			logrus.Debugf("error attempting to apply change #%d: %s, on scope: %s path: %s type: %s", index, c.Action(), c.Scope(), c.Path(), c.Type())
			return err
		}
		index++
	}
	logrus.Debugf("applied %d change(s)", index)
	return nil
}

func applyTargetsChange(repo *tuf.Repo, invalid *tuf.Repo, c changelist.Change) error {
	switch c.Type() {
	case changelist.TypeTargetsTarget:
		return changeTargetMeta(repo, c)
	case changelist.TypeTargetsDelegation:
		return changeTargetsDelegation(repo, c)
	case changelist.TypeWitness:
		return witnessTargets(repo, invalid, c.Scope())
	default:
		return fmt.Errorf("only target meta and delegation changes are supported")
	}
}

func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
	switch c.Action() {
	case changelist.ActionCreate:
		td := changelist.TUFDelegation{}
		err := json.Unmarshal(c.Content(), &td)
		if err != nil {
			return err
		}

		// Try to create a brand new role or update an existing one.
		// First add the keys, then the paths. We can only add keys and paths in this scenario.
		err = repo.UpdateDelegationKeys(c.Scope(), td.AddKeys, []string{}, td.NewThreshold)
		if err != nil {
			return err
		}
		return repo.UpdateDelegationPaths(c.Scope(), td.AddPaths, []string{}, false)
	case changelist.ActionUpdate:
		td := changelist.TUFDelegation{}
		err := json.Unmarshal(c.Content(), &td)
		if err != nil {
			return err
		}
		if data.IsWildDelegation(c.Scope()) {
			return repo.PurgeDelegationKeys(c.Scope(), td.RemoveKeys)
		}

		delgRole, err := repo.GetDelegationRole(c.Scope())
		if err != nil {
			return err
		}

		// We need to translate the keys from canonical ID to TUF ID for compatibility
		canonicalToTUFID := make(map[string]string)
		for tufID, pubKey := range delgRole.Keys {
			canonicalID, err := utils.CanonicalKeyID(pubKey)
			if err != nil {
				return err
			}
			canonicalToTUFID[canonicalID] = tufID
		}

		removeTUFKeyIDs := []string{}
		for _, canonID := range td.RemoveKeys {
			removeTUFKeyIDs = append(removeTUFKeyIDs, canonicalToTUFID[canonID])
		}

		err = repo.UpdateDelegationKeys(c.Scope(), td.AddKeys, removeTUFKeyIDs, td.NewThreshold)
		if err != nil {
			return err
		}
		return repo.UpdateDelegationPaths(c.Scope(), td.AddPaths, td.RemovePaths, td.ClearAllPaths)
	case changelist.ActionDelete:
		return repo.DeleteDelegation(c.Scope())
	default:
		return fmt.Errorf("unsupported action against delegations: %s", c.Action())
	}
}

func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error {
	var err error
	switch c.Action() {
	case changelist.ActionCreate:
		logrus.Debug("changelist add: ", c.Path())
		meta := &data.FileMeta{}
		err = json.Unmarshal(c.Content(), meta)
		if err != nil {
			return err
		}
		files := data.Files{c.Path(): *meta}

		// Attempt to add the target to this role
		if _, err = repo.AddTargets(c.Scope(), files); err != nil {
			logrus.Errorf("couldn't add target to %s: %s", c.Scope(), err.Error())
		}

	case changelist.ActionDelete:
		logrus.Debug("changelist remove: ", c.Path())

		// Attempt to remove the target from this role
		if err = repo.RemoveTargets(c.Scope(), c.Path()); err != nil {
			logrus.Errorf("couldn't remove target from %s: %s", c.Scope(), err.Error())
		}

	default:
		err = fmt.Errorf("action not yet supported: %s", c.Action())
	}
	return err
}
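
// A minimal sketch of the JSON payload a TypeTargetsTarget create-change might
// carry in c.Content(); it unmarshals into data.FileMeta as used above. The
// length and digest values here are made up for illustration.
//
//	{
//	  "length": 1024,
//	  "hashes": {
//	    "sha256": "5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03"
//	  }
//	}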

func applyRootChange(repo *tuf.Repo, c changelist.Change) error {
	var err error
	switch c.Type() {
	case changelist.TypeBaseRole:
		err = applyRootRoleChange(repo, c)
	default:
		err = fmt.Errorf("type of root change not yet supported: %s", c.Type())
	}
	return err // might be nil
}

func applyRootRoleChange(repo *tuf.Repo, c changelist.Change) error {
	switch c.Action() {
	case changelist.ActionCreate:
		// replaces all keys for a role
		d := &changelist.TUFRootData{}
		err := json.Unmarshal(c.Content(), d)
		if err != nil {
			return err
		}
		err = repo.ReplaceBaseKeys(d.RoleName, d.Keys...)
		if err != nil {
			return err
		}
	default:
		return fmt.Errorf("action not yet supported for root: %s", c.Action())
	}
	return nil
}

func nearExpiry(r data.SignedCommon) bool {
	plus6mo := time.Now().AddDate(0, 6, 0)
	return r.Expires.Before(plus6mo)
}
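
// Worked example (dates assumed): if today is 2024-01-01, plus6mo is 2024-07-01,
// so metadata expiring 2024-03-01 is near expiry (true) while metadata expiring
// 2025-01-01 is not (false).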

func warnRolesNearExpiry(r *tuf.Repo) {
	// get every role and its respective signed common and call nearExpiry on it
	// root check
	if nearExpiry(r.Root.Signed.SignedCommon) {
		logrus.Warn("root is nearing expiry, you should re-sign the role metadata")
	}
	// targets and delegations check
	for role, signedTOrD := range r.Targets {
		// signedTOrD is of type *data.SignedTargets
		if nearExpiry(signedTOrD.Signed.SignedCommon) {
			logrus.Warn(role, " metadata is nearing expiry, you should re-sign the role metadata")
		}
	}
	// snapshot check
	if nearExpiry(r.Snapshot.Signed.SignedCommon) {
		logrus.Warn("snapshot is nearing expiry, you should re-sign the role metadata")
	}
	// we do not need to worry about the timestamp; the notary signer will re-sign it with the timestamp key
}

// Fetches a public key from a remote store, given a gun and role
func getRemoteKey(role data.RoleName, remote store.RemoteStore) (data.PublicKey, error) {
	rawPubKey, err := remote.GetKey(role)
	if err != nil {
		return nil, err
	}

	pubKey, err := data.UnmarshalPublicKey(rawPubKey)
	if err != nil {
		return nil, err
	}

	return pubKey, nil
}

// Rotates a private key in a remote store and returns the public key component
func rotateRemoteKey(role data.RoleName, remote store.RemoteStore) (data.PublicKey, error) {
	rawPubKey, err := remote.RotateKey(role)
	if err != nil {
		return nil, err
	}

	pubKey, err := data.UnmarshalPublicKey(rawPubKey)
	if err != nil {
		return nil, err
	}

	return pubKey, nil
}

// signs and serializes the metadata for a canonical role in a TUF repo to JSON
func serializeCanonicalRole(tufRepo *tuf.Repo, role data.RoleName, extraSigningKeys data.KeyList) (out []byte, err error) {
	var s *data.Signed
	switch {
	case role == data.CanonicalRootRole:
		s, err = tufRepo.SignRoot(data.DefaultExpires(role), extraSigningKeys)
	case role == data.CanonicalSnapshotRole:
		s, err = tufRepo.SignSnapshot(data.DefaultExpires(role))
	case tufRepo.Targets[role] != nil:
		s, err = tufRepo.SignTargets(
			role, data.DefaultExpires(data.CanonicalTargetsRole))
	default:
		err = fmt.Errorf("%s is not a supported role to sign on the client", role)
	}

	if err != nil {
		return
	}

	return json.Marshal(s)
}

func getAllPrivKeys(rootKeyIDs []string, cryptoService signed.CryptoService) ([]data.PrivateKey, error) {
	if cryptoService == nil {
		return nil, fmt.Errorf("no crypto service available to get private keys from")
	}

	privKeys := make([]data.PrivateKey, 0, len(rootKeyIDs))
	for _, keyID := range rootKeyIDs {
		privKey, _, err := cryptoService.GetPrivateKey(keyID)
		if err != nil {
			return nil, err
		}
		privKeys = append(privKeys, privKey)
	}
	if len(privKeys) == 0 {
		var rootKeyID string
		rootKeyList := cryptoService.ListKeys(data.CanonicalRootRole)
		if len(rootKeyList) == 0 {
			rootPublicKey, err := cryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey)
			if err != nil {
				return nil, err
			}
			rootKeyID = rootPublicKey.ID()
		} else {
			rootKeyID = rootKeyList[0]
		}
		privKey, _, err := cryptoService.GetPrivateKey(rootKeyID)
		if err != nil {
			return nil, err
		}
		privKeys = append(privKeys, privKey)
	}

	return privKeys, nil
}
150
vendor/github.com/theupdateframework/notary/client/interface.go
generated
vendored
Normal file
@ -0,0 +1,150 @@
package client

import (
	"github.com/theupdateframework/notary/client/changelist"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/signed"
)

// ReadOnly represents the set of options that must be supported over a TUF repo for
// reading
type ReadOnly interface {
	// ListTargets lists all targets for the current repository. The list of
	// roles should be passed in order from highest to lowest priority.
	//
	// IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x",
	// "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
	// its entries will be strictly shadowed by those in other parts of the "targets/a"
	// subtree and also the "targets/x" subtree, as we will defer parsing it until
	// we explicitly reach it in our iteration of the provided list of roles.
	ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error)

	// GetTargetByName returns a target by the given name. If no roles are passed
	// it uses the targets role and does a search of the entire delegation
	// graph, finding the first entry in a breadth first search of the delegations.
	// If roles are passed, they should be passed in descending priority and
	// the target entry found in the subtree of the highest priority role
	// will be returned.
	// See the IMPORTANT section on ListTargets above. Those roles also apply here.
	GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error)

	// GetAllTargetMetadataByName searches the entire delegation role tree to find
	// the specified target by name for all roles, and returns a list of
	// TargetSignedStructs for each time it finds the specified target.
	// If given an empty string for a target name, it will return back all targets
	// signed into the repository in every role
	GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error)

	// ListRoles returns a list of RoleWithSignatures objects for this repo
	// This represents the latest metadata for each role in this repo
	ListRoles() ([]RoleWithSignatures, error)

	// GetDelegationRoles returns the keys and roles of the repository's delegations
	// Also converts key IDs to canonical key IDs to keep consistent with signing prompts
	GetDelegationRoles() ([]data.Role, error)
}
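
// Illustrative sketch (role names and caller setup assumed): how the ListTargets
// priority and shadowing described above plays out for a caller with a ReadOnly repo.
//
//	// If both "targets/a" and "targets/x" sign "app.tar.gz", the entry from
//	// "targets/a" wins because it was listed first (highest priority):
//	targets, err := repo.ListTargets("targets/a", "targets/x")
//	if err != nil {
//		// handle the error
//	}
//	for _, t := range targets {
//		fmt.Printf("%s from role %s\n", t.Name, t.Role)
//	}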

// Repository represents the set of options that must be supported over a TUF repo
// for both reading and writing.
type Repository interface {
	ReadOnly

	// ------------------- Publishing operations -------------------

	// GetGUN returns the GUN associated with the repository
	GetGUN() data.GUN

	// SetLegacyVersions sets the number of versions back to fetch roots to sign with
	SetLegacyVersions(int)

	// ----- General management operations -----

	// Initialize creates a new repository by using rootKey as the root Key for the
	// TUF repository. The remote store/server must be reachable (and is asked to
	// generate a timestamp key and possibly other serverManagedRoles), but the
	// created repository result is only stored on local cache, not published to
	// the remote store. To do that, use r.Publish() eventually.
	Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error

	// InitializeWithCertificate initializes the repository with root keys and their
	// corresponding certificates
	InitializeWithCertificate(rootKeyIDs []string, rootCerts []data.PublicKey, serverManagedRoles ...data.RoleName) error

	// Publish pushes the local changes in signed material to the remote notary-server.
	// Conceptually it performs an operation similar to a `git rebase`.
	Publish() error

	// ----- Target Operations -----

	// AddTarget creates new changelist entries to add a target to the given roles
	// in the repository when the changelist gets applied at publish time.
	// If roles are unspecified, the default role is "targets".
	AddTarget(target *Target, roles ...data.RoleName) error

	// RemoveTarget creates new changelist entries to remove a target from the given
	// roles in the repository when the changelist gets applied at publish time.
	// If roles are unspecified, the default role is "targets".
	RemoveTarget(targetName string, roles ...data.RoleName) error

	// ----- Changelist operations -----

	// GetChangelist returns the list of the repository's unpublished changes
	GetChangelist() (changelist.Changelist, error)

	// ----- Role operations -----

	// AddDelegation creates changelist entries to add provided delegation public keys and paths.
	// This method composes AddDelegationRoleAndKeys and AddDelegationPaths (each creates one changelist entry if called).
	AddDelegation(name data.RoleName, delegationKeys []data.PublicKey, paths []string) error

	// AddDelegationRoleAndKeys creates a changelist entry to add provided delegation public keys.
	// This method is the simplest way to create a new delegation, because a delegation must have
	// at least one key upon creation to be valid; the changelist would otherwise be rejected when
	// the threshold is validated.
	AddDelegationRoleAndKeys(name data.RoleName, delegationKeys []data.PublicKey) error

	// AddDelegationPaths creates a changelist entry to add provided paths to an existing delegation.
	// This method cannot create a new delegation itself because the role must meet the key threshold upon
	// creation.
	AddDelegationPaths(name data.RoleName, paths []string) error

	// RemoveDelegationKeysAndPaths creates changelist entries to remove provided delegation key IDs and
	// paths. This method composes RemoveDelegationPaths and RemoveDelegationKeys (each creates one
	// changelist entry if called).
	RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error

	// RemoveDelegationRole creates a changelist entry to remove all paths and keys from a role, and
	// to delete the role in its entirety.
	RemoveDelegationRole(name data.RoleName) error

	// RemoveDelegationPaths creates a changelist entry to remove provided paths from an existing delegation.
	RemoveDelegationPaths(name data.RoleName, paths []string) error

	// RemoveDelegationKeys creates a changelist entry to remove provided keys from an existing delegation.
	// When this changelist entry is applied, if the specified keys are the only keys left in the role,
	// the role itself will be deleted in its entirety.
	// It can also delete a key from all delegations under a parent using a name
	// with a wildcard at the end.
	RemoveDelegationKeys(name data.RoleName, keyIDs []string) error

	// ClearDelegationPaths creates a changelist entry to remove all paths from an existing delegation.
	ClearDelegationPaths(name data.RoleName) error

	// ----- Witness and other re-signing operations -----

	// Witness creates change objects to witness (i.e. re-sign) the given
	// roles on the next publish. One change is created per role.
	Witness(roles ...data.RoleName) ([]data.RoleName, error)

	// ----- Key Operations -----

	// RotateKey removes all existing keys associated with the role. If no keys are
	// specified in keyList, then this creates and adds one new key or delegates
	// managing the key to the server. If key(s) are specified by keyList, then they are
	// used for signing the role.
	// These changes are staged in a changelist until publish is called.
	RotateKey(role data.RoleName, serverManagesKey bool, keyList []string) error

	// GetCryptoService is the getter for the repository's CryptoService, which is used
	// to sign all updates.
	GetCryptoService() signed.CryptoService
}
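
// A minimal usage sketch (assumed setup, not part of the vendored source): the
// typical write path against a Repository is to initialize, stage changes, and
// publish.
//
//	// Assumes repo is not yet initialized and rootKeyIDs identifies a root key
//	// available in repo's CryptoService.
//	func publishOne(repo Repository, rootKeyIDs []string, target *Target) error {
//		if err := repo.Initialize(rootKeyIDs); err != nil {
//			return err
//		}
//		if err := repo.AddTarget(target, data.CanonicalTargetsRole); err != nil {
//			return err
//		}
//		return repo.Publish() // applies the staged changelist against the remote
//	}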
257
vendor/github.com/theupdateframework/notary/client/reader.go
generated
vendored
Normal file
@ -0,0 +1,257 @@
package client

import (
	"fmt"

	canonicaljson "github.com/docker/go/canonical/json"
	store "github.com/theupdateframework/notary/storage"
	"github.com/theupdateframework/notary/tuf"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/utils"
)

// Target represents a simplified version of the data TUF operates on, so external
// applications don't have to depend on TUF data types.
type Target struct {
	Name   string                    // the name of the target
	Hashes data.Hashes               // the hashes of the target
	Length int64                     // the size in bytes of the target
	Custom *canonicaljson.RawMessage // the custom data provided to describe the file at TARGETPATH
}

// TargetWithRole represents a Target that exists in a particular role - this is
// produced by ListTargets and GetTargetByName
type TargetWithRole struct {
	Target
	Role data.RoleName
}

// TargetSignedStruct is a struct that contains a Target, the role it was found in, and the list of signatures for that role
type TargetSignedStruct struct {
	Role       data.DelegationRole
	Target     Target
	Signatures []data.Signature
}

// ErrNoSuchTarget is returned when no valid trust data is found.
type ErrNoSuchTarget string

func (f ErrNoSuchTarget) Error() string {
	return fmt.Sprintf("No valid trust data for %s", string(f))
}

// RoleWithSignatures is a Role with its associated signatures
type RoleWithSignatures struct {
	Signatures []data.Signature
	data.Role
}

// NewReadOnly is the base method that returns a new notary repository for reading.
// It expects an initialized cache. In case of a nil remote store, a default
// offline store is used.
func NewReadOnly(repo *tuf.Repo) ReadOnly {
	return &reader{tufRepo: repo}
}

type reader struct {
	tufRepo *tuf.Repo
}

// ListTargets lists all targets for the current repository. The list of
// roles should be passed in order from highest to lowest priority.
//
// IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x",
// "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
// its entries will be strictly shadowed by those in other parts of the "targets/a"
// subtree and also the "targets/x" subtree, as we will defer parsing it until
// we explicitly reach it in our iteration of the provided list of roles.
func (r *reader) ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) {
	if len(roles) == 0 {
		roles = []data.RoleName{data.CanonicalTargetsRole}
	}
	targets := make(map[string]*TargetWithRole)
	for _, role := range roles {
		// Define an array of roles to skip for this walk (see IMPORTANT comment above)
		skipRoles := utils.RoleNameSliceRemove(roles, role)

		// Define a visitor function to populate the targets map in priority order
		listVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
			// We found targets so we should try to add them to our targets map
			for targetName, targetMeta := range tgt.Signed.Targets {
				// Follow the priority by not overriding previously set targets
				// and check that this path is valid with this role
				if _, ok := targets[targetName]; ok || !validRole.CheckPaths(targetName) {
					continue
				}
				targets[targetName] = &TargetWithRole{
					Target: Target{
						Name:   targetName,
						Hashes: targetMeta.Hashes,
						Length: targetMeta.Length,
						Custom: targetMeta.Custom,
					},
					Role: validRole.Name,
				}
			}
			return nil
		}

		r.tufRepo.WalkTargets("", role, listVisitorFunc, skipRoles...)
	}

	var targetList []*TargetWithRole
	for _, v := range targets {
		targetList = append(targetList, v)
	}

	return targetList, nil
}

// GetTargetByName returns a target by the given name. If no roles are passed
// it uses the targets role and does a search of the entire delegation
// graph, finding the first entry in a breadth first search of the delegations.
// If roles are passed, they should be passed in descending priority and
// the target entry found in the subtree of the highest priority role
// will be returned.
// See the IMPORTANT section on ListTargets above. Those roles also apply here.
func (r *reader) GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) {
	if len(roles) == 0 {
		roles = append(roles, data.CanonicalTargetsRole)
	}
	var resultMeta data.FileMeta
	var resultRoleName data.RoleName
	var foundTarget bool
	for _, role := range roles {
		// Define an array of roles to skip for this walk (see IMPORTANT comment above)
		skipRoles := utils.RoleNameSliceRemove(roles, role)

		// Define a visitor function to find the specified target
		getTargetVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
			if tgt == nil {
				return nil
			}
			// We found the target and validated path compatibility in our walk,
			// so we should stop our walk and set the resultMeta and resultRoleName variables
			if resultMeta, foundTarget = tgt.Signed.Targets[name]; foundTarget {
				resultRoleName = validRole.Name
				return tuf.StopWalk{}
			}
			return nil
		}
		// Check that we didn't error, and that we assigned to our target
		if err := r.tufRepo.WalkTargets(name, role, getTargetVisitorFunc, skipRoles...); err == nil && foundTarget {
			return &TargetWithRole{Target: Target{Name: name, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom}, Role: resultRoleName}, nil
		}
	}
	return nil, ErrNoSuchTarget(name)
}

// GetAllTargetMetadataByName searches the entire delegation role tree to find the specified target by name for all
// roles, and returns a list of TargetSignedStructs for each time it finds the specified target.
// If given an empty string for a target name, it will return back all targets signed into the repository in every role
func (r *reader) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) {
	var targetInfoList []TargetSignedStruct

	// Define a visitor function to find the specified target
	getAllTargetInfoByNameVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
		if tgt == nil {
			return nil
		}
		// We found a target and validated path compatibility in our walk,
		// so add it to our list if we have a match.
		// If we have an empty name, add all targets; else check if we have it.
		var targetMetaToAdd data.Files
		if name == "" {
			targetMetaToAdd = tgt.Signed.Targets
		} else {
			if meta, ok := tgt.Signed.Targets[name]; ok {
				targetMetaToAdd = data.Files{name: meta}
			}
		}

		for targetName, resultMeta := range targetMetaToAdd {
			targetInfo := TargetSignedStruct{
				Role:       validRole,
				Target:     Target{Name: targetName, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom},
				Signatures: tgt.Signatures,
			}
			targetInfoList = append(targetInfoList, targetInfo)
		}
		// continue walking to all child roles
		return nil
	}

	// Check that we didn't error, and that we found the target at least once
	if err := r.tufRepo.WalkTargets(name, "", getAllTargetInfoByNameVisitorFunc); err != nil {
		return nil, err
	}
	if len(targetInfoList) == 0 {
		return nil, ErrNoSuchTarget(name)
	}
	return targetInfoList, nil
}

// ListRoles returns a list of RoleWithSignatures objects for this repo
// This represents the latest metadata for each role in this repo
func (r *reader) ListRoles() ([]RoleWithSignatures, error) {
	// Get all role info from our updated keysDB, can be empty
	roles := r.tufRepo.GetAllLoadedRoles()

	var roleWithSigs []RoleWithSignatures

	// Populate RoleWithSignatures with Role from keysDB and signatures from TUF metadata
	for _, role := range roles {
		roleWithSig := RoleWithSignatures{Role: *role, Signatures: nil}
		switch role.Name {
		case data.CanonicalRootRole:
			roleWithSig.Signatures = r.tufRepo.Root.Signatures
		case data.CanonicalTargetsRole:
			roleWithSig.Signatures = r.tufRepo.Targets[data.CanonicalTargetsRole].Signatures
		case data.CanonicalSnapshotRole:
			roleWithSig.Signatures = r.tufRepo.Snapshot.Signatures
		case data.CanonicalTimestampRole:
			roleWithSig.Signatures = r.tufRepo.Timestamp.Signatures
		default:
			if !data.IsDelegation(role.Name) {
				continue
			}
			if _, ok := r.tufRepo.Targets[role.Name]; ok {
				// We'll only find a signature if we've published any targets with this delegation
				roleWithSig.Signatures = r.tufRepo.Targets[role.Name].Signatures
			}
		}
		roleWithSigs = append(roleWithSigs, roleWithSig)
	}
	return roleWithSigs, nil
}

// GetDelegationRoles returns the keys and roles of the repository's delegations
// Also converts key IDs to canonical key IDs to keep consistent with signing prompts
func (r *reader) GetDelegationRoles() ([]data.Role, error) {
	// All top level delegations (ex: targets/level1) are stored exclusively in targets.json
	_, ok := r.tufRepo.Targets[data.CanonicalTargetsRole]
	if !ok {
		return nil, store.ErrMetaNotFound{Resource: data.CanonicalTargetsRole.String()}
	}

	// make a copy for traversing nested delegations
	allDelegations := []data.Role{}

	// Define a visitor function to populate the delegations list and translate their key IDs to canonical IDs
	delegationCanonicalListVisitor := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
		// For the return list, update with a copy that includes canonicalKeyIDs
		// These aren't validated by the validRole
		canonicalDelegations, err := translateDelegationsToCanonicalIDs(tgt.Signed.Delegations)
		if err != nil {
			return err
		}
		allDelegations = append(allDelegations, canonicalDelegations...)
		return nil
	}
	err := r.tufRepo.WalkTargets("", "", delegationCanonicalListVisitor)
	if err != nil {
		return nil, err
	}
	return allDelegations, nil
}
19
vendor/github.com/theupdateframework/notary/client/repo.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
//go:build !pkcs11
// +build !pkcs11

package client

import (
	"fmt"

	"github.com/theupdateframework/notary"
	"github.com/theupdateframework/notary/trustmanager"
)

func getKeyStores(baseDir string, retriever notary.PassRetriever) ([]trustmanager.KeyStore, error) {
	fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
	if err != nil {
		return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir)
	}
	return []trustmanager.KeyStore{fileKeyStore}, nil
}
26
vendor/github.com/theupdateframework/notary/client/repo_pkcs11.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
//go:build pkcs11
// +build pkcs11

package client

import (
	"fmt"

	"github.com/theupdateframework/notary"
	"github.com/theupdateframework/notary/trustmanager"
	"github.com/theupdateframework/notary/trustmanager/yubikey"
)

func getKeyStores(baseDir string, retriever notary.PassRetriever) ([]trustmanager.KeyStore, error) {
	fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
	if err != nil {
		return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir)
	}

	keyStores := []trustmanager.KeyStore{fileKeyStore}
	yubiKeyStore, _ := yubikey.NewYubiStore(fileKeyStore, retriever)
	if yubiKeyStore != nil {
		keyStores = []trustmanager.KeyStore{yubiKeyStore, fileKeyStore}
	}
	return keyStores, nil
}
463
vendor/github.com/theupdateframework/notary/client/tufclient.go
generated
vendored
Normal file
@ -0,0 +1,463 @@
package client

import (
	"encoding/json"
	"fmt"
	"regexp"

	"github.com/sirupsen/logrus"
	"github.com/theupdateframework/notary"
	"github.com/theupdateframework/notary/cryptoservice"
	store "github.com/theupdateframework/notary/storage"
	"github.com/theupdateframework/notary/trustpinning"
	"github.com/theupdateframework/notary/tuf"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/signed"
)

// tufClient is a usability wrapper around a raw TUF repo
type tufClient struct {
	remote     store.RemoteStore
	cache      store.MetadataStore
	oldBuilder tuf.RepoBuilder
	newBuilder tuf.RepoBuilder
}

// Update performs an update to the TUF repo as defined by the TUF spec
func (c *tufClient) Update() (*tuf.Repo, *tuf.Repo, error) {
	// 1. Get timestamp
	//    a. If timestamp error (verification, expired, etc...) download new root and return to 1.
	// 2. Check if local snapshot is up to date
	//    a. If out of date, get updated snapshot
	//       i. If snapshot error, download new root and return to 1.
	// 3. Check if root correct against snapshot
	//    a. If incorrect, download new root and return to 1.
	// 4. Iteratively download and search targets and delegations to find target meta
	logrus.Debug("updating TUF client")
	err := c.update()
	if err != nil {
		logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
		logrus.Debug("Resetting the TUF builder...")

		c.newBuilder = c.newBuilder.BootstrapNewBuilder()

		if err := c.updateRoot(); err != nil {
			logrus.Debug("Client Update (Root): ", err)
			return nil, nil, err
		}
		// If we error again, we now have the latest root and just want to fail
		// out as there's no expectation the problem can be resolved automatically
		logrus.Debug("retrying TUF client update")
		if err := c.update(); err != nil {
			return nil, nil, err
		}
	}
	return c.newBuilder.Finish()
}

func (c *tufClient) update() error {
	if err := c.downloadTimestamp(); err != nil {
		logrus.Debugf("Client Update (Timestamp): %s", err.Error())
		return err
	}
	if err := c.downloadSnapshot(); err != nil {
		logrus.Debugf("Client Update (Snapshot): %s", err.Error())
		return err
	}
	// we will always need the top level targets at a minimum
	if err := c.downloadTargets(); err != nil {
		logrus.Debugf("Client Update (Targets): %s", err.Error())
		return err
	}
	return nil
}

// updateRoot checks if there is a newer version of the root available, and if so
// downloads all intermediate root files to allow proper key rotation.
func (c *tufClient) updateRoot() error {
	// Get current root version
	currentRootConsistentInfo := c.oldBuilder.GetConsistentInfo(data.CanonicalRootRole)
	currentVersion := c.oldBuilder.GetLoadedVersion(currentRootConsistentInfo.RoleName)

	// Get new root version
	raw, err := c.downloadRoot()

	switch err.(type) {
	case *trustpinning.ErrRootRotationFail:
		// Rotation errors are okay since we haven't yet downloaded
		// all intermediate root files
		break
	case nil:
		// No error updating root - we were at most 1 version behind
		return nil
	default:
		// Return any non-rotation error.
		return err
	}

	// Load current version into newBuilder
	currentRaw, err := c.cache.GetSized(data.CanonicalRootRole.String(), -1)
	if err != nil {
		logrus.Debugf("error loading %d.%s: %s", currentVersion, data.CanonicalRootRole, err)
		return err
	}
	if err := c.newBuilder.LoadRootForUpdate(currentRaw, currentVersion, false); err != nil {
		logrus.Debugf("%d.%s is invalid: %s", currentVersion, data.CanonicalRootRole, err)
		return err
	}

	// Extract newest version number
	signedRoot := &data.Signed{}
	if err := json.Unmarshal(raw, signedRoot); err != nil {
		return err
	}
	newestRoot, err := data.RootFromSigned(signedRoot)
	if err != nil {
		return err
	}
	newestVersion := newestRoot.Signed.SignedCommon.Version

	// Update from current + 1 (current already loaded) to newest - 1 (newest loaded below)
	if err := c.updateRootVersions(currentVersion+1, newestVersion-1); err != nil {
		return err
	}

	// Already downloaded newest, verify it against newest - 1
	if err := c.newBuilder.LoadRootForUpdate(raw, newestVersion, true); err != nil {
		logrus.Debugf("downloaded %d.%s is invalid: %s", newestVersion, data.CanonicalRootRole, err)
		return err
	}
	logrus.Debugf("successfully verified downloaded %d.%s", newestVersion, data.CanonicalRootRole)

	// Write newest to cache
	if err := c.cache.Set(data.CanonicalRootRole.String(), raw); err != nil {
		logrus.Debugf("unable to write %d.%s to cache: %s", newestVersion, data.CanonicalRootRole, err)
	}
	logrus.Debugf("finished updating root files")
	return nil
}

// updateRootVersions updates the root from its current version to a target version,
// rotating keys as they are found
func (c *tufClient) updateRootVersions(fromVersion, toVersion int) error {
	for v := fromVersion; v <= toVersion; v++ {
		logrus.Debugf("updating root from version %d to version %d, currently fetching %d", fromVersion, toVersion, v)

		versionedRole := fmt.Sprintf("%d.%s", v, data.CanonicalRootRole)

		raw, err := c.remote.GetSized(versionedRole, -1)
		if err != nil {
			logrus.Debugf("error downloading %s: %s", versionedRole, err)
			return err
		}
		if err := c.newBuilder.LoadRootForUpdate(raw, v, false); err != nil {
			logrus.Debugf("downloaded %s is invalid: %s", versionedRole, err)
			return err
		}
		logrus.Debugf("successfully verified downloaded %s", versionedRole)
	}
	return nil
}
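
// Worked example (version numbers assumed): if the cached root is version 3 and
// the newest downloaded root is version 6, updateRoot loads version 3 from cache,
// then updateRootVersions(4, 5) fetches "4.root" and "5.root" from the remote so
// each version is verified against the keys of the one before it, and finally the
// already-downloaded version 6 is verified against version 5.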
|
||||
|
||||
// downloadTimestamp is responsible for downloading the timestamp.json
|
||||
// Timestamps are special in that we ALWAYS attempt to download and only
|
||||
// use cache if the download fails (and the cache is still valid).
|
||||
func (c *tufClient) downloadTimestamp() error {
|
||||
logrus.Debug("Loading timestamp...")
|
||||
role := data.CanonicalTimestampRole
|
||||
consistentInfo := c.newBuilder.GetConsistentInfo(role)
|
||||
|
||||
// always get the remote timestamp, since it supersedes the local one
|
||||
cachedTS, cachedErr := c.cache.GetSized(role.String(), notary.MaxTimestampSize)
|
||||
_, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS)
|
||||
|
||||
// check that there was no remote error, or if there was a network problem
|
||||
// If there was a validation error, we should error out so we can download a new root or fail the update
|
||||
switch remoteErr.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case store.ErrMetaNotFound, store.ErrServerUnavailable, store.ErrOffline, store.NetworkError:
|
||||
break
|
||||
default:
|
||||
return remoteErr
|
||||
}
|
||||
|
||||
// since it was a network error: get the cached timestamp, if it exists
|
||||
if cachedErr != nil {
|
||||
logrus.Debug("no cached or remote timestamp available")
|
||||
return remoteErr
|
||||
}
|
||||
|
||||
logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
|
||||
err := c.newBuilder.Load(role, cachedTS, 1, false)
|
||||
if err == nil {
|
||||
logrus.Debug("successfully verified cached timestamp")
|
||||
}
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
// downloadSnapshot is responsible for downloading the snapshot.json
|
||||
func (c *tufClient) downloadSnapshot() error {
|
||||
logrus.Debug("Loading snapshot...")
|
||||
role := data.CanonicalSnapshotRole
|
||||
consistentInfo := c.newBuilder.GetConsistentInfo(role)
|
||||
|
||||
_, err := c.tryLoadCacheThenRemote(consistentInfo)
|
||||
return err
|
||||
}
|
||||
|
||||
// downloadTargets downloads all targets and delegated targets for the repository.
|
||||
// It uses a pre-order tree traversal as it's necessary to download parents first
|
||||
// to obtain the keys to validate children.
|
||||
func (c *tufClient) downloadTargets() error {
|
||||
toDownload := []data.DelegationRole{{
|
||||
BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole},
|
||||
Paths: []string{""},
|
||||
}}
|
||||
|
||||
for len(toDownload) > 0 {
|
||||
role := toDownload[0]
|
||||
toDownload = toDownload[1:]
|
||||
|
||||
consistentInfo := c.newBuilder.GetConsistentInfo(role.Name)
|
||||
if !consistentInfo.ChecksumKnown() {
|
||||
logrus.Debugf("skipping %s because there is no checksum for it", role.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
children, err := c.getTargetsFile(role, consistentInfo)
|
||||
switch err.(type) {
|
||||
case signed.ErrExpired, signed.ErrRoleThreshold:
|
||||
if role.Name == data.CanonicalTargetsRole {
|
||||
return err
|
||||
}
|
||||
logrus.Warnf("Error getting %s: %s", role.Name, err)
|
||||
break
|
||||
case nil:
|
||||
toDownload = append(children, toDownload...)
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c tufClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) {
|
||||
logrus.Debugf("Loading %s...", role.Name)
|
||||
tgs := &data.SignedTargets{}
|
||||
|
||||
raw, err := c.tryLoadCacheThenRemote(ci)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// we know it unmarshals because if `tryLoadCacheThenRemote` didn't fail, then
|
||||
// the raw has already been loaded into the builder
|
||||
json.Unmarshal(raw, tgs)
|
||||
return tgs.GetValidDelegations(role), nil
|
||||
}
|
||||
|
||||
// downloadRoot is responsible for downloading the root.json
|
||||
func (c *tufClient) downloadRoot() ([]byte, error) {
|
||||
role := data.CanonicalRootRole
|
||||
consistentInfo := c.newBuilder.GetConsistentInfo(role)
|
||||
|
||||
// We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle
|
||||
// since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch
|
||||
if !consistentInfo.ChecksumKnown() {
|
||||
logrus.Debugf("Loading root with no expected checksum")
|
||||
|
||||
// get the cached root, if it exists, just for version checking
|
||||
cachedRoot, _ := c.cache.GetSized(role.String(), -1)
|
||||
// prefer to download a new root
|
||||
return c.tryLoadRemote(consistentInfo, cachedRoot)
|
||||
}
|
||||
return c.tryLoadCacheThenRemote(consistentInfo)
|
||||
}
|
||||
|
||||
func (c *tufClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) {
|
||||
cachedTS, err := c.cache.GetSized(consistentInfo.RoleName.String(), consistentInfo.Length())
|
||||
if err != nil {
|
||||
logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName)
|
||||
return c.tryLoadRemote(consistentInfo, nil)
|
||||
}
|
||||
|
||||
if err = c.newBuilder.Load(consistentInfo.RoleName, cachedTS, 1, false); err == nil {
|
||||
logrus.Debugf("successfully verified cached %s", consistentInfo.RoleName)
|
||||
return cachedTS, nil
|
||||
}
|
||||
|
||||
logrus.Debugf("cached %s is invalid (must download): %s", consistentInfo.RoleName, err)
|
||||
return c.tryLoadRemote(consistentInfo, cachedTS)
|
||||
}
|
||||
|
||||
func (c *tufClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) {
|
||||
consistentName := consistentInfo.ConsistentName()
|
||||
raw, err := c.remote.GetSized(consistentName, consistentInfo.Length())
|
||||
if err != nil {
|
||||
logrus.Debugf("error downloading %s: %s", consistentName, err)
|
||||
return old, err
|
||||
}
|
||||
|
||||
// try to load the old data into the old builder - only use it to validate
|
||||
// versions if it loads successfully. If it errors, then the loaded version
|
||||
// will be 1
|
||||
c.oldBuilder.Load(consistentInfo.RoleName, old, 1, true)
|
||||
minVersion := c.oldBuilder.GetLoadedVersion(consistentInfo.RoleName)
|
||||
if err := c.newBuilder.Load(consistentInfo.RoleName, raw, minVersion, false); err != nil {
|
||||
logrus.Debugf("downloaded %s is invalid: %s", consistentName, err)
|
||||
return raw, err
|
||||
}
|
||||
logrus.Debugf("successfully verified downloaded %s", consistentName)
|
||||
if err := c.cache.Set(consistentInfo.RoleName.String(), raw); err != nil {
|
||||
logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err)
|
||||
}
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
// TUFLoadOptions are provided to LoadTUFRepo, which loads a TUF repo from cache,
|
||||
// from a remote store, or both
|
||||
type TUFLoadOptions struct {
|
||||
GUN data.GUN
|
||||
TrustPinning trustpinning.TrustPinConfig
|
||||
CryptoService signed.CryptoService
|
||||
Cache store.MetadataStore
|
||||
RemoteStore store.RemoteStore
|
||||
AlwaysCheckInitialized bool
|
||||
}
|
||||
|
||||
// bootstrapClient attempts to bootstrap a root.json to be used as the trust
|
||||
// anchor for a repository. The checkInitialized argument indicates whether
|
||||
// we should always attempt to contact the server to determine if the repository
|
||||
// is initialized or not. If set to true, we will always attempt to download
|
||||
// and return an error if the remote repository errors.
|
||||
//
|
||||
// Populates a tuf.RepoBuilder with this root metadata. If the root metadata
|
||||
// downloaded is a newer version than what is on disk, then intermediate
|
||||
// versions will be downloaded and verified in order to rotate trusted keys
|
||||
// properly. Newer root metadata must always be signed with the previous
|
||||
// threshold and keys.
|
||||
//
|
||||
// Fails if the remote server is reachable and does not know the repo
|
||||
// (i.e. before any metadata has been published), in which case the error is
|
||||
// store.ErrMetaNotFound, or if the root metadata (from whichever source is used)
|
||||
// is not trusted.
|
||||
//
|
||||
// Returns a TUFClient for the remote server, which may not be actually
|
||||
// operational (if the URL is invalid but a root.json is cached).
|
||||
func bootstrapClient(l TUFLoadOptions) (*tufClient, error) {
|
||||
minVersion := 1
|
||||
// the old root on disk should not be validated against any trust pinning configuration
|
||||
// because if we have an old root, it itself is the thing that pins trust
|
||||
oldBuilder := tuf.NewRepoBuilder(l.GUN, l.CryptoService, trustpinning.TrustPinConfig{})
|
||||
|
||||
// by default, we want to use the trust pinning configuration on any new root that we download
|
||||
newBuilder := tuf.NewRepoBuilder(l.GUN, l.CryptoService, l.TrustPinning)
|
||||
|
||||
// Try to read root from cache first. We will trust this root until we detect a problem
|
||||
// during update which will cause us to download a new root and perform a rotation.
|
||||
// If we have an old root, and it's valid, then we overwrite the newBuilder to be one
|
||||
// preloaded with the old root or one which uses the old root for trust bootstrapping.
|
||||
if rootJSON, err := l.Cache.GetSized(data.CanonicalRootRole.String(), store.NoSizeLimit); err == nil {
|
||||
// if we can't load the cached root, fail hard because that is how we pin trust
|
||||
if err := oldBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// again, the root on disk is the source of trust pinning, so use an empty trust
|
||||
// pinning configuration
|
||||
newBuilder = tuf.NewRepoBuilder(l.GUN, l.CryptoService, trustpinning.TrustPinConfig{})
|
||||
|
||||
if err := newBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, false); err != nil {
|
||||
// Ok, the old root is expired - we want to download a new one. But we want to use the
|
||||
// old root to verify the new root, so bootstrap a new builder with the old builder
|
||||
// but use the trustpinning to validate the new root
|
||||
minVersion = oldBuilder.GetLoadedVersion(data.CanonicalRootRole)
|
||||
newBuilder = oldBuilder.BootstrapNewBuilderWithNewTrustpin(l.TrustPinning)
|
||||
}
|
||||
}
|
||||
|
||||
    if !newBuilder.IsLoaded(data.CanonicalRootRole) || l.AlwaysCheckInitialized {
        // We either could not load a root from cache, or we are specifically
        // checking whether the repository has been initialized on the server.

        // Try to get the root from the remote store. We have no local data
        // from which to determine the size of the root, so request the
        // maximum (downloads are still restricted to 100MB).
        tmpJSON, err := l.RemoteStore.GetSized(data.CanonicalRootRole.String(), store.NoSizeLimit)
        if err != nil {
            // we didn't have a root in cache and were unable to load one from
            // the server. Nothing we can do but error.
            return nil, err
        }

        if !newBuilder.IsLoaded(data.CanonicalRootRole) {
            // we always want to use the downloaded root if we couldn't load from cache
            if err := newBuilder.Load(data.CanonicalRootRole, tmpJSON, minVersion, false); err != nil {
                return nil, err
            }

            if err := l.Cache.Set(data.CanonicalRootRole.String(), tmpJSON); err != nil {
                // if we can't write to the cache we should still continue; just log the error
                logrus.Errorf("could not save root to cache: %s", err.Error())
            }
        }
    }

    // Defensive check: we can only reach this point without a loaded root if
    // neither the cache nor the remote produced one, meaning the repository
    // has never been initialized.
    if !newBuilder.IsLoaded(data.CanonicalRootRole) {
        return nil, ErrRepoNotInitialized{}
    }

    return &tufClient{
        oldBuilder: oldBuilder,
        newBuilder: newBuilder,
        remote:     l.RemoteStore,
        cache:      l.Cache,
    }, nil
}
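
// To make the rotation path above concrete, here is a rough sketch (not the
// literal API calls) of the builder handoff, assuming hypothetical root
// versions 3 (in cache) and 5 (on the server):
//
//    oldBuilder.Load(root, v3)   // the cached root pins trust
//    newBuilder.Load(root, v3)   // fails, e.g. because v3 has expired
//    newBuilder = oldBuilder.BootstrapNewBuilderWithNewTrustpin(l.TrustPinning)
//                                // v3's keys now anchor verification
//
// The returned tufClient's Update then downloads v4 and v5 in order, each
// verified against the keys and threshold of the version before it.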

// LoadTUFRepo bootstraps a trust anchor (root.json) from the cache (if
// provided) before updating all of the metadata for the repo from the remote
// (if provided). It can load a TUF repo from the cache, from a remote store,
// or both.
func LoadTUFRepo(options TUFLoadOptions) (*tuf.Repo, *tuf.Repo, error) {
    // set some sane defaults, so that nothing strictly has to be provided
    if options.RemoteStore == nil {
        options.RemoteStore = store.OfflineStore{}
    }
    if options.Cache == nil {
        options.Cache = store.NewMemoryStore(nil)
    }
    if options.CryptoService == nil {
        options.CryptoService = cryptoservice.EmptyService
    }

    c, err := bootstrapClient(options)
    if err != nil {
        if _, ok := err.(store.ErrMetaNotFound); ok {
            return nil, nil, ErrRepositoryNotExist{
                remote: options.RemoteStore.Location(),
                gun:    options.GUN,
            }
        }
        return nil, nil, err
    }
    repo, invalid, err := c.Update()
    if err != nil {
        // notFound.Resource may include a version or checksum, so when the
        // role is root it will be "root", "<version>.root", or
        // "root.<checksum>". (If the type assertion fails, notFound is the
        // zero value, and the regexp cannot match its empty Resource.)
        notFound, ok := err.(store.ErrMetaNotFound)
        isRoot, _ := regexp.MatchString(`\.?`+data.CanonicalRootRole.String()+`\.?`, notFound.Resource)
        if ok && isRoot {
            return nil, nil, ErrRepositoryNotExist{
                remote: options.RemoteStore.Location(),
                gun:    options.GUN,
            }
        }
        return nil, nil, err
    }
    warnRolesNearExpiry(repo)
    return repo, invalid, nil
}
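
// A minimal usage sketch for LoadTUFRepo; the GUN and server URL are
// hypothetical, and any field left unset falls back to the offline/in-memory
// defaults above:
//
//    remote, err := store.NewHTTPStore(
//        "https://notary.example.com/v2/example/app/_trust/tuf/",
//        "", "json", "key", http.DefaultTransport,
//    )
//    if err != nil {
//        return err
//    }
//    valid, invalid, err := LoadTUFRepo(TUFLoadOptions{
//        GUN:         "example/app",
//        RemoteStore: remote,
//    })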
62
vendor/github.com/theupdateframework/notary/client/witness.go
generated
vendored
Normal file
@ -0,0 +1,62 @@

package client

import (
    "github.com/theupdateframework/notary/client/changelist"
    "github.com/theupdateframework/notary/tuf"
    "github.com/theupdateframework/notary/tuf/data"
)

// Witness creates change objects to witness (i.e. re-sign) the given
// roles on the next publish. One change is created per role.
func (r *repository) Witness(roles ...data.RoleName) ([]data.RoleName, error) {
    var err error
    successful := make([]data.RoleName, 0, len(roles))
    for _, role := range roles {
        // the scope of a witness change is the role being witnessed
        c := changelist.NewTUFChange(
            changelist.ActionUpdate,
            role,
            changelist.TypeWitness,
            "",
            nil,
        )
        err = r.changelist.Add(c)
        if err != nil {
            break
        }
        successful = append(successful, role)
    }
    return successful, err
}
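
// A hedged usage sketch: given a Repository r (for instance one returned by
// NewFileCachedRepository), witnessing queues one TypeWitness change per role
// on the changelist; the roles are only actually re-signed at publish time:
//
//    witnessed, err := r.Witness("targets/releases", "targets/qa")
//    if err != nil {
//        // witnessed holds only the roles queued before the failure
//    }
//    if err := r.Publish(); err != nil {
//        // handle publish failure
//    }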

func witnessTargets(repo *tuf.Repo, invalid *tuf.Repo, role data.RoleName) error {
    if r, ok := repo.Targets[role]; ok {
        // role is already valid, just mark it for re-signing/updating
        r.Dirty = true
        return nil
    }

    if roleObj, err := repo.GetDelegationRole(role); err == nil && invalid != nil {
        // A role with a threshold > len(keys) is technically invalid, but we
        // let it build in the builder because we want to be able to download
        // the role (which may still have targets on it), add more keys, and
        // then witness the role, thus bringing it back to valid. However, if
        // no keys have been added before witnessing, it is still an invalid
        // role and can't be witnessed, because nothing can bring it back to
        // valid.
        if roleObj.Threshold > len(roleObj.Keys) {
            return data.ErrInvalidRole{
                Role:   role,
                Reason: "role does not specify enough valid signing keys to meet its required threshold",
            }
        }
        if r, ok := invalid.Targets[role]; ok {
            // role is recognized but invalid; move it to the valid data and
            // mark it for re-signing
            repo.Targets[role] = r
            r.Dirty = true
            return nil
        }
    }
    // role isn't recognized, even as invalid
    return data.ErrInvalidRole{
        Role:   role,
        Reason: "this role is not known",
    }
}
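
// To illustrate the threshold check above with hypothetical numbers: a
// delegation created with a threshold of 2 that has had one of its two keys
// removed now has Threshold (2) > len(Keys) (1). Witnessing it would produce
// metadata that could never be validly signed, so ErrInvalidRole is returned;
// once at least two keys are present again, witnessing moves the role out of
// the invalid repo and marks it Dirty for re-signing on the next publish.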