vendor/github.com/theupdateframework/notary/.gitignore (new file, generated, vendored)
@@ -0,0 +1,17 @@
/.vscode
/cmd/notary-server/notary-server
/cmd/notary-server/local.config.*
/cmd/notary-signer/notary-signer
/cmd/notary-signer/local.config.*
/cmd/escrow/escrow
/cmd/escrow/local.config.*
cover
bin
cross
.cover
*.swp
.idea
*.iml
*.test
coverage*.txt
gosec_output.csv
vendor/github.com/theupdateframework/notary/CHANGELOG.md (new file, generated, vendored)
@@ -0,0 +1,156 @@
# Changelog

## [v0.7.0](https://github.com/docker/notary/releases/tag/v0.7.0) 12/01/2021
+ Switch to Go modules [#1523](https://github.com/theupdateframework/notary/pull/1523)
+ Use golang/x/crypto for ed25519 [#1344](https://github.com/theupdateframework/notary/pull/1344) (see the sketch after this list)
+ Update Go version
+ Update dependency versions
+ Fixes from using Gosec for source analysis
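
For the ed25519 entry above (#1344), here is a minimal sketch of the `golang.org/x/crypto/ed25519` API; it demonstrates the package itself, not how notary wires it in internally:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// Generate a keypair, sign a message, and verify the signature.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("trusted collection metadata")
	sig := ed25519.Sign(priv, msg)
	fmt.Println("signature valid:", ed25519.Verify(pub, msg, sig))
}
```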

## [v0.6.1](https://github.com/docker/notary/releases/tag/v0.6.1) 04/10/2018
+ Fixed bug where CLI requested admin privileges for all metadata operations, including listing targets on a repo [#1315](https://github.com/theupdateframework/notary/pull/1315)
+ Prevented notary signer from being dumpable or ptraceable in Linux, except in debug mode [#1327](https://github.com/theupdateframework/notary/pull/1327)
+ Bumped JWT dependency to fix potential Invalid Curve Attack on NIST curves within ECDH key management [#1334](https://github.com/theupdateframework/notary/pull/1334)
+ If the home directory cannot be found, log a warning instead of erroring out [#1318](https://github.com/theupdateframework/notary/pull/1318)
+ Bumped Go version and various dependencies [#1323](https://github.com/theupdateframework/notary/pull/1323) [#1332](https://github.com/theupdateframework/notary/pull/1332) [#1335](https://github.com/theupdateframework/notary/pull/1335) [#1336](https://github.com/theupdateframework/notary/pull/1336)
+ Various internal and documentation fixes [#1312](https://github.com/theupdateframework/notary/pull/1312) [#1313](https://github.com/theupdateframework/notary/pull/1313) [#1319](https://github.com/theupdateframework/notary/pull/1319) [#1320](https://github.com/theupdateframework/notary/pull/1320) [#1324](https://github.com/theupdateframework/notary/pull/1324) [#1326](https://github.com/theupdateframework/notary/pull/1326) [#1328](https://github.com/theupdateframework/notary/pull/1328) [#1329](https://github.com/theupdateframework/notary/pull/1329) [#1333](https://github.com/theupdateframework/notary/pull/1333)

## [v0.6.0](https://github.com/docker/notary/releases/tag/v0.6.0) 02/28/2018
+ **The project has been moved from https://github.com/docker/notary to https://github.com/theupdateframework/notary, as it has been accepted into the CNCF. Downstream users should update their go imports.**
+ Removed support for RSA-key-exchange ciphers in the server and signer; the server and signer now require TLS >= 1.2. [#1307](https://github.com/theupdateframework/notary/pull/1307)
+ `libykcs11` can be found in several additional locations on Fedora. [#1286](https://github.com/theupdateframework/notary/pull/1286/)
+ If a certificate is used as a delegation public key, notary no longer warns if the certificate has expired, since notary should be relying on the role expiry instead. [#1263](https://github.com/theupdateframework/notary/pull/1263)
+ An error is now returned when importing keys if there were invalid PEM blocks. [#1260](https://github.com/theupdateframework/notary/pull/1260)
+ Notary server authentication credentials can now be provided as an environment variable `NOTARY_AUTH`, which should contain a base64-encoded "username:password" value (see the first sketch after this list). [#1246](https://github.com/theupdateframework/notary/pull/1246)
+ Changefeeds are now supported for RethinkDB as well as SQL servers. [#1214](https://github.com/theupdateframework/notary/pull/1214)
+ Notary CLI will now time out after 30 seconds if a username and password are not provided when authenticating to a notary server, fixing an issue where scripts for the notary CLI may hang forever. [#1200](https://github.com/theupdateframework/notary/pull/1200)
+ Fixed potential race condition in the signer keystore. [#1198](https://github.com/theupdateframework/notary/pull/1198)
+ Notary no longer provides the option to generate RSA keys for a repository, but externally generated RSA keys can still be imported as keys for a repository. [#1191](https://github.com/theupdateframework/notary/pull/1191)
+ Fixed bug where the notary client would `ioutil.ReadAll` responses from the server without limiting the size (see the bounded-read sketch after this list). [#1186](https://github.com/theupdateframework/notary/pull/1186)
+ Default notary CLI log level is now `warn`, and if the `-v` option is passed, it is at `info`. [#1179](https://github.com/theupdateframework/notary/pull/1179)
+ Example Postgres config now includes an example of mutual TLS authentication between the server/signer and Postgres. [#1160](https://github.com/theupdateframework/notary/pull/1160) [#1163](https://github.com/theupdateframework/notary/pull/1163/)
+ Fixed an error where piping the server authentication credentials via STDIN when scripting the notary CLI did not work. [#1155](https://github.com/theupdateframework/notary/pull/1155)
+ If the server and signer configurations forget to specify `parseTime=true` when using MySQL, notary server and signer will automatically add the option (see the DSN sketch after this list). [#1150](https://github.com/theupdateframework/notary/pull/1150)
+ Custom metadata can now be provided and read on a target when using the notary client as a library (not yet exposed on the CLI). [#1146](https://github.com/theupdateframework/notary/pull/1146)
+ `notary init` now accepts a `--root-cert` and `--root-key` flag for use with privately generated certificates and keys. [#1144](https://github.com/theupdateframework/notary/pull/1144)
+ `notary key generate` now accepts a `--role` flag as well as a `--output` flag. This means it can generate new targets or delegation keys, and it can also output keys to a file instead of storing them in the default notary key store. [#1134](https://github.com/theupdateframework/notary/pull/1134)
+ Newly generated keys are now stored encrypted and encoded in PKCS#8 format. **This is not forwards-compatible with notary<0.6.0 and docker<17.12.x. Also please note that docker>=17.12.x is not forwards-compatible with notary<0.6.0.** [#1130](https://github.com/theupdateframework/notary/pull/1130) [#1201](https://github.com/theupdateframework/notary/pull/1201)
+ Added support for wildcarded certificate IDs in the trustpinning configuration [#1126](https://github.com/theupdateframework/notary/pull/1126)
+ Added support for using the client against notary servers which are hosted as a subpath under another server (e.g. https://domain.com/notary instead of https://notary.com) [#1108](https://github.com/theupdateframework/notary/pull/1108)
+ If no changes were made to the targets file, you are no longer required to sign the target [#1104](https://github.com/theupdateframework/notary/pull/1104)
+ escrow placeholder [#1096](https://github.com/theupdateframework/notary/pull/1096)
+ Added support for wildcard suffixes in root certificate CNs for root keys, so that a single root certificate is valid for multiple repositories [#1088](https://github.com/theupdateframework/notary/pull/1088)
+ Root key rotations no longer require all previous root keys to sign new root metadata. [#942](https://github.com/theupdateframework/notary/pull/942)
  + New keys are trusted if the root metadata file specifying the new key was signed by the previous root key/threshold
  + Root metadata can now be requested by version from the server, allowing clients with older root metadata to validate each new version one by one up to the current metadata
  + `notary key rotate` now accepts a flag specifying which key to rotate to [#942](https://github.com/theupdateframework/notary/pull/942)
+ Refactoring of the client to make it easier to use as a library and to inject dependencies:
  + References to GUN have now been changed to "imagename". [#1081](https://github.com/theupdateframework/notary/pull/1081)
  + `NewNotaryRepository` can now be provided with a remote store and changelist, as opposed to always constructing its own. [#1094](https://github.com/theupdateframework/notary/pull/1094)
  + If needed, the notary repository will be initialized first when publishing. [#1105](https://github.com/theupdateframework/notary/pull/1105)
  + `NewNotaryRepository` now requires a non-nil cache store. [#1185](https://github.com/theupdateframework/notary/pull/1185)
  + The "No valid trust data" error is now typed. [#1212](https://github.com/theupdateframework/notary/pull/1212)
  + `TUFClient` was previously mistakenly exported, and is now unexported. [#1215](https://github.com/theupdateframework/notary/pull/1215)
  + The notary client now has a `Repository` interface type to standardize `client.NotaryRepository` (see the interface sketch after this list). [#1220](https://github.com/theupdateframework/notary/pull/1220)
  + The constructor functions `NewFileCachedNotaryRepository` and `NewNotaryRepository` have been renamed, respectively, to `NewFileCachedRepository` and `NewRepository` to reduce redundancy. [#1226](https://github.com/theupdateframework/notary/pull/1226)
  + `NewRepository` returns an interface rather than the concrete type `NotaryRepository` it previously returned. `NotaryRepository` is also now an unexported concrete type. [#1226](https://github.com/theupdateframework/notary/pull/1226)
  + Key import/export logic has been moved from the `utils` package to the `trustmanager` package. [#1250](https://github.com/theupdateframework/notary/pull/1250)
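
The `NOTARY_AUTH` entry above (#1246) expects a base64-encoded `username:password` value. A minimal sketch of producing one in Go; the credentials shown are placeholders:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// NOTARY_AUTH carries base64("username:password").
	creds := base64.StdEncoding.EncodeToString([]byte("myuser:mypassword"))
	fmt.Printf("export NOTARY_AUTH=%s\n", creds)
}
```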
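
The unbounded-read fix (#1186) is the standard `io.LimitReader` pattern. A sketch of that pattern, not notary's exact code; the size cap is an assumed illustrative value:

```go
// Package example sketches the bounded-read pattern from #1186.
package example

import (
	"io"
	"io/ioutil"
	"net/http"
)

// maxResponseSize is illustrative; the limit notary actually applies is
// defined in the client, not here.
const maxResponseSize = 5 << 20 // 5 MiB

// readBoundedBody wraps the body in an io.LimitReader so a misbehaving
// server cannot make the client buffer an unbounded response.
func readBoundedBody(resp *http.Response) ([]byte, error) {
	defer resp.Body.Close()
	return ioutil.ReadAll(io.LimitReader(resp.Body, maxResponseSize))
}
```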
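
For the `parseTime=true` entry (#1150): this is the go-sql-driver/mysql DSN option that makes `DATETIME` columns scan into `time.Time`. A sketch with placeholder host and credentials:

```go
package main

import (
	"database/sql"

	_ "github.com/go-sql-driver/mysql" // MySQL driver used by notary's SQL backends
)

func main() {
	// parseTime=true is the option notary server and signer now add
	// automatically when it is missing. Host and credentials below are
	// placeholders.
	db, err := sql.Open("mysql", "server:password@tcp(mysql:3306)/notaryserver?parseTime=true")
	if err != nil {
		panic(err)
	}
	defer db.Close()
}
```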
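
To illustrate the `Repository` interface refactor (#1220, #1226): a sketch of a helper that accepts the interface instead of a concrete type, so callers can swap in fakes for testing. Method and field names follow the notary client package, but treat exact signatures as assumptions to verify against the vendored source:

```go
// Package example sketches consuming the client.Repository interface.
package example

import (
	"fmt"

	"github.com/theupdateframework/notary/client"
)

// printTargets depends only on the Repository interface, not on the
// (now unexported) concrete repository type.
func printTargets(repo client.Repository) error {
	targets, err := repo.ListTargets()
	if err != nil {
		return err
	}
	for _, t := range targets {
		fmt.Printf("%s\t%x\n", t.Name, t.Hashes["sha256"])
	}
	return nil
}
```
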
## [v0.5.0](https://github.com/docker/notary/releases/tag/v0.5.0) 11/14/2016
+ Non-certificate public keys in PEM format can now be added to delegation roles [#965](https://github.com/docker/notary/pull/965)
+ PostgreSQL support as a storage backend for Server and Signer [#920](https://github.com/docker/notary/pull/920)
+ Notary server's health check now fails if it cannot connect to the signer, since no new repositories can be created and existing repositories cannot be updated if the server cannot reach the signer [#952](https://github.com/docker/notary/pull/952)
+ Server runs its connectivity healthcheck to the signer once every 10 seconds instead of once every minute. [#902](https://github.com/docker/notary/pull/902)
+ The keys on disk are now stored in the `~/.notary/private` directory, rather than in a key hierarchy that separates them by GUN and by role. Notary will automatically migrate old-style directory layouts to the new style. **This is not forwards-compatible with notary<0.4.2 and docker<=1.12** [#872](https://github.com/docker/notary/pull/872)
+ A new changefeed API has been added to Notary Server. It is only supported when using one of the relational database backends: MySQL, PostgreSQL, or SQLite. [#1019](https://github.com/docker/notary/pull/1019)

## [v0.4.3](https://github.com/docker/notary/releases/tag/v0.4.3) 1/3/2017
+ Fix build tags for static notary client binaries in linux [#1039](https://github.com/docker/notary/pull/1039)
+ Fix key import for exported delegation keys [#1067](https://github.com/docker/notary/pull/1067)

## [v0.4.2](https://github.com/docker/notary/releases/tag/v0.4.2) 9/30/2016
+ Bump the cross compiler to golang 1.7.1, since [1.6.3 builds binaries that could have non-deterministic bugs in OS X Sierra](https://groups.google.com/forum/#!msg/golang-dev/Jho5sBHZgAg/cq6d97S1AwAJ) [#984](https://github.com/docker/notary/pull/984)

## [v0.4.1](https://github.com/docker/notary/releases/tag/v0.4.1) 9/27/2016
+ Preliminary Windows support for notary client [#970](https://github.com/docker/notary/pull/970)
+ Output message to CLI when repo changes have been successfully published [#974](https://github.com/docker/notary/pull/974)
+ Improved error messages for client authentication errors and for the witness command [#972](https://github.com/docker/notary/pull/972)
+ Support for finding keys that are anywhere in the notary directory's "private" directory, not just under "private/root_keys" or "private/tuf_keys" [#981](https://github.com/docker/notary/pull/981)
+ Previously, on any error updating, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail - for example if there was an invalid root rotation. [#884](https://github.com/docker/notary/pull/884) [#982](https://github.com/docker/notary/pull/982)

## [v0.4.0](https://github.com/docker/notary/releases/tag/v0.4.0) 9/21/2016
+ Server-managed key rotations [#889](https://github.com/docker/notary/pull/889)
+ Remove `timestamp_keys` table, which stored redundant information [#889](https://github.com/docker/notary/pull/889)
+ Introduce `notary delete` command to delete local and/or remote repo data [#895](https://github.com/docker/notary/pull/895)
+ Introduce `notary witness` command to stage signatures for specified roles [#875](https://github.com/docker/notary/pull/875)
+ Add `-p` flag to offline commands to attempt auto-publish [#886](https://github.com/docker/notary/pull/886) [#912](https://github.com/docker/notary/pull/912) [#923](https://github.com/docker/notary/pull/923)
+ Introduce `notary reset` command to manage staged changes [#959](https://github.com/docker/notary/pull/959) [#856](https://github.com/docker/notary/pull/856)
+ Add `--rootkey` flag to `notary init` to provide a private root key for a repo [#801](https://github.com/docker/notary/pull/801)
+ Introduce `notary delegation purge` command to remove a specified key from all delegations [#855](https://github.com/docker/notary/pull/855)
+ Removed HTTP endpoint from notary-signer [#870](https://github.com/docker/notary/pull/870)
+ Refactored and unified key storage [#825](https://github.com/docker/notary/pull/825)
+ Batched key import and export now operate on PEM files (potentially with multiple blocks) instead of ZIP [#825](https://github.com/docker/notary/pull/825) [#882](https://github.com/docker/notary/pull/882)
+ Add full database integration test-suite [#824](https://github.com/docker/notary/pull/824) [#854](https://github.com/docker/notary/pull/854) [#863](https://github.com/docker/notary/pull/863)
+ Improve notary-server, trust pinning, and yubikey logging [#798](https://github.com/docker/notary/pull/798) [#858](https://github.com/docker/notary/pull/858) [#891](https://github.com/docker/notary/pull/891)
+ Warn if certificates for root or delegations are near expiry [#802](https://github.com/docker/notary/pull/802)
+ Warn if role metadata is near expiry [#786](https://github.com/docker/notary/pull/786)
+ Reformat CLI table output to use the `text/tabwriter` package [#809](https://github.com/docker/notary/pull/809)
+ Fix passphrase retrieval attempt counting and terminal detection [#906](https://github.com/docker/notary/pull/906)
+ Fix listing nested delegations [#864](https://github.com/docker/notary/pull/864)
+ Bump go version to 1.6.3, fix go1.7 compatibility [#851](https://github.com/docker/notary/pull/851) [#793](https://github.com/docker/notary/pull/793)
+ Convert docker-compose files to v2 format [#755](https://github.com/docker/notary/pull/755)
+ Validate root rotations against trust pinning [#800](https://github.com/docker/notary/pull/800)
+ Update fixture certificates for two-year expiry window [#951](https://github.com/docker/notary/pull/951)

## [v0.3.0](https://github.com/docker/notary/releases/tag/v0.3.0) 5/11/2016
+ Root rotations
+ RethinkDB support as a storage backend for Server and Signer
+ A new TUF repo builder that merges server and client validation
+ Trust Pinning: configure known good key IDs and CAs to replace TOFU.
+ Add --input, --output, and --quiet flags to notary verify command
+ Remove local certificate store. It was redundant, as all certs were also stored in the cached root.json
+ Cleanup of dead code in client-side key storage logic
+ Update project to Go 1.6.1
+ Reorganize vendoring to meet Go 1.6+ standard. Still using Godeps to manage vendored packages
+ Add targets by hash, no longer necessary to have the original target data available
+ Active Key ID verification during signature verification
+ Switch all testing from assert to require, reduces noise in test runs
+ Use alpine-based images for smaller downloads and faster setup times
+ Clean up out-of-date signatures when re-signing content
+ Set cache control headers on HTTP responses from Notary Server
+ Add sha512 support for targets
+ Add environment variable for delegation key passphrase
+ Reduce permissions requested by client from token server
+ Update formatting for delegation list output
+ Move SQLite dependency to tests only so it doesn't get built into official images
+ Fixed asking for password to list private repositories
+ Enable using notary client with username/password in a scripted fashion
+ Fix static compilation of client
+ Enforce TUF version to be >= 1, previously 0 was acceptable although unused
+ json.RawMessage should always be used as *json.RawMessage due to concepts of addressability in Go and effects on encoding (sketched below)
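
A short demonstration of the `*json.RawMessage` point above. With the Go versions of that era (before Go 1.8), `json.RawMessage` declared `MarshalJSON` on the pointer receiver, so a non-pointer value fell back to plain `[]byte` encoding (base64) instead of splicing the raw JSON through verbatim:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// signed uses a pointer so the RawMessage is addressable and its custom
// marshaler is always used, regardless of Go version.
type signed struct {
	Payload *json.RawMessage `json:"payload"`
}

func main() {
	raw := json.RawMessage(`{"a":1}`)
	out, err := json.Marshal(signed{Payload: &raw})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"payload":{"a":1}}
}
```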

## [v0.2](https://github.com/docker/notary/releases/tag/v0.2.0) 2/24/2016
+ Add support for delegation roles in `notary` server and client
+ Add `notary CLI` commands for managing delegation roles: `notary delegation`
  + `add`, `list` and `remove` subcommands
+ Enhance `notary CLI` commands for adding targets to delegation roles
  + `notary add --roles` and `notary remove --roles` to manipulate targets for delegations
+ Support for rotating the snapshot key to one managed by the `notary` server
+ Add consistent download functionality to download metadata and content by checksum
+ Update `docker-compose` configuration to use official mariadb image
  + deprecate `notarymysql`
  + default to using a volume for `data` directory
  + use separate databases for `notary-server` and `notary-signer` with separate users
+ Add `notary CLI` command for changing private key passphrases: `notary key passwd`
+ Enhance `notary CLI` commands for importing and exporting keys
+ Change default `notary CLI` log level to fatal, introduce new verbose (error-level) and debug-level settings
+ Store roles as PEM headers in private keys, incompatible with previous notary v0.1 key format
+ No longer store keys as `<KEY_ID>_role.key`, instead store as `<KEY_ID>.key`; new private keys from new notary clients will crash old notary clients
+ Support logging as JSON format on server and signer
+ Support mutual TLS between notary client and notary server

## [v0.1](https://github.com/docker/notary/releases/tag/v0.1) 11/15/2015
+ Initial non-alpha `notary` version
+ Implement TUF (the update framework) with support for root, targets, snapshot, and timestamp roles
+ Add PKCS11 interface to store and sign with keys in HSMs (i.e. Yubikey)
vendor/github.com/theupdateframework/notary/CODE_OF_CONDUCT.md (new file, generated, vendored)
@@ -0,0 +1,43 @@
## CNCF Community Code of Conduct v1.0

### Contributor Code of Conduct

As contributors and maintainers of this project, and in the interest of fostering
an open and welcoming community, we pledge to respect all people who contribute
through reporting issues, posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.

We are committed to making participation in this project a harassment-free experience for
everyone, regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information, such as physical or electronic addresses,
without explicit permission
* Other unethical or unprofessional conduct.

Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are not
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
commit themselves to fairly and consistently applying these principles to every aspect
of managing this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a CNCF project maintainer, Sarah Novotny <sarahnovotny@google.com>, and/or Dan Kohn <dan@linuxfoundation.org>.

This Code of Conduct is adapted from the Contributor Covenant
(https://contributor-covenant.org), version 1.2.0, available at
https://contributor-covenant.org/version/1/2/0/

### CNCF Events Code of Conduct

CNCF events are governed by the Linux Foundation [Code of Conduct](https://events.linuxfoundation.org/events/cloudnativecon/attend/code-of-conduct) available on the event page. This is designed to be compatible with the above policy and also includes more details on responding to incidents.
vendor/github.com/theupdateframework/notary/CONTRIBUTING.md (new file, generated, vendored)
@@ -0,0 +1,95 @@
# Contributing to notary

## Before reporting an issue...

### If your problem is with...

- automated builds
- your account on the [Docker Hub](https://hub.docker.com/)
- any other [Docker Hub](https://hub.docker.com/) issue

Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)

### If you...

- need help setting up notary
- can't figure something out
- are not sure what's going on or what your problem is

Then please do not open an issue here yet - you should first try one of the following support forums:

- irc: #docker-trust on freenode

## Reporting an issue properly

By following these simple rules you will get better and faster feedback on your issue.

- search the bugtracker for an already reported issue

### If you found an issue that describes your problem:

- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- comment if you have some new, technical and relevant information to add to the case

### If you have not found an existing issue that describes your problem:

1. create a new issue, with a succinct title that describes your issue:
   - bad title: "It doesn't work with my docker"
   - good title: "Publish fail: 400 error with E_INVALID_DIGEST"
2. copy the output of:
   - `notary version` or `docker version`
3. Run `notary` or `docker` with the `-D` option for debug output, and please include a copy of the command and the output.
4. If relevant, copy your `notaryserver` and `notarysigner` logs that show the error (this is likely the output from running `docker-compose up`)

## Contributing a patch for a known bug, or a small correction

You should follow the basic GitHub workflow:

1. fork
2. commit a change
3. make sure the tests pass
4. PR

Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:

- configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
- sign your commits using `-s`: `git commit -s -m "My commit"` (this appends a `Signed-off-by: Real Name <mail@example.com>` trailer to the commit message)

Some simple rules to ensure quick merge:

- clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
- prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
- if you need to amend your PR following comments, please squash instead of adding more commits
- if fixing a bug or adding a feature, please add or update the relevant `CHANGELOG.md` entry with your pull request number and a description of the change

## Contributing new features

You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.

If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.

Then you should submit your implementation, clearly linking to the issue (and possible proposal).

Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.

It's mandatory to:

- interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
- address maintainers' comments and modify your submission accordingly
- write tests for any new code

Complying with these simple rules will greatly accelerate the review process and ensure you have a pleasant experience contributing code to notary.

## Review and Development notes

- All merges require LGTMs from any 2 maintainers.
- We use the git flow model (as best we can) using the `releases` branch as the stable branch, and the `master` branch as the development branch. When we get near a potential release, a release branch (`release/<semver>`) will be created from `master`. Any PRs that should go into the release should be made against that branch. Hotfixes for a minor release will be added to the branch `hotfix/<semver>`.

## Vendoring new dependency versions

We use [VNDR](https://github.com/LK4D4/vndr); please update `vendor.conf` with the new dependency or the new version, and run
`vndr <top level package name>`.
vendor/github.com/theupdateframework/notary/CONTRIBUTORS (new file, generated, vendored)
@@ -0,0 +1,4 @@
David Williamson <david.williamson@docker.com> (github: davidwilliamson)
Aaron Lehmann <aaron.lehmann@docker.com> (github: aaronlehmann)
Lewis Marshall <lewis@flynn.io> (github: lmars)
Jonathan Rudenberg <jonathan@flynn.io> (github: titanous)
vendor/github.com/theupdateframework/notary/Dockerfile (new file, generated, vendored)
@@ -0,0 +1,27 @@
FROM golang:1.14.1

RUN apt-get update && apt-get install -y \
	curl \
	clang \
	libsqlite3-dev \
	patch \
	tar \
	xz-utils \
	python \
	python-pip \
	python-setuptools \
	--no-install-recommends \
	&& rm -rf /var/lib/apt/lists/*

RUN useradd -ms /bin/bash notary \
	&& pip install codecov \
	&& go get golang.org/x/lint/golint github.com/fzipp/gocyclo github.com/client9/misspell/cmd/misspell github.com/gordonklaus/ineffassign github.com/securego/gosec/cmd/gosec/...

ENV NOTARYDIR /go/src/github.com/theupdateframework/notary

COPY . ${NOTARYDIR}
RUN chmod -R a+rw /go && chmod 0600 ${NOTARYDIR}/fixtures/database/*

ENV GO111MODULE=on

WORKDIR ${NOTARYDIR}
vendor/github.com/theupdateframework/notary/Jenkinsfile (new file, generated, vendored)
@@ -0,0 +1,7 @@
// Only run on Linux atm
wrappedNode(label: 'ubuntu && ec2 && docker-edge') {
	deleteDir()
	stage "checkout"
	checkout scm
}
vendor/github.com/theupdateframework/notary/LICENSE (new file, generated, vendored)
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2015 Docker, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
vendor/github.com/theupdateframework/notary/MAINTAINERS (new file, generated, vendored)
@@ -0,0 +1,70 @@
# Notary maintainers file
#
# This file describes who runs the theupdateframework/notary project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
	[Org."Core maintainers"]
		people = [
			"cyli",
			"diogomonica",
			"endophage",
			"ecordell",
			"hukeping",
			"justincormack",
			"nathanmccauley",
			"riyazdf",
		]

[people]

	# A reference list of all people associated with the project.
	# All other sections should refer to people by their canonical key
	# in the people section.

	# ADD YOURSELF HERE IN ALPHABETICAL ORDER

	[people.cyli]
	Name = "Ying Li"
	Email = "ying.li@docker.com"
	GitHub = "cyli"

	[people.diogomonica]
	Name = "Diogo Monica"
	Email = "diogo@docker.com"
	GitHub = "diogomonica"

	[people.endophage]
	Name = "David Lawrence"
	Email = "david.lawrence@docker.com"
	GitHub = "endophage"

	[people.ecordell]
	Name = "Evan Cordell"
	Email = "evan.cordell@coreos.com"
	GitHub = "ecordell"

	[people.hukeping]
	Name = "Hu Keping"
	Email = "hukeping@huawei.com"
	GitHub = "hukeping"

	[people.justincormack]
	Name = "Justin Cormack"
	Email = "justin.cormack@docker.com"
	GitHub = "justincormack"

	[people.nathanmccauley]
	Name = "Nathan McCauley"
	Email = "nathan.mccauley@docker.com"
	GitHub = "nathanmccauley"

	[people.riyazdf]
	Name = "Riyaz Faizullabhoy"
	Email = "riyazdf@berkeley.edu"
	GitHub = "riyazdf"
vendor/github.com/theupdateframework/notary/MAINTAINERS.ALUMNI (new file, generated, vendored)
@@ -0,0 +1,22 @@
# Notary maintainers alumni file
#
# This file describes past maintainers who have stepped down from the role.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
[Org]
	[Org."Notary Alumni"]
		people = [
			"dmcgowan",
		]

[people]

	# ADD YOURSELF HERE IN ALPHABETICAL ORDER

	[people.dmcgowan]
	Name = "Derek McGowan"
	Email = "derek@docker.com"
	GitHub = "dmcgowan"
vendor/github.com/theupdateframework/notary/MAINTAINERS_RULES.md (new file, generated, vendored)
@@ -0,0 +1,39 @@
# Maintainers Rules

This document lays out some basic rules and guidelines all maintainers are expected to follow.
Changes to the [Acceptance Criteria](#hard-acceptance-criteria-for-merging-a-pr) for merging PRs require a ceiling(two-thirds) supermajority from the maintainers (for example, 6 of 8 maintainers).
Changes to the [Repo Guidelines](#repo-guidelines) require a simple majority.

## Hard Acceptance Criteria for merging a PR:

- 2 LGTMs are required when merging a PR
- If there is obviously still discussion going on in the PR, even with 2 LGTMs, let the discussion resolve before merging. If you're not sure, reach out to the maintainers involved in the discussion.
- All checks must be green
  - There are limited mitigating circumstances for this, like if the docs builds are just broken and that's the only test failing.
- Adding or removing a check requires simple majority approval from the maintainers.

## Repo Guidelines:

- Consistency is vital to keep complexity low and understandable.
- Automate as much as possible (we don't have guidelines about coding style, for example, because we've automated fmt, vet, lint, etc.).
- Try to keep PRs small and focused (this is not always possible, i.e. builder refactor, storage refactor, etc., but it is a good target).

## Process for becoming a maintainer:

- Invitation is proposed by an existing maintainer.
- A ceiling(two-thirds) supermajority approval from existing maintainers (including the vote of the proposing maintainer) is required to accept the proposal.
- The newly approved maintainer submits a PR adding themselves to the MAINTAINERS file.
- Existing maintainers publicly mark their approval on the PR.
- An existing maintainer updates repository permissions to grant write access to the new maintainer.
- The new maintainer merges their PR.

## Removing maintainers

It is preferable that a maintainer gracefully removes themselves from the MAINTAINERS file if they are
aware they will no longer have the time or motivation to contribute to the project. Maintainers that
have been inactive in the repo for a period of at least one year should be contacted to ask if they
wish to be removed.

In the case that an inactive maintainer is unresponsive for any reason, a ceiling(two-thirds) supermajority
vote of the existing maintainers can be used to approve their removal from the MAINTAINERS file, and revoke
their merge permissions on the repository.
vendor/github.com/theupdateframework/notary/Makefile (new file, generated, vendored)
@@ -0,0 +1,205 @@
# Set an output prefix, which is the local directory if not specified
PREFIX?=$(shell pwd)

GOFLAGS := -mod=vendor

# Populate version variables
# Add to compile time flags
NOTARY_PKG := github.com/theupdateframework/notary
NOTARY_VERSION := $(shell cat NOTARY_VERSION)
GITCOMMIT := $(shell git rev-parse --short HEAD)
GITUNTRACKEDCHANGES := $(shell git status --porcelain --untracked-files=no)
ifneq ($(GITUNTRACKEDCHANGES),)
GITCOMMIT := $(GITCOMMIT)-dirty
endif
CTIMEVAR=-X $(NOTARY_PKG)/version.GitCommit=$(GITCOMMIT) -X $(NOTARY_PKG)/version.NotaryVersion=$(NOTARY_VERSION)
GO_LDFLAGS=-ldflags "-w $(CTIMEVAR)"
GO_LDFLAGS_STATIC=-ldflags "-w $(CTIMEVAR) -extldflags -static"
GOOSES = darwin linux windows
NOTARY_BUILDTAGS ?= pkcs11
NOTARYDIR := /go/src/github.com/theupdateframework/notary

# check to be sure pkcs11 lib is always imported with a build tag
GO_LIST_PKCS11 := $(shell go list -tags "${NOTARY_BUILDTAGS}" -e -f '{{join .Deps "\n"}}' ./... | grep -v /vendor/ | xargs go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' | grep -q pkcs11)
ifeq ($(GO_LIST_PKCS11),)
$(info pkcs11 import was not found anywhere without a build tag, yay)
else
$(error You are importing pkcs11 somewhere and not using a build tag)
endif

_empty :=
_space := $(_empty) $(_empty)

# go cover test variables
COVERPROFILE?=coverage.txt
COVERMODE=atomic
PKGS ?= $(shell go list -tags "${NOTARY_BUILDTAGS}" ./... | grep -v /vendor/ | tr '\n' ' ')

.PHONY: clean all lint build test binaries cross cover docker-images notary-dockerfile
.DELETE_ON_ERROR: cover
.DEFAULT: default

all: clean lint build test binaries

# This only needs to be generated by hand when cutting full releases.
version/version.go:
	./version/version.sh > $@

${PREFIX}/bin/notary-server: NOTARY_VERSION $(shell find . -type f -name '*.go')
	@echo "+ $@"
	@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS} ./cmd/notary-server

${PREFIX}/bin/notary: NOTARY_VERSION $(shell find . -type f -name '*.go')
	@echo "+ $@"
	@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS} ./cmd/notary

${PREFIX}/bin/notary-signer: NOTARY_VERSION $(shell find . -type f -name '*.go')
	@echo "+ $@"
	@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS} ./cmd/notary-signer

${PREFIX}/bin/escrow: NOTARY_VERSION $(shell find . -type f -name '*.go')
	@echo "+ $@"
	@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS} ./cmd/escrow

ifeq ($(shell uname -s),Darwin)
${PREFIX}/bin/static/notary-server:
	@echo "notary-server: static builds not supported on OS X"

${PREFIX}/bin/static/notary-signer:
	@echo "notary-signer: static builds not supported on OS X"

${PREFIX}/bin/static/notary:
	@echo "notary: static builds not supported on OS X"
else
${PREFIX}/bin/static/notary-server: NOTARY_VERSION $(shell find . -type f -name '*.go')
	@echo "+ $@"
	@(export CGO_ENABLED=0; go build -tags "${NOTARY_BUILDTAGS} netgo" -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary-server)

${PREFIX}/bin/static/notary-signer: NOTARY_VERSION $(shell find . -type f -name '*.go')
	@echo "+ $@"
	@(export CGO_ENABLED=0; go build -tags "${NOTARY_BUILDTAGS} netgo" -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary-signer)

${PREFIX}/bin/static/notary:
	@echo "+ $@"
	@go build -tags "${NOTARY_BUILDTAGS} netgo" -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary
endif


# run all lint functionality - excludes Godep directory, vendoring, binaries, python tests, and git files
lint:
	@echo "+ $@: golint, go vet, go fmt, gocyclo, misspell, ineffassign"
	# golint
	@test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec golint {} \; | tee /dev/stderr)"
	# gofmt
	@test -z "$$(gofmt -s -l .| grep -v .pb. | grep -v vendor/ | tee /dev/stderr)"
	# govet
ifeq ($(shell uname -s), Darwin)
	@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v vendor | xargs echo "This file should end with '_test':" | tee /dev/stderr)"
else
	@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v vendor | xargs -r echo "This file should end with '_test':" | tee /dev/stderr)"
endif
	@test -z "$$(go vet -printf=false . 2>&1 | grep -v vendor/ | tee /dev/stderr)"
	# gocyclo - we require cyclomatic complexity to be < 16
	@test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec gocyclo -over 15 {} \; | tee /dev/stderr)"
	# misspell - requires that the following be run first:
	#    go get -u github.com/client9/misspell/cmd/misspell
	@test -z "$$(find . -type f | grep -v vendor/ | grep -v bin/ | grep -v misc/ | grep -v .git/ | grep -v \.pdf | xargs misspell | tee /dev/stderr)"
	# ineffassign - requires that the following be run first:
	#    go get -u github.com/gordonklaus/ineffassign
	@test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec ineffassign {} \; | tee /dev/stderr)"
	# gosec - requires that the following be run first:
	#    go get -u github.com/securego/gosec/cmd/gosec/...
	@rm -f gosec_output.csv
	@gosec -fmt=csv -out=gosec_output.csv -exclude=G104,G304 ./... || (cat gosec_output.csv >&2; exit 1)

build:
	@echo "+ $@"
	@go build -tags "${NOTARY_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS)

# When running `go test ./...`, it runs all the suites in parallel, which causes
# problems when running with a yubikey
test: TESTOPTS =
test:
	@echo Note: when testing with a yubikey plugged in, make sure to include 'TESTOPTS="-p 1"'
	@echo "+ $@ $(TESTOPTS)"
	@echo
	go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) $(PKGS)

integration: TESTDB = mysql
integration: clean
	buildscripts/integrationtest.sh $(TESTDB)

testdb: TESTDB = mysql
testdb:
	buildscripts/dbtests.sh $(TESTDB)

protos:
	@protoc --go_out=plugins=grpc:. proto/*.proto

# This allows coverage for a package to come from tests in a different package.
# Requires that the following be run first:
#    go get github.com/wadey/gocovmerge; go install github.com/wadey/gocovmerge
gen-cover:
	@python -u buildscripts/covertest.py --tags "$(NOTARY_BUILDTAGS)" --pkgs="$(PKGS)" --testopts="${TESTOPTS}"

# Generates the cover binaries and runs them all in serial, so this can be used
# to run all tests with a yubikey without any problems
cover: gen-cover covmerge
	@go tool cover -html="$(COVERPROFILE)"

# Generates the cover binaries and runs them all in serial, so this can be used
# to run all tests with a yubikey without any problems
ci: override TESTOPTS = -race
# Codecov knows how to merge multiple coverage files, so covmerge is not needed
ci: gen-cover

yubikey-tests: override PKGS = github.com/theupdateframework/notary/cmd/notary github.com/theupdateframework/notary/trustmanager/yubikey
yubikey-tests: ci

covmerge:
	@gocovmerge $(shell find . -name coverage*.txt | tr "\n" " ") > $(COVERPROFILE)
	@go tool cover -func="$(COVERPROFILE)"

clean-protos:
	@rm proto/*.pb.go

client: ${PREFIX}/bin/notary
	@echo "+ $@"

binaries: ${PREFIX}/bin/notary-server ${PREFIX}/bin/notary ${PREFIX}/bin/notary-signer
	@echo "+ $@"

escrow: ${PREFIX}/bin/escrow
	@echo "+ $@"

static: ${PREFIX}/bin/static/notary-server ${PREFIX}/bin/static/notary-signer ${PREFIX}/bin/static/notary
	@echo "+ $@"

notary-dockerfile:
	@docker build --rm --force-rm -t notary .

server-dockerfile:
	@docker build --rm --force-rm -f server.Dockerfile -t notary-server .

signer-dockerfile:
	@docker build --rm --force-rm -f signer.Dockerfile -t notary-signer .

docker-images: notary-dockerfile server-dockerfile signer-dockerfile

shell: notary-dockerfile
	docker run --rm -it -v $(CURDIR)/cross:$(NOTARYDIR)/cross -v $(CURDIR)/bin:$(NOTARYDIR)/bin notary bash

cross:
	@rm -rf $(CURDIR)/cross
	@docker build --rm --force-rm -t notary -f cross.Dockerfile .
	docker run --rm -v $(CURDIR)/cross:$(NOTARYDIR)/cross -e CTIMEVAR="${CTIMEVAR}" -e NOTARY_BUILDTAGS=$(NOTARY_BUILDTAGS) notary buildscripts/cross.sh $(GOOSES)

clean:
	@echo "+ $@"
	@rm -rf .cover cross
	find . -name coverage.txt -delete
	@rm -rf "${PREFIX}/bin/notary-server" "${PREFIX}/bin/notary" "${PREFIX}/bin/notary-signer"
	@rm -rf "${PREFIX}/bin/static"
1
vendor/github.com/theupdateframework/notary/NOTARY_VERSION
generated
vendored
Normal file
1
vendor/github.com/theupdateframework/notary/NOTARY_VERSION
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
0.6.1
|
135
vendor/github.com/theupdateframework/notary/README.md
generated
vendored
Normal file
135
vendor/github.com/theupdateframework/notary/README.md
generated
vendored
Normal file
@ -0,0 +1,135 @@
|
||||
<img src="docs/images/notary-blk.svg" alt="Notary" width="400px"/>
|
||||
|
||||
[](https://godoc.org/github.com/theupdateframework/notary)
|
||||
[](https://circleci.com/gh/theupdateframework/notary/tree/master) [](https://codecov.io/github/theupdateframework/notary) [](https://goreportcard.com/report/github.com/theupdateframework/notary)
|
||||
[](https://app.fossa.io/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary?ref=badge_shield)
|
||||
|
||||
# Notice
|
||||
|
||||
The Notary project has officially been accepted in to the Cloud Native Computing Foundation (CNCF).
|
||||
It has moved to https://github.com/theupdateframework/notary. Any downstream consumers should update
|
||||
their Go imports to use this new location, which will be the canonical location going forward.
|
||||
|
||||
We have moved the repo in GitHub, which will allow existing importers to continue using the old
|
||||
location via GitHub's redirect.
|
||||
|
||||
# Overview
|
||||
|
||||
The Notary project comprises a [server](cmd/notary-server) and a [client](cmd/notary) for running and interacting
|
||||
with trusted collections. See the [service architecture](docs/service_architecture.md) documentation
|
||||
for more information.
|
||||
|
||||
Notary aims to make the internet more secure by making it easy for people to
|
||||
publish and verify content. We often rely on TLS to secure our communications
|
||||
with a web server, which is inherently flawed, as any compromise of the server
|
||||
enables malicious content to be substituted for the legitimate content.
|
||||
|
||||
With Notary, publishers can sign their content offline using keys kept highly
|
||||
secure. Once the publisher is ready to make the content available, they can
|
||||
push their signed trusted collection to a Notary Server.
|
||||
|
||||
Consumers, having acquired the publisher's public key through a secure channel,
|
||||
can then communicate with any Notary server or (insecure) mirror, relying
|
||||
only on the publisher's key to determine the validity and integrity of the
|
||||
received content.
|
||||
|
||||
## Goals
|
||||
|
||||
Notary is based on [The Update Framework](https://www.theupdateframework.com/), a secure general design for the problem of software distribution and updates. By using TUF, Notary achieves a number of key advantages:
|
||||
|
||||
* **Survivable Key Compromise**: Content publishers must manage keys in order to sign their content. Signing keys may be compromised or lost so systems must be designed in order to be flexible and recoverable in the case of key compromise. TUF's notion of key roles is utilized to separate responsibilities across a hierarchy of keys such that loss of any particular key (except the root role) by itself is not fatal to the security of the system.
|
||||
* **Freshness Guarantees**: Replay attacks are a common problem in designing secure systems, where previously valid payloads are replayed to trick another system. The same problem exists in the software update systems, where old signed can be presented as the most recent. Notary makes use of timestamping on publishing so that consumers can know that they are receiving the most up to date content. This is particularly important when dealing with software update where old vulnerable versions could be used to attack users.
|
||||
* **Configurable Trust Thresholds**: Oftentimes there are a large number of publishers that are allowed to publish a particular piece of content. For example, open source projects where there are a number of core maintainers. Trust thresholds can be used so that content consumers require a configurable number of signatures on a piece of content in order to trust it. Using thresholds increases security so that loss of individual signing keys doesn't allow publishing of malicious content.
|
||||
* **Signing Delegation**: To allow for flexible publishing of trusted collections, a content publisher can delegate part of their collection to another signer. This delegation is represented as signed metadata so that a consumer of the content can verify both the content and the delegation.
|
||||
* **Use of Existing Distribution**: Notary's trust guarantees are not tied at all to particular distribution channels from which content is delivered. Therefore, trust can be added to any existing content delivery mechanism.
|
||||
* **Untrusted Mirrors and Transport**: All of the notary metadata can be mirrored and distributed via arbitrary channels.
|
||||
|
||||
## Security
|
||||
|
||||
Any security vulnerabilities can be reported to security@docker.com.
|
||||
|
||||
See Notary's [service architecture docs](docs/service_architecture.md#threat-model) for more information about our threat model, which details the varying survivability and severities for key compromise as well as mitigations.
|
||||
|
||||
### Security Audits
|
||||
|
||||
Notary has had two public security audits:
|
||||
|
||||
* [August 7, 2018 by Cure53](docs/resources/cure53_tuf_notary_audit_2018_08_07.pdf) covering TUF and Notary
|
||||
* [July 31, 2015 by NCC](docs/resources/ncc_docker_notary_audit_2015_07_31.pdf) covering Notary
|
||||
|
||||
# Getting started with the Notary CLI
|
||||
|
||||
Get the Notary Client CLI binary from [the official releases page](https://github.com/theupdateframework/notary/releases) or you can [build one yourself](#building-notary).
|
||||
The version of the Notary server and signer should be greater than or equal to Notary CLI's version to ensure feature compatibility (ex: CLI version 0.2, server/signer version >= 0.2), and all official releases are associated with GitHub tags.
|
||||
|
||||
To use the Notary CLI with Docker hub images, have a look at Notary's
|
||||
[getting started docs](docs/getting_started.md).
|
||||
|
||||
For more advanced usage, see the
|
||||
[advanced usage docs](docs/advanced_usage.md).
|
||||
|
||||
To use the CLI against a local Notary server rather than against Docker Hub:
|
||||
|
||||
1. Ensure that you have [docker and docker-compose](https://docs.docker.com/compose/install/) installed.
|
||||
1. `git clone https://github.com/theupdateframework/notary.git` and from the cloned repository path,
|
||||
start up a local Notary server and signer and copy the config file and testing certs to your
|
||||
local Notary config directory:
|
||||
|
||||
```sh
|
||||
$ docker-compose build
|
||||
$ docker-compose up -d
|
||||
$ mkdir -p ~/.notary && cp cmd/notary/config.json cmd/notary/root-ca.crt ~/.notary
|
||||
```
|
||||
|
||||
1. Add `127.0.0.1 notary-server` to your `/etc/hosts`, or if using docker-machine,
|
||||
add `$(docker-machine ip) notary-server`).
|
||||
|
||||
You can run through the examples in the
|
||||
[getting started docs](docs/getting_started.md) and
|
||||
[advanced usage docs](docs/advanced_usage.md), but
|
||||
without the `-s` (server URL) argument to the `notary` command since the server
|
||||
URL is specified already in the configuration, file you copied.
|
||||
|
||||
You can also leave off the `-d ~/.docker/trust` argument if you do not care
|
||||
to use `notary` with Docker images.
|
||||
|
||||
## Upgrading dependencies
|
||||
|
||||
To prevent mistakes in vendoring the go modules a buildscript has been added to properly vendor the modules using the correct version of Go to mitigate differences in CI and development environment.
|
||||
|
||||
Following procedure should be executed to upgrade a dependency. Preferably keep dependency upgrades in a separate commit from your code changes.
|
||||
|
||||
```bash
|
||||
go get -u github.com/spf13/viper
|
||||
buildscripts/circle-validate-vendor.sh
|
||||
git add .
|
||||
git commit -m "Upgraded github.com/spf13/viper"
|
||||
```
|
||||
|
||||
The `buildscripts/circle-validate-vendor.sh` runs `go mod tidy` and `go mod vendor` using the given version of Go to prevent differences if you are for example running on a different version of Go.
|
||||
|
||||
## Building Notary
|
||||
|
||||
Note that Notary's [latest stable release](https://github.com/theupdateframework/notary/releases) is at the head of the
|
||||
[releases branch](https://github.com/theupdateframework/notary/tree/releases). The master branch is the development
|
||||
branch and contains features for the next release.
|
||||
|
||||
Prerequisites:
|
||||
|
||||
* Go >= 1.12
|
||||
|
||||
Set [```GOPATH```](https://golang.org/doc/code.html#GOPATH). Then, run:
|
||||
|
||||
```bash
|
||||
$ export GO111MODULE=on
|
||||
$ go get github.com/theupdateframework/notary
|
||||
# build with pkcs11 support by default to support yubikey
|
||||
$ go install -tags pkcs11 github.com/theupdateframework/notary/cmd/notary
|
||||
$ notary
|
||||
```
|
||||
|
||||
To build the server and signer, run `docker-compose build`.
|
||||
|
||||
## License
|
||||
|
||||
[](https://app.fossa.io/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary?ref=badge_large)
|
100
vendor/github.com/theupdateframework/notary/client/changelist/change.go
generated
vendored
Normal file
100
vendor/github.com/theupdateframework/notary/client/changelist/change.go
generated
vendored
Normal file
@ -0,0 +1,100 @@
|
||||
package changelist
|
||||
|
||||
import (
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
)
|
||||
|
||||
// Scopes for TUFChanges are simply the TUF roles.
|
||||
// Unfortunately because of targets delegations, we can only
|
||||
// cover the base roles.
|
||||
const (
|
||||
ScopeRoot = "root"
|
||||
ScopeTargets = "targets"
|
||||
)
|
||||
|
||||
// Types for TUFChanges are namespaced by the Role they
|
||||
// are relevant for. The Root and Targets roles are the
|
||||
// only ones for which user action can cause a change, as
|
||||
// all changes in Snapshot and Timestamp are programmatically
|
||||
// generated base on Root and Targets changes.
|
||||
const (
|
||||
TypeBaseRole = "role"
|
||||
TypeTargetsTarget = "target"
|
||||
TypeTargetsDelegation = "delegation"
|
||||
TypeWitness = "witness"
|
||||
)
|
||||
|
||||
// TUFChange represents a change to a TUF repo
|
||||
type TUFChange struct {
|
||||
// Abbreviated because Go doesn't permit a field and method of the same name
|
||||
Actn string `json:"action"`
|
||||
Role data.RoleName `json:"role"`
|
||||
ChangeType string `json:"type"`
|
||||
ChangePath string `json:"path"`
|
||||
Data []byte `json:"data"`
|
||||
}
|
||||
|
||||
// TUFRootData represents a modification of the keys associated
|
||||
// with a role that appears in the root.json
|
||||
type TUFRootData struct {
|
||||
Keys data.KeyList `json:"keys"`
|
||||
RoleName data.RoleName `json:"role"`
|
||||
}
|
||||
|
||||
// NewTUFChange initializes a TUFChange object
|
||||
func NewTUFChange(action string, role data.RoleName, changeType, changePath string, content []byte) *TUFChange {
|
||||
return &TUFChange{
|
||||
Actn: action,
|
||||
Role: role,
|
||||
ChangeType: changeType,
|
||||
ChangePath: changePath,
|
||||
Data: content,
|
||||
}
|
||||
}
|
||||
|
||||
// Action return c.Actn
|
||||
func (c TUFChange) Action() string {
|
||||
return c.Actn
|
||||
}
|
||||
|
||||
// Scope returns c.Role
|
||||
func (c TUFChange) Scope() data.RoleName {
|
||||
return c.Role
|
||||
}
|
||||
|
||||
// Type returns c.ChangeType
|
||||
func (c TUFChange) Type() string {
|
||||
return c.ChangeType
|
||||
}
|
||||
|
||||
// Path return c.ChangePath
|
||||
func (c TUFChange) Path() string {
|
||||
return c.ChangePath
|
||||
}
|
||||
|
||||
// Content returns c.Data
|
||||
func (c TUFChange) Content() []byte {
|
||||
return c.Data
|
||||
}
|
||||
|
||||
// TUFDelegation represents a modification to a target delegation
|
||||
// this includes creating a delegations. This format is used to avoid
|
||||
// unexpected race conditions between humans modifying the same delegation
|
||||
type TUFDelegation struct {
|
||||
NewName data.RoleName `json:"new_name,omitempty"`
|
||||
NewThreshold int `json:"threshold,omitempty"`
|
||||
AddKeys data.KeyList `json:"add_keys,omitempty"`
|
||||
RemoveKeys []string `json:"remove_keys,omitempty"`
|
||||
AddPaths []string `json:"add_paths,omitempty"`
|
||||
RemovePaths []string `json:"remove_paths,omitempty"`
|
||||
ClearAllPaths bool `json:"clear_paths,omitempty"`
|
||||
}
|
||||
|
||||
// ToNewRole creates a fresh role object from the TUFDelegation data
|
||||
func (td TUFDelegation) ToNewRole(scope data.RoleName) (*data.Role, error) {
|
||||
name := scope
|
||||
if td.NewName != "" {
|
||||
name = td.NewName
|
||||
}
|
||||
return data.NewRole(name, td.NewThreshold, td.AddKeys.IDs(), td.AddPaths)
|
||||
}
|
82
vendor/github.com/theupdateframework/notary/client/changelist/changelist.go
generated
vendored
Normal file
82
vendor/github.com/theupdateframework/notary/client/changelist/changelist.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
package changelist
|
||||
|
||||
// memChangeList implements a simple in memory change list.
|
||||
type memChangelist struct {
|
||||
changes []Change
|
||||
}
|
||||
|
||||
// NewMemChangelist instantiates a new in-memory changelist
|
||||
func NewMemChangelist() Changelist {
|
||||
return &memChangelist{}
|
||||
}
|
||||
|
||||
// List returns a list of Changes
|
||||
func (cl memChangelist) List() []Change {
|
||||
return cl.changes
|
||||
}
|
||||
|
||||
// Add adds a change to the in-memory change list
|
||||
func (cl *memChangelist) Add(c Change) error {
|
||||
cl.changes = append(cl.changes, c)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Location returns the string "memory"
|
||||
func (cl memChangelist) Location() string {
|
||||
return "memory"
|
||||
}
|
||||
|
||||
// Remove deletes the changes found at the given indices
|
||||
func (cl *memChangelist) Remove(idxs []int) error {
|
||||
remove := make(map[int]struct{})
|
||||
for _, i := range idxs {
|
||||
remove[i] = struct{}{}
|
||||
}
|
||||
var keep []Change
|
||||
|
||||
for i, c := range cl.changes {
|
||||
if _, ok := remove[i]; ok {
|
||||
continue
|
||||
}
|
||||
keep = append(keep, c)
|
||||
}
|
||||
cl.changes = keep
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clear empties the changelist file.
|
||||
func (cl *memChangelist) Clear(archive string) error {
|
||||
// appending to a nil list initializes it.
|
||||
cl.changes = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close is a no-op in this in-memory change-list
|
||||
func (cl *memChangelist) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cl *memChangelist) NewIterator() (ChangeIterator, error) {
|
||||
return &MemChangeListIterator{index: 0, collection: cl.changes}, nil
|
||||
}
|
||||
|
||||
// MemChangeListIterator is a concrete instance of ChangeIterator
|
||||
type MemChangeListIterator struct {
|
||||
index int
|
||||
collection []Change // Same type as memChangeList.changes
|
||||
}
|
||||
|
||||
// Next returns the next Change
|
||||
func (m *MemChangeListIterator) Next() (item Change, err error) {
|
||||
if m.index >= len(m.collection) {
|
||||
return nil, IteratorBoundsError(m.index)
|
||||
}
|
||||
item = m.collection[m.index]
|
||||
m.index++
|
||||
return item, err
|
||||
}
|
||||
|
||||
// HasNext indicates whether the iterator is exhausted
|
||||
func (m *MemChangeListIterator) HasNext() bool {
|
||||
return m.index < len(m.collection)
|
||||
}
|
208
vendor/github.com/theupdateframework/notary/client/changelist/file_changelist.go
generated
vendored
Normal file
208
vendor/github.com/theupdateframework/notary/client/changelist/file_changelist.go
generated
vendored
Normal file
@ -0,0 +1,208 @@
|
||||
package changelist
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/uuid"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// FileChangelist stores all the changes as files
|
||||
type FileChangelist struct {
|
||||
dir string
|
||||
}
|
||||
|
||||
// NewFileChangelist is a convenience method for returning FileChangeLists
|
||||
func NewFileChangelist(dir string) (*FileChangelist, error) {
|
||||
logrus.Debug("Making dir path: ", dir)
|
||||
err := os.MkdirAll(dir, 0700)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FileChangelist{dir: dir}, nil
|
||||
}
|
||||
|
||||
// getFileNames reads directory, filtering out child directories
|
||||
func getFileNames(dirName string) ([]os.FileInfo, error) {
|
||||
var dirListing, fileInfos []os.FileInfo
|
||||
dir, err := os.Open(dirName)
|
||||
if err != nil {
|
||||
return fileInfos, err
|
||||
}
|
||||
defer func() {
|
||||
_ = dir.Close()
|
||||
}()
|
||||
|
||||
dirListing, err = dir.Readdir(0)
|
||||
if err != nil {
|
||||
return fileInfos, err
|
||||
}
|
||||
for _, f := range dirListing {
|
||||
if f.IsDir() {
|
||||
continue
|
||||
}
|
||||
fileInfos = append(fileInfos, f)
|
||||
}
|
||||
sort.Sort(fileChanges(fileInfos))
|
||||
return fileInfos, nil
|
||||
}
|
||||
|
||||
// Read a JSON formatted file from disk; convert to TUFChange struct
|
||||
func unmarshalFile(dirname string, f os.FileInfo) (*TUFChange, error) {
|
||||
c := &TUFChange{}
|
||||
raw, err := ioutil.ReadFile(filepath.Join(dirname, f.Name()))
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
err = json.Unmarshal(raw, c)
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// List returns a list of sorted changes
|
||||
func (cl FileChangelist) List() []Change {
|
||||
var changes []Change
|
||||
fileInfos, err := getFileNames(cl.dir)
|
||||
if err != nil {
|
||||
return changes
|
||||
}
|
||||
for _, f := range fileInfos {
|
||||
c, err := unmarshalFile(cl.dir, f)
|
||||
if err != nil {
|
||||
logrus.Warn(err.Error())
|
||||
continue
|
||||
}
|
||||
changes = append(changes, c)
|
||||
}
|
||||
return changes
|
||||
}
|
||||
|
||||
// Add adds a change to the file change list
|
||||
func (cl FileChangelist) Add(c Change) error {
|
||||
cJSON, err := json.Marshal(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filename := fmt.Sprintf("%020d_%s.change", time.Now().UnixNano(), uuid.Generate())
|
||||
return ioutil.WriteFile(filepath.Join(cl.dir, filename), cJSON, 0600)
|
||||
}
|
||||
|
||||
// Remove deletes the changes found at the given indices
|
||||
func (cl FileChangelist) Remove(idxs []int) error {
|
||||
fileInfos, err := getFileNames(cl.dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
remove := make(map[int]struct{})
|
||||
for _, i := range idxs {
|
||||
remove[i] = struct{}{}
|
||||
}
|
||||
for i, c := range fileInfos {
|
||||
if _, ok := remove[i]; ok {
|
||||
file := filepath.Join(cl.dir, c.Name())
|
||||
if err := os.Remove(file); err != nil {
|
||||
logrus.Errorf("could not remove change %d: %s", i, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clear clears the change list
|
||||
// N.B. archiving not currently implemented
|
||||
func (cl FileChangelist) Clear(archive string) error {
|
||||
dir, err := os.Open(cl.dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = dir.Close()
|
||||
}()
|
||||
|
||||
files, err := dir.Readdir(0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, f := range files {
|
||||
os.Remove(filepath.Join(cl.dir, f.Name()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close is a no-op
|
||||
func (cl FileChangelist) Close() error {
|
||||
// Nothing to do here
|
||||
return nil
|
||||
}
|
||||
|
||||
// Location returns the file path to the changelist
|
||||
func (cl FileChangelist) Location() string {
|
||||
return cl.dir
|
||||
}
|
||||
|
||||
// NewIterator creates an iterator from FileChangelist
|
||||
func (cl FileChangelist) NewIterator() (ChangeIterator, error) {
|
||||
fileInfos, err := getFileNames(cl.dir)
|
||||
if err != nil {
|
||||
return &FileChangeListIterator{}, err
|
||||
}
|
||||
return &FileChangeListIterator{dirname: cl.dir, collection: fileInfos}, nil
|
||||
}
|
||||
|
||||
// IteratorBoundsError is an Error type used by Next()
|
||||
type IteratorBoundsError int
|
||||
|
||||
// Error implements the Error interface
|
||||
func (e IteratorBoundsError) Error() string {
|
||||
return fmt.Sprintf("Iterator index (%d) out of bounds", e)
|
||||
}
|
||||
|
||||
// FileChangeListIterator is a concrete instance of ChangeIterator
|
||||
type FileChangeListIterator struct {
|
||||
index int
|
||||
dirname string
|
||||
collection []os.FileInfo
|
||||
}
|
||||
|
||||
// Next returns the next Change in the FileChangeList
|
||||
func (m *FileChangeListIterator) Next() (item Change, err error) {
|
||||
if m.index >= len(m.collection) {
|
||||
return nil, IteratorBoundsError(m.index)
|
||||
}
|
||||
f := m.collection[m.index]
|
||||
m.index++
|
||||
item, err = unmarshalFile(m.dirname, f)
|
||||
return
|
||||
}
|
||||
|
||||
// HasNext indicates whether iterator is exhausted
|
||||
func (m *FileChangeListIterator) HasNext() bool {
|
||||
return m.index < len(m.collection)
|
||||
}
|
||||
|
||||
type fileChanges []os.FileInfo
|
||||
|
||||
// Len returns the length of a file change list
|
||||
func (cs fileChanges) Len() int {
|
||||
return len(cs)
|
||||
}
|
||||
|
||||
// Less compares the names of two different file changes
|
||||
func (cs fileChanges) Less(i, j int) bool {
|
||||
return cs[i].Name() < cs[j].Name()
|
||||
}
|
||||
|
||||
// Swap swaps the position of two file changes
|
||||
func (cs fileChanges) Swap(i, j int) {
|
||||
tmp := cs[i]
|
||||
cs[i] = cs[j]
|
||||
cs[j] = tmp
|
||||
}
|
78
vendor/github.com/theupdateframework/notary/client/changelist/interface.go
generated
vendored
Normal file
78
vendor/github.com/theupdateframework/notary/client/changelist/interface.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
package changelist
|
||||
|
||||
import "github.com/theupdateframework/notary/tuf/data"
|
||||
|
||||
// Changelist is the interface for all TUF change lists
|
||||
type Changelist interface {
|
||||
// List returns the ordered list of changes
|
||||
// currently stored
|
||||
List() []Change
|
||||
|
||||
// Add change appends the provided change to
|
||||
// the list of changes
|
||||
Add(Change) error
|
||||
|
||||
// Clear empties the current change list.
|
||||
// Archive may be provided as a directory path
|
||||
// to save a copy of the changelist in that location
|
||||
Clear(archive string) error
|
||||
|
||||
// Remove deletes the changes corresponding with the indices given
|
||||
Remove(idxs []int) error
|
||||
|
||||
// Close synchronizes any pending writes to the underlying
|
||||
// storage and closes the file/connection
|
||||
Close() error
|
||||
|
||||
// NewIterator returns an iterator for walking through the list
|
||||
// of changes currently stored
|
||||
NewIterator() (ChangeIterator, error)
|
||||
|
||||
// Location returns the place the changelist is stores
|
||||
Location() string
|
||||
}
|
||||
|
||||
const (
|
||||
// ActionCreate represents a Create action
|
||||
ActionCreate = "create"
|
||||
// ActionUpdate represents an Update action
|
||||
ActionUpdate = "update"
|
||||
// ActionDelete represents a Delete action
|
||||
ActionDelete = "delete"
|
||||
)
|
||||
|
||||
// Change is the interface for a TUF Change
|
||||
type Change interface {
|
||||
// "create","update", or "delete"
|
||||
Action() string
|
||||
|
||||
// Where the change should be made.
|
||||
// For TUF this will be the role
|
||||
Scope() data.RoleName
|
||||
|
||||
// The content type being affected.
|
||||
// For TUF this will be "target", or "delegation".
|
||||
// If the type is "delegation", the Scope will be
|
||||
// used to determine if a root role is being updated
|
||||
// or a target delegation.
|
||||
Type() string
|
||||
|
||||
// Path indicates the entry within a role to be affected by the
|
||||
// change. For targets, this is simply the target's path,
|
||||
// for delegations it's the delegated role name.
|
||||
Path() string
|
||||
|
||||
// Serialized content that the interpreter of a changelist
|
||||
// can use to apply the change.
|
||||
// For TUF this will be the serialized JSON that needs
|
||||
// to be inserted or merged. In the case of a "delete"
|
||||
// action, it will be nil.
|
||||
Content() []byte
|
||||
}
|
||||
|
||||
// ChangeIterator is the interface for iterating across collections of
|
||||
// TUF Change items
|
||||
type ChangeIterator interface {
|
||||
Next() (Change, error)
|
||||
HasNext() bool
|
||||
}
|
998
vendor/github.com/theupdateframework/notary/client/client.go
generated
vendored
Normal file
998
vendor/github.com/theupdateframework/notary/client/client.go
generated
vendored
Normal file
@ -0,0 +1,998 @@
|
||||
// Package client implements everything required for interacting with a Notary repository.
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
canonicaljson "github.com/docker/go/canonical/json"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/theupdateframework/notary"
|
||||
"github.com/theupdateframework/notary/client/changelist"
|
||||
"github.com/theupdateframework/notary/cryptoservice"
|
||||
store "github.com/theupdateframework/notary/storage"
|
||||
"github.com/theupdateframework/notary/trustpinning"
|
||||
"github.com/theupdateframework/notary/tuf"
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
"github.com/theupdateframework/notary/tuf/signed"
|
||||
"github.com/theupdateframework/notary/tuf/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
tufDir = "tuf"
|
||||
|
||||
// SignWithAllOldVersions is a sentinel constant for LegacyVersions flag
|
||||
SignWithAllOldVersions = -1
|
||||
)
|
||||
|
||||
func init() {
|
||||
data.SetDefaultExpiryTimes(data.NotaryDefaultExpiries)
|
||||
}
|
||||
|
||||
// repository stores all the information needed to operate on a notary repository.
|
||||
type repository struct {
|
||||
gun data.GUN
|
||||
baseURL string
|
||||
changelist changelist.Changelist
|
||||
cache store.MetadataStore
|
||||
remoteStore store.RemoteStore
|
||||
cryptoService signed.CryptoService
|
||||
tufRepo *tuf.Repo
|
||||
invalid *tuf.Repo // known data that was parsable but deemed invalid
|
||||
roundTrip http.RoundTripper
|
||||
trustPinning trustpinning.TrustPinConfig
|
||||
LegacyVersions int // number of versions back to fetch roots to sign with
|
||||
}
|
||||
|
||||
// NewFileCachedRepository is a wrapper for NewRepository that initializes
|
||||
// a file cache from the provided repository, local config information and a crypto service.
|
||||
// It also retrieves the remote store associated to the base directory under where all the
|
||||
// trust files will be stored (This is normally defaults to "~/.notary" or "~/.docker/trust"
|
||||
// when enabling Docker content trust) and the specified GUN.
|
||||
//
|
||||
// In case of a nil RoundTripper, a default offline store is used instead.
|
||||
func NewFileCachedRepository(baseDir string, gun data.GUN, baseURL string, rt http.RoundTripper,
|
||||
retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) (Repository, error) {
|
||||
|
||||
cache, err := store.NewFileStore(
|
||||
filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()), "metadata"),
|
||||
"json",
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
keyStores, err := getKeyStores(baseDir, retriever)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cryptoService := cryptoservice.NewCryptoService(keyStores...)
|
||||
|
||||
remoteStore, err := getRemoteStore(baseURL, gun, rt)
|
||||
if err != nil {
|
||||
// baseURL is syntactically invalid
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cl, err := changelist.NewFileChangelist(filepath.Join(
|
||||
filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()), "changelist"),
|
||||
))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewRepository(gun, baseURL, remoteStore, cache, trustPinning, cryptoService, cl)
|
||||
}
|
||||
|
||||
// NewRepository is the base method that returns a new notary repository.
|
||||
// It expects an initialized cache. In case of a nil remote store, a default
|
||||
// offline store is used.
|
||||
func NewRepository(gun data.GUN, baseURL string, remoteStore store.RemoteStore, cache store.MetadataStore,
|
||||
trustPinning trustpinning.TrustPinConfig, cryptoService signed.CryptoService, cl changelist.Changelist) (Repository, error) {
|
||||
|
||||
// Repo's remote store is either a valid remote store or an OfflineStore
|
||||
if remoteStore == nil {
|
||||
remoteStore = store.OfflineStore{}
|
||||
}
|
||||
|
||||
if cache == nil {
|
||||
return nil, fmt.Errorf("got an invalid cache (nil metadata store)")
|
||||
}
|
||||
|
||||
nRepo := &repository{
|
||||
gun: gun,
|
||||
baseURL: baseURL,
|
||||
changelist: cl,
|
||||
cache: cache,
|
||||
remoteStore: remoteStore,
|
||||
cryptoService: cryptoService,
|
||||
trustPinning: trustPinning,
|
||||
LegacyVersions: 0, // By default, don't sign with legacy roles
|
||||
}
|
||||
|
||||
return nRepo, nil
|
||||
}
|
||||
|
||||
// GetGUN is a getter for the GUN object from a Repository
|
||||
func (r *repository) GetGUN() data.GUN {
|
||||
return r.gun
|
||||
}
|
||||
|
||||
func (r *repository) updateTUF(forWrite bool) error {
|
||||
repo, invalid, err := LoadTUFRepo(TUFLoadOptions{
|
||||
GUN: r.gun,
|
||||
TrustPinning: r.trustPinning,
|
||||
CryptoService: r.cryptoService,
|
||||
Cache: r.cache,
|
||||
RemoteStore: r.remoteStore,
|
||||
AlwaysCheckInitialized: forWrite,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.tufRepo = repo
|
||||
r.invalid = invalid
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListTargets calls update first before listing targets
|
||||
func (r *repository) ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) {
|
||||
if err := r.updateTUF(false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewReadOnly(r.tufRepo).ListTargets(roles...)
|
||||
}
|
||||
|
||||
// GetTargetByName calls update first before getting target by name
|
||||
func (r *repository) GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) {
|
||||
if err := r.updateTUF(false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewReadOnly(r.tufRepo).GetTargetByName(name, roles...)
|
||||
}
|
||||
|
||||
// GetAllTargetMetadataByName calls update first before getting targets by name
|
||||
func (r *repository) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) {
|
||||
if err := r.updateTUF(false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewReadOnly(r.tufRepo).GetAllTargetMetadataByName(name)
|
||||
|
||||
}
|
||||
|
||||
// ListRoles calls update first before getting roles
|
||||
func (r *repository) ListRoles() ([]RoleWithSignatures, error) {
|
||||
if err := r.updateTUF(false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewReadOnly(r.tufRepo).ListRoles()
|
||||
}
|
||||
|
||||
// GetDelegationRoles calls update first before getting all delegation roles
|
||||
func (r *repository) GetDelegationRoles() ([]data.Role, error) {
|
||||
if err := r.updateTUF(false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewReadOnly(r.tufRepo).GetDelegationRoles()
|
||||
}
|
||||
|
||||
// NewTarget is a helper method that returns a Target
|
||||
func NewTarget(targetName, targetPath string, targetCustom *canonicaljson.RawMessage) (*Target, error) {
|
||||
b, err := ioutil.ReadFile(targetPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
meta, err := data.NewFileMeta(bytes.NewBuffer(b), data.NotaryDefaultHashes...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Target{Name: targetName, Hashes: meta.Hashes, Length: meta.Length, Custom: targetCustom}, nil
|
||||
}
|
||||
|
||||
// rootCertKey generates the corresponding certificate for the private key given the privKey and repo's GUN
|
||||
func rootCertKey(gun data.GUN, privKey data.PrivateKey) (data.PublicKey, error) {
|
||||
// Hard-coded policy: the generated certificate expires in 10 years.
|
||||
startTime := time.Now()
|
||||
cert, err := cryptoservice.GenerateCertificate(
|
||||
privKey, gun, startTime, startTime.Add(notary.Year*10))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
x509PublicKey := utils.CertToKey(cert)
|
||||
if x509PublicKey == nil {
|
||||
return nil, fmt.Errorf("cannot generate public key from private key with id: %v and algorithm: %v", privKey.ID(), privKey.Algorithm())
|
||||
}
|
||||
|
||||
return x509PublicKey, nil
|
||||
}
|
||||
|
||||
// GetCryptoService is the getter for the repository's CryptoService
|
||||
func (r *repository) GetCryptoService() signed.CryptoService {
|
||||
return r.cryptoService
|
||||
}
|
||||
|
||||
// initialize initializes the notary repository with a set of rootkeys, root certificates and roles.
|
||||
func (r *repository) initialize(rootKeyIDs []string, rootCerts []data.PublicKey, serverManagedRoles ...data.RoleName) error {
|
||||
|
||||
// currently we only support server managing timestamps and snapshots, and
|
||||
// nothing else - timestamps are always managed by the server, and implicit
|
||||
// (do not have to be passed in as part of `serverManagedRoles`, so that
|
||||
// the API of Initialize doesn't change).
|
||||
var serverManagesSnapshot bool
|
||||
locallyManagedKeys := []data.RoleName{
|
||||
data.CanonicalTargetsRole,
|
||||
data.CanonicalSnapshotRole,
|
||||
// root is also locally managed, but that should have been created
|
||||
// already
|
||||
}
|
||||
remotelyManagedKeys := []data.RoleName{data.CanonicalTimestampRole}
|
||||
for _, role := range serverManagedRoles {
|
||||
switch role {
|
||||
case data.CanonicalTimestampRole:
|
||||
continue // timestamp is already in the right place
|
||||
case data.CanonicalSnapshotRole:
|
||||
// because we put Snapshot last
|
||||
locallyManagedKeys = []data.RoleName{data.CanonicalTargetsRole}
|
||||
remotelyManagedKeys = append(
|
||||
remotelyManagedKeys, data.CanonicalSnapshotRole)
|
||||
serverManagesSnapshot = true
|
||||
default:
|
||||
return ErrInvalidRemoteRole{Role: role}
|
||||
}
|
||||
}
|
||||
|
||||
// gets valid public keys corresponding to the rootKeyIDs or generate if necessary
|
||||
var publicKeys []data.PublicKey
|
||||
var err error
|
||||
if len(rootCerts) == 0 {
|
||||
publicKeys, err = r.createNewPublicKeyFromKeyIDs(rootKeyIDs)
|
||||
} else {
|
||||
publicKeys, err = r.publicKeysOfKeyIDs(rootKeyIDs, rootCerts)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//initialize repo with public keys
|
||||
rootRole, targetsRole, snapshotRole, timestampRole, err := r.initializeRoles(
|
||||
publicKeys,
|
||||
locallyManagedKeys,
|
||||
remotelyManagedKeys,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.tufRepo = tuf.NewRepo(r.GetCryptoService())
|
||||
|
||||
if err := r.tufRepo.InitRoot(
|
||||
rootRole,
|
||||
timestampRole,
|
||||
snapshotRole,
|
||||
targetsRole,
|
||||
false,
|
||||
); err != nil {
|
||||
logrus.Debug("Error on InitRoot: ", err.Error())
|
||||
return err
|
||||
}
|
||||
if _, err := r.tufRepo.InitTargets(data.CanonicalTargetsRole); err != nil {
|
||||
logrus.Debug("Error on InitTargets: ", err.Error())
|
||||
return err
|
||||
}
|
||||
if err := r.tufRepo.InitSnapshot(); err != nil {
|
||||
logrus.Debug("Error on InitSnapshot: ", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
return r.saveMetadata(serverManagesSnapshot)
|
||||
}
|
||||
|
||||
// createNewPublicKeyFromKeyIDs generates a set of public keys corresponding to the given list of
|
||||
// key IDs existing in the repository's CryptoService.
|
||||
// the public keys returned are ordered to correspond to the keyIDs
|
||||
func (r *repository) createNewPublicKeyFromKeyIDs(keyIDs []string) ([]data.PublicKey, error) {
|
||||
publicKeys := []data.PublicKey{}
|
||||
|
||||
privKeys, err := getAllPrivKeys(keyIDs, r.GetCryptoService())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, privKey := range privKeys {
|
||||
rootKey, err := rootCertKey(r.gun, privKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
publicKeys = append(publicKeys, rootKey)
|
||||
}
|
||||
return publicKeys, nil
|
||||
}
|
||||
|
||||
// publicKeysOfKeyIDs confirms that the public key and private keys (by Key IDs) forms valid, strictly ordered key pairs
|
||||
// (eg. keyIDs[0] must match pubKeys[0] and keyIDs[1] must match certs[1] and so on).
|
||||
// Or throw error when they mismatch.
|
||||
func (r *repository) publicKeysOfKeyIDs(keyIDs []string, pubKeys []data.PublicKey) ([]data.PublicKey, error) {
|
||||
if len(keyIDs) != len(pubKeys) {
|
||||
err := fmt.Errorf("require matching number of keyIDs and public keys but got %d IDs and %d public keys", len(keyIDs), len(pubKeys))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := matchKeyIdsWithPubKeys(r, keyIDs, pubKeys); err != nil {
|
||||
return nil, fmt.Errorf("could not obtain public key from IDs: %v", err)
|
||||
}
|
||||
return pubKeys, nil
|
||||
}
|
||||
|
||||
// matchKeyIdsWithPubKeys validates that the private keys (represented by their IDs) and the public keys
|
||||
// forms matching key pairs
|
||||
func matchKeyIdsWithPubKeys(r *repository, ids []string, pubKeys []data.PublicKey) error {
|
||||
for i := 0; i < len(ids); i++ {
|
||||
privKey, _, err := r.GetCryptoService().GetPrivateKey(ids[i])
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get the private key matching id %v: %v", ids[i], err)
|
||||
}
|
||||
|
||||
pubKey := pubKeys[i]
|
||||
err = signed.VerifyPublicKeyMatchesPrivateKey(privKey, pubKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Initialize creates a new repository by using rootKey as the root Key for the
|
||||
// TUF repository. The server must be reachable (and is asked to generate a
|
||||
// timestamp key and possibly other serverManagedRoles), but the created repository
|
||||
// result is only stored on local disk, not published to the server. To do that,
|
||||
// use r.Publish() eventually.
|
||||
func (r *repository) Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error {
|
||||
return r.initialize(rootKeyIDs, nil, serverManagedRoles...)
|
||||
}
|
||||
|
||||
type errKeyNotFound struct{}
|
||||
|
||||
func (errKeyNotFound) Error() string {
|
||||
return fmt.Sprintf("cannot find matching private key id")
|
||||
}
|
||||
|
||||
// keyExistsInList returns the id of the private key in ids that matches the public key
|
||||
// otherwise return empty string
|
||||
func keyExistsInList(cert data.PublicKey, ids map[string]bool) error {
|
||||
pubKeyID, err := utils.CanonicalKeyID(cert)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to obtain the public key id from the given certificate: %v", err)
|
||||
}
|
||||
if _, ok := ids[pubKeyID]; ok {
|
||||
return nil
|
||||
}
|
||||
return errKeyNotFound{}
|
||||
}
|
||||
|
||||
// InitializeWithCertificate initializes the repository with root keys and their corresponding certificates
|
||||
func (r *repository) InitializeWithCertificate(rootKeyIDs []string, rootCerts []data.PublicKey,
|
||||
serverManagedRoles ...data.RoleName) error {
|
||||
|
||||
// If we explicitly pass in certificate(s) but not key, then look keys up using certificate
|
||||
if len(rootKeyIDs) == 0 && len(rootCerts) != 0 {
|
||||
rootKeyIDs = []string{}
|
||||
availableRootKeyIDs := make(map[string]bool)
|
||||
for _, k := range r.GetCryptoService().ListKeys(data.CanonicalRootRole) {
|
||||
availableRootKeyIDs[k] = true
|
||||
}
|
||||
|
||||
for _, cert := range rootCerts {
|
||||
if err := keyExistsInList(cert, availableRootKeyIDs); err != nil {
|
||||
return fmt.Errorf("error initializing repository with certificate: %v", err)
|
||||
}
|
||||
keyID, _ := utils.CanonicalKeyID(cert)
|
||||
rootKeyIDs = append(rootKeyIDs, keyID)
|
||||
}
|
||||
}
|
||||
return r.initialize(rootKeyIDs, rootCerts, serverManagedRoles...)
|
||||
}
|
||||
|
||||
func (r *repository) initializeRoles(rootKeys []data.PublicKey, localRoles, remoteRoles []data.RoleName) (
|
||||
root, targets, snapshot, timestamp data.BaseRole, err error) {
|
||||
root = data.NewBaseRole(
|
||||
data.CanonicalRootRole,
|
||||
notary.MinThreshold,
|
||||
rootKeys...,
|
||||
)
|
||||
|
||||
// we want to create all the local keys first so we don't have to
|
||||
// make unnecessary network calls
|
||||
for _, role := range localRoles {
|
||||
// This is currently hardcoding the keys to ECDSA.
|
||||
var key data.PublicKey
|
||||
key, err = r.GetCryptoService().Create(role, r.gun, data.ECDSAKey)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
switch role {
|
||||
case data.CanonicalSnapshotRole:
|
||||
snapshot = data.NewBaseRole(
|
||||
role,
|
||||
notary.MinThreshold,
|
||||
key,
|
||||
)
|
||||
case data.CanonicalTargetsRole:
|
||||
targets = data.NewBaseRole(
|
||||
role,
|
||||
notary.MinThreshold,
|
||||
key,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
remote := r.getRemoteStore()
|
||||
|
||||
for _, role := range remoteRoles {
|
||||
// This key is generated by the remote server.
|
||||
var key data.PublicKey
|
||||
key, err = getRemoteKey(role, remote)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
logrus.Debugf("got remote %s %s key with keyID: %s",
|
||||
role, key.Algorithm(), key.ID())
|
||||
switch role {
|
||||
case data.CanonicalSnapshotRole:
|
||||
snapshot = data.NewBaseRole(
|
||||
role,
|
||||
notary.MinThreshold,
|
||||
key,
|
||||
)
|
||||
case data.CanonicalTimestampRole:
|
||||
timestamp = data.NewBaseRole(
|
||||
role,
|
||||
notary.MinThreshold,
|
||||
key,
|
||||
)
|
||||
}
|
||||
}
|
||||
return root, targets, snapshot, timestamp, nil
|
||||
}
|
||||
|
||||
// adds a TUF Change template to the given roles
|
||||
func addChange(cl changelist.Changelist, c changelist.Change, roles ...data.RoleName) error {
|
||||
if len(roles) == 0 {
|
||||
roles = []data.RoleName{data.CanonicalTargetsRole}
|
||||
}
|
||||
|
||||
var changes []changelist.Change
|
||||
for _, role := range roles {
|
||||
// Ensure we can only add targets to the CanonicalTargetsRole,
|
||||
// or a Delegation role (which is <CanonicalTargetsRole>/something else)
|
||||
if role != data.CanonicalTargetsRole && !data.IsDelegation(role) && !data.IsWildDelegation(role) {
|
||||
return data.ErrInvalidRole{
|
||||
Role: role,
|
||||
Reason: "cannot add targets to this role",
|
||||
}
|
||||
}
|
||||
|
||||
changes = append(changes, changelist.NewTUFChange(
|
||||
c.Action(),
|
||||
role,
|
||||
c.Type(),
|
||||
c.Path(),
|
||||
c.Content(),
|
||||
))
|
||||
}
|
||||
|
||||
for _, c := range changes {
|
||||
if err := cl.Add(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddTarget creates new changelist entries to add a target to the given roles
|
||||
// in the repository when the changelist gets applied at publish time.
|
||||
// If roles are unspecified, the default role is "targets"
|
||||
func (r *repository) AddTarget(target *Target, roles ...data.RoleName) error {
|
||||
if len(target.Hashes) == 0 {
|
||||
return fmt.Errorf("no hashes specified for target \"%s\"", target.Name)
|
||||
}
|
||||
logrus.Debugf("Adding target \"%s\" with sha256 \"%x\" and size %d bytes.\n", target.Name, target.Hashes["sha256"], target.Length)
|
||||
|
||||
meta := data.FileMeta{Length: target.Length, Hashes: target.Hashes, Custom: target.Custom}
|
||||
metaJSON, err := json.Marshal(meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
template := changelist.NewTUFChange(
|
||||
changelist.ActionCreate, "", changelist.TypeTargetsTarget,
|
||||
target.Name, metaJSON)
|
||||
return addChange(r.changelist, template, roles...)
|
||||
}
|
||||
|
||||
// RemoveTarget creates new changelist entries to remove a target from the given
|
||||
// roles in the repository when the changelist gets applied at publish time.
|
||||
// If roles are unspecified, the default role is "target".
|
||||
func (r *repository) RemoveTarget(targetName string, roles ...data.RoleName) error {
|
||||
logrus.Debugf("Removing target \"%s\"", targetName)
|
||||
template := changelist.NewTUFChange(changelist.ActionDelete, "",
|
||||
changelist.TypeTargetsTarget, targetName, nil)
|
||||
return addChange(r.changelist, template, roles...)
|
||||
}
|
||||
|
||||
// GetChangelist returns the list of the repository's unpublished changes
|
||||
func (r *repository) GetChangelist() (changelist.Changelist, error) {
|
||||
return r.changelist, nil
|
||||
}
|
||||
|
||||
// getRemoteStore returns the remoteStore of a repository if valid or
|
||||
// or an OfflineStore otherwise
|
||||
func (r *repository) getRemoteStore() store.RemoteStore {
|
||||
if r.remoteStore != nil {
|
||||
return r.remoteStore
|
||||
}
|
||||
|
||||
r.remoteStore = &store.OfflineStore{}
|
||||
|
||||
return r.remoteStore
|
||||
}
|
||||
|
||||
// Publish pushes the local changes in signed material to the remote notary-server
|
||||
// Conceptually it performs an operation similar to a `git rebase`
|
||||
func (r *repository) Publish() error {
|
||||
if err := r.publish(r.changelist); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := r.changelist.Clear(""); err != nil {
|
||||
// This is not a critical problem when only a single host is pushing
|
||||
// but will cause weird behaviour if changelist cleanup is failing
|
||||
// and there are multiple hosts writing to the repo.
|
||||
logrus.Warn("Unable to clear changelist. You may want to manually delete the folder ", r.changelist.Location())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// publish pushes the changes in the given changelist to the remote notary-server
|
||||
// Conceptually it performs an operation similar to a `git rebase`
|
||||
func (r *repository) publish(cl changelist.Changelist) error {
|
||||
var initialPublish bool
|
||||
// update first before publishing
|
||||
if err := r.updateTUF(true); err != nil {
|
||||
// If the remote is not aware of the repo, then this is being published
|
||||
// for the first time. Try to initialize the repository before publishing.
|
||||
if _, ok := err.(ErrRepositoryNotExist); ok {
|
||||
err := r.bootstrapRepo()
|
||||
if _, ok := err.(store.ErrMetaNotFound); ok {
|
||||
logrus.Infof("No TUF data found locally or remotely - initializing repository %s for the first time", r.gun.String())
|
||||
err = r.Initialize(nil)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
logrus.WithError(err).Debugf("Unable to load or initialize repository during first publish: %s", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
// Ensure we will push the initial root and targets file. Either or
|
||||
// both of the root and targets may not be marked as Dirty, since
|
||||
// there may not be any changes that update them, so use a
|
||||
// different boolean.
|
||||
initialPublish = true
|
||||
} else {
|
||||
// We could not update, so we cannot publish.
|
||||
logrus.Error("Could not publish Repository since we could not update: ", err.Error())
|
||||
return err
|
||||
}
|
||||
}
|
||||
// apply the changelist to the repo
|
||||
if err := applyChangelist(r.tufRepo, r.invalid, cl); err != nil {
|
||||
logrus.Debug("Error applying changelist")
|
||||
return err
|
||||
}
|
||||
|
||||
// these are the TUF files we will need to update, serialized as JSON before
|
||||
// we send anything to remote
|
||||
updatedFiles := make(map[data.RoleName][]byte)
|
||||
|
||||
// Fetch old keys to support old clients
|
||||
legacyKeys, err := r.oldKeysForLegacyClientSupport(r.LegacyVersions, initialPublish)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check if our root file is nearing expiry or dirty. Resign if it is. If
|
||||
// root is not dirty but we are publishing for the first time, then just
|
||||
// publish the existing root we have.
|
||||
if err := signRootIfNecessary(updatedFiles, r.tufRepo, legacyKeys, initialPublish); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := signTargets(updatedFiles, r.tufRepo, initialPublish); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if we initialized the repo while designating the server as the snapshot
|
||||
// signer, then there won't be a snapshots file. However, we might now
|
||||
// have a local key (if there was a rotation), so initialize one.
|
||||
if r.tufRepo.Snapshot == nil {
|
||||
if err := r.tufRepo.InitSnapshot(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if snapshotJSON, err := serializeCanonicalRole(
|
||||
r.tufRepo, data.CanonicalSnapshotRole, nil); err == nil {
|
||||
// Only update the snapshot if we've successfully signed it.
|
||||
updatedFiles[data.CanonicalSnapshotRole] = snapshotJSON
|
||||
} else if signErr, ok := err.(signed.ErrInsufficientSignatures); ok && signErr.FoundKeys == 0 {
|
||||
// If signing fails due to us not having the snapshot key, then
|
||||
// assume the server is going to sign, and do not include any snapshot
|
||||
// data.
|
||||
logrus.Debugf("Client does not have the key to sign snapshot. " +
|
||||
"Assuming that server should sign the snapshot.")
|
||||
} else {
|
||||
logrus.Debugf("Client was unable to sign the snapshot: %s", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
remote := r.getRemoteStore()
|
||||
|
||||
return remote.SetMulti(data.MetadataRoleMapToStringMap(updatedFiles))
|
||||
}
|
||||
|
||||
func signRootIfNecessary(updates map[data.RoleName][]byte, repo *tuf.Repo, extraSigningKeys data.KeyList, initialPublish bool) error {
|
||||
if len(extraSigningKeys) > 0 {
|
||||
repo.Root.Dirty = true
|
||||
}
|
||||
if nearExpiry(repo.Root.Signed.SignedCommon) || repo.Root.Dirty {
|
||||
rootJSON, err := serializeCanonicalRole(repo, data.CanonicalRootRole, extraSigningKeys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updates[data.CanonicalRootRole] = rootJSON
|
||||
} else if initialPublish {
|
||||
rootJSON, err := repo.Root.MarshalJSON()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
updates[data.CanonicalRootRole] = rootJSON
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fetch back a `legacyVersions` number of roots files, collect the root public keys
|
||||
// This includes old `root` roles as well as legacy versioned root roles, e.g. `1.root`
|
||||
func (r *repository) oldKeysForLegacyClientSupport(legacyVersions int, initialPublish bool) (data.KeyList, error) {
|
||||
if initialPublish {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var oldestVersion int
|
||||
prevVersion := r.tufRepo.Root.Signed.Version
|
||||
|
||||
if legacyVersions == SignWithAllOldVersions {
|
||||
oldestVersion = 1
|
||||
} else {
|
||||
oldestVersion = r.tufRepo.Root.Signed.Version - legacyVersions
|
||||
}
|
||||
|
||||
if oldestVersion < 1 {
|
||||
oldestVersion = 1
|
||||
}
|
||||
|
||||
if prevVersion <= 1 || oldestVersion == prevVersion {
|
||||
return nil, nil
|
||||
}
|
||||
oldKeys := make(map[string]data.PublicKey)
|
||||
|
||||
c, err := bootstrapClient(TUFLoadOptions{
|
||||
GUN: r.gun,
|
||||
TrustPinning: r.trustPinning,
|
||||
CryptoService: r.cryptoService,
|
||||
Cache: r.cache,
|
||||
RemoteStore: r.remoteStore,
|
||||
AlwaysCheckInitialized: true,
|
||||
})
|
||||
// require a server connection to fetch old roots
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for v := prevVersion; v >= oldestVersion; v-- {
|
||||
logrus.Debugf("fetching old keys from version %d", v)
|
||||
// fetch old root version
|
||||
		versionedRole := fmt.Sprintf("%d.%s", v, data.CanonicalRootRole.String())

		raw, err := c.remote.GetSized(versionedRole, -1)
		if err != nil {
			logrus.Debugf("error downloading %s: %s", versionedRole, err)
			continue
		}

		signedOldRoot := &data.Signed{}
		if err := json.Unmarshal(raw, signedOldRoot); err != nil {
			return nil, err
		}
		oldRootVersion, err := data.RootFromSigned(signedOldRoot)
		if err != nil {
			return nil, err
		}

		// extract legacy versioned root keys
		oldRootVersionKeys := getOldRootPublicKeys(oldRootVersion)
		for _, oldKey := range oldRootVersionKeys {
			oldKeys[oldKey.ID()] = oldKey
		}
	}
	oldKeyList := make(data.KeyList, 0, len(oldKeys))
	for _, key := range oldKeys {
		oldKeyList = append(oldKeyList, key)
	}
	return oldKeyList, nil
}

// getOldRootPublicKeys returns the root keys of all saved root versions
// older than the current root version
func getOldRootPublicKeys(root *data.SignedRoot) data.KeyList {
	rootRole, err := root.BuildBaseRole(data.CanonicalRootRole)
	if err != nil {
		return nil
	}
	return rootRole.ListKeys()
}

func signTargets(updates map[data.RoleName][]byte, repo *tuf.Repo, initialPublish bool) error {
	// iterate through all the targets files - if they are dirty, sign and update
	for roleName, roleObj := range repo.Targets {
		if roleObj.Dirty || (roleName == data.CanonicalTargetsRole && initialPublish) {
			targetsJSON, err := serializeCanonicalRole(repo, roleName, nil)
			if err != nil {
				return err
			}
			updates[roleName] = targetsJSON
		}
	}
	return nil
}

// bootstrapRepo loads the repository from the local file system (i.e.
// a not yet published repo or a possibly obsolete local copy) into
// r.tufRepo. This attempts to load metadata for all roles. Since server
// snapshots are supported, if the snapshot metadata fails to load, that's ok.
// This assumes that bootstrapRepo is only used by Publish() or RotateKey()
func (r *repository) bootstrapRepo() error {
	b := tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), r.trustPinning)

	logrus.Debugf("Loading trusted collection.")

	for _, role := range data.BaseRoles {
		jsonBytes, err := r.cache.GetSized(role.String(), store.NoSizeLimit)
		if err != nil {
			if _, ok := err.(store.ErrMetaNotFound); ok &&
				// server snapshots are supported, and server timestamp management
				// is required, so if either of these fail to load that's ok - especially
				// if the repo is new
				role == data.CanonicalSnapshotRole || role == data.CanonicalTimestampRole {
				continue
			}
			return err
		}
		if err := b.Load(role, jsonBytes, 1, true); err != nil {
			return err
		}
	}

	tufRepo, _, err := b.Finish()
	if err == nil {
		r.tufRepo = tufRepo
	}
	return nil
}

// saveMetadata saves contents of r.tufRepo onto the local disk, creating
// signatures as necessary, possibly prompting for passphrases.
func (r *repository) saveMetadata(ignoreSnapshot bool) error {
	logrus.Debugf("Saving changes to Trusted Collection.")

	rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole, nil)
	if err != nil {
		return err
	}
	err = r.cache.Set(data.CanonicalRootRole.String(), rootJSON)
	if err != nil {
		return err
	}

	targetsToSave := make(map[data.RoleName][]byte)
	for t := range r.tufRepo.Targets {
		signedTargets, err := r.tufRepo.SignTargets(t, data.DefaultExpires(data.CanonicalTargetsRole))
		if err != nil {
			return err
		}
		targetsJSON, err := json.Marshal(signedTargets)
		if err != nil {
			return err
		}
		targetsToSave[t] = targetsJSON
	}

	for role, blob := range targetsToSave {
		// If the parent directory does not exist, cache.Set will create it
		r.cache.Set(role.String(), blob)
	}

	if ignoreSnapshot {
		return nil
	}

	snapshotJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalSnapshotRole, nil)
	if err != nil {
		return err
	}

	return r.cache.Set(data.CanonicalSnapshotRole.String(), snapshotJSON)
}

// RotateKey removes all existing keys associated with the role. If no keys are
// specified in keyList, then this creates and adds one new key or delegates
// managing the key to the server. If keys are specified in keyList, then they are
// used for signing the role.
// These changes are staged in a changelist until publish is called.
func (r *repository) RotateKey(role data.RoleName, serverManagesKey bool, keyList []string) error {
	if err := checkRotationInput(role, serverManagesKey); err != nil {
		return err
	}

	pubKeyList, err := r.pubKeyListForRotation(role, serverManagesKey, keyList)
	if err != nil {
		return err
	}

	cl := changelist.NewMemChangelist()
	if err := r.rootFileKeyChange(cl, role, changelist.ActionCreate, pubKeyList); err != nil {
		return err
	}
	return r.publish(cl)
}

// pubKeyListForRotation returns the list of public keys to rotate to, given the
// role, whether the server manages the key, and any existing key IDs to reuse
func (r *repository) pubKeyListForRotation(role data.RoleName, serverManaged bool, newKeys []string) (pubKeyList data.KeyList, err error) {
	var pubKey data.PublicKey

	// If server manages the key being rotated, request a rotation and return the new key
	if serverManaged {
		remote := r.getRemoteStore()
		pubKey, err = rotateRemoteKey(role, remote)
		if err != nil {
			return nil, fmt.Errorf("unable to rotate remote key: %s", err)
		}
		pubKeyList = make(data.KeyList, 0, 1)
		pubKeyList = append(pubKeyList, pubKey)
		return pubKeyList, nil
	}

	// If no new keys are passed in, we generate one
	if len(newKeys) == 0 {
		pubKey, err = r.GetCryptoService().Create(role, r.gun, data.ECDSAKey)
		if err != nil {
			return nil, fmt.Errorf("unable to generate key: %s", err)
		}
		pubKeyList = make(data.KeyList, 0, 1)
		pubKeyList = append(pubKeyList, pubKey)
	}

	// If a list of keys to rotate to are provided, we add those
	if len(newKeys) > 0 {
		pubKeyList = make(data.KeyList, 0, len(newKeys))
		for _, keyID := range newKeys {
			pubKey = r.GetCryptoService().GetKey(keyID)
			if pubKey == nil {
				return nil, fmt.Errorf("unable to find key: %s", keyID)
			}
			pubKeyList = append(pubKeyList, pubKey)
		}
	}

	// Convert to certs (for root keys)
	if pubKeyList, err = r.pubKeysToCerts(role, pubKeyList); err != nil {
		return nil, err
	}

	return pubKeyList, nil
}

func (r *repository) pubKeysToCerts(role data.RoleName, pubKeyList data.KeyList) (data.KeyList, error) {
	// only generate certs for root keys
	if role != data.CanonicalRootRole {
		return pubKeyList, nil
	}

	for i, pubKey := range pubKeyList {
		privKey, loadedRole, err := r.GetCryptoService().GetPrivateKey(pubKey.ID())
		if err != nil {
			return nil, err
		}
		if loadedRole != role {
			return nil, fmt.Errorf("attempted to load root key but given %s key instead", loadedRole)
		}
		pubKey, err = rootCertKey(r.gun, privKey)
		if err != nil {
			return nil, err
		}
		pubKeyList[i] = pubKey
	}
	return pubKeyList, nil
}

func checkRotationInput(role data.RoleName, serverManaged bool) error {
	// We currently support remotely managing timestamp and snapshot keys
	canBeRemoteKey := role == data.CanonicalTimestampRole || role == data.CanonicalSnapshotRole
	// and locally managing root, targets, and snapshot keys
	canBeLocalKey := role == data.CanonicalSnapshotRole || role == data.CanonicalTargetsRole ||
		role == data.CanonicalRootRole

	switch {
	case !data.ValidRole(role) || data.IsDelegation(role):
		return fmt.Errorf("notary does not currently permit rotating the %s key", role)
	case serverManaged && !canBeRemoteKey:
		return ErrInvalidRemoteRole{Role: role}
	case !serverManaged && !canBeLocalKey:
		return ErrInvalidLocalRole{Role: role}
	}
	return nil
}

func (r *repository) rootFileKeyChange(cl changelist.Changelist, role data.RoleName, action string, keyList []data.PublicKey) error {
	meta := changelist.TUFRootData{
		RoleName: role,
		Keys:     keyList,
	}
	metaJSON, err := json.Marshal(meta)
	if err != nil {
		return err
	}

	c := changelist.NewTUFChange(
		action,
		changelist.ScopeRoot,
		changelist.TypeBaseRole,
		role.String(),
		metaJSON,
	)
	return cl.Add(c)
}

// DeleteTrustData removes the trust data stored for this repo in the TUF cache on the client side.
// Note that this will not delete any private key material from local storage.
func DeleteTrustData(baseDir string, gun data.GUN, URL string, rt http.RoundTripper, deleteRemote bool) error {
	localRepo := filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()))
	// Remove the tufRepoPath directory, which includes local TUF metadata files and changelist information
	if err := os.RemoveAll(localRepo); err != nil {
		return fmt.Errorf("error clearing TUF repo data: %v", err)
	}
	// Note that this will require admin permission for the gun in the roundtripper
	if deleteRemote {
		remote, err := getRemoteStore(URL, gun, rt)
		if err != nil {
			logrus.Errorf("unable to instantiate a remote store: %v", err)
			return err
		}
		if err := remote.RemoveAll(); err != nil {
			return err
		}
	}
	return nil
}

// SetLegacyVersions sets the number of previous root versions that will be
// inspected for old signing keys when the root is next signed.
func (r *repository) SetLegacyVersions(n int) {
	r.LegacyVersions = n
}
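To make the rotation flow above concrete, here is a minimal caller-side sketch (not part of the vendored code) that drives it through the Repository interface defined later in this commit; the repo value is assumed to be obtained from the package's repository constructor, and the choice of the snapshot role is just an example.

package clientexample

import (
	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/tuf/data"
)

// RotateSnapshotToServer hands management of the snapshot key to the server.
// Per checkRotationInput above, serverManagesKey=true is only valid for the
// snapshot and timestamp roles; an empty keyList means "create or request a
// fresh key" rather than reusing existing key IDs.
func RotateSnapshotToServer(repo client.Repository) error {
	// RotateKey stages the root change in an in-memory changelist and
	// publishes it immediately, so no separate Publish call is needed.
	return repo.RotateKey(data.CanonicalSnapshotRole, true, nil)
}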
226
vendor/github.com/theupdateframework/notary/client/delegations.go
generated
vendored
Normal file
@ -0,0 +1,226 @@
package client

import (
	"encoding/json"
	"fmt"

	"github.com/sirupsen/logrus"
	"github.com/theupdateframework/notary"
	"github.com/theupdateframework/notary/client/changelist"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/utils"
)

// AddDelegation creates changelist entries to add provided delegation public keys and paths.
// This method composes AddDelegationRoleAndKeys and AddDelegationPaths (each creates one changelist entry if called).
func (r *repository) AddDelegation(name data.RoleName, delegationKeys []data.PublicKey, paths []string) error {
	if len(delegationKeys) > 0 {
		err := r.AddDelegationRoleAndKeys(name, delegationKeys)
		if err != nil {
			return err
		}
	}
	if len(paths) > 0 {
		err := r.AddDelegationPaths(name, paths)
		if err != nil {
			return err
		}
	}
	return nil
}

// AddDelegationRoleAndKeys creates a changelist entry to add provided delegation public keys.
// This method is the simplest way to create a new delegation, because the delegation must have at least
// one key upon creation to be valid, since we will reject the changelist while validating the threshold.
func (r *repository) AddDelegationRoleAndKeys(name data.RoleName, delegationKeys []data.PublicKey) error {

	if !data.IsDelegation(name) {
		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
	}

	logrus.Debugf(`Adding delegation "%s" with threshold %d, and %d keys`,
		name, notary.MinThreshold, len(delegationKeys))

	// Defaulting to threshold of 1, since we don't allow for larger thresholds at the moment.
	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
		NewThreshold: notary.MinThreshold,
		AddKeys:      data.KeyList(delegationKeys),
	})
	if err != nil {
		return err
	}

	template := newCreateDelegationChange(name, tdJSON)
	return addChange(r.changelist, template, name)
}

// AddDelegationPaths creates a changelist entry to add provided paths to an existing delegation.
// This method cannot create a new delegation itself because the role must meet the key threshold upon creation.
func (r *repository) AddDelegationPaths(name data.RoleName, paths []string) error {

	if !data.IsDelegation(name) {
		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
	}

	logrus.Debugf(`Adding %s paths to delegation %s`, paths, name)

	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
		AddPaths: paths,
	})
	if err != nil {
		return err
	}

	template := newCreateDelegationChange(name, tdJSON)
	return addChange(r.changelist, template, name)
}

// RemoveDelegationKeysAndPaths creates changelist entries to remove provided delegation key IDs and paths.
// This method composes RemoveDelegationPaths and RemoveDelegationKeys (each creates one changelist entry if called).
func (r *repository) RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error {
	if len(paths) > 0 {
		err := r.RemoveDelegationPaths(name, paths)
		if err != nil {
			return err
		}
	}
	if len(keyIDs) > 0 {
		err := r.RemoveDelegationKeys(name, keyIDs)
		if err != nil {
			return err
		}
	}
	return nil
}

// RemoveDelegationRole creates a changelist to remove all paths and keys from a role, and delete the role in its entirety.
func (r *repository) RemoveDelegationRole(name data.RoleName) error {

	if !data.IsDelegation(name) {
		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
	}

	logrus.Debugf(`Removing delegation "%s"`, name)

	template := newDeleteDelegationChange(name, nil)
	return addChange(r.changelist, template, name)
}

// RemoveDelegationPaths creates a changelist entry to remove provided paths from an existing delegation.
func (r *repository) RemoveDelegationPaths(name data.RoleName, paths []string) error {

	if !data.IsDelegation(name) {
		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
	}

	logrus.Debugf(`Removing %s paths from delegation "%s"`, paths, name)

	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
		RemovePaths: paths,
	})
	if err != nil {
		return err
	}

	template := newUpdateDelegationChange(name, tdJSON)
	return addChange(r.changelist, template, name)
}

// RemoveDelegationKeys creates a changelist entry to remove provided keys from an existing delegation.
// When this changelist is applied, if the specified keys are the only keys left in the role,
// the role itself will be deleted in its entirety.
// It can also delete a key from all delegations under a parent using a name
// with a wildcard at the end.
func (r *repository) RemoveDelegationKeys(name data.RoleName, keyIDs []string) error {

	if !data.IsDelegation(name) && !data.IsWildDelegation(name) {
		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
	}

	logrus.Debugf(`Removing %s keys from delegation "%s"`, keyIDs, name)

	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
		RemoveKeys: keyIDs,
	})
	if err != nil {
		return err
	}

	template := newUpdateDelegationChange(name, tdJSON)
	return addChange(r.changelist, template, name)
}

// ClearDelegationPaths creates a changelist entry to remove all paths from an existing delegation.
func (r *repository) ClearDelegationPaths(name data.RoleName) error {

	if !data.IsDelegation(name) {
		return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
	}

	logrus.Debugf(`Removing all paths from delegation "%s"`, name)

	tdJSON, err := json.Marshal(&changelist.TUFDelegation{
		ClearAllPaths: true,
	})
	if err != nil {
		return err
	}

	template := newUpdateDelegationChange(name, tdJSON)
	return addChange(r.changelist, template, name)
}

func newUpdateDelegationChange(name data.RoleName, content []byte) *changelist.TUFChange {
	return changelist.NewTUFChange(
		changelist.ActionUpdate,
		name,
		changelist.TypeTargetsDelegation,
		"", // no path for delegations
		content,
	)
}

func newCreateDelegationChange(name data.RoleName, content []byte) *changelist.TUFChange {
	return changelist.NewTUFChange(
		changelist.ActionCreate,
		name,
		changelist.TypeTargetsDelegation,
		"", // no path for delegations
		content,
	)
}

func newDeleteDelegationChange(name data.RoleName, content []byte) *changelist.TUFChange {
	return changelist.NewTUFChange(
		changelist.ActionDelete,
		name,
		changelist.TypeTargetsDelegation,
		"", // no path for delegations
		content,
	)
}

func translateDelegationsToCanonicalIDs(delegationInfo data.Delegations) ([]data.Role, error) {
	canonicalDelegations := make([]data.Role, len(delegationInfo.Roles))
	// Do a copy by value to ensure local delegation metadata is untouched
	for idx, origRole := range delegationInfo.Roles {
		canonicalDelegations[idx] = *origRole
	}
	delegationKeys := delegationInfo.Keys
	for i, delegation := range canonicalDelegations {
		canonicalKeyIDs := []string{}
		for _, keyID := range delegation.KeyIDs {
			pubKey, ok := delegationKeys[keyID]
			if !ok {
				return []data.Role{}, fmt.Errorf("could not translate canonical key IDs for %s", delegation.Name)
			}
			canonicalKeyID, err := utils.CanonicalKeyID(pubKey)
			if err != nil {
				return []data.Role{}, fmt.Errorf("could not translate canonical key IDs for %s: %v", delegation.Name, err)
			}
			canonicalKeyIDs = append(canonicalKeyIDs, canonicalKeyID)
		}
		canonicalDelegations[i].KeyIDs = canonicalKeyIDs
	}
	return canonicalDelegations, nil
}
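As a usage sketch (again outside the vendored code), staging a hypothetical "targets/releases" delegation with one key and one path looks like this; repo is assumed to be an initialized client.Repository and pubKey a delegation public key created elsewhere, for example via repo.GetCryptoService().

package clientexample

import (
	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/tuf/data"
)

// StageReleasesDelegation stages and then publishes a "targets/releases"
// delegation guarding the "releases/" path prefix with a single key.
func StageReleasesDelegation(repo client.Repository, pubKey data.PublicKey) error {
	name := data.RoleName("targets/releases")
	// AddDelegation composes AddDelegationRoleAndKeys and AddDelegationPaths,
	// writing up to two changelist entries that are applied at publish time.
	if err := repo.AddDelegation(name, []data.PublicKey{pubKey}, []string{"releases/"}); err != nil {
		return err
	}
	return repo.Publish()
}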
48
vendor/github.com/theupdateframework/notary/client/errors.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
package client

import (
	"fmt"

	"github.com/theupdateframework/notary/tuf/data"
)

// ErrRepoNotInitialized is returned when trying to publish an uninitialized
// notary repository
type ErrRepoNotInitialized struct{}

func (err ErrRepoNotInitialized) Error() string {
	return "repository has not been initialized"
}

// ErrInvalidRemoteRole is returned when the server is requested to manage
// a key type that is not permitted
type ErrInvalidRemoteRole struct {
	Role data.RoleName
}

func (err ErrInvalidRemoteRole) Error() string {
	return fmt.Sprintf(
		"notary does not permit the server managing the %s key", err.Role.String())
}

// ErrInvalidLocalRole is returned when the client wants to manage
// a key type that is not permitted
type ErrInvalidLocalRole struct {
	Role data.RoleName
}

func (err ErrInvalidLocalRole) Error() string {
	return fmt.Sprintf(
		"notary does not permit the client managing the %s key", err.Role)
}

// ErrRepositoryNotExist is returned when an action is taken on a remote
// repository that doesn't exist
type ErrRepositoryNotExist struct {
	remote string
	gun    data.GUN
}

func (err ErrRepositoryNotExist) Error() string {
	return fmt.Sprintf("%s does not have trust data for %s", err.remote, err.gun.String())
}
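Since these error types are plain value types, callers can branch on them with a type switch or errors.As; a small sketch (not part of the vendored code):

package clientexample

import (
	"errors"

	"github.com/theupdateframework/notary/client"
)

// DescribeRotationErr maps the typed rotation errors above to messages;
// errors.As works here because each type implements error with a value receiver.
func DescribeRotationErr(err error) string {
	var remoteErr client.ErrInvalidRemoteRole
	var localErr client.ErrInvalidLocalRole
	switch {
	case errors.As(err, &remoteErr):
		return "the server may not manage the " + remoteErr.Role.String() + " key"
	case errors.As(err, &localErr):
		return "the client may not manage the " + localErr.Role.String() + " key"
	default:
		return err.Error()
	}
}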
306
vendor/github.com/theupdateframework/notary/client/helpers.go
generated
vendored
Normal file
@ -0,0 +1,306 @@
package client

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/theupdateframework/notary/client/changelist"
	store "github.com/theupdateframework/notary/storage"
	"github.com/theupdateframework/notary/tuf"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/signed"
	"github.com/theupdateframework/notary/tuf/utils"
)

// Use this to initialize remote HTTPStores from the config settings
func getRemoteStore(baseURL string, gun data.GUN, rt http.RoundTripper) (store.RemoteStore, error) {
	s, err := store.NewHTTPStore(
		baseURL+"/v2/"+gun.String()+"/_trust/tuf/",
		"",
		"json",
		"key",
		rt,
	)
	if err != nil {
		return store.OfflineStore{}, err
	}
	return s, nil
}

func applyChangelist(repo *tuf.Repo, invalid *tuf.Repo, cl changelist.Changelist) error {
	it, err := cl.NewIterator()
	if err != nil {
		return err
	}
	index := 0
	for it.HasNext() {
		c, err := it.Next()
		if err != nil {
			return err
		}
		isDel := data.IsDelegation(c.Scope()) || data.IsWildDelegation(c.Scope())
		switch {
		case c.Scope() == changelist.ScopeTargets || isDel:
			err = applyTargetsChange(repo, invalid, c)
		case c.Scope() == changelist.ScopeRoot:
			err = applyRootChange(repo, c)
		default:
			return fmt.Errorf("scope not supported: %s", c.Scope().String())
		}
		if err != nil {
			logrus.Debugf("error attempting to apply change #%d: %s, on scope: %s path: %s type: %s", index, c.Action(), c.Scope(), c.Path(), c.Type())
			return err
		}
		index++
	}
	logrus.Debugf("applied %d change(s)", index)
	return nil
}

func applyTargetsChange(repo *tuf.Repo, invalid *tuf.Repo, c changelist.Change) error {
	switch c.Type() {
	case changelist.TypeTargetsTarget:
		return changeTargetMeta(repo, c)
	case changelist.TypeTargetsDelegation:
		return changeTargetsDelegation(repo, c)
	case changelist.TypeWitness:
		return witnessTargets(repo, invalid, c.Scope())
	default:
		return fmt.Errorf("only target meta and delegations changes supported")
	}
}

func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
	switch c.Action() {
	case changelist.ActionCreate:
		td := changelist.TUFDelegation{}
		err := json.Unmarshal(c.Content(), &td)
		if err != nil {
			return err
		}

		// Try to create a brand new role or update an existing one.
		// First add the keys, then the paths. We can only add keys and paths in this scenario.
		err = repo.UpdateDelegationKeys(c.Scope(), td.AddKeys, []string{}, td.NewThreshold)
		if err != nil {
			return err
		}
		return repo.UpdateDelegationPaths(c.Scope(), td.AddPaths, []string{}, false)
	case changelist.ActionUpdate:
		td := changelist.TUFDelegation{}
		err := json.Unmarshal(c.Content(), &td)
		if err != nil {
			return err
		}
		if data.IsWildDelegation(c.Scope()) {
			return repo.PurgeDelegationKeys(c.Scope(), td.RemoveKeys)
		}

		delgRole, err := repo.GetDelegationRole(c.Scope())
		if err != nil {
			return err
		}

		// We need to translate the keys from canonical ID to TUF ID for compatibility
		canonicalToTUFID := make(map[string]string)
		for tufID, pubKey := range delgRole.Keys {
			canonicalID, err := utils.CanonicalKeyID(pubKey)
			if err != nil {
				return err
			}
			canonicalToTUFID[canonicalID] = tufID
		}

		removeTUFKeyIDs := []string{}
		for _, canonID := range td.RemoveKeys {
			removeTUFKeyIDs = append(removeTUFKeyIDs, canonicalToTUFID[canonID])
		}

		err = repo.UpdateDelegationKeys(c.Scope(), td.AddKeys, removeTUFKeyIDs, td.NewThreshold)
		if err != nil {
			return err
		}
		return repo.UpdateDelegationPaths(c.Scope(), td.AddPaths, td.RemovePaths, td.ClearAllPaths)
	case changelist.ActionDelete:
		return repo.DeleteDelegation(c.Scope())
	default:
		return fmt.Errorf("unsupported action against delegations: %s", c.Action())
	}
}

func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error {
	var err error
	switch c.Action() {
	case changelist.ActionCreate:
		logrus.Debug("changelist add: ", c.Path())
		meta := &data.FileMeta{}
		err = json.Unmarshal(c.Content(), meta)
		if err != nil {
			return err
		}
		files := data.Files{c.Path(): *meta}

		// Attempt to add the target to this role
		if _, err = repo.AddTargets(c.Scope(), files); err != nil {
			logrus.Errorf("couldn't add target to %s: %s", c.Scope(), err.Error())
		}

	case changelist.ActionDelete:
		logrus.Debug("changelist remove: ", c.Path())

		// Attempt to remove the target from this role
		if err = repo.RemoveTargets(c.Scope(), c.Path()); err != nil {
			logrus.Errorf("couldn't remove target from %s: %s", c.Scope(), err.Error())
		}

	default:
		err = fmt.Errorf("action not yet supported: %s", c.Action())
	}
	return err
}

func applyRootChange(repo *tuf.Repo, c changelist.Change) error {
	var err error
	switch c.Type() {
	case changelist.TypeBaseRole:
		err = applyRootRoleChange(repo, c)
	default:
		err = fmt.Errorf("type of root change not yet supported: %s", c.Type())
	}
	return err // might be nil
}

func applyRootRoleChange(repo *tuf.Repo, c changelist.Change) error {
	switch c.Action() {
	case changelist.ActionCreate:
		// replaces all keys for a role
		d := &changelist.TUFRootData{}
		err := json.Unmarshal(c.Content(), d)
		if err != nil {
			return err
		}
		err = repo.ReplaceBaseKeys(d.RoleName, d.Keys...)
		if err != nil {
			return err
		}
	default:
		return fmt.Errorf("action not yet supported for root: %s", c.Action())
	}
	return nil
}

func nearExpiry(r data.SignedCommon) bool {
	plus6mo := time.Now().AddDate(0, 6, 0)
	return r.Expires.Before(plus6mo)
}

func warnRolesNearExpiry(r *tuf.Repo) {
	// get every role and its respective signed common and call nearExpiry on it
	// Root check
	if nearExpiry(r.Root.Signed.SignedCommon) {
		logrus.Warn("root is nearing expiry, you should re-sign the role metadata")
	}
	// Targets and delegations check
	for role, signedTOrD := range r.Targets {
		// signedTOrD is of type *data.SignedTargets
		if nearExpiry(signedTOrD.Signed.SignedCommon) {
			logrus.Warn(role, " metadata is nearing expiry, you should re-sign the role metadata")
		}
	}
	// Snapshot check
	if nearExpiry(r.Snapshot.Signed.SignedCommon) {
		logrus.Warn("snapshot is nearing expiry, you should re-sign the role metadata")
	}
	// We do not need to worry about the timestamp role; the notary signer will re-sign with the timestamp key
}

// Fetches a public key from a remote store, given a gun and role
func getRemoteKey(role data.RoleName, remote store.RemoteStore) (data.PublicKey, error) {
	rawPubKey, err := remote.GetKey(role)
	if err != nil {
		return nil, err
	}

	pubKey, err := data.UnmarshalPublicKey(rawPubKey)
	if err != nil {
		return nil, err
	}

	return pubKey, nil
}

// Rotates a private key in a remote store and returns the public key component
func rotateRemoteKey(role data.RoleName, remote store.RemoteStore) (data.PublicKey, error) {
	rawPubKey, err := remote.RotateKey(role)
	if err != nil {
		return nil, err
	}

	pubKey, err := data.UnmarshalPublicKey(rawPubKey)
	if err != nil {
		return nil, err
	}

	return pubKey, nil
}

// signs and serializes the metadata for a canonical role in a TUF repo to JSON
func serializeCanonicalRole(tufRepo *tuf.Repo, role data.RoleName, extraSigningKeys data.KeyList) (out []byte, err error) {
	var s *data.Signed
	switch {
	case role == data.CanonicalRootRole:
		s, err = tufRepo.SignRoot(data.DefaultExpires(role), extraSigningKeys)
	case role == data.CanonicalSnapshotRole:
		s, err = tufRepo.SignSnapshot(data.DefaultExpires(role))
	case tufRepo.Targets[role] != nil:
		s, err = tufRepo.SignTargets(
			role, data.DefaultExpires(data.CanonicalTargetsRole))
	default:
		err = fmt.Errorf("%s is not a supported role to sign on the client", role)
	}

	if err != nil {
		return
	}

	return json.Marshal(s)
}

func getAllPrivKeys(rootKeyIDs []string, cryptoService signed.CryptoService) ([]data.PrivateKey, error) {
	if cryptoService == nil {
		return nil, fmt.Errorf("no crypto service available to get private keys from")
	}

	privKeys := make([]data.PrivateKey, 0, len(rootKeyIDs))
	for _, keyID := range rootKeyIDs {
		privKey, _, err := cryptoService.GetPrivateKey(keyID)
		if err != nil {
			return nil, err
		}
		privKeys = append(privKeys, privKey)
	}
	if len(privKeys) == 0 {
		var rootKeyID string
		rootKeyList := cryptoService.ListKeys(data.CanonicalRootRole)
		if len(rootKeyList) == 0 {
			rootPublicKey, err := cryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey)
			if err != nil {
				return nil, err
			}
			rootKeyID = rootPublicKey.ID()
		} else {
			rootKeyID = rootKeyList[0]
		}
		privKey, _, err := cryptoService.GetPrivateKey(rootKeyID)
		if err != nil {
			return nil, err
		}
		privKeys = append(privKeys, privKey)
	}

	return privKeys, nil
}
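For orientation, getRemoteStore pins the metadata endpoint layout: TUF files for a GUN live under <server>/v2/<gun>/_trust/tuf/ on the notary server. A minimal in-package test sketch (hypothetical, not part of the vendored code, using a plain http.DefaultTransport instead of an authenticated RoundTripper):

package client

import (
	"net/http"
	"testing"
)

// TestGetRemoteStoreSketch only checks that constructing the HTTP-backed
// remote store for a well-formed base URL and GUN succeeds.
func TestGetRemoteStoreSketch(t *testing.T) {
	remote, err := getRemoteStore("https://notary.example.com", "docker.io/library/alpine", http.DefaultTransport)
	if err != nil {
		t.Fatalf("unexpected error constructing remote store: %v", err)
	}
	if remote == nil {
		t.Fatal("expected a non-nil remote store")
	}
}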
150
vendor/github.com/theupdateframework/notary/client/interface.go
generated
vendored
Normal file
@ -0,0 +1,150 @@
package client

import (
	"github.com/theupdateframework/notary/client/changelist"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/signed"
)

// ReadOnly represents the set of options that must be supported over a TUF repo for
// reading
type ReadOnly interface {
	// ListTargets lists all targets for the current repository. The list of
	// roles should be passed in order from highest to lowest priority.
	//
	// IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x",
	// "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
	// its entries will be strictly shadowed by those in other parts of the "targets/a"
	// subtree and also the "targets/x" subtree, as we will defer parsing it until
	// we explicitly reach it in our iteration of the provided list of roles.
	ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error)

	// GetTargetByName returns a target by the given name. If no roles are passed
	// it uses the targets role and does a search of the entire delegation
	// graph, finding the first entry in a breadth first search of the delegations.
	// If roles are passed, they should be passed in descending priority and
	// the target entry found in the subtree of the highest priority role
	// will be returned.
	// See the IMPORTANT section on ListTargets above. Those roles also apply here.
	GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error)

	// GetAllTargetMetadataByName searches the entire delegation role tree to find
	// the specified target by name for all roles, and returns a list of
	// TargetSignedStructs for each time it finds the specified target.
	// If given an empty string for a target name, it will return back all targets
	// signed into the repository in every role
	GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error)

	// ListRoles returns a list of RoleWithSignatures objects for this repo
	// This represents the latest metadata for each role in this repo
	ListRoles() ([]RoleWithSignatures, error)

	// GetDelegationRoles returns the keys and roles of the repository's delegations
	// Also converts key IDs to canonical key IDs to keep consistent with signing prompts
	GetDelegationRoles() ([]data.Role, error)
}

// Repository represents the set of options that must be supported over a TUF repo
// for both reading and writing.
type Repository interface {
	ReadOnly

	// ------------------- Publishing operations -------------------

	// GetGUN returns the GUN associated with the repository
	GetGUN() data.GUN

	// SetLegacyVersions sets the number of versions back to fetch roots to sign with
	SetLegacyVersions(int)

	// ----- General management operations -----

	// Initialize creates a new repository by using rootKey as the root Key for the
	// TUF repository. The remote store/server must be reachable (and is asked to
	// generate a timestamp key and possibly other serverManagedRoles), but the
	// created repository result is only stored on local cache, not published to
	// the remote store. To do that, use r.Publish() eventually.
	Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error

	// InitializeWithCertificate initializes the repository with root keys and their
	// corresponding certificates
	InitializeWithCertificate(rootKeyIDs []string, rootCerts []data.PublicKey, serverManagedRoles ...data.RoleName) error

	// Publish pushes the local changes in signed material to the remote notary-server
	// Conceptually it performs an operation similar to a `git rebase`
	Publish() error

	// ----- Target Operations -----

	// AddTarget creates new changelist entries to add a target to the given roles
	// in the repository when the changelist gets applied at publish time.
	// If roles are unspecified, the default role is "targets"
	AddTarget(target *Target, roles ...data.RoleName) error

	// RemoveTarget creates new changelist entries to remove a target from the given
	// roles in the repository when the changelist gets applied at publish time.
	// If roles are unspecified, the default role is "targets".
	RemoveTarget(targetName string, roles ...data.RoleName) error

	// ----- Changelist operations -----

	// GetChangelist returns the list of the repository's unpublished changes
	GetChangelist() (changelist.Changelist, error)

	// ----- Role operations -----

	// AddDelegation creates changelist entries to add provided delegation public keys and paths.
	// This method composes AddDelegationRoleAndKeys and AddDelegationPaths (each creates one changelist entry if called).
	AddDelegation(name data.RoleName, delegationKeys []data.PublicKey, paths []string) error

	// AddDelegationRoleAndKeys creates a changelist entry to add provided delegation public keys.
	// This method is the simplest way to create a new delegation, because the delegation must have at least
	// one key upon creation to be valid, since we will reject the changelist while validating the threshold.
	AddDelegationRoleAndKeys(name data.RoleName, delegationKeys []data.PublicKey) error

	// AddDelegationPaths creates a changelist entry to add provided paths to an existing delegation.
	// This method cannot create a new delegation itself because the role must meet the key threshold upon
	// creation.
	AddDelegationPaths(name data.RoleName, paths []string) error

	// RemoveDelegationKeysAndPaths creates changelist entries to remove provided delegation key IDs and
	// paths. This method composes RemoveDelegationPaths and RemoveDelegationKeys (each creates one
	// changelist entry if called).
	RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error

	// RemoveDelegationRole creates a changelist to remove all paths and keys from a role, and delete the
	// role in its entirety.
	RemoveDelegationRole(name data.RoleName) error

	// RemoveDelegationPaths creates a changelist entry to remove provided paths from an existing delegation.
	RemoveDelegationPaths(name data.RoleName, paths []string) error

	// RemoveDelegationKeys creates a changelist entry to remove provided keys from an existing delegation.
	// When this changelist is applied, if the specified keys are the only keys left in the role,
	// the role itself will be deleted in its entirety.
	// It can also delete a key from all delegations under a parent using a name
	// with a wildcard at the end.
	RemoveDelegationKeys(name data.RoleName, keyIDs []string) error

	// ClearDelegationPaths creates a changelist entry to remove all paths from an existing delegation.
	ClearDelegationPaths(name data.RoleName) error

	// ----- Witness and other re-signing operations -----

	// Witness creates change objects to witness (i.e. re-sign) the given
	// roles on the next publish. One change is created per role
	Witness(roles ...data.RoleName) ([]data.RoleName, error)

	// ----- Key Operations -----

	// RotateKey removes all existing keys associated with the role. If no keys are
	// specified in keyList, then this creates and adds one new key or delegates
	// managing the key to the server. If keys are specified in keyList, then they are
	// used for signing the role.
	// These changes are staged in a changelist until publish is called.
	RotateKey(role data.RoleName, serverManagesKey bool, keyList []string) error

	// GetCryptoService is the getter for the repository's CryptoService, which is used
	// to sign all updates.
	GetCryptoService() signed.CryptoService
}
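A consumer-side sketch of the read-only half of this interface (not part of the vendored code), using the role-priority behavior documented on ListTargets; the delegation name here is illustrative:

package clientexample

import (
	"fmt"

	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/tuf/data"
)

// PrintTargets lists targets with a hypothetical "targets/releases"
// delegation taking priority over the base targets role, per the shadowing
// rules documented on ListTargets.
func PrintTargets(ro client.ReadOnly) error {
	tgts, err := ro.ListTargets("targets/releases", data.CanonicalTargetsRole)
	if err != nil {
		return err
	}
	for _, t := range tgts {
		fmt.Printf("%s (%d bytes) from role %s\n", t.Name, t.Length, t.Role)
	}
	return nil
}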
257
vendor/github.com/theupdateframework/notary/client/reader.go
generated
vendored
Normal file
@ -0,0 +1,257 @@
package client

import (
	"fmt"

	canonicaljson "github.com/docker/go/canonical/json"
	store "github.com/theupdateframework/notary/storage"
	"github.com/theupdateframework/notary/tuf"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/utils"
)

// Target represents a simplified version of the data TUF operates on, so external
// applications don't have to depend on TUF data types.
type Target struct {
	Name   string                    // the name of the target
	Hashes data.Hashes               // the hashes of the target
	Length int64                     // the size in bytes of the target
	Custom *canonicaljson.RawMessage // the custom data provided to describe the file at TARGETPATH
}

// TargetWithRole represents a Target that exists in a particular role - this is
// produced by ListTargets and GetTargetByName
type TargetWithRole struct {
	Target
	Role data.RoleName
}

// TargetSignedStruct is a struct that contains a Target, the role it was found in, and the list of signatures for that role
type TargetSignedStruct struct {
	Role       data.DelegationRole
	Target     Target
	Signatures []data.Signature
}

// ErrNoSuchTarget is returned when no valid trust data is found.
type ErrNoSuchTarget string

func (f ErrNoSuchTarget) Error() string {
	return fmt.Sprintf("no valid trust data for %s", string(f))
}

// RoleWithSignatures is a Role with its associated signatures
type RoleWithSignatures struct {
	Signatures []data.Signature
	data.Role
}

// NewReadOnly is the base method that returns a new notary repository for reading.
// It expects an initialized cache. In case of a nil remote store, a default
// offline store is used.
func NewReadOnly(repo *tuf.Repo) ReadOnly {
	return &reader{tufRepo: repo}
}

type reader struct {
	tufRepo *tuf.Repo
}

// ListTargets lists all targets for the current repository. The list of
// roles should be passed in order from highest to lowest priority.
//
// IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x",
// "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
// its entries will be strictly shadowed by those in other parts of the "targets/a"
// subtree and also the "targets/x" subtree, as we will defer parsing it until
// we explicitly reach it in our iteration of the provided list of roles.
func (r *reader) ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) {
	if len(roles) == 0 {
		roles = []data.RoleName{data.CanonicalTargetsRole}
	}
	targets := make(map[string]*TargetWithRole)
	for _, role := range roles {
		// Define an array of roles to skip for this walk (see IMPORTANT comment above)
		skipRoles := utils.RoleNameSliceRemove(roles, role)

		// Define a visitor function to populate the targets map in priority order
		listVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
			// We found targets so we should try to add them to our targets map
			for targetName, targetMeta := range tgt.Signed.Targets {
				// Follow the priority by not overriding previously set targets
				// and check that this path is valid with this role
				if _, ok := targets[targetName]; ok || !validRole.CheckPaths(targetName) {
					continue
				}
				targets[targetName] = &TargetWithRole{
					Target: Target{
						Name:   targetName,
						Hashes: targetMeta.Hashes,
						Length: targetMeta.Length,
						Custom: targetMeta.Custom,
					},
					Role: validRole.Name,
				}
			}
			return nil
		}

		r.tufRepo.WalkTargets("", role, listVisitorFunc, skipRoles...)
	}

	var targetList []*TargetWithRole
	for _, v := range targets {
		targetList = append(targetList, v)
	}

	return targetList, nil
}

// GetTargetByName returns a target by the given name. If no roles are passed
// it uses the targets role and does a search of the entire delegation
// graph, finding the first entry in a breadth first search of the delegations.
// If roles are passed, they should be passed in descending priority and
// the target entry found in the subtree of the highest priority role
// will be returned.
// See the IMPORTANT section on ListTargets above. Those roles also apply here.
func (r *reader) GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) {
	if len(roles) == 0 {
		roles = append(roles, data.CanonicalTargetsRole)
	}
	var resultMeta data.FileMeta
	var resultRoleName data.RoleName
	var foundTarget bool
	for _, role := range roles {
		// Define an array of roles to skip for this walk (see IMPORTANT comment above)
		skipRoles := utils.RoleNameSliceRemove(roles, role)

		// Define a visitor function to find the specified target
		getTargetVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
			if tgt == nil {
				return nil
			}
			// We found the target and validated path compatibility in our walk,
			// so we should stop our walk and set the resultMeta and resultRoleName variables
			if resultMeta, foundTarget = tgt.Signed.Targets[name]; foundTarget {
				resultRoleName = validRole.Name
				return tuf.StopWalk{}
			}
			return nil
		}
		// Check that we didn't error, and that we assigned to our target
		if err := r.tufRepo.WalkTargets(name, role, getTargetVisitorFunc, skipRoles...); err == nil && foundTarget {
			return &TargetWithRole{Target: Target{Name: name, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom}, Role: resultRoleName}, nil
		}
	}
	return nil, ErrNoSuchTarget(name)
}

// GetAllTargetMetadataByName searches the entire delegation role tree to find the specified target by name for all
// roles, and returns a list of TargetSignedStructs for each time it finds the specified target.
// If given an empty string for a target name, it will return back all targets signed into the repository in every role
func (r *reader) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) {
	var targetInfoList []TargetSignedStruct

	// Define a visitor function to find the specified target
	getAllTargetInfoByNameVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
		if tgt == nil {
			return nil
		}
		// We found a target and validated path compatibility in our walk,
		// so add it to our list if we have a match:
		// if we have an empty name, add all targets, else check if we have it
		var targetMetaToAdd data.Files
		if name == "" {
			targetMetaToAdd = tgt.Signed.Targets
		} else {
			if meta, ok := tgt.Signed.Targets[name]; ok {
				targetMetaToAdd = data.Files{name: meta}
			}
		}

		for targetName, resultMeta := range targetMetaToAdd {
			targetInfo := TargetSignedStruct{
				Role:       validRole,
				Target:     Target{Name: targetName, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom},
				Signatures: tgt.Signatures,
			}
			targetInfoList = append(targetInfoList, targetInfo)
		}
		// continue walking to all child roles
		return nil
	}

	// Check that we didn't error, and that we found the target at least once
	if err := r.tufRepo.WalkTargets(name, "", getAllTargetInfoByNameVisitorFunc); err != nil {
		return nil, err
	}
	if len(targetInfoList) == 0 {
		return nil, ErrNoSuchTarget(name)
	}
	return targetInfoList, nil
}

// ListRoles returns a list of RoleWithSignatures objects for this repo
// This represents the latest metadata for each role in this repo
func (r *reader) ListRoles() ([]RoleWithSignatures, error) {
	// Get all role info from our updated keysDB, can be empty
	roles := r.tufRepo.GetAllLoadedRoles()

	var roleWithSigs []RoleWithSignatures

	// Populate RoleWithSignatures with Role from keysDB and signatures from TUF metadata
	for _, role := range roles {
		roleWithSig := RoleWithSignatures{Role: *role, Signatures: nil}
		switch role.Name {
		case data.CanonicalRootRole:
			roleWithSig.Signatures = r.tufRepo.Root.Signatures
		case data.CanonicalTargetsRole:
			roleWithSig.Signatures = r.tufRepo.Targets[data.CanonicalTargetsRole].Signatures
		case data.CanonicalSnapshotRole:
			roleWithSig.Signatures = r.tufRepo.Snapshot.Signatures
		case data.CanonicalTimestampRole:
			roleWithSig.Signatures = r.tufRepo.Timestamp.Signatures
		default:
			if !data.IsDelegation(role.Name) {
				continue
			}
			if _, ok := r.tufRepo.Targets[role.Name]; ok {
				// We'll only find a signature if we've published any targets with this delegation
				roleWithSig.Signatures = r.tufRepo.Targets[role.Name].Signatures
			}
		}
		roleWithSigs = append(roleWithSigs, roleWithSig)
	}
	return roleWithSigs, nil
}

// GetDelegationRoles returns the keys and roles of the repository's delegations
// Also converts key IDs to canonical key IDs to keep consistent with signing prompts
func (r *reader) GetDelegationRoles() ([]data.Role, error) {
	// All top level delegations (ex: targets/level1) are stored exclusively in targets.json
	_, ok := r.tufRepo.Targets[data.CanonicalTargetsRole]
	if !ok {
		return nil, store.ErrMetaNotFound{Resource: data.CanonicalTargetsRole.String()}
	}

	// make a copy for traversing nested delegations
	allDelegations := []data.Role{}

	// Define a visitor function to populate the delegations list and translate their key IDs to canonical IDs
	delegationCanonicalListVisitor := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
		// For the return list, update with a copy that includes canonicalKeyIDs
		// These aren't validated by the validRole
		canonicalDelegations, err := translateDelegationsToCanonicalIDs(tgt.Signed.Delegations)
		if err != nil {
			return err
		}
		allDelegations = append(allDelegations, canonicalDelegations...)
		return nil
	}
	err := r.tufRepo.WalkTargets("", "", delegationCanonicalListVisitor)
	if err != nil {
		return nil, err
	}
	return allDelegations, nil
}
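GetAllTargetMetadataByName is the natural hook for auditing which roles vouch for a given target; a sketch (not part of the vendored code) with the target name supplied by the caller:

package clientexample

import (
	"fmt"

	"github.com/theupdateframework/notary/client"
)

// WhoSigned reports every role that has signed the named target, along with
// its signature count; ErrNoSuchTarget is returned when the target is absent.
func WhoSigned(ro client.ReadOnly, targetName string) error {
	infos, err := ro.GetAllTargetMetadataByName(targetName)
	if err != nil {
		return err
	}
	for _, info := range infos {
		fmt.Printf("role %s signed %s with %d signature(s)\n",
			info.Role.Name, info.Target.Name, len(info.Signatures))
	}
	return nil
}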
19
vendor/github.com/theupdateframework/notary/client/repo.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
//go:build !pkcs11
// +build !pkcs11

package client

import (
	"fmt"

	"github.com/theupdateframework/notary"
	"github.com/theupdateframework/notary/trustmanager"
)

func getKeyStores(baseDir string, retriever notary.PassRetriever) ([]trustmanager.KeyStore, error) {
	fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
	if err != nil {
		return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir)
	}
	return []trustmanager.KeyStore{fileKeyStore}, nil
}
26
vendor/github.com/theupdateframework/notary/client/repo_pkcs11.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
//go:build pkcs11
// +build pkcs11

package client

import (
	"fmt"

	"github.com/theupdateframework/notary"
	"github.com/theupdateframework/notary/trustmanager"
	"github.com/theupdateframework/notary/trustmanager/yubikey"
)

func getKeyStores(baseDir string, retriever notary.PassRetriever) ([]trustmanager.KeyStore, error) {
	fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
	if err != nil {
		return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir)
	}

	keyStores := []trustmanager.KeyStore{fileKeyStore}
	yubiKeyStore, _ := yubikey.NewYubiStore(fileKeyStore, retriever)
	if yubiKeyStore != nil {
		keyStores = []trustmanager.KeyStore{yubiKeyStore, fileKeyStore}
	}
	return keyStores, nil
}
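The two getKeyStores variants above are selected at compile time: exactly one of repo.go and repo_pkcs11.go is built, depending on whether the pkcs11 tag is passed (go build -tags pkcs11). A minimal sketch of the same build-constraint pattern with a hypothetical helper, not part of the vendored code:

//go:build pkcs11
// +build pkcs11

package client

// hardwareKeyStoreEnabled (hypothetical) reports true only in binaries built
// with `-tags pkcs11`; a sibling file carrying the `!pkcs11` constraint would
// provide the false-returning default.
func hardwareKeyStoreEnabled() bool { return true }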
463
vendor/github.com/theupdateframework/notary/client/tufclient.go
generated
vendored
Normal file
@ -0,0 +1,463 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/theupdateframework/notary"
|
||||
"github.com/theupdateframework/notary/cryptoservice"
|
||||
store "github.com/theupdateframework/notary/storage"
|
||||
"github.com/theupdateframework/notary/trustpinning"
|
||||
"github.com/theupdateframework/notary/tuf"
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
"github.com/theupdateframework/notary/tuf/signed"
|
||||
)
|
||||
|
||||
// tufClient is a usability wrapper around a raw TUF repo
|
||||
type tufClient struct {
|
||||
remote store.RemoteStore
|
||||
cache store.MetadataStore
|
||||
oldBuilder tuf.RepoBuilder
|
||||
newBuilder tuf.RepoBuilder
|
||||
}
|
||||
|
||||
// Update performs an update to the TUF repo as defined by the TUF spec
|
||||
func (c *tufClient) Update() (*tuf.Repo, *tuf.Repo, error) {
|
||||
// 1. Get timestamp
|
||||
// a. If timestamp error (verification, expired, etc...) download new root and return to 1.
|
||||
// 2. Check if local snapshot is up to date
|
||||
// a. If out of date, get updated snapshot
|
||||
// i. If snapshot error, download new root and return to 1.
|
||||
// 3. Check if root correct against snapshot
|
||||
// a. If incorrect, download new root and return to 1.
|
||||
// 4. Iteratively download and search targets and delegations to find target meta
|
||||
logrus.Debug("updating TUF client")
|
||||
err := c.update()
|
||||
if err != nil {
|
||||
logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
|
||||
logrus.Debug("Resetting the TUF builder...")
|
||||
|
||||
c.newBuilder = c.newBuilder.BootstrapNewBuilder()
|
||||
|
||||
if err := c.updateRoot(); err != nil {
|
||||
logrus.Debug("Client Update (Root): ", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
// If we error again, we now have the latest root and just want to fail
|
||||
// out as there's no expectation the problem can be resolved automatically
|
||||
logrus.Debug("retrying TUF client update")
|
||||
if err := c.update(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
return c.newBuilder.Finish()
|
||||
}
|
||||
|
||||
func (c *tufClient) update() error {
|
||||
if err := c.downloadTimestamp(); err != nil {
|
||||
logrus.Debugf("Client Update (Timestamp): %s", err.Error())
|
||||
return err
|
||||
}
|
||||
if err := c.downloadSnapshot(); err != nil {
|
||||
logrus.Debugf("Client Update (Snapshot): %s", err.Error())
|
||||
return err
|
||||
}
|
||||
// will always need top level targets at a minimum
|
||||
if err := c.downloadTargets(); err != nil {
|
||||
logrus.Debugf("Client Update (Targets): %s", err.Error())
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateRoot checks if there is a newer version of the root available, and if so
|
||||
// downloads all intermediate root files to allow proper key rotation.
|
||||
func (c *tufClient) updateRoot() error {
|
||||
// Get current root version
|
||||
currentRootConsistentInfo := c.oldBuilder.GetConsistentInfo(data.CanonicalRootRole)
|
||||
currentVersion := c.oldBuilder.GetLoadedVersion(currentRootConsistentInfo.RoleName)
|
||||
|
||||
// Get new root version
|
||||
raw, err := c.downloadRoot()
|
||||
|
||||
switch err.(type) {
|
||||
case *trustpinning.ErrRootRotationFail:
|
||||
// Rotation errors are okay since we haven't yet downloaded
|
||||
// all intermediate root files
|
||||
break
|
||||
case nil:
|
||||
// No error updating root - we were at most 1 version behind
|
||||
return nil
|
||||
default:
|
||||
// Return any non-rotation error.
|
||||
return err
|
||||
}
|
||||
|
||||
// Load current version into newBuilder
|
||||
currentRaw, err := c.cache.GetSized(data.CanonicalRootRole.String(), -1)
|
||||
if err != nil {
|
||||
logrus.Debugf("error loading %d.%s: %s", currentVersion, data.CanonicalRootRole, err)
|
||||
return err
|
||||
}
|
||||
if err := c.newBuilder.LoadRootForUpdate(currentRaw, currentVersion, false); err != nil {
|
||||
logrus.Debugf("%d.%s is invalid: %s", currentVersion, data.CanonicalRootRole, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Extract newest version number
|
||||
signedRoot := &data.Signed{}
|
||||
if err := json.Unmarshal(raw, signedRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
newestRoot, err := data.RootFromSigned(signedRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newestVersion := newestRoot.Signed.SignedCommon.Version
|
||||
|
||||
// Update from current + 1 (current already loaded) to newest - 1 (newest loaded below)
|
||||
if err := c.updateRootVersions(currentVersion+1, newestVersion-1); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Already downloaded newest, verify it against newest - 1
|
||||
if err := c.newBuilder.LoadRootForUpdate(raw, newestVersion, true); err != nil {
|
||||
logrus.Debugf("downloaded %d.%s is invalid: %s", newestVersion, data.CanonicalRootRole, err)
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("successfully verified downloaded %d.%s", newestVersion, data.CanonicalRootRole)
|
||||
|
||||
// Write newest to cache
|
||||
if err := c.cache.Set(data.CanonicalRootRole.String(), raw); err != nil {
|
||||
logrus.Debugf("unable to write %d.%s to cache: %s", newestVersion, data.CanonicalRootRole, err)
|
||||
}
|
||||
logrus.Debugf("finished updating root files")
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateRootVersions updates the root from it's current version to a target, rotating keys
|
||||
// as they are found
|
||||
func (c *tufClient) updateRootVersions(fromVersion, toVersion int) error {
|
||||
for v := fromVersion; v <= toVersion; v++ {
|
||||
logrus.Debugf("updating root from version %d to version %d, currently fetching %d", fromVersion, toVersion, v)
|
||||
|
||||
versionedRole := fmt.Sprintf("%d.%s", v, data.CanonicalRootRole)
|
||||
|
||||
raw, err := c.remote.GetSized(versionedRole, -1)
|
||||
if err != nil {
|
||||
logrus.Debugf("error downloading %s: %s", versionedRole, err)
|
||||
return err
|
||||
}
|
||||
if err := c.newBuilder.LoadRootForUpdate(raw, v, false); err != nil {
|
||||
logrus.Debugf("downloaded %s is invalid: %s", versionedRole, err)
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("successfully verified downloaded %s", versionedRole)
|
||||
}
|
||||
return nil
|
||||
}
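// Editor's note — a minimal sketch (not part of the upstream file) of the
// versioned role name convention used above, "<version>.<role>":
//
//	name := fmt.Sprintf("%d.%s", 3, data.CanonicalRootRole) // "3.root"
//
// So a client with root version 2 updating to version 5 fetches "3.root"
// and "4.root" here before the newest root is verified by updateRoot.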
|
||||
|
||||
// downloadTimestamp is responsible for downloading the timestamp.json
|
||||
// Timestamps are special in that we ALWAYS attempt to download and only
|
||||
// use cache if the download fails (and the cache is still valid).
|
||||
func (c *tufClient) downloadTimestamp() error {
|
||||
logrus.Debug("Loading timestamp...")
|
||||
role := data.CanonicalTimestampRole
|
||||
consistentInfo := c.newBuilder.GetConsistentInfo(role)
|
||||
|
||||
// always get the remote timestamp, since it supersedes the local one
|
||||
cachedTS, cachedErr := c.cache.GetSized(role.String(), notary.MaxTimestampSize)
|
||||
_, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS)
|
||||
|
||||
// Check that there was no remote error, or that it was only a network problem.
// If there was a validation error, we should error out so we can download a new root or fail the update.
|
||||
switch remoteErr.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case store.ErrMetaNotFound, store.ErrServerUnavailable, store.ErrOffline, store.NetworkError:
|
||||
break
|
||||
default:
|
||||
return remoteErr
|
||||
}
|
||||
|
||||
// since it was a network error: get the cached timestamp, if it exists
|
||||
if cachedErr != nil {
|
||||
logrus.Debug("no cached or remote timestamp available")
|
||||
return remoteErr
|
||||
}
|
||||
|
||||
logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
|
||||
err := c.newBuilder.Load(role, cachedTS, 1, false)
|
||||
if err == nil {
|
||||
logrus.Debug("successfully verified cached timestamp")
|
||||
}
|
||||
return err
|
||||
|
||||
}
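// Editor's note — an illustrative summary (not upstream code) of the
// fallback policy implemented above:
//
//	remote OK                             -> use remote timestamp
//	network error, valid cached timestamp -> use cache, log a warning
//	network error, no cached timestamp    -> return the remote error
//	any other (e.g. validation) error     -> return it immediately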
|
||||
|
||||
// downloadSnapshot is responsible for downloading the snapshot.json
|
||||
func (c *tufClient) downloadSnapshot() error {
|
||||
logrus.Debug("Loading snapshot...")
|
||||
role := data.CanonicalSnapshotRole
|
||||
consistentInfo := c.newBuilder.GetConsistentInfo(role)
|
||||
|
||||
_, err := c.tryLoadCacheThenRemote(consistentInfo)
|
||||
return err
|
||||
}
|
||||
|
||||
// downloadTargets downloads all targets and delegated targets for the repository.
|
||||
// It uses a pre-order tree traversal as it's necessary to download parents first
|
||||
// to obtain the keys to validate children.
|
||||
func (c *tufClient) downloadTargets() error {
|
||||
toDownload := []data.DelegationRole{{
|
||||
BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole},
|
||||
Paths: []string{""},
|
||||
}}
|
||||
|
||||
for len(toDownload) > 0 {
|
||||
role := toDownload[0]
|
||||
toDownload = toDownload[1:]
|
||||
|
||||
consistentInfo := c.newBuilder.GetConsistentInfo(role.Name)
|
||||
if !consistentInfo.ChecksumKnown() {
|
||||
logrus.Debugf("skipping %s because there is no checksum for it", role.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
children, err := c.getTargetsFile(role, consistentInfo)
|
||||
switch err.(type) {
|
||||
case signed.ErrExpired, signed.ErrRoleThreshold:
|
||||
if role.Name == data.CanonicalTargetsRole {
|
||||
return err
|
||||
}
|
||||
logrus.Warnf("Error getting %s: %s", role.Name, err)
|
||||
break
|
||||
case nil:
|
||||
toDownload = append(children, toDownload...)
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
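// Editor's note — an illustrative trace (hypothetical role names) of the
// pre-order traversal above, where children are prepended to the queue:
//
//	queue: [targets]
//	pop targets, children [targets/a, targets/b] -> [targets/a, targets/b]
//	pop targets/a, child [targets/a/x]           -> [targets/a/x, targets/b]
//
// targets/a/x is therefore processed before targets/b, so a parent's keys
// are always loaded before any of its delegations are validated.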
|
||||
|
||||
func (c tufClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) {
|
||||
logrus.Debugf("Loading %s...", role.Name)
|
||||
tgs := &data.SignedTargets{}
|
||||
|
||||
raw, err := c.tryLoadCacheThenRemote(ci)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// we know it unmarshals because if `tryLoadCacheThenRemote` didn't fail, then
|
||||
// the raw has already been loaded into the builder
|
||||
json.Unmarshal(raw, tgs)
|
||||
return tgs.GetValidDelegations(role), nil
|
||||
}
|
||||
|
||||
// downloadRoot is responsible for downloading the root.json
|
||||
func (c *tufClient) downloadRoot() ([]byte, error) {
|
||||
role := data.CanonicalRootRole
|
||||
consistentInfo := c.newBuilder.GetConsistentInfo(role)
|
||||
|
||||
// We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle
|
||||
// since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch
|
||||
if !consistentInfo.ChecksumKnown() {
|
||||
logrus.Debugf("Loading root with no expected checksum")
|
||||
|
||||
// get the cached root, if it exists, just for version checking
|
||||
cachedRoot, _ := c.cache.GetSized(role.String(), -1)
|
||||
// prefer to download a new root
|
||||
return c.tryLoadRemote(consistentInfo, cachedRoot)
|
||||
}
|
||||
return c.tryLoadCacheThenRemote(consistentInfo)
|
||||
}
|
||||
|
||||
func (c *tufClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) {
|
||||
cachedTS, err := c.cache.GetSized(consistentInfo.RoleName.String(), consistentInfo.Length())
|
||||
if err != nil {
|
||||
logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName)
|
||||
return c.tryLoadRemote(consistentInfo, nil)
|
||||
}
|
||||
|
||||
if err = c.newBuilder.Load(consistentInfo.RoleName, cachedTS, 1, false); err == nil {
|
||||
logrus.Debugf("successfully verified cached %s", consistentInfo.RoleName)
|
||||
return cachedTS, nil
|
||||
}
|
||||
|
||||
logrus.Debugf("cached %s is invalid (must download): %s", consistentInfo.RoleName, err)
|
||||
return c.tryLoadRemote(consistentInfo, cachedTS)
|
||||
}
|
||||
|
||||
func (c *tufClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) {
|
||||
consistentName := consistentInfo.ConsistentName()
|
||||
raw, err := c.remote.GetSized(consistentName, consistentInfo.Length())
|
||||
if err != nil {
|
||||
logrus.Debugf("error downloading %s: %s", consistentName, err)
|
||||
return old, err
|
||||
}
|
||||
|
||||
// try to load the old data into the old builder - only use it to validate
|
||||
// versions if it loads successfully. If it errors, then the loaded version
|
||||
// will be 1
|
||||
c.oldBuilder.Load(consistentInfo.RoleName, old, 1, true)
|
||||
minVersion := c.oldBuilder.GetLoadedVersion(consistentInfo.RoleName)
|
||||
if err := c.newBuilder.Load(consistentInfo.RoleName, raw, minVersion, false); err != nil {
|
||||
logrus.Debugf("downloaded %s is invalid: %s", consistentName, err)
|
||||
return raw, err
|
||||
}
|
||||
logrus.Debugf("successfully verified downloaded %s", consistentName)
|
||||
if err := c.cache.Set(consistentInfo.RoleName.String(), raw); err != nil {
|
||||
logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err)
|
||||
}
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
// TUFLoadOptions are provided to LoadTUFRepo, which loads a TUF repo from cache,
|
||||
// from a remote store, or both
|
||||
type TUFLoadOptions struct {
|
||||
GUN data.GUN
|
||||
TrustPinning trustpinning.TrustPinConfig
|
||||
CryptoService signed.CryptoService
|
||||
Cache store.MetadataStore
|
||||
RemoteStore store.RemoteStore
|
||||
AlwaysCheckInitialized bool
|
||||
}
|
||||
|
||||
// bootstrapClient attempts to bootstrap a root.json to be used as the trust
|
||||
// anchor for a repository. The AlwaysCheckInitialized option indicates whether
|
||||
// we should always attempt to contact the server to determine if the repository
|
||||
// is initialized or not. If set to true, we will always attempt to download
|
||||
// and return an error if the remote repository errors.
|
||||
//
|
||||
// Populates a tuf.RepoBuilder with this root metadata. If the root metadata
|
||||
// downloaded is a newer version than what is on disk, then intermediate
|
||||
// versions will be downloaded and verified in order to rotate trusted keys
|
||||
// properly. Newer root metadata must always be signed with the previous
|
||||
// threshold and keys.
|
||||
//
|
||||
// Fails if the remote server is reachable and does not know the repo
|
||||
// (i.e. before any metadata has been published), in which case the error is
|
||||
// store.ErrMetaNotFound, or if the root metadata (from whichever source is used)
|
||||
// is not trusted.
|
||||
//
|
||||
// Returns a TUFClient for the remote server, which may not actually be
// operational (if the URL is invalid but a root.json is cached).
|
||||
func bootstrapClient(l TUFLoadOptions) (*tufClient, error) {
|
||||
minVersion := 1
|
||||
// the old root on disk should not be validated against any trust pinning configuration
|
||||
// because if we have an old root, it itself is the thing that pins trust
|
||||
oldBuilder := tuf.NewRepoBuilder(l.GUN, l.CryptoService, trustpinning.TrustPinConfig{})
|
||||
|
||||
// by default, we want to use the trust pinning configuration on any new root that we download
|
||||
newBuilder := tuf.NewRepoBuilder(l.GUN, l.CryptoService, l.TrustPinning)
|
||||
|
||||
// Try to read root from cache first. We will trust this root until we detect a problem
|
||||
// during update which will cause us to download a new root and perform a rotation.
|
||||
// If we have an old root, and it's valid, then we overwrite the newBuilder to be one
|
||||
// preloaded with the old root or one which uses the old root for trust bootstrapping.
|
||||
if rootJSON, err := l.Cache.GetSized(data.CanonicalRootRole.String(), store.NoSizeLimit); err == nil {
|
||||
// if we can't load the cached root, fail hard because that is how we pin trust
|
||||
if err := oldBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// again, the root on disk is the source of trust pinning, so use an empty trust
|
||||
// pinning configuration
|
||||
newBuilder = tuf.NewRepoBuilder(l.GUN, l.CryptoService, trustpinning.TrustPinConfig{})
|
||||
|
||||
if err := newBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, false); err != nil {
|
||||
// Ok, the old root is expired - we want to download a new one. But we want to use the
|
||||
// old root to verify the new root, so bootstrap a new builder with the old builder
|
||||
// but use the trustpinning to validate the new root
|
||||
minVersion = oldBuilder.GetLoadedVersion(data.CanonicalRootRole)
|
||||
newBuilder = oldBuilder.BootstrapNewBuilderWithNewTrustpin(l.TrustPinning)
|
||||
}
|
||||
}
|
||||
|
||||
if !newBuilder.IsLoaded(data.CanonicalRootRole) || l.AlwaysCheckInitialized {
|
||||
// We were not able to load a root from cache, or we are specifically
// checking for initialization of the repo.
|
||||
|
||||
// If the remote store was successfully set up, try to get the root from remote.
// We don't have any local data to determine the size of root, so try the maximum (though it is restricted to 100MB)
|
||||
tmpJSON, err := l.RemoteStore.GetSized(data.CanonicalRootRole.String(), store.NoSizeLimit)
|
||||
if err != nil {
|
||||
// we didn't have a root in cache and were unable to load one from
|
||||
// the server. Nothing we can do but error.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !newBuilder.IsLoaded(data.CanonicalRootRole) {
|
||||
// we always want to use the downloaded root if we couldn't load from cache
|
||||
if err := newBuilder.Load(data.CanonicalRootRole, tmpJSON, minVersion, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = l.Cache.Set(data.CanonicalRootRole.String(), tmpJSON)
|
||||
if err != nil {
|
||||
// if we can't write cache we should still continue, just log error
|
||||
logrus.Errorf("could not save root to cache: %s", err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We can only get here if no new root was downloaded (e.g. the remote fetch
// failed) and there was no usable root on disk
|
||||
if !newBuilder.IsLoaded(data.CanonicalRootRole) {
|
||||
return nil, ErrRepoNotInitialized{}
|
||||
}
|
||||
|
||||
return &tufClient{
|
||||
oldBuilder: oldBuilder,
|
||||
newBuilder: newBuilder,
|
||||
remote: l.RemoteStore,
|
||||
cache: l.Cache,
|
||||
}, nil
|
||||
}
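// Editor's note — an illustrative summary (not upstream code) of how the
// builders above are configured:
//
//	no cached root        -> newBuilder keeps l.TrustPinning; root must come from remote
//	cached root, valid    -> newBuilder reloads it with empty trust pinning
//	cached root, expired  -> newBuilder bootstrapped from oldBuilder, using
//	                         l.TrustPinning and minVersion = cached root version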
|
||||
|
||||
// LoadTUFRepo bootstraps a trust anchor (root.json) from cache (if provided) before updating
|
||||
// all the metadata for the repo from the remote (if provided). It loads a TUF repo from cache,
|
||||
// from a remote store, or both.
|
||||
func LoadTUFRepo(options TUFLoadOptions) (*tuf.Repo, *tuf.Repo, error) {
|
||||
// set some sane defaults, so nothing necessarily has to be provided
|
||||
if options.RemoteStore == nil {
|
||||
options.RemoteStore = store.OfflineStore{}
|
||||
}
|
||||
if options.Cache == nil {
|
||||
options.Cache = store.NewMemoryStore(nil)
|
||||
}
|
||||
if options.CryptoService == nil {
|
||||
options.CryptoService = cryptoservice.EmptyService
|
||||
}
|
||||
|
||||
c, err := bootstrapClient(options)
|
||||
if err != nil {
|
||||
if _, ok := err.(store.ErrMetaNotFound); ok {
|
||||
return nil, nil, ErrRepositoryNotExist{
|
||||
remote: options.RemoteStore.Location(),
|
||||
gun: options.GUN,
|
||||
}
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
repo, invalid, err := c.Update()
|
||||
if err != nil {
|
||||
// notFound.Resource may include a version or checksum so when the role is root,
|
||||
// it will be root, <version>.root or root.<checksum>.
|
||||
notFound, ok := err.(store.ErrMetaNotFound)
|
||||
isRoot, _ := regexp.MatchString(`\.?`+data.CanonicalRootRole.String()+`\.?`, notFound.Resource)
|
||||
if ok && isRoot {
|
||||
return nil, nil, ErrRepositoryNotExist{
|
||||
remote: options.RemoteStore.Location(),
|
||||
gun: options.GUN,
|
||||
}
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
warnRolesNearExpiry(repo)
|
||||
return repo, invalid, nil
|
||||
}
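// Editor's example — a minimal sketch of calling LoadTUFRepo (hypothetical
// GUN; the remote store and crypto service fall back to the defaults above):
//
//	repo, _, err := LoadTUFRepo(TUFLoadOptions{
//		GUN:   "docker.io/library/alpine",
//		Cache: store.NewMemoryStore(nil),
//	})
//	if err != nil {
//		// e.g. ErrRepositoryNotExist if the repo has never been published
//	}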
|
62
vendor/github.com/theupdateframework/notary/client/witness.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"github.com/theupdateframework/notary/client/changelist"
|
||||
"github.com/theupdateframework/notary/tuf"
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
)
|
||||
|
||||
// Witness creates change objects to witness (i.e. re-sign) the given
|
||||
// roles on the next publish. One change is created per role
|
||||
func (r *repository) Witness(roles ...data.RoleName) ([]data.RoleName, error) {
|
||||
var err error
|
||||
successful := make([]data.RoleName, 0, len(roles))
|
||||
for _, role := range roles {
|
||||
// scope is role
|
||||
c := changelist.NewTUFChange(
|
||||
changelist.ActionUpdate,
|
||||
role,
|
||||
changelist.TypeWitness,
|
||||
"",
|
||||
nil,
|
||||
)
|
||||
err = r.changelist.Add(c)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
successful = append(successful, role)
|
||||
}
|
||||
return successful, err
|
||||
}
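// Editor's example — a hedged sketch (hypothetical delegation name) of
// witnessing a role so it is re-signed on the next publish:
//
//	witnessed, err := repo.Witness(data.RoleName("targets/releases"))
//	// witnessed contains the roles for which a change was queued; the
//	// re-signing itself happens on the next publish of the changelist.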
|
||||
|
||||
func witnessTargets(repo *tuf.Repo, invalid *tuf.Repo, role data.RoleName) error {
|
||||
if r, ok := repo.Targets[role]; ok {
|
||||
// role is already valid, mark for re-signing/updating
|
||||
r.Dirty = true
|
||||
return nil
|
||||
}
|
||||
|
||||
if roleObj, err := repo.GetDelegationRole(role); err == nil && invalid != nil {
|
||||
// A role with a threshold > len(keys) is technically invalid, but we let it build in the builder because
|
||||
// we want to be able to download the role (which may still have targets on it), add more keys, and then
|
||||
// witness the role, thus bringing it back to valid. However, if no keys have been added before witnessing,
|
||||
// then it is still an invalid role, and can't be witnessed because nothing can bring it back to valid.
|
||||
if roleObj.Threshold > len(roleObj.Keys) {
|
||||
return data.ErrInvalidRole{
|
||||
Role: role,
|
||||
Reason: "role does not specify enough valid signing keys to meet its required threshold",
|
||||
}
|
||||
}
|
||||
if r, ok := invalid.Targets[role]; ok {
|
||||
// role is recognized but invalid, move to valid data and mark for re-signing
|
||||
repo.Targets[role] = r
|
||||
r.Dirty = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// role isn't recognized, even as invalid
|
||||
return data.ErrInvalidRole{
|
||||
Role: role,
|
||||
Reason: "this role is not known",
|
||||
}
|
||||
}
|
25
vendor/github.com/theupdateframework/notary/codecov.yml
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
codecov:
|
||||
notify:
|
||||
# 2 builds on circleci, 1 jenkins build
|
||||
after_n_builds: 3
|
||||
coverage:
|
||||
range: "50...100"
|
||||
status:
|
||||
# project will give us the diff in the total code coverage between a commit
|
||||
# and its parent
|
||||
project:
|
||||
default:
|
||||
target: auto
|
||||
threshold: "0.05%"
|
||||
# patch would give us the code coverage of the diff only
|
||||
patch: false
|
||||
# changes tells us if there are unexpected code coverage changes in other files
|
||||
# which were not changed by the diff
|
||||
changes: false
|
||||
ignore: # ignore testutils for coverage
|
||||
- "tuf/testutils/*"
|
||||
- "vendor/*"
|
||||
- "proto/*.pb.go"
|
||||
- "trustmanager/remoteks/*.pb.go"
|
||||
comment: off
|
||||
|
95
vendor/github.com/theupdateframework/notary/const.go
generated
vendored
Normal file
@ -0,0 +1,95 @@
|
||||
package notary
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// application wide constants
|
||||
const (
|
||||
// MaxDownloadSize is the maximum size we'll download for metadata if no limit is given
|
||||
MaxDownloadSize int64 = 100 << 20
|
||||
// MaxTimestampSize is the maximum size of timestamp metadata - 1MiB.
|
||||
MaxTimestampSize int64 = 1 << 20
|
||||
// MinRSABitSize is the minimum bit size for RSA keys allowed in notary
|
||||
MinRSABitSize = 2048
|
||||
// MinThreshold is the minimum threshold for roles; currently we do not support a higher threshold
|
||||
MinThreshold = 1
|
||||
// SHA256HexSize is the length, in characters, of a hex-encoded SHA256 digest
|
||||
SHA256HexSize = 64
|
||||
// SHA512HexSize is the length, in characters, of a hex-encoded SHA512 digest
|
||||
SHA512HexSize = 128
|
||||
// SHA256 is the name of SHA256 hash algorithm
|
||||
SHA256 = "sha256"
|
||||
// SHA512 is the name of SHA512 hash algorithm
|
||||
SHA512 = "sha512"
|
||||
// TrustedCertsDir is the directory, under the notary repo base directory, where trusted certs are stored
|
||||
TrustedCertsDir = "trusted_certificates"
|
||||
// PrivDir is the directory, under the notary repo base directory, where private keys are stored
|
||||
PrivDir = "private"
|
||||
// RootKeysSubdir is the subdirectory under PrivDir where root private keys are stored
|
||||
// DEPRECATED: The only reason we need this constant is compatibility with older versions
|
||||
RootKeysSubdir = "root_keys"
|
||||
// NonRootKeysSubdir is the subdirectory under PrivDir where non-root private keys are stored
|
||||
// DEPRECATED: The only reason we need this constant is compatibility with older versions
|
||||
NonRootKeysSubdir = "tuf_keys"
|
||||
// KeyExtension is the file extension to use for private key files
|
||||
KeyExtension = "key"
|
||||
|
||||
// Day is a duration of one day
|
||||
Day = 24 * time.Hour
|
||||
Year = 365 * Day
|
||||
|
||||
// NotaryRootExpiry is the duration representing the expiry time of the Root role
|
||||
NotaryRootExpiry = 10 * Year
|
||||
NotaryTargetsExpiry = 3 * Year
|
||||
NotarySnapshotExpiry = 3 * Year
|
||||
NotaryTimestampExpiry = 14 * Day
|
||||
|
||||
ConsistentMetadataCacheMaxAge = 30 * Day
|
||||
CurrentMetadataCacheMaxAge = 5 * time.Minute
|
||||
// CacheMaxAgeLimit is the generally recommended maximum age for Cache-Control headers
|
||||
// (one year, since one year is effectively forever in terms of internet
// content)
|
||||
CacheMaxAgeLimit = 1 * Year
|
||||
|
||||
MySQLBackend = "mysql"
|
||||
MemoryBackend = "memory"
|
||||
PostgresBackend = "postgres"
|
||||
SQLiteBackend = "sqlite3"
|
||||
RethinkDBBackend = "rethinkdb"
|
||||
FileBackend = "file"
|
||||
|
||||
DefaultImportRole = "delegation"
|
||||
|
||||
// HealthCheckKeyManagement and HealthCheckSigner are the grpc service names
// for "KeyManagement" and "Signer" respectively, which are used for health checks.
// HealthCheckOverall queries for the overall status of the server.
|
||||
HealthCheckKeyManagement = "grpc.health.v1.Health.KeyManagement"
|
||||
HealthCheckSigner = "grpc.health.v1.Health.Signer"
|
||||
HealthCheckOverall = "grpc.health.v1.Health.Overall"
|
||||
|
||||
// PrivExecPerms indicates the file permissions for directories,
// and PrivNoExecPerms those for files.
|
||||
PrivExecPerms = 0700
|
||||
PrivNoExecPerms = 0600
|
||||
|
||||
// DefaultPageSize is the default number of records to return from the changefeed
|
||||
DefaultPageSize = 100
|
||||
)
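// Editor's example — an illustrative use of the duration constants above
// (not part of the upstream file):
//
//	timestampExpiry := time.Now().Add(NotaryTimestampExpiry) // 14 days out
//	rootExpiry := time.Now().Add(NotaryRootExpiry)           // 10 years out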
|
||||
|
||||
// enum to use for setting and retrieving values from contexts
|
||||
const (
|
||||
CtxKeyMetaStore CtxKey = iota
|
||||
CtxKeyKeyAlgo
|
||||
CtxKeyCryptoSvc
|
||||
CtxKeyRepo
|
||||
)
|
||||
|
||||
// NotarySupportedBackends contains the backends we would like to support at present
|
||||
var NotarySupportedBackends = []string{
|
||||
MemoryBackend,
|
||||
MySQLBackend,
|
||||
SQLiteBackend,
|
||||
RethinkDBBackend,
|
||||
PostgresBackend,
|
||||
}
|
17
vendor/github.com/theupdateframework/notary/const_nowindows.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package notary
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// NotarySupportedSignals contains the signals we would like to capture:
|
||||
// - SIGUSR1, indicates an increment of the log level.
|
||||
// - SIGUSR2, indicates a decrement of the log level.
|
||||
var NotarySupportedSignals = []os.Signal{
|
||||
syscall.SIGUSR1,
|
||||
syscall.SIGUSR2,
|
||||
}
|
9
vendor/github.com/theupdateframework/notary/const_windows.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package notary
|
||||
|
||||
import "os"
|
||||
|
||||
// NotarySupportedSignals does not contain any signals, because SIGUSR1/2 are not supported on windows
|
||||
var NotarySupportedSignals = []os.Signal{}
|
29
vendor/github.com/theupdateframework/notary/cross.Dockerfile
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
FROM dockercore/golang-cross:1.12.15
|
||||
|
||||
RUN apt-get update && apt-get install -y \
|
||||
curl \
|
||||
clang \
|
||||
file \
|
||||
libsqlite3-dev \
|
||||
patch \
|
||||
tar \
|
||||
xz-utils \
|
||||
python \
|
||||
python-pip \
|
||||
--no-install-recommends \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN useradd -ms /bin/bash notary \
|
||||
&& pip install codecov \
|
||||
&& go get golang.org/x/lint/golint github.com/fzipp/gocyclo github.com/client9/misspell/cmd/misspell github.com/gordonklaus/ineffassign github.com/securego/gosec/cmd/gosec/...
|
||||
|
||||
ENV NOTARYDIR /go/src/github.com/theupdateframework/notary
|
||||
ENV GO111MODULE=on
|
||||
ENV GOFLAGS=-mod=vendor
|
||||
|
||||
COPY . ${NOTARYDIR}
|
||||
RUN chmod -R a+rw /go
|
||||
|
||||
WORKDIR ${NOTARYDIR}
|
||||
|
||||
# Note this cannot use alpine because of the MacOSX Cross SDK: the cctools there uses sys/cdefs.h and that cannot be used in alpine: http://wiki.musl-libc.org/wiki/FAQ#Q:_I.27m_trying_to_compile_something_against_musl_and_I_get_error_messages_about_sys.2Fcdefs.h
|
41
vendor/github.com/theupdateframework/notary/cryptoservice/certificate.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
package cryptoservice
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
"github.com/theupdateframework/notary/tuf/utils"
|
||||
)
|
||||
|
||||
// GenerateCertificate generates an X509 Certificate from a template, given a GUN and validity interval
|
||||
func GenerateCertificate(rootKey data.PrivateKey, gun data.GUN, startTime, endTime time.Time) (*x509.Certificate, error) {
|
||||
signer := rootKey.CryptoSigner()
|
||||
if signer == nil {
|
||||
return nil, fmt.Errorf("key type not supported for Certificate generation: %s", rootKey.Algorithm())
|
||||
}
|
||||
|
||||
return generateCertificate(signer, gun, startTime, endTime)
|
||||
}
|
||||
|
||||
func generateCertificate(signer crypto.Signer, gun data.GUN, startTime, endTime time.Time) (*x509.Certificate, error) {
|
||||
template, err := utils.NewCertificate(gun.String(), startTime, endTime)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create the certificate template for: %s (%v)", gun, err)
|
||||
}
|
||||
|
||||
derBytes, err := x509.CreateCertificate(rand.Reader, template, template, signer.Public(), signer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create the certificate for: %s (%v)", gun, err)
|
||||
}
|
||||
|
||||
cert, err := x509.ParseCertificate(derBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse the certificate for key: %s (%v)", gun, err)
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
162
vendor/github.com/theupdateframework/notary/cryptoservice/crypto_service.go
generated
vendored
Normal file
@ -0,0 +1,162 @@
|
||||
package cryptoservice
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/theupdateframework/notary"
|
||||
"github.com/theupdateframework/notary/trustmanager"
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
"github.com/theupdateframework/notary/tuf/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoValidPrivateKey is returned if a key being imported doesn't
|
||||
// look like a private key
|
||||
ErrNoValidPrivateKey = errors.New("no valid private key found")
|
||||
|
||||
// ErrRootKeyNotEncrypted is returned if a root key being imported is
|
||||
// unencrypted
|
||||
ErrRootKeyNotEncrypted = errors.New("only encrypted root keys may be imported")
|
||||
|
||||
// EmptyService is an empty crypto service
|
||||
EmptyService = NewCryptoService()
|
||||
)
|
||||
|
||||
// CryptoService implements Sign and Create, holding a specific GUN and keystore to
|
||||
// operate on
|
||||
type CryptoService struct {
|
||||
keyStores []trustmanager.KeyStore
|
||||
}
|
||||
|
||||
// NewCryptoService returns an instance of CryptoService
|
||||
func NewCryptoService(keyStores ...trustmanager.KeyStore) *CryptoService {
|
||||
return &CryptoService{keyStores: keyStores}
|
||||
}
|
||||
|
||||
// Create is used to generate keys for targets, snapshots and timestamps
|
||||
func (cs *CryptoService) Create(role data.RoleName, gun data.GUN, algorithm string) (data.PublicKey, error) {
|
||||
if algorithm == data.RSAKey {
|
||||
return nil, fmt.Errorf("%s keys can only be imported", data.RSAKey)
|
||||
}
|
||||
|
||||
privKey, err := utils.GenerateKey(algorithm)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate %s key: %v", algorithm, err)
|
||||
}
|
||||
logrus.Debugf("generated new %s key for role: %s and keyID: %s", algorithm, role.String(), privKey.ID())
|
||||
pubKey := data.PublicKeyFromPrivate(privKey)
|
||||
|
||||
return pubKey, cs.AddKey(role, gun, privKey)
|
||||
}
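// Editor's example — a hedged sketch of generating a new targets key
// (assumes some trustmanager.KeyStore in ks; the GUN is hypothetical):
//
//	cs := NewCryptoService(ks)
//	pub, err := cs.Create(data.CanonicalTargetsRole, "docker.io/library/alpine", data.ECDSAKey)
//	// pub.ID() is the new key's ID; the private key is stored in ks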
|
||||
|
||||
// GetPrivateKey returns a private key and role if present by ID.
|
||||
func (cs *CryptoService) GetPrivateKey(keyID string) (k data.PrivateKey, role data.RoleName, err error) {
|
||||
for _, ks := range cs.keyStores {
|
||||
if k, role, err = ks.GetKey(keyID); err == nil {
|
||||
return
|
||||
}
|
||||
switch err.(type) {
|
||||
case trustmanager.ErrPasswordInvalid, trustmanager.ErrAttemptsExceeded:
|
||||
return
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
return // returns whatever the final values were
|
||||
}
|
||||
|
||||
// GetKey returns a key by ID
|
||||
func (cs *CryptoService) GetKey(keyID string) data.PublicKey {
|
||||
privKey, _, err := cs.GetPrivateKey(keyID)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return data.PublicKeyFromPrivate(privKey)
|
||||
}
|
||||
|
||||
// GetKeyInfo returns role and GUN info of a key by ID
|
||||
func (cs *CryptoService) GetKeyInfo(keyID string) (trustmanager.KeyInfo, error) {
|
||||
for _, store := range cs.keyStores {
|
||||
if info, err := store.GetKeyInfo(keyID); err == nil {
|
||||
return info, nil
|
||||
}
|
||||
}
|
||||
return trustmanager.KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
|
||||
}
|
||||
|
||||
// RemoveKey deletes a key by ID
|
||||
func (cs *CryptoService) RemoveKey(keyID string) (err error) {
|
||||
for _, ks := range cs.keyStores {
|
||||
ks.RemoveKey(keyID)
|
||||
}
|
||||
return // returns whatever the final values were
|
||||
}
|
||||
|
||||
// AddKey adds a private key to a specified role.
|
||||
// The GUN is inferred from the cryptoservice itself for non-root roles
|
||||
func (cs *CryptoService) AddKey(role data.RoleName, gun data.GUN, key data.PrivateKey) (err error) {
|
||||
// First check if this key already exists in any of our keystores
|
||||
for _, ks := range cs.keyStores {
|
||||
if keyInfo, err := ks.GetKeyInfo(key.ID()); err == nil {
|
||||
if keyInfo.Role != role {
|
||||
return fmt.Errorf("key with same ID already exists for role: %s", keyInfo.Role.String())
|
||||
}
|
||||
logrus.Debugf("key with same ID %s and role %s already exists", key.ID(), keyInfo.Role.String())
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// If the key didn't exist in any of our keystores, add and return on the first successful keystore
|
||||
for _, ks := range cs.keyStores {
|
||||
// Try to add to this keystore, return if successful
|
||||
if err = ks.AddKey(trustmanager.KeyInfo{Role: role, Gun: gun}, key); err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return // returns whatever the final values were
|
||||
}
|
||||
|
||||
// ListKeys returns a list of key IDs valid for the given role
|
||||
func (cs *CryptoService) ListKeys(role data.RoleName) []string {
|
||||
var res []string
|
||||
for _, ks := range cs.keyStores {
|
||||
for k, r := range ks.ListKeys() {
|
||||
if r.Role == role {
|
||||
res = append(res, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// ListAllKeys returns a map of key IDs to role
|
||||
func (cs *CryptoService) ListAllKeys() map[string]data.RoleName {
|
||||
res := make(map[string]data.RoleName)
|
||||
for _, ks := range cs.keyStores {
|
||||
for k, r := range ks.ListKeys() {
|
||||
res[k] = r.Role // keys are content addressed so don't care about overwrites
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// CheckRootKeyIsEncrypted makes sure the root key is encrypted. We have
|
||||
// internal assumptions that depend on this.
|
||||
func CheckRootKeyIsEncrypted(pemBytes []byte) error {
|
||||
block, _ := pem.Decode(pemBytes)
|
||||
if block == nil {
|
||||
return ErrNoValidPrivateKey
|
||||
}
|
||||
|
||||
if block.Type == "ENCRYPTED PRIVATE KEY" {
|
||||
return nil
|
||||
}
|
||||
if !notary.FIPSEnabled() && x509.IsEncryptedPEMBlock(block) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return ErrRootKeyNotEncrypted
|
||||
}
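// Editor's example — a minimal sketch of guarding a root key import
// (hypothetical file path):
//
//	pemBytes, err := os.ReadFile("root_key.pem")
//	if err == nil {
//		err = CheckRootKeyIsEncrypted(pemBytes) // ErrRootKeyNotEncrypted for plaintext keys
//	}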
|
60
vendor/github.com/theupdateframework/notary/development.mysql.yml
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
version: "2"
|
||||
services:
|
||||
server:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: server.Dockerfile
|
||||
networks:
|
||||
mdb:
|
||||
sig:
|
||||
srv:
|
||||
aliases:
|
||||
- notary-server
|
||||
entrypoint: /usr/bin/env sh
|
||||
command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.json"
|
||||
depends_on:
|
||||
- mysql
|
||||
- signer
|
||||
signer:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: signer.Dockerfile
|
||||
networks:
|
||||
mdb:
|
||||
sig:
|
||||
aliases:
|
||||
- notarysigner
|
||||
entrypoint: /usr/bin/env sh
|
||||
command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.json"
|
||||
depends_on:
|
||||
- mysql
|
||||
mysql:
|
||||
networks:
|
||||
- mdb
|
||||
volumes:
|
||||
- ./notarysql/mysql-initdb.d:/docker-entrypoint-initdb.d
|
||||
image: mariadb:10.4
|
||||
environment:
|
||||
- TERM=dumb
|
||||
- MYSQL_ALLOW_EMPTY_PASSWORD="true"
|
||||
command: mysqld --innodb_file_per_table
|
||||
client:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
env_file: buildscripts/env.list
|
||||
command: buildscripts/testclient.py
|
||||
volumes:
|
||||
- ./test_output:/test_output
|
||||
networks:
|
||||
- mdb
|
||||
- srv
|
||||
depends_on:
|
||||
- server
|
||||
networks:
|
||||
mdb:
|
||||
external: false
|
||||
sig:
|
||||
external: false
|
||||
srv:
|
||||
external: false
|
63
vendor/github.com/theupdateframework/notary/development.postgresql.yml
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
||||
version: "2"
|
||||
services:
|
||||
server:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: server.Dockerfile
|
||||
networks:
|
||||
mdb:
|
||||
sig:
|
||||
srv:
|
||||
aliases:
|
||||
- notary-server
|
||||
entrypoint: /usr/bin/env sh
|
||||
command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.postgres.json"
|
||||
environment:
|
||||
MIGRATIONS_PATH: migrations/server/postgresql
|
||||
DB_URL: postgres://server@postgresql:5432/notaryserver?sslmode=verify-ca&sslrootcert=/go/src/github.com/theupdateframework/notary/fixtures/database/ca.pem&sslcert=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-server.pem&sslkey=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-server-key.pem
|
||||
depends_on:
|
||||
- postgresql
|
||||
- signer
|
||||
signer:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: signer.Dockerfile
|
||||
networks:
|
||||
mdb:
|
||||
sig:
|
||||
aliases:
|
||||
- notarysigner
|
||||
entrypoint: /usr/bin/env sh
|
||||
command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.postgres.json"
|
||||
environment:
|
||||
MIGRATIONS_PATH: migrations/signer/postgresql
|
||||
DB_URL: postgres://signer@postgresql:5432/notarysigner?sslmode=verify-ca&sslrootcert=/go/src/github.com/theupdateframework/notary/fixtures/database/ca.pem&sslcert=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-signer.pem&sslkey=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-signer-key.pem
|
||||
depends_on:
|
||||
- postgresql
|
||||
postgresql:
|
||||
image: postgres:9.5.4
|
||||
networks:
|
||||
- mdb
|
||||
volumes:
|
||||
- ./notarysql/postgresql-initdb.d:/docker-entrypoint-initdb.d
|
||||
command: -l
|
||||
client:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
env_file: buildscripts/env.list
|
||||
command: buildscripts/testclient.py
|
||||
volumes:
|
||||
- ./test_output:/test_output
|
||||
networks:
|
||||
- mdb
|
||||
- srv
|
||||
depends_on:
|
||||
- server
|
||||
networks:
|
||||
mdb:
|
||||
external: false
|
||||
sig:
|
||||
external: false
|
||||
srv:
|
||||
external: false
|
110
vendor/github.com/theupdateframework/notary/development.rethink.yml
generated
vendored
Normal file
@ -0,0 +1,110 @@
|
||||
version: "2"
|
||||
services:
|
||||
server:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: server.Dockerfile
|
||||
volumes:
|
||||
- ./fixtures/rethinkdb:/tls
|
||||
networks:
|
||||
- rdb
|
||||
links:
|
||||
- rdb-proxy:rdb-proxy.rdb
|
||||
- signer
|
||||
ports:
|
||||
- "8080"
|
||||
- "4443:4443"
|
||||
entrypoint: /usr/bin/env sh
|
||||
command: -c "sh migrations/rethink_migrate.sh && notary-server -config=fixtures/server-config.rethink.json"
|
||||
depends_on:
|
||||
- rdb-proxy
|
||||
signer:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: signer.Dockerfile
|
||||
volumes:
|
||||
- ./fixtures/rethinkdb:/tls
|
||||
networks:
|
||||
rdb:
|
||||
aliases:
|
||||
- notarysigner
|
||||
links:
|
||||
- rdb-proxy:rdb-proxy.rdb
|
||||
entrypoint: /usr/bin/env sh
|
||||
command: -c "sh migrations/rethink_migrate.sh && notary-signer -config=fixtures/signer-config.rethink.json"
|
||||
depends_on:
|
||||
- rdb-proxy
|
||||
rdb-01:
|
||||
image: jlhawn/rethinkdb:2.3.4
|
||||
volumes:
|
||||
- ./fixtures/rethinkdb:/tls
|
||||
- rdb-01-data:/var/data
|
||||
networks:
|
||||
rdb:
|
||||
aliases:
|
||||
- rdb
|
||||
- rdb.rdb
|
||||
- rdb-01.rdb
|
||||
command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
|
||||
rdb-02:
|
||||
image: jlhawn/rethinkdb:2.3.4
|
||||
volumes:
|
||||
- ./fixtures/rethinkdb:/tls
|
||||
- rdb-02-data:/var/data
|
||||
networks:
|
||||
rdb:
|
||||
aliases:
|
||||
- rdb
|
||||
- rdb.rdb
|
||||
- rdb-02.rdb
|
||||
command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
|
||||
rdb-03:
|
||||
image: jlhawn/rethinkdb:2.3.4
|
||||
volumes:
|
||||
- ./fixtures/rethinkdb:/tls
|
||||
- rdb-03-data:/var/data
|
||||
networks:
|
||||
rdb:
|
||||
aliases:
|
||||
- rdb
|
||||
- rdb.rdb
|
||||
- rdb-03.rdb
|
||||
command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
|
||||
rdb-proxy:
|
||||
image: jlhawn/rethinkdb:2.3.4
|
||||
ports:
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- ./fixtures/rethinkdb:/tls
|
||||
networks:
|
||||
rdb:
|
||||
aliases:
|
||||
- rdb-proxy
|
||||
- rdb-proxy.rdp
|
||||
command: "proxy --bind all --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
|
||||
depends_on:
|
||||
- rdb-01
|
||||
- rdb-02
|
||||
- rdb-03
|
||||
client:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
volumes:
|
||||
- ./test_output:/test_output
|
||||
networks:
|
||||
- rdb
|
||||
env_file: buildscripts/env.list
|
||||
links:
|
||||
- server:notary-server
|
||||
command: buildscripts/testclient.py
|
||||
volumes:
|
||||
rdb-01-data:
|
||||
external: false
|
||||
rdb-02-data:
|
||||
external: false
|
||||
rdb-03-data:
|
||||
external: false
|
||||
networks:
|
||||
rdb:
|
||||
external: false
|
54
vendor/github.com/theupdateframework/notary/docker-compose.postgresql.yml
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
version: "2"
|
||||
services:
|
||||
server:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: server.Dockerfile
|
||||
networks:
|
||||
- mdb
|
||||
- sig
|
||||
ports:
|
||||
- "8080"
|
||||
- "4443:4443"
|
||||
entrypoint: /usr/bin/env sh
|
||||
command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.postgres.json"
|
||||
environment:
|
||||
MIGRATIONS_PATH: migrations/server/postgresql
|
||||
DB_URL: postgres://server@postgresql:5432/notaryserver?sslmode=verify-ca&sslrootcert=/go/src/github.com/theupdateframework/notary/fixtures/database/ca.pem&sslcert=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-server.pem&sslkey=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-server-key.pem
|
||||
depends_on:
|
||||
- postgresql
|
||||
- signer
|
||||
signer:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: signer.Dockerfile
|
||||
networks:
|
||||
mdb:
|
||||
sig:
|
||||
aliases:
|
||||
- notarysigner
|
||||
entrypoint: /usr/bin/env sh
|
||||
command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.postgres.json"
|
||||
environment:
|
||||
MIGRATIONS_PATH: migrations/signer/postgresql
|
||||
DB_URL: postgres://signer@postgresql:5432/notarysigner?sslmode=verify-ca&sslrootcert=/go/src/github.com/theupdateframework/notary/fixtures/database/ca.pem&sslcert=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-signer.pem&sslkey=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-signer-key.pem
|
||||
depends_on:
|
||||
- postgresql
|
||||
postgresql:
|
||||
image: postgres:9.5.4
|
||||
networks:
|
||||
- mdb
|
||||
volumes:
|
||||
- ./notarysql/postgresql-initdb.d:/docker-entrypoint-initdb.d
|
||||
- notary_data:/var/lib/postgresql
|
||||
ports:
|
||||
- 5432:5432
|
||||
command: -l
|
||||
volumes:
|
||||
notary_data:
|
||||
external: false
|
||||
networks:
|
||||
mdb:
|
||||
external: false
|
||||
sig:
|
||||
external: false
|
96
vendor/github.com/theupdateframework/notary/docker-compose.rethink.yml
generated
vendored
Normal file
@ -0,0 +1,96 @@
|
||||
version: "2"
|
||||
services:
|
||||
server:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: server.Dockerfile
|
||||
volumes:
|
||||
- ./fixtures/rethinkdb:/tls
|
||||
networks:
|
||||
- rdb
|
||||
links:
|
||||
- rdb-proxy:rdb-proxy.rdb
|
||||
- signer
|
||||
ports:
|
||||
- "4443:4443"
|
||||
entrypoint: /usr/bin/env sh
|
||||
command: -c "sh migrations/rethink_migrate.sh && notary-server -config=fixtures/server-config.rethink.json"
|
||||
depends_on:
|
||||
- rdb-proxy
|
||||
signer:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: signer.Dockerfile
|
||||
volumes:
|
||||
- ./fixtures/rethinkdb:/tls
|
||||
networks:
|
||||
rdb:
|
||||
aliases:
|
||||
- notarysigner
|
||||
links:
|
||||
- rdb-proxy:rdb-proxy.rdb
|
||||
entrypoint: /usr/bin/env sh
|
||||
command: -c "sh migrations/rethink_migrate.sh && notary-signer -config=fixtures/signer-config.rethink.json"
|
||||
depends_on:
|
||||
- rdb-proxy
|
||||
rdb-01:
|
||||
image: jlhawn/rethinkdb:2.3.4
|
||||
volumes:
|
||||
- ./fixtures/rethinkdb:/tls
|
||||
- rdb-01-data:/var/data
|
||||
networks:
|
||||
rdb:
|
||||
aliases:
|
||||
- rdb-01.rdb
|
||||
command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
|
||||
rdb-02:
|
||||
image: jlhawn/rethinkdb:2.3.4
|
||||
volumes:
|
||||
- ./fixtures/rethinkdb:/tls
|
||||
- rdb-02-data:/var/data
|
||||
networks:
|
||||
rdb:
|
||||
aliases:
|
||||
- rdb-02.rdb
|
||||
command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb-01 --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
|
||||
depends_on:
|
||||
- rdb-01
|
||||
rdb-03:
|
||||
image: jlhawn/rethinkdb:2.3.4
|
||||
volumes:
|
||||
- ./fixtures/rethinkdb:/tls
|
||||
- rdb-03-data:/var/data
|
||||
networks:
|
||||
rdb:
|
||||
aliases:
|
||||
- rdb-03.rdb
|
||||
command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb-02 --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
|
||||
depends_on:
|
||||
- rdb-01
|
||||
- rdb-02
|
||||
rdb-proxy:
|
||||
image: jlhawn/rethinkdb:2.3.4
|
||||
ports:
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- ./fixtures/rethinkdb:/tls
|
||||
networks:
|
||||
rdb:
|
||||
aliases:
|
||||
- rdb-proxy
|
||||
- rdb-proxy.rdp
|
||||
command: "proxy --bind all --join rdb-03 --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
|
||||
depends_on:
|
||||
- rdb-01
|
||||
- rdb-02
|
||||
- rdb-03
|
||||
volumes:
|
||||
rdb-01-data:
|
||||
external: false
|
||||
rdb-02-data:
|
||||
external: false
|
||||
rdb-03-data:
|
||||
external: false
|
||||
networks:
|
||||
rdb:
|
||||
external: false
|
49
vendor/github.com/theupdateframework/notary/docker-compose.yml
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
version: "2"
|
||||
services:
|
||||
server:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: server.Dockerfile
|
||||
networks:
|
||||
- mdb
|
||||
- sig
|
||||
ports:
|
||||
- "8080"
|
||||
- "4443:4443"
|
||||
entrypoint: /usr/bin/env sh
|
||||
command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.json"
|
||||
depends_on:
|
||||
- mysql
|
||||
- signer
|
||||
signer:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: signer.Dockerfile
|
||||
networks:
|
||||
mdb:
|
||||
sig:
|
||||
aliases:
|
||||
- notarysigner
|
||||
entrypoint: /usr/bin/env sh
|
||||
command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.json"
|
||||
depends_on:
|
||||
- mysql
|
||||
mysql:
|
||||
networks:
|
||||
- mdb
|
||||
volumes:
|
||||
- ./notarysql/mysql-initdb.d:/docker-entrypoint-initdb.d
|
||||
- notary_data:/var/lib/mysql
|
||||
image: mariadb:10.4
|
||||
environment:
|
||||
- TERM=dumb
|
||||
- MYSQL_ALLOW_EMPTY_PASSWORD="true"
|
||||
command: mysqld --innodb_file_per_table
|
||||
volumes:
|
||||
notary_data:
|
||||
external: false
|
||||
networks:
|
||||
mdb:
|
||||
external: false
|
||||
sig:
|
||||
external: false
|
17
vendor/github.com/theupdateframework/notary/escrow.Dockerfile
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
FROM golang:1.14.1-alpine
|
||||
|
||||
ENV NOTARYPKG github.com/theupdateframework/notary
|
||||
ENV GO111MODULE=on
|
||||
|
||||
# Copy the local repo to the expected go path
|
||||
COPY . /go/src/${NOTARYPKG}
|
||||
|
||||
WORKDIR /go/src/${NOTARYPKG}
|
||||
|
||||
EXPOSE 4450
|
||||
|
||||
# Install escrow
|
||||
RUN go install ${NOTARYPKG}/cmd/escrow
|
||||
|
||||
ENTRYPOINT [ "escrow" ]
|
||||
CMD [ "-config=cmd/escrow/config.toml" ]
|
14
vendor/github.com/theupdateframework/notary/fips.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
package notary
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
// Need to import md5 so we can test its availability.
|
||||
_ "crypto/md5" // #nosec
|
||||
)
|
||||
|
||||
// FIPSEnabled returns true if running in FIPS mode.
|
||||
// If compiled in FIPS mode the md5 hash function is never available
|
||||
// even when imported. This seems to be the best test we have for it.
|
||||
func FIPSEnabled() bool {
|
||||
return !crypto.MD5.Available()
|
||||
}
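// Editor's example — an illustrative caller (not upstream code); see e.g.
// cryptoservice.CheckRootKeyIsEncrypted, which skips the legacy encrypted
// PEM check when running in FIPS mode:
//
//	if FIPSEnabled() {
//		// avoid non-approved algorithms such as md5 or legacy PEM encryption
//	}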
|
12
vendor/github.com/theupdateframework/notary/notary.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
package notary
|
||||
|
||||
// PassRetriever is a callback function that should retrieve a passphrase
|
||||
// for a given named key. If it should be treated as a new passphrase (e.g. with
// confirmation), createNew will be true. Attempts is passed in so that implementers
// can decide, for example, how many chances to give to a human.
|
||||
type PassRetriever func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error)
|
||||
|
||||
// CtxKey is a wrapper type for use in context.WithValue() to satisfy golint
|
||||
// https://github.com/golang/go/issues/17293
|
||||
// https://github.com/golang/lint/pull/245
|
||||
type CtxKey int
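// Editor's example — a minimal PassRetriever sketch (hypothetical
// environment variable) that never prompts and gives up after 3 attempts:
//
//	var retriever PassRetriever = func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
//		if attempts > 3 {
//			return "", true, errors.New("too many passphrase attempts")
//		}
//		return os.Getenv("NOTARY_KEY_PASSPHRASE"), false, nil
//	}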
|
210
vendor/github.com/theupdateframework/notary/passphrase/passphrase.go
generated
vendored
Normal file
@ -0,0 +1,210 @@
|
||||
// Package passphrase provides utility functions for managing passphrases
// for TUF and Notary keys.
|
||||
package passphrase
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/theupdateframework/notary"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
||||
const (
|
||||
idBytesToDisplay = 7
|
||||
tufRootAlias = "root"
|
||||
tufRootKeyGenerationWarning = `You are about to create a new root signing key passphrase. This passphrase
|
||||
will be used to protect the most sensitive key in your signing system. Please
|
||||
choose a long, complex passphrase and be careful to keep the password and the
|
||||
key file itself secure and backed up. It is highly recommended that you use a
|
||||
password manager to generate the passphrase and keep it safe. There will be no
|
||||
way to recover this key. You can find the key in your config directory.`
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrTooShort is returned if the passphrase entered for a new key is
|
||||
// below the minimum length
|
||||
ErrTooShort = errors.New("Passphrase too short")
|
||||
|
||||
// ErrDontMatch is returned if the two entered passphrases don't match.
|
||||
ErrDontMatch = errors.New("The entered passphrases do not match")
|
||||
|
||||
// ErrTooManyAttempts is returned if the maximum number of passphrase
|
||||
// entry attempts is reached.
|
||||
ErrTooManyAttempts = errors.New("Too many attempts")
|
||||
|
||||
// ErrNoInput is returned if we do not have a valid input method for passphrases
|
||||
ErrNoInput = errors.New("Please either use environment variables or STDIN with a terminal to provide key passphrases")
|
||||
)
|
||||
|
||||
// PromptRetriever returns a new Retriever which will provide a prompt on stdin
|
||||
// and stdout to retrieve a passphrase. stdin is checked to see whether it is a terminal;
// if not, the PromptRetriever will error when attempting to retrieve a passphrase.
|
||||
// Upon successful passphrase retrievals, the passphrase will be cached such that
|
||||
// subsequent prompts will produce the same passphrase.
|
||||
func PromptRetriever() notary.PassRetriever {
|
||||
if !term.IsTerminal(int(os.Stdin.Fd())) {
|
||||
return func(string, string, bool, int) (string, bool, error) {
|
||||
return "", false, ErrNoInput
|
||||
}
|
||||
}
|
||||
return PromptRetrieverWithInOut(os.Stdin, os.Stdout, nil)
|
||||
}
|
||||
|
||||
type boundRetriever struct {
|
||||
in io.Reader
|
||||
out io.Writer
|
||||
aliasMap map[string]string
|
||||
passphraseCache map[string]string
|
||||
}
|
||||
|
||||
func (br *boundRetriever) getPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
|
||||
if numAttempts == 0 {
|
||||
if alias == tufRootAlias && createNew {
|
||||
fmt.Fprintln(br.out, tufRootKeyGenerationWarning)
|
||||
}
|
||||
|
||||
if pass, ok := br.passphraseCache[alias]; ok {
|
||||
return pass, false, nil
|
||||
}
|
||||
} else if !createNew { // per `if`, numAttempts > 0 if we're at this `else`
|
||||
if numAttempts > 3 {
|
||||
return "", true, ErrTooManyAttempts
|
||||
}
|
||||
fmt.Fprintln(br.out, "Passphrase incorrect. Please retry.")
|
||||
}
|
||||
|
||||
// passphrase not cached and we're not aborting, get passphrase from user!
|
||||
return br.requestPassphrase(keyName, alias, createNew, numAttempts)
|
||||
}
|
||||
|
||||
func (br *boundRetriever) requestPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
|
||||
// Figure out if we should display a different string for this alias
|
||||
displayAlias := alias
|
||||
if val, ok := br.aliasMap[alias]; ok {
|
||||
displayAlias = val
|
||||
}
|
||||
|
||||
indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator))
|
||||
if indexOfLastSeparator == -1 {
|
||||
indexOfLastSeparator = 0
|
||||
}
|
||||
|
||||
var shortName string
|
||||
if len(keyName) > indexOfLastSeparator+idBytesToDisplay {
|
||||
if indexOfLastSeparator > 0 {
|
||||
keyNamePrefix := keyName[:indexOfLastSeparator]
|
||||
keyNameID := keyName[indexOfLastSeparator+1 : indexOfLastSeparator+idBytesToDisplay+1]
|
||||
shortName = keyNameID + " (" + keyNamePrefix + ")"
|
||||
} else {
|
||||
shortName = keyName[indexOfLastSeparator : indexOfLastSeparator+idBytesToDisplay]
|
||||
}
|
||||
}
|
||||
|
||||
withID := fmt.Sprintf(" with ID %s", shortName)
|
||||
if shortName == "" {
|
||||
withID = ""
|
||||
}
|
||||
|
||||
switch {
|
||||
case createNew:
|
||||
fmt.Fprintf(br.out, "Enter passphrase for new %s key%s: ", displayAlias, withID)
|
||||
case displayAlias == "yubikey":
|
||||
fmt.Fprintf(br.out, "Enter the %s for the attached Yubikey: ", keyName)
|
||||
default:
|
||||
fmt.Fprintf(br.out, "Enter passphrase for %s key%s: ", displayAlias, withID)
|
||||
}
|
||||
|
||||
stdin := bufio.NewReader(br.in)
|
||||
passphrase, err := GetPassphrase(stdin)
|
||||
fmt.Fprintln(br.out)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
|
||||
retPass := strings.TrimSpace(string(passphrase))
|
||||
|
||||
if createNew {
|
||||
err = br.verifyAndConfirmPassword(stdin, retPass, displayAlias, withID)
|
||||
if err != nil {
|
||||
return "", false, err
|
||||
}
|
||||
}
|
||||
|
||||
br.cachePassword(alias, retPass)
|
||||
|
||||
return retPass, false, nil
|
||||
}
|
||||
|
||||
func (br *boundRetriever) verifyAndConfirmPassword(stdin *bufio.Reader, retPass, displayAlias, withID string) error {
|
||||
if len(retPass) < 8 {
|
||||
fmt.Fprintln(br.out, "Passphrase is too short. Please use a password manager to generate and store a good random passphrase.")
|
||||
return ErrTooShort
|
||||
}
|
||||
|
||||
fmt.Fprintf(br.out, "Repeat passphrase for new %s key%s: ", displayAlias, withID)
|
||||
|
||||
confirmation, err := GetPassphrase(stdin)
|
||||
fmt.Fprintln(br.out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
confirmationStr := strings.TrimSpace(string(confirmation))
|
||||
|
||||
if retPass != confirmationStr {
|
||||
fmt.Fprintln(br.out, "Passphrases do not match. Please retry.")
|
||||
return ErrDontMatch
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (br *boundRetriever) cachePassword(alias, retPass string) {
|
||||
br.passphraseCache[alias] = retPass
|
||||
}
|
||||
|
||||
// PromptRetrieverWithInOut returns a new Retriever which will provide a
|
||||
// prompt using the given in and out readers. The passphrase will be cached
|
||||
// such that subsequent prompts will produce the same passphrase.
|
||||
// aliasMap can be used to specify display names for TUF key aliases. If aliasMap
|
||||
// is nil, a sensible default will be used.
|
||||
func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]string) notary.PassRetriever {
|
||||
bound := &boundRetriever{
|
||||
in: in,
|
||||
out: out,
|
||||
aliasMap: aliasMap,
|
||||
passphraseCache: make(map[string]string),
|
||||
}
|
||||
|
||||
return bound.getPassphrase
|
||||
}
|
||||
|
||||
// ConstantRetriever returns a new Retriever which will return a constant string
|
||||
// as a passphrase.
|
||||
func ConstantRetriever(constantPassphrase string) notary.PassRetriever {
|
||||
return func(k, a string, c bool, n int) (string, bool, error) {
|
||||
return constantPassphrase, false, nil
|
||||
}
|
||||
}
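// Editor's example — invoking a ConstantRetriever directly (illustrative
// arguments only; any key name, alias, and attempt count yield the same result):
//
//	retriever := ConstantRetriever("testpassphrase")
//	pass, giveup, err := retriever("keyID", "root", false, 0)
//	// pass == "testpassphrase", giveup == false, err == nil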
|
||||
|
||||
// GetPassphrase gets the passphrase from a bufio.Reader or from the terminal.
// When reading from the terminal, echo is disabled so the passphrase is not displayed.
|
||||
func GetPassphrase(in *bufio.Reader) ([]byte, error) {
|
||||
var (
|
||||
passphrase []byte
|
||||
err error
|
||||
)
|
||||
|
||||
if term.IsTerminal(int(os.Stdin.Fd())) {
|
||||
passphrase, err = term.ReadPassword(int(os.Stdin.Fd()))
|
||||
} else {
|
||||
passphrase, err = in.ReadBytes('\n')
|
||||
}
|
||||
|
||||
return passphrase, err
|
||||
}
|
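A minimal usage sketch of the retriever API above, assuming notary's passphrase package import path; the key ID is illustrative, not from the source:

package main

import (
	"os"

	"github.com/theupdateframework/notary/passphrase"
)

func main() {
	// Prompts on stdout and reads from stdin; a nil aliasMap uses the default display names.
	retriever := passphrase.PromptRetrieverWithInOut(os.Stdin, os.Stdout, nil)

	// Ask for a brand-new passphrase for a hypothetical root key ID; the retriever
	// enforces the 8-character minimum and the repeat-to-confirm step shown above.
	pass, giveup, err := retriever("3f6d1e2a", "root", true, 0)
	if err != nil || giveup {
		return
	}

	// Subsequent calls for the same alias are served from the passphrase cache.
	_, _, _ = retriever("3f6d1e2a", "root", false, 0)
	_ = pass
}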
30
vendor/github.com/theupdateframework/notary/server.Dockerfile
generated
vendored
Normal file
@ -0,0 +1,30 @@
FROM golang:1.14.1-alpine

RUN apk add --update git gcc libc-dev

ENV GO111MODULE=on

ARG MIGRATE_VER=v4.6.2
RUN go get -tags 'mysql postgres file' github.com/golang-migrate/migrate/v4/cli@${MIGRATE_VER} && mv /go/bin/cli /go/bin/migrate

ENV GOFLAGS=-mod=vendor
ENV NOTARYPKG github.com/theupdateframework/notary

# Copy the local repo to the expected go path
COPY . /go/src/${NOTARYPKG}

WORKDIR /go/src/${NOTARYPKG}

RUN chmod 0600 ./fixtures/database/*

ENV SERVICE_NAME=notary_server
EXPOSE 4443

# Install notary-server
RUN go install \
    -tags pkcs11 \
    -ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \
    ${NOTARYPKG}/cmd/notary-server && apk del git gcc libc-dev && rm -rf /var/cache/apk/*

ENTRYPOINT [ "notary-server" ]
CMD [ "-config=fixtures/server-config-local.json" ]
42
vendor/github.com/theupdateframework/notary/server.minimal.Dockerfile
generated
vendored
Normal file
@ -0,0 +1,42 @@
FROM golang:1.14.1-alpine AS build-env

RUN apk add --update git gcc libc-dev

ENV GO111MODULE=on

ARG MIGRATE_VER=v4.6.2
RUN go get -tags 'mysql postgres file' github.com/golang-migrate/migrate/v4/cli@${MIGRATE_VER} && mv /go/bin/cli /go/bin/migrate

ENV GOFLAGS=-mod=vendor
ENV NOTARYPKG github.com/theupdateframework/notary

# Copy the local repo to the expected go path
COPY . /go/src/${NOTARYPKG}
WORKDIR /go/src/${NOTARYPKG}

# Build notary-server
RUN go install \
    -tags pkcs11 \
    -ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \
    ${NOTARYPKG}/cmd/notary-server


FROM busybox:latest

# the ln is for compatibility with the docker-compose.yml, making these
# images a straight swap for those built in the compose file.
RUN mkdir -p /usr/bin /var/lib && ln -s /bin/env /usr/bin/env

COPY --from=build-env /go/bin/notary-server /usr/bin/notary-server
COPY --from=build-env /go/bin/migrate /usr/bin/migrate
COPY --from=build-env /lib/ld-musl-x86_64.so.1 /lib/ld-musl-x86_64.so.1
COPY --from=build-env /go/src/github.com/theupdateframework/notary/migrations/ /var/lib/notary/migrations
COPY --from=build-env /go/src/github.com/theupdateframework/notary/fixtures /var/lib/notary/fixtures
RUN chmod 0600 /var/lib/notary/fixtures/database/*

WORKDIR /var/lib/notary
# SERVICE_NAME needed for migration script
ENV SERVICE_NAME=notary_server
EXPOSE 4443
ENTRYPOINT [ "/usr/bin/notary-server" ]
CMD [ "-config=/var/lib/notary/fixtures/server-config-local.json" ]
31
vendor/github.com/theupdateframework/notary/signer.Dockerfile
generated
vendored
Normal file
@ -0,0 +1,31 @@
FROM golang:1.14.1-alpine

RUN apk add --update git gcc libc-dev

ENV GO111MODULE=on

ARG MIGRATE_VER=v4.6.2
RUN go get -tags 'mysql postgres file' github.com/golang-migrate/migrate/v4/cli@${MIGRATE_VER} && mv /go/bin/cli /go/bin/migrate

ENV GOFLAGS=-mod=vendor
ENV NOTARYPKG github.com/theupdateframework/notary

# Copy the local repo to the expected go path
COPY . /go/src/${NOTARYPKG}

WORKDIR /go/src/${NOTARYPKG}

RUN chmod 0600 ./fixtures/database/*

ENV SERVICE_NAME=notary_signer
ENV NOTARY_SIGNER_DEFAULT_ALIAS="timestamp_1"
ENV NOTARY_SIGNER_TIMESTAMP_1="testpassword"

# Install notary-signer
RUN go install \
    -tags pkcs11 \
    -ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \
    ${NOTARYPKG}/cmd/notary-signer && apk del git gcc libc-dev && rm -rf /var/cache/apk/*

ENTRYPOINT [ "notary-signer" ]
CMD [ "-config=fixtures/signer-config-local.json" ]
44
vendor/github.com/theupdateframework/notary/signer.minimal.Dockerfile
generated
vendored
Normal file
@ -0,0 +1,44 @@
FROM golang:1.14.1-alpine AS build-env

RUN apk add --update git gcc libc-dev

ENV GO111MODULE=on

ARG MIGRATE_VER=v4.6.2
RUN go get -tags 'mysql postgres file' github.com/golang-migrate/migrate/v4/cli@${MIGRATE_VER} && mv /go/bin/cli /go/bin/migrate

ENV GOFLAGS=-mod=vendor
ENV NOTARYPKG github.com/theupdateframework/notary

# Copy the local repo to the expected go path
COPY . /go/src/${NOTARYPKG}
WORKDIR /go/src/${NOTARYPKG}

# Build notary-signer
RUN go install \
    -tags pkcs11 \
    -ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \
    ${NOTARYPKG}/cmd/notary-signer


FROM busybox:latest

# the ln is for compatibility with the docker-compose.yml, making these
# images a straight swap for those built in the compose file.
RUN mkdir -p /usr/bin /var/lib && ln -s /bin/env /usr/bin/env

COPY --from=build-env /go/bin/notary-signer /usr/bin/notary-signer
COPY --from=build-env /go/bin/migrate /usr/bin/migrate
COPY --from=build-env /lib/ld-musl-x86_64.so.1 /lib/ld-musl-x86_64.so.1
COPY --from=build-env /go/src/github.com/theupdateframework/notary/migrations/ /var/lib/notary/migrations
COPY --from=build-env /go/src/github.com/theupdateframework/notary/fixtures /var/lib/notary/fixtures
RUN chmod 0600 /var/lib/notary/fixtures/database/*

WORKDIR /var/lib/notary
# SERVICE_NAME needed for migration script
ENV SERVICE_NAME=notary_signer
ENV NOTARY_SIGNER_DEFAULT_ALIAS="timestamp_1"
ENV NOTARY_SIGNER_TIMESTAMP_1="testpassword"

ENTRYPOINT [ "/usr/bin/notary-signer" ]
CMD [ "-config=/var/lib/notary/fixtures/signer-config-local.json" ]
22
vendor/github.com/theupdateframework/notary/storage/errors.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
package storage

import (
	"errors"
	"fmt"
)

var (
	// ErrPathOutsideStore indicates that the returned path would be
	// outside the store
	ErrPathOutsideStore = errors.New("path outside file store")
)

// ErrMetaNotFound indicates we did not find a particular piece
// of metadata in the store
type ErrMetaNotFound struct {
	Resource string
}

func (err ErrMetaNotFound) Error() string {
	return fmt.Sprintf("%s trust data unavailable. Has a notary repository been initialized?", err.Resource)
}
278
vendor/github.com/theupdateframework/notary/storage/filestore.go
generated
vendored
Normal file
@ -0,0 +1,278 @@
package storage

import (
	"bytes"
	"encoding/pem"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/sirupsen/logrus"
	"github.com/theupdateframework/notary"
)

// NewFileStore creates a fully configurable file store
func NewFileStore(baseDir, fileExt string) (*FilesystemStore, error) {
	baseDir = filepath.Clean(baseDir)
	if err := createDirectory(baseDir, notary.PrivExecPerms); err != nil {
		return nil, err
	}
	if !strings.HasPrefix(fileExt, ".") {
		fileExt = "." + fileExt
	}

	return &FilesystemStore{
		baseDir: baseDir,
		ext:     fileExt,
	}, nil
}

// NewPrivateKeyFileStorage initializes a new filestore for private keys, appending
// the notary.PrivDir to the baseDir.
func NewPrivateKeyFileStorage(baseDir, fileExt string) (*FilesystemStore, error) {
	baseDir = filepath.Join(baseDir, notary.PrivDir)
	myStore, err := NewFileStore(baseDir, fileExt)
	myStore.migrateTo0Dot4()
	return myStore, err
}

// NewPrivateSimpleFileStore is a wrapper to create an owner readable/writeable
// _only_ filestore
func NewPrivateSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) {
	return NewFileStore(baseDir, fileExt)
}

// FilesystemStore is a store in a locally accessible directory
type FilesystemStore struct {
	baseDir string
	ext     string
}

func (f *FilesystemStore) moveKeyTo0Dot4Location(file string) {
	keyID := filepath.Base(file)
	fileDir := filepath.Dir(file)
	d, _ := f.Get(file)
	block, _ := pem.Decode(d)
	if block == nil {
		logrus.Warn("Key data for", file, "could not be decoded as a valid PEM block. The key will not be migrated and may not be available")
		return
	}
	fileDir = strings.TrimPrefix(fileDir, notary.RootKeysSubdir)
	fileDir = strings.TrimPrefix(fileDir, notary.NonRootKeysSubdir)
	if fileDir != "" {
		block.Headers["gun"] = filepath.ToSlash(fileDir[1:])
	}
	if strings.Contains(keyID, "_") {
		role := strings.Split(keyID, "_")[1]
		keyID = strings.TrimSuffix(keyID, "_"+role)
		block.Headers["role"] = role
	}
	var keyPEM bytes.Buffer
	// since block came from decoding the PEM bytes in the first place, and all we're
	// doing is adding some headers, we ignore the possibility of an error while
	// encoding the block
	pem.Encode(&keyPEM, block)
	f.Set(keyID, keyPEM.Bytes())
}

func (f *FilesystemStore) migrateTo0Dot4() {
	rootKeysSubDir := filepath.Clean(filepath.Join(f.Location(), notary.RootKeysSubdir))
	nonRootKeysSubDir := filepath.Clean(filepath.Join(f.Location(), notary.NonRootKeysSubdir))
	if _, err := os.Stat(rootKeysSubDir); !os.IsNotExist(err) && f.Location() != rootKeysSubDir {
		if rootKeysSubDir == "" || rootKeysSubDir == "/" {
			// making sure we don't remove a user's homedir
			logrus.Warn("The directory for root keys is an unsafe value, we are not going to delete the directory. Please delete it manually")
		} else {
			// root_keys exists, migrate things from it
			listOnlyRootKeysDirStore, _ := NewFileStore(rootKeysSubDir, f.ext)
			for _, file := range listOnlyRootKeysDirStore.ListFiles() {
				f.moveKeyTo0Dot4Location(filepath.Join(notary.RootKeysSubdir, file))
			}
			// delete the old directory
			os.RemoveAll(rootKeysSubDir)
		}
	}

	if _, err := os.Stat(nonRootKeysSubDir); !os.IsNotExist(err) && f.Location() != nonRootKeysSubDir {
		if nonRootKeysSubDir == "" || nonRootKeysSubDir == "/" {
			// making sure we don't remove a user's homedir
			logrus.Warn("The directory for non root keys is an unsafe value, we are not going to delete the directory. Please delete it manually")
		} else {
			// tuf_keys exists, migrate things from it
			listOnlyNonRootKeysDirStore, _ := NewFileStore(nonRootKeysSubDir, f.ext)
			for _, file := range listOnlyNonRootKeysDirStore.ListFiles() {
				f.moveKeyTo0Dot4Location(filepath.Join(notary.NonRootKeysSubdir, file))
			}
			// delete the old directory
			os.RemoveAll(nonRootKeysSubDir)
		}
	}

	// if we have a trusted_certificates folder, delete it for a complete migration since it is unused by new clients
	certsSubDir := filepath.Join(f.Location(), "trusted_certificates")
	if certsSubDir == "" || certsSubDir == "/" {
		logrus.Warn("The directory for trusted certificates is an unsafe value, we are not going to delete the directory. Please delete it manually")
	} else {
		os.RemoveAll(certsSubDir)
	}
}

func (f *FilesystemStore) getPath(name string) (string, error) {
	fileName := fmt.Sprintf("%s%s", name, f.ext)
	fullPath := filepath.Join(f.baseDir, fileName)

	if !strings.HasPrefix(fullPath, f.baseDir) {
		return "", ErrPathOutsideStore
	}
	return fullPath, nil
}

// GetSized returns the meta for the given name (a role) up to size bytes
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize". If the file is larger than size
// we return ErrMaliciousServer for consistency with the HTTPStore
func (f *FilesystemStore) GetSized(name string, size int64) ([]byte, error) {
	p, err := f.getPath(name)
	if err != nil {
		return nil, err
	}
	file, err := os.Open(p)
	if err != nil {
		if os.IsNotExist(err) {
			err = ErrMetaNotFound{Resource: name}
		}
		return nil, err
	}
	defer func() {
		_ = file.Close()
	}()

	if size == NoSizeLimit {
		size = notary.MaxDownloadSize
	}

	stat, err := file.Stat()
	if err != nil {
		return nil, err
	}
	if stat.Size() > size {
		return nil, ErrMaliciousServer{}
	}

	l := io.LimitReader(file, size)
	return ioutil.ReadAll(l)
}

// Get returns the meta for the given name.
func (f *FilesystemStore) Get(name string) ([]byte, error) {
	p, err := f.getPath(name)
	if err != nil {
		return nil, err
	}
	meta, err := ioutil.ReadFile(p)
	if err != nil {
		if os.IsNotExist(err) {
			err = ErrMetaNotFound{Resource: name}
		}
		return nil, err
	}
	return meta, nil
}

// SetMulti sets the metadata for multiple roles in one operation
func (f *FilesystemStore) SetMulti(metas map[string][]byte) error {
	for role, blob := range metas {
		err := f.Set(role, blob)
		if err != nil {
			return err
		}
	}
	return nil
}

// Set sets the meta for a single role
func (f *FilesystemStore) Set(name string, meta []byte) error {
	fp, err := f.getPath(name)
	if err != nil {
		return err
	}

	// Ensures the parent directories of the file we are about to write exist
	err = os.MkdirAll(filepath.Dir(fp), notary.PrivExecPerms)
	if err != nil {
		return err
	}

	// if something already exists, just delete it and re-write it
	os.RemoveAll(fp)

	// Write the file to disk
	return ioutil.WriteFile(fp, meta, notary.PrivNoExecPerms)
}

// RemoveAll clears the existing filestore by removing its base directory
func (f *FilesystemStore) RemoveAll() error {
	return os.RemoveAll(f.baseDir)
}

// Remove removes the metadata for a single role - if the metadata doesn't
// exist, no error is returned
func (f *FilesystemStore) Remove(name string) error {
	p, err := f.getPath(name)
	if err != nil {
		return err
	}
	return os.RemoveAll(p) // RemoveAll succeeds if path doesn't exist
}

// Location returns a human readable name for the storage location
func (f FilesystemStore) Location() string {
	return f.baseDir
}

// ListFiles returns a list of all the filenames that can be used with Get*
// to retrieve content from this filestore
func (f FilesystemStore) ListFiles() []string {
	files := make([]string, 0)
	filepath.Walk(f.baseDir, func(fp string, fi os.FileInfo, err error) error {
		// If there are errors, ignore this particular file
		if err != nil {
			return nil
		}
		// Ignore if it is a directory
		if fi.IsDir() {
			return nil
		}

		// If this is a symlink, ignore it
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			return nil
		}

		// Only allow matches that end with our certificate extension (e.g. *.crt)
		matched, _ := filepath.Match("*"+f.ext, fi.Name())

		if matched {
			// Find the relative path for this file relative to the base path.
			fp, err = filepath.Rel(f.baseDir, fp)
			if err != nil {
				return err
			}
			trimmed := strings.TrimSuffix(fp, f.ext)
			files = append(files, trimmed)
		}
		return nil
	})
	return files
}

// createDirectory receives a string of the path to a directory.
// It does not support passing files, so the caller has to remove
// the filename by doing filepath.Dir(full_path_to_file)
func createDirectory(dir string, perms os.FileMode) error {
	// This prevents someone passing /path/to/dir and 'dir' not being created
	// If two '//' exist, MkdirAll deals with it correctly
	dir = dir + "/"
	return os.MkdirAll(dir, perms)
}
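A quick sketch of the FilesystemStore API above; the directory path and metadata blob are illustrative only:

package main

import (
	"fmt"

	"github.com/theupdateframework/notary/storage"
)

func main() {
	// Creates /tmp/tuf-meta if needed; the extension is normalized to ".json".
	s, err := storage.NewFileStore("/tmp/tuf-meta", "json")
	if err != nil {
		panic(err)
	}

	// Set writes <baseDir>/root.json; Get reads it back.
	if err := s.Set("root", []byte(`{"signed": {}}`)); err != nil {
		panic(err)
	}
	meta, _ := s.Get("root")
	fmt.Println(string(meta), s.ListFiles(), s.Location())
}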
379
vendor/github.com/theupdateframework/notary/storage/httpstore.go
generated
vendored
Normal file
@ -0,0 +1,379 @@
// A Store that can fetch and set metadata on a remote server.
// Some API constraints:
// - Response bodies for error codes should be unmarshallable as:
//   {"errors": [{..., "detail": <serialized validation error>}]}
//   else validation error details, etc. will be unparsable. The errors
//   should have a github.com/theupdateframework/notary/tuf/validation/SerializableError
//   in the Details field.
//   If writing your own server, please have a look at
//   github.com/docker/distribution/registry/api/errcode

package storage

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"mime/multipart"
	"net/http"
	"net/url"
	"path"

	"github.com/sirupsen/logrus"
	"github.com/theupdateframework/notary"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/validation"
)

const (
	// MaxErrorResponseSize is the maximum size for an error message - 1KiB
	MaxErrorResponseSize int64 = 1 << 10
	// MaxKeySize is the maximum size for a stored TUF key - 256KiB
	MaxKeySize = 256 << 10
)

// ErrServerUnavailable indicates an error from the server. code allows us to
// populate the http error we received
type ErrServerUnavailable struct {
	code int
}

// NetworkError represents any kind of network error when attempting to make a request
type NetworkError struct {
	Wrapped error
}

func (n NetworkError) Error() string {
	if _, ok := n.Wrapped.(*url.Error); ok {
		// QueryUnescape does the inverse transformation of QueryEscape,
		// converting %AB into the byte 0xAB and '+' into ' ' (space).
		// It returns an error if any % is not followed by two hexadecimal digits.
		//
		// If this happens, we log out the QueryUnescape error and return the
		// original error to the client.
		res, err := url.QueryUnescape(n.Wrapped.Error())
		if err != nil {
			logrus.Errorf("unescape network error message failed: %s", err)
			return n.Wrapped.Error()
		}
		return res
	}

	return n.Wrapped.Error()
}

func (err ErrServerUnavailable) Error() string {
	if err.code == 401 {
		return "you are not authorized to perform this operation: server returned 401."
	}
	return fmt.Sprintf("unable to reach trust server at this time: %d.", err.code)
}

// ErrMaliciousServer indicates the server returned a response that is highly suspected
// of being malicious. i.e. it attempted to send us more data than the known size of a
// particular role metadata.
type ErrMaliciousServer struct{}

func (err ErrMaliciousServer) Error() string {
	return "trust server returned a bad response."
}

// ErrInvalidOperation indicates that the server returned a 400 response and
// propagates any body we received.
type ErrInvalidOperation struct {
	msg string
}

func (err ErrInvalidOperation) Error() string {
	if err.msg != "" {
		return fmt.Sprintf("trust server rejected operation: %s", err.msg)
	}
	return "trust server rejected operation."
}

// HTTPStore manages pulling and pushing metadata from and to a remote
// service over HTTP. It assumes the URL structure of the remote service
// maps identically to the structure of the TUF repo:
// <baseURL>/<metaPrefix>/(root|targets|snapshot|timestamp).json
// <baseURL>/<targetsPrefix>/foo.sh
//
// If consistent snapshots are disabled, it is advised that caching is not
// enabled. Simply set a cachePath (and ensure it's writeable) to enable
// caching.
type HTTPStore struct {
	baseURL       url.URL
	metaPrefix    string
	metaExtension string
	keyExtension  string
	roundTrip     http.RoundTripper
}

// NewNotaryServerStore returns a new HTTPStore against a URL which should represent a notary
// server
func NewNotaryServerStore(serverURL string, gun data.GUN, roundTrip http.RoundTripper) (RemoteStore, error) {
	return NewHTTPStore(
		serverURL+"/v2/"+gun.String()+"/_trust/tuf/",
		"",
		"json",
		"key",
		roundTrip,
	)
}

// NewHTTPStore initializes a new store against a URL and a number of configuration options.
//
// In case of a nil `roundTrip`, a default offline store is used instead.
func NewHTTPStore(baseURL, metaPrefix, metaExtension, keyExtension string, roundTrip http.RoundTripper) (RemoteStore, error) {
	base, err := url.Parse(baseURL)
	if err != nil {
		return nil, err
	}
	if !base.IsAbs() {
		return nil, errors.New("HTTPStore requires an absolute baseURL")
	}
	if roundTrip == nil {
		return &OfflineStore{}, nil
	}
	return &HTTPStore{
		baseURL:       *base,
		metaPrefix:    metaPrefix,
		metaExtension: metaExtension,
		keyExtension:  keyExtension,
		roundTrip:     roundTrip,
	}, nil
}

func tryUnmarshalError(resp *http.Response, defaultError error) error {
	b := io.LimitReader(resp.Body, MaxErrorResponseSize)
	bodyBytes, err := ioutil.ReadAll(b)
	if err != nil {
		return defaultError
	}
	var parsedErrors struct {
		Errors []struct {
			Detail validation.SerializableError `json:"detail"`
		} `json:"errors"`
	}
	if err := json.Unmarshal(bodyBytes, &parsedErrors); err != nil {
		return defaultError
	}
	if len(parsedErrors.Errors) != 1 {
		return defaultError
	}
	err = parsedErrors.Errors[0].Detail.Error
	if err == nil {
		return defaultError
	}
	return err
}

func translateStatusToError(resp *http.Response, resource string) error {
	switch resp.StatusCode {
	case http.StatusOK:
		return nil
	case http.StatusNotFound:
		return ErrMetaNotFound{Resource: resource}
	case http.StatusBadRequest:
		return tryUnmarshalError(resp, ErrInvalidOperation{})
	default:
		return ErrServerUnavailable{code: resp.StatusCode}
	}
}

// GetSized downloads the named meta file with the given size. A short body
// is acceptable because in the case of timestamp.json, the size is a cap,
// not an exact length.
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize".
func (s HTTPStore) GetSized(name string, size int64) ([]byte, error) {
	url, err := s.buildMetaURL(name)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("GET", url.String(), nil)
	if err != nil {
		return nil, err
	}
	resp, err := s.roundTrip.RoundTrip(req)
	if err != nil {
		return nil, NetworkError{Wrapped: err}
	}
	defer resp.Body.Close()
	if err := translateStatusToError(resp, name); err != nil {
		logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
		return nil, err
	}
	if size == NoSizeLimit {
		size = notary.MaxDownloadSize
	}
	if resp.ContentLength > size {
		return nil, ErrMaliciousServer{}
	}
	logrus.Debugf("%d when retrieving metadata for %s", resp.StatusCode, name)
	b := io.LimitReader(resp.Body, size)
	body, err := ioutil.ReadAll(b)
	if err != nil {
		return nil, err
	}
	return body, nil
}

// Set sends a single piece of metadata to the TUF server
func (s HTTPStore) Set(name string, blob []byte) error {
	return s.SetMulti(map[string][]byte{name: blob})
}

// Remove always fails, because we should never be able to delete metadata
// remotely
func (s HTTPStore) Remove(name string) error {
	return ErrInvalidOperation{msg: "cannot delete individual metadata files"}
}

// NewMultiPartMetaRequest builds a request with the provided metadata updates
// in multipart form
func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request, error) {
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	for role, blob := range metas {
		part, err := writer.CreateFormFile("files", role)
		if err != nil {
			return nil, err
		}
		_, err = io.Copy(part, bytes.NewBuffer(blob))
		if err != nil {
			return nil, err
		}
	}
	err := writer.Close()
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	return req, nil
}

// SetMulti does a single batch upload of multiple pieces of TUF metadata.
// This should be preferred for updating a remote server as it enables the server
// to remain consistent, either accepting or rejecting the complete update.
func (s HTTPStore) SetMulti(metas map[string][]byte) error {
	url, err := s.buildMetaURL("")
	if err != nil {
		return err
	}
	req, err := NewMultiPartMetaRequest(url.String(), metas)
	if err != nil {
		return err
	}
	resp, err := s.roundTrip.RoundTrip(req)
	if err != nil {
		return NetworkError{Wrapped: err}
	}
	defer resp.Body.Close()
	// if this 404's something is pretty wrong
	return translateStatusToError(resp, "POST metadata endpoint")
}

// RemoveAll will attempt to delete all TUF metadata for a GUN
func (s HTTPStore) RemoveAll() error {
	url, err := s.buildMetaURL("")
	if err != nil {
		return err
	}
	req, err := http.NewRequest("DELETE", url.String(), nil)
	if err != nil {
		return err
	}
	resp, err := s.roundTrip.RoundTrip(req)
	if err != nil {
		return NetworkError{Wrapped: err}
	}
	defer resp.Body.Close()
	return translateStatusToError(resp, "DELETE metadata for GUN endpoint")
}

func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
	var filename string
	if name != "" {
		filename = fmt.Sprintf("%s.%s", name, s.metaExtension)
	}
	uri := path.Join(s.metaPrefix, filename)
	return s.buildURL(uri)
}

func (s HTTPStore) buildKeyURL(name data.RoleName) (*url.URL, error) {
	filename := fmt.Sprintf("%s.%s", name.String(), s.keyExtension)
	uri := path.Join(s.metaPrefix, filename)
	return s.buildURL(uri)
}

func (s HTTPStore) buildURL(uri string) (*url.URL, error) {
	sub, err := url.Parse(uri)
	if err != nil {
		return nil, err
	}
	return s.baseURL.ResolveReference(sub), nil
}

// GetKey retrieves a public key from the remote server
func (s HTTPStore) GetKey(role data.RoleName) ([]byte, error) {
	url, err := s.buildKeyURL(role)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("GET", url.String(), nil)
	if err != nil {
		return nil, err
	}
	resp, err := s.roundTrip.RoundTrip(req)
	if err != nil {
		return nil, NetworkError{Wrapped: err}
	}
	defer resp.Body.Close()
	if err := translateStatusToError(resp, role.String()+" key"); err != nil {
		return nil, err
	}
	b := io.LimitReader(resp.Body, MaxKeySize)
	body, err := ioutil.ReadAll(b)
	if err != nil {
		return nil, err
	}
	return body, nil
}

// RotateKey rotates a private key and returns the public component from the remote server
func (s HTTPStore) RotateKey(role data.RoleName) ([]byte, error) {
	url, err := s.buildKeyURL(role)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", url.String(), nil)
	if err != nil {
		return nil, err
	}
	resp, err := s.roundTrip.RoundTrip(req)
	if err != nil {
		return nil, NetworkError{Wrapped: err}
	}
	defer resp.Body.Close()
	if err := translateStatusToError(resp, role.String()+" key"); err != nil {
		return nil, err
	}
	b := io.LimitReader(resp.Body, MaxKeySize)
	body, err := ioutil.ReadAll(b)
	if err != nil {
		return nil, err
	}
	return body, nil
}

// Location returns a human readable name for the storage location
func (s HTTPStore) Location() string {
	return s.baseURL.Host
}
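A usage sketch of the HTTPStore constructors above; the server URL and GUN are hypothetical placeholders:

package main

import (
	"net/http"

	"github.com/theupdateframework/notary/storage"
	"github.com/theupdateframework/notary/tuf/data"
)

func main() {
	// Builds <server>/v2/<gun>/_trust/tuf/ as the metadata base URL.
	remote, err := storage.NewNotaryServerStore(
		"https://notary.example.com", // hypothetical server
		data.GUN("docker.io/library/alpine"),
		http.DefaultTransport,
	)
	if err != nil {
		panic(err)
	}

	// Fetches root.json, capped at notary.MaxDownloadSize via NoSizeLimit.
	rootJSON, err := remote.GetSized("root", storage.NoSizeLimit)
	_ = rootJSON
	_ = err

	// Passing a nil RoundTripper instead would have returned an OfflineStore.
}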
39
vendor/github.com/theupdateframework/notary/storage/interfaces.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
package storage

import (
	"github.com/theupdateframework/notary/tuf/data"
)

// NoSizeLimit is represented as -1 for arguments to GetMeta
const NoSizeLimit int64 = -1

// MetadataStore must be implemented by anything that intends to interact
// with a store of TUF files
type MetadataStore interface {
	GetSized(name string, size int64) ([]byte, error)
	Set(name string, blob []byte) error
	SetMulti(map[string][]byte) error
	RemoveAll() error
	Remove(name string) error
	Location() string
}

// PublicKeyStore must be implemented by a key service
type PublicKeyStore interface {
	GetKey(role data.RoleName) ([]byte, error)
	RotateKey(role data.RoleName) ([]byte, error)
}

// RemoteStore is similar to LocalStore with the added expectation that it should
// provide a way to download targets once located
type RemoteStore interface {
	MetadataStore
	PublicKeyStore
}

// Bootstrapper is a thing that can set itself up
type Bootstrapper interface {
	// Bootstrap instructs a configured Bootstrapper to perform
	// its setup operations.
	Bootstrap() error
}
137
vendor/github.com/theupdateframework/notary/storage/memorystore.go
generated
vendored
Normal file
@ -0,0 +1,137 @@
package storage

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"

	"github.com/theupdateframework/notary"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/utils"
)

// NewMemoryStore returns a MetadataStore that operates entirely in memory.
// Very useful for testing
func NewMemoryStore(seed map[data.RoleName][]byte) *MemoryStore {
	var (
		consistent = make(map[string][]byte)
		initial    = make(map[string][]byte)
	)
	// add all seed meta to consistent
	for name, d := range seed {
		checksum := sha256.Sum256(d)
		path := utils.ConsistentName(name.String(), checksum[:])
		initial[name.String()] = d
		consistent[path] = d
	}

	return &MemoryStore{
		data:       initial,
		consistent: consistent,
	}
}

// MemoryStore implements a mock RemoteStore entirely in memory.
// For testing purposes only.
type MemoryStore struct {
	data       map[string][]byte
	consistent map[string][]byte
}

// GetSized returns up to size bytes of data referenced by name.
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize", as we will always know the
// size for everything but a timestamp and sometimes a root,
// neither of which should be exceptionally large
func (m MemoryStore) GetSized(name string, size int64) ([]byte, error) {
	d, ok := m.data[name]
	if ok {
		if size == NoSizeLimit {
			size = notary.MaxDownloadSize
		}
		if int64(len(d)) < size {
			return d, nil
		}
		return d[:size], nil
	}
	d, ok = m.consistent[name]
	if ok {
		if int64(len(d)) < size {
			return d, nil
		}
		return d[:size], nil
	}
	return nil, ErrMetaNotFound{Resource: name}
}

// Get returns the data associated with name
func (m MemoryStore) Get(name string) ([]byte, error) {
	if d, ok := m.data[name]; ok {
		return d, nil
	}
	if d, ok := m.consistent[name]; ok {
		return d, nil
	}
	return nil, ErrMetaNotFound{Resource: name}
}

// Set sets the metadata value for the given name
func (m *MemoryStore) Set(name string, meta []byte) error {
	m.data[name] = meta

	parsedMeta := &data.SignedMeta{}
	err := json.Unmarshal(meta, parsedMeta)
	if err == nil {
		// no parse error means this is metadata and not a key, so store by version
		version := parsedMeta.Signed.Version
		versionedName := fmt.Sprintf("%d.%s", version, name)
		m.data[versionedName] = meta
	}

	checksum := sha256.Sum256(meta)
	path := utils.ConsistentName(name, checksum[:])
	m.consistent[path] = meta
	return nil
}

// SetMulti sets multiple pieces of metadata for multiple names
// in a single operation.
func (m *MemoryStore) SetMulti(metas map[string][]byte) error {
	for role, blob := range metas {
		m.Set(role, blob)
	}
	return nil
}

// Remove removes the metadata for a single role - if the metadata doesn't
// exist, no error is returned
func (m *MemoryStore) Remove(name string) error {
	if meta, ok := m.data[name]; ok {
		checksum := sha256.Sum256(meta)
		path := utils.ConsistentName(name, checksum[:])
		delete(m.data, name)
		delete(m.consistent, path)
	}
	return nil
}

// RemoveAll clears the existing memory store by setting this store as a new empty one
func (m *MemoryStore) RemoveAll() error {
	*m = *NewMemoryStore(nil)
	return nil
}

// Location provides a human readable name for the storage location
func (m MemoryStore) Location() string {
	return "memory"
}

// ListFiles returns a list of all files. The names returned should be
// usable with Get directly, with no modification.
func (m *MemoryStore) ListFiles() []string {
	names := make([]string, 0, len(m.data))
	for n := range m.data {
		names = append(names, n)
	}
	return names
}
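A short sketch of the MemoryStore behavior above; the seed blobs are illustrative JSON stand-ins:

package main

import (
	"fmt"

	"github.com/theupdateframework/notary/storage"
	"github.com/theupdateframework/notary/tuf/data"
)

func main() {
	// Seeded metadata is retrievable both by role name and by its
	// consistent (checksum-prefixed) name.
	seed := map[data.RoleName][]byte{
		"root": []byte(`{"signed": {"version": 1}}`),
	}
	m := storage.NewMemoryStore(seed)

	meta, err := m.Get("root")
	fmt.Println(string(meta), err)

	// Set also records a versioned copy ("1.timestamp") when the blob
	// parses as TUF SignedMeta.
	_ = m.Set("timestamp", []byte(`{"signed": {"version": 1}}`))
	fmt.Println(m.ListFiles())
}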
58
vendor/github.com/theupdateframework/notary/storage/offlinestore.go
generated
vendored
Normal file
@ -0,0 +1,58 @@
package storage

import (
	"github.com/theupdateframework/notary/tuf/data"
)

// ErrOffline is used to indicate we are operating offline
type ErrOffline struct{}

func (e ErrOffline) Error() string {
	return "client is offline"
}

var err = ErrOffline{}

// OfflineStore is to be used as a placeholder for a nil store. It simply
// returns ErrOffline for every operation
type OfflineStore struct{}

// GetSized returns ErrOffline
func (es OfflineStore) GetSized(name string, size int64) ([]byte, error) {
	return nil, err
}

// Set returns ErrOffline
func (es OfflineStore) Set(name string, blob []byte) error {
	return err
}

// SetMulti returns ErrOffline
func (es OfflineStore) SetMulti(map[string][]byte) error {
	return err
}

// Remove returns ErrOffline
func (es OfflineStore) Remove(name string) error {
	return err
}

// GetKey returns ErrOffline
func (es OfflineStore) GetKey(role data.RoleName) ([]byte, error) {
	return nil, err
}

// RotateKey returns ErrOffline
func (es OfflineStore) RotateKey(role data.RoleName) ([]byte, error) {
	return nil, err
}

// RemoveAll returns ErrOffline
func (es OfflineStore) RemoveAll() error {
	return err
}

// Location returns a human readable name for the storage location
func (es OfflineStore) Location() string {
	return "offline"
}
31
vendor/github.com/theupdateframework/notary/trustmanager/errors.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
package trustmanager

import "fmt"

// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
type ErrAttemptsExceeded struct{}

func (err ErrAttemptsExceeded) Error() string {
	return "maximum number of passphrase attempts exceeded"
}

// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
type ErrPasswordInvalid struct{}

func (err ErrPasswordInvalid) Error() string {
	return "password invalid, operation has failed."
}

// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
type ErrKeyNotFound struct {
	KeyID string
}

func (err ErrKeyNotFound) Error() string {
	return fmt.Sprintf("signing key not found: %s", err.KeyID)
}
8
vendor/github.com/theupdateframework/notary/trustmanager/importLogic.md
generated
vendored
Normal file
@ -0,0 +1,8 @@
### This document is intended as an overview of the logic we use for importing keys

# A flowchart to detail the logic of our import function in `utils/keys.go` (`func ImportKeys`)



### Should this logic change, you can edit this image at `https://www.draw.io/i/HQICWeO`
54
vendor/github.com/theupdateframework/notary/trustmanager/interfaces.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
package trustmanager

import (
	"github.com/theupdateframework/notary/tuf/data"
)

// Storage implements the bare bones primitives (no hierarchy)
type Storage interface {
	// Set writes a file to the specified location, returning an error if this
	// is not possible (reasons may include permissions errors). The path is cleaned
	// before being made absolute against the store's base dir.
	Set(fileName string, data []byte) error

	// Remove deletes a file from the store relative to the store's base directory.
	// The path is cleaned before being made absolute to ensure no path traversal
	// outside the base directory is possible.
	Remove(fileName string) error

	// Get returns the file content found at fileName relative to the base directory
	// of the file store. The path is cleaned before being made absolute to ensure
	// path traversal outside the store is not possible. If the file is not found
	// an error to that effect is returned.
	Get(fileName string) ([]byte, error)

	// ListFiles returns a list of paths relative to the base directory of the
	// filestore. Any of these paths must be retrievable via the
	// Storage.Get method.
	ListFiles() []string

	// Location returns a human readable name indicating where the implementer
	// is storing keys
	Location() string
}

// KeyInfo stores the role and gun for a corresponding private key ID
// It is assumed that each private key ID is unique
type KeyInfo struct {
	Gun  data.GUN
	Role data.RoleName
}

// KeyStore is a generic interface for private key storage
type KeyStore interface {
	// AddKey adds a key to the KeyStore, and if the key already exists,
	// succeeds. Otherwise, returns an error if it cannot add.
	AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error
	// GetKey should fail with ErrKeyNotFound if the keystore is operating normally
	// and knows that it does not store the requested key.
	GetKey(keyID string) (data.PrivateKey, data.RoleName, error)
	GetKeyInfo(keyID string) (KeyInfo, error)
	ListKeys() map[string]KeyInfo
	RemoveKey(keyID string) error
	Name() string
}
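A sketch of driving the KeyStore interface through the GenericKeyStore defined further below in keystore.go; utils.GenerateECDSAKey and the GUN are assumptions for illustration, not confirmed by this diff:

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/theupdateframework/notary/passphrase"
	"github.com/theupdateframework/notary/trustmanager"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/utils"
)

func main() {
	// In-memory KeyStore; the constant retriever supplies the passphrase
	// used to encrypt stored keys.
	ks := trustmanager.NewKeyMemoryStore(passphrase.ConstantRetriever("testpassword"))

	// Assumed helper from notary's tuf/utils package.
	privKey, err := utils.GenerateECDSAKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	info := trustmanager.KeyInfo{Gun: data.GUN("docker.io/library/alpine"), Role: data.CanonicalTargetsRole}
	if err := ks.AddKey(info, privKey); err != nil {
		panic(err)
	}

	got, role, err := ks.GetKey(privKey.ID())
	fmt.Println(got.ID(), role, err)
}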
246
vendor/github.com/theupdateframework/notary/trustmanager/keys.go
generated
vendored
Normal file
@ -0,0 +1,246 @@
package trustmanager

import (
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"path/filepath"
	"sort"
	"strings"

	"github.com/sirupsen/logrus"
	"github.com/theupdateframework/notary"
	tufdata "github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/utils"
)

// Exporter is a simple interface for the two functions we need from the Storage interface
type Exporter interface {
	Get(string) ([]byte, error)
	ListFiles() []string
}

// Importer is a simple interface for the one function we need from the Storage interface
type Importer interface {
	Set(string, []byte) error
}

// ExportKeysByGUN exports all keys filtered to a GUN
func ExportKeysByGUN(to io.Writer, s Exporter, gun string) error {
	keys := s.ListFiles()
	sort.Strings(keys) // ensure consistency. ListFiles has no order guarantee
	for _, loc := range keys {
		keyFile, err := s.Get(loc)
		if err != nil {
			logrus.Warn("Could not parse key file at ", loc)
			continue
		}
		block, _ := pem.Decode(keyFile)
		keyGun := block.Headers["gun"]
		if keyGun == gun { // must be full GUN match
			if err := ExportKeys(to, s, loc); err != nil {
				return err
			}
		}
	}
	return nil
}

// ExportKeysByID exports all keys matching the given ID
func ExportKeysByID(to io.Writer, s Exporter, ids []string) error {
	want := make(map[string]struct{})
	for _, id := range ids {
		want[id] = struct{}{}
	}
	keys := s.ListFiles()
	for _, k := range keys {
		id := filepath.Base(k)
		if _, ok := want[id]; ok {
			if err := ExportKeys(to, s, k); err != nil {
				return err
			}
		}
	}
	return nil
}

// ExportKeys copies a key from the store to the io.Writer
func ExportKeys(to io.Writer, s Exporter, from string) error {
	// get PEM block
	k, err := s.Get(from)
	if err != nil {
		return err
	}

	// parse PEM blocks if there are more than one
	for block, rest := pem.Decode(k); block != nil; block, rest = pem.Decode(rest) {
		// add from path in a header for later import
		block.Headers["path"] = from
		// write serialized PEM
		err = pem.Encode(to, block)
		if err != nil {
			return err
		}
	}
	return nil
}

// ImportKeys expects an io.Reader containing one or more PEM blocks.
// It reads PEM blocks one at a time until pem.Decode returns a nil
// block.
// Each block is written to the subpath indicated in the "path" PEM
// header. If the file already exists, the file is truncated. Multiple
// adjacent PEMs with the same "path" header are appended together.
func ImportKeys(from io.Reader, to []Importer, fallbackRole string, fallbackGUN string, passRet notary.PassRetriever) error {
	// importLogic.md contains a small flowchart I made to clear up my understanding
	// while writing the cases in this function. It is very rough, but it may help
	// while reading this piece of code.
	data, err := ioutil.ReadAll(from)
	if err != nil {
		return err
	}
	var (
		writeTo   string
		toWrite   []byte
		errBlocks []string
	)
	for block, rest := pem.Decode(data); block != nil; block, rest = pem.Decode(rest) {
		handleLegacyPath(block)
		setFallbacks(block, fallbackGUN, fallbackRole)

		loc, err := checkValidity(block)
		if err != nil {
			// already logged in checkValidity
			errBlocks = append(errBlocks, err.Error())
			continue
		}

		// the path header is not of any use once we've imported the key so strip it away
		delete(block.Headers, "path")

		// we are now all set for import but let's first encrypt the key
		blockBytes := pem.EncodeToMemory(block)
		// check if key is encrypted, note: if it is encrypted at this point, it will have had a path header
		if privKey, err := utils.ParsePEMPrivateKey(blockBytes, ""); err == nil {
			// Key is not encrypted - ask for a passphrase and encrypt this key
			var chosenPassphrase string
			for attempts := 0; ; attempts++ {
				var giveup bool
				chosenPassphrase, giveup, err = passRet(loc, block.Headers["role"], true, attempts)
				if err == nil {
					break
				}
				if giveup || attempts > 10 {
					return errors.New("maximum number of passphrase attempts exceeded")
				}
			}
			blockBytes, err = utils.ConvertPrivateKeyToPKCS8(privKey, tufdata.RoleName(block.Headers["role"]), tufdata.GUN(block.Headers["gun"]), chosenPassphrase)
			if err != nil {
				return errors.New("failed to encrypt key with given passphrase")
			}
		}

		if loc != writeTo {
			// next location is different from previous one. We've finished aggregating
			// data for the previous file. If we have data, write the previous file,
			// clear toWrite and set writeTo to the next path we're going to write
			if toWrite != nil {
				if err = importToStores(to, writeTo, toWrite); err != nil {
					return err
				}
			}
			// set up for aggregating next file's data
			toWrite = nil
			writeTo = loc
		}

		toWrite = append(toWrite, blockBytes...)
	}
	if toWrite != nil { // close out final iteration if there's data left
		return importToStores(to, writeTo, toWrite)
	}
	if len(errBlocks) > 0 {
		return fmt.Errorf("failed to import all keys: %s", strings.Join(errBlocks, ", "))
	}
	return nil
}

func handleLegacyPath(block *pem.Block) {
	// if there is a legacy path then we set the gun header from this path
	// this is the case when a user attempts to import a key bundle generated by an older client
	if rawPath := block.Headers["path"]; rawPath != "" && rawPath != filepath.Base(rawPath) {
		// this is a legacy filepath and we should try to deduce the gun name from it
		pathWOFileName := filepath.Dir(rawPath)
		if strings.HasPrefix(pathWOFileName, notary.NonRootKeysSubdir) {
			// remove the notary keystore-specific segment of the path, and any potential leading or trailing slashes
			gunName := strings.Trim(strings.TrimPrefix(pathWOFileName, notary.NonRootKeysSubdir), "/")
			if gunName != "" {
				block.Headers["gun"] = gunName
			}
		}
		block.Headers["path"] = filepath.Base(rawPath)
	}
}

func setFallbacks(block *pem.Block, fallbackGUN, fallbackRole string) {
	if block.Headers["gun"] == "" {
		if fallbackGUN != "" {
			block.Headers["gun"] = fallbackGUN
		}
	}

	if block.Headers["role"] == "" {
		if fallbackRole == "" {
			block.Headers["role"] = notary.DefaultImportRole
		} else {
			block.Headers["role"] = fallbackRole
		}
	}
}

// checkValidity ensures the fields in the pem headers are valid and parses out the location.
// While importing a collection of keys, errors from this function should result in only the
// current pem block being skipped.
func checkValidity(block *pem.Block) (string, error) {
	// A root key or a delegations key should not have a gun.
	// Note that a key that is not any of the canonical roles (except root) is a delegations key and should not have a gun.
	switch block.Headers["role"] {
	case tufdata.CanonicalSnapshotRole.String(), tufdata.CanonicalTargetsRole.String(), tufdata.CanonicalTimestampRole.String():
		// check if the key is missing a gun header or has an empty gun and error out since we don't know what gun it belongs to
		if block.Headers["gun"] == "" {
			logrus.Warnf("failed to import key (%s) to store: Cannot have canonical role key without a gun, don't know what gun it belongs to", block.Headers["path"])
			return "", errors.New("invalid key pem block")
		}
	default:
		delete(block.Headers, "gun")
	}

	loc, ok := block.Headers["path"]
	// only if the path isn't specified do we get into this parsing path logic
	if !ok || loc == "" {
		// if the path isn't specified, we will try to infer the path relative to the trust dir from the role (and then gun).
		// parse the key for the keyID which we will save it by.
		// if the key is encrypted at this point, we will generate an error and continue since we don't know the ID to save it by
		decodedKey, err := utils.ParsePEMPrivateKey(pem.EncodeToMemory(block), "")
		if err != nil {
			logrus.Warn("failed to import key to store: Invalid key generated, key may be encrypted and does not contain path header")
			return "", errors.New("invalid key pem block")
		}
		loc = decodedKey.ID()
	}
	return loc, nil
}

func importToStores(to []Importer, path string, bytes []byte) error {
	var err error
	for _, i := range to {
		if err = i.Set(path, bytes); err != nil {
			logrus.Errorf("failed to import key to store: %s", err.Error())
			continue
		}
		break
	}
	return err
}
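A sketch of wiring ExportKeysByGUN and ImportKeys together; the stores and GUN are illustrative, and the passphrase package path is assumed:

package main

import (
	"bytes"

	"github.com/theupdateframework/notary/passphrase"
	"github.com/theupdateframework/notary/storage"
	"github.com/theupdateframework/notary/trustmanager"
)

func main() {
	// MemoryStore satisfies both Exporter (Get, ListFiles) and Importer (Set).
	src := storage.NewMemoryStore(nil)
	dst := storage.NewMemoryStore(nil)

	var bundle bytes.Buffer
	// Export every key under one GUN into a PEM bundle; ExportKeys adds the
	// "path" header that ImportKeys later uses to place each block.
	if err := trustmanager.ExportKeysByGUN(&bundle, src, "docker.io/library/alpine"); err != nil {
		panic(err)
	}

	// Re-import: unencrypted keys get encrypted with a passphrase from the
	// retriever before landing in the first store that accepts them.
	retriever := passphrase.ConstantRetriever("testpassword")
	if err := trustmanager.ImportKeys(&bundle, []trustmanager.Importer{dst}, "", "", retriever); err != nil {
		panic(err)
	}
}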
262
vendor/github.com/theupdateframework/notary/trustmanager/keystore.go
generated
vendored
Normal file
@ -0,0 +1,262 @@
package trustmanager

import (
	"fmt"
	"path/filepath"
	"strings"
	"sync"

	"github.com/sirupsen/logrus"
	"github.com/theupdateframework/notary"
	store "github.com/theupdateframework/notary/storage"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/utils"
)

type keyInfoMap map[string]KeyInfo

type cachedKey struct {
	role data.RoleName
	key  data.PrivateKey
}

// GenericKeyStore is a wrapper for Storage instances that provides
// translation between the []byte form and Public/PrivateKey objects
type GenericKeyStore struct {
	store Storage
	sync.Mutex
	notary.PassRetriever
	cachedKeys map[string]*cachedKey
	keyInfoMap
}

// NewKeyFileStore returns a new KeyFileStore creating a private directory to
// hold the keys.
func NewKeyFileStore(baseDir string, p notary.PassRetriever) (*GenericKeyStore, error) {
	fileStore, err := store.NewPrivateKeyFileStorage(baseDir, notary.KeyExtension)
	if err != nil {
		return nil, err
	}
	return NewGenericKeyStore(fileStore, p), nil
}

// NewKeyMemoryStore returns a new KeyMemoryStore which holds keys in memory
func NewKeyMemoryStore(p notary.PassRetriever) *GenericKeyStore {
	memStore := store.NewMemoryStore(nil)
	return NewGenericKeyStore(memStore, p)
}

// NewGenericKeyStore creates a GenericKeyStore wrapping the provided
// Storage instance, using the PassRetriever to encrypt and decrypt keys
func NewGenericKeyStore(s Storage, p notary.PassRetriever) *GenericKeyStore {
	ks := GenericKeyStore{
		store:         s,
		PassRetriever: p,
		cachedKeys:    make(map[string]*cachedKey),
		keyInfoMap:    make(keyInfoMap),
	}
	ks.loadKeyInfo()
	return &ks
}

func generateKeyInfoMap(s Storage) map[string]KeyInfo {
	keyInfoMap := make(map[string]KeyInfo)
	for _, keyPath := range s.ListFiles() {
		d, err := s.Get(keyPath)
		if err != nil {
			logrus.Error(err)
			continue
		}
		keyID, keyInfo, err := KeyInfoFromPEM(d, keyPath)
		if err != nil {
			logrus.Error(err)
			continue
		}
		keyInfoMap[keyID] = keyInfo
	}
	return keyInfoMap
}

func (s *GenericKeyStore) loadKeyInfo() {
	s.keyInfoMap = generateKeyInfoMap(s.store)
}

// GetKeyInfo returns the corresponding gun and role key info for a keyID
func (s *GenericKeyStore) GetKeyInfo(keyID string) (KeyInfo, error) {
	if info, ok := s.keyInfoMap[keyID]; ok {
		return info, nil
	}
	return KeyInfo{}, fmt.Errorf("Could not find info for keyID %s", keyID)
}

// AddKey stores the contents of a PEM-encoded private key as a PEM block
func (s *GenericKeyStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error {
	var (
		chosenPassphrase string
		giveup           bool
		err              error
		pemPrivKey       []byte
	)
	s.Lock()
	defer s.Unlock()
	if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) {
		keyInfo.Gun = ""
	}
	keyID := privKey.ID()
	for attempts := 0; ; attempts++ {
		chosenPassphrase, giveup, err = s.PassRetriever(keyID, keyInfo.Role.String(), true, attempts)
		if err == nil {
			break
		}
		if giveup || attempts > 10 {
			return ErrAttemptsExceeded{}
		}
	}

	pemPrivKey, err = utils.ConvertPrivateKeyToPKCS8(privKey, keyInfo.Role, keyInfo.Gun, chosenPassphrase)
	if err != nil {
		return err
	}

	s.cachedKeys[keyID] = &cachedKey{role: keyInfo.Role, key: privKey}
	err = s.store.Set(keyID, pemPrivKey)
	if err != nil {
		return err
	}
	s.keyInfoMap[privKey.ID()] = keyInfo
	return nil
}

// GetKey returns the PrivateKey given a KeyID
func (s *GenericKeyStore) GetKey(keyID string) (data.PrivateKey, data.RoleName, error) {
	s.Lock()
	defer s.Unlock()

	cachedKeyEntry, ok := s.cachedKeys[keyID]
	if ok {
		return cachedKeyEntry.key, cachedKeyEntry.role, nil
	}

	role, err := getKeyRole(s.store, keyID)
	if err != nil {
		return nil, "", err
	}

	keyBytes, err := s.store.Get(keyID)
	if err != nil {
		return nil, "", err
	}

	// See if the key is encrypted. If it's encrypted, we'll fail to parse the private key
	privKey, err := utils.ParsePEMPrivateKey(keyBytes, "")
	if err != nil {
		privKey, _, err = GetPasswdDecryptBytes(s.PassRetriever, keyBytes, keyID, string(role))
		if err != nil {
			return nil, "", err
		}
	}
	s.cachedKeys[keyID] = &cachedKey{role: role, key: privKey}
	return privKey, role, nil
}

// ListKeys returns a list of unique PublicKeys present on the KeyFileStore, by returning a copy of the keyInfoMap
func (s *GenericKeyStore) ListKeys() map[string]KeyInfo {
	return copyKeyInfoMap(s.keyInfoMap)
}

// RemoveKey removes the key from the keyfilestore
func (s *GenericKeyStore) RemoveKey(keyID string) error {
	s.Lock()
	defer s.Unlock()
	delete(s.cachedKeys, keyID)

	err := s.store.Remove(keyID)
	if err != nil {
		return err
	}

	delete(s.keyInfoMap, keyID)
	return nil
}

// Name returns a user friendly name for the location this store
// keeps its data
func (s *GenericKeyStore) Name() string {
	return s.store.Location()
}

// copyKeyInfoMap returns a deep copy of the passed-in keyInfoMap
func copyKeyInfoMap(keyInfoMap map[string]KeyInfo) map[string]KeyInfo {
	copyMap := make(map[string]KeyInfo)
	for keyID, keyInfo := range keyInfoMap {
		copyMap[keyID] = KeyInfo{Role: keyInfo.Role, Gun: keyInfo.Gun}
	}
	return copyMap
}

// KeyInfoFromPEM attempts to get a keyID and KeyInfo from the filename and PEM bytes of a key
func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) {
	keyID := filepath.Base(filename)
	role, gun, err := utils.ExtractPrivateKeyAttributes(pemBytes)
	if err != nil {
		return "", KeyInfo{}, err
	}
	return keyID, KeyInfo{Gun: gun, Role: role}, nil
}

// getKeyRole finds the role for the given keyID. It attempts to look
// both in the newer format PEM headers, and also in the legacy filename
// format. It returns: the role, and an error
func getKeyRole(s Storage, keyID string) (data.RoleName, error) {
	name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID)))

	for _, file := range s.ListFiles() {
		filename := filepath.Base(file)
		if strings.HasPrefix(filename, name) {
			d, err := s.Get(file)
			if err != nil {
				return "", err
			}

			role, _, err := utils.ExtractPrivateKeyAttributes(d)
			if err != nil {
				return "", err
			}
			return role, nil
		}
	}
	return "", ErrKeyNotFound{KeyID: keyID}
}

// GetPasswdDecryptBytes gets the password to decrypt the given pem bytes.
// Returns the private key and the password.
func GetPasswdDecryptBytes(passphraseRetriever notary.PassRetriever, pemBytes []byte, name, alias string) (data.PrivateKey, string, error) {
	var (
		passwd  string
		privKey data.PrivateKey
	)
	for attempts := 0; ; attempts++ {
		var (
			giveup bool
			err    error
		)
		if attempts > 10 {
			return nil, "", ErrAttemptsExceeded{}
		}
		passwd, giveup, err = passphraseRetriever(name, alias, false, attempts)
		// Check if the passphrase retriever got an error or if it is telling us to give up
		if giveup || err != nil {
			return nil, "", ErrPasswordInvalid{}
		}

		// Try to convert PEM encoded bytes back to a PrivateKey using the passphrase
		privKey, err = utils.ParsePEMPrivateKey(pemBytes, passwd)
		if err == nil {
			// We managed to parse the PrivateKey. We've succeeded!
			break
		}
	}
	return privKey, passwd, nil
}
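The GenericKeyStore above is typically used through NewKeyMemoryStore or NewKeyFileStore. A minimal sketch of adding and retrieving a root key, assuming the notary passphrase.ConstantRetriever helper and utils.GenerateECDSAKey from tuf/utils (both part of the wider notary codebase, not shown in this diff):

package main

import (
	"crypto/rand"
	"fmt"
	"log"

	"github.com/theupdateframework/notary/passphrase"
	"github.com/theupdateframework/notary/trustmanager"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/utils"
)

func main() {
	// A retriever that always answers with the same passphrase, so AddKey
	// can encrypt the key to PKCS8 without prompting.
	retriever := passphrase.ConstantRetriever("correct horse battery staple")

	// Keys live in memory only; NewKeyFileStore(baseDir, retriever) is the
	// on-disk equivalent.
	store := trustmanager.NewKeyMemoryStore(retriever)

	privKey, err := utils.GenerateECDSAKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// Root keys are stored without a GUN (AddKey clears it; see above).
	if err := store.AddKey(trustmanager.KeyInfo{Role: data.CanonicalRootRole}, privKey); err != nil {
		log.Fatal(err)
	}

	// GetKey decrypts via the retriever when the key is not already cached.
	key, role, err := store.GetKey(privKey.ID())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(key.ID(), role)
}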
60
vendor/github.com/theupdateframework/notary/trustmanager/yubikey/import.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
//go:build pkcs11
// +build pkcs11

package yubikey

import (
	"encoding/pem"
	"errors"

	"github.com/theupdateframework/notary"
	"github.com/theupdateframework/notary/trustmanager"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/utils"
)

// YubiImport is a wrapper around the YubiStore that allows us to import private
// keys to the yubikey
type YubiImport struct {
	dest          *YubiStore
	passRetriever notary.PassRetriever
}

// NewImporter returns a wrapper for the YubiStore provided that enables importing
// keys via the simple Set(string, []byte) interface
func NewImporter(ys *YubiStore, ret notary.PassRetriever) *YubiImport {
	return &YubiImport{
		dest:          ys,
		passRetriever: ret,
	}
}

// Set determines if we are allowed to set the given key on the Yubikey and
// calls through to YubiStore.AddKey if it's valid
func (s *YubiImport) Set(name string, bytes []byte) error {
	block, _ := pem.Decode(bytes)
	if block == nil {
		return errors.New("invalid PEM data, could not parse")
	}
	role, ok := block.Headers["role"]
	if !ok {
		return errors.New("no role found for key")
	}
	ki := trustmanager.KeyInfo{
		// GUN is ignored by YubiStore
		Role: data.RoleName(role),
	}
	privKey, err := utils.ParsePEMPrivateKey(bytes, "")
	if err != nil {
		privKey, _, err = trustmanager.GetPasswdDecryptBytes(
			s.passRetriever,
			bytes,
			name,
			ki.Role.String(),
		)
		if err != nil {
			return err
		}
	}
	return s.dest.AddKey(ki, privKey)
}
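Set above rejects any PEM block that does not carry a role header. A minimal sketch of constructing such a block with encoding/pem; the key bytes are placeholders, not real key material:

package main

import (
	"encoding/pem"
	"os"
)

func main() {
	block := &pem.Block{
		Type: "EC PRIVATE KEY",
		// YubiImport.Set reads the role from the PEM headers.
		Headers: map[string]string{"role": "root"},
		Bytes:   []byte{ /* DER-encoded key material would go here */ },
	}
	pem.Encode(os.Stdout, block)
}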
9
vendor/github.com/theupdateframework/notary/trustmanager/yubikey/non_pkcs11.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
// go list ./... and go test ./... will not pick up this package without this
// file, because go list/test ./... does not honor build tags.

// e.g. "go list -tags pkcs11 ./..." will not list this package if all the
// files in it have a build tag.

// See https://github.com/golang/go/issues/11246

package yubikey
10
vendor/github.com/theupdateframework/notary/trustmanager/yubikey/pkcs11_darwin.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
//go:build pkcs11 && darwin
// +build pkcs11,darwin

package yubikey

var possiblePkcs11Libs = []string{
	"/usr/local/lib/libykcs11.dylib",
	"/usr/local/docker/lib/libykcs11.dylib",
	"/usr/local/docker-experimental/lib/libykcs11.dylib",
}
41
vendor/github.com/theupdateframework/notary/trustmanager/yubikey/pkcs11_interface.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
//go:build pkcs11
// +build pkcs11

// an interface around the pkcs11 library, so that things can be mocked out
// for testing

package yubikey

import "github.com/miekg/pkcs11"

// pkcs11LibLoader loads a PKCS11 module and wraps it as an IPKCS11Ctx
type pkcs11LibLoader func(module string) IPKCS11Ctx

func defaultLoader(module string) IPKCS11Ctx {
	return pkcs11.New(module)
}

// IPKCS11Ctx is an interface for wrapping the parts of
// github.com/miekg/pkcs11.Ctx that yubikeystore requires
type IPKCS11Ctx interface {
	Destroy()
	Initialize() error
	Finalize() error
	GetSlotList(tokenPresent bool) ([]uint, error)
	OpenSession(slotID uint, flags uint) (pkcs11.SessionHandle, error)
	CloseSession(sh pkcs11.SessionHandle) error
	Login(sh pkcs11.SessionHandle, userType uint, pin string) error
	Logout(sh pkcs11.SessionHandle) error
	CreateObject(sh pkcs11.SessionHandle, temp []*pkcs11.Attribute) (
		pkcs11.ObjectHandle, error)
	DestroyObject(sh pkcs11.SessionHandle, oh pkcs11.ObjectHandle) error
	GetAttributeValue(sh pkcs11.SessionHandle, o pkcs11.ObjectHandle,
		a []*pkcs11.Attribute) ([]*pkcs11.Attribute, error)
	FindObjectsInit(sh pkcs11.SessionHandle, temp []*pkcs11.Attribute) error
	FindObjects(sh pkcs11.SessionHandle, max int) (
		[]pkcs11.ObjectHandle, bool, error)
	FindObjectsFinal(sh pkcs11.SessionHandle) error
	SignInit(sh pkcs11.SessionHandle, m []*pkcs11.Mechanism,
		o pkcs11.ObjectHandle) error
	Sign(sh pkcs11.SessionHandle, message []byte) ([]byte, error)
}
13
vendor/github.com/theupdateframework/notary/trustmanager/yubikey/pkcs11_linux.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
//go:build pkcs11 && linux
// +build pkcs11,linux

package yubikey

var possiblePkcs11Libs = []string{
	"/usr/lib/libykcs11.so",
	"/usr/lib/libykcs11.so.1", // yubico-piv-tool on Fedora installs here
	"/usr/lib64/libykcs11.so",
	"/usr/lib64/libykcs11.so.1", // yubico-piv-tool on Fedora installs here
	"/usr/lib/x86_64-linux-gnu/libykcs11.so",
	"/usr/local/lib/libykcs11.so",
}
925
vendor/github.com/theupdateframework/notary/trustmanager/yubikey/yubikeystore.go
generated
vendored
Normal file
@ -0,0 +1,925 @@
//go:build pkcs11
// +build pkcs11

package yubikey

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"crypto/x509"
	"errors"
	"fmt"
	"io"
	"math/big"
	"os"
	"time"

	"github.com/miekg/pkcs11"
	"github.com/sirupsen/logrus"
	"github.com/theupdateframework/notary"
	"github.com/theupdateframework/notary/trustmanager"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/signed"
	"github.com/theupdateframework/notary/tuf/utils"
)

const (
	// UserPin is the user pin of a yubikey (in PIV parlance, the PIN)
	UserPin = "123456"
	// SOUserPin is the "Security Officer" user pin - this is the PIV management
	// (MGM) key, which is different from the admin pin of the Yubikey PGP interface
	// (which in PIV parlance is the PUK, and defaults to 12345678)
	SOUserPin = "010203040506070801020304050607080102030405060708"
	numSlots  = 4 // number of slots in the yubikey

	// KeymodeNone means that no touch or PIN is required to sign with the yubikey
	KeymodeNone = 0
	// KeymodeTouch means that only touch is required to sign with the yubikey
	KeymodeTouch = 1
	// KeymodePinOnce means that the pin entry is required once the first time to sign with the yubikey
	KeymodePinOnce = 2
	// KeymodePinAlways means that pin entry is required every time to sign with the yubikey
	KeymodePinAlways = 4

	// the key size, when importing a key into yubikey, MUST be 32 bytes
	ecdsaPrivateKeySize = 32

	sigAttempts = 5
)

// what key mode to use when generating keys
var (
	yubikeyKeymode = KeymodeTouch | KeymodePinOnce
	// order in which to prefer token locations on the yubikey.
	// corresponds to: 9c, 9e, 9d, 9a
	slotIDs = []int{2, 1, 3, 0}
)

// SetYubikeyKeyMode - sets the mode when generating yubikey keys.
// This is to be used for testing. It does nothing if not building with tag
// pkcs11.
func SetYubikeyKeyMode(keyMode int) error {
	// technically 7 (1 | 2 | 4) is valid, but KeymodePinOnce +
	// KeymodePinAlways don't really make sense together
	if keyMode < 0 || keyMode > 5 {
		return errors.New("Invalid key mode")
	}
	yubikeyKeymode = keyMode
	return nil
}
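Since the key modes are bit flags, the default above combines touch and one-time PIN entry. A hedged sketch of how a test might disable both requirements so signing can run unattended; this assumes the pkcs11 build tag and an attached device:

//go:build pkcs11

package yubikey_test

import (
	"testing"

	"github.com/theupdateframework/notary/trustmanager/yubikey"
)

func TestSignWithoutTouch(t *testing.T) {
	// Drop the touch/PIN requirement so signing can run unattended.
	if err := yubikey.SetYubikeyKeyMode(yubikey.KeymodeNone); err != nil {
		t.Fatal(err)
	}
	// Restore the package default when the test finishes.
	defer yubikey.SetYubikeyKeyMode(yubikey.KeymodeTouch | yubikey.KeymodePinOnce)

	// ... exercise key generation and signing here ...
}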
// SetTouchToSignUI - allows configurable UX for notifying a user that they
// need to touch the yubikey to sign. The callback may be used to provide a
// mechanism for updating a GUI (such as removing a modal) after the touch
// has been made
func SetTouchToSignUI(notifier func(), callback func()) {
	touchToSignUI = notifier
	if callback != nil {
		touchDoneCallback = callback
	}
}

var touchToSignUI = func() {
	fmt.Println("Please touch the attached Yubikey to perform signing.")
}

var touchDoneCallback = func() {
	// noop
}

var pkcs11Lib string

func init() {
	for _, loc := range possiblePkcs11Libs {
		_, err := os.Stat(loc)
		if err == nil {
			p := pkcs11.New(loc)
			if p != nil {
				pkcs11Lib = loc
				return
			}
		}
	}
}

// ErrBackupFailed is returned when a YubiStore fails to back up a key that
// is added
type ErrBackupFailed struct {
	err string
}

func (err ErrBackupFailed) Error() string {
	return fmt.Sprintf("Failed to backup private key to: %s", err.err)
}

// errHSMNotPresent indicates that the HSM is not present (as opposed to
// failing), i.e. that we can confidently claim that the key is not stored
// in the HSM without notifying the user about a missing or failing HSM.
type errHSMNotPresent struct {
	err string
}

func (err errHSMNotPresent) Error() string {
	return err.err
}

type yubiSlot struct {
	role   data.RoleName
	slotID []byte
}

// YubiPrivateKey represents a private key inside of a yubikey
type YubiPrivateKey struct {
	data.ECDSAPublicKey
	passRetriever notary.PassRetriever
	slot          []byte
	libLoader     pkcs11LibLoader
}

// yubikeySigner wraps a YubiPrivateKey and implements the crypto.Signer interface
type yubikeySigner struct {
	YubiPrivateKey
}

// NewYubiPrivateKey returns a YubiPrivateKey, which implements the data.PrivateKey
// interface except that the private material is inaccessible
func NewYubiPrivateKey(slot []byte, pubKey data.ECDSAPublicKey,
	passRetriever notary.PassRetriever) *YubiPrivateKey {

	return &YubiPrivateKey{
		ECDSAPublicKey: pubKey,
		passRetriever:  passRetriever,
		slot:           slot,
		libLoader:      defaultLoader,
	}
}

// Public is a required method of the crypto.Signer interface
func (ys *yubikeySigner) Public() crypto.PublicKey {
	publicKey, err := x509.ParsePKIXPublicKey(ys.YubiPrivateKey.Public())
	if err != nil {
		return nil
	}

	return publicKey
}

func (y *YubiPrivateKey) setLibLoader(loader pkcs11LibLoader) {
	y.libLoader = loader
}

// CryptoSigner returns a crypto.Signer that wraps the YubiPrivateKey. Needed for
// certificate generation only
func (y *YubiPrivateKey) CryptoSigner() crypto.Signer {
	return &yubikeySigner{YubiPrivateKey: *y}
}

// Private is not implemented in hardware keys
func (y *YubiPrivateKey) Private() []byte {
	// We cannot return the private material from a Yubikey
	// TODO(david): We probably want to return an error here
	return nil
}
// SignatureAlgorithm returns which algorithm this key uses to sign - currently
// hardcoded to ECDSA
func (y YubiPrivateKey) SignatureAlgorithm() data.SigAlgorithm {
	return data.ECDSASignature
}

// Sign is a required method of the crypto.Signer interface and the data.PrivateKey
// interface
func (y *YubiPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) {
	ctx, session, err := SetupHSMEnv(pkcs11Lib, y.libLoader)
	if err != nil {
		return nil, err
	}
	defer cleanup(ctx, session)

	v := signed.Verifiers[data.ECDSASignature]
	for i := 0; i < sigAttempts; i++ {
		sig, err := sign(ctx, session, y.slot, y.passRetriever, msg)
		if err != nil {
			return nil, fmt.Errorf("failed to sign using Yubikey: %v", err)
		}
		if err := v.Verify(&y.ECDSAPublicKey, sig, msg); err == nil {
			return sig, nil
		}
	}
	return nil, errors.New("failed to generate signature on Yubikey")
}

// If a byte array is less than the number of bytes specified by
// ecdsaPrivateKeySize, left-zero-pad the byte array until
// it is the required size.
func ensurePrivateKeySize(payload []byte) []byte {
	final := payload
	if len(payload) < ecdsaPrivateKeySize {
		final = make([]byte, ecdsaPrivateKeySize)
		copy(final[ecdsaPrivateKeySize-len(payload):], payload)
	}
	return final
}
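ensurePrivateKeySize left-zero-pads short scalars so the CKA_VALUE attribute written to the yubikey is always exactly 32 bytes; for example, a 31-byte D value (one whose big-endian encoding dropped a leading zero) gets that zero byte back. A standalone illustration using a copy of the function:

package main

import "fmt"

const ecdsaPrivateKeySize = 32

// copy of ensurePrivateKeySize from above, for demonstration
func ensurePrivateKeySize(payload []byte) []byte {
	final := payload
	if len(payload) < ecdsaPrivateKeySize {
		final = make([]byte, ecdsaPrivateKeySize)
		copy(final[ecdsaPrivateKeySize-len(payload):], payload)
	}
	return final
}

func main() {
	short := make([]byte, 31) // a D value that lost a leading zero byte
	padded := ensurePrivateKeySize(short)
	fmt.Println(len(padded), padded[0]) // 32 0
}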
// addECDSAKey adds a key to the yubikey
func addECDSAKey(
	ctx IPKCS11Ctx,
	session pkcs11.SessionHandle,
	privKey data.PrivateKey,
	pkcs11KeyID []byte,
	passRetriever notary.PassRetriever,
	role data.RoleName,
) error {
	logrus.Debugf("Attempting to add key to yubikey with ID: %s", privKey.ID())

	err := login(ctx, session, passRetriever, pkcs11.CKU_SO, SOUserPin)
	if err != nil {
		return err
	}
	defer ctx.Logout(session)

	// Create an ecdsa.PrivateKey out of the private key bytes
	ecdsaPrivKey, err := x509.ParseECPrivateKey(privKey.Private())
	if err != nil {
		return err
	}

	ecdsaPrivKeyD := ensurePrivateKeySize(ecdsaPrivKey.D.Bytes())

	// Hard-coded policy: the generated certificate expires in 10 years.
	startTime := time.Now()
	template, err := utils.NewCertificate(role.String(), startTime, startTime.AddDate(10, 0, 0))
	if err != nil {
		return fmt.Errorf("failed to create the certificate template: %v", err)
	}

	certBytes, err := x509.CreateCertificate(rand.Reader, template, template, ecdsaPrivKey.Public(), ecdsaPrivKey)
	if err != nil {
		return fmt.Errorf("failed to create the certificate: %v", err)
	}

	certTemplate := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE),
		pkcs11.NewAttribute(pkcs11.CKA_VALUE, certBytes),
		pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID),
	}

	privateKeyTemplate := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PRIVATE_KEY),
		pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_ECDSA),
		pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID),
		pkcs11.NewAttribute(pkcs11.CKA_EC_PARAMS, []byte{0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07}),
		pkcs11.NewAttribute(pkcs11.CKA_VALUE, ecdsaPrivKeyD),
		pkcs11.NewAttribute(pkcs11.CKA_VENDOR_DEFINED, yubikeyKeymode),
	}

	_, err = ctx.CreateObject(session, certTemplate)
	if err != nil {
		return fmt.Errorf("error importing: %v", err)
	}

	_, err = ctx.CreateObject(session, privateKeyTemplate)
	if err != nil {
		return fmt.Errorf("error importing: %v", err)
	}

	return nil
}
func getECDSAKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte) (*data.ECDSAPublicKey, data.RoleName, error) {
	findTemplate := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
		pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID),
		pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PUBLIC_KEY),
	}

	attrTemplate := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, []byte{0}),
		pkcs11.NewAttribute(pkcs11.CKA_EC_POINT, []byte{0}),
		pkcs11.NewAttribute(pkcs11.CKA_EC_PARAMS, []byte{0}),
	}

	if err := ctx.FindObjectsInit(session, findTemplate); err != nil {
		logrus.Debugf("Failed to init: %s", err.Error())
		return nil, "", err
	}
	obj, _, err := ctx.FindObjects(session, 1)
	if err != nil {
		logrus.Debugf("Failed to find objects: %v", err)
		return nil, "", err
	}
	if err := ctx.FindObjectsFinal(session); err != nil {
		logrus.Debugf("Failed to finalize: %s", err.Error())
		return nil, "", err
	}
	if len(obj) != 1 {
		logrus.Debugf("should have found one object")
		return nil, "", errors.New("no matching keys found inside of yubikey")
	}

	// Retrieve the public-key material to be able to create a new ECDSAPublicKey
	attr, err := ctx.GetAttributeValue(session, obj[0], attrTemplate)
	if err != nil {
		logrus.Debugf("Failed to get Attribute for: %v", obj[0])
		return nil, "", err
	}

	// Iterate through all the attributes of this key and save the
	// CKA_EC_POINT value. Iterating avoids ordering-specific issues.
	var rawPubKey []byte
	for _, a := range attr {
		if a.Type == pkcs11.CKA_EC_POINT {
			rawPubKey = a.Value
		}
	}

	ecdsaPubKey := ecdsa.PublicKey{Curve: elliptic.P256(), X: new(big.Int).SetBytes(rawPubKey[3:35]), Y: new(big.Int).SetBytes(rawPubKey[35:])}
	pubBytes, err := x509.MarshalPKIXPublicKey(&ecdsaPubKey)
	if err != nil {
		logrus.Debugf("Failed to Marshal public key")
		return nil, "", err
	}

	return data.NewECDSAPublicKey(pubBytes), data.CanonicalRootRole, nil
}
// sign returns a signature for a given signature request
func sign(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever notary.PassRetriever, payload []byte) ([]byte, error) {
	err := login(ctx, session, passRetriever, pkcs11.CKU_USER, UserPin)
	if err != nil {
		return nil, fmt.Errorf("error logging in: %v", err)
	}
	defer ctx.Logout(session)

	// Define the ECDSA Private key template
	class := pkcs11.CKO_PRIVATE_KEY
	privateKeyTemplate := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_CLASS, class),
		pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_ECDSA),
		pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID),
	}

	if err := ctx.FindObjectsInit(session, privateKeyTemplate); err != nil {
		logrus.Debugf("Failed to init find objects: %s", err.Error())
		return nil, err
	}
	obj, _, err := ctx.FindObjects(session, 1)
	if err != nil {
		logrus.Debugf("Failed to find objects: %v", err)
		return nil, err
	}
	if err = ctx.FindObjectsFinal(session); err != nil {
		logrus.Debugf("Failed to finalize find objects: %s", err.Error())
		return nil, err
	}
	if len(obj) != 1 {
		return nil, errors.New("length of objects found not 1")
	}

	var sig []byte
	err = ctx.SignInit(
		session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_ECDSA, nil)}, obj[0])
	if err != nil {
		return nil, err
	}

	// Get the SHA256 of the payload
	digest := sha256.Sum256(payload)

	if (yubikeyKeymode & KeymodeTouch) > 0 {
		touchToSignUI()
		defer touchDoneCallback()
	}
	// a call to Sign, whether or not Sign fails, will clear the SignInit
	sig, err = ctx.Sign(session, digest[:])
	if err != nil {
		logrus.Debugf("Error while signing: %s", err)
		return nil, err
	}

	if sig == nil {
		return nil, errors.New("Failed to create signature")
	}
	return sig[:], nil
}
func yubiRemoveKey(ctx IPKCS11Ctx, session pkcs11.SessionHandle, pkcs11KeyID []byte, passRetriever notary.PassRetriever, keyID string) error {
	err := login(ctx, session, passRetriever, pkcs11.CKU_SO, SOUserPin)
	if err != nil {
		return err
	}
	defer ctx.Logout(session)

	template := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
		pkcs11.NewAttribute(pkcs11.CKA_ID, pkcs11KeyID),
		//pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PRIVATE_KEY),
		pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE),
	}

	if err := ctx.FindObjectsInit(session, template); err != nil {
		logrus.Debugf("Failed to init find objects: %s", err.Error())
		return err
	}
	obj, b, err := ctx.FindObjects(session, 1)
	if err != nil {
		logrus.Debugf("Failed to find objects: %s %v", err.Error(), b)
		return err
	}
	if err := ctx.FindObjectsFinal(session); err != nil {
		logrus.Debugf("Failed to finalize find objects: %s", err.Error())
		return err
	}
	if len(obj) != 1 {
		logrus.Debugf("should have found exactly one object")
		// note: the vendored code returned err here, which is nil at this
		// point; return an explicit error instead
		return errors.New("no matching objects found inside of yubikey")
	}

	// Delete the certificate
	err = ctx.DestroyObject(session, obj[0])
	if err != nil {
		logrus.Debugf("Failed to delete cert")
		return err
	}
	return nil
}
func yubiListKeys(ctx IPKCS11Ctx, session pkcs11.SessionHandle) (keys map[string]yubiSlot, err error) {
	keys = make(map[string]yubiSlot)

	attrTemplate := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_ID, []byte{0}),
		pkcs11.NewAttribute(pkcs11.CKA_VALUE, []byte{0}),
	}

	objs, err := listObjects(ctx, session)
	if err != nil {
		return nil, err
	}

	if len(objs) == 0 {
		return nil, errors.New("no keys found in yubikey")
	}
	logrus.Debugf("Found %d objects matching list filters", len(objs))
	for _, obj := range objs {
		var (
			cert *x509.Certificate
			slot []byte
		)
		// Retrieve the certificate material so we can rebuild the public key
		attr, err := ctx.GetAttributeValue(session, obj, attrTemplate)
		if err != nil {
			logrus.Debugf("Failed to get Attribute for: %v", obj)
			continue
		}

		// Iterate through all the attributes of this object and save the
		// CKA_ID and CKA_VALUE values. Iterating avoids ordering-specific issues.
		for _, a := range attr {
			if a.Type == pkcs11.CKA_ID {
				slot = a.Value
			}
			if a.Type == pkcs11.CKA_VALUE {
				cert, err = x509.ParseCertificate(a.Value)
				if err != nil {
					continue
				}
				if !data.ValidRole(data.RoleName(cert.Subject.CommonName)) {
					continue
				}
			}
		}

		// we found nothing
		if cert == nil {
			continue
		}

		var ecdsaPubKey *ecdsa.PublicKey
		switch cert.PublicKeyAlgorithm {
		case x509.ECDSA:
			ecdsaPubKey = cert.PublicKey.(*ecdsa.PublicKey)
		default:
			logrus.Infof("Unsupported x509 PublicKeyAlgorithm: %d", cert.PublicKeyAlgorithm)
			continue
		}

		pubBytes, err := x509.MarshalPKIXPublicKey(ecdsaPubKey)
		if err != nil {
			logrus.Debugf("Failed to Marshal public key")
			continue
		}

		keys[data.NewECDSAPublicKey(pubBytes).ID()] = yubiSlot{
			role:   data.RoleName(cert.Subject.CommonName),
			slotID: slot,
		}
	}
	return
}
func listObjects(ctx IPKCS11Ctx, session pkcs11.SessionHandle) ([]pkcs11.ObjectHandle, error) {
	findTemplate := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
		pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_CERTIFICATE),
	}

	if err := ctx.FindObjectsInit(session, findTemplate); err != nil {
		logrus.Debugf("Failed to init: %s", err.Error())
		return nil, err
	}

	objs, b, err := ctx.FindObjects(session, numSlots)
	for err == nil {
		var o []pkcs11.ObjectHandle
		o, b, err = ctx.FindObjects(session, numSlots)
		if err != nil {
			continue
		}
		if len(o) == 0 {
			break
		}
		objs = append(objs, o...)
	}
	if err != nil {
		logrus.Debugf("Failed to find: %s %v", err.Error(), b)
		if len(objs) == 0 {
			return nil, err
		}
	}
	if err := ctx.FindObjectsFinal(session); err != nil {
		logrus.Debugf("Failed to finalize: %s", err.Error())
		return nil, err
	}
	return objs, nil
}
func getNextEmptySlot(ctx IPKCS11Ctx, session pkcs11.SessionHandle) ([]byte, error) {
	findTemplate := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
	}
	attrTemplate := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_ID, []byte{0}),
	}

	if err := ctx.FindObjectsInit(session, findTemplate); err != nil {
		logrus.Debugf("Failed to init: %s", err.Error())
		return nil, err
	}
	objs, b, err := ctx.FindObjects(session, numSlots)
	// if there are more objects than `numSlots`, get all of them until
	// there are no more to get
	for err == nil {
		var o []pkcs11.ObjectHandle
		o, b, err = ctx.FindObjects(session, numSlots)
		if err != nil {
			continue
		}
		if len(o) == 0 {
			break
		}
		objs = append(objs, o...)
	}
	taken := make(map[int]bool)
	if err != nil {
		logrus.Debugf("Failed to find: %s %v", err.Error(), b)
		return nil, err
	}
	if err = ctx.FindObjectsFinal(session); err != nil {
		logrus.Debugf("Failed to finalize: %s\n", err.Error())
		return nil, err
	}
	for _, obj := range objs {
		// Retrieve the slot ID
		attr, err := ctx.GetAttributeValue(session, obj, attrTemplate)
		if err != nil {
			continue
		}

		// Iterate through attributes. If an ID attr was found, mark it as taken
		for _, a := range attr {
			if a.Type == pkcs11.CKA_ID {
				if len(a.Value) < 1 {
					continue
				}
				// a byte will always be capable of representing all slot IDs
				// for the Yubikeys
				slotNum := int(a.Value[0])
				if slotNum >= numSlots {
					// defensive
					continue
				}
				taken[slotNum] = true
			}
		}
	}
	// iterate the token locations in our preferred order and use the first
	// available one. Otherwise exit the loop and return an error.
	for _, loc := range slotIDs {
		if !taken[loc] {
			return []byte{byte(loc)}, nil
		}
	}
	return nil, errors.New("yubikey has no available slots")
}
// YubiStore is a KeyStore for private keys inside a Yubikey
type YubiStore struct {
	passRetriever notary.PassRetriever
	keys          map[string]yubiSlot
	backupStore   trustmanager.KeyStore
	libLoader     pkcs11LibLoader
}

// NewYubiStore returns a YubiStore, given a backup key store to write any
// generated keys to (usually a KeyFileStore)
func NewYubiStore(backupStore trustmanager.KeyStore, passphraseRetriever notary.PassRetriever) (
	*YubiStore, error) {

	s := &YubiStore{
		passRetriever: passphraseRetriever,
		keys:          make(map[string]yubiSlot),
		backupStore:   backupStore,
		libLoader:     defaultLoader,
	}
	s.ListKeys() // populate keys field
	return s, nil
}

// Name returns a user friendly name for the location this store
// keeps its data
func (s YubiStore) Name() string {
	return "yubikey"
}

func (s *YubiStore) setLibLoader(loader pkcs11LibLoader) {
	s.libLoader = loader
}

// ListKeys returns a list of keys in the yubikey store
func (s *YubiStore) ListKeys() map[string]trustmanager.KeyInfo {
	if len(s.keys) > 0 {
		return buildKeyMap(s.keys)
	}
	ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
	if err != nil {
		logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
		return nil
	}
	defer cleanup(ctx, session)

	keys, err := yubiListKeys(ctx, session)
	if err != nil {
		logrus.Debugf("Failed to list keys from the yubikey: %s", err.Error())
		return nil
	}
	s.keys = keys

	return buildKeyMap(keys)
}

// AddKey puts a key inside the Yubikey, as well as writing it to the backup store
func (s *YubiStore) AddKey(keyInfo trustmanager.KeyInfo, privKey data.PrivateKey) error {
	added, err := s.addKey(privKey.ID(), keyInfo.Role, privKey)
	if err != nil {
		return err
	}
	if added && s.backupStore != nil {
		err = s.backupStore.AddKey(keyInfo, privKey)
		if err != nil {
			defer s.RemoveKey(privKey.ID())
			return ErrBackupFailed{err: err.Error()}
		}
	}
	return nil
}
// Only add if we haven't seen the key already. Return whether the key was
// added.
func (s *YubiStore) addKey(keyID string, role data.RoleName, privKey data.PrivateKey) (
	bool, error) {

	// We only allow adding root keys for now
	if role != data.CanonicalRootRole {
		return false, fmt.Errorf(
			"yubikey only supports storing root keys, got %s for key: %s", role, keyID)
	}

	ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
	if err != nil {
		logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
		return false, err
	}
	defer cleanup(ctx, session)

	if k, ok := s.keys[keyID]; ok {
		if k.role == role {
			// already have the key and it's associated with the correct role
			return false, nil
		}
	}

	slot, err := getNextEmptySlot(ctx, session)
	if err != nil {
		logrus.Debugf("Failed to get an empty yubikey slot: %s", err.Error())
		return false, err
	}
	logrus.Debugf("Attempting to store key using yubikey slot %v", slot)

	err = addECDSAKey(
		ctx, session, privKey, slot, s.passRetriever, role)
	if err == nil {
		s.keys[privKey.ID()] = yubiSlot{
			role:   role,
			slotID: slot,
		}
		return true, nil
	}
	logrus.Debugf("Failed to add key to yubikey: %v", err)

	return false, err
}
// GetKey retrieves a key from the Yubikey only (it does not look inside the
// backup store)
func (s *YubiStore) GetKey(keyID string) (data.PrivateKey, data.RoleName, error) {
	ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
	if err != nil {
		logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
		if _, ok := err.(errHSMNotPresent); ok {
			err = trustmanager.ErrKeyNotFound{KeyID: keyID}
		}
		return nil, "", err
	}
	defer cleanup(ctx, session)

	key, ok := s.keys[keyID]
	if !ok {
		return nil, "", trustmanager.ErrKeyNotFound{KeyID: keyID}
	}

	pubKey, alias, err := getECDSAKey(ctx, session, key.slotID)
	if err != nil {
		logrus.Debugf("Failed to get key from slot %s: %s", key.slotID, err.Error())
		return nil, "", err
	}
	// Check to see if we're returning the intended keyID
	if pubKey.ID() != keyID {
		return nil, "", fmt.Errorf("expected root key: %s, but found: %s", keyID, pubKey.ID())
	}
	privKey := NewYubiPrivateKey(key.slotID, *pubKey, s.passRetriever)
	if privKey == nil {
		return nil, "", errors.New("could not initialize new YubiPrivateKey")
	}

	return privKey, alias, err
}
// RemoveKey deletes a key from the Yubikey only (it does not remove it from the
// backup store)
func (s *YubiStore) RemoveKey(keyID string) error {
	ctx, session, err := SetupHSMEnv(pkcs11Lib, s.libLoader)
	if err != nil {
		logrus.Debugf("No yubikey found, using alternative key storage: %s", err.Error())
		return nil
	}
	defer cleanup(ctx, session)

	key, ok := s.keys[keyID]
	if !ok {
		return errors.New("Key not present in yubikey")
	}
	err = yubiRemoveKey(ctx, session, key.slotID, s.passRetriever, keyID)
	if err == nil {
		delete(s.keys, keyID)
	} else {
		logrus.Debugf("Failed to remove from the yubikey KeyID %s: %v", keyID, err)
	}

	return err
}

// GetKeyInfo is not yet implemented
func (s *YubiStore) GetKeyInfo(keyID string) (trustmanager.KeyInfo, error) {
	return trustmanager.KeyInfo{}, fmt.Errorf("Not yet implemented")
}

func cleanup(ctx IPKCS11Ctx, session pkcs11.SessionHandle) {
	err := ctx.CloseSession(session)
	if err != nil {
		logrus.Debugf("Error closing session: %s", err.Error())
	}
	finalizeAndDestroy(ctx)
}

func finalizeAndDestroy(ctx IPKCS11Ctx) {
	err := ctx.Finalize()
	if err != nil {
		logrus.Debugf("Error finalizing: %s", err.Error())
	}
	ctx.Destroy()
}
// SetupHSMEnv loads the given PKCS11 library and opens a read-write session
// with the first HSM slot found
func SetupHSMEnv(libraryPath string, libLoader pkcs11LibLoader) (
	IPKCS11Ctx, pkcs11.SessionHandle, error) {

	if libraryPath == "" {
		return nil, 0, errHSMNotPresent{err: "no library found"}
	}
	p := libLoader(libraryPath)

	if p == nil {
		return nil, 0, fmt.Errorf("failed to load library %s", libraryPath)
	}

	if err := p.Initialize(); err != nil {
		defer finalizeAndDestroy(p)
		return nil, 0, fmt.Errorf("found library %s, but initialize error %s", libraryPath, err.Error())
	}

	slots, err := p.GetSlotList(true)
	if err != nil {
		defer finalizeAndDestroy(p)
		return nil, 0, fmt.Errorf(
			"loaded library %s, but failed to list HSM slots %s", libraryPath, err)
	}
	// Check to see if we got any slots from the HSM.
	if len(slots) < 1 {
		defer finalizeAndDestroy(p)
		return nil, 0, fmt.Errorf(
			"loaded library %s, but no HSM slots found", libraryPath)
	}

	// CKF_SERIAL_SESSION: TRUE if cryptographic functions are performed in serial with the application; FALSE if the functions may be performed in parallel with the application.
	// CKF_RW_SESSION: TRUE if the session is read/write; FALSE if the session is read-only
	session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
	if err != nil {
		defer cleanup(p, session)
		return nil, 0, fmt.Errorf(
			"loaded library %s, but failed to start session with HSM %s",
			libraryPath, err)
	}

	logrus.Debugf("Initialized PKCS11 library %s and started HSM session", libraryPath)
	return p, session, nil
}

// IsAccessible returns true if a Yubikey can be accessed
func IsAccessible() bool {
	if pkcs11Lib == "" {
		return false
	}
	ctx, session, err := SetupHSMEnv(pkcs11Lib, defaultLoader)
	if err != nil {
		return false
	}
	defer cleanup(ctx, session)
	return true
}
func login(ctx IPKCS11Ctx, session pkcs11.SessionHandle, passRetriever notary.PassRetriever, userFlag uint, defaultPassw string) error {
	// try default password
	err := ctx.Login(session, userFlag, defaultPassw)
	if err == nil {
		return nil
	}

	// default failed, ask user for password
	for attempts := 0; ; attempts++ {
		var (
			giveup bool
			err    error
			user   string
		)
		if userFlag == pkcs11.CKU_SO {
			user = "SO Pin"
		} else {
			user = "User Pin"
		}
		passwd, giveup, err := passRetriever(user, "yubikey", false, attempts)
		// Check if the passphrase retriever got an error or if it is telling us to give up
		if giveup || err != nil {
			return trustmanager.ErrPasswordInvalid{}
		}
		if attempts > 2 {
			return trustmanager.ErrAttemptsExceeded{}
		}

		// attempt to login. Loop if failed
		err = ctx.Login(session, userFlag, passwd)
		if err == nil {
			return nil
		}
	}
}

func buildKeyMap(keys map[string]yubiSlot) map[string]trustmanager.KeyInfo {
	res := make(map[string]trustmanager.KeyInfo)
	for k, v := range keys {
		res[k] = trustmanager.KeyInfo{Role: v.role, Gun: ""}
	}
	return res
}
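Putting the pieces of this file together: a hedged usage sketch that checks for an accessible yubikey and wires a YubiStore to a file-backed backup store. It assumes the pkcs11 build tag, attached hardware, and the notary passphrase.ConstantRetriever helper (part of the wider notary codebase, not shown in this diff):

//go:build pkcs11

package main

import (
	"fmt"
	"log"

	"github.com/theupdateframework/notary/passphrase"
	"github.com/theupdateframework/notary/trustmanager"
	"github.com/theupdateframework/notary/trustmanager/yubikey"
)

func main() {
	retriever := passphrase.ConstantRetriever("yubikey-backup-pass")

	// Only try the hardware path when a PKCS11 library and a yubikey are present.
	if !yubikey.IsAccessible() {
		log.Fatal("no yubikey available")
	}

	// Generated keys are mirrored to this backup store (see AddKey above).
	backup, err := trustmanager.NewKeyFileStore("/tmp/notary-keys", retriever)
	if err != nil {
		log.Fatal(err)
	}

	store, err := yubikey.NewYubiStore(backup, retriever)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(store.ListKeys())
}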
37
vendor/github.com/theupdateframework/notary/trustpinning/ca.crt
generated
vendored
Normal file
@ -0,0 +1,37 @@
-----BEGIN CERTIFICATE-----
MIIGMzCCBBugAwIBAgIBATANBgkqhkiG9w0BAQsFADBfMQswCQYDVQQGEwJVUzEL
MAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv
Y2tlcjEaMBgGA1UEAwwRTm90YXJ5IFRlc3RpbmcgQ0EwHhcNMTUwNzE2MDQyNTAz
WhcNMjUwNzEzMDQyNTAzWjBfMRowGAYDVQQDDBFOb3RhcnkgVGVzdGluZyBDQTEL
MAkGA1UEBhMCVVMxFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNVBAoMBkRv
Y2tlcjELMAkGA1UECAwCQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
AQCwVVD4pK7z7pXPpJbaZ1Hg5eRXIcaYtbFPCnN0iqy9HsVEGnEn5BPNSEsuP+m0
5N0qVV7DGb1SjiloLXD1qDDvhXWk+giS9ppqPHPLVPB4bvzsqwDYrtpbqkYvO0YK
0SL3kxPXUFdlkFfgu0xjlczm2PhWG3Jd8aAtspL/L+VfPA13JUaWxSLpui1In8rh
gAyQTK6Q4Of6GbJYTnAHb59UoLXSzB5AfqiUq6L7nEYYKoPflPbRAIWL/UBm0c+H
ocms706PYpmPS2RQv3iOGmnn9hEVp3P6jq7WAevbA4aYGx5EsbVtYABqJBbFWAuw
wTGRYmzn0Mj0eTMge9ztYB2/2sxdTe6uhmFgpUXngDqJI5O9N3zPfvlEImCky3HM
jJoL7g5smqX9o1P+ESLh0VZzhh7IDPzQTXpcPIS/6z0l22QGkK/1N1PaADaUHdLL
vSav3y2BaEmPvf2fkZj8yP5eYgi7Cw5ONhHLDYHFcl9Zm/ywmdxHJETz9nfgXnsW
HNxDqrkCVO46r/u6rSrUt6hr3oddJG8s8Jo06earw6XU3MzM+3giwkK0SSM3uRPq
4AscR1Tv+E31AuOAmjqYQoT29bMIxoSzeljj/YnedwjW45pWyc3JoHaibDwvW9Uo
GSZBVy4hrM/Fa7XCWv1WfHNW1gDwaLYwDnl5jFmRBvcfuQIDAQABo4H5MIH2MIGR
BgNVHSMEgYkwgYaAFHUM1U3E4WyL1nvFd+dPY8f4O2hZoWOkYTBfMQswCQYDVQQG
EwJVUzELMAkGA1UECAwCQ0ExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDzANBgNV
BAoMBkRvY2tlcjEaMBgGA1UEAwwRTm90YXJ5IFRlc3RpbmcgQ0GCCQDCeDLbemIT
SzASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEF
BQcDATAOBgNVHQ8BAf8EBAMCAUYwHQYDVR0OBBYEFHe48hcBcAp0bUVlTxXeRA4o
E16pMA0GCSqGSIb3DQEBCwUAA4ICAQAWUtAPdUFpwRq+N1SzGUejSikeMGyPZscZ
JBUCmhZoFufgXGbLO5OpcRLaV3Xda0t/5PtdGMSEzczeoZHWknDtw+79OBittPPj
Sh1oFDuPo35R7eP624lUCch/InZCphTaLx9oDLGcaK3ailQ9wjBdKdlBl8KNKIZp
a13aP5rnSm2Jva+tXy/yi3BSds3dGD8ITKZyI/6AFHxGvObrDIBpo4FF/zcWXVDj
paOmxplRtM4Hitm+sXGvfqJe4x5DuOXOnPrT3dHvRT6vSZUoKobxMqmRTOcrOIPa
EeMpOobshORuRntMDYvvgO3D6p6iciDW2Vp9N6rdMdfOWEQN8JVWvB7IxRHk9qKJ
vYOWVbczAt0qpMvXF3PXLjZbUM0knOdUKIEbqP4YUbgdzx6RtgiiY930Aj6tAtce
0fpgNlvjMRpSBuWTlAfNNjG/YhndMz9uI68TMfFpR3PcgVIv30krw/9VzoLi2Dpe
ow6DrGO6oi+DhN78P4jY/O9UczZK2roZL1Oi5P0RIxf23UZC7x1DlcN3nBr4sYSv
rBx4cFTMNpwU+nzsIi4djcFDKmJdEOyjMnkP2v0Lwe7yvK08pZdEu+0zbrq17kue
XpXLc7K68QB15yxzGylU5rRwzmC/YsAVyE4eoGu8PxWxrERvHby4B8YP0vAfOraL
lKmXlK4dTg==
-----END CERTIFICATE-----
304
vendor/github.com/theupdateframework/notary/trustpinning/certs.go
generated
vendored
Normal file
@ -0,0 +1,304 @@
package trustpinning

import (
	"crypto/x509"
	"errors"
	"fmt"
	"strings"

	"github.com/sirupsen/logrus"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/signed"
	"github.com/theupdateframework/notary/tuf/utils"
)

const wildcard = "*"

// ErrValidationFail is returned when there are no valid trusted certificates
// being served inside of the roots.json
type ErrValidationFail struct {
	Reason string
}

// Error returns the error string for ErrValidationFail
func (err ErrValidationFail) Error() string {
	return fmt.Sprintf("could not validate the path to a trusted root: %s", err.Reason)
}

// ErrRootRotationFail is returned when we fail to do a full root key rotation
// by either failing to add the new root certificate or to delete the old ones
type ErrRootRotationFail struct {
	Reason string
}

// Error returns the error string for ErrRootRotationFail
func (err ErrRootRotationFail) Error() string {
	return fmt.Sprintf("could not rotate trust to a new trusted root: %s", err.Reason)
}

func prettyFormatCertIDs(certs map[string]*x509.Certificate) string {
	ids := make([]string, 0, len(certs))
	for id := range certs {
		ids = append(ids, id)
	}
	return strings.Join(ids, ", ")
}
/*
ValidateRoot receives a new root, validates its correctness and attempts to
do root key rotation if needed.

First we check if we have any trusted certificates for a particular GUN in
a previous root, if we have one. If the previous root is not nil and we find
certificates for this GUN, we've already seen this repository before, and
have a list of trusted certificates for it. In this case, we use this list of
certificates to attempt to validate this root file.

If the previous validation succeeds, we check the integrity of the root by
making sure that it is validated by itself. This means that we will attempt to
validate the root data with the certificates that are included in the root keys
themselves.

However, if we do not have any current trusted certificates for this GUN, we
check if there are any pinned certificates specified in the trust_pinning section
of the notary client config. If this section specifies a Certs section with this
GUN, we attempt to validate that the certificates present in the downloaded root
file match the pinned ID.

If the Certs section is empty for this GUN, we check if the trust_pinning
section specifies a CA section for this GUN. If so, we check that the
specified CA is valid and has signed a certificate included in the downloaded
root file. The specified CA can be a prefix for this GUN.

If both the Certs and CA configs do not match this GUN, we fall back to the TOFU
section in the config: if true, we trust the certificates specified in the root for
this GUN. If we later see a different certificate for that GUN, we return
an ErrValidationFail error.

Note that since we only allow trust data to be downloaded over an HTTPS channel,
we are using the current public PKI to validate the first download of the certificate,
adding an extra layer of security over the normal (SSH-style) trust model.
We shall call this: TOFUS.

Validation failure at any step will result in an ErrValidationFail error.
*/
func ValidateRoot(prevRoot *data.SignedRoot, root *data.Signed, gun data.GUN, trustPinning TrustPinConfig) (*data.SignedRoot, error) {
	logrus.Debugf("entered ValidateRoot with dns: %s", gun)
	signedRoot, err := data.RootFromSigned(root)
	if err != nil {
		return nil, err
	}

	rootRole, err := signedRoot.BuildBaseRole(data.CanonicalRootRole)
	if err != nil {
		return nil, err
	}

	// Retrieve all the leaf and intermediate certificates in root for which the CN matches the GUN
	allLeafCerts, allIntCerts := parseAllCerts(signedRoot)
	certsFromRoot, err := validRootLeafCerts(allLeafCerts, gun, true)
	validIntCerts := validRootIntCerts(allIntCerts)

	if err != nil {
		logrus.Debugf("error retrieving valid leaf certificates for: %s, %v", gun, err)
		return nil, &ErrValidationFail{Reason: "unable to retrieve valid leaf certificates"}
	}

	logrus.Debugf("found %d leaf certs, of which %d are valid leaf certs for %s", len(allLeafCerts), len(certsFromRoot), gun)

	// If we have a previous root, let's try to use it to validate that this new root is valid.
	havePrevRoot := prevRoot != nil
	if havePrevRoot {
		// Retrieve all the trusted certificates from our previous root
		// Note that we do not validate expiries here since our originally trusted root might have expired certs
		allTrustedLeafCerts, allTrustedIntCerts := parseAllCerts(prevRoot)
		trustedLeafCerts, err := validRootLeafCerts(allTrustedLeafCerts, gun, false)
		if err != nil {
			return nil, &ErrValidationFail{Reason: "could not retrieve trusted certs from previous root role data"}
		}

		// Use the certificates we found in the previous root for the GUN to verify its signatures
		// This could potentially be an empty set, in which case we will fail to verify
		logrus.Debugf("found %d valid root leaf certificates for %s: %s", len(trustedLeafCerts), gun,
			prettyFormatCertIDs(trustedLeafCerts))

		// Extract the previous root's threshold for signature verification
		prevRootRoleData, ok := prevRoot.Signed.Roles[data.CanonicalRootRole]
		if !ok {
			return nil, &ErrValidationFail{Reason: "could not retrieve previous root role data"}
		}
		err = signed.VerifySignatures(
			root, data.BaseRole{Keys: utils.CertsToKeys(trustedLeafCerts, allTrustedIntCerts), Threshold: prevRootRoleData.Threshold})
		if err != nil {
			logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
			return nil, &ErrRootRotationFail{Reason: "failed to validate data with current trusted certificates"}
		}
		// Clear the IsValid marks we could have received from VerifySignatures
		for i := range root.Signatures {
			root.Signatures[i].IsValid = false
		}
	}

	// Regardless of having a previous root or not, confirm that the new root validates against the trust pinning
	logrus.Debugf("checking root against trust_pinning config for %s", gun)
	trustPinCheckFunc, err := NewTrustPinChecker(trustPinning, gun, !havePrevRoot)
	if err != nil {
		return nil, &ErrValidationFail{Reason: err.Error()}
	}

	validPinnedCerts := map[string]*x509.Certificate{}
	for id, cert := range certsFromRoot {
		logrus.Debugf("checking trust-pinning for cert: %s", id)
		if ok := trustPinCheckFunc(cert, validIntCerts[id]); !ok {
			logrus.Debugf("trust-pinning check failed for cert: %s", id)
			continue
		}
		validPinnedCerts[id] = cert
	}
	if len(validPinnedCerts) == 0 {
		return nil, &ErrValidationFail{Reason: "unable to match any certificates to trust_pinning config"}
	}
	certsFromRoot = validPinnedCerts

	// Validate the integrity of the new root (does it have valid signatures)
	// Note that certsFromRoot is guaranteed to be unchanged only if we had prior cert data for this GUN or enabled TOFUS
	// If we attempted to pin a certain certificate or CA, certsFromRoot could have been pruned accordingly
	err = signed.VerifySignatures(root, data.BaseRole{
		Keys: utils.CertsToKeys(certsFromRoot, validIntCerts), Threshold: rootRole.Threshold})
	if err != nil {
		logrus.Debugf("failed to verify TUF data for: %s, %v", gun, err)
		return nil, &ErrValidationFail{Reason: "failed to validate integrity of roots"}
	}

	logrus.Debugf("root validation succeeded for %s", gun)
	// Call RootFromSigned to make sure we pick up on the IsValid markings from VerifySignatures
	return data.RootFromSigned(root)
}
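A hedged sketch of calling ValidateRoot for a first-contact (TOFU) validation. Here downloadedRoot stands in for a *data.Signed fetched from a notary server elsewhere, and the zero-valued TrustPinConfig pins no Certs or CA entries, leaving TOFU enabled:

package trustexample

import (
	"log"

	"github.com/theupdateframework/notary/trustpinning"
	"github.com/theupdateframework/notary/tuf/data"
)

// validateFirstContact is a hypothetical helper: it validates a freshly
// downloaded root for a GUN we have never seen before, relying on TOFU.
func validateFirstContact(downloadedRoot *data.Signed, gun data.GUN) *data.SignedRoot {
	validatedRoot, err := trustpinning.ValidateRoot(
		nil,            // no previously trusted root yet
		downloadedRoot, // fetched from the notary server elsewhere
		gun,
		trustpinning.TrustPinConfig{}, // no pins; TOFU applies
	)
	if err != nil {
		log.Fatal(err)
	}
	return validatedRoot
}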
// MatchCNToGun checks that the common name in a cert is valid for the given gun.
// This allows wildcards as suffixes, e.g. `namespace/*`
func MatchCNToGun(commonName string, gun data.GUN) bool {
	if strings.HasSuffix(commonName, wildcard) {
		prefix := strings.TrimRight(commonName, wildcard)
		logrus.Debugf("checking gun %s against wildcard prefix %s", gun, prefix)
		return strings.HasPrefix(gun.String(), prefix)
	}
	return commonName == gun.String()
}
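Two concrete cases for the wildcard rule; the GUNs here are purely illustrative:

package main

import (
	"fmt"

	"github.com/theupdateframework/notary/trustpinning"
	"github.com/theupdateframework/notary/tuf/data"
)

func main() {
	// Wildcard CN: any GUN under docker.io/library/ matches.
	fmt.Println(trustpinning.MatchCNToGun("docker.io/library/*", data.GUN("docker.io/library/alpine"))) // true
	// Exact CN: the GUN must match verbatim.
	fmt.Println(trustpinning.MatchCNToGun("docker.io/library/alpine", data.GUN("docker.io/library/ubuntu"))) // false
}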
// validRootLeafCerts returns a list of possibly (if checkExpiry is true) non-expired, non-sha1 certificates
|
||||
// found in root whose Common-Names match the provided GUN. Note that this
|
||||
// "validity" alone does not imply any measure of trust.
|
||||
func validRootLeafCerts(allLeafCerts map[string]*x509.Certificate, gun data.GUN, checkExpiry bool) (map[string]*x509.Certificate, error) {
|
||||
validLeafCerts := make(map[string]*x509.Certificate)
|
||||
|
||||
// Go through every leaf certificate and check that the CN matches the gun
|
||||
for id, cert := range allLeafCerts {
|
||||
// Validate that this leaf certificate has a CN that matches the gun
|
||||
if !MatchCNToGun(cert.Subject.CommonName, gun) {
|
||||
logrus.Debugf("error leaf certificate CN: %s doesn't match the given GUN: %s",
|
||||
cert.Subject.CommonName, gun)
|
||||
continue
|
||||
}
|
||||
// Make sure the certificate is not expired if checkExpiry is true
|
||||
// and warn if it hasn't expired yet but is within 6 months of expiry
|
||||
if err := utils.ValidateCertificate(cert, checkExpiry); err != nil {
|
||||
logrus.Debugf("%s is invalid: %s", id, err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
validLeafCerts[id] = cert
|
||||
}
|
||||
|
||||
if len(validLeafCerts) < 1 {
|
||||
logrus.Debugf("didn't find any valid leaf certificates for %s", gun)
|
||||
return nil, errors.New("no valid leaf certificates found in any of the root keys")
|
||||
}
|
||||
|
||||
logrus.Debugf("found %d valid leaf certificates for %s: %s", len(validLeafCerts), gun,
|
||||
prettyFormatCertIDs(validLeafCerts))
|
||||
return validLeafCerts, nil
|
||||
}

// validRootIntCerts filters the passed in structure of intermediate certificates to only include non-expired, non-sha1 certificates
// Note that this "validity" alone does not imply any measure of trust.
func validRootIntCerts(allIntCerts map[string][]*x509.Certificate) map[string][]*x509.Certificate {
	validIntCerts := make(map[string][]*x509.Certificate)

	// Go through every leaf cert ID, and build its valid intermediate certificate list
	for leafID, intCertList := range allIntCerts {
		for _, intCert := range intCertList {
			if err := utils.ValidateCertificate(intCert, true); err != nil {
				continue
			}
			validIntCerts[leafID] = append(validIntCerts[leafID], intCert)
		}
	}
	return validIntCerts
}

// parseAllCerts returns two maps, one with all of the leafCertificates and one
// with all the intermediate certificates found in signedRoot
func parseAllCerts(signedRoot *data.SignedRoot) (map[string]*x509.Certificate, map[string][]*x509.Certificate) {
	if signedRoot == nil {
		return nil, nil
	}

	leafCerts := make(map[string]*x509.Certificate)
	intCerts := make(map[string][]*x509.Certificate)

	// Before we loop through all root keys available, make sure any exist
	rootRoles, ok := signedRoot.Signed.Roles[data.CanonicalRootRole]
	if !ok {
		logrus.Debugf("tried to parse certificates from invalid root signed data")
		return nil, nil
	}

	logrus.Debugf("found the following root keys: %v", rootRoles.KeyIDs)
	// Iterate over every keyID for the root role inside of root.json
	for _, keyID := range rootRoles.KeyIDs {
		// check that the key exists in the signed root keys map
		key, ok := signedRoot.Signed.Keys[keyID]
		if !ok {
			logrus.Debugf("error while getting data for keyID: %s", keyID)
			continue
		}

		// Decode all the x509 certificates that were bundled with this
		// specific root key
		decodedCerts, err := utils.LoadCertBundleFromPEM(key.Public())
		if err != nil {
			logrus.Debugf("error while parsing root certificate with keyID: %s, %v", keyID, err)
			continue
		}

		// Get all non-CA certificates in the decoded certificates
		leafCertList := utils.GetLeafCerts(decodedCerts)

		// If we got no leaf certificates or we got more than one, fail
		if len(leafCertList) != 1 {
			logrus.Debugf("invalid chain due to leaf certificate missing or too many leaf certificates for keyID: %s", keyID)
			continue
		}
		// If we found a leaf certificate, assert that the cert bundle started with a leaf
		if decodedCerts[0].IsCA {
			logrus.Debugf("invalid chain due to leaf certificate not being first certificate for keyID: %s", keyID)
			continue
		}

		// Get the ID of the leaf certificate
		leafCert := leafCertList[0]

		// Store the leaf cert in the map
		leafCerts[key.ID()] = leafCert

		// Get all the remainder certificates marked as a CA to be used as intermediates
		intermediateCerts := utils.GetIntermediateCerts(decodedCerts)
		intCerts[key.ID()] = intermediateCerts
	}

	return leafCerts, intCerts
}
31
vendor/github.com/theupdateframework/notary/trustpinning/test.crt
generated
vendored
Normal file
@ -0,0 +1,31 @@
-----BEGIN CERTIFICATE-----
MIIFKzCCAxWgAwIBAgIQRyp9QqcJfd3ayqdjiz8xIDALBgkqhkiG9w0BAQswODEa
MBgGA1UEChMRZG9ja2VyLmNvbS9ub3RhcnkxGjAYBgNVBAMTEWRvY2tlci5jb20v
bm90YXJ5MB4XDTE1MDcxNzA2MzQyM1oXDTE3MDcxNjA2MzQyM1owODEaMBgGA1UE
ChMRZG9ja2VyLmNvbS9ub3RhcnkxGjAYBgNVBAMTEWRvY2tlci5jb20vbm90YXJ5
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAoQffrzsYnsH8vGf4Jh55
Cj5wrjUGzD/sHkaFHptjJ6ToJGJv5yMAPxzyInu5sIoGLJapnYVBoAU0YgI9qlAc
YA6SxaSwgm6rpvmnl8Qn0qc6ger3inpGaUJylWHuPwWkvcimQAqHZx2dQtL7g6kp
rmKeTWpWoWLw3JoAUZUVhZMd6a22ZL/DvAw+Hrogbz4XeyahFb9IH402zPxN6vga
JEFTF0Ji1jtNg0Mo4pb9SHsMsiw+LZK7SffHVKPxvd21m/biNmwsgExA3U8OOG8p
uygfacys5c8+ZrX+ZFG/cvwKz0k6/QfJU40s6MhXw5C2WttdVmsG9/7rGFYjHoIJ
weDyxgWk7vxKzRJI/un7cagDIaQsKrJQcCHIGFRlpIR5TwX7vl3R7cRncrDRMVvc
VSEG2esxbw7jtzIp/ypnVRxcOny7IypyjKqVeqZ6HgxZtTBVrF1O/aHo2kvlwyRS
Aus4kvh6z3+jzTm9EzfXiPQzY9BEk5gOLxhW9rc6UhlS+pe5lkaN/Hyqy/lPuq89
fMr2rr7lf5WFdFnze6WNYMAaW7dNA4NE0dyD53428ZLXxNVPL4WU66Gac6lynQ8l
r5tPsYIFXzh6FVaRKGQUtW1hz9ecO6Y27Rh2JsyiIxgUqk2ooxE69uN42t+dtqKC
1s8G/7VtY8GDALFLYTnzLvsCAwEAAaM1MDMwDgYDVR0PAQH/BAQDAgCgMBMGA1Ud
JQQMMAoGCCsGAQUFBwMDMAwGA1UdEwEB/wQCMAAwCwYJKoZIhvcNAQELA4ICAQBM
Oll3G/XBz8idiNdNJDWUh+5w3ojmwanrTBdCdqEk1WenaR6DtcflJx6Z3f/mwV4o
b1skOAX1yX5RCahJHUMxMicz/Q38pOVelGPrWnc3TJB+VKjGyHXlQDVkZFb+4+ef
wtj7HngXhHFFDSgjm3EdMndvgDQ7SQb4skOnCNS9iyX7eXxhFBCZmZL+HALKBj2B
yhV4IcBDqmp504t14rx9/Jvty0dG7fY7I51gEQpm4S02JML5xvTm1xfboWIhZODI
swEAO+ekBoFHbS1Q9KMPjIAw3TrCHH8x8XZq5zsYtAC1yZHdCKa26aWdy56A9eHj
O1VxzwmbNyXRenVuBYP+0wr3HVKFG4JJ4ZZpNZzQW/pqEPghCTJIvIueK652ByUc
//sv+nXd5f19LeES9pf0l253NDaFZPb6aegKfquWh8qlQBmUQ2GzaTLbtmNd28M6
W7iL7tkKZe1ZnBz9RKgtPrDjjWGZInjjcOU8EtT4SLq7kCVDmPs5MD8vaAm96JsE
jmLC3Uu/4k7HiDYX0i0mOWkFjZQMdVatcIF5FPSppwsSbW8QidnXt54UtwtFDEPz
lpjs7ybeQE71JXcMZnVIK4bjRXsEFPI98RpIlEdedbSUdYAncLNJRT7HZBMPGSwZ
0PNJuglnlr3srVzdW1dz2xQjdvLwxy6mNUF6rbQBWA==
-----END CERTIFICATE-----
163
vendor/github.com/theupdateframework/notary/trustpinning/trustpin.go
generated
vendored
Normal file
@ -0,0 +1,163 @@
package trustpinning

import (
	"crypto/x509"
	"fmt"
	"strings"

	"github.com/sirupsen/logrus"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/utils"
)

// TrustPinConfig represents the configuration under the trust_pinning section of the config file.
// This struct represents the preferred way to bootstrap trust for this repository.
// This is fully optional. If left at the default, uninitialized value, Notary will use TOFU over
// HTTPS.
// You can use this to provide certificates or a CA to pin to as a root of trust for a GUN.
// These are used with the following precedence:
//
// 1. Certs
// 2. CA
// 3. TOFUS (TOFU over HTTPS)
//
// Only one trust pinning option will be used to validate a particular GUN.
type TrustPinConfig struct {
	// CA maps a GUN prefix to file paths containing the root CA.
	// This file can contain multiple root certificates, bundled in separate PEM blocks.
	CA map[string]string
	// Certs maps a GUN to a list of certificate IDs
	Certs map[string][]string
	// DisableTOFU, when true, disables "Trust On First Use" of new key data.
	// This is false by default, which means new key data will always be trusted the first time it is seen.
	DisableTOFU bool
}
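
// Editor's illustrative sketch (not part of the vendored file): a
// TrustPinConfig exercising all three options described above. The cert ID
// and file path are hypothetical placeholders.
var exampleTrustPin = TrustPinConfig{
	Certs: map[string][]string{
		"docker.com/notary": {"a046...beef"}, // hypothetical hex certificate ID
	},
	CA: map[string]string{
		"docker.io/library/": "/path/to/root-ca.crt", // hypothetical CA bundle path
	},
	DisableTOFU: true, // anything not matched above will fail rather than fall back to TOFUS
}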

type trustPinChecker struct {
	gun           data.GUN
	config        TrustPinConfig
	pinnedCAPool  *x509.CertPool
	pinnedCertIDs []string
}

// CertChecker is a function type that will be used to check leaf certs against pinned trust
type CertChecker func(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool

// NewTrustPinChecker returns a new CertChecker function from a TrustPinConfig for a GUN
func NewTrustPinChecker(trustPinConfig TrustPinConfig, gun data.GUN, firstBootstrap bool) (CertChecker, error) {
	t := trustPinChecker{gun: gun, config: trustPinConfig}
	// Determine the mode, and if it's even valid
	if pinnedCerts, ok := trustPinConfig.Certs[gun.String()]; ok {
		logrus.Debugf("trust-pinning using Cert IDs")
		t.pinnedCertIDs = pinnedCerts
		return t.certsCheck, nil
	}
	var ok bool
	t.pinnedCertIDs, ok = wildcardMatch(gun, trustPinConfig.Certs)
	if ok {
		return t.certsCheck, nil
	}

	if caFilepath, err := getPinnedCAFilepathByPrefix(gun, trustPinConfig); err == nil {
		logrus.Debugf("trust-pinning using root CA bundle at: %s", caFilepath)

		// Try to add the CA certs from its bundle file to our certificate store,
		// and use it to validate certs in the root.json later
		caCerts, err := utils.LoadCertBundleFromFile(caFilepath)
		if err != nil {
			return nil, fmt.Errorf("could not load root cert from CA path")
		}
		// Now only consider certificates that are direct children from this CA cert chain
		caRootPool := x509.NewCertPool()
		for _, caCert := range caCerts {
			if err = utils.ValidateCertificate(caCert, true); err != nil {
				logrus.Debugf("ignoring root CA certificate with CN %s in bundle: %s", caCert.Subject.CommonName, err)
				continue
			}
			caRootPool.AddCert(caCert)
		}
		// If we didn't have any valid CA certs, error out
		if len(caRootPool.Subjects()) == 0 {
			return nil, fmt.Errorf("invalid CA certs provided")
		}
		t.pinnedCAPool = caRootPool
		return t.caCheck, nil
	}

	// If TOFUS is disabled and we don't have any previous trusted root data for this GUN, we error out
	if trustPinConfig.DisableTOFU && firstBootstrap {
		return nil, fmt.Errorf("invalid trust pinning specified")
	}
	return t.tofusCheck, nil
}

func (t trustPinChecker) certsCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool {
	// reconstruct the leaf + intermediate cert chain, which is bundled as {leaf, intermediates...},
	// in order to get the matching id in the root file
	key, err := utils.CertBundleToKey(leafCert, intCerts)
	if err != nil {
		logrus.Debug("error creating cert bundle: ", err.Error())
		return false
	}
	return utils.StrSliceContains(t.pinnedCertIDs, key.ID())
}

func (t trustPinChecker) caCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool {
	// Use intermediate certificates included in the root TUF metadata for our validation
	caIntPool := x509.NewCertPool()
	for _, intCert := range intCerts {
		caIntPool.AddCert(intCert)
	}
	// Attempt to find a valid certificate chain from the leaf cert to CA root
	// Use this certificate if such a valid chain exists (possibly using intermediates)
	var err error
	if _, err = leafCert.Verify(x509.VerifyOptions{Roots: t.pinnedCAPool, Intermediates: caIntPool}); err == nil {
		return true
	}
	logrus.Debugf("unable to find a valid certificate chain from leaf cert to CA root: %s", err)
	return false
}

func (t trustPinChecker) tofusCheck(leafCert *x509.Certificate, intCerts []*x509.Certificate) bool {
	return true
}
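
// Editor's illustrative sketch (not part of the vendored file): resolving a
// CertChecker and applying it to a leaf certificate. With an empty
// TrustPinConfig and firstBootstrap=false, NewTrustPinChecker falls through
// to tofusCheck, which accepts any certificate.
func exampleTOFUS(leaf *x509.Certificate, intermediates []*x509.Certificate) (bool, error) {
	check, err := NewTrustPinChecker(TrustPinConfig{}, "docker.com/notary", false)
	if err != nil {
		return false, err
	}
	return check(leaf, intermediates), nil
}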

// getPinnedCAFilepathByPrefix returns the CA filepath corresponding to the most specific (longest)
// entry in the map that is still a prefix of the provided gun. It returns an error if no entry
// matches this GUN as a prefix.
func getPinnedCAFilepathByPrefix(gun data.GUN, t TrustPinConfig) (string, error) {
	specificGUN := ""
	specificCAFilepath := ""
	foundCA := false
	for gunPrefix, caFilepath := range t.CA {
		if strings.HasPrefix(gun.String(), gunPrefix) && len(gunPrefix) >= len(specificGUN) {
			specificGUN = gunPrefix
			specificCAFilepath = caFilepath
			foundCA = true
		}
	}
	if !foundCA {
		return "", fmt.Errorf("could not find pinned CA for GUN: %s", gun)
	}
	return specificCAFilepath, nil
}
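
// Editor's illustrative sketch (not part of the vendored file): the
// longest-prefix rule above, with hypothetical paths.
func exampleLongestPrefix() (string, error) {
	cfg := TrustPinConfig{CA: map[string]string{
		"docker.io/":         "/ca/general.crt",
		"docker.io/library/": "/ca/library.crt",
	}}
	// Both entries are prefixes of the GUN; the longer one wins.
	return getPinnedCAFilepathByPrefix("docker.io/library/alpine", cfg) // "/ca/library.crt", nil
}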

// wildcardMatch will attempt to match the most specific (longest prefix) wildcarded
// trustpinning option for key IDs. Given the simple globbing and the use of maps,
// it is impossible to have two different matching prefixes of equal length.
// This logic also solves the issue of Go's randomization of map iteration.
func wildcardMatch(gun data.GUN, certs map[string][]string) ([]string, bool) {
	var (
		longest = ""
		ids     []string
	)
	for gunPrefix, keyIDs := range certs {
		if strings.HasSuffix(gunPrefix, "*") {
			if strings.HasPrefix(gun.String(), gunPrefix[:len(gunPrefix)-1]) && len(gunPrefix) > len(longest) {
				longest = gunPrefix
				ids = keyIDs
			}
		}
	}
	return ids, ids != nil
}
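
// Editor's illustrative sketch (not part of the vendored file): wildcardMatch
// picks the key IDs under the longest matching wildcard prefix; the IDs are
// hypothetical.
func exampleWildcardMatch() ([]string, bool) {
	certs := map[string][]string{
		"docker.com/*":        {"id1"},
		"docker.com/notary/*": {"id2"},
	}
	return wildcardMatch("docker.com/notary/prod", certs) // ["id2"], true
}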
30
vendor/github.com/theupdateframework/notary/tuf/LICENSE
generated
vendored
Normal file
@ -0,0 +1,30 @@
Copyright (c) 2015, Docker Inc.
Copyright (c) 2014-2015 Prime Directive, Inc.

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Prime Directive, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
6
vendor/github.com/theupdateframework/notary/tuf/README.md
generated
vendored
Normal file
@ -0,0 +1,6 @@
## Credits

This implementation was originally forked from [flynn/go-tuf](https://github.com/flynn/go-tuf).

This implementation retains the same 3-Clause BSD license present on
the original flynn implementation.
732
vendor/github.com/theupdateframework/notary/tuf/builder.go
generated
vendored
Normal file
@ -0,0 +1,732 @@
package tuf

import (
	"fmt"

	"github.com/docker/go/canonical/json"
	"github.com/theupdateframework/notary"

	"github.com/theupdateframework/notary/trustpinning"
	"github.com/theupdateframework/notary/tuf/data"
	"github.com/theupdateframework/notary/tuf/signed"
	"github.com/theupdateframework/notary/tuf/utils"
)

// ErrBuildDone is returned when any functions are called on RepoBuilder, and it
// is already finished building
var ErrBuildDone = fmt.Errorf(
	"the builder has finished building and cannot accept any more input or produce any more output")

// ErrInvalidBuilderInput is returned when RepoBuilder.Load is called
// with the wrong type of metadata for the state that it's in
type ErrInvalidBuilderInput struct{ msg string }

func (e ErrInvalidBuilderInput) Error() string {
	return e.msg
}

// ConsistentInfo is the consistent name and size of a role, or just the name
// of the role and a -1 if no file metadata for the role is known
type ConsistentInfo struct {
	RoleName data.RoleName
	fileMeta data.FileMeta
}

// ChecksumKnown determines whether or not we know enough to provide a size and
// consistent name
func (c ConsistentInfo) ChecksumKnown() bool {
	// empty hash, no size : this is the zero value
	return len(c.fileMeta.Hashes) > 0 || c.fileMeta.Length != 0
}

// ConsistentName returns the consistent name (rolename.sha256) for the role
// given this consistent information
func (c ConsistentInfo) ConsistentName() string {
	return utils.ConsistentName(c.RoleName.String(), c.fileMeta.Hashes[notary.SHA256])
}

// Length returns the expected length of the role as per this consistent
// information - if no checksum information is known, the size is -1.
func (c ConsistentInfo) Length() int64 {
	if c.ChecksumKnown() {
		return c.fileMeta.Length
	}
	return -1
}
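
// Editor's illustrative sketch (not part of the vendored file): a
// ConsistentInfo with the zero-value fileMeta reports no known checksum, so
// Length returns the -1 sentinel described above.
func exampleConsistentInfo() {
	info := ConsistentInfo{RoleName: data.CanonicalRootRole}
	_ = info.ChecksumKnown() // false: empty hashes, zero length
	_ = info.Length()        // -1: no checksum information known
}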

// RepoBuilder is an interface for an object which builds a tuf.Repo
type RepoBuilder interface {
	Load(roleName data.RoleName, content []byte, minVersion int, allowExpired bool) error
	LoadRootForUpdate(content []byte, minVersion int, isFinal bool) error
	GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error)
	GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error)
	Finish() (*Repo, *Repo, error)
	BootstrapNewBuilder() RepoBuilder
	BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder

	// informative functions
	IsLoaded(roleName data.RoleName) bool
	GetLoadedVersion(roleName data.RoleName) int
	GetConsistentInfo(roleName data.RoleName) ConsistentInfo
}

// finishedBuilder refuses any more input or output
type finishedBuilder struct{}

func (f finishedBuilder) Load(roleName data.RoleName, content []byte, minVersion int, allowExpired bool) error {
	return ErrBuildDone
}
func (f finishedBuilder) LoadRootForUpdate(content []byte, minVersion int, isFinal bool) error {
	return ErrBuildDone
}
func (f finishedBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) {
	return nil, 0, ErrBuildDone
}
func (f finishedBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) {
	return nil, 0, ErrBuildDone
}
func (f finishedBuilder) Finish() (*Repo, *Repo, error)    { return nil, nil, ErrBuildDone }
func (f finishedBuilder) BootstrapNewBuilder() RepoBuilder { return f }
func (f finishedBuilder) BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder {
	return f
}
func (f finishedBuilder) IsLoaded(roleName data.RoleName) bool        { return false }
func (f finishedBuilder) GetLoadedVersion(roleName data.RoleName) int { return 0 }
func (f finishedBuilder) GetConsistentInfo(roleName data.RoleName) ConsistentInfo {
	return ConsistentInfo{RoleName: roleName}
}

// NewRepoBuilder is the only way to get a pre-built RepoBuilder
func NewRepoBuilder(gun data.GUN, cs signed.CryptoService, trustpin trustpinning.TrustPinConfig) RepoBuilder {
	return NewBuilderFromRepo(gun, NewRepo(cs), trustpin)
}

// NewBuilderFromRepo allows us to bootstrap a builder given existing repo data.
// YOU PROBABLY SHOULDN'T BE USING THIS OUTSIDE OF TESTING CODE!!!
func NewBuilderFromRepo(gun data.GUN, repo *Repo, trustpin trustpinning.TrustPinConfig) RepoBuilder {
	return &repoBuilderWrapper{
		RepoBuilder: &repoBuilder{
			repo:                 repo,
			invalidRoles:         NewRepo(nil),
			gun:                  gun,
			trustpin:             trustpin,
			loadedNotChecksummed: make(map[data.RoleName][]byte),
		},
	}
}
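
// Editor's illustrative sketch (not part of the vendored file): the typical
// load order enforced by loadOptions/checkPrereqsLoaded below - root first,
// then targets, snapshot, and timestamp. The metadata byte slices are
// hypothetical inputs fetched elsewhere.
func exampleBuild(cs signed.CryptoService, rootJSON, targetsJSON, snapshotJSON, timestampJSON []byte) (*Repo, error) {
	builder := NewRepoBuilder("docker.com/notary", cs, trustpinning.TrustPinConfig{})
	steps := []struct {
		role    data.RoleName
		content []byte
	}{
		{data.CanonicalRootRole, rootJSON},
		{data.CanonicalTargetsRole, targetsJSON},
		{data.CanonicalSnapshotRole, snapshotJSON},
		{data.CanonicalTimestampRole, timestampJSON},
	}
	for _, step := range steps {
		if err := builder.Load(step.role, step.content, 1, false); err != nil {
			return nil, err
		}
	}
	validRepo, _, err := builder.Finish() // second value collects invalid delegation roles
	return validRepo, err
}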

// repoBuilderWrapper embeds a repoBuilder, but once Finish is called, swaps
// the embed out with a finishedBuilder
type repoBuilderWrapper struct {
	RepoBuilder
}

func (rbw *repoBuilderWrapper) Finish() (*Repo, *Repo, error) {
	switch rbw.RepoBuilder.(type) {
	case finishedBuilder:
		return rbw.RepoBuilder.Finish()
	default:
		old := rbw.RepoBuilder
		rbw.RepoBuilder = finishedBuilder{}
		return old.Finish()
	}
}

// repoBuilder actually builds a tuf.Repo
type repoBuilder struct {
	repo         *Repo
	invalidRoles *Repo

	// needed for root trust pinning verification
	gun      data.GUN
	trustpin trustpinning.TrustPinConfig

	// in case we load root and/or targets before snapshot and timestamp (or
	// snapshot and not timestamp), so we know what to verify when the data
	// with checksums comes in
	loadedNotChecksummed map[data.RoleName][]byte

	// bootstrapped values to validate a new root
	prevRoot                 *data.SignedRoot
	bootstrappedRootChecksum *data.FileMeta

	// for bootstrapping the next builder
	nextRootChecksum *data.FileMeta
}

func (rb *repoBuilder) Finish() (*Repo, *Repo, error) {
	return rb.repo, rb.invalidRoles, nil
}

func (rb *repoBuilder) BootstrapNewBuilder() RepoBuilder {
	return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
		repo:                 NewRepo(rb.repo.cryptoService),
		invalidRoles:         NewRepo(nil),
		gun:                  rb.gun,
		loadedNotChecksummed: make(map[data.RoleName][]byte),
		trustpin:             rb.trustpin,

		prevRoot:                 rb.repo.Root,
		bootstrappedRootChecksum: rb.nextRootChecksum,
	}}
}

func (rb *repoBuilder) BootstrapNewBuilderWithNewTrustpin(trustpin trustpinning.TrustPinConfig) RepoBuilder {
	return &repoBuilderWrapper{RepoBuilder: &repoBuilder{
		repo:                 NewRepo(rb.repo.cryptoService),
		gun:                  rb.gun,
		loadedNotChecksummed: make(map[data.RoleName][]byte),
		trustpin:             trustpin,

		prevRoot:                 rb.repo.Root,
		bootstrappedRootChecksum: rb.nextRootChecksum,
	}}
}

// IsLoaded returns whether a particular role has already been loaded
func (rb *repoBuilder) IsLoaded(roleName data.RoleName) bool {
	switch roleName {
	case data.CanonicalRootRole:
		return rb.repo.Root != nil
	case data.CanonicalSnapshotRole:
		return rb.repo.Snapshot != nil
	case data.CanonicalTimestampRole:
		return rb.repo.Timestamp != nil
	default:
		return rb.repo.Targets[roleName] != nil
	}
}

// GetLoadedVersion returns the metadata version, if it is loaded, or 1 (the
// minimum valid version number) otherwise
func (rb *repoBuilder) GetLoadedVersion(roleName data.RoleName) int {
	switch {
	case roleName == data.CanonicalRootRole && rb.repo.Root != nil:
		return rb.repo.Root.Signed.Version
	case roleName == data.CanonicalSnapshotRole && rb.repo.Snapshot != nil:
		return rb.repo.Snapshot.Signed.Version
	case roleName == data.CanonicalTimestampRole && rb.repo.Timestamp != nil:
		return rb.repo.Timestamp.Signed.Version
	default:
		if tgts, ok := rb.repo.Targets[roleName]; ok {
			return tgts.Signed.Version
		}
	}

	return 1
}

// GetConsistentInfo returns the consistent name and size of a role, if it is known,
// otherwise just the rolename and a -1 for size (both of which are inside a
// ConsistentInfo object)
func (rb *repoBuilder) GetConsistentInfo(roleName data.RoleName) ConsistentInfo {
	info := ConsistentInfo{RoleName: roleName} // starts out with unknown filemeta
	switch roleName {
	case data.CanonicalTimestampRole:
		// we do not want to get a consistent timestamp, but we do want to
		// limit its size
		info.fileMeta.Length = notary.MaxTimestampSize
	case data.CanonicalSnapshotRole:
		if rb.repo.Timestamp != nil {
			info.fileMeta = rb.repo.Timestamp.Signed.Meta[roleName.String()]
		}
	case data.CanonicalRootRole:
		switch {
		case rb.bootstrappedRootChecksum != nil:
			info.fileMeta = *rb.bootstrappedRootChecksum
		case rb.repo.Snapshot != nil:
			info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName.String()]
		}
	default:
		if rb.repo.Snapshot != nil {
			info.fileMeta = rb.repo.Snapshot.Signed.Meta[roleName.String()]
		}
	}
	return info
}

func (rb *repoBuilder) Load(roleName data.RoleName, content []byte, minVersion int, allowExpired bool) error {
	return rb.loadOptions(roleName, content, minVersion, allowExpired, false, false)
}

// LoadRootForUpdate adds additional flags for updating the root.json file
func (rb *repoBuilder) LoadRootForUpdate(content []byte, minVersion int, isFinal bool) error {
	if err := rb.loadOptions(data.CanonicalRootRole, content, minVersion, !isFinal, !isFinal, true); err != nil {
		return err
	}
	if !isFinal {
		rb.prevRoot = rb.repo.Root
	}
	return nil
}

// loadOptions adds additional flags that should only be used for updating the root.json
func (rb *repoBuilder) loadOptions(roleName data.RoleName, content []byte, minVersion int, allowExpired, skipChecksum, allowLoaded bool) error {
	if !data.ValidRole(roleName) {
		return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s is an invalid role", roleName)}
	}

	if !allowLoaded && rb.IsLoaded(roleName) {
		return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s has already been loaded", roleName)}
	}

	var err error
	switch roleName {
	case data.CanonicalRootRole:
		break
	case data.CanonicalTimestampRole, data.CanonicalSnapshotRole, data.CanonicalTargetsRole:
		err = rb.checkPrereqsLoaded([]data.RoleName{data.CanonicalRootRole})
	default: // delegations
		err = rb.checkPrereqsLoaded([]data.RoleName{data.CanonicalRootRole, data.CanonicalTargetsRole})
	}
	if err != nil {
		return err
	}

	switch roleName {
	case data.CanonicalRootRole:
		return rb.loadRoot(content, minVersion, allowExpired, skipChecksum)
	case data.CanonicalSnapshotRole:
		return rb.loadSnapshot(content, minVersion, allowExpired)
	case data.CanonicalTimestampRole:
		return rb.loadTimestamp(content, minVersion, allowExpired)
	case data.CanonicalTargetsRole:
		return rb.loadTargets(content, minVersion, allowExpired)
	default:
		return rb.loadDelegation(roleName, content, minVersion, allowExpired)
	}
}

func (rb *repoBuilder) checkPrereqsLoaded(prereqRoles []data.RoleName) error {
	for _, req := range prereqRoles {
		if !rb.IsLoaded(req) {
			return ErrInvalidBuilderInput{msg: fmt.Sprintf("%s must be loaded first", req)}
		}
	}
	return nil
}

// GenerateSnapshot generates a new snapshot given a previous (optional) snapshot
// We can't just load the previous snapshot, because it may have been signed by a different
// snapshot key (maybe from a previous root version). Note that we need the root role and
// targets role to be loaded, because we need to generate metadata for both (and we need
// the root to be loaded so we can get the snapshot role to sign with)
func (rb *repoBuilder) GenerateSnapshot(prev *data.SignedSnapshot) ([]byte, int, error) {
	switch {
	case rb.repo.cryptoService == nil:
		return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate snapshot without a cryptoservice"}
	case rb.IsLoaded(data.CanonicalSnapshotRole):
		return nil, 0, ErrInvalidBuilderInput{msg: "snapshot has already been loaded"}
	case rb.IsLoaded(data.CanonicalTimestampRole):
		return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate snapshot if timestamp has already been loaded"}
	}

	if err := rb.checkPrereqsLoaded([]data.RoleName{data.CanonicalRootRole}); err != nil {
		return nil, 0, err
	}

	// If there is no previous snapshot, we need to generate one, and so the targets must
	// have already been loaded. Otherwise, so long as the previous snapshot structure is
	// valid (it has a targets meta), we're good.
	switch prev {
	case nil:
		if err := rb.checkPrereqsLoaded([]data.RoleName{data.CanonicalTargetsRole}); err != nil {
			return nil, 0, err
		}

		if err := rb.repo.InitSnapshot(); err != nil {
			rb.repo.Snapshot = nil
			return nil, 0, err
		}
	default:
		if err := data.IsValidSnapshotStructure(prev.Signed); err != nil {
			return nil, 0, err
		}
		rb.repo.Snapshot = prev
	}

	sgnd, err := rb.repo.SignSnapshot(data.DefaultExpires(data.CanonicalSnapshotRole))
	if err != nil {
		rb.repo.Snapshot = nil
		return nil, 0, err
	}

	sgndJSON, err := json.Marshal(sgnd)
	if err != nil {
		rb.repo.Snapshot = nil
		return nil, 0, err
	}

	// loadedNotChecksummed should currently contain the root awaiting checksumming,
	// since it has to have been loaded. Since the snapshot was generated using
	// the root and targets data (there may not be any) that have been loaded,
	// remove all of them from rb.loadedNotChecksummed
	for tgtName := range rb.repo.Targets {
		delete(rb.loadedNotChecksummed, data.RoleName(tgtName))
	}
	delete(rb.loadedNotChecksummed, data.CanonicalRootRole)

	// The timestamp can't have been loaded yet, so we want to cache the snapshot
	// bytes so we can validate the checksum when a timestamp gets generated or
	// loaded later.
	rb.loadedNotChecksummed[data.CanonicalSnapshotRole] = sgndJSON

	return sgndJSON, rb.repo.Snapshot.Signed.Version, nil
}

// GenerateTimestamp generates a new timestamp given a previous (optional) timestamp
// We can't just load the previous timestamp, because it may have been signed by a different
// timestamp key (maybe from a previous root version)
func (rb *repoBuilder) GenerateTimestamp(prev *data.SignedTimestamp) ([]byte, int, error) {
	switch {
	case rb.repo.cryptoService == nil:
		return nil, 0, ErrInvalidBuilderInput{msg: "cannot generate timestamp without a cryptoservice"}
	case rb.IsLoaded(data.CanonicalTimestampRole):
		return nil, 0, ErrInvalidBuilderInput{msg: "timestamp has already been loaded"}
	}

	// SignTimestamp always serializes the loaded snapshot and signs in the data, so we must always
	// have the snapshot loaded first
	if err := rb.checkPrereqsLoaded([]data.RoleName{data.CanonicalRootRole, data.CanonicalSnapshotRole}); err != nil {
		return nil, 0, err
	}

	switch prev {
	case nil:
		if err := rb.repo.InitTimestamp(); err != nil {
			rb.repo.Timestamp = nil
			return nil, 0, err
		}
	default:
		if err := data.IsValidTimestampStructure(prev.Signed); err != nil {
			return nil, 0, err
		}
		rb.repo.Timestamp = prev
	}

	sgnd, err := rb.repo.SignTimestamp(data.DefaultExpires(data.CanonicalTimestampRole))
	if err != nil {
		rb.repo.Timestamp = nil
		return nil, 0, err
	}

	sgndJSON, err := json.Marshal(sgnd)
	if err != nil {
		rb.repo.Timestamp = nil
		return nil, 0, err
	}

	// The snapshot should have been loaded (and not checksummed, since a timestamp
	// cannot have been loaded), so it is awaiting checksumming. Since this
	// timestamp was generated using the snapshot awaiting checksumming, we can
	// remove it from rb.loadedNotChecksummed. There should be no other items
	// awaiting checksumming now since loading/generating a snapshot should have
	// cleared out everything else in `loadedNotChecksummed`.
	delete(rb.loadedNotChecksummed, data.CanonicalSnapshotRole)

	return sgndJSON, rb.repo.Timestamp.Signed.Version, nil
}

// loadRoot loads a root if one has not been loaded
func (rb *repoBuilder) loadRoot(content []byte, minVersion int, allowExpired, skipChecksum bool) error {
	roleName := data.CanonicalRootRole

	signedObj, err := rb.bytesToSigned(content, data.CanonicalRootRole, skipChecksum)
	if err != nil {
		return err
	}
	// ValidateRoot validates against the previous root's role, as well as validates that the root
	// itself is self-consistent with its own signatures and thresholds.
	// This assumes that ValidateRoot calls data.RootFromSigned, which validates
	// the metadata, rather than just unmarshalling signedObject into a SignedRoot object itself.
	signedRoot, err := trustpinning.ValidateRoot(rb.prevRoot, signedObj, rb.gun, rb.trustpin)
	if err != nil {
		return err
	}

	if err := signed.VerifyVersion(&(signedRoot.Signed.SignedCommon), minVersion); err != nil {
		return err
	}

	if !allowExpired { // check must go at the end because all other validation should pass
		if err := signed.VerifyExpiry(&(signedRoot.Signed.SignedCommon), roleName); err != nil {
			return err
		}
	}

	rootRole, err := signedRoot.BuildBaseRole(data.CanonicalRootRole)
	if err != nil { // this should never happen since the root has been validated
		return err
	}
	rb.repo.Root = signedRoot
	rb.repo.originalRootRole = rootRole
	return nil
}

func (rb *repoBuilder) loadTimestamp(content []byte, minVersion int, allowExpired bool) error {
	roleName := data.CanonicalTimestampRole

	timestampRole, err := rb.repo.Root.BuildBaseRole(roleName)
	if err != nil { // this should never happen, since it's already been validated
		return err
	}

	signedObj, err := rb.bytesToSignedAndValidateSigs(timestampRole, content)
	if err != nil {
		return err
	}

	signedTimestamp, err := data.TimestampFromSigned(signedObj)
	if err != nil {
		return err
	}

	if err := signed.VerifyVersion(&(signedTimestamp.Signed.SignedCommon), minVersion); err != nil {
		return err
	}

	if !allowExpired { // check must go at the end because all other validation should pass
		if err := signed.VerifyExpiry(&(signedTimestamp.Signed.SignedCommon), roleName); err != nil {
			return err
		}
	}

	if err := rb.validateChecksumsFromTimestamp(signedTimestamp); err != nil {
		return err
	}

	rb.repo.Timestamp = signedTimestamp
	return nil
}

func (rb *repoBuilder) loadSnapshot(content []byte, minVersion int, allowExpired bool) error {
	roleName := data.CanonicalSnapshotRole

	snapshotRole, err := rb.repo.Root.BuildBaseRole(roleName)
	if err != nil { // this should never happen, since it's already been validated
		return err
	}

	signedObj, err := rb.bytesToSignedAndValidateSigs(snapshotRole, content)
	if err != nil {
		return err
	}

	signedSnapshot, err := data.SnapshotFromSigned(signedObj)
	if err != nil {
		return err
	}

	if err := signed.VerifyVersion(&(signedSnapshot.Signed.SignedCommon), minVersion); err != nil {
		return err
	}

	if !allowExpired { // check must go at the end because all other validation should pass
		if err := signed.VerifyExpiry(&(signedSnapshot.Signed.SignedCommon), roleName); err != nil {
			return err
		}
	}

	// at this point, the only thing left to validate is existing checksums - we can use
	// this snapshot to bootstrap the next builder if needed - and we don't need to do
	// the 2-value assignment since we've already validated the signedSnapshot, which MUST
	// have root metadata
	rootMeta := signedSnapshot.Signed.Meta[data.CanonicalRootRole.String()]
	rb.nextRootChecksum = &rootMeta

	if err := rb.validateChecksumsFromSnapshot(signedSnapshot); err != nil {
		return err
	}

	rb.repo.Snapshot = signedSnapshot
	return nil
}

func (rb *repoBuilder) loadTargets(content []byte, minVersion int, allowExpired bool) error {
	roleName := data.CanonicalTargetsRole

	targetsRole, err := rb.repo.Root.BuildBaseRole(roleName)
	if err != nil { // this should never happen, since it's already been validated
		return err
	}

	signedObj, err := rb.bytesToSignedAndValidateSigs(targetsRole, content)
	if err != nil {
		return err
	}

	signedTargets, err := data.TargetsFromSigned(signedObj, roleName)
	if err != nil {
		return err
	}

	if err := signed.VerifyVersion(&(signedTargets.Signed.SignedCommon), minVersion); err != nil {
		return err
	}

	if !allowExpired { // check must go at the end because all other validation should pass
		if err := signed.VerifyExpiry(&(signedTargets.Signed.SignedCommon), roleName); err != nil {
			return err
		}
	}

	signedTargets.Signatures = signedObj.Signatures
	rb.repo.Targets[roleName] = signedTargets
	return nil
}

func (rb *repoBuilder) loadDelegation(roleName data.RoleName, content []byte, minVersion int, allowExpired bool) error {
	delegationRole, err := rb.repo.GetDelegationRole(roleName)
	if err != nil {
		return err
	}

	// bytesToSigned checks checksum
	signedObj, err := rb.bytesToSigned(content, roleName, false)
	if err != nil {
		return err
	}

	signedTargets, err := data.TargetsFromSigned(signedObj, roleName)
	if err != nil {
		return err
	}

	if err := signed.VerifyVersion(&(signedTargets.Signed.SignedCommon), minVersion); err != nil {
		// don't capture in invalidRoles because the role we received is a rollback
		return err
	}

	// verify signature
	if err := signed.VerifySignatures(signedObj, delegationRole.BaseRole); err != nil {
		rb.invalidRoles.Targets[roleName] = signedTargets
		return err
	}

	if !allowExpired { // check must go at the end because all other validation should pass
		if err := signed.VerifyExpiry(&(signedTargets.Signed.SignedCommon), roleName); err != nil {
			rb.invalidRoles.Targets[roleName] = signedTargets
			return err
		}
	}

	signedTargets.Signatures = signedObj.Signatures
	rb.repo.Targets[roleName] = signedTargets
	return nil
}

func (rb *repoBuilder) validateChecksumsFromTimestamp(ts *data.SignedTimestamp) error {
	sn, ok := rb.loadedNotChecksummed[data.CanonicalSnapshotRole]
	if ok {
		// by this point, the SignedTimestamp has been validated so it must have a snapshot hash
		snMeta := ts.Signed.Meta[data.CanonicalSnapshotRole.String()].Hashes
		if err := data.CheckHashes(sn, data.CanonicalSnapshotRole.String(), snMeta); err != nil {
			return err
		}
		delete(rb.loadedNotChecksummed, data.CanonicalSnapshotRole)
	}
	return nil
}

func (rb *repoBuilder) validateChecksumsFromSnapshot(sn *data.SignedSnapshot) error {
	var goodRoles []data.RoleName
	for roleName, loadedBytes := range rb.loadedNotChecksummed {
		switch roleName {
		case data.CanonicalSnapshotRole, data.CanonicalTimestampRole:
			break
		default:
			if err := data.CheckHashes(loadedBytes, roleName.String(), sn.Signed.Meta[roleName.String()].Hashes); err != nil {
				return err
			}
			goodRoles = append(goodRoles, roleName)
		}
	}
	for _, roleName := range goodRoles {
		delete(rb.loadedNotChecksummed, roleName)
	}
	return nil
}

func (rb *repoBuilder) validateChecksumFor(content []byte, roleName data.RoleName) error {
	// validate the bootstrap checksum for root, if provided
	if roleName == data.CanonicalRootRole && rb.bootstrappedRootChecksum != nil {
		if err := data.CheckHashes(content, roleName.String(), rb.bootstrappedRootChecksum.Hashes); err != nil {
			return err
		}
	}

	// but we also want to cache the root content, so that when the snapshot is
	// loaded it is validated (to make sure everything in the repo is self-consistent)
	checksums := rb.getChecksumsFor(roleName)
	if checksums != nil { // as opposed to empty, in which case hash check should fail
		if err := data.CheckHashes(content, roleName.String(), *checksums); err != nil {
			return err
		}
	} else if roleName != data.CanonicalTimestampRole {
		// timestamp is the only role which does not need to be checksummed, but
		// for everything else, cache the contents in the list of roles that have
		// not been checksummed by the snapshot/timestamp yet
		rb.loadedNotChecksummed[roleName] = content
	}

	return nil
}

// Checksums the given bytes, and if they validate, converts them to a data.Signed object.
// If the checksums are nil (as opposed to empty), adds the bytes to the list of roles that
// haven't been checksummed (unless it's a timestamp, which has no checksum reference).
func (rb *repoBuilder) bytesToSigned(content []byte, roleName data.RoleName, skipChecksum bool) (*data.Signed, error) {
	if !skipChecksum {
		if err := rb.validateChecksumFor(content, roleName); err != nil {
			return nil, err
		}
	}

	// unmarshal to signed
	signedObj := &data.Signed{}
	if err := json.Unmarshal(content, signedObj); err != nil {
		return nil, err
	}

	return signedObj, nil
}

func (rb *repoBuilder) bytesToSignedAndValidateSigs(role data.BaseRole, content []byte) (*data.Signed, error) {
	signedObj, err := rb.bytesToSigned(content, role.Name, false)
	if err != nil {
		return nil, err
	}

	// verify signature
	if err := signed.VerifySignatures(signedObj, role); err != nil {
		return nil, err
	}

	return signedObj, nil
}

// If the checksum reference (the loaded timestamp for the snapshot role, and
// the loaded snapshot for every other role except timestamp and snapshot) is nil,
// then return nil for the checksums, meaning that the checksum is not yet
// available. If the checksum reference *is* loaded, then always returns the
// Hashes object for the given role - if it doesn't exist, returns an empty Hash
// object (against which any checksum validation would fail).
func (rb *repoBuilder) getChecksumsFor(role data.RoleName) *data.Hashes {
	var hashes data.Hashes
	switch role {
	case data.CanonicalTimestampRole:
		return nil
	case data.CanonicalSnapshotRole:
		if rb.repo.Timestamp == nil {
			return nil
		}
		hashes = rb.repo.Timestamp.Signed.Meta[data.CanonicalSnapshotRole.String()].Hashes
	default:
		if rb.repo.Snapshot == nil {
			return nil
		}
		hashes = rb.repo.Snapshot.Signed.Meta[role.String()].Hashes
	}
	return &hashes
}
53
vendor/github.com/theupdateframework/notary/tuf/data/errors.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
package data

import "fmt"

// ErrInvalidMetadata is the error to be returned when metadata is invalid
type ErrInvalidMetadata struct {
	role RoleName
	msg  string
}

func (e ErrInvalidMetadata) Error() string {
	return fmt.Sprintf("%s type metadata invalid: %s", e.role.String(), e.msg)
}

// ErrMissingMeta - couldn't find the FileMeta object for the given Role, or
// the FileMeta object contained no supported checksums
type ErrMissingMeta struct {
	Role string
}

func (e ErrMissingMeta) Error() string {
	return fmt.Sprintf("no checksums for supported algorithms were provided for %s", e.Role)
}

// ErrInvalidChecksum is the error to be returned when a checksum is invalid
type ErrInvalidChecksum struct {
	alg string
}

func (e ErrInvalidChecksum) Error() string {
	return fmt.Sprintf("%s checksum invalid", e.alg)
}

// ErrMismatchedChecksum is the error to be returned when a checksum is mismatched
type ErrMismatchedChecksum struct {
	alg      string
	name     string
	expected string
}

func (e ErrMismatchedChecksum) Error() string {
	return fmt.Sprintf("%s checksum for %s did not match: expected %s", e.alg, e.name,
		e.expected)
}

// ErrCertExpired is the error to be returned when a certificate has expired
type ErrCertExpired struct {
	CN string
}

func (e ErrCertExpired) Error() string {
	return fmt.Sprintf("certificate with CN %s is expired", e.CN)
}
529
vendor/github.com/theupdateframework/notary/tuf/data/keys.go
generated
vendored
Normal file
@ -0,0 +1,529 @@
package data

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"encoding/asn1"
	"encoding/hex"
	"errors"
	"io"
	"math/big"

	"github.com/docker/go/canonical/json"
	"github.com/sirupsen/logrus"
	"golang.org/x/crypto/ed25519"
)

// PublicKey is the necessary interface for public keys
type PublicKey interface {
	ID() string
	Algorithm() string
	Public() []byte
}

// PrivateKey adds the ability to access the private key
type PrivateKey interface {
	PublicKey
	Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error)
	Private() []byte
	CryptoSigner() crypto.Signer
	SignatureAlgorithm() SigAlgorithm
}

// KeyPair holds the public and private key bytes
type KeyPair struct {
	Public  []byte `json:"public"`
	Private []byte `json:"private"`
}

// Keys represents a map of key ID to PublicKey object. It's necessary
// to allow us to unmarshal into an interface via the json.Unmarshaller
// interface
type Keys map[string]PublicKey

// UnmarshalJSON implements the json.Unmarshaller interface
func (ks *Keys) UnmarshalJSON(data []byte) error {
	parsed := make(map[string]TUFKey)
	err := json.Unmarshal(data, &parsed)
	if err != nil {
		return err
	}
	final := make(map[string]PublicKey)
	for k, tk := range parsed {
		final[k] = typedPublicKey(tk)
	}
	*ks = final
	return nil
}

// KeyList represents a list of keys
type KeyList []PublicKey

// UnmarshalJSON implements the json.Unmarshaller interface
func (ks *KeyList) UnmarshalJSON(data []byte) error {
	parsed := make([]TUFKey, 0, 1)
	err := json.Unmarshal(data, &parsed)
	if err != nil {
		return err
	}
	final := make([]PublicKey, 0, len(parsed))
	for _, tk := range parsed {
		final = append(final, typedPublicKey(tk))
	}
	*ks = final
	return nil
}

// IDs generates a list of the hex encoded key IDs in the KeyList
func (ks KeyList) IDs() []string {
	keyIDs := make([]string, 0, len(ks))
	for _, k := range ks {
		keyIDs = append(keyIDs, k.ID())
	}
	return keyIDs
}

func typedPublicKey(tk TUFKey) PublicKey {
	switch tk.Algorithm() {
	case ECDSAKey:
		return &ECDSAPublicKey{TUFKey: tk}
	case ECDSAx509Key:
		return &ECDSAx509PublicKey{TUFKey: tk}
	case RSAKey:
		return &RSAPublicKey{TUFKey: tk}
	case RSAx509Key:
		return &RSAx509PublicKey{TUFKey: tk}
	case ED25519Key:
		return &ED25519PublicKey{TUFKey: tk}
	}
	return &UnknownPublicKey{TUFKey: tk}
}

func typedPrivateKey(tk TUFKey) (PrivateKey, error) {
	private := tk.Value.Private
	tk.Value.Private = nil
	switch tk.Algorithm() {
	case ECDSAKey:
		return NewECDSAPrivateKey(
			&ECDSAPublicKey{
				TUFKey: tk,
			},
			private,
		)
	case ECDSAx509Key:
		return NewECDSAPrivateKey(
			&ECDSAx509PublicKey{
				TUFKey: tk,
			},
			private,
		)
	case RSAKey:
		return NewRSAPrivateKey(
			&RSAPublicKey{
				TUFKey: tk,
			},
			private,
		)
	case RSAx509Key:
		return NewRSAPrivateKey(
			&RSAx509PublicKey{
				TUFKey: tk,
			},
			private,
		)
	case ED25519Key:
		return NewED25519PrivateKey(
			ED25519PublicKey{
				TUFKey: tk,
			},
			private,
		)
	}
	return &UnknownPrivateKey{
		TUFKey:     tk,
		privateKey: privateKey{private: private},
	}, nil
}

// NewPublicKey creates a new, correctly typed PublicKey, using the
// UnknownPublicKey catchall for unsupported ciphers
func NewPublicKey(alg string, public []byte) PublicKey {
	tk := TUFKey{
		Type: alg,
		Value: KeyPair{
			Public: public,
		},
	}
	return typedPublicKey(tk)
}

// NewPrivateKey creates a new, correctly typed PrivateKey, using the
// UnknownPrivateKey catchall for unsupported ciphers
func NewPrivateKey(pubKey PublicKey, private []byte) (PrivateKey, error) {
	tk := TUFKey{
		Type: pubKey.Algorithm(),
		Value: KeyPair{
			Public:  pubKey.Public(),
			Private: private, // typedPrivateKey moves this value
		},
	}
	return typedPrivateKey(tk)
}

// UnmarshalPublicKey is used to parse individual public keys in JSON
func UnmarshalPublicKey(data []byte) (PublicKey, error) {
	var parsed TUFKey
	err := json.Unmarshal(data, &parsed)
	if err != nil {
		return nil, err
	}
	return typedPublicKey(parsed), nil
}

// UnmarshalPrivateKey is used to parse individual private keys in JSON
func UnmarshalPrivateKey(data []byte) (PrivateKey, error) {
	var parsed TUFKey
	err := json.Unmarshal(data, &parsed)
	if err != nil {
		return nil, err
	}
	return typedPrivateKey(parsed)
}
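
// Editor's illustrative sketch (not part of the vendored file): constructing
// a typed public key and round-tripping it through JSON with the helpers
// above. The key bytes are placeholders, not a real ed25519 public key.
func exampleKeyRoundTrip() (PublicKey, error) {
	pub := NewPublicKey(ED25519Key, []byte("not-a-real-key"))
	raw, err := json.Marshal(pub)
	if err != nil {
		return nil, err
	}
	return UnmarshalPublicKey(raw) // comes back as an *ED25519PublicKey
}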

// TUFKey is the structure used for both public and private keys in TUF.
// Normally it would make sense to use different structures for public and
// private keys, but that would change the key ID algorithm (since the canonical
// JSON would be different). This structure should normally be accessed through
// the PublicKey or PrivateKey interfaces.
type TUFKey struct {
	id    string
	Type  string  `json:"keytype"`
	Value KeyPair `json:"keyval"`
}

// Algorithm returns the algorithm of the key
func (k TUFKey) Algorithm() string {
	return k.Type
}

// ID generates the ID of the key if necessary, caches it, and returns it
func (k *TUFKey) ID() string {
	if k.id == "" {
		pubK := TUFKey{
			Type: k.Algorithm(),
			Value: KeyPair{
				Public:  k.Public(),
				Private: nil,
			},
		}
		data, err := json.MarshalCanonical(&pubK)
		if err != nil {
			logrus.Error("Error generating key ID:", err)
		}
		digest := sha256.Sum256(data)
		k.id = hex.EncodeToString(digest[:])
	}
	return k.id
}

// Public returns the public bytes
func (k TUFKey) Public() []byte {
	return k.Value.Public
}
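
// Editor's illustrative sketch (not part of the vendored file): as the ID
// method above shows, a key ID is the hex-encoded SHA-256 of the canonical
// JSON of the public portion only, so keys with identical type and public
// bytes share an ID. The bytes here are placeholders.
func exampleSameID() bool {
	a := NewPublicKey(ED25519Key, []byte("same-bytes"))
	b := NewPublicKey(ED25519Key, []byte("same-bytes"))
	return a.ID() == b.ID() // true
}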

// Public key types

// ECDSAPublicKey represents an ECDSA key using a raw serialization
// of the public key
type ECDSAPublicKey struct {
	TUFKey
}

// ECDSAx509PublicKey represents an ECDSA key using an x509 cert
// as the serialized format of the public key
type ECDSAx509PublicKey struct {
	TUFKey
}

// RSAPublicKey represents an RSA key using a raw serialization
// of the public key
type RSAPublicKey struct {
	TUFKey
}

// RSAx509PublicKey represents an RSA key using an x509 cert
// as the serialized format of the public key
type RSAx509PublicKey struct {
	TUFKey
}

// ED25519PublicKey represents an ED25519 key using a raw serialization
// of the public key
type ED25519PublicKey struct {
	TUFKey
}

// UnknownPublicKey is a catchall for key types that are not supported
type UnknownPublicKey struct {
	TUFKey
}

// NewECDSAPublicKey initializes a new public key with the ECDSAKey type
func NewECDSAPublicKey(public []byte) *ECDSAPublicKey {
	return &ECDSAPublicKey{
		TUFKey: TUFKey{
			Type: ECDSAKey,
			Value: KeyPair{
				Public:  public,
				Private: nil,
			},
		},
	}
}

// NewECDSAx509PublicKey initializes a new public key with the ECDSAx509Key type
func NewECDSAx509PublicKey(public []byte) *ECDSAx509PublicKey {
	return &ECDSAx509PublicKey{
		TUFKey: TUFKey{
			Type: ECDSAx509Key,
			Value: KeyPair{
				Public:  public,
				Private: nil,
			},
		},
	}
}

// NewRSAPublicKey initializes a new public key with the RSA type
func NewRSAPublicKey(public []byte) *RSAPublicKey {
	return &RSAPublicKey{
		TUFKey: TUFKey{
			Type: RSAKey,
			Value: KeyPair{
				Public:  public,
				Private: nil,
			},
		},
	}
}

// NewRSAx509PublicKey initializes a new public key with the RSAx509Key type
func NewRSAx509PublicKey(public []byte) *RSAx509PublicKey {
	return &RSAx509PublicKey{
		TUFKey: TUFKey{
			Type: RSAx509Key,
			Value: KeyPair{
				Public:  public,
				Private: nil,
			},
		},
	}
}

// NewED25519PublicKey initializes a new public key with the ED25519Key type
func NewED25519PublicKey(public []byte) *ED25519PublicKey {
	return &ED25519PublicKey{
		TUFKey: TUFKey{
			Type: ED25519Key,
			Value: KeyPair{
				Public:  public,
				Private: nil,
			},
		},
	}
}
|
||||
|
||||
// Private key types
|
||||
type privateKey struct {
|
||||
private []byte
|
||||
}
|
||||
|
||||
type signer struct {
|
||||
signer crypto.Signer
|
||||
}
|
||||
|
||||
// ECDSAPrivateKey represents a private ECDSA key
|
||||
type ECDSAPrivateKey struct {
|
||||
PublicKey
|
||||
privateKey
|
||||
signer
|
||||
}
|
||||
|
||||
// RSAPrivateKey represents a private RSA key
|
||||
type RSAPrivateKey struct {
|
||||
PublicKey
|
||||
privateKey
|
||||
signer
|
||||
}
|
||||
|
||||
// ED25519PrivateKey represents a private ED25519 key
|
||||
type ED25519PrivateKey struct {
|
||||
ED25519PublicKey
|
||||
privateKey
|
||||
}
|
||||
|
||||
// UnknownPrivateKey is a catchall for unsupported key types
|
||||
type UnknownPrivateKey struct {
|
||||
TUFKey
|
||||
privateKey
|
||||
}
|
||||
|
||||
// NewECDSAPrivateKey initializes a new ECDSA private key
|
||||
func NewECDSAPrivateKey(public PublicKey, private []byte) (*ECDSAPrivateKey, error) {
|
||||
switch public.(type) {
|
||||
case *ECDSAPublicKey, *ECDSAx509PublicKey:
|
||||
default:
|
||||
return nil, errors.New("invalid public key type provided to NewECDSAPrivateKey")
|
||||
}
|
||||
ecdsaPrivKey, err := x509.ParseECPrivateKey(private)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ECDSAPrivateKey{
|
||||
PublicKey: public,
|
||||
privateKey: privateKey{private: private},
|
||||
signer: signer{signer: ecdsaPrivKey},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewRSAPrivateKey initialized a new RSA private key
|
||||
func NewRSAPrivateKey(public PublicKey, private []byte) (*RSAPrivateKey, error) {
|
||||
switch public.(type) {
|
||||
case *RSAPublicKey, *RSAx509PublicKey:
|
||||
default:
|
||||
return nil, errors.New("invalid public key type provided to NewRSAPrivateKey")
|
||||
}
|
||||
rsaPrivKey, err := x509.ParsePKCS1PrivateKey(private)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &RSAPrivateKey{
|
||||
PublicKey: public,
|
||||
privateKey: privateKey{private: private},
|
||||
signer: signer{signer: rsaPrivKey},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewED25519PrivateKey initialized a new ED25519 private key
|
||||
func NewED25519PrivateKey(public ED25519PublicKey, private []byte) (*ED25519PrivateKey, error) {
|
||||
return &ED25519PrivateKey{
|
||||
ED25519PublicKey: public,
|
||||
privateKey: privateKey{private: private},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Private return the serialized private bytes of the key
|
||||
func (k privateKey) Private() []byte {
|
||||
return k.private
|
||||
}
|
||||
|
||||
// CryptoSigner returns the underlying crypto.Signer for use cases where we need the default
|
||||
// signature or public key functionality (like when we generate certificates)
|
||||
func (s signer) CryptoSigner() crypto.Signer {
|
||||
return s.signer
|
||||
}
|
||||
|
||||
// CryptoSigner returns the ED25519PrivateKey which already implements crypto.Signer
|
||||
func (k ED25519PrivateKey) CryptoSigner() crypto.Signer {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CryptoSigner returns the UnknownPrivateKey which already implements crypto.Signer
|
||||
func (k UnknownPrivateKey) CryptoSigner() crypto.Signer {
|
||||
return nil
|
||||
}
|
||||
|
||||
type ecdsaSig struct {
|
||||
R *big.Int
|
||||
S *big.Int
|
||||
}
|
||||
|
||||
// Sign creates an ecdsa signature
|
||||
func (k ECDSAPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
|
||||
ecdsaPrivKey, ok := k.CryptoSigner().(*ecdsa.PrivateKey)
|
||||
if !ok {
|
||||
return nil, errors.New("signer was based on the wrong key type")
|
||||
}
|
||||
hashed := sha256.Sum256(msg)
|
||||
sigASN1, err := ecdsaPrivKey.Sign(rand, hashed[:], opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sig := ecdsaSig{}
|
||||
_, err = asn1.Unmarshal(sigASN1, &sig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rBytes, sBytes := sig.R.Bytes(), sig.S.Bytes()
|
||||
octetLength := (ecdsaPrivKey.Params().BitSize + 7) >> 3
|
||||
|
||||
// MUST include leading zeros in the output
|
||||
rBuf := make([]byte, octetLength-len(rBytes), octetLength)
|
||||
sBuf := make([]byte, octetLength-len(sBytes), octetLength)
|
||||
|
||||
rBuf = append(rBuf, rBytes...)
|
||||
sBuf = append(sBuf, sBytes...)
|
||||
return append(rBuf, sBuf...), nil
|
||||
}
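
The fixed-width r||s encoding produced above can be round-tripped with only the standard library. A sketch (placeholder key and message) that pads r and s the same way and verifies the split halves:

```go
package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/sha256"
    "fmt"
    "math/big"
)

func main() {
    priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        panic(err)
    }
    hashed := sha256.Sum256([]byte("payload"))

    r, s, err := ecdsa.Sign(rand.Reader, priv, hashed[:])
    if err != nil {
        panic(err)
    }

    // Fixed-width r||s with leading zeros, as the Sign method above produces.
    octetLength := (priv.Params().BitSize + 7) >> 3
    sig := make([]byte, 2*octetLength)
    r.FillBytes(sig[:octetLength])
    s.FillBytes(sig[octetLength:])

    // Split the halves back out and verify.
    r2 := new(big.Int).SetBytes(sig[:octetLength])
    s2 := new(big.Int).SetBytes(sig[octetLength:])
    fmt.Println(ecdsa.Verify(&priv.PublicKey, hashed[:], r2, s2)) // true
}
```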

// Sign creates an rsa signature
func (k RSAPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
    hashed := sha256.Sum256(msg)
    if opts == nil {
        opts = &rsa.PSSOptions{
            SaltLength: rsa.PSSSaltLengthEqualsHash,
            Hash:       crypto.SHA256,
        }
    }
    return k.CryptoSigner().Sign(rand, hashed[:], opts)
}

// Sign creates an ed25519 signature
func (k ED25519PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
    priv := make([]byte, ed25519.PrivateKeySize)
    // The ed25519 key is serialized as public key then private key, so just use private key here.
    copy(priv, k.private[ed25519.PublicKeySize:])
    return ed25519.Sign(ed25519.PrivateKey(priv), msg)[:], nil
}

// Sign on an UnknownPrivateKey returns an error because the client does not
// know how to sign with this key type.
func (k UnknownPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
    return nil, errors.New("unknown key type, cannot sign")
}

// SignatureAlgorithm returns the SigAlgorithm for an ECDSAPrivateKey
func (k ECDSAPrivateKey) SignatureAlgorithm() SigAlgorithm {
    return ECDSASignature
}

// SignatureAlgorithm returns the SigAlgorithm for an RSAPrivateKey
func (k RSAPrivateKey) SignatureAlgorithm() SigAlgorithm {
    return RSAPSSSignature
}

// SignatureAlgorithm returns the SigAlgorithm for an ED25519PrivateKey
func (k ED25519PrivateKey) SignatureAlgorithm() SigAlgorithm {
    return EDDSASignature
}

// SignatureAlgorithm returns the SigAlgorithm for an UnknownPrivateKey
func (k UnknownPrivateKey) SignatureAlgorithm() SigAlgorithm {
    return ""
}

// PublicKeyFromPrivate returns a new TUFKey based on a private key, with
// the private key bytes guaranteed to be nil.
func PublicKeyFromPrivate(pk PrivateKey) PublicKey {
    return typedPublicKey(TUFKey{
        Type: pk.Algorithm(),
        Value: KeyPair{
            Public:  pk.Public(),
            Private: nil,
        },
    })
}
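
A usage sketch tying the pieces of this file together. It assumes the 96-byte layout the Sign method above describes (32 public bytes followed by the 64-byte ed25519 private key) and that *ED25519PrivateKey satisfies the PrivateKey interface defined elsewhere in this package:

```go
package main

import (
    "crypto/ed25519"
    "crypto/rand"
    "fmt"

    "github.com/theupdateframework/notary/tuf/data"
)

func main() {
    pub, priv, err := ed25519.GenerateKey(rand.Reader)
    if err != nil {
        panic(err)
    }

    // Serialize as public bytes followed by the full private key.
    serialized := append(append([]byte{}, pub...), priv...)

    pubKey := data.NewED25519PublicKey(pub)
    privKey, _ := data.NewED25519PrivateKey(*pubKey, serialized)

    sig, _ := privKey.Sign(rand.Reader, []byte("hello"), nil)
    fmt.Println(ed25519.Verify(pub, []byte("hello"), sig)) // true

    // PublicKeyFromPrivate strips the private bytes but preserves the ID.
    fmt.Println(data.PublicKeyFromPrivate(privKey).ID() == pubKey.ID()) // true
}
```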
339
vendor/github.com/theupdateframework/notary/tuf/data/roles.go
generated
vendored
Normal file
@ -0,0 +1,339 @@
package data

import (
    "fmt"
    "path"
    "regexp"
    "strings"

    "github.com/sirupsen/logrus"
)

// Canonical base role names
var (
    CanonicalRootRole      RoleName = "root"
    CanonicalTargetsRole   RoleName = "targets"
    CanonicalSnapshotRole  RoleName = "snapshot"
    CanonicalTimestampRole RoleName = "timestamp"
)

// BaseRoles is an easy-to-iterate list of the top level roles.
var BaseRoles = []RoleName{
    CanonicalRootRole,
    CanonicalTargetsRole,
    CanonicalSnapshotRole,
    CanonicalTimestampRole,
}

// Regex for validating delegation names
var delegationRegexp = regexp.MustCompile("^[-a-z0-9_/]+$")

// ErrNoSuchRole indicates the role doesn't exist
type ErrNoSuchRole struct {
    Role RoleName
}

func (e ErrNoSuchRole) Error() string {
    return fmt.Sprintf("role does not exist: %s", e.Role)
}

// ErrInvalidRole represents an error regarding a role. Typically
// something like a role for which some of the public keys were
// not found in the TUF repo.
type ErrInvalidRole struct {
    Role   RoleName
    Reason string
}

func (e ErrInvalidRole) Error() string {
    if e.Reason != "" {
        return fmt.Sprintf("tuf: invalid role %s. %s", e.Role, e.Reason)
    }
    return fmt.Sprintf("tuf: invalid role %s.", e.Role)
}

// ValidRole only determines whether the name is semantically
// correct. For target delegated roles, it does NOT check that
// the appropriate parent roles exist.
func ValidRole(name RoleName) bool {
    if IsDelegation(name) {
        return true
    }

    for _, v := range BaseRoles {
        if name == v {
            return true
        }
    }
    return false
}

// IsDelegation checks if the role is a delegation (a role nested under targets/)
func IsDelegation(role RoleName) bool {
    strRole := role.String()
    targetsBase := CanonicalTargetsRole + "/"

    whitelistedChars := delegationRegexp.MatchString(strRole)

    // Limit size of full role string to 255 chars for db column size limit
    correctLength := len(role) < 256

    // Removes ., .., extra slashes, and trailing slash
    isClean := path.Clean(strRole) == strRole
    return strings.HasPrefix(strRole, targetsBase.String()) &&
        whitelistedChars &&
        correctLength &&
        isClean
}

// IsBaseRole checks if the role is a base role
func IsBaseRole(role RoleName) bool {
    for _, baseRole := range BaseRoles {
        if role == baseRole {
            return true
        }
    }
    return false
}

// IsWildDelegation determines if a role represents a valid wildcard delegation
// path, i.e. targets/*, targets/foo/*.
// The wildcard may only appear as the final part of the delegation and must
// be a whole segment, i.e. targets/foo* is not a valid wildcard delegation.
func IsWildDelegation(role RoleName) bool {
    if path.Clean(role.String()) != role.String() {
        return false
    }
    base := role.Parent()
    if !(IsDelegation(base) || base == CanonicalTargetsRole) {
        return false
    }
    return role[len(role)-2:] == "/*"
}
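
A few concrete cases for the naming rules above (runnable as-is, given the vendored import path):

```go
package main

import (
    "fmt"

    "github.com/theupdateframework/notary/tuf/data"
)

func main() {
    fmt.Println(data.IsDelegation("targets/releases"))  // true
    fmt.Println(data.IsDelegation("targets"))           // false: a base role, not a delegation
    fmt.Println(data.IsDelegation("targets/UPPER"))     // false: uppercase fails the regexp
    fmt.Println(data.IsWildDelegation("targets/foo/*")) // true
    fmt.Println(data.IsWildDelegation("targets/foo*"))  // false: wildcard must be a whole segment
}
```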

// BaseRole is an internal representation of a root/targets/snapshot/timestamp role, with its public keys included
type BaseRole struct {
    Keys      map[string]PublicKey
    Name      RoleName
    Threshold int
}

// NewBaseRole creates a new BaseRole object with the provided parameters
func NewBaseRole(name RoleName, threshold int, keys ...PublicKey) BaseRole {
    r := BaseRole{
        Name:      name,
        Threshold: threshold,
        Keys:      make(map[string]PublicKey),
    }
    for _, k := range keys {
        r.Keys[k.ID()] = k
    }
    return r
}

// ListKeys retrieves the public keys valid for this role
func (b BaseRole) ListKeys() KeyList {
    return listKeys(b.Keys)
}

// ListKeyIDs retrieves the list of key IDs valid for this role
func (b BaseRole) ListKeyIDs() []string {
    return listKeyIDs(b.Keys)
}

// Equals returns whether this BaseRole equals another BaseRole
func (b BaseRole) Equals(o BaseRole) bool {
    if b.Threshold != o.Threshold || b.Name != o.Name || len(b.Keys) != len(o.Keys) {
        return false
    }

    for keyID, key := range b.Keys {
        oKey, ok := o.Keys[keyID]
        if !ok || key.ID() != oKey.ID() {
            return false
        }
    }

    return true
}

// DelegationRole is an internal representation of a delegation role, with its public keys included
type DelegationRole struct {
    BaseRole
    Paths []string
}

func listKeys(keyMap map[string]PublicKey) KeyList {
    keys := KeyList{}
    for _, key := range keyMap {
        keys = append(keys, key)
    }
    return keys
}

func listKeyIDs(keyMap map[string]PublicKey) []string {
    keyIDs := []string{}
    for id := range keyMap {
        keyIDs = append(keyIDs, id)
    }
    return keyIDs
}

// Restrict restricts the paths and path hash prefixes for the passed in delegation role,
// returning a copy of the role with validated paths as if it was a direct child
func (d DelegationRole) Restrict(child DelegationRole) (DelegationRole, error) {
    if !d.IsParentOf(child) {
        return DelegationRole{}, fmt.Errorf("%s is not a parent of %s", d.Name, child.Name)
    }
    return DelegationRole{
        BaseRole: BaseRole{
            Keys:      child.Keys,
            Name:      child.Name,
            Threshold: child.Threshold,
        },
        Paths: RestrictDelegationPathPrefixes(d.Paths, child.Paths),
    }, nil
}

// IsParentOf returns whether the passed in delegation role is the direct child of this role,
// determined by delegation name.
// Ex: targets/a is a direct parent of targets/a/b, but targets/a is not a direct parent of targets/a/b/c
func (d DelegationRole) IsParentOf(child DelegationRole) bool {
    return path.Dir(child.Name.String()) == d.Name.String()
}

// CheckPaths checks if a given path is valid for the role
func (d DelegationRole) CheckPaths(path string) bool {
    return checkPaths(path, d.Paths)
}

func checkPaths(path string, permitted []string) bool {
    for _, p := range permitted {
        if strings.HasPrefix(path, p) {
            return true
        }
    }
    return false
}

// RestrictDelegationPathPrefixes returns the list of valid delegationPaths that are prefixed by parentPaths
func RestrictDelegationPathPrefixes(parentPaths, delegationPaths []string) []string {
    validPaths := []string{}
    if len(delegationPaths) == 0 {
        return validPaths
    }

    // Validate each individual delegation path
    for _, delgPath := range delegationPaths {
        isPrefixed := false
        for _, parentPath := range parentPaths {
            if strings.HasPrefix(delgPath, parentPath) {
                isPrefixed = true
                break
            }
        }
        // Only keep the delegation path if it matched some parent path prefix
        if isPrefixed {
            validPaths = append(validPaths, delgPath)
        }
    }
    return validPaths
}
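
A sketch of the prefix restriction above: a child path survives only when some parent path is a string prefix of it.

```go
package main

import (
    "fmt"

    "github.com/theupdateframework/notary/tuf/data"
)

func main() {
    parent := []string{"bin/", "docs/"}
    child := []string{"bin/linux/", "src/", "docs/api/"}

    // "src/" is dropped because no parent path prefixes it.
    fmt.Println(data.RestrictDelegationPathPrefixes(parent, child))
    // Output: [bin/linux/ docs/api/]
}
```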

// RootRole is a cut down role as it appears in the root.json.
// It should eventually be used only immediately before and after serialization/deserialization.
type RootRole struct {
    KeyIDs    []string `json:"keyids"`
    Threshold int      `json:"threshold"`
}

// Role is a more verbose role as it appears in targets delegations.
// It should eventually be used only immediately before and after serialization/deserialization.
type Role struct {
    RootRole
    Name  RoleName `json:"name"`
    Paths []string `json:"paths,omitempty"`
}

// NewRole creates a new Role object from the given parameters
func NewRole(name RoleName, threshold int, keyIDs, paths []string) (*Role, error) {
    if IsDelegation(name) {
        if len(paths) == 0 {
            logrus.Debugf("role %s with no Paths will never be able to publish content until one or more are added", name)
        }
    }
    if threshold < 1 {
        return nil, ErrInvalidRole{Role: name}
    }
    if !ValidRole(name) {
        return nil, ErrInvalidRole{Role: name}
    }
    return &Role{
        RootRole: RootRole{
            KeyIDs:    keyIDs,
            Threshold: threshold,
        },
        Name:  name,
        Paths: paths,
    }, nil
}
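
The validation above in action: a delegation with a positive threshold is accepted, a zero threshold is rejected (the key ID here is a placeholder):

```go
package main

import (
    "fmt"

    "github.com/theupdateframework/notary/tuf/data"
)

func main() {
    r, err := data.NewRole("targets/releases", 1, []string{"example-key-id"}, []string{"bin/"})
    fmt.Println(r.Name, err) // targets/releases <nil>

    _, err = data.NewRole("targets/releases", 0, nil, nil)
    fmt.Println(err) // tuf: invalid role targets/releases.
}
```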

// CheckPaths checks if a given path is valid for the role
func (r Role) CheckPaths(path string) bool {
    return checkPaths(path, r.Paths)
}

// AddKeys merges the ids into the current list of role key ids
func (r *Role) AddKeys(ids []string) {
    r.KeyIDs = mergeStrSlices(r.KeyIDs, ids)
}

// AddPaths merges the paths into the current list of role paths
func (r *Role) AddPaths(paths []string) error {
    if len(paths) == 0 {
        return nil
    }
    r.Paths = mergeStrSlices(r.Paths, paths)
    return nil
}

// RemoveKeys removes the ids from the current list of key ids
func (r *Role) RemoveKeys(ids []string) {
    r.KeyIDs = subtractStrSlices(r.KeyIDs, ids)
}

// RemovePaths removes the paths from the current list of role paths
func (r *Role) RemovePaths(paths []string) {
    r.Paths = subtractStrSlices(r.Paths, paths)
}

func mergeStrSlices(orig, new []string) []string {
    have := make(map[string]bool)
    for _, e := range orig {
        have[e] = true
    }
    merged := make([]string, len(orig), len(orig)+len(new))
    copy(merged, orig)
    for _, e := range new {
        if !have[e] {
            merged = append(merged, e)
        }
    }
    return merged
}

func subtractStrSlices(orig, remove []string) []string {
    kill := make(map[string]bool)
    for _, e := range remove {
        kill[e] = true
    }
    var keep []string
    for _, e := range orig {
        if !kill[e] {
            keep = append(keep, e)
        }
    }
    return keep
}
171
vendor/github.com/theupdateframework/notary/tuf/data/root.go
generated
vendored
Normal file
@ -0,0 +1,171 @@
package data

import (
    "fmt"

    "github.com/docker/go/canonical/json"
)

// SignedRoot is a fully unpacked root.json
type SignedRoot struct {
    Signatures []Signature
    Signed     Root
    Dirty      bool
}

// Root is the Signed component of a root.json
type Root struct {
    SignedCommon
    Keys               Keys                   `json:"keys"`
    Roles              map[RoleName]*RootRole `json:"roles"`
    ConsistentSnapshot bool                   `json:"consistent_snapshot"`
}

// isValidRootStructure returns an error, or nil, depending on whether the content of the struct
// is valid for root metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func isValidRootStructure(r Root) error {
    expectedType := TUFTypes[CanonicalRootRole]
    if r.Type != expectedType {
        return ErrInvalidMetadata{
            role: CanonicalRootRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, r.Type)}
    }

    if r.Version < 1 {
        return ErrInvalidMetadata{
            role: CanonicalRootRole, msg: "version cannot be less than 1"}
    }

    // all the base roles MUST appear in the root.json - other roles are allowed,
    // but other than the mirror role (not currently supported) are out of spec
    for _, roleName := range BaseRoles {
        roleObj, ok := r.Roles[roleName]
        if !ok || roleObj == nil {
            return ErrInvalidMetadata{
                role: CanonicalRootRole, msg: fmt.Sprintf("missing %s role specification", roleName)}
        }
        if err := isValidRootRoleStructure(CanonicalRootRole, roleName, *roleObj, r.Keys); err != nil {
            return err
        }
    }
    return nil
}

func isValidRootRoleStructure(metaContainingRole, rootRoleName RoleName, r RootRole, validKeys Keys) error {
    if r.Threshold < 1 {
        return ErrInvalidMetadata{
            role: metaContainingRole,
            msg:  fmt.Sprintf("invalid threshold specified for %s: %v ", rootRoleName, r.Threshold),
        }
    }
    for _, keyID := range r.KeyIDs {
        if _, ok := validKeys[keyID]; !ok {
            return ErrInvalidMetadata{
                role: metaContainingRole,
                msg:  fmt.Sprintf("key ID %s specified in %s without corresponding key", keyID, rootRoleName),
            }
        }
    }
    return nil
}

// NewRoot initializes a new SignedRoot with a set of keys, roles, and the consistent flag
func NewRoot(keys map[string]PublicKey, roles map[RoleName]*RootRole, consistent bool) (*SignedRoot, error) {
    signedRoot := &SignedRoot{
        Signatures: make([]Signature, 0),
        Signed: Root{
            SignedCommon: SignedCommon{
                Type:    TUFTypes[CanonicalRootRole],
                Version: 0,
                Expires: DefaultExpires(CanonicalRootRole),
            },
            Keys:               keys,
            Roles:              roles,
            ConsistentSnapshot: consistent,
        },
        Dirty: true,
    }

    return signedRoot, nil
}

// BuildBaseRole returns a copy of a BaseRole using the information in this SignedRoot for the specified role name.
// Will error for invalid role name or key metadata within this SignedRoot
func (r SignedRoot) BuildBaseRole(roleName RoleName) (BaseRole, error) {
    roleData, ok := r.Signed.Roles[roleName]
    if !ok {
        return BaseRole{}, ErrInvalidRole{Role: roleName, Reason: "role not found in root file"}
    }
    // Get all public keys for the base role from TUF metadata
    keyIDs := roleData.KeyIDs
    pubKeys := make(map[string]PublicKey)
    for _, keyID := range keyIDs {
        pubKey, ok := r.Signed.Keys[keyID]
        if !ok {
            return BaseRole{}, ErrInvalidRole{
                Role:   roleName,
                Reason: fmt.Sprintf("key with ID %s was not found in root metadata", keyID),
            }
        }
        pubKeys[keyID] = pubKey
    }

    return BaseRole{
        Name:      roleName,
        Keys:      pubKeys,
        Threshold: roleData.Threshold,
    }, nil
}

// ToSigned partially serializes a SignedRoot for further signing
func (r SignedRoot) ToSigned() (*Signed, error) {
    s, err := defaultSerializer.MarshalCanonical(r.Signed)
    if err != nil {
        return nil, err
    }
    // cast into a json.RawMessage
    signed := json.RawMessage{}
    err = signed.UnmarshalJSON(s)
    if err != nil {
        return nil, err
    }
    sigs := make([]Signature, len(r.Signatures))
    copy(sigs, r.Signatures)
    return &Signed{
        Signatures: sigs,
        Signed:     &signed,
    }, nil
}

// MarshalJSON returns the serialized form of SignedRoot as bytes
func (r SignedRoot) MarshalJSON() ([]byte, error) {
    signed, err := r.ToSigned()
    if err != nil {
        return nil, err
    }
    return defaultSerializer.Marshal(signed)
}

// RootFromSigned fully unpacks a Signed object into a SignedRoot and ensures
// that it is a valid SignedRoot
func RootFromSigned(s *Signed) (*SignedRoot, error) {
    r := Root{}
    if s.Signed == nil {
        return nil, ErrInvalidMetadata{
            role: CanonicalRootRole,
            msg:  "root file contained an empty payload",
        }
    }
    if err := defaultSerializer.Unmarshal(*s.Signed, &r); err != nil {
        return nil, err
    }
    if err := isValidRootStructure(r); err != nil {
        return nil, err
    }
    sigs := make([]Signature, len(s.Signatures))
    copy(sigs, s.Signatures)
    return &SignedRoot{
        Signatures: sigs,
        Signed:     r,
    }, nil
}
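
A sketch of assembling a minimal, structurally valid root: one placeholder key shared across all four base roles with threshold 1, serialized via the MarshalJSON path above. The key bytes are placeholders, and it is assumed that *ED25519PublicKey satisfies the PublicKey interface:

```go
package main

import (
    "fmt"

    "github.com/docker/go/canonical/json"
    "github.com/theupdateframework/notary/tuf/data"
)

func main() {
    key := data.NewED25519PublicKey([]byte("placeholder-public-bytes"))

    roles := make(map[data.RoleName]*data.RootRole)
    for _, name := range data.BaseRoles {
        roles[name] = &data.RootRole{KeyIDs: []string{key.ID()}, Threshold: 1}
    }

    root, err := data.NewRoot(map[string]data.PublicKey{key.ID(): key}, roles, false)
    if err != nil {
        panic(err)
    }

    raw, err := json.Marshal(root) // goes through SignedRoot.MarshalJSON -> ToSigned
    fmt.Println(len(raw) > 0, err) // true <nil>
}
```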
36
vendor/github.com/theupdateframework/notary/tuf/data/serializer.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
package data

import "github.com/docker/go/canonical/json"

// serializer is an interface that can marshal and unmarshal TUF data. This
// is expected to be a canonical JSON marshaller
type serializer interface {
    MarshalCanonical(from interface{}) ([]byte, error)
    Marshal(from interface{}) ([]byte, error)
    Unmarshal(from []byte, to interface{}) error
}

// canonicalJSON marshals to and from canonical JSON
type canonicalJSON struct{}

// MarshalCanonical returns the canonical JSON form of a thing
func (c canonicalJSON) MarshalCanonical(from interface{}) ([]byte, error) {
    return json.MarshalCanonical(from)
}

// Marshal returns the regular non-canonical JSON form of a thing
func (c canonicalJSON) Marshal(from interface{}) ([]byte, error) {
    return json.Marshal(from)
}

// Unmarshal unmarshals some JSON bytes
func (c canonicalJSON) Unmarshal(from []byte, to interface{}) error {
    return json.Unmarshal(from, to)
}

// defaultSerializer is a canonical JSON serializer
var defaultSerializer serializer = canonicalJSON{}

func setDefaultSerializer(s serializer) {
    defaultSerializer = s
}
169
vendor/github.com/theupdateframework/notary/tuf/data/snapshot.go
generated
vendored
Normal file
@ -0,0 +1,169 @@
package data

import (
    "bytes"
    "fmt"

    "github.com/docker/go/canonical/json"
    "github.com/sirupsen/logrus"
    "github.com/theupdateframework/notary"
)

// SignedSnapshot is a fully unpacked snapshot.json
type SignedSnapshot struct {
    Signatures []Signature
    Signed     Snapshot
    Dirty      bool
}

// Snapshot is the Signed component of a snapshot.json
type Snapshot struct {
    SignedCommon
    Meta Files `json:"meta"`
}

// IsValidSnapshotStructure returns an error, or nil, depending on whether the content of the
// struct is valid for snapshot metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func IsValidSnapshotStructure(s Snapshot) error {
    expectedType := TUFTypes[CanonicalSnapshotRole]
    if s.Type != expectedType {
        return ErrInvalidMetadata{
            role: CanonicalSnapshotRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, s.Type)}
    }

    if s.Version < 1 {
        return ErrInvalidMetadata{
            role: CanonicalSnapshotRole, msg: "version cannot be less than one"}
    }

    for _, file := range []RoleName{CanonicalRootRole, CanonicalTargetsRole} {
        // Meta is a map of FileMeta, so if the role isn't in the map it returns
        // an empty FileMeta, which has an empty map, and you can check on keys
        // from an empty map.
        //
        // For now sha256 is required and sha512 is not.
        if _, ok := s.Meta[file.String()].Hashes[notary.SHA256]; !ok {
            return ErrInvalidMetadata{
                role: CanonicalSnapshotRole,
                msg:  fmt.Sprintf("missing %s sha256 checksum information", file.String()),
            }
        }
        if err := CheckValidHashStructures(s.Meta[file.String()].Hashes); err != nil {
            return ErrInvalidMetadata{
                role: CanonicalSnapshotRole,
                msg:  fmt.Sprintf("invalid %s checksum information, %v", file.String(), err),
            }
        }
    }
    return nil
}

// NewSnapshot initializes a SignedSnapshot with a given top level root
// and targets objects
func NewSnapshot(root *Signed, targets *Signed) (*SignedSnapshot, error) {
    logrus.Debug("generating new snapshot...")
    targetsJSON, err := json.Marshal(targets)
    if err != nil {
        logrus.Debug("Error Marshalling Targets")
        return nil, err
    }
    rootJSON, err := json.Marshal(root)
    if err != nil {
        logrus.Debug("Error Marshalling Root")
        return nil, err
    }
    rootMeta, err := NewFileMeta(bytes.NewReader(rootJSON), NotaryDefaultHashes...)
    if err != nil {
        return nil, err
    }
    targetsMeta, err := NewFileMeta(bytes.NewReader(targetsJSON), NotaryDefaultHashes...)
    if err != nil {
        return nil, err
    }
    return &SignedSnapshot{
        Signatures: make([]Signature, 0),
        Signed: Snapshot{
            SignedCommon: SignedCommon{
                Type:    TUFTypes[CanonicalSnapshotRole],
                Version: 0,
                Expires: DefaultExpires(CanonicalSnapshotRole),
            },
            Meta: Files{
                CanonicalRootRole.String():    rootMeta,
                CanonicalTargetsRole.String(): targetsMeta,
            },
        },
    }, nil
}
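
An end-to-end sketch for NewSnapshot: wrap trivial payloads as Signed objects, build the snapshot, and read back the tracked root meta. The `{}` payloads stand in for real serialized root/targets files:

```go
package main

import (
    "fmt"

    "github.com/docker/go/canonical/json"
    "github.com/theupdateframework/notary/tuf/data"
)

func main() {
    payload := json.RawMessage(`{}`)
    root := &data.Signed{Signed: &payload}
    targets := &data.Signed{Signed: &payload}

    sp, err := data.NewSnapshot(root, targets)
    if err != nil {
        panic(err)
    }

    meta, err := sp.GetMeta(data.CanonicalRootRole)
    if err != nil {
        panic(err)
    }
    fmt.Println(meta.Length, len(meta.Hashes)) // marshalled root length, 2 default hashes
}
```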

// ToSigned partially serializes a SignedSnapshot for further signing
func (sp *SignedSnapshot) ToSigned() (*Signed, error) {
    s, err := defaultSerializer.MarshalCanonical(sp.Signed)
    if err != nil {
        return nil, err
    }
    signed := json.RawMessage{}
    err = signed.UnmarshalJSON(s)
    if err != nil {
        return nil, err
    }
    sigs := make([]Signature, len(sp.Signatures))
    copy(sigs, sp.Signatures)
    return &Signed{
        Signatures: sigs,
        Signed:     &signed,
    }, nil
}

// AddMeta updates a role in the snapshot with new meta
func (sp *SignedSnapshot) AddMeta(role RoleName, meta FileMeta) {
    sp.Signed.Meta[role.String()] = meta
    sp.Dirty = true
}

// GetMeta gets the metadata for a particular role, returning an error if it's
// not found
func (sp *SignedSnapshot) GetMeta(role RoleName) (*FileMeta, error) {
    if meta, ok := sp.Signed.Meta[role.String()]; ok {
        if _, ok := meta.Hashes["sha256"]; ok {
            return &meta, nil
        }
    }
    return nil, ErrMissingMeta{Role: role.String()}
}

// DeleteMeta removes a role from the snapshot. If the role doesn't
// exist in the snapshot, it's a noop.
func (sp *SignedSnapshot) DeleteMeta(role RoleName) {
    if _, ok := sp.Signed.Meta[role.String()]; ok {
        delete(sp.Signed.Meta, role.String())
        sp.Dirty = true
    }
}

// MarshalJSON returns the serialized form of SignedSnapshot as bytes
func (sp *SignedSnapshot) MarshalJSON() ([]byte, error) {
    signed, err := sp.ToSigned()
    if err != nil {
        return nil, err
    }
    return defaultSerializer.Marshal(signed)
}

// SnapshotFromSigned fully unpacks a Signed object into a SignedSnapshot
func SnapshotFromSigned(s *Signed) (*SignedSnapshot, error) {
    sp := Snapshot{}
    if err := defaultSerializer.Unmarshal(*s.Signed, &sp); err != nil {
        return nil, err
    }
    if err := IsValidSnapshotStructure(sp); err != nil {
        return nil, err
    }
    sigs := make([]Signature, len(s.Signatures))
    copy(sigs, s.Signatures)
    return &SignedSnapshot{
        Signatures: sigs,
        Signed:     sp,
    }, nil
}
201
vendor/github.com/theupdateframework/notary/tuf/data/targets.go
generated
vendored
Normal file
@ -0,0 +1,201 @@
package data

import (
    "errors"
    "fmt"
    "path"

    "github.com/docker/go/canonical/json"
)

// SignedTargets is a fully unpacked targets.json, or target delegation
// json file
type SignedTargets struct {
    Signatures []Signature
    Signed     Targets
    Dirty      bool
}

// Targets is the Signed component of a targets.json or delegation json file
type Targets struct {
    SignedCommon
    Targets     Files       `json:"targets"`
    Delegations Delegations `json:"delegations,omitempty"`
}

// isValidTargetsStructure returns an error, or nil, depending on whether the content of the struct
// is valid for targets metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func isValidTargetsStructure(t Targets, roleName RoleName) error {
    if roleName != CanonicalTargetsRole && !IsDelegation(roleName) {
        return ErrInvalidRole{Role: roleName}
    }

    // even if it's a delegated role, the metadata type is "Targets"
    expectedType := TUFTypes[CanonicalTargetsRole]
    if t.Type != expectedType {
        return ErrInvalidMetadata{
            role: roleName, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
    }

    if t.Version < 1 {
        return ErrInvalidMetadata{role: roleName, msg: "version cannot be less than one"}
    }

    for _, roleObj := range t.Delegations.Roles {
        if !IsDelegation(roleObj.Name) || path.Dir(roleObj.Name.String()) != roleName.String() {
            return ErrInvalidMetadata{
                role: roleName, msg: fmt.Sprintf("delegation role %s invalid", roleObj.Name)}
        }
        if err := isValidRootRoleStructure(roleName, roleObj.Name, roleObj.RootRole, t.Delegations.Keys); err != nil {
            return err
        }
    }
    return nil
}

// NewTargets initializes a new empty SignedTargets object
func NewTargets() *SignedTargets {
    return &SignedTargets{
        Signatures: make([]Signature, 0),
        Signed: Targets{
            SignedCommon: SignedCommon{
                Type:    TUFTypes["targets"],
                Version: 0,
                Expires: DefaultExpires("targets"),
            },
            Targets:     make(Files),
            Delegations: *NewDelegations(),
        },
        Dirty: true,
    }
}

// GetMeta attempts to find the targets entry for the path. It
// will return nil in the case of the target not being found.
func (t SignedTargets) GetMeta(path string) *FileMeta {
    for p, meta := range t.Signed.Targets {
        if p == path {
            return &meta
        }
    }
    return nil
}

// GetValidDelegations filters the delegation roles specified in the signed targets, and
// only returns roles that are direct children and restricts their paths
func (t SignedTargets) GetValidDelegations(parent DelegationRole) []DelegationRole {
    roles := t.buildDelegationRoles()
    result := []DelegationRole{}
    for _, r := range roles {
        validRole, err := parent.Restrict(r)
        if err != nil {
            continue
        }
        result = append(result, validRole)
    }
    return result
}

// BuildDelegationRole returns a copy of a DelegationRole using the information in this SignedTargets for the specified role name.
// Will error for invalid role name or key metadata within this SignedTargets. Path data is not validated.
func (t *SignedTargets) BuildDelegationRole(roleName RoleName) (DelegationRole, error) {
    for _, role := range t.Signed.Delegations.Roles {
        if role.Name == roleName {
            pubKeys := make(map[string]PublicKey)
            for _, keyID := range role.KeyIDs {
                pubKey, ok := t.Signed.Delegations.Keys[keyID]
                if !ok {
                    // Couldn't retrieve all keys, so stop walking and return invalid role
                    return DelegationRole{}, ErrInvalidRole{
                        Role:   roleName,
                        Reason: "role lists unknown key " + keyID + " as a signing key",
                    }
                }
                pubKeys[keyID] = pubKey
            }
            return DelegationRole{
                BaseRole: BaseRole{
                    Name:      role.Name,
                    Keys:      pubKeys,
                    Threshold: role.Threshold,
                },
                Paths: role.Paths,
            }, nil
        }
    }
    return DelegationRole{}, ErrNoSuchRole{Role: roleName}
}

// helper function to create DelegationRole structures from all delegations in a SignedTargets,
// these delegations are read directly from the SignedTargets and not modified or validated
func (t SignedTargets) buildDelegationRoles() []DelegationRole {
    var roles []DelegationRole
    for _, roleData := range t.Signed.Delegations.Roles {
        delgRole, err := t.BuildDelegationRole(roleData.Name)
        if err != nil {
            continue
        }
        roles = append(roles, delgRole)
    }
    return roles
}

// AddTarget adds or updates the meta for the given path
func (t *SignedTargets) AddTarget(path string, meta FileMeta) {
    t.Signed.Targets[path] = meta
    t.Dirty = true
}
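
Adding a target entry and reading it back through the methods above; the file contents are placeholders:

```go
package main

import (
    "bytes"
    "fmt"

    "github.com/theupdateframework/notary/tuf/data"
)

func main() {
    t := data.NewTargets()

    meta, err := data.NewFileMeta(bytes.NewReader([]byte("binary contents")), data.NotaryDefaultHashes...)
    if err != nil {
        panic(err)
    }
    t.AddTarget("bin/app", meta)

    if m := t.GetMeta("bin/app"); m != nil {
        fmt.Println(m.Length) // 15
    }
    fmt.Println(t.Dirty) // true
}
```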

// AddDelegation will add a new delegated role with the given keys,
// ensuring the keys either already exist, or are added to the map
// of delegation keys
func (t *SignedTargets) AddDelegation(role *Role, keys []*PublicKey) error {
    return errors.New("not implemented")
}

// ToSigned partially serializes a SignedTargets for further signing
func (t *SignedTargets) ToSigned() (*Signed, error) {
    s, err := defaultSerializer.MarshalCanonical(t.Signed)
    if err != nil {
        return nil, err
    }
    signed := json.RawMessage{}
    err = signed.UnmarshalJSON(s)
    if err != nil {
        return nil, err
    }
    sigs := make([]Signature, len(t.Signatures))
    copy(sigs, t.Signatures)
    return &Signed{
        Signatures: sigs,
        Signed:     &signed,
    }, nil
}

// MarshalJSON returns the serialized form of SignedTargets as bytes
func (t *SignedTargets) MarshalJSON() ([]byte, error) {
    signed, err := t.ToSigned()
    if err != nil {
        return nil, err
    }
    return defaultSerializer.Marshal(signed)
}

// TargetsFromSigned fully unpacks a Signed object into a SignedTargets, given
// a role name (so it can validate the SignedTargets object)
func TargetsFromSigned(s *Signed, roleName RoleName) (*SignedTargets, error) {
    t := Targets{}
    if err := defaultSerializer.Unmarshal(*s.Signed, &t); err != nil {
        return nil, err
    }
    if err := isValidTargetsStructure(t, roleName); err != nil {
        return nil, err
    }
    sigs := make([]Signature, len(s.Signatures))
    copy(sigs, s.Signatures)
    return &SignedTargets{
        Signatures: sigs,
        Signed:     t,
    }, nil
}
136
vendor/github.com/theupdateframework/notary/tuf/data/timestamp.go
generated
vendored
Normal file
@ -0,0 +1,136 @@
package data

import (
    "bytes"
    "fmt"

    "github.com/docker/go/canonical/json"
    "github.com/theupdateframework/notary"
)

// SignedTimestamp is a fully unpacked timestamp.json
type SignedTimestamp struct {
    Signatures []Signature
    Signed     Timestamp
    Dirty      bool
}

// Timestamp is the Signed component of a timestamp.json
type Timestamp struct {
    SignedCommon
    Meta Files `json:"meta"`
}

// IsValidTimestampStructure returns an error, or nil, depending on whether the content of the struct
// is valid for timestamp metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func IsValidTimestampStructure(t Timestamp) error {
    expectedType := TUFTypes[CanonicalTimestampRole]
    if t.Type != expectedType {
        return ErrInvalidMetadata{
            role: CanonicalTimestampRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
    }

    if t.Version < 1 {
        return ErrInvalidMetadata{
            role: CanonicalTimestampRole, msg: "version cannot be less than one"}
    }

    // Meta is a map of FileMeta, so if the role isn't in the map it returns
    // an empty FileMeta, which has an empty map, and you can check on keys
    // from an empty map.
    //
    // For now sha256 is required and sha512 is not.
    if _, ok := t.Meta[CanonicalSnapshotRole.String()].Hashes[notary.SHA256]; !ok {
        return ErrInvalidMetadata{
            role: CanonicalTimestampRole, msg: "missing snapshot sha256 checksum information"}
    }
    if err := CheckValidHashStructures(t.Meta[CanonicalSnapshotRole.String()].Hashes); err != nil {
        return ErrInvalidMetadata{
            role: CanonicalTimestampRole, msg: fmt.Sprintf("invalid snapshot checksum information, %v", err)}
    }

    return nil
}

// NewTimestamp initializes a timestamp with an existing snapshot
func NewTimestamp(snapshot *Signed) (*SignedTimestamp, error) {
    snapshotJSON, err := json.Marshal(snapshot)
    if err != nil {
        return nil, err
    }
    snapshotMeta, err := NewFileMeta(bytes.NewReader(snapshotJSON), NotaryDefaultHashes...)
    if err != nil {
        return nil, err
    }
    return &SignedTimestamp{
        Signatures: make([]Signature, 0),
        Signed: Timestamp{
            SignedCommon: SignedCommon{
                Type:    TUFTypes[CanonicalTimestampRole],
                Version: 0,
                Expires: DefaultExpires(CanonicalTimestampRole),
            },
            Meta: Files{
                CanonicalSnapshotRole.String(): snapshotMeta,
            },
        },
    }, nil
}

// ToSigned partially serializes a SignedTimestamp such that it can
// be signed
func (ts *SignedTimestamp) ToSigned() (*Signed, error) {
    s, err := defaultSerializer.MarshalCanonical(ts.Signed)
    if err != nil {
        return nil, err
    }
    signed := json.RawMessage{}
    err = signed.UnmarshalJSON(s)
    if err != nil {
        return nil, err
    }
    sigs := make([]Signature, len(ts.Signatures))
    copy(sigs, ts.Signatures)
    return &Signed{
        Signatures: sigs,
        Signed:     &signed,
    }, nil
}

// GetSnapshot gets the expected snapshot metadata hashes in the timestamp metadata,
// or nil if it doesn't exist
func (ts *SignedTimestamp) GetSnapshot() (*FileMeta, error) {
    snapshotExpected, ok := ts.Signed.Meta[CanonicalSnapshotRole.String()]
    if !ok {
        return nil, ErrMissingMeta{Role: CanonicalSnapshotRole.String()}
    }
    return &snapshotExpected, nil
}

// MarshalJSON returns the serialized form of SignedTimestamp as bytes
func (ts *SignedTimestamp) MarshalJSON() ([]byte, error) {
    signed, err := ts.ToSigned()
    if err != nil {
        return nil, err
    }
    return defaultSerializer.Marshal(signed)
}

// TimestampFromSigned parses a Signed object into a fully unpacked
// SignedTimestamp
func TimestampFromSigned(s *Signed) (*SignedTimestamp, error) {
    ts := Timestamp{}
    if err := defaultSerializer.Unmarshal(*s.Signed, &ts); err != nil {
        return nil, err
    }
    if err := IsValidTimestampStructure(ts); err != nil {
        return nil, err
    }
    sigs := make([]Signature, len(s.Signatures))
    copy(sigs, s.Signatures)
    return &SignedTimestamp{
        Signatures: sigs,
        Signed:     ts,
    }, nil
}
390
vendor/github.com/theupdateframework/notary/tuf/data/types.go
generated
vendored
Normal file
@ -0,0 +1,390 @@
package data

import (
    "bytes"
    "crypto/sha256"
    "crypto/sha512"
    "crypto/subtle"
    "encoding/hex"
    "fmt"
    "hash"
    "io"
    "io/ioutil"
    "path"
    "strings"
    "time"

    "github.com/docker/go/canonical/json"
    "github.com/sirupsen/logrus"
    "github.com/theupdateframework/notary"
)

// GUN is a Globally Unique Name. It is used to identify trust collections.
// An example usage of this is for container image repositories.
// For example: myregistry.io/myuser/myimage
type GUN string

func (g GUN) String() string {
    return string(g)
}

// RoleName type for specifying role
type RoleName string

func (r RoleName) String() string {
    return string(r)
}

// Parent provides the parent path role from the provided child role
func (r RoleName) Parent() RoleName {
    return RoleName(path.Dir(r.String()))
}

// MetadataRoleMapToStringMap generates a map string of bytes from a map RoleName of bytes
func MetadataRoleMapToStringMap(roles map[RoleName][]byte) map[string][]byte {
    metadata := make(map[string][]byte)
    for k, v := range roles {
        metadata[k.String()] = v
    }
    return metadata
}

// NewRoleList generates an array of RoleName objects from a slice of strings
func NewRoleList(roles []string) []RoleName {
    var roleNames []RoleName
    for _, role := range roles {
        roleNames = append(roleNames, RoleName(role))
    }
    return roleNames
}

// RolesListToStringList generates an array of string objects from a slice of roles
func RolesListToStringList(roles []RoleName) []string {
    var roleNames []string
    for _, role := range roles {
        roleNames = append(roleNames, role.String())
    }
    return roleNames
}

// SigAlgorithm for types of signatures
type SigAlgorithm string

func (k SigAlgorithm) String() string {
    return string(k)
}

const defaultHashAlgorithm = "sha256"

// NotaryDefaultExpiries is the construct used to configure the default expiry times of
// the various role files.
var NotaryDefaultExpiries = map[RoleName]time.Duration{
    CanonicalRootRole:      notary.NotaryRootExpiry,
    CanonicalTargetsRole:   notary.NotaryTargetsExpiry,
    CanonicalSnapshotRole:  notary.NotarySnapshotExpiry,
    CanonicalTimestampRole: notary.NotaryTimestampExpiry,
}

// Signature types
const (
    EDDSASignature       SigAlgorithm = "eddsa"
    RSAPSSSignature      SigAlgorithm = "rsapss"
    RSAPKCS1v15Signature SigAlgorithm = "rsapkcs1v15"
    ECDSASignature       SigAlgorithm = "ecdsa"
    PyCryptoSignature    SigAlgorithm = "pycrypto-pkcs#1 pss"
)

// Key types
const (
    ED25519Key   = "ed25519"
    RSAKey       = "rsa"
    RSAx509Key   = "rsa-x509"
    ECDSAKey     = "ecdsa"
    ECDSAx509Key = "ecdsa-x509"
)

// TUFTypes is the set of metadata types
var TUFTypes = map[RoleName]string{
    CanonicalRootRole:      "Root",
    CanonicalTargetsRole:   "Targets",
    CanonicalSnapshotRole:  "Snapshot",
    CanonicalTimestampRole: "Timestamp",
}

// ValidTUFType checks if the given type is valid for the role
func ValidTUFType(typ string, role RoleName) bool {
    if ValidRole(role) {
        // All targets delegation roles must have the type that is
        // valid for targets.
        if role == "" {
            // role is unknown and does not map to
            // a type
            return false
        }
        if strings.HasPrefix(role.String(), CanonicalTargetsRole.String()+"/") {
            role = CanonicalTargetsRole
        }
    }
    // Most people will just use the defaults, so check the map first.
    // Compare the values just in case a key and value in the map differ.
    if v, ok := TUFTypes[role]; ok {
        return typ == v
    }
    return false
}

// Signed is the high level, partially deserialized metadata object
// used to verify signatures before fully unpacking, or to add signatures
// before fully packing
type Signed struct {
    Signed     *json.RawMessage `json:"signed"`
    Signatures []Signature      `json:"signatures"`
}

// SignedCommon contains the fields common to the Signed component of all
// TUF metadata files
type SignedCommon struct {
    Type    string    `json:"_type"`
    Expires time.Time `json:"expires"`
    Version int       `json:"version"`
}

// SignedMeta is used in server validation where we only need signatures
// and common fields
type SignedMeta struct {
    Signed     SignedCommon `json:"signed"`
    Signatures []Signature  `json:"signatures"`
}

// Signature is a signature on a piece of metadata
type Signature struct {
    KeyID     string       `json:"keyid"`
    Method    SigAlgorithm `json:"method"`
    Signature []byte       `json:"sig"`
    IsValid   bool         `json:"-"`
}

// Files is the map of paths to file meta container in targets and delegations
// metadata files
type Files map[string]FileMeta

// Hashes is the map of hash type to digest created for each metadata
// and target file
type Hashes map[string][]byte

// NotaryDefaultHashes contains the default supported hash algorithms.
var NotaryDefaultHashes = []string{notary.SHA256, notary.SHA512}

// FileMeta contains the size and hashes for a metadata or target file. Custom
// data can be optionally added.
type FileMeta struct {
    Length int64            `json:"length"`
    Hashes Hashes           `json:"hashes"`
    Custom *json.RawMessage `json:"custom,omitempty"`
}

// Equals returns true if the other FileMeta object is equivalent to this one
func (f FileMeta) Equals(o FileMeta) bool {
    if o.Length != f.Length || len(o.Hashes) != len(f.Hashes) {
        return false
    }
    if f.Custom == nil && o.Custom != nil || f.Custom != nil && o.Custom == nil {
        return false
    }
    // we don't care if these are valid hashes, just that they are equal
    for key, val := range f.Hashes {
        if !bytes.Equal(val, o.Hashes[key]) {
            return false
        }
    }
    if f.Custom == nil && o.Custom == nil {
        return true
    }
    fBytes, err := f.Custom.MarshalJSON()
    if err != nil {
        return false
    }
    oBytes, err := o.Custom.MarshalJSON()
    if err != nil {
        return false
    }
    return bytes.Equal(fBytes, oBytes)
}

// CheckHashes verifies all the checksums specified by the "hashes" of the payload.
func CheckHashes(payload []byte, name string, hashes Hashes) error {
    cnt := 0

    // k, v indicate the hash algorithm and the corresponding value
    for k, v := range hashes {
        switch k {
        case notary.SHA256:
            checksum := sha256.Sum256(payload)
            if subtle.ConstantTimeCompare(checksum[:], v) == 0 {
                return ErrMismatchedChecksum{alg: notary.SHA256, name: name, expected: hex.EncodeToString(v)}
            }
            cnt++
        case notary.SHA512:
            checksum := sha512.Sum512(payload)
            if subtle.ConstantTimeCompare(checksum[:], v) == 0 {
                return ErrMismatchedChecksum{alg: notary.SHA512, name: name, expected: hex.EncodeToString(v)}
            }
            cnt++
        }
    }

    if cnt == 0 {
        return ErrMissingMeta{Role: name}
    }

    return nil
}
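
Verifying a payload against recorded checksums with the function above; this assumes notary.SHA256 is the string "sha256", as the rest of this file implies:

```go
package main

import (
    "crypto/sha256"
    "fmt"

    "github.com/theupdateframework/notary/tuf/data"
)

func main() {
    payload := []byte("metadata bytes")
    sum := sha256.Sum256(payload)
    hashes := data.Hashes{"sha256": sum[:]}

    fmt.Println(data.CheckHashes(payload, "root", hashes))            // <nil>
    fmt.Println(data.CheckHashes([]byte("tampered"), "root", hashes)) // checksum mismatch error
}
```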

// CompareMultiHashes verifies that the two Hashes passed in can represent the same data.
// This means that both maps must define at least one common hash algorithm, and no
// common algorithm may have conflicting values.
// Note that we check the intersection of map keys, which adds support for non-default hash algorithms in notary
func CompareMultiHashes(hashes1, hashes2 Hashes) error {
    // First check if the two hash structures are valid
    if err := CheckValidHashStructures(hashes1); err != nil {
        return err
    }
    if err := CheckValidHashStructures(hashes2); err != nil {
        return err
    }
    // Check if they have at least one matching hash, and no conflicts
    cnt := 0
    for hashAlg, hash1 := range hashes1 {
        hash2, ok := hashes2[hashAlg]
        if !ok {
            continue
        }

        if subtle.ConstantTimeCompare(hash1[:], hash2[:]) == 0 {
            return fmt.Errorf("mismatched %s checksum", hashAlg)
        }
        // If we reached here, we had a match
        cnt++
    }

    if cnt == 0 {
        return fmt.Errorf("at least one matching hash needed")
    }

    return nil
}

// CheckValidHashStructures returns an error, or nil, depending on whether
// the content of the hashes is valid or not.
func CheckValidHashStructures(hashes Hashes) error {
    cnt := 0

    for k, v := range hashes {
        switch k {
        case notary.SHA256:
            if len(v) != sha256.Size {
                return ErrInvalidChecksum{alg: notary.SHA256}
            }
            cnt++
        case notary.SHA512:
            if len(v) != sha512.Size {
                return ErrInvalidChecksum{alg: notary.SHA512}
            }
            cnt++
        }
    }

    if cnt == 0 {
        return fmt.Errorf("at least one supported hash needed")
    }

    return nil
}

// NewFileMeta generates a FileMeta object from the reader, using the
// hash algorithms provided
func NewFileMeta(r io.Reader, hashAlgorithms ...string) (FileMeta, error) {
    if len(hashAlgorithms) == 0 {
        hashAlgorithms = []string{defaultHashAlgorithm}
    }
    hashes := make(map[string]hash.Hash, len(hashAlgorithms))
    for _, hashAlgorithm := range hashAlgorithms {
        var h hash.Hash
        switch hashAlgorithm {
        case notary.SHA256:
            h = sha256.New()
        case notary.SHA512:
            h = sha512.New()
        default:
            return FileMeta{}, fmt.Errorf("unknown hash algorithm: %s", hashAlgorithm)
        }
        hashes[hashAlgorithm] = h
        r = io.TeeReader(r, h)
    }
    n, err := io.Copy(ioutil.Discard, r)
    if err != nil {
        return FileMeta{}, err
    }
    m := FileMeta{Length: n, Hashes: make(Hashes, len(hashes))}
    for hashAlgorithm, h := range hashes {
        m.Hashes[hashAlgorithm] = h.Sum(nil)
    }
    return m, nil
}
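
Generating FileMeta for an in-memory payload with both default hash algorithms:

```go
package main

import (
    "bytes"
    "fmt"

    "github.com/theupdateframework/notary/tuf/data"
)

func main() {
    meta, err := data.NewFileMeta(bytes.NewReader([]byte("payload")), data.NotaryDefaultHashes...)
    if err != nil {
        panic(err)
    }
    fmt.Println(meta.Length)                // 7
    fmt.Println(len(meta.Hashes["sha256"])) // 32
    fmt.Println(len(meta.Hashes["sha512"])) // 64
}
```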
|
||||
|
||||
// Delegations holds a tier of targets delegations
|
||||
type Delegations struct {
|
||||
Keys Keys `json:"keys"`
|
||||
Roles []*Role `json:"roles"`
|
||||
}
|
||||
|
||||
// NewDelegations initializes an empty Delegations object
|
||||
func NewDelegations() *Delegations {
|
||||
return &Delegations{
|
||||
Keys: make(map[string]PublicKey),
|
||||
Roles: make([]*Role, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// These values are recommended TUF expiry times.
|
||||
var defaultExpiryTimes = map[RoleName]time.Duration{
|
||||
CanonicalRootRole: notary.Year,
|
||||
CanonicalTargetsRole: 90 * notary.Day,
|
||||
CanonicalSnapshotRole: 7 * notary.Day,
|
||||
CanonicalTimestampRole: notary.Day,
|
||||
}
|
||||
|
||||
// SetDefaultExpiryTimes allows one to change the default expiries.
|
||||
func SetDefaultExpiryTimes(times map[RoleName]time.Duration) {
|
||||
for key, value := range times {
|
||||
if _, ok := defaultExpiryTimes[key]; !ok {
|
||||
logrus.Errorf("Attempted to set default expiry for an unknown role: %s", key.String())
|
||||
continue
|
||||
}
|
||||
defaultExpiryTimes[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultExpires gets the default expiry time for the given role
|
||||
func DefaultExpires(role RoleName) time.Time {
|
||||
if d, ok := defaultExpiryTimes[role]; ok {
|
||||
return time.Now().Add(d)
|
||||
}
|
||||
var t time.Time
|
||||
return t.UTC().Round(time.Second)
|
||||
}
|
||||
|
||||
type unmarshalledSignature Signature
|
||||
|
||||
// UnmarshalJSON does a custom unmarshalling of the signature JSON
|
||||
func (s *Signature) UnmarshalJSON(data []byte) error {
|
||||
uSignature := unmarshalledSignature{}
|
||||
err := json.Unmarshal(data, &uSignature)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uSignature.Method = SigAlgorithm(strings.ToLower(string(uSignature.Method)))
|
||||
*s = Signature(uSignature)
|
||||
return nil
|
||||
}
|
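Illustrative sketch (not part of the vendored diff): how NewFileMeta and CheckValidHashStructures above fit together, assuming the vendored `data` and `notary` packages are importable at the paths shown.

package main

import (
    "bytes"
    "fmt"

    "github.com/theupdateframework/notary"
    "github.com/theupdateframework/notary/tuf/data"
)

func main() {
    payload := []byte("example target contents")

    // Hash the payload with both supported algorithms.
    meta, err := data.NewFileMeta(bytes.NewReader(payload), notary.SHA256, notary.SHA512)
    if err != nil {
        panic(err)
    }

    // The generated hashes should pass structural validation.
    if err := data.CheckValidHashStructures(meta.Hashes); err != nil {
        panic(err)
    }
    fmt.Printf("length=%d sha256=%x\n", meta.Length, meta.Hashes[notary.SHA256])
}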
111
vendor/github.com/theupdateframework/notary/tuf/signed/ed25519.go
generated
vendored
Normal file
@ -0,0 +1,111 @@
package signed

import (
    "crypto/rand"
    "errors"

    "github.com/theupdateframework/notary/trustmanager"
    "github.com/theupdateframework/notary/tuf/data"
    "github.com/theupdateframework/notary/tuf/utils"
)

type edCryptoKey struct {
    role    data.RoleName
    privKey data.PrivateKey
}

// Ed25519 implements a simple in memory cryptosystem for ED25519 keys
type Ed25519 struct {
    keys map[string]edCryptoKey
}

// NewEd25519 initializes a new empty Ed25519 CryptoService that operates
// entirely in memory
func NewEd25519() *Ed25519 {
    return &Ed25519{
        make(map[string]edCryptoKey),
    }
}

// AddKey allows you to add a private key
func (e *Ed25519) AddKey(role data.RoleName, gun data.GUN, k data.PrivateKey) error {
    e.addKey(role, k)
    return nil
}

// addKey allows you to add a private key
func (e *Ed25519) addKey(role data.RoleName, k data.PrivateKey) {
    e.keys[k.ID()] = edCryptoKey{
        role:    role,
        privKey: k,
    }
}

// RemoveKey deletes a key from the signer
func (e *Ed25519) RemoveKey(keyID string) error {
    delete(e.keys, keyID)
    return nil
}

// ListKeys returns the list of key IDs for the role
func (e *Ed25519) ListKeys(role data.RoleName) []string {
    keyIDs := make([]string, 0, len(e.keys))
    for id, edCryptoKey := range e.keys {
        if edCryptoKey.role == role {
            keyIDs = append(keyIDs, id)
        }
    }
    return keyIDs
}

// ListAllKeys returns the map of key IDs to roles
func (e *Ed25519) ListAllKeys() map[string]data.RoleName {
    keys := make(map[string]data.RoleName)
    for id, edKey := range e.keys {
        keys[id] = edKey.role
    }
    return keys
}

// Create generates a new key and returns the public part
func (e *Ed25519) Create(role data.RoleName, gun data.GUN, algorithm string) (data.PublicKey, error) {
    if algorithm != data.ED25519Key {
        return nil, errors.New("only ED25519 supported by this cryptoservice")
    }

    private, err := utils.GenerateED25519Key(rand.Reader)
    if err != nil {
        return nil, err
    }

    e.addKey(role, private)
    return data.PublicKeyFromPrivate(private), nil
}

// PublicKeys returns a map of public keys for the ids provided, when those IDs are found
// in the store.
func (e *Ed25519) PublicKeys(keyIDs ...string) (map[string]data.PublicKey, error) {
    k := make(map[string]data.PublicKey)
    for _, keyID := range keyIDs {
        if edKey, ok := e.keys[keyID]; ok {
            k[keyID] = data.PublicKeyFromPrivate(edKey.privKey)
        }
    }
    return k, nil
}

// GetKey returns a single public key based on the ID
func (e *Ed25519) GetKey(keyID string) data.PublicKey {
    if privKey, _, err := e.GetPrivateKey(keyID); err == nil {
        return data.PublicKeyFromPrivate(privKey)
    }
    return nil
}

// GetPrivateKey returns a single private key and role if present, based on the ID
func (e *Ed25519) GetPrivateKey(keyID string) (data.PrivateKey, data.RoleName, error) {
    if k, ok := e.keys[keyID]; ok {
        return k.privKey, k.role, nil
    }
    return nil, "", trustmanager.ErrKeyNotFound{KeyID: keyID}
}
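Illustrative sketch (not part of the vendored diff): minting and looking up a key with the in-memory Ed25519 service above; the GUN string is a made-up example.

package main

import (
    "fmt"

    "github.com/theupdateframework/notary/tuf/data"
    "github.com/theupdateframework/notary/tuf/signed"
)

func main() {
    svc := signed.NewEd25519()

    // Create an ed25519 key scoped to the targets role.
    pub, err := svc.Create(data.CanonicalTargetsRole, "example.com/repo", data.ED25519Key)
    if err != nil {
        panic(err)
    }

    // The key is now listed under its role and retrievable by ID.
    fmt.Println(svc.ListKeys(data.CanonicalTargetsRole))
    fmt.Println(svc.GetKey(pub.ID()) != nil) // true
}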
98
vendor/github.com/theupdateframework/notary/tuf/signed/errors.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
package signed

import (
    "fmt"
    "strings"

    "github.com/theupdateframework/notary/tuf/data"
)

// ErrInsufficientSignatures - can not create enough signatures on a piece of
// metadata
type ErrInsufficientSignatures struct {
    FoundKeys     int
    NeededKeys    int
    MissingKeyIDs []string
}

func (e ErrInsufficientSignatures) Error() string {
    candidates := ""
    if len(e.MissingKeyIDs) > 0 {
        candidates = fmt.Sprintf(" (%s)", strings.Join(e.MissingKeyIDs, ", "))
    }

    if e.FoundKeys == 0 {
        return fmt.Sprintf("signing keys not available: need %d keys from %d possible keys%s",
            e.NeededKeys, len(e.MissingKeyIDs), candidates)
    }
    return fmt.Sprintf("not enough signing keys: found %d of %d needed keys - %d other possible keys%s",
        e.FoundKeys, e.NeededKeys, len(e.MissingKeyIDs), candidates)
}

// ErrExpired indicates a piece of metadata has expired
type ErrExpired struct {
    Role    data.RoleName
    Expired string
}

func (e ErrExpired) Error() string {
    return fmt.Sprintf("%s expired at %v", e.Role.String(), e.Expired)
}

// ErrLowVersion indicates the piece of metadata has a version number lower than
// a version number we've already seen for this role
type ErrLowVersion struct {
    Actual  int
    Current int
}

func (e ErrLowVersion) Error() string {
    return fmt.Sprintf("version %d is lower than current version %d", e.Actual, e.Current)
}

// ErrRoleThreshold indicates we did not validate enough signatures to meet the threshold
type ErrRoleThreshold struct {
    Msg string
}

func (e ErrRoleThreshold) Error() string {
    if e.Msg == "" {
        return "valid signatures did not meet threshold"
    }
    return e.Msg
}

// ErrInvalidKeyType indicates the types for the key and signature it's associated with are
// mismatched. Probably a sign of malicious behaviour
type ErrInvalidKeyType struct{}

func (e ErrInvalidKeyType) Error() string {
    return "key type is not valid for signature"
}

// ErrInvalidKeyID indicates the specified key ID was incorrect for its associated data
type ErrInvalidKeyID struct{}

func (e ErrInvalidKeyID) Error() string {
    return "key ID is not valid for key content"
}

// ErrInvalidKeyLength indicates that while we may support the cipher, the provided
// key length is not specifically supported, i.e. we support RSA, but not 1024 bit keys
type ErrInvalidKeyLength struct {
    msg string
}

func (e ErrInvalidKeyLength) Error() string {
    return fmt.Sprintf("key length is not supported: %s", e.msg)
}

// ErrNoKeys indicates no signing keys were found when trying to sign
type ErrNoKeys struct {
    KeyIDs []string
}

func (e ErrNoKeys) Error() string {
    return fmt.Sprintf("could not find necessary signing keys, at least one of these keys must be available: %s",
        strings.Join(e.KeyIDs, ", "))
}
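Illustrative sketch (not part of the vendored diff): since these are plain struct types, callers typically distinguish them with a type switch; `classify` is a hypothetical helper.

package main

import (
    "fmt"

    "github.com/theupdateframework/notary/tuf/signed"
)

// classify shows how the error types above can be told apart;
// it is illustrative only and not part of the vendored code.
func classify(err error) string {
    switch err.(type) {
    case signed.ErrExpired:
        return "metadata expired"
    case signed.ErrLowVersion:
        return "possible rollback attack"
    case signed.ErrRoleThreshold:
        return "not enough valid signatures"
    default:
        return "other verification failure"
    }
}

func main() {
    fmt.Println(classify(signed.ErrLowVersion{Actual: 1, Current: 2}))
}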
47
vendor/github.com/theupdateframework/notary/tuf/signed/interface.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
package signed

import "github.com/theupdateframework/notary/tuf/data"

// KeyService provides management of keys locally. It will never
// accept or provide private keys. Communication between the KeyService
// and a SigningService happens behind the Create function.
type KeyService interface {
    // Create issues a new key pair and is responsible for loading
    // the private key into the appropriate signing service.
    Create(role data.RoleName, gun data.GUN, algorithm string) (data.PublicKey, error)

    // AddKey adds a private key to the specified role and gun
    AddKey(role data.RoleName, gun data.GUN, key data.PrivateKey) error

    // GetKey retrieves the public key if present, otherwise it returns nil
    GetKey(keyID string) data.PublicKey

    // GetPrivateKey retrieves the private key and role if present and retrievable,
    // otherwise it returns nil and an error
    GetPrivateKey(keyID string) (data.PrivateKey, data.RoleName, error)

    // RemoveKey deletes the specified key, and returns an error only if the key
    // removal fails. If the key doesn't exist, no error should be returned.
    RemoveKey(keyID string) error

    // ListKeys returns a list of key IDs for the role, or an empty list or
    // nil if there are no keys.
    ListKeys(role data.RoleName) []string

    // ListAllKeys returns a map of all available signing key IDs to role, or
    // an empty map or nil if there are no keys.
    ListAllKeys() map[string]data.RoleName
}

// CryptoService is deprecated and all instances of its use should be
// replaced with KeyService
type CryptoService interface {
    KeyService
}

// Verifier defines an interface for verifying signatures. An implementer
// of this interface should verify signatures for one and only one
// signing scheme.
type Verifier interface {
    Verify(key data.PublicKey, sig []byte, msg []byte) error
}
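Illustrative sketch (not part of the vendored diff): the shape of a custom Verifier implementation; `acceptAllVerifier` is hypothetical and deliberately insecure.

package main

import (
    "errors"

    "github.com/theupdateframework/notary/tuf/data"
    "github.com/theupdateframework/notary/tuf/signed"
)

// acceptAllVerifier is a hypothetical Verifier that accepts any non-empty
// signature; a real implementation must check the signature cryptographically.
type acceptAllVerifier struct{}

func (acceptAllVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
    if len(sig) == 0 {
        return errors.New("empty signature")
    }
    return nil // never do this in production
}

var _ signed.Verifier = acceptAllVerifier{} // compile-time interface check

func main() {}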
114
vendor/github.com/theupdateframework/notary/tuf/signed/sign.go
generated
vendored
Normal file
@ -0,0 +1,114 @@
package signed

// The Sign function is a choke point for all code paths that do signing.
// We use this fact to do key ID translation. There are 2 types of key ID:
//   - Scoped: the key ID based purely on the data that appears in the TUF
//     files. This may be wrapped by a certificate that scopes the
//     key to be used in a specific context.
//   - Canonical: the key ID based purely on the public key bytes. This is
//     used by keystores to easily identify keys that may be reused
//     in many scoped locations.
// Currently these types only differ in the context of Root Keys in Notary
// for which the root key is wrapped using an x509 certificate.

import (
    "crypto/rand"

    "github.com/sirupsen/logrus"
    "github.com/theupdateframework/notary/trustmanager"
    "github.com/theupdateframework/notary/tuf/data"
    "github.com/theupdateframework/notary/tuf/utils"
)

// Sign takes a data.Signed and a cryptoservice containing private keys,
// and calculates and adds at least minSignatures signatures from signingKeys
// to the data.Signed. It will also clean up any signatures that were not
// produced by either a signingKey or an otherWhitelistedKey.
// Note that in most cases, otherWhitelistedKeys should probably be nil. They
// are for keys you don't want to sign with, but whose existing signatures
// you also don't want to remove; for instance, when you want to call Sign
// multiple times with different sets of signing keys without removing
// signatures produced by a previous call to Sign.
func Sign(service CryptoService, s *data.Signed, signingKeys []data.PublicKey,
    minSignatures int, otherWhitelistedKeys []data.PublicKey) error {

    logrus.Debugf("sign called with %d/%d required keys", minSignatures, len(signingKeys))
    signatures := make([]data.Signature, 0, len(s.Signatures)+1)
    signingKeyIDs := make(map[string]struct{})
    tufIDs := make(map[string]data.PublicKey)

    privKeys := make(map[string]data.PrivateKey)

    // Get all the private key objects related to the public keys
    missingKeyIDs := []string{}
    for _, key := range signingKeys {
        canonicalID, err := utils.CanonicalKeyID(key)
        tufIDs[key.ID()] = key
        if err != nil {
            return err
        }
        k, _, err := service.GetPrivateKey(canonicalID)
        if err != nil {
            if _, ok := err.(trustmanager.ErrKeyNotFound); ok {
                missingKeyIDs = append(missingKeyIDs, canonicalID)
                continue
            }
            return err
        }
        privKeys[key.ID()] = k
    }

    // include the list of otherWhitelistedKeys
    for _, key := range otherWhitelistedKeys {
        if _, ok := tufIDs[key.ID()]; !ok {
            tufIDs[key.ID()] = key
        }
    }

    // Check to ensure we have enough signing keys
    if len(privKeys) < minSignatures {
        return ErrInsufficientSignatures{FoundKeys: len(privKeys),
            NeededKeys: minSignatures, MissingKeyIDs: missingKeyIDs}
    }

    emptyStruct := struct{}{}
    // Do signing and generate list of signatures
    for keyID, pk := range privKeys {
        sig, err := pk.Sign(rand.Reader, *s.Signed, nil)
        if err != nil {
            logrus.Debugf("Failed to sign with key: %s. Reason: %v", keyID, err)
            return err
        }
        signingKeyIDs[keyID] = emptyStruct
        signatures = append(signatures, data.Signature{
            KeyID:     keyID,
            Method:    pk.SignatureAlgorithm(),
            Signature: sig[:],
        })
    }

    for i := range s.Signatures {
        sig := s.Signatures[i]
        if _, ok := signingKeyIDs[sig.KeyID]; ok {
            // key is in the set of key IDs for which a signature has been created
            continue
        }
        var (
            k  data.PublicKey
            ok bool
        )
        if k, ok = tufIDs[sig.KeyID]; !ok {
            // key is no longer a valid signing key
            continue
        }
        if err := VerifySignature(*s.Signed, &sig, k); err != nil {
            // signature is no longer valid
            continue
        }
        // keep any signatures that still represent valid keys and are
        // themselves valid
        signatures = append(signatures, sig)
    }
    s.Signatures = signatures
    return nil
}
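Illustrative sketch (not part of the vendored diff): signing a minimal envelope with Sign and the in-memory Ed25519 service; the payload JSON and GUN are made-up examples.

package main

import (
    "fmt"

    "github.com/docker/go/canonical/json"
    "github.com/theupdateframework/notary/tuf/data"
    "github.com/theupdateframework/notary/tuf/signed"
)

func main() {
    svc := signed.NewEd25519()
    pub, err := svc.Create(data.CanonicalTargetsRole, "example.com/repo", data.ED25519Key)
    if err != nil {
        panic(err)
    }

    // Wrap a canonical JSON payload in a data.Signed envelope.
    raw := json.RawMessage(`{"_type":"Targets","version":1}`)
    s := &data.Signed{Signed: &raw}

    // Require at least one signature from the key we just created.
    if err := signed.Sign(svc, s, []data.PublicKey{pub}, 1, nil); err != nil {
        panic(err)
    }
    fmt.Printf("signatures: %d\n", len(s.Signatures))
}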
264
vendor/github.com/theupdateframework/notary/tuf/signed/verifiers.go
generated
vendored
Normal file
@ -0,0 +1,264 @@
package signed

import (
    "crypto"
    "crypto/ecdsa"
    "crypto/rsa"
    "crypto/sha256"
    "crypto/x509"
    "encoding/pem"
    "fmt"
    "math/big"

    "github.com/sirupsen/logrus"
    "github.com/theupdateframework/notary/tuf/data"
    "golang.org/x/crypto/ed25519"
)

const (
    minRSAKeySizeBit  = 2048 // 2048 bits = 256 bytes
    minRSAKeySizeByte = minRSAKeySizeBit / 8
)

// Verifiers serves as a map of all verifiers available on the system and
// can be injected into a verificationService. For testing and configuration
// purposes, it will not be used by default.
var Verifiers = map[data.SigAlgorithm]Verifier{
    data.RSAPSSSignature:      RSAPSSVerifier{},
    data.RSAPKCS1v15Signature: RSAPKCS1v15Verifier{},
    data.PyCryptoSignature:    RSAPyCryptoVerifier{},
    data.ECDSASignature:       ECDSAVerifier{},
    data.EDDSASignature:       Ed25519Verifier{},
}

// Ed25519Verifier is used to verify Ed25519 signatures
type Ed25519Verifier struct{}

// Verify checks that an ed25519 signature is valid
func (v Ed25519Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
    if key.Algorithm() != data.ED25519Key {
        return ErrInvalidKeyType{}
    }
    sigBytes := make([]byte, ed25519.SignatureSize)
    if len(sig) != ed25519.SignatureSize {
        logrus.Debugf("signature length is incorrect, must be %d, was %d.", ed25519.SignatureSize, len(sig))
        return ErrInvalid
    }
    copy(sigBytes, sig)

    keyBytes := make([]byte, ed25519.PublicKeySize)
    pub := key.Public()
    if len(pub) != ed25519.PublicKeySize {
        logrus.Errorf("public key is incorrect size, must be %d, was %d.", ed25519.PublicKeySize, len(pub))
        return ErrInvalidKeyLength{msg: fmt.Sprintf("ed25519 public key must be %d bytes.", ed25519.PublicKeySize)}
    }
    n := copy(keyBytes, key.Public())
    if n < ed25519.PublicKeySize {
        logrus.Errorf("failed to copy the key, must have %d bytes, copied %d bytes.", ed25519.PublicKeySize, n)
        return ErrInvalid
    }

    if !ed25519.Verify(ed25519.PublicKey(keyBytes), msg, sigBytes) {
        logrus.Debugf("failed ed25519 verification")
        return ErrInvalid
    }
    return nil
}

func verifyPSS(key interface{}, digest, sig []byte) error {
    rsaPub, ok := key.(*rsa.PublicKey)
    if !ok {
        logrus.Debugf("value was not an RSA public key")
        return ErrInvalid
    }

    if rsaPub.N.BitLen() < minRSAKeySizeBit {
        logrus.Debugf("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen())
        return ErrInvalidKeyLength{msg: fmt.Sprintf("RSA key must be at least %d bits.", minRSAKeySizeBit)}
    }

    if len(sig) < minRSAKeySizeByte {
        logrus.Debugf("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig))
        return ErrInvalid
    }

    opts := rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}
    if err := rsa.VerifyPSS(rsaPub, crypto.SHA256, digest[:], sig, &opts); err != nil {
        logrus.Debugf("failed RSAPSS verification: %s", err)
        return ErrInvalid
    }
    return nil
}

func getRSAPubKey(key data.PublicKey) (crypto.PublicKey, error) {
    algorithm := key.Algorithm()
    var pubKey crypto.PublicKey

    switch algorithm {
    case data.RSAx509Key:
        pemCert, _ := pem.Decode([]byte(key.Public()))
        if pemCert == nil {
            logrus.Debugf("failed to decode PEM-encoded x509 certificate")
            return nil, ErrInvalid
        }
        cert, err := x509.ParseCertificate(pemCert.Bytes)
        if err != nil {
            logrus.Debugf("failed to parse x509 certificate: %s\n", err)
            return nil, ErrInvalid
        }
        pubKey = cert.PublicKey
    case data.RSAKey:
        var err error
        pubKey, err = x509.ParsePKIXPublicKey(key.Public())
        if err != nil {
            logrus.Debugf("failed to parse public key: %s\n", err)
            return nil, ErrInvalid
        }
    default:
        // only accept RSA keys
        logrus.Debugf("invalid key type for RSAPSS verifier: %s", algorithm)
        return nil, ErrInvalidKeyType{}
    }

    return pubKey, nil
}

// RSAPSSVerifier checks RSASSA-PSS signatures
type RSAPSSVerifier struct{}

// Verify does the actual check.
func (v RSAPSSVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
    // will return err if keytype is not a recognized RSA type
    pubKey, err := getRSAPubKey(key)
    if err != nil {
        return err
    }

    digest := sha256.Sum256(msg)

    return verifyPSS(pubKey, digest[:], sig)
}

// RSAPKCS1v15Verifier checks RSA PKCS1v15 signatures
type RSAPKCS1v15Verifier struct{}

// Verify does the actual verification
func (v RSAPKCS1v15Verifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
    // will return err if keytype is not a recognized RSA type
    pubKey, err := getRSAPubKey(key)
    if err != nil {
        return err
    }
    digest := sha256.Sum256(msg)

    rsaPub, ok := pubKey.(*rsa.PublicKey)
    if !ok {
        logrus.Debugf("value was not an RSA public key")
        return ErrInvalid
    }

    if rsaPub.N.BitLen() < minRSAKeySizeBit {
        logrus.Debugf("RSA keys less than 2048 bits are not acceptable, provided key has length %d.", rsaPub.N.BitLen())
        return ErrInvalidKeyLength{msg: fmt.Sprintf("RSA key must be at least %d bits.", minRSAKeySizeBit)}
    }

    if len(sig) < minRSAKeySizeByte {
        logrus.Debugf("RSA keys less than 2048 bits are not acceptable, provided signature has length %d.", len(sig))
        return ErrInvalid
    }

    if err = rsa.VerifyPKCS1v15(rsaPub, crypto.SHA256, digest[:], sig); err != nil {
        logrus.Errorf("Failed verification: %s", err.Error())
        return ErrInvalid
    }
    return nil
}

// RSAPyCryptoVerifier checks RSASSA-PSS signatures
type RSAPyCryptoVerifier struct{}

// Verify does the actual check.
// N.B. We have not been able to make this work in a way that is compatible
// with PyCrypto.
func (v RSAPyCryptoVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
    digest := sha256.Sum256(msg)
    if key.Algorithm() != data.RSAKey {
        return ErrInvalidKeyType{}
    }

    k, _ := pem.Decode([]byte(key.Public()))
    if k == nil {
        logrus.Debugf("failed to decode PEM-encoded x509 certificate")
        return ErrInvalid
    }

    pub, err := x509.ParsePKIXPublicKey(k.Bytes)
    if err != nil {
        logrus.Debugf("failed to parse public key: %s\n", err)
        return ErrInvalid
    }

    return verifyPSS(pub, digest[:], sig)
}

// ECDSAVerifier checks ECDSA signatures, decoding the keyType appropriately
type ECDSAVerifier struct{}

// Verify does the actual check.
func (v ECDSAVerifier) Verify(key data.PublicKey, sig []byte, msg []byte) error {
    algorithm := key.Algorithm()
    var pubKey crypto.PublicKey

    switch algorithm {
    case data.ECDSAx509Key:
        pemCert, _ := pem.Decode([]byte(key.Public()))
        if pemCert == nil {
            logrus.Debugf("failed to decode PEM-encoded x509 certificate for keyID: %s", key.ID())
            logrus.Debugf("certificate bytes: %s", string(key.Public()))
            return ErrInvalid
        }
        cert, err := x509.ParseCertificate(pemCert.Bytes)
        if err != nil {
            logrus.Debugf("failed to parse x509 certificate: %s\n", err)
            return ErrInvalid
        }
        pubKey = cert.PublicKey
    case data.ECDSAKey:
        var err error
        pubKey, err = x509.ParsePKIXPublicKey(key.Public())
        if err != nil {
            logrus.Debugf("Failed to parse public key for keyID: %s, %s\n", key.ID(), err)
            return ErrInvalid
        }
    default:
        // only accept ECDSA keys.
        logrus.Debugf("invalid key type for ECDSA verifier: %s", algorithm)
        return ErrInvalidKeyType{}
    }

    ecdsaPubKey, ok := pubKey.(*ecdsa.PublicKey)
    if !ok {
        logrus.Debugf("value isn't an ECDSA public key")
        return ErrInvalid
    }

    sigLength := len(sig)
    expectedOctetLength := 2 * ((ecdsaPubKey.Params().BitSize + 7) >> 3)
    if sigLength != expectedOctetLength {
        logrus.Debugf("signature had an unexpected length")
        return ErrInvalid
    }

    rBytes, sBytes := sig[:sigLength/2], sig[sigLength/2:]
    r := new(big.Int).SetBytes(rBytes)
    s := new(big.Int).SetBytes(sBytes)

    digest := sha256.Sum256(msg)

    if !ecdsa.Verify(ecdsaPubKey, digest[:], r, s) {
        logrus.Debugf("failed ECDSA signature validation")
        return ErrInvalid
    }

    return nil
}
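Illustrative sketch (not part of the vendored diff): exercising the registered ed25519 verifier end to end, assuming the vendored `utils`, `data`, and `signed` packages.

package main

import (
    "crypto/rand"
    "fmt"

    "github.com/theupdateframework/notary/tuf/data"
    "github.com/theupdateframework/notary/tuf/signed"
    "github.com/theupdateframework/notary/tuf/utils"
)

func main() {
    // Generate an ed25519 key pair and sign a message with it.
    priv, err := utils.GenerateED25519Key(rand.Reader)
    if err != nil {
        panic(err)
    }
    msg := []byte("payload")
    sig, err := priv.Sign(rand.Reader, msg, nil)
    if err != nil {
        panic(err)
    }

    // Verify through the verifier registered for the EDDSA method.
    verifier := signed.Verifiers[data.EDDSASignature]
    pub := data.PublicKeyFromPrivate(priv)
    fmt.Println(verifier.Verify(pub, sig, msg)) // expect <nil>
}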
123
vendor/github.com/theupdateframework/notary/tuf/signed/verify.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
package signed

import (
    "errors"
    "fmt"
    "strings"
    "time"

    "github.com/docker/go/canonical/json"
    "github.com/sirupsen/logrus"
    "github.com/theupdateframework/notary/tuf/data"
    "github.com/theupdateframework/notary/tuf/utils"
)

// Various basic signing errors
var (
    ErrNoSignatures = errors.New("tuf: data has no signatures")
    ErrInvalid      = errors.New("tuf: signature verification failed")
    ErrWrongType    = errors.New("tuf: meta file has wrong type")
)

// IsExpired checks if the given time passed before the present time
func IsExpired(t time.Time) bool {
    return t.Before(time.Now())
}

// VerifyExpiry returns ErrExpired if the metadata is expired
func VerifyExpiry(s *data.SignedCommon, role data.RoleName) error {
    if IsExpired(s.Expires) {
        logrus.Errorf("Metadata for %s expired", role)
        return ErrExpired{Role: role, Expired: s.Expires.Format("Mon Jan 2 15:04:05 MST 2006")}
    }
    return nil
}

// VerifyVersion returns ErrLowVersion if the metadata version is lower than the min version
func VerifyVersion(s *data.SignedCommon, minVersion int) error {
    if s.Version < minVersion {
        return ErrLowVersion{Actual: s.Version, Current: minVersion}
    }
    return nil
}

// VerifySignatures checks that we have sufficient valid signatures for the given role
func VerifySignatures(s *data.Signed, roleData data.BaseRole) error {
    if len(s.Signatures) == 0 {
        return ErrNoSignatures
    }

    if roleData.Threshold < 1 {
        return ErrRoleThreshold{}
    }
    logrus.Debugf("%s role has key IDs: %s", roleData.Name, strings.Join(roleData.ListKeyIDs(), ","))

    // remarshal the signed part so we can verify the signature, since the signature has
    // to be of a canonically marshalled signed object
    var decoded map[string]interface{}
    if err := json.Unmarshal(*s.Signed, &decoded); err != nil {
        return err
    }
    msg, err := json.MarshalCanonical(decoded)
    if err != nil {
        return err
    }

    valid := make(map[string]struct{})
    for i := range s.Signatures {
        sig := &(s.Signatures[i])
        logrus.Debug("verifying signature for key ID: ", sig.KeyID)
        key, ok := roleData.Keys[sig.KeyID]
        if !ok {
            logrus.Debugf("continuing b/c keyid lookup was nil: %s\n", sig.KeyID)
            continue
        }
        // Check that the signature key ID actually matches the content ID of the key
        if key.ID() != sig.KeyID {
            return ErrInvalidKeyID{}
        }
        if err := VerifySignature(msg, sig, key); err != nil {
            logrus.Debugf("continuing b/c %s", err.Error())
            continue
        }
        valid[sig.KeyID] = struct{}{}
    }
    if len(valid) < roleData.Threshold {
        return ErrRoleThreshold{
            Msg: fmt.Sprintf("valid signatures did not meet threshold for %s", roleData.Name),
        }
    }

    return nil
}

// VerifySignature checks a single signature and public key against a payload.
// If the signature verifies, the signature's IsValid field is set to true.
func VerifySignature(msg []byte, sig *data.Signature, pk data.PublicKey) error {
    // method lookup is consistent due to Unmarshal JSON doing lower case for us.
    method := sig.Method
    verifier, ok := Verifiers[method]
    if !ok {
        return fmt.Errorf("signing method is not supported: %s", sig.Method)
    }

    if err := verifier.Verify(pk, sig.Signature, msg); err != nil {
        return fmt.Errorf("signature was invalid")
    }
    sig.IsValid = true
    return nil
}

// VerifyPublicKeyMatchesPrivateKey checks if the private and public keys form a valid key pair.
// Supports both x509 certificate PublicKeys and non-certificate PublicKeys
func VerifyPublicKeyMatchesPrivateKey(privKey data.PrivateKey, pubKey data.PublicKey) error {
    pubKeyID, err := utils.CanonicalKeyID(pubKey)
    if err != nil {
        return fmt.Errorf("could not verify key pair: %v", err)
    }
    if privKey == nil || pubKeyID != privKey.ID() {
        return fmt.Errorf("private key is nil or does not match public key")
    }
    return nil
}
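Illustrative sketch (not part of the vendored diff): signing and then verifying against a threshold-1 role, assuming the packages above and that the payload is already canonical JSON.

package main

import (
    "fmt"

    "github.com/docker/go/canonical/json"
    "github.com/theupdateframework/notary/tuf/data"
    "github.com/theupdateframework/notary/tuf/signed"
)

func main() {
    svc := signed.NewEd25519()
    pub, err := svc.Create(data.CanonicalRootRole, "example.com/repo", data.ED25519Key)
    if err != nil {
        panic(err)
    }

    raw := json.RawMessage(`{"_type":"Root","version":1}`)
    s := &data.Signed{Signed: &raw}
    if err := signed.Sign(svc, s, []data.PublicKey{pub}, 1, nil); err != nil {
        panic(err)
    }

    // A threshold-1 role holding the signing key should verify.
    role := data.NewBaseRole(data.CanonicalRootRole, 1, pub)
    fmt.Println(signed.VerifySignatures(s, role)) // expect <nil>
}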
1072
vendor/github.com/theupdateframework/notary/tuf/tuf.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
341
vendor/github.com/theupdateframework/notary/tuf/utils/pkcs8.go
generated
vendored
Normal file
@ -0,0 +1,341 @@
// Package utils contains tuf related utility functions; however, this file is
// hard forked from the https://github.com/youmark/pkcs8 package. It has been
// further modified based on the requirements of Notary. For converting keys
// into PKCS#8 format, the original package expected a *crypto.PrivateKey
// interface, which was then type inferred to either *rsa.PrivateKey or
// *ecdsa.PrivateKey depending on the need and later converted to ASN.1 DER
// encoded form; this whole process was superfluous here as keys are already
// kept in ASN.1 DER format wrapped in a data.PrivateKey structure. With these
// changes, the package has become tightly coupled with notary, as most of the
// method signatures have been updated. Moreover, support for ED25519 keys has
// been added as well. The license for the original package follows:
//
// The MIT License (MIT)
//
// Copyright (c) 2014 youmark
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package utils

import (
    "crypto/aes"
    "crypto/cipher"
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/rsa"
    "crypto/sha1" // #nosec
    "crypto/x509"
    "crypto/x509/pkix"
    "encoding/asn1"
    "errors"
    "fmt"

    "golang.org/x/crypto/pbkdf2"

    "github.com/theupdateframework/notary/tuf/data"
)

// Copy from crypto/x509
var (
    oidPublicKeyRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
    oidPublicKeyDSA   = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
    oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
    // crypto/x509 doesn't have support for ED25519
    // http://www.oid-info.com/get/1.3.6.1.4.1.11591.15.1
    oidPublicKeyED25519 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11591, 15, 1}
)

// Copy from crypto/x509
var (
    oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
    oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
    oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
    oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
)

// Copy from crypto/x509
func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
    switch curve {
    case elliptic.P224():
        return oidNamedCurveP224, true
    case elliptic.P256():
        return oidNamedCurveP256, true
    case elliptic.P384():
        return oidNamedCurveP384, true
    case elliptic.P521():
        return oidNamedCurveP521, true
    }

    return nil, false
}

// Unencrypted PKCS8
var (
    oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12}
    oidPBES2       = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13}
    oidAES256CBC   = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42}
)

type ecPrivateKey struct {
    Version       int
    PrivateKey    []byte
    NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"`
    PublicKey     asn1.BitString        `asn1:"optional,explicit,tag:1"`
}

type privateKeyInfo struct {
    Version             int
    PrivateKeyAlgorithm []asn1.ObjectIdentifier
    PrivateKey          []byte
}

// Encrypted PKCS8
type pbkdf2Params struct {
    Salt           []byte
    IterationCount int
}

type pbkdf2Algorithms struct {
    IDPBKDF2     asn1.ObjectIdentifier
    PBKDF2Params pbkdf2Params
}

type pbkdf2Encs struct {
    EncryAlgo asn1.ObjectIdentifier
    IV        []byte
}

type pbes2Params struct {
    KeyDerivationFunc pbkdf2Algorithms
    EncryptionScheme  pbkdf2Encs
}

type pbes2Algorithms struct {
    IDPBES2     asn1.ObjectIdentifier
    PBES2Params pbes2Params
}

type encryptedPrivateKeyInfo struct {
    EncryptionAlgorithm pbes2Algorithms
    EncryptedData       []byte
}

// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey.
// copied from https://github.com/golang/go/blob/964639cc338db650ccadeafb7424bc8ebb2c0f6c/src/crypto/x509/pkcs8.go#L17
type pkcs8 struct {
    Version    int
    Algo       pkix.AlgorithmIdentifier
    PrivateKey []byte
}

func parsePKCS8ToTufKey(der []byte) (data.PrivateKey, error) {
    var key pkcs8

    if _, err := asn1.Unmarshal(der, &key); err != nil {
        if _, ok := err.(asn1.StructuralError); ok {
            return nil, errors.New("could not decrypt private key")
        }
        return nil, err
    }

    if key.Algo.Algorithm.Equal(oidPublicKeyED25519) {
        tufED25519PrivateKey, err := ED25519ToPrivateKey(key.PrivateKey)
        if err != nil {
            return nil, fmt.Errorf("could not convert ed25519.PrivateKey to data.PrivateKey: %v", err)
        }

        return tufED25519PrivateKey, nil
    }

    privKey, err := x509.ParsePKCS8PrivateKey(der)
    if err != nil {
        return nil, err
    }

    switch priv := privKey.(type) {
    case *rsa.PrivateKey:
        tufRSAPrivateKey, err := RSAToPrivateKey(priv)
        if err != nil {
            return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err)
        }

        return tufRSAPrivateKey, nil
    case *ecdsa.PrivateKey:
        tufECDSAPrivateKey, err := ECDSAToPrivateKey(priv)
        if err != nil {
            return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
        }

        return tufECDSAPrivateKey, nil
    }

    return nil, errors.New("unsupported key type")
}

// ParsePKCS8ToTufKey requires a PKCS#8 key in DER format and returns a data.PrivateKey.
// A password should be provided in the case of an encrypted PKCS#8 key; otherwise it should be nil.
func ParsePKCS8ToTufKey(der []byte, password []byte) (data.PrivateKey, error) {
    if password == nil {
        return parsePKCS8ToTufKey(der)
    }

    var privKey encryptedPrivateKeyInfo
    if _, err := asn1.Unmarshal(der, &privKey); err != nil {
        return nil, errors.New("pkcs8: only PKCS #5 v2.0 supported")
    }

    if !privKey.EncryptionAlgorithm.IDPBES2.Equal(oidPBES2) {
        return nil, errors.New("pkcs8: only PBES2 supported")
    }

    if !privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.IDPBKDF2.Equal(oidPKCS5PBKDF2) {
        return nil, errors.New("pkcs8: only PBKDF2 supported")
    }

    encParam := privKey.EncryptionAlgorithm.PBES2Params.EncryptionScheme
    kdfParam := privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.PBKDF2Params

    switch {
    case encParam.EncryAlgo.Equal(oidAES256CBC):
        iv := encParam.IV
        salt := kdfParam.Salt
        iter := kdfParam.IterationCount

        encryptedKey := privKey.EncryptedData
        symkey := pbkdf2.Key(password, salt, iter, 32, sha1.New)
        block, err := aes.NewCipher(symkey)
        if err != nil {
            return nil, err
        }
        mode := cipher.NewCBCDecrypter(block, iv)
        mode.CryptBlocks(encryptedKey, encryptedKey)

        // no need to explicitly remove padding, as ASN.1 unmarshalling will automatically discard it
        key, err := parsePKCS8ToTufKey(encryptedKey)
        if err != nil {
            return nil, errors.New("pkcs8: incorrect password")
        }

        return key, nil
    default:
        return nil, errors.New("pkcs8: only AES-256-CBC supported")
    }
}

func convertTUFKeyToPKCS8(priv data.PrivateKey) ([]byte, error) {
    var pkey privateKeyInfo

    switch priv.Algorithm() {
    case data.RSAKey, data.RSAx509Key:
        // Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0).
        // But openssl sets v1 even when publicKey is present
        pkey.Version = 0
        pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1)
        pkey.PrivateKeyAlgorithm[0] = oidPublicKeyRSA
        pkey.PrivateKey = priv.Private()
    case data.ECDSAKey, data.ECDSAx509Key:
        // To extract the Curve value, parse the ECDSA key into an *ecdsa.PrivateKey
        eckey, err := x509.ParseECPrivateKey(priv.Private())
        if err != nil {
            return nil, err
        }

        oidNamedCurve, ok := oidFromNamedCurve(eckey.Curve)
        if !ok {
            return nil, errors.New("pkcs8: unknown elliptic curve")
        }

        // Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0).
        // But openssl sets v1 even when publicKey is present
        pkey.Version = 1
        pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 2)
        pkey.PrivateKeyAlgorithm[0] = oidPublicKeyECDSA
        pkey.PrivateKeyAlgorithm[1] = oidNamedCurve
        pkey.PrivateKey = priv.Private()
    case data.ED25519Key:
        pkey.Version = 0
        pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1)
        pkey.PrivateKeyAlgorithm[0] = oidPublicKeyED25519
        pkey.PrivateKey = priv.Private()
    default:
        return nil, fmt.Errorf("algorithm %s not supported", priv.Algorithm())
    }

    return asn1.Marshal(pkey)
}

func convertTUFKeyToPKCS8Encrypted(priv data.PrivateKey, password []byte) ([]byte, error) {
    // Convert private key into PKCS8 format
    pkey, err := convertTUFKeyToPKCS8(priv)
    if err != nil {
        return nil, err
    }

    // Calculate key from password based on PKCS5 algorithm
    // Use an 8 byte salt, a 16 byte IV, and 2048 iterations
    iter := 2048
    salt := make([]byte, 8)
    iv := make([]byte, 16)
    _, err = rand.Reader.Read(salt)
    if err != nil {
        return nil, err
    }

    _, err = rand.Reader.Read(iv)
    if err != nil {
        return nil, err
    }

    key := pbkdf2.Key(password, salt, iter, 32, sha1.New)

    // Use AES256-CBC mode, pad plaintext with PKCS5 padding scheme
    padding := aes.BlockSize - len(pkey)%aes.BlockSize
    if padding > 0 {
        n := len(pkey)
        pkey = append(pkey, make([]byte, padding)...)
        for i := 0; i < padding; i++ {
            pkey[n+i] = byte(padding)
        }
    }

    encryptedKey := make([]byte, len(pkey))
    block, err := aes.NewCipher(key)
    if err != nil {
        return nil, err
    }
    mode := cipher.NewCBCEncrypter(block, iv)
    mode.CryptBlocks(encryptedKey, pkey)

    pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter}}
    pbkdf2encs := pbkdf2Encs{oidAES256CBC, iv}
    pbes2algo := pbes2Algorithms{oidPBES2, pbes2Params{pbkdf2algo, pbkdf2encs}}

    encryptedPkey := encryptedPrivateKeyInfo{pbes2algo, encryptedKey}
    return asn1.Marshal(encryptedPkey)
}

// ConvertTUFKeyToPKCS8 converts a private key (data.PrivateKey) to PKCS#8 and returns it in DER format;
// if password is not nil, it converts the private key to encrypted PKCS#8.
func ConvertTUFKeyToPKCS8(priv data.PrivateKey, password []byte) ([]byte, error) {
    if password == nil {
        return convertTUFKeyToPKCS8(priv)
    }
    return convertTUFKeyToPKCS8Encrypted(priv, password)
}
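Illustrative sketch (not part of the vendored diff): an encrypt/parse round trip through the PKCS#8 helpers above; the passphrase is a placeholder.

package main

import (
    "crypto/rand"
    "fmt"

    "github.com/theupdateframework/notary/tuf/utils"
)

func main() {
    priv, err := utils.GenerateED25519Key(rand.Reader)
    if err != nil {
        panic(err)
    }

    // Encrypt to PKCS#8 DER with a passphrase, then parse it back.
    password := []byte("correct horse battery staple")
    der, err := utils.ConvertTUFKeyToPKCS8(priv, password)
    if err != nil {
        panic(err)
    }
    restored, err := utils.ParsePKCS8ToTufKey(der, password)
    if err != nil {
        panic(err)
    }
    fmt.Println(restored.ID() == priv.ID()) // expect true
}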
31
vendor/github.com/theupdateframework/notary/tuf/utils/role_sort.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
package utils

import (
    "strings"
)

// RoleList is a list of roles
type RoleList []string

// Len returns the length of the list
func (r RoleList) Len() int {
    return len(r)
}

// Less returns true if the item at i should be sorted
// before the item at j. It's an unstable partial ordering
// based on the number of segments, separated by "/", in
// the role name
func (r RoleList) Less(i, j int) bool {
    segsI := strings.Split(r[i], "/")
    segsJ := strings.Split(r[j], "/")
    if len(segsI) == len(segsJ) {
        return r[i] < r[j]
    }
    return len(segsI) < len(segsJ)
}

// Swap the items at 2 locations in the list
func (r RoleList) Swap(i, j int) {
    r[i], r[j] = r[j], r[i]
}
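Illustrative sketch (not part of the vendored diff): RoleList satisfies sort.Interface, so delegation roles order by path depth, then lexically.

package main

import (
    "fmt"
    "sort"

    "github.com/theupdateframework/notary/tuf/utils"
)

func main() {
    roles := utils.RoleList{"targets/a/b", "targets", "targets/a"}
    // Fewer path segments sort first; ties break lexicographically.
    sort.Sort(roles)
    fmt.Println(roles) // [targets targets/a targets/a/b]
}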
85
vendor/github.com/theupdateframework/notary/tuf/utils/stack.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
package utils

import (
    "fmt"
    "sync"
)

// ErrEmptyStack is used when an action that requires some
// content is invoked and the stack is empty
type ErrEmptyStack struct {
    action string
}

func (err ErrEmptyStack) Error() string {
    return fmt.Sprintf("attempted to %s with empty stack", err.action)
}

// ErrBadTypeCast is used by PopX functions when the item
// cannot be typed to X
type ErrBadTypeCast struct{}

func (err ErrBadTypeCast) Error() string {
    return "attempted to do a typed pop and item was not of type"
}

// Stack is a simple type agnostic stack implementation
type Stack struct {
    s []interface{}
    l sync.Mutex
}

// NewStack creates a new stack
func NewStack() *Stack {
    s := &Stack{
        s: make([]interface{}, 0),
    }
    return s
}

// Push adds an item to the top of the stack.
func (s *Stack) Push(item interface{}) {
    s.l.Lock()
    defer s.l.Unlock()
    s.s = append(s.s, item)
}

// Pop removes and returns the top item on the stack, or returns
// ErrEmptyStack if the stack has no content
func (s *Stack) Pop() (interface{}, error) {
    s.l.Lock()
    defer s.l.Unlock()
    l := len(s.s)
    if l > 0 {
        item := s.s[l-1]
        s.s = s.s[:l-1]
        return item, nil
    }
    return nil, ErrEmptyStack{action: "Pop"}
}

// PopString attempts to cast the top item on the stack to the string type.
// If this succeeds, it removes and returns the top item. If the item
// is not of the string type, ErrBadTypeCast is returned. If the stack
// is empty, ErrEmptyStack is returned
func (s *Stack) PopString() (string, error) {
    s.l.Lock()
    defer s.l.Unlock()
    l := len(s.s)
    if l > 0 {
        item := s.s[l-1]
        if item, ok := item.(string); ok {
            s.s = s.s[:l-1]
            return item, nil
        }
        return "", ErrBadTypeCast{}
    }
    return "", ErrEmptyStack{action: "PopString"}
}

// Empty returns true if the stack is empty
func (s *Stack) Empty() bool {
    s.l.Lock()
    defer s.l.Unlock()
    return len(s.s) == 0
}
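Illustrative sketch (not part of the vendored diff): typed pops refuse mismatched items without removing them.

package main

import (
    "fmt"

    "github.com/theupdateframework/notary/tuf/utils"
)

func main() {
    st := utils.NewStack()
    st.Push("targets/releases")
    st.Push(42) // a non-string item on top

    // PopString refuses the int and leaves it on the stack...
    if _, err := st.PopString(); err != nil {
        fmt.Println(err)
    }
    st.Pop() // ...so discard it with an untyped Pop

    role, _ := st.PopString()
    fmt.Println(role, st.Empty()) // targets/releases true
}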
119
vendor/github.com/theupdateframework/notary/tuf/utils/utils.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
package utils

import (
    "crypto/sha256"
    "crypto/sha512"
    "encoding/hex"
    "fmt"
    "io"

    "github.com/theupdateframework/notary/tuf/data"
)

// StrSliceContains checks if the given string appears in the slice
func StrSliceContains(ss []string, s string) bool {
    for _, v := range ss {
        if v == s {
            return true
        }
    }
    return false
}

// RoleNameSliceContains checks if the given role name appears in the slice
func RoleNameSliceContains(ss []data.RoleName, s data.RoleName) bool {
    for _, v := range ss {
        if v == s {
            return true
        }
    }
    return false
}

// RoleNameSliceRemove removes the given RoleName from the slice, returning a new slice
func RoleNameSliceRemove(ss []data.RoleName, s data.RoleName) []data.RoleName {
    res := []data.RoleName{}
    for _, v := range ss {
        if v != s {
            res = append(res, v)
        }
    }
    return res
}

// NoopCloser is a simple Reader wrapper that does nothing when Close is
// called
type NoopCloser struct {
    io.Reader
}

// Close does nothing for a NoopCloser
func (nc *NoopCloser) Close() error {
    return nil
}

// DoHash returns the digest of d using the hashing algorithm named
// in alg
func DoHash(alg string, d []byte) []byte {
    switch alg {
    case "sha256":
        digest := sha256.Sum256(d)
        return digest[:]
    case "sha512":
        digest := sha512.Sum512(d)
        return digest[:]
    }
    return nil
}

// UnusedDelegationKeys prunes a list of keys, returning those that are no
// longer in use for a given targets file
func UnusedDelegationKeys(t data.SignedTargets) []string {
    // compare ids to all still-active key ids in all active roles
    // within the targets file
    found := make(map[string]bool)
    for _, r := range t.Signed.Delegations.Roles {
        for _, id := range r.KeyIDs {
            found[id] = true
        }
    }
    var discard []string
    for id := range t.Signed.Delegations.Keys {
        if !found[id] {
            discard = append(discard, id)
        }
    }
    return discard
}

// RemoveUnusedKeys determines which keys in the slice of IDs are no longer
// used in the given targets file and removes them from the delegated keys
// map
func RemoveUnusedKeys(t *data.SignedTargets) {
    unusedIDs := UnusedDelegationKeys(*t)
    for _, id := range unusedIDs {
        delete(t.Signed.Delegations.Keys, id)
    }
}

// FindRoleIndex returns the index of the role named <name> or -1 if no
// matching role is found.
func FindRoleIndex(rs []*data.Role, name data.RoleName) int {
    for i, r := range rs {
        if r.Name == name {
            return i
        }
    }
    return -1
}

// ConsistentName generates the appropriate HTTP URL path for the role,
// based on whether the repo is marked as consistent. The RemoteStore
// is responsible for adding file extensions.
func ConsistentName(role string, hashSHA256 []byte) string {
    if len(hashSHA256) > 0 {
        hash := hex.EncodeToString(hashSHA256)
        return fmt.Sprintf("%s.%s", role, hash)
    }
    return role
}
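Illustrative sketch (not part of the vendored diff): DoHash feeding ConsistentName, which is how consistent-snapshot metadata names are built.

package main

import (
    "fmt"

    "github.com/theupdateframework/notary/tuf/utils"
)

func main() {
    meta := []byte(`{"signed":{}}`)

    // Consistent snapshots address metadata by role name plus SHA-256 digest.
    digest := utils.DoHash("sha256", meta)
    fmt.Println(utils.ConsistentName("targets", digest)) // targets.<64 hex chars>

    // Without a digest, the plain role name is used.
    fmt.Println(utils.ConsistentName("targets", nil)) // targets
}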
564
vendor/github.com/theupdateframework/notary/tuf/utils/x509.go
generated
vendored
Normal file
564
vendor/github.com/theupdateframework/notary/tuf/utils/x509.go
generated
vendored
Normal file
@ -0,0 +1,564 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/theupdateframework/notary"
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// CanonicalKeyID returns the ID of the public bytes version of a TUF key.
|
||||
// On regular RSA/ECDSA TUF keys, this is just the key ID. On X509 RSA/ECDSA
|
||||
// TUF keys, this is the key ID of the public key part of the key in the leaf cert
|
||||
func CanonicalKeyID(k data.PublicKey) (string, error) {
|
||||
if k == nil {
|
||||
return "", errors.New("public key is nil")
|
||||
}
|
||||
switch k.Algorithm() {
|
||||
case data.ECDSAx509Key, data.RSAx509Key:
|
||||
return X509PublicKeyID(k)
|
||||
default:
|
||||
return k.ID(), nil
|
||||
}
|
||||
}
|
||||
|
||||
// LoadCertFromPEM returns the first certificate found in a bunch of bytes or error
|
||||
// if nothing is found. Taken from https://golang.org/src/crypto/x509/cert_pool.go#L85.
|
||||
func LoadCertFromPEM(pemBytes []byte) (*x509.Certificate, error) {
|
||||
for len(pemBytes) > 0 {
|
||||
var block *pem.Block
|
||||
block, pemBytes = pem.Decode(pemBytes)
|
||||
if block == nil {
|
||||
return nil, errors.New("no certificates found in PEM data")
|
||||
}
|
||||
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("no certificates found in PEM data")
|
||||
}
|
||||
|
||||
// X509PublicKeyID returns a public key ID as a string, given a
|
||||
// data.PublicKey that contains an X509 Certificate
|
||||
func X509PublicKeyID(certPubKey data.PublicKey) (string, error) {
|
||||
// Note that this only loads the first certificate from the public key
|
||||
cert, err := LoadCertFromPEM(certPubKey.Public())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
pubKeyBytes, err := x509.MarshalPKIXPublicKey(cert.PublicKey)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var key data.PublicKey
|
||||
switch certPubKey.Algorithm() {
|
||||
case data.ECDSAx509Key:
|
||||
key = data.NewECDSAPublicKey(pubKeyBytes)
|
||||
case data.RSAx509Key:
|
||||
key = data.NewRSAPublicKey(pubKeyBytes)
|
||||
}
|
||||
|
||||
return key.ID(), nil
|
||||
}
|
||||
|
||||
func parseLegacyPrivateKey(block *pem.Block, passphrase string) (data.PrivateKey, error) {
|
||||
var privKeyBytes []byte
|
||||
var err error
|
||||
if x509.IsEncryptedPEMBlock(block) {
|
||||
privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase))
|
||||
if err != nil {
|
||||
return nil, errors.New("could not decrypt private key")
|
||||
}
|
||||
} else {
|
||||
privKeyBytes = block.Bytes
|
||||
}
|
||||
|
||||
switch block.Type {
|
||||
case "RSA PRIVATE KEY":
|
||||
rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKeyBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse DER encoded key: %v", err)
|
||||
}
|
||||
|
||||
tufRSAPrivateKey, err := RSAToPrivateKey(rsaPrivKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err)
|
||||
}
|
||||
|
||||
return tufRSAPrivateKey, nil
|
||||
case "EC PRIVATE KEY":
|
||||
ecdsaPrivKey, err := x509.ParseECPrivateKey(privKeyBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse DER encoded private key: %v", err)
|
||||
}
|
||||
|
||||
tufECDSAPrivateKey, err := ECDSAToPrivateKey(ecdsaPrivKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
|
||||
}
|
||||
|
||||
return tufECDSAPrivateKey, nil
|
||||
case "ED25519 PRIVATE KEY":
|
||||
// We serialize ED25519 keys by concatenating the private key
|
||||
// to the public key and encoding with PEM. See the
|
||||
// ED25519ToPrivateKey function.
|
||||
tufECDSAPrivateKey, err := ED25519ToPrivateKey(privKeyBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
|
||||
}
|
||||
|
||||
return tufECDSAPrivateKey, nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported key type %q", block.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It
|
||||
// supports PKCS#8 as well as RSA/ECDSA (PKCS#1) only in non-FIPS mode and
|
||||
// attempts to decrypt using the passphrase, if encrypted.
|
||||
func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) {
|
||||
return parsePEMPrivateKey(pemBytes, passphrase, notary.FIPSEnabled())
|
||||
}
|
||||
|
||||
func parsePEMPrivateKey(pemBytes []byte, passphrase string, fips bool) (data.PrivateKey, error) {
|
||||
block, _ := pem.Decode(pemBytes)
|
||||
if block == nil {
|
||||
return nil, errors.New("no valid private key found")
|
||||
}
|
||||
|
||||
switch block.Type {
|
||||
case "RSA PRIVATE KEY", "EC PRIVATE KEY", "ED25519 PRIVATE KEY":
|
||||
if fips {
|
||||
return nil, fmt.Errorf("%s not supported in FIPS mode", block.Type)
|
||||
}
|
||||
return parseLegacyPrivateKey(block, passphrase)
|
||||
case "ENCRYPTED PRIVATE KEY", "PRIVATE KEY":
|
||||
if passphrase == "" {
|
||||
return ParsePKCS8ToTufKey(block.Bytes, nil)
|
||||
}
|
||||
return ParsePKCS8ToTufKey(block.Bytes, []byte(passphrase))
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported key type %q", block.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// CertToPEM is a utility function returns a PEM encoded x509 Certificate
|
||||
func CertToPEM(cert *x509.Certificate) []byte {
|
||||
pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
|
||||
|
||||
return pemCert
|
||||
}
|
||||
|
||||
// CertChainToPEM is a utility function returns a PEM encoded chain of x509 Certificates, in the order they are passed
|
||||
func CertChainToPEM(certChain []*x509.Certificate) ([]byte, error) {
|
||||
var pemBytes bytes.Buffer
|
||||
for _, cert := range certChain {
|
||||
if err := pem.Encode(&pemBytes, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return pemBytes.Bytes(), nil
|
||||
}

// LoadCertFromFile loads the first certificate from the file provided. The
// data is expected to be PEM Encoded and contain one or more certificates
// with PEM type "CERTIFICATE"
func LoadCertFromFile(filename string) (*x509.Certificate, error) {
	certs, err := LoadCertBundleFromFile(filename)
	if err != nil {
		return nil, err
	}
	return certs[0], nil
}

// LoadCertBundleFromFile loads certificates from the file provided. The
// data is expected to be PEM Encoded and contain one or more certificates
// with PEM type "CERTIFICATE"
func LoadCertBundleFromFile(filename string) ([]*x509.Certificate, error) {
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}

	return LoadCertBundleFromPEM(b)
}

// LoadCertBundleFromPEM loads certificates from the []byte provided. The
// data is expected to be PEM Encoded and contain one or more certificates
// with PEM type "CERTIFICATE"
func LoadCertBundleFromPEM(pemBytes []byte) ([]*x509.Certificate, error) {
	certificates := []*x509.Certificate{}
	var block *pem.Block
	block, pemBytes = pem.Decode(pemBytes)
	for ; block != nil; block, pemBytes = pem.Decode(pemBytes) {
		if block.Type == "CERTIFICATE" {
			cert, err := x509.ParseCertificate(block.Bytes)
			if err != nil {
				return nil, err
			}
			certificates = append(certificates, cert)
		} else {
			return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
		}
	}

	if len(certificates) == 0 {
		return nil, fmt.Errorf("no valid certificates found")
	}

	return certificates, nil
}

// GetLeafCerts parses a list of x509 Certificates and returns all of them
// that aren't CAs
func GetLeafCerts(certs []*x509.Certificate) []*x509.Certificate {
	var leafCerts []*x509.Certificate
	for _, cert := range certs {
		if cert.IsCA {
			continue
		}
		leafCerts = append(leafCerts, cert)
	}
	return leafCerts
}

// GetIntermediateCerts parses a list of x509 Certificates and returns all of the
// ones marked as a CA, to be used as intermediates
func GetIntermediateCerts(certs []*x509.Certificate) []*x509.Certificate {
	var intCerts []*x509.Certificate
	for _, cert := range certs {
		if cert.IsCA {
			intCerts = append(intCerts, cert)
		}
	}
	return intCerts
}
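
// A minimal sketch of splitting a loaded bundle (illustrative only; the
// bundle path is hypothetical):
//
//	certs, err := LoadCertBundleFromFile("ca-bundle.pem")
//	if err != nil {
//		log.Fatal(err)
//	}
//	leaves := GetLeafCerts(certs)                // end-entity certificates
//	intermediates := GetIntermediateCerts(certs) // CA certificates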

// ParsePEMPublicKey returns a data.PublicKey from a PEM encoded public key or certificate.
func ParsePEMPublicKey(pubKeyBytes []byte) (data.PublicKey, error) {
	pemBlock, _ := pem.Decode(pubKeyBytes)
	if pemBlock == nil {
		return nil, errors.New("no valid public key found")
	}

	switch pemBlock.Type {
	case "CERTIFICATE":
		cert, err := x509.ParseCertificate(pemBlock.Bytes)
		if err != nil {
			return nil, fmt.Errorf("could not parse provided certificate: %v", err)
		}
		err = ValidateCertificate(cert, true)
		if err != nil {
			return nil, fmt.Errorf("invalid certificate: %v", err)
		}
		return CertToKey(cert), nil
	case "PUBLIC KEY":
		keyType, err := keyTypeForPublicKey(pemBlock.Bytes)
		if err != nil {
			return nil, err
		}
		return data.NewPublicKey(keyType, pemBlock.Bytes), nil
	default:
		return nil, fmt.Errorf("unsupported PEM block type %q, expected CERTIFICATE or PUBLIC KEY", pemBlock.Type)
	}
}
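
// A minimal usage sketch (illustrative only): the same entry point accepts
// either a certificate or a raw PKIX public key.
//
//	pubKey, err := ParsePEMPublicKey(pemBytes) // pemBytes holds a CERTIFICATE or PUBLIC KEY block
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("key ID:", pubKey.ID())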

func keyTypeForPublicKey(pubKeyBytes []byte) (string, error) {
	pub, err := x509.ParsePKIXPublicKey(pubKeyBytes)
	if err != nil {
		return "", fmt.Errorf("unable to parse pem encoded public key: %v", err)
	}
	switch pub.(type) {
	case *ecdsa.PublicKey:
		return data.ECDSAKey, nil
	case *rsa.PublicKey:
		return data.RSAKey, nil
	}
	return "", fmt.Errorf("unknown public key format")
}

// ValidateCertificate returns an error if the certificate is not valid for notary.
// Currently this ensures that, for RSA keys, the public key has a large enough
// modulus, that the signature algorithm is not SHA1-based, and, optionally,
// that the certificate is within its validity window.
func ValidateCertificate(c *x509.Certificate, checkExpiry bool) error {
	if (c.NotBefore).After(c.NotAfter) {
		return fmt.Errorf("certificate validity window is invalid")
	}
	// Can't have SHA1 sig algorithm
	if c.SignatureAlgorithm == x509.SHA1WithRSA || c.SignatureAlgorithm == x509.DSAWithSHA1 || c.SignatureAlgorithm == x509.ECDSAWithSHA1 {
		return fmt.Errorf("certificate with CN %s uses invalid SHA1 signature algorithm", c.Subject.CommonName)
	}
	// If we have an RSA key, make sure it's long enough
	if c.PublicKeyAlgorithm == x509.RSA {
		rsaKey, ok := c.PublicKey.(*rsa.PublicKey)
		if !ok {
			return fmt.Errorf("unable to parse RSA public key")
		}
		if rsaKey.N.BitLen() < notary.MinRSABitSize {
			return fmt.Errorf("RSA bit length is too short")
		}
	}
	if checkExpiry {
		now := time.Now()
		tomorrow := now.AddDate(0, 0, 1)
		// Give one day leeway on creation "before" time, check "after" against today
		if (tomorrow).Before(c.NotBefore) || now.After(c.NotAfter) {
			return data.ErrCertExpired{CN: c.Subject.CommonName}
		}
		// If this certificate is expiring within 6 months, put out a warning
		if (c.NotAfter).Before(time.Now().AddDate(0, 6, 0)) {
			logrus.Warnf("certificate with CN %s is near expiry", c.Subject.CommonName)
		}
	}
	return nil
}
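
// For example (illustrative only; the path is hypothetical), validating a
// freshly loaded certificate with the expiry check enabled:
//
//	cert, err := LoadCertFromFile("root-ca.crt")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := ValidateCertificate(cert, true); err != nil {
//		log.Fatalf("certificate rejected: %v", err)
//	}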

// GenerateKey returns a new private key using the provided algorithm or an
// error detailing why the key could not be generated
func GenerateKey(algorithm string) (data.PrivateKey, error) {
	switch algorithm {
	case data.ECDSAKey:
		return GenerateECDSAKey(rand.Reader)
	case data.ED25519Key:
		return GenerateED25519Key(rand.Reader)
	}
	return nil, fmt.Errorf("private key type not supported for key generation: %s", algorithm)
}
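
// A minimal usage sketch (illustrative only):
//
//	privKey, err := GenerateKey(data.ECDSAKey)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("generated key ID:", privKey.ID())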

// RSAToPrivateKey converts an rsa.PrivateKey to a TUF data.PrivateKey type
func RSAToPrivateKey(rsaPrivKey *rsa.PrivateKey) (data.PrivateKey, error) {
	// Get a DER-encoded representation of the PublicKey
	rsaPubBytes, err := x509.MarshalPKIXPublicKey(&rsaPrivKey.PublicKey)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal public key: %v", err)
	}

	// Get a DER-encoded representation of the PrivateKey
	rsaPrivBytes := x509.MarshalPKCS1PrivateKey(rsaPrivKey)

	pubKey := data.NewRSAPublicKey(rsaPubBytes)
	return data.NewRSAPrivateKey(pubKey, rsaPrivBytes)
}

// GenerateECDSAKey generates an ECDSA Private key and returns a TUF PrivateKey
func GenerateECDSAKey(random io.Reader) (data.PrivateKey, error) {
	ecdsaPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), random)
	if err != nil {
		return nil, err
	}

	tufPrivKey, err := ECDSAToPrivateKey(ecdsaPrivKey)
	if err != nil {
		return nil, err
	}

	logrus.Debugf("generated ECDSA key with keyID: %s", tufPrivKey.ID())

	return tufPrivKey, nil
}

// GenerateED25519Key generates an ED25519 private key and returns a TUF
// PrivateKey. The serialization format we use is just the public key bytes
// followed by the private key bytes
func GenerateED25519Key(random io.Reader) (data.PrivateKey, error) {
	pub, priv, err := ed25519.GenerateKey(random)
	if err != nil {
		return nil, err
	}

	var serialized [ed25519.PublicKeySize + ed25519.PrivateKeySize]byte
	copy(serialized[:], pub[:])
	copy(serialized[ed25519.PublicKeySize:], priv[:])

	tufPrivKey, err := ED25519ToPrivateKey(serialized[:])
	if err != nil {
		return nil, err
	}

	logrus.Debugf("generated ED25519 key with keyID: %s", tufPrivKey.ID())

	return tufPrivKey, nil
}

// ECDSAToPrivateKey converts an ecdsa.PrivateKey to a TUF data.PrivateKey type
func ECDSAToPrivateKey(ecdsaPrivKey *ecdsa.PrivateKey) (data.PrivateKey, error) {
	// Get a DER-encoded representation of the PublicKey
	ecdsaPubBytes, err := x509.MarshalPKIXPublicKey(&ecdsaPrivKey.PublicKey)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal public key: %v", err)
	}

	// Get a DER-encoded representation of the PrivateKey
	ecdsaPrivKeyBytes, err := x509.MarshalECPrivateKey(ecdsaPrivKey)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal private key: %v", err)
	}

	pubKey := data.NewECDSAPublicKey(ecdsaPubBytes)
	return data.NewECDSAPrivateKey(pubKey, ecdsaPrivKeyBytes)
}

// ED25519ToPrivateKey converts a serialized ED25519 key to a TUF
// data.PrivateKey type
func ED25519ToPrivateKey(privKeyBytes []byte) (data.PrivateKey, error) {
	if len(privKeyBytes) != ed25519.PublicKeySize+ed25519.PrivateKeySize {
		return nil, errors.New("malformed ed25519 private key")
	}

	pubKey := data.NewED25519PublicKey(privKeyBytes[:ed25519.PublicKeySize])
	return data.NewED25519PrivateKey(*pubKey, privKeyBytes)
}
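
// For reference, the layout this function expects is the one produced by
// GenerateED25519Key above:
//
//	privKeyBytes[:ed25519.PublicKeySize] // 32-byte public key
//	privKeyBytes[ed25519.PublicKeySize:] // 64-byte private key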

// ExtractPrivateKeyAttributes extracts role and gun values from private key bytes
func ExtractPrivateKeyAttributes(pemBytes []byte) (data.RoleName, data.GUN, error) {
	return extractPrivateKeyAttributes(pemBytes, notary.FIPSEnabled())
}

func extractPrivateKeyAttributes(pemBytes []byte, fips bool) (data.RoleName, data.GUN, error) {
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		return "", "", errors.New("PEM block is empty")
	}

	switch block.Type {
	case "RSA PRIVATE KEY", "EC PRIVATE KEY", "ED25519 PRIVATE KEY":
		if fips {
			return "", "", fmt.Errorf("%s not supported in FIPS mode", block.Type)
		}
	case "PRIVATE KEY", "ENCRYPTED PRIVATE KEY":
		// do nothing for PKCS#8 keys
	default:
		return "", "", errors.New("unknown key format")
	}
	return data.RoleName(block.Headers["role"]), data.GUN(block.Headers["gun"]), nil
}
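
// For illustration, a PEM key written by ConvertPrivateKeyToPKCS8 (below)
// carries the role and gun as PEM headers, roughly (values here are
// hypothetical):
//
//	-----BEGIN PRIVATE KEY-----
//	gun: docker.io/library/alpine
//	role: targets
//
//	...base64-encoded DER...
//	-----END PRIVATE KEY-----
//
// ExtractPrivateKeyAttributes simply reads those headers back.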

// ConvertPrivateKeyToPKCS8 converts a data.PrivateKey to PKCS#8 Format
func ConvertPrivateKeyToPKCS8(key data.PrivateKey, role data.RoleName, gun data.GUN, passphrase string) ([]byte, error) {
	var (
		err       error
		der       []byte
		blockType = "PRIVATE KEY"
	)

	if passphrase == "" {
		der, err = ConvertTUFKeyToPKCS8(key, nil)
	} else {
		blockType = "ENCRYPTED PRIVATE KEY"
		der, err = ConvertTUFKeyToPKCS8(key, []byte(passphrase))
	}
	if err != nil {
		return nil, fmt.Errorf("unable to convert to PKCS8 key")
	}

	headers := make(map[string]string)
	if role != "" {
		headers["role"] = role.String()
	}

	if gun != "" {
		headers["gun"] = gun.String()
	}

	return pem.EncodeToMemory(&pem.Block{Bytes: der, Type: blockType, Headers: headers}), nil
}
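
// A minimal round-trip sketch (illustrative only; the gun and passphrase
// values are hypothetical):
//
//	pemBytes, err := ConvertPrivateKeyToPKCS8(privKey, data.CanonicalTargetsRole, "docker.io/library/alpine", "my-passphrase")
//	if err != nil {
//		log.Fatal(err)
//	}
//	role, gun, err := ExtractPrivateKeyAttributes(pemBytes)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(role, gun) // "targets" "docker.io/library/alpine"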

// CertToKey transforms a single input certificate into its corresponding
// PublicKey
func CertToKey(cert *x509.Certificate) data.PublicKey {
	block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
	pemdata := pem.EncodeToMemory(&block)

	switch cert.PublicKeyAlgorithm {
	case x509.RSA:
		return data.NewRSAx509PublicKey(pemdata)
	case x509.ECDSA:
		return data.NewECDSAx509PublicKey(pemdata)
	default:
		logrus.Debugf("Unknown key type parsed from certificate: %v", cert.PublicKeyAlgorithm)
		return nil
	}
}

// CertsToKeys transforms each of the input certificate chains into its corresponding
// PublicKey
func CertsToKeys(leafCerts map[string]*x509.Certificate, intCerts map[string][]*x509.Certificate) map[string]data.PublicKey {
	keys := make(map[string]data.PublicKey)
	for id, leafCert := range leafCerts {
		if key, err := CertBundleToKey(leafCert, intCerts[id]); err == nil {
			keys[key.ID()] = key
		}
	}
	return keys
}

// CertBundleToKey creates a TUF key from a leaf cert and a list of
// intermediates
func CertBundleToKey(leafCert *x509.Certificate, intCerts []*x509.Certificate) (data.PublicKey, error) {
	certBundle := []*x509.Certificate{leafCert}
	certBundle = append(certBundle, intCerts...)
	certChainPEM, err := CertChainToPEM(certBundle)
	if err != nil {
		return nil, err
	}
	var newKey data.PublicKey
	// Use the leaf cert's public key algorithm for typing
	switch leafCert.PublicKeyAlgorithm {
	case x509.RSA:
		newKey = data.NewRSAx509PublicKey(certChainPEM)
	case x509.ECDSA:
		newKey = data.NewECDSAx509PublicKey(certChainPEM)
	default:
		logrus.Debugf("Unknown key type parsed from certificate: %v", leafCert.PublicKeyAlgorithm)
		return nil, x509.ErrUnsupportedAlgorithm
	}
	return newKey, nil
}

// NewCertificate returns an X509 Certificate following a template, given a Common Name and validity interval.
func NewCertificate(commonName string, startTime, endTime time.Time) (*x509.Certificate, error) {
	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)

	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		return nil, fmt.Errorf("failed to generate new certificate: %v", err)
	}

	return &x509.Certificate{
		SerialNumber: serialNumber,
		Subject: pkix.Name{
			CommonName: commonName,
		},
		NotBefore: startTime,
		NotAfter:  endTime,

		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
		BasicConstraintsValid: true,
	}, nil
}
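
// A minimal usage sketch (illustrative only; publicKey and signingKey are
// hypothetical): the returned template is typically passed to
// x509.CreateCertificate along with a key pair.
//
//	start := time.Now()
//	template, err := NewCertificate("docker.io/library/alpine", start, start.AddDate(10, 0, 0))
//	if err != nil {
//		log.Fatal(err)
//	}
//	derBytes, err := x509.CreateCertificate(rand.Reader, template, template, publicKey, signingKey)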
126
vendor/github.com/theupdateframework/notary/tuf/validation/errors.go
generated
vendored
Normal file
@ -0,0 +1,126 @@
package validation

import (
	"encoding/json"
	"fmt"
)

// VALIDATION ERRORS

// ErrValidation represents a general validation error
type ErrValidation struct {
	Msg string
}

func (err ErrValidation) Error() string {
	return fmt.Sprintf("An error occurred during validation: %s", err.Msg)
}

// ErrBadHierarchy represents missing metadata. Currently this means a missing
// snapshot; when delegations are implemented it will also represent a missing
// delegation parent
type ErrBadHierarchy struct {
	Missing string
	Msg     string
}

func (err ErrBadHierarchy) Error() string {
	return fmt.Sprintf("Metadata hierarchy is incomplete: %s", err.Msg)
}

// ErrBadRoot represents a failure validating the root
type ErrBadRoot struct {
	Msg string
}

func (err ErrBadRoot) Error() string {
	return fmt.Sprintf("The root metadata is invalid: %s", err.Msg)
}

// ErrBadTargets represents a failure to validate a targets role (including delegations)
type ErrBadTargets struct {
	Msg string
}

func (err ErrBadTargets) Error() string {
	return fmt.Sprintf("The targets metadata is invalid: %s", err.Msg)
}

// ErrBadSnapshot represents a failure to validate the snapshot
type ErrBadSnapshot struct {
	Msg string
}

func (err ErrBadSnapshot) Error() string {
	return fmt.Sprintf("The snapshot metadata is invalid: %s", err.Msg)
}

// END VALIDATION ERRORS

// SerializableError is a struct that can be used to serialize an error as JSON
type SerializableError struct {
	Name  string
	Error error
}

// UnmarshalJSON attempts to unmarshal the error into the right type
func (s *SerializableError) UnmarshalJSON(text []byte) (err error) {
	var x struct{ Name string }
	err = json.Unmarshal(text, &x)
	if err != nil {
		return
	}
	var theError error
	switch x.Name {
	case "ErrValidation":
		var e struct{ Error ErrValidation }
		err = json.Unmarshal(text, &e)
		theError = e.Error
	case "ErrBadHierarchy":
		var e struct{ Error ErrBadHierarchy }
		err = json.Unmarshal(text, &e)
		theError = e.Error
	case "ErrBadRoot":
		var e struct{ Error ErrBadRoot }
		err = json.Unmarshal(text, &e)
		theError = e.Error
	case "ErrBadTargets":
		var e struct{ Error ErrBadTargets }
		err = json.Unmarshal(text, &e)
		theError = e.Error
	case "ErrBadSnapshot":
		var e struct{ Error ErrBadSnapshot }
		err = json.Unmarshal(text, &e)
		theError = e.Error
	default:
		err = fmt.Errorf("do not know how to unmarshal %s", x.Name)
		return
	}
	if err != nil {
		return
	}
	s.Name = x.Name
	s.Error = theError
	return nil
}

// NewSerializableError wraps one of the above validation errors in a
// SerializableError so it can be marshaled to JSON
func NewSerializableError(err error) (*SerializableError, error) {
	// make sure it's one of our errors
	var name string
	switch err.(type) {
	case ErrValidation:
		name = "ErrValidation"
	case ErrBadHierarchy:
		name = "ErrBadHierarchy"
	case ErrBadRoot:
		name = "ErrBadRoot"
	case ErrBadTargets:
		name = "ErrBadTargets"
	case ErrBadSnapshot:
		name = "ErrBadSnapshot"
	default:
		return nil, fmt.Errorf("does not support serializing non-validation errors")
	}
	return &SerializableError{Name: name, Error: err}, nil
}
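
// A minimal round-trip sketch (illustrative only):
//
//	serializable, err := NewSerializableError(ErrBadRoot{Msg: "invalid signature"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	raw, _ := json.Marshal(serializable)
//
//	var decoded SerializableError
//	if err := json.Unmarshal(raw, &decoded); err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(decoded.Name) // "ErrBadRoot"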