Compare commits
244 commits: v18.04.0-ce ... v18.03.1-ce
| SHA1 | Author | Date | |
|---|---|---|---|
| 9ee9f402cd | |||
| c32349b9a9 | |||
| 6d0d01b238 | |||
| fda40de70c | |||
| fdb8850492 | |||
| 56e55727a8 | |||
| fcdc984cfd | |||
| 8921f8bec1 | |||
| 6f1fe53965 | |||
| af03317180 | |||
| 76cbc5d636 | |||
| 14b513f77d | |||
| a78f916dfb | |||
| be3f367578 | |||
| db5f5c037d | |||
| 905f71561a | |||
| 60dc869e44 | |||
| ce2f874741 | |||
| e79a05793b | |||
| 4fd6bf8c4a | |||
| 3d5ce4f418 | |||
| edee73b51b | |||
| 02e93e419f | |||
| 68acf3c023 | |||
| b522d934b9 | |||
| 029cfa0956 | |||
| da69d9e253 | |||
| 79e8ae0e9d | |||
| 39f7e7efd0 | |||
| 766c24e554 | |||
| 4e72a2fd51 | |||
| 24306165c5 | |||
| 27540de1ad | |||
| 1df656a2f0 | |||
| bb68afd0fc | |||
| 35fc88bcd9 | |||
| b45862f481 | |||
| 72f31848f8 | |||
| 0e66a94980 | |||
| 85143bcb4d | |||
| a4b56ba187 | |||
| 5a884e1a0d | |||
| 0fae0d5a40 | |||
| 608b278d01 | |||
| faccf7d3a7 | |||
| 04b5ed6c10 | |||
| 7974841233 | |||
| f70c483990 | |||
| 49acaa1f03 | |||
| 09fa7b76b7 | |||
| b11c120bdf | |||
| f160c829fb | |||
| fa03812525 | |||
| 997850a2f2 | |||
| cb93f50ae0 | |||
| 174c840b96 | |||
| e0b31bfb7b | |||
| 176c10200c | |||
| 16e1ab0f89 | |||
| 1776b7beed | |||
| 362aa8f58a | |||
| 2c0117e268 | |||
| a29f2e130f | |||
| 34e1feb36b | |||
| 23752adb2d | |||
| 69f0e0e720 | |||
| 33df5137f7 | |||
| 612f972789 | |||
| f03b3e54d7 | |||
| dc814f391e | |||
| cb1f6242ed | |||
| 03ad49ebec | |||
| 2ad8904abb | |||
| a415a11279 | |||
| c4adb3c164 | |||
| 3ca5c31d84 | |||
| a978050d4f | |||
| 3c896e300b | |||
| 46ad9721ef | |||
| c9c8799088 | |||
| ec3221ac7d | |||
| d602967356 | |||
| aded7a8259 | |||
| a3540962fd | |||
| c25858e94c | |||
| ae56939688 | |||
| df6211ff9e | |||
| e945f93024 | |||
| 04ebff9f98 | |||
| b3929fad23 | |||
| 6fbd783f33 | |||
| 618f290b37 | |||
| 1f529b2bd2 | |||
| e695ffea63 | |||
| d4444e3446 | |||
| 53a10ae2bc | |||
| 0fd259afcb | |||
| 8b54bcc057 | |||
| e4022bf934 | |||
| bae8ccdbd7 | |||
| b2ff741b5f | |||
| 89f42b5a6e | |||
| 811b93ac9b | |||
| fd75fd2b45 | |||
| 7aa342f808 | |||
| 15237fd242 | |||
| 1331808a09 | |||
| dc75023a9a | |||
| 9fb2695fa5 | |||
| 473116d3a2 | |||
| f10cbe710b | |||
| aff214cf5c | |||
| 4cff440f16 | |||
| 5770d86dae | |||
| e921cf31fc | |||
| a0c8cc80ab | |||
| 7ee3cf582d | |||
| 8e67119f1f | |||
| 59875dbe8e | |||
| 0190af907b | |||
| 711807560d | |||
| d6931c2d12 | |||
| 92f0dce1d1 | |||
| 1556d2d2f9 | |||
| d8b4bd5c6f | |||
| 603c7932bb | |||
| 7bfd67bece | |||
| 010ba8f1dd | |||
| 7dd4bb6171 | |||
| 566c20094c | |||
| bc28daf367 | |||
| a20423b7f7 | |||
| 8723ba6cc0 | |||
| c270672ced | |||
| 0d45d8f964 | |||
| 311944ec04 | |||
| ad89cb6a38 | |||
| aba0bb77e1 | |||
| a20c69243b | |||
| bcb040440f | |||
| 2cc21bf56f | |||
| a218857a9a | |||
| a723f7351d | |||
| df9ed934e3 | |||
| f7124ab5e4 | |||
| 9550f71467 | |||
| 1f1816b098 | |||
| 7c2863a6e0 | |||
| fb9700fdbd | |||
| 70083e56be | |||
| 659ea5343c | |||
| 49dc9b064e | |||
| 9f1c3a6814 | |||
| 83bc7fbbe0 | |||
| fbe9d5d378 | |||
| c925888822 | |||
| 2032af137d | |||
| 7b5c3637ab | |||
| abd6f38345 | |||
| 212445876d | |||
| 9134da61fe | |||
| 45c8a7d911 | |||
| 64fe575962 | |||
| 4b4614c8b7 | |||
| 0eadfaeecd | |||
| 7364fc672e | |||
| c05a7395f2 | |||
| 0520e24302 | |||
| f178926203 | |||
| e4b87d5a7d | |||
| 95930e8794 | |||
| d8bfd4004a | |||
| 912261ed44 | |||
| fbedb97a27 | |||
| 1adc2983f8 | |||
| 6bca1f316f | |||
| 78455c2b2f | |||
| 3b7099798e | |||
| ef0da452ea | |||
| f91125ff08 | |||
| 1dd3bdc5e9 | |||
| a3fc95aed5 | |||
| 7d9137fefc | |||
| 70cb53f0ba | |||
| 9cc70ae1b0 | |||
| 30726dd76a | |||
| 0825e477d8 | |||
| 735514a077 | |||
| 093b46e361 | |||
| 518a7181ad | |||
| 48712f36a6 | |||
| 3d69121433 | |||
| 2d81349010 | |||
| 7946f15b56 | |||
| c64a65bccb | |||
| e22655d04a | |||
| fd1a7dfd47 | |||
| e7309590a2 | |||
| 49e42a6151 | |||
| 89ec01afcb | |||
| 6fa0c6462e | |||
| 2329a946f6 | |||
| 23a9017037 | |||
| fb4173d8a8 | |||
| 3638dc65e4 | |||
| cbc5bef54f | |||
| 88176d01f4 | |||
| 3e53917a28 | |||
| 5613f516dd | |||
| 7bc0502750 | |||
| 91bb2aeb67 | |||
| 5ba2b1a74d | |||
| dbe2a19e83 | |||
| 2d690d4e87 | |||
| 3ab4d93c66 | |||
| cb1018ea72 | |||
| 3310baba0f | |||
| 9dbc108a14 | |||
| 977f2704b3 | |||
| 1b39f8bd26 | |||
| 652953a81f | |||
| 7000ca4203 | |||
| 5bc239fe16 | |||
| aca674de82 | |||
| c709b18bfd | |||
| 6f6e5c5f2c | |||
| 50c9a31b4c | |||
| f8e0c47b29 | |||
| 767a8f6227 | |||
| 36343864e2 | |||
| 7d395933ee | |||
| c70b6c9f35 | |||
| dd1b760bad | |||
| de4362e128 | |||
| eda1e25f5c | |||
| c160c73353 | |||
| 5c06a61da4 | |||
| 9d7d57c20f | |||
| 138ca8c7ad | |||
| 9dd6df6ee4 | |||
| 9d4514861f | |||
| d5f8753b88 | |||
| a720337d2e | |||
| 5ff63c0239 |
220
CHANGELOG.md
@ -1,85 +1,155 @@
|
||||
# Changelog
|
||||
# Changelog
|
||||
For more information on the list of deprecated flags and APIs please have a look at
|
||||
https://docs.docker.com/engine/deprecated/ where you can find the target removal dates
|
||||
|
||||
For more information on the list of deprecated flags and APIs, have a look at
|
||||
https://docs.docker.com/engine/deprecated/ where you can find the target removal dates
|
||||
|
||||
## 18.04.0-ce (2018-04-10)
|
||||
|
||||
### Builder
|
||||
|
||||
- Fix typos in builder and client. [moby/moby#36424](https://github.com/moby/moby/pull/36424)
|
||||
## 18.03.1-ce (2018-04-26)
|
||||
|
||||
### Client
|
||||
|
||||
* Print Stack API and Kubernetes versions in version command. [docker/cli#898](https://github.com/docker/cli/pull/898)
|
||||
- Fix Kubernetes duplication in version command. [docker/cli#953](https://github.com/docker/cli/pull/953)
|
||||
* Use HasAvailableFlags instead of HasFlags for Options in help. [docker/cli#959](https://github.com/docker/cli/pull/959)
|
||||
+ Add support for mandatory variables to stack deploy. [docker/cli#893](https://github.com/docker/cli/pull/893)
|
||||
- Fix docker stack services command Port output. [docker/cli#943](https://github.com/docker/cli/pull/943)
|
||||
* Deprecate unencrypted storage. [docker/cli#561](https://github.com/docker/cli/pull/561)
|
||||
* Don't set a default filename for ConfigFile. [docker/cli#917](https://github.com/docker/cli/pull/917)
|
||||
- Fix compose network name. [docker/cli#941](https://github.com/docker/cli/pull/941)
|
||||
|
||||
### Logging
|
||||
|
||||
* Silent login: use credentials from cred store to login. [docker/cli#139](https://github.com/docker/cli/pull/139)
|
||||
+ Add support for compressibility of log file. [moby/moby#29932](https://github.com/moby/moby/pull/29932)
|
||||
- Fix empty LogPath with non-blocking logging mode. [moby/moby#36272](https://github.com/moby/moby/pull/36272)
|
||||
|
||||
### Networking
|
||||
|
||||
- Prevent explicit removal of ingress network. [moby/moby#36538](https://github.com/moby/moby/pull/36538)
|
||||
- Fix error with merge compose file with networks [docker/cli#983](https://github.com/docker/cli/pull/983)
|
||||
* Fix docker stack deploy re-deploying services after the service was updated with `--force` [docker/cli#963](https://github.com/docker/cli/pull/963)
|
||||
* Fix docker version output alignment [docker/cli#965](https://github.com/docker/cli/pull/965)
|
||||
|
||||
### Runtime
|
||||
|
||||
* Devmapper cleanup improvements. [moby/moby#36307](https://github.com/moby/moby/pull/36307)
|
||||
* Devmapper.Mounted: remove. [moby/moby#36437](https://github.com/moby/moby/pull/36437)
|
||||
* Devmapper/Remove(): use Rmdir, ignore errors. [moby/moby#36438](https://github.com/moby/moby/pull/36438)
|
||||
* LCOW - Change platform parser directive to FROM statement flag. [moby/moby#35089](https://github.com/moby/moby/pull/35089)
|
||||
* Split daemon service code to windows file. [moby/moby#36653](https://github.com/moby/moby/pull/36653)
|
||||
* Windows: Block pulling uplevel images. [moby/moby#36327](https://github.com/moby/moby/pull/36327)
|
||||
* Windows: Hyper-V containers are broken after 36586 was merged. [moby/moby#36610](https://github.com/moby/moby/pull/36610)
|
||||
* Windows: Move kernel_windows to use golang registry functions. [moby/moby#36617](https://github.com/moby/moby/pull/36617)
|
||||
* Windows: Pass back system errors on container exit. [moby/moby#35967](https://github.com/moby/moby/pull/35967)
|
||||
* Windows: Remove servicing mode. [moby/moby#36267](https://github.com/moby/moby/pull/36267)
|
||||
* Windows: Report Version and UBR. [moby/moby#36451](https://github.com/moby/moby/pull/36451)
|
||||
* Bump Runc to 1.0.0-rc5. [moby/moby#36449](https://github.com/moby/moby/pull/36449)
|
||||
* Mount failure indicates the path that failed. [moby/moby#36407](https://github.com/moby/moby/pull/36407)
|
||||
* Change return for errdefs.getImplementer(). [moby/moby#36489](https://github.com/moby/moby/pull/36489)
|
||||
* Client: fix hijackedconn reading from buffer. [moby/moby#36663](https://github.com/moby/moby/pull/36663)
|
||||
* Content encoding negotiation added to archive request. [moby/moby#36164](https://github.com/moby/moby/pull/36164)
|
||||
* Daemon/stats: more resilient cpu sampling. [moby/moby#36519](https://github.com/moby/moby/pull/36519)
|
||||
* Daemon/stats: remove obnoxious types file. [moby/moby#36494](https://github.com/moby/moby/pull/36494)
|
||||
* Daemon: use context error rather than inventing new one. [moby/moby#36670](https://github.com/moby/moby/pull/36670)
|
||||
* Enable CRIU on non-amd64 architectures (v2). [moby/moby#36676](https://github.com/moby/moby/pull/36676)
|
||||
- Fixes intermittent client hang after closing stdin to attached container [moby/moby#36517](https://github.com/moby/moby/pull/36517)
|
||||
- Fix daemon panic on container export after restart [moby/moby#36586](https://github.com/moby/moby/pull/36586)
|
||||
- Follow-up fixes on multi-stage moby's Dockerfile. [moby/moby#36425](https://github.com/moby/moby/pull/36425)
|
||||
* Freeze busybox and latest glibc in Docker image. [moby/moby#36375](https://github.com/moby/moby/pull/36375)
|
||||
* If container will run as non root user, drop permitted, effective caps early. [moby/moby#36587](https://github.com/moby/moby/pull/36587)
|
||||
* Layer: remove metadata store interface. [moby/moby#36504](https://github.com/moby/moby/pull/36504)
|
||||
* Minor optimizations to dockerd. [moby/moby#36577](https://github.com/moby/moby/pull/36577)
|
||||
* Whitelist statx syscall. [moby/moby#36417](https://github.com/moby/moby/pull/36417)
|
||||
+ Add missing error return for plugin creation. [moby/moby#36646](https://github.com/moby/moby/pull/36646)
|
||||
- Fix AppArmor not being applied to Exec processes. [moby/moby#36466](https://github.com/moby/moby/pull/36466)
|
||||
* Daemon/logger/ring.go: log error not instance. [moby/moby#36475](https://github.com/moby/moby/pull/36475)
|
||||
- Fix stats collector spinning CPU if no stats are collected. [moby/moby#36609](https://github.com/moby/moby/pull/36609)
|
||||
- Fix(distribution): digest cache should not be moved if it was an auth. [moby/moby#36509](https://github.com/moby/moby/pull/36509)
|
||||
- Make sure plugin container is removed on failure. [moby/moby#36715](https://github.com/moby/moby/pull/36715)
|
||||
* Bump to containerd 1.0.3. [moby/moby#36749](https://github.com/moby/moby/pull/36749)
|
||||
* Don't sort plugin mount slice. [moby/moby#36711](https://github.com/moby/moby/pull/36711)
|
||||
- Fix AppArmor profiles not being applied to `docker exec` processes [moby/moby#36466](https://github.com/moby/moby/pull/36466)
|
||||
- Don't sort plugin mount slice [moby/moby#36711](https://github.com/moby/moby/pull/36711)
|
||||
- Daemon/cluster: handle partial attachment entries during configure [moby/moby#36769](https://github.com/moby/moby/pull/36769)
|
||||
* Bump Golang to 1.9.5 [moby/moby#36779](https://github.com/moby/moby/pull/36779) [docker/cli#986](https://github.com/docker/cli/pull/986)
|
||||
- Daemon/stats: more resilient cpu sampling [moby/moby#36519](https://github.com/moby/moby/pull/36519)
|
||||
* Containerd: update to 1.0.3 release [moby/moby#36749](https://github.com/moby/moby/pull/36749)
|
||||
- Fix Windows layer leak when write fails [moby/moby#36728](https://github.com/moby/moby/pull/36728)
|
||||
* Don't make container mount unbindable [moby/moby#36768](https://github.com/moby/moby/pull/36768)
|
||||
- Fix Daemon panics on container export after a daemon restart [moby/moby#36586](https://github.com/moby/moby/pull/36586)
|
||||
- Fix digest cache being removed on auth errors [moby/moby#36509](https://github.com/moby/moby/pull/36509)
|
||||
- Make sure plugin container is removed on failure [moby/moby#36715](https://github.com/moby/moby/pull/36715)
|
||||
- Copy: avoid using all system memory with authz plugins [moby/moby#36595](https://github.com/moby/moby/pull/36595)
|
||||
- Relax some libcontainerd client locking [moby/moby#36848](https://github.com/moby/moby/pull/36848)
|
||||
|
||||
### Swarm Mode
|
||||
|
||||
* Fixes for synchronizing the dispatcher shutdown with in-progress rpcs. [moby/moby#36371](https://github.com/moby/moby/pull/36371)
|
||||
* Increase raft ElectionTick to 10xHeartbeatTick. [moby/moby#36672](https://github.com/moby/moby/pull/36672)
|
||||
* Make Swarm manager Raft quorum parameters configurable in daemon config. [moby/moby#36726](https://github.com/moby/moby/pull/36726)
|
||||
* Ingress network should not be attachable. [docker/swarmkit#2523](https://github.com/docker/swarmkit/pull/2523)
|
||||
* [manager/state] Add fernet as an option for raft encryption. [docker/swarmkit#2535](https://github.com/docker/swarmkit/pull/2535)
|
||||
* Log GRPC server errors. [docker/swarmkit#2541](https://github.com/docker/swarmkit/pull/2541)
|
||||
* Log leadership changes at the manager level. [docker/swarmkit#2542](https://github.com/docker/swarmkit/pull/2542)
|
||||
* Remove the containerd executor. [docker/swarmkit#2568](https://github.com/docker/swarmkit/pull/2568)
|
||||
* Agent: backoff session when no remotes are available. [docker/swarmkit#2570](https://github.com/docker/swarmkit/pull/2570)
|
||||
* [ca/manager] Remove root CA key encryption support entirely. [docker/swarmkit#2573](https://github.com/docker/swarmkit/pull/2573)
|
||||
- Fix agent logging race. [docker/swarmkit#2578](https://github.com/docker/swarmkit/pull/2578)
|
||||
* Adding logic to restore networks in order. [docker/swarmkit#2571](https://github.com/docker/swarmkit/pull/2571)
|
||||
* Increase raft Election tick to 10 times Heartbeat tick [moby/moby#36672](https://github.com/moby/moby/pull/36672)
|
||||
|
||||
### Networking
|
||||
|
||||
* Gracefully remove LB endpoints from services [docker/libnetwork#2112](https://github.com/docker/libnetwork/pull/2112)
|
||||
* Retry other external DNS servers on ServFail [docker/libnetwork#2121](https://github.com/docker/libnetwork/pull/2121)
|
||||
* Improve scalability of bridge network isolation rules [docker/libnetwork#2117](https://github.com/docker/libnetwork/pull/2117)
|
||||
* Allow for larger preset property values, do not override [docker/libnetwork#2124](https://github.com/docker/libnetwork/pull/2124)
|
||||
* Prevent panics on concurrent reads/writes when calling `changeNodeState` [docker/libnetwork#2136](https://github.com/docker/libnetwork/pull/2136)
|
||||
|
||||
## 18.03.0-ce (2018-03-21)
|
||||
|
||||
### Builder
|
||||
|
||||
* Switch to -buildmode=pie [moby/moby#34369](https://github.com/moby/moby/pull/34369)
|
||||
* Allow Dockerfile to be outside of build-context [docker/cli#886](https://github.com/docker/cli/pull/886)
|
||||
* Builder: fix wrong cache hits building from tars [moby/moby#36329](https://github.com/moby/moby/pull/36329)
|
||||
- Fixes files leaking to other images in a multi-stage build [moby/moby#36338](https://github.com/moby/moby/pull/36338)
|
||||
|
||||
### Client
|
||||
|
||||
* Simplify the marshaling of compose types.Config [docker/cli#895](https://github.com/docker/cli/pull/895)
|
||||
+ Add support for multiple composefile when deploying [docker/cli#569](https://github.com/docker/cli/pull/569)
|
||||
- Fix broken Kubernetes stack flags [docker/cli#831](https://github.com/docker/cli/pull/831)
|
||||
- Fix stack marshaling for Kubernetes [docker/cli#890](https://github.com/docker/cli/pull/890)
|
||||
- Fix and simplify bash completion for service env, mounts and labels [docker/cli#682](https://github.com/docker/cli/pull/682)
|
||||
- Fix `before` and `since` filter for `docker ps` [moby/moby#35938](https://github.com/moby/moby/pull/35938)
|
||||
- Fix `--label-file` weird behavior [docker/cli#838](https://github.com/docker/cli/pull/838)
|
||||
- Fix compilation of defaultCredentialStore() on unsupported platforms [docker/cli#872](https://github.com/docker/cli/pull/872)
|
||||
* Improve and fix bash completion for images [docker/cli#717](https://github.com/docker/cli/pull/717)
|
||||
+ Added check for empty source in bind mount [docker/cli#824](https://github.com/docker/cli/pull/824)
|
||||
- Fix TLS from environment variables in client [moby/moby#36270](https://github.com/moby/moby/pull/36270)
|
||||
* docker build now runs faster when registry-specific credential helper(s) are configured [docker/cli#840](https://github.com/docker/cli/pull/840)
|
||||
* Update event filter zsh completion with `disable`, `enable`, `install` and `remove` [docker/cli#372](https://github.com/docker/cli/pull/372)
|
||||
* Produce errors when empty ids are passed into inspect calls [moby/moby#36144](https://github.com/moby/moby/pull/36144)
|
||||
* Marshall version for the k8s controller [docker/cli#891](https://github.com/docker/cli/pull/891)
|
||||
* Set a non-zero timeout for HTTP client communication with plugin backend [docker/cli#883](https://github.com/docker/cli/pull/883)
|
||||
+ Add DOCKER_TLS environment variable for --tls option [docker/cli#863](https://github.com/docker/cli/pull/863)
|
||||
+ Add --template-driver option for secrets/configs [docker/cli#896](https://github.com/docker/cli/pull/896)
|
||||
+ Move `docker trust` commands out of experimental [docker/cli#934](https://github.com/docker/cli/pull/934) [docker/cli#935](https://github.com/docker/cli/pull/935) [docker/cli#944](https://github.com/docker/cli/pull/944)
|
||||
|
||||
### Logging
|
||||
|
||||
* AWS logs - don't add new lines to maximum sized events [moby/moby#36078](https://github.com/moby/moby/pull/36078)
|
||||
* Move log validator logic after plugins are loaded [moby/moby#36306](https://github.com/moby/moby/pull/36306)
|
||||
* Support a proxy in Splunk log driver [moby/moby#36220](https://github.com/moby/moby/pull/36220)
|
||||
- Fix log tail with empty logs [moby/moby#36305](https://github.com/moby/moby/pull/36305)
|
||||
|
||||
### Networking
|
||||
|
||||
* Libnetwork revendoring [moby/moby#36137](https://github.com/moby/moby/pull/36137)
|
||||
- Fix for deadlock on exit with Memberlist revendor [docker/libnetwork#2040](https://github.com/docker/libnetwork/pull/2040)
|
||||
* Fix user specified ndots option [docker/libnetwork#2065](https://github.com/docker/libnetwork/pull/2065)
|
||||
- Fix to use ContainerID for Windows instead of SandboxID [docker/libnetwork#2010](https://github.com/docker/libnetwork/pull/2010)
|
||||
* Verify NetworkingConfig to make sure EndpointSettings is not nil [moby/moby#36077](https://github.com/moby/moby/pull/36077)
|
||||
- Fix `DockerNetworkInternalMode` issue [moby/moby#36298](https://github.com/moby/moby/pull/36298)
|
||||
- Fix race in attachable network attachment [moby/moby#36191](https://github.com/moby/moby/pull/36191)
|
||||
- Fix timeout issue of `InspectNetwork` on AArch64 [moby/moby#36257](https://github.com/moby/moby/pull/36257)
|
||||
* Verbose info is missing for partial overlay ID [moby/moby#35989](https://github.com/moby/moby/pull/35989)
|
||||
* Update `FindNetwork` to address network name duplications [moby/moby#30897](https://github.com/moby/moby/pull/30897)
|
||||
* Disallow attaching ingress network [docker/swarmkit#2523](https://github.com/docker/swarmkit/pull/2523)
|
||||
- Prevent implicit removal of the ingress network [moby/moby#36538](https://github.com/moby/moby/pull/36538)
|
||||
- Fix stale HNS endpoints on Windows [moby/moby#36603](https://github.com/moby/moby/pull/36603)
|
||||
- IPAM fixes for duplicate IP addresses [docker/libnetwork#2104](https://github.com/docker/libnetwork/pull/2104) [docker/libnetwork#2105](https://github.com/docker/libnetwork/pull/2105)
|
||||
|
||||
### Runtime
|
||||
|
||||
* Enable HotAdd for Windows [moby/moby#35414](https://github.com/moby/moby/pull/35414)
|
||||
* LCOW: Graphdriver fix deadlock in hotRemoveVHDs [moby/moby#36114](https://github.com/moby/moby/pull/36114)
|
||||
* LCOW: Regular mount if only one layer [moby/moby#36052](https://github.com/moby/moby/pull/36052)
|
||||
* Remove interim env var LCOW_API_PLATFORM_IF_OMITTED [moby/moby#36269](https://github.com/moby/moby/pull/36269)
|
||||
* Revendor Microsoft/opengcs @ v0.3.6 [moby/moby#36108](https://github.com/moby/moby/pull/36108)
|
||||
- Fix issue of ExitCode and PID not show up in Task.Status.ContainerStatus [moby/moby#36150](https://github.com/moby/moby/pull/36150)
|
||||
- Fix issue with plugin scanner going too deep [moby/moby#36119](https://github.com/moby/moby/pull/36119)
|
||||
* Do not make graphdriver homes private mounts [moby/moby#36047](https://github.com/moby/moby/pull/36047)
|
||||
* Do not recursive unmount on cleanup of zfs/btrfs [moby/moby#36237](https://github.com/moby/moby/pull/36237)
|
||||
* Don't restore image if layer does not exist [moby/moby#36304](https://github.com/moby/moby/pull/36304)
|
||||
* Adjust minimum API version for templated configs/secrets [moby/moby#36366](https://github.com/moby/moby/pull/36366)
|
||||
* Bump containerd to 1.0.2 (cfd04396dc68220d1cecbe686a6cc3aa5ce3667c) [moby/moby#36308](https://github.com/moby/moby/pull/36308)
|
||||
* Bump Golang to 1.9.4 [moby/moby#36243](https://github.com/moby/moby/pull/36243)
|
||||
* Ensure daemon root is unmounted on shutdown [moby/moby#36107](https://github.com/moby/moby/pull/36107)
|
||||
- Fix container cleanup on daemon restart [moby/moby#36249](https://github.com/moby/moby/pull/36249)
|
||||
* Support SCTP port mapping (bump up API to v1.37) [moby/moby#33922](https://github.com/moby/moby/pull/33922)
|
||||
* Support SCTP port mapping [docker/cli#278](https://github.com/docker/cli/pull/278)
|
||||
- Fix Volumes property definition in ContainerConfig [moby/moby#35946](https://github.com/moby/moby/pull/35946)
|
||||
* Bump moby and dependencies [docker/cli#829](https://github.com/docker/cli/pull/829)
|
||||
* C.RWLayer: check for nil before use [moby/moby#36242](https://github.com/moby/moby/pull/36242)
|
||||
+ Add `REMOVE` and `ORPHANED` to TaskState [moby/moby#36146](https://github.com/moby/moby/pull/36146)
|
||||
- Fixed error detection using `IsErrNotFound` and `IsErrNotImplemented` for `ContainerStatPath`, `CopyFromContainer`, and `CopyToContainer` methods [moby/moby#35979](https://github.com/moby/moby/pull/35979)
|
||||
+ Add an integration/internal/container helper package [moby/moby#36266](https://github.com/moby/moby/pull/36266)
|
||||
+ Add canonical import path [moby/moby#36194](https://github.com/moby/moby/pull/36194)
|
||||
+ Add/use container.Exec() to integration [moby/moby#36326](https://github.com/moby/moby/pull/36326)
|
||||
- Fix "--node-generic-resource" singular/plural [moby/moby#36125](https://github.com/moby/moby/pull/36125)
|
||||
* Daemon.cleanupContainer: nullify container RWLayer upon release [moby/moby#36160](https://github.com/moby/moby/pull/36160)
|
||||
* Daemon: passdown the `--oom-kill-disable` option to containerd [moby/moby#36201](https://github.com/moby/moby/pull/36201)
|
||||
* Display a warn message when there is binding ports and net mode is host [moby/moby#35510](https://github.com/moby/moby/pull/35510)
|
||||
* Refresh containerd remotes on containerd restarted [moby/moby#36173](https://github.com/moby/moby/pull/36173)
|
||||
* Set daemon root to use shared propagation [moby/moby#36096](https://github.com/moby/moby/pull/36096)
|
||||
* Optimizations for recursive unmount [moby/moby#34379](https://github.com/moby/moby/pull/34379)
|
||||
* Perform plugin mounts in the runtime [moby/moby#35829](https://github.com/moby/moby/pull/35829)
|
||||
* Graphdriver: Fix RefCounter memory leak [moby/moby#36256](https://github.com/moby/moby/pull/36256)
|
||||
* Use continuity fs package for volume copy [moby/moby#36290](https://github.com/moby/moby/pull/36290)
|
||||
* Use proc/exe for reexec [moby/moby#36124](https://github.com/moby/moby/pull/36124)
|
||||
+ Add API support for templated secrets and configs [moby/moby#33702](https://github.com/moby/moby/pull/33702) and [moby/moby#36366](https://github.com/moby/moby/pull/36366)
|
||||
* Use rslave propagation for mounts from daemon root [moby/moby#36055](https://github.com/moby/moby/pull/36055)
|
||||
+ Add /proc/keys to masked paths [moby/moby#36368](https://github.com/moby/moby/pull/36368)
|
||||
* Bump Runc to 1.0.0-rc5 [moby/moby#36449](https://github.com/moby/moby/pull/36449)
|
||||
- Fixes `runc exec` on big-endian architectures [moby/moby#36449](https://github.com/moby/moby/pull/36449)
|
||||
* Use chroot when mount namespaces aren't provided [moby/moby#36449](https://github.com/moby/moby/pull/36449)
|
||||
- Fix systemd slice expansion so that it could be consumed by cAdvisor [moby/moby#36449](https://github.com/moby/moby/pull/36449)
|
||||
- Fix devices mounted with wrong uid/gid [moby/moby#36449](https://github.com/moby/moby/pull/36449)
|
||||
- Fix read-only containers with IPC private mounts `/dev/shm` read-only [moby/moby#36526](https://github.com/moby/moby/pull/36526)
|
||||
|
||||
### Swarm Mode
|
||||
|
||||
* Replace EC Private Key with PKCS#8 PEMs [docker/swarmkit#2246](https://github.com/docker/swarmkit/pull/2246)
|
||||
* Fix IP overlap with empty EndpointSpec [docker/swarmkit #2505](https://github.com/docker/swarmkit/pull/2505)
|
||||
* Add support for SCTP port mapping [docker/swarmkit#2298](https://github.com/docker/swarmkit/pull/2298)
|
||||
* Do not reschedule tasks if only placement constraints change and are satisfied by the assigned node [docker/swarmkit#2496](https://github.com/docker/swarmkit/pull/2496)
|
||||
* Ensure task reaper stopChan is closed no more than once [docker/swarmkit #2491](https://github.com/docker/swarmkit/pull/2491)
|
||||
* Synchronization fixes [docker/swarmkit#2495](https://github.com/docker/swarmkit/pull/2495)
|
||||
* Add log message to indicate message send retry if streaming unimplemented [docker/swarmkit#2483](https://github.com/docker/swarmkit/pull/2483)
|
||||
* Debug logs for session, node events on dispatcher, heartbeats [docker/swarmkit#2486](https://github.com/docker/swarmkit/pull/2486)
|
||||
+ Add swarm types to bash completion event type filter [docker/cli#888](https://github.com/docker/cli/pull/888)
|
||||
- Fix issue where network inspect does not show Created time for networks in swarm scope [moby/moby#36095](https://github.com/moby/moby/pull/36095)
|
||||
|
||||
@ -1 +1 @@
18.04.0-ce
18.03.1-ce
||||
@ -128,7 +128,7 @@ Examples:
{{ .Example }}

{{- end}}
{{- if .HasAvailableFlags}}
{{- if .HasFlags}}

Options:
{{ wrappedFlagUsages . | trimRightSpace}}
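The template hunk above switches the `Options:` section from `.HasFlags` to `.HasAvailableFlags` (docker/cli#959), so commands whose only flags are hidden no longer print an empty Options header. A minimal sketch of the difference, using spf13/cobra directly; the command and flag names here are made up for illustration and are not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{Use: "example"}

	// Register a flag, then hide it from help output.
	cmd.Flags().Bool("debug", false, "enable debug output")
	_ = cmd.Flags().MarkHidden("debug")

	// HasFlags reports any defined flags; HasAvailableFlags ignores
	// hidden ones, so the help template can skip the "Options:" header
	// when nothing would actually be listed under it.
	fmt.Println(cmd.HasFlags())          // true: a flag is defined
	fmt.Println(cmd.HasAvailableFlags()) // false: the only flag is hidden
}
```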
55
components/cli/cli/command/image/build_linux_test.go
Normal file
@ -0,0 +1,55 @@
//+build linux

package image

import (
	"bytes"
	"io"
	"io/ioutil"
	"syscall"
	"testing"

	"github.com/docker/cli/internal/test"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/pkg/archive"
	"github.com/gotestyourself/gotestyourself/assert"
	is "github.com/gotestyourself/gotestyourself/assert/cmp"
	"github.com/gotestyourself/gotestyourself/fs"
	"golang.org/x/net/context"
)

func TestRunBuildResetsUidAndGidInContext(t *testing.T) {
	dest := fs.NewDir(t, "test-build-context-dest")
	defer dest.Remove()

	fakeImageBuild := func(_ context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
		assert.NilError(t, archive.Untar(context, dest.Path(), nil))

		body := new(bytes.Buffer)
		return types.ImageBuildResponse{Body: ioutil.NopCloser(body)}, nil
	}
	cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeImageBuild})

	dir := fs.NewDir(t, "test-build-context",
		fs.WithFile("foo", "some content", fs.AsUser(65534, 65534)),
		fs.WithFile("Dockerfile", `
FROM alpine:3.6
COPY foo bar /
`),
	)
	defer dir.Remove()

	options := newBuildOptions()
	options.context = dir.Path()
	options.untrusted = true

	err := runBuild(cli, options)
	assert.NilError(t, err)

	files, err := ioutil.ReadDir(dest.Path())
	assert.NilError(t, err)
	for _, fileInfo := range files {
		assert.Check(t, is.Equal(uint32(0), fileInfo.Sys().(*syscall.Stat_t).Uid))
		assert.Check(t, is.Equal(uint32(0), fileInfo.Sys().(*syscall.Stat_t).Gid))
	}
}
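The new test above untars the generated build context to disk and then stats each file to verify that ownership was reset to uid/gid 0. A smaller way to make the same check, without touching the filesystem, is to read the tar headers directly; the sketch below is illustrative only (the `context.tar` path is hypothetical) and is not part of the diff:

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"
	"os"
)

// checkOwnership walks a tar stream and reports any entry whose
// uid/gid is not 0, which is what the build-context test asserts.
func checkOwnership(r io.Reader) error {
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if hdr.Uid != 0 || hdr.Gid != 0 {
			return fmt.Errorf("%s owned by %d:%d, want 0:0", hdr.Name, hdr.Uid, hdr.Gid)
		}
	}
}

func main() {
	// Example: verify a context tarball that was saved to disk.
	f, err := os.Open("context.tar") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := checkOwnership(f); err != nil {
		log.Fatal(err)
	}
}
```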
@ -3,7 +3,6 @@ package image
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@ -15,21 +14,30 @@ import (
|
||||
"github.com/docker/cli/internal/test"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/gotestyourself/gotestyourself/assert"
|
||||
is "github.com/gotestyourself/gotestyourself/assert/cmp"
|
||||
"github.com/gotestyourself/gotestyourself/fs"
|
||||
"github.com/gotestyourself/gotestyourself/skip"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func TestRunBuildDockerfileFromStdinWithCompress(t *testing.T) {
|
||||
buffer := new(bytes.Buffer)
|
||||
fakeBuild := newFakeBuild()
|
||||
fakeImageBuild := func(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
|
||||
dest, err := ioutil.TempDir("", "test-build-compress-dest")
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(dest)
|
||||
|
||||
var dockerfileName string
|
||||
fakeImageBuild := func(_ context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
|
||||
buffer := new(bytes.Buffer)
|
||||
tee := io.TeeReader(context, buffer)
|
||||
gzipReader, err := gzip.NewReader(tee)
|
||||
assert.NilError(t, err)
|
||||
return fakeBuild.build(ctx, gzipReader, options)
|
||||
|
||||
assert.NilError(t, archive.Untar(tee, dest, nil))
|
||||
dockerfileName = options.Dockerfile
|
||||
|
||||
header := buffer.Bytes()[:10]
|
||||
assert.Check(t, is.Equal(archive.Gzip, archive.DetectCompression(header)))
|
||||
|
||||
body := new(bytes.Buffer)
|
||||
return types.ImageBuildResponse{Body: ioutil.NopCloser(body)}, nil
|
||||
}
|
||||
|
||||
cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeImageBuild})
|
||||
@ -39,57 +47,35 @@ func TestRunBuildDockerfileFromStdinWithCompress(t *testing.T) {
|
||||
`)
|
||||
cli.SetIn(command.NewInStream(ioutil.NopCloser(dockerfile)))
|
||||
|
||||
dir := fs.NewDir(t, t.Name(),
|
||||
fs.WithFile("foo", "some content"))
|
||||
defer dir.Remove()
|
||||
dir, err := ioutil.TempDir("", "test-build-compress")
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
ioutil.WriteFile(filepath.Join(dir, "foo"), []byte("some content"), 0644)
|
||||
|
||||
options := newBuildOptions()
|
||||
options.compress = true
|
||||
options.dockerfileName = "-"
|
||||
options.context = dir.Path()
|
||||
options.context = dir
|
||||
options.untrusted = true
|
||||
assert.NilError(t, runBuild(cli, options))
|
||||
|
||||
expected := []string{fakeBuild.options.Dockerfile, ".dockerignore", "foo"}
|
||||
assert.DeepEqual(t, expected, fakeBuild.filenames(t))
|
||||
err = runBuild(cli, options)
|
||||
assert.NilError(t, err)
|
||||
|
||||
header := buffer.Bytes()[:10]
|
||||
assert.Equal(t, archive.Gzip, archive.DetectCompression(header))
|
||||
}
|
||||
|
||||
func TestRunBuildResetsUidAndGidInContext(t *testing.T) {
|
||||
skip.If(t, os.Getuid() != 0, "root is required to chown files")
|
||||
fakeBuild := newFakeBuild()
|
||||
cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeBuild.build})
|
||||
|
||||
dir := fs.NewDir(t, "test-build-context",
|
||||
fs.WithFile("foo", "some content", fs.AsUser(65534, 65534)),
|
||||
fs.WithFile("Dockerfile", `
|
||||
FROM alpine:3.6
|
||||
COPY foo bar /
|
||||
`),
|
||||
)
|
||||
defer dir.Remove()
|
||||
|
||||
options := newBuildOptions()
|
||||
options.context = dir.Path()
|
||||
options.untrusted = true
|
||||
assert.NilError(t, runBuild(cli, options))
|
||||
|
||||
headers := fakeBuild.headers(t)
|
||||
expected := []*tar.Header{
|
||||
{Name: "Dockerfile"},
|
||||
{Name: "foo"},
|
||||
files, err := ioutil.ReadDir(dest)
|
||||
assert.NilError(t, err)
|
||||
actual := []string{}
|
||||
for _, fileInfo := range files {
|
||||
actual = append(actual, fileInfo.Name())
|
||||
}
|
||||
var cmpTarHeaderNameAndOwner = cmp.Comparer(func(x, y tar.Header) bool {
|
||||
return x.Name == y.Name && x.Uid == y.Uid && x.Gid == y.Gid
|
||||
})
|
||||
assert.DeepEqual(t, expected, headers, cmpTarHeaderNameAndOwner)
|
||||
sort.Strings(actual)
|
||||
assert.Check(t, is.DeepEqual([]string{dockerfileName, ".dockerignore", "foo"}, actual))
|
||||
}
|
||||
|
||||
func TestRunBuildDockerfileOutsideContext(t *testing.T) {
|
||||
dir := fs.NewDir(t, t.Name(),
|
||||
fs.WithFile("data", "data file"))
|
||||
fs.WithFile("data", "data file"),
|
||||
)
|
||||
defer dir.Remove()
|
||||
|
||||
// Dockerfile outside of build-context
|
||||
@ -101,17 +87,40 @@ COPY data /data
|
||||
)
|
||||
defer df.Remove()
|
||||
|
||||
fakeBuild := newFakeBuild()
|
||||
cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeBuild.build})
|
||||
dest, err := ioutil.TempDir("", t.Name())
|
||||
assert.NilError(t, err)
|
||||
defer os.RemoveAll(dest)
|
||||
|
||||
var dockerfileName string
|
||||
fakeImageBuild := func(_ context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
|
||||
buffer := new(bytes.Buffer)
|
||||
tee := io.TeeReader(context, buffer)
|
||||
|
||||
assert.NilError(t, archive.Untar(tee, dest, nil))
|
||||
dockerfileName = options.Dockerfile
|
||||
|
||||
body := new(bytes.Buffer)
|
||||
return types.ImageBuildResponse{Body: ioutil.NopCloser(body)}, nil
|
||||
}
|
||||
|
||||
cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeImageBuild})
|
||||
|
||||
options := newBuildOptions()
|
||||
options.context = dir.Path()
|
||||
options.dockerfileName = df.Path()
|
||||
options.untrusted = true
|
||||
assert.NilError(t, runBuild(cli, options))
|
||||
|
||||
expected := []string{fakeBuild.options.Dockerfile, ".dockerignore", "data"}
|
||||
assert.DeepEqual(t, expected, fakeBuild.filenames(t))
|
||||
err = runBuild(cli, options)
|
||||
assert.NilError(t, err)
|
||||
|
||||
files, err := ioutil.ReadDir(dest)
|
||||
assert.NilError(t, err)
|
||||
var actual []string
|
||||
for _, fileInfo := range files {
|
||||
actual = append(actual, fileInfo.Name())
|
||||
}
|
||||
sort.Strings(actual)
|
||||
assert.Check(t, is.DeepEqual([]string{dockerfileName, ".dockerignore", "data"}, actual))
|
||||
}
|
||||
|
||||
// TestRunBuildFromLocalGitHubDirNonExistingRepo tests that build contexts
|
||||
@ -163,54 +172,28 @@ RUN echo hello world
|
||||
fs.WithSymlink("context-link", "context"))
|
||||
defer tmpDir.Remove()
|
||||
|
||||
fakeBuild := newFakeBuild()
|
||||
cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeBuild.build})
|
||||
files := []string{}
|
||||
fakeImageBuild := func(_ context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
|
||||
tarReader := tar.NewReader(context)
|
||||
for {
|
||||
hdr, err := tarReader.Next()
|
||||
switch err {
|
||||
case io.EOF:
|
||||
body := new(bytes.Buffer)
|
||||
return types.ImageBuildResponse{Body: ioutil.NopCloser(body)}, nil
|
||||
case nil:
|
||||
files = append(files, hdr.Name)
|
||||
default:
|
||||
return types.ImageBuildResponse{}, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cli := test.NewFakeCli(&fakeClient{imageBuildFunc: fakeImageBuild})
|
||||
options := newBuildOptions()
|
||||
options.context = tmpDir.Join("context-link")
|
||||
options.untrusted = true
|
||||
assert.NilError(t, runBuild(cli, options))
|
||||
|
||||
assert.DeepEqual(t, fakeBuild.filenames(t), []string{"Dockerfile"})
|
||||
}
|
||||
|
||||
type fakeBuild struct {
|
||||
context *tar.Reader
|
||||
options types.ImageBuildOptions
|
||||
}
|
||||
|
||||
func newFakeBuild() *fakeBuild {
|
||||
return &fakeBuild{}
|
||||
}
|
||||
|
||||
func (f *fakeBuild) build(_ context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
|
||||
f.context = tar.NewReader(context)
|
||||
f.options = options
|
||||
body := new(bytes.Buffer)
|
||||
return types.ImageBuildResponse{Body: ioutil.NopCloser(body)}, nil
|
||||
}
|
||||
|
||||
func (f *fakeBuild) headers(t *testing.T) []*tar.Header {
|
||||
t.Helper()
|
||||
headers := []*tar.Header{}
|
||||
for {
|
||||
hdr, err := f.context.Next()
|
||||
switch err {
|
||||
case io.EOF:
|
||||
return headers
|
||||
case nil:
|
||||
headers = append(headers, hdr)
|
||||
default:
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *fakeBuild) filenames(t *testing.T) []string {
|
||||
t.Helper()
|
||||
names := []string{}
|
||||
for _, header := range f.headers(t) {
|
||||
names = append(names, header.Name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
return names
|
||||
assert.DeepEqual(t, files, []string{"Dockerfile"})
|
||||
}
|
||||
|
||||
@ -10,7 +10,6 @@ import (
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/gotestyourself/gotestyourself/assert"
|
||||
"github.com/gotestyourself/gotestyourself/golden"
|
||||
"github.com/gotestyourself/gotestyourself/skip"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@ -43,13 +42,7 @@ func TestNewHistoryCommandErrors(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func notUTCTimezone() bool {
|
||||
now := time.Now()
|
||||
return now != now.UTC()
|
||||
}
|
||||
|
||||
func TestNewHistoryCommandSuccess(t *testing.T) {
|
||||
skip.If(t, notUTCTimezone, "expected output requires UTC timezone")
|
||||
testCases := []struct {
|
||||
name string
|
||||
args []string
|
||||
|
||||
@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/docker/cli/cli/trust"
|
||||
registrytypes "github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/gotestyourself/gotestyourself/assert"
|
||||
"github.com/theupdateframework/notary/client"
|
||||
"github.com/theupdateframework/notary/passphrase"
|
||||
@ -46,8 +47,8 @@ func TestHTTPENVTrustServer(t *testing.T) {
|
||||
func TestOfficialTrustServer(t *testing.T) {
|
||||
indexInfo := &registrytypes.IndexInfo{Name: "testserver", Official: true}
|
||||
output, err := trust.Server(indexInfo)
|
||||
if err != nil || output != trust.NotaryServer {
|
||||
t.Fatalf("Expected server to be %s, got %s", trust.NotaryServer, output)
|
||||
if err != nil || output != registry.NotaryServer {
|
||||
t.Fatalf("Expected server to be %s, got %s", registry.NotaryServer, output)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -10,6 +10,7 @@ import (
|
||||
)
|
||||
|
||||
type fakeRegistryClient struct {
|
||||
client.RegistryClient
|
||||
getManifestFunc func(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error)
|
||||
getManifestListFunc func(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error)
|
||||
mountBlobFunc func(ctx context.Context, source reference.Canonical, target reference.Named) error
|
||||
@ -43,5 +44,3 @@ func (c *fakeRegistryClient) PutManifest(ctx context.Context, ref reference.Name
|
||||
}
|
||||
return digest.Digest(""), nil
|
||||
}
|
||||
|
||||
var _ client.RegistryClient = &fakeRegistryClient{}
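The `var _ client.RegistryClient = &fakeRegistryClient{}` line kept in this hunk is the standard Go compile-time assertion that the fake still satisfies the interface it stands in for. A generic, self-contained version of the idiom (the `fakeReader` type below is hypothetical and only demonstrates the pattern):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// fakeReader is a stand-in type used only to demonstrate the idiom.
type fakeReader struct{}

func (fakeReader) Read(p []byte) (int, error) { return 0, io.EOF }

// If fakeReader ever stops implementing io.Reader, this line fails to
// compile, so the breakage is caught without running any tests.
var _ io.Reader = fakeReader{}

func main() {
	var r io.Reader = strings.NewReader("ok")
	b := make([]byte, 2)
	n, _ := r.Read(b)
	fmt.Println(n, string(b[:n]))
}
```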
|
||||
|
||||
@ -21,7 +21,7 @@ func newCreateListCommand(dockerCli command.Cli) *cobra.Command {
|
||||
opts := createOpts{}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "create MANFEST_LIST MANIFEST [MANIFEST...]",
|
||||
Use: "create MANIFEST_LIST MANIFEST [MANIFEST...]",
|
||||
Short: "Create a local manifest list for annotating and pushing to a registry",
|
||||
Args: cli.RequiresMinArgs(2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
@ -12,7 +12,9 @@ import (
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func newFakeRegistryClient() *fakeRegistryClient {
|
||||
func newFakeRegistryClient(t *testing.T) *fakeRegistryClient {
|
||||
assert.NilError(t, nil)
|
||||
|
||||
return &fakeRegistryClient{
|
||||
getManifestFunc: func(_ context.Context, _ reference.Named) (manifesttypes.ImageManifest, error) {
|
||||
return manifesttypes.ImageManifest{}, errors.New("")
|
||||
@ -47,11 +49,12 @@ func TestManifestPushErrors(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// store a one-image manifest list and puah it
|
||||
func TestManifestPush(t *testing.T) {
|
||||
store, sCleanup := newTempManifestStore(t)
|
||||
defer sCleanup()
|
||||
|
||||
registry := newFakeRegistryClient()
|
||||
registry := newFakeRegistryClient(t)
|
||||
|
||||
cli := test.NewFakeCli(nil)
|
||||
cli.SetManifestStore(store)
|
||||
|
||||
@ -52,15 +52,11 @@ func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInf
|
||||
fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName)
|
||||
indexServer := registry.GetAuthConfigKey(index)
|
||||
isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli)
|
||||
authConfig, err := GetDefaultAuthConfig(cli, true, indexServer, isDefaultRegistry)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", indexServer, err)
|
||||
}
|
||||
err = ConfigureAuth(cli, "", "", authConfig, isDefaultRegistry)
|
||||
authConfig, err := ConfigureAuth(cli, "", "", indexServer, isDefaultRegistry)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return EncodeAuthToBase64(*authConfig)
|
||||
return EncodeAuthToBase64(authConfig)
|
||||
}
|
||||
}
|
||||
|
||||
@ -77,31 +73,22 @@ func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexI
|
||||
return a
|
||||
}
|
||||
|
||||
// GetDefaultAuthConfig gets the default auth config given a serverAddress
|
||||
// If credentials for given serverAddress exists in the credential store, the configuration will be populated with values in it
|
||||
func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (*types.AuthConfig, error) {
|
||||
if !isDefaultRegistry {
|
||||
serverAddress = registry.ConvertToHostname(serverAddress)
|
||||
}
|
||||
var authconfig types.AuthConfig
|
||||
var err error
|
||||
if checkCredStore {
|
||||
authconfig, err = cli.ConfigFile().GetAuthConfig(serverAddress)
|
||||
} else {
|
||||
authconfig = types.AuthConfig{}
|
||||
}
|
||||
authconfig.ServerAddress = serverAddress
|
||||
authconfig.IdentityToken = ""
|
||||
return &authconfig, err
|
||||
}
|
||||
|
||||
// ConfigureAuth handles prompting of user's username and password if needed
|
||||
func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *types.AuthConfig, isDefaultRegistry bool) error {
|
||||
// ConfigureAuth returns an AuthConfig from the specified user, password and server.
|
||||
func ConfigureAuth(cli Cli, flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) {
|
||||
// On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210
|
||||
if runtime.GOOS == "windows" {
|
||||
cli.SetIn(NewInStream(os.Stdin))
|
||||
}
|
||||
|
||||
if !isDefaultRegistry {
|
||||
serverAddress = registry.ConvertToHostname(serverAddress)
|
||||
}
|
||||
|
||||
authconfig, err := cli.ConfigFile().GetAuthConfig(serverAddress)
|
||||
if err != nil {
|
||||
return authconfig, err
|
||||
}
|
||||
|
||||
// Some links documenting this:
|
||||
// - https://code.google.com/archive/p/mintty/issues/56
|
||||
// - https://github.com/docker/docker/issues/15272
|
||||
@ -110,7 +97,7 @@ func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *types.AuthCon
|
||||
// will hit this if you attempt docker login from mintty where stdin
|
||||
// is a pipe, not a character based console.
|
||||
if flPassword == "" && !cli.In().IsTerminal() {
|
||||
return errors.Errorf("Error: Cannot perform an interactive login from a non TTY device")
|
||||
return authconfig, errors.Errorf("Error: Cannot perform an interactive login from a non TTY device")
|
||||
}
|
||||
|
||||
authconfig.Username = strings.TrimSpace(authconfig.Username)
|
||||
@ -128,12 +115,12 @@ func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *types.AuthCon
|
||||
}
|
||||
}
|
||||
if flUser == "" {
|
||||
return errors.Errorf("Error: Non-null Username Required")
|
||||
return authconfig, errors.Errorf("Error: Non-null Username Required")
|
||||
}
|
||||
if flPassword == "" {
|
||||
oldState, err := term.SaveState(cli.In().FD())
|
||||
if err != nil {
|
||||
return err
|
||||
return authconfig, err
|
||||
}
|
||||
fmt.Fprintf(cli.Out(), "Password: ")
|
||||
term.DisableEcho(cli.In().FD(), oldState)
|
||||
@ -143,14 +130,16 @@ func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *types.AuthCon
|
||||
|
||||
term.RestoreTerminal(cli.In().FD(), oldState)
|
||||
if flPassword == "" {
|
||||
return errors.Errorf("Error: Password Required")
|
||||
return authconfig, errors.Errorf("Error: Password Required")
|
||||
}
|
||||
}
|
||||
|
||||
authconfig.Username = flUser
|
||||
authconfig.Password = flPassword
|
||||
authconfig.ServerAddress = serverAddress
|
||||
authconfig.IdentityToken = ""
|
||||
|
||||
return nil
|
||||
return authconfig, nil
|
||||
}
|
||||
|
||||
func readInput(in io.Reader, out io.Writer) string {
|
||||
|
||||
@ -9,19 +9,11 @@ import (
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
registrytypes "github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const unencryptedWarning = `WARNING! Your password will be stored unencrypted in %s.
|
||||
Configure a credential helper to remove this warning. See
|
||||
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
|
||||
`
|
||||
|
||||
type loginOptions struct {
|
||||
serverAddress string
|
||||
user string
|
||||
@ -55,30 +47,10 @@ func NewLoginCommand(dockerCli command.Cli) *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
// unencryptedPrompt prompts the user to find out whether they want to continue
|
||||
// with insecure credential storage. If stdin is not a terminal, we assume they
|
||||
// want it (sadly), because people may have been scripting insecure logins and
|
||||
// we don't want to break them. Maybe they'll see the warning in their logs and
|
||||
// fix things.
|
||||
func unencryptedPrompt(dockerCli command.Streams, filename string) error {
|
||||
fmt.Fprintln(dockerCli.Err(), fmt.Sprintf(unencryptedWarning, filename))
|
||||
func runLogin(dockerCli command.Cli, opts loginOptions) error {
|
||||
ctx := context.Background()
|
||||
clnt := dockerCli.Client()
|
||||
|
||||
if dockerCli.In().IsTerminal() {
|
||||
if command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), "") {
|
||||
return nil
|
||||
}
|
||||
return errors.Errorf("User refused unencrypted credentials storage.")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type isFileStore interface {
|
||||
IsFileStore() bool
|
||||
GetFilename() string
|
||||
}
|
||||
|
||||
func verifyloginOptions(dockerCli command.Cli, opts *loginOptions) error {
|
||||
if opts.password != "" {
|
||||
fmt.Fprintln(dockerCli.Err(), "WARNING! Using --password via the CLI is insecure. Use --password-stdin.")
|
||||
if opts.passwordStdin {
|
||||
@ -99,15 +71,7 @@ func verifyloginOptions(dockerCli command.Cli, opts *loginOptions) error {
|
||||
opts.password = strings.TrimSuffix(string(contents), "\n")
|
||||
opts.password = strings.TrimSuffix(opts.password, "\r")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocyclo
|
||||
ctx := context.Background()
|
||||
clnt := dockerCli.Client()
|
||||
if err := verifyloginOptions(dockerCli, &opts); err != nil {
|
||||
return err
|
||||
}
|
||||
var (
|
||||
serverAddress string
|
||||
authServer = command.ElectAuthServer(ctx, dockerCli)
|
||||
@ -118,41 +82,21 @@ func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocycl
|
||||
serverAddress = authServer
|
||||
}
|
||||
|
||||
var err error
|
||||
var authConfig *types.AuthConfig
|
||||
var response registrytypes.AuthenticateOKBody
|
||||
isDefaultRegistry := serverAddress == authServer
|
||||
authConfig, err = command.GetDefaultAuthConfig(dockerCli, opts.user == "" && opts.password == "", serverAddress, isDefaultRegistry)
|
||||
if err == nil && authConfig.Username != "" && authConfig.Password != "" {
|
||||
response, err = loginWithCredStoreCreds(ctx, dockerCli, authConfig)
|
||||
}
|
||||
if err != nil || authConfig.Username == "" || authConfig.Password == "" {
|
||||
err = command.ConfigureAuth(dockerCli, opts.user, opts.password, authConfig, isDefaultRegistry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
response, err = clnt.RegistryLogin(ctx, *authConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
authConfig, err := command.ConfigureAuth(dockerCli, opts.user, opts.password, serverAddress, isDefaultRegistry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response, err := clnt.RegistryLogin(ctx, authConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if response.IdentityToken != "" {
|
||||
authConfig.Password = ""
|
||||
authConfig.IdentityToken = response.IdentityToken
|
||||
}
|
||||
|
||||
creds := dockerCli.ConfigFile().GetCredentialsStore(serverAddress)
|
||||
|
||||
store, isDefault := creds.(isFileStore)
|
||||
if isDefault {
|
||||
err = unencryptedPrompt(dockerCli, store.GetFilename())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := creds.Store(*authConfig); err != nil {
|
||||
if err := dockerCli.ConfigFile().GetCredentialsStore(serverAddress).Store(authConfig); err != nil {
|
||||
return errors.Errorf("Error saving credentials: %v", err)
|
||||
}
|
||||
|
||||
@ -161,17 +105,3 @@ func runLogin(dockerCli command.Cli, opts loginOptions) error { //nolint: gocycl
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func loginWithCredStoreCreds(ctx context.Context, dockerCli command.Cli, authConfig *types.AuthConfig) (registrytypes.AuthenticateOKBody, error) {
|
||||
fmt.Fprintf(dockerCli.Out(), "Authenticating with existing credentials...\n")
|
||||
cliClient := dockerCli.Client()
|
||||
response, err := cliClient.RegistryLogin(ctx, *authConfig)
|
||||
if err != nil {
|
||||
if client.IsErrUnauthorized(err) {
|
||||
fmt.Fprintf(dockerCli.Err(), "Stored credentials invalid or expired\n")
|
||||
} else {
|
||||
fmt.Fprintf(dockerCli.Err(), "Login did not succeed, error: %s\n", err)
|
||||
}
|
||||
}
|
||||
return response, err
|
||||
}
|
||||
|
||||
@ -1,157 +0,0 @@
|
||||
package registry
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/internal/test"
|
||||
"github.com/docker/docker/api/types"
|
||||
registrytypes "github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/gotestyourself/gotestyourself/assert"
|
||||
is "github.com/gotestyourself/gotestyourself/assert/cmp"
|
||||
"github.com/gotestyourself/gotestyourself/fs"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const userErr = "userunknownError"
|
||||
const testAuthErrMsg = "UNKNOWN_ERR"
|
||||
|
||||
var testAuthErrors = map[string]error{
|
||||
userErr: fmt.Errorf(testAuthErrMsg),
|
||||
}
|
||||
|
||||
var expiredPassword = "I_M_EXPIRED"
|
||||
|
||||
type fakeClient struct {
|
||||
client.Client
|
||||
}
|
||||
|
||||
// nolint: unparam
|
||||
func (c fakeClient) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registrytypes.AuthenticateOKBody, error) {
|
||||
if auth.Password == expiredPassword {
|
||||
return registrytypes.AuthenticateOKBody{}, fmt.Errorf("Invalid Username or Password")
|
||||
}
|
||||
err := testAuthErrors[auth.Username]
|
||||
return registrytypes.AuthenticateOKBody{}, err
|
||||
}
|
||||
|
||||
func TestLoginWithCredStoreCreds(t *testing.T) {
|
||||
testCases := []struct {
|
||||
inputAuthConfig types.AuthConfig
|
||||
expectedMsg string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
inputAuthConfig: types.AuthConfig{},
|
||||
expectedMsg: "Authenticating with existing credentials...\n",
|
||||
},
|
||||
{
|
||||
inputAuthConfig: types.AuthConfig{
|
||||
Username: userErr,
|
||||
},
|
||||
expectedMsg: "Authenticating with existing credentials...\n",
|
||||
expectedErr: fmt.Sprintf("Login did not succeed, error: %s\n", testAuthErrMsg),
|
||||
},
|
||||
// can't easily test the 401 case because client.IsErrUnauthorized(err) involving
|
||||
// creating an error of a private type
|
||||
}
|
||||
ctx := context.Background()
|
||||
for _, tc := range testCases {
|
||||
cli := (*test.FakeCli)(test.NewFakeCli(&fakeClient{}))
|
||||
errBuf := new(bytes.Buffer)
|
||||
cli.SetErr(errBuf)
|
||||
loginWithCredStoreCreds(ctx, cli, &tc.inputAuthConfig)
|
||||
outputString := cli.OutBuffer().String()
|
||||
assert.Check(t, is.Equal(tc.expectedMsg, outputString))
|
||||
errorString := errBuf.String()
|
||||
assert.Check(t, is.Equal(tc.expectedErr, errorString))
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunLogin(t *testing.T) {
|
||||
const storedServerAddress = "reg1"
|
||||
const validUsername = "u1"
|
||||
const validPassword = "p1"
|
||||
const validPassword2 = "p2"
|
||||
|
||||
validAuthConfig := types.AuthConfig{
|
||||
ServerAddress: storedServerAddress,
|
||||
Username: validUsername,
|
||||
Password: validPassword,
|
||||
}
|
||||
expiredAuthConfig := types.AuthConfig{
|
||||
ServerAddress: storedServerAddress,
|
||||
Username: validUsername,
|
||||
Password: expiredPassword,
|
||||
}
|
||||
testCases := []struct {
|
||||
inputLoginOption loginOptions
|
||||
inputStoredCred *types.AuthConfig
|
||||
expectedErr string
|
||||
expectedSavedCred types.AuthConfig
|
||||
}{
|
||||
{
|
||||
inputLoginOption: loginOptions{
|
||||
serverAddress: storedServerAddress,
|
||||
},
|
||||
inputStoredCred: &validAuthConfig,
|
||||
expectedErr: "",
|
||||
expectedSavedCred: validAuthConfig,
|
||||
},
|
||||
{
|
||||
inputLoginOption: loginOptions{
|
||||
serverAddress: storedServerAddress,
|
||||
},
|
||||
inputStoredCred: &expiredAuthConfig,
|
||||
expectedErr: "Error: Cannot perform an interactive login from a non TTY device",
|
||||
},
|
||||
{
|
||||
inputLoginOption: loginOptions{
|
||||
serverAddress: storedServerAddress,
|
||||
user: validUsername,
|
||||
password: validPassword2,
|
||||
},
|
||||
inputStoredCred: &validAuthConfig,
|
||||
expectedErr: "",
|
||||
expectedSavedCred: types.AuthConfig{
|
||||
ServerAddress: storedServerAddress,
|
||||
Username: validUsername,
|
||||
Password: validPassword2,
|
||||
},
|
||||
},
|
||||
{
|
||||
inputLoginOption: loginOptions{
|
||||
serverAddress: storedServerAddress,
|
||||
user: userErr,
|
||||
password: validPassword,
|
||||
},
|
||||
inputStoredCred: &validAuthConfig,
|
||||
expectedErr: testAuthErrMsg,
|
||||
},
|
||||
}
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
tmpFile := fs.NewFile(t, "test-run-login")
|
||||
defer tmpFile.Remove()
|
||||
cli := test.NewFakeCli(&fakeClient{})
|
||||
configfile := cli.ConfigFile()
|
||||
configfile.Filename = tmpFile.Path()
|
||||
|
||||
if tc.inputStoredCred != nil {
|
||||
cred := *tc.inputStoredCred
|
||||
configfile.GetCredentialsStore(cred.ServerAddress).Store(cred)
|
||||
}
|
||||
loginErr := runLogin(cli, tc.inputLoginOption)
|
||||
if tc.expectedErr != "" {
|
||||
assert.Error(t, loginErr, tc.expectedErr)
|
||||
return
|
||||
}
|
||||
assert.NilError(t, loginErr)
|
||||
savedCred, credStoreErr := configfile.GetCredentialsStore(tc.inputStoredCred.ServerAddress).Get(tc.inputStoredCred.ServerAddress)
|
||||
assert.Check(t, credStoreErr)
|
||||
assert.DeepEqual(t, tc.expectedSavedCred, savedCred)
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -1,8 +1,6 @@
|
||||
package command_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/gotestyourself/gotestyourself/assert"
|
||||
@ -11,7 +9,6 @@ import (
|
||||
"golang.org/x/net/context"
|
||||
|
||||
// Prevents a circular import with "github.com/docker/cli/internal/test"
|
||||
|
||||
. "github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/internal/test"
|
||||
"github.com/docker/docker/api/types"
|
||||
@ -23,19 +20,6 @@ type fakeClient struct {
|
||||
infoFunc func() (types.Info, error)
|
||||
}
|
||||
|
||||
var testAuthConfigs = []types.AuthConfig{
|
||||
{
|
||||
ServerAddress: "https://index.docker.io/v1/",
|
||||
Username: "u0",
|
||||
Password: "p0",
|
||||
},
|
||||
{
|
||||
ServerAddress: "server1.io",
|
||||
Username: "u1",
|
||||
Password: "p1",
|
||||
},
|
||||
}
|
||||
|
||||
func (cli *fakeClient) Info(_ context.Context) (types.Info, error) {
|
||||
if cli.infoFunc != nil {
|
||||
return cli.infoFunc()
|
||||
@ -90,58 +74,3 @@ func TestElectAuthServer(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDefaultAuthConfig(t *testing.T) {
|
||||
testCases := []struct {
|
||||
checkCredStore bool
|
||||
inputServerAddress string
|
||||
expectedErr string
|
||||
expectedAuthConfig types.AuthConfig
|
||||
}{
|
||||
{
|
||||
checkCredStore: false,
|
||||
inputServerAddress: "",
|
||||
expectedErr: "",
|
||||
expectedAuthConfig: types.AuthConfig{
|
||||
ServerAddress: "",
|
||||
Username: "",
|
||||
Password: "",
|
||||
},
|
||||
},
|
||||
{
|
||||
checkCredStore: true,
|
||||
inputServerAddress: testAuthConfigs[0].ServerAddress,
|
||||
expectedErr: "",
|
||||
expectedAuthConfig: testAuthConfigs[0],
|
||||
},
|
||||
{
|
||||
checkCredStore: true,
|
||||
inputServerAddress: testAuthConfigs[1].ServerAddress,
|
||||
expectedErr: "",
|
||||
expectedAuthConfig: testAuthConfigs[1],
|
||||
},
|
||||
{
|
||||
checkCredStore: true,
|
||||
inputServerAddress: fmt.Sprintf("https://%s", testAuthConfigs[1].ServerAddress),
|
||||
expectedErr: "",
|
||||
expectedAuthConfig: testAuthConfigs[1],
|
||||
},
|
||||
}
|
||||
cli := test.NewFakeCli(&fakeClient{})
|
||||
errBuf := new(bytes.Buffer)
|
||||
cli.SetErr(errBuf)
|
||||
for _, authconfig := range testAuthConfigs {
|
||||
cli.ConfigFile().GetCredentialsStore(authconfig.ServerAddress).Store(authconfig)
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
serverAddress := tc.inputServerAddress
|
||||
authconfig, err := GetDefaultAuthConfig(cli, tc.checkCredStore, serverAddress, serverAddress == "https://index.docker.io/v1/")
|
||||
if tc.expectedErr != "" {
|
||||
assert.Check(t, err != nil)
|
||||
assert.Check(t, is.Equal(tc.expectedErr, err.Error()))
|
||||
} else {
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.DeepEqual(tc.expectedAuthConfig, *authconfig))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -20,7 +20,7 @@ func newDeployCommand(dockerCli command.Cli) *cobra.Command {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
opts.Namespace = args[0]
|
||||
if dockerCli.ClientInfo().HasKubernetes() {
|
||||
kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags()))
|
||||
kli, err := kubernetes.WrapCli(dockerCli, cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
32
components/cli/cli/command/stack/kubernetes/check.go
Normal file
@ -0,0 +1,32 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
apiv1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// APIPresent checks that an API is installed.
|
||||
func APIPresent(config *rest.Config) error {
|
||||
log.Debugf("check API present at %s", config.Host)
|
||||
clients, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
groups, err := clients.Discovery().ServerGroups()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, group := range groups.Groups {
|
||||
if group.Name == apiv1beta1.SchemeGroupVersion.Group {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("could not find %s api. Install it on your cluster first", apiv1beta1.SchemeGroupVersion.Group)
|
||||
}
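
For orientation, here is a minimal sketch of how the new APIPresent check could be exercised on its own, assuming the docker/cli import path shown in this diff and a kubeconfig on disk (the path below is only an example):

```go
package main

import (
	"log"

	"github.com/docker/cli/cli/command/stack/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a *rest.Config from a kubeconfig file; the path is a placeholder.
	config, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		log.Fatalf("loading kubeconfig: %s", err)
	}
	// APIPresent walks the discovery API's server groups and returns an error
	// when the compose API group is not registered on the cluster.
	if err := kubernetes.APIPresent(config); err != nil {
		log.Fatal(err)
	}
	log.Println("compose API is installed")
}
```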
|
||||
@ -1,17 +1,16 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/kubernetes"
|
||||
composev1beta1 "github.com/docker/cli/kubernetes/client/clientset_generated/clientset/typed/compose/v1beta1"
|
||||
"github.com/docker/docker/pkg/homedir"
|
||||
"github.com/pkg/errors"
|
||||
flag "github.com/spf13/pflag"
|
||||
kubeclient "k8s.io/client-go/kubernetes"
|
||||
"github.com/spf13/cobra"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
// KubeCli holds kubernetes specifics (client, namespace) with the command.Cli
|
||||
@ -19,38 +18,28 @@ type KubeCli struct {
|
||||
command.Cli
|
||||
kubeConfig *restclient.Config
|
||||
kubeNamespace string
|
||||
clientSet *kubeclient.Clientset
|
||||
}
|
||||
|
||||
// Options contains resolved parameters to initialize kubernetes clients
|
||||
type Options struct {
|
||||
Namespace string
|
||||
Config string
|
||||
}
|
||||
|
||||
// NewOptions returns an Options initialized with command line flags
|
||||
func NewOptions(flags *flag.FlagSet) Options {
|
||||
var opts Options
|
||||
if namespace, err := flags.GetString("namespace"); err == nil {
|
||||
opts.Namespace = namespace
|
||||
}
|
||||
if kubeConfig, err := flags.GetString("kubeconfig"); err == nil {
|
||||
opts.Config = kubeConfig
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// WrapCli wraps command.Cli with kubernetes specifics
|
||||
func WrapCli(dockerCli command.Cli, opts Options) (*KubeCli, error) {
|
||||
func WrapCli(dockerCli command.Cli, cmd *cobra.Command) (*KubeCli, error) {
|
||||
var err error
|
||||
cli := &KubeCli{
|
||||
Cli: dockerCli,
|
||||
kubeNamespace: "default",
|
||||
}
|
||||
if opts.Namespace != "" {
|
||||
cli.kubeNamespace = opts.Namespace
|
||||
if cmd.Flags().Changed("namespace") {
|
||||
cli.kubeNamespace, err = cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
kubeConfig := ""
|
||||
if cmd.Flags().Changed("kubeconfig") {
|
||||
kubeConfig, err = cmd.Flags().GetString("kubeconfig")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
kubeConfig := opts.Config
|
||||
if kubeConfig == "" {
|
||||
if config := os.Getenv("KUBECONFIG"); config != "" {
|
||||
kubeConfig = config
|
||||
@ -58,18 +47,13 @@ func WrapCli(dockerCli command.Cli, opts Options) (*KubeCli, error) {
|
||||
kubeConfig = filepath.Join(homedir.Get(), ".kube/config")
|
||||
}
|
||||
}
|
||||
config, err := kubernetes.NewKubernetesConfig(kubeConfig)
|
||||
|
||||
config, err := clientcmd.BuildConfigFromFlags("", kubeConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("Failed to load kubernetes configuration file '%s'", kubeConfig)
|
||||
}
|
||||
cli.kubeConfig = config
|
||||
|
||||
clientSet, err := kubeclient.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cli.clientSet = clientSet
|
||||
|
||||
return cli, nil
|
||||
}
|
||||
|
||||
@ -78,20 +62,15 @@ func (c *KubeCli) composeClient() (*Factory, error) {
|
||||
}
|
||||
|
||||
func (c *KubeCli) stacks() (composev1beta1.StackInterface, error) {
|
||||
version, err := kubernetes.GetStackAPIVersion(c.clientSet)
|
||||
|
||||
err := APIPresent(c.kubeConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch version {
|
||||
case kubernetes.StackAPIV1Beta1:
|
||||
clientSet, err := composev1beta1.NewForConfig(c.kubeConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return clientSet.Stacks(c.kubeNamespace), nil
|
||||
default:
|
||||
return nil, errors.Errorf("no supported Stack API version")
|
||||
clientSet, err := composev1beta1.NewForConfig(c.kubeConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return clientSet.Stacks(c.kubeNamespace), nil
|
||||
}
|
||||
|
||||
@ -116,31 +116,25 @@ func (t tasksBySlot) Less(i, j int) bool {
|
||||
return t[j].Meta.CreatedAt.Before(t[i].CreatedAt)
|
||||
}
|
||||
|
||||
const (
|
||||
publishedServiceSuffix = "-published"
|
||||
publishedOnRandomPortSuffix = "-random-ports"
|
||||
)
|
||||
|
||||
// Replicas conversion
|
||||
func replicasToServices(replicas *appsv1beta2.ReplicaSetList, services *apiv1.ServiceList) ([]swarm.Service, map[string]formatter.ServiceListInfo, error) {
|
||||
result := make([]swarm.Service, len(replicas.Items))
|
||||
infos := make(map[string]formatter.ServiceListInfo, len(replicas.Items))
|
||||
for i, r := range replicas.Items {
|
||||
serviceName := r.Labels[labels.ForServiceName]
|
||||
serviceHeadless, ok := findService(services, serviceName)
|
||||
service, ok := findService(services, r.Labels[labels.ForServiceName])
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("could not find service '%s'", serviceName)
|
||||
return nil, nil, fmt.Errorf("could not find service '%s'", r.Labels[labels.ForServiceName])
|
||||
}
|
||||
stack, ok := serviceHeadless.Labels[labels.ForStackName]
|
||||
stack, ok := service.Labels[labels.ForStackName]
|
||||
if ok {
|
||||
stack += "_"
|
||||
}
|
||||
uid := string(serviceHeadless.UID)
|
||||
uid := string(service.UID)
|
||||
s := swarm.Service{
|
||||
ID: uid,
|
||||
Spec: swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
Name: stack + serviceHeadless.Name,
|
||||
Name: stack + service.Name,
|
||||
},
|
||||
TaskTemplate: swarm.TaskSpec{
|
||||
ContainerSpec: &swarm.ContainerSpec{
|
||||
@ -149,11 +143,17 @@ func replicasToServices(replicas *appsv1beta2.ReplicaSetList, services *apiv1.Se
|
||||
},
|
||||
},
|
||||
}
|
||||
if serviceNodePort, ok := findService(services, serviceName+publishedOnRandomPortSuffix); ok && serviceNodePort.Spec.Type == apiv1.ServiceTypeNodePort {
|
||||
s.Endpoint = serviceEndpoint(serviceNodePort, swarm.PortConfigPublishModeHost)
|
||||
}
|
||||
if serviceLoadBalancer, ok := findService(services, serviceName+publishedServiceSuffix); ok && serviceLoadBalancer.Spec.Type == apiv1.ServiceTypeLoadBalancer {
|
||||
s.Endpoint = serviceEndpoint(serviceLoadBalancer, swarm.PortConfigPublishModeIngress)
|
||||
if service.Spec.Type == apiv1.ServiceTypeLoadBalancer {
|
||||
configs := make([]swarm.PortConfig, len(service.Spec.Ports))
|
||||
for i, p := range service.Spec.Ports {
|
||||
configs[i] = swarm.PortConfig{
|
||||
PublishMode: swarm.PortConfigPublishModeIngress,
|
||||
PublishedPort: uint32(p.Port),
|
||||
TargetPort: uint32(p.TargetPort.IntValue()),
|
||||
Protocol: toSwarmProtocol(p.Protocol),
|
||||
}
|
||||
}
|
||||
s.Endpoint = swarm.Endpoint{Ports: configs}
|
||||
}
|
||||
result[i] = s
|
||||
infos[uid] = formatter.ServiceListInfo{
|
||||
@ -172,16 +172,3 @@ func findService(services *apiv1.ServiceList, name string) (apiv1.Service, bool)
|
||||
}
|
||||
return apiv1.Service{}, false
|
||||
}
|
||||
|
||||
func serviceEndpoint(service apiv1.Service, publishMode swarm.PortConfigPublishMode) swarm.Endpoint {
|
||||
configs := make([]swarm.PortConfig, len(service.Spec.Ports))
|
||||
for i, p := range service.Spec.Ports {
|
||||
configs[i] = swarm.PortConfig{
|
||||
PublishMode: publishMode,
|
||||
PublishedPort: uint32(p.Port),
|
||||
TargetPort: uint32(p.TargetPort.IntValue()),
|
||||
Protocol: toSwarmProtocol(p.Protocol),
|
||||
}
|
||||
}
|
||||
return swarm.Endpoint{Ports: configs}
|
||||
}
|
||||
|
||||
@ -1,192 +0,0 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/docker/cli/cli/command/formatter"
|
||||
"github.com/docker/cli/kubernetes/labels"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/gotestyourself/gotestyourself/assert"
|
||||
appsv1beta2 "k8s.io/api/apps/v1beta2"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
apimachineryTypes "k8s.io/apimachinery/pkg/types"
|
||||
apimachineryUtil "k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
func TestReplicasConversionNeedsAService(t *testing.T) {
|
||||
replicas := appsv1beta2.ReplicaSetList{
|
||||
Items: []appsv1beta2.ReplicaSet{makeReplicaSet("unknown", 0, 0)},
|
||||
}
|
||||
services := apiv1.ServiceList{}
|
||||
_, _, err := replicasToServices(&replicas, &services)
|
||||
assert.ErrorContains(t, err, "could not find service")
|
||||
}
|
||||
|
||||
func TestKubernetesServiceToSwarmServiceConversion(t *testing.T) {
|
||||
testCases := []struct {
|
||||
replicas *appsv1beta2.ReplicaSetList
|
||||
services *apiv1.ServiceList
|
||||
expectedServices []swarm.Service
|
||||
expectedListInfo map[string]formatter.ServiceListInfo
|
||||
}{
|
||||
// Match replicas with headless stack services
|
||||
{
|
||||
&appsv1beta2.ReplicaSetList{
|
||||
Items: []appsv1beta2.ReplicaSet{
|
||||
makeReplicaSet("service1", 2, 5),
|
||||
makeReplicaSet("service2", 3, 3),
|
||||
},
|
||||
},
|
||||
&apiv1.ServiceList{
|
||||
Items: []apiv1.Service{
|
||||
makeKubeService("service1", "stack", "uid1", apiv1.ServiceTypeClusterIP, nil),
|
||||
makeKubeService("service2", "stack", "uid2", apiv1.ServiceTypeClusterIP, nil),
|
||||
makeKubeService("service3", "other-stack", "uid2", apiv1.ServiceTypeClusterIP, nil),
|
||||
},
|
||||
},
|
||||
[]swarm.Service{
|
||||
makeSwarmService("stack_service1", "uid1", nil),
|
||||
makeSwarmService("stack_service2", "uid2", nil),
|
||||
},
|
||||
map[string]formatter.ServiceListInfo{
|
||||
"uid1": {"replicated", "2/5"},
|
||||
"uid2": {"replicated", "3/3"},
|
||||
},
|
||||
},
|
||||
// Headless service and LoadBalancer Service are tied to the same Swarm service
|
||||
{
|
||||
&appsv1beta2.ReplicaSetList{
|
||||
Items: []appsv1beta2.ReplicaSet{
|
||||
makeReplicaSet("service", 1, 1),
|
||||
},
|
||||
},
|
||||
&apiv1.ServiceList{
|
||||
Items: []apiv1.Service{
|
||||
makeKubeService("service", "stack", "uid1", apiv1.ServiceTypeClusterIP, nil),
|
||||
makeKubeService("service-published", "stack", "uid2", apiv1.ServiceTypeLoadBalancer, []apiv1.ServicePort{
|
||||
{
|
||||
Port: 80,
|
||||
TargetPort: apimachineryUtil.FromInt(80),
|
||||
Protocol: apiv1.ProtocolTCP,
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
[]swarm.Service{
|
||||
makeSwarmService("stack_service", "uid1", []swarm.PortConfig{
|
||||
{
|
||||
PublishMode: swarm.PortConfigPublishModeIngress,
|
||||
PublishedPort: 80,
|
||||
TargetPort: 80,
|
||||
Protocol: swarm.PortConfigProtocolTCP,
|
||||
},
|
||||
}),
|
||||
},
|
||||
map[string]formatter.ServiceListInfo{
|
||||
"uid1": {"replicated", "1/1"},
|
||||
},
|
||||
},
|
||||
// Headless service and NodePort Service are tied to the same Swarm service
|
||||
|
||||
{
|
||||
&appsv1beta2.ReplicaSetList{
|
||||
Items: []appsv1beta2.ReplicaSet{
|
||||
makeReplicaSet("service", 1, 1),
|
||||
},
|
||||
},
|
||||
&apiv1.ServiceList{
|
||||
Items: []apiv1.Service{
|
||||
makeKubeService("service", "stack", "uid1", apiv1.ServiceTypeClusterIP, nil),
|
||||
makeKubeService("service-random-ports", "stack", "uid2", apiv1.ServiceTypeNodePort, []apiv1.ServicePort{
|
||||
{
|
||||
Port: 35666,
|
||||
TargetPort: apimachineryUtil.FromInt(80),
|
||||
Protocol: apiv1.ProtocolTCP,
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
[]swarm.Service{
|
||||
makeSwarmService("stack_service", "uid1", []swarm.PortConfig{
|
||||
{
|
||||
PublishMode: swarm.PortConfigPublishModeHost,
|
||||
PublishedPort: 35666,
|
||||
TargetPort: 80,
|
||||
Protocol: swarm.PortConfigProtocolTCP,
|
||||
},
|
||||
}),
|
||||
},
|
||||
map[string]formatter.ServiceListInfo{
|
||||
"uid1": {"replicated", "1/1"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
swarmServices, listInfo, err := replicasToServices(tc.replicas, tc.services)
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, tc.expectedServices, swarmServices)
|
||||
assert.DeepEqual(t, tc.expectedListInfo, listInfo)
|
||||
}
|
||||
}
|
||||
|
||||
func makeReplicaSet(service string, available, replicas int32) appsv1beta2.ReplicaSet {
|
||||
return appsv1beta2.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
labels.ForServiceName: service,
|
||||
},
|
||||
},
|
||||
Spec: appsv1beta2.ReplicaSetSpec{
|
||||
Template: apiv1.PodTemplateSpec{
|
||||
Spec: apiv1.PodSpec{
|
||||
Containers: []apiv1.Container{
|
||||
{
|
||||
Image: "image",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: appsv1beta2.ReplicaSetStatus{
|
||||
AvailableReplicas: available,
|
||||
Replicas: replicas,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func makeKubeService(service, stack, uid string, serviceType apiv1.ServiceType, ports []apiv1.ServicePort) apiv1.Service {
|
||||
return apiv1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
labels.ForStackName: stack,
|
||||
},
|
||||
Name: service,
|
||||
UID: apimachineryTypes.UID(uid),
|
||||
},
|
||||
Spec: apiv1.ServiceSpec{
|
||||
Type: serviceType,
|
||||
Ports: ports,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func makeSwarmService(service, id string, ports []swarm.PortConfig) swarm.Service {
|
||||
return swarm.Service{
|
||||
ID: id,
|
||||
Spec: swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
Name: service,
|
||||
},
|
||||
TaskTemplate: swarm.TaskSpec{
|
||||
ContainerSpec: &swarm.ContainerSpec{
|
||||
Image: "image",
|
||||
},
|
||||
},
|
||||
},
|
||||
Endpoint: swarm.Endpoint{
|
||||
Ports: ports,
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -19,7 +19,7 @@ func newListCommand(dockerCli command.Cli) *cobra.Command {
|
||||
Args: cli.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if dockerCli.ClientInfo().HasKubernetes() {
|
||||
kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags()))
|
||||
kli, err := kubernetes.WrapCli(dockerCli, cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -20,7 +20,7 @@ func newPsCommand(dockerCli command.Cli) *cobra.Command {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
opts.Namespace = args[0]
|
||||
if dockerCli.ClientInfo().HasKubernetes() {
|
||||
kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags()))
|
||||
kli, err := kubernetes.WrapCli(dockerCli, cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -20,7 +20,7 @@ func newRemoveCommand(dockerCli command.Cli) *cobra.Command {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
opts.Namespaces = args
|
||||
if dockerCli.ClientInfo().HasKubernetes() {
|
||||
kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags()))
|
||||
kli, err := kubernetes.WrapCli(dockerCli, cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -20,7 +20,7 @@ func newServicesCommand(dockerCli command.Cli) *cobra.Command {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
opts.Namespace = args[0]
|
||||
if dockerCli.ClientInfo().HasKubernetes() {
|
||||
kli, err := kubernetes.WrapCli(dockerCli, kubernetes.NewOptions(cmd.Flags()))
|
||||
kli, err := kubernetes.WrapCli(dockerCli, cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -39,7 +39,7 @@ func deployBundle(ctx context.Context, dockerCli command.Cli, opts options.Deplo
|
||||
networks := make(map[string]types.NetworkCreate)
|
||||
for _, service := range bundle.Services {
|
||||
for _, networkName := range service.Networks {
|
||||
networks[namespace.Scope(networkName)] = types.NetworkCreate{
|
||||
networks[networkName] = types.NetworkCreate{
|
||||
Labels: convert.AddStackLabel(namespace, nil),
|
||||
}
|
||||
}
|
||||
|
||||
@ -181,7 +181,8 @@ func createNetworks(
|
||||
existingNetworkMap[network.Name] = network
|
||||
}
|
||||
|
||||
for name, createOpts := range networks {
|
||||
for internalName, createOpts := range networks {
|
||||
name := namespace.Scope(internalName)
|
||||
if _, exists := existingNetworkMap[name]; exists {
|
||||
continue
|
||||
}
|
||||
@ -192,7 +193,7 @@ func createNetworks(
|
||||
|
||||
fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name)
|
||||
if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil {
|
||||
return errors.Wrapf(err, "failed to create network %s", name)
|
||||
return errors.Wrapf(err, "failed to create network %s", internalName)
|
||||
}
|
||||
}
|
||||
return nil
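
For context, the map handed to createNetworks is now keyed by the compose-internal network name, and the stack-scoped engine name is only derived at creation time, so failure messages refer to the name the user wrote in the compose file. A hedged sketch of the scoping helper used above (the exact output format is an assumption):

```go
package main

import (
	"fmt"

	"github.com/docker/cli/cli/compose/convert"
)

func main() {
	namespace := convert.NewNamespace("mystack")

	// Internal compose names are what appear in error messages; Scope()
	// derives the name actually used on the Docker engine.
	for _, internalName := range []string{"default", "hostnet"} {
		fmt.Printf("%s -> %s\n", internalName, namespace.Scope(internalName))
	}
	// Expected, assuming Scope prefixes the stack name with an underscore:
	// default -> mystack_default
	// hostnet -> mystack_hostnet
}
```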
|
||||
@ -248,6 +249,12 @@ func deployServices(
|
||||
// service update.
|
||||
serviceSpec.TaskTemplate.ContainerSpec.Image = service.Spec.TaskTemplate.ContainerSpec.Image
|
||||
}
|
||||
|
||||
// Stack deploy does not have a `--force` option. Preserve existing ForceUpdate
|
||||
// value so that tasks are not re-deployed if not updated.
|
||||
// TODO move this to API client?
|
||||
serviceSpec.TaskTemplate.ForceUpdate = service.Spec.TaskTemplate.ForceUpdate
|
||||
|
||||
response, err := apiClient.ServiceUpdate(
|
||||
ctx,
|
||||
service.ID,
|
||||
|
||||
@ -27,7 +27,8 @@ func TestPruneServices(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestServiceUpdateResolveImageChanged tests that the service's
|
||||
// image digest is preserved if the image did not change in the compose file
|
||||
// image digest, and "ForceUpdate" is preserved if the image did not change in
|
||||
// the compose file
|
||||
func TestServiceUpdateResolveImageChanged(t *testing.T) {
|
||||
namespace := convert.NewNamespace("mystack")
|
||||
|
||||
@ -49,6 +50,7 @@ func TestServiceUpdateResolveImageChanged(t *testing.T) {
|
||||
ContainerSpec: &swarm.ContainerSpec{
|
||||
Image: "foobar:1.2.3@sha256:deadbeef",
|
||||
},
|
||||
ForceUpdate: 123,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -65,18 +67,21 @@ func TestServiceUpdateResolveImageChanged(t *testing.T) {
|
||||
image string
|
||||
expectedQueryRegistry bool
|
||||
expectedImage string
|
||||
expectedForceUpdate uint64
|
||||
}{
|
||||
// Image not changed
|
||||
{
|
||||
image: "foobar:1.2.3",
|
||||
expectedQueryRegistry: false,
|
||||
expectedImage: "foobar:1.2.3@sha256:deadbeef",
|
||||
expectedForceUpdate: 123,
|
||||
},
|
||||
// Image changed
|
||||
{
|
||||
image: "foobar:1.2.4",
|
||||
expectedQueryRegistry: true,
|
||||
expectedImage: "foobar:1.2.4",
|
||||
expectedForceUpdate: 123,
|
||||
},
|
||||
}
|
||||
|
||||
@ -95,8 +100,9 @@ func TestServiceUpdateResolveImageChanged(t *testing.T) {
|
||||
}
|
||||
err := deployServices(ctx, client, spec, namespace, false, ResolveImageChanged)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal(testcase.expectedQueryRegistry, receivedOptions.QueryRegistry))
|
||||
assert.Check(t, is.Equal(testcase.expectedImage, receivedService.TaskTemplate.ContainerSpec.Image))
|
||||
assert.Check(t, is.Equal(receivedOptions.QueryRegistry, testcase.expectedQueryRegistry))
|
||||
assert.Check(t, is.Equal(receivedService.TaskTemplate.ContainerSpec.Image, testcase.expectedImage))
|
||||
assert.Check(t, is.Equal(receivedService.TaskTemplate.ForceUpdate, testcase.expectedForceUpdate))
|
||||
|
||||
receivedService = swarm.ServiceSpec{}
|
||||
receivedOptions = types.ServiceUpdateOptions{}
|
||||
|
||||
@ -4,18 +4,16 @@ import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
"text/tabwriter"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/kubernetes"
|
||||
"github.com/docker/cli/templates"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/net/context"
|
||||
kubernetesClient "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
var versionTemplate = `{{with .Client -}}
|
||||
@ -51,11 +49,10 @@ Server:{{if ne .Platform.Name ""}} {{.Platform.Name}}{{end}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- end}}{{- end}}`
|
||||
{{- end}}{{end}}`
|
||||
|
||||
type versionOptions struct {
|
||||
format string
|
||||
kubeConfig string
|
||||
format string
|
||||
}
|
||||
|
||||
// versionInfo contains version information of both the Client, and Server
|
||||
@ -79,11 +76,6 @@ type clientVersion struct {
|
||||
Orchestrator string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type kubernetesVersion struct {
|
||||
Kubernetes string
|
||||
StackAPI string
|
||||
}
|
||||
|
||||
// ServerOK returns true when the client could connect to the docker server
|
||||
// and parse the information received. It returns false otherwise.
|
||||
func (v versionInfo) ServerOK() bool {
|
||||
@ -104,10 +96,8 @@ func NewVersionCommand(dockerCli command.Cli) *cobra.Command {
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
|
||||
flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template")
|
||||
flags.StringVarP(&opts.kubeConfig, "kubeconfig", "k", "", "Kubernetes config file")
|
||||
flags.SetAnnotation("kubeconfig", "kubernetes", nil)
|
||||
flags.SetAnnotation("kubeconfig", "experimentalCLI", nil)
|
||||
|
||||
return cmd
|
||||
}
|
||||
@ -155,22 +145,15 @@ func runVersion(dockerCli command.Cli, opts *versionOptions) error {
|
||||
sv, err := dockerCli.Client().ServerVersion(context.Background())
|
||||
if err == nil {
|
||||
vd.Server = &sv
|
||||
kubeVersion := getKubernetesVersion(dockerCli, opts.kubeConfig)
|
||||
foundEngine := false
|
||||
foundKubernetes := false
|
||||
for _, component := range sv.Components {
|
||||
switch component.Name {
|
||||
case "Engine":
|
||||
if component.Name == "Engine" {
|
||||
foundEngine = true
|
||||
buildTime, ok := component.Details["BuildTime"]
|
||||
if ok {
|
||||
component.Details["BuildTime"] = reformatDate(buildTime)
|
||||
}
|
||||
case "Kubernetes":
|
||||
foundKubernetes = true
|
||||
if _, ok := component.Details["StackAPI"]; !ok && kubeVersion != nil {
|
||||
component.Details["StackAPI"] = kubeVersion.StackAPI
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
@ -190,21 +173,13 @@ func runVersion(dockerCli command.Cli, opts *versionOptions) error {
|
||||
},
|
||||
})
|
||||
}
|
||||
if !foundKubernetes && kubeVersion != nil {
|
||||
vd.Server.Components = append(vd.Server.Components, types.ComponentVersion{
|
||||
Name: "Kubernetes",
|
||||
Version: kubeVersion.Kubernetes,
|
||||
Details: map[string]string{
|
||||
"StackAPI": kubeVersion.StackAPI,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if err2 := tmpl.Execute(dockerCli.Out(), vd); err2 != nil && err == nil {
|
||||
t := tabwriter.NewWriter(dockerCli.Out(), 15, 1, 1, ' ', 0)
|
||||
if err2 := tmpl.Execute(t, vd); err2 != nil && err == nil {
|
||||
err = err2
|
||||
}
|
||||
dockerCli.Out().Write([]byte{'\n'})
|
||||
t.Write([]byte("\n"))
|
||||
t.Flush()
|
||||
return err
|
||||
}
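
The tabwriter introduced above is what aligns the version fields into columns before printing; a standalone sketch of the same pattern with placeholder values:

```go
package main

import (
	"os"
	"text/tabwriter"
)

func main() {
	// Same parameters as the CLI uses: minimum cell width 15, tab width 1,
	// padding 1, pad with spaces, no extra flags.
	t := tabwriter.NewWriter(os.Stdout, 15, 1, 1, ' ', 0)
	t.Write([]byte("Version:\tunknown-version\n"))
	t.Write([]byte("API version:\tunknown\n"))
	t.Write([]byte("Go version:\tgo1.9.5\n"))
	t.Write([]byte{'\n'})
	// Nothing reaches os.Stdout until Flush computes the column widths.
	t.Flush()
}
```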
|
||||
|
||||
@ -216,45 +191,3 @@ func getDetailsOrder(v types.ComponentVersion) []string {
|
||||
sort.Strings(out)
|
||||
return out
|
||||
}
|
||||
|
||||
func getKubernetesVersion(dockerCli command.Cli, kubeConfig string) *kubernetesVersion {
|
||||
if !dockerCli.ClientInfo().HasKubernetes() {
|
||||
return nil
|
||||
}
|
||||
|
||||
version := kubernetesVersion{
|
||||
Kubernetes: "Unknown",
|
||||
StackAPI: "Unknown",
|
||||
}
|
||||
config, err := kubernetes.NewKubernetesConfig(kubeConfig)
|
||||
if err != nil {
|
||||
logrus.Debugf("failed to get Kubernetes configuration: %s", err)
|
||||
return &version
|
||||
}
|
||||
kubeClient, err := kubernetesClient.NewForConfig(config)
|
||||
if err != nil {
|
||||
logrus.Debugf("failed to get Kubernetes client: %s", err)
|
||||
return &version
|
||||
}
|
||||
version.StackAPI = getStackVersion(kubeClient)
|
||||
version.Kubernetes = getKubernetesServerVersion(kubeClient)
|
||||
return &version
|
||||
}
|
||||
|
||||
func getStackVersion(client *kubernetesClient.Clientset) string {
|
||||
apiVersion, err := kubernetes.GetStackAPIVersion(client)
|
||||
if err != nil {
|
||||
logrus.Debugf("failed to get Stack API version: %s", err)
|
||||
return "Unknown"
|
||||
}
|
||||
return string(apiVersion)
|
||||
}
|
||||
|
||||
func getKubernetesServerVersion(client *kubernetesClient.Clientset) string {
|
||||
kubeVersion, err := client.DiscoveryClient.ServerVersion()
|
||||
if err != nil {
|
||||
logrus.Debugf("failed to get Kubernetes server version: %s", err)
|
||||
return "Unknown"
|
||||
}
|
||||
return kubeVersion.String()
|
||||
}
|
||||
|
||||
@ -87,12 +87,7 @@ func Networks(namespace Namespace, networks networkMap, servicesNetworks map[str
|
||||
}
|
||||
createOpts.IPAM.Config = append(createOpts.IPAM.Config, config)
|
||||
}
|
||||
|
||||
networkName := namespace.Scope(internalName)
|
||||
if network.Name != "" {
|
||||
networkName = network.Name
|
||||
}
|
||||
result[networkName] = createOpts
|
||||
result[internalName] = createOpts
|
||||
}
|
||||
|
||||
return result, externalNetworks
|
||||
|
||||
@ -35,7 +35,6 @@ func TestNetworks(t *testing.T) {
|
||||
"outside": {},
|
||||
"default": {},
|
||||
"attachablenet": {},
|
||||
"named": {},
|
||||
}
|
||||
source := networkMap{
|
||||
"normal": composetypes.NetworkConfig{
|
||||
@ -63,17 +62,14 @@ func TestNetworks(t *testing.T) {
|
||||
Driver: "overlay",
|
||||
Attachable: true,
|
||||
},
|
||||
"named": composetypes.NetworkConfig{
|
||||
Name: "othername",
|
||||
},
|
||||
}
|
||||
expected := map[string]types.NetworkCreate{
|
||||
"foo_default": {
|
||||
"default": {
|
||||
Labels: map[string]string{
|
||||
LabelNamespace: "foo",
|
||||
},
|
||||
},
|
||||
"foo_normal": {
|
||||
"normal": {
|
||||
Driver: "overlay",
|
||||
IPAM: &network.IPAM{
|
||||
Driver: "driver",
|
||||
@ -91,21 +87,18 @@ func TestNetworks(t *testing.T) {
|
||||
"something": "labeled",
|
||||
},
|
||||
},
|
||||
"foo_attachablenet": {
|
||||
"attachablenet": {
|
||||
Driver: "overlay",
|
||||
Attachable: true,
|
||||
Labels: map[string]string{
|
||||
LabelNamespace: "foo",
|
||||
},
|
||||
},
|
||||
"othername": {
|
||||
Labels: map[string]string{LabelNamespace: "foo"},
|
||||
},
|
||||
}
|
||||
|
||||
networks, externals := Networks(namespace, source, serviceNetworks)
|
||||
assert.DeepEqual(t, expected, networks)
|
||||
assert.DeepEqual(t, []string{"special"}, externals)
|
||||
assert.Check(t, is.DeepEqual(expected, networks))
|
||||
assert.Check(t, is.DeepEqual([]string{"special"}, externals))
|
||||
}
|
||||
|
||||
func TestSecrets(t *testing.T) {
|
||||
|
||||
@ -229,7 +229,7 @@ func convertServiceNetworks(
|
||||
aliases = network.Aliases
|
||||
}
|
||||
target := namespace.Scope(networkName)
|
||||
if networkConfig.Name != "" {
|
||||
if networkConfig.External.External {
|
||||
target = networkConfig.Name
|
||||
}
|
||||
netAttachConfig := swarm.NetworkAttachmentConfig{
|
||||
|
||||
@ -10,7 +10,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/cli/compose/types"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"github.com/gotestyourself/gotestyourself/assert"
|
||||
is "github.com/gotestyourself/gotestyourself/assert/cmp"
|
||||
"github.com/sirupsen/logrus"
|
||||
@ -1358,40 +1357,3 @@ networks:
|
||||
assert.ErrorContains(t, err, "network.external.name and network.name conflict; only use network.name")
|
||||
assert.ErrorContains(t, err, "foo")
|
||||
}
|
||||
|
||||
func TestLoadNetworkWithName(t *testing.T) {
|
||||
config, err := loadYAML(`
|
||||
version: '3.5'
|
||||
services:
|
||||
hello-world:
|
||||
image: redis:alpine
|
||||
networks:
|
||||
- network1
|
||||
- network3
|
||||
|
||||
networks:
|
||||
network1:
|
||||
name: network2
|
||||
network3:
|
||||
`)
|
||||
assert.NilError(t, err)
|
||||
expected := &types.Config{
|
||||
Filename: "filename.yml",
|
||||
Version: "3.5",
|
||||
Services: types.Services{
|
||||
{
|
||||
Name: "hello-world",
|
||||
Image: "redis:alpine",
|
||||
Networks: map[string]*types.ServiceNetworkConfig{
|
||||
"network1": nil,
|
||||
"network3": nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
Networks: map[string]types.NetworkConfig{
|
||||
"network1": {Name: "network2"},
|
||||
"network3": {},
|
||||
},
|
||||
}
|
||||
assert.DeepEqual(t, config, expected, cmpopts.EquateEmpty())
|
||||
}
|
||||
|
||||
@ -61,7 +61,7 @@ func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig,
|
||||
}
|
||||
for name, overrideService := range overrideServices {
|
||||
if baseService, ok := baseServices[name]; ok {
|
||||
if err := mergo.Merge(&baseService, &overrideService, mergo.WithOverride, mergo.WithTransformers(specials)); err != nil {
|
||||
if err := mergo.Merge(&baseService, &overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(specials)); err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge service %s", name)
|
||||
}
|
||||
baseServices[name] = baseService
|
||||
@ -213,21 +213,21 @@ func mapByName(services []types.ServiceConfig) map[string]types.ServiceConfig {
|
||||
}
|
||||
|
||||
func mergeVolumes(base, override map[string]types.VolumeConfig) (map[string]types.VolumeConfig, error) {
|
||||
err := mergo.Map(&base, &override)
|
||||
err := mergo.Map(&base, &override, mergo.WithOverride)
|
||||
return base, err
|
||||
}
|
||||
|
||||
func mergeNetworks(base, override map[string]types.NetworkConfig) (map[string]types.NetworkConfig, error) {
|
||||
err := mergo.Map(&base, &override)
|
||||
err := mergo.Map(&base, &override, mergo.WithOverride)
|
||||
return base, err
|
||||
}
|
||||
|
||||
func mergeSecrets(base, override map[string]types.SecretConfig) (map[string]types.SecretConfig, error) {
|
||||
err := mergo.Map(&base, &override)
|
||||
err := mergo.Map(&base, &override, mergo.WithOverride)
|
||||
return base, err
|
||||
}
|
||||
|
||||
func mergeConfigs(base, override map[string]types.ConfigObjConfig) (map[string]types.ConfigObjConfig, error) {
|
||||
err := mergo.Map(&base, &override)
|
||||
err := mergo.Map(&base, &override, mergo.WithOverride)
|
||||
return base, err
|
||||
}
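
The volume, network, secret, and config merge helpers above now pass mergo.WithOverride so that values from a later compose file replace those from the base file instead of being dropped. A hedged sketch of the difference, using a made-up struct in place of the compose types:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type netConfig struct {
	Driver     string
	Attachable bool
}

func main() {
	base := map[string]netConfig{"default": {Driver: "bridge"}}
	override := map[string]netConfig{"default": {Driver: "overlay", Attachable: true}}

	// Without WithOverride, the non-empty Driver from base would be kept;
	// with it, the override file wins for every field it sets.
	if err := mergo.Map(&base, &override, mergo.WithOverride); err != nil {
		fmt.Println("merge failed:", err)
		return
	}
	fmt.Printf("%+v\n", base["default"]) // {Driver:overlay Attachable:true}
}
```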
|
||||
|
||||
@ -711,7 +711,7 @@ func TestLoadMultipleUlimits(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadMultipleNetworks(t *testing.T) {
|
||||
func TestLoadMultipleServiceNetworks(t *testing.T) {
|
||||
networkCases := []struct {
|
||||
name string
|
||||
networkBase map[string]interface{}
|
||||
@ -943,3 +943,74 @@ func TestLoadMultipleConfigs(t *testing.T) {
|
||||
Configs: map[string]types.ConfigObjConfig{},
|
||||
}, config)
|
||||
}
|
||||
|
||||
// Issue#972
|
||||
func TestLoadMultipleNetworks(t *testing.T) {
|
||||
base := map[string]interface{}{
|
||||
"version": "3.4",
|
||||
"services": map[string]interface{}{
|
||||
"foo": map[string]interface{}{
|
||||
"image": "baz",
|
||||
},
|
||||
},
|
||||
"volumes": map[string]interface{}{},
|
||||
"networks": map[string]interface{}{
|
||||
"hostnet": map[string]interface{}{
|
||||
"driver": "overlay",
|
||||
"ipam": map[string]interface{}{
|
||||
"driver": "default",
|
||||
"config": []interface{}{
|
||||
map[string]interface{}{
|
||||
"subnet": "10.0.0.0/20",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"secrets": map[string]interface{}{},
|
||||
"configs": map[string]interface{}{},
|
||||
}
|
||||
override := map[string]interface{}{
|
||||
"version": "3.4",
|
||||
"services": map[string]interface{}{},
|
||||
"volumes": map[string]interface{}{},
|
||||
"networks": map[string]interface{}{
|
||||
"hostnet": map[string]interface{}{
|
||||
"external": map[string]interface{}{
|
||||
"name": "host",
|
||||
},
|
||||
},
|
||||
},
|
||||
"secrets": map[string]interface{}{},
|
||||
"configs": map[string]interface{}{},
|
||||
}
|
||||
configDetails := types.ConfigDetails{
|
||||
ConfigFiles: []types.ConfigFile{
|
||||
{Filename: "base.yml", Config: base},
|
||||
{Filename: "override.yml", Config: override},
|
||||
},
|
||||
}
|
||||
config, err := Load(configDetails)
|
||||
assert.NilError(t, err)
|
||||
assert.DeepEqual(t, &types.Config{
|
||||
Filename: "base.yml",
|
||||
Version: "3.4",
|
||||
Services: []types.ServiceConfig{
|
||||
{
|
||||
Name: "foo",
|
||||
Image: "baz",
|
||||
Environment: types.MappingWithEquals{},
|
||||
}},
|
||||
Networks: map[string]types.NetworkConfig{
|
||||
"hostnet": {
|
||||
Name: "host",
|
||||
External: types.External{
|
||||
External: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: map[string]types.VolumeConfig{},
|
||||
Secrets: map[string]types.SecretConfig{},
|
||||
Configs: map[string]types.ConfigObjConfig{},
|
||||
}, config)
|
||||
}
|
||||
|
||||
@ -7,7 +7,7 @@ import (
|
||||
)
|
||||
|
||||
var delimiter = "\\$"
|
||||
var substitution = "[_a-z][_a-z0-9]*(?::?[-?][^}]*)?"
|
||||
var substitution = "[_a-z][_a-z0-9]*(?::?-[^}]+)?"
|
||||
|
||||
var patternString = fmt.Sprintf(
|
||||
"%s(?i:(?P<escaped>%s)|(?P<named>%s)|{(?P<braced>%s)}|(?P<invalid>))",
|
||||
@ -37,78 +37,57 @@ func Substitute(template string, mapping Mapping) (string, error) {
|
||||
var err error
|
||||
result := pattern.ReplaceAllStringFunc(template, func(substring string) string {
|
||||
matches := pattern.FindStringSubmatch(substring)
|
||||
groups := matchGroups(matches)
|
||||
if escaped := groups["escaped"]; escaped != "" {
|
||||
return escaped
|
||||
groups := make(map[string]string)
|
||||
for i, name := range pattern.SubexpNames() {
|
||||
if i != 0 {
|
||||
groups[name] = matches[i]
|
||||
}
|
||||
}
|
||||
|
||||
substitution := groups["named"]
|
||||
if substitution == "" {
|
||||
substitution = groups["braced"]
|
||||
}
|
||||
|
||||
switch {
|
||||
|
||||
case substitution == "":
|
||||
err = &InvalidTemplateError{Template: template}
|
||||
return ""
|
||||
|
||||
// Soft default (fall back if unset or empty)
|
||||
case strings.Contains(substitution, ":-"):
|
||||
name, defaultValue := partition(substitution, ":-")
|
||||
value, ok := mapping(name)
|
||||
if !ok || value == "" {
|
||||
return defaultValue
|
||||
}
|
||||
return value
|
||||
|
||||
// Hard default (fall back if-and-only-if empty)
|
||||
case strings.Contains(substitution, "-"):
|
||||
name, defaultValue := partition(substitution, "-")
|
||||
value, ok := mapping(name)
|
||||
if !ok {
|
||||
return defaultValue
|
||||
}
|
||||
return value
|
||||
|
||||
case strings.Contains(substitution, ":?"):
|
||||
name, errorMessage := partition(substitution, ":?")
|
||||
value, ok := mapping(name)
|
||||
if !ok || value == "" {
|
||||
err = &InvalidTemplateError{
|
||||
Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage),
|
||||
if substitution != "" {
|
||||
// Soft default (fall back if unset or empty)
|
||||
if strings.Contains(substitution, ":-") {
|
||||
name, defaultValue := partition(substitution, ":-")
|
||||
value, ok := mapping(name)
|
||||
if !ok || value == "" {
|
||||
return defaultValue
|
||||
}
|
||||
return ""
|
||||
return value
|
||||
}
|
||||
return value
|
||||
|
||||
case strings.Contains(substitution, "?"):
|
||||
name, errorMessage := partition(substitution, "?")
|
||||
value, ok := mapping(name)
|
||||
if !ok {
|
||||
err = &InvalidTemplateError{
|
||||
Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage),
|
||||
// Hard default (fall back if-and-only-if empty)
|
||||
if strings.Contains(substitution, "-") {
|
||||
name, defaultValue := partition(substitution, "-")
|
||||
value, ok := mapping(name)
|
||||
if !ok {
|
||||
return defaultValue
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
// No default (fall back to empty string)
|
||||
value, ok := mapping(substitution)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
value, _ := mapping(substitution)
|
||||
return value
|
||||
if escaped := groups["escaped"]; escaped != "" {
|
||||
return escaped
|
||||
}
|
||||
|
||||
err = &InvalidTemplateError{Template: template}
|
||||
return ""
|
||||
})
|
||||
|
||||
return result, err
|
||||
}
|
||||
|
||||
func matchGroups(matches []string) map[string]string {
|
||||
groups := make(map[string]string)
|
||||
for i, name := range pattern.SubexpNames()[1:] {
|
||||
groups[name] = matches[i+1]
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// Split the string at the first occurrence of sep, and return the part before the separator,
|
||||
// and the part after the separator.
|
||||
//
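
The rewritten Substitute above distinguishes ${VAR:-default} (fall back when VAR is unset or empty) from ${VAR-default} (fall back only when VAR is unset). A hedged usage sketch, assuming the package's Mapping type is func(string) (string, bool) as the surrounding code suggests:

```go
package main

import (
	"fmt"

	"github.com/docker/cli/cli/compose/template"
)

func main() {
	// EMPTY is set but empty; UNSET is not present at all.
	env := map[string]string{"EMPTY": ""}
	mapping := func(name string) (string, bool) {
		v, ok := env[name]
		return v, ok
	}

	soft, _ := template.Substitute("host=${EMPTY:-fallback}", mapping)
	hard, _ := template.Substitute("host=${EMPTY-fallback}", mapping)
	unset, _ := template.Substitute("host=${UNSET-fallback}", mapping)

	fmt.Println(soft)  // host=fallback  (empty counts as missing with :-)
	fmt.Println(hard)  // host=          (an empty but set value is kept with -)
	fmt.Println(unset) // host=fallback  (unset always falls back)
}
```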
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
package template
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/gotestyourself/gotestyourself/assert"
|
||||
@ -24,12 +23,6 @@ func TestEscaped(t *testing.T) {
|
||||
assert.Check(t, is.Equal("${foo}", result))
|
||||
}
|
||||
|
||||
func TestSubstituteNoMatch(t *testing.T) {
|
||||
result, err := Substitute("foo", defaultMapping)
|
||||
assert.NilError(t, err)
|
||||
assert.Equal(t, "foo", result)
|
||||
}
|
||||
|
||||
func TestInvalid(t *testing.T) {
|
||||
invalidTemplates := []string{
|
||||
"${",
|
||||
@ -88,63 +81,3 @@ func TestNonAlphanumericDefault(t *testing.T) {
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal("ok /non:-alphanumeric", result))
|
||||
}
|
||||
|
||||
func TestMandatoryVariableErrors(t *testing.T) {
|
||||
testCases := []struct {
|
||||
template string
|
||||
expectedError string
|
||||
}{
|
||||
{
|
||||
template: "not ok ${UNSET_VAR:?Mandatory Variable Unset}",
|
||||
expectedError: "required variable UNSET_VAR is missing a value: Mandatory Variable Unset",
|
||||
},
|
||||
{
|
||||
template: "not ok ${BAR:?Mandatory Variable Empty}",
|
||||
expectedError: "required variable BAR is missing a value: Mandatory Variable Empty",
|
||||
},
|
||||
{
|
||||
template: "not ok ${UNSET_VAR:?}",
|
||||
expectedError: "required variable UNSET_VAR is missing a value",
|
||||
},
|
||||
{
|
||||
template: "not ok ${UNSET_VAR?Mandatory Variable Unset}",
|
||||
expectedError: "required variable UNSET_VAR is missing a value: Mandatory Variable Unset",
|
||||
},
|
||||
{
|
||||
template: "not ok ${UNSET_VAR?}",
|
||||
expectedError: "required variable UNSET_VAR is missing a value",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
_, err := Substitute(tc.template, defaultMapping)
|
||||
assert.ErrorContains(t, err, tc.expectedError)
|
||||
assert.ErrorType(t, err, reflect.TypeOf(&InvalidTemplateError{}))
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultsForMandatoryVariables(t *testing.T) {
|
||||
testCases := []struct {
|
||||
template string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
template: "ok ${FOO:?err}",
|
||||
expected: "ok first",
|
||||
},
|
||||
{
|
||||
template: "ok ${FOO?err}",
|
||||
expected: "ok first",
|
||||
},
|
||||
{
|
||||
template: "ok ${BAR?err}",
|
||||
expected: "ok ",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
result, err := Substitute(tc.template, defaultMapping)
|
||||
assert.NilError(t, err)
|
||||
assert.Check(t, is.Equal(tc.expected, result))
|
||||
}
|
||||
}
|
||||
|
||||
@ -307,8 +307,3 @@ func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig,
|
||||
}
|
||||
return auths, nil
|
||||
}
|
||||
|
||||
// GetFilename returns the file name that this config file is based on.
|
||||
func (configFile *ConfigFile) GetFilename() string {
|
||||
return configFile.Filename
|
||||
}
|
||||
|
||||
@ -8,7 +8,6 @@ import (
|
||||
type store interface {
|
||||
Save() error
|
||||
GetAuthConfigs() map[string]types.AuthConfig
|
||||
GetFilename() string
|
||||
}
|
||||
|
||||
// fileStore implements a credentials store using
|
||||
@ -54,11 +53,3 @@ func (c *fileStore) Store(authConfig types.AuthConfig) error {
|
||||
c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig
|
||||
return c.file.Save()
|
||||
}
|
||||
|
||||
func (c *fileStore) GetFilename() string {
|
||||
return c.file.GetFilename()
|
||||
}
|
||||
|
||||
func (c *fileStore) IsFileStore() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
@ -20,10 +20,6 @@ func (f *fakeStore) GetAuthConfigs() map[string]types.AuthConfig {
|
||||
return f.configs
|
||||
}
|
||||
|
||||
func (f *fakeStore) GetFilename() string {
|
||||
return "/tmp/docker-fakestore"
|
||||
}
|
||||
|
||||
func newStore(auths map[string]types.AuthConfig) store {
|
||||
return &fakeStore{configs: auths}
|
||||
}
|
||||
|
||||
@ -41,8 +41,6 @@ var (
|
||||
ActionsPullOnly = []string{"pull"}
|
||||
// ActionsPushAndPull defines the actions for read-write interactions with a Notary Repository
|
||||
ActionsPushAndPull = []string{"pull", "push"}
|
||||
// NotaryServer is the endpoint serving the Notary trust server
|
||||
NotaryServer = "https://notary.docker.io"
|
||||
)
|
||||
|
||||
// GetTrustDirectory returns the base trust directory name
|
||||
@ -73,7 +71,7 @@ func Server(index *registrytypes.IndexInfo) (string, error) {
|
||||
return s, nil
|
||||
}
|
||||
if index.Official {
|
||||
return NotaryServer, nil
|
||||
return registry.NotaryServer, nil
|
||||
}
|
||||
return "https://" + index.Name, nil
|
||||
}
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
package cli
|
||||
|
||||
// Default build-time variable.
|
||||
// These values are overridden via ldflags
|
||||
// These values are overriding via ldflags
|
||||
var (
|
||||
PlatformName = ""
|
||||
Version = "unknown-version"
|
||||
|
||||
@ -1220,14 +1220,18 @@ _docker_config_create() {
|
||||
--label|-l)
|
||||
return
|
||||
;;
|
||||
--template-driver)
|
||||
COMPREPLY=( $( compgen -W "golang" -- "$cur" ) )
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--help --label -l" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--help --label -l --template-driver" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
local counter=$(__docker_pos_first_nonflag '--label|-l')
|
||||
local counter=$(__docker_pos_first_nonflag '--label|-l|--template-driver')
|
||||
if [ "$cword" -eq "$((counter + 1))" ]; then
|
||||
_filedir
|
||||
fi
|
||||
@ -4238,14 +4242,18 @@ _docker_secret_create() {
|
||||
--driver|-d|--label|-l)
|
||||
return
|
||||
;;
|
||||
--template-driver)
|
||||
COMPREPLY=( $( compgen -W "golang" -- "$cur" ) )
|
||||
return
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$cur" in
|
||||
-*)
|
||||
COMPREPLY=( $( compgen -W "--driver -d --help --label -l" -- "$cur" ) )
|
||||
COMPREPLY=( $( compgen -W "--driver -d --help --label -l --template-driver" -- "$cur" ) )
|
||||
;;
|
||||
*)
|
||||
local counter=$(__docker_pos_first_nonflag '--driver|-d|--label|-l')
|
||||
local counter=$(__docker_pos_first_nonflag '--driver|-d|--label|-l|--template-driver')
|
||||
if [ "$cword" -eq "$((counter + 1))" ]; then
|
||||
_filedir
|
||||
fi
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM golang:1.9.4-alpine3.6
|
||||
FROM golang:1.9.5-alpine3.6
|
||||
|
||||
RUN apk add -U git bash coreutils gcc musl-dev
|
||||
|
||||
|
||||
@ -1,3 +1,3 @@
|
||||
FROM dockercore/golang-cross:1.9.4@sha256:b8d43ef11ccaa15bec63a1f1fd0c28a0e729074aa62fcfa51f0a5888f3571315
|
||||
FROM dockercore/golang-cross:1.9.5@sha256:4d090b8c2e6d369a48254c882a4e653ba90caaa0b758105da772d9110394d958
|
||||
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
|
||||
WORKDIR /go/src/github.com/docker/cli
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
|
||||
FROM golang:1.9.4-alpine3.6
|
||||
FROM golang:1.9.5-alpine3.6
|
||||
|
||||
RUN apk add -U git make bash coreutils ca-certificates curl
|
||||
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM golang:1.9.4-alpine3.6
|
||||
FROM golang:1.9.5-alpine3.6
|
||||
|
||||
RUN apk add -U git
|
||||
|
||||
|
||||
@ -218,20 +218,22 @@ plugins. This is specifically useful to collect plugin logs if they are
|
||||
redirected to a file.
|
||||
|
||||
```bash
|
||||
$ docker-runc list
|
||||
ID PID STATUS BUNDLE CREATED
|
||||
f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 2679 running /run/docker/libcontainerd/f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 2017-02-06T21:53:03.031537592Z
|
||||
|
||||
$ sudo docker-runc --root /var/run/docker/plugins/runtime-root/moby-plugins list
|
||||
|
||||
ID PID STATUS BUNDLE CREATED OWNER
|
||||
93f1e7dbfe11c938782c2993628c895cf28e2274072c4a346a6002446c949b25 15806 running /run/docker/containerd/daemon/io.containerd.runtime.v1.linux/moby-plugins/93f1e7dbfe11c938782c2993628c895cf28e2274072c4a346a6002446c949b25 2018-02-08T21:40:08.621358213Z root
|
||||
9b4606d84e06b56df84fadf054a21374b247941c94ce405b0a261499d689d9c9 14992 running /run/docker/containerd/daemon/io.containerd.runtime.v1.linux/moby-plugins/9b4606d84e06b56df84fadf054a21374b247941c94ce405b0a261499d689d9c9 2018-02-08T21:35:12.321325872Z root
|
||||
c5bb4b90941efcaccca999439ed06d6a6affdde7081bb34dc84126b57b3e793d 14984 running /run/docker/containerd/daemon/io.containerd.runtime.v1.linux/moby-plugins/c5bb4b90941efcaccca999439ed06d6a6affdde7081bb34dc84126b57b3e793d 2018-02-08T21:35:12.321288966Z root
|
||||
```
|
||||
|
||||
```bash
|
||||
$ docker-runc exec f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 cat /var/log/plugin.log
|
||||
$ sudo docker-runc --root /var/run/docker/plugins/runtime-root/moby-plugins exec 93f1e7dbfe11c938782c2993628c895cf28e2274072c4a346a6002446c949b25 cat /var/log/plugin.log
|
||||
```
|
||||
|
||||
If the plugin has a built-in shell, then exec into the plugin can be done as
|
||||
follows:
|
||||
```bash
|
||||
$ docker-runc exec -t f52a3df433b9aceee436eaada0752f5797aab1de47e5485f1690a073b860ff62 sh
|
||||
$ sudo docker-runc --root /var/run/docker/plugins/runtime-root/moby-plugins exec -t 93f1e7dbfe11c938782c2993628c895cf28e2274072c4a346a6002446c949b25 sh
|
||||
```
|
||||
|
||||
#### Using curl to debug plugin socket issues.
|
||||
|
||||
@ -411,13 +411,13 @@ files. The `ARG` instruction lets Dockerfile authors define values that users
|
||||
can set at build-time using the `--build-arg` flag:
|
||||
|
||||
```bash
|
||||
$ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 .
|
||||
$ docker build --build-arg HTTP_PROXY=http://10.20.30.2:1234 --build-arg FTP_PROXY=http://40.50.60.5:4567 .
|
||||
```
|
||||
|
||||
This flag allows you to pass the build-time variables that are
|
||||
accessed like regular environment variables in the `RUN` instruction of the
|
||||
Dockerfile. Also, these values don't persist in the intermediate or final images
|
||||
like `ENV` values do.
|
||||
like `ENV` values do. You must add `--build-arg` for each build argument.
|
||||
|
||||
Using this flag will not alter the output you see when the `ARG` lines from the
|
||||
Dockerfile are echoed during the build process.
|
||||
@ -425,6 +425,18 @@ Dockerfile are echoed during the build process.
|
||||
For detailed information on using `ARG` and `ENV` instructions, see the
|
||||
[Dockerfile reference](../builder.md).
|
||||
|
||||
You may also use the `--build-arg` flag without a value, in which case the value
|
||||
from the local environment will be propagated into the Docker container being
|
||||
built:
|
||||
|
||||
```bash
|
||||
$ export HTTP_PROXY=http://10.20.30.2:1234
|
||||
$ docker build --build-arg HTTP_PROXY .
|
||||
```
|
||||
|
||||
This is similar to how `docker run -e` works. Refer to the [`docker run` documentation](https://docs.docker.com/engine/reference/commandline/run/#set-environment-variables--e---env---env-file)
|
||||
for more information.
|
||||
|
||||
### Optional security options (--security-opt)
|
||||
|
||||
This flag is only supported on a daemon running on Windows, and only supports
|
||||
@ -504,7 +516,7 @@ section in the userguide for more information.
|
||||
|
||||
The `--squash` option has a number of known limitations:
|
||||
|
||||
- When squashing layers, the resulting image cannot take advantage of layer
|
||||
- When squashing layers, the resulting image cannot take advantage of layer
|
||||
sharing with other images, and may use significantly more space. Sharing the
|
||||
base image is still supported.
|
||||
- When using this option you may see significantly more space used due to
|
||||
@ -581,7 +593,7 @@ $ docker build --squash -t test .
|
||||
If everything is right, the history will look like this:
|
||||
|
||||
```bash
|
||||
$ docker history test
|
||||
$ docker history test
|
||||
|
||||
IMAGE CREATED CREATED BY SIZE COMMENT
|
||||
4e10cb5b4cac 3 seconds ago 12 B merge sha256:88a7b0112a41826885df0e7072698006ee8f621c6ab99fca7fe9151d7b599702 to sha256:47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb
|
||||
|
||||
@ -77,17 +77,11 @@ output the data exactly as the template declares or, when using the
|
||||
`table` directive, will include column headers as well.
|
||||
|
||||
The following example uses a template without headers and outputs the
|
||||
`ID` and `CreatedSince` entries separated by a colon for all images:
|
||||
`ID` and `CreatedSince` entries separated by a colon for the `busybox` image:
|
||||
|
||||
```bash
|
||||
$ docker images --format "{{.ID}}: {{.Created}} ago"
|
||||
$ docker history --format "{{.ID}}: {{.CreatedAt}}" busybox
|
||||
|
||||
cc1b61406712: 2 weeks ago
|
||||
<missing>: 2 weeks ago
|
||||
<missing>: 2 weeks ago
|
||||
<missing>: 2 weeks ago
|
||||
<missing>: 2 weeks ago
|
||||
<missing>: 3 weeks ago
|
||||
<missing>: 3 weeks ago
|
||||
<missing>: 3 weeks ago
|
||||
f6e427c148a7: 4 weeks ago
|
||||
<missing>: 4 weeks ago
|
||||
```
|
||||
|
||||
@ -35,7 +35,7 @@ bzip2, or xz) from a file or STDIN. It restores both images and tags.
|
||||
## Examples
|
||||
|
||||
```bash
|
||||
$ docker docker image ls
|
||||
$ docker image ls
|
||||
|
||||
REPOSITORY TAG IMAGE ID CREATED SIZE
|
||||
|
||||
|
||||
@ -65,7 +65,7 @@ Options:
|
||||
### manifest create
|
||||
|
||||
```bash
|
||||
Usage: docker manifest create MANFEST_LIST MANIFEST [MANIFEST...]
|
||||
Usage: docker manifest create MANIFEST_LIST MANIFEST [MANIFEST...]
|
||||
|
||||
Create a local manifest list for annotating and pushing to a registry
|
||||
|
||||
|
||||
@ -27,7 +27,7 @@ Options:
|
||||
|
||||
## Description
|
||||
|
||||
Creates a secret using standard input or from a file for the secret content. You must run this command on a manager node.
|
||||
Creates a secret using standard input or from a file for the secret content. You must run this command on a manager node.
|
||||
|
||||
For detailed information about using secrets, refer to [manage sensitive data with Docker secrets](https://docs.docker.com/engine/swarm/secrets/).
|
||||
|
||||
@ -36,7 +36,7 @@ For detailed information about using secrets, refer to [manage sensitive data wi
|
||||
### Create a secret
|
||||
|
||||
```bash
|
||||
$ echo <secret> | docker secret create my_secret -
|
||||
$ printf <secret> | docker secret create my_secret -
|
||||
|
||||
onakdyv307se2tl7nl20anokv
|
||||
|
||||
|
||||
@ -81,7 +81,7 @@ configuration and environment-specific overrides, you can provide multiple
|
||||
`--compose-file` flags.
|
||||
|
||||
```bash
|
||||
$ docker stack deploy --compose-file docker-compose.yml -f docker-compose.prod.yml vossibility
|
||||
$ docker stack deploy --compose-file docker-compose.yml -c docker-compose.prod.yml vossibility
|
||||
|
||||
Ignoring unsupported options: links
|
||||
|
||||
|
||||
@ -1,26 +0,0 @@
|
||||
package stack
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gotestyourself/gotestyourself/assert"
|
||||
"github.com/gotestyourself/gotestyourself/golden"
|
||||
"github.com/gotestyourself/gotestyourself/icmd"
|
||||
)
|
||||
|
||||
func TestDeployWithNamedResources(t *testing.T) {
|
||||
stackname := "test-stack-deploy-with-names"
|
||||
composefile := golden.Path("stack-with-named-resources.yml")
|
||||
|
||||
result := icmd.RunCommand(
|
||||
"docker", "stack", "deploy", "-c", composefile, stackname)
|
||||
|
||||
result.Assert(t, icmd.Success)
|
||||
stdout := strings.Split(result.Stdout(), "\n")
|
||||
expected := strings.Split(string(golden.Get(t, "stack-deploy-with-nanes.golden")), "\n")
|
||||
sort.Strings(stdout)
|
||||
sort.Strings(expected)
|
||||
assert.DeepEqual(t, stdout, expected)
|
||||
}
|
||||
1
components/cli/e2e/stack/testdata/data
vendored
@ -1 +0,0 @@
|
||||
A file with some text
|
||||
@ -1,7 +0,0 @@
|
||||
Creating network test-stack-deploy-with-names_network2
|
||||
Creating network named-network
|
||||
Creating secret named-secret
|
||||
Creating secret test-stack-deploy-with-names_secret2
|
||||
Creating config test-stack-deploy-with-names_config2
|
||||
Creating config named-config
|
||||
Creating service test-stack-deploy-with-names_web
|
||||
@@ -1,30 +0,0 @@
version: '3.5'
services:
  web:
    image: registry:5000/alpine:3.6
    command: top
    networks: [network1, network2]
    volumes: [volume1, volume2]
    secrets: [secret1, secret2]
    configs: [config1, config2]

networks:
  network1:
    name: named-network
  network2:
volumes:
  volume1:
    name: named-volume
  volume2:
secrets:
  secret1:
    name: named-secret
    file: ./data
  secret2:
    file: ./data
configs:
  config1:
    name: named-config
    file: ./data
  config2:
    file: ./data
@ -5,8 +5,8 @@ import (
|
||||
"github.com/docker/docker/api/types"
|
||||
)
|
||||
|
||||
// FakeStore implements a credentials.Store that only acts as an in memory map
|
||||
type FakeStore struct {
|
||||
// fake store implements a credentials.Store that only acts as an in memory map
|
||||
type fakeStore struct {
|
||||
store map[string]types.AuthConfig
|
||||
eraseFunc func(serverAddress string) error
|
||||
getFunc func(serverAddress string) (types.AuthConfig, error)
|
||||
@ -16,36 +16,31 @@ type FakeStore struct {
|
||||
|
||||
// NewFakeStore creates a new file credentials store.
|
||||
func NewFakeStore() credentials.Store {
|
||||
return &FakeStore{store: map[string]types.AuthConfig{}}
|
||||
return &fakeStore{store: map[string]types.AuthConfig{}}
|
||||
}
|
||||
|
||||
// SetStore is used to overrides Set function
|
||||
func (c *FakeStore) SetStore(store map[string]types.AuthConfig) {
|
||||
func (c *fakeStore) SetStore(store map[string]types.AuthConfig) {
|
||||
c.store = store
|
||||
}
|
||||
|
||||
// SetEraseFunc is used to overrides Erase function
|
||||
func (c *FakeStore) SetEraseFunc(eraseFunc func(string) error) {
|
||||
func (c *fakeStore) SetEraseFunc(eraseFunc func(string) error) {
|
||||
c.eraseFunc = eraseFunc
|
||||
}
|
||||
|
||||
// SetGetFunc is used to overrides Get function
|
||||
func (c *FakeStore) SetGetFunc(getFunc func(string) (types.AuthConfig, error)) {
|
||||
func (c *fakeStore) SetGetFunc(getFunc func(string) (types.AuthConfig, error)) {
|
||||
c.getFunc = getFunc
|
||||
}
|
||||
|
||||
// SetGetAllFunc is used to overrides GetAll function
|
||||
func (c *FakeStore) SetGetAllFunc(getAllFunc func() (map[string]types.AuthConfig, error)) {
|
||||
func (c *fakeStore) SetGetAllFunc(getAllFunc func() (map[string]types.AuthConfig, error)) {
|
||||
c.getAllFunc = getAllFunc
|
||||
}
|
||||
|
||||
// SetStoreFunc is used to override Store function
|
||||
func (c *FakeStore) SetStoreFunc(storeFunc func(types.AuthConfig) error) {
|
||||
func (c *fakeStore) SetStoreFunc(storeFunc func(types.AuthConfig) error) {
|
||||
c.storeFunc = storeFunc
|
||||
}
|
||||
|
||||
// Erase removes the given credentials from the map store
|
||||
func (c *FakeStore) Erase(serverAddress string) error {
|
||||
func (c *fakeStore) Erase(serverAddress string) error {
|
||||
if c.eraseFunc != nil {
|
||||
return c.eraseFunc(serverAddress)
|
||||
}
|
||||
@ -54,15 +49,14 @@ func (c *FakeStore) Erase(serverAddress string) error {
|
||||
}
|
||||
|
||||
// Get retrieves credentials for a specific server from the map store.
|
||||
func (c *FakeStore) Get(serverAddress string) (types.AuthConfig, error) {
|
||||
func (c *fakeStore) Get(serverAddress string) (types.AuthConfig, error) {
|
||||
if c.getFunc != nil {
|
||||
return c.getFunc(serverAddress)
|
||||
}
|
||||
return c.store[serverAddress], nil
|
||||
}
|
||||
|
||||
// GetAll returns the key value pairs of ServerAddress => Username
|
||||
func (c *FakeStore) GetAll() (map[string]types.AuthConfig, error) {
|
||||
func (c *fakeStore) GetAll() (map[string]types.AuthConfig, error) {
|
||||
if c.getAllFunc != nil {
|
||||
return c.getAllFunc()
|
||||
}
|
||||
@ -70,7 +64,7 @@ func (c *FakeStore) GetAll() (map[string]types.AuthConfig, error) {
|
||||
}
|
||||
|
||||
// Store saves the given credentials in the map store.
|
||||
func (c *FakeStore) Store(authConfig types.AuthConfig) error {
|
||||
func (c *fakeStore) Store(authConfig types.AuthConfig) error {
|
||||
if c.storeFunc != nil {
|
||||
return c.storeFunc(authConfig)
|
||||
}
|
||||
|
||||
@ -1,50 +0,0 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
apiv1beta1 "github.com/docker/cli/kubernetes/compose/v1beta1"
|
||||
"github.com/pkg/errors"
|
||||
apimachinerymetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
// StackVersion represents the detected Compose Component on Kubernetes side.
|
||||
type StackVersion string
|
||||
|
||||
const (
|
||||
// StackAPIV1Beta1 is returned if it's the most recent version available.
|
||||
StackAPIV1Beta1 = StackVersion("v1beta1")
|
||||
)
|
||||
|
||||
// GetStackAPIVersion returns the most recent stack API installed.
|
||||
func GetStackAPIVersion(clientSet *kubernetes.Clientset) (StackVersion, error) {
|
||||
groups, err := clientSet.Discovery().ServerGroups()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return getAPIVersion(groups)
|
||||
}
|
||||
|
||||
func getAPIVersion(groups *metav1.APIGroupList) (StackVersion, error) {
|
||||
switch {
|
||||
case findVersion(apiv1beta1.SchemeGroupVersion, groups.Groups):
|
||||
return StackAPIV1Beta1, nil
|
||||
default:
|
||||
return "", errors.Errorf("failed to find a Stack API version")
|
||||
}
|
||||
}
|
||||
|
||||
func findVersion(stackAPI schema.GroupVersion, groups []apimachinerymetav1.APIGroup) bool {
|
||||
for _, group := range groups {
|
||||
if group.Name == stackAPI.Group {
|
||||
for _, version := range group.Versions {
|
||||
if version.Version == stackAPI.Version {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -1,49 +0,0 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/gotestyourself/gotestyourself/assert"
|
||||
is "github.com/gotestyourself/gotestyourself/assert/cmp"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func TestGetStackAPIVersion(t *testing.T) {
|
||||
var tests = []struct {
|
||||
description string
|
||||
groups *metav1.APIGroupList
|
||||
err bool
|
||||
expectedStack StackVersion
|
||||
}{
|
||||
{"no stack api", makeGroups(), true, ""},
|
||||
{"v1beta1", makeGroups(groupVersion{"compose.docker.com", []string{"v1beta1"}}), false, StackAPIV1Beta1},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
version, err := getAPIVersion(test.groups)
|
||||
if test.err {
|
||||
assert.ErrorContains(t, err, "")
|
||||
} else {
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
assert.Check(t, is.Equal(test.expectedStack, version))
|
||||
}
|
||||
}
|
||||
|
||||
type groupVersion struct {
|
||||
name string
|
||||
versions []string
|
||||
}
|
||||
|
||||
func makeGroups(versions ...groupVersion) *metav1.APIGroupList {
|
||||
groups := make([]metav1.APIGroup, len(versions))
|
||||
for i := range versions {
|
||||
groups[i].Name = versions[i].name
|
||||
for _, v := range versions[i].versions {
|
||||
groups[i].Versions = append(groups[i].Versions, metav1.GroupVersionForDiscovery{Version: v})
|
||||
}
|
||||
}
|
||||
return &metav1.APIGroupList{
|
||||
Groups: groups,
|
||||
}
|
||||
}
|
||||
@ -1,24 +0,0 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/pkg/homedir"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
// NewKubernetesConfig resolves the path to the desired Kubernetes configuration file, depending
|
||||
// environment variable and command line flag.
|
||||
func NewKubernetesConfig(configFlag string) (*restclient.Config, error) {
|
||||
kubeConfig := configFlag
|
||||
if kubeConfig == "" {
|
||||
if config := os.Getenv("KUBECONFIG"); config != "" {
|
||||
kubeConfig = config
|
||||
} else {
|
||||
kubeConfig = filepath.Join(homedir.Get(), ".kube/config")
|
||||
}
|
||||
}
|
||||
return clientcmd.BuildConfigFromFlags("", kubeConfig)
|
||||
}
|
||||
@ -5,7 +5,7 @@ github.com/coreos/etcd v3.2.1
|
||||
github.com/cpuguy83/go-md2man v1.0.8
|
||||
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
|
||||
github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
|
||||
github.com/docker/docker ed7b6428c133e7c59404251a09b7d6b02fa83cc2
|
||||
github.com/docker/docker 0ede01237c9ab871f1b8db0364427407f3e46541
|
||||
github.com/docker/docker-credential-helpers 3c90bd29a46b943b2a9842987b58fb91a7c1819b
|
||||
# the docker/go package contains a customized version of canonical/json
|
||||
# and is used by Notary. The package is periodically rebased on current Go versions.
|
||||
@ -13,7 +13,7 @@ github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06
|
||||
github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
|
||||
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
||||
github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
|
||||
github.com/docker/swarmkit 49a9d7f6ba3c1925262641e694c18eb43575f74b
|
||||
github.com/docker/swarmkit 11d7b06f48bc1d73fc6d8776c3552a4b11c94301
|
||||
github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46
|
||||
github.com/emicklei/go-restful-swagger12 dcef7f55730566d41eae5db10e7d6981829720f6
|
||||
github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff
|
||||
@ -37,20 +37,20 @@ github.com/go-openapi/swag 1d0bd113de87027671077d3c71eb3ac5d7dbba72
|
||||
github.com/gregjones/httpcache c1f8028e62adb3d518b823a2f8e6a95c38bdd3aa
|
||||
github.com/grpc-ecosystem/grpc-gateway 1a03ca3bad1e1ebadaedd3abb76bc58d4ac8143b
|
||||
github.com/howeyc/gopass 3ca23474a7c7203e0a0a070fd33508f6efdb9b3d
|
||||
github.com/imdario/mergo ea74e0177b4df59af68c076af5008b427d00d40f
|
||||
github.com/imdario/mergo 9d5f1277e9a8ed20c3684bda8fde67c05628518c # v0.3.4
|
||||
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
|
||||
github.com/juju/ratelimit 5b9ff866471762aa2ab2dced63c9fb6f53921342
|
||||
github.com/json-iterator/go 6240e1e7983a85228f7fd9c3e1b6932d46ec58e2
|
||||
github.com/mailru/easyjson d5b7844b561a7bc640052f1b935f7b800330d7e0
|
||||
github.com/mattn/go-shellwords v1.0.3
|
||||
github.com/Microsoft/go-winio v0.4.6
|
||||
github.com/miekg/pkcs11 5f6e0d0dad6f472df908c8e968a98ef00c9224bb
|
||||
github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
|
||||
github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
|
||||
github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
|
||||
github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty
|
||||
github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
|
||||
github.com/opencontainers/image-spec v1.0.1
|
||||
github.com/opencontainers/runc 4fc53a81fb7c994640722ac585fa9ca548971871
|
||||
github.com/opencontainers/runc 6c55f98695e902427906eed2c799e566e3d3dfb5
|
||||
github.com/peterbourgon/diskv 5f041e8faa004a95c88a202771f4cc3e991971e6
|
||||
github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
|
||||
github.com/pmezard/go-difflib v1.0.0
|
||||
@ -62,14 +62,14 @@ github.com/sirupsen/logrus v1.0.3
|
||||
github.com/spf13/cobra 34ceca591bcf34a17a8b7bad5b3ce5f9c165bee5
|
||||
github.com/spf13/pflag 97afa5e7ca8a08a383cb259e06636b5e2cc7897f
|
||||
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
|
||||
github.com/theupdateframework/notary v0.6.0
|
||||
github.com/theupdateframework/notary 05985dc5d1c71ee6c387e9cd276a00b9d424af53
|
||||
github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
|
||||
github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a
|
||||
github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
|
||||
github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d
|
||||
golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
|
||||
golang.org/x/net a8b9294777976932365dabb6640cf1468d95c70f
|
||||
golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
|
||||
golang.org/x/sync f52d1811a62927559de87708c
|
||||
golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd
|
||||
golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
|
||||
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
|
||||
|
||||
29  components/cli/vendor/github.com/docker/docker/client/hijack.go  generated  vendored
@ -188,14 +188,8 @@ func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, e
|
||||
|
||||
c, br := clientconn.Hijack()
|
||||
if br.Buffered() > 0 {
|
||||
// If there is buffered content, wrap the connection. We return an
|
||||
// object that implements CloseWrite iff the underlying connection
|
||||
// implements it.
|
||||
if _, ok := c.(types.CloseWriter); ok {
|
||||
c = &hijackedConnCloseWriter{&hijackedConn{c, br}}
|
||||
} else {
|
||||
c = &hijackedConn{c, br}
|
||||
}
|
||||
// If there is buffered content, wrap the connection
|
||||
c = &hijackedConn{c, br}
|
||||
} else {
|
||||
br.Reset(nil)
|
||||
}
|
||||
@ -203,10 +197,6 @@ func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, e
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case
|
||||
// that a) there was already buffered data in the http layer when Hijack() was
|
||||
// called, and b) the underlying net.Conn does *not* implement CloseWrite().
|
||||
// hijackedConn does not implement CloseWrite() either.
|
||||
type hijackedConn struct {
|
||||
net.Conn
|
||||
r *bufio.Reader
|
||||
@ -215,18 +205,3 @@ type hijackedConn struct {
|
||||
func (c *hijackedConn) Read(b []byte) (int, error) {
|
||||
return c.r.Read(b)
|
||||
}
|
||||
|
||||
// hijackedConnCloseWriter is a hijackedConn which additionally implements
|
||||
// CloseWrite(). It is returned by setupHijackConn in the case that a) there
|
||||
// was already buffered data in the http layer when Hijack() was called, and b)
|
||||
// the underlying net.Conn *does* implement CloseWrite().
|
||||
type hijackedConnCloseWriter struct {
|
||||
*hijackedConn
|
||||
}
|
||||
|
||||
var _ types.CloseWriter = &hijackedConnCloseWriter{}
|
||||
|
||||
func (c *hijackedConnCloseWriter) CloseWrite() error {
|
||||
conn := c.Conn.(types.CloseWriter)
|
||||
return conn.CloseWrite()
|
||||
}
|
||||
|
||||
2  components/cli/vendor/github.com/docker/docker/errdefs/defs.go  generated  vendored
@ -35,7 +35,7 @@ type ErrForbidden interface {
|
||||
// ErrSystem signals that some internal error occurred.
|
||||
// An example of this would be a failed mount request.
|
||||
type ErrSystem interface {
|
||||
System()
|
||||
ErrSystem()
|
||||
}
|
||||
|
||||
// ErrNotModified signals that an action can't be performed because it's already in the desired state
|
||||
|
||||
2  components/cli/vendor/github.com/docker/docker/errdefs/is.go  generated  vendored
@ -21,7 +21,7 @@ func getImplementer(err error) error {
|
||||
ErrDeadline,
|
||||
ErrDataLoss,
|
||||
ErrUnknown:
|
||||
return err
|
||||
return e
|
||||
case causer:
|
||||
return getImplementer(e.Cause())
|
||||
default:
|
||||
|
||||
34  components/cli/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go  generated  vendored
@ -40,17 +40,21 @@ type JSONProgress struct {
|
||||
// If true, don't show xB/yB
|
||||
HideCounts bool `json:"hidecounts,omitempty"`
|
||||
Units string `json:"units,omitempty"`
|
||||
nowFunc func() time.Time
|
||||
winSize int
|
||||
}
|
||||
|
||||
func (p *JSONProgress) String() string {
|
||||
var (
|
||||
width = p.width()
|
||||
width = 200
|
||||
pbBox string
|
||||
numbersBox string
|
||||
timeLeftBox string
|
||||
)
|
||||
|
||||
ws, err := term.GetWinsize(p.terminalFd)
|
||||
if err == nil {
|
||||
width = int(ws.Width)
|
||||
}
|
||||
|
||||
if p.Current <= 0 && p.Total <= 0 {
|
||||
return ""
|
||||
}
|
||||
@ -99,7 +103,7 @@ func (p *JSONProgress) String() string {
|
||||
}
|
||||
|
||||
if p.Current > 0 && p.Start > 0 && percentage < 50 {
|
||||
fromStart := p.now().Sub(time.Unix(p.Start, 0))
|
||||
fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
|
||||
perEntry := fromStart / time.Duration(p.Current)
|
||||
left := time.Duration(p.Total-p.Current) * perEntry
|
||||
left = (left / time.Second) * time.Second
|
||||
@ -111,28 +115,6 @@ func (p *JSONProgress) String() string {
|
||||
return pbBox + numbersBox + timeLeftBox
|
||||
}
|
||||
|
||||
// shim for testing
|
||||
func (p *JSONProgress) now() time.Time {
|
||||
if p.nowFunc == nil {
|
||||
p.nowFunc = func() time.Time {
|
||||
return time.Now().UTC()
|
||||
}
|
||||
}
|
||||
return p.nowFunc()
|
||||
}
|
||||
|
||||
// shim for testing
|
||||
func (p *JSONProgress) width() int {
|
||||
if p.winSize != 0 {
|
||||
return p.winSize
|
||||
}
|
||||
ws, err := term.GetWinsize(p.terminalFd)
|
||||
if err == nil {
|
||||
return int(ws.Width)
|
||||
}
|
||||
return 200
|
||||
}
|
||||
|
||||
// JSONMessage defines a message struct. It describes
|
||||
// the created time, where it from, status, ID of the
|
||||
// message. It's used for docker events.
|
||||
|
||||
4  components/cli/vendor/github.com/docker/docker/pkg/mount/mount.go  generated  vendored
@ -72,9 +72,7 @@ func RecursiveUnmount(target string) error {
|
||||
}
|
||||
|
||||
// Make the deepest mount be first
|
||||
sort.Slice(mounts, func(i, j int) bool {
|
||||
return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
|
||||
})
|
||||
sort.Sort(sort.Reverse(byMountpoint(mounts)))
|
||||
|
||||
for i, m := range mounts {
|
||||
if !strings.HasPrefix(m.Mountpoint, target) {
|
||||
|
||||
14  components/cli/vendor/github.com/docker/docker/pkg/mount/mountinfo.go  generated  vendored
@ -38,3 +38,17 @@ type Info struct {
|
||||
// VfsOpts represents per super block options.
|
||||
VfsOpts string
|
||||
}
|
||||
|
||||
type byMountpoint []*Info
|
||||
|
||||
func (by byMountpoint) Len() int {
|
||||
return len(by)
|
||||
}
|
||||
|
||||
func (by byMountpoint) Less(i, j int) bool {
|
||||
return by[i].Mountpoint < by[j].Mountpoint
|
||||
}
|
||||
|
||||
func (by byMountpoint) Swap(i, j int) {
|
||||
by[i], by[j] = by[j], by[i]
|
||||
}
|
||||
|
||||
5  components/cli/vendor/github.com/docker/docker/pkg/system/syscall_windows.go  generated  vendored
@ -1,7 +1,6 @@
|
||||
package system // import "github.com/docker/docker/pkg/system"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
@ -54,10 +53,6 @@ func GetOSVersion() OSVersion {
|
||||
return osv
|
||||
}
|
||||
|
||||
func (osv OSVersion) ToString() string {
|
||||
return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build)
|
||||
}
|
||||
|
||||
// IsWindowsClient returns true if the SKU is client
|
||||
// @engine maintainers - this function should not be removed or modified as it
|
||||
// is used to enforce licensing restrictions on Windows.
|
||||
|
||||
3  components/cli/vendor/github.com/docker/docker/registry/config.go  generated  vendored
@ -45,6 +45,9 @@ var (
|
||||
// IndexName is the name of the index
|
||||
IndexName = "docker.io"
|
||||
|
||||
// NotaryServer is the endpoint serving the Notary trust server
|
||||
NotaryServer = "https://notary.docker.io"
|
||||
|
||||
// DefaultV2Registry is the URI of the default v2 registry
|
||||
DefaultV2Registry = &url.URL{
|
||||
Scheme: "https",
|
||||
|
||||
25  components/cli/vendor/github.com/docker/docker/vendor.conf  generated  vendored
@ -2,9 +2,9 @@
|
||||
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
|
||||
github.com/Microsoft/hcsshim v0.6.8
|
||||
github.com/Microsoft/go-winio v0.4.6
|
||||
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
|
||||
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
|
||||
github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
|
||||
github.com/golang/gddo 9b12a26f3fbd7397dee4e20939ddca719d840d2a
|
||||
github.com/gorilla/context v1.1
|
||||
github.com/gorilla/mux v1.1
|
||||
github.com/Microsoft/opengcs v0.3.6
|
||||
@ -18,21 +18,22 @@ golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd
|
||||
github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
|
||||
github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
|
||||
golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
|
||||
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
|
||||
github.com/pmezard/go-difflib v1.0.0
|
||||
github.com/gotestyourself/gotestyourself cf3a5ab914a2efa8bc838d09f5918c1d44d029
|
||||
github.com/google/go-cmp v0.2.0
|
||||
github.com/gotestyourself/gotestyourself 511344eed30e4384f010579a593dfb442033a692
|
||||
github.com/google/go-cmp v0.1.0
|
||||
|
||||
github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5
|
||||
github.com/imdario/mergo 0.2.1
|
||||
golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
|
||||
golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
|
||||
|
||||
github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
|
||||
github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
|
||||
|
||||
#get libnetwork packages
|
||||
|
||||
# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy accordingly
|
||||
github.com/docker/libnetwork 1b91bc94094ecfdae41daa465cc0c8df37dfb3dd
|
||||
# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/binaries-commits accordingly
|
||||
github.com/docker/libnetwork ed2130d117c11c542327b4d5216a5db36770bc65
|
||||
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
|
||||
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
|
||||
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
|
||||
@ -46,7 +47,7 @@ github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
|
||||
github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
|
||||
github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e
|
||||
|
||||
# When updating, consider updating TOMLV_COMMIT in hack/dockerfile/install/tomlv accordingly
|
||||
# When updating, consider updating TOMLV_COMMIT in hack/dockerfile/binaries-commits accordingly
|
||||
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
|
||||
github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
|
||||
github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
|
||||
@ -61,7 +62,7 @@ github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
|
||||
# get graph and distribution packages
|
||||
github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
|
||||
github.com/vbatts/tar-split v0.10.2
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1
|
||||
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
|
||||
|
||||
# get go-zfs packages
|
||||
github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
|
||||
@ -69,8 +70,8 @@ github.com/pborman/uuid v1.0
|
||||
|
||||
google.golang.org/grpc v1.3.0
|
||||
|
||||
# When updating, also update RUNC_COMMIT in hack/dockerfile/install/runc accordingly
|
||||
github.com/opencontainers/runc 4fc53a81fb7c994640722ac585fa9ca548971871
|
||||
# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly
|
||||
github.com/opencontainers/runc 6c55f98695e902427906eed2c799e566e3d3dfb5
|
||||
github.com/opencontainers/runtime-spec v1.0.1
|
||||
github.com/opencontainers/image-spec v1.0.1
|
||||
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
|
||||
@ -112,14 +113,14 @@ github.com/containerd/containerd 3fa104f843ec92328912e042b767d26825f202aa
|
||||
github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
|
||||
github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
|
||||
github.com/containerd/cgroups c0710c92e8b3a44681d1321dcfd1360fc5c6c089
|
||||
github.com/containerd/console 2748ece16665b45a47f884001d5831ec79703880
|
||||
github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e
|
||||
github.com/containerd/go-runc 4f6e87ae043f859a38255247b49c9abc262d002f
|
||||
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
|
||||
github.com/dmcgowan/go-tar go1.10
|
||||
github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577
|
||||
|
||||
# cluster
|
||||
github.com/docker/swarmkit 49a9d7f6ba3c1925262641e694c18eb43575f74b
|
||||
github.com/docker/swarmkit f74983e7c015a38a81c8642803a78b8322cf7eac
|
||||
github.com/gogo/protobuf v0.4
|
||||
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
|
||||
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
|
||||
|
||||
89  components/cli/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go  generated  vendored
@ -1,89 +0,0 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// Package cmpopts provides common options for the cmp package.
|
||||
package cmpopts
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
func equateAlways(_, _ interface{}) bool { return true }
|
||||
|
||||
// EquateEmpty returns a Comparer option that determines all maps and slices
|
||||
// with a length of zero to be equal, regardless of whether they are nil.
|
||||
//
|
||||
// EquateEmpty can be used in conjunction with SortSlices and SortMaps.
|
||||
func EquateEmpty() cmp.Option {
|
||||
return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways))
|
||||
}
|
||||
|
||||
func isEmpty(x, y interface{}) bool {
|
||||
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
|
||||
return (x != nil && y != nil && vx.Type() == vy.Type()) &&
|
||||
(vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) &&
|
||||
(vx.Len() == 0 && vy.Len() == 0)
|
||||
}
|
||||
|
||||
// EquateApprox returns a Comparer option that determines float32 or float64
|
||||
// values to be equal if they are within a relative fraction or absolute margin.
|
||||
// This option is not used when either x or y is NaN or infinite.
|
||||
//
|
||||
// The fraction determines that the difference of two values must be within the
|
||||
// smaller fraction of the two values, while the margin determines that the two
|
||||
// values must be within some absolute margin.
|
||||
// To express only a fraction or only a margin, use 0 for the other parameter.
|
||||
// The fraction and margin must be non-negative.
|
||||
//
|
||||
// The mathematical expression used is equivalent to:
|
||||
// |x-y| ≤ max(fraction*min(|x|, |y|), margin)
|
||||
//
|
||||
// EquateApprox can be used in conjunction with EquateNaNs.
|
||||
func EquateApprox(fraction, margin float64) cmp.Option {
|
||||
if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) {
|
||||
panic("margin or fraction must be a non-negative number")
|
||||
}
|
||||
a := approximator{fraction, margin}
|
||||
return cmp.Options{
|
||||
cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)),
|
||||
cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)),
|
||||
}
|
||||
}
|
||||
|
||||
type approximator struct{ frac, marg float64 }
|
||||
|
||||
func areRealF64s(x, y float64) bool {
|
||||
return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0)
|
||||
}
|
||||
func areRealF32s(x, y float32) bool {
|
||||
return areRealF64s(float64(x), float64(y))
|
||||
}
|
||||
func (a approximator) compareF64(x, y float64) bool {
|
||||
relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y))
|
||||
return math.Abs(x-y) <= math.Max(a.marg, relMarg)
|
||||
}
|
||||
func (a approximator) compareF32(x, y float32) bool {
|
||||
return a.compareF64(float64(x), float64(y))
|
||||
}
|
||||
|
||||
// EquateNaNs returns a Comparer option that determines float32 and float64
|
||||
// NaN values to be equal.
|
||||
//
|
||||
// EquateNaNs can be used in conjunction with EquateApprox.
|
||||
func EquateNaNs() cmp.Option {
|
||||
return cmp.Options{
|
||||
cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)),
|
||||
cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)),
|
||||
}
|
||||
}
|
||||
|
||||
func areNaNsF64s(x, y float64) bool {
|
||||
return math.IsNaN(x) && math.IsNaN(y)
|
||||
}
|
||||
func areNaNsF32s(x, y float32) bool {
|
||||
return areNaNsF64s(float64(x), float64(y))
|
||||
}
|
||||
145  components/cli/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go  generated  vendored
@ -1,145 +0,0 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package cmpopts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
// IgnoreFields returns an Option that ignores exported fields of the
|
||||
// given names on a single struct type.
|
||||
// The struct type is specified by passing in a value of that type.
|
||||
//
|
||||
// The name may be a dot-delimited string (e.g., "Foo.Bar") to ignore a
|
||||
// specific sub-field that is embedded or nested within the parent struct.
|
||||
//
|
||||
// This does not handle unexported fields; use IgnoreUnexported instead.
|
||||
func IgnoreFields(typ interface{}, names ...string) cmp.Option {
|
||||
sf := newStructFilter(typ, names...)
|
||||
return cmp.FilterPath(sf.filter, cmp.Ignore())
|
||||
}
|
||||
|
||||
// IgnoreTypes returns an Option that ignores all values assignable to
|
||||
// certain types, which are specified by passing in a value of each type.
|
||||
func IgnoreTypes(typs ...interface{}) cmp.Option {
|
||||
tf := newTypeFilter(typs...)
|
||||
return cmp.FilterPath(tf.filter, cmp.Ignore())
|
||||
}
|
||||
|
||||
type typeFilter []reflect.Type
|
||||
|
||||
func newTypeFilter(typs ...interface{}) (tf typeFilter) {
|
||||
for _, typ := range typs {
|
||||
t := reflect.TypeOf(typ)
|
||||
if t == nil {
|
||||
// This occurs if someone tries to pass in sync.Locker(nil)
|
||||
panic("cannot determine type; consider using IgnoreInterfaces")
|
||||
}
|
||||
tf = append(tf, t)
|
||||
}
|
||||
return tf
|
||||
}
|
||||
func (tf typeFilter) filter(p cmp.Path) bool {
|
||||
if len(p) < 1 {
|
||||
return false
|
||||
}
|
||||
t := p.Last().Type()
|
||||
for _, ti := range tf {
|
||||
if t.AssignableTo(ti) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IgnoreInterfaces returns an Option that ignores all values or references of
|
||||
// values assignable to certain interface types. These interfaces are specified
|
||||
// by passing in an anonymous struct with the interface types embedded in it.
|
||||
// For example, to ignore sync.Locker, pass in struct{sync.Locker}{}.
|
||||
func IgnoreInterfaces(ifaces interface{}) cmp.Option {
|
||||
tf := newIfaceFilter(ifaces)
|
||||
return cmp.FilterPath(tf.filter, cmp.Ignore())
|
||||
}
|
||||
|
||||
type ifaceFilter []reflect.Type
|
||||
|
||||
func newIfaceFilter(ifaces interface{}) (tf ifaceFilter) {
|
||||
t := reflect.TypeOf(ifaces)
|
||||
if ifaces == nil || t.Name() != "" || t.Kind() != reflect.Struct {
|
||||
panic("input must be an anonymous struct")
|
||||
}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
fi := t.Field(i)
|
||||
switch {
|
||||
case !fi.Anonymous:
|
||||
panic("struct cannot have named fields")
|
||||
case fi.Type.Kind() != reflect.Interface:
|
||||
panic("embedded field must be an interface type")
|
||||
case fi.Type.NumMethod() == 0:
|
||||
// This matches everything; why would you ever want this?
|
||||
panic("cannot ignore empty interface")
|
||||
default:
|
||||
tf = append(tf, fi.Type)
|
||||
}
|
||||
}
|
||||
return tf
|
||||
}
|
||||
func (tf ifaceFilter) filter(p cmp.Path) bool {
|
||||
if len(p) < 1 {
|
||||
return false
|
||||
}
|
||||
t := p.Last().Type()
|
||||
for _, ti := range tf {
|
||||
if t.AssignableTo(ti) {
|
||||
return true
|
||||
}
|
||||
if t.Kind() != reflect.Ptr && reflect.PtrTo(t).AssignableTo(ti) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IgnoreUnexported returns an Option that only ignores the immediate unexported
|
||||
// fields of a struct, including anonymous fields of unexported types.
|
||||
// In particular, unexported fields within the struct's exported fields
|
||||
// of struct types, including anonymous fields, will not be ignored unless the
|
||||
// type of the field itself is also passed to IgnoreUnexported.
|
||||
func IgnoreUnexported(typs ...interface{}) cmp.Option {
|
||||
ux := newUnexportedFilter(typs...)
|
||||
return cmp.FilterPath(ux.filter, cmp.Ignore())
|
||||
}
|
||||
|
||||
type unexportedFilter struct{ m map[reflect.Type]bool }
|
||||
|
||||
func newUnexportedFilter(typs ...interface{}) unexportedFilter {
|
||||
ux := unexportedFilter{m: make(map[reflect.Type]bool)}
|
||||
for _, typ := range typs {
|
||||
t := reflect.TypeOf(typ)
|
||||
if t == nil || t.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("invalid struct type: %T", typ))
|
||||
}
|
||||
ux.m[t] = true
|
||||
}
|
||||
return ux
|
||||
}
|
||||
func (xf unexportedFilter) filter(p cmp.Path) bool {
|
||||
sf, ok := p.Index(-1).(cmp.StructField)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return xf.m[p.Index(-2).Type()] && !isExported(sf.Name())
|
||||
}
|
||||
|
||||
// isExported reports whether the identifier is exported.
|
||||
func isExported(id string) bool {
|
||||
r, _ := utf8.DecodeRuneInString(id)
|
||||
return unicode.IsUpper(r)
|
||||
}
|
||||
146  components/cli/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go  generated  vendored
@ -1,146 +0,0 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package cmpopts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/internal/function"
|
||||
)
|
||||
|
||||
// SortSlices returns a Transformer option that sorts all []V.
|
||||
// The less function must be of the form "func(T, T) bool" which is used to
|
||||
// sort any slice with element type V that is assignable to T.
|
||||
//
|
||||
// The less function must be:
|
||||
// • Deterministic: less(x, y) == less(x, y)
|
||||
// • Irreflexive: !less(x, x)
|
||||
// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
|
||||
//
|
||||
// The less function does not have to be "total". That is, if !less(x, y) and
|
||||
// !less(y, x) for two elements x and y, their relative order is maintained.
|
||||
//
|
||||
// SortSlices can be used in conjunction with EquateEmpty.
|
||||
func SortSlices(less interface{}) cmp.Option {
|
||||
vf := reflect.ValueOf(less)
|
||||
if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
|
||||
panic(fmt.Sprintf("invalid less function: %T", less))
|
||||
}
|
||||
ss := sliceSorter{vf.Type().In(0), vf}
|
||||
return cmp.FilterValues(ss.filter, cmp.Transformer("Sort", ss.sort))
|
||||
}
|
||||
|
||||
type sliceSorter struct {
|
||||
in reflect.Type // T
|
||||
fnc reflect.Value // func(T, T) bool
|
||||
}
|
||||
|
||||
func (ss sliceSorter) filter(x, y interface{}) bool {
|
||||
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
|
||||
if !(x != nil && y != nil && vx.Type() == vy.Type()) ||
|
||||
!(vx.Kind() == reflect.Slice && vx.Type().Elem().AssignableTo(ss.in)) ||
|
||||
(vx.Len() <= 1 && vy.Len() <= 1) {
|
||||
return false
|
||||
}
|
||||
// Check whether the slices are already sorted to avoid an infinite
|
||||
// recursion cycle applying the same transform to itself.
|
||||
ok1 := sliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) })
|
||||
ok2 := sliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) })
|
||||
return !ok1 || !ok2
|
||||
}
|
||||
func (ss sliceSorter) sort(x interface{}) interface{} {
|
||||
src := reflect.ValueOf(x)
|
||||
dst := reflect.MakeSlice(src.Type(), src.Len(), src.Len())
|
||||
for i := 0; i < src.Len(); i++ {
|
||||
dst.Index(i).Set(src.Index(i))
|
||||
}
|
||||
sortSliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) })
|
||||
ss.checkSort(dst)
|
||||
return dst.Interface()
|
||||
}
|
||||
func (ss sliceSorter) checkSort(v reflect.Value) {
|
||||
start := -1 // Start of a sequence of equal elements.
|
||||
for i := 1; i < v.Len(); i++ {
|
||||
if ss.less(v, i-1, i) {
|
||||
// Check that first and last elements in v[start:i] are equal.
|
||||
if start >= 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) {
|
||||
panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i)))
|
||||
}
|
||||
start = -1
|
||||
} else if start == -1 {
|
||||
start = i
|
||||
}
|
||||
}
|
||||
}
|
||||
func (ss sliceSorter) less(v reflect.Value, i, j int) bool {
|
||||
vx, vy := v.Index(i), v.Index(j)
|
||||
return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
|
||||
}
|
||||
|
||||
// SortMaps returns a Transformer option that flattens map[K]V types to be a
|
||||
// sorted []struct{K, V}. The less function must be of the form
|
||||
// "func(T, T) bool" which is used to sort any map with key K that is
|
||||
// assignable to T.
|
||||
//
|
||||
// Flattening the map into a slice has the property that cmp.Equal is able to
|
||||
// use Comparers on K or the K.Equal method if it exists.
|
||||
//
|
||||
// The less function must be:
|
||||
// • Deterministic: less(x, y) == less(x, y)
|
||||
// • Irreflexive: !less(x, x)
|
||||
// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
|
||||
// • Total: if x != y, then either less(x, y) or less(y, x)
|
||||
//
|
||||
// SortMaps can be used in conjunction with EquateEmpty.
|
||||
func SortMaps(less interface{}) cmp.Option {
|
||||
vf := reflect.ValueOf(less)
|
||||
if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
|
||||
panic(fmt.Sprintf("invalid less function: %T", less))
|
||||
}
|
||||
ms := mapSorter{vf.Type().In(0), vf}
|
||||
return cmp.FilterValues(ms.filter, cmp.Transformer("Sort", ms.sort))
|
||||
}
|
||||
|
||||
type mapSorter struct {
|
||||
in reflect.Type // T
|
||||
fnc reflect.Value // func(T, T) bool
|
||||
}
|
||||
|
||||
func (ms mapSorter) filter(x, y interface{}) bool {
|
||||
vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
|
||||
return (x != nil && y != nil && vx.Type() == vy.Type()) &&
|
||||
(vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) &&
|
||||
(vx.Len() != 0 || vy.Len() != 0)
|
||||
}
|
||||
func (ms mapSorter) sort(x interface{}) interface{} {
|
||||
src := reflect.ValueOf(x)
|
||||
outType := mapEntryType(src.Type())
|
||||
dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len())
|
||||
for i, k := range src.MapKeys() {
|
||||
v := reflect.New(outType).Elem()
|
||||
v.Field(0).Set(k)
|
||||
v.Field(1).Set(src.MapIndex(k))
|
||||
dst.Index(i).Set(v)
|
||||
}
|
||||
sortSlice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) })
|
||||
ms.checkSort(dst)
|
||||
return dst.Interface()
|
||||
}
|
||||
func (ms mapSorter) checkSort(v reflect.Value) {
|
||||
for i := 1; i < v.Len(); i++ {
|
||||
if !ms.less(v, i-1, i) {
|
||||
panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i)))
|
||||
}
|
||||
}
|
||||
}
|
||||
func (ms mapSorter) less(v reflect.Value, i, j int) bool {
|
||||
vx, vy := v.Index(i).Field(0), v.Index(j).Field(0)
|
||||
if !hasReflectStructOf {
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
}
|
||||
return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
|
||||
}
|
||||
46  components/cli/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go17.go  generated  vendored
@ -1,46 +0,0 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// +build !go1.8
|
||||
|
||||
package cmpopts
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
const hasReflectStructOf = false
|
||||
|
||||
func mapEntryType(reflect.Type) reflect.Type {
|
||||
return reflect.TypeOf(struct{ K, V interface{} }{})
|
||||
}
|
||||
|
||||
func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool {
|
||||
return sort.IsSorted(reflectSliceSorter{reflect.ValueOf(slice), less})
|
||||
}
|
||||
func sortSlice(slice interface{}, less func(i, j int) bool) {
|
||||
sort.Sort(reflectSliceSorter{reflect.ValueOf(slice), less})
|
||||
}
|
||||
func sortSliceStable(slice interface{}, less func(i, j int) bool) {
|
||||
sort.Stable(reflectSliceSorter{reflect.ValueOf(slice), less})
|
||||
}
|
||||
|
||||
type reflectSliceSorter struct {
|
||||
slice reflect.Value
|
||||
less func(i, j int) bool
|
||||
}
|
||||
|
||||
func (ss reflectSliceSorter) Len() int {
|
||||
return ss.slice.Len()
|
||||
}
|
||||
func (ss reflectSliceSorter) Less(i, j int) bool {
|
||||
return ss.less(i, j)
|
||||
}
|
||||
func (ss reflectSliceSorter) Swap(i, j int) {
|
||||
vi := ss.slice.Index(i).Interface()
|
||||
vj := ss.slice.Index(j).Interface()
|
||||
ss.slice.Index(i).Set(reflect.ValueOf(vj))
|
||||
ss.slice.Index(j).Set(reflect.ValueOf(vi))
|
||||
}
|
||||
31  components/cli/vendor/github.com/google/go-cmp/cmp/cmpopts/sort_go18.go  generated  vendored
@ -1,31 +0,0 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// +build go1.8
|
||||
|
||||
package cmpopts
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
const hasReflectStructOf = true
|
||||
|
||||
func mapEntryType(t reflect.Type) reflect.Type {
|
||||
return reflect.StructOf([]reflect.StructField{
|
||||
{Name: "K", Type: t.Key()},
|
||||
{Name: "V", Type: t.Elem()},
|
||||
})
|
||||
}
|
||||
|
||||
func sliceIsSorted(slice interface{}, less func(i, j int) bool) bool {
|
||||
return sort.SliceIsSorted(slice, less)
|
||||
}
|
||||
func sortSlice(slice interface{}, less func(i, j int) bool) {
|
||||
sort.Slice(slice, less)
|
||||
}
|
||||
func sortSliceStable(slice interface{}, less func(i, j int) bool) {
|
||||
sort.SliceStable(slice, less)
|
||||
}
|
||||
182  components/cli/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go  generated  vendored
@ -1,182 +0,0 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package cmpopts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
// filterField returns a new Option where opt is only evaluated on paths that
|
||||
// include a specific exported field on a single struct type.
|
||||
// The struct type is specified by passing in a value of that type.
|
||||
//
|
||||
// The name may be a dot-delimited string (e.g., "Foo.Bar") to select a
|
||||
// specific sub-field that is embedded or nested within the parent struct.
|
||||
func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option {
|
||||
// TODO: This is currently unexported over concerns of how helper filters
|
||||
// can be composed together easily.
|
||||
// TODO: Add tests for FilterField.
|
||||
|
||||
sf := newStructFilter(typ, name)
|
||||
return cmp.FilterPath(sf.filter, opt)
|
||||
}
|
||||
|
||||
type structFilter struct {
|
||||
t reflect.Type // The root struct type to match on
|
||||
ft fieldTree // Tree of fields to match on
|
||||
}
|
||||
|
||||
func newStructFilter(typ interface{}, names ...string) structFilter {
|
||||
// TODO: Perhaps allow * as a special identifier to allow ignoring any
|
||||
// number of path steps until the next field match?
|
||||
// This could be useful when a concrete struct gets transformed into
|
||||
// an anonymous struct where it is not possible to specify that by type,
|
||||
// but the transformer happens to provide guarantees about the names of
|
||||
// the transformed fields.
|
||||
|
||||
t := reflect.TypeOf(typ)
|
||||
if t == nil || t.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%T must be a struct", typ))
|
||||
}
|
||||
var ft fieldTree
|
||||
for _, name := range names {
|
||||
cname, err := canonicalName(t, name)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err))
|
||||
}
|
||||
ft.insert(cname)
|
||||
}
|
||||
return structFilter{t, ft}
|
||||
}
|
||||
|
||||
func (sf structFilter) filter(p cmp.Path) bool {
|
||||
for i, ps := range p {
|
||||
if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// fieldTree represents a set of dot-separated identifiers.
|
||||
//
|
||||
// For example, inserting the following selectors:
|
||||
// Foo
|
||||
// Foo.Bar.Baz
|
||||
// Foo.Buzz
|
||||
// Nuka.Cola.Quantum
|
||||
//
|
||||
// Results in a tree of the form:
|
||||
// {sub: {
|
||||
// "Foo": {ok: true, sub: {
|
||||
// "Bar": {sub: {
|
||||
// "Baz": {ok: true},
|
||||
// }},
|
||||
// "Buzz": {ok: true},
|
||||
// }},
|
||||
// "Nuka": {sub: {
|
||||
// "Cola": {sub: {
|
||||
// "Quantum": {ok: true},
|
||||
// }},
|
||||
// }},
|
||||
// }}
|
||||
type fieldTree struct {
|
||||
ok bool // Whether this is a specified node
|
||||
sub map[string]fieldTree // The sub-tree of fields under this node
|
||||
}
|
||||
|
||||
// insert inserts a sequence of field accesses into the tree.
|
||||
func (ft *fieldTree) insert(cname []string) {
|
||||
if ft.sub == nil {
|
||||
ft.sub = make(map[string]fieldTree)
|
||||
}
|
||||
if len(cname) == 0 {
|
||||
ft.ok = true
|
||||
return
|
||||
}
|
||||
sub := ft.sub[cname[0]]
|
||||
sub.insert(cname[1:])
|
||||
ft.sub[cname[0]] = sub
|
||||
}
|
||||
|
||||
// matchPrefix reports whether any selector in the fieldTree matches
|
||||
// the start of path p.
|
||||
func (ft fieldTree) matchPrefix(p cmp.Path) bool {
|
||||
for _, ps := range p {
|
||||
switch ps := ps.(type) {
|
||||
case cmp.StructField:
|
||||
ft = ft.sub[ps.Name()]
|
||||
if ft.ok {
|
||||
return true
|
||||
}
|
||||
if len(ft.sub) == 0 {
|
||||
return false
|
||||
}
|
||||
case cmp.Indirect:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// canonicalName returns a list of identifiers where any struct field access
|
||||
// through an embedded field is expanded to include the names of the embedded
|
||||
// types themselves.
|
||||
//
|
||||
// For example, suppose field "Foo" is not directly in the parent struct,
|
||||
// but actually from an embedded struct of type "Bar". Then, the canonical name
|
||||
// of "Foo" is actually "Bar.Foo".
|
||||
//
|
||||
// Suppose field "Foo" is not directly in the parent struct, but actually
|
||||
// a field in two different embedded structs of types "Bar" and "Baz".
|
||||
// Then the selector "Foo" causes a panic since it is ambiguous which one it
|
||||
// refers to. The user must specify either "Bar.Foo" or "Baz.Foo".
|
||||
func canonicalName(t reflect.Type, sel string) ([]string, error) {
|
||||
var name string
|
||||
sel = strings.TrimPrefix(sel, ".")
|
||||
if sel == "" {
|
||||
return nil, fmt.Errorf("name must not be empty")
|
||||
}
|
||||
if i := strings.IndexByte(sel, '.'); i < 0 {
|
||||
name, sel = sel, ""
|
||||
} else {
|
||||
name, sel = sel[:i], sel[i:]
|
||||
}
|
||||
|
||||
// Type must be a struct or pointer to struct.
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
if t.Kind() != reflect.Struct {
|
||||
return nil, fmt.Errorf("%v must be a struct", t)
|
||||
}
|
||||
|
||||
// Find the canonical name for this current field name.
|
||||
// If the field exists in an embedded struct, then it will be expanded.
|
||||
if !isExported(name) {
|
||||
// Disallow unexported fields:
|
||||
// * To discourage people from actually touching unexported fields
|
||||
// * FieldByName is buggy (https://golang.org/issue/4876)
|
||||
return []string{name}, fmt.Errorf("name must be exported")
|
||||
}
|
||||
sf, ok := t.FieldByName(name)
|
||||
if !ok {
|
||||
return []string{name}, fmt.Errorf("does not exist")
|
||||
}
|
||||
var ss []string
|
||||
for i := range sf.Index {
|
||||
ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name)
|
||||
}
|
||||
if sel == "" {
|
||||
return ss, nil
|
||||
}
|
||||
ssPost, err := canonicalName(sf.Type, sel)
|
||||
return append(ss, ssPost...), err
|
||||
}
|
||||
18  components/cli/vendor/github.com/imdario/mergo/README.md  generated  vendored
@ -8,10 +8,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
|
||||
|
||||
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
|
||||
|
||||
[![Build Status][1]][2]
|
||||
[![GoDoc][3]][4]
|
||||
[![GoCard][5]][6]
|
||||
[![Build Status][1]][2]
|
||||
[![Coverage Status][7]][8]
|
||||
[![Sourcegraph][9]][10]
|
||||
|
||||
[1]: https://travis-ci.org/imdario/mergo.png
|
||||
[2]: https://travis-ci.org/imdario/mergo
|
||||
@ -21,18 +22,22 @@ It is ready for production use. [It is used in several projects by Docker, Googl
|
||||
[6]: https://goreportcard.com/report/github.com/imdario/mergo
|
||||
[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
|
||||
[8]: https://coveralls.io/github/imdario/mergo?branch=master
|
||||
[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
|
||||
[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
|
||||
|
||||
### Latest release
|
||||
|
||||
[Release 0.3.2](https://github.com/imdario/mergo/releases/tag/0.3.2) is an important release because it changes `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
|
||||
[Release v0.3.4](https://github.com/imdario/mergo/releases/tag/v0.3.4).
|
||||
|
||||
### Important note
|
||||
|
||||
Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
|
||||
|
||||
If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
|
||||
|
||||
### Donations
|
||||
|
||||
If Mergo is useful to you, consider buying me a coffe, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
|
||||
If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
|
||||
|
||||
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
|
||||
[](https://beerpay.io/imdario/mergo)
|
||||
@ -93,7 +98,7 @@ If Mergo is useful to you, consider buying me a coffe, a beer or making a monthl
|
||||
|
||||
## Usage
|
||||
|
||||
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
|
||||
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
|
||||
|
||||
```go
|
||||
if err := mergo.Merge(&dst, src); err != nil {
|
||||
@ -104,7 +109,7 @@ if err := mergo.Merge(&dst, src); err != nil {
|
||||
Also, you can merge overwriting values using the transformer `WithOverride`.
|
||||
|
||||
```go
|
||||
if err := mergo.Merge(&dst, src, WithOverride); err != nil {
|
||||
if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
@ -164,6 +169,7 @@ package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/imdario/mergo"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
@ -195,7 +201,7 @@ type Snapshot struct {
|
||||
func main() {
|
||||
src := Snapshot{time.Now()}
|
||||
dest := Snapshot{}
|
||||
mergo.Merge(&dest, src, WithTransformers(timeTransfomer{}))
|
||||
mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{}))
|
||||
fmt.Println(dest)
|
||||
// Will print
|
||||
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
|
||||
|
||||
14  components/cli/vendor/github.com/imdario/mergo/map.go  generated  vendored
@ -31,8 +31,8 @@ func isExported(field reflect.StructField) bool {
|
||||
// Traverses recursively both values, assigning src's fields values to dst.
|
||||
// The map argument tracks comparisons that have already been seen, which allows
|
||||
// short circuiting on recursive types.
|
||||
func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *config) (err error) {
|
||||
overwrite := config.overwrite
|
||||
func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
|
||||
overwrite := config.Overwrite
|
||||
if dst.CanAddr() {
|
||||
addr := dst.UnsafeAddr()
|
||||
h := 17 * addr
|
||||
@ -128,23 +128,23 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
|
||||
// doesn't apply if dst is a map.
|
||||
// This is separated method from Merge because it is cleaner and it keeps sane
|
||||
// semantics: merging equal types, mapping different (restricted) types.
|
||||
func Map(dst, src interface{}, opts ...func(*config)) error {
|
||||
func Map(dst, src interface{}, opts ...func(*Config)) error {
|
||||
return _map(dst, src, opts...)
|
||||
}
|
||||
|
||||
// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overriden by
|
||||
// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
|
||||
// non-empty src attribute values.
|
||||
// Deprecated: Use Map(…) with WithOverride
|
||||
func MapWithOverwrite(dst, src interface{}, opts ...func(*config)) error {
|
||||
func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
|
||||
return _map(dst, src, append(opts, WithOverride)...)
|
||||
}
|
||||
|
||||
func _map(dst, src interface{}, opts ...func(*config)) error {
|
||||
func _map(dst, src interface{}, opts ...func(*Config)) error {
|
||||
var (
|
||||
vDst, vSrc reflect.Value
|
||||
err error
|
||||
)
|
||||
config := &config{}
|
||||
config := &Config{}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(config)
|
||||
|
||||
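For context on the `Map` signature change in the hunk above, a hedged usage sketch: `Map` populates a `map[string]interface{}` from a struct's exported fields, with mergo's documented behaviour of using lowercased field names as keys. The `Student` type and values here are illustrative.

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Student struct {
	Name string
	Age  int
}

func main() {
	src := Student{Name: "Ada", Age: 36}
	dst := map[string]interface{}{}

	// Map copies the struct's exported fields into the map.
	if err := mergo.Map(&dst, src); err != nil {
		panic(err)
	}
	fmt.Println(dst) // map[age:36 name:Ada]
}
```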
56
components/cli/vendor/github.com/imdario/mergo/merge.go
generated
vendored
@ -8,7 +8,9 @@
|
||||
|
||||
package mergo
|
||||
|
||||
import "reflect"
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
func hasExportedField(dst reflect.Value) (exported bool) {
|
||||
for i, n := 0, dst.NumField(); i < n; i++ {
|
||||
@ -22,20 +24,21 @@ func hasExportedField(dst reflect.Value) (exported bool) {
|
||||
return
|
||||
}
|
||||
|
||||
type config struct {
|
||||
overwrite bool
|
||||
transformers transformers
|
||||
type Config struct {
|
||||
Overwrite bool
|
||||
AppendSlice bool
|
||||
Transformers Transformers
|
||||
}
|
||||
|
||||
type transformers interface {
|
||||
type Transformers interface {
|
||||
Transformer(reflect.Type) func(dst, src reflect.Value) error
|
||||
}
|
||||
|
||||
// Traverses recursively both values, assigning src's fields values to dst.
|
||||
// The map argument tracks comparisons that have already been seen, which allows
|
||||
// short circuiting on recursive types.
|
||||
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *config) (err error) {
|
||||
overwrite := config.overwrite
|
||||
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
|
||||
overwrite := config.Overwrite
|
||||
|
||||
if !src.IsValid() {
|
||||
return
|
||||
@ -54,8 +57,8 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
|
||||
visited[h] = &visit{addr, typ, seen}
|
||||
}
|
||||
|
||||
if config.transformers != nil && !isEmptyValue(dst) {
|
||||
if fn := config.transformers.Transformer(dst.Type()); fn != nil {
|
||||
if config.Transformers != nil && !isEmptyValue(dst) {
|
||||
if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
|
||||
err = fn(dst, src)
|
||||
return
|
||||
}
|
||||
@ -75,9 +78,8 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
if len(src.MapKeys()) == 0 && !src.IsNil() && len(dst.MapKeys()) == 0 {
|
||||
if dst.IsNil() && !src.IsNil() {
|
||||
dst.Set(reflect.MakeMap(dst.Type()))
|
||||
return
|
||||
}
|
||||
for _, key := range src.MapKeys() {
|
||||
srcElement := src.MapIndex(key)
|
||||
@ -130,7 +132,14 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
dst.Set(reflect.AppendSlice(dst, src))
|
||||
if !dst.CanSet() {
|
||||
break
|
||||
}
|
||||
if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
|
||||
dst.Set(src)
|
||||
} else {
|
||||
dst.Set(reflect.AppendSlice(dst, src))
|
||||
}
|
||||
case reflect.Ptr:
|
||||
fallthrough
|
||||
case reflect.Interface:
|
||||
@ -174,36 +183,41 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
|
||||
// src attributes if they themselves are not empty. dst and src must be valid same-type structs
|
||||
// and dst must be a pointer to struct.
|
||||
// It won't merge unexported (private) fields and will do recursively any exported field.
|
||||
func Merge(dst, src interface{}, opts ...func(*config)) error {
|
||||
func Merge(dst, src interface{}, opts ...func(*Config)) error {
|
||||
return merge(dst, src, opts...)
|
||||
}
|
||||
|
||||
// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by
|
||||
// non-empty src attribute values.
|
||||
// Deprecated: use Merge(…) with WithOverride
|
||||
func MergeWithOverwrite(dst, src interface{}, opts ...func(*config)) error {
|
||||
func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
|
||||
return merge(dst, src, append(opts, WithOverride)...)
|
||||
}
|
||||
|
||||
// WithTransformers adds transformers to merge, allowing to customize the merging of some types.
|
||||
func WithTransformers(transformers transformers) func(*config) {
|
||||
return func(config *config) {
|
||||
config.transformers = transformers
|
||||
func WithTransformers(transformers Transformers) func(*Config) {
|
||||
return func(config *Config) {
|
||||
config.Transformers = transformers
|
||||
}
|
||||
}
|
||||
|
||||
// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
|
||||
func WithOverride(config *config) {
|
||||
config.overwrite = true
|
||||
func WithOverride(config *Config) {
|
||||
config.Overwrite = true
|
||||
}
|
||||
|
||||
func merge(dst, src interface{}, opts ...func(*config)) error {
|
||||
// WithAppendSlice will make merge append slices instead of overwriting it
|
||||
func WithAppendSlice(config *Config) {
|
||||
config.AppendSlice = true
|
||||
}
|
||||
|
||||
func merge(dst, src interface{}, opts ...func(*Config)) error {
|
||||
var (
|
||||
vDst, vSrc reflect.Value
|
||||
err error
|
||||
)
|
||||
|
||||
config := &config{}
|
||||
config := &Config{}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(config)
|
||||
|
||||
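A rough usage sketch of the new slice handling and the `WithAppendSlice` option added above; the `Bag` type and values are illustrative, and the expected results are read off the slice case in `deepMerge` shown in this hunk.

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Bag struct {
	Items []string
}

func main() {
	src := Bag{Items: []string{"c", "d"}}

	// Empty dst slice: the src slice is copied in.
	dst := Bag{}
	_ = mergo.Merge(&dst, src)
	fmt.Println(dst.Items) // [c d]

	// WithOverride: a non-empty dst slice is replaced by src.
	dst = Bag{Items: []string{"a", "b"}}
	_ = mergo.Merge(&dst, src, mergo.WithOverride)
	fmt.Println(dst.Items) // [c d]

	// WithAppendSlice: src elements are appended to dst instead.
	dst = Bag{Items: []string{"a", "b"}}
	_ = mergo.Merge(&dst, src, mergo.WithAppendSlice)
	fmt.Println(dst.Items) // [a b c d]
}
```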
6
components/cli/vendor/github.com/miekg/pkcs11/README.md
generated
vendored
@ -1,4 +1,4 @@
|
||||
# PKCS#11 [](https://travis-ci.org/miekg/pkcs11) [](http://godoc.org/github.com/miekg/pkcs11)
|
||||
# PKCS#11 [](https://travis-ci.org/miekg/pkcs11)
|
||||
|
||||
This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom
|
||||
were it makes sense. It has been tested with SoftHSM.
|
||||
@ -58,10 +58,6 @@ A skeleton program would look somewhat like this (yes, pkcs#11 is verbose):
|
||||
|
||||
Further examples are included in the tests.
|
||||
|
||||
To expose PKCS#11 keys using the
|
||||
[crypto.Signer interface](https://golang.org/pkg/crypto/#Signer),
|
||||
please see [github.com/thalesignite/crypto11](https://github.com/thalesignite/crypto11).
|
||||
|
||||
# TODO
|
||||
|
||||
* Fix/double check endian stuff, see types.go NewAttribute()
|
||||
|
||||
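The "skeleton program" that the README refers to is not reproduced in this hunk; a hedged sketch of a minimal program against this package's public API looks roughly like the following (the module path and PIN below are placeholders):

```go
package main

import (
	"fmt"

	"github.com/miekg/pkcs11"
)

func main() {
	// The PKCS#11 module path is installation-specific (placeholder below).
	p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
	if p == nil {
		panic("failed to load module")
	}
	defer p.Destroy()

	if err := p.Initialize(); err != nil {
		panic(err)
	}
	defer p.Finalize()

	slots, err := p.GetSlotList(true)
	if err != nil {
		panic(err)
	}

	session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
	if err != nil {
		panic(err)
	}
	defer p.CloseSession(session)

	if err := p.Login(session, pkcs11.CKU_USER, "1234"); err != nil {
		panic(err)
	}
	defer p.Logout(session)

	// Digest a short message with SHA-1 as a smoke test.
	if err := p.DigestInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA_1, nil)}); err != nil {
		panic(err)
	}
	hash, err := p.Digest(session, []byte("this is a string"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", hash)
}
```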
118
components/cli/vendor/github.com/miekg/pkcs11/const.go
generated
vendored
@ -23,19 +23,7 @@ const (
|
||||
CKO_VENDOR_DEFINED uint = 0x80000000
|
||||
)
|
||||
|
||||
const (
|
||||
CKG_MGF1_SHA1 uint = 0x00000001
|
||||
CKG_MGF1_SHA224 uint = 0x00000005
|
||||
CKG_MGF1_SHA256 uint = 0x00000002
|
||||
CKG_MGF1_SHA384 uint = 0x00000003
|
||||
CKG_MGF1_SHA512 uint = 0x00000004
|
||||
)
|
||||
|
||||
const (
|
||||
CKZ_DATA_SPECIFIED uint = 0x00000001
|
||||
)
|
||||
|
||||
// Generated with: awk '/#define CK[AFKMRC]/{ print $2 " = " $3 }' pkcs11t.h | sed -e 's/UL$//g' -e 's/UL)$/)/g'
|
||||
// Generated with: awk '/#define CK[AFKMRC]/{ print $2 "=" $3 }' pkcs11t.h
|
||||
|
||||
// All the flag (CKF_), attribute (CKA_), error code (CKR_), key type (CKK_), certificate type (CKC_) and
|
||||
// mechanism (CKM_) constants as defined in PKCS#11.
|
||||
@ -61,7 +49,6 @@ const (
|
||||
CKF_SO_PIN_FINAL_TRY = 0x00200000
|
||||
CKF_SO_PIN_LOCKED = 0x00400000
|
||||
CKF_SO_PIN_TO_BE_CHANGED = 0x00800000
|
||||
CKF_ERROR_STATE = 0x01000000
|
||||
CKF_RW_SESSION = 0x00000002
|
||||
CKF_SERIAL_SESSION = 0x00000004
|
||||
CKK_RSA = 0x00000000
|
||||
@ -95,18 +82,6 @@ const (
|
||||
CKK_ACTI = 0x00000024
|
||||
CKK_CAMELLIA = 0x00000025
|
||||
CKK_ARIA = 0x00000026
|
||||
CKK_SHA512_224_HMAC = 0x00000027
|
||||
CKK_SHA512_256_HMAC = 0x00000028
|
||||
CKK_SHA512_T_HMAC = 0x00000029
|
||||
CKK_SHA_1_HMAC = 0x00000028
|
||||
CKK_SHA224_HMAC = 0x0000002E
|
||||
CKK_SHA256_HMAC = 0x0000002B
|
||||
CKK_SHA384_HMAC = 0x0000002C
|
||||
CKK_SHA512_HMAC = 0x0000002D
|
||||
CKK_SEED = 0x00000050
|
||||
CKK_GOSTR3410 = 0x00000060
|
||||
CKK_GOSTR3411 = 0x00000061
|
||||
CKK_GOST28147 = 0x00000062
|
||||
CKK_VENDOR_DEFINED = 0x80000000
|
||||
CKC_X_509 = 0x00000000
|
||||
CKC_X_509_ATTR_CERT = 0x00000001
|
||||
@ -132,7 +107,6 @@ const (
|
||||
CKA_URL = 0x00000089
|
||||
CKA_HASH_OF_SUBJECT_PUBLIC_KEY = 0x0000008A
|
||||
CKA_HASH_OF_ISSUER_PUBLIC_KEY = 0x0000008B
|
||||
CKA_NAME_HASH_ALGORITHM = 0x0000008C
|
||||
CKA_CHECK_VALUE = 0x00000090
|
||||
CKA_KEY_TYPE = 0x00000100
|
||||
CKA_SUBJECT = 0x00000101
|
||||
@ -158,7 +132,6 @@ const (
|
||||
CKA_EXPONENT_1 = 0x00000126
|
||||
CKA_EXPONENT_2 = 0x00000127
|
||||
CKA_COEFFICIENT = 0x00000128
|
||||
CKA_PUBLIC_KEY_INFO = 0x00000129
|
||||
CKA_PRIME = 0x00000130
|
||||
CKA_SUBPRIME = 0x00000131
|
||||
CKA_BASE = 0x00000132
|
||||
@ -173,8 +146,6 @@ const (
|
||||
CKA_ALWAYS_SENSITIVE = 0x00000165
|
||||
CKA_KEY_GEN_MECHANISM = 0x00000166
|
||||
CKA_MODIFIABLE = 0x00000170
|
||||
CKA_COPYABLE = 0x00000171
|
||||
CKA_DESTROYABLE = 0x00000172
|
||||
CKA_ECDSA_PARAMS = 0x00000180
|
||||
CKA_EC_PARAMS = 0x00000180
|
||||
CKA_EC_POINT = 0x00000181
|
||||
@ -198,9 +169,6 @@ const (
|
||||
CKA_OTP_SERVICE_IDENTIFIER = 0x0000022B
|
||||
CKA_OTP_SERVICE_LOGO = 0x0000022C
|
||||
CKA_OTP_SERVICE_LOGO_TYPE = 0x0000022D
|
||||
CKA_GOSTR3410_PARAMS = 0x00000250
|
||||
CKA_GOSTR3411_PARAMS = 0x00000251
|
||||
CKA_GOST28147_PARAMS = 0x00000252
|
||||
CKA_HW_FEATURE_TYPE = 0x00000300
|
||||
CKA_RESET_ON_INIT = 0x00000301
|
||||
CKA_HAS_RESET = 0x00000302
|
||||
@ -238,11 +206,6 @@ const (
|
||||
CKM_DSA_KEY_PAIR_GEN = 0x00000010
|
||||
CKM_DSA = 0x00000011
|
||||
CKM_DSA_SHA1 = 0x00000012
|
||||
CKM_DSA_FIPS_G_GEN = 0x00000013
|
||||
CKM_DSA_SHA224 = 0x00000014
|
||||
CKM_DSA_SHA256 = 0x00000015
|
||||
CKM_DSA_SHA384 = 0x00000016
|
||||
CKM_DSA_SHA512 = 0x00000017
|
||||
CKM_DH_PKCS_KEY_PAIR_GEN = 0x00000020
|
||||
CKM_DH_PKCS_DERIVE = 0x00000021
|
||||
CKM_X9_42_DH_KEY_PAIR_GEN = 0x00000030
|
||||
@ -257,18 +220,6 @@ const (
|
||||
CKM_SHA512_RSA_PKCS_PSS = 0x00000045
|
||||
CKM_SHA224_RSA_PKCS = 0x00000046
|
||||
CKM_SHA224_RSA_PKCS_PSS = 0x00000047
|
||||
CKM_SHA512_224 = 0x00000048
|
||||
CKM_SHA512_224_HMAC = 0x00000049
|
||||
CKM_SHA512_224_HMAC_GENERAL = 0x0000004A
|
||||
CKM_SHA512_224_KEY_DERIVATION = 0x0000004B
|
||||
CKM_SHA512_256 = 0x0000004C
|
||||
CKM_SHA512_256_HMAC = 0x0000004D
|
||||
CKM_SHA512_256_HMAC_GENERAL = 0x0000004E
|
||||
CKM_SHA512_256_KEY_DERIVATION = 0x0000004F
|
||||
CKM_SHA512_T = 0x00000050
|
||||
CKM_SHA512_T_HMAC = 0x00000051
|
||||
CKM_SHA512_T_HMAC_GENERAL = 0x00000052
|
||||
CKM_SHA512_T_KEY_DERIVATION = 0x00000053
|
||||
CKM_RC2_KEY_GEN = 0x00000100
|
||||
CKM_RC2_ECB = 0x00000101
|
||||
CKM_RC2_CBC = 0x00000102
|
||||
@ -290,8 +241,6 @@ const (
|
||||
CKM_DES3_MAC = 0x00000134
|
||||
CKM_DES3_MAC_GENERAL = 0x00000135
|
||||
CKM_DES3_CBC_PAD = 0x00000136
|
||||
CKM_DES3_CMAC_GENERAL = 0x00000137
|
||||
CKM_DES3_CMAC = 0x00000138
|
||||
CKM_CDMF_KEY_GEN = 0x00000140
|
||||
CKM_CDMF_ECB = 0x00000141
|
||||
CKM_CDMF_CBC = 0x00000142
|
||||
@ -417,16 +366,6 @@ const (
|
||||
CKM_WTLS_PRF = 0x000003D3
|
||||
CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE = 0x000003D4
|
||||
CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE = 0x000003D5
|
||||
CKM_TLS10_MAC_SERVER = 0x000003D6
|
||||
CKM_TLS10_MAC_CLIENT = 0x000003D7
|
||||
CKM_TLS12_MAC = 0x000003D8
|
||||
CKM_TLS12_KDF = 0x000003D9
|
||||
CKM_TLS12_MASTER_KEY_DERIVE = 0x000003E0
|
||||
CKM_TLS12_KEY_AND_MAC_DERIVE = 0x000003E1
|
||||
CKM_TLS12_MASTER_KEY_DERIVE_DH = 0x000003E2
|
||||
CKM_TLS12_KEY_SAFE_DERIVE = 0x000003E3
|
||||
CKM_TLS_MAC = 0x000003E4
|
||||
CKM_TLS_KDF = 0x000003E5
|
||||
CKM_KEY_WRAP_LYNKS = 0x00000400
|
||||
CKM_KEY_WRAP_SET_OAEP = 0x00000401
|
||||
CKM_CMS_SIG = 0x00000500
|
||||
@ -450,14 +389,6 @@ const (
|
||||
CKM_ARIA_CBC_PAD = 0x00000565
|
||||
CKM_ARIA_ECB_ENCRYPT_DATA = 0x00000566
|
||||
CKM_ARIA_CBC_ENCRYPT_DATA = 0x00000567
|
||||
CKM_SEED_KEY_GEN = 0x00000650
|
||||
CKM_SEED_ECB = 0x00000651
|
||||
CKM_SEED_CBC = 0x00000652
|
||||
CKM_SEED_MAC = 0x00000653
|
||||
CKM_SEED_MAC_GENERAL = 0x00000654
|
||||
CKM_SEED_CBC_PAD = 0x00000655
|
||||
CKM_SEED_ECB_ENCRYPT_DATA = 0x00000656
|
||||
CKM_SEED_CBC_ENCRYPT_DATA = 0x00000657
|
||||
CKM_SKIPJACK_KEY_GEN = 0x00001000
|
||||
CKM_SKIPJACK_ECB64 = 0x00001001
|
||||
CKM_SKIPJACK_CBC64 = 0x00001002
|
||||
@ -471,7 +402,6 @@ const (
|
||||
CKM_SKIPJACK_RELAYX = 0x0000100a
|
||||
CKM_KEA_KEY_PAIR_GEN = 0x00001010
|
||||
CKM_KEA_KEY_DERIVE = 0x00001011
|
||||
CKM_KEA_DERIVE = 0x00001012
|
||||
CKM_FORTEZZA_TIMESTAMP = 0x00001020
|
||||
CKM_BATON_KEY_GEN = 0x00001030
|
||||
CKM_BATON_ECB128 = 0x00001031
|
||||
@ -484,15 +414,9 @@ const (
|
||||
CKM_EC_KEY_PAIR_GEN = 0x00001040
|
||||
CKM_ECDSA = 0x00001041
|
||||
CKM_ECDSA_SHA1 = 0x00001042
|
||||
CKM_ECDSA_SHA224 = 0x00001043
|
||||
CKM_ECDSA_SHA256 = 0x00001044
|
||||
CKM_ECDSA_SHA384 = 0x00001045
|
||||
CKM_ECDSA_SHA512 = 0x00001046
|
||||
CKM_ECDH1_DERIVE = 0x00001050
|
||||
CKM_ECDH1_COFACTOR_DERIVE = 0x00001051
|
||||
CKM_ECMQV_DERIVE = 0x00001052
|
||||
CKM_ECDH_AES_KEY_WRAP = 0x00001053
|
||||
CKM_RSA_AES_KEY_WRAP = 0x00001054
|
||||
CKM_JUNIPER_KEY_GEN = 0x00001060
|
||||
CKM_JUNIPER_ECB128 = 0x00001061
|
||||
CKM_JUNIPER_CBC128 = 0x00001062
|
||||
@ -507,52 +431,19 @@ const (
|
||||
CKM_AES_MAC_GENERAL = 0x00001084
|
||||
CKM_AES_CBC_PAD = 0x00001085
|
||||
CKM_AES_CTR = 0x00001086
|
||||
CKM_AES_GCM = 0x00001087
|
||||
CKM_AES_CCM = 0x00001088
|
||||
CKM_AES_CMAC_GENERAL = 0x00001089
|
||||
CKM_AES_CMAC = 0x0000108A
|
||||
CKM_AES_CTS = 0x0000108B
|
||||
CKM_AES_XCBC_MAC = 0x0000108C
|
||||
CKM_AES_XCBC_MAC_96 = 0x0000108D
|
||||
CKM_AES_GMAC = 0x0000108E
|
||||
CKM_BLOWFISH_KEY_GEN = 0x00001090
|
||||
CKM_BLOWFISH_CBC = 0x00001091
|
||||
CKM_TWOFISH_KEY_GEN = 0x00001092
|
||||
CKM_TWOFISH_CBC = 0x00001093
|
||||
CKM_BLOWFISH_CBC_PAD = 0x00001094
|
||||
CKM_TWOFISH_CBC_PAD = 0x00001095
|
||||
CKM_DES_ECB_ENCRYPT_DATA = 0x00001100
|
||||
CKM_DES_CBC_ENCRYPT_DATA = 0x00001101
|
||||
CKM_DES3_ECB_ENCRYPT_DATA = 0x00001102
|
||||
CKM_DES3_CBC_ENCRYPT_DATA = 0x00001103
|
||||
CKM_AES_ECB_ENCRYPT_DATA = 0x00001104
|
||||
CKM_AES_CBC_ENCRYPT_DATA = 0x00001105
|
||||
CKM_GOSTR3410_KEY_PAIR_GEN = 0x00001200
|
||||
CKM_GOSTR3410 = 0x00001201
|
||||
CKM_GOSTR3410_WITH_GOSTR3411 = 0x00001202
|
||||
CKM_GOSTR3410_KEY_WRAP = 0x00001203
|
||||
CKM_GOSTR3410_DERIVE = 0x00001204
|
||||
CKM_GOSTR3411 = 0x00001210
|
||||
CKM_GOSTR3411_HMAC = 0x00001211
|
||||
CKM_GOST28147_KEY_GEN = 0x00001220
|
||||
CKM_GOST28147_ECB = 0x00001221
|
||||
CKM_GOST28147 = 0x00001222
|
||||
CKM_GOST28147_MAC = 0x00001223
|
||||
CKM_GOST28147_KEY_WRAP = 0x00001224
|
||||
CKM_DSA_PARAMETER_GEN = 0x00002000
|
||||
CKM_DH_PKCS_PARAMETER_GEN = 0x00002001
|
||||
CKM_X9_42_DH_PARAMETER_GEN = 0x00002002
|
||||
CKM_DSA_PROBABLISTIC_PARAMETER_GEN = 0x00002003
|
||||
CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN = 0x00002004
|
||||
CKM_AES_OFB = 0x00002104
|
||||
CKM_AES_CFB64 = 0x00002105
|
||||
CKM_AES_CFB8 = 0x00002106
|
||||
CKM_AES_CFB128 = 0x00002107
|
||||
CKM_AES_CFB1 = 0x00002108
|
||||
CKM_AES_KEY_WRAP = 0x00002109
|
||||
CKM_AES_KEY_WRAP_PAD = 0x0000210A
|
||||
CKM_RSA_PKCS_TPM_1_1 = 0x00004001
|
||||
CKM_RSA_PKCS_OAEP_TPM_1_1 = 0x00004002
|
||||
CKM_VENDOR_DEFINED = 0x80000000
|
||||
CKF_HW = 0x00000001
|
||||
CKF_ENCRYPT = 0x00000100
|
||||
@ -588,7 +479,6 @@ const (
|
||||
CKR_ATTRIBUTE_SENSITIVE = 0x00000011
|
||||
CKR_ATTRIBUTE_TYPE_INVALID = 0x00000012
|
||||
CKR_ATTRIBUTE_VALUE_INVALID = 0x00000013
|
||||
CKR_ACTION_PROHIBITED = 0x0000001B
|
||||
CKR_DATA_INVALID = 0x00000020
|
||||
CKR_DATA_LEN_RANGE = 0x00000021
|
||||
CKR_DEVICE_ERROR = 0x00000030
|
||||
@ -651,7 +541,6 @@ const (
|
||||
CKR_RANDOM_SEED_NOT_SUPPORTED = 0x00000120
|
||||
CKR_RANDOM_NO_RNG = 0x00000121
|
||||
CKR_DOMAIN_PARAMS_INVALID = 0x00000130
|
||||
CKR_CURVE_NOT_SUPPORTED = 0x00000140
|
||||
CKR_BUFFER_TOO_SMALL = 0x00000150
|
||||
CKR_SAVED_STATE_INVALID = 0x00000160
|
||||
CKR_INFORMATION_SENSITIVE = 0x00000170
|
||||
@ -662,11 +551,6 @@ const (
|
||||
CKR_MUTEX_NOT_LOCKED = 0x000001A1
|
||||
CKR_NEW_PIN_MODE = 0x000001B0
|
||||
CKR_NEXT_OTP = 0x000001B1
|
||||
CKR_EXCEEDED_MAX_ITERATIONS = 0x000001C0
|
||||
CKR_FIPS_SELF_TEST_FAILED = 0x000001C1
|
||||
CKR_LIBRARY_LOAD_FAILED = 0x000001C2
|
||||
CKR_PIN_TOO_WEAK = 0x000001C3
|
||||
CKR_PUBLIC_KEY_INVALID = 0x000001C4
|
||||
CKR_FUNCTION_REJECTED = 0x00000200
|
||||
CKR_VENDOR_DEFINED = 0x80000000
|
||||
CKF_LIBRARY_CANT_CREATE_OS_THREADS = 0x00000001
|
||||
|
||||
215
components/cli/vendor/github.com/miekg/pkcs11/pkcs11.go
generated
vendored
@ -11,21 +11,21 @@ package pkcs11
|
||||
// * CK_ULONG never overflows an Go int
|
||||
|
||||
/*
|
||||
#cgo windows CFLAGS: -DREPACK_STRUCTURES
|
||||
#cgo windows LDFLAGS: -lltdl
|
||||
#cgo linux LDFLAGS: -lltdl -ldl
|
||||
#cgo darwin CFLAGS: -I/usr/local/share/libtool
|
||||
#cgo darwin LDFLAGS: -lltdl -L/usr/local/lib/
|
||||
#cgo openbsd CFLAGS: -I/usr/local/include/
|
||||
#cgo openbsd LDFLAGS: -lltdl -L/usr/local/lib/
|
||||
#cgo LDFLAGS: -lltdl
|
||||
#define CK_PTR *
|
||||
#ifndef NULL_PTR
|
||||
#define NULL_PTR 0
|
||||
#endif
|
||||
#define CK_DEFINE_FUNCTION(returnType, name) returnType name
|
||||
#define CK_DECLARE_FUNCTION(returnType, name) returnType name
|
||||
#define CK_DECLARE_FUNCTION_POINTER(returnType, name) returnType (* name)
|
||||
#define CK_CALLBACK_FUNCTION(returnType, name) returnType (* name)
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <ltdl.h>
|
||||
#include <unistd.h>
|
||||
#include "pkcs11go.h"
|
||||
#include "pkcs11.h"
|
||||
|
||||
struct ctx {
|
||||
lt_dlhandle handle;
|
||||
@ -70,12 +70,9 @@ void Destroy(struct ctx *c)
|
||||
free(c);
|
||||
}
|
||||
|
||||
CK_RV Initialize(struct ctx * c)
|
||||
CK_RV Initialize(struct ctx * c, CK_VOID_PTR initArgs)
|
||||
{
|
||||
CK_C_INITIALIZE_ARGS args;
|
||||
memset(&args, 0, sizeof(args));
|
||||
args.flags = CKF_OS_LOCKING_OK;
|
||||
return c->sym->C_Initialize(&args);
|
||||
return c->sym->C_Initialize(initArgs);
|
||||
}
|
||||
|
||||
CK_RV Finalize(struct ctx * c)
|
||||
@ -83,19 +80,9 @@ CK_RV Finalize(struct ctx * c)
|
||||
return c->sym->C_Finalize(NULL);
|
||||
}
|
||||
|
||||
CK_RV GetInfo(struct ctx * c, ckInfoPtr info)
|
||||
CK_RV GetInfo(struct ctx * c, CK_INFO_PTR info)
|
||||
{
|
||||
CK_INFO p;
|
||||
CK_RV e = c->sym->C_GetInfo(&p);
|
||||
if (e != CKR_OK) {
|
||||
return e;
|
||||
}
|
||||
info->cryptokiVersion = p.cryptokiVersion;
|
||||
memcpy(info->manufacturerID, p.manufacturerID, sizeof(p.manufacturerID));
|
||||
info->flags = p.flags;
|
||||
memcpy(info->libraryDescription, p.libraryDescription, sizeof(p.libraryDescription));
|
||||
info->libraryVersion = p.libraryVersion;
|
||||
return e;
|
||||
return c->sym->C_GetInfo(info);
|
||||
}
|
||||
|
||||
CK_RV GetSlotList(struct ctx * c, CK_BBOOL tokenPresent,
|
||||
@ -127,8 +114,7 @@ CK_RV GetMechanismList(struct ctx * c, CK_ULONG slotID,
|
||||
{
|
||||
CK_RV e =
|
||||
c->sym->C_GetMechanismList((CK_SLOT_ID) slotID, NULL, mechlen);
|
||||
// Gemaltos PKCS11 implementation returns CKR_BUFFER_TOO_SMALL on a NULL ptr instad of CKR_OK as the spec states.
|
||||
if (e != CKR_OK && e != CKR_BUFFER_TOO_SMALL) {
|
||||
if (e != CKR_OK) {
|
||||
return e;
|
||||
}
|
||||
*mech = calloc(*mechlen, sizeof(CK_MECHANISM_TYPE));
|
||||
@ -236,22 +222,18 @@ CK_RV Logout(struct ctx * c, CK_SESSION_HANDLE session)
|
||||
}
|
||||
|
||||
CK_RV CreateObject(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckAttrPtr temp, CK_ULONG tempCount,
|
||||
CK_ATTRIBUTE_PTR temp, CK_ULONG tempCount,
|
||||
CK_OBJECT_HANDLE_PTR obj)
|
||||
{
|
||||
ATTR_TO_C(tempc, temp, tempCount, NULL);
|
||||
CK_RV e = c->sym->C_CreateObject(session, tempc, tempCount, obj);
|
||||
ATTR_FREE(tempc);
|
||||
CK_RV e = c->sym->C_CreateObject(session, temp, tempCount, obj);
|
||||
return e;
|
||||
}
|
||||
|
||||
CK_RV CopyObject(struct ctx * c, CK_SESSION_HANDLE session, CK_OBJECT_HANDLE o,
|
||||
ckAttrPtr temp, CK_ULONG tempCount,
|
||||
CK_ATTRIBUTE_PTR temp, CK_ULONG tempCount,
|
||||
CK_OBJECT_HANDLE_PTR obj)
|
||||
{
|
||||
ATTR_TO_C(tempc, temp, tempCount, NULL);
|
||||
CK_RV e = c->sym->C_CopyObject(session, o, tempc, tempCount, obj);
|
||||
ATTR_FREE(tempc);
|
||||
CK_RV e = c->sym->C_CopyObject(session, o, temp, tempCount, obj);
|
||||
return e;
|
||||
}
|
||||
|
||||
@ -270,47 +252,39 @@ CK_RV GetObjectSize(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
}
|
||||
|
||||
CK_RV GetAttributeValue(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
CK_OBJECT_HANDLE object, ckAttrPtr temp,
|
||||
CK_OBJECT_HANDLE object, CK_ATTRIBUTE_PTR temp,
|
||||
CK_ULONG templen)
|
||||
{
|
||||
ATTR_TO_C(tempc, temp, templen, NULL);
|
||||
// Call for the first time, check the returned ulValue in the attributes, then
|
||||
// allocate enough space and try again.
|
||||
CK_RV e = c->sym->C_GetAttributeValue(session, object, tempc, templen);
|
||||
CK_RV e = c->sym->C_GetAttributeValue(session, object, temp, templen);
|
||||
if (e != CKR_OK) {
|
||||
ATTR_FREE(tempc);
|
||||
return e;
|
||||
}
|
||||
CK_ULONG i;
|
||||
for (i = 0; i < templen; i++) {
|
||||
if ((CK_LONG) tempc[i].ulValueLen == -1) {
|
||||
if ((CK_LONG) temp[i].ulValueLen == -1) {
|
||||
// either access denied or no such object
|
||||
continue;
|
||||
}
|
||||
tempc[i].pValue = calloc(tempc[i].ulValueLen, sizeof(CK_BYTE));
|
||||
temp[i].pValue = calloc(temp[i].ulValueLen, sizeof(CK_BYTE));
|
||||
}
|
||||
e = c->sym->C_GetAttributeValue(session, object, tempc, templen);
|
||||
ATTR_FROM_C(temp, tempc, templen);
|
||||
ATTR_FREE(tempc);
|
||||
e = c->sym->C_GetAttributeValue(session, object, temp, templen);
|
||||
return e;
|
||||
}
|
||||
|
||||
CK_RV SetAttributeValue(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
CK_OBJECT_HANDLE object, ckAttrPtr temp,
|
||||
CK_OBJECT_HANDLE object, CK_ATTRIBUTE_PTR temp,
|
||||
CK_ULONG templen)
|
||||
{
|
||||
ATTR_TO_C(tempc, temp, templen, NULL);
|
||||
CK_RV e = c->sym->C_SetAttributeValue(session, object, tempc, templen);
|
||||
ATTR_FREE(tempc);
|
||||
CK_RV e = c->sym->C_SetAttributeValue(session, object, temp, templen);
|
||||
return e;
|
||||
}
|
||||
|
||||
CK_RV FindObjectsInit(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckAttrPtr temp, CK_ULONG tempCount)
|
||||
CK_ATTRIBUTE_PTR temp, CK_ULONG tempCount)
|
||||
{
|
||||
ATTR_TO_C(tempc, temp, tempCount, NULL);
|
||||
CK_RV e = c->sym->C_FindObjectsInit(session, tempc, tempCount);
|
||||
ATTR_FREE(tempc);
|
||||
CK_RV e = c->sym->C_FindObjectsInit(session, temp, tempCount);
|
||||
return e;
|
||||
}
|
||||
|
||||
@ -330,10 +304,9 @@ CK_RV FindObjectsFinal(struct ctx * c, CK_SESSION_HANDLE session)
|
||||
}
|
||||
|
||||
CK_RV EncryptInit(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckMechPtr mechanism, CK_OBJECT_HANDLE key)
|
||||
CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key)
|
||||
{
|
||||
MECH_TO_C(m, mechanism);
|
||||
CK_RV e = c->sym->C_EncryptInit(session, m, key);
|
||||
CK_RV e = c->sym->C_EncryptInit(session, mechanism, key);
|
||||
return e;
|
||||
}
|
||||
|
||||
@ -386,10 +359,9 @@ CK_RV EncryptFinal(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
}
|
||||
|
||||
CK_RV DecryptInit(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckMechPtr mechanism, CK_OBJECT_HANDLE key)
|
||||
CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key)
|
||||
{
|
||||
MECH_TO_C(m, mechanism);
|
||||
CK_RV e = c->sym->C_DecryptInit(session, m, key);
|
||||
CK_RV e = c->sym->C_DecryptInit(session, mechanism, key);
|
||||
return e;
|
||||
}
|
||||
|
||||
@ -442,10 +414,9 @@ CK_RV DecryptFinal(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
}
|
||||
|
||||
CK_RV DigestInit(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckMechPtr mechanism)
|
||||
CK_MECHANISM_PTR mechanism)
|
||||
{
|
||||
MECH_TO_C(m, mechanism);
|
||||
CK_RV e = c->sym->C_DigestInit(session, m);
|
||||
CK_RV e = c->sym->C_DigestInit(session, mechanism);
|
||||
return e;
|
||||
}
|
||||
|
||||
@ -493,10 +464,9 @@ CK_RV DigestFinal(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR * hash,
|
||||
}
|
||||
|
||||
CK_RV SignInit(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckMechPtr mechanism, CK_OBJECT_HANDLE key)
|
||||
CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE key)
|
||||
{
|
||||
MECH_TO_C(m, mechanism);
|
||||
CK_RV e = c->sym->C_SignInit(session, m, key);
|
||||
CK_RV e = c->sym->C_SignInit(session, mechanism, key);
|
||||
return e;
|
||||
}
|
||||
|
||||
@ -538,10 +508,9 @@ CK_RV SignFinal(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR * sig,
|
||||
}
|
||||
|
||||
CK_RV SignRecoverInit(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckMechPtr mech, CK_OBJECT_HANDLE key)
|
||||
CK_MECHANISM_PTR mech, CK_OBJECT_HANDLE key)
|
||||
{
|
||||
MECH_TO_C(m, mech);
|
||||
CK_RV rv = c->sym->C_SignRecoverInit(session, m, key);
|
||||
CK_RV rv = c->sym->C_SignRecoverInit(session, mech, key);
|
||||
return rv;
|
||||
}
|
||||
|
||||
@ -561,10 +530,9 @@ CK_RV SignRecover(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR data,
|
||||
}
|
||||
|
||||
CK_RV VerifyInit(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckMechPtr mech, CK_OBJECT_HANDLE key)
|
||||
CK_MECHANISM_PTR mech, CK_OBJECT_HANDLE key)
|
||||
{
|
||||
MECH_TO_C(m, mech);
|
||||
CK_RV rv = c->sym->C_VerifyInit(session, m, key);
|
||||
CK_RV rv = c->sym->C_VerifyInit(session, mech, key);
|
||||
return rv;
|
||||
}
|
||||
|
||||
@ -590,10 +558,9 @@ CK_RV VerifyFinal(struct ctx * c, CK_SESSION_HANDLE session, CK_BYTE_PTR sig,
|
||||
}
|
||||
|
||||
CK_RV VerifyRecoverInit(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckMechPtr mech, CK_OBJECT_HANDLE key)
|
||||
CK_MECHANISM_PTR mech, CK_OBJECT_HANDLE key)
|
||||
{
|
||||
MECH_TO_C(m, mech);
|
||||
CK_RV rv = c->sym->C_VerifyRecoverInit(session, m, key);
|
||||
CK_RV rv = c->sym->C_VerifyRecoverInit(session, mech, key);
|
||||
return rv;
|
||||
}
|
||||
|
||||
@ -686,39 +653,33 @@ CK_RV DecryptVerifyUpdate(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
}
|
||||
|
||||
CK_RV GenerateKey(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckMechPtr mechanism, ckAttrPtr temp,
|
||||
CK_MECHANISM_PTR mechanism, CK_ATTRIBUTE_PTR temp,
|
||||
CK_ULONG tempCount, CK_OBJECT_HANDLE_PTR key)
|
||||
{
|
||||
MECH_TO_C(m, mechanism);
|
||||
ATTR_TO_C(tempc, temp, tempCount, NULL);
|
||||
CK_RV e = c->sym->C_GenerateKey(session, m, tempc, tempCount, key);
|
||||
ATTR_FREE(tempc);
|
||||
CK_RV e =
|
||||
c->sym->C_GenerateKey(session, mechanism, temp, tempCount, key);
|
||||
return e;
|
||||
}
|
||||
|
||||
CK_RV GenerateKeyPair(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckMechPtr mechanism, ckAttrPtr pub,
|
||||
CK_ULONG pubCount, ckAttrPtr priv,
|
||||
CK_MECHANISM_PTR mechanism, CK_ATTRIBUTE_PTR pub,
|
||||
CK_ULONG pubCount, CK_ATTRIBUTE_PTR priv,
|
||||
CK_ULONG privCount, CK_OBJECT_HANDLE_PTR pubkey,
|
||||
CK_OBJECT_HANDLE_PTR privkey)
|
||||
{
|
||||
MECH_TO_C(m, mechanism);
|
||||
ATTR_TO_C(pubc, pub, pubCount, NULL);
|
||||
ATTR_TO_C(privc, priv, privCount, pubc);
|
||||
CK_RV e = c->sym->C_GenerateKeyPair(session, m, pubc, pubCount,
|
||||
privc, privCount, pubkey, privkey);
|
||||
ATTR_FREE(pubc);
|
||||
ATTR_FREE(privc);
|
||||
CK_RV e =
|
||||
c->sym->C_GenerateKeyPair(session, mechanism, pub, pubCount, priv,
|
||||
privCount,
|
||||
pubkey, privkey);
|
||||
return e;
|
||||
}
|
||||
|
||||
CK_RV WrapKey(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckMechPtr mechanism, CK_OBJECT_HANDLE wrappingkey,
|
||||
CK_MECHANISM_PTR mechanism, CK_OBJECT_HANDLE wrappingkey,
|
||||
CK_OBJECT_HANDLE key, CK_BYTE_PTR * wrapped,
|
||||
CK_ULONG_PTR wrappedlen)
|
||||
{
|
||||
MECH_TO_C(m, mechanism);
|
||||
CK_RV rv = c->sym->C_WrapKey(session, m, wrappingkey, key, NULL,
|
||||
CK_RV rv = c->sym->C_WrapKey(session, mechanism, wrappingkey, key, NULL,
|
||||
wrappedlen);
|
||||
if (rv != CKR_OK) {
|
||||
return rv;
|
||||
@ -727,32 +688,26 @@ CK_RV WrapKey(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
if (*wrapped == NULL) {
|
||||
return CKR_HOST_MEMORY;
|
||||
}
|
||||
rv = c->sym->C_WrapKey(session, m, wrappingkey, key, *wrapped,
|
||||
rv = c->sym->C_WrapKey(session, mechanism, wrappingkey, key, *wrapped,
|
||||
wrappedlen);
|
||||
return rv;
|
||||
}
|
||||
|
||||
CK_RV DeriveKey(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckMechPtr mech, CK_OBJECT_HANDLE basekey,
|
||||
ckAttrPtr a, CK_ULONG alen, CK_OBJECT_HANDLE_PTR key)
|
||||
CK_MECHANISM_PTR mech, CK_OBJECT_HANDLE basekey,
|
||||
CK_ATTRIBUTE_PTR a, CK_ULONG alen, CK_OBJECT_HANDLE_PTR key)
|
||||
{
|
||||
MECH_TO_C(m, mech);
|
||||
ATTR_TO_C(tempc, a, alen, NULL);
|
||||
CK_RV e = c->sym->C_DeriveKey(session, m, basekey, tempc, alen, key);
|
||||
ATTR_FREE(tempc);
|
||||
CK_RV e = c->sym->C_DeriveKey(session, mech, basekey, a, alen, key);
|
||||
return e;
|
||||
}
|
||||
|
||||
CK_RV UnwrapKey(struct ctx * c, CK_SESSION_HANDLE session,
|
||||
ckMechPtr mech, CK_OBJECT_HANDLE unwrappingkey,
|
||||
CK_MECHANISM_PTR mech, CK_OBJECT_HANDLE unwrappingkey,
|
||||
CK_BYTE_PTR wrappedkey, CK_ULONG wrappedkeylen,
|
||||
ckAttrPtr a, CK_ULONG alen, CK_OBJECT_HANDLE_PTR key)
|
||||
CK_ATTRIBUTE_PTR a, CK_ULONG alen, CK_OBJECT_HANDLE_PTR key)
|
||||
{
|
||||
MECH_TO_C(m, mech);
|
||||
ATTR_TO_C(tempc, a, alen, NULL);
|
||||
CK_RV e = c->sym->C_UnwrapKey(session, m, unwrappingkey, wrappedkey,
|
||||
wrappedkeylen, tempc, alen, key);
|
||||
ATTR_FREE(tempc);
|
||||
CK_RV e = c->sym->C_UnwrapKey(session, mech, unwrappingkey, wrappedkey,
|
||||
wrappedkeylen, a, alen, key);
|
||||
return e;
|
||||
}
|
||||
|
||||
@ -780,38 +735,6 @@ CK_RV WaitForSlotEvent(struct ctx * c, CK_FLAGS flags, CK_ULONG_PTR slot)
|
||||
c->sym->C_WaitForSlotEvent(flags, (CK_SLOT_ID_PTR) slot, NULL);
|
||||
return e;
|
||||
}
|
||||
|
||||
#ifdef REPACK_STRUCTURES
|
||||
|
||||
CK_RV attrsToC(CK_ATTRIBUTE_PTR *attrOut, ckAttrPtr attrIn, CK_ULONG count) {
|
||||
CK_ATTRIBUTE_PTR attr = calloc(count, sizeof(CK_ATTRIBUTE));
|
||||
if (attr == NULL) {
|
||||
return CKR_HOST_MEMORY;
|
||||
}
|
||||
for (int i = 0; i < count; i++) {
|
||||
attr[i].type = attrIn[i].type;
|
||||
attr[i].pValue = attrIn[i].pValue;
|
||||
attr[i].ulValueLen = attrIn[i].ulValueLen;
|
||||
}
|
||||
*attrOut = attr;
|
||||
return CKR_OK;
|
||||
}
|
||||
|
||||
void attrsFromC(ckAttrPtr attrOut, CK_ATTRIBUTE_PTR attrIn, CK_ULONG count) {
|
||||
for (int i = 0; i < count; i++) {
|
||||
attrOut[i].type = attrIn[i].type;
|
||||
attrOut[i].pValue = attrIn[i].pValue;
|
||||
attrOut[i].ulValueLen = attrIn[i].ulValueLen;
|
||||
}
|
||||
}
|
||||
|
||||
void mechToC(CK_MECHANISM_PTR mechOut, ckMechPtr mechIn) {
|
||||
mechOut->mechanism = mechIn->mechanism;
|
||||
mechOut->pParameter = mechIn->pParameter;
|
||||
mechOut->ulParameterLen = mechIn->ulParameterLen;
|
||||
}
|
||||
|
||||
#endif
|
||||
*/
|
||||
import "C"
|
||||
import "strings"
|
||||
@ -825,11 +748,6 @@ type Ctx struct {
|
||||
|
||||
// New creates a new context and initializes the module/library for use.
|
||||
func New(module string) *Ctx {
|
||||
// libtool-ltdl will return an assertion error if passed an empty string, so
|
||||
// we check for it explicitly.
|
||||
if module == "" {
|
||||
return nil
|
||||
}
|
||||
c := new(Ctx)
|
||||
mod := C.CString(module)
|
||||
defer C.free(unsafe.Pointer(mod))
|
||||
@ -851,7 +769,8 @@ func (c *Ctx) Destroy() {
|
||||
|
||||
/* Initialize initializes the Cryptoki library. */
|
||||
func (c *Ctx) Initialize() error {
|
||||
e := C.Initialize(c.ctx)
|
||||
args := &C.CK_C_INITIALIZE_ARGS{nil, nil, nil, nil, C.CKF_OS_LOCKING_OK, nil}
|
||||
e := C.Initialize(c.ctx, C.CK_VOID_PTR(args))
|
||||
return toError(e)
|
||||
}
|
||||
|
||||
@ -866,8 +785,8 @@ func (c *Ctx) Finalize() error {
|
||||
|
||||
/* GetInfo returns general information about Cryptoki. */
|
||||
func (c *Ctx) GetInfo() (Info, error) {
|
||||
var p C.ckInfo
|
||||
e := C.GetInfo(c.ctx, &p)
|
||||
var p C.CK_INFO
|
||||
e := C.GetInfo(c.ctx, C.CK_INFO_PTR(&p))
|
||||
i := Info{
|
||||
CryptokiVersion: toVersion(p.cryptokiVersion),
|
||||
ManufacturerID: strings.TrimRight(string(C.GoBytes(unsafe.Pointer(&p.manufacturerID[0]), 32)), " "),
|
||||
@ -1122,11 +1041,11 @@ func (c *Ctx) GetObjectSize(sh SessionHandle, oh ObjectHandle) (uint, error) {
|
||||
func (c *Ctx) GetAttributeValue(sh SessionHandle, o ObjectHandle, a []*Attribute) ([]*Attribute, error) {
|
||||
// copy the attribute list and make all the values nil, so that
|
||||
// the C function can (allocate) fill them in
|
||||
pa := make([]C.ckAttr, len(a))
|
||||
pa := make([]C.CK_ATTRIBUTE, len(a))
|
||||
for i := 0; i < len(a); i++ {
|
||||
pa[i]._type = C.CK_ATTRIBUTE_TYPE(a[i].Type)
|
||||
}
|
||||
e := C.GetAttributeValue(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(o), C.ckAttrPtr(&pa[0]), C.CK_ULONG(len(a)))
|
||||
e := C.GetAttributeValue(c.ctx, C.CK_SESSION_HANDLE(sh), C.CK_OBJECT_HANDLE(o), C.CK_ATTRIBUTE_PTR(&pa[0]), C.CK_ULONG(len(a)))
|
||||
if toError(e) != nil {
|
||||
return nil, toError(e)
|
||||
}
|
||||
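From the Go side, the two-call pattern in `GetAttributeValue` above (the first call learns each value's length, the second fetches the bytes) is driven roughly like this; the helper name, session and object handle are illustrative and would normally come from New/OpenSession/FindObjects:

```go
package example

import (
	"fmt"

	"github.com/miekg/pkcs11"
)

// dumpAttributes reads the label and key type of an object handle. The caller
// supplies an initialized context, an open session and an object handle.
func dumpAttributes(p *pkcs11.Ctx, session pkcs11.SessionHandle, obj pkcs11.ObjectHandle) error {
	template := []*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_LABEL, nil),
		pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, nil),
	}
	attrs, err := p.GetAttributeValue(session, obj, template)
	if err != nil {
		return err
	}
	for _, a := range attrs {
		fmt.Printf("attribute type %d, %d bytes\n", a.Type, len(a.Value))
	}
	return nil
}
```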
@ -1610,7 +1529,7 @@ func (c *Ctx) UnwrapKey(sh SessionHandle, m []*Mechanism, unwrappingkey ObjectHa
|
||||
return ObjectHandle(key), toError(e)
|
||||
}
|
||||
|
||||
// DeriveKey derives a key from a base key, creating a new key object.
|
||||
// DeriveKey derives a key from a base key, creating a new key object. */
|
||||
func (c *Ctx) DeriveKey(sh SessionHandle, m []*Mechanism, basekey ObjectHandle, a []*Attribute) (ObjectHandle, error) {
|
||||
var key C.CK_OBJECT_HANDLE
|
||||
attrarena, ac, aclen := cAttributeList(a)
|
||||
|
||||
86
components/cli/vendor/github.com/miekg/pkcs11/pkcs11.h
generated
vendored
@ -1,12 +1,19 @@
|
||||
/* Copyright (c) OASIS Open 2016. All Rights Reserved./
|
||||
* /Distributed under the terms of the OASIS IPR Policy,
|
||||
* [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY
|
||||
* IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A
|
||||
* PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others.
|
||||
*/
|
||||
|
||||
/* Latest version of the specification:
|
||||
* http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html
|
||||
/* pkcs11.h include file for PKCS #11. */
|
||||
/* $Revision: 1.2 $ */
|
||||
|
||||
/* License to copy and use this software is granted provided that it is
|
||||
* identified as "RSA Security Inc. PKCS #11 Cryptographic Token Interface
|
||||
* (Cryptoki)" in all material mentioning or referencing this software.
|
||||
|
||||
* License is also granted to make and use derivative works provided that
|
||||
* such works are identified as "derived from the RSA Security Inc. PKCS #11
|
||||
* Cryptographic Token Interface (Cryptoki)" in all material mentioning or
|
||||
* referencing the derived work.
|
||||
|
||||
* RSA Security Inc. makes no representations concerning either the
|
||||
* merchantability of this software or the suitability of this software for
|
||||
* any particular purpose. It is provided "as is" without express or implied
|
||||
* warranty of any kind.
|
||||
*/
|
||||
|
||||
#ifndef _PKCS11_H_
|
||||
@ -17,14 +24,14 @@ extern "C" {
|
||||
#endif
|
||||
|
||||
/* Before including this file (pkcs11.h) (or pkcs11t.h by
|
||||
* itself), 5 platform-specific macros must be defined. These
|
||||
* itself), 6 platform-specific macros must be defined. These
|
||||
* macros are described below, and typical definitions for them
|
||||
* are also given. Be advised that these definitions can depend
|
||||
* on both the platform and the compiler used (and possibly also
|
||||
* on whether a Cryptoki library is linked statically or
|
||||
* dynamically).
|
||||
*
|
||||
* In addition to defining these 5 macros, the packing convention
|
||||
* In addition to defining these 6 macros, the packing convention
|
||||
* for Cryptoki structures should be set. The Cryptoki
|
||||
* convention on packing is that structures should be 1-byte
|
||||
* aligned.
|
||||
@ -74,7 +81,39 @@ extern "C" {
|
||||
* #define CK_PTR *
|
||||
*
|
||||
*
|
||||
* 2. CK_DECLARE_FUNCTION(returnType, name): A macro which makes
|
||||
* 2. CK_DEFINE_FUNCTION(returnType, name): A macro which makes
|
||||
* an exportable Cryptoki library function definition out of a
|
||||
* return type and a function name. It should be used in the
|
||||
* following fashion to define the exposed Cryptoki functions in
|
||||
* a Cryptoki library:
|
||||
*
|
||||
* CK_DEFINE_FUNCTION(CK_RV, C_Initialize)(
|
||||
* CK_VOID_PTR pReserved
|
||||
* )
|
||||
* {
|
||||
* ...
|
||||
* }
|
||||
*
|
||||
* If you're using Microsoft Developer Studio 5.0 to define a
|
||||
* function in a Win32 Cryptoki .dll, it might be defined by:
|
||||
*
|
||||
* #define CK_DEFINE_FUNCTION(returnType, name) \
|
||||
* returnType __declspec(dllexport) name
|
||||
*
|
||||
* If you're using an earlier version of Microsoft Developer
|
||||
* Studio to define a function in a Win16 Cryptoki .dll, it
|
||||
* might be defined by:
|
||||
*
|
||||
* #define CK_DEFINE_FUNCTION(returnType, name) \
|
||||
* returnType __export _far _pascal name
|
||||
*
|
||||
* In a UNIX environment, it might be defined by:
|
||||
*
|
||||
* #define CK_DEFINE_FUNCTION(returnType, name) \
|
||||
* returnType name
|
||||
*
|
||||
*
|
||||
* 3. CK_DECLARE_FUNCTION(returnType, name): A macro which makes
|
||||
* an importable Cryptoki library function declaration out of a
|
||||
* return type and a function name. It should be used in the
|
||||
* following fashion:
|
||||
@ -102,7 +141,7 @@ extern "C" {
|
||||
* returnType name
|
||||
*
|
||||
*
|
||||
* 3. CK_DECLARE_FUNCTION_POINTER(returnType, name): A macro
|
||||
* 4. CK_DECLARE_FUNCTION_POINTER(returnType, name): A macro
|
||||
* which makes a Cryptoki API function pointer declaration or
|
||||
* function pointer type declaration out of a return type and a
|
||||
* function name. It should be used in the following fashion:
|
||||
@ -139,7 +178,7 @@ extern "C" {
|
||||
* returnType (* name)
|
||||
*
|
||||
*
|
||||
* 4. CK_CALLBACK_FUNCTION(returnType, name): A macro which makes
|
||||
* 5. CK_CALLBACK_FUNCTION(returnType, name): A macro which makes
|
||||
* a function pointer type for an application callback out of
|
||||
* a return type for the callback and a name for the callback.
|
||||
* It should be used in the following fashion:
|
||||
@ -171,7 +210,7 @@ extern "C" {
|
||||
* returnType (* name)
|
||||
*
|
||||
*
|
||||
* 5. NULL_PTR: This macro is the value of a NULL pointer.
|
||||
* 6. NULL_PTR: This macro is the value of a NULL pointer.
|
||||
*
|
||||
* In any ANSI/ISO C environment (and in many others as well),
|
||||
* this should best be defined by
|
||||
@ -183,8 +222,7 @@ extern "C" {
|
||||
|
||||
|
||||
/* All the various Cryptoki types and #define'd values are in the
|
||||
* file pkcs11t.h.
|
||||
*/
|
||||
* file pkcs11t.h. */
|
||||
#include "pkcs11t.h"
|
||||
|
||||
#define __PASTE(x,y) x##y
|
||||
@ -200,8 +238,7 @@ extern "C" {
|
||||
extern CK_DECLARE_FUNCTION(CK_RV, name)
|
||||
|
||||
/* pkcs11f.h has all the information about the Cryptoki
|
||||
* function prototypes.
|
||||
*/
|
||||
* function prototypes. */
|
||||
#include "pkcs11f.h"
|
||||
|
||||
#undef CK_NEED_ARG_LIST
|
||||
@ -220,8 +257,7 @@ extern "C" {
|
||||
typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, __PASTE(CK_,name))
|
||||
|
||||
/* pkcs11f.h has all the information about the Cryptoki
|
||||
* function prototypes.
|
||||
*/
|
||||
* function prototypes. */
|
||||
#include "pkcs11f.h"
|
||||
|
||||
#undef CK_NEED_ARG_LIST
|
||||
@ -239,15 +275,14 @@ extern "C" {
|
||||
|
||||
#define CK_PKCS11_FUNCTION_INFO(name) \
|
||||
__PASTE(CK_,name) name;
|
||||
|
||||
|
||||
struct CK_FUNCTION_LIST {
|
||||
|
||||
CK_VERSION version; /* Cryptoki version */
|
||||
|
||||
/* Pile all the function pointers into the CK_FUNCTION_LIST. */
|
||||
/* pkcs11f.h has all the information about the Cryptoki
|
||||
* function prototypes.
|
||||
*/
|
||||
* function prototypes. */
|
||||
#include "pkcs11f.h"
|
||||
|
||||
};
|
||||
@ -261,5 +296,4 @@ struct CK_FUNCTION_LIST {
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _PKCS11_H_ */
|
||||
|
||||
#endif
|
||||
|
||||
229
components/cli/vendor/github.com/miekg/pkcs11/pkcs11f.h
generated
vendored
@ -1,20 +1,26 @@
|
||||
/* Copyright (c) OASIS Open 2016. All Rights Reserved./
|
||||
* /Distributed under the terms of the OASIS IPR Policy,
|
||||
* [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY
|
||||
* IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A
|
||||
* PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others.
|
||||
*/
|
||||
|
||||
/* Latest version of the specification:
|
||||
* http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html
|
||||
/* pkcs11f.h include file for PKCS #11. */
|
||||
/* $Revision: 1.2 $ */
|
||||
|
||||
/* License to copy and use this software is granted provided that it is
|
||||
* identified as "RSA Security Inc. PKCS #11 Cryptographic Token Interface
|
||||
* (Cryptoki)" in all material mentioning or referencing this software.
|
||||
|
||||
* License is also granted to make and use derivative works provided that
|
||||
* such works are identified as "derived from the RSA Security Inc. PKCS #11
|
||||
* Cryptographic Token Interface (Cryptoki)" in all material mentioning or
|
||||
* referencing the derived work.
|
||||
|
||||
* RSA Security Inc. makes no representations concerning either the
|
||||
* merchantability of this software or the suitability of this software for
|
||||
* any particular purpose. It is provided "as is" without express or implied
|
||||
* warranty of any kind.
|
||||
*/
|
||||
|
||||
/* This header file contains pretty much everything about all the
|
||||
* Cryptoki function prototypes. Because this information is
|
||||
* used for more than just declaring function prototypes, the
|
||||
* order of the functions appearing herein is important, and
|
||||
* should not be altered.
|
||||
*/
|
||||
/* This header file contains pretty much everything about all the */
|
||||
/* Cryptoki function prototypes. Because this information is */
|
||||
/* used for more than just declaring function prototypes, the */
|
||||
/* order of the functions appearing herein is important, and */
|
||||
/* should not be altered. */
|
||||
|
||||
/* General-purpose */
|
||||
|
||||
@ -24,15 +30,13 @@ CK_PKCS11_FUNCTION_INFO(C_Initialize)
|
||||
(
|
||||
CK_VOID_PTR pInitArgs /* if this is not NULL_PTR, it gets
|
||||
* cast to CK_C_INITIALIZE_ARGS_PTR
|
||||
* and dereferenced
|
||||
*/
|
||||
* and dereferenced */
|
||||
);
|
||||
#endif
|
||||
|
||||
|
||||
/* C_Finalize indicates that an application is done with the
|
||||
* Cryptoki library.
|
||||
*/
|
||||
* Cryptoki library. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_Finalize)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -55,8 +59,7 @@ CK_PKCS11_FUNCTION_INFO(C_GetFunctionList)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
CK_FUNCTION_LIST_PTR_PTR ppFunctionList /* receives pointer to
|
||||
* function list
|
||||
*/
|
||||
* function list */
|
||||
);
|
||||
#endif
|
||||
|
||||
@ -68,7 +71,7 @@ CK_PKCS11_FUNCTION_INFO(C_GetFunctionList)
|
||||
CK_PKCS11_FUNCTION_INFO(C_GetSlotList)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
CK_BBOOL tokenPresent, /* only slots with tokens */
|
||||
CK_BBOOL tokenPresent, /* only slots with tokens? */
|
||||
CK_SLOT_ID_PTR pSlotList, /* receives array of slot IDs */
|
||||
CK_ULONG_PTR pulCount /* receives number of slots */
|
||||
);
|
||||
@ -76,8 +79,7 @@ CK_PKCS11_FUNCTION_INFO(C_GetSlotList)
|
||||
|
||||
|
||||
/* C_GetSlotInfo obtains information about a particular slot in
|
||||
* the system.
|
||||
*/
|
||||
* the system. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_GetSlotInfo)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -88,8 +90,7 @@ CK_PKCS11_FUNCTION_INFO(C_GetSlotInfo)
|
||||
|
||||
|
||||
/* C_GetTokenInfo obtains information about a particular token
|
||||
* in the system.
|
||||
*/
|
||||
* in the system. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_GetTokenInfo)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -100,8 +101,7 @@ CK_PKCS11_FUNCTION_INFO(C_GetTokenInfo)
|
||||
|
||||
|
||||
/* C_GetMechanismList obtains a list of mechanism types
|
||||
* supported by a token.
|
||||
*/
|
||||
* supported by a token. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_GetMechanismList)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -113,8 +113,7 @@ CK_PKCS11_FUNCTION_INFO(C_GetMechanismList)
|
||||
|
||||
|
||||
/* C_GetMechanismInfo obtains information about a particular
|
||||
* mechanism possibly supported by a token.
|
||||
*/
|
||||
* mechanism possibly supported by a token. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_GetMechanismInfo)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -128,6 +127,7 @@ CK_PKCS11_FUNCTION_INFO(C_GetMechanismInfo)
|
||||
/* C_InitToken initializes a token. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_InitToken)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
/* pLabel changed from CK_CHAR_PTR to CK_UTF8CHAR_PTR for v2.10 */
|
||||
(
|
||||
CK_SLOT_ID slotID, /* ID of the token's slot */
|
||||
CK_UTF8CHAR_PTR pPin, /* the SO's initial PIN */
|
||||
@ -165,8 +165,7 @@ CK_PKCS11_FUNCTION_INFO(C_SetPIN)
|
||||
/* Session management */
|
||||
|
||||
/* C_OpenSession opens a session between an application and a
|
||||
* token.
|
||||
*/
|
||||
* token. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_OpenSession)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -180,8 +179,7 @@ CK_PKCS11_FUNCTION_INFO(C_OpenSession)
|
||||
|
||||
|
||||
/* C_CloseSession closes a session between an application and a
|
||||
* token.
|
||||
*/
|
||||
* token. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_CloseSession)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -189,7 +187,6 @@ CK_PKCS11_FUNCTION_INFO(C_CloseSession)
|
||||
);
|
||||
#endif
|
||||
|
||||
|
||||
/* C_CloseAllSessions closes all sessions with a token. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_CloseAllSessions)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
@ -210,8 +207,7 @@ CK_PKCS11_FUNCTION_INFO(C_GetSessionInfo)
|
||||
|
||||
|
||||
/* C_GetOperationState obtains the state of the cryptographic operation
|
||||
* in a session.
|
||||
*/
|
||||
* in a session. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_GetOperationState)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -223,8 +219,7 @@ CK_PKCS11_FUNCTION_INFO(C_GetOperationState)
|
||||
|
||||
|
||||
/* C_SetOperationState restores the state of the cryptographic
|
||||
* operation in a session.
|
||||
*/
|
||||
* operation in a session. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_SetOperationState)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -272,10 +267,8 @@ CK_PKCS11_FUNCTION_INFO(C_CreateObject)
|
||||
);
|
||||
#endif
|
||||
|
||||
|
||||
/* C_CopyObject copies an object, creating a new object for the
|
||||
* copy.
|
||||
*/
|
||||
* copy. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_CopyObject)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -310,8 +303,7 @@ CK_PKCS11_FUNCTION_INFO(C_GetObjectSize)
|
||||
|
||||
|
||||
/* C_GetAttributeValue obtains the value of one or more object
|
||||
* attributes.
|
||||
*/
|
||||
* attributes. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_GetAttributeValue)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -324,8 +316,7 @@ CK_PKCS11_FUNCTION_INFO(C_GetAttributeValue)
|
||||
|
||||
|
||||
/* C_SetAttributeValue modifies the value of one or more object
|
||||
* attributes.
|
||||
*/
|
||||
* attributes */
|
||||
CK_PKCS11_FUNCTION_INFO(C_SetAttributeValue)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -338,8 +329,7 @@ CK_PKCS11_FUNCTION_INFO(C_SetAttributeValue)
|
||||
|
||||
|
||||
/* C_FindObjectsInit initializes a search for token and session
|
||||
* objects that match a template.
|
||||
*/
|
||||
* objects that match a template. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_FindObjectsInit)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -352,8 +342,7 @@ CK_PKCS11_FUNCTION_INFO(C_FindObjectsInit)
|
||||
|
||||
/* C_FindObjects continues a search for token and session
|
||||
* objects that match a template, obtaining additional object
|
||||
* handles.
|
||||
*/
|
||||
* handles. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_FindObjects)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -366,8 +355,7 @@ CK_PKCS11_FUNCTION_INFO(C_FindObjects)
|
||||
|
||||
|
||||
/* C_FindObjectsFinal finishes a search for token and session
|
||||
* objects.
|
||||
*/
|
||||
* objects. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_FindObjectsFinal)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -404,8 +392,7 @@ CK_PKCS11_FUNCTION_INFO(C_Encrypt)
|
||||
|
||||
|
||||
/* C_EncryptUpdate continues a multiple-part encryption
|
||||
* operation.
|
||||
*/
|
||||
* operation. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_EncryptUpdate)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -419,8 +406,7 @@ CK_PKCS11_FUNCTION_INFO(C_EncryptUpdate)
|
||||
|
||||
|
||||
/* C_EncryptFinal finishes a multiple-part encryption
|
||||
* operation.
|
||||
*/
|
||||
* operation. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_EncryptFinal)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -456,8 +442,7 @@ CK_PKCS11_FUNCTION_INFO(C_Decrypt)
|
||||
|
||||
|
||||
/* C_DecryptUpdate continues a multiple-part decryption
|
||||
* operation.
|
||||
*/
|
||||
* operation. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_DecryptUpdate)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -471,8 +456,7 @@ CK_PKCS11_FUNCTION_INFO(C_DecryptUpdate)
|
||||
|
||||
|
||||
/* C_DecryptFinal finishes a multiple-part decryption
|
||||
* operation.
|
||||
*/
|
||||
* operation. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_DecryptFinal)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -510,8 +494,7 @@ CK_PKCS11_FUNCTION_INFO(C_Digest)
|
||||
|
||||
|
||||
/* C_DigestUpdate continues a multiple-part message-digesting
|
||||
* operation.
|
||||
*/
|
||||
* operation. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_DigestUpdate)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -524,8 +507,7 @@ CK_PKCS11_FUNCTION_INFO(C_DigestUpdate)
|
||||
|
||||
/* C_DigestKey continues a multi-part message-digesting
|
||||
* operation, by digesting the value of a secret key as part of
|
||||
* the data already digested.
|
||||
*/
|
||||
* the data already digested. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_DigestKey)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -536,8 +518,7 @@ CK_PKCS11_FUNCTION_INFO(C_DigestKey)
|
||||
|
||||
|
||||
/* C_DigestFinal finishes a multiple-part message-digesting
|
||||
* operation.
|
||||
*/
|
||||
* operation. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_DigestFinal)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -554,8 +535,7 @@ CK_PKCS11_FUNCTION_INFO(C_DigestFinal)
|
||||
/* C_SignInit initializes a signature (private key encryption)
|
||||
* operation, where the signature is (will be) an appendix to
|
||||
* the data, and plaintext cannot be recovered from the
|
||||
* signature.
|
||||
*/
|
||||
*signature. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_SignInit)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -568,8 +548,7 @@ CK_PKCS11_FUNCTION_INFO(C_SignInit)
|
||||
|
||||
/* C_Sign signs (encrypts with private key) data in a single
|
||||
* part, where the signature is (will be) an appendix to the
|
||||
* data, and plaintext cannot be recovered from the signature.
|
||||
*/
|
||||
* data, and plaintext cannot be recovered from the signature. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_Sign)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -583,9 +562,8 @@ CK_PKCS11_FUNCTION_INFO(C_Sign)
|
||||
|
||||
|
||||
/* C_SignUpdate continues a multiple-part signature operation,
|
||||
* where the signature is (will be) an appendix to the data,
|
||||
* and plaintext cannot be recovered from the signature.
|
||||
*/
|
||||
* where the signature is (will be) an appendix to the data,
|
||||
* and plaintext cannot be recovered from the signature. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_SignUpdate)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -596,9 +574,8 @@ CK_PKCS11_FUNCTION_INFO(C_SignUpdate)
|
||||
#endif
|
||||
|
||||
|
||||
/* C_SignFinal finishes a multiple-part signature operation,
|
||||
* returning the signature.
|
||||
*/
|
||||
/* C_SignFinal finishes a multiple-part signature operation,
|
||||
* returning the signature. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_SignFinal)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -610,8 +587,7 @@ CK_PKCS11_FUNCTION_INFO(C_SignFinal)
|
||||
|
||||
|
||||
/* C_SignRecoverInit initializes a signature operation, where
|
||||
* the data can be recovered from the signature.
|
||||
*/
|
||||
* the data can be recovered from the signature. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_SignRecoverInit)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -623,8 +599,7 @@ CK_PKCS11_FUNCTION_INFO(C_SignRecoverInit)
|
||||
|
||||
|
||||
/* C_SignRecover signs data in a single operation, where the
|
||||
* data can be recovered from the signature.
|
||||
*/
|
||||
* data can be recovered from the signature. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_SignRecover)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -642,22 +617,20 @@ CK_PKCS11_FUNCTION_INFO(C_SignRecover)
|
||||
|
||||
/* C_VerifyInit initializes a verification operation, where the
|
||||
* signature is an appendix to the data, and plaintext cannot
|
||||
* cannot be recovered from the signature (e.g. DSA).
|
||||
*/
|
||||
* cannot be recovered from the signature (e.g. DSA). */
|
||||
CK_PKCS11_FUNCTION_INFO(C_VerifyInit)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
CK_SESSION_HANDLE hSession, /* the session's handle */
|
||||
CK_MECHANISM_PTR pMechanism, /* the verification mechanism */
|
||||
CK_OBJECT_HANDLE hKey /* verification key */
|
||||
CK_OBJECT_HANDLE hKey /* verification key */
|
||||
);
|
||||
#endif
|
||||
|
||||
|
||||
/* C_Verify verifies a signature in a single-part operation,
|
||||
/* C_Verify verifies a signature in a single-part operation,
|
||||
* where the signature is an appendix to the data, and plaintext
|
||||
* cannot be recovered from the signature.
|
||||
*/
|
||||
* cannot be recovered from the signature. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_Verify)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -671,9 +644,8 @@ CK_PKCS11_FUNCTION_INFO(C_Verify)
|
||||
|
||||
|
||||
/* C_VerifyUpdate continues a multiple-part verification
|
||||
* operation, where the signature is an appendix to the data,
|
||||
* and plaintext cannot be recovered from the signature.
|
||||
*/
|
||||
* operation, where the signature is an appendix to the data,
|
||||
* and plaintext cannot be recovered from the signature. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_VerifyUpdate)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -685,8 +657,7 @@ CK_PKCS11_FUNCTION_INFO(C_VerifyUpdate)
|
||||
|
||||
|
||||
/* C_VerifyFinal finishes a multiple-part verification
|
||||
* operation, checking the signature.
|
||||
*/
|
||||
* operation, checking the signature. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_VerifyFinal)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -698,8 +669,7 @@ CK_PKCS11_FUNCTION_INFO(C_VerifyFinal)
|
||||
|
||||
|
||||
/* C_VerifyRecoverInit initializes a signature verification
|
||||
* operation, where the data is recovered from the signature.
|
||||
*/
|
||||
* operation, where the data is recovered from the signature. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_VerifyRecoverInit)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -711,8 +681,7 @@ CK_PKCS11_FUNCTION_INFO(C_VerifyRecoverInit)
|
||||
|
||||
|
||||
/* C_VerifyRecover verifies a signature in a single-part
|
||||
* operation, where the data is recovered from the signature.
|
||||
*/
|
||||
* operation, where the data is recovered from the signature. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_VerifyRecover)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -729,8 +698,7 @@ CK_PKCS11_FUNCTION_INFO(C_VerifyRecover)
|
||||
/* Dual-function cryptographic operations */
|
||||
|
||||
/* C_DigestEncryptUpdate continues a multiple-part digesting
|
||||
* and encryption operation.
|
||||
*/
|
||||
* and encryption operation. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_DigestEncryptUpdate)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -744,8 +712,7 @@ CK_PKCS11_FUNCTION_INFO(C_DigestEncryptUpdate)
|
||||
|
||||
|
||||
/* C_DecryptDigestUpdate continues a multiple-part decryption and
|
||||
* digesting operation.
|
||||
*/
|
||||
* digesting operation. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_DecryptDigestUpdate)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -759,8 +726,7 @@ CK_PKCS11_FUNCTION_INFO(C_DecryptDigestUpdate)
|
||||
|
||||
|
||||
/* C_SignEncryptUpdate continues a multiple-part signing and
|
||||
* encryption operation.
|
||||
*/
|
||||
* encryption operation. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_SignEncryptUpdate)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -774,8 +740,7 @@ CK_PKCS11_FUNCTION_INFO(C_SignEncryptUpdate)
|
||||
|
||||
|
||||
/* C_DecryptVerifyUpdate continues a multiple-part decryption and
|
||||
* verify operation.
|
||||
*/
|
||||
* verify operation. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_DecryptVerifyUpdate)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -792,8 +757,7 @@ CK_PKCS11_FUNCTION_INFO(C_DecryptVerifyUpdate)
|
||||
/* Key management */
|
||||
|
||||
/* C_GenerateKey generates a secret key, creating a new key
|
||||
* object.
|
||||
*/
|
||||
* object. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_GenerateKey)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -806,20 +770,31 @@ CK_PKCS11_FUNCTION_INFO(C_GenerateKey)
|
||||
#endif
|
||||
|
||||
|
||||
/* C_GenerateKeyPair generates a public-key/private-key pair,
|
||||
* creating new key objects.
|
||||
*/
|
||||
/* C_GenerateKeyPair generates a public-key/private-key pair,
|
||||
* creating new key objects. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_GenerateKeyPair)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
CK_SESSION_HANDLE hSession, /* session handle */
|
||||
CK_MECHANISM_PTR pMechanism, /* key-gen mech. */
|
||||
CK_ATTRIBUTE_PTR pPublicKeyTemplate, /* template for pub. key */
|
||||
CK_ULONG ulPublicKeyAttributeCount, /* # pub. attrs. */
|
||||
CK_ATTRIBUTE_PTR pPrivateKeyTemplate, /* template for priv. key */
|
||||
CK_ULONG ulPrivateKeyAttributeCount, /* # priv. attrs. */
|
||||
CK_OBJECT_HANDLE_PTR phPublicKey, /* gets pub. key handle */
|
||||
CK_OBJECT_HANDLE_PTR phPrivateKey /* gets priv. key handle */
|
||||
CK_SESSION_HANDLE hSession, /* session
|
||||
* handle */
|
||||
CK_MECHANISM_PTR pMechanism, /* key-gen
|
||||
* mech. */
|
||||
CK_ATTRIBUTE_PTR pPublicKeyTemplate, /* template
|
||||
* for pub.
|
||||
* key */
|
||||
CK_ULONG ulPublicKeyAttributeCount, /* # pub.
|
||||
* attrs. */
|
||||
CK_ATTRIBUTE_PTR pPrivateKeyTemplate, /* template
|
||||
* for priv.
|
||||
* key */
|
||||
CK_ULONG ulPrivateKeyAttributeCount, /* # priv.
|
||||
* attrs. */
|
||||
CK_OBJECT_HANDLE_PTR phPublicKey, /* gets pub.
|
||||
* key
|
||||
* handle */
|
||||
CK_OBJECT_HANDLE_PTR phPrivateKey /* gets
|
||||
* priv. key
|
||||
* handle */
|
||||
);
|
||||
#endif
|
||||
|
||||
@ -839,8 +814,7 @@ CK_PKCS11_FUNCTION_INFO(C_WrapKey)
|
||||
|
||||
|
||||
/* C_UnwrapKey unwraps (decrypts) a wrapped key, creating a new
|
||||
* key object.
|
||||
*/
|
||||
* key object. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_UnwrapKey)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -857,8 +831,7 @@ CK_PKCS11_FUNCTION_INFO(C_UnwrapKey)
|
||||
|
||||
|
||||
/* C_DeriveKey derives a key from a base key, creating a new key
|
||||
* object.
|
||||
*/
|
||||
* object. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_DeriveKey)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -876,8 +849,7 @@ CK_PKCS11_FUNCTION_INFO(C_DeriveKey)
|
||||
/* Random number generation */
|
||||
|
||||
/* C_SeedRandom mixes additional seed material into the token's
|
||||
* random number generator.
|
||||
*/
|
||||
* random number generator. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_SeedRandom)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -904,8 +876,7 @@ CK_PKCS11_FUNCTION_INFO(C_GenerateRandom)
|
||||
|
||||
/* C_GetFunctionStatus is a legacy function; it obtains an
|
||||
* updated status of a function running in parallel with an
|
||||
* application.
|
||||
*/
|
||||
* application. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_GetFunctionStatus)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -915,8 +886,7 @@ CK_PKCS11_FUNCTION_INFO(C_GetFunctionStatus)
|
||||
|
||||
|
||||
/* C_CancelFunction is a legacy function; it cancels a function
|
||||
* running in parallel.
|
||||
*/
|
||||
* running in parallel. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_CancelFunction)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -925,9 +895,11 @@ CK_PKCS11_FUNCTION_INFO(C_CancelFunction)
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
/* Functions added in for Cryptoki Version 2.01 or later */
|
||||
|
||||
/* C_WaitForSlotEvent waits for a slot event (token insertion,
|
||||
* removal, etc.) to occur.
|
||||
*/
|
||||
* removal, etc.) to occur. */
|
||||
CK_PKCS11_FUNCTION_INFO(C_WaitForSlotEvent)
|
||||
#ifdef CK_NEED_ARG_LIST
|
||||
(
|
||||
@ -936,4 +908,3 @@ CK_PKCS11_FUNCTION_INFO(C_WaitForSlotEvent)
|
||||
CK_VOID_PTR pRserved /* reserved. Should be NULL_PTR */
|
||||
);
|
||||
#endif
|
||||
|
||||
|
||||
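The hunks above reflow the comments on the C_Sign*/C_Verify* and dual-function entry points of the vendored pkcs11f.h header. The Go wrapper that ships alongside it in this vendor tree (github.com/miekg/pkcs11) exposes these entry points as methods of the same names. As a rough, hedged sketch of how the single-part verification flow documented in those comments is typically driven from Go — the module path, key label, and mechanism below are illustrative assumptions, not values taken from this diff:

```go
package main

import (
    "log"

    "github.com/miekg/pkcs11"
)

func main() {
    // The module path is an assumption for illustration (e.g. SoftHSM).
    p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
    if err := p.Initialize(); err != nil {
        log.Fatal(err)
    }
    defer p.Destroy()
    defer p.Finalize()

    slots, err := p.GetSlotList(true)
    if err != nil || len(slots) == 0 {
        log.Fatal("no usable slot")
    }
    session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION)
    if err != nil {
        log.Fatal(err)
    }
    defer p.CloseSession(session)

    // Look up a public key by label (hypothetical label).
    if err := p.FindObjectsInit(session, []*pkcs11.Attribute{
        pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PUBLIC_KEY),
        pkcs11.NewAttribute(pkcs11.CKA_LABEL, "my-signing-key"),
    }); err != nil {
        log.Fatal(err)
    }
    keys, _, err := p.FindObjects(session, 1)
    p.FindObjectsFinal(session)
    if err != nil || len(keys) == 0 {
        log.Fatal("verification key not found")
    }

    data := []byte("message to be verified")
    signature := loadSignature() // produced elsewhere by the C_Sign* functions

    // C_VerifyInit followed by a single-part C_Verify, as documented above.
    mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA256_RSA_PKCS, nil)}
    if err := p.VerifyInit(session, mech, keys[0]); err != nil {
        log.Fatal(err)
    }
    if err := p.Verify(session, data, signature); err != nil {
        log.Fatal("signature rejected: ", err)
    }
    log.Println("signature verified")
}

// loadSignature is a placeholder; a real caller would read the signature bytes
// produced earlier by a signing operation.
func loadSignature() []byte { return nil }
```

Multi-part verification follows the same shape with VerifyUpdate/VerifyFinal, mirroring the C_VerifyUpdate and C_VerifyFinal declarations above.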
83
components/cli/vendor/github.com/miekg/pkcs11/pkcs11go.h
generated
vendored
@ -1,83 +0,0 @@
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
//
|
||||
|
||||
#define CK_PTR *
|
||||
#ifndef NULL_PTR
|
||||
#define NULL_PTR 0
|
||||
#endif
|
||||
#define CK_DEFINE_FUNCTION(returnType, name) returnType name
|
||||
#define CK_DECLARE_FUNCTION(returnType, name) returnType name
|
||||
#define CK_DECLARE_FUNCTION_POINTER(returnType, name) returnType (* name)
|
||||
#define CK_CALLBACK_FUNCTION(returnType, name) returnType (* name)
|
||||
|
||||
#include <unistd.h>
|
||||
#ifdef REPACK_STRUCTURES
|
||||
# pragma pack(push, 1)
|
||||
# include "pkcs11.h"
|
||||
# pragma pack(pop)
|
||||
#else
|
||||
# include "pkcs11.h"
|
||||
#endif
|
||||
|
||||
#ifdef REPACK_STRUCTURES
|
||||
|
||||
// Go doesn't support structures with non-default packing, but PKCS#11 requires
|
||||
// pack(1) on Windows. Use structures with the same members as the CK_ ones but
|
||||
// default packing, and copy data between the two.
|
||||
|
||||
typedef struct ckInfo {
|
||||
CK_VERSION cryptokiVersion;
|
||||
CK_UTF8CHAR manufacturerID[32];
|
||||
CK_FLAGS flags;
|
||||
CK_UTF8CHAR libraryDescription[32];
|
||||
CK_VERSION libraryVersion;
|
||||
} ckInfo, *ckInfoPtr;
|
||||
|
||||
typedef struct ckAttr {
|
||||
CK_ATTRIBUTE_TYPE type;
|
||||
CK_VOID_PTR pValue;
|
||||
CK_ULONG ulValueLen;
|
||||
} ckAttr, *ckAttrPtr;
|
||||
|
||||
typedef struct ckMech {
|
||||
CK_MECHANISM_TYPE mechanism;
|
||||
CK_VOID_PTR pParameter;
|
||||
CK_ULONG ulParameterLen;
|
||||
} ckMech, *ckMechPtr;
|
||||
|
||||
CK_RV attrsToC(CK_ATTRIBUTE_PTR *attrOut, ckAttrPtr attrIn, CK_ULONG count);
|
||||
void attrsFromC(ckAttrPtr attrOut, CK_ATTRIBUTE_PTR attrIn, CK_ULONG count);
|
||||
void mechToC(CK_MECHANISM_PTR mechOut, ckMechPtr mechIn);
|
||||
|
||||
#define ATTR_TO_C(aout, ain, count, other) \
|
||||
CK_ATTRIBUTE_PTR aout; \
|
||||
{ \
|
||||
CK_RV e = attrsToC(&aout, ain, count); \
|
||||
if (e != CKR_OK ) { \
|
||||
if (other != NULL) free(other); \
|
||||
return e; \
|
||||
} \
|
||||
}
|
||||
#define ATTR_FREE(aout) free(aout)
|
||||
#define ATTR_FROM_C(aout, ain, count) attrsFromC(aout, ain, count)
|
||||
#define MECH_TO_C(mout, min) \
|
||||
CK_MECHANISM mval, *mout = &mval; \
|
||||
if (min != NULL) { mechToC(mout, min); \
|
||||
} else { mout = NULL; }
|
||||
|
||||
#else // REPACK_STRUCTURES
|
||||
|
||||
// Dummy types and macros to avoid any unnecessary copying on UNIX
|
||||
|
||||
typedef CK_INFO ckInfo, *ckInfoPtr;
|
||||
typedef CK_ATTRIBUTE ckAttr, *ckAttrPtr;
|
||||
typedef CK_MECHANISM ckMech, *ckMechPtr;
|
||||
|
||||
#define ATTR_TO_C(aout, ain, count, other) CK_ATTRIBUTE_PTR aout = ain
|
||||
#define ATTR_FREE(aout)
|
||||
#define ATTR_FROM_C(aout, ain, count)
|
||||
#define MECH_TO_C(mout, min) CK_MECHANISM_PTR mout = min
|
||||
|
||||
#endif // REPACK_STRUCTURES
|
||||
2020
components/cli/vendor/github.com/miekg/pkcs11/pkcs11t.h
generated
vendored
File diff suppressed because it is too large
31
components/cli/vendor/github.com/miekg/pkcs11/types.go
generated
vendored
@ -5,9 +5,18 @@
package pkcs11

/*
#define CK_PTR *
#ifndef NULL_PTR
#define NULL_PTR 0
#endif
#define CK_DEFINE_FUNCTION(returnType, name) returnType name
#define CK_DECLARE_FUNCTION(returnType, name) returnType name
#define CK_DECLARE_FUNCTION_POINTER(returnType, name) returnType (* name)
#define CK_CALLBACK_FUNCTION(returnType, name) returnType (* name)

#include <stdlib.h>
#include <string.h>
#include "pkcs11go.h"
#include "pkcs11.h"

CK_ULONG Index(CK_ULONG_PTR array, CK_ULONG i)
{
@ -187,22 +196,20 @@ func NewAttribute(typ uint, x interface{}) *Attribute {
}

// cAttribute returns the start address and the length of an attribute list.
func cAttributeList(a []*Attribute) (arena, C.ckAttrPtr, C.CK_ULONG) {
func cAttributeList(a []*Attribute) (arena, C.CK_ATTRIBUTE_PTR, C.CK_ULONG) {
    var arena arena
    if len(a) == 0 {
        return nil, nil, 0
    }
    pa := make([]C.ckAttr, len(a))
    pa := make([]C.CK_ATTRIBUTE, len(a))
    for i := 0; i < len(a); i++ {
        pa[i]._type = C.CK_ATTRIBUTE_TYPE(a[i].Type)
        //skip attribute if length is 0 to prevent panic in arena.Allocate
        if a[i].Value == nil || len(a[i].Value) == 0 {
        if a[i].Value == nil {
            continue
        }

        pa[i].pValue, pa[i].ulValueLen = arena.Allocate(a[i].Value)
    }
    return arena, C.ckAttrPtr(&pa[0]), C.CK_ULONG(len(a))
    return arena, C.CK_ATTRIBUTE_PTR(&pa[0]), C.CK_ULONG(len(a))
}

func cDate(t time.Time) []byte {
@ -236,22 +243,20 @@ func NewMechanism(mech uint, x interface{}) *Mechanism {
    return m
}

func cMechanismList(m []*Mechanism) (arena, C.ckMechPtr, C.CK_ULONG) {
func cMechanismList(m []*Mechanism) (arena, C.CK_MECHANISM_PTR, C.CK_ULONG) {
    var arena arena
    if len(m) == 0 {
        return nil, nil, 0
    }
    pm := make([]C.ckMech, len(m))
    pm := make([]C.CK_MECHANISM, len(m))
    for i := 0; i < len(m); i++ {
        pm[i].mechanism = C.CK_MECHANISM_TYPE(m[i].Mechanism)
        //skip parameter if length is 0 to prevent panic in arena.Allocate
        if m[i].Parameter == nil || len(m[i].Parameter) == 0 {
        if m[i].Parameter == nil {
            continue
        }

        pm[i].pParameter, pm[i].ulParameterLen = arena.Allocate(m[i].Parameter)
    }
    return arena, C.ckMechPtr(&pm[0]), C.CK_ULONG(len(m))
    return arena, C.CK_MECHANISM_PTR(&pm[0]), C.CK_ULONG(len(m))
}

// MechanismInfo provides information about a particular mechanism.

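The hunks above switch cAttributeList and cMechanismList from the repacked ckAttr/ckMech helper types to the plain CK_ATTRIBUTE/CK_MECHANISM ones, and they drop the old skip for zero-length values, so an attribute with a non-nil but empty Value is now allocated instead of being passed as a NULL pointer. Callers are unaffected: templates are still built with NewAttribute and handed to the session functions, which run them through these helpers. A minimal, hedged sketch of that caller side (the key label and parameters are illustrative assumptions, and the session is assumed to be opened as in the earlier sketch):

```go
package example

import "github.com/miekg/pkcs11"

// GenerateSessionAESKey shows how an attribute template built with NewAttribute
// flows through cAttributeList/cMechanismList into C_GenerateKey.
// The label and key size are illustrative, not taken from this diff.
func GenerateSessionAESKey(p *pkcs11.Ctx, session pkcs11.SessionHandle) (pkcs11.ObjectHandle, error) {
    template := []*pkcs11.Attribute{
        pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_SECRET_KEY),
        pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_AES),
        pkcs11.NewAttribute(pkcs11.CKA_VALUE_LEN, 32), // 256-bit key
        pkcs11.NewAttribute(pkcs11.CKA_TOKEN, false),  // session object only
        pkcs11.NewAttribute(pkcs11.CKA_ENCRYPT, true),
        pkcs11.NewAttribute(pkcs11.CKA_DECRYPT, true),
        pkcs11.NewAttribute(pkcs11.CKA_LABEL, "example-aes-key"), // hypothetical label
    }
    mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_AES_KEY_GEN, nil)}
    // Both arguments are converted to CK_ structures by the helpers patched above.
    return p.GenerateKey(session, mech, template)
}
```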
10
components/cli/vendor/github.com/opencontainers/runc/README.md
generated
vendored
@ -41,18 +41,8 @@ make
sudo make install
```

You can also use `go get` to install to your `GOPATH`, assuming that you have a `github.com` parent folder already created under `src`:

```bash
go get github.com/opencontainers/runc
cd $GOPATH/src/github.com/opencontainers/runc
make
sudo make install
```

`runc` will be installed to `/usr/local/sbin/runc` on your system.


#### Build Tags

`runc` supports optional build tags for compiling support of various features.

2
components/cli/vendor/github.com/opencontainers/runc/vendor.conf
generated
vendored
@ -21,5 +21,5 @@ github.com/urfave/cli d53eb991652b1d438abdd34ce4bfa3ef1539108e
golang.org/x/sys 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce https://github.com/golang/sys

# console dependencies
github.com/containerd/console 2748ece16665b45a47f884001d5831ec79703880
github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e
github.com/pkg/errors v0.8.0

43
components/cli/vendor/github.com/theupdateframework/notary/README.md
generated
vendored
@ -1,8 +1,6 @@
<img src="docs/images/notary-blk.svg" alt="Notary" width="400px"/>

[](https://godoc.org/github.com/theupdateframework/notary)
[](https://circleci.com/gh/theupdateframework/notary/tree/master) [](https://codecov.io/github/theupdateframework/notary) [](https://goreportcard.com/report/github.com/theupdateframework/notary)
[](https://app.fossa.io/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary?ref=badge_shield)

# Notice

@ -16,7 +14,7 @@ location via GitHub's redirect.
# Overview

The Notary project comprises a [server](cmd/notary-server) and a [client](cmd/notary) for running and interacting
with trusted collections. See the [service architecture](docs/service_architecture.md) documentation
with trusted collections. Please see the [service architecture](docs/service_architecture.md) documentation
for more information.

Notary aims to make the internet more secure by making it easy for people to
@ -46,26 +44,26 @@ Notary is based on [The Update Framework](https://www.theupdateframework.com/),

## Security

See Notary's [service architecture docs](docs/service_architecture.md#threat-model) for more information about our threat model, which details the varying survivability and severities for key compromise as well as mitigations.
Please see our [service architecture docs](docs/service_architecture.md#threat-model) for more information about our threat model, which details the varying survivability and severities for key compromise as well as mitigations.

Notary's last security audit was on July 31, 2015 by NCC ([results](docs/resources/ncc_docker_notary_audit_2015_07_31.pdf)).
Our last security audit was on July 31, 2015 by NCC ([results](docs/resources/ncc_docker_notary_audit_2015_07_31.pdf)).

Any security vulnerabilities can be reported to security@docker.com.

# Getting started with the Notary CLI

Get the Notary Client CLI binary from [the official releases page](https://github.com/theupdateframework/notary/releases) or you can [build one yourself](#building-notary).
Please get the Notary Client CLI binary from [the official releases page](https://github.com/theupdateframework/notary/releases) or you can [build one yourself](#building-notary).
The version of Notary server and signer should be greater than or equal to Notary CLI's version to ensure feature compatibility (ex: CLI version 0.2, server/signer version >= 0.2), and all official releases are associated with GitHub tags.

To use the Notary CLI with Docker hub images, have a look at Notary's
To use the Notary CLI with Docker hub images, please have a look at our
[getting started docs](docs/getting_started.md).

For more advanced usage, see the
For more advanced usage, please see the
[advanced usage docs](docs/advanced_usage.md).

To use the CLI against a local Notary server rather than against Docker Hub:

1. Ensure that you have [docker and docker-compose](http://docs.docker.com/compose/install/) installed.
1. Please ensure that you have [docker and docker-compose](http://docs.docker.com/compose/install/) installed.
1. `git clone https://github.com/theupdateframework/notary.git` and from the cloned repository path,
   start up a local Notary server and signer and copy the config file and testing certs to your
   local notary config directory:
@ -91,31 +89,28 @@ to use `notary` with Docker images.

## Building Notary

Note that Notary's [latest stable release](https://github.com/theupdateframework/notary/releases) is at the head of the
Note that our [latest stable release](https://github.com/theupdateframework/notary/releases) is at the head of the
[releases branch](https://github.com/theupdateframework/notary/tree/releases). The master branch is the development
branch and contains features for the next release.

Prerequisites:

- Go >= 1.7.1
  - Fedora: `dnf install golang`
- [godep](https://github.com/tools/godep) installed
- libtool development headers installed
  - Ubuntu: `apt-get install libltdl-dev`
  - CentOS/RedHat: `yum install libtool-ltdl-devel`
  - Fedora: `dnf install libtool-ltdl-devel`
  - Mac OS ([Homebrew](http://brew.sh/)): `brew install libtool`

Set [```GOPATH```](https://golang.org/doc/code.html#GOPATH). Then, run:

```bash
$ go get github.com/theupdateframework/notary
# build with pcks11 support by default to support yubikey
$ go install -tags pkcs11 github.com/theupdateframework/notary/cmd/notary
$ notary
Run `make client`, which creates the Notary Client CLI binary at `bin/notary`.
Note that `make client` assumes a standard Go directory structure, in which
Notary is checked out to the `src` directory in your `GOPATH`. For example:
```
$GOPATH/
    src/
        github.com/
            docker/
                notary/
```

To build the server and signer, run `docker-compose build`.


## License
[](https://app.fossa.io/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary?ref=badge_large)
To build the server and signer, please run `docker-compose build`.

86
components/cli/vendor/github.com/theupdateframework/notary/client/client.go
generated
vendored
@ -1,4 +1,88 @@
|
||||
//Package client implements everything required for interacting with a Notary repository.
|
||||
/*
|
||||
Package client implements everything required for interacting with a Notary repository.
|
||||
|
||||
Usage
|
||||
|
||||
Use this package by creating a new repository object and calling methods on it.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/registry/client/auth"
|
||||
"github.com/docker/distribution/registry/client/auth/challenge"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
notary "github.com/theupdateframework/notary/client"
|
||||
"github.com/theupdateframework/notary/trustpinning"
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
)
|
||||
|
||||
func main() {
|
||||
rootDir := ".trust"
|
||||
if err := os.MkdirAll(rootDir, 0700); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
server := "https://notary.docker.io"
|
||||
image := "docker.io/library/alpine"
|
||||
repo, err := notary.NewFileCachedNotaryRepository(
|
||||
rootDir,
|
||||
data.GUN(image),
|
||||
server,
|
||||
makeHubTransport(server, image),
|
||||
nil,
|
||||
trustpinning.TrustPinConfig{},
|
||||
)
|
||||
|
||||
targets, err := repo.ListTargets()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
for _, tgt := range targets {
|
||||
fmt.Printf("%s\t%s\n", tgt.Name, hex.EncodeToString(tgt.Hashes["sha256"]))
|
||||
}
|
||||
}
|
||||
|
||||
func makeHubTransport(server, image string) http.RoundTripper {
|
||||
base := http.DefaultTransport
|
||||
modifiers := []transport.RequestModifier{
|
||||
transport.NewHeaderRequestModifier(http.Header{
|
||||
"User-Agent": []string{"my-client"},
|
||||
}),
|
||||
}
|
||||
|
||||
authTransport := transport.NewTransport(base, modifiers...)
|
||||
pingClient := &http.Client{
|
||||
Transport: authTransport,
|
||||
Timeout: 5 * time.Second,
|
||||
}
|
||||
req, err := http.NewRequest("GET", server+"/v2/", nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
challengeManager := challenge.NewSimpleManager()
|
||||
resp, err := pingClient.Do(req)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if err := challengeManager.AddResponse(resp); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
tokenHandler := auth.NewTokenHandler(base, nil, image, "pull")
|
||||
modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, auth.NewBasicHandler(nil)))
|
||||
|
||||
return transport.NewTransport(base, modifiers...)
|
||||
}
|
||||
|
||||
*/
|
||||
package client
|
||||
|
||||
import (
|
||||
|
||||
10
components/cli/vendor/github.com/theupdateframework/notary/trustmanager/keys.go
generated
vendored
@ -3,7 +3,6 @@ package trustmanager
import (
    "encoding/pem"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "path/filepath"
@ -101,9 +100,8 @@ func ImportKeys(from io.Reader, to []Importer, fallbackRole string, fallbackGUN
        return err
    }
    var (
        writeTo string
        toWrite []byte
        errBlocks []string
        writeTo string
        toWrite []byte
    )
    for block, rest := pem.Decode(data); block != nil; block, rest = pem.Decode(rest) {
        handleLegacyPath(block)
@ -112,7 +110,6 @@ func ImportKeys(from io.Reader, to []Importer, fallbackRole string, fallbackGUN
        loc, err := checkValidity(block)
        if err != nil {
            // already logged in checkValidity
            errBlocks = append(errBlocks, err.Error())
            continue
        }

@ -160,9 +157,6 @@ func ImportKeys(from io.Reader, to []Importer, fallbackRole string, fallbackGUN
    if toWrite != nil { // close out final iteration if there's data left
        return importToStores(to, writeTo, toWrite)
    }
    if len(errBlocks) > 0 {
        return fmt.Errorf("failed to import all keys: %s", strings.Join(errBlocks, ", "))
    }
    return nil
}


@ -4,9 +4,7 @@ package yubikey

var possiblePkcs11Libs = []string{
    "/usr/lib/libykcs11.so",
    "/usr/lib/libykcs11.so.1", // yubico-piv-tool on Fedora installs here
    "/usr/lib64/libykcs11.so",
    "/usr/lib64/libykcs11.so.1", // yubico-piv-tool on Fedora installs here
    "/usr/lib/x86_64-linux-gnu/libykcs11.so",
    "/usr/local/lib/libykcs11.so",
}

11
components/cli/vendor/github.com/theupdateframework/notary/tuf/tuf.go
generated
vendored
@ -248,14 +248,17 @@ func (tr *Repo) GetDelegationRole(name data.RoleName) (data.DelegationRole, erro
    }
    // Check all public key certificates in the role for expiry
    // Currently we do not reject expired delegation keys but warn if they might expire soon or have already
    for _, pubKey := range delgRole.Keys {
    for keyID, pubKey := range delgRole.Keys {
        certFromKey, err := utils.LoadCertFromPEM(pubKey.Public())
        if err != nil {
            continue
        }
        //Don't check the delegation certificate expiry once added, use the TUF role expiry instead
        if err := utils.ValidateCertificate(certFromKey, false); err != nil {
            return err
        if err := utils.ValidateCertificate(certFromKey, true); err != nil {
            if _, ok := err.(data.ErrCertExpired); !ok {
                // do not allow other invalid cert errors
                return err
            }
            logrus.Warnf("error with delegation %s key ID %d: %s", delgRole.Name, keyID, err)
        }
    }
    foundRole = &delgRole

12
components/cli/vendor/github.com/theupdateframework/notary/vendor.conf
generated
vendored
@ -5,8 +5,8 @@ github.com/bugsnag/bugsnag-go 13fd6b8acda029830ef9904df6b63be0a83369d0
|
||||
github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
|
||||
github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
|
||||
github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
|
||||
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
|
||||
github.com/docker/go-connections 7395e3f8aa162843a74ed6d48e79627d9792ac55
|
||||
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
|
||||
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
|
||||
github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06
|
||||
github.com/dvsekhvalnov/jose2go 6387d3c1f5abd8443b223577d5a7e0f4e0e5731f # v1.2
|
||||
github.com/go-sql-driver/mysql a0583e0143b1624142adab07e0e97fe106d99561 # v1.3
|
||||
@ -15,22 +15,22 @@ github.com/jinzhu/gorm 5409931a1bb87e484d68d649af9367c207713ea2
|
||||
github.com/jinzhu/inflection 1c35d901db3da928c72a72d8458480cc9ade058f
|
||||
github.com/lib/pq 0dad96c0b94f8dee039aa40467f767467392a0af
|
||||
github.com/mattn/go-sqlite3 b4142c444a8941d0d92b0b7103a24df9cd815e42 # v1.0.0
|
||||
github.com/miekg/pkcs11 5f6e0d0dad6f472df908c8e968a98ef00c9224bb
|
||||
github.com/miekg/pkcs11 ba39b9c6300b7e0be41b115330145ef8afdff7d6
|
||||
github.com/mitchellh/go-homedir df55a15e5ce646808815381b3db47a8c66ea62f4
|
||||
github.com/prometheus/client_golang 449ccefff16c8e2b7229f6be1921ba22f62461fe
|
||||
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 # model-0.0.2-12-gfa8ad6f
|
||||
github.com/prometheus/procfs b1afdc266f54247f5dc725544f5d351a8661f502
|
||||
github.com/prometheus/common 4fdc91a58c9d3696b982e8a680f4997403132d44
|
||||
github.com/golang/protobuf c3cefd437628a0b7d31b34fe44b3a7a540e98527
|
||||
github.com/spf13/cobra 7b2c5ac9fc04fc5efafb60700713d4fa609b777b # v0.0.1
|
||||
github.com/spf13/cobra f368244301305f414206f889b1735a54cfc8bde8
|
||||
github.com/spf13/viper be5ff3e4840cf692388bde7a057595a474ef379e
|
||||
golang.org/x/crypto 76eec36fa14229c4b25bb894c2d0e591527af429
|
||||
golang.org/x/net 6a513affb38dc9788b449d59ffed099b8de18fa0
|
||||
golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993
|
||||
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
|
||||
google.golang.org/grpc 708a7f9f3283aa2d4f6132d287d78683babe55c8 # v1.0.5
|
||||
github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
|
||||
|
||||
github.com/spf13/pflag e57e3eeb33f795204c1ca35f56c44f83227c6e66 # v1.0.0
|
||||
github.com/spf13/pflag cb88ea77998c3f024757528e3305022ab50b43be
|
||||
github.com/spf13/cast 4d07383ffe94b5e5a6fa3af9211374a4507a0184
|
||||
gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420
|
||||
gopkg.in/fatih/pool.v2 cba550ebf9bce999a02e963296d4bc7a486cb715
|
||||
|
||||
2
components/cli/vendor/golang.org/x/sync/README
generated
vendored
Normal file
@ -0,0 +1,2 @@
This repository provides Go concurrency primitives in addition to the
ones provided by the language and "sync" and "sync/atomic" packages.
18
components/cli/vendor/golang.org/x/sync/README.md
generated
vendored
@ -1,18 +0,0 @@
# Go Sync

This repository provides Go concurrency primitives in addition to the
ones provided by the language and "sync" and "sync/atomic" packages.

## Download/Install

The easiest way to install is to run `go get -u golang.org/x/sync`. You can
also manually git clone the repository to `$GOPATH/src/golang.org/x/sync`.

## Report Issues / Send Patches

This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.

The main issue tracker for the sync repository is located at
https://github.com/golang/go/issues. Prefix your issue with "x/sync:" in the
subject line, so it is easy to find.
9
components/engine/.github/CODEOWNERS
vendored
@ -3,7 +3,7 @@
#
# KEEP THIS FILE SORTED. Order is important. Last match takes precedence.

builder/** @tonistiigi
builder/** @dnephin @tonistiigi
client/** @dnephin
contrib/mkimage/** @tianon
daemon/graphdriver/devmapper/** @rhvgoyal
@ -12,9 +12,10 @@ daemon/graphdriver/overlay/** @dmcgowan
daemon/graphdriver/overlay2/** @dmcgowan
daemon/graphdriver/windows/** @johnstep @jhowardmsft
daemon/logger/awslogs/** @samuelkarp
hack/** @tianon
hack/** @dnephin @tianon
hack/integration-cli-on-swarm/** @AkihiroSuda
integration-cli/** @vdemeester
integration/** @vdemeester
integration-cli/** @dnephin @vdemeester
integration/** @dnephin @vdemeester
pkg/testutil/** @dnephin
plugin/** @cpuguy83
project/** @thaJeztah

@ -32,34 +32,86 @@
|
||||
# the case. Therefore, you don't have to disable it anymore.
|
||||
#
|
||||
|
||||
FROM golang:1.9.4 AS base
|
||||
# FIXME(vdemeester) this is kept for other script depending on it to not fail right away
|
||||
# Remove this once the other scripts uses something else to detect the version
|
||||
ENV GO_VERSION 1.9.4
|
||||
FROM debian:stretch
|
||||
|
||||
# allow replacing httpredir or deb mirror
|
||||
ARG APT_MIRROR=deb.debian.org
|
||||
RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
|
||||
|
||||
FROM base AS criu
|
||||
# Install CRIU for checkpoint/restore support
|
||||
ENV CRIU_VERSION 3.6
|
||||
# Install dependancy packages specific to criu
|
||||
# Packaged dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
apparmor \
|
||||
apt-utils \
|
||||
aufs-tools \
|
||||
automake \
|
||||
bash-completion \
|
||||
binutils-mingw-w64 \
|
||||
bsdmainutils \
|
||||
btrfs-tools \
|
||||
build-essential \
|
||||
cmake \
|
||||
createrepo \
|
||||
curl \
|
||||
dpkg-sig \
|
||||
gcc-mingw-w64 \
|
||||
git \
|
||||
iptables \
|
||||
jq \
|
||||
less \
|
||||
libapparmor-dev \
|
||||
libcap-dev \
|
||||
libdevmapper-dev \
|
||||
libnet-dev \
|
||||
libnl-3-dev \
|
||||
libprotobuf-c0-dev \
|
||||
libprotobuf-dev \
|
||||
libnl-3-dev \
|
||||
libcap-dev \
|
||||
libseccomp-dev \
|
||||
libsystemd-dev \
|
||||
libtool \
|
||||
libudev-dev \
|
||||
mercurial \
|
||||
net-tools \
|
||||
pigz \
|
||||
pkg-config \
|
||||
protobuf-compiler \
|
||||
protobuf-c-compiler \
|
||||
python-protobuf \
|
||||
&& mkdir -p /usr/src/criu \
|
||||
python-backports.ssl-match-hostname \
|
||||
python-dev \
|
||||
python-mock \
|
||||
python-pip \
|
||||
python-requests \
|
||||
python-setuptools \
|
||||
python-websocket \
|
||||
python-wheel \
|
||||
tar \
|
||||
thin-provisioning-tools \
|
||||
vim \
|
||||
vim-common \
|
||||
xfsprogs \
|
||||
zip \
|
||||
--no-install-recommends \
|
||||
&& pip install awscli==1.10.15
|
||||
|
||||
# Install Go
|
||||
# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
|
||||
# will need updating, to avoid errors. Ping #docker-maintainers on IRC
|
||||
# with a heads-up.
|
||||
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
|
||||
ENV GO_VERSION 1.9.5
|
||||
RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
|
||||
| tar -xzC /usr/local
|
||||
|
||||
ENV PATH /go/bin:/usr/local/go/bin:$PATH
|
||||
ENV GOPATH /go
|
||||
|
||||
# Install CRIU for checkpoint/restore support
|
||||
ENV CRIU_VERSION 3.6
|
||||
RUN mkdir -p /usr/src/criu \
|
||||
&& curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \
|
||||
&& cd /usr/src/criu \
|
||||
&& make \
|
||||
&& make PREFIX=/opt/criu install-criu
|
||||
&& make install-criu
|
||||
|
||||
FROM base AS registry
|
||||
# Install two versions of the registry. The first is an older version that
|
||||
# only supports schema1 manifests. The second is a newer version that supports
|
||||
# both. This allows integration-cli tests to cover push/pull with both schema1
|
||||
@ -72,27 +124,35 @@ RUN set -x \
|
||||
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
|
||||
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
|
||||
go build -buildmode=pie -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
|
||||
&& case $(dpkg --print-architecture) in \
|
||||
amd64|ppc64*|s390x) \
|
||||
(cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1"); \
|
||||
GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
|
||||
go build -buildmode=pie -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
|
||||
;; \
|
||||
esac \
|
||||
&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
|
||||
&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
|
||||
go build -buildmode=pie -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
|
||||
&& rm -rf "$GOPATH"
|
||||
|
||||
# Install notary and notary-server
|
||||
ENV NOTARY_VERSION v0.5.0
|
||||
RUN set -x \
|
||||
&& export GOPATH="$(mktemp -d)" \
|
||||
&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
|
||||
&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
|
||||
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
|
||||
go build -buildmode=pie -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
|
||||
&& GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \
|
||||
go build -buildmode=pie -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
|
||||
&& rm -rf "$GOPATH"
|
||||
|
||||
|
||||
FROM base AS docker-py
|
||||
# Get the "docker-py" source so we can run their integration tests
|
||||
ENV DOCKER_PY_COMMIT 8b246db271a85d6541dc458838627e89c683e42f
|
||||
# To run integration tests docker-pycreds is required.
|
||||
RUN git clone https://github.com/docker/docker-py.git /docker-py \
|
||||
&& cd /docker-py \
|
||||
&& git checkout -q $DOCKER_PY_COMMIT
|
||||
&& git checkout -q $DOCKER_PY_COMMIT \
|
||||
&& pip install docker-pycreds==0.2.1 \
|
||||
&& pip install -r test-requirements.txt
|
||||
|
||||
# Install yamllint for validating swagger.yaml
|
||||
RUN pip install yamllint==1.5.0
|
||||
|
||||
|
||||
FROM base AS swagger
|
||||
# Install go-swagger for validating swagger.yaml
|
||||
ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb
|
||||
RUN set -x \
|
||||
@ -102,12 +162,25 @@ RUN set -x \
|
||||
&& go build -o /usr/local/bin/swagger github.com/go-swagger/go-swagger/cmd/swagger \
|
||||
&& rm -rf "$GOPATH"
|
||||
|
||||
# Set user.email so crosbymichael's in-container merge commits go smoothly
|
||||
RUN git config --global user.email 'docker-dummy@example.com'
|
||||
|
||||
# Add an unprivileged user to be used for tests which need it
|
||||
RUN groupadd -r docker
|
||||
RUN useradd --create-home --gid docker unprivilegeduser
|
||||
|
||||
VOLUME /var/lib/docker
|
||||
WORKDIR /go/src/github.com/docker/docker
|
||||
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
|
||||
|
||||
# Let us use a .bashrc file
|
||||
RUN ln -sfv $PWD/.bashrc ~/.bashrc
|
||||
# Add integration helps to bashrc
|
||||
RUN echo "source $PWD/hack/make/.integration-test-helpers" >> /etc/bash.bashrc
|
||||
|
||||
FROM base AS frozen-images
|
||||
RUN apt-get update && apt-get install -y jq ca-certificates --no-install-recommends
|
||||
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
|
||||
COPY contrib/download-frozen-image-v2.sh /
|
||||
RUN /download-frozen-image-v2.sh /docker-frozen-images \
|
||||
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
|
||||
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
|
||||
buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
|
||||
busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
|
||||
busybox:glibc@sha256:0b55a30394294ab23b9afd58fab94e61a923f5834fba7ddbae7f8e0c11ba85e6 \
|
||||
@ -115,133 +188,24 @@ RUN /download-frozen-image-v2.sh /docker-frozen-images \
|
||||
hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
|
||||
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
|
||||
|
||||
# Just a little hack so we don't have to install these deps twice, once for runc and once for dockerd
|
||||
FROM base AS runtime-dev
|
||||
RUN apt-get update && apt-get install -y \
|
||||
libapparmor-dev \
|
||||
libseccomp-dev
|
||||
# Install tomlv, vndr, runc, containerd, tini, proxy dockercli
|
||||
# Please edit hack/dockerfile/install/<name>.installer to update them.
|
||||
COPY hack/dockerfile/install hack/dockerfile/install
|
||||
RUN for i in tomlv vndr tini gometalinter proxy dockercli runc containerd; \
|
||||
do hack/dockerfile/install/install.sh $i; \
|
||||
done
|
||||
ENV PATH=/usr/local/cli:$PATH
|
||||
|
||||
|
||||
FROM base AS tomlv
|
||||
ENV INSTALL_BINARY_NAME=tomlv
|
||||
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||
RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME
|
||||
|
||||
FROM base AS vndr
|
||||
ENV INSTALL_BINARY_NAME=vndr
|
||||
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||
RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME
|
||||
|
||||
FROM base AS containerd
|
||||
RUN apt-get update && apt-get install -y btrfs-tools
|
||||
ENV INSTALL_BINARY_NAME=containerd
|
||||
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||
RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME
|
||||
|
||||
FROM base AS proxy
|
||||
ENV INSTALL_BINARY_NAME=proxy
|
||||
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||
RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME
|
||||
|
||||
FROM base AS gometalinter
|
||||
ENV INSTALL_BINARY_NAME=gometalinter
|
||||
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||
RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME
|
||||
|
||||
FROM base AS dockercli
|
||||
ENV INSTALL_BINARY_NAME=dockercli
|
||||
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||
RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME
|
||||
|
||||
FROM runtime-dev AS runc
|
||||
ENV INSTALL_BINARY_NAME=runc
|
||||
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||
RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME
|
||||
|
||||
FROM base AS tini
|
||||
RUN apt-get update && apt-get install -y cmake vim-common
|
||||
COPY hack/dockerfile/install/install.sh ./install.sh
|
||||
ENV INSTALL_BINARY_NAME=tini
|
||||
COPY hack/dockerfile/install/$INSTALL_BINARY_NAME.installer ./
|
||||
RUN PREFIX=/opt/$INSTALL_BINARY_NAME ./install.sh $INSTALL_BINARY_NAME
|
||||
|
||||
|
||||
|
||||
# TODO: Some of this is only really needed for testing, it would be nice to split this up
|
||||
FROM runtime-dev AS dev
|
||||
RUN groupadd -r docker
|
||||
RUN useradd --create-home --gid docker unprivilegeduser
|
||||
# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
|
||||
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
|
||||
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
|
||||
RUN ldconfig
|
||||
# This should only install packages that are specifically needed for the dev environment and nothing else
|
||||
# Do you really need to add another package here? Can it be done in a different build stage?
|
||||
RUN apt-get update && apt-get install -y \
|
||||
apparmor \
|
||||
aufs-tools \
|
||||
bash-completion \
|
||||
btrfs-tools \
|
||||
iptables \
|
||||
jq \
|
||||
libdevmapper-dev \
|
||||
libudev-dev \
|
||||
libsystemd-dev \
|
||||
binutils-mingw-w64 \
|
||||
g++-mingw-w64-x86-64 \
|
||||
net-tools \
|
||||
pigz \
|
||||
python-backports.ssl-match-hostname \
|
||||
python-dev \
|
||||
python-mock \
|
||||
python-pip \
|
||||
python-requests \
|
||||
python-setuptools \
|
||||
python-websocket \
|
||||
python-wheel \
|
||||
thin-provisioning-tools \
|
||||
vim \
|
||||
vim-common \
|
||||
xfsprogs \
|
||||
zip \
|
||||
bzip2 \
|
||||
xz-utils \
|
||||
--no-install-recommends
|
||||
COPY --from=swagger /usr/local/bin/swagger* /usr/local/bin/
|
||||
COPY --from=frozen-images /docker-frozen-images /docker-frozen-images
|
||||
COPY --from=gometalinter /opt/gometalinter/ /usr/local/bin/
|
||||
COPY --from=tomlv /opt/tomlv/ /usr/local/bin/
|
||||
COPY --from=vndr /opt/vndr/ /usr/local/bin/
|
||||
COPY --from=tini /opt/tini/ /usr/local/bin/
|
||||
COPY --from=runc /opt/runc/ /usr/local/bin/
|
||||
COPY --from=containerd /opt/containerd/ /usr/local/bin/
|
||||
COPY --from=proxy /opt/proxy/ /usr/local/bin/
|
||||
COPY --from=dockercli /opt/dockercli /usr/local/cli
|
||||
COPY --from=registry /usr/local/bin/registry* /usr/local/bin/
|
||||
COPY --from=criu /opt/criu/ /usr/local/
|
||||
COPY --from=docker-py /docker-py /docker-py
|
||||
# TODO: This is for the docker-py tests, which shouldn't really be needed for
|
||||
# this image, but currently CI is expecting to run this image. This should be
|
||||
# split out into a separate image, including all the `python-*` deps installed
|
||||
# above.
|
||||
RUN cd /docker-py \
|
||||
&& pip install docker-pycreds==0.2.1 \
|
||||
&& pip install -r test-requirements.txt
|
||||
|
||||
ENV PATH=/usr/local/cli:$PATH
|
||||
ENV DOCKER_BUILDTAGS apparmor seccomp selinux
|
||||
# Options for hack/validate/gometalinter
|
||||
ENV GOMETALINTER_OPTS="--deadline=2m"
|
||||
WORKDIR /go/src/github.com/docker/docker
|
||||
VOLUME /var/lib/docker
|
||||
# Wrap all commands in the "docker-in-docker" script to allow nested containers
|
||||
ENTRYPOINT ["hack/dind"]
|
||||
|
||||
# Options for hack/validate/gometalinter
|
||||
ENV GOMETALINTER_OPTS="--deadline=2m"
|
||||
|
||||
# Upload docker source
|
||||
COPY . /go/src/github.com/docker/docker
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.